diff options
Diffstat (limited to 'net')
586 files changed, 19105 insertions, 4178 deletions
diff --git a/net/802/garp.c b/net/802/garp.c index 1dcb0660c49d..941f2a324d3a 100644 --- a/net/802/garp.c +++ b/net/802/garp.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
15 | #include <linux/rtnetlink.h> | 15 | #include <linux/rtnetlink.h> |
16 | #include <linux/llc.h> | 16 | #include <linux/llc.h> |
17 | #include <linux/slab.h> | ||
17 | #include <net/llc.h> | 18 | #include <net/llc.h> |
18 | #include <net/llc_pdu.h> | 19 | #include <net/llc_pdu.h> |
19 | #include <net/garp.h> | 20 | #include <net/garp.h> |
@@ -575,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl) | |||
575 | if (!app) | 576 | if (!app) |
576 | goto err2; | 577 | goto err2; |
577 | 578 | ||
578 | err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0); | 579 | err = dev_mc_add(dev, appl->proto.group_address); |
579 | if (err < 0) | 580 | if (err < 0) |
580 | goto err3; | 581 | goto err3; |
581 | 582 | ||
@@ -615,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl | |||
615 | garp_pdu_queue(app); | 616 | garp_pdu_queue(app); |
616 | garp_queue_xmit(app); | 617 | garp_queue_xmit(app); |
617 | 618 | ||
618 | dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0); | 619 | dev_mc_del(dev, appl->proto.group_address); |
619 | kfree(app); | 620 | kfree(app); |
620 | garp_release_port(dev); | 621 | garp_release_port(dev); |
621 | } | 622 | } |
diff --git a/net/802/p8022.c b/net/802/p8022.c index 2530f35241cd..7f353c4f437a 100644 --- a/net/802/p8022.c +++ b/net/802/p8022.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
20 | #include <linux/skbuff.h> | 20 | #include <linux/skbuff.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/datalink.h> | 22 | #include <net/datalink.h> |
22 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
23 | #include <linux/in.h> | 24 | #include <linux/in.h> |
diff --git a/net/802/p8023.c b/net/802/p8023.c index 6ab1835041a7..1256a40da43c 100644 --- a/net/802/p8023.c +++ b/net/802/p8023.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
20 | #include <linux/skbuff.h> | 20 | #include <linux/skbuff.h> |
21 | #include <linux/slab.h> | ||
21 | 22 | ||
22 | #include <net/datalink.h> | 23 | #include <net/datalink.h> |
23 | #include <net/p8022.h> | 24 | #include <net/p8022.h> |
diff --git a/net/802/psnap.c b/net/802/psnap.c index 6fea0750662b..21cde8fd5795 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/slab.h> | ||
17 | #include <net/datalink.h> | 18 | #include <net/datalink.h> |
18 | #include <net/llc.h> | 19 | #include <net/llc.h> |
19 | #include <net/psnap.h> | 20 | #include <net/psnap.h> |
diff --git a/net/802/stp.c b/net/802/stp.c index 0b7a24452d11..53c8f77f0ccd 100644 --- a/net/802/stp.c +++ b/net/802/stp.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/etherdevice.h> | 12 | #include <linux/etherdevice.h> |
13 | #include <linux/llc.h> | 13 | #include <linux/llc.h> |
14 | #include <linux/slab.h> | ||
14 | #include <net/llc.h> | 15 | #include <net/llc.h> |
15 | #include <net/llc_pdu.h> | 16 | #include <net/llc_pdu.h> |
16 | #include <net/stp.h> | 17 | #include <net/stp.h> |
diff --git a/net/802/tr.c b/net/802/tr.c index 44acce47fcdc..1c6e596074df 100644 --- a/net/802/tr.c +++ b/net/802/tr.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
37 | #include <linux/init.h> | 37 | #include <linux/init.h> |
38 | #include <linux/sysctl.h> | 38 | #include <linux/sysctl.h> |
39 | #include <linux/slab.h> | ||
39 | #include <net/arp.h> | 40 | #include <net/arp.h> |
40 | #include <net/net_namespace.h> | 41 | #include <net/net_namespace.h> |
41 | 42 | ||
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 453512266ea1..3c1c8c14e929 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
24 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
25 | #include <linux/slab.h> | ||
25 | #include <linux/init.h> | 26 | #include <linux/init.h> |
26 | #include <linux/rculist.h> | 27 | #include <linux/rculist.h> |
27 | #include <net/p8022.h> | 28 | #include <net/p8022.h> |
@@ -356,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev, | |||
356 | * the new address */ | 357 | * the new address */ |
357 | if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && | 358 | if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && |
358 | !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) | 359 | !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) |
359 | dev_unicast_delete(dev, vlandev->dev_addr); | 360 | dev_uc_del(dev, vlandev->dev_addr); |
360 | 361 | ||
361 | /* vlan address was equal to the old address and is different from | 362 | /* vlan address was equal to the old address and is different from |
362 | * the new address */ | 363 | * the new address */ |
363 | if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && | 364 | if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && |
364 | compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) | 365 | compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) |
365 | dev_unicast_add(dev, vlandev->dev_addr); | 366 | dev_uc_add(dev, vlandev->dev_addr); |
366 | 367 | ||
367 | memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); | 368 | memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); |
368 | } | 369 | } |
@@ -378,6 +379,8 @@ static void vlan_transfer_features(struct net_device *dev, | |||
378 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | 379 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
379 | vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; | 380 | vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; |
380 | #endif | 381 | #endif |
382 | vlandev->real_num_tx_queues = dev->real_num_tx_queues; | ||
383 | BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues); | ||
381 | 384 | ||
382 | if (old_features != vlandev->features) | 385 | if (old_features != vlandev->features) |
383 | netdev_features_change(vlandev); | 386 | netdev_features_change(vlandev); |
@@ -530,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
530 | } | 533 | } |
531 | unregister_netdevice_many(&list); | 534 | unregister_netdevice_many(&list); |
532 | break; | 535 | break; |
536 | |||
537 | case NETDEV_PRE_TYPE_CHANGE: | ||
538 | /* Forbid underlaying device to change its type. */ | ||
539 | return NOTIFY_BAD; | ||
533 | } | 540 | } |
534 | 541 | ||
535 | out: | 542 | out: |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index c0316e0ca6e8..c584a0af77d3 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -11,7 +11,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
11 | if (netpoll_rx(skb)) | 11 | if (netpoll_rx(skb)) |
12 | return NET_RX_DROP; | 12 | return NET_RX_DROP; |
13 | 13 | ||
14 | if (skb_bond_should_drop(skb)) | 14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
15 | goto drop; | 15 | goto drop; |
16 | 16 | ||
17 | skb->skb_iif = skb->dev->ifindex; | 17 | skb->skb_iif = skb->dev->ifindex; |
@@ -83,7 +83,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
83 | { | 83 | { |
84 | struct sk_buff *p; | 84 | struct sk_buff *p; |
85 | 85 | ||
86 | if (skb_bond_should_drop(skb)) | 86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
87 | goto drop; | 87 | goto drop; |
88 | 88 | ||
89 | skb->skb_iif = skb->dev->ifindex; | 89 | skb->skb_iif = skb->dev->ifindex; |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9e83272fc5b0..b5249c5fd4d3 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -21,6 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
25 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
26 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
@@ -361,6 +362,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, | |||
361 | return ret; | 362 | return ret; |
362 | } | 363 | } |
363 | 364 | ||
365 | static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
366 | { | ||
367 | struct net_device *rdev = vlan_dev_info(dev)->real_dev; | ||
368 | const struct net_device_ops *ops = rdev->netdev_ops; | ||
369 | |||
370 | return ops->ndo_select_queue(rdev, skb); | ||
371 | } | ||
372 | |||
364 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) | 373 | static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) |
365 | { | 374 | { |
366 | /* TODO: gotta make sure the underlying layer can handle it, | 375 | /* TODO: gotta make sure the underlying layer can handle it, |
@@ -461,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev) | |||
461 | return -ENETDOWN; | 470 | return -ENETDOWN; |
462 | 471 | ||
463 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { | 472 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { |
464 | err = dev_unicast_add(real_dev, dev->dev_addr); | 473 | err = dev_uc_add(real_dev, dev->dev_addr); |
465 | if (err < 0) | 474 | if (err < 0) |
466 | goto out; | 475 | goto out; |
467 | } | 476 | } |
@@ -490,7 +499,7 @@ clear_allmulti: | |||
490 | dev_set_allmulti(real_dev, -1); | 499 | dev_set_allmulti(real_dev, -1); |
491 | del_unicast: | 500 | del_unicast: |
492 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) | 501 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) |
493 | dev_unicast_delete(real_dev, dev->dev_addr); | 502 | dev_uc_del(real_dev, dev->dev_addr); |
494 | out: | 503 | out: |
495 | netif_carrier_off(dev); | 504 | netif_carrier_off(dev); |
496 | return err; | 505 | return err; |
@@ -505,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev) | |||
505 | vlan_gvrp_request_leave(dev); | 514 | vlan_gvrp_request_leave(dev); |
506 | 515 | ||
507 | dev_mc_unsync(real_dev, dev); | 516 | dev_mc_unsync(real_dev, dev); |
508 | dev_unicast_unsync(real_dev, dev); | 517 | dev_uc_unsync(real_dev, dev); |
509 | if (dev->flags & IFF_ALLMULTI) | 518 | if (dev->flags & IFF_ALLMULTI) |
510 | dev_set_allmulti(real_dev, -1); | 519 | dev_set_allmulti(real_dev, -1); |
511 | if (dev->flags & IFF_PROMISC) | 520 | if (dev->flags & IFF_PROMISC) |
512 | dev_set_promiscuity(real_dev, -1); | 521 | dev_set_promiscuity(real_dev, -1); |
513 | 522 | ||
514 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) | 523 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) |
515 | dev_unicast_delete(real_dev, dev->dev_addr); | 524 | dev_uc_del(real_dev, dev->dev_addr); |
516 | 525 | ||
517 | netif_carrier_off(dev); | 526 | netif_carrier_off(dev); |
518 | return 0; | 527 | return 0; |
@@ -531,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p) | |||
531 | goto out; | 540 | goto out; |
532 | 541 | ||
533 | if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { | 542 | if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { |
534 | err = dev_unicast_add(real_dev, addr->sa_data); | 543 | err = dev_uc_add(real_dev, addr->sa_data); |
535 | if (err < 0) | 544 | if (err < 0) |
536 | return err; | 545 | return err; |
537 | } | 546 | } |
538 | 547 | ||
539 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) | 548 | if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) |
540 | dev_unicast_delete(real_dev, dev->dev_addr); | 549 | dev_uc_del(real_dev, dev->dev_addr); |
541 | 550 | ||
542 | out: | 551 | out: |
543 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 552 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
@@ -654,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) | |||
654 | static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) | 663 | static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) |
655 | { | 664 | { |
656 | dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); | 665 | dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); |
657 | dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); | 666 | dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); |
658 | } | 667 | } |
659 | 668 | ||
660 | /* | 669 | /* |
@@ -688,7 +697,8 @@ static const struct header_ops vlan_header_ops = { | |||
688 | .parse = eth_header_parse, | 697 | .parse = eth_header_parse, |
689 | }; | 698 | }; |
690 | 699 | ||
691 | static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; | 700 | static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops, |
701 | vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq; | ||
692 | 702 | ||
693 | static int vlan_dev_init(struct net_device *dev) | 703 | static int vlan_dev_init(struct net_device *dev) |
694 | { | 704 | { |
@@ -722,11 +732,17 @@ static int vlan_dev_init(struct net_device *dev) | |||
722 | if (real_dev->features & NETIF_F_HW_VLAN_TX) { | 732 | if (real_dev->features & NETIF_F_HW_VLAN_TX) { |
723 | dev->header_ops = real_dev->header_ops; | 733 | dev->header_ops = real_dev->header_ops; |
724 | dev->hard_header_len = real_dev->hard_header_len; | 734 | dev->hard_header_len = real_dev->hard_header_len; |
725 | dev->netdev_ops = &vlan_netdev_accel_ops; | 735 | if (real_dev->netdev_ops->ndo_select_queue) |
736 | dev->netdev_ops = &vlan_netdev_accel_ops_sq; | ||
737 | else | ||
738 | dev->netdev_ops = &vlan_netdev_accel_ops; | ||
726 | } else { | 739 | } else { |
727 | dev->header_ops = &vlan_header_ops; | 740 | dev->header_ops = &vlan_header_ops; |
728 | dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; | 741 | dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; |
729 | dev->netdev_ops = &vlan_netdev_ops; | 742 | if (real_dev->netdev_ops->ndo_select_queue) |
743 | dev->netdev_ops = &vlan_netdev_ops_sq; | ||
744 | else | ||
745 | dev->netdev_ops = &vlan_netdev_ops; | ||
730 | } | 746 | } |
731 | 747 | ||
732 | if (is_vlan_dev(real_dev)) | 748 | if (is_vlan_dev(real_dev)) |
@@ -865,6 +881,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = { | |||
865 | #endif | 881 | #endif |
866 | }; | 882 | }; |
867 | 883 | ||
884 | static const struct net_device_ops vlan_netdev_ops_sq = { | ||
885 | .ndo_select_queue = vlan_dev_select_queue, | ||
886 | .ndo_change_mtu = vlan_dev_change_mtu, | ||
887 | .ndo_init = vlan_dev_init, | ||
888 | .ndo_uninit = vlan_dev_uninit, | ||
889 | .ndo_open = vlan_dev_open, | ||
890 | .ndo_stop = vlan_dev_stop, | ||
891 | .ndo_start_xmit = vlan_dev_hard_start_xmit, | ||
892 | .ndo_validate_addr = eth_validate_addr, | ||
893 | .ndo_set_mac_address = vlan_dev_set_mac_address, | ||
894 | .ndo_set_rx_mode = vlan_dev_set_rx_mode, | ||
895 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | ||
896 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | ||
897 | .ndo_do_ioctl = vlan_dev_ioctl, | ||
898 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
899 | .ndo_get_stats = vlan_dev_get_stats, | ||
900 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | ||
901 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, | ||
902 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | ||
903 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | ||
904 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | ||
905 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
906 | #endif | ||
907 | }; | ||
908 | |||
909 | static const struct net_device_ops vlan_netdev_accel_ops_sq = { | ||
910 | .ndo_select_queue = vlan_dev_select_queue, | ||
911 | .ndo_change_mtu = vlan_dev_change_mtu, | ||
912 | .ndo_init = vlan_dev_init, | ||
913 | .ndo_uninit = vlan_dev_uninit, | ||
914 | .ndo_open = vlan_dev_open, | ||
915 | .ndo_stop = vlan_dev_stop, | ||
916 | .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit, | ||
917 | .ndo_validate_addr = eth_validate_addr, | ||
918 | .ndo_set_mac_address = vlan_dev_set_mac_address, | ||
919 | .ndo_set_rx_mode = vlan_dev_set_rx_mode, | ||
920 | .ndo_set_multicast_list = vlan_dev_set_rx_mode, | ||
921 | .ndo_change_rx_flags = vlan_dev_change_rx_flags, | ||
922 | .ndo_do_ioctl = vlan_dev_ioctl, | ||
923 | .ndo_neigh_setup = vlan_dev_neigh_setup, | ||
924 | .ndo_get_stats = vlan_dev_get_stats, | ||
925 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) | ||
926 | .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, | ||
927 | .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, | ||
928 | .ndo_fcoe_enable = vlan_dev_fcoe_enable, | ||
929 | .ndo_fcoe_disable = vlan_dev_fcoe_disable, | ||
930 | .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, | ||
931 | #endif | ||
932 | }; | ||
933 | |||
868 | void vlan_setup(struct net_device *dev) | 934 | void vlan_setup(struct net_device *dev) |
869 | { | 935 | { |
870 | ether_setup(dev); | 936 | ether_setup(dev); |
diff --git a/net/9p/client.c b/net/9p/client.c index 09d4f1e2e4a8..0aa79faa9850 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
30 | #include <linux/idr.h> | 30 | #include <linux/idr.h> |
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/slab.h> | ||
32 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
33 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
34 | #include <net/9p/9p.h> | 35 | #include <net/9p/9p.h> |
@@ -46,6 +47,7 @@ enum { | |||
46 | Opt_msize, | 47 | Opt_msize, |
47 | Opt_trans, | 48 | Opt_trans, |
48 | Opt_legacy, | 49 | Opt_legacy, |
50 | Opt_version, | ||
49 | Opt_err, | 51 | Opt_err, |
50 | }; | 52 | }; |
51 | 53 | ||
@@ -53,9 +55,43 @@ static const match_table_t tokens = { | |||
53 | {Opt_msize, "msize=%u"}, | 55 | {Opt_msize, "msize=%u"}, |
54 | {Opt_legacy, "noextend"}, | 56 | {Opt_legacy, "noextend"}, |
55 | {Opt_trans, "trans=%s"}, | 57 | {Opt_trans, "trans=%s"}, |
58 | {Opt_version, "version=%s"}, | ||
56 | {Opt_err, NULL}, | 59 | {Opt_err, NULL}, |
57 | }; | 60 | }; |
58 | 61 | ||
62 | inline int p9_is_proto_dotl(struct p9_client *clnt) | ||
63 | { | ||
64 | return (clnt->proto_version == p9_proto_2000L); | ||
65 | } | ||
66 | EXPORT_SYMBOL(p9_is_proto_dotl); | ||
67 | |||
68 | inline int p9_is_proto_dotu(struct p9_client *clnt) | ||
69 | { | ||
70 | return (clnt->proto_version == p9_proto_2000u); | ||
71 | } | ||
72 | EXPORT_SYMBOL(p9_is_proto_dotu); | ||
73 | |||
74 | /* Interpret mount option for protocol version */ | ||
75 | static int get_protocol_version(const substring_t *name) | ||
76 | { | ||
77 | int version = -EINVAL; | ||
78 | |||
79 | if (!strncmp("9p2000", name->from, name->to-name->from)) { | ||
80 | version = p9_proto_legacy; | ||
81 | P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n"); | ||
82 | } else if (!strncmp("9p2000.u", name->from, name->to-name->from)) { | ||
83 | version = p9_proto_2000u; | ||
84 | P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n"); | ||
85 | } else if (!strncmp("9p2000.L", name->from, name->to-name->from)) { | ||
86 | version = p9_proto_2000L; | ||
87 | P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n"); | ||
88 | } else { | ||
89 | P9_DPRINTK(P9_DEBUG_ERROR, "Unknown protocol version %s. ", | ||
90 | name->from); | ||
91 | } | ||
92 | return version; | ||
93 | } | ||
94 | |||
59 | static struct p9_req_t * | 95 | static struct p9_req_t * |
60 | p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); | 96 | p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); |
61 | 97 | ||
@@ -75,7 +111,7 @@ static int parse_opts(char *opts, struct p9_client *clnt) | |||
75 | int option; | 111 | int option; |
76 | int ret = 0; | 112 | int ret = 0; |
77 | 113 | ||
78 | clnt->dotu = 1; | 114 | clnt->proto_version = p9_proto_2000u; |
79 | clnt->msize = 8192; | 115 | clnt->msize = 8192; |
80 | 116 | ||
81 | if (!opts) | 117 | if (!opts) |
@@ -118,7 +154,13 @@ static int parse_opts(char *opts, struct p9_client *clnt) | |||
118 | } | 154 | } |
119 | break; | 155 | break; |
120 | case Opt_legacy: | 156 | case Opt_legacy: |
121 | clnt->dotu = 0; | 157 | clnt->proto_version = p9_proto_legacy; |
158 | break; | ||
159 | case Opt_version: | ||
160 | ret = get_protocol_version(&args[0]); | ||
161 | if (ret == -EINVAL) | ||
162 | goto free_and_return; | ||
163 | clnt->proto_version = ret; | ||
122 | break; | 164 | break; |
123 | default: | 165 | default: |
124 | continue; | 166 | continue; |
@@ -410,14 +452,15 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req) | |||
410 | int ecode; | 452 | int ecode; |
411 | char *ename; | 453 | char *ename; |
412 | 454 | ||
413 | err = p9pdu_readf(req->rc, c->dotu, "s?d", &ename, &ecode); | 455 | err = p9pdu_readf(req->rc, c->proto_version, "s?d", |
456 | &ename, &ecode); | ||
414 | if (err) { | 457 | if (err) { |
415 | P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", | 458 | P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", |
416 | err); | 459 | err); |
417 | return err; | 460 | return err; |
418 | } | 461 | } |
419 | 462 | ||
420 | if (c->dotu) | 463 | if (p9_is_proto_dotu(c)) |
421 | err = -ecode; | 464 | err = -ecode; |
422 | 465 | ||
423 | if (!err || !IS_ERR_VALUE(err)) | 466 | if (!err || !IS_ERR_VALUE(err)) |
@@ -492,7 +535,12 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) | |||
492 | 535 | ||
493 | P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type); | 536 | P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type); |
494 | 537 | ||
495 | if (c->status != Connected) | 538 | /* we allow for any status other than disconnected */ |
539 | if (c->status == Disconnected) | ||
540 | return ERR_PTR(-EIO); | ||
541 | |||
542 | /* if status is begin_disconnected we allow only clunk request */ | ||
543 | if ((c->status == BeginDisconnect) && (type != P9_TCLUNK)) | ||
496 | return ERR_PTR(-EIO); | 544 | return ERR_PTR(-EIO); |
497 | 545 | ||
498 | if (signal_pending(current)) { | 546 | if (signal_pending(current)) { |
@@ -515,7 +563,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) | |||
515 | /* marshall the data */ | 563 | /* marshall the data */ |
516 | p9pdu_prepare(req->tc, tag, type); | 564 | p9pdu_prepare(req->tc, tag, type); |
517 | va_start(ap, fmt); | 565 | va_start(ap, fmt); |
518 | err = p9pdu_vwritef(req->tc, c->dotu, fmt, ap); | 566 | err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap); |
519 | va_end(ap); | 567 | va_end(ap); |
520 | p9pdu_finalize(req->tc); | 568 | p9pdu_finalize(req->tc); |
521 | 569 | ||
@@ -627,14 +675,31 @@ int p9_client_version(struct p9_client *c) | |||
627 | char *version; | 675 | char *version; |
628 | int msize; | 676 | int msize; |
629 | 677 | ||
630 | P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d extended %d\n", | 678 | P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n", |
631 | c->msize, c->dotu); | 679 | c->msize, c->proto_version); |
632 | req = p9_client_rpc(c, P9_TVERSION, "ds", c->msize, | 680 | |
633 | c->dotu ? "9P2000.u" : "9P2000"); | 681 | switch (c->proto_version) { |
682 | case p9_proto_2000L: | ||
683 | req = p9_client_rpc(c, P9_TVERSION, "ds", | ||
684 | c->msize, "9P2000.L"); | ||
685 | break; | ||
686 | case p9_proto_2000u: | ||
687 | req = p9_client_rpc(c, P9_TVERSION, "ds", | ||
688 | c->msize, "9P2000.u"); | ||
689 | break; | ||
690 | case p9_proto_legacy: | ||
691 | req = p9_client_rpc(c, P9_TVERSION, "ds", | ||
692 | c->msize, "9P2000"); | ||
693 | break; | ||
694 | default: | ||
695 | return -EINVAL; | ||
696 | break; | ||
697 | } | ||
698 | |||
634 | if (IS_ERR(req)) | 699 | if (IS_ERR(req)) |
635 | return PTR_ERR(req); | 700 | return PTR_ERR(req); |
636 | 701 | ||
637 | err = p9pdu_readf(req->rc, c->dotu, "ds", &msize, &version); | 702 | err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version); |
638 | if (err) { | 703 | if (err) { |
639 | P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err); | 704 | P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err); |
640 | p9pdu_dump(1, req->rc); | 705 | p9pdu_dump(1, req->rc); |
@@ -642,10 +707,12 @@ int p9_client_version(struct p9_client *c) | |||
642 | } | 707 | } |
643 | 708 | ||
644 | P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version); | 709 | P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version); |
645 | if (!memcmp(version, "9P2000.u", 8)) | 710 | if (!strncmp(version, "9P2000.L", 8)) |
646 | c->dotu = 1; | 711 | c->proto_version = p9_proto_2000L; |
647 | else if (!memcmp(version, "9P2000", 6)) | 712 | else if (!strncmp(version, "9P2000.u", 8)) |
648 | c->dotu = 0; | 713 | c->proto_version = p9_proto_2000u; |
714 | else if (!strncmp(version, "9P2000", 6)) | ||
715 | c->proto_version = p9_proto_legacy; | ||
649 | else { | 716 | else { |
650 | err = -EREMOTEIO; | 717 | err = -EREMOTEIO; |
651 | goto error; | 718 | goto error; |
@@ -700,8 +767,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) | |||
700 | goto put_trans; | 767 | goto put_trans; |
701 | } | 768 | } |
702 | 769 | ||
703 | P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", | 770 | P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n", |
704 | clnt, clnt->trans_mod, clnt->msize, clnt->dotu); | 771 | clnt, clnt->trans_mod, clnt->msize, clnt->proto_version); |
705 | 772 | ||
706 | err = clnt->trans_mod->create(clnt, dev_name, options); | 773 | err = clnt->trans_mod->create(clnt, dev_name, options); |
707 | if (err) | 774 | if (err) |
@@ -739,8 +806,10 @@ void p9_client_destroy(struct p9_client *clnt) | |||
739 | 806 | ||
740 | v9fs_put_trans(clnt->trans_mod); | 807 | v9fs_put_trans(clnt->trans_mod); |
741 | 808 | ||
742 | list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) | 809 | list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) { |
810 | printk(KERN_INFO "Found fid %d not clunked\n", fid->fid); | ||
743 | p9_fid_destroy(fid); | 811 | p9_fid_destroy(fid); |
812 | } | ||
744 | 813 | ||
745 | if (clnt->fidpool) | 814 | if (clnt->fidpool) |
746 | p9_idpool_destroy(clnt->fidpool); | 815 | p9_idpool_destroy(clnt->fidpool); |
@@ -758,6 +827,13 @@ void p9_client_disconnect(struct p9_client *clnt) | |||
758 | } | 827 | } |
759 | EXPORT_SYMBOL(p9_client_disconnect); | 828 | EXPORT_SYMBOL(p9_client_disconnect); |
760 | 829 | ||
830 | void p9_client_begin_disconnect(struct p9_client *clnt) | ||
831 | { | ||
832 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt); | ||
833 | clnt->status = BeginDisconnect; | ||
834 | } | ||
835 | EXPORT_SYMBOL(p9_client_begin_disconnect); | ||
836 | |||
761 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, | 837 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, |
762 | char *uname, u32 n_uname, char *aname) | 838 | char *uname, u32 n_uname, char *aname) |
763 | { | 839 | { |
@@ -784,7 +860,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, | |||
784 | goto error; | 860 | goto error; |
785 | } | 861 | } |
786 | 862 | ||
787 | err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid); | 863 | err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid); |
788 | if (err) { | 864 | if (err) { |
789 | p9pdu_dump(1, req->rc); | 865 | p9pdu_dump(1, req->rc); |
790 | p9_free_req(clnt, req); | 866 | p9_free_req(clnt, req); |
@@ -833,7 +909,7 @@ p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname) | |||
833 | goto error; | 909 | goto error; |
834 | } | 910 | } |
835 | 911 | ||
836 | err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid); | 912 | err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid); |
837 | if (err) { | 913 | if (err) { |
838 | p9pdu_dump(1, req->rc); | 914 | p9pdu_dump(1, req->rc); |
839 | p9_free_req(clnt, req); | 915 | p9_free_req(clnt, req); |
@@ -891,7 +967,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | |||
891 | goto error; | 967 | goto error; |
892 | } | 968 | } |
893 | 969 | ||
894 | err = p9pdu_readf(req->rc, clnt->dotu, "R", &nwqids, &wqids); | 970 | err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids); |
895 | if (err) { | 971 | if (err) { |
896 | p9pdu_dump(1, req->rc); | 972 | p9pdu_dump(1, req->rc); |
897 | p9_free_req(clnt, req); | 973 | p9_free_req(clnt, req); |
@@ -952,7 +1028,7 @@ int p9_client_open(struct p9_fid *fid, int mode) | |||
952 | goto error; | 1028 | goto error; |
953 | } | 1029 | } |
954 | 1030 | ||
955 | err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit); | 1031 | err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); |
956 | if (err) { | 1032 | if (err) { |
957 | p9pdu_dump(1, req->rc); | 1033 | p9pdu_dump(1, req->rc); |
958 | goto free_and_error; | 1034 | goto free_and_error; |
@@ -997,7 +1073,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, | |||
997 | goto error; | 1073 | goto error; |
998 | } | 1074 | } |
999 | 1075 | ||
1000 | err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit); | 1076 | err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); |
1001 | if (err) { | 1077 | if (err) { |
1002 | p9pdu_dump(1, req->rc); | 1078 | p9pdu_dump(1, req->rc); |
1003 | goto free_and_error; | 1079 | goto free_and_error; |
@@ -1098,7 +1174,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, | |||
1098 | goto error; | 1174 | goto error; |
1099 | } | 1175 | } |
1100 | 1176 | ||
1101 | err = p9pdu_readf(req->rc, clnt->dotu, "D", &count, &dataptr); | 1177 | err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr); |
1102 | if (err) { | 1178 | if (err) { |
1103 | p9pdu_dump(1, req->rc); | 1179 | p9pdu_dump(1, req->rc); |
1104 | goto free_and_error; | 1180 | goto free_and_error; |
@@ -1159,7 +1235,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, | |||
1159 | goto error; | 1235 | goto error; |
1160 | } | 1236 | } |
1161 | 1237 | ||
1162 | err = p9pdu_readf(req->rc, clnt->dotu, "d", &count); | 1238 | err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count); |
1163 | if (err) { | 1239 | if (err) { |
1164 | p9pdu_dump(1, req->rc); | 1240 | p9pdu_dump(1, req->rc); |
1165 | goto free_and_error; | 1241 | goto free_and_error; |
@@ -1199,7 +1275,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid) | |||
1199 | goto error; | 1275 | goto error; |
1200 | } | 1276 | } |
1201 | 1277 | ||
1202 | err = p9pdu_readf(req->rc, clnt->dotu, "wS", &ignored, ret); | 1278 | err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret); |
1203 | if (err) { | 1279 | if (err) { |
1204 | p9pdu_dump(1, req->rc); | 1280 | p9pdu_dump(1, req->rc); |
1205 | p9_free_req(clnt, req); | 1281 | p9_free_req(clnt, req); |
@@ -1226,7 +1302,7 @@ error: | |||
1226 | } | 1302 | } |
1227 | EXPORT_SYMBOL(p9_client_stat); | 1303 | EXPORT_SYMBOL(p9_client_stat); |
1228 | 1304 | ||
1229 | static int p9_client_statsize(struct p9_wstat *wst, int optional) | 1305 | static int p9_client_statsize(struct p9_wstat *wst, int proto_version) |
1230 | { | 1306 | { |
1231 | int ret; | 1307 | int ret; |
1232 | 1308 | ||
@@ -1245,7 +1321,7 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional) | |||
1245 | if (wst->muid) | 1321 | if (wst->muid) |
1246 | ret += strlen(wst->muid); | 1322 | ret += strlen(wst->muid); |
1247 | 1323 | ||
1248 | if (optional) { | 1324 | if (proto_version == p9_proto_2000u) { |
1249 | ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ | 1325 | ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ |
1250 | if (wst->extension) | 1326 | if (wst->extension) |
1251 | ret += strlen(wst->extension); | 1327 | ret += strlen(wst->extension); |
@@ -1262,7 +1338,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) | |||
1262 | 1338 | ||
1263 | err = 0; | 1339 | err = 0; |
1264 | clnt = fid->clnt; | 1340 | clnt = fid->clnt; |
1265 | wst->size = p9_client_statsize(wst, clnt->dotu); | 1341 | wst->size = p9_client_statsize(wst, clnt->proto_version); |
1266 | P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid); | 1342 | P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid); |
1267 | P9_DPRINTK(P9_DEBUG_9P, | 1343 | P9_DPRINTK(P9_DEBUG_9P, |
1268 | " sz=%x type=%x dev=%x qid=%x.%llx.%x\n" | 1344 | " sz=%x type=%x dev=%x qid=%x.%llx.%x\n" |
diff --git a/net/9p/protocol.c b/net/9p/protocol.c index fc70147c771e..e7541d5b0118 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | #include <linux/slab.h> | ||
31 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
32 | #include <linux/types.h> | 33 | #include <linux/types.h> |
33 | #include <net/9p/9p.h> | 34 | #include <net/9p/9p.h> |
@@ -52,7 +53,7 @@ | |||
52 | #endif | 53 | #endif |
53 | 54 | ||
54 | static int | 55 | static int |
55 | p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...); | 56 | p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); |
56 | 57 | ||
57 | #ifdef CONFIG_NET_9P_DEBUG | 58 | #ifdef CONFIG_NET_9P_DEBUG |
58 | void | 59 | void |
@@ -144,7 +145,8 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) | |||
144 | */ | 145 | */ |
145 | 146 | ||
146 | static int | 147 | static int |
147 | p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | 148 | p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, |
149 | va_list ap) | ||
148 | { | 150 | { |
149 | const char *ptr; | 151 | const char *ptr; |
150 | int errcode = 0; | 152 | int errcode = 0; |
@@ -194,7 +196,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
194 | int16_t len; | 196 | int16_t len; |
195 | int size; | 197 | int size; |
196 | 198 | ||
197 | errcode = p9pdu_readf(pdu, optional, "w", &len); | 199 | errcode = p9pdu_readf(pdu, proto_version, |
200 | "w", &len); | ||
198 | if (errcode) | 201 | if (errcode) |
199 | break; | 202 | break; |
200 | 203 | ||
@@ -217,7 +220,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
217 | struct p9_qid *qid = | 220 | struct p9_qid *qid = |
218 | va_arg(ap, struct p9_qid *); | 221 | va_arg(ap, struct p9_qid *); |
219 | 222 | ||
220 | errcode = p9pdu_readf(pdu, optional, "bdq", | 223 | errcode = p9pdu_readf(pdu, proto_version, "bdq", |
221 | &qid->type, &qid->version, | 224 | &qid->type, &qid->version, |
222 | &qid->path); | 225 | &qid->path); |
223 | } | 226 | } |
@@ -230,7 +233,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
230 | stbuf->n_uid = stbuf->n_gid = stbuf->n_muid = | 233 | stbuf->n_uid = stbuf->n_gid = stbuf->n_muid = |
231 | -1; | 234 | -1; |
232 | errcode = | 235 | errcode = |
233 | p9pdu_readf(pdu, optional, | 236 | p9pdu_readf(pdu, proto_version, |
234 | "wwdQdddqssss?sddd", | 237 | "wwdQdddqssss?sddd", |
235 | &stbuf->size, &stbuf->type, | 238 | &stbuf->size, &stbuf->type, |
236 | &stbuf->dev, &stbuf->qid, | 239 | &stbuf->dev, &stbuf->qid, |
@@ -250,7 +253,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
250 | void **data = va_arg(ap, void **); | 253 | void **data = va_arg(ap, void **); |
251 | 254 | ||
252 | errcode = | 255 | errcode = |
253 | p9pdu_readf(pdu, optional, "d", count); | 256 | p9pdu_readf(pdu, proto_version, "d", count); |
254 | if (!errcode) { | 257 | if (!errcode) { |
255 | *count = | 258 | *count = |
256 | MIN(*count, | 259 | MIN(*count, |
@@ -263,8 +266,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
263 | int16_t *nwname = va_arg(ap, int16_t *); | 266 | int16_t *nwname = va_arg(ap, int16_t *); |
264 | char ***wnames = va_arg(ap, char ***); | 267 | char ***wnames = va_arg(ap, char ***); |
265 | 268 | ||
266 | errcode = | 269 | errcode = p9pdu_readf(pdu, proto_version, |
267 | p9pdu_readf(pdu, optional, "w", nwname); | 270 | "w", nwname); |
268 | if (!errcode) { | 271 | if (!errcode) { |
269 | *wnames = | 272 | *wnames = |
270 | kmalloc(sizeof(char *) * *nwname, | 273 | kmalloc(sizeof(char *) * *nwname, |
@@ -278,7 +281,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
278 | 281 | ||
279 | for (i = 0; i < *nwname; i++) { | 282 | for (i = 0; i < *nwname; i++) { |
280 | errcode = | 283 | errcode = |
281 | p9pdu_readf(pdu, optional, | 284 | p9pdu_readf(pdu, |
285 | proto_version, | ||
282 | "s", | 286 | "s", |
283 | &(*wnames)[i]); | 287 | &(*wnames)[i]); |
284 | if (errcode) | 288 | if (errcode) |
@@ -306,7 +310,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
306 | *wqids = NULL; | 310 | *wqids = NULL; |
307 | 311 | ||
308 | errcode = | 312 | errcode = |
309 | p9pdu_readf(pdu, optional, "w", nwqid); | 313 | p9pdu_readf(pdu, proto_version, "w", nwqid); |
310 | if (!errcode) { | 314 | if (!errcode) { |
311 | *wqids = | 315 | *wqids = |
312 | kmalloc(*nwqid * | 316 | kmalloc(*nwqid * |
@@ -321,7 +325,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
321 | 325 | ||
322 | for (i = 0; i < *nwqid; i++) { | 326 | for (i = 0; i < *nwqid; i++) { |
323 | errcode = | 327 | errcode = |
324 | p9pdu_readf(pdu, optional, | 328 | p9pdu_readf(pdu, |
329 | proto_version, | ||
325 | "Q", | 330 | "Q", |
326 | &(*wqids)[i]); | 331 | &(*wqids)[i]); |
327 | if (errcode) | 332 | if (errcode) |
@@ -336,7 +341,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
336 | } | 341 | } |
337 | break; | 342 | break; |
338 | case '?': | 343 | case '?': |
339 | if (!optional) | 344 | if (proto_version != p9_proto_2000u) |
340 | return 0; | 345 | return 0; |
341 | break; | 346 | break; |
342 | default: | 347 | default: |
@@ -352,7 +357,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
352 | } | 357 | } |
353 | 358 | ||
354 | int | 359 | int |
355 | p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | 360 | p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, |
361 | va_list ap) | ||
356 | { | 362 | { |
357 | const char *ptr; | 363 | const char *ptr; |
358 | int errcode = 0; | 364 | int errcode = 0; |
@@ -389,7 +395,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
389 | if (sptr) | 395 | if (sptr) |
390 | len = MIN(strlen(sptr), USHORT_MAX); | 396 | len = MIN(strlen(sptr), USHORT_MAX); |
391 | 397 | ||
392 | errcode = p9pdu_writef(pdu, optional, "w", len); | 398 | errcode = p9pdu_writef(pdu, proto_version, |
399 | "w", len); | ||
393 | if (!errcode && pdu_write(pdu, sptr, len)) | 400 | if (!errcode && pdu_write(pdu, sptr, len)) |
394 | errcode = -EFAULT; | 401 | errcode = -EFAULT; |
395 | } | 402 | } |
@@ -398,7 +405,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
398 | const struct p9_qid *qid = | 405 | const struct p9_qid *qid = |
399 | va_arg(ap, const struct p9_qid *); | 406 | va_arg(ap, const struct p9_qid *); |
400 | errcode = | 407 | errcode = |
401 | p9pdu_writef(pdu, optional, "bdq", | 408 | p9pdu_writef(pdu, proto_version, "bdq", |
402 | qid->type, qid->version, | 409 | qid->type, qid->version, |
403 | qid->path); | 410 | qid->path); |
404 | } break; | 411 | } break; |
@@ -406,7 +413,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
406 | const struct p9_wstat *stbuf = | 413 | const struct p9_wstat *stbuf = |
407 | va_arg(ap, const struct p9_wstat *); | 414 | va_arg(ap, const struct p9_wstat *); |
408 | errcode = | 415 | errcode = |
409 | p9pdu_writef(pdu, optional, | 416 | p9pdu_writef(pdu, proto_version, |
410 | "wwdQdddqssss?sddd", | 417 | "wwdQdddqssss?sddd", |
411 | stbuf->size, stbuf->type, | 418 | stbuf->size, stbuf->type, |
412 | stbuf->dev, &stbuf->qid, | 419 | stbuf->dev, &stbuf->qid, |
@@ -421,8 +428,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
421 | int32_t count = va_arg(ap, int32_t); | 428 | int32_t count = va_arg(ap, int32_t); |
422 | const void *data = va_arg(ap, const void *); | 429 | const void *data = va_arg(ap, const void *); |
423 | 430 | ||
424 | errcode = | 431 | errcode = p9pdu_writef(pdu, proto_version, "d", |
425 | p9pdu_writef(pdu, optional, "d", count); | 432 | count); |
426 | if (!errcode && pdu_write(pdu, data, count)) | 433 | if (!errcode && pdu_write(pdu, data, count)) |
427 | errcode = -EFAULT; | 434 | errcode = -EFAULT; |
428 | } | 435 | } |
@@ -431,8 +438,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
431 | int32_t count = va_arg(ap, int32_t); | 438 | int32_t count = va_arg(ap, int32_t); |
432 | const char __user *udata = | 439 | const char __user *udata = |
433 | va_arg(ap, const void __user *); | 440 | va_arg(ap, const void __user *); |
434 | errcode = | 441 | errcode = p9pdu_writef(pdu, proto_version, "d", |
435 | p9pdu_writef(pdu, optional, "d", count); | 442 | count); |
436 | if (!errcode && pdu_write_u(pdu, udata, count)) | 443 | if (!errcode && pdu_write_u(pdu, udata, count)) |
437 | errcode = -EFAULT; | 444 | errcode = -EFAULT; |
438 | } | 445 | } |
@@ -441,14 +448,15 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
441 | int16_t nwname = va_arg(ap, int); | 448 | int16_t nwname = va_arg(ap, int); |
442 | const char **wnames = va_arg(ap, const char **); | 449 | const char **wnames = va_arg(ap, const char **); |
443 | 450 | ||
444 | errcode = | 451 | errcode = p9pdu_writef(pdu, proto_version, "w", |
445 | p9pdu_writef(pdu, optional, "w", nwname); | 452 | nwname); |
446 | if (!errcode) { | 453 | if (!errcode) { |
447 | int i; | 454 | int i; |
448 | 455 | ||
449 | for (i = 0; i < nwname; i++) { | 456 | for (i = 0; i < nwname; i++) { |
450 | errcode = | 457 | errcode = |
451 | p9pdu_writef(pdu, optional, | 458 | p9pdu_writef(pdu, |
459 | proto_version, | ||
452 | "s", | 460 | "s", |
453 | wnames[i]); | 461 | wnames[i]); |
454 | if (errcode) | 462 | if (errcode) |
@@ -462,14 +470,15 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
462 | struct p9_qid *wqids = | 470 | struct p9_qid *wqids = |
463 | va_arg(ap, struct p9_qid *); | 471 | va_arg(ap, struct p9_qid *); |
464 | 472 | ||
465 | errcode = | 473 | errcode = p9pdu_writef(pdu, proto_version, "w", |
466 | p9pdu_writef(pdu, optional, "w", nwqid); | 474 | nwqid); |
467 | if (!errcode) { | 475 | if (!errcode) { |
468 | int i; | 476 | int i; |
469 | 477 | ||
470 | for (i = 0; i < nwqid; i++) { | 478 | for (i = 0; i < nwqid; i++) { |
471 | errcode = | 479 | errcode = |
472 | p9pdu_writef(pdu, optional, | 480 | p9pdu_writef(pdu, |
481 | proto_version, | ||
473 | "Q", | 482 | "Q", |
474 | &wqids[i]); | 483 | &wqids[i]); |
475 | if (errcode) | 484 | if (errcode) |
@@ -479,7 +488,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
479 | } | 488 | } |
480 | break; | 489 | break; |
481 | case '?': | 490 | case '?': |
482 | if (!optional) | 491 | if (proto_version != p9_proto_2000u) |
483 | return 0; | 492 | return 0; |
484 | break; | 493 | break; |
485 | default: | 494 | default: |
@@ -494,32 +503,32 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) | |||
494 | return errcode; | 503 | return errcode; |
495 | } | 504 | } |
496 | 505 | ||
497 | int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...) | 506 | int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) |
498 | { | 507 | { |
499 | va_list ap; | 508 | va_list ap; |
500 | int ret; | 509 | int ret; |
501 | 510 | ||
502 | va_start(ap, fmt); | 511 | va_start(ap, fmt); |
503 | ret = p9pdu_vreadf(pdu, optional, fmt, ap); | 512 | ret = p9pdu_vreadf(pdu, proto_version, fmt, ap); |
504 | va_end(ap); | 513 | va_end(ap); |
505 | 514 | ||
506 | return ret; | 515 | return ret; |
507 | } | 516 | } |
508 | 517 | ||
509 | static int | 518 | static int |
510 | p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...) | 519 | p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) |
511 | { | 520 | { |
512 | va_list ap; | 521 | va_list ap; |
513 | int ret; | 522 | int ret; |
514 | 523 | ||
515 | va_start(ap, fmt); | 524 | va_start(ap, fmt); |
516 | ret = p9pdu_vwritef(pdu, optional, fmt, ap); | 525 | ret = p9pdu_vwritef(pdu, proto_version, fmt, ap); |
517 | va_end(ap); | 526 | va_end(ap); |
518 | 527 | ||
519 | return ret; | 528 | return ret; |
520 | } | 529 | } |
521 | 530 | ||
522 | int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu) | 531 | int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version) |
523 | { | 532 | { |
524 | struct p9_fcall fake_pdu; | 533 | struct p9_fcall fake_pdu; |
525 | int ret; | 534 | int ret; |
@@ -529,7 +538,7 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu) | |||
529 | fake_pdu.sdata = buf; | 538 | fake_pdu.sdata = buf; |
530 | fake_pdu.offset = 0; | 539 | fake_pdu.offset = 0; |
531 | 540 | ||
532 | ret = p9pdu_readf(&fake_pdu, dotu, "S", st); | 541 | ret = p9pdu_readf(&fake_pdu, proto_version, "S", st); |
533 | if (ret) { | 542 | if (ret) { |
534 | P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); | 543 | P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); |
535 | p9pdu_dump(1, &fake_pdu); | 544 | p9pdu_dump(1, &fake_pdu); |
diff --git a/net/9p/protocol.h b/net/9p/protocol.h index ccde462e7ac5..2431c0f38d56 100644 --- a/net/9p/protocol.h +++ b/net/9p/protocol.h | |||
@@ -25,9 +25,9 @@ | |||
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | 27 | ||
28 | int | 28 | int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, |
29 | p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap); | 29 | va_list ap); |
30 | int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...); | 30 | int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); |
31 | int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type); | 31 | int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type); |
32 | int p9pdu_finalize(struct p9_fcall *pdu); | 32 | int p9pdu_finalize(struct p9_fcall *pdu); |
33 | void p9pdu_dump(int, struct p9_fcall *); | 33 | void p9pdu_dump(int, struct p9_fcall *); |
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 31d0b05582a9..98ce9bcb0e15 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/idr.h> | 38 | #include <linux/idr.h> |
39 | #include <linux/file.h> | 39 | #include <linux/file.h> |
40 | #include <linux/parser.h> | 40 | #include <linux/parser.h> |
41 | #include <linux/slab.h> | ||
41 | #include <net/9p/9p.h> | 42 | #include <net/9p/9p.h> |
42 | #include <net/9p/client.h> | 43 | #include <net/9p/client.h> |
43 | #include <net/9p/transport.h> | 44 | #include <net/9p/transport.h> |
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 2c95a89c0f46..041101ab4aa5 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/file.h> | 40 | #include <linux/file.h> |
41 | #include <linux/parser.h> | 41 | #include <linux/parser.h> |
42 | #include <linux/semaphore.h> | 42 | #include <linux/semaphore.h> |
43 | #include <linux/slab.h> | ||
43 | #include <net/9p/9p.h> | 44 | #include <net/9p/9p.h> |
44 | #include <net/9p/client.h> | 45 | #include <net/9p/client.h> |
45 | #include <net/9p/transport.h> | 46 | #include <net/9p/transport.h> |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index cb50f4ae5eef..7eb78ecc1618 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/inet.h> | 37 | #include <linux/inet.h> |
38 | #include <linux/idr.h> | 38 | #include <linux/idr.h> |
39 | #include <linux/file.h> | 39 | #include <linux/file.h> |
40 | #include <linux/slab.h> | ||
40 | #include <net/9p/9p.h> | 41 | #include <net/9p/9p.h> |
41 | #include <linux/parser.h> | 42 | #include <linux/parser.h> |
42 | #include <net/9p/client.h> | 43 | #include <net/9p/client.h> |
@@ -49,8 +50,6 @@ | |||
49 | 50 | ||
50 | /* a single mutex to manage channel initialization and attachment */ | 51 | /* a single mutex to manage channel initialization and attachment */ |
51 | static DEFINE_MUTEX(virtio_9p_lock); | 52 | static DEFINE_MUTEX(virtio_9p_lock); |
52 | /* global which tracks highest initialized channel */ | ||
53 | static int chan_index; | ||
54 | 53 | ||
55 | /** | 54 | /** |
56 | * struct virtio_chan - per-instance transport information | 55 | * struct virtio_chan - per-instance transport information |
@@ -68,8 +67,7 @@ static int chan_index; | |||
68 | * | 67 | * |
69 | */ | 68 | */ |
70 | 69 | ||
71 | static struct virtio_chan { | 70 | struct virtio_chan { |
72 | bool initialized; | ||
73 | bool inuse; | 71 | bool inuse; |
74 | 72 | ||
75 | spinlock_t lock; | 73 | spinlock_t lock; |
@@ -80,7 +78,17 @@ static struct virtio_chan { | |||
80 | 78 | ||
81 | /* Scatterlist: can be too big for stack. */ | 79 | /* Scatterlist: can be too big for stack. */ |
82 | struct scatterlist sg[VIRTQUEUE_NUM]; | 80 | struct scatterlist sg[VIRTQUEUE_NUM]; |
83 | } channels[MAX_9P_CHAN]; | 81 | |
82 | int tag_len; | ||
83 | /* | ||
84 | * tag name to identify a mount Non-null terminated | ||
85 | */ | ||
86 | char *tag; | ||
87 | |||
88 | struct list_head chan_list; | ||
89 | }; | ||
90 | |||
91 | static struct list_head virtio_chan_list; | ||
84 | 92 | ||
85 | /* How many bytes left in this page. */ | 93 | /* How many bytes left in this page. */ |
86 | static unsigned int rest_of_page(void *data) | 94 | static unsigned int rest_of_page(void *data) |
@@ -213,30 +221,38 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) | |||
213 | return 0; | 221 | return 0; |
214 | } | 222 | } |
215 | 223 | ||
224 | static ssize_t p9_mount_tag_show(struct device *dev, | ||
225 | struct device_attribute *attr, char *buf) | ||
226 | { | ||
227 | struct virtio_chan *chan; | ||
228 | struct virtio_device *vdev; | ||
229 | |||
230 | vdev = dev_to_virtio(dev); | ||
231 | chan = vdev->priv; | ||
232 | |||
233 | return snprintf(buf, chan->tag_len + 1, "%s", chan->tag); | ||
234 | } | ||
235 | |||
236 | static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL); | ||
237 | |||
216 | /** | 238 | /** |
217 | * p9_virtio_probe - probe for existence of 9P virtio channels | 239 | * p9_virtio_probe - probe for existence of 9P virtio channels |
218 | * @vdev: virtio device to probe | 240 | * @vdev: virtio device to probe |
219 | * | 241 | * |
220 | * This probes for existing virtio channels. At present only | 242 | * This probes for existing virtio channels. |
221 | * a single channel is in use, so in the future more work may need | ||
222 | * to be done here. | ||
223 | * | 243 | * |
224 | */ | 244 | */ |
225 | 245 | ||
226 | static int p9_virtio_probe(struct virtio_device *vdev) | 246 | static int p9_virtio_probe(struct virtio_device *vdev) |
227 | { | 247 | { |
248 | __u16 tag_len; | ||
249 | char *tag; | ||
228 | int err; | 250 | int err; |
229 | struct virtio_chan *chan; | 251 | struct virtio_chan *chan; |
230 | int index; | ||
231 | 252 | ||
232 | mutex_lock(&virtio_9p_lock); | 253 | chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); |
233 | index = chan_index++; | 254 | if (!chan) { |
234 | chan = &channels[index]; | 255 | printk(KERN_ERR "9p: Failed to allocate virtio 9P channel\n"); |
235 | mutex_unlock(&virtio_9p_lock); | ||
236 | |||
237 | if (chan_index > MAX_9P_CHAN) { | ||
238 | printk(KERN_ERR "9p: virtio: Maximum channels exceeded\n"); | ||
239 | BUG(); | ||
240 | err = -ENOMEM; | 256 | err = -ENOMEM; |
241 | goto fail; | 257 | goto fail; |
242 | } | 258 | } |
@@ -255,15 +271,37 @@ static int p9_virtio_probe(struct virtio_device *vdev) | |||
255 | sg_init_table(chan->sg, VIRTQUEUE_NUM); | 271 | sg_init_table(chan->sg, VIRTQUEUE_NUM); |
256 | 272 | ||
257 | chan->inuse = false; | 273 | chan->inuse = false; |
258 | chan->initialized = true; | 274 | if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) { |
275 | vdev->config->get(vdev, | ||
276 | offsetof(struct virtio_9p_config, tag_len), | ||
277 | &tag_len, sizeof(tag_len)); | ||
278 | } else { | ||
279 | err = -EINVAL; | ||
280 | goto out_free_vq; | ||
281 | } | ||
282 | tag = kmalloc(tag_len, GFP_KERNEL); | ||
283 | if (!tag) { | ||
284 | err = -ENOMEM; | ||
285 | goto out_free_vq; | ||
286 | } | ||
287 | vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag), | ||
288 | tag, tag_len); | ||
289 | chan->tag = tag; | ||
290 | chan->tag_len = tag_len; | ||
291 | err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); | ||
292 | if (err) { | ||
293 | kfree(tag); | ||
294 | goto out_free_vq; | ||
295 | } | ||
296 | mutex_lock(&virtio_9p_lock); | ||
297 | list_add_tail(&chan->chan_list, &virtio_chan_list); | ||
298 | mutex_unlock(&virtio_9p_lock); | ||
259 | return 0; | 299 | return 0; |
260 | 300 | ||
261 | out_free_vq: | 301 | out_free_vq: |
262 | vdev->config->del_vqs(vdev); | 302 | vdev->config->del_vqs(vdev); |
303 | kfree(chan); | ||
263 | fail: | 304 | fail: |
264 | mutex_lock(&virtio_9p_lock); | ||
265 | chan_index--; | ||
266 | mutex_unlock(&virtio_9p_lock); | ||
267 | return err; | 305 | return err; |
268 | } | 306 | } |
269 | 307 | ||
@@ -280,35 +318,31 @@ fail: | |||
280 | * We use a simple reference count mechanism to ensure that only a single | 318 | * We use a simple reference count mechanism to ensure that only a single |
281 | * mount has a channel open at a time. | 319 | * mount has a channel open at a time. |
282 | * | 320 | * |
283 | * Bugs: doesn't allow identification of a specific channel | ||
284 | * to allocate, channels are allocated sequentially. This was | ||
285 | * a pragmatic decision to get things rolling, but ideally some | ||
286 | * way of identifying the channel to attach to would be nice | ||
287 | * if we are going to support multiple channels. | ||
288 | * | ||
289 | */ | 321 | */ |
290 | 322 | ||
291 | static int | 323 | static int |
292 | p9_virtio_create(struct p9_client *client, const char *devname, char *args) | 324 | p9_virtio_create(struct p9_client *client, const char *devname, char *args) |
293 | { | 325 | { |
294 | struct virtio_chan *chan = channels; | 326 | struct virtio_chan *chan; |
295 | int index = 0; | 327 | int ret = -ENOENT; |
328 | int found = 0; | ||
296 | 329 | ||
297 | mutex_lock(&virtio_9p_lock); | 330 | mutex_lock(&virtio_9p_lock); |
298 | while (index < MAX_9P_CHAN) { | 331 | list_for_each_entry(chan, &virtio_chan_list, chan_list) { |
299 | if (chan->initialized && !chan->inuse) { | 332 | if (!strncmp(devname, chan->tag, chan->tag_len)) { |
300 | chan->inuse = true; | 333 | if (!chan->inuse) { |
301 | break; | 334 | chan->inuse = true; |
302 | } else { | 335 | found = 1; |
303 | index++; | 336 | break; |
304 | chan = &channels[index]; | 337 | } |
338 | ret = -EBUSY; | ||
305 | } | 339 | } |
306 | } | 340 | } |
307 | mutex_unlock(&virtio_9p_lock); | 341 | mutex_unlock(&virtio_9p_lock); |
308 | 342 | ||
309 | if (index >= MAX_9P_CHAN) { | 343 | if (!found) { |
310 | printk(KERN_ERR "9p: no channels available\n"); | 344 | printk(KERN_ERR "9p: no channels available\n"); |
311 | return -ENODEV; | 345 | return ret; |
312 | } | 346 | } |
313 | 347 | ||
314 | client->trans = (void *)chan; | 348 | client->trans = (void *)chan; |
@@ -329,11 +363,15 @@ static void p9_virtio_remove(struct virtio_device *vdev) | |||
329 | struct virtio_chan *chan = vdev->priv; | 363 | struct virtio_chan *chan = vdev->priv; |
330 | 364 | ||
331 | BUG_ON(chan->inuse); | 365 | BUG_ON(chan->inuse); |
366 | vdev->config->del_vqs(vdev); | ||
367 | |||
368 | mutex_lock(&virtio_9p_lock); | ||
369 | list_del(&chan->chan_list); | ||
370 | mutex_unlock(&virtio_9p_lock); | ||
371 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); | ||
372 | kfree(chan->tag); | ||
373 | kfree(chan); | ||
332 | 374 | ||
333 | if (chan->initialized) { | ||
334 | vdev->config->del_vqs(vdev); | ||
335 | chan->initialized = false; | ||
336 | } | ||
337 | } | 375 | } |
338 | 376 | ||
339 | static struct virtio_device_id id_table[] = { | 377 | static struct virtio_device_id id_table[] = { |
@@ -341,13 +379,19 @@ static struct virtio_device_id id_table[] = { | |||
341 | { 0 }, | 379 | { 0 }, |
342 | }; | 380 | }; |
343 | 381 | ||
382 | static unsigned int features[] = { | ||
383 | VIRTIO_9P_MOUNT_TAG, | ||
384 | }; | ||
385 | |||
344 | /* The standard "struct lguest_driver": */ | 386 | /* The standard "struct lguest_driver": */ |
345 | static struct virtio_driver p9_virtio_drv = { | 387 | static struct virtio_driver p9_virtio_drv = { |
346 | .driver.name = KBUILD_MODNAME, | 388 | .feature_table = features, |
347 | .driver.owner = THIS_MODULE, | 389 | .feature_table_size = ARRAY_SIZE(features), |
348 | .id_table = id_table, | 390 | .driver.name = KBUILD_MODNAME, |
349 | .probe = p9_virtio_probe, | 391 | .driver.owner = THIS_MODULE, |
350 | .remove = p9_virtio_remove, | 392 | .id_table = id_table, |
393 | .probe = p9_virtio_probe, | ||
394 | .remove = p9_virtio_remove, | ||
351 | }; | 395 | }; |
352 | 396 | ||
353 | static struct p9_trans_module p9_virtio_trans = { | 397 | static struct p9_trans_module p9_virtio_trans = { |
@@ -364,10 +408,7 @@ static struct p9_trans_module p9_virtio_trans = { | |||
364 | /* The standard init function */ | 408 | /* The standard init function */ |
365 | static int __init p9_virtio_init(void) | 409 | static int __init p9_virtio_init(void) |
366 | { | 410 | { |
367 | int count; | 411 | INIT_LIST_HEAD(&virtio_chan_list); |
368 | |||
369 | for (count = 0; count < MAX_9P_CHAN; count++) | ||
370 | channels[count].initialized = false; | ||
371 | 412 | ||
372 | v9fs_register_trans(&p9_virtio_trans); | 413 | v9fs_register_trans(&p9_virtio_trans); |
373 | return register_virtio_driver(&p9_virtio_drv); | 414 | return register_virtio_driver(&p9_virtio_drv); |
diff --git a/net/9p/util.c b/net/9p/util.c index dc4ec05ad93d..e048701a72d2 100644 --- a/net/9p/util.c +++ b/net/9p/util.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/parser.h> | 31 | #include <linux/parser.h> |
32 | #include <linux/idr.h> | 32 | #include <linux/idr.h> |
33 | #include <linux/slab.h> | ||
33 | #include <net/9p/9p.h> | 34 | #include <net/9p/9p.h> |
34 | 35 | ||
35 | /** | 36 | /** |
diff --git a/net/Kconfig b/net/Kconfig index 041c35edb763..0d68b40fc0e6 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig" | |||
186 | source "net/rds/Kconfig" | 186 | source "net/rds/Kconfig" |
187 | source "net/tipc/Kconfig" | 187 | source "net/tipc/Kconfig" |
188 | source "net/atm/Kconfig" | 188 | source "net/atm/Kconfig" |
189 | source "net/l2tp/Kconfig" | ||
189 | source "net/802/Kconfig" | 190 | source "net/802/Kconfig" |
190 | source "net/bridge/Kconfig" | 191 | source "net/bridge/Kconfig" |
191 | source "net/dsa/Kconfig" | 192 | source "net/dsa/Kconfig" |
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig" | |||
203 | source "net/sched/Kconfig" | 204 | source "net/sched/Kconfig" |
204 | source "net/dcb/Kconfig" | 205 | source "net/dcb/Kconfig" |
205 | 206 | ||
207 | config RPS | ||
208 | boolean | ||
209 | depends on SMP && SYSFS | ||
210 | default y | ||
211 | |||
206 | menu "Network testing" | 212 | menu "Network testing" |
207 | 213 | ||
208 | config NET_PKTGEN | 214 | config NET_PKTGEN |
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig" | |||
275 | 281 | ||
276 | source "net/rfkill/Kconfig" | 282 | source "net/rfkill/Kconfig" |
277 | source "net/9p/Kconfig" | 283 | source "net/9p/Kconfig" |
284 | source "net/caif/Kconfig" | ||
285 | |||
278 | 286 | ||
279 | endif # if NET | 287 | endif # if NET |
diff --git a/net/Makefile b/net/Makefile index 1542e7268a7b..cb7bdc1210cb 100644 --- a/net/Makefile +++ b/net/Makefile | |||
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/ | |||
40 | obj-$(CONFIG_SUNRPC) += sunrpc/ | 40 | obj-$(CONFIG_SUNRPC) += sunrpc/ |
41 | obj-$(CONFIG_AF_RXRPC) += rxrpc/ | 41 | obj-$(CONFIG_AF_RXRPC) += rxrpc/ |
42 | obj-$(CONFIG_ATM) += atm/ | 42 | obj-$(CONFIG_ATM) += atm/ |
43 | obj-$(CONFIG_L2TP) += l2tp/ | ||
43 | obj-$(CONFIG_DECNET) += decnet/ | 44 | obj-$(CONFIG_DECNET) += decnet/ |
44 | obj-$(CONFIG_ECONET) += econet/ | 45 | obj-$(CONFIG_ECONET) += econet/ |
45 | obj-$(CONFIG_PHONET) += phonet/ | 46 | obj-$(CONFIG_PHONET) += phonet/ |
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/ | |||
56 | obj-$(CONFIG_IUCV) += iucv/ | 57 | obj-$(CONFIG_IUCV) += iucv/ |
57 | obj-$(CONFIG_RFKILL) += rfkill/ | 58 | obj-$(CONFIG_RFKILL) += rfkill/ |
58 | obj-$(CONFIG_NET_9P) += 9p/ | 59 | obj-$(CONFIG_NET_9P) += 9p/ |
60 | obj-$(CONFIG_CAIF) += caif/ | ||
59 | ifneq ($(CONFIG_DCB),) | 61 | ifneq ($(CONFIG_DCB),) |
60 | obj-y += dcb/ | 62 | obj-y += dcb/ |
61 | endif | 63 | endif |
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index f2b3b56aa779..50dce7981321 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c | |||
@@ -30,6 +30,7 @@ | |||
30 | */ | 30 | */ |
31 | 31 | ||
32 | #include <linux/if_arp.h> | 32 | #include <linux/if_arp.h> |
33 | #include <linux/slab.h> | ||
33 | #include <net/sock.h> | 34 | #include <net/sock.h> |
34 | #include <net/datalink.h> | 35 | #include <net/datalink.h> |
35 | #include <net/psnap.h> | 36 | #include <net/psnap.h> |
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 9fc4da56fb1d..c410b93fda2e 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/smp_lock.h> | 57 | #include <linux/smp_lock.h> |
58 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ | 58 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ |
59 | #include <linux/compat.h> | 59 | #include <linux/compat.h> |
60 | #include <linux/slab.h> | ||
60 | #include <net/datalink.h> | 61 | #include <net/datalink.h> |
61 | #include <net/psnap.h> | 62 | #include <net/psnap.h> |
62 | #include <net/sock.h> | 63 | #include <net/sock.h> |
@@ -781,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg) | |||
781 | atrtr_create(&rtdef, dev); | 782 | atrtr_create(&rtdef, dev); |
782 | } | 783 | } |
783 | } | 784 | } |
784 | dev_mc_add(dev, aarp_mcast, 6, 1); | 785 | dev_mc_add_global(dev, aarp_mcast); |
785 | return 0; | 786 | return 0; |
786 | 787 | ||
787 | case SIOCGIFADDR: | 788 | case SIOCGIFADDR: |
diff --git a/net/atm/addr.c b/net/atm/addr.c index cf3ae8b47572..dcda35c66f15 100644 --- a/net/atm/addr.c +++ b/net/atm/addr.c | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/atm.h> | 5 | #include <linux/atm.h> |
6 | #include <linux/atmdev.h> | 6 | #include <linux/atmdev.h> |
7 | #include <linux/slab.h> | ||
7 | #include <linux/uaccess.h> | 8 | #include <linux/uaccess.h> |
8 | 9 | ||
9 | #include "signaling.h" | 10 | #include "signaling.h" |
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c index f693b78eb467..799c631f0fed 100644 --- a/net/atm/atm_sysfs.c +++ b/net/atm/atm_sysfs.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* ATM driver model support. */ | 1 | /* ATM driver model support. */ |
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/slab.h> | ||
4 | #include <linux/init.h> | 5 | #include <linux/init.h> |
5 | #include <linux/kobject.h> | 6 | #include <linux/kobject.h> |
6 | #include <linux/atmdev.h> | 7 | #include <linux/atmdev.h> |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 4d64d87e7578..d6c7ceaf13e9 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/rtnetlink.h> | 18 | #include <linux/rtnetlink.h> |
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <linux/uaccess.h> | 20 | #include <linux/uaccess.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/arp.h> | 22 | #include <net/arp.h> |
22 | #include <linux/atm.h> | 23 | #include <linux/atm.h> |
23 | #include <linux/atmdev.h> | 24 | #include <linux/atmdev.h> |
diff --git a/net/atm/clip.c b/net/atm/clip.c index ebfa022008f7..313aba11316b 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/rcupdate.h> | 31 | #include <linux/rcupdate.h> |
32 | #include <linux/jhash.h> | 32 | #include <linux/jhash.h> |
33 | #include <linux/slab.h> | ||
33 | #include <net/route.h> /* for struct rtable and routing */ | 34 | #include <net/route.h> /* for struct rtable and routing */ |
34 | #include <net/icmp.h> /* icmp_send */ | 35 | #include <net/icmp.h> /* icmp_send */ |
35 | #include <linux/param.h> /* for HZ */ | 36 | #include <linux/param.h> /* for HZ */ |
diff --git a/net/atm/common.c b/net/atm/common.c index 74d095a081e3..b43feb1a3995 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/sock.h> /* struct sock */ | 22 | #include <net/sock.h> /* struct sock */ |
22 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
23 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
@@ -89,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk) | |||
89 | 90 | ||
90 | static void vcc_def_wakeup(struct sock *sk) | 91 | static void vcc_def_wakeup(struct sock *sk) |
91 | { | 92 | { |
92 | read_lock(&sk->sk_callback_lock); | 93 | struct socket_wq *wq; |
93 | if (sk_has_sleeper(sk)) | 94 | |
94 | wake_up(sk->sk_sleep); | 95 | rcu_read_lock(); |
95 | read_unlock(&sk->sk_callback_lock); | 96 | wq = rcu_dereference(sk->sk_wq); |
97 | if (wq_has_sleeper(wq)) | ||
98 | wake_up(&wq->wait); | ||
99 | rcu_read_unlock(); | ||
96 | } | 100 | } |
97 | 101 | ||
98 | static inline int vcc_writable(struct sock *sk) | 102 | static inline int vcc_writable(struct sock *sk) |
@@ -105,16 +109,19 @@ static inline int vcc_writable(struct sock *sk) | |||
105 | 109 | ||
106 | static void vcc_write_space(struct sock *sk) | 110 | static void vcc_write_space(struct sock *sk) |
107 | { | 111 | { |
108 | read_lock(&sk->sk_callback_lock); | 112 | struct socket_wq *wq; |
113 | |||
114 | rcu_read_lock(); | ||
109 | 115 | ||
110 | if (vcc_writable(sk)) { | 116 | if (vcc_writable(sk)) { |
111 | if (sk_has_sleeper(sk)) | 117 | wq = rcu_dereference(sk->sk_wq); |
112 | wake_up_interruptible(sk->sk_sleep); | 118 | if (wq_has_sleeper(wq)) |
119 | wake_up_interruptible(&wq->wait); | ||
113 | 120 | ||
114 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 121 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
115 | } | 122 | } |
116 | 123 | ||
117 | read_unlock(&sk->sk_callback_lock); | 124 | rcu_read_unlock(); |
118 | } | 125 | } |
119 | 126 | ||
120 | static struct proto vcc_proto = { | 127 | static struct proto vcc_proto = { |
@@ -548,7 +555,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | |||
548 | } | 555 | } |
549 | 556 | ||
550 | eff = (size+3) & ~3; /* align to word boundary */ | 557 | eff = (size+3) & ~3; /* align to word boundary */ |
551 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 558 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
552 | error = 0; | 559 | error = 0; |
553 | while (!(skb = alloc_tx(vcc, eff))) { | 560 | while (!(skb = alloc_tx(vcc, eff))) { |
554 | if (m->msg_flags & MSG_DONTWAIT) { | 561 | if (m->msg_flags & MSG_DONTWAIT) { |
@@ -567,9 +574,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | |||
567 | send_sig(SIGPIPE, current, 0); | 574 | send_sig(SIGPIPE, current, 0); |
568 | break; | 575 | break; |
569 | } | 576 | } |
570 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 577 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
571 | } | 578 | } |
572 | finish_wait(sk->sk_sleep, &wait); | 579 | finish_wait(sk_sleep(sk), &wait); |
573 | if (error) | 580 | if (error) |
574 | goto out; | 581 | goto out; |
575 | skb->dev = NULL; /* for paths shared with net_device interfaces */ | 582 | skb->dev = NULL; /* for paths shared with net_device interfaces */ |
@@ -594,7 +601,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
594 | struct atm_vcc *vcc; | 601 | struct atm_vcc *vcc; |
595 | unsigned int mask; | 602 | unsigned int mask; |
596 | 603 | ||
597 | sock_poll_wait(file, sk->sk_sleep, wait); | 604 | sock_poll_wait(file, sk_sleep(sk), wait); |
598 | mask = 0; | 605 | mask = 0; |
599 | 606 | ||
600 | vcc = ATM_SD(sock); | 607 | vcc = ATM_SD(sock); |
diff --git a/net/atm/lec.c b/net/atm/lec.c index 5da5753157f9..feeaf5718472 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | 7 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
8 | 8 | ||
9 | #include <linux/slab.h> | ||
9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
10 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
11 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
diff --git a/net/atm/mpc.c b/net/atm/mpc.c index a6521c8aa88b..436f2e177657 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c | |||
@@ -2,6 +2,7 @@ | |||
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/slab.h> | ||
5 | #include <linux/timer.h> | 6 | #include <linux/timer.h> |
6 | #include <linux/init.h> | 7 | #include <linux/init.h> |
7 | #include <linux/bitops.h> | 8 | #include <linux/bitops.h> |
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c index 4c141810eb6d..e773d8336918 100644 --- a/net/atm/mpoa_caches.c +++ b/net/atm/mpoa_caches.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/types.h> | 1 | #include <linux/types.h> |
2 | #include <linux/atmmpc.h> | 2 | #include <linux/atmmpc.h> |
3 | #include <linux/slab.h> | ||
3 | #include <linux/time.h> | 4 | #include <linux/time.h> |
4 | 5 | ||
5 | #include "mpoa_caches.h" | 6 | #include "mpoa_caches.h" |
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index b9bdb98427e4..53e500292271 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
13 | #include <linux/atmmpc.h> | 13 | #include <linux/atmmpc.h> |
14 | #include <linux/atm.h> | 14 | #include <linux/atm.h> |
15 | #include <linux/gfp.h> | ||
15 | #include "mpc.h" | 16 | #include "mpc.h" |
16 | #include "mpoa_caches.h" | 17 | #include "mpoa_caches.h" |
17 | 18 | ||
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 400839273c67..e49bb6d948a1 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/init.h> | 39 | #include <linux/init.h> |
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/slab.h> | ||
41 | #include <linux/atm.h> | 42 | #include <linux/atm.h> |
42 | #include <linux/atmdev.h> | 43 | #include <linux/atmdev.h> |
43 | #include <linux/capability.h> | 44 | #include <linux/capability.h> |
diff --git a/net/atm/proc.c b/net/atm/proc.c index 7a96b2376bd7..6262aeae398e 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
23 | #include <linux/atmclip.h> | 23 | #include <linux/atmclip.h> |
24 | #include <linux/init.h> /* for __init */ | 24 | #include <linux/init.h> /* for __init */ |
25 | #include <linux/slab.h> | ||
25 | #include <net/net_namespace.h> | 26 | #include <net/net_namespace.h> |
26 | #include <net/atmclip.h> | 27 | #include <net/atmclip.h> |
27 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
@@ -406,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root); | |||
406 | 407 | ||
407 | int atm_proc_dev_register(struct atm_dev *dev) | 408 | int atm_proc_dev_register(struct atm_dev *dev) |
408 | { | 409 | { |
409 | int digits, num; | ||
410 | int error; | 410 | int error; |
411 | 411 | ||
412 | /* No proc info */ | 412 | /* No proc info */ |
@@ -414,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev) | |||
414 | return 0; | 414 | return 0; |
415 | 415 | ||
416 | error = -ENOMEM; | 416 | error = -ENOMEM; |
417 | digits = 0; | 417 | dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number); |
418 | for (num = dev->number; num; num /= 10) | ||
419 | digits++; | ||
420 | if (!digits) | ||
421 | digits++; | ||
422 | |||
423 | dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL); | ||
424 | if (!dev->proc_name) | 418 | if (!dev->proc_name) |
425 | goto err_out; | 419 | goto err_out; |
426 | sprintf(dev->proc_name, "%s:%d", dev->type, dev->number); | ||
427 | 420 | ||
428 | dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, | 421 | dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, |
429 | &proc_atm_dev_ops, dev); | 422 | &proc_atm_dev_ops, dev); |
diff --git a/net/atm/raw.c b/net/atm/raw.c index d0c4bd047dc4..b4f7b9ff3c74 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/slab.h> | ||
13 | 14 | ||
14 | #include "common.h" | 15 | #include "common.h" |
15 | #include "protocols.h" | 16 | #include "protocols.h" |
diff --git a/net/atm/resources.c b/net/atm/resources.c index 90082904f20d..d29e58261511 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/capability.h> | 19 | #include <linux/capability.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <net/sock.h> /* for struct sock */ | 24 | #include <net/sock.h> /* for struct sock */ |
24 | 25 | ||
diff --git a/net/atm/signaling.c b/net/atm/signaling.c index ad1d28ae512b..509c8ac02b63 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/atmsvc.h> | 14 | #include <linux/atmsvc.h> |
15 | #include <linux/atmdev.h> | 15 | #include <linux/atmdev.h> |
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/slab.h> | ||
17 | 18 | ||
18 | #include "resources.h" | 19 | #include "resources.h" |
19 | #include "signaling.h" | 20 | #include "signaling.h" |
@@ -130,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb) | |||
130 | } | 131 | } |
131 | sk->sk_ack_backlog++; | 132 | sk->sk_ack_backlog++; |
132 | skb_queue_tail(&sk->sk_receive_queue, skb); | 133 | skb_queue_tail(&sk->sk_receive_queue, skb); |
133 | pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); | 134 | pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk)); |
134 | sk->sk_state_change(sk); | 135 | sk->sk_state_change(sk); |
135 | as_indicate_complete: | 136 | as_indicate_complete: |
136 | release_sock(sk); | 137 | release_sock(sk); |
diff --git a/net/atm/svc.c b/net/atm/svc.c index 3ba9a45a51ac..754ee4791d96 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c | |||
@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc) | |||
49 | 49 | ||
50 | pr_debug("%p\n", vcc); | 50 | pr_debug("%p\n", vcc); |
51 | if (test_bit(ATM_VF_REGIS, &vcc->flags)) { | 51 | if (test_bit(ATM_VF_REGIS, &vcc->flags)) { |
52 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 52 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
53 | sigd_enq(vcc, as_close, NULL, NULL, NULL); | 53 | sigd_enq(vcc, as_close, NULL, NULL, NULL); |
54 | while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { | 54 | while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { |
55 | schedule(); | 55 | schedule(); |
56 | prepare_to_wait(sk->sk_sleep, &wait, | 56 | prepare_to_wait(sk_sleep(sk), &wait, |
57 | TASK_UNINTERRUPTIBLE); | 57 | TASK_UNINTERRUPTIBLE); |
58 | } | 58 | } |
59 | finish_wait(sk->sk_sleep, &wait); | 59 | finish_wait(sk_sleep(sk), &wait); |
60 | } | 60 | } |
61 | /* beware - socket is still in use by atmsigd until the last | 61 | /* beware - socket is still in use by atmsigd until the last |
62 | as_indicate has been answered */ | 62 | as_indicate has been answered */ |
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr, | |||
125 | } | 125 | } |
126 | vcc->local = *addr; | 126 | vcc->local = *addr; |
127 | set_bit(ATM_VF_WAITING, &vcc->flags); | 127 | set_bit(ATM_VF_WAITING, &vcc->flags); |
128 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 128 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
129 | sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); | 129 | sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); |
130 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 130 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
131 | schedule(); | 131 | schedule(); |
132 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 132 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
133 | } | 133 | } |
134 | finish_wait(sk->sk_sleep, &wait); | 134 | finish_wait(sk_sleep(sk), &wait); |
135 | clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ | 135 | clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ |
136 | if (!sigd) { | 136 | if (!sigd) { |
137 | error = -EUNATCH; | 137 | error = -EUNATCH; |
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, | |||
201 | } | 201 | } |
202 | vcc->remote = *addr; | 202 | vcc->remote = *addr; |
203 | set_bit(ATM_VF_WAITING, &vcc->flags); | 203 | set_bit(ATM_VF_WAITING, &vcc->flags); |
204 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 204 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
205 | sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); | 205 | sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); |
206 | if (flags & O_NONBLOCK) { | 206 | if (flags & O_NONBLOCK) { |
207 | finish_wait(sk->sk_sleep, &wait); | 207 | finish_wait(sk_sleep(sk), &wait); |
208 | sock->state = SS_CONNECTING; | 208 | sock->state = SS_CONNECTING; |
209 | error = -EINPROGRESS; | 209 | error = -EINPROGRESS; |
210 | goto out; | 210 | goto out; |
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, | |||
213 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 213 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
214 | schedule(); | 214 | schedule(); |
215 | if (!signal_pending(current)) { | 215 | if (!signal_pending(current)) { |
216 | prepare_to_wait(sk->sk_sleep, &wait, | 216 | prepare_to_wait(sk_sleep(sk), &wait, |
217 | TASK_INTERRUPTIBLE); | 217 | TASK_INTERRUPTIBLE); |
218 | continue; | 218 | continue; |
219 | } | 219 | } |
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, | |||
232 | */ | 232 | */ |
233 | sigd_enq(vcc, as_close, NULL, NULL, NULL); | 233 | sigd_enq(vcc, as_close, NULL, NULL, NULL); |
234 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 234 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
235 | prepare_to_wait(sk->sk_sleep, &wait, | 235 | prepare_to_wait(sk_sleep(sk), &wait, |
236 | TASK_INTERRUPTIBLE); | 236 | TASK_INTERRUPTIBLE); |
237 | schedule(); | 237 | schedule(); |
238 | } | 238 | } |
239 | if (!sk->sk_err) | 239 | if (!sk->sk_err) |
240 | while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && | 240 | while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && |
241 | sigd) { | 241 | sigd) { |
242 | prepare_to_wait(sk->sk_sleep, &wait, | 242 | prepare_to_wait(sk_sleep(sk), &wait, |
243 | TASK_INTERRUPTIBLE); | 243 | TASK_INTERRUPTIBLE); |
244 | schedule(); | 244 | schedule(); |
245 | } | 245 | } |
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, | |||
250 | error = -EINTR; | 250 | error = -EINTR; |
251 | break; | 251 | break; |
252 | } | 252 | } |
253 | finish_wait(sk->sk_sleep, &wait); | 253 | finish_wait(sk_sleep(sk), &wait); |
254 | if (error) | 254 | if (error) |
255 | goto out; | 255 | goto out; |
256 | if (!sigd) { | 256 | if (!sigd) { |
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog) | |||
302 | goto out; | 302 | goto out; |
303 | } | 303 | } |
304 | set_bit(ATM_VF_WAITING, &vcc->flags); | 304 | set_bit(ATM_VF_WAITING, &vcc->flags); |
305 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 305 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
306 | sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); | 306 | sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); |
307 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 307 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
308 | schedule(); | 308 | schedule(); |
309 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 309 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
310 | } | 310 | } |
311 | finish_wait(sk->sk_sleep, &wait); | 311 | finish_wait(sk_sleep(sk), &wait); |
312 | if (!sigd) { | 312 | if (!sigd) { |
313 | error = -EUNATCH; | 313 | error = -EUNATCH; |
314 | goto out; | 314 | goto out; |
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags) | |||
343 | while (1) { | 343 | while (1) { |
344 | DEFINE_WAIT(wait); | 344 | DEFINE_WAIT(wait); |
345 | 345 | ||
346 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 346 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
347 | while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && | 347 | while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && |
348 | sigd) { | 348 | sigd) { |
349 | if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) | 349 | if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) |
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags) | |||
363 | error = -ERESTARTSYS; | 363 | error = -ERESTARTSYS; |
364 | break; | 364 | break; |
365 | } | 365 | } |
366 | prepare_to_wait(sk->sk_sleep, &wait, | 366 | prepare_to_wait(sk_sleep(sk), &wait, |
367 | TASK_INTERRUPTIBLE); | 367 | TASK_INTERRUPTIBLE); |
368 | } | 368 | } |
369 | finish_wait(sk->sk_sleep, &wait); | 369 | finish_wait(sk_sleep(sk), &wait); |
370 | if (error) | 370 | if (error) |
371 | goto out; | 371 | goto out; |
372 | if (!skb) { | 372 | if (!skb) { |
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags) | |||
392 | } | 392 | } |
393 | /* wait should be short, so we ignore the non-blocking flag */ | 393 | /* wait should be short, so we ignore the non-blocking flag */ |
394 | set_bit(ATM_VF_WAITING, &new_vcc->flags); | 394 | set_bit(ATM_VF_WAITING, &new_vcc->flags); |
395 | prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, | 395 | prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, |
396 | TASK_UNINTERRUPTIBLE); | 396 | TASK_UNINTERRUPTIBLE); |
397 | sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); | 397 | sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); |
398 | while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { | 398 | while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { |
399 | release_sock(sk); | 399 | release_sock(sk); |
400 | schedule(); | 400 | schedule(); |
401 | lock_sock(sk); | 401 | lock_sock(sk); |
402 | prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, | 402 | prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, |
403 | TASK_UNINTERRUPTIBLE); | 403 | TASK_UNINTERRUPTIBLE); |
404 | } | 404 | } |
405 | finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); | 405 | finish_wait(sk_sleep(sk_atm(new_vcc)), &wait); |
406 | if (!sigd) { | 406 | if (!sigd) { |
407 | error = -EUNATCH; | 407 | error = -EUNATCH; |
408 | goto out; | 408 | goto out; |
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) | |||
438 | DEFINE_WAIT(wait); | 438 | DEFINE_WAIT(wait); |
439 | 439 | ||
440 | set_bit(ATM_VF_WAITING, &vcc->flags); | 440 | set_bit(ATM_VF_WAITING, &vcc->flags); |
441 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 441 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
442 | sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); | 442 | sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); |
443 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && | 443 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && |
444 | !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { | 444 | !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { |
445 | schedule(); | 445 | schedule(); |
446 | prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); | 446 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
447 | } | 447 | } |
448 | finish_wait(sk->sk_sleep, &wait); | 448 | finish_wait(sk_sleep(sk), &wait); |
449 | if (!sigd) | 449 | if (!sigd) |
450 | return -EUNATCH; | 450 | return -EUNATCH; |
451 | return -sk->sk_err; | 451 | return -sk->sk_err; |
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, | |||
534 | 534 | ||
535 | lock_sock(sk); | 535 | lock_sock(sk); |
536 | set_bit(ATM_VF_WAITING, &vcc->flags); | 536 | set_bit(ATM_VF_WAITING, &vcc->flags); |
537 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 537 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
538 | sigd_enq(vcc, as_addparty, NULL, NULL, | 538 | sigd_enq(vcc, as_addparty, NULL, NULL, |
539 | (struct sockaddr_atmsvc *) sockaddr); | 539 | (struct sockaddr_atmsvc *) sockaddr); |
540 | if (flags & O_NONBLOCK) { | 540 | if (flags & O_NONBLOCK) { |
541 | finish_wait(sk->sk_sleep, &wait); | 541 | finish_wait(sk_sleep(sk), &wait); |
542 | error = -EINPROGRESS; | 542 | error = -EINPROGRESS; |
543 | goto out; | 543 | goto out; |
544 | } | 544 | } |
545 | pr_debug("added wait queue\n"); | 545 | pr_debug("added wait queue\n"); |
546 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 546 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
547 | schedule(); | 547 | schedule(); |
548 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 548 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
549 | } | 549 | } |
550 | finish_wait(sk->sk_sleep, &wait); | 550 | finish_wait(sk_sleep(sk), &wait); |
551 | error = xchg(&sk->sk_err_soft, 0); | 551 | error = xchg(&sk->sk_err_soft, 0); |
552 | out: | 552 | out: |
553 | release_sock(sk); | 553 | release_sock(sk); |
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref) | |||
563 | 563 | ||
564 | lock_sock(sk); | 564 | lock_sock(sk); |
565 | set_bit(ATM_VF_WAITING, &vcc->flags); | 565 | set_bit(ATM_VF_WAITING, &vcc->flags); |
566 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 566 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
567 | sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); | 567 | sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); |
568 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 568 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
569 | schedule(); | 569 | schedule(); |
570 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 570 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
571 | } | 571 | } |
572 | finish_wait(sk->sk_sleep, &wait); | 572 | finish_wait(sk_sleep(sk), &wait); |
573 | if (!sigd) { | 573 | if (!sigd) { |
574 | error = -EUNATCH; | 574 | error = -EUNATCH; |
575 | goto out; | 575 | goto out; |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index a5beedf43e2d..cfdfd7e2a172 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | #include <linux/sockios.h> | 26 | #include <linux/sockios.h> |
27 | #include <linux/net.h> | 27 | #include <linux/net.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/ax25.h> | 29 | #include <net/ax25.h> |
29 | #include <linux/inet.h> | 30 | #include <linux/inet.h> |
30 | #include <linux/netdevice.h> | 31 | #include <linux/netdevice.h> |
@@ -1280,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock, | |||
1280 | DEFINE_WAIT(wait); | 1281 | DEFINE_WAIT(wait); |
1281 | 1282 | ||
1282 | for (;;) { | 1283 | for (;;) { |
1283 | prepare_to_wait(sk->sk_sleep, &wait, | 1284 | prepare_to_wait(sk_sleep(sk), &wait, |
1284 | TASK_INTERRUPTIBLE); | 1285 | TASK_INTERRUPTIBLE); |
1285 | if (sk->sk_state != TCP_SYN_SENT) | 1286 | if (sk->sk_state != TCP_SYN_SENT) |
1286 | break; | 1287 | break; |
@@ -1293,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock, | |||
1293 | err = -ERESTARTSYS; | 1294 | err = -ERESTARTSYS; |
1294 | break; | 1295 | break; |
1295 | } | 1296 | } |
1296 | finish_wait(sk->sk_sleep, &wait); | 1297 | finish_wait(sk_sleep(sk), &wait); |
1297 | 1298 | ||
1298 | if (err) | 1299 | if (err) |
1299 | goto out_release; | 1300 | goto out_release; |
@@ -1345,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1345 | * hooked into the SABM we saved | 1346 | * hooked into the SABM we saved |
1346 | */ | 1347 | */ |
1347 | for (;;) { | 1348 | for (;;) { |
1348 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1349 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1349 | skb = skb_dequeue(&sk->sk_receive_queue); | 1350 | skb = skb_dequeue(&sk->sk_receive_queue); |
1350 | if (skb) | 1351 | if (skb) |
1351 | break; | 1352 | break; |
@@ -1363,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1363 | err = -ERESTARTSYS; | 1364 | err = -ERESTARTSYS; |
1364 | break; | 1365 | break; |
1365 | } | 1366 | } |
1366 | finish_wait(sk->sk_sleep, &wait); | 1367 | finish_wait(sk_sleep(sk), &wait); |
1367 | 1368 | ||
1368 | if (err) | 1369 | if (err) |
1369 | goto out; | 1370 | goto out; |
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index a7a0e0c9698b..c1cb982f6e86 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/in.h> | 13 | #include <linux/in.h> |
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c index b5e59787be2f..85816e612dc0 100644 --- a/net/ax25/ax25_ds_subr.c +++ b/net/ax25/ax25_ds_subr.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/sockios.h> | 17 | #include <linux/sockios.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/net.h> | 19 | #include <linux/net.h> |
20 | #include <linux/gfp.h> | ||
20 | #include <net/ax25.h> | 21 | #include <net/ax25.h> |
21 | #include <linux/inet.h> | 22 | #include <linux/inet.h> |
22 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c index 71338f112108..5a0dda8df492 100644 --- a/net/ax25/ax25_iface.c +++ b/net/ax25/ax25_iface.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/sockios.h> | 18 | #include <linux/sockios.h> |
19 | #include <linux/net.h> | 19 | #include <linux/net.h> |
20 | #include <linux/slab.h> | ||
20 | #include <net/ax25.h> | 21 | #include <net/ax25.h> |
21 | #include <linux/inet.h> | 22 | #include <linux/inet.h> |
22 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index de56d3983de0..9bb776541203 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/sockios.h> | 19 | #include <linux/sockios.h> |
20 | #include <linux/net.h> | 20 | #include <linux/net.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/ax25.h> | 22 | #include <net/ax25.h> |
22 | #include <linux/inet.h> | 23 | #include <linux/inet.h> |
23 | #include <linux/netdevice.h> | 24 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index f047a57aa95c..cf0c47a26530 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/sockios.h> | 17 | #include <linux/sockios.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/ax25.h> | 20 | #include <net/ax25.h> |
20 | #include <linux/inet.h> | 21 | #include <linux/inet.h> |
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index 14912600ec57..37507d806f65 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/sockios.h> | 19 | #include <linux/sockios.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/net.h> | 21 | #include <linux/net.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/ax25.h> | 23 | #include <net/ax25.h> |
23 | #include <linux/inet.h> | 24 | #include <linux/inet.h> |
24 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index c833ba4c45a5..7805945a5fd6 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/sockios.h> | 24 | #include <linux/sockios.h> |
25 | #include <linux/net.h> | 25 | #include <linux/net.h> |
26 | #include <linux/slab.h> | ||
26 | #include <net/ax25.h> | 27 | #include <net/ax25.h> |
27 | #include <linux/inet.h> | 28 | #include <linux/inet.h> |
28 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 034aa10a5198..c6715ee4ab8f 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/sockios.h> | 19 | #include <linux/sockios.h> |
20 | #include <linux/net.h> | 20 | #include <linux/net.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/ax25.h> | 22 | #include <net/ax25.h> |
22 | #include <linux/inet.h> | 23 | #include <linux/inet.h> |
23 | #include <linux/netdevice.h> | 24 | #include <linux/netdevice.h> |
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c index 9f13f6eefcba..d349be9578f5 100644 --- a/net/ax25/ax25_uid.c +++ b/net/ax25/ax25_uid.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/sockios.h> | 18 | #include <linux/sockios.h> |
19 | #include <linux/net.h> | 19 | #include <linux/net.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/ax25.h> | 22 | #include <net/ax25.h> |
22 | #include <linux/inet.h> | 23 | #include <linux/inet.h> |
23 | #include <linux/netdevice.h> | 24 | #include <linux/netdevice.h> |
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index 5159be6b2625..ebe0ef3f1d83 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com) | 7 | * Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com) |
8 | */ | 8 | */ |
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/sysctl.h> | 11 | #include <linux/sysctl.h> |
11 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
12 | #include <net/ax25.h> | 13 | #include <net/ax25.h> |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 087cc51f5927..421c45bd1b95 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/errno.h> | 31 | #include <linux/errno.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/slab.h> | ||
35 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
36 | #include <linux/init.h> | 35 | #include <linux/init.h> |
37 | #include <linux/poll.h> | 36 | #include <linux/poll.h> |
@@ -289,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w | |||
289 | 288 | ||
290 | BT_DBG("sock %p, sk %p", sock, sk); | 289 | BT_DBG("sock %p, sk %p", sock, sk); |
291 | 290 | ||
292 | poll_wait(file, sk->sk_sleep, wait); | 291 | poll_wait(file, sk_sleep(sk), wait); |
293 | 292 | ||
294 | if (sk->sk_state == BT_LISTEN) | 293 | if (sk->sk_state == BT_LISTEN) |
295 | return bt_accept_poll(sk); | 294 | return bt_accept_poll(sk); |
@@ -379,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) | |||
379 | 378 | ||
380 | BT_DBG("sk %p", sk); | 379 | BT_DBG("sk %p", sk); |
381 | 380 | ||
382 | add_wait_queue(sk->sk_sleep, &wait); | 381 | add_wait_queue(sk_sleep(sk), &wait); |
383 | while (sk->sk_state != state) { | 382 | while (sk->sk_state != state) { |
384 | set_current_state(TASK_INTERRUPTIBLE); | 383 | set_current_state(TASK_INTERRUPTIBLE); |
385 | 384 | ||
@@ -402,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) | |||
402 | break; | 401 | break; |
403 | } | 402 | } |
404 | set_current_state(TASK_RUNNING); | 403 | set_current_state(TASK_RUNNING); |
405 | remove_wait_queue(sk->sk_sleep, &wait); | 404 | remove_wait_queue(sk_sleep(sk), &wait); |
406 | return err; | 405 | return err; |
407 | } | 406 | } |
408 | EXPORT_SYMBOL(bt_sock_wait_state); | 407 | EXPORT_SYMBOL(bt_sock_wait_state); |
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index ef09c7b3a858..f10b41fb05a0 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/freezer.h> | 35 | #include <linux/freezer.h> |
36 | #include <linux/errno.h> | 36 | #include <linux/errno.h> |
37 | #include <linux/net.h> | 37 | #include <linux/net.h> |
38 | #include <linux/slab.h> | ||
38 | #include <net/sock.h> | 39 | #include <net/sock.h> |
39 | 40 | ||
40 | #include <linux/socket.h> | 41 | #include <linux/socket.h> |
@@ -473,7 +474,7 @@ static int bnep_session(void *arg) | |||
473 | set_user_nice(current, -15); | 474 | set_user_nice(current, -15); |
474 | 475 | ||
475 | init_waitqueue_entry(&wait, current); | 476 | init_waitqueue_entry(&wait, current); |
476 | add_wait_queue(sk->sk_sleep, &wait); | 477 | add_wait_queue(sk_sleep(sk), &wait); |
477 | while (!atomic_read(&s->killed)) { | 478 | while (!atomic_read(&s->killed)) { |
478 | set_current_state(TASK_INTERRUPTIBLE); | 479 | set_current_state(TASK_INTERRUPTIBLE); |
479 | 480 | ||
@@ -495,7 +496,7 @@ static int bnep_session(void *arg) | |||
495 | schedule(); | 496 | schedule(); |
496 | } | 497 | } |
497 | set_current_state(TASK_RUNNING); | 498 | set_current_state(TASK_RUNNING); |
498 | remove_wait_queue(sk->sk_sleep, &wait); | 499 | remove_wait_queue(sk_sleep(sk), &wait); |
499 | 500 | ||
500 | /* Cleanup session */ | 501 | /* Cleanup session */ |
501 | down_write(&bnep_session_sem); | 502 | down_write(&bnep_session_sem); |
@@ -506,7 +507,7 @@ static int bnep_session(void *arg) | |||
506 | /* Wakeup user-space polling for socket errors */ | 507 | /* Wakeup user-space polling for socket errors */ |
507 | s->sock->sk->sk_err = EUNATCH; | 508 | s->sock->sk->sk_err = EUNATCH; |
508 | 509 | ||
509 | wake_up_interruptible(s->sock->sk->sk_sleep); | 510 | wake_up_interruptible(sk_sleep(s->sock->sk)); |
510 | 511 | ||
511 | /* Release the socket */ | 512 | /* Release the socket */ |
512 | fput(s->sock->file); | 513 | fput(s->sock->file); |
@@ -637,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) | |||
637 | 638 | ||
638 | /* Kill session thread */ | 639 | /* Kill session thread */ |
639 | atomic_inc(&s->killed); | 640 | atomic_inc(&s->killed); |
640 | wake_up_interruptible(s->sock->sk->sk_sleep); | 641 | wake_up_interruptible(sk_sleep(s->sock->sk)); |
641 | } else | 642 | } else |
642 | err = -ENOENT; | 643 | err = -ENOENT; |
643 | 644 | ||
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index b6234b73c4cf..0faad5ce6dc4 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c | |||
@@ -26,6 +26,7 @@ | |||
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/slab.h> | ||
29 | 30 | ||
30 | #include <linux/socket.h> | 31 | #include <linux/socket.h> |
31 | #include <linux/netdevice.h> | 32 | #include <linux/netdevice.h> |
@@ -87,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev) | |||
87 | memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); | 88 | memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); |
88 | r->len = htons(ETH_ALEN * 2); | 89 | r->len = htons(ETH_ALEN * 2); |
89 | } else { | 90 | } else { |
90 | struct dev_mc_list *dmi = dev->mc_list; | 91 | struct netdev_hw_addr *ha; |
91 | int i, len = skb->len; | 92 | int i, len = skb->len; |
92 | 93 | ||
93 | if (dev->flags & IFF_BROADCAST) { | 94 | if (dev->flags & IFF_BROADCAST) { |
@@ -97,18 +98,18 @@ static void bnep_net_set_mc_list(struct net_device *dev) | |||
97 | 98 | ||
98 | /* FIXME: We should group addresses here. */ | 99 | /* FIXME: We should group addresses here. */ |
99 | 100 | ||
100 | for (i = 0; | 101 | i = 0; |
101 | i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS; | 102 | netdev_for_each_mc_addr(ha, dev) { |
102 | i++) { | 103 | if (i == BNEP_MAX_MULTICAST_FILTERS) |
103 | memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); | 104 | break; |
104 | memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); | 105 | memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); |
105 | dmi = dmi->next; | 106 | memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); |
106 | } | 107 | } |
107 | r->len = htons(skb->len - len); | 108 | r->len = htons(skb->len - len); |
108 | } | 109 | } |
109 | 110 | ||
110 | skb_queue_tail(&sk->sk_write_queue, skb); | 111 | skb_queue_tail(&sk->sk_write_queue, skb); |
111 | wake_up_interruptible(sk->sk_sleep); | 112 | wake_up_interruptible(sk_sleep(sk)); |
112 | #endif | 113 | #endif |
113 | } | 114 | } |
114 | 115 | ||
@@ -192,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, | |||
192 | /* | 193 | /* |
193 | * We cannot send L2CAP packets from here as we are potentially in a bh. | 194 | * We cannot send L2CAP packets from here as we are potentially in a bh. |
194 | * So we have to queue them and wake up session thread which is sleeping | 195 | * So we have to queue them and wake up session thread which is sleeping |
195 | * on the sk->sk_sleep. | 196 | * on the sk_sleep(sk). |
196 | */ | 197 | */ |
197 | dev->trans_start = jiffies; | 198 | dev->trans_start = jiffies; |
198 | skb_queue_tail(&sk->sk_write_queue, skb); | 199 | skb_queue_tail(&sk->sk_write_queue, skb); |
199 | wake_up_interruptible(sk->sk_sleep); | 200 | wake_up_interruptible(sk_sleep(sk)); |
200 | 201 | ||
201 | if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { | 202 | if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { |
202 | BT_DBG("tx queue is full"); | 203 | BT_DBG("tx queue is full"); |
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 2ff6ac7b2ed4..2862f53b66b1 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/capability.h> | 30 | #include <linux/capability.h> |
31 | #include <linux/errno.h> | 31 | #include <linux/errno.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/slab.h> | ||
34 | #include <linux/poll.h> | 33 | #include <linux/poll.h> |
35 | #include <linux/fcntl.h> | 34 | #include <linux/fcntl.h> |
36 | #include <linux/skbuff.h> | 35 | #include <linux/skbuff.h> |
@@ -39,6 +38,7 @@ | |||
39 | #include <linux/file.h> | 38 | #include <linux/file.h> |
40 | #include <linux/init.h> | 39 | #include <linux/init.h> |
41 | #include <linux/compat.h> | 40 | #include <linux/compat.h> |
41 | #include <linux/gfp.h> | ||
42 | #include <net/sock.h> | 42 | #include <net/sock.h> |
43 | 43 | ||
44 | #include <asm/system.h> | 44 | #include <asm/system.h> |
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h index e4663aa14d26..785e79e953c5 100644 --- a/net/bluetooth/cmtp/cmtp.h +++ b/net/bluetooth/cmtp/cmtp.h | |||
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session) | |||
125 | { | 125 | { |
126 | struct sock *sk = session->sock->sk; | 126 | struct sock *sk = session->sock->sk; |
127 | 127 | ||
128 | wake_up_interruptible(sk->sk_sleep); | 128 | wake_up_interruptible(sk_sleep(sk)); |
129 | } | 129 | } |
130 | 130 | ||
131 | /* CMTP init defines */ | 131 | /* CMTP init defines */ |
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 0073ec8495da..d4c6af082d48 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c | |||
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg) | |||
284 | set_user_nice(current, -15); | 284 | set_user_nice(current, -15); |
285 | 285 | ||
286 | init_waitqueue_entry(&wait, current); | 286 | init_waitqueue_entry(&wait, current); |
287 | add_wait_queue(sk->sk_sleep, &wait); | 287 | add_wait_queue(sk_sleep(sk), &wait); |
288 | while (!atomic_read(&session->terminate)) { | 288 | while (!atomic_read(&session->terminate)) { |
289 | set_current_state(TASK_INTERRUPTIBLE); | 289 | set_current_state(TASK_INTERRUPTIBLE); |
290 | 290 | ||
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg) | |||
301 | schedule(); | 301 | schedule(); |
302 | } | 302 | } |
303 | set_current_state(TASK_RUNNING); | 303 | set_current_state(TASK_RUNNING); |
304 | remove_wait_queue(sk->sk_sleep, &wait); | 304 | remove_wait_queue(sk_sleep(sk), &wait); |
305 | 305 | ||
306 | down_write(&cmtp_session_sem); | 306 | down_write(&cmtp_session_sem); |
307 | 307 | ||
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index 978cc3a718ad..7ea1979a8e4f 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/capability.h> | 26 | #include <linux/capability.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/slab.h> | ||
30 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
31 | #include <linux/fcntl.h> | 30 | #include <linux/fcntl.h> |
32 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
@@ -34,6 +33,7 @@ | |||
34 | #include <linux/ioctl.h> | 33 | #include <linux/ioctl.h> |
35 | #include <linux/file.h> | 34 | #include <linux/file.h> |
36 | #include <linux/compat.h> | 35 | #include <linux/compat.h> |
36 | #include <linux/gfp.h> | ||
37 | #include <net/sock.h> | 37 | #include <net/sock.h> |
38 | 38 | ||
39 | #include <linux/isdn/capilli.h> | 39 | #include <linux/isdn/capilli.h> |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index cafb55b0cea5..0e8e1a59856c 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* Bluetooth HCI driver model support. */ | 1 | /* Bluetooth HCI driver model support. */ |
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/slab.h> | ||
4 | #include <linux/init.h> | 5 | #include <linux/init.h> |
5 | #include <linux/debugfs.h> | 6 | #include <linux/debugfs.h> |
6 | #include <linux/seq_file.h> | 7 | #include <linux/seq_file.h> |
@@ -8,8 +9,7 @@ | |||
8 | #include <net/bluetooth/bluetooth.h> | 9 | #include <net/bluetooth/bluetooth.h> |
9 | #include <net/bluetooth/hci_core.h> | 10 | #include <net/bluetooth/hci_core.h> |
10 | 11 | ||
11 | struct class *bt_class = NULL; | 12 | static struct class *bt_class; |
12 | EXPORT_SYMBOL_GPL(bt_class); | ||
13 | 13 | ||
14 | struct dentry *bt_debugfs = NULL; | 14 | struct dentry *bt_debugfs = NULL; |
15 | EXPORT_SYMBOL_GPL(bt_debugfs); | 15 | EXPORT_SYMBOL_GPL(bt_debugfs); |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 280529ad9274..bfe641b7dfaf 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -561,8 +561,8 @@ static int hidp_session(void *arg) | |||
561 | 561 | ||
562 | init_waitqueue_entry(&ctrl_wait, current); | 562 | init_waitqueue_entry(&ctrl_wait, current); |
563 | init_waitqueue_entry(&intr_wait, current); | 563 | init_waitqueue_entry(&intr_wait, current); |
564 | add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); | 564 | add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); |
565 | add_wait_queue(intr_sk->sk_sleep, &intr_wait); | 565 | add_wait_queue(sk_sleep(intr_sk), &intr_wait); |
566 | while (!atomic_read(&session->terminate)) { | 566 | while (!atomic_read(&session->terminate)) { |
567 | set_current_state(TASK_INTERRUPTIBLE); | 567 | set_current_state(TASK_INTERRUPTIBLE); |
568 | 568 | ||
@@ -584,8 +584,8 @@ static int hidp_session(void *arg) | |||
584 | schedule(); | 584 | schedule(); |
585 | } | 585 | } |
586 | set_current_state(TASK_RUNNING); | 586 | set_current_state(TASK_RUNNING); |
587 | remove_wait_queue(intr_sk->sk_sleep, &intr_wait); | 587 | remove_wait_queue(sk_sleep(intr_sk), &intr_wait); |
588 | remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); | 588 | remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); |
589 | 589 | ||
590 | down_write(&hidp_session_sem); | 590 | down_write(&hidp_session_sem); |
591 | 591 | ||
@@ -609,7 +609,7 @@ static int hidp_session(void *arg) | |||
609 | 609 | ||
610 | fput(session->intr_sock->file); | 610 | fput(session->intr_sock->file); |
611 | 611 | ||
612 | wait_event_timeout(*(ctrl_sk->sk_sleep), | 612 | wait_event_timeout(*(sk_sleep(ctrl_sk)), |
613 | (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); | 613 | (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); |
614 | 614 | ||
615 | fput(session->ctrl_sock->file); | 615 | fput(session->ctrl_sock->file); |
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index a4e215d50c10..8d934a19da0a 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h | |||
@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session) | |||
164 | struct sock *ctrl_sk = session->ctrl_sock->sk; | 164 | struct sock *ctrl_sk = session->ctrl_sock->sk; |
165 | struct sock *intr_sk = session->intr_sock->sk; | 165 | struct sock *intr_sk = session->intr_sock->sk; |
166 | 166 | ||
167 | wake_up_interruptible(ctrl_sk->sk_sleep); | 167 | wake_up_interruptible(sk_sleep(ctrl_sk)); |
168 | wake_up_interruptible(intr_sk->sk_sleep); | 168 | wake_up_interruptible(sk_sleep(intr_sk)); |
169 | } | 169 | } |
170 | 170 | ||
171 | /* HIDP init defines */ | 171 | /* HIDP init defines */ |
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 9cfef68b9fec..250dfd46237d 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/capability.h> | 26 | #include <linux/capability.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/slab.h> | ||
30 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
31 | #include <linux/fcntl.h> | 30 | #include <linux/fcntl.h> |
32 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
@@ -35,6 +34,7 @@ | |||
35 | #include <linux/file.h> | 34 | #include <linux/file.h> |
36 | #include <linux/init.h> | 35 | #include <linux/init.h> |
37 | #include <linux/compat.h> | 36 | #include <linux/compat.h> |
37 | #include <linux/gfp.h> | ||
38 | #include <net/sock.h> | 38 | #include <net/sock.h> |
39 | 39 | ||
40 | #include "hidp.h" | 40 | #include "hidp.h" |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 400efa26ddba..864c76f4a678 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/device.h> | 42 | #include <linux/device.h> |
43 | #include <linux/debugfs.h> | ||
44 | #include <linux/seq_file.h> | ||
43 | #include <linux/uaccess.h> | 45 | #include <linux/uaccess.h> |
44 | #include <linux/crc16.h> | 46 | #include <linux/crc16.h> |
45 | #include <net/sock.h> | 47 | #include <net/sock.h> |
@@ -1000,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al | |||
1000 | 1002 | ||
1001 | BT_DBG("sk %p", sk); | 1003 | BT_DBG("sk %p", sk); |
1002 | 1004 | ||
1003 | if (!addr || addr->sa_family != AF_BLUETOOTH) | 1005 | if (!addr || alen < sizeof(addr->sa_family) || |
1006 | addr->sa_family != AF_BLUETOOTH) | ||
1004 | return -EINVAL; | 1007 | return -EINVAL; |
1005 | 1008 | ||
1006 | memset(&la, 0, sizeof(la)); | 1009 | memset(&la, 0, sizeof(la)); |
@@ -1144,7 +1147,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl | |||
1144 | BT_DBG("sk %p timeo %ld", sk, timeo); | 1147 | BT_DBG("sk %p timeo %ld", sk, timeo); |
1145 | 1148 | ||
1146 | /* Wait for an incoming connection. (wake-one). */ | 1149 | /* Wait for an incoming connection. (wake-one). */ |
1147 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 1150 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
1148 | while (!(nsk = bt_accept_dequeue(sk, newsock))) { | 1151 | while (!(nsk = bt_accept_dequeue(sk, newsock))) { |
1149 | set_current_state(TASK_INTERRUPTIBLE); | 1152 | set_current_state(TASK_INTERRUPTIBLE); |
1150 | if (!timeo) { | 1153 | if (!timeo) { |
@@ -1167,7 +1170,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl | |||
1167 | } | 1170 | } |
1168 | } | 1171 | } |
1169 | set_current_state(TASK_RUNNING); | 1172 | set_current_state(TASK_RUNNING); |
1170 | remove_wait_queue(sk->sk_sleep, &wait); | 1173 | remove_wait_queue(sk_sleep(sk), &wait); |
1171 | 1174 | ||
1172 | if (err) | 1175 | if (err) |
1173 | goto done; | 1176 | goto done; |
@@ -1623,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms | |||
1623 | /* Connectionless channel */ | 1626 | /* Connectionless channel */ |
1624 | if (sk->sk_type == SOCK_DGRAM) { | 1627 | if (sk->sk_type == SOCK_DGRAM) { |
1625 | skb = l2cap_create_connless_pdu(sk, msg, len); | 1628 | skb = l2cap_create_connless_pdu(sk, msg, len); |
1626 | err = l2cap_do_send(sk, skb); | 1629 | if (IS_ERR(skb)) |
1630 | err = PTR_ERR(skb); | ||
1631 | else | ||
1632 | err = l2cap_do_send(sk, skb); | ||
1627 | goto done; | 1633 | goto done; |
1628 | } | 1634 | } |
1629 | 1635 | ||
@@ -2830,6 +2836,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2830 | int len = cmd->len - sizeof(*rsp); | 2836 | int len = cmd->len - sizeof(*rsp); |
2831 | char req[64]; | 2837 | char req[64]; |
2832 | 2838 | ||
2839 | if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { | ||
2840 | l2cap_send_disconn_req(conn, sk); | ||
2841 | goto done; | ||
2842 | } | ||
2843 | |||
2833 | /* throw out any old stored conf requests */ | 2844 | /* throw out any old stored conf requests */ |
2834 | result = L2CAP_CONF_SUCCESS; | 2845 | result = L2CAP_CONF_SUCCESS; |
2835 | len = l2cap_parse_conf_rsp(sk, rsp->data, | 2846 | len = l2cap_parse_conf_rsp(sk, rsp->data, |
@@ -3937,29 +3948,42 @@ drop: | |||
3937 | return 0; | 3948 | return 0; |
3938 | } | 3949 | } |
3939 | 3950 | ||
3940 | static ssize_t l2cap_sysfs_show(struct class *dev, char *buf) | 3951 | static int l2cap_debugfs_show(struct seq_file *f, void *p) |
3941 | { | 3952 | { |
3942 | struct sock *sk; | 3953 | struct sock *sk; |
3943 | struct hlist_node *node; | 3954 | struct hlist_node *node; |
3944 | char *str = buf; | ||
3945 | 3955 | ||
3946 | read_lock_bh(&l2cap_sk_list.lock); | 3956 | read_lock_bh(&l2cap_sk_list.lock); |
3947 | 3957 | ||
3948 | sk_for_each(sk, node, &l2cap_sk_list.head) { | 3958 | sk_for_each(sk, node, &l2cap_sk_list.head) { |
3949 | struct l2cap_pinfo *pi = l2cap_pi(sk); | 3959 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
3950 | 3960 | ||
3951 | str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", | 3961 | seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", |
3952 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 3962 | batostr(&bt_sk(sk)->src), |
3953 | sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, | 3963 | batostr(&bt_sk(sk)->dst), |
3954 | pi->dcid, pi->imtu, pi->omtu, pi->sec_level); | 3964 | sk->sk_state, __le16_to_cpu(pi->psm), |
3965 | pi->scid, pi->dcid, | ||
3966 | pi->imtu, pi->omtu, pi->sec_level); | ||
3955 | } | 3967 | } |
3956 | 3968 | ||
3957 | read_unlock_bh(&l2cap_sk_list.lock); | 3969 | read_unlock_bh(&l2cap_sk_list.lock); |
3958 | 3970 | ||
3959 | return str - buf; | 3971 | return 0; |
3960 | } | 3972 | } |
3961 | 3973 | ||
3962 | static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); | 3974 | static int l2cap_debugfs_open(struct inode *inode, struct file *file) |
3975 | { | ||
3976 | return single_open(file, l2cap_debugfs_show, inode->i_private); | ||
3977 | } | ||
3978 | |||
3979 | static const struct file_operations l2cap_debugfs_fops = { | ||
3980 | .open = l2cap_debugfs_open, | ||
3981 | .read = seq_read, | ||
3982 | .llseek = seq_lseek, | ||
3983 | .release = single_release, | ||
3984 | }; | ||
3985 | |||
3986 | static struct dentry *l2cap_debugfs; | ||
3963 | 3987 | ||
3964 | static const struct proto_ops l2cap_sock_ops = { | 3988 | static const struct proto_ops l2cap_sock_ops = { |
3965 | .family = PF_BLUETOOTH, | 3989 | .family = PF_BLUETOOTH, |
@@ -4019,8 +4043,12 @@ static int __init l2cap_init(void) | |||
4019 | goto error; | 4043 | goto error; |
4020 | } | 4044 | } |
4021 | 4045 | ||
4022 | if (class_create_file(bt_class, &class_attr_l2cap) < 0) | 4046 | if (bt_debugfs) { |
4023 | BT_ERR("Failed to create L2CAP info file"); | 4047 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, |
4048 | bt_debugfs, NULL, &l2cap_debugfs_fops); | ||
4049 | if (!l2cap_debugfs) | ||
4050 | BT_ERR("Failed to create L2CAP debug file"); | ||
4051 | } | ||
4024 | 4052 | ||
4025 | BT_INFO("L2CAP ver %s", VERSION); | 4053 | BT_INFO("L2CAP ver %s", VERSION); |
4026 | BT_INFO("L2CAP socket layer initialized"); | 4054 | BT_INFO("L2CAP socket layer initialized"); |
@@ -4034,7 +4062,7 @@ error: | |||
4034 | 4062 | ||
4035 | static void __exit l2cap_exit(void) | 4063 | static void __exit l2cap_exit(void) |
4036 | { | 4064 | { |
4037 | class_remove_file(bt_class, &class_attr_l2cap); | 4065 | debugfs_remove(l2cap_debugfs); |
4038 | 4066 | ||
4039 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) | 4067 | if (bt_sock_unregister(BTPROTO_L2CAP) < 0) |
4040 | BT_ERR("L2CAP socket unregistration failed"); | 4068 | BT_ERR("L2CAP socket unregistration failed"); |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 89f4a59eb82b..7dca91bb8c57 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -33,9 +33,12 @@ | |||
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | #include <linux/device.h> | 35 | #include <linux/device.h> |
36 | #include <linux/debugfs.h> | ||
37 | #include <linux/seq_file.h> | ||
36 | #include <linux/net.h> | 38 | #include <linux/net.h> |
37 | #include <linux/mutex.h> | 39 | #include <linux/mutex.h> |
38 | #include <linux/kthread.h> | 40 | #include <linux/kthread.h> |
41 | #include <linux/slab.h> | ||
39 | 42 | ||
40 | #include <net/sock.h> | 43 | #include <net/sock.h> |
41 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
@@ -2098,11 +2101,10 @@ static struct hci_cb rfcomm_cb = { | |||
2098 | .security_cfm = rfcomm_security_cfm | 2101 | .security_cfm = rfcomm_security_cfm |
2099 | }; | 2102 | }; |
2100 | 2103 | ||
2101 | static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf) | 2104 | static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) |
2102 | { | 2105 | { |
2103 | struct rfcomm_session *s; | 2106 | struct rfcomm_session *s; |
2104 | struct list_head *pp, *p; | 2107 | struct list_head *pp, *p; |
2105 | char *str = buf; | ||
2106 | 2108 | ||
2107 | rfcomm_lock(); | 2109 | rfcomm_lock(); |
2108 | 2110 | ||
@@ -2112,18 +2114,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf) | |||
2112 | struct sock *sk = s->sock->sk; | 2114 | struct sock *sk = s->sock->sk; |
2113 | struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); | 2115 | struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); |
2114 | 2116 | ||
2115 | str += sprintf(str, "%s %s %ld %d %d %d %d\n", | 2117 | seq_printf(f, "%s %s %ld %d %d %d %d\n", |
2116 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 2118 | batostr(&bt_sk(sk)->src), |
2117 | d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); | 2119 | batostr(&bt_sk(sk)->dst), |
2120 | d->state, d->dlci, d->mtu, | ||
2121 | d->rx_credits, d->tx_credits); | ||
2118 | } | 2122 | } |
2119 | } | 2123 | } |
2120 | 2124 | ||
2121 | rfcomm_unlock(); | 2125 | rfcomm_unlock(); |
2122 | 2126 | ||
2123 | return (str - buf); | 2127 | return 0; |
2124 | } | 2128 | } |
2125 | 2129 | ||
2126 | static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); | 2130 | static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file) |
2131 | { | ||
2132 | return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private); | ||
2133 | } | ||
2134 | |||
2135 | static const struct file_operations rfcomm_dlc_debugfs_fops = { | ||
2136 | .open = rfcomm_dlc_debugfs_open, | ||
2137 | .read = seq_read, | ||
2138 | .llseek = seq_lseek, | ||
2139 | .release = single_release, | ||
2140 | }; | ||
2141 | |||
2142 | static struct dentry *rfcomm_dlc_debugfs; | ||
2127 | 2143 | ||
2128 | /* ---- Initialization ---- */ | 2144 | /* ---- Initialization ---- */ |
2129 | static int __init rfcomm_init(void) | 2145 | static int __init rfcomm_init(void) |
@@ -2140,8 +2156,12 @@ static int __init rfcomm_init(void) | |||
2140 | goto unregister; | 2156 | goto unregister; |
2141 | } | 2157 | } |
2142 | 2158 | ||
2143 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) | 2159 | if (bt_debugfs) { |
2144 | BT_ERR("Failed to create RFCOMM info file"); | 2160 | rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, |
2161 | bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops); | ||
2162 | if (!rfcomm_dlc_debugfs) | ||
2163 | BT_ERR("Failed to create RFCOMM debug file"); | ||
2164 | } | ||
2145 | 2165 | ||
2146 | err = rfcomm_init_ttys(); | 2166 | err = rfcomm_init_ttys(); |
2147 | if (err < 0) | 2167 | if (err < 0) |
@@ -2169,7 +2189,7 @@ unregister: | |||
2169 | 2189 | ||
2170 | static void __exit rfcomm_exit(void) | 2190 | static void __exit rfcomm_exit(void) |
2171 | { | 2191 | { |
2172 | class_remove_file(bt_class, &class_attr_rfcomm_dlc); | 2192 | debugfs_remove(rfcomm_dlc_debugfs); |
2173 | 2193 | ||
2174 | hci_unregister_cb(&rfcomm_cb); | 2194 | hci_unregister_cb(&rfcomm_cb); |
2175 | 2195 | ||
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 4b5968dda673..43fbf6b4b4bf 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/device.h> | 42 | #include <linux/device.h> |
43 | #include <linux/debugfs.h> | ||
44 | #include <linux/seq_file.h> | ||
43 | #include <net/sock.h> | 45 | #include <net/sock.h> |
44 | 46 | ||
45 | #include <asm/system.h> | 47 | #include <asm/system.h> |
@@ -395,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a | |||
395 | 397 | ||
396 | BT_DBG("sk %p", sk); | 398 | BT_DBG("sk %p", sk); |
397 | 399 | ||
398 | if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) | 400 | if (alen < sizeof(struct sockaddr_rc) || |
401 | addr->sa_family != AF_BLUETOOTH) | ||
399 | return -EINVAL; | 402 | return -EINVAL; |
400 | 403 | ||
401 | lock_sock(sk); | 404 | lock_sock(sk); |
@@ -500,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f | |||
500 | BT_DBG("sk %p timeo %ld", sk, timeo); | 503 | BT_DBG("sk %p timeo %ld", sk, timeo); |
501 | 504 | ||
502 | /* Wait for an incoming connection. (wake-one). */ | 505 | /* Wait for an incoming connection. (wake-one). */ |
503 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 506 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
504 | while (!(nsk = bt_accept_dequeue(sk, newsock))) { | 507 | while (!(nsk = bt_accept_dequeue(sk, newsock))) { |
505 | set_current_state(TASK_INTERRUPTIBLE); | 508 | set_current_state(TASK_INTERRUPTIBLE); |
506 | if (!timeo) { | 509 | if (!timeo) { |
@@ -523,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f | |||
523 | } | 526 | } |
524 | } | 527 | } |
525 | set_current_state(TASK_RUNNING); | 528 | set_current_state(TASK_RUNNING); |
526 | remove_wait_queue(sk->sk_sleep, &wait); | 529 | remove_wait_queue(sk_sleep(sk), &wait); |
527 | 530 | ||
528 | if (err) | 531 | if (err) |
529 | goto done; | 532 | goto done; |
@@ -618,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo) | |||
618 | { | 621 | { |
619 | DECLARE_WAITQUEUE(wait, current); | 622 | DECLARE_WAITQUEUE(wait, current); |
620 | 623 | ||
621 | add_wait_queue(sk->sk_sleep, &wait); | 624 | add_wait_queue(sk_sleep(sk), &wait); |
622 | for (;;) { | 625 | for (;;) { |
623 | set_current_state(TASK_INTERRUPTIBLE); | 626 | set_current_state(TASK_INTERRUPTIBLE); |
624 | 627 | ||
@@ -637,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo) | |||
637 | } | 640 | } |
638 | 641 | ||
639 | __set_current_state(TASK_RUNNING); | 642 | __set_current_state(TASK_RUNNING); |
640 | remove_wait_queue(sk->sk_sleep, &wait); | 643 | remove_wait_queue(sk_sleep(sk), &wait); |
641 | return timeo; | 644 | return timeo; |
642 | } | 645 | } |
643 | 646 | ||
@@ -1061,26 +1064,38 @@ done: | |||
1061 | return result; | 1064 | return result; |
1062 | } | 1065 | } |
1063 | 1066 | ||
1064 | static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf) | 1067 | static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) |
1065 | { | 1068 | { |
1066 | struct sock *sk; | 1069 | struct sock *sk; |
1067 | struct hlist_node *node; | 1070 | struct hlist_node *node; |
1068 | char *str = buf; | ||
1069 | 1071 | ||
1070 | read_lock_bh(&rfcomm_sk_list.lock); | 1072 | read_lock_bh(&rfcomm_sk_list.lock); |
1071 | 1073 | ||
1072 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 1074 | sk_for_each(sk, node, &rfcomm_sk_list.head) { |
1073 | str += sprintf(str, "%s %s %d %d\n", | 1075 | seq_printf(f, "%s %s %d %d\n", |
1074 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 1076 | batostr(&bt_sk(sk)->src), |
1077 | batostr(&bt_sk(sk)->dst), | ||
1075 | sk->sk_state, rfcomm_pi(sk)->channel); | 1078 | sk->sk_state, rfcomm_pi(sk)->channel); |
1076 | } | 1079 | } |
1077 | 1080 | ||
1078 | read_unlock_bh(&rfcomm_sk_list.lock); | 1081 | read_unlock_bh(&rfcomm_sk_list.lock); |
1079 | 1082 | ||
1080 | return (str - buf); | 1083 | return 0; |
1081 | } | 1084 | } |
1082 | 1085 | ||
1083 | static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); | 1086 | static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file) |
1087 | { | ||
1088 | return single_open(file, rfcomm_sock_debugfs_show, inode->i_private); | ||
1089 | } | ||
1090 | |||
1091 | static const struct file_operations rfcomm_sock_debugfs_fops = { | ||
1092 | .open = rfcomm_sock_debugfs_open, | ||
1093 | .read = seq_read, | ||
1094 | .llseek = seq_lseek, | ||
1095 | .release = single_release, | ||
1096 | }; | ||
1097 | |||
1098 | static struct dentry *rfcomm_sock_debugfs; | ||
1084 | 1099 | ||
1085 | static const struct proto_ops rfcomm_sock_ops = { | 1100 | static const struct proto_ops rfcomm_sock_ops = { |
1086 | .family = PF_BLUETOOTH, | 1101 | .family = PF_BLUETOOTH, |
@@ -1120,8 +1135,12 @@ int __init rfcomm_init_sockets(void) | |||
1120 | if (err < 0) | 1135 | if (err < 0) |
1121 | goto error; | 1136 | goto error; |
1122 | 1137 | ||
1123 | if (class_create_file(bt_class, &class_attr_rfcomm) < 0) | 1138 | if (bt_debugfs) { |
1124 | BT_ERR("Failed to create RFCOMM info file"); | 1139 | rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, |
1140 | bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); | ||
1141 | if (!rfcomm_sock_debugfs) | ||
1142 | BT_ERR("Failed to create RFCOMM debug file"); | ||
1143 | } | ||
1125 | 1144 | ||
1126 | BT_INFO("RFCOMM socket layer initialized"); | 1145 | BT_INFO("RFCOMM socket layer initialized"); |
1127 | 1146 | ||
@@ -1135,7 +1154,7 @@ error: | |||
1135 | 1154 | ||
1136 | void rfcomm_cleanup_sockets(void) | 1155 | void rfcomm_cleanup_sockets(void) |
1137 | { | 1156 | { |
1138 | class_remove_file(bt_class, &class_attr_rfcomm); | 1157 | debugfs_remove(rfcomm_sock_debugfs); |
1139 | 1158 | ||
1140 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) | 1159 | if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) |
1141 | BT_ERR("RFCOMM socket layer unregistration failed"); | 1160 | BT_ERR("RFCOMM socket layer unregistration failed"); |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index dd8f6ec57dce..b406d3eff53a 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -38,6 +38,8 @@ | |||
38 | #include <linux/socket.h> | 38 | #include <linux/socket.h> |
39 | #include <linux/skbuff.h> | 39 | #include <linux/skbuff.h> |
40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/debugfs.h> | ||
42 | #include <linux/seq_file.h> | ||
41 | #include <linux/list.h> | 43 | #include <linux/list.h> |
42 | #include <net/sock.h> | 44 | #include <net/sock.h> |
43 | 45 | ||
@@ -497,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen | |||
497 | 499 | ||
498 | BT_DBG("sk %p", sk); | 500 | BT_DBG("sk %p", sk); |
499 | 501 | ||
500 | if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) | 502 | if (alen < sizeof(struct sockaddr_sco) || |
503 | addr->sa_family != AF_BLUETOOTH) | ||
501 | return -EINVAL; | 504 | return -EINVAL; |
502 | 505 | ||
503 | if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) | 506 | if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) |
@@ -564,7 +567,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag | |||
564 | BT_DBG("sk %p timeo %ld", sk, timeo); | 567 | BT_DBG("sk %p timeo %ld", sk, timeo); |
565 | 568 | ||
566 | /* Wait for an incoming connection. (wake-one). */ | 569 | /* Wait for an incoming connection. (wake-one). */ |
567 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 570 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
568 | while (!(ch = bt_accept_dequeue(sk, newsock))) { | 571 | while (!(ch = bt_accept_dequeue(sk, newsock))) { |
569 | set_current_state(TASK_INTERRUPTIBLE); | 572 | set_current_state(TASK_INTERRUPTIBLE); |
570 | if (!timeo) { | 573 | if (!timeo) { |
@@ -587,7 +590,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag | |||
587 | } | 590 | } |
588 | } | 591 | } |
589 | set_current_state(TASK_RUNNING); | 592 | set_current_state(TASK_RUNNING); |
590 | remove_wait_queue(sk->sk_sleep, &wait); | 593 | remove_wait_queue(sk_sleep(sk), &wait); |
591 | 594 | ||
592 | if (err) | 595 | if (err) |
593 | goto done; | 596 | goto done; |
@@ -953,26 +956,36 @@ drop: | |||
953 | return 0; | 956 | return 0; |
954 | } | 957 | } |
955 | 958 | ||
956 | static ssize_t sco_sysfs_show(struct class *dev, char *buf) | 959 | static int sco_debugfs_show(struct seq_file *f, void *p) |
957 | { | 960 | { |
958 | struct sock *sk; | 961 | struct sock *sk; |
959 | struct hlist_node *node; | 962 | struct hlist_node *node; |
960 | char *str = buf; | ||
961 | 963 | ||
962 | read_lock_bh(&sco_sk_list.lock); | 964 | read_lock_bh(&sco_sk_list.lock); |
963 | 965 | ||
964 | sk_for_each(sk, node, &sco_sk_list.head) { | 966 | sk_for_each(sk, node, &sco_sk_list.head) { |
965 | str += sprintf(str, "%s %s %d\n", | 967 | seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), |
966 | batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), | 968 | batostr(&bt_sk(sk)->dst), sk->sk_state); |
967 | sk->sk_state); | ||
968 | } | 969 | } |
969 | 970 | ||
970 | read_unlock_bh(&sco_sk_list.lock); | 971 | read_unlock_bh(&sco_sk_list.lock); |
971 | 972 | ||
972 | return (str - buf); | 973 | return 0; |
973 | } | 974 | } |
974 | 975 | ||
975 | static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); | 976 | static int sco_debugfs_open(struct inode *inode, struct file *file) |
977 | { | ||
978 | return single_open(file, sco_debugfs_show, inode->i_private); | ||
979 | } | ||
980 | |||
981 | static const struct file_operations sco_debugfs_fops = { | ||
982 | .open = sco_debugfs_open, | ||
983 | .read = seq_read, | ||
984 | .llseek = seq_lseek, | ||
985 | .release = single_release, | ||
986 | }; | ||
987 | |||
988 | static struct dentry *sco_debugfs; | ||
976 | 989 | ||
977 | static const struct proto_ops sco_sock_ops = { | 990 | static const struct proto_ops sco_sock_ops = { |
978 | .family = PF_BLUETOOTH, | 991 | .family = PF_BLUETOOTH, |
@@ -1030,8 +1043,12 @@ static int __init sco_init(void) | |||
1030 | goto error; | 1043 | goto error; |
1031 | } | 1044 | } |
1032 | 1045 | ||
1033 | if (class_create_file(bt_class, &class_attr_sco) < 0) | 1046 | if (bt_debugfs) { |
1034 | BT_ERR("Failed to create SCO info file"); | 1047 | sco_debugfs = debugfs_create_file("sco", 0444, |
1048 | bt_debugfs, NULL, &sco_debugfs_fops); | ||
1049 | if (!sco_debugfs) | ||
1050 | BT_ERR("Failed to create SCO debug file"); | ||
1051 | } | ||
1035 | 1052 | ||
1036 | BT_INFO("SCO (Voice Link) ver %s", VERSION); | 1053 | BT_INFO("SCO (Voice Link) ver %s", VERSION); |
1037 | BT_INFO("SCO socket layer initialized"); | 1054 | BT_INFO("SCO socket layer initialized"); |
@@ -1045,7 +1062,7 @@ error: | |||
1045 | 1062 | ||
1046 | static void __exit sco_exit(void) | 1063 | static void __exit sco_exit(void) |
1047 | { | 1064 | { |
1048 | class_remove_file(bt_class, &class_attr_sco); | 1065 | debugfs_remove(sco_debugfs); |
1049 | 1066 | ||
1050 | if (bt_sock_unregister(BTPROTO_SCO) < 0) | 1067 | if (bt_sock_unregister(BTPROTO_SCO) < 0) |
1051 | BT_ERR("SCO socket unregistration failed"); | 1068 | BT_ERR("SCO socket unregistration failed"); |
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig index d115d5cea5b6..9190ae462cb4 100644 --- a/net/bridge/Kconfig +++ b/net/bridge/Kconfig | |||
@@ -33,14 +33,14 @@ config BRIDGE | |||
33 | If unsure, say N. | 33 | If unsure, say N. |
34 | 34 | ||
35 | config BRIDGE_IGMP_SNOOPING | 35 | config BRIDGE_IGMP_SNOOPING |
36 | bool "IGMP snooping" | 36 | bool "IGMP/MLD snooping" |
37 | depends on BRIDGE | 37 | depends on BRIDGE |
38 | depends on INET | 38 | depends on INET |
39 | default y | 39 | default y |
40 | ---help--- | 40 | ---help--- |
41 | If you say Y here, then the Ethernet bridge will be able selectively | 41 | If you say Y here, then the Ethernet bridge will be able selectively |
42 | forward multicast traffic based on IGMP traffic received from each | 42 | forward multicast traffic based on IGMP/MLD traffic received from |
43 | port. | 43 | each port. |
44 | 44 | ||
45 | Say N to exclude this support and reduce the binary size. | 45 | Say N to exclude this support and reduce the binary size. |
46 | 46 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index eb7062d2e9e5..82599405dc15 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -26,21 +26,22 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
26 | const unsigned char *dest = skb->data; | 26 | const unsigned char *dest = skb->data; |
27 | struct net_bridge_fdb_entry *dst; | 27 | struct net_bridge_fdb_entry *dst; |
28 | struct net_bridge_mdb_entry *mdst; | 28 | struct net_bridge_mdb_entry *mdst; |
29 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); | ||
29 | 30 | ||
30 | BR_INPUT_SKB_CB(skb)->brdev = dev; | 31 | brstats->tx_packets++; |
32 | brstats->tx_bytes += skb->len; | ||
31 | 33 | ||
32 | dev->stats.tx_packets++; | 34 | BR_INPUT_SKB_CB(skb)->brdev = dev; |
33 | dev->stats.tx_bytes += skb->len; | ||
34 | 35 | ||
35 | skb_reset_mac_header(skb); | 36 | skb_reset_mac_header(skb); |
36 | skb_pull(skb, ETH_HLEN); | 37 | skb_pull(skb, ETH_HLEN); |
37 | 38 | ||
38 | if (dest[0] & 1) { | 39 | if (is_multicast_ether_addr(dest)) { |
39 | if (br_multicast_rcv(br, NULL, skb)) | 40 | if (br_multicast_rcv(br, NULL, skb)) |
40 | goto out; | 41 | goto out; |
41 | 42 | ||
42 | mdst = br_mdb_get(br, skb); | 43 | mdst = br_mdb_get(br, skb); |
43 | if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only) | 44 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) |
44 | br_multicast_deliver(mdst, skb); | 45 | br_multicast_deliver(mdst, skb); |
45 | else | 46 | else |
46 | br_flood_deliver(br, skb); | 47 | br_flood_deliver(br, skb); |
@@ -81,6 +82,31 @@ static int br_dev_stop(struct net_device *dev) | |||
81 | return 0; | 82 | return 0; |
82 | } | 83 | } |
83 | 84 | ||
85 | static struct net_device_stats *br_get_stats(struct net_device *dev) | ||
86 | { | ||
87 | struct net_bridge *br = netdev_priv(dev); | ||
88 | struct net_device_stats *stats = &dev->stats; | ||
89 | struct br_cpu_netstats sum = { 0 }; | ||
90 | unsigned int cpu; | ||
91 | |||
92 | for_each_possible_cpu(cpu) { | ||
93 | const struct br_cpu_netstats *bstats | ||
94 | = per_cpu_ptr(br->stats, cpu); | ||
95 | |||
96 | sum.tx_bytes += bstats->tx_bytes; | ||
97 | sum.tx_packets += bstats->tx_packets; | ||
98 | sum.rx_bytes += bstats->rx_bytes; | ||
99 | sum.rx_packets += bstats->rx_packets; | ||
100 | } | ||
101 | |||
102 | stats->tx_bytes = sum.tx_bytes; | ||
103 | stats->tx_packets = sum.tx_packets; | ||
104 | stats->rx_bytes = sum.rx_bytes; | ||
105 | stats->rx_packets = sum.rx_packets; | ||
106 | |||
107 | return stats; | ||
108 | } | ||
109 | |||
84 | static int br_change_mtu(struct net_device *dev, int new_mtu) | 110 | static int br_change_mtu(struct net_device *dev, int new_mtu) |
85 | { | 111 | { |
86 | struct net_bridge *br = netdev_priv(dev); | 112 | struct net_bridge *br = netdev_priv(dev); |
@@ -180,19 +206,28 @@ static const struct net_device_ops br_netdev_ops = { | |||
180 | .ndo_open = br_dev_open, | 206 | .ndo_open = br_dev_open, |
181 | .ndo_stop = br_dev_stop, | 207 | .ndo_stop = br_dev_stop, |
182 | .ndo_start_xmit = br_dev_xmit, | 208 | .ndo_start_xmit = br_dev_xmit, |
209 | .ndo_get_stats = br_get_stats, | ||
183 | .ndo_set_mac_address = br_set_mac_address, | 210 | .ndo_set_mac_address = br_set_mac_address, |
184 | .ndo_set_multicast_list = br_dev_set_multicast_list, | 211 | .ndo_set_multicast_list = br_dev_set_multicast_list, |
185 | .ndo_change_mtu = br_change_mtu, | 212 | .ndo_change_mtu = br_change_mtu, |
186 | .ndo_do_ioctl = br_dev_ioctl, | 213 | .ndo_do_ioctl = br_dev_ioctl, |
187 | }; | 214 | }; |
188 | 215 | ||
216 | static void br_dev_free(struct net_device *dev) | ||
217 | { | ||
218 | struct net_bridge *br = netdev_priv(dev); | ||
219 | |||
220 | free_percpu(br->stats); | ||
221 | free_netdev(dev); | ||
222 | } | ||
223 | |||
189 | void br_dev_setup(struct net_device *dev) | 224 | void br_dev_setup(struct net_device *dev) |
190 | { | 225 | { |
191 | random_ether_addr(dev->dev_addr); | 226 | random_ether_addr(dev->dev_addr); |
192 | ether_setup(dev); | 227 | ether_setup(dev); |
193 | 228 | ||
194 | dev->netdev_ops = &br_netdev_ops; | 229 | dev->netdev_ops = &br_netdev_ops; |
195 | dev->destructor = free_netdev; | 230 | dev->destructor = br_dev_free; |
196 | SET_ETHTOOL_OPS(dev, &br_ethtool_ops); | 231 | SET_ETHTOOL_OPS(dev, &br_ethtool_ops); |
197 | dev->tx_queue_len = 0; | 232 | dev->tx_queue_len = 0; |
198 | dev->priv_flags = IFF_EBRIDGE; | 233 | dev->priv_flags = IFF_EBRIDGE; |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 3b8e038ab32c..9101a4e56201 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/etherdevice.h> | 20 | #include <linux/etherdevice.h> |
21 | #include <linux/jhash.h> | 21 | #include <linux/jhash.h> |
22 | #include <linux/random.h> | 22 | #include <linux/random.h> |
23 | #include <linux/slab.h> | ||
23 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
24 | #include <asm/unaligned.h> | 25 | #include <asm/unaligned.h> |
25 | #include "br_private.h" | 26 | #include "br_private.h" |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index d61e6f741125..396f077216a3 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
17 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
@@ -19,6 +20,11 @@ | |||
19 | #include <linux/netfilter_bridge.h> | 20 | #include <linux/netfilter_bridge.h> |
20 | #include "br_private.h" | 21 | #include "br_private.h" |
21 | 22 | ||
23 | static int deliver_clone(const struct net_bridge_port *prev, | ||
24 | struct sk_buff *skb, | ||
25 | void (*__packet_hook)(const struct net_bridge_port *p, | ||
26 | struct sk_buff *skb)); | ||
27 | |||
22 | /* Don't forward packets to originating port or forwarding diasabled */ | 28 | /* Don't forward packets to originating port or forwarding diasabled */ |
23 | static inline int should_deliver(const struct net_bridge_port *p, | 29 | static inline int should_deliver(const struct net_bridge_port *p, |
24 | const struct sk_buff *skb) | 30 | const struct sk_buff *skb) |
@@ -94,17 +100,22 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
94 | } | 100 | } |
95 | 101 | ||
96 | /* called with rcu_read_lock */ | 102 | /* called with rcu_read_lock */ |
97 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) | 103 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) |
98 | { | 104 | { |
99 | if (should_deliver(to, skb)) { | 105 | if (should_deliver(to, skb)) { |
100 | __br_forward(to, skb); | 106 | if (skb0) |
107 | deliver_clone(to, skb, __br_forward); | ||
108 | else | ||
109 | __br_forward(to, skb); | ||
101 | return; | 110 | return; |
102 | } | 111 | } |
103 | 112 | ||
104 | kfree_skb(skb); | 113 | if (!skb0) |
114 | kfree_skb(skb); | ||
105 | } | 115 | } |
106 | 116 | ||
107 | static int deliver_clone(struct net_bridge_port *prev, struct sk_buff *skb, | 117 | static int deliver_clone(const struct net_bridge_port *prev, |
118 | struct sk_buff *skb, | ||
108 | void (*__packet_hook)(const struct net_bridge_port *p, | 119 | void (*__packet_hook)(const struct net_bridge_port *p, |
109 | struct sk_buff *skb)) | 120 | struct sk_buff *skb)) |
110 | { | 121 | { |
@@ -197,17 +208,15 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst, | |||
197 | { | 208 | { |
198 | struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; | 209 | struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; |
199 | struct net_bridge *br = netdev_priv(dev); | 210 | struct net_bridge *br = netdev_priv(dev); |
200 | struct net_bridge_port *port; | 211 | struct net_bridge_port *prev = NULL; |
201 | struct net_bridge_port *lport, *rport; | ||
202 | struct net_bridge_port *prev; | ||
203 | struct net_bridge_port_group *p; | 212 | struct net_bridge_port_group *p; |
204 | struct hlist_node *rp; | 213 | struct hlist_node *rp; |
205 | 214 | ||
206 | prev = NULL; | 215 | rp = rcu_dereference(br->router_list.first); |
207 | 216 | p = mdst ? rcu_dereference(mdst->ports) : NULL; | |
208 | rp = br->router_list.first; | ||
209 | p = mdst ? mdst->ports : NULL; | ||
210 | while (p || rp) { | 217 | while (p || rp) { |
218 | struct net_bridge_port *port, *lport, *rport; | ||
219 | |||
211 | lport = p ? p->port : NULL; | 220 | lport = p ? p->port : NULL; |
212 | rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : | 221 | rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : |
213 | NULL; | 222 | NULL; |
@@ -220,9 +229,9 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst, | |||
220 | goto out; | 229 | goto out; |
221 | 230 | ||
222 | if ((unsigned long)lport >= (unsigned long)port) | 231 | if ((unsigned long)lport >= (unsigned long)port) |
223 | p = p->next; | 232 | p = rcu_dereference(p->next); |
224 | if ((unsigned long)rport >= (unsigned long)port) | 233 | if ((unsigned long)rport >= (unsigned long)port) |
225 | rp = rp->next; | 234 | rp = rcu_dereference(rp->next); |
226 | } | 235 | } |
227 | 236 | ||
228 | if (!prev) | 237 | if (!prev) |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index b6a3872f5681..521439333316 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/if_ether.h> | 21 | #include <linux/if_ether.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/sock.h> | 23 | #include <net/sock.h> |
23 | 24 | ||
24 | #include "br_private.h" | 25 | #include "br_private.h" |
@@ -185,6 +186,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name) | |||
185 | br = netdev_priv(dev); | 186 | br = netdev_priv(dev); |
186 | br->dev = dev; | 187 | br->dev = dev; |
187 | 188 | ||
189 | br->stats = alloc_percpu(struct br_cpu_netstats); | ||
190 | if (!br->stats) { | ||
191 | free_netdev(dev); | ||
192 | return NULL; | ||
193 | } | ||
194 | |||
188 | spin_lock_init(&br->lock); | 195 | spin_lock_init(&br->lock); |
189 | INIT_LIST_HEAD(&br->port_list); | 196 | INIT_LIST_HEAD(&br->port_list); |
190 | spin_lock_init(&br->hash_lock); | 197 | spin_lock_init(&br->hash_lock); |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 53b39851d87d..e7f4c1d02f57 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * 2 of the License, or (at your option) any later version. | 11 | * 2 of the License, or (at your option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/slab.h> | ||
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
16 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
@@ -23,9 +24,11 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; | |||
23 | static int br_pass_frame_up(struct sk_buff *skb) | 24 | static int br_pass_frame_up(struct sk_buff *skb) |
24 | { | 25 | { |
25 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; | 26 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; |
27 | struct net_bridge *br = netdev_priv(brdev); | ||
28 | struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); | ||
26 | 29 | ||
27 | brdev->stats.rx_packets++; | 30 | brstats->rx_packets++; |
28 | brdev->stats.rx_bytes += skb->len; | 31 | brstats->rx_bytes += skb->len; |
29 | 32 | ||
30 | indev = skb->dev; | 33 | indev = skb->dev; |
31 | skb->dev = brdev; | 34 | skb->dev = brdev; |
@@ -70,7 +73,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
70 | 73 | ||
71 | if (is_multicast_ether_addr(dest)) { | 74 | if (is_multicast_ether_addr(dest)) { |
72 | mdst = br_mdb_get(br, skb); | 75 | mdst = br_mdb_get(br, skb); |
73 | if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only) { | 76 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { |
74 | if ((mdst && !hlist_unhashed(&mdst->mglist)) || | 77 | if ((mdst && !hlist_unhashed(&mdst->mglist)) || |
75 | br_multicast_is_router(br)) | 78 | br_multicast_is_router(br)) |
76 | skb2 = skb; | 79 | skb2 = skb; |
@@ -90,7 +93,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
90 | 93 | ||
91 | if (skb) { | 94 | if (skb) { |
92 | if (dst) | 95 | if (dst) |
93 | br_forward(dst->dst, skb); | 96 | br_forward(dst->dst, skb, skb2); |
94 | else | 97 | else |
95 | br_flood_forward(br, skb, skb2); | 98 | br_flood_forward(br, skb, skb2); |
96 | } | 99 | } |
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 2af6e4a90262..995afc4b04dc 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/if_bridge.h> | 16 | #include <linux/if_bridge.h> |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/slab.h> | ||
18 | #include <linux/times.h> | 19 | #include <linux/times.h> |
19 | #include <net/net_namespace.h> | 20 | #include <net/net_namespace.h> |
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 12ce1eaa4f3e..7128abdce45f 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -24,31 +24,108 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <net/ip.h> | 26 | #include <net/ip.h> |
27 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
28 | #include <net/ipv6.h> | ||
29 | #include <net/mld.h> | ||
30 | #include <net/addrconf.h> | ||
31 | #include <net/ip6_checksum.h> | ||
32 | #endif | ||
27 | 33 | ||
28 | #include "br_private.h" | 34 | #include "br_private.h" |
29 | 35 | ||
30 | static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) | 36 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
37 | static inline int ipv6_is_local_multicast(const struct in6_addr *addr) | ||
31 | { | 38 | { |
32 | return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1); | 39 | if (ipv6_addr_is_multicast(addr) && |
40 | IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) | ||
41 | return 1; | ||
42 | return 0; | ||
43 | } | ||
44 | #endif | ||
45 | |||
46 | static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) | ||
47 | { | ||
48 | if (a->proto != b->proto) | ||
49 | return 0; | ||
50 | switch (a->proto) { | ||
51 | case htons(ETH_P_IP): | ||
52 | return a->u.ip4 == b->u.ip4; | ||
53 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
54 | case htons(ETH_P_IPV6): | ||
55 | return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); | ||
56 | #endif | ||
57 | } | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) | ||
62 | { | ||
63 | return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); | ||
64 | } | ||
65 | |||
66 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
67 | static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, | ||
68 | const struct in6_addr *ip) | ||
69 | { | ||
70 | return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); | ||
71 | } | ||
72 | #endif | ||
73 | |||
74 | static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, | ||
75 | struct br_ip *ip) | ||
76 | { | ||
77 | switch (ip->proto) { | ||
78 | case htons(ETH_P_IP): | ||
79 | return __br_ip4_hash(mdb, ip->u.ip4); | ||
80 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
81 | case htons(ETH_P_IPV6): | ||
82 | return __br_ip6_hash(mdb, &ip->u.ip6); | ||
83 | #endif | ||
84 | } | ||
85 | return 0; | ||
33 | } | 86 | } |
34 | 87 | ||
35 | static struct net_bridge_mdb_entry *__br_mdb_ip_get( | 88 | static struct net_bridge_mdb_entry *__br_mdb_ip_get( |
36 | struct net_bridge_mdb_htable *mdb, __be32 dst, int hash) | 89 | struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) |
37 | { | 90 | { |
38 | struct net_bridge_mdb_entry *mp; | 91 | struct net_bridge_mdb_entry *mp; |
39 | struct hlist_node *p; | 92 | struct hlist_node *p; |
40 | 93 | ||
41 | hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 94 | hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { |
42 | if (dst == mp->addr) | 95 | if (br_ip_equal(&mp->addr, dst)) |
43 | return mp; | 96 | return mp; |
44 | } | 97 | } |
45 | 98 | ||
46 | return NULL; | 99 | return NULL; |
47 | } | 100 | } |
48 | 101 | ||
49 | static struct net_bridge_mdb_entry *br_mdb_ip_get( | 102 | static struct net_bridge_mdb_entry *br_mdb_ip4_get( |
50 | struct net_bridge_mdb_htable *mdb, __be32 dst) | 103 | struct net_bridge_mdb_htable *mdb, __be32 dst) |
51 | { | 104 | { |
105 | struct br_ip br_dst; | ||
106 | |||
107 | br_dst.u.ip4 = dst; | ||
108 | br_dst.proto = htons(ETH_P_IP); | ||
109 | |||
110 | return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst)); | ||
111 | } | ||
112 | |||
113 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
114 | static struct net_bridge_mdb_entry *br_mdb_ip6_get( | ||
115 | struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) | ||
116 | { | ||
117 | struct br_ip br_dst; | ||
118 | |||
119 | ipv6_addr_copy(&br_dst.u.ip6, dst); | ||
120 | br_dst.proto = htons(ETH_P_IPV6); | ||
121 | |||
122 | return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst)); | ||
123 | } | ||
124 | #endif | ||
125 | |||
126 | static struct net_bridge_mdb_entry *br_mdb_ip_get( | ||
127 | struct net_bridge_mdb_htable *mdb, struct br_ip *dst) | ||
128 | { | ||
52 | return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); | 129 | return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); |
53 | } | 130 | } |
54 | 131 | ||
@@ -56,18 +133,30 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | |||
56 | struct sk_buff *skb) | 133 | struct sk_buff *skb) |
57 | { | 134 | { |
58 | struct net_bridge_mdb_htable *mdb = br->mdb; | 135 | struct net_bridge_mdb_htable *mdb = br->mdb; |
136 | struct br_ip ip; | ||
59 | 137 | ||
60 | if (!mdb || br->multicast_disabled) | 138 | if (!mdb || br->multicast_disabled) |
61 | return NULL; | 139 | return NULL; |
62 | 140 | ||
141 | if (BR_INPUT_SKB_CB(skb)->igmp) | ||
142 | return NULL; | ||
143 | |||
144 | ip.proto = skb->protocol; | ||
145 | |||
63 | switch (skb->protocol) { | 146 | switch (skb->protocol) { |
64 | case htons(ETH_P_IP): | 147 | case htons(ETH_P_IP): |
65 | if (BR_INPUT_SKB_CB(skb)->igmp) | 148 | ip.u.ip4 = ip_hdr(skb)->daddr; |
66 | break; | 149 | break; |
67 | return br_mdb_ip_get(mdb, ip_hdr(skb)->daddr); | 150 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
151 | case htons(ETH_P_IPV6): | ||
152 | ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr); | ||
153 | break; | ||
154 | #endif | ||
155 | default: | ||
156 | return NULL; | ||
68 | } | 157 | } |
69 | 158 | ||
70 | return NULL; | 159 | return br_mdb_ip_get(mdb, &ip); |
71 | } | 160 | } |
72 | 161 | ||
73 | static void br_mdb_free(struct rcu_head *head) | 162 | static void br_mdb_free(struct rcu_head *head) |
@@ -94,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, | |||
94 | for (i = 0; i < old->max; i++) | 183 | for (i = 0; i < old->max; i++) |
95 | hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) | 184 | hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) |
96 | hlist_add_head(&mp->hlist[new->ver], | 185 | hlist_add_head(&mp->hlist[new->ver], |
97 | &new->mhash[br_ip_hash(new, mp->addr)]); | 186 | &new->mhash[br_ip_hash(new, &mp->addr)]); |
98 | 187 | ||
99 | if (!elasticity) | 188 | if (!elasticity) |
100 | return 0; | 189 | return 0; |
@@ -162,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
162 | struct net_bridge_port_group *p; | 251 | struct net_bridge_port_group *p; |
163 | struct net_bridge_port_group **pp; | 252 | struct net_bridge_port_group **pp; |
164 | 253 | ||
165 | mp = br_mdb_ip_get(mdb, pg->addr); | 254 | mp = br_mdb_ip_get(mdb, &pg->addr); |
166 | if (WARN_ON(!mp)) | 255 | if (WARN_ON(!mp)) |
167 | return; | 256 | return; |
168 | 257 | ||
@@ -170,7 +259,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
170 | if (p != pg) | 259 | if (p != pg) |
171 | continue; | 260 | continue; |
172 | 261 | ||
173 | *pp = p->next; | 262 | rcu_assign_pointer(*pp, p->next); |
174 | hlist_del_init(&p->mglist); | 263 | hlist_del_init(&p->mglist); |
175 | del_timer(&p->timer); | 264 | del_timer(&p->timer); |
176 | del_timer(&p->query_timer); | 265 | del_timer(&p->query_timer); |
@@ -248,8 +337,8 @@ out: | |||
248 | return 0; | 337 | return 0; |
249 | } | 338 | } |
250 | 339 | ||
251 | static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, | 340 | static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, |
252 | __be32 group) | 341 | __be32 group) |
253 | { | 342 | { |
254 | struct sk_buff *skb; | 343 | struct sk_buff *skb; |
255 | struct igmphdr *ih; | 344 | struct igmphdr *ih; |
@@ -313,12 +402,104 @@ out: | |||
313 | return skb; | 402 | return skb; |
314 | } | 403 | } |
315 | 404 | ||
405 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
406 | static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | ||
407 | struct in6_addr *group) | ||
408 | { | ||
409 | struct sk_buff *skb; | ||
410 | struct ipv6hdr *ip6h; | ||
411 | struct mld_msg *mldq; | ||
412 | struct ethhdr *eth; | ||
413 | u8 *hopopt; | ||
414 | unsigned long interval; | ||
415 | |||
416 | skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + | ||
417 | 8 + sizeof(*mldq)); | ||
418 | if (!skb) | ||
419 | goto out; | ||
420 | |||
421 | skb->protocol = htons(ETH_P_IPV6); | ||
422 | |||
423 | /* Ethernet header */ | ||
424 | skb_reset_mac_header(skb); | ||
425 | eth = eth_hdr(skb); | ||
426 | |||
427 | memcpy(eth->h_source, br->dev->dev_addr, 6); | ||
428 | ipv6_eth_mc_map(group, eth->h_dest); | ||
429 | eth->h_proto = htons(ETH_P_IPV6); | ||
430 | skb_put(skb, sizeof(*eth)); | ||
431 | |||
432 | /* IPv6 header + HbH option */ | ||
433 | skb_set_network_header(skb, skb->len); | ||
434 | ip6h = ipv6_hdr(skb); | ||
435 | |||
436 | *(__force __be32 *)ip6h = htonl(0x60000000); | ||
437 | ip6h->payload_len = 8 + sizeof(*mldq); | ||
438 | ip6h->nexthdr = IPPROTO_HOPOPTS; | ||
439 | ip6h->hop_limit = 1; | ||
440 | ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); | ||
441 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); | ||
442 | |||
443 | hopopt = (u8 *)(ip6h + 1); | ||
444 | hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ | ||
445 | hopopt[1] = 0; /* length of HbH */ | ||
446 | hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ | ||
447 | hopopt[3] = 2; /* Length of RA Option */ | ||
448 | hopopt[4] = 0; /* Type = 0x0000 (MLD) */ | ||
449 | hopopt[5] = 0; | ||
450 | hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ | ||
451 | hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ | ||
452 | |||
453 | skb_put(skb, sizeof(*ip6h) + 8); | ||
454 | |||
455 | /* ICMPv6 */ | ||
456 | skb_set_transport_header(skb, skb->len); | ||
457 | mldq = (struct mld_msg *) icmp6_hdr(skb); | ||
458 | |||
459 | interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : | ||
460 | br->multicast_query_response_interval; | ||
461 | |||
462 | mldq->mld_type = ICMPV6_MGM_QUERY; | ||
463 | mldq->mld_code = 0; | ||
464 | mldq->mld_cksum = 0; | ||
465 | mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); | ||
466 | mldq->mld_reserved = 0; | ||
467 | ipv6_addr_copy(&mldq->mld_mca, group); | ||
468 | |||
469 | /* checksum */ | ||
470 | mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, | ||
471 | sizeof(*mldq), IPPROTO_ICMPV6, | ||
472 | csum_partial(mldq, | ||
473 | sizeof(*mldq), 0)); | ||
474 | skb_put(skb, sizeof(*mldq)); | ||
475 | |||
476 | __skb_pull(skb, sizeof(*eth)); | ||
477 | |||
478 | out: | ||
479 | return skb; | ||
480 | } | ||
481 | #endif | ||
482 | |||
483 | static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, | ||
484 | struct br_ip *addr) | ||
485 | { | ||
486 | switch (addr->proto) { | ||
487 | case htons(ETH_P_IP): | ||
488 | return br_ip4_multicast_alloc_query(br, addr->u.ip4); | ||
489 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
490 | case htons(ETH_P_IPV6): | ||
491 | return br_ip6_multicast_alloc_query(br, &addr->u.ip6); | ||
492 | #endif | ||
493 | } | ||
494 | return NULL; | ||
495 | } | ||
496 | |||
316 | static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) | 497 | static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) |
317 | { | 498 | { |
318 | struct net_bridge *br = mp->br; | 499 | struct net_bridge *br = mp->br; |
319 | struct sk_buff *skb; | 500 | struct sk_buff *skb; |
320 | 501 | ||
321 | skb = br_multicast_alloc_query(br, mp->addr); | 502 | skb = br_multicast_alloc_query(br, &mp->addr); |
322 | if (!skb) | 503 | if (!skb) |
323 | goto timer; | 504 | goto timer; |
324 | 505 | ||
@@ -352,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg) | |||
352 | struct net_bridge *br = port->br; | 533 | struct net_bridge *br = port->br; |
353 | struct sk_buff *skb; | 534 | struct sk_buff *skb; |
354 | 535 | ||
355 | skb = br_multicast_alloc_query(br, pg->addr); | 536 | skb = br_multicast_alloc_query(br, &pg->addr); |
356 | if (!skb) | 537 | if (!skb) |
357 | goto timer; | 538 | goto timer; |
358 | 539 | ||
@@ -382,8 +563,8 @@ out: | |||
382 | } | 563 | } |
383 | 564 | ||
384 | static struct net_bridge_mdb_entry *br_multicast_get_group( | 565 | static struct net_bridge_mdb_entry *br_multicast_get_group( |
385 | struct net_bridge *br, struct net_bridge_port *port, __be32 group, | 566 | struct net_bridge *br, struct net_bridge_port *port, |
386 | int hash) | 567 | struct br_ip *group, int hash) |
387 | { | 568 | { |
388 | struct net_bridge_mdb_htable *mdb = br->mdb; | 569 | struct net_bridge_mdb_htable *mdb = br->mdb; |
389 | struct net_bridge_mdb_entry *mp; | 570 | struct net_bridge_mdb_entry *mp; |
@@ -395,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( | |||
395 | 576 | ||
396 | hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 577 | hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { |
397 | count++; | 578 | count++; |
398 | if (unlikely(group == mp->addr)) { | 579 | if (unlikely(br_ip_equal(group, &mp->addr))) |
399 | return mp; | 580 | return mp; |
400 | } | ||
401 | } | 581 | } |
402 | 582 | ||
403 | elasticity = 0; | 583 | elasticity = 0; |
@@ -462,7 +642,8 @@ err: | |||
462 | } | 642 | } |
463 | 643 | ||
464 | static struct net_bridge_mdb_entry *br_multicast_new_group( | 644 | static struct net_bridge_mdb_entry *br_multicast_new_group( |
465 | struct net_bridge *br, struct net_bridge_port *port, __be32 group) | 645 | struct net_bridge *br, struct net_bridge_port *port, |
646 | struct br_ip *group) | ||
466 | { | 647 | { |
467 | struct net_bridge_mdb_htable *mdb = br->mdb; | 648 | struct net_bridge_mdb_htable *mdb = br->mdb; |
468 | struct net_bridge_mdb_entry *mp; | 649 | struct net_bridge_mdb_entry *mp; |
@@ -495,7 +676,7 @@ rehash: | |||
495 | goto out; | 676 | goto out; |
496 | 677 | ||
497 | mp->br = br; | 678 | mp->br = br; |
498 | mp->addr = group; | 679 | mp->addr = *group; |
499 | setup_timer(&mp->timer, br_multicast_group_expired, | 680 | setup_timer(&mp->timer, br_multicast_group_expired, |
500 | (unsigned long)mp); | 681 | (unsigned long)mp); |
501 | setup_timer(&mp->query_timer, br_multicast_group_query_expired, | 682 | setup_timer(&mp->query_timer, br_multicast_group_query_expired, |
@@ -509,7 +690,8 @@ out: | |||
509 | } | 690 | } |
510 | 691 | ||
511 | static int br_multicast_add_group(struct net_bridge *br, | 692 | static int br_multicast_add_group(struct net_bridge *br, |
512 | struct net_bridge_port *port, __be32 group) | 693 | struct net_bridge_port *port, |
694 | struct br_ip *group) | ||
513 | { | 695 | { |
514 | struct net_bridge_mdb_entry *mp; | 696 | struct net_bridge_mdb_entry *mp; |
515 | struct net_bridge_port_group *p; | 697 | struct net_bridge_port_group *p; |
@@ -517,9 +699,6 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
517 | unsigned long now = jiffies; | 699 | unsigned long now = jiffies; |
518 | int err; | 700 | int err; |
519 | 701 | ||
520 | if (ipv4_is_local_multicast(group)) | ||
521 | return 0; | ||
522 | |||
523 | spin_lock(&br->multicast_lock); | 702 | spin_lock(&br->multicast_lock); |
524 | if (!netif_running(br->dev) || | 703 | if (!netif_running(br->dev) || |
525 | (port && port->state == BR_STATE_DISABLED)) | 704 | (port && port->state == BR_STATE_DISABLED)) |
@@ -548,7 +727,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
548 | if (unlikely(!p)) | 727 | if (unlikely(!p)) |
549 | goto err; | 728 | goto err; |
550 | 729 | ||
551 | p->addr = group; | 730 | p->addr = *group; |
552 | p->port = port; | 731 | p->port = port; |
553 | p->next = *pp; | 732 | p->next = *pp; |
554 | hlist_add_head(&p->mglist, &port->mglist); | 733 | hlist_add_head(&p->mglist, &port->mglist); |
@@ -569,6 +748,38 @@ err: | |||
569 | return err; | 748 | return err; |
570 | } | 749 | } |
571 | 750 | ||
751 | static int br_ip4_multicast_add_group(struct net_bridge *br, | ||
752 | struct net_bridge_port *port, | ||
753 | __be32 group) | ||
754 | { | ||
755 | struct br_ip br_group; | ||
756 | |||
757 | if (ipv4_is_local_multicast(group)) | ||
758 | return 0; | ||
759 | |||
760 | br_group.u.ip4 = group; | ||
761 | br_group.proto = htons(ETH_P_IP); | ||
762 | |||
763 | return br_multicast_add_group(br, port, &br_group); | ||
764 | } | ||
765 | |||
766 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
767 | static int br_ip6_multicast_add_group(struct net_bridge *br, | ||
768 | struct net_bridge_port *port, | ||
769 | const struct in6_addr *group) | ||
770 | { | ||
771 | struct br_ip br_group; | ||
772 | |||
773 | if (ipv6_is_local_multicast(group)) | ||
774 | return 0; | ||
775 | |||
776 | ipv6_addr_copy(&br_group.u.ip6, group); | ||
777 | br_group.proto = htons(ETH_P_IP); | ||
778 | |||
779 | return br_multicast_add_group(br, port, &br_group); | ||
780 | } | ||
781 | #endif | ||
782 | |||
572 | static void br_multicast_router_expired(unsigned long data) | 783 | static void br_multicast_router_expired(unsigned long data) |
573 | { | 784 | { |
574 | struct net_bridge_port *port = (void *)data; | 785 | struct net_bridge_port *port = (void *)data; |
@@ -590,19 +801,15 @@ static void br_multicast_local_router_expired(unsigned long data) | |||
590 | { | 801 | { |
591 | } | 802 | } |
592 | 803 | ||
593 | static void br_multicast_send_query(struct net_bridge *br, | 804 | static void __br_multicast_send_query(struct net_bridge *br, |
594 | struct net_bridge_port *port, u32 sent) | 805 | struct net_bridge_port *port, |
806 | struct br_ip *ip) | ||
595 | { | 807 | { |
596 | unsigned long time; | ||
597 | struct sk_buff *skb; | 808 | struct sk_buff *skb; |
598 | 809 | ||
599 | if (!netif_running(br->dev) || br->multicast_disabled || | 810 | skb = br_multicast_alloc_query(br, ip); |
600 | timer_pending(&br->multicast_querier_timer)) | ||
601 | return; | ||
602 | |||
603 | skb = br_multicast_alloc_query(br, 0); | ||
604 | if (!skb) | 811 | if (!skb) |
605 | goto timer; | 812 | return; |
606 | 813 | ||
607 | if (port) { | 814 | if (port) { |
608 | __skb_push(skb, sizeof(struct ethhdr)); | 815 | __skb_push(skb, sizeof(struct ethhdr)); |
@@ -611,8 +818,28 @@ static void br_multicast_send_query(struct net_bridge *br, | |||
611 | dev_queue_xmit); | 818 | dev_queue_xmit); |
612 | } else | 819 | } else |
613 | netif_rx(skb); | 820 | netif_rx(skb); |
821 | } | ||
822 | |||
823 | static void br_multicast_send_query(struct net_bridge *br, | ||
824 | struct net_bridge_port *port, u32 sent) | ||
825 | { | ||
826 | unsigned long time; | ||
827 | struct br_ip br_group; | ||
828 | |||
829 | if (!netif_running(br->dev) || br->multicast_disabled || | ||
830 | timer_pending(&br->multicast_querier_timer)) | ||
831 | return; | ||
832 | |||
833 | memset(&br_group.u, 0, sizeof(br_group.u)); | ||
834 | |||
835 | br_group.proto = htons(ETH_P_IP); | ||
836 | __br_multicast_send_query(br, port, &br_group); | ||
837 | |||
838 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
839 | br_group.proto = htons(ETH_P_IPV6); | ||
840 | __br_multicast_send_query(br, port, &br_group); | ||
841 | #endif | ||
614 | 842 | ||
615 | timer: | ||
616 | time = jiffies; | 843 | time = jiffies; |
617 | time += sent < br->multicast_startup_query_count ? | 844 | time += sent < br->multicast_startup_query_count ? |
618 | br->multicast_startup_query_interval : | 845 | br->multicast_startup_query_interval : |
@@ -697,9 +924,9 @@ void br_multicast_disable_port(struct net_bridge_port *port) | |||
697 | spin_unlock(&br->multicast_lock); | 924 | spin_unlock(&br->multicast_lock); |
698 | } | 925 | } |
699 | 926 | ||
700 | static int br_multicast_igmp3_report(struct net_bridge *br, | 927 | static int br_ip4_multicast_igmp3_report(struct net_bridge *br, |
701 | struct net_bridge_port *port, | 928 | struct net_bridge_port *port, |
702 | struct sk_buff *skb) | 929 | struct sk_buff *skb) |
703 | { | 930 | { |
704 | struct igmpv3_report *ih; | 931 | struct igmpv3_report *ih; |
705 | struct igmpv3_grec *grec; | 932 | struct igmpv3_grec *grec; |
@@ -722,11 +949,11 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
722 | if (!pskb_may_pull(skb, len)) | 949 | if (!pskb_may_pull(skb, len)) |
723 | return -EINVAL; | 950 | return -EINVAL; |
724 | 951 | ||
725 | grec = (void *)(skb->data + len); | 952 | grec = (void *)(skb->data + len - sizeof(*grec)); |
726 | group = grec->grec_mca; | 953 | group = grec->grec_mca; |
727 | type = grec->grec_type; | 954 | type = grec->grec_type; |
728 | 955 | ||
729 | len += grec->grec_nsrcs * 4; | 956 | len += ntohs(grec->grec_nsrcs) * 4; |
730 | if (!pskb_may_pull(skb, len)) | 957 | if (!pskb_may_pull(skb, len)) |
731 | return -EINVAL; | 958 | return -EINVAL; |
732 | 959 | ||
@@ -744,7 +971,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
744 | continue; | 971 | continue; |
745 | } | 972 | } |
746 | 973 | ||
747 | err = br_multicast_add_group(br, port, group); | 974 | err = br_ip4_multicast_add_group(br, port, group); |
748 | if (err) | 975 | if (err) |
749 | break; | 976 | break; |
750 | } | 977 | } |
@@ -752,24 +979,87 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
752 | return err; | 979 | return err; |
753 | } | 980 | } |
754 | 981 | ||
982 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
983 | static int br_ip6_multicast_mld2_report(struct net_bridge *br, | ||
984 | struct net_bridge_port *port, | ||
985 | struct sk_buff *skb) | ||
986 | { | ||
987 | struct icmp6hdr *icmp6h; | ||
988 | struct mld2_grec *grec; | ||
989 | int i; | ||
990 | int len; | ||
991 | int num; | ||
992 | int err = 0; | ||
993 | |||
994 | if (!pskb_may_pull(skb, sizeof(*icmp6h))) | ||
995 | return -EINVAL; | ||
996 | |||
997 | icmp6h = icmp6_hdr(skb); | ||
998 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); | ||
999 | len = sizeof(*icmp6h); | ||
1000 | |||
1001 | for (i = 0; i < num; i++) { | ||
1002 | __be16 *nsrcs, _nsrcs; | ||
1003 | |||
1004 | nsrcs = skb_header_pointer(skb, | ||
1005 | len + offsetof(struct mld2_grec, | ||
1006 | grec_mca), | ||
1007 | sizeof(_nsrcs), &_nsrcs); | ||
1008 | if (!nsrcs) | ||
1009 | return -EINVAL; | ||
1010 | |||
1011 | if (!pskb_may_pull(skb, | ||
1012 | len + sizeof(*grec) + | ||
1013 | sizeof(struct in6_addr) * (*nsrcs))) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | grec = (struct mld2_grec *)(skb->data + len); | ||
1017 | len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); | ||
1018 | |||
1019 | /* We treat these as MLDv1 reports for now. */ | ||
1020 | switch (grec->grec_type) { | ||
1021 | case MLD2_MODE_IS_INCLUDE: | ||
1022 | case MLD2_MODE_IS_EXCLUDE: | ||
1023 | case MLD2_CHANGE_TO_INCLUDE: | ||
1024 | case MLD2_CHANGE_TO_EXCLUDE: | ||
1025 | case MLD2_ALLOW_NEW_SOURCES: | ||
1026 | case MLD2_BLOCK_OLD_SOURCES: | ||
1027 | break; | ||
1028 | |||
1029 | default: | ||
1030 | continue; | ||
1031 | } | ||
1032 | |||
1033 | err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); | ||
1034 | if (!err) | ||
1035 | break; | ||
1036 | } | ||
1037 | |||
1038 | return err; | ||
1039 | } | ||
1040 | #endif | ||
1041 | |||
1042 | /* | ||
1043 | * Add port to rotuer_list | ||
1044 | * list is maintained ordered by pointer value | ||
1045 | * and locked by br->multicast_lock and RCU | ||
1046 | */ | ||
755 | static void br_multicast_add_router(struct net_bridge *br, | 1047 | static void br_multicast_add_router(struct net_bridge *br, |
756 | struct net_bridge_port *port) | 1048 | struct net_bridge_port *port) |
757 | { | 1049 | { |
758 | struct hlist_node *p; | 1050 | struct net_bridge_port *p; |
759 | struct hlist_node **h; | 1051 | struct hlist_node *n, *slot = NULL; |
760 | 1052 | ||
761 | for (h = &br->router_list.first; | 1053 | hlist_for_each_entry(p, n, &br->router_list, rlist) { |
762 | (p = *h) && | 1054 | if ((unsigned long) port >= (unsigned long) p) |
763 | (unsigned long)container_of(p, struct net_bridge_port, rlist) > | 1055 | break; |
764 | (unsigned long)port; | 1056 | slot = n; |
765 | h = &p->next) | 1057 | } |
766 | ; | 1058 | |
767 | 1059 | if (slot) | |
768 | port->rlist.pprev = h; | 1060 | hlist_add_after_rcu(slot, &port->rlist); |
769 | port->rlist.next = p; | 1061 | else |
770 | rcu_assign_pointer(*h, &port->rlist); | 1062 | hlist_add_head_rcu(&port->rlist, &br->router_list); |
771 | if (p) | ||
772 | p->pprev = &port->rlist.next; | ||
773 | } | 1063 | } |
774 | 1064 | ||
775 | static void br_multicast_mark_router(struct net_bridge *br, | 1065 | static void br_multicast_mark_router(struct net_bridge *br, |
@@ -799,7 +1089,7 @@ timer: | |||
799 | 1089 | ||
800 | static void br_multicast_query_received(struct net_bridge *br, | 1090 | static void br_multicast_query_received(struct net_bridge *br, |
801 | struct net_bridge_port *port, | 1091 | struct net_bridge_port *port, |
802 | __be32 saddr) | 1092 | int saddr) |
803 | { | 1093 | { |
804 | if (saddr) | 1094 | if (saddr) |
805 | mod_timer(&br->multicast_querier_timer, | 1095 | mod_timer(&br->multicast_querier_timer, |
@@ -810,9 +1100,9 @@ static void br_multicast_query_received(struct net_bridge *br, | |||
810 | br_multicast_mark_router(br, port); | 1100 | br_multicast_mark_router(br, port); |
811 | } | 1101 | } |
812 | 1102 | ||
813 | static int br_multicast_query(struct net_bridge *br, | 1103 | static int br_ip4_multicast_query(struct net_bridge *br, |
814 | struct net_bridge_port *port, | 1104 | struct net_bridge_port *port, |
815 | struct sk_buff *skb) | 1105 | struct sk_buff *skb) |
816 | { | 1106 | { |
817 | struct iphdr *iph = ip_hdr(skb); | 1107 | struct iphdr *iph = ip_hdr(skb); |
818 | struct igmphdr *ih = igmp_hdr(skb); | 1108 | struct igmphdr *ih = igmp_hdr(skb); |
@@ -823,13 +1113,14 @@ static int br_multicast_query(struct net_bridge *br, | |||
823 | unsigned long max_delay; | 1113 | unsigned long max_delay; |
824 | unsigned long now = jiffies; | 1114 | unsigned long now = jiffies; |
825 | __be32 group; | 1115 | __be32 group; |
1116 | int err = 0; | ||
826 | 1117 | ||
827 | spin_lock(&br->multicast_lock); | 1118 | spin_lock(&br->multicast_lock); |
828 | if (!netif_running(br->dev) || | 1119 | if (!netif_running(br->dev) || |
829 | (port && port->state == BR_STATE_DISABLED)) | 1120 | (port && port->state == BR_STATE_DISABLED)) |
830 | goto out; | 1121 | goto out; |
831 | 1122 | ||
832 | br_multicast_query_received(br, port, iph->saddr); | 1123 | br_multicast_query_received(br, port, !!iph->saddr); |
833 | 1124 | ||
834 | group = ih->group; | 1125 | group = ih->group; |
835 | 1126 | ||
@@ -841,21 +1132,23 @@ static int br_multicast_query(struct net_bridge *br, | |||
841 | group = 0; | 1132 | group = 0; |
842 | } | 1133 | } |
843 | } else { | 1134 | } else { |
844 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) | 1135 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { |
845 | return -EINVAL; | 1136 | err = -EINVAL; |
1137 | goto out; | ||
1138 | } | ||
846 | 1139 | ||
847 | ih3 = igmpv3_query_hdr(skb); | 1140 | ih3 = igmpv3_query_hdr(skb); |
848 | if (ih3->nsrcs) | 1141 | if (ih3->nsrcs) |
849 | return 0; | 1142 | goto out; |
850 | 1143 | ||
851 | max_delay = ih3->code ? 1 : | 1144 | max_delay = ih3->code ? |
852 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE); | 1145 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; |
853 | } | 1146 | } |
854 | 1147 | ||
855 | if (!group) | 1148 | if (!group) |
856 | goto out; | 1149 | goto out; |
857 | 1150 | ||
858 | mp = br_mdb_ip_get(br->mdb, group); | 1151 | mp = br_mdb_ip4_get(br->mdb, group); |
859 | if (!mp) | 1152 | if (!mp) |
860 | goto out; | 1153 | goto out; |
861 | 1154 | ||
@@ -876,12 +1169,81 @@ static int br_multicast_query(struct net_bridge *br, | |||
876 | 1169 | ||
877 | out: | 1170 | out: |
878 | spin_unlock(&br->multicast_lock); | 1171 | spin_unlock(&br->multicast_lock); |
879 | return 0; | 1172 | return err; |
880 | } | 1173 | } |
881 | 1174 | ||
1175 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1176 | static int br_ip6_multicast_query(struct net_bridge *br, | ||
1177 | struct net_bridge_port *port, | ||
1178 | struct sk_buff *skb) | ||
1179 | { | ||
1180 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | ||
1181 | struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); | ||
1182 | struct net_bridge_mdb_entry *mp; | ||
1183 | struct mld2_query *mld2q; | ||
1184 | struct net_bridge_port_group *p, **pp; | ||
1185 | unsigned long max_delay; | ||
1186 | unsigned long now = jiffies; | ||
1187 | struct in6_addr *group = NULL; | ||
1188 | int err = 0; | ||
1189 | |||
1190 | spin_lock(&br->multicast_lock); | ||
1191 | if (!netif_running(br->dev) || | ||
1192 | (port && port->state == BR_STATE_DISABLED)) | ||
1193 | goto out; | ||
1194 | |||
1195 | br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); | ||
1196 | |||
1197 | if (skb->len == sizeof(*mld)) { | ||
1198 | if (!pskb_may_pull(skb, sizeof(*mld))) { | ||
1199 | err = -EINVAL; | ||
1200 | goto out; | ||
1201 | } | ||
1202 | mld = (struct mld_msg *) icmp6_hdr(skb); | ||
1203 | max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); | ||
1204 | if (max_delay) | ||
1205 | group = &mld->mld_mca; | ||
1206 | } else if (skb->len >= sizeof(*mld2q)) { | ||
1207 | if (!pskb_may_pull(skb, sizeof(*mld2q))) { | ||
1208 | err = -EINVAL; | ||
1209 | goto out; | ||
1210 | } | ||
1211 | mld2q = (struct mld2_query *)icmp6_hdr(skb); | ||
1212 | if (!mld2q->mld2q_nsrcs) | ||
1213 | group = &mld2q->mld2q_mca; | ||
1214 | max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; | ||
1215 | } | ||
1216 | |||
1217 | if (!group) | ||
1218 | goto out; | ||
1219 | |||
1220 | mp = br_mdb_ip6_get(br->mdb, group); | ||
1221 | if (!mp) | ||
1222 | goto out; | ||
1223 | |||
1224 | max_delay *= br->multicast_last_member_count; | ||
1225 | if (!hlist_unhashed(&mp->mglist) && | ||
1226 | (timer_pending(&mp->timer) ? | ||
1227 | time_after(mp->timer.expires, now + max_delay) : | ||
1228 | try_to_del_timer_sync(&mp->timer) >= 0)) | ||
1229 | mod_timer(&mp->timer, now + max_delay); | ||
1230 | |||
1231 | for (pp = &mp->ports; (p = *pp); pp = &p->next) { | ||
1232 | if (timer_pending(&p->timer) ? | ||
1233 | time_after(p->timer.expires, now + max_delay) : | ||
1234 | try_to_del_timer_sync(&p->timer) >= 0) | ||
1235 | mod_timer(&mp->timer, now + max_delay); | ||
1236 | } | ||
1237 | |||
1238 | out: | ||
1239 | spin_unlock(&br->multicast_lock); | ||
1240 | return err; | ||
1241 | } | ||
1242 | #endif | ||
1243 | |||
882 | static void br_multicast_leave_group(struct net_bridge *br, | 1244 | static void br_multicast_leave_group(struct net_bridge *br, |
883 | struct net_bridge_port *port, | 1245 | struct net_bridge_port *port, |
884 | __be32 group) | 1246 | struct br_ip *group) |
885 | { | 1247 | { |
886 | struct net_bridge_mdb_htable *mdb; | 1248 | struct net_bridge_mdb_htable *mdb; |
887 | struct net_bridge_mdb_entry *mp; | 1249 | struct net_bridge_mdb_entry *mp; |
@@ -889,9 +1251,6 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
889 | unsigned long now; | 1251 | unsigned long now; |
890 | unsigned long time; | 1252 | unsigned long time; |
891 | 1253 | ||
892 | if (ipv4_is_local_multicast(group)) | ||
893 | return; | ||
894 | |||
895 | spin_lock(&br->multicast_lock); | 1254 | spin_lock(&br->multicast_lock); |
896 | if (!netif_running(br->dev) || | 1255 | if (!netif_running(br->dev) || |
897 | (port && port->state == BR_STATE_DISABLED) || | 1256 | (port && port->state == BR_STATE_DISABLED) || |
@@ -942,6 +1301,38 @@ out: | |||
942 | spin_unlock(&br->multicast_lock); | 1301 | spin_unlock(&br->multicast_lock); |
943 | } | 1302 | } |
944 | 1303 | ||
1304 | static void br_ip4_multicast_leave_group(struct net_bridge *br, | ||
1305 | struct net_bridge_port *port, | ||
1306 | __be32 group) | ||
1307 | { | ||
1308 | struct br_ip br_group; | ||
1309 | |||
1310 | if (ipv4_is_local_multicast(group)) | ||
1311 | return; | ||
1312 | |||
1313 | br_group.u.ip4 = group; | ||
1314 | br_group.proto = htons(ETH_P_IP); | ||
1315 | |||
1316 | br_multicast_leave_group(br, port, &br_group); | ||
1317 | } | ||
1318 | |||
1319 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1320 | static void br_ip6_multicast_leave_group(struct net_bridge *br, | ||
1321 | struct net_bridge_port *port, | ||
1322 | const struct in6_addr *group) | ||
1323 | { | ||
1324 | struct br_ip br_group; | ||
1325 | |||
1326 | if (ipv6_is_local_multicast(group)) | ||
1327 | return; | ||
1328 | |||
1329 | ipv6_addr_copy(&br_group.u.ip6, group); | ||
1330 | br_group.proto = htons(ETH_P_IPV6); | ||
1331 | |||
1332 | br_multicast_leave_group(br, port, &br_group); | ||
1333 | } | ||
1334 | #endif | ||
1335 | |||
945 | static int br_multicast_ipv4_rcv(struct net_bridge *br, | 1336 | static int br_multicast_ipv4_rcv(struct net_bridge *br, |
946 | struct net_bridge_port *port, | 1337 | struct net_bridge_port *port, |
947 | struct sk_buff *skb) | 1338 | struct sk_buff *skb) |
@@ -953,9 +1344,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
953 | unsigned offset; | 1344 | unsigned offset; |
954 | int err; | 1345 | int err; |
955 | 1346 | ||
956 | BR_INPUT_SKB_CB(skb)->igmp = 0; | ||
957 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; | ||
958 | |||
959 | /* We treat OOM as packet loss for now. */ | 1347 | /* We treat OOM as packet loss for now. */ |
960 | if (!pskb_may_pull(skb, sizeof(*iph))) | 1348 | if (!pskb_may_pull(skb, sizeof(*iph))) |
961 | return -EINVAL; | 1349 | return -EINVAL; |
@@ -987,7 +1375,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
987 | 1375 | ||
988 | err = pskb_trim_rcsum(skb2, len); | 1376 | err = pskb_trim_rcsum(skb2, len); |
989 | if (err) | 1377 | if (err) |
990 | return err; | 1378 | goto err_out; |
991 | } | 1379 | } |
992 | 1380 | ||
993 | len -= ip_hdrlen(skb2); | 1381 | len -= ip_hdrlen(skb2); |
@@ -999,8 +1387,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
999 | if (!pskb_may_pull(skb2, sizeof(*ih))) | 1387 | if (!pskb_may_pull(skb2, sizeof(*ih))) |
1000 | goto out; | 1388 | goto out; |
1001 | 1389 | ||
1002 | iph = ip_hdr(skb2); | ||
1003 | |||
1004 | switch (skb2->ip_summed) { | 1390 | switch (skb2->ip_summed) { |
1005 | case CHECKSUM_COMPLETE: | 1391 | case CHECKSUM_COMPLETE: |
1006 | if (!csum_fold(skb2->csum)) | 1392 | if (!csum_fold(skb2->csum)) |
@@ -1009,7 +1395,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
1009 | case CHECKSUM_NONE: | 1395 | case CHECKSUM_NONE: |
1010 | skb2->csum = 0; | 1396 | skb2->csum = 0; |
1011 | if (skb_checksum_complete(skb2)) | 1397 | if (skb_checksum_complete(skb2)) |
1012 | return -EINVAL; | 1398 | goto out; |
1013 | } | 1399 | } |
1014 | 1400 | ||
1015 | err = 0; | 1401 | err = 0; |
@@ -1021,17 +1407,134 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
1021 | case IGMP_HOST_MEMBERSHIP_REPORT: | 1407 | case IGMP_HOST_MEMBERSHIP_REPORT: |
1022 | case IGMPV2_HOST_MEMBERSHIP_REPORT: | 1408 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
1023 | BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; | 1409 | BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; |
1024 | err = br_multicast_add_group(br, port, ih->group); | 1410 | err = br_ip4_multicast_add_group(br, port, ih->group); |
1025 | break; | 1411 | break; |
1026 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 1412 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
1027 | err = br_multicast_igmp3_report(br, port, skb2); | 1413 | err = br_ip4_multicast_igmp3_report(br, port, skb2); |
1028 | break; | 1414 | break; |
1029 | case IGMP_HOST_MEMBERSHIP_QUERY: | 1415 | case IGMP_HOST_MEMBERSHIP_QUERY: |
1030 | err = br_multicast_query(br, port, skb2); | 1416 | err = br_ip4_multicast_query(br, port, skb2); |
1031 | break; | 1417 | break; |
1032 | case IGMP_HOST_LEAVE_MESSAGE: | 1418 | case IGMP_HOST_LEAVE_MESSAGE: |
1033 | br_multicast_leave_group(br, port, ih->group); | 1419 | br_ip4_multicast_leave_group(br, port, ih->group); |
1420 | break; | ||
1421 | } | ||
1422 | |||
1423 | out: | ||
1424 | __skb_push(skb2, offset); | ||
1425 | err_out: | ||
1426 | if (skb2 != skb) | ||
1427 | kfree_skb(skb2); | ||
1428 | return err; | ||
1429 | } | ||
1430 | |||
1431 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1432 | static int br_multicast_ipv6_rcv(struct net_bridge *br, | ||
1433 | struct net_bridge_port *port, | ||
1434 | struct sk_buff *skb) | ||
1435 | { | ||
1436 | struct sk_buff *skb2 = skb; | ||
1437 | struct ipv6hdr *ip6h; | ||
1438 | struct icmp6hdr *icmp6h; | ||
1439 | u8 nexthdr; | ||
1440 | unsigned len; | ||
1441 | unsigned offset; | ||
1442 | int err; | ||
1443 | |||
1444 | if (!pskb_may_pull(skb, sizeof(*ip6h))) | ||
1445 | return -EINVAL; | ||
1446 | |||
1447 | ip6h = ipv6_hdr(skb); | ||
1448 | |||
1449 | /* | ||
1450 | * We're interested in MLD messages only. | ||
1451 | * - Version is 6 | ||
1452 | * - MLD has always Router Alert hop-by-hop option | ||
1453 | * - But we do not support jumbrograms. | ||
1454 | */ | ||
1455 | if (ip6h->version != 6 || | ||
1456 | ip6h->nexthdr != IPPROTO_HOPOPTS || | ||
1457 | ip6h->payload_len == 0) | ||
1458 | return 0; | ||
1459 | |||
1460 | len = ntohs(ip6h->payload_len); | ||
1461 | if (skb->len < len) | ||
1462 | return -EINVAL; | ||
1463 | |||
1464 | nexthdr = ip6h->nexthdr; | ||
1465 | offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); | ||
1466 | |||
1467 | if (offset < 0 || nexthdr != IPPROTO_ICMPV6) | ||
1468 | return 0; | ||
1469 | |||
1470 | /* Okay, we found ICMPv6 header */ | ||
1471 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
1472 | if (!skb2) | ||
1473 | return -ENOMEM; | ||
1474 | |||
1475 | len -= offset - skb_network_offset(skb2); | ||
1476 | |||
1477 | __skb_pull(skb2, offset); | ||
1478 | skb_reset_transport_header(skb2); | ||
1479 | |||
1480 | err = -EINVAL; | ||
1481 | if (!pskb_may_pull(skb2, sizeof(*icmp6h))) | ||
1482 | goto out; | ||
1483 | |||
1484 | icmp6h = icmp6_hdr(skb2); | ||
1485 | |||
1486 | switch (icmp6h->icmp6_type) { | ||
1487 | case ICMPV6_MGM_QUERY: | ||
1488 | case ICMPV6_MGM_REPORT: | ||
1489 | case ICMPV6_MGM_REDUCTION: | ||
1490 | case ICMPV6_MLD2_REPORT: | ||
1491 | break; | ||
1492 | default: | ||
1493 | err = 0; | ||
1494 | goto out; | ||
1495 | } | ||
1496 | |||
1497 | /* Okay, we found MLD message. Check further. */ | ||
1498 | if (skb2->len > len) { | ||
1499 | err = pskb_trim_rcsum(skb2, len); | ||
1500 | if (err) | ||
1501 | goto out; | ||
1502 | } | ||
1503 | |||
1504 | switch (skb2->ip_summed) { | ||
1505 | case CHECKSUM_COMPLETE: | ||
1506 | if (!csum_fold(skb2->csum)) | ||
1507 | break; | ||
1508 | /*FALLTHROUGH*/ | ||
1509 | case CHECKSUM_NONE: | ||
1510 | skb2->csum = 0; | ||
1511 | if (skb_checksum_complete(skb2)) | ||
1512 | goto out; | ||
1513 | } | ||
1514 | |||
1515 | err = 0; | ||
1516 | |||
1517 | BR_INPUT_SKB_CB(skb)->igmp = 1; | ||
1518 | |||
1519 | switch (icmp6h->icmp6_type) { | ||
1520 | case ICMPV6_MGM_REPORT: | ||
1521 | { | ||
1522 | struct mld_msg *mld = (struct mld_msg *)icmp6h; | ||
1523 | BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; | ||
1524 | err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); | ||
1525 | break; | ||
1526 | } | ||
1527 | case ICMPV6_MLD2_REPORT: | ||
1528 | err = br_ip6_multicast_mld2_report(br, port, skb2); | ||
1034 | break; | 1529 | break; |
1530 | case ICMPV6_MGM_QUERY: | ||
1531 | err = br_ip6_multicast_query(br, port, skb2); | ||
1532 | break; | ||
1533 | case ICMPV6_MGM_REDUCTION: | ||
1534 | { | ||
1535 | struct mld_msg *mld = (struct mld_msg *)icmp6h; | ||
1536 | br_ip6_multicast_leave_group(br, port, &mld->mld_mca); | ||
1537 | } | ||
1035 | } | 1538 | } |
1036 | 1539 | ||
1037 | out: | 1540 | out: |
@@ -1040,16 +1543,24 @@ out: | |||
1040 | kfree_skb(skb2); | 1543 | kfree_skb(skb2); |
1041 | return err; | 1544 | return err; |
1042 | } | 1545 | } |
1546 | #endif | ||
1043 | 1547 | ||
1044 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | 1548 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, |
1045 | struct sk_buff *skb) | 1549 | struct sk_buff *skb) |
1046 | { | 1550 | { |
1551 | BR_INPUT_SKB_CB(skb)->igmp = 0; | ||
1552 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; | ||
1553 | |||
1047 | if (br->multicast_disabled) | 1554 | if (br->multicast_disabled) |
1048 | return 0; | 1555 | return 0; |
1049 | 1556 | ||
1050 | switch (skb->protocol) { | 1557 | switch (skb->protocol) { |
1051 | case htons(ETH_P_IP): | 1558 | case htons(ETH_P_IP): |
1052 | return br_multicast_ipv4_rcv(br, port, skb); | 1559 | return br_multicast_ipv4_rcv(br, port, skb); |
1560 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1561 | case htons(ETH_P_IPV6): | ||
1562 | return br_multicast_ipv6_rcv(br, port, skb); | ||
1563 | #endif | ||
1053 | } | 1564 | } |
1054 | 1565 | ||
1055 | return 0; | 1566 | return 0; |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 268e2e725888..4c4977d12fd6 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/ip.h> | 27 | #include <linux/ip.h> |
27 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
28 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index fcffb3fb1177..aa56ac2c8829 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> | ||
14 | #include <net/rtnetlink.h> | 15 | #include <net/rtnetlink.h> |
15 | #include <net/net_namespace.h> | 16 | #include <net/net_namespace.h> |
16 | #include <net/sock.h> | 17 | #include <net/sock.h> |
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 763a3ec292e5..1413b72acc7f 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
82 | case NETDEV_UNREGISTER: | 82 | case NETDEV_UNREGISTER: |
83 | br_del_if(br, dev); | 83 | br_del_if(br, dev); |
84 | break; | 84 | break; |
85 | |||
86 | case NETDEV_PRE_TYPE_CHANGE: | ||
87 | /* Forbid underlaying device to change its type. */ | ||
88 | return NOTIFY_BAD; | ||
85 | } | 89 | } |
86 | 90 | ||
87 | /* Events that may cause spanning tree to refresh */ | 91 | /* Events that may cause spanning tree to refresh */ |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1cf2cef78584..018499ebe19d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -45,6 +45,17 @@ struct mac_addr | |||
45 | unsigned char addr[6]; | 45 | unsigned char addr[6]; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct br_ip | ||
49 | { | ||
50 | union { | ||
51 | __be32 ip4; | ||
52 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
53 | struct in6_addr ip6; | ||
54 | #endif | ||
55 | } u; | ||
56 | __be16 proto; | ||
57 | }; | ||
58 | |||
48 | struct net_bridge_fdb_entry | 59 | struct net_bridge_fdb_entry |
49 | { | 60 | { |
50 | struct hlist_node hlist; | 61 | struct hlist_node hlist; |
@@ -64,7 +75,7 @@ struct net_bridge_port_group { | |||
64 | struct rcu_head rcu; | 75 | struct rcu_head rcu; |
65 | struct timer_list timer; | 76 | struct timer_list timer; |
66 | struct timer_list query_timer; | 77 | struct timer_list query_timer; |
67 | __be32 addr; | 78 | struct br_ip addr; |
68 | u32 queries_sent; | 79 | u32 queries_sent; |
69 | }; | 80 | }; |
70 | 81 | ||
@@ -77,7 +88,7 @@ struct net_bridge_mdb_entry | |||
77 | struct rcu_head rcu; | 88 | struct rcu_head rcu; |
78 | struct timer_list timer; | 89 | struct timer_list timer; |
79 | struct timer_list query_timer; | 90 | struct timer_list query_timer; |
80 | __be32 addr; | 91 | struct br_ip addr; |
81 | u32 queries_sent; | 92 | u32 queries_sent; |
82 | }; | 93 | }; |
83 | 94 | ||
@@ -130,11 +141,20 @@ struct net_bridge_port | |||
130 | #endif | 141 | #endif |
131 | }; | 142 | }; |
132 | 143 | ||
144 | struct br_cpu_netstats { | ||
145 | unsigned long rx_packets; | ||
146 | unsigned long rx_bytes; | ||
147 | unsigned long tx_packets; | ||
148 | unsigned long tx_bytes; | ||
149 | }; | ||
150 | |||
133 | struct net_bridge | 151 | struct net_bridge |
134 | { | 152 | { |
135 | spinlock_t lock; | 153 | spinlock_t lock; |
136 | struct list_head port_list; | 154 | struct list_head port_list; |
137 | struct net_device *dev; | 155 | struct net_device *dev; |
156 | |||
157 | struct br_cpu_netstats __percpu *stats; | ||
138 | spinlock_t hash_lock; | 158 | spinlock_t hash_lock; |
139 | struct hlist_head hash[BR_HASH_SIZE]; | 159 | struct hlist_head hash[BR_HASH_SIZE]; |
140 | unsigned long feature_mask; | 160 | unsigned long feature_mask; |
@@ -206,12 +226,20 @@ struct net_bridge | |||
206 | 226 | ||
207 | struct br_input_skb_cb { | 227 | struct br_input_skb_cb { |
208 | struct net_device *brdev; | 228 | struct net_device *brdev; |
229 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | ||
209 | int igmp; | 230 | int igmp; |
210 | int mrouters_only; | 231 | int mrouters_only; |
232 | #endif | ||
211 | }; | 233 | }; |
212 | 234 | ||
213 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) | 235 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) |
214 | 236 | ||
237 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | ||
238 | # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (BR_INPUT_SKB_CB(__skb)->mrouters_only) | ||
239 | #else | ||
240 | # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0) | ||
241 | #endif | ||
242 | |||
215 | extern struct notifier_block br_device_notifier; | 243 | extern struct notifier_block br_device_notifier; |
216 | extern const u8 br_group_address[ETH_ALEN]; | 244 | extern const u8 br_group_address[ETH_ALEN]; |
217 | 245 | ||
@@ -252,7 +280,7 @@ extern void br_deliver(const struct net_bridge_port *to, | |||
252 | struct sk_buff *skb); | 280 | struct sk_buff *skb); |
253 | extern int br_dev_queue_push_xmit(struct sk_buff *skb); | 281 | extern int br_dev_queue_push_xmit(struct sk_buff *skb); |
254 | extern void br_forward(const struct net_bridge_port *to, | 282 | extern void br_forward(const struct net_bridge_port *to, |
255 | struct sk_buff *skb); | 283 | struct sk_buff *skb, struct sk_buff *skb0); |
256 | extern int br_forward_finish(struct sk_buff *skb); | 284 | extern int br_forward_finish(struct sk_buff *skb); |
257 | extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); | 285 | extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); |
258 | extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, | 286 | extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, |
@@ -423,7 +451,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port); | |||
423 | 451 | ||
424 | #ifdef CONFIG_SYSFS | 452 | #ifdef CONFIG_SYSFS |
425 | /* br_sysfs_if.c */ | 453 | /* br_sysfs_if.c */ |
426 | extern struct sysfs_ops brport_sysfs_ops; | 454 | extern const struct sysfs_ops brport_sysfs_ops; |
427 | extern int br_sysfs_addif(struct net_bridge_port *p); | 455 | extern int br_sysfs_addif(struct net_bridge_port *p); |
428 | 456 | ||
429 | /* br_sysfs_br.c */ | 457 | /* br_sysfs_br.c */ |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 81ae40b3f655..d66cce11f3bf 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/netfilter_bridge.h> | 15 | #include <linux/netfilter_bridge.h> |
16 | #include <linux/etherdevice.h> | 16 | #include <linux/etherdevice.h> |
17 | #include <linux/llc.h> | 17 | #include <linux/llc.h> |
18 | #include <linux/slab.h> | ||
18 | #include <net/net_namespace.h> | 19 | #include <net/net_namespace.h> |
19 | #include <net/llc.h> | 20 | #include <net/llc.h> |
20 | #include <net/llc_pdu.h> | 21 | #include <net/llc_pdu.h> |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 696596cd3384..0b9916489d6b 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -238,7 +238,7 @@ static ssize_t brport_store(struct kobject * kobj, | |||
238 | return ret; | 238 | return ret; |
239 | } | 239 | } |
240 | 240 | ||
241 | struct sysfs_ops brport_sysfs_ops = { | 241 | const struct sysfs_ops brport_sysfs_ops = { |
242 | .show = brport_show, | 242 | .show = brport_show, |
243 | .store = brport_store, | 243 | .store = brport_store, |
244 | }; | 244 | }; |
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index c6ac657074a6..f9560f3dbdc7 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c | |||
@@ -29,6 +29,7 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/slab.h> | ||
32 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
33 | #include <linux/socket.h> | 34 | #include <linux/socket.h> |
34 | #include <linux/skbuff.h> | 35 | #include <linux/skbuff.h> |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index dfb58056a89a..f0865fd1e3ec 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/netfilter_bridge/ebtables.h> | 23 | #include <linux/netfilter_bridge/ebtables.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <linux/slab.h> | ||
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
27 | #include <linux/smp.h> | 28 | #include <linux/smp.h> |
28 | #include <linux/cpumask.h> | 29 | #include <linux/cpumask.h> |
diff --git a/net/caif/Kconfig b/net/caif/Kconfig new file mode 100644 index 000000000000..cd1daf6008bd --- /dev/null +++ b/net/caif/Kconfig | |||
@@ -0,0 +1,48 @@ | |||
1 | # | ||
2 | # CAIF net configurations | ||
3 | # | ||
4 | |||
5 | #menu "CAIF Support" | ||
6 | comment "CAIF Support" | ||
7 | menuconfig CAIF | ||
8 | tristate "Enable CAIF support" | ||
9 | select CRC_CCITT | ||
10 | default n | ||
11 | ---help--- | ||
12 | The "Communication CPU to Application CPU Interface" (CAIF) is a packet | ||
13 | based connection-oriented MUX protocol developed by ST-Ericsson for use | ||
14 | with its modems. It is accessed from user space as sockets (PF_CAIF). | ||
15 | |||
16 | Say Y (or M) here if you build for a phone product (e.g. Android or | ||
17 | MeeGo ) that uses CAIF as transport, if unsure say N. | ||
18 | |||
19 | If you select to build it as module then CAIF_NETDEV also needs to be | ||
20 | built as modules. You will also need to say yes to any CAIF physical | ||
21 | devices that your platform requires. | ||
22 | |||
23 | See Documentation/networking/caif for a further explanation on how to | ||
24 | use and configure CAIF. | ||
25 | |||
26 | if CAIF | ||
27 | |||
28 | config CAIF_DEBUG | ||
29 | bool "Enable Debug" | ||
30 | default n | ||
31 | --- help --- | ||
32 | Enable the inclusion of debug code in the CAIF stack. | ||
33 | Be aware that doing this will impact performance. | ||
34 | If unsure say N. | ||
35 | |||
36 | |||
37 | config CAIF_NETDEV | ||
38 | tristate "CAIF GPRS Network device" | ||
39 | default CAIF | ||
40 | ---help--- | ||
41 | Say Y if you will be using a CAIF based GPRS network device. | ||
42 | This can be either built-in or a loadable module, | ||
43 | If you select to build it as a built-in then the main CAIF device must | ||
44 | also be a built-in. | ||
45 | If unsure say Y. | ||
46 | |||
47 | endif | ||
48 | #endmenu | ||
diff --git a/net/caif/Makefile b/net/caif/Makefile new file mode 100644 index 000000000000..34852af2595e --- /dev/null +++ b/net/caif/Makefile | |||
@@ -0,0 +1,26 @@ | |||
1 | ifeq ($(CONFIG_CAIF_DEBUG),1) | ||
2 | CAIF_DBG_FLAGS := -DDEBUG | ||
3 | endif | ||
4 | |||
5 | ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS) | ||
6 | |||
7 | caif-objs := caif_dev.o \ | ||
8 | cfcnfg.o cfmuxl.o cfctrl.o \ | ||
9 | cffrml.o cfveil.o cfdbgl.o\ | ||
10 | cfserl.o cfdgml.o \ | ||
11 | cfrfml.o cfvidl.o cfutill.o \ | ||
12 | cfsrvl.o cfpkt_skbuff.o caif_config_util.o | ||
13 | clean-dirs:= .tmp_versions | ||
14 | |||
15 | clean-files:= \ | ||
16 | Module.symvers \ | ||
17 | modules.order \ | ||
18 | *.cmd \ | ||
19 | *.o \ | ||
20 | *~ | ||
21 | |||
22 | obj-$(CONFIG_CAIF) += caif.o | ||
23 | obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o | ||
24 | obj-$(CONFIG_CAIF) += caif_socket.o | ||
25 | |||
26 | export-objs := caif.o | ||
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c new file mode 100644 index 000000000000..6f36580366f0 --- /dev/null +++ b/net/caif/caif_config_util.c | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <net/caif/cfctrl.h> | ||
10 | #include <net/caif/cfcnfg.h> | ||
11 | #include <net/caif/caif_dev.h> | ||
12 | |||
13 | int connect_req_to_link_param(struct cfcnfg *cnfg, | ||
14 | struct caif_connect_request *s, | ||
15 | struct cfctrl_link_param *l) | ||
16 | { | ||
17 | struct dev_info *dev_info; | ||
18 | enum cfcnfg_phy_preference pref; | ||
19 | memset(l, 0, sizeof(*l)); | ||
20 | l->priority = s->priority; | ||
21 | |||
22 | if (s->link_name[0] != '\0') | ||
23 | l->phyid = cfcnfg_get_named(cnfg, s->link_name); | ||
24 | else { | ||
25 | switch (s->link_selector) { | ||
26 | case CAIF_LINK_HIGH_BANDW: | ||
27 | pref = CFPHYPREF_HIGH_BW; | ||
28 | break; | ||
29 | case CAIF_LINK_LOW_LATENCY: | ||
30 | pref = CFPHYPREF_LOW_LAT; | ||
31 | break; | ||
32 | default: | ||
33 | return -EINVAL; | ||
34 | } | ||
35 | dev_info = cfcnfg_get_phyid(cnfg, pref); | ||
36 | if (dev_info == NULL) | ||
37 | return -ENODEV; | ||
38 | l->phyid = dev_info->id; | ||
39 | } | ||
40 | switch (s->protocol) { | ||
41 | case CAIFPROTO_AT: | ||
42 | l->linktype = CFCTRL_SRV_VEI; | ||
43 | if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN) | ||
44 | l->chtype = 0x02; | ||
45 | else | ||
46 | l->chtype = s->sockaddr.u.at.type; | ||
47 | l->endpoint = 0x00; | ||
48 | break; | ||
49 | case CAIFPROTO_DATAGRAM: | ||
50 | l->linktype = CFCTRL_SRV_DATAGRAM; | ||
51 | l->chtype = 0x00; | ||
52 | l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; | ||
53 | break; | ||
54 | case CAIFPROTO_DATAGRAM_LOOP: | ||
55 | l->linktype = CFCTRL_SRV_DATAGRAM; | ||
56 | l->chtype = 0x03; | ||
57 | l->endpoint = 0x00; | ||
58 | l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; | ||
59 | break; | ||
60 | case CAIFPROTO_RFM: | ||
61 | l->linktype = CFCTRL_SRV_RFM; | ||
62 | l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; | ||
63 | strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, | ||
64 | sizeof(l->u.rfm.volume)-1); | ||
65 | l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0; | ||
66 | break; | ||
67 | case CAIFPROTO_UTIL: | ||
68 | l->linktype = CFCTRL_SRV_UTIL; | ||
69 | l->endpoint = 0x00; | ||
70 | l->chtype = 0x00; | ||
71 | strncpy(l->u.utility.name, s->sockaddr.u.util.service, | ||
72 | sizeof(l->u.utility.name)-1); | ||
73 | l->u.utility.name[sizeof(l->u.utility.name)-1] = 0; | ||
74 | caif_assert(sizeof(l->u.utility.name) > 10); | ||
75 | l->u.utility.paramlen = s->param.size; | ||
76 | if (l->u.utility.paramlen > sizeof(l->u.utility.params)) | ||
77 | l->u.utility.paramlen = sizeof(l->u.utility.params); | ||
78 | |||
79 | memcpy(l->u.utility.params, s->param.data, | ||
80 | l->u.utility.paramlen); | ||
81 | |||
82 | break; | ||
83 | default: | ||
84 | return -EINVAL; | ||
85 | } | ||
86 | return 0; | ||
87 | } | ||
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c new file mode 100644 index 000000000000..024fd5bb2d39 --- /dev/null +++ b/net/caif/caif_dev.c | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * CAIF Interface registration. | ||
3 | * Copyright (C) ST-Ericsson AB 2010 | ||
4 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | * | ||
7 | * Borrowed heavily from file: pn_dev.c. Thanks to | ||
8 | * Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * and Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/version.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/if_arp.h> | ||
16 | #include <linux/net.h> | ||
17 | #include <linux/netdevice.h> | ||
18 | #include <linux/skbuff.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/wait.h> | ||
21 | #include <net/netns/generic.h> | ||
22 | #include <net/net_namespace.h> | ||
23 | #include <net/pkt_sched.h> | ||
24 | #include <net/caif/caif_device.h> | ||
25 | #include <net/caif/caif_dev.h> | ||
26 | #include <net/caif/caif_layer.h> | ||
27 | #include <net/caif/cfpkt.h> | ||
28 | #include <net/caif/cfcnfg.h> | ||
29 | |||
30 | MODULE_LICENSE("GPL"); | ||
31 | #define TIMEOUT (HZ*5) | ||
32 | |||
33 | /* Used for local tracking of the CAIF net devices */ | ||
34 | struct caif_device_entry { | ||
35 | struct cflayer layer; | ||
36 | struct list_head list; | ||
37 | atomic_t in_use; | ||
38 | atomic_t state; | ||
39 | u16 phyid; | ||
40 | struct net_device *netdev; | ||
41 | wait_queue_head_t event; | ||
42 | }; | ||
43 | |||
44 | struct caif_device_entry_list { | ||
45 | struct list_head list; | ||
46 | /* Protects simulanous deletes in list */ | ||
47 | spinlock_t lock; | ||
48 | }; | ||
49 | |||
50 | struct caif_net { | ||
51 | struct caif_device_entry_list caifdevs; | ||
52 | }; | ||
53 | |||
54 | static int caif_net_id; | ||
55 | static struct cfcnfg *cfg; | ||
56 | |||
57 | static struct caif_device_entry_list *caif_device_list(struct net *net) | ||
58 | { | ||
59 | struct caif_net *caifn; | ||
60 | BUG_ON(!net); | ||
61 | caifn = net_generic(net, caif_net_id); | ||
62 | BUG_ON(!caifn); | ||
63 | return &caifn->caifdevs; | ||
64 | } | ||
65 | |||
66 | /* Allocate new CAIF device. */ | ||
67 | static struct caif_device_entry *caif_device_alloc(struct net_device *dev) | ||
68 | { | ||
69 | struct caif_device_entry_list *caifdevs; | ||
70 | struct caif_device_entry *caifd; | ||
71 | caifdevs = caif_device_list(dev_net(dev)); | ||
72 | BUG_ON(!caifdevs); | ||
73 | caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); | ||
74 | if (!caifd) | ||
75 | return NULL; | ||
76 | caifd->netdev = dev; | ||
77 | list_add(&caifd->list, &caifdevs->list); | ||
78 | init_waitqueue_head(&caifd->event); | ||
79 | return caifd; | ||
80 | } | ||
81 | |||
82 | static struct caif_device_entry *caif_get(struct net_device *dev) | ||
83 | { | ||
84 | struct caif_device_entry_list *caifdevs = | ||
85 | caif_device_list(dev_net(dev)); | ||
86 | struct caif_device_entry *caifd; | ||
87 | BUG_ON(!caifdevs); | ||
88 | list_for_each_entry(caifd, &caifdevs->list, list) { | ||
89 | if (caifd->netdev == dev) | ||
90 | return caifd; | ||
91 | } | ||
92 | return NULL; | ||
93 | } | ||
94 | |||
95 | static void caif_device_destroy(struct net_device *dev) | ||
96 | { | ||
97 | struct caif_device_entry_list *caifdevs = | ||
98 | caif_device_list(dev_net(dev)); | ||
99 | struct caif_device_entry *caifd; | ||
100 | ASSERT_RTNL(); | ||
101 | if (dev->type != ARPHRD_CAIF) | ||
102 | return; | ||
103 | |||
104 | spin_lock_bh(&caifdevs->lock); | ||
105 | caifd = caif_get(dev); | ||
106 | if (caifd == NULL) { | ||
107 | spin_unlock_bh(&caifdevs->lock); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | list_del(&caifd->list); | ||
112 | spin_unlock_bh(&caifdevs->lock); | ||
113 | |||
114 | kfree(caifd); | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | static int transmit(struct cflayer *layer, struct cfpkt *pkt) | ||
119 | { | ||
120 | struct caif_device_entry *caifd = | ||
121 | container_of(layer, struct caif_device_entry, layer); | ||
122 | struct sk_buff *skb, *skb2; | ||
123 | int ret = -EINVAL; | ||
124 | skb = cfpkt_tonative(pkt); | ||
125 | skb->dev = caifd->netdev; | ||
126 | /* | ||
127 | * Don't allow SKB to be destroyed upon error, but signal resend | ||
128 | * notification to clients. We can't rely on the return value as | ||
129 | * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't. | ||
130 | */ | ||
131 | if (netif_queue_stopped(caifd->netdev)) | ||
132 | return -EAGAIN; | ||
133 | skb2 = skb_get(skb); | ||
134 | |||
135 | ret = dev_queue_xmit(skb2); | ||
136 | |||
137 | if (!ret) | ||
138 | kfree_skb(skb); | ||
139 | else | ||
140 | return -EAGAIN; | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
146 | { | ||
147 | struct caif_device_entry *caifd; | ||
148 | struct caif_dev_common *caifdev; | ||
149 | caifd = container_of(layr, struct caif_device_entry, layer); | ||
150 | caifdev = netdev_priv(caifd->netdev); | ||
151 | if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) { | ||
152 | atomic_set(&caifd->in_use, 1); | ||
153 | wake_up_interruptible(&caifd->event); | ||
154 | |||
155 | } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) { | ||
156 | atomic_set(&caifd->in_use, 0); | ||
157 | wake_up_interruptible(&caifd->event); | ||
158 | } | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Stuff received packets to associated sockets. | ||
164 | * On error, returns non-zero and releases the skb. | ||
165 | */ | ||
166 | static int receive(struct sk_buff *skb, struct net_device *dev, | ||
167 | struct packet_type *pkttype, struct net_device *orig_dev) | ||
168 | { | ||
169 | struct net *net; | ||
170 | struct cfpkt *pkt; | ||
171 | struct caif_device_entry *caifd; | ||
172 | net = dev_net(dev); | ||
173 | pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); | ||
174 | caifd = caif_get(dev); | ||
175 | if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) | ||
176 | return NET_RX_DROP; | ||
177 | |||
178 | if (caifd->layer.up->receive(caifd->layer.up, pkt)) | ||
179 | return NET_RX_DROP; | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static struct packet_type caif_packet_type __read_mostly = { | ||
185 | .type = cpu_to_be16(ETH_P_CAIF), | ||
186 | .func = receive, | ||
187 | }; | ||
188 | |||
189 | static void dev_flowctrl(struct net_device *dev, int on) | ||
190 | { | ||
191 | struct caif_device_entry *caifd = caif_get(dev); | ||
192 | if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) | ||
193 | return; | ||
194 | |||
195 | caifd->layer.up->ctrlcmd(caifd->layer.up, | ||
196 | on ? | ||
197 | _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND : | ||
198 | _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, | ||
199 | caifd->layer.id); | ||
200 | } | ||
201 | |||
202 | /* notify Caif of device events */ | ||
203 | static int caif_device_notify(struct notifier_block *me, unsigned long what, | ||
204 | void *arg) | ||
205 | { | ||
206 | struct net_device *dev = arg; | ||
207 | struct caif_device_entry *caifd = NULL; | ||
208 | struct caif_dev_common *caifdev; | ||
209 | enum cfcnfg_phy_preference pref; | ||
210 | int res = -EINVAL; | ||
211 | enum cfcnfg_phy_type phy_type; | ||
212 | |||
213 | if (dev->type != ARPHRD_CAIF) | ||
214 | return 0; | ||
215 | |||
216 | switch (what) { | ||
217 | case NETDEV_REGISTER: | ||
218 | pr_info("CAIF: %s():register %s\n", __func__, dev->name); | ||
219 | caifd = caif_device_alloc(dev); | ||
220 | if (caifd == NULL) | ||
221 | break; | ||
222 | caifdev = netdev_priv(dev); | ||
223 | caifdev->flowctrl = dev_flowctrl; | ||
224 | atomic_set(&caifd->state, what); | ||
225 | res = 0; | ||
226 | break; | ||
227 | |||
228 | case NETDEV_UP: | ||
229 | pr_info("CAIF: %s(): up %s\n", __func__, dev->name); | ||
230 | caifd = caif_get(dev); | ||
231 | if (caifd == NULL) | ||
232 | break; | ||
233 | caifdev = netdev_priv(dev); | ||
234 | if (atomic_read(&caifd->state) == NETDEV_UP) { | ||
235 | pr_info("CAIF: %s():%s already up\n", | ||
236 | __func__, dev->name); | ||
237 | break; | ||
238 | } | ||
239 | atomic_set(&caifd->state, what); | ||
240 | caifd->layer.transmit = transmit; | ||
241 | caifd->layer.modemcmd = modemcmd; | ||
242 | |||
243 | if (caifdev->use_frag) | ||
244 | phy_type = CFPHYTYPE_FRAG; | ||
245 | else | ||
246 | phy_type = CFPHYTYPE_CAIF; | ||
247 | |||
248 | switch (caifdev->link_select) { | ||
249 | case CAIF_LINK_HIGH_BANDW: | ||
250 | pref = CFPHYPREF_HIGH_BW; | ||
251 | break; | ||
252 | case CAIF_LINK_LOW_LATENCY: | ||
253 | pref = CFPHYPREF_LOW_LAT; | ||
254 | break; | ||
255 | default: | ||
256 | pref = CFPHYPREF_HIGH_BW; | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | cfcnfg_add_phy_layer(get_caif_conf(), | ||
261 | phy_type, | ||
262 | dev, | ||
263 | &caifd->layer, | ||
264 | &caifd->phyid, | ||
265 | pref, | ||
266 | caifdev->use_fcs, | ||
267 | caifdev->use_stx); | ||
268 | strncpy(caifd->layer.name, dev->name, | ||
269 | sizeof(caifd->layer.name) - 1); | ||
270 | caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; | ||
271 | break; | ||
272 | |||
273 | case NETDEV_GOING_DOWN: | ||
274 | caifd = caif_get(dev); | ||
275 | if (caifd == NULL) | ||
276 | break; | ||
277 | pr_info("CAIF: %s():going down %s\n", __func__, dev->name); | ||
278 | |||
279 | if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN || | ||
280 | atomic_read(&caifd->state) == NETDEV_DOWN) | ||
281 | break; | ||
282 | |||
283 | atomic_set(&caifd->state, what); | ||
284 | if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) | ||
285 | return -EINVAL; | ||
286 | caifd->layer.up->ctrlcmd(caifd->layer.up, | ||
287 | _CAIF_CTRLCMD_PHYIF_DOWN_IND, | ||
288 | caifd->layer.id); | ||
289 | res = wait_event_interruptible_timeout(caifd->event, | ||
290 | atomic_read(&caifd->in_use) == 0, | ||
291 | TIMEOUT); | ||
292 | break; | ||
293 | |||
294 | case NETDEV_DOWN: | ||
295 | caifd = caif_get(dev); | ||
296 | if (caifd == NULL) | ||
297 | break; | ||
298 | pr_info("CAIF: %s(): down %s\n", __func__, dev->name); | ||
299 | if (atomic_read(&caifd->in_use)) | ||
300 | pr_warning("CAIF: %s(): " | ||
301 | "Unregistering an active CAIF device: %s\n", | ||
302 | __func__, dev->name); | ||
303 | cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); | ||
304 | atomic_set(&caifd->state, what); | ||
305 | break; | ||
306 | |||
307 | case NETDEV_UNREGISTER: | ||
308 | caifd = caif_get(dev); | ||
309 | pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name); | ||
310 | atomic_set(&caifd->state, what); | ||
311 | caif_device_destroy(dev); | ||
312 | break; | ||
313 | } | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static struct notifier_block caif_device_notifier = { | ||
318 | .notifier_call = caif_device_notify, | ||
319 | .priority = 0, | ||
320 | }; | ||
321 | |||
322 | |||
323 | struct cfcnfg *get_caif_conf(void) | ||
324 | { | ||
325 | return cfg; | ||
326 | } | ||
327 | EXPORT_SYMBOL(get_caif_conf); | ||
328 | |||
329 | int caif_connect_client(struct caif_connect_request *conn_req, | ||
330 | struct cflayer *client_layer) | ||
331 | { | ||
332 | struct cfctrl_link_param param; | ||
333 | int ret; | ||
334 | ret = connect_req_to_link_param(get_caif_conf(), conn_req, ¶m); | ||
335 | if (ret) | ||
336 | return ret; | ||
337 | /* Hook up the adaptation layer. */ | ||
338 | return cfcnfg_add_adaptation_layer(get_caif_conf(), | ||
339 | ¶m, client_layer); | ||
340 | } | ||
341 | EXPORT_SYMBOL(caif_connect_client); | ||
342 | |||
343 | int caif_disconnect_client(struct cflayer *adap_layer) | ||
344 | { | ||
345 | return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer); | ||
346 | } | ||
347 | EXPORT_SYMBOL(caif_disconnect_client); | ||
348 | |||
349 | void caif_release_client(struct cflayer *adap_layer) | ||
350 | { | ||
351 | cfcnfg_release_adap_layer(adap_layer); | ||
352 | } | ||
353 | EXPORT_SYMBOL(caif_release_client); | ||
354 | |||
355 | /* Per-namespace Caif devices handling */ | ||
356 | static int caif_init_net(struct net *net) | ||
357 | { | ||
358 | struct caif_net *caifn = net_generic(net, caif_net_id); | ||
359 | INIT_LIST_HEAD(&caifn->caifdevs.list); | ||
360 | spin_lock_init(&caifn->caifdevs.lock); | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | static void caif_exit_net(struct net *net) | ||
365 | { | ||
366 | struct net_device *dev; | ||
367 | int res; | ||
368 | rtnl_lock(); | ||
369 | for_each_netdev(net, dev) { | ||
370 | if (dev->type != ARPHRD_CAIF) | ||
371 | continue; | ||
372 | res = dev_close(dev); | ||
373 | caif_device_destroy(dev); | ||
374 | } | ||
375 | rtnl_unlock(); | ||
376 | } | ||
377 | |||
378 | static struct pernet_operations caif_net_ops = { | ||
379 | .init = caif_init_net, | ||
380 | .exit = caif_exit_net, | ||
381 | .id = &caif_net_id, | ||
382 | .size = sizeof(struct caif_net), | ||
383 | }; | ||
384 | |||
385 | /* Initialize Caif devices list */ | ||
386 | static int __init caif_device_init(void) | ||
387 | { | ||
388 | int result; | ||
389 | cfg = cfcnfg_create(); | ||
390 | if (!cfg) { | ||
391 | pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__); | ||
392 | goto err_cfcnfg_create_failed; | ||
393 | } | ||
394 | result = register_pernet_device(&caif_net_ops); | ||
395 | |||
396 | if (result) { | ||
397 | kfree(cfg); | ||
398 | cfg = NULL; | ||
399 | return result; | ||
400 | } | ||
401 | dev_add_pack(&caif_packet_type); | ||
402 | register_netdevice_notifier(&caif_device_notifier); | ||
403 | |||
404 | return result; | ||
405 | err_cfcnfg_create_failed: | ||
406 | return -ENODEV; | ||
407 | } | ||
408 | |||
409 | static void __exit caif_device_exit(void) | ||
410 | { | ||
411 | dev_remove_pack(&caif_packet_type); | ||
412 | unregister_pernet_device(&caif_net_ops); | ||
413 | unregister_netdevice_notifier(&caif_device_notifier); | ||
414 | cfcnfg_remove(cfg); | ||
415 | } | ||
416 | |||
417 | module_init(caif_device_init); | ||
418 | module_exit(caif_device_exit); | ||
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c new file mode 100644 index 000000000000..c3a70c5c893a --- /dev/null +++ b/net/caif/caif_socket.c | |||
@@ -0,0 +1,1252 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/fs.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/mutex.h> | ||
13 | #include <linux/list.h> | ||
14 | #include <linux/wait.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/tcp.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/mutex.h> | ||
19 | #include <linux/debugfs.h> | ||
20 | #include <linux/caif/caif_socket.h> | ||
21 | #include <asm/atomic.h> | ||
22 | #include <net/sock.h> | ||
23 | #include <net/tcp_states.h> | ||
24 | #include <net/caif/caif_layer.h> | ||
25 | #include <net/caif/caif_dev.h> | ||
26 | #include <net/caif/cfpkt.h> | ||
27 | |||
28 | MODULE_LICENSE("GPL"); | ||
29 | MODULE_ALIAS_NETPROTO(AF_CAIF); | ||
30 | |||
/* Default send/receive buffer sizes, in whole CAIF payloads. */
#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
33 | |||
/*
 * CAIF state is re-using the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
	CAIF_CONNECTED = TCP_ESTABLISHED,	/* CAIF channel is open */
	CAIF_CONNECTING = TCP_SYN_SENT,		/* connect request in flight */
	CAIF_DISCONNECTED = TCP_CLOSE		/* no channel */
};
44 | |||
/* Bit numbers inside caifsock.flow_state, driven by set/clear/test_bit. */
#define TX_FLOW_ON_BIT 1
#define RX_FLOW_ON_BIT 2

/* Root debugfs directory for the CAIF socket counters. */
static struct dentry *debugfsdir;
49 | |||
#ifdef CONFIG_DEBUG_FS
/* Global event counters exposed through debugfs; debugging aid only. */
struct debug_fs_counter {
	atomic_t caif_nr_socks;		/* sockets currently alive */
	atomic_t num_connect_req;
	atomic_t num_connect_resp;
	atomic_t num_connect_fail_resp;
	atomic_t num_disconnect;
	atomic_t num_remote_shutdown_ind;
	atomic_t num_tx_flow_off_ind;
	atomic_t num_tx_flow_on_ind;
	atomic_t num_rx_flow_off;
	atomic_t num_rx_flow_on;
};
/* NOTE(review): file-scope but not static — confirm no other file uses
 * it; otherwise it should be static. */
struct debug_fs_counter cnt;
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
/* Counters compile away entirely when debugfs support is off. */
#define dbfs_atomic_inc(v)
#define dbfs_atomic_dec(v)
#endif
70 | |||
/* A CAIF socket: a struct sock extended with a CAIF adaptation layer. */
struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;	/* adaptation layer hooked into the CAIF stack */
	char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
	u32 flow_state;		/* TX/RX_FLOW_ON_BIT flags */
	struct caif_connect_request conn_req;	/* parameters for (re)connect */
	struct mutex readlock;	/* serializes the stream receive path */
	struct dentry *debugfs_socket_dir;	/* per-socket debugfs dir, may be NULL */
};
80 | |||
/* Nonzero when we may keep accepting data from the modem (RX flow on).
 * NOTE(review): bitops operate on unsigned long while flow_state is a
 * u32 behind a void* cast — confirm this is safe on 64-bit big-endian. */
static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT,
			(void *) &cf_sk->flow_state);
}
86 | |||
/* Nonzero when the modem allows us to transmit (TX flow on). */
static int tx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(TX_FLOW_ON_BIT,
			(void *) &cf_sk->flow_state);
}
92 | |||
/* Mark RX flow as off: receive buffer is (nearly) full. */
static void set_rx_flow_off(struct caifsock *cf_sk)
{
	 clear_bit(RX_FLOW_ON_BIT,
		(void *) &cf_sk->flow_state);
}
98 | |||
/* Mark RX flow as on: receive buffer has drained below low water. */
static void set_rx_flow_on(struct caifsock *cf_sk)
{
	 set_bit(RX_FLOW_ON_BIT,
		(void *) &cf_sk->flow_state);
}
104 | |||
/* Mark TX flow as off: modem asked us to stop sending. */
static void set_tx_flow_off(struct caifsock *cf_sk)
{
	 clear_bit(TX_FLOW_ON_BIT,
		(void *) &cf_sk->flow_state);
}
110 | |||
/* Mark TX flow as on: modem allows sending again. */
static void set_tx_flow_on(struct caifsock *cf_sk)
{
	 set_bit(TX_FLOW_ON_BIT,
		(void *) &cf_sk->flow_state);
}
116 | |||
117 | static void caif_read_lock(struct sock *sk) | ||
118 | { | ||
119 | struct caifsock *cf_sk; | ||
120 | cf_sk = container_of(sk, struct caifsock, sk); | ||
121 | mutex_lock(&cf_sk->readlock); | ||
122 | } | ||
123 | |||
124 | static void caif_read_unlock(struct sock *sk) | ||
125 | { | ||
126 | struct caifsock *cf_sk; | ||
127 | cf_sk = container_of(sk, struct caifsock, sk); | ||
128 | mutex_unlock(&cf_sk->readlock); | ||
129 | } | ||
130 | |||
/* Low-water mark for re-enabling RX flow: a quarter of sk_rcvbuf.
 * NOTE(review): not static — confirm whether any other file uses this;
 * if not, it should be made static. */
int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
	/* A quarter of full buffer is used a low water mark */
	return cf_sk->sk.sk_rcvbuf / 4;
}
136 | |||
137 | void caif_flow_ctrl(struct sock *sk, int mode) | ||
138 | { | ||
139 | struct caifsock *cf_sk; | ||
140 | cf_sk = container_of(sk, struct caifsock, sk); | ||
141 | if (cf_sk->layer.dn) | ||
142 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are | ||
147 | * not dropped, but CAIF is sending flow off instead. | ||
148 | */ | ||
149 | int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
150 | { | ||
151 | int err; | ||
152 | int skb_len; | ||
153 | unsigned long flags; | ||
154 | struct sk_buff_head *list = &sk->sk_receive_queue; | ||
155 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
156 | |||
157 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | ||
158 | (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { | ||
159 | trace_printk("CAIF: %s():" | ||
160 | " sending flow OFF (queue len = %d %d)\n", | ||
161 | __func__, | ||
162 | atomic_read(&cf_sk->sk.sk_rmem_alloc), | ||
163 | sk_rcvbuf_lowwater(cf_sk)); | ||
164 | set_rx_flow_off(cf_sk); | ||
165 | if (cf_sk->layer.dn) | ||
166 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
167 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
168 | } | ||
169 | |||
170 | err = sk_filter(sk, skb); | ||
171 | if (err) | ||
172 | return err; | ||
173 | if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { | ||
174 | set_rx_flow_off(cf_sk); | ||
175 | trace_printk("CAIF: %s():" | ||
176 | " sending flow OFF due to rmem_schedule\n", | ||
177 | __func__); | ||
178 | if (cf_sk->layer.dn) | ||
179 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
180 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
181 | } | ||
182 | skb->dev = NULL; | ||
183 | skb_set_owner_r(skb, sk); | ||
184 | /* Cache the SKB length before we tack it onto the receive | ||
185 | * queue. Once it is added it no longer belongs to us and | ||
186 | * may be freed by other threads of control pulling packets | ||
187 | * from the queue. | ||
188 | */ | ||
189 | skb_len = skb->len; | ||
190 | spin_lock_irqsave(&list->lock, flags); | ||
191 | if (!sock_flag(sk, SOCK_DEAD)) | ||
192 | __skb_queue_tail(list, skb); | ||
193 | spin_unlock_irqrestore(&list->lock, flags); | ||
194 | |||
195 | if (!sock_flag(sk, SOCK_DEAD)) | ||
196 | sk->sk_data_ready(sk, skb_len); | ||
197 | else | ||
198 | kfree_skb(skb); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
/* Packet Receive Callback function called from CAIF Stack */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
	struct caifsock *cf_sk;
	struct sk_buff *skb;

	cf_sk = container_of(layr, struct caifsock, layer);
	/* Unwrap the CAIF packet back into the skb it carries. */
	skb = cfpkt_tonative(pkt);

	/* Discard anything received while not connected. */
	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
		cfpkt_destroy(pkt);
		return 0;
	}
	/* NOTE(review): the return value of caif_queue_rcv_skb() is
	 * ignored; on sk_filter failure the skb appears to leak —
	 * confirm ownership rules. */
	caif_queue_rcv_skb(&cf_sk->sk, skb);
	return 0;
}
218 | |||
/*
 * Packet Control Callback function called from CAIF.
 * Translates flow-control and connection life-cycle indications coming
 * up from the CAIF stack into socket state updates and wake-ups.
 */
static void caif_ctrl_cb(struct cflayer *layr,
				enum caif_ctrlcmd flow,
				int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		dbfs_atomic_inc(&cnt.num_connect_resp);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		cfcnfg_release_adap_layer(&cf_sk->layer);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		dbfs_atomic_inc(&cnt.num_connect_fail_resp);
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seems to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("CAIF: %s(): Unexpected flow command %d\n",
				__func__, flow);
	}
}
283 | |||
284 | static void caif_check_flow_release(struct sock *sk) | ||
285 | { | ||
286 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
287 | |||
288 | if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL) | ||
289 | return; | ||
290 | if (rx_flow_is_on(cf_sk)) | ||
291 | return; | ||
292 | |||
293 | if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { | ||
294 | dbfs_atomic_inc(&cnt.num_rx_flow_on); | ||
295 | set_rx_flow_on(cf_sk); | ||
296 | cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
297 | CAIF_MODEMCMD_FLOW_ON_REQ); | ||
298 | } | ||
299 | } | ||
300 | /* | ||
301 | * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer | ||
302 | * has sufficient size. | ||
303 | */ | ||
304 | |||
305 | static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
306 | struct msghdr *m, size_t buf_len, int flags) | ||
307 | |||
308 | { | ||
309 | struct sock *sk = sock->sk; | ||
310 | struct sk_buff *skb; | ||
311 | int ret = 0; | ||
312 | int len; | ||
313 | |||
314 | if (unlikely(!buf_len)) | ||
315 | return -EINVAL; | ||
316 | |||
317 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | ||
318 | if (!skb) | ||
319 | goto read_error; | ||
320 | |||
321 | len = skb->len; | ||
322 | |||
323 | if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) { | ||
324 | len = buf_len; | ||
325 | /* | ||
326 | * Push skb back on receive queue if buffer too small. | ||
327 | * This has a built-in race where multi-threaded receive | ||
328 | * may get packet in wrong order, but multiple read does | ||
329 | * not really guarantee ordered delivery anyway. | ||
330 | * Let's optimize for speed without taking locks. | ||
331 | */ | ||
332 | |||
333 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
334 | ret = -EMSGSIZE; | ||
335 | goto read_error; | ||
336 | } | ||
337 | |||
338 | ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); | ||
339 | if (ret) | ||
340 | goto read_error; | ||
341 | |||
342 | skb_free_datagram(sk, skb); | ||
343 | |||
344 | caif_check_flow_release(sk); | ||
345 | |||
346 | return len; | ||
347 | |||
348 | read_error: | ||
349 | return ret; | ||
350 | } | ||
351 | |||
352 | |||
/* Copied from unix_stream_wait_data, identical except for lock call.
 * Sleeps (with the socket lock dropped) until data arrives, the
 * connection changes state, a signal is pending, or the timeout runs
 * out. Returns the remaining timeout; the socket lock is NOT held on
 * return. */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		/* Stop waiting on data, error, disconnect, shutdown,
		 * socket death, pending signal or expired timeout. */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
			sk->sk_err ||
			sk->sk_state != CAIF_CONNECTED ||
			sock_flag(sk, SOCK_DEAD) ||
			(sk->sk_shutdown & RCV_SHUTDOWN) ||
			signal_pending(current) ||
			!timeo)
			break;

		/* Drop the socket lock while sleeping so the receive
		 * callback can queue data and wake us. */
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
382 | |||
383 | |||
/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 *
 * Blocking stream receive: dequeues skbs until at least 'target'
 * (sock_rcvlowat) bytes are copied or an error/shutdown occurs.
 * cf_sk->readlock serializes concurrent readers so the chunks of a
 * partially-consumed skb are not interleaved between them.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t size,
				int flags)
{
	struct sock *sk = sock->sk;
	int copied = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	/*
	 * Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */
	err = -EAGAIN;
	if (sk->sk_state == CAIF_CONNECTING)
		goto out;

	caif_read_lock(sk);
	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	do {
		int chunk;
		struct sk_buff *skb;

		lock_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		caif_check_flow_release(sk);

		if (skb == NULL) {
			/* Empty queue: decide whether to return what we
			 * have, report an error, or sleep for more data. */
			if (copied >= target)
				goto unlock;
			/*
			 * POSIX 1003.1g mandates this order.
			 */
			err = sock_error(sk);
			if (err)
				goto unlock;
			err = -ECONNRESET;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			err = -EPIPE;
			if (sk->sk_state != CAIF_CONNECTED)
				goto unlock;
			if (sock_flag(sk, SOCK_DEAD))
				goto unlock;

			release_sock(sk);

			err = -EAGAIN;
			if (!timeo)
				break;

			/* Drop the read mutex while sleeping so other
			 * readers are not blocked behind us. */
			caif_read_unlock(sk);

			timeo = caif_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			caif_read_lock(sk);
			continue;
		unlock:
			release_sock(sk);
			break;
		}
		release_sock(sk);
		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			/* Copy to user failed: requeue and report EFAULT
			 * only if nothing was delivered yet. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			/* put the skb back if we didn't use it up. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/*
			 * It is questionable, see note in unix_dgram_recvmsg.
			 */
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);
	caif_read_unlock(sk);

out:
	return copied ? : err;
}
499 | |||
/*
 * Copied from sock.c:sock_wait_for_wmem, but change to wait for
 * CAIF flow-on and sock_writable.
 *
 * Sleeps until TX flow is on (and, when wait_writeable is set, the
 * socket has send-buffer space) or until timeout/signal/error.
 * On return *err is 0 or a negative errno; the remaining timeout is
 * the return value.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
				int wait_writeable, long timeo, int *err)
{
	struct sock *sk = &cf_sk->sk;
	DEFINE_WAIT(wait);
	for (;;) {
		*err = 0;
		if (tx_flow_is_on(cf_sk) &&
			(!wait_writeable || sock_writeable(&cf_sk->sk)))
			break;
		*err = -ETIMEDOUT;
		if (!timeo)
			break;
		*err = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		/* NOTE(review): the success condition above is tested
		 * before prepare_to_wait(); confirm a flow-on wake-up
		 * cannot be lost between the test and the sleep. */
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		*err = -ECONNRESET;
		if (sk->sk_shutdown & SHUTDOWN_MASK)
			break;
		*err = -sk->sk_err;
		if (sk->sk_err)
			break;
		*err = -EPIPE;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}
535 | |||
/*
 * Transmit a SKB. The device may temporarily request re-transmission
 * by returning EAGAIN.
 *
 * Wraps the skb in a CAIF packet and pushes it down the stack, waiting
 * for flow-on between -EAGAIN retries unless 'noblock' is set.
 * Returns >= 0 on success or a negative errno.
 * NOTE(review): on a final error the pkt is not freed here; callers
 * free the skb — confirm cfpkt/skb ownership on the error path.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;
	int ret, loopcnt = 0;

	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
	do {

		ret = -ETIMEDOUT;

		/* Slight paranoia, probably not needed. */
		if (unlikely(loopcnt++ > 1000)) {
			pr_warning("CAIF: %s(): transmit retries failed,"
				" error = %d\n", __func__, ret);
			break;
		}

		if (cf_sk->layer.dn != NULL)
			ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
		if (likely(ret >= 0))
			break;
		/* if transmit return -EAGAIN, then retry */
		if (noblock && ret == -EAGAIN)
			break;
		/* Blocking path: wait for flow-on before retrying. */
		timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
		if (signal_pending(current)) {
			ret = sock_intr_errno(timeo);
			break;
		}
		if (ret)
			break;
		if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
			sock_flag(&cf_sk->sk, SOCK_DEAD) ||
			(cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
			ret = -EPIPE;
			cf_sk->sk.sk_err = EPIPE;
			break;
		}
	} while (ret == -EAGAIN);
	return ret;
}
583 | |||
/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF.
 * Sends one complete datagram; payload plus CAIF head/tail room must
 * fit in CAIF_MAX_PAYLOAD_SIZE. Returns len or a negative errno. */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;

	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;

	/* Account for CAIF protocol head/tail room around the payload. */
	buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;

	ret = -EMSGSIZE;
	if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
		goto err;

	timeo = sock_sndtimeo(sk, noblock);
	/* NOTE(review): the error out-parameter from this wait is
	 * overwritten just below; an interrupted/timed-out wait falls
	 * through to the state checks — confirm this is intended. */
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				1, timeo, &ret);

	ret = -EPIPE;
	/* NOTE(review): tests RCV_SHUTDOWN on the send path;
	 * SEND_SHUTDOWN would be the conventional check — verify. */
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;

	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
	if (!skb)
		goto err;
	skb_reserve(skb, CAIF_NEEDED_HEADROOM);

	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		goto err;
	return len;
err:
	/* kfree_skb(NULL) is a no-op, so this covers all error paths. */
	kfree_skb(skb);
	return ret;
}
647 | |||
/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * Changed removed permission handling and added waiting for flow on
 * and other minor adaptations.
 *
 * Splits the user buffer into chunks bounded by CAIF_MAX_PAYLOAD_SIZE,
 * half the send buffer, and SKB_MAX_ALLOC, transmitting each chunk.
 * Returns bytes sent, or a negative errno when nothing was sent.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;

	err = -EOPNOTSUPP;

	if (unlikely(msg->msg_flags&MSG_OOB))
		goto out_err;

	if (unlikely(msg->msg_namelen))
		goto out_err;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	/* NOTE(review): err from this wait is not checked before the
	 * first allocation below — confirm this is intentional. */
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;

	while (sent < len) {

		size = len-sent;

		if (size > CAIF_MAX_PAYLOAD_SIZE)
			size = CAIF_MAX_PAYLOAD_SIZE;

		/* If size is more than half of sndbuf, chop up message */
		/* NOTE(review): assumes (sk_sndbuf >> 1) > 64; confirm a
		 * minimum sndbuf keeps this from going non-positive. */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk,
					size + CAIF_NEEDED_HEADROOM
					+ CAIF_NEEDED_TAILROOM,
					msg->msg_flags&MSG_DONTWAIT,
					&err);
		if (skb == NULL)
			goto out_err;

		skb_reserve(skb, CAIF_NEEDED_HEADROOM);
		/*
		 * If you pass two values to the sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if it fails grab the
		 * fallback size buffer which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				msg->msg_flags&MSG_DONTWAIT, timeo);
		if (err < 0) {
			kfree_skb(skb);
			goto pipe_err;
		}
		sent += size;
	}

	return sent;

pipe_err:
	/* Match UNIX-socket behavior: SIGPIPE on a broken pipe unless
	 * the caller asked for MSG_NOSIGNAL. */
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
}
732 | |||
733 | static int setsockopt(struct socket *sock, | ||
734 | int lvl, int opt, char __user *ov, unsigned int ol) | ||
735 | { | ||
736 | struct sock *sk = sock->sk; | ||
737 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
738 | int prio, linksel; | ||
739 | struct ifreq ifreq; | ||
740 | |||
741 | if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) | ||
742 | return -ENOPROTOOPT; | ||
743 | |||
744 | switch (opt) { | ||
745 | case CAIFSO_LINK_SELECT: | ||
746 | if (ol < sizeof(int)) | ||
747 | return -EINVAL; | ||
748 | if (lvl != SOL_CAIF) | ||
749 | goto bad_sol; | ||
750 | if (copy_from_user(&linksel, ov, sizeof(int))) | ||
751 | return -EINVAL; | ||
752 | lock_sock(&(cf_sk->sk)); | ||
753 | cf_sk->conn_req.link_selector = linksel; | ||
754 | release_sock(&cf_sk->sk); | ||
755 | return 0; | ||
756 | |||
757 | case SO_PRIORITY: | ||
758 | if (lvl != SOL_SOCKET) | ||
759 | goto bad_sol; | ||
760 | if (ol < sizeof(int)) | ||
761 | return -EINVAL; | ||
762 | if (copy_from_user(&prio, ov, sizeof(int))) | ||
763 | return -EINVAL; | ||
764 | lock_sock(&(cf_sk->sk)); | ||
765 | cf_sk->conn_req.priority = prio; | ||
766 | release_sock(&cf_sk->sk); | ||
767 | return 0; | ||
768 | |||
769 | case SO_BINDTODEVICE: | ||
770 | if (lvl != SOL_SOCKET) | ||
771 | goto bad_sol; | ||
772 | if (ol < sizeof(struct ifreq)) | ||
773 | return -EINVAL; | ||
774 | if (copy_from_user(&ifreq, ov, sizeof(ifreq))) | ||
775 | return -EFAULT; | ||
776 | lock_sock(&(cf_sk->sk)); | ||
777 | strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name, | ||
778 | sizeof(cf_sk->conn_req.link_name)); | ||
779 | cf_sk->conn_req.link_name | ||
780 | [sizeof(cf_sk->conn_req.link_name)-1] = 0; | ||
781 | release_sock(&cf_sk->sk); | ||
782 | return 0; | ||
783 | |||
784 | case CAIFSO_REQ_PARAM: | ||
785 | if (lvl != SOL_CAIF) | ||
786 | goto bad_sol; | ||
787 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) | ||
788 | return -ENOPROTOOPT; | ||
789 | lock_sock(&(cf_sk->sk)); | ||
790 | cf_sk->conn_req.param.size = ol; | ||
791 | if (ol > sizeof(cf_sk->conn_req.param.data) || | ||
792 | copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { | ||
793 | release_sock(&cf_sk->sk); | ||
794 | return -EINVAL; | ||
795 | } | ||
796 | release_sock(&cf_sk->sk); | ||
797 | return 0; | ||
798 | |||
799 | default: | ||
800 | return -ENOPROTOOPT; | ||
801 | } | ||
802 | |||
803 | return 0; | ||
804 | bad_sol: | ||
805 | return -ENOPROTOOPT; | ||
806 | |||
807 | } | ||
808 | |||
/*
 * caif_connect() - Connect a CAIF Socket
 * Copied and modified af_irda.c:irda_connect().
 *
 * Note : by consulting "errno", the user space caller may learn the cause
 * of the failure. Most of them are visible in the function, others may come
 * from subroutines called and are listed here :
 * o -EAFNOSUPPORT: bad socket family or type.
 * o -ESOCKTNOSUPPORT: bad socket type or protocol
 * o -EINVAL: bad socket address, or CAIF link type
 * o -ECONNREFUSED: remote end refused the connection.
 * o -EINPROGRESS: connect request sent but timed out (or non-blocking)
 * o -EISCONN: already connected.
 * o -ETIMEDOUT: Connection timed out (send timeout)
 * o -ENODEV: No link layer to send request
 * o -ECONNRESET: Received Shutdown indication or lost link layer
 * o -ENOMEM: Out of memory
 *
 * State Strategy:
 * o sk_state: holds the CAIF_* protocol state, it's updated by
 *   caif_ctrl_cb.
 * o sock->state: holds the SS_* socket state and is updated by connect and
 *   disconnect.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	long timeo;
	int err;
	lock_sock(sk);

	err = -EAFNOSUPPORT;
	if (uaddr->sa_family != AF_CAIF)
		goto out;

	/* Only SOCK_SEQPACKET, or SOCK_STREAM for the AT protocol. */
	err = -ESOCKTNOSUPPORT;
	if (unlikely(!(sk->sk_type == SOCK_STREAM &&
		cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
		sk->sk_type != SOCK_SEQPACKET))
		goto out;
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Normal case, a fresh connect */
		caif_assert(sk->sk_state == CAIF_DISCONNECTED);
		break;
	case SS_CONNECTING:
		switch (sk->sk_state) {
		case CAIF_CONNECTED:
			sock->state = SS_CONNECTED;
			err = -EISCONN;
			goto out;
		case CAIF_DISCONNECTED:
			/* Reconnect allowed */
			break;
		case CAIF_CONNECTING:
			err = -EALREADY;
			if (flags & O_NONBLOCK)
				goto out;
			goto wait_connect;
		}
		break;
	case SS_CONNECTED:
		caif_assert(sk->sk_state == CAIF_CONNECTED ||
				sk->sk_state == CAIF_DISCONNECTED);
		if (sk->sk_shutdown & SHUTDOWN_MASK) {
			/* Allow re-connect after SHUTDOWN_IND */
			caif_disconnect_client(&cf_sk->layer);
			break;
		}
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
	case SS_FREE:
		/* NOTE(review): caif_assert(1) can never fire; the
		 * "should never happen" intent suggests caif_assert(0) —
		 * confirm caif_assert semantics. */
		caif_assert(1); /*Should never happen */
		break;
	}
	/* Reset to a clean disconnected state before (re)connecting. */
	sk->sk_state = CAIF_DISCONNECTED;
	sock->state = SS_UNCONNECTED;
	sk_stream_kill_queues(&cf_sk->sk);

	err = -EINVAL;
	if (addr_len != sizeof(struct sockaddr_caif) ||
		!uaddr)
		goto out;

	memcpy(&cf_sk->conn_req.sockaddr, uaddr,
		sizeof(struct sockaddr_caif));

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = CAIF_CONNECTING;

	dbfs_atomic_inc(&cnt.num_connect_req);
	cf_sk->layer.receive = caif_sktrecv_cb;
	err = caif_connect_client(&cf_sk->conn_req,
				&cf_sk->layer);
	if (err < 0) {
		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		goto out;
	}

	err = -EINPROGRESS;
wait_connect:

	if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* Sleep until caif_ctrl_cb() moves us out of CAIF_CONNECTING. */
	release_sock(sk);
	err = wait_event_interruptible_timeout(*sk_sleep(sk),
			sk->sk_state != CAIF_CONNECTING,
			timeo);
	lock_sock(sk);
	if (err < 0)
		goto out; /* -ERESTARTSYS */
	if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (sk->sk_state != CAIF_CONNECTED) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);
		if (!err)
			err = -ECONNREFUSED;
		goto out;
	}
	sock->state = SS_CONNECTED;
	err = 0;
out:
	release_sock(sk);
	return err;
}
947 | |||
948 | |||
/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int res = 0;

	if (!sk)
		return 0;

	set_tx_flow_off(cf_sk);

	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock(&sk->sk_receive_queue.lock);
	sock->sk = NULL;

	dbfs_atomic_inc(&cnt.num_disconnect);

	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Ask the CAIF stack to tear the channel down if a connect was
	 * ever initiated. */
	if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
		cf_sk->sk.sk_socket->state == SS_CONNECTING)
		res = caif_disconnect_client(&cf_sk->layer);

	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

	/* Detach from the inode, drop the down-layer and free queues. */
	sock_orphan(sk);
	cf_sk->layer.dn = NULL;
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return res;
}
997 | |||
998 | /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ | ||
999 | static unsigned int caif_poll(struct file *file, | ||
1000 | struct socket *sock, poll_table *wait) | ||
1001 | { | ||
1002 | struct sock *sk = sock->sk; | ||
1003 | unsigned int mask; | ||
1004 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
1005 | |||
1006 | sock_poll_wait(file, sk_sleep(sk), wait); | ||
1007 | mask = 0; | ||
1008 | |||
1009 | /* exceptional events? */ | ||
1010 | if (sk->sk_err) | ||
1011 | mask |= POLLERR; | ||
1012 | if (sk->sk_shutdown == SHUTDOWN_MASK) | ||
1013 | mask |= POLLHUP; | ||
1014 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
1015 | mask |= POLLRDHUP; | ||
1016 | |||
1017 | /* readable? */ | ||
1018 | if (!skb_queue_empty(&sk->sk_receive_queue) || | ||
1019 | (sk->sk_shutdown & RCV_SHUTDOWN)) | ||
1020 | mask |= POLLIN | POLLRDNORM; | ||
1021 | |||
1022 | /* Connection-based need to check for termination and startup */ | ||
1023 | if (sk->sk_state == CAIF_DISCONNECTED) | ||
1024 | mask |= POLLHUP; | ||
1025 | |||
1026 | /* | ||
1027 | * we set writable also when the other side has shut down the | ||
1028 | * connection. This prevents stuck sockets. | ||
1029 | */ | ||
1030 | if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) | ||
1031 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | ||
1032 | |||
1033 | return mask; | ||
1034 | } | ||
1035 | |||
/* Socket operations table for SOCK_SEQPACKET CAIF sockets. */
static const struct proto_ops caif_seqpacket_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
1056 | |||
/* Socket operations table for SOCK_STREAM CAIF sockets; differs from the
 * seqpacket table only in the sendmsg/recvmsg handlers. */
static const struct proto_ops caif_stream_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
1077 | |||
1078 | /* This function is called when a socket is finally destroyed. */ | ||
1079 | static void caif_sock_destructor(struct sock *sk) | ||
1080 | { | ||
1081 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
1082 | caif_assert(!atomic_read(&sk->sk_wmem_alloc)); | ||
1083 | caif_assert(sk_unhashed(sk)); | ||
1084 | caif_assert(!sk->sk_socket); | ||
1085 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1086 | pr_info("Attempt to release alive CAIF socket: %p\n", sk); | ||
1087 | return; | ||
1088 | } | ||
1089 | sk_stream_kill_queues(&cf_sk->sk); | ||
1090 | dbfs_atomic_dec(&cnt.caif_nr_socks); | ||
1091 | } | ||
1092 | |||
/*
 * caif_create() - PF_CAIF socket creation (net_proto_family .create hook).
 *
 * Requires CAP_SYS_ADMIN or CAP_NET_ADMIN.  Supports SOCK_SEQPACKET and
 * SOCK_STREAM types; the protocol number must be below CAIFPROTO_MAX.
 * Returns 0 on success or a negative errno.
 */
static int caif_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct sock *sk = NULL;
	struct caifsock *cf_sk = NULL;
	/* One shared proto descriptor for all CAIF sockets. */
	static struct proto prot = {.name = "PF_CAIF",
		.owner = THIS_MODULE,
		.obj_size = sizeof(struct caifsock),
	};

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
		return -EPERM;
	/*
	 * The sock->type specifies the socket type to use.
	 * The CAIF socket is a packet stream in the sense
	 * that it is packet based. CAIF trusts the reliability
	 * of the link, no resending is implemented.
	 */
	if (sock->type == SOCK_SEQPACKET)
		sock->ops = &caif_seqpacket_ops;
	else if (sock->type == SOCK_STREAM)
		sock->ops = &caif_stream_ops;
	else
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= CAIFPROTO_MAX)
		return -EPROTONOSUPPORT;
	/*
	 * Set the socket state to unconnected. The socket state
	 * is really not used at all in the net/core or socket.c but the
	 * initialization makes sure that sock->state is not uninitialized.
	 */
	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
	if (!sk)
		return -ENOMEM;

	cf_sk = container_of(sk, struct caifsock, sk);

	/* Store the protocol */
	sk->sk_protocol = (unsigned char) protocol;

	/* Sendbuf dictates the amount of outbound packets not yet sent */
	sk->sk_sndbuf = CAIF_DEF_SNDBUF;
	sk->sk_rcvbuf = CAIF_DEF_RCVBUF;

	/*
	 * Lock in order to try to stop someone from opening the socket
	 * too early.
	 */
	lock_sock(&(cf_sk->sk));

	/* Initialize the nozero default sock structure data. */
	sock_init_data(sock, sk);
	sk->sk_destruct = caif_sock_destructor;

	mutex_init(&cf_sk->readlock); /* single task reading lock */
	cf_sk->layer.ctrlcmd = caif_ctrl_cb;
	cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
	cf_sk->sk.sk_state = CAIF_DISCONNECTED;

	/* TX starts off until the channel is connected; RX is accepted. */
	set_tx_flow_off(cf_sk);
	set_rx_flow_on(cf_sk);

	/* Set default options on configuration */
	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
	cf_sk->conn_req.protocol = protocol;
	/* Increase the number of sockets created. */
	dbfs_atomic_inc(&cnt.caif_nr_socks);
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR(debugfsdir)) {
		/* Fill in some information concerning the misc socket. */
		snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
				atomic_read(&cnt.caif_nr_socks));

		/* Per-socket debugfs directory with raw state counters.
		 * NOTE(review): atomic_t/sk fields are exposed via u32
		 * casts — assumes matching layout, confirm. */
		cf_sk->debugfs_socket_dir =
			debugfs_create_dir(cf_sk->name, debugfsdir);
		debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
				cf_sk->debugfs_socket_dir,
				(u32 *) &cf_sk->sk.sk_state);
		debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
				cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
		debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
				cf_sk->debugfs_socket_dir,
				(u32 *) &cf_sk->sk.sk_rmem_alloc);
		debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
				cf_sk->debugfs_socket_dir,
				(u32 *) &cf_sk->sk.sk_wmem_alloc);
		debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
				cf_sk->debugfs_socket_dir,
				(u32 *) &cf_sk->layer.id);
	}
#endif
	release_sock(&cf_sk->sk);
	return 0;
}
1189 | |||
1190 | |||
/* PF_CAIF address-family descriptor; caif_create() builds each socket. */
static struct net_proto_family caif_family_ops = {
	.family = PF_CAIF,
	.create = caif_create,
	.owner = THIS_MODULE,
};
1196 | |||
1197 | int af_caif_init(void) | ||
1198 | { | ||
1199 | int err = sock_register(&caif_family_ops); | ||
1200 | if (!err) | ||
1201 | return err; | ||
1202 | return 0; | ||
1203 | } | ||
1204 | |||
1205 | static int __init caif_sktinit_module(void) | ||
1206 | { | ||
1207 | #ifdef CONFIG_DEBUG_FS | ||
1208 | debugfsdir = debugfs_create_dir("caif_sk", NULL); | ||
1209 | if (!IS_ERR(debugfsdir)) { | ||
1210 | debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, | ||
1211 | debugfsdir, | ||
1212 | (u32 *) &cnt.caif_nr_socks); | ||
1213 | debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR, | ||
1214 | debugfsdir, | ||
1215 | (u32 *) &cnt.num_connect_req); | ||
1216 | debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR, | ||
1217 | debugfsdir, | ||
1218 | (u32 *) &cnt.num_connect_resp); | ||
1219 | debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR, | ||
1220 | debugfsdir, | ||
1221 | (u32 *) &cnt.num_connect_fail_resp); | ||
1222 | debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR, | ||
1223 | debugfsdir, | ||
1224 | (u32 *) &cnt.num_disconnect); | ||
1225 | debugfs_create_u32("num_remote_shutdown_ind", | ||
1226 | S_IRUSR | S_IWUSR, debugfsdir, | ||
1227 | (u32 *) &cnt.num_remote_shutdown_ind); | ||
1228 | debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR, | ||
1229 | debugfsdir, | ||
1230 | (u32 *) &cnt.num_tx_flow_off_ind); | ||
1231 | debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR, | ||
1232 | debugfsdir, | ||
1233 | (u32 *) &cnt.num_tx_flow_on_ind); | ||
1234 | debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR, | ||
1235 | debugfsdir, | ||
1236 | (u32 *) &cnt.num_rx_flow_off); | ||
1237 | debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR, | ||
1238 | debugfsdir, | ||
1239 | (u32 *) &cnt.num_rx_flow_on); | ||
1240 | } | ||
1241 | #endif | ||
1242 | return af_caif_init(); | ||
1243 | } | ||
1244 | |||
1245 | static void __exit caif_sktexit_module(void) | ||
1246 | { | ||
1247 | sock_unregister(PF_CAIF); | ||
1248 | if (debugfsdir != NULL) | ||
1249 | debugfs_remove_recursive(debugfsdir); | ||
1250 | } | ||
1251 | module_init(caif_sktinit_module); | ||
1252 | module_exit(caif_sktexit_module); | ||
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c new file mode 100644 index 000000000000..471c62939fad --- /dev/null +++ b/net/caif/cfcnfg.c | |||
@@ -0,0 +1,471 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | #include <net/caif/cfcnfg.h> | ||
12 | #include <net/caif/cfctrl.h> | ||
13 | #include <net/caif/cfmuxl.h> | ||
14 | #include <net/caif/cffrml.h> | ||
15 | #include <net/caif/cfserl.h> | ||
16 | #include <net/caif/cfsrvl.h> | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | #define MAX_PHY_LAYERS 7 | ||
22 | #define PHY_NAME_LEN 20 | ||
23 | |||
24 | #define container_obj(layr) container_of(layr, struct cfcnfg, layer) | ||
25 | |||
26 | /* Information about CAIF physical interfaces held by Config Module in order | ||
27 | * to manage physical interfaces | ||
28 | */ | ||
29 | struct cfcnfg_phyinfo { | ||
30 | /* Pointer to the layer below the MUX (framing layer) */ | ||
31 | struct cflayer *frm_layer; | ||
32 | /* Pointer to the lowest actual physical layer */ | ||
33 | struct cflayer *phy_layer; | ||
34 | /* Unique identifier of the physical interface */ | ||
35 | unsigned int id; | ||
36 | /* Preference of the physical in interface */ | ||
37 | enum cfcnfg_phy_preference pref; | ||
38 | |||
39 | /* Reference count, number of channels using the device */ | ||
40 | int phy_ref_count; | ||
41 | |||
42 | /* Information about the physical device */ | ||
43 | struct dev_info dev_info; | ||
44 | }; | ||
45 | |||
46 | struct cfcnfg { | ||
47 | struct cflayer layer; | ||
48 | struct cflayer *ctrl; | ||
49 | struct cflayer *mux; | ||
50 | u8 last_phyid; | ||
51 | struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS]; | ||
52 | }; | ||
53 | |||
54 | static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, | ||
55 | enum cfctrl_srv serv, u8 phyid, | ||
56 | struct cflayer *adapt_layer); | ||
57 | static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id); | ||
58 | static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, | ||
59 | struct cflayer *adapt_layer); | ||
60 | static void cfctrl_resp_func(void); | ||
61 | static void cfctrl_enum_resp(void); | ||
62 | |||
63 | struct cfcnfg *cfcnfg_create(void) | ||
64 | { | ||
65 | struct cfcnfg *this; | ||
66 | struct cfctrl_rsp *resp; | ||
67 | /* Initiate this layer */ | ||
68 | this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC); | ||
69 | if (!this) { | ||
70 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
71 | return NULL; | ||
72 | } | ||
73 | memset(this, 0, sizeof(struct cfcnfg)); | ||
74 | this->mux = cfmuxl_create(); | ||
75 | if (!this->mux) | ||
76 | goto out_of_mem; | ||
77 | this->ctrl = cfctrl_create(); | ||
78 | if (!this->ctrl) | ||
79 | goto out_of_mem; | ||
80 | /* Initiate response functions */ | ||
81 | resp = cfctrl_get_respfuncs(this->ctrl); | ||
82 | resp->enum_rsp = cfctrl_enum_resp; | ||
83 | resp->linkerror_ind = cfctrl_resp_func; | ||
84 | resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp; | ||
85 | resp->sleep_rsp = cfctrl_resp_func; | ||
86 | resp->wake_rsp = cfctrl_resp_func; | ||
87 | resp->restart_rsp = cfctrl_resp_func; | ||
88 | resp->radioset_rsp = cfctrl_resp_func; | ||
89 | resp->linksetup_rsp = cfcnfg_linkup_rsp; | ||
90 | resp->reject_rsp = cfcnfg_reject_rsp; | ||
91 | |||
92 | this->last_phyid = 1; | ||
93 | |||
94 | cfmuxl_set_uplayer(this->mux, this->ctrl, 0); | ||
95 | layer_set_dn(this->ctrl, this->mux); | ||
96 | layer_set_up(this->ctrl, this); | ||
97 | return this; | ||
98 | out_of_mem: | ||
99 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
100 | kfree(this->mux); | ||
101 | kfree(this->ctrl); | ||
102 | kfree(this); | ||
103 | return NULL; | ||
104 | } | ||
105 | EXPORT_SYMBOL(cfcnfg_create); | ||
106 | |||
107 | void cfcnfg_remove(struct cfcnfg *cfg) | ||
108 | { | ||
109 | if (cfg) { | ||
110 | kfree(cfg->mux); | ||
111 | kfree(cfg->ctrl); | ||
112 | kfree(cfg); | ||
113 | } | ||
114 | } | ||
115 | |||
/* Intentionally empty: default handler for control responses that need
 * no action (link error, sleep, wake, restart, radioset). */
static void cfctrl_resp_func(void)
{
}
119 | |||
/* Intentionally empty: enumeration responses require no action here. */
static void cfctrl_enum_resp(void)
{
}
123 | |||
124 | struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, | ||
125 | enum cfcnfg_phy_preference phy_pref) | ||
126 | { | ||
127 | u16 i; | ||
128 | |||
129 | /* Try to match with specified preference */ | ||
130 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
131 | if (cnfg->phy_layers[i].id == i && | ||
132 | cnfg->phy_layers[i].pref == phy_pref && | ||
133 | cnfg->phy_layers[i].frm_layer != NULL) { | ||
134 | caif_assert(cnfg->phy_layers != NULL); | ||
135 | caif_assert(cnfg->phy_layers[i].id == i); | ||
136 | return &cnfg->phy_layers[i].dev_info; | ||
137 | } | ||
138 | } | ||
139 | /* Otherwise just return something */ | ||
140 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
141 | if (cnfg->phy_layers[i].id == i) { | ||
142 | caif_assert(cnfg->phy_layers != NULL); | ||
143 | caif_assert(cnfg->phy_layers[i].id == i); | ||
144 | return &cnfg->phy_layers[i].dev_info; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, | ||
152 | u8 phyid) | ||
153 | { | ||
154 | int i; | ||
155 | /* Try to match with specified preference */ | ||
156 | for (i = 0; i < MAX_PHY_LAYERS; i++) | ||
157 | if (cnfg->phy_layers[i].frm_layer != NULL && | ||
158 | cnfg->phy_layers[i].id == phyid) | ||
159 | return &cnfg->phy_layers[i]; | ||
160 | return NULL; | ||
161 | } | ||
162 | |||
163 | int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) | ||
164 | { | ||
165 | int i; | ||
166 | |||
167 | /* Try to match with specified name */ | ||
168 | for (i = 0; i < MAX_PHY_LAYERS; i++) { | ||
169 | if (cnfg->phy_layers[i].frm_layer != NULL | ||
170 | && strcmp(cnfg->phy_layers[i].phy_layer->name, | ||
171 | name) == 0) | ||
172 | return cnfg->phy_layers[i].frm_layer->id; | ||
173 | } | ||
174 | return 0; | ||
175 | } | ||
176 | |||
/*
 * cfcnfg_disconn_adapt_layer() - Disconnect an adaptation layer's channel.
 *
 * Removes the service layer from the mux, requests link-down from the
 * modem, drops the phy reference count (signalling PHYIF_USELESS when it
 * reaches zero) and finally notifies the client with DEINIT_RSP.
 */
int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
{
	u8 channel_id = 0;
	int ret = 0;
	struct cflayer *servl = NULL;
	struct cfcnfg_phyinfo *phyinfo = NULL;
	u8 phyid = 0;
	caif_assert(adap_layer != NULL);
	channel_id = adap_layer->id;
	if (adap_layer->dn == NULL || channel_id == 0) {
		pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
		ret = -ENOTCONN;
		goto end;
	}
	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
	if (servl == NULL)
		goto end;
	layer_set_up(servl, NULL);
	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
	/* NOTE(review): dead branch — servl was already checked for NULL
	 * above, so this error path can never be taken. */
	if (servl == NULL) {
		pr_err("CAIF: %s(): PROTOCOL ERROR "
			"- Error removing service_layer Channel_Id(%d)",
			__func__, channel_id);
		ret = -EINVAL;
		goto end;
	}
	caif_assert(channel_id == servl->id);
	if (adap_layer->dn != NULL) {
		phyid = cfsrvl_getphyid(adap_layer->dn);

		phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
		if (phyinfo == NULL) {
			pr_warning("CAIF: %s(): "
				"No interface to send disconnect to\n",
				__func__);
			ret = -ENODEV;
			goto end;
		}
		if (phyinfo->id != phyid ||
			phyinfo->phy_layer->id != phyid ||
			phyinfo->frm_layer->id != phyid) {
			pr_err("CAIF: %s(): "
				"Inconsistency in phy registration\n",
				__func__);
			ret = -EINVAL;
			goto end;
		}
	}
	/* Last channel on this phy gone: tell the driver it is unused. */
	if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
		phyinfo->phy_layer != NULL &&
		phyinfo->phy_layer->modemcmd != NULL) {
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USELESS);
	}
end:
	/* NOTE(review): on early-error paths servl is still NULL here —
	 * confirm cfsrvl_put() tolerates a NULL layer. */
	cfsrvl_put(servl);
	cfctrl_cancel_req(cnfg->ctrl, adap_layer);
	if (adap_layer->ctrlcmd != NULL)
		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
	return ret;

}
EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
240 | |||
241 | void cfcnfg_release_adap_layer(struct cflayer *adap_layer) | ||
242 | { | ||
243 | if (adap_layer->dn) | ||
244 | cfsrvl_put(adap_layer->dn); | ||
245 | } | ||
246 | EXPORT_SYMBOL(cfcnfg_release_adap_layer); | ||
247 | |||
/* Intentionally empty: no action is taken on a link-destroy response. */
static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
251 | |||
252 | int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, | ||
253 | struct cfctrl_link_param *param, | ||
254 | struct cflayer *adap_layer) | ||
255 | { | ||
256 | struct cflayer *frml; | ||
257 | if (adap_layer == NULL) { | ||
258 | pr_err("CAIF: %s(): adap_layer is zero", __func__); | ||
259 | return -EINVAL; | ||
260 | } | ||
261 | if (adap_layer->receive == NULL) { | ||
262 | pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__); | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | if (adap_layer->ctrlcmd == NULL) { | ||
266 | pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__); | ||
267 | return -EINVAL; | ||
268 | } | ||
269 | frml = cnfg->phy_layers[param->phyid].frm_layer; | ||
270 | if (frml == NULL) { | ||
271 | pr_err("CAIF: %s(): Specified PHY type does not exist!", | ||
272 | __func__); | ||
273 | return -ENODEV; | ||
274 | } | ||
275 | caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); | ||
276 | caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == | ||
277 | param->phyid); | ||
278 | caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == | ||
279 | param->phyid); | ||
280 | /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ | ||
281 | cfctrl_enum_req(cnfg->ctrl, param->phyid); | ||
282 | return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); | ||
283 | } | ||
284 | EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); | ||
285 | |||
286 | static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, | ||
287 | struct cflayer *adapt_layer) | ||
288 | { | ||
289 | if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) | ||
290 | adapt_layer->ctrlcmd(adapt_layer, | ||
291 | CAIF_CTRLCMD_INIT_FAIL_RSP, 0); | ||
292 | } | ||
293 | |||
/*
 * cfcnfg_linkup_rsp() - Handle a successful link-setup response.
 *
 * Creates the service layer matching the negotiated channel type,
 * inserts it into the mux and splices it between the mux and the
 * client's adaptation layer, then delivers INIT_RSP to the client.
 */
static void
cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
		  u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	/* Client vanished before the response arrived: tear the link
	 * back down instead of wiring up an orphan channel. */
	if (adapt_layer == NULL) {
		pr_debug("CAIF: %s(): link setup response "
				"but no client exist, send linkdown back\n",
				__func__);
		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
		return;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);
	phyinfo = &cnfg->phy_layers[phyid];
	caif_assert(phyinfo != NULL);
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	/* First channel on this phy: tell the driver it is now in use.
	 * Note the ref count is incremented by the ++ inside this test. */
	if (phyinfo != NULL &&
	    phyinfo->phy_ref_count++ == 0 &&
	    phyinfo->phy_layer != NULL &&
	    phyinfo->phy_layer->modemcmd != NULL) {
		caif_assert(phyinfo->phy_layer->id == phyid);
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USEFULL);

	}
	adapt_layer->id = channel_id;

	/* Instantiate the service layer for the negotiated channel type. */
	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
		break;
	default:
		pr_err("CAIF: %s(): Protocol error. "
			"Link setup response - unknown channel type\n",
			__func__);
		return;
	}
	if (!servicel) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return;
	}
	/* Splice: mux <-> servicel <-> adapt_layer. */
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);
	cfsrvl_get(servicel);
	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
}
364 | |||
365 | void | ||
366 | cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, | ||
367 | void *dev, struct cflayer *phy_layer, u16 *phyid, | ||
368 | enum cfcnfg_phy_preference pref, | ||
369 | bool fcs, bool stx) | ||
370 | { | ||
371 | struct cflayer *frml; | ||
372 | struct cflayer *phy_driver = NULL; | ||
373 | int i; | ||
374 | |||
375 | |||
376 | if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) { | ||
377 | *phyid = cnfg->last_phyid; | ||
378 | |||
379 | /* range: * 1..(MAX_PHY_LAYERS-1) */ | ||
380 | cnfg->last_phyid = | ||
381 | (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; | ||
382 | } else { | ||
383 | *phyid = 0; | ||
384 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
385 | if (cnfg->phy_layers[i].frm_layer == NULL) { | ||
386 | *phyid = i; | ||
387 | break; | ||
388 | } | ||
389 | } | ||
390 | } | ||
391 | if (*phyid == 0) { | ||
392 | pr_err("CAIF: %s(): No Available PHY ID\n", __func__); | ||
393 | return; | ||
394 | } | ||
395 | |||
396 | switch (phy_type) { | ||
397 | case CFPHYTYPE_FRAG: | ||
398 | phy_driver = | ||
399 | cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); | ||
400 | if (!phy_driver) { | ||
401 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
402 | return; | ||
403 | } | ||
404 | |||
405 | break; | ||
406 | case CFPHYTYPE_CAIF: | ||
407 | phy_driver = NULL; | ||
408 | break; | ||
409 | default: | ||
410 | pr_err("CAIF: %s(): %d", __func__, phy_type); | ||
411 | return; | ||
412 | break; | ||
413 | } | ||
414 | |||
415 | phy_layer->id = *phyid; | ||
416 | cnfg->phy_layers[*phyid].pref = pref; | ||
417 | cnfg->phy_layers[*phyid].id = *phyid; | ||
418 | cnfg->phy_layers[*phyid].dev_info.id = *phyid; | ||
419 | cnfg->phy_layers[*phyid].dev_info.dev = dev; | ||
420 | cnfg->phy_layers[*phyid].phy_layer = phy_layer; | ||
421 | cnfg->phy_layers[*phyid].phy_ref_count = 0; | ||
422 | phy_layer->type = phy_type; | ||
423 | frml = cffrml_create(*phyid, fcs); | ||
424 | if (!frml) { | ||
425 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
426 | return; | ||
427 | } | ||
428 | cnfg->phy_layers[*phyid].frm_layer = frml; | ||
429 | cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid); | ||
430 | layer_set_up(frml, cnfg->mux); | ||
431 | |||
432 | if (phy_driver != NULL) { | ||
433 | phy_driver->id = *phyid; | ||
434 | layer_set_dn(frml, phy_driver); | ||
435 | layer_set_up(phy_driver, frml); | ||
436 | layer_set_dn(phy_driver, phy_layer); | ||
437 | layer_set_up(phy_layer, phy_driver); | ||
438 | } else { | ||
439 | layer_set_dn(frml, phy_layer); | ||
440 | layer_set_up(phy_layer, frml); | ||
441 | } | ||
442 | } | ||
443 | EXPORT_SYMBOL(cfcnfg_add_phy_layer); | ||
444 | |||
/*
 * cfcnfg_del_phy_layer() - Unregister a physical interface.
 *
 * Clears the phy slot, unplugs the framing layer from the mux and frees
 * it; if a serial layer sits between framing and phy it is freed too.
 * The caller's phy_layer itself is only detached, never freed here.
 */
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	phyid = phy_layer->id;
	caif_assert(phyid == cnfg->phy_layers[phyid].id);
	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);

	/* Clear the slot first so the id can be reused. */
	memset(&cnfg->phy_layers[phy_layer->id], 0,
		sizeof(struct cfcnfg_phyinfo));
	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);
	kfree(frml);

	/* frml_dn is the serial layer only when it differs from the
	 * caller-owned phy_layer; in that case we own and free it. */
	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
		kfree(frml_dn);
	}
	layer_set_up(phy_layer, NULL);
	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c new file mode 100644 index 000000000000..a521d32cfe56 --- /dev/null +++ b/net/caif/cfctrl.c | |||
@@ -0,0 +1,693 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | #include <net/caif/cfctrl.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfctrl, serv.layer) | ||
15 | #define UTILITY_NAME_LENGTH 16 | ||
16 | #define CFPKT_CTRL_PKT_LEN 20 | ||
17 | |||
18 | |||
/*
 * With CAIF_NO_LOOP the loopback handler is stubbed out and always reports
 * failure; otherwise the real implementation at the bottom of this file
 * emulates the modem side of the control protocol.
 */
#ifdef CAIF_NO_LOOP
static int handle_loop(struct cfctrl *ctrl,
			int cmd, struct cfpkt *pkt){
	return CAIF_FAILURE;
}
#else
static int handle_loop(struct cfctrl *ctrl,
		       int cmd, struct cfpkt *pkt);
#endif
/* Layer callbacks installed by cfctrl_create(). */
static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);
31 | |||
32 | |||
33 | struct cflayer *cfctrl_create(void) | ||
34 | { | ||
35 | struct dev_info dev_info; | ||
36 | struct cfctrl *this = | ||
37 | kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); | ||
38 | if (!this) { | ||
39 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
40 | return NULL; | ||
41 | } | ||
42 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); | ||
43 | memset(&dev_info, 0, sizeof(dev_info)); | ||
44 | dev_info.id = 0xff; | ||
45 | memset(this, 0, sizeof(*this)); | ||
46 | cfsrvl_init(&this->serv, 0, &dev_info); | ||
47 | spin_lock_init(&this->info_list_lock); | ||
48 | atomic_set(&this->req_seq_no, 1); | ||
49 | atomic_set(&this->rsp_seq_no, 1); | ||
50 | this->serv.layer.receive = cfctrl_recv; | ||
51 | sprintf(this->serv.layer.name, "ctrl"); | ||
52 | this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; | ||
53 | spin_lock_init(&this->loop_linkid_lock); | ||
54 | this->loop_linkid = 1; | ||
55 | return &this->serv.layer; | ||
56 | } | ||
57 | |||
58 | static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) | ||
59 | { | ||
60 | bool eq = | ||
61 | p1->linktype == p2->linktype && | ||
62 | p1->priority == p2->priority && | ||
63 | p1->phyid == p2->phyid && | ||
64 | p1->endpoint == p2->endpoint && p1->chtype == p2->chtype; | ||
65 | |||
66 | if (!eq) | ||
67 | return false; | ||
68 | |||
69 | switch (p1->linktype) { | ||
70 | case CFCTRL_SRV_VEI: | ||
71 | return true; | ||
72 | case CFCTRL_SRV_DATAGRAM: | ||
73 | return p1->u.datagram.connid == p2->u.datagram.connid; | ||
74 | case CFCTRL_SRV_RFM: | ||
75 | return | ||
76 | p1->u.rfm.connid == p2->u.rfm.connid && | ||
77 | strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0; | ||
78 | case CFCTRL_SRV_UTIL: | ||
79 | return | ||
80 | p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb | ||
81 | && p1->u.utility.fifosize_bufs == | ||
82 | p2->u.utility.fifosize_bufs | ||
83 | && strcmp(p1->u.utility.name, p2->u.utility.name) == 0 | ||
84 | && p1->u.utility.paramlen == p2->u.utility.paramlen | ||
85 | && memcmp(p1->u.utility.params, p2->u.utility.params, | ||
86 | p1->u.utility.paramlen) == 0; | ||
87 | |||
88 | case CFCTRL_SRV_VIDEO: | ||
89 | return p1->u.video.connid == p2->u.video.connid; | ||
90 | case CFCTRL_SRV_DBG: | ||
91 | return true; | ||
92 | case CFCTRL_SRV_DECM: | ||
93 | return false; | ||
94 | default: | ||
95 | return false; | ||
96 | } | ||
97 | return false; | ||
98 | } | ||
99 | |||
100 | bool cfctrl_req_eq(struct cfctrl_request_info *r1, | ||
101 | struct cfctrl_request_info *r2) | ||
102 | { | ||
103 | if (r1->cmd != r2->cmd) | ||
104 | return false; | ||
105 | if (r1->cmd == CFCTRL_CMD_LINK_SETUP) | ||
106 | return param_eq(&r1->param, &r2->param); | ||
107 | else | ||
108 | return r1->channel_id == r2->channel_id; | ||
109 | } | ||
110 | |||
111 | /* Insert request at the end */ | ||
112 | void cfctrl_insert_req(struct cfctrl *ctrl, | ||
113 | struct cfctrl_request_info *req) | ||
114 | { | ||
115 | struct cfctrl_request_info *p; | ||
116 | spin_lock(&ctrl->info_list_lock); | ||
117 | req->next = NULL; | ||
118 | atomic_inc(&ctrl->req_seq_no); | ||
119 | req->sequence_no = atomic_read(&ctrl->req_seq_no); | ||
120 | if (ctrl->first_req == NULL) { | ||
121 | ctrl->first_req = req; | ||
122 | spin_unlock(&ctrl->info_list_lock); | ||
123 | return; | ||
124 | } | ||
125 | p = ctrl->first_req; | ||
126 | while (p->next != NULL) | ||
127 | p = p->next; | ||
128 | p->next = req; | ||
129 | spin_unlock(&ctrl->info_list_lock); | ||
130 | } | ||
131 | |||
/* Compare and remove request */
/*
 * Find the first queued request matching @req (per cfctrl_req_eq), unlink
 * and return it; the caller takes ownership and must kfree() it.  Updates
 * rsp_seq_no to the removed request's sequence number.  Returns NULL when
 * the list is empty or nothing matches (both cases are logged).
 */
struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
					      struct cfctrl_request_info *req)
{
	struct cfctrl_request_info *p;
	struct cfctrl_request_info *ret;

	spin_lock(&ctrl->info_list_lock);
	if (ctrl->first_req == NULL) {
		spin_unlock(&ctrl->info_list_lock);
		return NULL;
	}

	/* Fast path: responses normally arrive in request order. */
	if (cfctrl_req_eq(req, ctrl->first_req)) {
		ret = ctrl->first_req;
		caif_assert(ctrl->first_req);
		atomic_set(&ctrl->rsp_seq_no,
			 ctrl->first_req->sequence_no);
		ctrl->first_req = ctrl->first_req->next;
		spin_unlock(&ctrl->info_list_lock);
		return ret;
	}

	p = ctrl->first_req;

	/* Out-of-order response: search the rest of the list. */
	while (p->next != NULL) {
		if (cfctrl_req_eq(req, p->next)) {
			pr_warning("CAIF: %s(): Requests are not "
				   "received in order\n",
				   __func__);
			ret = p->next;
			atomic_set(&ctrl->rsp_seq_no,
				p->next->sequence_no);
			p->next = p->next->next;
			spin_unlock(&ctrl->info_list_lock);
			return ret;
		}
		p = p->next;
	}
	spin_unlock(&ctrl->info_list_lock);

	pr_warning("CAIF: %s(): Request does not match\n",
		   __func__);
	return NULL;
}
177 | |||
178 | struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) | ||
179 | { | ||
180 | struct cfctrl *this = container_obj(layer); | ||
181 | return &this->res; | ||
182 | } | ||
183 | |||
/* Wire the layer below the control layer. */
void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
{
	this->dn = dn;
}
188 | |||
/* Wire the layer above the control layer. */
void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
{
	this->up = up;
}
193 | |||
/* Fill a packet's payload info so the mux routes it as control traffic. */
static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
{
	info->hdr_len = 0;
	info->channel_id = cfctrl->serv.layer.id;
	info->dev_info = &cfctrl->serv.dev_info;
}
200 | |||
/*
 * Send an ENUM request on the physical interface @physlinkid.
 * Best-effort: allocation or transmit failure is logged and the request
 * is dropped (no return value).
 */
void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
{
	struct cfctrl *cfctrl = container_obj(layer);
	int ret;
	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
	if (!pkt) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return;
	}
	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
	init_info(cfpkt_info(pkt), cfctrl);
	/* Pin both the packet and the service to the requested phy. */
	cfpkt_info(pkt)->dev_info->id = physlinkid;
	cfctrl->serv.dev_info.id = physlinkid;
	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
	cfpkt_addbdy(pkt, physlinkid);
	ret =
	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
	if (ret < 0) {
		pr_err("CAIF: %s(): Could not transmit enum message\n",
			__func__);
		cfpkt_destroy(pkt);
	}
}
224 | |||
225 | int cfctrl_linkup_request(struct cflayer *layer, | ||
226 | struct cfctrl_link_param *param, | ||
227 | struct cflayer *user_layer) | ||
228 | { | ||
229 | struct cfctrl *cfctrl = container_obj(layer); | ||
230 | u32 tmp32; | ||
231 | u16 tmp16; | ||
232 | u8 tmp8; | ||
233 | struct cfctrl_request_info *req; | ||
234 | int ret; | ||
235 | char utility_name[16]; | ||
236 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
237 | if (!pkt) { | ||
238 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
239 | return -ENOMEM; | ||
240 | } | ||
241 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); | ||
242 | cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); | ||
243 | cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid); | ||
244 | cfpkt_addbdy(pkt, param->endpoint & 0x03); | ||
245 | |||
246 | switch (param->linktype) { | ||
247 | case CFCTRL_SRV_VEI: | ||
248 | break; | ||
249 | case CFCTRL_SRV_VIDEO: | ||
250 | cfpkt_addbdy(pkt, (u8) param->u.video.connid); | ||
251 | break; | ||
252 | case CFCTRL_SRV_DBG: | ||
253 | break; | ||
254 | case CFCTRL_SRV_DATAGRAM: | ||
255 | tmp32 = cpu_to_le32(param->u.datagram.connid); | ||
256 | cfpkt_add_body(pkt, &tmp32, 4); | ||
257 | break; | ||
258 | case CFCTRL_SRV_RFM: | ||
259 | /* Construct a frame, convert DatagramConnectionID to network | ||
260 | * format long and copy it out... | ||
261 | */ | ||
262 | tmp32 = cpu_to_le32(param->u.rfm.connid); | ||
263 | cfpkt_add_body(pkt, &tmp32, 4); | ||
264 | /* Add volume name, including zero termination... */ | ||
265 | cfpkt_add_body(pkt, param->u.rfm.volume, | ||
266 | strlen(param->u.rfm.volume) + 1); | ||
267 | break; | ||
268 | case CFCTRL_SRV_UTIL: | ||
269 | tmp16 = cpu_to_le16(param->u.utility.fifosize_kb); | ||
270 | cfpkt_add_body(pkt, &tmp16, 2); | ||
271 | tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); | ||
272 | cfpkt_add_body(pkt, &tmp16, 2); | ||
273 | memset(utility_name, 0, sizeof(utility_name)); | ||
274 | strncpy(utility_name, param->u.utility.name, | ||
275 | UTILITY_NAME_LENGTH - 1); | ||
276 | cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); | ||
277 | tmp8 = param->u.utility.paramlen; | ||
278 | cfpkt_add_body(pkt, &tmp8, 1); | ||
279 | cfpkt_add_body(pkt, param->u.utility.params, | ||
280 | param->u.utility.paramlen); | ||
281 | break; | ||
282 | default: | ||
283 | pr_warning("CAIF: %s():Request setup of bad link type = %d\n", | ||
284 | __func__, param->linktype); | ||
285 | return -EINVAL; | ||
286 | } | ||
287 | req = kmalloc(sizeof(*req), GFP_KERNEL); | ||
288 | if (!req) { | ||
289 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
290 | return -ENOMEM; | ||
291 | } | ||
292 | memset(req, 0, sizeof(*req)); | ||
293 | req->client_layer = user_layer; | ||
294 | req->cmd = CFCTRL_CMD_LINK_SETUP; | ||
295 | req->param = *param; | ||
296 | cfctrl_insert_req(cfctrl, req); | ||
297 | init_info(cfpkt_info(pkt), cfctrl); | ||
298 | /* | ||
299 | * NOTE:Always send linkup and linkdown request on the same | ||
300 | * device as the payload. Otherwise old queued up payload | ||
301 | * might arrive with the newly allocated channel ID. | ||
302 | */ | ||
303 | cfpkt_info(pkt)->dev_info->id = param->phyid; | ||
304 | ret = | ||
305 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
306 | if (ret < 0) { | ||
307 | pr_err("CAIF: %s(): Could not transmit linksetup request\n", | ||
308 | __func__); | ||
309 | cfpkt_destroy(pkt); | ||
310 | return -ENODEV; | ||
311 | } | ||
312 | return 0; | ||
313 | } | ||
314 | |||
/*
 * Send a LINK_DESTROY request for @channelid.  @client is accepted for
 * interface symmetry but not used here.  Returns the transmit result,
 * or -ENOMEM on allocation failure; the packet is destroyed on error.
 */
int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
			struct cflayer *client)
{
	int ret;
	struct cfctrl *cfctrl = container_obj(layer);
	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
	if (!pkt) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return -ENOMEM;
	}
	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
	cfpkt_addbdy(pkt, channelid);
	init_info(cfpkt_info(pkt), cfctrl);
	ret =
	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
	if (ret < 0) {
		pr_err("CAIF: %s(): Could not transmit link-down request\n",
			__func__);
		cfpkt_destroy(pkt);
	}
	return ret;
}
337 | |||
/* Send a SLEEP request; best-effort, failures are silently dropped. */
void cfctrl_sleep_req(struct cflayer *layer)
{
	int ret;
	struct cfctrl *cfctrl = container_obj(layer);
	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
	if (!pkt) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return;
	}
	cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
	init_info(cfpkt_info(pkt), cfctrl);
	ret =
	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
	if (ret < 0)
		cfpkt_destroy(pkt);
}
354 | |||
/* Send a WAKE request; best-effort, failures are silently dropped. */
void cfctrl_wake_req(struct cflayer *layer)
{
	int ret;
	struct cfctrl *cfctrl = container_obj(layer);
	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
	if (!pkt) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return;
	}
	cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
	init_info(cfpkt_info(pkt), cfctrl);
	ret =
	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
	if (ret < 0)
		cfpkt_destroy(pkt);
}
371 | |||
/* Ask the modem for its start reason; best-effort, failures dropped. */
void cfctrl_getstartreason_req(struct cflayer *layer)
{
	int ret;
	struct cfctrl *cfctrl = container_obj(layer);
	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
	if (!pkt) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return;
	}
	cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
	init_info(cfpkt_info(pkt), cfctrl);
	ret =
	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
	if (ret < 0)
		cfpkt_destroy(pkt);
}
388 | |||
389 | |||
390 | void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) | ||
391 | { | ||
392 | struct cfctrl_request_info *p, *req; | ||
393 | struct cfctrl *ctrl = container_obj(layr); | ||
394 | spin_lock(&ctrl->info_list_lock); | ||
395 | |||
396 | if (ctrl->first_req == NULL) { | ||
397 | spin_unlock(&ctrl->info_list_lock); | ||
398 | return; | ||
399 | } | ||
400 | |||
401 | if (ctrl->first_req->client_layer == adap_layer) { | ||
402 | |||
403 | req = ctrl->first_req; | ||
404 | ctrl->first_req = ctrl->first_req->next; | ||
405 | kfree(req); | ||
406 | } | ||
407 | |||
408 | p = ctrl->first_req; | ||
409 | while (p != NULL && p->next != NULL) { | ||
410 | if (p->next->client_layer == adap_layer) { | ||
411 | |||
412 | req = p->next; | ||
413 | p->next = p->next->next; | ||
414 | kfree(p->next); | ||
415 | } | ||
416 | p = p->next; | ||
417 | } | ||
418 | |||
419 | spin_unlock(&ctrl->info_list_lock); | ||
420 | } | ||
421 | |||
/*
 * Receive handler for the control channel: parses command/response frames
 * and dispatches to the registered response callbacks in cfctrl->res.
 * The packet is consumed (destroyed) on every path.  Returns 0 on
 * success, -1 (the initial value of ret) for unrecognized frames or a
 * bad link type.
 */
static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
{
	u8 cmdrsp;
	u8 cmd;
	int ret = -1;
	u16 tmp16;
	u8 len;
	u8 param[255];
	u8 linkid;
	struct cfctrl *cfctrl = container_obj(layer);
	struct cfctrl_request_info rsp, *req;


	cfpkt_extr_head(pkt, &cmdrsp, 1);
	cmd = cmdrsp & CFCTRL_CMD_MASK;
	/*
	 * A frame without the response bit set is a request: run it through
	 * the local loopback emulation (handle_loop) and flag the frame as
	 * erroneous if that fails.
	 */
	if (cmd != CFCTRL_CMD_LINK_ERR
	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
		if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
			cmdrsp |= CFCTRL_ERR_BIT;
	}

	switch (cmd) {
	case CFCTRL_CMD_LINK_SETUP:
		{
			enum cfctrl_srv serv;
			enum cfctrl_srv servtype;
			u8 endpoint;
			u8 physlinkid;
			u8 prio;
			u8 tmp;
			u32 tmp32;
			u8 *cp;
			int i;
			struct cfctrl_link_param linkparam;
			memset(&linkparam, 0, sizeof(linkparam));

			/* Byte 1: service type (low nibble), channel type
			 * (high nibble). */
			cfpkt_extr_head(pkt, &tmp, 1);

			serv = tmp & CFCTRL_SRV_MASK;
			linkparam.linktype = serv;

			servtype = tmp >> 4;
			linkparam.chtype = servtype;

			/* Byte 2: phy id (3 low bits), priority (rest). */
			cfpkt_extr_head(pkt, &tmp, 1);
			physlinkid = tmp & 0x07;
			prio = tmp >> 3;

			linkparam.priority = prio;
			linkparam.phyid = physlinkid;
			/* Byte 3: endpoint, 2 low bits. */
			cfpkt_extr_head(pkt, &endpoint, 1);
			linkparam.endpoint = endpoint & 0x03;

			/* Service-specific parameters follow; on an error
			 * response (ERR bit set) the link id is absent. */
			switch (serv) {
			case CFCTRL_SRV_VEI:
			case CFCTRL_SRV_DBG:
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;
			case CFCTRL_SRV_VIDEO:
				cfpkt_extr_head(pkt, &tmp, 1);
				linkparam.u.video.connid = tmp;
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;

			case CFCTRL_SRV_DATAGRAM:
				/* Connection id is little-endian on the wire. */
				cfpkt_extr_head(pkt, &tmp32, 4);
				linkparam.u.datagram.connid =
				    le32_to_cpu(tmp32);
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				break;
			case CFCTRL_SRV_RFM:
				/* Construct a frame, convert
				 * DatagramConnectionID
				 * to network format long and copy it out...
				 */
				cfpkt_extr_head(pkt, &tmp32, 4);
				linkparam.u.rfm.connid =
				    le32_to_cpu(tmp32);
				/* Copy the NUL-terminated volume name. */
				cp = (u8 *) linkparam.u.rfm.volume;
				for (cfpkt_extr_head(pkt, &tmp, 1);
				     cfpkt_more(pkt) && tmp != '\0';
				     cfpkt_extr_head(pkt, &tmp, 1))
					*cp++ = tmp;
				*cp = '\0';

				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);

				break;
			case CFCTRL_SRV_UTIL:
				/* Construct a frame, convert
				 * DatagramConnectionID
				 * to network format long and copy it out...
				 */
				/* Fifosize KB */
				cfpkt_extr_head(pkt, &tmp16, 2);
				linkparam.u.utility.fifosize_kb =
				    le16_to_cpu(tmp16);
				/* Fifosize bufs */
				cfpkt_extr_head(pkt, &tmp16, 2);
				linkparam.u.utility.fifosize_bufs =
				    le16_to_cpu(tmp16);
				/* name */
				cp = (u8 *) linkparam.u.utility.name;
				caif_assert(sizeof(linkparam.u.utility.name)
					     >= UTILITY_NAME_LENGTH);
				for (i = 0;
				     i < UTILITY_NAME_LENGTH
				     && cfpkt_more(pkt); i++) {
					cfpkt_extr_head(pkt, &tmp, 1);
					*cp++ = tmp;
				}
				/* Length */
				cfpkt_extr_head(pkt, &len, 1);
				linkparam.u.utility.paramlen = len;
				/* Param Data */
				cp = linkparam.u.utility.params;
				while (cfpkt_more(pkt) && len--) {
					cfpkt_extr_head(pkt, &tmp, 1);
					*cp++ = tmp;
				}
				if (CFCTRL_ERR_BIT & cmdrsp)
					break;
				/* Link ID */
				cfpkt_extr_head(pkt, &linkid, 1);
				/* Length */
				cfpkt_extr_head(pkt, &len, 1);
				/* Param Data */
				cfpkt_extr_head(pkt, &param, len);
				break;
			default:
				pr_warning("CAIF: %s(): Request setup "
					   "- invalid link type (%d)",
					   __func__, serv);
				goto error;
			}

			/* Match the response to its queued request. */
			rsp.cmd = cmd;
			rsp.param = linkparam;
			req = cfctrl_remove_req(cfctrl, &rsp);

			if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
			    cfpkt_erroneous(pkt)) {
				pr_err("CAIF: %s(): Invalid O/E bit or parse "
				       "error on CAIF control channel",
					__func__);
				cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
						       0,
						       req ? req->client_layer
						       : NULL);
			} else {
				cfctrl->res.linksetup_rsp(cfctrl->serv.
							  layer.up, linkid,
							  serv, physlinkid,
							  req ? req->
							  client_layer : NULL);
			}

			if (req != NULL)
				kfree(req);
		}
		break;
	case CFCTRL_CMD_LINK_DESTROY:
		cfpkt_extr_head(pkt, &linkid, 1);
		cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
		break;
	case CFCTRL_CMD_LINK_ERR:
		pr_err("CAIF: %s(): Frame Error Indication received\n",
			__func__);
		cfctrl->res.linkerror_ind();
		break;
	case CFCTRL_CMD_ENUM:
		cfctrl->res.enum_rsp();
		break;
	case CFCTRL_CMD_SLEEP:
		cfctrl->res.sleep_rsp();
		break;
	case CFCTRL_CMD_WAKE:
		cfctrl->res.wake_rsp();
		break;
	case CFCTRL_CMD_LINK_RECONF:
		cfctrl->res.restart_rsp();
		break;
	case CFCTRL_CMD_RADIO_SET:
		cfctrl->res.radioset_rsp();
		break;
	default:
		pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
		goto error;
		break;
	}
	ret = 0;
error:
	cfpkt_destroy(pkt);
	return ret;
}
629 | |||
/*
 * Layer control-command hook: only logs (at debug level) when flow is
 * switched off while requests are still pending; other commands are
 * ignored.
 */
static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	struct cfctrl *this = container_obj(layr);
	switch (ctrl) {
	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
	case CAIF_CTRLCMD_FLOW_OFF_IND:
		spin_lock(&this->info_list_lock);
		if (this->first_req != NULL) {
			pr_debug("CAIF: %s(): Received flow off in "
				 "control layer", __func__);
		}
		spin_unlock(&this->info_list_lock);
		break;
	default:
		break;
	}
}
648 | |||
#ifndef CAIF_NO_LOOP
/*
 * Loopback emulation of the modem side of the control protocol:
 * allocates/releases link ids locally and appends the trailing fields a
 * real LINK_SETUP response would carry, so cfctrl_recv() can parse the
 * looped-back packet as a response.  Returns CAIF_SUCCESS, or -EINVAL
 * when no link id is free.
 */
static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
{
	/* Rotating search start so freed ids are not reused immediately. */
	static int last_linkid;
	u8 linkid, linktype, tmp;
	switch (cmd) {
	case CFCTRL_CMD_LINK_SETUP:
		spin_lock(&ctrl->loop_linkid_lock);
		/* Search upward from the last id, then wrap downward;
		 * id 0 is reserved for the control channel. */
		for (linkid = last_linkid + 1; linkid < 255; linkid++)
			if (!ctrl->loop_linkused[linkid])
				goto found;
		for (linkid = last_linkid - 1; linkid > 0; linkid--)
			if (!ctrl->loop_linkused[linkid])
				goto found;
		spin_unlock(&ctrl->loop_linkid_lock);
		pr_err("CAIF: %s(): Out of link-ids\n", __func__);
		return -EINVAL;
found:
		/* NOTE(review): condition is always true after the goto. */
		if (!ctrl->loop_linkused[linkid])
			ctrl->loop_linkused[linkid] = 1;

		last_linkid = linkid;

		/* Append the allocated link id as the response field. */
		cfpkt_add_trail(pkt, &linkid, 1);
		spin_unlock(&ctrl->loop_linkid_lock);
		cfpkt_peek_head(pkt, &linktype, 1);
		if (linktype == CFCTRL_SRV_UTIL) {
			/* Utility responses also carry param length + data. */
			tmp = 0x01;
			cfpkt_add_trail(pkt, &tmp, 1);
			cfpkt_add_trail(pkt, &tmp, 1);
		}
		break;

	case CFCTRL_CMD_LINK_DESTROY:
		spin_lock(&ctrl->loop_linkid_lock);
		cfpkt_peek_head(pkt, &linkid, 1);
		ctrl->loop_linkused[linkid] = 0;
		spin_unlock(&ctrl->loop_linkid_lock);
		break;
	default:
		break;
	}
	return CAIF_SUCCESS;
}
#endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c new file mode 100644 index 000000000000..ab6b6dc34cf8 --- /dev/null +++ b/net/caif/cfdbgl.c | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfsrvl.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | |||
13 | static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
14 | static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
15 | |||
16 | struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) | ||
17 | { | ||
18 | struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
19 | if (!dbg) { | ||
20 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
21 | return NULL; | ||
22 | } | ||
23 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
24 | memset(dbg, 0, sizeof(struct cfsrvl)); | ||
25 | cfsrvl_init(dbg, channel_id, dev_info); | ||
26 | dbg->layer.receive = cfdbgl_receive; | ||
27 | dbg->layer.transmit = cfdbgl_transmit; | ||
28 | snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); | ||
29 | return &dbg->layer; | ||
30 | } | ||
31 | |||
32 | static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
33 | { | ||
34 | return layr->up->receive(layr->up, pkt); | ||
35 | } | ||
36 | |||
37 | static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
38 | { | ||
39 | return layr->dn->transmit(layr->dn, pkt); | ||
40 | } | ||
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c new file mode 100644 index 000000000000..53194840ecb6 --- /dev/null +++ b/net/caif/cfdgml.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cfpkt.h> | ||
13 | |||
14 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
15 | |||
16 | #define DGM_CMD_BIT 0x80 | ||
17 | #define DGM_FLOW_OFF 0x81 | ||
18 | #define DGM_FLOW_ON 0x80 | ||
19 | #define DGM_CTRL_PKT_SIZE 1 | ||
20 | |||
21 | static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
22 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | |||
24 | struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info) | ||
25 | { | ||
26 | struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
27 | if (!dgm) { | ||
28 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
29 | return NULL; | ||
30 | } | ||
31 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
32 | memset(dgm, 0, sizeof(struct cfsrvl)); | ||
33 | cfsrvl_init(dgm, channel_id, dev_info); | ||
34 | dgm->layer.receive = cfdgml_receive; | ||
35 | dgm->layer.transmit = cfdgml_transmit; | ||
36 | snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id); | ||
37 | dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0'; | ||
38 | return &dgm->layer; | ||
39 | } | ||
40 | |||
/*
 * Datagram receive: data frames (command bit clear) have a 3-byte
 * datagram header stripped and are passed up; control frames carry
 * flow-on/flow-off indications.  The packet is destroyed on every path
 * except successful data delivery.  Returns 0 or -EPROTO.
 */
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 cmd = -1;
	u8 dgmhdr[3];
	int ret;
	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	caif_assert(layr->ctrlcmd != NULL);

	if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	/* Command bit clear: ordinary datagram payload. */
	if ((cmd & DGM_CMD_BIT) == 0) {
		if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
			pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
			cfpkt_destroy(pkt);
			return -EPROTO;
		}
		ret = layr->up->receive(layr->up, pkt);
		return ret;
	}

	switch (cmd) {
	case DGM_FLOW_OFF:	/* FLOW OFF */
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	case DGM_FLOW_ON:	/* FLOW ON */
		layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
		cfpkt_destroy(pkt);
		return 0;
	default:
		cfpkt_destroy(pkt);
		pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
			__func__, cmd, cmd);
		return -EPROTO;
	}
}
82 | |||
/*
 * Datagram transmit: prepend the 4-byte (zero) datagram header, fill in
 * routing info for the mux layer, and send downward.  On transmit
 * failure the header is stripped off again so the caller gets the packet
 * back in its original form.  Returns the transmit result, or the error
 * from cfsrvl_ready() when the service is not ready.
 */
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	u32 zero = 0;
	struct caif_payload_info *info;
	struct cfsrvl *service = container_obj(layr);
	int ret;
	if (!cfsrvl_ready(service, &ret))
		return ret;

	cfpkt_add_head(pkt, &zero, 4);

	/* Add info for MUX-layer to route the packet out. */
	info = cfpkt_info(pkt);
	info->channel_id = service->layer.id;
	/* To optimize alignment, we add up the size of CAIF header
	 * before payload.
	 */
	info->hdr_len = 4;
	info->dev_info = &service->dev_info;
	ret = layr->dn->transmit(layr->dn, pkt);
	if (ret < 0) {
		/* Undo the header we added; caller still owns the packet. */
		u32 tmp32;
		cfpkt_extr_head(pkt, &tmp32, 4);
	}
	return ret;
}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c new file mode 100644 index 000000000000..e86a4ca3b217 --- /dev/null +++ b/net/caif/cffrml.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * CAIF Framing Layer. | ||
3 | * | ||
4 | * Copyright (C) ST-Ericsson AB 2010 | ||
5 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
6 | * License terms: GNU General Public License (GPL) version 2 | ||
7 | */ | ||
8 | |||
9 | #include <linux/stddef.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/crc-ccitt.h> | ||
13 | #include <net/caif/caif_layer.h> | ||
14 | #include <net/caif/cfpkt.h> | ||
15 | #include <net/caif/cffrml.h> | ||
16 | |||
17 | #define container_obj(layr) container_of(layr, struct cffrml, layer) | ||
18 | |||
19 | struct cffrml { | ||
20 | struct cflayer layer; | ||
21 | bool dofcs; /* !< FCS active */ | ||
22 | }; | ||
23 | |||
24 | static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
25 | static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
26 | static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
27 | int phyid); | ||
28 | |||
29 | static u32 cffrml_rcv_error; | ||
30 | static u32 cffrml_rcv_checsum_error; | ||
31 | struct cflayer *cffrml_create(u16 phyid, bool use_fcs) | ||
32 | { | ||
33 | struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); | ||
34 | if (!this) { | ||
35 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
36 | return NULL; | ||
37 | } | ||
38 | caif_assert(offsetof(struct cffrml, layer) == 0); | ||
39 | |||
40 | memset(this, 0, sizeof(struct cflayer)); | ||
41 | this->layer.receive = cffrml_receive; | ||
42 | this->layer.transmit = cffrml_transmit; | ||
43 | this->layer.ctrlcmd = cffrml_ctrlcmd; | ||
44 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid); | ||
45 | this->dofcs = use_fcs; | ||
46 | this->layer.id = phyid; | ||
47 | return (struct cflayer *) this; | ||
48 | } | ||
49 | |||
/* Attach the layer that sits above this framing layer. */
void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
{
	this->up = up;
}
54 | |||
/* Attach the layer that sits below this framing layer. */
void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
{
	this->dn = dn;
}
59 | |||
/* cfpkt_iterate() callback: fold 'len' bytes of 'buf' into a CCITT CRC,
 * starting from accumulator 'chks'. */
static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
{
	/* FIXME: FCS should be moved to glue in order to use OS-Specific
	 * solutions
	 */
	return crc_ccitt(chks, buf, len);
}
67 | |||
68 | static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
69 | { | ||
70 | u16 tmp; | ||
71 | u16 len; | ||
72 | u16 hdrchks; | ||
73 | u16 pktchks; | ||
74 | struct cffrml *this; | ||
75 | this = container_obj(layr); | ||
76 | |||
77 | cfpkt_extr_head(pkt, &tmp, 2); | ||
78 | len = le16_to_cpu(tmp); | ||
79 | |||
80 | /* Subtract for FCS on length if FCS is not used. */ | ||
81 | if (!this->dofcs) | ||
82 | len -= 2; | ||
83 | |||
84 | if (cfpkt_setlen(pkt, len) < 0) { | ||
85 | ++cffrml_rcv_error; | ||
86 | pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len); | ||
87 | cfpkt_destroy(pkt); | ||
88 | return -EPROTO; | ||
89 | } | ||
90 | /* | ||
91 | * Don't do extract if FCS is false, rather do setlen - then we don't | ||
92 | * get a cache-miss. | ||
93 | */ | ||
94 | if (this->dofcs) { | ||
95 | cfpkt_extr_trail(pkt, &tmp, 2); | ||
96 | hdrchks = le16_to_cpu(tmp); | ||
97 | pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); | ||
98 | if (pktchks != hdrchks) { | ||
99 | cfpkt_add_trail(pkt, &tmp, 2); | ||
100 | ++cffrml_rcv_error; | ||
101 | ++cffrml_rcv_checsum_error; | ||
102 | pr_info("CAIF: %s(): Frame checksum error " | ||
103 | "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks); | ||
104 | return -EILSEQ; | ||
105 | } | ||
106 | } | ||
107 | if (cfpkt_erroneous(pkt)) { | ||
108 | ++cffrml_rcv_error; | ||
109 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
110 | cfpkt_destroy(pkt); | ||
111 | return -EPROTO; | ||
112 | } | ||
113 | return layr->up->receive(layr->up, pkt); | ||
114 | } | ||
115 | |||
116 | static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
117 | { | ||
118 | int tmp; | ||
119 | u16 chks; | ||
120 | u16 len; | ||
121 | int ret; | ||
122 | struct cffrml *this = container_obj(layr); | ||
123 | if (this->dofcs) { | ||
124 | chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); | ||
125 | tmp = cpu_to_le16(chks); | ||
126 | cfpkt_add_trail(pkt, &tmp, 2); | ||
127 | } else { | ||
128 | cfpkt_pad_trail(pkt, 2); | ||
129 | } | ||
130 | len = cfpkt_getlen(pkt); | ||
131 | tmp = cpu_to_le16(len); | ||
132 | cfpkt_add_head(pkt, &tmp, 2); | ||
133 | cfpkt_info(pkt)->hdr_len += 2; | ||
134 | if (cfpkt_erroneous(pkt)) { | ||
135 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
136 | return -EPROTO; | ||
137 | } | ||
138 | ret = layr->dn->transmit(layr->dn, pkt); | ||
139 | if (ret < 0) { | ||
140 | /* Remove header on faulty packet. */ | ||
141 | cfpkt_extr_head(pkt, &tmp, 2); | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
145 | |||
/* Forward control indications (flow on/off etc.) to the layer above,
 * substituting this layer's id as the phy id. */
static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	if (layr->up->ctrlcmd)
		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c new file mode 100644 index 000000000000..7372f27f1d32 --- /dev/null +++ b/net/caif/cfmuxl.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/stddef.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/cfpkt.h> | ||
10 | #include <net/caif/cfmuxl.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cffrml.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfmuxl, layer) | ||
15 | |||
16 | #define CAIF_CTRL_CHANNEL 0 | ||
17 | #define UP_CACHE_SIZE 8 | ||
18 | #define DN_CACHE_SIZE 8 | ||
19 | |||
20 | struct cfmuxl { | ||
21 | struct cflayer layer; | ||
22 | struct list_head srvl_list; | ||
23 | struct list_head frml_list; | ||
24 | struct cflayer *up_cache[UP_CACHE_SIZE]; | ||
25 | struct cflayer *dn_cache[DN_CACHE_SIZE]; | ||
26 | /* | ||
27 | * Set when inserting or removing downwards layers. | ||
28 | */ | ||
29 | spinlock_t transmit_lock; | ||
30 | |||
31 | /* | ||
32 | * Set when inserting or removing upwards layers. | ||
33 | */ | ||
34 | spinlock_t receive_lock; | ||
35 | |||
36 | }; | ||
37 | |||
38 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
39 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
40 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
41 | int phyid); | ||
42 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); | ||
43 | |||
44 | struct cflayer *cfmuxl_create(void) | ||
45 | { | ||
46 | struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); | ||
47 | if (!this) | ||
48 | return NULL; | ||
49 | memset(this, 0, sizeof(*this)); | ||
50 | this->layer.receive = cfmuxl_receive; | ||
51 | this->layer.transmit = cfmuxl_transmit; | ||
52 | this->layer.ctrlcmd = cfmuxl_ctrlcmd; | ||
53 | INIT_LIST_HEAD(&this->srvl_list); | ||
54 | INIT_LIST_HEAD(&this->frml_list); | ||
55 | spin_lock_init(&this->transmit_lock); | ||
56 | spin_lock_init(&this->receive_lock); | ||
57 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); | ||
58 | return &this->layer; | ||
59 | } | ||
60 | |||
/* Register service layer 'up' (channel 'linkid') for demultiplexing.
 * Takes a reference on 'up'; dropped in cfmuxl_remove_uplayer(). */
int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
{
	struct cfmuxl *muxl = container_obj(layr);
	spin_lock(&muxl->receive_lock);
	cfsrvl_get(up);
	list_add(&up->node, &muxl->srvl_list);
	spin_unlock(&muxl->receive_lock);
	return 0;
}
70 | |||
71 | bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid) | ||
72 | { | ||
73 | struct list_head *node; | ||
74 | struct cflayer *layer; | ||
75 | struct cfmuxl *muxl = container_obj(layr); | ||
76 | bool match = false; | ||
77 | spin_lock(&muxl->receive_lock); | ||
78 | |||
79 | list_for_each(node, &muxl->srvl_list) { | ||
80 | layer = list_entry(node, struct cflayer, node); | ||
81 | if (cfsrvl_phyid_match(layer, phyid)) { | ||
82 | match = true; | ||
83 | break; | ||
84 | } | ||
85 | |||
86 | } | ||
87 | spin_unlock(&muxl->receive_lock); | ||
88 | return match; | ||
89 | } | ||
90 | |||
91 | u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id) | ||
92 | { | ||
93 | struct cflayer *up; | ||
94 | int phyid; | ||
95 | struct cfmuxl *muxl = container_obj(layr); | ||
96 | spin_lock(&muxl->receive_lock); | ||
97 | up = get_up(muxl, channel_id); | ||
98 | if (up != NULL) | ||
99 | phyid = cfsrvl_getphyid(up); | ||
100 | else | ||
101 | phyid = 0; | ||
102 | spin_unlock(&muxl->receive_lock); | ||
103 | return phyid; | ||
104 | } | ||
105 | |||
106 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) | ||
107 | { | ||
108 | struct cfmuxl *muxl = (struct cfmuxl *) layr; | ||
109 | spin_lock(&muxl->transmit_lock); | ||
110 | list_add(&dn->node, &muxl->frml_list); | ||
111 | spin_unlock(&muxl->transmit_lock); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static struct cflayer *get_from_id(struct list_head *list, u16 id) | ||
116 | { | ||
117 | struct list_head *node; | ||
118 | struct cflayer *layer; | ||
119 | list_for_each(node, list) { | ||
120 | layer = list_entry(node, struct cflayer, node); | ||
121 | if (layer->id == id) | ||
122 | return layer; | ||
123 | } | ||
124 | return NULL; | ||
125 | } | ||
126 | |||
127 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) | ||
128 | { | ||
129 | struct cfmuxl *muxl = container_obj(layr); | ||
130 | struct cflayer *dn; | ||
131 | spin_lock(&muxl->transmit_lock); | ||
132 | memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache)); | ||
133 | dn = get_from_id(&muxl->frml_list, phyid); | ||
134 | if (dn == NULL) { | ||
135 | spin_unlock(&muxl->transmit_lock); | ||
136 | return NULL; | ||
137 | } | ||
138 | list_del(&dn->node); | ||
139 | caif_assert(dn != NULL); | ||
140 | spin_unlock(&muxl->transmit_lock); | ||
141 | return dn; | ||
142 | } | ||
143 | |||
/* Invariant: lock is taken */
/* Resolve channel 'id' to its service layer through a small
 * direct-mapped cache, falling back to a list search and refilling
 * the cache slot (possibly with NULL). */
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	struct cflayer *up;
	int idx = id % UP_CACHE_SIZE;
	up = muxl->up_cache[idx];
	if (up == NULL || up->id != id) {
		up = get_from_id(&muxl->srvl_list, id);
		muxl->up_cache[idx] = up;
	}
	return up;
}
156 | |||
/* Invariant: lock is taken */
/* Resolve a device id to its framing layer through a small
 * direct-mapped cache, falling back to a list search and refilling
 * the cache slot (possibly with NULL). */
static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
{
	struct cflayer *dn;
	int idx = dev_info->id % DN_CACHE_SIZE;
	dn = muxl->dn_cache[idx];
	if (dn == NULL || dn->id != dev_info->id) {
		dn = get_from_id(&muxl->frml_list, dev_info->id);
		muxl->dn_cache[idx] = dn;
	}
	return dn;
}
169 | |||
170 | struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | ||
171 | { | ||
172 | struct cflayer *up; | ||
173 | struct cfmuxl *muxl = container_obj(layr); | ||
174 | spin_lock(&muxl->receive_lock); | ||
175 | up = get_up(muxl, id); | ||
176 | if (up == NULL) | ||
177 | return NULL; | ||
178 | memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); | ||
179 | list_del(&up->node); | ||
180 | cfsrvl_put(up); | ||
181 | spin_unlock(&muxl->receive_lock); | ||
182 | return up; | ||
183 | } | ||
184 | |||
/* Demultiplex an inbound packet: the first byte is the channel id,
 * which selects the upward service layer.  The packet is consumed
 * here on any error. */
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 id;
	struct cflayer *up;
	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
		pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	spin_lock(&muxl->receive_lock);
	up = get_up(muxl, id);
	spin_unlock(&muxl->receive_lock);
	if (up == NULL) {
		pr_info("CAIF: %s():Received data on unknown link ID = %d "
			"(0x%x) up == NULL", __func__, id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return ERROR, since modem misbehaves and sends out
		 * flow on before linksetup response.
		 */
		return /* CFGLU_EPROT; */ 0;
	}
	/* Pin the service layer across the upcall so it cannot be freed
	 * while receive() runs (lock is dropped above). */
	cfsrvl_get(up);
	ret = up->receive(up, pkt);
	cfsrvl_put(up);
	return ret;
}
215 | |||
/* Multiplex an outbound packet: prepend the 1-byte channel id and route
 * it to the framing layer matching the packet's device info.  On
 * downstream failure the id byte is stripped again.
 * NOTE(review): get_dn() reads/writes dn_cache without taking
 * transmit_lock — looks racy against cfmuxl_remove_dnlayer(); confirm. */
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);
	dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
	if (dn == NULL) {
		pr_warning("CAIF: %s(): Send data on unknown phy "
			   "ID = %d (0x%x)\n",
			   __func__, info->dev_info->id, info->dev_info->id);
		return -ENOTCONN;
	}
	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);
	ret = dn->transmit(dn, pkt);
	/* Remove MUX protocol header upon error. */
	if (ret < 0)
		cfpkt_extr_head(pkt, &linkid, 1);
	return ret;
}
239 | |||
240 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
241 | int phyid) | ||
242 | { | ||
243 | struct cfmuxl *muxl = container_obj(layr); | ||
244 | struct list_head *node; | ||
245 | struct cflayer *layer; | ||
246 | list_for_each(node, &muxl->srvl_list) { | ||
247 | layer = list_entry(node, struct cflayer, node); | ||
248 | if (cfsrvl_phyid_match(layer, phyid)) | ||
249 | layer->ctrlcmd(layer, ctrl, phyid); | ||
250 | } | ||
251 | } | ||
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c new file mode 100644 index 000000000000..83fff2ff6658 --- /dev/null +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/skbuff.h> | ||
9 | #include <linux/hardirq.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | |||
12 | #define PKT_PREFIX CAIF_NEEDED_HEADROOM | ||
13 | #define PKT_POSTFIX CAIF_NEEDED_TAILROOM | ||
14 | #define PKT_LEN_WHEN_EXTENDING 128 | ||
15 | #define PKT_ERROR(pkt, errmsg) do { \ | ||
16 | cfpkt_priv(pkt)->erronous = true; \ | ||
17 | skb_reset_tail_pointer(&pkt->skb); \ | ||
18 | pr_warning("CAIF: " errmsg);\ | ||
19 | } while (0) | ||
20 | |||
21 | struct cfpktq { | ||
22 | struct sk_buff_head head; | ||
23 | atomic_t count; | ||
24 | /* Lock protects count updates */ | ||
25 | spinlock_t lock; | ||
26 | }; | ||
27 | |||
28 | /* | ||
29 | * net/caif/ is generic and does not | ||
30 | * understand SKB, so we do this typecast | ||
31 | */ | ||
32 | struct cfpkt { | ||
33 | struct sk_buff skb; | ||
34 | }; | ||
35 | |||
36 | /* Private data inside SKB */ | ||
37 | struct cfpkt_priv_data { | ||
38 | struct dev_info dev_info; | ||
39 | bool erronous; | ||
40 | }; | ||
41 | |||
/* CAIF private data lives in the skb control buffer (cb). */
inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
{
	return (struct cfpkt_priv_data *) pkt->skb.cb;
}
46 | |||
/* True if an earlier operation flagged this packet as corrupt. */
inline bool is_erronous(struct cfpkt *pkt)
{
	return cfpkt_priv(pkt)->erronous;
}
51 | |||
/* A cfpkt embeds the sk_buff at offset 0, so this is a plain cast. */
inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
{
	return &pkt->skb;
}
56 | |||
/* Inverse of pkt_to_skb(); see struct cfpkt layout above. */
inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
{
	return (struct cfpkt *) skb;
}
61 | |||
62 | |||
/* Wrap a native sk_buff as a cfpkt and clear its error flag.
 * 'dir' is currently unused by this implementation. */
struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
{
	struct cfpkt *pkt = skb_to_pkt(nativepkt);
	cfpkt_priv(pkt)->erronous = false;
	return pkt;
}
EXPORT_SYMBOL(cfpkt_fromnative);
70 | |||
/* Hand the underlying buffer back to native code (identity cast). */
void *cfpkt_tonative(struct cfpkt *pkt)
{
	return (void *) pkt;
}
EXPORT_SYMBOL(cfpkt_tonative);
76 | |||
77 | static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) | ||
78 | { | ||
79 | struct sk_buff *skb; | ||
80 | |||
81 | if (likely(in_interrupt())) | ||
82 | skb = alloc_skb(len + pfx, GFP_ATOMIC); | ||
83 | else | ||
84 | skb = alloc_skb(len + pfx, GFP_KERNEL); | ||
85 | |||
86 | if (unlikely(skb == NULL)) | ||
87 | return NULL; | ||
88 | |||
89 | skb_reserve(skb, pfx); | ||
90 | return skb_to_pkt(skb); | ||
91 | } | ||
92 | |||
/* Allocate a packet sized for 'len' payload bytes plus the standard
 * CAIF head- and tailroom. */
inline struct cfpkt *cfpkt_create(u16 len)
{
	return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
}
EXPORT_SYMBOL(cfpkt_create);
98 | |||
/* Release the packet (drops the underlying skb reference). */
void cfpkt_destroy(struct cfpkt *pkt)
{
	kfree_skb(pkt_to_skb(pkt));
}
EXPORT_SYMBOL(cfpkt_destroy);
105 | |||
106 | inline bool cfpkt_more(struct cfpkt *pkt) | ||
107 | { | ||
108 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
109 | return skb->len > 0; | ||
110 | } | ||
111 | EXPORT_SYMBOL(cfpkt_more); | ||
112 | |||
/* Copy the first 'len' bytes of the packet into 'data' without
 * consuming them.  If the linear head is too short, fall back to
 * extract-then-reinsert.
 * NOTE(review): the fast path returns 0 on success while the fallback
 * returns 1 when both calls succeed — confirm callers treat any
 * non-negative value as success. */
int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	if (skb_headlen(skb) >= len) {
		memcpy(data, skb->data, len);
		return 0;
	}
	return !cfpkt_extr_head(pkt, data, len) &&
		!cfpkt_add_head(pkt, data, len);
}
EXPORT_SYMBOL(cfpkt_peek_head);
124 | |||
/* Remove 'len' bytes from the packet head and copy them into 'data'.
 * Linearizes the skb if the head is fragmented.  Marks the packet
 * erroneous and returns -EPROTO on any failure. */
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	u8 *from;
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	if (unlikely(len > skb->len)) {
		PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
		return -EPROTO;
	}

	if (unlikely(len > skb_headlen(skb))) {
		if (unlikely(skb_linearize(skb) != 0)) {
			PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
			return -EPROTO;
		}
	}
	/* skb_pull returns the NEW data pointer; step back to the bytes
	 * that were just consumed before copying them out. */
	from = skb_pull(skb, len);
	from -= len;
	memcpy(data, from, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_extr_head);
149 | |||
/* Remove 'len' bytes from the packet tail and copy them into 'dta'.
 * Always linearizes first so the tail pointer is meaningful.  Marks
 * the packet erroneous and returns -EPROTO on any failure. */
int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	u8 *data = dta;
	u8 *from;
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	if (unlikely(skb_linearize(skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
		return -EPROTO;
	}
	if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
		PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
		return -EPROTO;
	}
	from = skb_tail_pointer(skb) - len;
	skb_trim(skb, skb->len - len);
	memcpy(data, from, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_extr_trail);
172 | |||
/* Grow the packet tail by 'len' bytes.  The added bytes are NOT
 * zeroed (cfpkt_add_body with NULL data only reserves space). */
int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
{
	return cfpkt_add_body(pkt, NULL, len);
}
EXPORT_SYMBOL(cfpkt_pad_trail);
178 | |||
/* Append 'len' bytes to the packet tail, expanding and/or copying the
 * skb when it lacks tailroom, is cloned, or is shared.  'data' may be
 * NULL to only reserve space.  Marks the packet erroneous and returns
 * -EPROTO on failure. */
int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;
	u8 *to;
	u16 addlen = 0;


	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	lastskb = skb;

	/* Check whether we need to add space at the tail */
	if (unlikely(skb_tailroom(skb) < len)) {
		/* Over-allocate small expansions to amortize later ones. */
		if (likely(len < PKT_LEN_WHEN_EXTENDING))
			addlen = PKT_LEN_WHEN_EXTENDING;
		else
			addlen = len;
	}

	/* Check whether we need to change the SKB before writing to the tail */
	if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {

		/* Make sure data is writable */
		if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
			PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
			return -EPROTO;
		}
		/*
		 * Is the SKB non-linear after skb_cow_data()? If so, we are
		 * going to add data to the last SKB, so we need to adjust
		 * lengths of the top SKB.
		 */
		if (lastskb != skb) {
			pr_warning("CAIF: %s(): Packet is non-linear\n",
				   __func__);
			skb->len += len;
			skb->data_len += len;
		}
	}

	/* All set to put the last SKB and optionally write data there. */
	to = skb_put(lastskb, len);
	if (likely(data))
		memcpy(to, data, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_add_body);
228 | |||
/* Append a single byte to the packet body. */
inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
{
	return cfpkt_add_body(pkt, &data, 1);
}
EXPORT_SYMBOL(cfpkt_addbdy);
234 | |||
/* Prepend 'len' bytes from 'data2' to the packet head.  Requires
 * pre-reserved headroom (see PKT_PREFIX); marks the packet erroneous
 * and returns -EPROTO when headroom is missing or the copy-on-write
 * fails. */
int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;
	u8 *to;
	const u8 *data = data2;
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	if (unlikely(skb_headroom(skb) < len)) {
		PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
		return -EPROTO;
	}

	/* Make sure data is writable */
	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
		PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
		return -EPROTO;
	}

	to = skb_push(skb, len);
	memcpy(to, data, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_add_head);
259 | |||
/* Append 'len' bytes at the packet tail (alias of cfpkt_add_body). */
inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
{
	return cfpkt_add_body(pkt, data, len);
}
EXPORT_SYMBOL(cfpkt_add_trail);
265 | |||
/* Total payload length of the packet (truncated to u16). */
inline u16 cfpkt_getlen(struct cfpkt *pkt)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	return skb->len;
}
EXPORT_SYMBOL(cfpkt_getlen);
272 | |||
/* Run 'iter_func' over the linearized packet contents, seeding the
 * accumulator with 'data' (used for FCS computation).
 * NOTE(review): the u16 return type cannot represent -EPROTO; the
 * error paths below truncate to an arbitrary u16 value that callers
 * cannot distinguish from a valid checksum — confirm this is
 * acceptable for the FCS use case. */
inline u16 cfpkt_iterate(struct cfpkt *pkt,
			 u16 (*iter_func)(u16, void *, u16),
			 u16 data)
{
	/*
	 * Don't care about the performance hit of linearizing,
	 * Checksum should not be used on high-speed interfaces anyway.
	 */
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	if (unlikely(skb_linearize(&pkt->skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
		return -EPROTO;
	}
	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
}
EXPORT_SYMBOL(cfpkt_iterate);
290 | |||
291 | int cfpkt_setlen(struct cfpkt *pkt, u16 len) | ||
292 | { | ||
293 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
294 | |||
295 | |||
296 | if (unlikely(is_erronous(pkt))) | ||
297 | return -EPROTO; | ||
298 | |||
299 | if (likely(len <= skb->len)) { | ||
300 | if (unlikely(skb->data_len)) | ||
301 | ___pskb_trim(skb, len); | ||
302 | else | ||
303 | skb_trim(skb, len); | ||
304 | |||
305 | return cfpkt_getlen(pkt); | ||
306 | } | ||
307 | |||
308 | /* Need to expand SKB */ | ||
309 | if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) | ||
310 | PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n"); | ||
311 | |||
312 | return cfpkt_getlen(pkt); | ||
313 | } | ||
314 | EXPORT_SYMBOL(cfpkt_setlen); | ||
315 | |||
316 | struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) | ||
317 | { | ||
318 | struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | ||
319 | if (unlikely(data != NULL)) | ||
320 | cfpkt_add_body(pkt, data, len); | ||
321 | return pkt; | ||
322 | } | ||
323 | EXPORT_SYMBOL(cfpkt_create_uplink); | ||
324 | |||
/* Append the contents of 'addpkt' to 'dstpkt', reallocating 'dstpkt'
 * with at least max(expectlen, addlen) bytes of tailroom when needed.
 * Consumes 'addpkt' (and the old 'dstpkt' on reallocation); returns
 * the (possibly new) destination packet, or NULL on OOM.
 * NOTE(review): the direct 'dst->tail += addlen' arithmetic assumes
 * skb tail/end are pointers, i.e. !NET_SKBUFF_DATA_USES_OFFSET —
 * confirm for the target architectures. */
struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
			   struct cfpkt *addpkt,
			   u16 expectlen)
{
	struct sk_buff *dst = pkt_to_skb(dstpkt);
	struct sk_buff *add = pkt_to_skb(addpkt);
	u16 addlen = skb_headlen(add);
	u16 neededtailspace;
	struct sk_buff *tmp;
	u16 dstlen;
	u16 createlen;
	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
		cfpkt_destroy(addpkt);
		return dstpkt;
	}
	if (expectlen > addlen)
		neededtailspace = expectlen;
	else
		neededtailspace = addlen;

	if (dst->tail + neededtailspace > dst->end) {
		/* Create a dumplicate of 'dst' with more tail space */
		dstlen = skb_headlen(dst);
		createlen = dstlen + neededtailspace;
		tmp = pkt_to_skb(
			cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
		if (!tmp)
			return NULL;
		skb_set_tail_pointer(tmp, dstlen);
		tmp->len = dstlen;
		memcpy(tmp->data, dst->data, dstlen);
		cfpkt_destroy(dstpkt);
		dst = tmp;
	}
	memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
	cfpkt_destroy(addpkt);
	dst->tail += addlen;
	dst->len += addlen;
	return skb_to_pkt(dst);
}
EXPORT_SYMBOL(cfpkt_append);
366 | |||
/* Split the packet at byte offset 'pos': the original is truncated to
 * 'pos' bytes and a newly allocated packet holding the remainder is
 * returned (NULL on error, leaving the original untouched except when
 * flagged erroneous).
 * NOTE(review): 'skb2->tail += len2nd' assumes pointer-typed skb tail
 * (!NET_SKBUFF_DATA_USES_OFFSET) — confirm. */
struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
{
	struct sk_buff *skb2;
	struct sk_buff *skb = pkt_to_skb(pkt);
	u8 *split = skb->data + pos;
	u16 len2nd = skb_tail_pointer(skb) - split;

	if (unlikely(is_erronous(pkt)))
		return NULL;

	if (skb->data + pos > skb_tail_pointer(skb)) {
		PKT_ERROR(pkt,
			  "cfpkt_split: trying to split beyond end of packet");
		return NULL;
	}

	/* Create a new packet for the second part of the data */
	skb2 = pkt_to_skb(
		cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
				 PKT_PREFIX));

	if (skb2 == NULL)
		return NULL;

	/* Reduce the length of the original packet */
	skb_set_tail_pointer(skb, pos);
	skb->len = pos;

	memcpy(skb2->data, split, len2nd);
	skb2->tail += len2nd;
	skb2->len += len2nd;
	return skb_to_pkt(skb2);
}
EXPORT_SYMBOL(cfpkt_split);
401 | |||
/* Format a human-readable dump of the packet (header info plus up to
 * 300 payload bytes in hex) into 'buf'.  Returns 'buf', or NULL when
 * 'buflen' is smaller than the ~50-byte header portion. */
char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	char *p = buf;
	int i;

	/*
	 * Sanity check buffer length, it needs to be at least as large as
	 * the header info: ~=50+ bytes
	 */
	if (buflen < 50)
		return NULL;

	snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
		 is_erronous(pkt) ? "ERRONOUS-SKB" :
		 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
		 skb,
		 (long) skb->len,
		 (long) (skb_tail_pointer(skb) - skb->data),
		 (long) skb->data_len,
		 (long) (skb->data - skb->head),
		 (long) (skb_tail_pointer(skb) - skb->head));
	p = buf + strlen(buf);

	for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
		/* Stop early, leaving room for the "..." marker. */
		if (p > buf + buflen - 10) {
			sprintf(p, "...");
			p = buf + strlen(buf);
			break;
		}
		sprintf(p, "%02x,", skb->data[i]);
		p = buf + strlen(buf);
	}
	sprintf(p, "]\n");
	return buf;
}
EXPORT_SYMBOL(cfpkt_log_pkt);
439 | |||
/* Reserve 'buflen' writable bytes at the packet tail and return a
 * pointer to them in *buf (caller fills them in directly).  Returns 1
 * on success, -EPROTO on failure (packet marked erroneous). */
int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;

	caif_assert(buf != NULL);
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	/* Make sure SKB is writable */
	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
		return -EPROTO;
	}

	if (unlikely(skb_linearize(skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
		return -EPROTO;
	}

	if (unlikely(skb_tailroom(skb) < buflen)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
		return -EPROTO;
	}

	*buf = skb_put(skb, buflen);
	return 1;
}
EXPORT_SYMBOL(cfpkt_raw_append);
468 | |||
/* Expose the first 'buflen' bytes of the packet via *buf and consume
 * them (the returned pointer references data just pulled off the skb).
 * Returns 1 on success, -EPROTO on failure (packet marked erroneous). */
int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);

	caif_assert(buf != NULL);
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	if (unlikely(buflen > skb->len)) {
		PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
			  "- failed\n");
		return -EPROTO;
	}

	if (unlikely(buflen > skb_headlen(skb))) {
		if (unlikely(skb_linearize(skb) != 0)) {
			PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
			return -EPROTO;
		}
	}

	*buf = skb->data;
	skb_pull(skb, buflen);

	return 1;
}
EXPORT_SYMBOL(cfpkt_raw_extract);
496 | |||
497 | inline bool cfpkt_erroneous(struct cfpkt *pkt) | ||
498 | { | ||
499 | return cfpkt_priv(pkt)->erronous; | ||
500 | } | ||
501 | EXPORT_SYMBOL(cfpkt_erroneous); | ||
502 | |||
/* Allocate and initialize an empty packet queue.
 * Returns NULL on allocation failure. */
struct cfpktq *cfpktq_create(void)
{
	struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
	if (!q)
		return NULL;
	skb_queue_head_init(&q->head);
	atomic_set(&q->count, 0);
	spin_lock_init(&q->lock);
	return q;
}
EXPORT_SYMBOL(cfpktq_create);
514 | |||
/* Enqueue a packet at the tail.  'prio' is currently ignored.
 * NOTE(review): the counter is bumped before the lock is taken, so
 * count can transiently exceed the queue length — confirm readers of
 * cfpkt_qcount() tolerate that. */
void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
{
	atomic_inc(&pktq->count);
	spin_lock(&pktq->lock);
	skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
	spin_unlock(&pktq->lock);

}
EXPORT_SYMBOL(cfpkt_queue);
524 | |||
/* Return the head-of-queue packet without dequeuing it; NULL when the
 * queue is empty (skb_peek returns NULL, which casts through). */
struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
{
	struct cfpkt *tmp;
	spin_lock(&pktq->lock);
	tmp = skb_to_pkt(skb_peek(&pktq->head));
	spin_unlock(&pktq->lock);
	return tmp;
}
EXPORT_SYMBOL(cfpkt_qpeek);
534 | |||
/* Dequeue and return the head-of-queue packet, decrementing the count;
 * NULL when the queue is empty. */
struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
{
	struct cfpkt *pkt;
	spin_lock(&pktq->lock);
	pkt = skb_to_pkt(skb_dequeue(&pktq->head));
	if (pkt) {
		atomic_dec(&pktq->count);
		caif_assert(atomic_read(&pktq->count) >= 0);
	}
	spin_unlock(&pktq->lock);
	return pkt;
}
EXPORT_SYMBOL(cfpkt_dequeue);
548 | |||
/* Number of packets currently queued (may be transiently high, see
 * cfpkt_queue). */
int cfpkt_qcount(struct cfpktq *pktq)
{
	return atomic_read(&pktq->count);
}
EXPORT_SYMBOL(cfpkt_qcount);
554 | |||
555 | struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt) | ||
556 | { | ||
557 | struct cfpkt *clone; | ||
558 | clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC)); | ||
559 | /* Free original packet. */ | ||
560 | cfpkt_destroy(pkt); | ||
561 | if (!clone) | ||
562 | return NULL; | ||
563 | return clone; | ||
564 | } | ||
565 | EXPORT_SYMBOL(cfpkt_clone_release); | ||
566 | |||
/* CAIF payload routing info also lives in the skb control buffer.
 * NOTE(review): this aliases the same cb[] bytes as cfpkt_priv() —
 * confirm the two layouts are never used on the same packet phase. */
struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
{
	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
}
EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c new file mode 100644 index 000000000000..cd2830fec935 --- /dev/null +++ b/net/caif/cfrfml.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cfpkt.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
15 | |||
16 | #define RFM_SEGMENTATION_BIT 0x01 | ||
17 | #define RFM_PAYLOAD 0x00 | ||
18 | #define RFM_CMD_BIT 0x80 | ||
19 | #define RFM_FLOW_OFF 0x81 | ||
20 | #define RFM_FLOW_ON 0x80 | ||
21 | #define RFM_SET_PIN 0x82 | ||
22 | #define RFM_CTRL_PKT_SIZE 1 | ||
23 | |||
24 | static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
25 | static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
26 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl); | ||
27 | |||
28 | struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info) | ||
29 | { | ||
30 | struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
31 | if (!rfm) { | ||
32 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
33 | return NULL; | ||
34 | } | ||
35 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
36 | memset(rfm, 0, sizeof(struct cfsrvl)); | ||
37 | cfsrvl_init(rfm, channel_id, dev_info); | ||
38 | rfm->layer.modemcmd = cfservl_modemcmd; | ||
39 | rfm->layer.receive = cfrfml_receive; | ||
40 | rfm->layer.transmit = cfrfml_transmit; | ||
41 | snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id); | ||
42 | return &rfm->layer; | ||
43 | } | ||
44 | |||
/* RFM does not support modem commands; always reject with -EPROTO. */
static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
	return -EPROTO;
}
49 | |||
50 | static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
51 | { | ||
52 | u8 tmp; | ||
53 | bool segmented; | ||
54 | int ret; | ||
55 | caif_assert(layr->up != NULL); | ||
56 | caif_assert(layr->receive != NULL); | ||
57 | |||
58 | /* | ||
59 | * RFM is taking care of segmentation and stripping of | ||
60 | * segmentation bit. | ||
61 | */ | ||
62 | if (cfpkt_extr_head(pkt, &tmp, 1) < 0) { | ||
63 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return -EPROTO; | ||
66 | } | ||
67 | segmented = tmp & RFM_SEGMENTATION_BIT; | ||
68 | caif_assert(!segmented); | ||
69 | |||
70 | ret = layr->up->receive(layr->up, pkt); | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
75 | { | ||
76 | u8 tmp = 0; | ||
77 | int ret; | ||
78 | struct cfsrvl *service = container_obj(layr); | ||
79 | |||
80 | caif_assert(layr->dn != NULL); | ||
81 | caif_assert(layr->dn->transmit != NULL); | ||
82 | |||
83 | if (!cfsrvl_ready(service, &ret)) | ||
84 | return ret; | ||
85 | |||
86 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
87 | pr_err("CAIF: %s():Packet too large - size=%d\n", | ||
88 | __func__, cfpkt_getlen(pkt)); | ||
89 | return -EOVERFLOW; | ||
90 | } | ||
91 | if (cfpkt_add_head(pkt, &tmp, 1) < 0) { | ||
92 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
93 | return -EPROTO; | ||
94 | } | ||
95 | |||
96 | /* Add info for MUX-layer to route the packet out. */ | ||
97 | cfpkt_info(pkt)->channel_id = service->layer.id; | ||
98 | /* | ||
99 | * To optimize alignment, we add up the size of CAIF header before | ||
100 | * payload. | ||
101 | */ | ||
102 | cfpkt_info(pkt)->hdr_len = 1; | ||
103 | cfpkt_info(pkt)->dev_info = &service->dev_info; | ||
104 | ret = layr->dn->transmit(layr->dn, pkt); | ||
105 | if (ret < 0) | ||
106 | cfpkt_extr_head(pkt, &tmp, 1); | ||
107 | return ret; | ||
108 | } | ||
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c new file mode 100644 index 000000000000..06029ea2da2f --- /dev/null +++ b/net/caif/cfserl.c | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | #include <net/caif/cfserl.h> | ||
13 | |||
14 | #define container_obj(layr) ((struct cfserl *) layr) | ||
15 | |||
16 | #define CFSERL_STX 0x02 | ||
17 | #define CAIF_MINIUM_PACKET_SIZE 4 | ||
/* Serial framing layer state. */
struct cfserl {
	struct cflayer layer;		/* Must be first (container_obj casts) */
	struct cfpkt *incomplete_frm;	/* Partial frame awaiting more data */
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;			/* Delimit frames with an STX byte */
};
25 | #define STXLEN(layr) (layr->usestx ? 1 : 0) | ||
26 | |||
27 | static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
28 | static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
29 | static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
30 | int phyid); | ||
31 | |||
32 | struct cflayer *cfserl_create(int type, int instance, bool use_stx) | ||
33 | { | ||
34 | struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); | ||
35 | if (!this) { | ||
36 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
37 | return NULL; | ||
38 | } | ||
39 | caif_assert(offsetof(struct cfserl, layer) == 0); | ||
40 | memset(this, 0, sizeof(struct cfserl)); | ||
41 | this->layer.receive = cfserl_receive; | ||
42 | this->layer.transmit = cfserl_transmit; | ||
43 | this->layer.ctrlcmd = cfserl_ctrlcmd; | ||
44 | this->layer.type = type; | ||
45 | this->usestx = use_stx; | ||
46 | spin_lock_init(&this->sync); | ||
47 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); | ||
48 | return &this->layer; | ||
49 | } | ||
50 | |||
/*
 * Reassemble CAIF frames from a serial byte stream.
 *
 * Incoming data may contain partial frames, multiple frames, or noise
 * before an STX delimiter (when usestx is set). Any left-over partial
 * frame is merged with the new data, then the loop extracts complete
 * frames one at a time and delivers each to the layer above. The lock
 * is dropped around the upcall and re-taken afterwards, so the loop
 * state must not rely on incomplete_frm across that window.
 */
static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;
	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	/* Merge any previously stored partial frame with the new data. */
	if (layr->incomplete_frm != NULL) {

		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;

	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				/* Discard noise bytes until STX or EOF. */
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 * pkt_len is the accumulated length of the packet data
		 * we have received so far.
		 * Exit if frame doesn't hold length.
		 */

		if (pkt_len < 2) {
			/* Re-add the consumed STX before stashing. */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Find length of frame.
		 * expectlen is the length we need for a full frame.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;
		/*
		 * Frame error handling
		 */
		if (expectlen < CAIF_MINIUM_PACKET_SIZE
		    || expectlen > CAIF_MAX_FRAMESIZE) {
			/* Without STX we cannot resynchronize: give up. */
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			/* With STX, scan forward for the next delimiter. */
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of packet upwards.*/
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				/* Rejoin the tail and rescan for STX. */
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);

				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		/* Loop again on any remaining data after the split. */
		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}
173 | |||
174 | static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) | ||
175 | { | ||
176 | struct cfserl *layr = container_obj(layer); | ||
177 | int ret; | ||
178 | u8 tmp8 = CFSERL_STX; | ||
179 | if (layr->usestx) | ||
180 | cfpkt_add_head(newpkt, &tmp8, 1); | ||
181 | ret = layer->dn->transmit(layer->dn, newpkt); | ||
182 | if (ret < 0) | ||
183 | cfpkt_extr_head(newpkt, &tmp8, 1); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | |||
/* Forward control commands unchanged to the layer above. */
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
				int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c new file mode 100644 index 000000000000..aff31f34528f --- /dev/null +++ b/net/caif/cfsrvl.c | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define SRVL_CTRL_PKT_SIZE 1 | ||
16 | #define SRVL_FLOW_OFF 0x81 | ||
17 | #define SRVL_FLOW_ON 0x80 | ||
18 | #define SRVL_SET_PIN 0x82 | ||
19 | #define SRVL_CTRL_PKT_SIZE 1 | ||
20 | |||
21 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
22 | |||
/*
 * Handle control commands for a generic service layer: track channel
 * open state and modem/phy flow-control state, and forward flow
 * indications upwards only when the other flow-control source is on
 * (so the layer above sees a single combined flow state).
 */
static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
				int phyid)
{
	struct cfsrvl *service = container_obj(layr);
	caif_assert(layr->up != NULL);
	caif_assert(layr->up->ctrlcmd != NULL);
	switch (ctrl) {
	case CAIF_CTRLCMD_INIT_RSP:
		/* Channel setup completed: mark open and notify above. */
		service->open = true;
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	case CAIF_CTRLCMD_DEINIT_RSP:
	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Channel torn down or setup failed: mark closed. */
		service->open = false;
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
		/* Only react to events for our own physical interface. */
		if (phyid != service->dev_info.id)
			break;
		if (service->modem_flow_on)
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
		service->phy_flow_on = false;
		break;
	case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
		if (phyid != service->dev_info.id)
			return;
		if (service->modem_flow_on) {
			layr->up->ctrlcmd(layr->up,
					   CAIF_CTRLCMD_FLOW_ON_IND,
					   phyid);
		}
		service->phy_flow_on = true;
		break;
	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem flow off: forward only if phy flow was on. */
		if (service->phy_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
		}
		service->modem_flow_on = false;
		break;
	case CAIF_CTRLCMD_FLOW_ON_IND:
		if (service->phy_flow_on) {
			layr->up->ctrlcmd(layr->up,
					  CAIF_CTRLCMD_FLOW_ON_IND, phyid);
		}
		service->modem_flow_on = true;
		break;
	case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
		/* In case interface is down, let's fake a remove shutdown */
		layr->up->ctrlcmd(layr->up,
				  CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
		break;
	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		break;
	default:
		pr_warning("CAIF: %s(): "
			   "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
		/* We have both modem and phy flow on, send flow on */
		layr->up->ctrlcmd(layr->up, ctrl, phyid);
		service->phy_flow_on = true;
		break;
	}
}
88 | |||
89 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
90 | { | ||
91 | struct cfsrvl *service = container_obj(layr); | ||
92 | caif_assert(layr != NULL); | ||
93 | caif_assert(layr->dn != NULL); | ||
94 | caif_assert(layr->dn->transmit != NULL); | ||
95 | switch (ctrl) { | ||
96 | case CAIF_MODEMCMD_FLOW_ON_REQ: | ||
97 | { | ||
98 | struct cfpkt *pkt; | ||
99 | struct caif_payload_info *info; | ||
100 | u8 flow_on = SRVL_FLOW_ON; | ||
101 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | ||
102 | if (!pkt) { | ||
103 | pr_warning("CAIF: %s(): Out of memory\n", | ||
104 | __func__); | ||
105 | return -ENOMEM; | ||
106 | } | ||
107 | |||
108 | if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { | ||
109 | pr_err("CAIF: %s(): Packet is erroneous!\n", | ||
110 | __func__); | ||
111 | cfpkt_destroy(pkt); | ||
112 | return -EPROTO; | ||
113 | } | ||
114 | info = cfpkt_info(pkt); | ||
115 | info->channel_id = service->layer.id; | ||
116 | info->hdr_len = 1; | ||
117 | info->dev_info = &service->dev_info; | ||
118 | return layr->dn->transmit(layr->dn, pkt); | ||
119 | } | ||
120 | case CAIF_MODEMCMD_FLOW_OFF_REQ: | ||
121 | { | ||
122 | struct cfpkt *pkt; | ||
123 | struct caif_payload_info *info; | ||
124 | u8 flow_off = SRVL_FLOW_OFF; | ||
125 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | ||
126 | if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { | ||
127 | pr_err("CAIF: %s(): Packet is erroneous!\n", | ||
128 | __func__); | ||
129 | cfpkt_destroy(pkt); | ||
130 | return -EPROTO; | ||
131 | } | ||
132 | info = cfpkt_info(pkt); | ||
133 | info->channel_id = service->layer.id; | ||
134 | info->hdr_len = 1; | ||
135 | info->dev_info = &service->dev_info; | ||
136 | return layr->dn->transmit(layr->dn, pkt); | ||
137 | } | ||
138 | default: | ||
139 | break; | ||
140 | } | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
/* Free a service layer allocated by one of the cf*_create() helpers. */
void cfservl_destroy(struct cflayer *layer)
{
	kfree(layer);
}
148 | |||
/*
 * Common initialisation for all service layers: install the shared
 * ctrlcmd/modemcmd handlers, record channel id and device info, and
 * start with the channel closed but both flow-control sources enabled.
 * Callers may override modemcmd/receive/transmit afterwards.
 */
void cfsrvl_init(struct cfsrvl *service,
		 u8 channel_id,
		 struct dev_info *dev_info)
{
	/* container_obj() casts rely on layer being the first member. */
	caif_assert(offsetof(struct cfsrvl, layer) == 0);
	service->open = false;
	service->modem_flow_on = true;
	service->phy_flow_on = true;
	service->layer.id = channel_id;
	service->layer.ctrlcmd = cfservl_ctrlcmd;
	service->layer.modemcmd = cfservl_modemcmd;
	service->dev_info = *dev_info;
	kref_init(&service->ref);
}
163 | |||
/* kref release callback: free the service when the last ref is dropped. */
void cfsrvl_release(struct kref *kref)
{
	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
	kfree(service);
}
169 | |||
170 | bool cfsrvl_ready(struct cfsrvl *service, int *err) | ||
171 | { | ||
172 | if (service->open && service->modem_flow_on && service->phy_flow_on) | ||
173 | return true; | ||
174 | if (!service->open) { | ||
175 | *err = -ENOTCONN; | ||
176 | return false; | ||
177 | } | ||
178 | caif_assert(!(service->modem_flow_on && service->phy_flow_on)); | ||
179 | *err = -EAGAIN; | ||
180 | return false; | ||
181 | } | ||
/* Return the physical interface id the service is bound to. */
u8 cfsrvl_getphyid(struct cflayer *layer)
{
	struct cfsrvl *servl = container_obj(layer);
	return servl->dev_info.id;
}
187 | |||
/* True if the service is bound to the given physical interface id. */
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
{
	struct cfsrvl *servl = container_obj(layer);
	return servl->dev_info.id == phyid;
}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c new file mode 100644 index 000000000000..5fd2c9ea8b42 --- /dev/null +++ b/net/caif/cfutill.c | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
16 | #define UTIL_PAYLOAD 0x00 | ||
17 | #define UTIL_CMD_BIT 0x80 | ||
18 | #define UTIL_REMOTE_SHUTDOWN 0x82 | ||
19 | #define UTIL_FLOW_OFF 0x81 | ||
20 | #define UTIL_FLOW_ON 0x80 | ||
21 | #define UTIL_CTRL_PKT_SIZE 1 | ||
22 | static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
24 | |||
25 | struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info) | ||
26 | { | ||
27 | struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
28 | if (!util) { | ||
29 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
30 | return NULL; | ||
31 | } | ||
32 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
33 | memset(util, 0, sizeof(struct cfsrvl)); | ||
34 | cfsrvl_init(util, channel_id, dev_info); | ||
35 | util->layer.receive = cfutill_receive; | ||
36 | util->layer.transmit = cfutill_transmit; | ||
37 | snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1"); | ||
38 | return &util->layer; | ||
39 | } | ||
40 | |||
41 | static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
42 | { | ||
43 | u8 cmd = -1; | ||
44 | struct cfsrvl *service = container_obj(layr); | ||
45 | caif_assert(layr != NULL); | ||
46 | caif_assert(layr->up != NULL); | ||
47 | caif_assert(layr->up->receive != NULL); | ||
48 | caif_assert(layr->up->ctrlcmd != NULL); | ||
49 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
50 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
51 | cfpkt_destroy(pkt); | ||
52 | return -EPROTO; | ||
53 | } | ||
54 | |||
55 | switch (cmd) { | ||
56 | case UTIL_PAYLOAD: | ||
57 | return layr->up->receive(layr->up, pkt); | ||
58 | case UTIL_FLOW_OFF: | ||
59 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
60 | cfpkt_destroy(pkt); | ||
61 | return 0; | ||
62 | case UTIL_FLOW_ON: | ||
63 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return 0; | ||
66 | case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */ | ||
67 | pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n", | ||
68 | __func__); | ||
69 | layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0); | ||
70 | service->open = false; | ||
71 | cfpkt_destroy(pkt); | ||
72 | return 0; | ||
73 | default: | ||
74 | cfpkt_destroy(pkt); | ||
75 | pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n", | ||
76 | __func__, cmd, cmd); | ||
77 | return -EPROTO; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
82 | { | ||
83 | u8 zero = 0; | ||
84 | struct caif_payload_info *info; | ||
85 | int ret; | ||
86 | struct cfsrvl *service = container_obj(layr); | ||
87 | caif_assert(layr != NULL); | ||
88 | caif_assert(layr->dn != NULL); | ||
89 | caif_assert(layr->dn->transmit != NULL); | ||
90 | if (!cfsrvl_ready(service, &ret)) | ||
91 | return ret; | ||
92 | |||
93 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
94 | pr_err("CAIF: %s(): packet too large size=%d\n", | ||
95 | __func__, cfpkt_getlen(pkt)); | ||
96 | return -EOVERFLOW; | ||
97 | } | ||
98 | |||
99 | cfpkt_add_head(pkt, &zero, 1); | ||
100 | /* Add info for MUX-layer to route the packet out. */ | ||
101 | info = cfpkt_info(pkt); | ||
102 | info->channel_id = service->layer.id; | ||
103 | /* | ||
104 | * To optimize alignment, we add up the size of CAIF header before | ||
105 | * payload. | ||
106 | */ | ||
107 | info->hdr_len = 1; | ||
108 | info->dev_info = &service->dev_info; | ||
109 | ret = layr->dn->transmit(layr->dn, pkt); | ||
110 | if (ret < 0) { | ||
111 | u32 tmp32; | ||
112 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
113 | } | ||
114 | return ret; | ||
115 | } | ||
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c new file mode 100644 index 000000000000..0fd827f49491 --- /dev/null +++ b/net/caif/cfveil.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfsrvl.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | |||
13 | #define VEI_PAYLOAD 0x00 | ||
14 | #define VEI_CMD_BIT 0x80 | ||
15 | #define VEI_FLOW_OFF 0x81 | ||
16 | #define VEI_FLOW_ON 0x80 | ||
17 | #define VEI_SET_PIN 0x82 | ||
18 | #define VEI_CTRL_PKT_SIZE 1 | ||
19 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
20 | |||
21 | static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
22 | static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | |||
24 | struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) | ||
25 | { | ||
26 | struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
27 | if (!vei) { | ||
28 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
29 | return NULL; | ||
30 | } | ||
31 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
32 | memset(vei, 0, sizeof(struct cfsrvl)); | ||
33 | cfsrvl_init(vei, channel_id, dev_info); | ||
34 | vei->layer.receive = cfvei_receive; | ||
35 | vei->layer.transmit = cfvei_transmit; | ||
36 | snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id); | ||
37 | return &vei->layer; | ||
38 | } | ||
39 | |||
40 | static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
41 | { | ||
42 | u8 cmd; | ||
43 | int ret; | ||
44 | caif_assert(layr->up != NULL); | ||
45 | caif_assert(layr->receive != NULL); | ||
46 | caif_assert(layr->ctrlcmd != NULL); | ||
47 | |||
48 | |||
49 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
50 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
51 | cfpkt_destroy(pkt); | ||
52 | return -EPROTO; | ||
53 | } | ||
54 | switch (cmd) { | ||
55 | case VEI_PAYLOAD: | ||
56 | ret = layr->up->receive(layr->up, pkt); | ||
57 | return ret; | ||
58 | case VEI_FLOW_OFF: | ||
59 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
60 | cfpkt_destroy(pkt); | ||
61 | return 0; | ||
62 | case VEI_FLOW_ON: | ||
63 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return 0; | ||
66 | case VEI_SET_PIN: /* SET RS232 PIN */ | ||
67 | cfpkt_destroy(pkt); | ||
68 | return 0; | ||
69 | default: /* SET RS232 PIN */ | ||
70 | pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n", | ||
71 | __func__, cmd, cmd); | ||
72 | cfpkt_destroy(pkt); | ||
73 | return -EPROTO; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
78 | { | ||
79 | u8 tmp = 0; | ||
80 | struct caif_payload_info *info; | ||
81 | int ret; | ||
82 | struct cfsrvl *service = container_obj(layr); | ||
83 | if (!cfsrvl_ready(service, &ret)) | ||
84 | return ret; | ||
85 | caif_assert(layr->dn != NULL); | ||
86 | caif_assert(layr->dn->transmit != NULL); | ||
87 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", | ||
89 | __func__, cfpkt_getlen(pkt)); | ||
90 | return -EOVERFLOW; | ||
91 | } | ||
92 | |||
93 | if (cfpkt_add_head(pkt, &tmp, 1) < 0) { | ||
94 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
95 | return -EPROTO; | ||
96 | } | ||
97 | |||
98 | /* Add info-> for MUX-layer to route the packet out. */ | ||
99 | info = cfpkt_info(pkt); | ||
100 | info->channel_id = service->layer.id; | ||
101 | info->hdr_len = 1; | ||
102 | info->dev_info = &service->dev_info; | ||
103 | ret = layr->dn->transmit(layr->dn, pkt); | ||
104 | if (ret < 0) | ||
105 | cfpkt_extr_head(pkt, &tmp, 1); | ||
106 | return ret; | ||
107 | } | ||
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c new file mode 100644 index 000000000000..89ad4ea239f1 --- /dev/null +++ b/net/caif/cfvidl.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
16 | |||
17 | static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
18 | static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
19 | |||
20 | struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info) | ||
21 | { | ||
22 | struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
23 | if (!vid) { | ||
24 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
25 | return NULL; | ||
26 | } | ||
27 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
28 | |||
29 | memset(vid, 0, sizeof(struct cfsrvl)); | ||
30 | cfsrvl_init(vid, channel_id, dev_info); | ||
31 | vid->layer.receive = cfvidl_receive; | ||
32 | vid->layer.transmit = cfvidl_transmit; | ||
33 | snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1"); | ||
34 | return &vid->layer; | ||
35 | } | ||
36 | |||
37 | static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
38 | { | ||
39 | u32 videoheader; | ||
40 | if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) { | ||
41 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
42 | cfpkt_destroy(pkt); | ||
43 | return -EPROTO; | ||
44 | } | ||
45 | return layr->up->receive(layr->up, pkt); | ||
46 | } | ||
47 | |||
/*
 * Transmit path for the video service: prepend a zeroed 4-byte video
 * header, fill in MUX routing info and hand the packet down. On lower
 * layer failure the header is removed again.
 * NOTE(review): unlike the other services, hdr_len is not set here and
 * no payload-size check is done — confirm whether that is intentional.
 */
static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfsrvl *service = container_obj(layr);
	struct caif_payload_info *info;
	u32 videoheader = 0;
	int ret;
	if (!cfsrvl_ready(service, &ret))
		return ret;
	cfpkt_add_head(pkt, &videoheader, 4);
	/* Add info for MUX-layer to route the packet out */
	info = cfpkt_info(pkt);
	info->channel_id = service->layer.id;
	info->dev_info = &service->dev_info;
	ret = layr->dn->transmit(layr->dn, pkt);
	if (ret < 0)
		cfpkt_extr_head(pkt, &videoheader, 4);
	return ret;
}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c new file mode 100644 index 000000000000..610966abe2dc --- /dev/null +++ b/net/caif/chnl_net.c | |||
@@ -0,0 +1,467 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * Daniel Martensson / Daniel.Martensson@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | */ | ||
7 | |||
8 | #include <linux/version.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/if_ether.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/ip.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/sockios.h> | ||
18 | #include <linux/caif/if_caif.h> | ||
19 | #include <net/rtnetlink.h> | ||
20 | #include <net/caif/caif_layer.h> | ||
21 | #include <net/caif/cfcnfg.h> | ||
22 | #include <net/caif/cfpkt.h> | ||
23 | #include <net/caif/caif_dev.h> | ||
24 | |||
25 | /* GPRS PDP connection has MTU to 1500 */ | ||
26 | #define SIZE_MTU 1500 | ||
27 | /* 5 sec. connect timeout */ | ||
28 | #define CONNECT_TIMEOUT (5 * HZ) | ||
29 | #define CAIF_NET_DEFAULT_QUEUE_LEN 500 | ||
30 | |||
31 | #undef pr_debug | ||
32 | #define pr_debug pr_warning | ||
33 | |||
34 | /*This list is protected by the rtnl lock. */ | ||
35 | static LIST_HEAD(chnl_net_list); | ||
36 | |||
37 | MODULE_LICENSE("GPL"); | ||
38 | MODULE_ALIAS_RTNL_LINK("caif"); | ||
39 | |||
40 | enum caif_states { | ||
41 | CAIF_CONNECTED = 1, | ||
42 | CAIF_CONNECTING, | ||
43 | CAIF_DISCONNECTED, | ||
44 | CAIF_SHUTDOWN | ||
45 | }; | ||
46 | |||
/*
 * Per-device state for a CAIF IP ("chnl") network interface.
 * Lives in the net_device private area; instances are linked into
 * chnl_net_list, which is protected by the RTNL lock.
 */
struct chnl_net {
	struct cflayer chnl;		/* CAIF layer; callbacks recover this via container_of */
	struct net_device_stats stats;	/* NOTE(review): appears unused -- dev->stats is updated instead; confirm */
	struct caif_connect_request conn_req;	/* connect parameters, filled from netlink attrs */
	struct list_head list_field;	/* linkage into chnl_net_list */
	struct net_device *netdev;	/* back-pointer to the owning net_device */
	char name[256];			/* copy of the interface name (set in ndo_init) */
	wait_queue_head_t netmgmt_wq;	/* open() waits here for connect result */
	/* Flow status to remember and control the transmission. */
	bool flowenabled;
	enum caif_states state;		/* channel connection state */
};
59 | |||
60 | static void robust_list_del(struct list_head *delete_node) | ||
61 | { | ||
62 | struct list_head *list_node; | ||
63 | struct list_head *n; | ||
64 | ASSERT_RTNL(); | ||
65 | list_for_each_safe(list_node, n, &chnl_net_list) { | ||
66 | if (list_node == delete_node) { | ||
67 | list_del(list_node); | ||
68 | return; | ||
69 | } | ||
70 | } | ||
71 | WARN_ON(1); | ||
72 | } | ||
73 | |||
74 | static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) | ||
75 | { | ||
76 | struct sk_buff *skb; | ||
77 | struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); | ||
78 | int pktlen; | ||
79 | int err = 0; | ||
80 | |||
81 | priv = container_of(layr, struct chnl_net, chnl); | ||
82 | |||
83 | if (!priv) | ||
84 | return -EINVAL; | ||
85 | |||
86 | /* Get length of CAIF packet. */ | ||
87 | pktlen = cfpkt_getlen(pkt); | ||
88 | |||
89 | skb = (struct sk_buff *) cfpkt_tonative(pkt); | ||
90 | /* Pass some minimum information and | ||
91 | * send the packet to the net stack. | ||
92 | */ | ||
93 | skb->dev = priv->netdev; | ||
94 | skb->protocol = htons(ETH_P_IP); | ||
95 | |||
96 | /* If we change the header in loop mode, the checksum is corrupted. */ | ||
97 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | ||
98 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
99 | else | ||
100 | skb->ip_summed = CHECKSUM_NONE; | ||
101 | |||
102 | if (in_interrupt()) | ||
103 | netif_rx(skb); | ||
104 | else | ||
105 | netif_rx_ni(skb); | ||
106 | |||
107 | /* Update statistics. */ | ||
108 | priv->netdev->stats.rx_packets++; | ||
109 | priv->netdev->stats.rx_bytes += pktlen; | ||
110 | |||
111 | return err; | ||
112 | } | ||
113 | |||
114 | static int delete_device(struct chnl_net *dev) | ||
115 | { | ||
116 | ASSERT_RTNL(); | ||
117 | if (dev->netdev) | ||
118 | unregister_netdevice(dev->netdev); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
/*
 * Deferred close handler (scheduled by chnl_flowctrl_cb on remote
 * shutdown): walks the device list and calls dev_close() on every
 * interface whose channel state is CAIF_SHUTDOWN.
 */
static void close_work(struct work_struct *work)
{
	struct chnl_net *dev = NULL;
	struct list_head *list_node;
	struct list_head *_tmp;
	/* May be called with or without RTNL lock held */
	int islocked = rtnl_is_locked();
	/*
	 * NOTE(review): rtnl_is_locked() only reports that *someone*
	 * holds the lock, not that we do -- this check-then-lock pattern
	 * looks racy; confirm the callers' locking context.
	 */
	if (!islocked)
		rtnl_lock();
	list_for_each_safe(list_node, _tmp, &chnl_net_list) {
		dev = list_entry(list_node, struct chnl_net, list_field);
		if (dev->state == CAIF_SHUTDOWN)
			dev_close(dev->netdev);
	}
	if (!islocked)
		rtnl_unlock();
}
/* Single shared work item used for all deferred closes. */
static DECLARE_WORK(close_worker, close_work);
140 | |||
141 | static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, | ||
142 | int phyid) | ||
143 | { | ||
144 | struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); | ||
145 | pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n", | ||
146 | __func__, | ||
147 | flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : | ||
148 | flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : | ||
149 | flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : | ||
150 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : | ||
151 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : | ||
152 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? | ||
153 | "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); | ||
154 | |||
155 | |||
156 | |||
157 | switch (flow) { | ||
158 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
159 | priv->flowenabled = false; | ||
160 | netif_stop_queue(priv->netdev); | ||
161 | break; | ||
162 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
163 | priv->state = CAIF_DISCONNECTED; | ||
164 | break; | ||
165 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
166 | priv->state = CAIF_DISCONNECTED; | ||
167 | wake_up_interruptible(&priv->netmgmt_wq); | ||
168 | break; | ||
169 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
170 | priv->state = CAIF_SHUTDOWN; | ||
171 | netif_tx_disable(priv->netdev); | ||
172 | schedule_work(&close_worker); | ||
173 | break; | ||
174 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
175 | priv->flowenabled = true; | ||
176 | netif_wake_queue(priv->netdev); | ||
177 | break; | ||
178 | case CAIF_CTRLCMD_INIT_RSP: | ||
179 | priv->state = CAIF_CONNECTED; | ||
180 | priv->flowenabled = true; | ||
181 | netif_wake_queue(priv->netdev); | ||
182 | wake_up_interruptible(&priv->netmgmt_wq); | ||
183 | break; | ||
184 | default: | ||
185 | break; | ||
186 | } | ||
187 | } | ||
188 | |||
/*
 * netdev transmit handler: convert the outgoing sk_buff into a CAIF
 * packet and push it down the CAIF layer stack.  In loopback mode the
 * IP source/destination addresses are swapped first so the reflected
 * packet comes back addressed to us.
 */
static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct chnl_net *priv;
	struct cfpkt *pkt = NULL;
	int len;
	int result = -1;
	/* Get our private data. */
	priv = netdev_priv(dev);

	if (skb->len > priv->netdev->mtu) {
		pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
		/*
		 * NOTE(review): ndo_start_xmit is expected to return a
		 * NETDEV_TX_* code; returning -ENOSPC here without
		 * freeing the skb looks like a leak/contract violation --
		 * confirm against the core xmit path.
		 */
		return -ENOSPC;
	}

	if (!priv->flowenabled) {
		pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
		/* Flow is off: let the core requeue and retry later. */
		return NETDEV_TX_BUSY;
	}

	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
		swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/* Store original SKB length. */
	len = skb->len;

	/* Convert the skb to a CAIF packet. */
	pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);

	/* Send the packet down the stack. */
	result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
	if (result) {
		/* -EAGAIN maps to "try again" for the qdisc layer. */
		if (result == -EAGAIN)
			result = NETDEV_TX_BUSY;
		return result;
	}

	/* Update statistics. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
230 | |||
/*
 * netdev open handler: start the CAIF channel connect (unless one is
 * already in progress) and wait up to CONNECT_TIMEOUT for the
 * flow-control callback to report the outcome.
 *
 * Returns 0 on success, -ENODEV without private data, the
 * caif_connect_client() error on immediate failure, -ERESTARTSYS if
 * interrupted by a signal, -ETIMEDOUT on timeout (the half-open
 * connection is torn down), or -ECONNREFUSED if the connect failed.
 */
static int chnl_net_open(struct net_device *dev)
{
	struct chnl_net *priv = NULL;
	int result = -1;
	ASSERT_RTNL();
	priv = netdev_priv(dev);
	if (!priv) {
		pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
		return -ENODEV;
	}

	if (priv->state != CAIF_CONNECTING) {
		priv->state = CAIF_CONNECTING;
		result = caif_connect_client(&priv->conn_req, &priv->chnl);
		if (result != 0) {
			priv->state = CAIF_DISCONNECTED;
			pr_debug("CAIF: %s(): err: "
				"Unable to register and open device,"
				" Err:%d\n",
				__func__,
				result);
			return result;
		}
	}

	/* chnl_flowctrl_cb() moves state out of CAIF_CONNECTING and
	 * wakes this queue on INIT_RSP or INIT_FAIL_RSP. */
	result = wait_event_interruptible_timeout(priv->netmgmt_wq,
			priv->state != CAIF_CONNECTING,
			CONNECT_TIMEOUT);

	if (result == -ERESTARTSYS) {
		pr_debug("CAIF: %s(): wait_event_interruptible"
			" woken by a signal\n", __func__);
		return -ERESTARTSYS;
	}
	if (result == 0) {
		/* Timed out: tear the pending connection down again. */
		pr_debug("CAIF: %s(): connect timeout\n", __func__);
		caif_disconnect_client(&priv->chnl);
		priv->state = CAIF_DISCONNECTED;
		pr_debug("CAIF: %s(): state disconnected\n", __func__);
		return -ETIMEDOUT;
	}

	if (priv->state != CAIF_CONNECTED) {
		pr_debug("CAIF: %s(): connect failed\n", __func__);
		return -ECONNREFUSED;
	}
	pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
	return 0;
}
280 | |||
281 | static int chnl_net_stop(struct net_device *dev) | ||
282 | { | ||
283 | struct chnl_net *priv; | ||
284 | |||
285 | ASSERT_RTNL(); | ||
286 | priv = netdev_priv(dev); | ||
287 | priv->state = CAIF_DISCONNECTED; | ||
288 | caif_disconnect_client(&priv->chnl); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | static int chnl_net_init(struct net_device *dev) | ||
293 | { | ||
294 | struct chnl_net *priv; | ||
295 | ASSERT_RTNL(); | ||
296 | priv = netdev_priv(dev); | ||
297 | strncpy(priv->name, dev->name, sizeof(priv->name)); | ||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | static void chnl_net_uninit(struct net_device *dev) | ||
302 | { | ||
303 | struct chnl_net *priv; | ||
304 | ASSERT_RTNL(); | ||
305 | priv = netdev_priv(dev); | ||
306 | robust_list_del(&priv->list_field); | ||
307 | } | ||
308 | |||
/* net_device callbacks for the CAIF IP interface. */
static const struct net_device_ops netdev_ops = {
	.ndo_open = chnl_net_open,
	.ndo_stop = chnl_net_stop,
	.ndo_init = chnl_net_init,
	.ndo_uninit = chnl_net_uninit,
	.ndo_start_xmit = chnl_net_start_xmit,
};
316 | |||
/*
 * rtnl_link setup callback: initialise the net_device and the private
 * chnl_net state with defaults, and add the device to the global list.
 * Runs under RTNL, before ipcaif_newlink().
 */
static void ipcaif_net_setup(struct net_device *dev)
{
	struct chnl_net *priv;
	dev->netdev_ops = &netdev_ops;
	dev->destructor = free_netdev;
	dev->flags |= IFF_NOARP;
	dev->flags |= IFF_POINTOPOINT;
	dev->needed_headroom = CAIF_NEEDED_HEADROOM;
	dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
	dev->mtu = SIZE_MTU;
	dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;

	priv = netdev_priv(dev);
	priv->chnl.receive = chnl_recv_cb;
	priv->chnl.ctrlcmd = chnl_flowctrl_cb;
	priv->netdev = dev;
	priv->conn_req.protocol = CAIFPROTO_DATAGRAM;
	priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
	priv->conn_req.priority = CAIF_PRIO_LOW;
	/* Insert illegal value */
	/* -1 marks the connection id as not yet set via netlink
	 * (see caif_netlink_parms). */
	priv->conn_req.sockaddr.u.dgm.connection_id = -1;
	priv->flowenabled = false;

	ASSERT_RTNL();
	init_waitqueue_head(&priv->netmgmt_wq);
	list_add(&priv->list_field, &chnl_net_list);
}
344 | |||
345 | |||
/*
 * rtnl_link fill_info callback: dump the connection id (under both the
 * IPv4 and IPv6 attributes, same value) and the loopback flag into a
 * netlink message.  The NLA_PUT_* macros jump to nla_put_failure when
 * the skb runs out of room, yielding -EMSGSIZE.
 */
static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct chnl_net *priv;
	u8 loop;
	priv = netdev_priv(dev);
	NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
		    priv->conn_req.sockaddr.u.dgm.connection_id);
	NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
		    priv->conn_req.sockaddr.u.dgm.connection_id);
	loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
	NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);


	return 0;
nla_put_failure:
	return -EMSGSIZE;

}
364 | |||
/*
 * Parse the IFLA_CAIF_* netlink attributes into a connect request.
 * If both the IPv4 and IPv6 connection-id attributes are present, the
 * IPv6 value wins (it is parsed last).  Missing attributes leave the
 * existing values untouched.
 */
static void caif_netlink_parms(struct nlattr *data[],
				struct caif_connect_request *conn_req)
{
	if (!data) {
		pr_warning("CAIF: %s: no params data found\n", __func__);
		return;
	}
	if (data[IFLA_CAIF_IPV4_CONNID])
		conn_req->sockaddr.u.dgm.connection_id =
			nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
	if (data[IFLA_CAIF_IPV6_CONNID])
		conn_req->sockaddr.u.dgm.connection_id =
			nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
	if (data[IFLA_CAIF_LOOPBACK]) {
		if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
			conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP;
		else
			conn_req->protocol = CAIFPROTO_DATAGRAM;
	}
}
385 | |||
386 | static int ipcaif_newlink(struct net *src_net, struct net_device *dev, | ||
387 | struct nlattr *tb[], struct nlattr *data[]) | ||
388 | { | ||
389 | int ret; | ||
390 | struct chnl_net *caifdev; | ||
391 | ASSERT_RTNL(); | ||
392 | caifdev = netdev_priv(dev); | ||
393 | caif_netlink_parms(data, &caifdev->conn_req); | ||
394 | dev_net_set(caifdev->netdev, src_net); | ||
395 | |||
396 | ret = register_netdevice(dev); | ||
397 | if (ret) | ||
398 | pr_warning("CAIF: %s(): device rtml registration failed\n", | ||
399 | __func__); | ||
400 | return ret; | ||
401 | } | ||
402 | |||
403 | static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[], | ||
404 | struct nlattr *data[]) | ||
405 | { | ||
406 | struct chnl_net *caifdev; | ||
407 | ASSERT_RTNL(); | ||
408 | caifdev = netdev_priv(dev); | ||
409 | caif_netlink_parms(data, &caifdev->conn_req); | ||
410 | netdev_state_change(dev); | ||
411 | return 0; | ||
412 | } | ||
413 | |||
/*
 * rtnl_link get_size callback: upper bound on the netlink attribute
 * payload emitted by ipcaif_fill_info().
 */
static size_t ipcaif_get_size(const struct net_device *dev)
{
	return
		/* IFLA_CAIF_IPV4_CONNID */
		nla_total_size(4) +
		/* IFLA_CAIF_IPV6_CONNID */
		nla_total_size(4) +
		/* IFLA_CAIF_LOOPBACK */
		/* NOTE(review): the attribute is a u8; a payload of 2 is a
		 * harmless over-estimate (both align to the same size). */
		nla_total_size(2) +
		0;
}
425 | |||
/* Validation policy for the IFLA_CAIF_* netlink attributes. */
static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
	[IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
	[IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
	[IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
};
431 | |||
432 | |||
/* rtnl_link operations for "ip link add type caif" devices. */
static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
	.kind		= "caif",
	.priv_size	= sizeof(struct chnl_net),
	.setup		= ipcaif_net_setup,
	.maxtype	= IFLA_CAIF_MAX,
	.policy		= ipcaif_policy,
	.newlink	= ipcaif_newlink,
	.changelink	= ipcaif_changelink,
	.get_size	= ipcaif_get_size,
	.fill_info	= ipcaif_fill_info,

};
445 | |||
/* Module init: register the "caif" rtnl link type. */
static int __init chnl_init_module(void)
{
	return rtnl_link_register(&ipcaif_link_ops);
}
450 | |||
/*
 * Module exit: unregister the link type, then unregister every
 * remaining device under the RTNL lock.
 */
static void __exit chnl_exit_module(void)
{
	struct chnl_net *dev = NULL;
	struct list_head *list_node;
	struct list_head *_tmp;
	rtnl_link_unregister(&ipcaif_link_ops);
	rtnl_lock();
	list_for_each_safe(list_node, _tmp, &chnl_net_list) {
		dev = list_entry(list_node, struct chnl_net, list_field);
		/*
		 * NOTE(review): the node is removed here *before*
		 * unregister_netdevice() runs ndo_uninit ->
		 * robust_list_del(), which will then not find the node
		 * and hit its WARN_ON -- confirm this is intended.
		 */
		list_del(list_node);
		delete_device(dev);
	}
	rtnl_unlock();
}
465 | |||
466 | module_init(chnl_init_module); | ||
467 | module_exit(chnl_exit_module); | ||
diff --git a/net/can/bcm.c b/net/can/bcm.c index e32af52238a2..907dc871fac8 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/can.h> | 56 | #include <linux/can.h> |
57 | #include <linux/can/core.h> | 57 | #include <linux/can/core.h> |
58 | #include <linux/can/bcm.h> | 58 | #include <linux/can/bcm.h> |
59 | #include <linux/slab.h> | ||
59 | #include <net/sock.h> | 60 | #include <net/sock.h> |
60 | #include <net/net_namespace.h> | 61 | #include <net/net_namespace.h> |
61 | 62 | ||
@@ -1478,6 +1479,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
1478 | struct sock *sk = sock->sk; | 1479 | struct sock *sk = sock->sk; |
1479 | struct bcm_sock *bo = bcm_sk(sk); | 1480 | struct bcm_sock *bo = bcm_sk(sk); |
1480 | 1481 | ||
1482 | if (len < sizeof(*addr)) | ||
1483 | return -EINVAL; | ||
1484 | |||
1481 | if (bo->bound) | 1485 | if (bo->bound) |
1482 | return -EISCONN; | 1486 | return -EISCONN; |
1483 | 1487 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index abca920440b5..da99cf153b33 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/init.h> | 45 | #include <linux/init.h> |
46 | #include <linux/uio.h> | 46 | #include <linux/uio.h> |
47 | #include <linux/net.h> | 47 | #include <linux/net.h> |
48 | #include <linux/slab.h> | ||
48 | #include <linux/netdevice.h> | 49 | #include <linux/netdevice.h> |
49 | #include <linux/socket.h> | 50 | #include <linux/socket.h> |
50 | #include <linux/if_arp.h> | 51 | #include <linux/if_arp.h> |
@@ -444,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, | |||
444 | return -EFAULT; | 445 | return -EFAULT; |
445 | } | 446 | } |
446 | } else if (count == 1) { | 447 | } else if (count == 1) { |
447 | if (copy_from_user(&sfilter, optval, optlen)) | 448 | if (copy_from_user(&sfilter, optval, sizeof(sfilter))) |
448 | return -EFAULT; | 449 | return -EFAULT; |
449 | } | 450 | } |
450 | 451 | ||
diff --git a/net/compat.c b/net/compat.c index a1fb1b079a82..ec24d9edb025 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/gfp.h> | ||
15 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
16 | #include <linux/types.h> | 17 | #include <linux/types.h> |
17 | #include <linux/file.h> | 18 | #include <linux/file.h> |
diff --git a/net/core/Makefile b/net/core/Makefile index 08791ac3e05a..51c3eec850ef 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ | |||
7 | 7 | ||
8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o | 8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o |
9 | 9 | ||
10 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ | 10 | obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ |
11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o | 11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o |
12 | 12 | ||
13 | obj-$(CONFIG_XFRM) += flow.o | 13 | obj-$(CONFIG_XFRM) += flow.o |
diff --git a/net/core/datagram.c b/net/core/datagram.c index 95c2e0840d0d..e0097531417a 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/poll.h> | 48 | #include <linux/poll.h> |
49 | #include <linux/highmem.h> | 49 | #include <linux/highmem.h> |
50 | #include <linux/spinlock.h> | 50 | #include <linux/spinlock.h> |
51 | #include <linux/slab.h> | ||
51 | 52 | ||
52 | #include <net/protocol.h> | 53 | #include <net/protocol.h> |
53 | #include <linux/skbuff.h> | 54 | #include <linux/skbuff.h> |
@@ -85,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |||
85 | int error; | 86 | int error; |
86 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); | 87 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); |
87 | 88 | ||
88 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 89 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
89 | 90 | ||
90 | /* Socket errors? */ | 91 | /* Socket errors? */ |
91 | error = sock_error(sk); | 92 | error = sock_error(sk); |
@@ -114,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |||
114 | error = 0; | 115 | error = 0; |
115 | *timeo_p = schedule_timeout(*timeo_p); | 116 | *timeo_p = schedule_timeout(*timeo_p); |
116 | out: | 117 | out: |
117 | finish_wait(sk->sk_sleep, &wait); | 118 | finish_wait(sk_sleep(sk), &wait); |
118 | return error; | 119 | return error; |
119 | interrupted: | 120 | interrupted: |
120 | error = sock_intr_errno(*timeo_p); | 121 | error = sock_intr_errno(*timeo_p); |
@@ -228,9 +229,18 @@ EXPORT_SYMBOL(skb_free_datagram); | |||
228 | 229 | ||
229 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) | 230 | void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) |
230 | { | 231 | { |
231 | lock_sock(sk); | 232 | if (likely(atomic_read(&skb->users) == 1)) |
232 | skb_free_datagram(sk, skb); | 233 | smp_rmb(); |
233 | release_sock(sk); | 234 | else if (likely(!atomic_dec_and_test(&skb->users))) |
235 | return; | ||
236 | |||
237 | lock_sock_bh(sk); | ||
238 | skb_orphan(skb); | ||
239 | sk_mem_reclaim_partial(sk); | ||
240 | unlock_sock_bh(sk); | ||
241 | |||
242 | /* skb is now orphaned, can be freed outside of locked section */ | ||
243 | __kfree_skb(skb); | ||
234 | } | 244 | } |
235 | EXPORT_SYMBOL(skb_free_datagram_locked); | 245 | EXPORT_SYMBOL(skb_free_datagram_locked); |
236 | 246 | ||
@@ -725,7 +735,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, | |||
725 | struct sock *sk = sock->sk; | 735 | struct sock *sk = sock->sk; |
726 | unsigned int mask; | 736 | unsigned int mask; |
727 | 737 | ||
728 | sock_poll_wait(file, sk->sk_sleep, wait); | 738 | sock_poll_wait(file, sk_sleep(sk), wait); |
729 | mask = 0; | 739 | mask = 0; |
730 | 740 | ||
731 | /* exceptional events? */ | 741 | /* exceptional events? */ |
diff --git a/net/core/dev.c b/net/core/dev.c index bcc490cc9452..36d53be4fca6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -80,6 +80,7 @@ | |||
80 | #include <linux/types.h> | 80 | #include <linux/types.h> |
81 | #include <linux/kernel.h> | 81 | #include <linux/kernel.h> |
82 | #include <linux/hash.h> | 82 | #include <linux/hash.h> |
83 | #include <linux/slab.h> | ||
83 | #include <linux/sched.h> | 84 | #include <linux/sched.h> |
84 | #include <linux/mutex.h> | 85 | #include <linux/mutex.h> |
85 | #include <linux/string.h> | 86 | #include <linux/string.h> |
@@ -129,6 +130,7 @@ | |||
129 | #include <linux/jhash.h> | 130 | #include <linux/jhash.h> |
130 | #include <linux/random.h> | 131 | #include <linux/random.h> |
131 | #include <trace/events/napi.h> | 132 | #include <trace/events/napi.h> |
133 | #include <linux/pci.h> | ||
132 | 134 | ||
133 | #include "net-sysfs.h" | 135 | #include "net-sysfs.h" |
134 | 136 | ||
@@ -206,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) | |||
206 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; | 208 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
207 | } | 209 | } |
208 | 210 | ||
211 | static inline void rps_lock(struct softnet_data *sd) | ||
212 | { | ||
213 | #ifdef CONFIG_RPS | ||
214 | spin_lock(&sd->input_pkt_queue.lock); | ||
215 | #endif | ||
216 | } | ||
217 | |||
218 | static inline void rps_unlock(struct softnet_data *sd) | ||
219 | { | ||
220 | #ifdef CONFIG_RPS | ||
221 | spin_unlock(&sd->input_pkt_queue.lock); | ||
222 | #endif | ||
223 | } | ||
224 | |||
209 | /* Device list insertion */ | 225 | /* Device list insertion */ |
210 | static int list_netdevice(struct net_device *dev) | 226 | static int list_netdevice(struct net_device *dev) |
211 | { | 227 | { |
@@ -248,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
248 | * queue in the local softnet handler. | 264 | * queue in the local softnet handler. |
249 | */ | 265 | */ |
250 | 266 | ||
251 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | 267 | DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
252 | EXPORT_PER_CPU_SYMBOL(softnet_data); | 268 | EXPORT_PER_CPU_SYMBOL(softnet_data); |
253 | 269 | ||
254 | #ifdef CONFIG_LOCKDEP | 270 | #ifdef CONFIG_LOCKDEP |
@@ -772,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype); | |||
772 | 788 | ||
773 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | 789 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
774 | { | 790 | { |
775 | struct net_device *dev; | 791 | struct net_device *dev, *ret = NULL; |
776 | 792 | ||
777 | rtnl_lock(); | 793 | rcu_read_lock(); |
778 | dev = __dev_getfirstbyhwtype(net, type); | 794 | for_each_netdev_rcu(net, dev) |
779 | if (dev) | 795 | if (dev->type == type) { |
780 | dev_hold(dev); | 796 | dev_hold(dev); |
781 | rtnl_unlock(); | 797 | ret = dev; |
782 | return dev; | 798 | break; |
799 | } | ||
800 | rcu_read_unlock(); | ||
801 | return ret; | ||
783 | } | 802 | } |
784 | EXPORT_SYMBOL(dev_getfirstbyhwtype); | 803 | EXPORT_SYMBOL(dev_getfirstbyhwtype); |
785 | 804 | ||
@@ -1084,9 +1103,9 @@ void netdev_state_change(struct net_device *dev) | |||
1084 | } | 1103 | } |
1085 | EXPORT_SYMBOL(netdev_state_change); | 1104 | EXPORT_SYMBOL(netdev_state_change); |
1086 | 1105 | ||
1087 | void netdev_bonding_change(struct net_device *dev, unsigned long event) | 1106 | int netdev_bonding_change(struct net_device *dev, unsigned long event) |
1088 | { | 1107 | { |
1089 | call_netdevice_notifiers(event, dev); | 1108 | return call_netdevice_notifiers(event, dev); |
1090 | } | 1109 | } |
1091 | EXPORT_SYMBOL(netdev_bonding_change); | 1110 | EXPORT_SYMBOL(netdev_bonding_change); |
1092 | 1111 | ||
@@ -1416,6 +1435,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier); | |||
1416 | 1435 | ||
1417 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev) | 1436 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev) |
1418 | { | 1437 | { |
1438 | ASSERT_RTNL(); | ||
1419 | return raw_notifier_call_chain(&netdev_chain, val, dev); | 1439 | return raw_notifier_call_chain(&netdev_chain, val, dev); |
1420 | } | 1440 | } |
1421 | 1441 | ||
@@ -1537,8 +1557,9 @@ static inline void __netif_reschedule(struct Qdisc *q) | |||
1537 | 1557 | ||
1538 | local_irq_save(flags); | 1558 | local_irq_save(flags); |
1539 | sd = &__get_cpu_var(softnet_data); | 1559 | sd = &__get_cpu_var(softnet_data); |
1540 | q->next_sched = sd->output_queue; | 1560 | q->next_sched = NULL; |
1541 | sd->output_queue = q; | 1561 | *sd->output_queue_tailp = q; |
1562 | sd->output_queue_tailp = &q->next_sched; | ||
1542 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | 1563 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
1543 | local_irq_restore(flags); | 1564 | local_irq_restore(flags); |
1544 | } | 1565 | } |
@@ -1783,18 +1804,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); | |||
1783 | * 2. No high memory really exists on this machine. | 1804 | * 2. No high memory really exists on this machine. |
1784 | */ | 1805 | */ |
1785 | 1806 | ||
1786 | static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | 1807 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) |
1787 | { | 1808 | { |
1788 | #ifdef CONFIG_HIGHMEM | 1809 | #ifdef CONFIG_HIGHMEM |
1789 | int i; | 1810 | int i; |
1811 | if (!(dev->features & NETIF_F_HIGHDMA)) { | ||
1812 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1813 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | ||
1814 | return 1; | ||
1815 | } | ||
1790 | 1816 | ||
1791 | if (dev->features & NETIF_F_HIGHDMA) | 1817 | if (PCI_DMA_BUS_IS_PHYS) { |
1792 | return 0; | 1818 | struct device *pdev = dev->dev.parent; |
1793 | |||
1794 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1795 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | ||
1796 | return 1; | ||
1797 | 1819 | ||
1820 | if (!pdev) | ||
1821 | return 0; | ||
1822 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
1823 | dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); | ||
1824 | if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) | ||
1825 | return 1; | ||
1826 | } | ||
1827 | } | ||
1798 | #endif | 1828 | #endif |
1799 | return 0; | 1829 | return 0; |
1800 | } | 1830 | } |
@@ -1852,6 +1882,17 @@ static int dev_gso_segment(struct sk_buff *skb) | |||
1852 | return 0; | 1882 | return 0; |
1853 | } | 1883 | } |
1854 | 1884 | ||
1885 | /* | ||
1886 | * Try to orphan skb early, right before transmission by the device. | ||
1887 | * We cannot orphan skb if tx timestamp is requested, since | ||
1888 | * drivers need to call skb_tstamp_tx() to send the timestamp. | ||
1889 | */ | ||
1890 | static inline void skb_orphan_try(struct sk_buff *skb) | ||
1891 | { | ||
1892 | if (!skb_tx(skb)->flags) | ||
1893 | skb_orphan(skb); | ||
1894 | } | ||
1895 | |||
1855 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 1896 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
1856 | struct netdev_queue *txq) | 1897 | struct netdev_queue *txq) |
1857 | { | 1898 | { |
@@ -1862,13 +1903,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1862 | if (!list_empty(&ptype_all)) | 1903 | if (!list_empty(&ptype_all)) |
1863 | dev_queue_xmit_nit(skb, dev); | 1904 | dev_queue_xmit_nit(skb, dev); |
1864 | 1905 | ||
1865 | if (netif_needs_gso(dev, skb)) { | ||
1866 | if (unlikely(dev_gso_segment(skb))) | ||
1867 | goto out_kfree_skb; | ||
1868 | if (skb->next) | ||
1869 | goto gso; | ||
1870 | } | ||
1871 | |||
1872 | /* | 1906 | /* |
1873 | * If device doesnt need skb->dst, release it right now while | 1907 | * If device doesnt need skb->dst, release it right now while |
1874 | * its hot in this cpu cache | 1908 | * its hot in this cpu cache |
@@ -1876,23 +1910,18 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1876 | if (dev->priv_flags & IFF_XMIT_DST_RELEASE) | 1910 | if (dev->priv_flags & IFF_XMIT_DST_RELEASE) |
1877 | skb_dst_drop(skb); | 1911 | skb_dst_drop(skb); |
1878 | 1912 | ||
1913 | skb_orphan_try(skb); | ||
1914 | |||
1915 | if (netif_needs_gso(dev, skb)) { | ||
1916 | if (unlikely(dev_gso_segment(skb))) | ||
1917 | goto out_kfree_skb; | ||
1918 | if (skb->next) | ||
1919 | goto gso; | ||
1920 | } | ||
1921 | |||
1879 | rc = ops->ndo_start_xmit(skb, dev); | 1922 | rc = ops->ndo_start_xmit(skb, dev); |
1880 | if (rc == NETDEV_TX_OK) | 1923 | if (rc == NETDEV_TX_OK) |
1881 | txq_trans_update(txq); | 1924 | txq_trans_update(txq); |
1882 | /* | ||
1883 | * TODO: if skb_orphan() was called by | ||
1884 | * dev->hard_start_xmit() (for example, the unmodified | ||
1885 | * igb driver does that; bnx2 doesn't), then | ||
1886 | * skb_tx_software_timestamp() will be unable to send | ||
1887 | * back the time stamp. | ||
1888 | * | ||
1889 | * How can this be prevented? Always create another | ||
1890 | * reference to the socket before calling | ||
1891 | * dev->hard_start_xmit()? Prevent that skb_orphan() | ||
1892 | * does anything in dev->hard_start_xmit() by clearing | ||
1893 | * the skb destructor before the call and restoring it | ||
1894 | * afterwards, then doing the skb_orphan() ourselves? | ||
1895 | */ | ||
1896 | return rc; | 1925 | return rc; |
1897 | } | 1926 | } |
1898 | 1927 | ||
@@ -1931,7 +1960,7 @@ out_kfree_skb: | |||
1931 | return rc; | 1960 | return rc; |
1932 | } | 1961 | } |
1933 | 1962 | ||
1934 | static u32 skb_tx_hashrnd; | 1963 | static u32 hashrnd __read_mostly; |
1935 | 1964 | ||
1936 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | 1965 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) |
1937 | { | 1966 | { |
@@ -1947,9 +1976,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1947 | if (skb->sk && skb->sk->sk_hash) | 1976 | if (skb->sk && skb->sk->sk_hash) |
1948 | hash = skb->sk->sk_hash; | 1977 | hash = skb->sk->sk_hash; |
1949 | else | 1978 | else |
1950 | hash = skb->protocol; | 1979 | hash = (__force u16) skb->protocol; |
1951 | 1980 | ||
1952 | hash = jhash_1word(hash, skb_tx_hashrnd); | 1981 | hash = jhash_1word(hash, hashrnd); |
1953 | 1982 | ||
1954 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); | 1983 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); |
1955 | } | 1984 | } |
@@ -1959,10 +1988,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | |||
1959 | { | 1988 | { |
1960 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | 1989 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { |
1961 | if (net_ratelimit()) { | 1990 | if (net_ratelimit()) { |
1962 | WARN(1, "%s selects TX queue %d, but " | 1991 | pr_warning("%s selects TX queue %d, but " |
1963 | "real number of TX queues is %d\n", | 1992 | "real number of TX queues is %d\n", |
1964 | dev->name, queue_index, | 1993 | dev->name, queue_index, dev->real_num_tx_queues); |
1965 | dev->real_num_tx_queues); | ||
1966 | } | 1994 | } |
1967 | return 0; | 1995 | return 0; |
1968 | } | 1996 | } |
@@ -1988,8 +2016,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
1988 | if (dev->real_num_tx_queues > 1) | 2016 | if (dev->real_num_tx_queues > 1) |
1989 | queue_index = skb_tx_hash(dev, skb); | 2017 | queue_index = skb_tx_hash(dev, skb); |
1990 | 2018 | ||
1991 | if (sk && sk->sk_dst_cache) | 2019 | if (sk) { |
1992 | sk_tx_queue_set(sk, queue_index); | 2020 | struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1); |
2021 | |||
2022 | if (dst && skb_dst(skb) == dst) | ||
2023 | sk_tx_queue_set(sk, queue_index); | ||
2024 | } | ||
1993 | } | 2025 | } |
1994 | } | 2026 | } |
1995 | 2027 | ||
@@ -2173,8 +2205,235 @@ int netdev_max_backlog __read_mostly = 1000; | |||
2173 | int netdev_budget __read_mostly = 300; | 2205 | int netdev_budget __read_mostly = 300; |
2174 | int weight_p __read_mostly = 64; /* old backlog weight */ | 2206 | int weight_p __read_mostly = 64; /* old backlog weight */ |
2175 | 2207 | ||
2176 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | 2208 | #ifdef CONFIG_RPS |
2209 | |||
2210 | /* One global table that all flow-based protocols share. */ | ||
2211 | struct rps_sock_flow_table *rps_sock_flow_table __read_mostly; | ||
2212 | EXPORT_SYMBOL(rps_sock_flow_table); | ||
2213 | |||
2214 | /* | ||
2215 | * get_rps_cpu is called from netif_receive_skb and returns the target | ||
2216 | * CPU from the RPS map of the receiving queue for a given skb. | ||
2217 | * rcu_read_lock must be held on entry. | ||
2218 | */ | ||
2219 | static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | ||
2220 | struct rps_dev_flow **rflowp) | ||
2221 | { | ||
2222 | struct ipv6hdr *ip6; | ||
2223 | struct iphdr *ip; | ||
2224 | struct netdev_rx_queue *rxqueue; | ||
2225 | struct rps_map *map; | ||
2226 | struct rps_dev_flow_table *flow_table; | ||
2227 | struct rps_sock_flow_table *sock_flow_table; | ||
2228 | int cpu = -1; | ||
2229 | u8 ip_proto; | ||
2230 | u16 tcpu; | ||
2231 | u32 addr1, addr2, ihl; | ||
2232 | union { | ||
2233 | u32 v32; | ||
2234 | u16 v16[2]; | ||
2235 | } ports; | ||
2236 | |||
2237 | if (skb_rx_queue_recorded(skb)) { | ||
2238 | u16 index = skb_get_rx_queue(skb); | ||
2239 | if (unlikely(index >= dev->num_rx_queues)) { | ||
2240 | if (net_ratelimit()) { | ||
2241 | pr_warning("%s received packet on queue " | ||
2242 | "%u, but number of RX queues is %u\n", | ||
2243 | dev->name, index, dev->num_rx_queues); | ||
2244 | } | ||
2245 | goto done; | ||
2246 | } | ||
2247 | rxqueue = dev->_rx + index; | ||
2248 | } else | ||
2249 | rxqueue = dev->_rx; | ||
2250 | |||
2251 | if (!rxqueue->rps_map && !rxqueue->rps_flow_table) | ||
2252 | goto done; | ||
2253 | |||
2254 | if (skb->rxhash) | ||
2255 | goto got_hash; /* Skip hash computation on packet header */ | ||
2256 | |||
2257 | switch (skb->protocol) { | ||
2258 | case __constant_htons(ETH_P_IP): | ||
2259 | if (!pskb_may_pull(skb, sizeof(*ip))) | ||
2260 | goto done; | ||
2261 | |||
2262 | ip = (struct iphdr *) skb->data; | ||
2263 | ip_proto = ip->protocol; | ||
2264 | addr1 = (__force u32) ip->saddr; | ||
2265 | addr2 = (__force u32) ip->daddr; | ||
2266 | ihl = ip->ihl; | ||
2267 | break; | ||
2268 | case __constant_htons(ETH_P_IPV6): | ||
2269 | if (!pskb_may_pull(skb, sizeof(*ip6))) | ||
2270 | goto done; | ||
2271 | |||
2272 | ip6 = (struct ipv6hdr *) skb->data; | ||
2273 | ip_proto = ip6->nexthdr; | ||
2274 | addr1 = (__force u32) ip6->saddr.s6_addr32[3]; | ||
2275 | addr2 = (__force u32) ip6->daddr.s6_addr32[3]; | ||
2276 | ihl = (40 >> 2); | ||
2277 | break; | ||
2278 | default: | ||
2279 | goto done; | ||
2280 | } | ||
2281 | switch (ip_proto) { | ||
2282 | case IPPROTO_TCP: | ||
2283 | case IPPROTO_UDP: | ||
2284 | case IPPROTO_DCCP: | ||
2285 | case IPPROTO_ESP: | ||
2286 | case IPPROTO_AH: | ||
2287 | case IPPROTO_SCTP: | ||
2288 | case IPPROTO_UDPLITE: | ||
2289 | if (pskb_may_pull(skb, (ihl * 4) + 4)) { | ||
2290 | ports.v32 = * (__force u32 *) (skb->data + (ihl * 4)); | ||
2291 | if (ports.v16[1] < ports.v16[0]) | ||
2292 | swap(ports.v16[0], ports.v16[1]); | ||
2293 | break; | ||
2294 | } | ||
2295 | default: | ||
2296 | ports.v32 = 0; | ||
2297 | break; | ||
2298 | } | ||
2299 | |||
2300 | /* get a consistent hash (same value on both flow directions) */ | ||
2301 | if (addr2 < addr1) | ||
2302 | swap(addr1, addr2); | ||
2303 | skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd); | ||
2304 | if (!skb->rxhash) | ||
2305 | skb->rxhash = 1; | ||
2306 | |||
2307 | got_hash: | ||
2308 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | ||
2309 | sock_flow_table = rcu_dereference(rps_sock_flow_table); | ||
2310 | if (flow_table && sock_flow_table) { | ||
2311 | u16 next_cpu; | ||
2312 | struct rps_dev_flow *rflow; | ||
2313 | |||
2314 | rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; | ||
2315 | tcpu = rflow->cpu; | ||
2177 | 2316 | ||
2317 | next_cpu = sock_flow_table->ents[skb->rxhash & | ||
2318 | sock_flow_table->mask]; | ||
2319 | |||
2320 | /* | ||
2321 | * If the desired CPU (where last recvmsg was done) is | ||
2322 | * different from current CPU (one in the rx-queue flow | ||
2323 | * table entry), switch if one of the following holds: | ||
2324 | * - Current CPU is unset (equal to RPS_NO_CPU). | ||
2325 | * - Current CPU is offline. | ||
2326 | * - The current CPU's queue tail has advanced beyond the | ||
2327 | * last packet that was enqueued using this table entry. | ||
2328 | * This guarantees that all previous packets for the flow | ||
2329 | * have been dequeued, thus preserving in order delivery. | ||
2330 | */ | ||
2331 | if (unlikely(tcpu != next_cpu) && | ||
2332 | (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || | ||
2333 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - | ||
2334 | rflow->last_qtail)) >= 0)) { | ||
2335 | tcpu = rflow->cpu = next_cpu; | ||
2336 | if (tcpu != RPS_NO_CPU) | ||
2337 | rflow->last_qtail = per_cpu(softnet_data, | ||
2338 | tcpu).input_queue_head; | ||
2339 | } | ||
2340 | if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { | ||
2341 | *rflowp = rflow; | ||
2342 | cpu = tcpu; | ||
2343 | goto done; | ||
2344 | } | ||
2345 | } | ||
2346 | |||
2347 | map = rcu_dereference(rxqueue->rps_map); | ||
2348 | if (map) { | ||
2349 | tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; | ||
2350 | |||
2351 | if (cpu_online(tcpu)) { | ||
2352 | cpu = tcpu; | ||
2353 | goto done; | ||
2354 | } | ||
2355 | } | ||
2356 | |||
2357 | done: | ||
2358 | return cpu; | ||
2359 | } | ||
2360 | |||
2361 | /* Called from hardirq (IPI) context */ | ||
2362 | static void rps_trigger_softirq(void *data) | ||
2363 | { | ||
2364 | struct softnet_data *sd = data; | ||
2365 | |||
2366 | __napi_schedule(&sd->backlog); | ||
2367 | sd->received_rps++; | ||
2368 | } | ||
2369 | |||
2370 | #endif /* CONFIG_RPS */ | ||
2371 | |||
2372 | /* | ||
2373 | * Check if this softnet_data structure is another cpu one | ||
2374 | * If yes, queue it to our IPI list and return 1 | ||
2375 | * If no, return 0 | ||
2376 | */ | ||
2377 | static int rps_ipi_queued(struct softnet_data *sd) | ||
2378 | { | ||
2379 | #ifdef CONFIG_RPS | ||
2380 | struct softnet_data *mysd = &__get_cpu_var(softnet_data); | ||
2381 | |||
2382 | if (sd != mysd) { | ||
2383 | sd->rps_ipi_next = mysd->rps_ipi_list; | ||
2384 | mysd->rps_ipi_list = sd; | ||
2385 | |||
2386 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
2387 | return 1; | ||
2388 | } | ||
2389 | #endif /* CONFIG_RPS */ | ||
2390 | return 0; | ||
2391 | } | ||
2392 | |||
2393 | /* | ||
2394 | * enqueue_to_backlog is called to queue an skb to a per CPU backlog | ||
2395 | * queue (may be a remote CPU queue). | ||
2396 | */ | ||
2397 | static int enqueue_to_backlog(struct sk_buff *skb, int cpu, | ||
2398 | unsigned int *qtail) | ||
2399 | { | ||
2400 | struct softnet_data *sd; | ||
2401 | unsigned long flags; | ||
2402 | |||
2403 | sd = &per_cpu(softnet_data, cpu); | ||
2404 | |||
2405 | local_irq_save(flags); | ||
2406 | |||
2407 | rps_lock(sd); | ||
2408 | if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) { | ||
2409 | if (skb_queue_len(&sd->input_pkt_queue)) { | ||
2410 | enqueue: | ||
2411 | __skb_queue_tail(&sd->input_pkt_queue, skb); | ||
2412 | #ifdef CONFIG_RPS | ||
2413 | *qtail = sd->input_queue_head + | ||
2414 | skb_queue_len(&sd->input_pkt_queue); | ||
2415 | #endif | ||
2416 | rps_unlock(sd); | ||
2417 | local_irq_restore(flags); | ||
2418 | return NET_RX_SUCCESS; | ||
2419 | } | ||
2420 | |||
2421 | /* Schedule NAPI for backlog device */ | ||
2422 | if (napi_schedule_prep(&sd->backlog)) { | ||
2423 | if (!rps_ipi_queued(sd)) | ||
2424 | __napi_schedule(&sd->backlog); | ||
2425 | } | ||
2426 | goto enqueue; | ||
2427 | } | ||
2428 | |||
2429 | sd->dropped++; | ||
2430 | rps_unlock(sd); | ||
2431 | |||
2432 | local_irq_restore(flags); | ||
2433 | |||
2434 | kfree_skb(skb); | ||
2435 | return NET_RX_DROP; | ||
2436 | } | ||
2178 | 2437 | ||
2179 | /** | 2438 | /** |
2180 | * netif_rx - post buffer to the network code | 2439 | * netif_rx - post buffer to the network code |
@@ -2193,8 +2452,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |||
2193 | 2452 | ||
2194 | int netif_rx(struct sk_buff *skb) | 2453 | int netif_rx(struct sk_buff *skb) |
2195 | { | 2454 | { |
2196 | struct softnet_data *queue; | 2455 | int ret; |
2197 | unsigned long flags; | ||
2198 | 2456 | ||
2199 | /* if netpoll wants it, pretend we never saw it */ | 2457 | /* if netpoll wants it, pretend we never saw it */ |
2200 | if (netpoll_rx(skb)) | 2458 | if (netpoll_rx(skb)) |
@@ -2203,31 +2461,29 @@ int netif_rx(struct sk_buff *skb) | |||
2203 | if (!skb->tstamp.tv64) | 2461 | if (!skb->tstamp.tv64) |
2204 | net_timestamp(skb); | 2462 | net_timestamp(skb); |
2205 | 2463 | ||
2206 | /* | 2464 | #ifdef CONFIG_RPS |
2207 | * The code is rearranged so that the path is the most | 2465 | { |
2208 | * short when CPU is congested, but is still operating. | 2466 | struct rps_dev_flow voidflow, *rflow = &voidflow; |
2209 | */ | 2467 | int cpu; |
2210 | local_irq_save(flags); | ||
2211 | queue = &__get_cpu_var(softnet_data); | ||
2212 | 2468 | ||
2213 | __get_cpu_var(netdev_rx_stat).total++; | 2469 | rcu_read_lock(); |
2214 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2215 | if (queue->input_pkt_queue.qlen) { | ||
2216 | enqueue: | ||
2217 | __skb_queue_tail(&queue->input_pkt_queue, skb); | ||
2218 | local_irq_restore(flags); | ||
2219 | return NET_RX_SUCCESS; | ||
2220 | } | ||
2221 | 2470 | ||
2222 | napi_schedule(&queue->backlog); | 2471 | cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2223 | goto enqueue; | 2472 | if (cpu < 0) |
2224 | } | 2473 | cpu = smp_processor_id(); |
2225 | 2474 | ||
2226 | __get_cpu_var(netdev_rx_stat).dropped++; | 2475 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
2227 | local_irq_restore(flags); | ||
2228 | 2476 | ||
2229 | kfree_skb(skb); | 2477 | rcu_read_unlock(); |
2230 | return NET_RX_DROP; | 2478 | } |
2479 | #else | ||
2480 | { | ||
2481 | unsigned int qtail; | ||
2482 | ret = enqueue_to_backlog(skb, get_cpu(), &qtail); | ||
2483 | put_cpu(); | ||
2484 | } | ||
2485 | #endif | ||
2486 | return ret; | ||
2231 | } | 2487 | } |
2232 | EXPORT_SYMBOL(netif_rx); | 2488 | EXPORT_SYMBOL(netif_rx); |
2233 | 2489 | ||
@@ -2272,6 +2528,7 @@ static void net_tx_action(struct softirq_action *h) | |||
2272 | local_irq_disable(); | 2528 | local_irq_disable(); |
2273 | head = sd->output_queue; | 2529 | head = sd->output_queue; |
2274 | sd->output_queue = NULL; | 2530 | sd->output_queue = NULL; |
2531 | sd->output_queue_tailp = &sd->output_queue; | ||
2275 | local_irq_enable(); | 2532 | local_irq_enable(); |
2276 | 2533 | ||
2277 | while (head) { | 2534 | while (head) { |
@@ -2464,25 +2721,60 @@ void netif_nit_deliver(struct sk_buff *skb) | |||
2464 | rcu_read_unlock(); | 2721 | rcu_read_unlock(); |
2465 | } | 2722 | } |
2466 | 2723 | ||
2467 | /** | 2724 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, |
2468 | * netif_receive_skb - process receive buffer from network | 2725 | struct net_device *master) |
2469 | * @skb: buffer to process | 2726 | { |
2470 | * | 2727 | if (skb->pkt_type == PACKET_HOST) { |
2471 | * netif_receive_skb() is the main receive data processing function. | 2728 | u16 *dest = (u16 *) eth_hdr(skb)->h_dest; |
2472 | * It always succeeds. The buffer may be dropped during processing | 2729 | |
2473 | * for congestion control or by the protocol layers. | 2730 | memcpy(dest, master->dev_addr, ETH_ALEN); |
2474 | * | 2731 | } |
2475 | * This function may only be called from softirq context and interrupts | 2732 | } |
2476 | * should be enabled. | 2733 | |
2477 | * | 2734 | /* On bonding slaves other than the currently active slave, suppress |
2478 | * Return values (usually ignored): | 2735 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
2479 | * NET_RX_SUCCESS: no congestion | 2736 | * ARP on active-backup slaves with arp_validate enabled. |
2480 | * NET_RX_DROP: packet was dropped | ||
2481 | */ | 2737 | */ |
2482 | int netif_receive_skb(struct sk_buff *skb) | 2738 | int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master) |
2739 | { | ||
2740 | struct net_device *dev = skb->dev; | ||
2741 | |||
2742 | if (master->priv_flags & IFF_MASTER_ARPMON) | ||
2743 | dev->last_rx = jiffies; | ||
2744 | |||
2745 | if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { | ||
2746 | /* Do address unmangle. The local destination address | ||
2747 | * will be always the one master has. Provides the right | ||
2748 | * functionality in a bridge. | ||
2749 | */ | ||
2750 | skb_bond_set_mac_by_master(skb, master); | ||
2751 | } | ||
2752 | |||
2753 | if (dev->priv_flags & IFF_SLAVE_INACTIVE) { | ||
2754 | if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && | ||
2755 | skb->protocol == __cpu_to_be16(ETH_P_ARP)) | ||
2756 | return 0; | ||
2757 | |||
2758 | if (master->priv_flags & IFF_MASTER_ALB) { | ||
2759 | if (skb->pkt_type != PACKET_BROADCAST && | ||
2760 | skb->pkt_type != PACKET_MULTICAST) | ||
2761 | return 0; | ||
2762 | } | ||
2763 | if (master->priv_flags & IFF_MASTER_8023AD && | ||
2764 | skb->protocol == __cpu_to_be16(ETH_P_SLOW)) | ||
2765 | return 0; | ||
2766 | |||
2767 | return 1; | ||
2768 | } | ||
2769 | return 0; | ||
2770 | } | ||
2771 | EXPORT_SYMBOL(__skb_bond_should_drop); | ||
2772 | |||
2773 | static int __netif_receive_skb(struct sk_buff *skb) | ||
2483 | { | 2774 | { |
2484 | struct packet_type *ptype, *pt_prev; | 2775 | struct packet_type *ptype, *pt_prev; |
2485 | struct net_device *orig_dev; | 2776 | struct net_device *orig_dev; |
2777 | struct net_device *master; | ||
2486 | struct net_device *null_or_orig; | 2778 | struct net_device *null_or_orig; |
2487 | struct net_device *null_or_bond; | 2779 | struct net_device *null_or_bond; |
2488 | int ret = NET_RX_DROP; | 2780 | int ret = NET_RX_DROP; |
@@ -2503,14 +2795,15 @@ int netif_receive_skb(struct sk_buff *skb) | |||
2503 | 2795 | ||
2504 | null_or_orig = NULL; | 2796 | null_or_orig = NULL; |
2505 | orig_dev = skb->dev; | 2797 | orig_dev = skb->dev; |
2506 | if (orig_dev->master) { | 2798 | master = ACCESS_ONCE(orig_dev->master); |
2507 | if (skb_bond_should_drop(skb)) | 2799 | if (master) { |
2800 | if (skb_bond_should_drop(skb, master)) | ||
2508 | null_or_orig = orig_dev; /* deliver only exact match */ | 2801 | null_or_orig = orig_dev; /* deliver only exact match */ |
2509 | else | 2802 | else |
2510 | skb->dev = orig_dev->master; | 2803 | skb->dev = master; |
2511 | } | 2804 | } |
2512 | 2805 | ||
2513 | __get_cpu_var(netdev_rx_stat).total++; | 2806 | __get_cpu_var(softnet_data).processed++; |
2514 | 2807 | ||
2515 | skb_reset_network_header(skb); | 2808 | skb_reset_network_header(skb); |
2516 | skb_reset_transport_header(skb); | 2809 | skb_reset_transport_header(skb); |
@@ -2588,20 +2881,72 @@ out: | |||
2588 | rcu_read_unlock(); | 2881 | rcu_read_unlock(); |
2589 | return ret; | 2882 | return ret; |
2590 | } | 2883 | } |
2884 | |||
2885 | /** | ||
2886 | * netif_receive_skb - process receive buffer from network | ||
2887 | * @skb: buffer to process | ||
2888 | * | ||
2889 | * netif_receive_skb() is the main receive data processing function. | ||
2890 | * It always succeeds. The buffer may be dropped during processing | ||
2891 | * for congestion control or by the protocol layers. | ||
2892 | * | ||
2893 | * This function may only be called from softirq context and interrupts | ||
2894 | * should be enabled. | ||
2895 | * | ||
2896 | * Return values (usually ignored): | ||
2897 | * NET_RX_SUCCESS: no congestion | ||
2898 | * NET_RX_DROP: packet was dropped | ||
2899 | */ | ||
2900 | int netif_receive_skb(struct sk_buff *skb) | ||
2901 | { | ||
2902 | #ifdef CONFIG_RPS | ||
2903 | struct rps_dev_flow voidflow, *rflow = &voidflow; | ||
2904 | int cpu, ret; | ||
2905 | |||
2906 | rcu_read_lock(); | ||
2907 | |||
2908 | cpu = get_rps_cpu(skb->dev, skb, &rflow); | ||
2909 | |||
2910 | if (cpu >= 0) { | ||
2911 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | ||
2912 | rcu_read_unlock(); | ||
2913 | } else { | ||
2914 | rcu_read_unlock(); | ||
2915 | ret = __netif_receive_skb(skb); | ||
2916 | } | ||
2917 | |||
2918 | return ret; | ||
2919 | #else | ||
2920 | return __netif_receive_skb(skb); | ||
2921 | #endif | ||
2922 | } | ||
2591 | EXPORT_SYMBOL(netif_receive_skb); | 2923 | EXPORT_SYMBOL(netif_receive_skb); |
2592 | 2924 | ||
2593 | /* Network device is going away, flush any packets still pending */ | 2925 | /* Network device is going away, flush any packets still pending |
2926 | * Called with irqs disabled. | ||
2927 | */ | ||
2594 | static void flush_backlog(void *arg) | 2928 | static void flush_backlog(void *arg) |
2595 | { | 2929 | { |
2596 | struct net_device *dev = arg; | 2930 | struct net_device *dev = arg; |
2597 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 2931 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
2598 | struct sk_buff *skb, *tmp; | 2932 | struct sk_buff *skb, *tmp; |
2599 | 2933 | ||
2600 | skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) | 2934 | rps_lock(sd); |
2935 | skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { | ||
2601 | if (skb->dev == dev) { | 2936 | if (skb->dev == dev) { |
2602 | __skb_unlink(skb, &queue->input_pkt_queue); | 2937 | __skb_unlink(skb, &sd->input_pkt_queue); |
2603 | kfree_skb(skb); | 2938 | kfree_skb(skb); |
2939 | input_queue_head_add(sd, 1); | ||
2604 | } | 2940 | } |
2941 | } | ||
2942 | rps_unlock(sd); | ||
2943 | |||
2944 | skb_queue_walk_safe(&sd->process_queue, skb, tmp) { | ||
2945 | if (skb->dev == dev) { | ||
2946 | __skb_unlink(skb, &sd->process_queue); | ||
2947 | kfree_skb(skb); | ||
2948 | } | ||
2949 | } | ||
2605 | } | 2950 | } |
2606 | 2951 | ||
2607 | static int napi_gro_complete(struct sk_buff *skb) | 2952 | static int napi_gro_complete(struct sk_buff *skb) |
@@ -2904,27 +3249,76 @@ gro_result_t napi_gro_frags(struct napi_struct *napi) | |||
2904 | } | 3249 | } |
2905 | EXPORT_SYMBOL(napi_gro_frags); | 3250 | EXPORT_SYMBOL(napi_gro_frags); |
2906 | 3251 | ||
3252 | /* | ||
3253 | * net_rps_action sends any pending IPI's for rps. | ||
3254 | * Note: called with local irq disabled, but exits with local irq enabled. | ||
3255 | */ | ||
3256 | static void net_rps_action_and_irq_enable(struct softnet_data *sd) | ||
3257 | { | ||
3258 | #ifdef CONFIG_RPS | ||
3259 | struct softnet_data *remsd = sd->rps_ipi_list; | ||
3260 | |||
3261 | if (remsd) { | ||
3262 | sd->rps_ipi_list = NULL; | ||
3263 | |||
3264 | local_irq_enable(); | ||
3265 | |||
3266 | /* Send pending IPI's to kick RPS processing on remote cpus. */ | ||
3267 | while (remsd) { | ||
3268 | struct softnet_data *next = remsd->rps_ipi_next; | ||
3269 | |||
3270 | if (cpu_online(remsd->cpu)) | ||
3271 | __smp_call_function_single(remsd->cpu, | ||
3272 | &remsd->csd, 0); | ||
3273 | remsd = next; | ||
3274 | } | ||
3275 | } else | ||
3276 | #endif | ||
3277 | local_irq_enable(); | ||
3278 | } | ||
3279 | |||
2907 | static int process_backlog(struct napi_struct *napi, int quota) | 3280 | static int process_backlog(struct napi_struct *napi, int quota) |
2908 | { | 3281 | { |
2909 | int work = 0; | 3282 | int work = 0; |
2910 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 3283 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
2911 | unsigned long start_time = jiffies; | ||
2912 | 3284 | ||
3285 | #ifdef CONFIG_RPS | ||
3286 | /* Check if we have pending ipi, its better to send them now, | ||
3287 | * not waiting net_rx_action() end. | ||
3288 | */ | ||
3289 | if (sd->rps_ipi_list) { | ||
3290 | local_irq_disable(); | ||
3291 | net_rps_action_and_irq_enable(sd); | ||
3292 | } | ||
3293 | #endif | ||
2913 | napi->weight = weight_p; | 3294 | napi->weight = weight_p; |
2914 | do { | 3295 | local_irq_disable(); |
3296 | while (work < quota) { | ||
2915 | struct sk_buff *skb; | 3297 | struct sk_buff *skb; |
3298 | unsigned int qlen; | ||
2916 | 3299 | ||
2917 | local_irq_disable(); | 3300 | while ((skb = __skb_dequeue(&sd->process_queue))) { |
2918 | skb = __skb_dequeue(&queue->input_pkt_queue); | ||
2919 | if (!skb) { | ||
2920 | __napi_complete(napi); | ||
2921 | local_irq_enable(); | 3301 | local_irq_enable(); |
2922 | break; | 3302 | __netif_receive_skb(skb); |
3303 | if (++work >= quota) | ||
3304 | return work; | ||
3305 | local_irq_disable(); | ||
2923 | } | 3306 | } |
2924 | local_irq_enable(); | ||
2925 | 3307 | ||
2926 | netif_receive_skb(skb); | 3308 | rps_lock(sd); |
2927 | } while (++work < quota && jiffies == start_time); | 3309 | qlen = skb_queue_len(&sd->input_pkt_queue); |
3310 | if (qlen) { | ||
3311 | input_queue_head_add(sd, qlen); | ||
3312 | skb_queue_splice_tail_init(&sd->input_pkt_queue, | ||
3313 | &sd->process_queue); | ||
3314 | } | ||
3315 | if (qlen < quota - work) { | ||
3316 | __napi_complete(napi); | ||
3317 | quota = work + qlen; | ||
3318 | } | ||
3319 | rps_unlock(sd); | ||
3320 | } | ||
3321 | local_irq_enable(); | ||
2928 | 3322 | ||
2929 | return work; | 3323 | return work; |
2930 | } | 3324 | } |
@@ -3012,17 +3406,16 @@ void netif_napi_del(struct napi_struct *napi) | |||
3012 | } | 3406 | } |
3013 | EXPORT_SYMBOL(netif_napi_del); | 3407 | EXPORT_SYMBOL(netif_napi_del); |
3014 | 3408 | ||
3015 | |||
3016 | static void net_rx_action(struct softirq_action *h) | 3409 | static void net_rx_action(struct softirq_action *h) |
3017 | { | 3410 | { |
3018 | struct list_head *list = &__get_cpu_var(softnet_data).poll_list; | 3411 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
3019 | unsigned long time_limit = jiffies + 2; | 3412 | unsigned long time_limit = jiffies + 2; |
3020 | int budget = netdev_budget; | 3413 | int budget = netdev_budget; |
3021 | void *have; | 3414 | void *have; |
3022 | 3415 | ||
3023 | local_irq_disable(); | 3416 | local_irq_disable(); |
3024 | 3417 | ||
3025 | while (!list_empty(list)) { | 3418 | while (!list_empty(&sd->poll_list)) { |
3026 | struct napi_struct *n; | 3419 | struct napi_struct *n; |
3027 | int work, weight; | 3420 | int work, weight; |
3028 | 3421 | ||
@@ -3040,7 +3433,7 @@ static void net_rx_action(struct softirq_action *h) | |||
3040 | * entries to the tail of this list, and only ->poll() | 3433 | * entries to the tail of this list, and only ->poll() |
3041 | * calls can remove this head entry from the list. | 3434 | * calls can remove this head entry from the list. |
3042 | */ | 3435 | */ |
3043 | n = list_first_entry(list, struct napi_struct, poll_list); | 3436 | n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); |
3044 | 3437 | ||
3045 | have = netpoll_poll_lock(n); | 3438 | have = netpoll_poll_lock(n); |
3046 | 3439 | ||
@@ -3075,13 +3468,13 @@ static void net_rx_action(struct softirq_action *h) | |||
3075 | napi_complete(n); | 3468 | napi_complete(n); |
3076 | local_irq_disable(); | 3469 | local_irq_disable(); |
3077 | } else | 3470 | } else |
3078 | list_move_tail(&n->poll_list, list); | 3471 | list_move_tail(&n->poll_list, &sd->poll_list); |
3079 | } | 3472 | } |
3080 | 3473 | ||
3081 | netpoll_poll_unlock(have); | 3474 | netpoll_poll_unlock(have); |
3082 | } | 3475 | } |
3083 | out: | 3476 | out: |
3084 | local_irq_enable(); | 3477 | net_rps_action_and_irq_enable(sd); |
3085 | 3478 | ||
3086 | #ifdef CONFIG_NET_DMA | 3479 | #ifdef CONFIG_NET_DMA |
3087 | /* | 3480 | /* |
@@ -3094,7 +3487,7 @@ out: | |||
3094 | return; | 3487 | return; |
3095 | 3488 | ||
3096 | softnet_break: | 3489 | softnet_break: |
3097 | __get_cpu_var(netdev_rx_stat).time_squeeze++; | 3490 | sd->time_squeeze++; |
3098 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | 3491 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); |
3099 | goto out; | 3492 | goto out; |
3100 | } | 3493 | } |
@@ -3295,17 +3688,17 @@ static int dev_seq_show(struct seq_file *seq, void *v) | |||
3295 | return 0; | 3688 | return 0; |
3296 | } | 3689 | } |
3297 | 3690 | ||
3298 | static struct netif_rx_stats *softnet_get_online(loff_t *pos) | 3691 | static struct softnet_data *softnet_get_online(loff_t *pos) |
3299 | { | 3692 | { |
3300 | struct netif_rx_stats *rc = NULL; | 3693 | struct softnet_data *sd = NULL; |
3301 | 3694 | ||
3302 | while (*pos < nr_cpu_ids) | 3695 | while (*pos < nr_cpu_ids) |
3303 | if (cpu_online(*pos)) { | 3696 | if (cpu_online(*pos)) { |
3304 | rc = &per_cpu(netdev_rx_stat, *pos); | 3697 | sd = &per_cpu(softnet_data, *pos); |
3305 | break; | 3698 | break; |
3306 | } else | 3699 | } else |
3307 | ++*pos; | 3700 | ++*pos; |
3308 | return rc; | 3701 | return sd; |
3309 | } | 3702 | } |
3310 | 3703 | ||
3311 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) | 3704 | static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) |
@@ -3325,12 +3718,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v) | |||
3325 | 3718 | ||
3326 | static int softnet_seq_show(struct seq_file *seq, void *v) | 3719 | static int softnet_seq_show(struct seq_file *seq, void *v) |
3327 | { | 3720 | { |
3328 | struct netif_rx_stats *s = v; | 3721 | struct softnet_data *sd = v; |
3329 | 3722 | ||
3330 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 3723 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
3331 | s->total, s->dropped, s->time_squeeze, 0, | 3724 | sd->processed, sd->dropped, sd->time_squeeze, 0, |
3332 | 0, 0, 0, 0, /* was fastroute */ | 3725 | 0, 0, 0, 0, /* was fastroute */ |
3333 | s->cpu_collision); | 3726 | sd->cpu_collision, sd->received_rps); |
3334 | return 0; | 3727 | return 0; |
3335 | } | 3728 | } |
3336 | 3729 | ||
@@ -3553,11 +3946,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) | |||
3553 | 3946 | ||
3554 | slave->master = master; | 3947 | slave->master = master; |
3555 | 3948 | ||
3556 | synchronize_net(); | 3949 | if (old) { |
3557 | 3950 | synchronize_net(); | |
3558 | if (old) | ||
3559 | dev_put(old); | 3951 | dev_put(old); |
3560 | 3952 | } | |
3561 | if (master) | 3953 | if (master) |
3562 | slave->flags |= IFF_SLAVE; | 3954 | slave->flags |= IFF_SLAVE; |
3563 | else | 3955 | else |
@@ -3734,562 +4126,6 @@ void dev_set_rx_mode(struct net_device *dev) | |||
3734 | netif_addr_unlock_bh(dev); | 4126 | netif_addr_unlock_bh(dev); |
3735 | } | 4127 | } |
3736 | 4128 | ||
3737 | /* hw addresses list handling functions */ | ||
3738 | |||
3739 | static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
3740 | int addr_len, unsigned char addr_type) | ||
3741 | { | ||
3742 | struct netdev_hw_addr *ha; | ||
3743 | int alloc_size; | ||
3744 | |||
3745 | if (addr_len > MAX_ADDR_LEN) | ||
3746 | return -EINVAL; | ||
3747 | |||
3748 | list_for_each_entry(ha, &list->list, list) { | ||
3749 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3750 | ha->type == addr_type) { | ||
3751 | ha->refcount++; | ||
3752 | return 0; | ||
3753 | } | ||
3754 | } | ||
3755 | |||
3756 | |||
3757 | alloc_size = sizeof(*ha); | ||
3758 | if (alloc_size < L1_CACHE_BYTES) | ||
3759 | alloc_size = L1_CACHE_BYTES; | ||
3760 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
3761 | if (!ha) | ||
3762 | return -ENOMEM; | ||
3763 | memcpy(ha->addr, addr, addr_len); | ||
3764 | ha->type = addr_type; | ||
3765 | ha->refcount = 1; | ||
3766 | ha->synced = false; | ||
3767 | list_add_tail_rcu(&ha->list, &list->list); | ||
3768 | list->count++; | ||
3769 | return 0; | ||
3770 | } | ||
3771 | |||
3772 | static void ha_rcu_free(struct rcu_head *head) | ||
3773 | { | ||
3774 | struct netdev_hw_addr *ha; | ||
3775 | |||
3776 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
3777 | kfree(ha); | ||
3778 | } | ||
3779 | |||
3780 | static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
3781 | int addr_len, unsigned char addr_type) | ||
3782 | { | ||
3783 | struct netdev_hw_addr *ha; | ||
3784 | |||
3785 | list_for_each_entry(ha, &list->list, list) { | ||
3786 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3787 | (ha->type == addr_type || !addr_type)) { | ||
3788 | if (--ha->refcount) | ||
3789 | return 0; | ||
3790 | list_del_rcu(&ha->list); | ||
3791 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3792 | list->count--; | ||
3793 | return 0; | ||
3794 | } | ||
3795 | } | ||
3796 | return -ENOENT; | ||
3797 | } | ||
3798 | |||
3799 | static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | ||
3800 | struct netdev_hw_addr_list *from_list, | ||
3801 | int addr_len, | ||
3802 | unsigned char addr_type) | ||
3803 | { | ||
3804 | int err; | ||
3805 | struct netdev_hw_addr *ha, *ha2; | ||
3806 | unsigned char type; | ||
3807 | |||
3808 | list_for_each_entry(ha, &from_list->list, list) { | ||
3809 | type = addr_type ? addr_type : ha->type; | ||
3810 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
3811 | if (err) | ||
3812 | goto unroll; | ||
3813 | } | ||
3814 | return 0; | ||
3815 | |||
3816 | unroll: | ||
3817 | list_for_each_entry(ha2, &from_list->list, list) { | ||
3818 | if (ha2 == ha) | ||
3819 | break; | ||
3820 | type = addr_type ? addr_type : ha2->type; | ||
3821 | __hw_addr_del(to_list, ha2->addr, addr_len, type); | ||
3822 | } | ||
3823 | return err; | ||
3824 | } | ||
3825 | |||
3826 | static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | ||
3827 | struct netdev_hw_addr_list *from_list, | ||
3828 | int addr_len, | ||
3829 | unsigned char addr_type) | ||
3830 | { | ||
3831 | struct netdev_hw_addr *ha; | ||
3832 | unsigned char type; | ||
3833 | |||
3834 | list_for_each_entry(ha, &from_list->list, list) { | ||
3835 | type = addr_type ? addr_type : ha->type; | ||
3836 | __hw_addr_del(to_list, ha->addr, addr_len, addr_type); | ||
3837 | } | ||
3838 | } | ||
3839 | |||
3840 | static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | ||
3841 | struct netdev_hw_addr_list *from_list, | ||
3842 | int addr_len) | ||
3843 | { | ||
3844 | int err = 0; | ||
3845 | struct netdev_hw_addr *ha, *tmp; | ||
3846 | |||
3847 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
3848 | if (!ha->synced) { | ||
3849 | err = __hw_addr_add(to_list, ha->addr, | ||
3850 | addr_len, ha->type); | ||
3851 | if (err) | ||
3852 | break; | ||
3853 | ha->synced = true; | ||
3854 | ha->refcount++; | ||
3855 | } else if (ha->refcount == 1) { | ||
3856 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | ||
3857 | __hw_addr_del(from_list, ha->addr, addr_len, ha->type); | ||
3858 | } | ||
3859 | } | ||
3860 | return err; | ||
3861 | } | ||
3862 | |||
3863 | static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | ||
3864 | struct netdev_hw_addr_list *from_list, | ||
3865 | int addr_len) | ||
3866 | { | ||
3867 | struct netdev_hw_addr *ha, *tmp; | ||
3868 | |||
3869 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
3870 | if (ha->synced) { | ||
3871 | __hw_addr_del(to_list, ha->addr, | ||
3872 | addr_len, ha->type); | ||
3873 | ha->synced = false; | ||
3874 | __hw_addr_del(from_list, ha->addr, | ||
3875 | addr_len, ha->type); | ||
3876 | } | ||
3877 | } | ||
3878 | } | ||
3879 | |||
3880 | static void __hw_addr_flush(struct netdev_hw_addr_list *list) | ||
3881 | { | ||
3882 | struct netdev_hw_addr *ha, *tmp; | ||
3883 | |||
3884 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | ||
3885 | list_del_rcu(&ha->list); | ||
3886 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3887 | } | ||
3888 | list->count = 0; | ||
3889 | } | ||
3890 | |||
3891 | static void __hw_addr_init(struct netdev_hw_addr_list *list) | ||
3892 | { | ||
3893 | INIT_LIST_HEAD(&list->list); | ||
3894 | list->count = 0; | ||
3895 | } | ||
3896 | |||
3897 | /* Device addresses handling functions */ | ||
3898 | |||
3899 | static void dev_addr_flush(struct net_device *dev) | ||
3900 | { | ||
3901 | /* rtnl_mutex must be held here */ | ||
3902 | |||
3903 | __hw_addr_flush(&dev->dev_addrs); | ||
3904 | dev->dev_addr = NULL; | ||
3905 | } | ||
3906 | |||
3907 | static int dev_addr_init(struct net_device *dev) | ||
3908 | { | ||
3909 | unsigned char addr[MAX_ADDR_LEN]; | ||
3910 | struct netdev_hw_addr *ha; | ||
3911 | int err; | ||
3912 | |||
3913 | /* rtnl_mutex must be held here */ | ||
3914 | |||
3915 | __hw_addr_init(&dev->dev_addrs); | ||
3916 | memset(addr, 0, sizeof(addr)); | ||
3917 | err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), | ||
3918 | NETDEV_HW_ADDR_T_LAN); | ||
3919 | if (!err) { | ||
3920 | /* | ||
3921 | * Get the first (previously created) address from the list | ||
3922 | * and set dev_addr pointer to this location. | ||
3923 | */ | ||
3924 | ha = list_first_entry(&dev->dev_addrs.list, | ||
3925 | struct netdev_hw_addr, list); | ||
3926 | dev->dev_addr = ha->addr; | ||
3927 | } | ||
3928 | return err; | ||
3929 | } | ||
3930 | |||
3931 | /** | ||
3932 | * dev_addr_add - Add a device address | ||
3933 | * @dev: device | ||
3934 | * @addr: address to add | ||
3935 | * @addr_type: address type | ||
3936 | * | ||
3937 | * Add a device address to the device or increase the reference count if | ||
3938 | * it already exists. | ||
3939 | * | ||
3940 | * The caller must hold the rtnl_mutex. | ||
3941 | */ | ||
3942 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
3943 | unsigned char addr_type) | ||
3944 | { | ||
3945 | int err; | ||
3946 | |||
3947 | ASSERT_RTNL(); | ||
3948 | |||
3949 | err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); | ||
3950 | if (!err) | ||
3951 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3952 | return err; | ||
3953 | } | ||
3954 | EXPORT_SYMBOL(dev_addr_add); | ||
3955 | |||
3956 | /** | ||
3957 | * dev_addr_del - Release a device address. | ||
3958 | * @dev: device | ||
3959 | * @addr: address to delete | ||
3960 | * @addr_type: address type | ||
3961 | * | ||
3962 | * Release reference to a device address and remove it from the device | ||
3963 | * if the reference count drops to zero. | ||
3964 | * | ||
3965 | * The caller must hold the rtnl_mutex. | ||
3966 | */ | ||
3967 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
3968 | unsigned char addr_type) | ||
3969 | { | ||
3970 | int err; | ||
3971 | struct netdev_hw_addr *ha; | ||
3972 | |||
3973 | ASSERT_RTNL(); | ||
3974 | |||
3975 | /* | ||
3976 | * We can not remove the first address from the list because | ||
3977 | * dev->dev_addr points to that. | ||
3978 | */ | ||
3979 | ha = list_first_entry(&dev->dev_addrs.list, | ||
3980 | struct netdev_hw_addr, list); | ||
3981 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
3982 | return -ENOENT; | ||
3983 | |||
3984 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | ||
3985 | addr_type); | ||
3986 | if (!err) | ||
3987 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3988 | return err; | ||
3989 | } | ||
3990 | EXPORT_SYMBOL(dev_addr_del); | ||
3991 | |||
3992 | /** | ||
3993 | * dev_addr_add_multiple - Add device addresses from another device | ||
3994 | * @to_dev: device to which addresses will be added | ||
3995 | * @from_dev: device from which addresses will be added | ||
3996 | * @addr_type: address type - 0 means type will be used from from_dev | ||
3997 | * | ||
3998 | * Add device addresses of the one device to another. | ||
3999 | ** | ||
4000 | * The caller must hold the rtnl_mutex. | ||
4001 | */ | ||
4002 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
4003 | struct net_device *from_dev, | ||
4004 | unsigned char addr_type) | ||
4005 | { | ||
4006 | int err; | ||
4007 | |||
4008 | ASSERT_RTNL(); | ||
4009 | |||
4010 | if (from_dev->addr_len != to_dev->addr_len) | ||
4011 | return -EINVAL; | ||
4012 | err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
4013 | to_dev->addr_len, addr_type); | ||
4014 | if (!err) | ||
4015 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
4016 | return err; | ||
4017 | } | ||
4018 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
4019 | |||
4020 | /** | ||
4021 | * dev_addr_del_multiple - Delete device addresses by another device | ||
4022 | * @to_dev: device where the addresses will be deleted | ||
4023 | * @from_dev: device by which addresses the addresses will be deleted | ||
4024 | * @addr_type: address type - 0 means type will used from from_dev | ||
4025 | * | ||
4026 | * Deletes addresses in to device by the list of addresses in from device. | ||
4027 | * | ||
4028 | * The caller must hold the rtnl_mutex. | ||
4029 | */ | ||
4030 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
4031 | struct net_device *from_dev, | ||
4032 | unsigned char addr_type) | ||
4033 | { | ||
4034 | ASSERT_RTNL(); | ||
4035 | |||
4036 | if (from_dev->addr_len != to_dev->addr_len) | ||
4037 | return -EINVAL; | ||
4038 | __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
4039 | to_dev->addr_len, addr_type); | ||
4040 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
4041 | return 0; | ||
4042 | } | ||
4043 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
4044 | |||
4045 | /* multicast addresses handling functions */ | ||
4046 | |||
4047 | int __dev_addr_delete(struct dev_addr_list **list, int *count, | ||
4048 | void *addr, int alen, int glbl) | ||
4049 | { | ||
4050 | struct dev_addr_list *da; | ||
4051 | |||
4052 | for (; (da = *list) != NULL; list = &da->next) { | ||
4053 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | ||
4054 | alen == da->da_addrlen) { | ||
4055 | if (glbl) { | ||
4056 | int old_glbl = da->da_gusers; | ||
4057 | da->da_gusers = 0; | ||
4058 | if (old_glbl == 0) | ||
4059 | break; | ||
4060 | } | ||
4061 | if (--da->da_users) | ||
4062 | return 0; | ||
4063 | |||
4064 | *list = da->next; | ||
4065 | kfree(da); | ||
4066 | (*count)--; | ||
4067 | return 0; | ||
4068 | } | ||
4069 | } | ||
4070 | return -ENOENT; | ||
4071 | } | ||
4072 | |||
4073 | int __dev_addr_add(struct dev_addr_list **list, int *count, | ||
4074 | void *addr, int alen, int glbl) | ||
4075 | { | ||
4076 | struct dev_addr_list *da; | ||
4077 | |||
4078 | for (da = *list; da != NULL; da = da->next) { | ||
4079 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | ||
4080 | da->da_addrlen == alen) { | ||
4081 | if (glbl) { | ||
4082 | int old_glbl = da->da_gusers; | ||
4083 | da->da_gusers = 1; | ||
4084 | if (old_glbl) | ||
4085 | return 0; | ||
4086 | } | ||
4087 | da->da_users++; | ||
4088 | return 0; | ||
4089 | } | ||
4090 | } | ||
4091 | |||
4092 | da = kzalloc(sizeof(*da), GFP_ATOMIC); | ||
4093 | if (da == NULL) | ||
4094 | return -ENOMEM; | ||
4095 | memcpy(da->da_addr, addr, alen); | ||
4096 | da->da_addrlen = alen; | ||
4097 | da->da_users = 1; | ||
4098 | da->da_gusers = glbl ? 1 : 0; | ||
4099 | da->next = *list; | ||
4100 | *list = da; | ||
4101 | (*count)++; | ||
4102 | return 0; | ||
4103 | } | ||
4104 | |||
4105 | /** | ||
4106 | * dev_unicast_delete - Release secondary unicast address. | ||
4107 | * @dev: device | ||
4108 | * @addr: address to delete | ||
4109 | * | ||
4110 | * Release reference to a secondary unicast address and remove it | ||
4111 | * from the device if the reference count drops to zero. | ||
4112 | * | ||
4113 | * The caller must hold the rtnl_mutex. | ||
4114 | */ | ||
4115 | int dev_unicast_delete(struct net_device *dev, void *addr) | ||
4116 | { | ||
4117 | int err; | ||
4118 | |||
4119 | ASSERT_RTNL(); | ||
4120 | |||
4121 | netif_addr_lock_bh(dev); | ||
4122 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | ||
4123 | NETDEV_HW_ADDR_T_UNICAST); | ||
4124 | if (!err) | ||
4125 | __dev_set_rx_mode(dev); | ||
4126 | netif_addr_unlock_bh(dev); | ||
4127 | return err; | ||
4128 | } | ||
4129 | EXPORT_SYMBOL(dev_unicast_delete); | ||
4130 | |||
4131 | /** | ||
4132 | * dev_unicast_add - add a secondary unicast address | ||
4133 | * @dev: device | ||
4134 | * @addr: address to add | ||
4135 | * | ||
4136 | * Add a secondary unicast address to the device or increase | ||
4137 | * the reference count if it already exists. | ||
4138 | * | ||
4139 | * The caller must hold the rtnl_mutex. | ||
4140 | */ | ||
4141 | int dev_unicast_add(struct net_device *dev, void *addr) | ||
4142 | { | ||
4143 | int err; | ||
4144 | |||
4145 | ASSERT_RTNL(); | ||
4146 | |||
4147 | netif_addr_lock_bh(dev); | ||
4148 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | ||
4149 | NETDEV_HW_ADDR_T_UNICAST); | ||
4150 | if (!err) | ||
4151 | __dev_set_rx_mode(dev); | ||
4152 | netif_addr_unlock_bh(dev); | ||
4153 | return err; | ||
4154 | } | ||
4155 | EXPORT_SYMBOL(dev_unicast_add); | ||
4156 | |||
4157 | int __dev_addr_sync(struct dev_addr_list **to, int *to_count, | ||
4158 | struct dev_addr_list **from, int *from_count) | ||
4159 | { | ||
4160 | struct dev_addr_list *da, *next; | ||
4161 | int err = 0; | ||
4162 | |||
4163 | da = *from; | ||
4164 | while (da != NULL) { | ||
4165 | next = da->next; | ||
4166 | if (!da->da_synced) { | ||
4167 | err = __dev_addr_add(to, to_count, | ||
4168 | da->da_addr, da->da_addrlen, 0); | ||
4169 | if (err < 0) | ||
4170 | break; | ||
4171 | da->da_synced = 1; | ||
4172 | da->da_users++; | ||
4173 | } else if (da->da_users == 1) { | ||
4174 | __dev_addr_delete(to, to_count, | ||
4175 | da->da_addr, da->da_addrlen, 0); | ||
4176 | __dev_addr_delete(from, from_count, | ||
4177 | da->da_addr, da->da_addrlen, 0); | ||
4178 | } | ||
4179 | da = next; | ||
4180 | } | ||
4181 | return err; | ||
4182 | } | ||
4183 | EXPORT_SYMBOL_GPL(__dev_addr_sync); | ||
4184 | |||
4185 | void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | ||
4186 | struct dev_addr_list **from, int *from_count) | ||
4187 | { | ||
4188 | struct dev_addr_list *da, *next; | ||
4189 | |||
4190 | da = *from; | ||
4191 | while (da != NULL) { | ||
4192 | next = da->next; | ||
4193 | if (da->da_synced) { | ||
4194 | __dev_addr_delete(to, to_count, | ||
4195 | da->da_addr, da->da_addrlen, 0); | ||
4196 | da->da_synced = 0; | ||
4197 | __dev_addr_delete(from, from_count, | ||
4198 | da->da_addr, da->da_addrlen, 0); | ||
4199 | } | ||
4200 | da = next; | ||
4201 | } | ||
4202 | } | ||
4203 | EXPORT_SYMBOL_GPL(__dev_addr_unsync); | ||
4204 | |||
4205 | /** | ||
4206 | * dev_unicast_sync - Synchronize device's unicast list to another device | ||
4207 | * @to: destination device | ||
4208 | * @from: source device | ||
4209 | * | ||
4210 | * Add newly added addresses to the destination device and release | ||
4211 | * addresses that have no users left. The source device must be | ||
4212 | * locked by netif_tx_lock_bh. | ||
4213 | * | ||
4214 | * This function is intended to be called from the dev->set_rx_mode | ||
4215 | * function of layered software devices. | ||
4216 | */ | ||
4217 | int dev_unicast_sync(struct net_device *to, struct net_device *from) | ||
4218 | { | ||
4219 | int err = 0; | ||
4220 | |||
4221 | if (to->addr_len != from->addr_len) | ||
4222 | return -EINVAL; | ||
4223 | |||
4224 | netif_addr_lock_bh(to); | ||
4225 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | ||
4226 | if (!err) | ||
4227 | __dev_set_rx_mode(to); | ||
4228 | netif_addr_unlock_bh(to); | ||
4229 | return err; | ||
4230 | } | ||
4231 | EXPORT_SYMBOL(dev_unicast_sync); | ||
4232 | |||
4233 | /** | ||
4234 | * dev_unicast_unsync - Remove synchronized addresses from the destination device | ||
4235 | * @to: destination device | ||
4236 | * @from: source device | ||
4237 | * | ||
4238 | * Remove all addresses that were added to the destination device by | ||
4239 | * dev_unicast_sync(). This function is intended to be called from the | ||
4240 | * dev->stop function of layered software devices. | ||
4241 | */ | ||
4242 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | ||
4243 | { | ||
4244 | if (to->addr_len != from->addr_len) | ||
4245 | return; | ||
4246 | |||
4247 | netif_addr_lock_bh(from); | ||
4248 | netif_addr_lock(to); | ||
4249 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | ||
4250 | __dev_set_rx_mode(to); | ||
4251 | netif_addr_unlock(to); | ||
4252 | netif_addr_unlock_bh(from); | ||
4253 | } | ||
4254 | EXPORT_SYMBOL(dev_unicast_unsync); | ||
4255 | |||
4256 | static void dev_unicast_flush(struct net_device *dev) | ||
4257 | { | ||
4258 | netif_addr_lock_bh(dev); | ||
4259 | __hw_addr_flush(&dev->uc); | ||
4260 | netif_addr_unlock_bh(dev); | ||
4261 | } | ||
4262 | |||
4263 | static void dev_unicast_init(struct net_device *dev) | ||
4264 | { | ||
4265 | __hw_addr_init(&dev->uc); | ||
4266 | } | ||
4267 | |||
4268 | |||
4269 | static void __dev_addr_discard(struct dev_addr_list **list) | ||
4270 | { | ||
4271 | struct dev_addr_list *tmp; | ||
4272 | |||
4273 | while (*list != NULL) { | ||
4274 | tmp = *list; | ||
4275 | *list = tmp->next; | ||
4276 | if (tmp->da_users > tmp->da_gusers) | ||
4277 | printk("__dev_addr_discard: address leakage! " | ||
4278 | "da_users=%d\n", tmp->da_users); | ||
4279 | kfree(tmp); | ||
4280 | } | ||
4281 | } | ||
4282 | |||
4283 | static void dev_addr_discard(struct net_device *dev) | ||
4284 | { | ||
4285 | netif_addr_lock_bh(dev); | ||
4286 | |||
4287 | __dev_addr_discard(&dev->mc_list); | ||
4288 | netdev_mc_count(dev) = 0; | ||
4289 | |||
4290 | netif_addr_unlock_bh(dev); | ||
4291 | } | ||
4292 | |||
4293 | /** | 4129 | /** |
4294 | * dev_get_flags - get flags reported to userspace | 4130 | * dev_get_flags - get flags reported to userspace |
4295 | * @dev: device | 4131 | * @dev: device |
@@ -4600,8 +4436,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4600 | return -EINVAL; | 4436 | return -EINVAL; |
4601 | if (!netif_device_present(dev)) | 4437 | if (!netif_device_present(dev)) |
4602 | return -ENODEV; | 4438 | return -ENODEV; |
4603 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | 4439 | return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); |
4604 | dev->addr_len, 1); | ||
4605 | 4440 | ||
4606 | case SIOCDELMULTI: | 4441 | case SIOCDELMULTI: |
4607 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | 4442 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || |
@@ -4609,8 +4444,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4609 | return -EINVAL; | 4444 | return -EINVAL; |
4610 | if (!netif_device_present(dev)) | 4445 | if (!netif_device_present(dev)) |
4611 | return -ENODEV; | 4446 | return -ENODEV; |
4612 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | 4447 | return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); |
4613 | dev->addr_len, 1); | ||
4614 | 4448 | ||
4615 | case SIOCSIFTXQLEN: | 4449 | case SIOCSIFTXQLEN: |
4616 | if (ifr->ifr_qlen < 0) | 4450 | if (ifr->ifr_qlen < 0) |
@@ -4917,8 +4751,8 @@ static void rollback_registered_many(struct list_head *head) | |||
4917 | /* | 4751 | /* |
4918 | * Flush the unicast and multicast chains | 4752 | * Flush the unicast and multicast chains |
4919 | */ | 4753 | */ |
4920 | dev_unicast_flush(dev); | 4754 | dev_uc_flush(dev); |
4921 | dev_addr_discard(dev); | 4755 | dev_mc_flush(dev); |
4922 | 4756 | ||
4923 | if (dev->netdev_ops->ndo_uninit) | 4757 | if (dev->netdev_ops->ndo_uninit) |
4924 | dev->netdev_ops->ndo_uninit(dev); | 4758 | dev->netdev_ops->ndo_uninit(dev); |
@@ -5067,6 +4901,24 @@ int register_netdevice(struct net_device *dev) | |||
5067 | 4901 | ||
5068 | dev->iflink = -1; | 4902 | dev->iflink = -1; |
5069 | 4903 | ||
4904 | #ifdef CONFIG_RPS | ||
4905 | if (!dev->num_rx_queues) { | ||
4906 | /* | ||
4907 | * Allocate a single RX queue if driver never called | ||
4908 | * alloc_netdev_mq | ||
4909 | */ | ||
4910 | |||
4911 | dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
4912 | if (!dev->_rx) { | ||
4913 | ret = -ENOMEM; | ||
4914 | goto out; | ||
4915 | } | ||
4916 | |||
4917 | dev->_rx->first = dev->_rx; | ||
4918 | atomic_set(&dev->_rx->count, 1); | ||
4919 | dev->num_rx_queues = 1; | ||
4920 | } | ||
4921 | #endif | ||
5070 | /* Init, if this function is available */ | 4922 | /* Init, if this function is available */ |
5071 | if (dev->netdev_ops->ndo_init) { | 4923 | if (dev->netdev_ops->ndo_init) { |
5072 | ret = dev->netdev_ops->ndo_init(dev); | 4924 | ret = dev->netdev_ops->ndo_init(dev); |
@@ -5427,6 +5279,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5427 | struct net_device *dev; | 5279 | struct net_device *dev; |
5428 | size_t alloc_size; | 5280 | size_t alloc_size; |
5429 | struct net_device *p; | 5281 | struct net_device *p; |
5282 | #ifdef CONFIG_RPS | ||
5283 | struct netdev_rx_queue *rx; | ||
5284 | int i; | ||
5285 | #endif | ||
5430 | 5286 | ||
5431 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5287 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
5432 | 5288 | ||
@@ -5452,13 +5308,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5452 | goto free_p; | 5308 | goto free_p; |
5453 | } | 5309 | } |
5454 | 5310 | ||
5311 | #ifdef CONFIG_RPS | ||
5312 | rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
5313 | if (!rx) { | ||
5314 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | ||
5315 | "rx queues.\n"); | ||
5316 | goto free_tx; | ||
5317 | } | ||
5318 | |||
5319 | atomic_set(&rx->count, queue_count); | ||
5320 | |||
5321 | /* | ||
5322 | * Set a pointer to first element in the array which holds the | ||
5323 | * reference count. | ||
5324 | */ | ||
5325 | for (i = 0; i < queue_count; i++) | ||
5326 | rx[i].first = rx; | ||
5327 | #endif | ||
5328 | |||
5455 | dev = PTR_ALIGN(p, NETDEV_ALIGN); | 5329 | dev = PTR_ALIGN(p, NETDEV_ALIGN); |
5456 | dev->padded = (char *)dev - (char *)p; | 5330 | dev->padded = (char *)dev - (char *)p; |
5457 | 5331 | ||
5458 | if (dev_addr_init(dev)) | 5332 | if (dev_addr_init(dev)) |
5459 | goto free_tx; | 5333 | goto free_rx; |
5460 | 5334 | ||
5461 | dev_unicast_init(dev); | 5335 | dev_mc_init(dev); |
5336 | dev_uc_init(dev); | ||
5462 | 5337 | ||
5463 | dev_net_set(dev, &init_net); | 5338 | dev_net_set(dev, &init_net); |
5464 | 5339 | ||
@@ -5466,6 +5341,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5466 | dev->num_tx_queues = queue_count; | 5341 | dev->num_tx_queues = queue_count; |
5467 | dev->real_num_tx_queues = queue_count; | 5342 | dev->real_num_tx_queues = queue_count; |
5468 | 5343 | ||
5344 | #ifdef CONFIG_RPS | ||
5345 | dev->_rx = rx; | ||
5346 | dev->num_rx_queues = queue_count; | ||
5347 | #endif | ||
5348 | |||
5469 | dev->gso_max_size = GSO_MAX_SIZE; | 5349 | dev->gso_max_size = GSO_MAX_SIZE; |
5470 | 5350 | ||
5471 | netdev_init_queues(dev); | 5351 | netdev_init_queues(dev); |
@@ -5480,9 +5360,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5480 | strcpy(dev->name, name); | 5360 | strcpy(dev->name, name); |
5481 | return dev; | 5361 | return dev; |
5482 | 5362 | ||
5363 | free_rx: | ||
5364 | #ifdef CONFIG_RPS | ||
5365 | kfree(rx); | ||
5483 | free_tx: | 5366 | free_tx: |
5367 | #endif | ||
5484 | kfree(tx); | 5368 | kfree(tx); |
5485 | |||
5486 | free_p: | 5369 | free_p: |
5487 | kfree(p); | 5370 | kfree(p); |
5488 | return NULL; | 5371 | return NULL; |
@@ -5684,8 +5567,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5684 | /* | 5567 | /* |
5685 | * Flush the unicast and multicast chains | 5568 | * Flush the unicast and multicast chains |
5686 | */ | 5569 | */ |
5687 | dev_unicast_flush(dev); | 5570 | dev_uc_flush(dev); |
5688 | dev_addr_discard(dev); | 5571 | dev_mc_flush(dev); |
5689 | 5572 | ||
5690 | netdev_unregister_kobject(dev); | 5573 | netdev_unregister_kobject(dev); |
5691 | 5574 | ||
@@ -5728,7 +5611,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
5728 | void *ocpu) | 5611 | void *ocpu) |
5729 | { | 5612 | { |
5730 | struct sk_buff **list_skb; | 5613 | struct sk_buff **list_skb; |
5731 | struct Qdisc **list_net; | ||
5732 | struct sk_buff *skb; | 5614 | struct sk_buff *skb; |
5733 | unsigned int cpu, oldcpu = (unsigned long)ocpu; | 5615 | unsigned int cpu, oldcpu = (unsigned long)ocpu; |
5734 | struct softnet_data *sd, *oldsd; | 5616 | struct softnet_data *sd, *oldsd; |
@@ -5749,19 +5631,23 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
5749 | *list_skb = oldsd->completion_queue; | 5631 | *list_skb = oldsd->completion_queue; |
5750 | oldsd->completion_queue = NULL; | 5632 | oldsd->completion_queue = NULL; |
5751 | 5633 | ||
5752 | /* Find end of our output_queue. */ | ||
5753 | list_net = &sd->output_queue; | ||
5754 | while (*list_net) | ||
5755 | list_net = &(*list_net)->next_sched; | ||
5756 | /* Append output queue from offline CPU. */ | 5634 | /* Append output queue from offline CPU. */ |
5757 | *list_net = oldsd->output_queue; | 5635 | if (oldsd->output_queue) { |
5758 | oldsd->output_queue = NULL; | 5636 | *sd->output_queue_tailp = oldsd->output_queue; |
5637 | sd->output_queue_tailp = oldsd->output_queue_tailp; | ||
5638 | oldsd->output_queue = NULL; | ||
5639 | oldsd->output_queue_tailp = &oldsd->output_queue; | ||
5640 | } | ||
5759 | 5641 | ||
5760 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | 5642 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
5761 | local_irq_enable(); | 5643 | local_irq_enable(); |
5762 | 5644 | ||
5763 | /* Process offline CPU's input_pkt_queue */ | 5645 | /* Process offline CPU's input_pkt_queue */ |
5764 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) | 5646 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { |
5647 | netif_rx(skb); | ||
5648 | input_queue_head_add(oldsd, 1); | ||
5649 | } | ||
5650 | while ((skb = __skb_dequeue(&oldsd->process_queue))) | ||
5765 | netif_rx(skb); | 5651 | netif_rx(skb); |
5766 | 5652 | ||
5767 | return NOTIFY_OK; | 5653 | return NOTIFY_OK; |
@@ -5978,17 +5864,26 @@ static int __init net_dev_init(void) | |||
5978 | */ | 5864 | */ |
5979 | 5865 | ||
5980 | for_each_possible_cpu(i) { | 5866 | for_each_possible_cpu(i) { |
5981 | struct softnet_data *queue; | 5867 | struct softnet_data *sd = &per_cpu(softnet_data, i); |
5982 | 5868 | ||
5983 | queue = &per_cpu(softnet_data, i); | 5869 | memset(sd, 0, sizeof(*sd)); |
5984 | skb_queue_head_init(&queue->input_pkt_queue); | 5870 | skb_queue_head_init(&sd->input_pkt_queue); |
5985 | queue->completion_queue = NULL; | 5871 | skb_queue_head_init(&sd->process_queue); |
5986 | INIT_LIST_HEAD(&queue->poll_list); | 5872 | sd->completion_queue = NULL; |
5873 | INIT_LIST_HEAD(&sd->poll_list); | ||
5874 | sd->output_queue = NULL; | ||
5875 | sd->output_queue_tailp = &sd->output_queue; | ||
5876 | #ifdef CONFIG_RPS | ||
5877 | sd->csd.func = rps_trigger_softirq; | ||
5878 | sd->csd.info = sd; | ||
5879 | sd->csd.flags = 0; | ||
5880 | sd->cpu = i; | ||
5881 | #endif | ||
5987 | 5882 | ||
5988 | queue->backlog.poll = process_backlog; | 5883 | sd->backlog.poll = process_backlog; |
5989 | queue->backlog.weight = weight_p; | 5884 | sd->backlog.weight = weight_p; |
5990 | queue->backlog.gro_list = NULL; | 5885 | sd->backlog.gro_list = NULL; |
5991 | queue->backlog.gro_count = 0; | 5886 | sd->backlog.gro_count = 0; |
5992 | } | 5887 | } |
5993 | 5888 | ||
5994 | dev_boot_phase = 0; | 5889 | dev_boot_phase = 0; |
@@ -6023,7 +5918,7 @@ subsys_initcall(net_dev_init); | |||
6023 | 5918 | ||
6024 | static int __init initialize_hashrnd(void) | 5919 | static int __init initialize_hashrnd(void) |
6025 | { | 5920 | { |
6026 | get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd)); | 5921 | get_random_bytes(&hashrnd, sizeof(hashrnd)); |
6027 | return 0; | 5922 | return 0; |
6028 | } | 5923 | } |
6029 | 5924 | ||
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c new file mode 100644 index 000000000000..508f9c18992f --- /dev/null +++ b/net/core/dev_addr_lists.c | |||
@@ -0,0 +1,741 @@ | |||
1 | /* | ||
2 | * net/core/dev_addr_lists.c - Functions for handling net device lists | ||
3 | * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This file contains functions for working with unicast, multicast and device | ||
6 | * addresses lists. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/netdevice.h> | ||
15 | #include <linux/rtnetlink.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | |||
19 | /* | ||
20 | * General list handling functions | ||
21 | */ | ||
22 | |||
23 | static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, | ||
24 | unsigned char *addr, int addr_len, | ||
25 | unsigned char addr_type, bool global) | ||
26 | { | ||
27 | struct netdev_hw_addr *ha; | ||
28 | int alloc_size; | ||
29 | |||
30 | if (addr_len > MAX_ADDR_LEN) | ||
31 | return -EINVAL; | ||
32 | |||
33 | list_for_each_entry(ha, &list->list, list) { | ||
34 | if (!memcmp(ha->addr, addr, addr_len) && | ||
35 | ha->type == addr_type) { | ||
36 | if (global) { | ||
37 | /* check if addr is already used as global */ | ||
38 | if (ha->global_use) | ||
39 | return 0; | ||
40 | else | ||
41 | ha->global_use = true; | ||
42 | } | ||
43 | ha->refcount++; | ||
44 | return 0; | ||
45 | } | ||
46 | } | ||
47 | |||
48 | |||
49 | alloc_size = sizeof(*ha); | ||
50 | if (alloc_size < L1_CACHE_BYTES) | ||
51 | alloc_size = L1_CACHE_BYTES; | ||
52 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
53 | if (!ha) | ||
54 | return -ENOMEM; | ||
55 | memcpy(ha->addr, addr, addr_len); | ||
56 | ha->type = addr_type; | ||
57 | ha->refcount = 1; | ||
58 | ha->global_use = global; | ||
59 | ha->synced = false; | ||
60 | list_add_tail_rcu(&ha->list, &list->list); | ||
61 | list->count++; | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
66 | int addr_len, unsigned char addr_type) | ||
67 | { | ||
68 | return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); | ||
69 | } | ||
70 | |||
71 | static void ha_rcu_free(struct rcu_head *head) | ||
72 | { | ||
73 | struct netdev_hw_addr *ha; | ||
74 | |||
75 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
76 | kfree(ha); | ||
77 | } | ||
78 | |||
79 | static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, | ||
80 | unsigned char *addr, int addr_len, | ||
81 | unsigned char addr_type, bool global) | ||
82 | { | ||
83 | struct netdev_hw_addr *ha; | ||
84 | |||
85 | list_for_each_entry(ha, &list->list, list) { | ||
86 | if (!memcmp(ha->addr, addr, addr_len) && | ||
87 | (ha->type == addr_type || !addr_type)) { | ||
88 | if (global) { | ||
89 | if (!ha->global_use) | ||
90 | break; | ||
91 | else | ||
92 | ha->global_use = false; | ||
93 | } | ||
94 | if (--ha->refcount) | ||
95 | return 0; | ||
96 | list_del_rcu(&ha->list); | ||
97 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
98 | list->count--; | ||
99 | return 0; | ||
100 | } | ||
101 | } | ||
102 | return -ENOENT; | ||
103 | } | ||
104 | |||
105 | static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
106 | int addr_len, unsigned char addr_type) | ||
107 | { | ||
108 | return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); | ||
109 | } | ||
110 | |||
111 | int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | ||
112 | struct netdev_hw_addr_list *from_list, | ||
113 | int addr_len, unsigned char addr_type) | ||
114 | { | ||
115 | int err; | ||
116 | struct netdev_hw_addr *ha, *ha2; | ||
117 | unsigned char type; | ||
118 | |||
119 | list_for_each_entry(ha, &from_list->list, list) { | ||
120 | type = addr_type ? addr_type : ha->type; | ||
121 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
122 | if (err) | ||
123 | goto unroll; | ||
124 | } | ||
125 | return 0; | ||
126 | |||
127 | unroll: | ||
128 | list_for_each_entry(ha2, &from_list->list, list) { | ||
129 | if (ha2 == ha) | ||
130 | break; | ||
131 | type = addr_type ? addr_type : ha2->type; | ||
132 | __hw_addr_del(to_list, ha2->addr, addr_len, type); | ||
133 | } | ||
134 | return err; | ||
135 | } | ||
136 | EXPORT_SYMBOL(__hw_addr_add_multiple); | ||
137 | |||
138 | void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | ||
139 | struct netdev_hw_addr_list *from_list, | ||
140 | int addr_len, unsigned char addr_type) | ||
141 | { | ||
142 | struct netdev_hw_addr *ha; | ||
143 | unsigned char type; | ||
144 | |||
145 | list_for_each_entry(ha, &from_list->list, list) { | ||
146 | type = addr_type ? addr_type : ha->type; | ||
147 | __hw_addr_del(to_list, ha->addr, addr_len, addr_type); | ||
148 | } | ||
149 | } | ||
150 | EXPORT_SYMBOL(__hw_addr_del_multiple); | ||
151 | |||
152 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | ||
153 | struct netdev_hw_addr_list *from_list, | ||
154 | int addr_len) | ||
155 | { | ||
156 | int err = 0; | ||
157 | struct netdev_hw_addr *ha, *tmp; | ||
158 | |||
159 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
160 | if (!ha->synced) { | ||
161 | err = __hw_addr_add(to_list, ha->addr, | ||
162 | addr_len, ha->type); | ||
163 | if (err) | ||
164 | break; | ||
165 | ha->synced = true; | ||
166 | ha->refcount++; | ||
167 | } else if (ha->refcount == 1) { | ||
168 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | ||
169 | __hw_addr_del(from_list, ha->addr, addr_len, ha->type); | ||
170 | } | ||
171 | } | ||
172 | return err; | ||
173 | } | ||
174 | EXPORT_SYMBOL(__hw_addr_sync); | ||
175 | |||
176 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | ||
177 | struct netdev_hw_addr_list *from_list, | ||
178 | int addr_len) | ||
179 | { | ||
180 | struct netdev_hw_addr *ha, *tmp; | ||
181 | |||
182 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
183 | if (ha->synced) { | ||
184 | __hw_addr_del(to_list, ha->addr, | ||
185 | addr_len, ha->type); | ||
186 | ha->synced = false; | ||
187 | __hw_addr_del(from_list, ha->addr, | ||
188 | addr_len, ha->type); | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | EXPORT_SYMBOL(__hw_addr_unsync); | ||
193 | |||
194 | void __hw_addr_flush(struct netdev_hw_addr_list *list) | ||
195 | { | ||
196 | struct netdev_hw_addr *ha, *tmp; | ||
197 | |||
198 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | ||
199 | list_del_rcu(&ha->list); | ||
200 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
201 | } | ||
202 | list->count = 0; | ||
203 | } | ||
204 | EXPORT_SYMBOL(__hw_addr_flush); | ||
205 | |||
206 | void __hw_addr_init(struct netdev_hw_addr_list *list) | ||
207 | { | ||
208 | INIT_LIST_HEAD(&list->list); | ||
209 | list->count = 0; | ||
210 | } | ||
211 | EXPORT_SYMBOL(__hw_addr_init); | ||
212 | |||
213 | /* | ||
214 | * Device addresses handling functions | ||
215 | */ | ||
216 | |||
217 | /** | ||
218 | * dev_addr_flush - Flush device address list | ||
219 | * @dev: device | ||
220 | * | ||
221 | * Flush device address list and reset ->dev_addr. | ||
222 | * | ||
223 | * The caller must hold the rtnl_mutex. | ||
224 | */ | ||
225 | void dev_addr_flush(struct net_device *dev) | ||
226 | { | ||
227 | /* rtnl_mutex must be held here */ | ||
228 | |||
229 | __hw_addr_flush(&dev->dev_addrs); | ||
230 | dev->dev_addr = NULL; | ||
231 | } | ||
232 | EXPORT_SYMBOL(dev_addr_flush); | ||
233 | |||
234 | /** | ||
235 | * dev_addr_init - Init device address list | ||
236 | * @dev: device | ||
237 | * | ||
238 | * Init device address list and create the first element, | ||
239 | * used by ->dev_addr. | ||
240 | * | ||
241 | * The caller must hold the rtnl_mutex. | ||
242 | */ | ||
243 | int dev_addr_init(struct net_device *dev) | ||
244 | { | ||
245 | unsigned char addr[MAX_ADDR_LEN]; | ||
246 | struct netdev_hw_addr *ha; | ||
247 | int err; | ||
248 | |||
249 | /* rtnl_mutex must be held here */ | ||
250 | |||
251 | __hw_addr_init(&dev->dev_addrs); | ||
252 | memset(addr, 0, sizeof(addr)); | ||
253 | err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), | ||
254 | NETDEV_HW_ADDR_T_LAN); | ||
255 | if (!err) { | ||
256 | /* | ||
257 | * Get the first (previously created) address from the list | ||
258 | * and set dev_addr pointer to this location. | ||
259 | */ | ||
260 | ha = list_first_entry(&dev->dev_addrs.list, | ||
261 | struct netdev_hw_addr, list); | ||
262 | dev->dev_addr = ha->addr; | ||
263 | } | ||
264 | return err; | ||
265 | } | ||
266 | EXPORT_SYMBOL(dev_addr_init); | ||
267 | |||
268 | /** | ||
269 | * dev_addr_add - Add a device address | ||
270 | * @dev: device | ||
271 | * @addr: address to add | ||
272 | * @addr_type: address type | ||
273 | * | ||
274 | * Add a device address to the device or increase the reference count if | ||
275 | * it already exists. | ||
276 | * | ||
277 | * The caller must hold the rtnl_mutex. | ||
278 | */ | ||
279 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
280 | unsigned char addr_type) | ||
281 | { | ||
282 | int err; | ||
283 | |||
284 | ASSERT_RTNL(); | ||
285 | |||
286 | err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); | ||
287 | if (!err) | ||
288 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
289 | return err; | ||
290 | } | ||
291 | EXPORT_SYMBOL(dev_addr_add); | ||
292 | |||
293 | /** | ||
294 | * dev_addr_del - Release a device address. | ||
295 | * @dev: device | ||
296 | * @addr: address to delete | ||
297 | * @addr_type: address type | ||
298 | * | ||
299 | * Release reference to a device address and remove it from the device | ||
300 | * if the reference count drops to zero. | ||
301 | * | ||
302 | * The caller must hold the rtnl_mutex. | ||
303 | */ | ||
304 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
305 | unsigned char addr_type) | ||
306 | { | ||
307 | int err; | ||
308 | struct netdev_hw_addr *ha; | ||
309 | |||
310 | ASSERT_RTNL(); | ||
311 | |||
312 | /* | ||
313 | * We can not remove the first address from the list because | ||
314 | * dev->dev_addr points to that. | ||
315 | */ | ||
316 | ha = list_first_entry(&dev->dev_addrs.list, | ||
317 | struct netdev_hw_addr, list); | ||
318 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
319 | return -ENOENT; | ||
320 | |||
321 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | ||
322 | addr_type); | ||
323 | if (!err) | ||
324 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
325 | return err; | ||
326 | } | ||
327 | EXPORT_SYMBOL(dev_addr_del); | ||
328 | |||
329 | /** | ||
330 | * dev_addr_add_multiple - Add device addresses from another device | ||
331 | * @to_dev: device to which addresses will be added | ||
332 | * @from_dev: device from which addresses will be added | ||
333 | * @addr_type: address type - 0 means type will be used from from_dev | ||
334 | * | ||
335 | * Add device addresses of the one device to another. | ||
336 | ** | ||
337 | * The caller must hold the rtnl_mutex. | ||
338 | */ | ||
339 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
340 | struct net_device *from_dev, | ||
341 | unsigned char addr_type) | ||
342 | { | ||
343 | int err; | ||
344 | |||
345 | ASSERT_RTNL(); | ||
346 | |||
347 | if (from_dev->addr_len != to_dev->addr_len) | ||
348 | return -EINVAL; | ||
349 | err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
350 | to_dev->addr_len, addr_type); | ||
351 | if (!err) | ||
352 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
353 | return err; | ||
354 | } | ||
355 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
356 | |||
357 | /** | ||
358 | * dev_addr_del_multiple - Delete device addresses by another device | ||
359 | * @to_dev: device where the addresses will be deleted | ||
360 | * @from_dev: device by which addresses the addresses will be deleted | ||
361 | * @addr_type: address type - 0 means type will used from from_dev | ||
362 | * | ||
363 | * Deletes addresses in to device by the list of addresses in from device. | ||
364 | * | ||
365 | * The caller must hold the rtnl_mutex. | ||
366 | */ | ||
367 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
368 | struct net_device *from_dev, | ||
369 | unsigned char addr_type) | ||
370 | { | ||
371 | ASSERT_RTNL(); | ||
372 | |||
373 | if (from_dev->addr_len != to_dev->addr_len) | ||
374 | return -EINVAL; | ||
375 | __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
376 | to_dev->addr_len, addr_type); | ||
377 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
378 | return 0; | ||
379 | } | ||
380 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
381 | |||
382 | /* | ||
383 | * Unicast list handling functions | ||
384 | */ | ||
385 | |||
386 | /** | ||
387 | * dev_uc_add - Add a secondary unicast address | ||
388 | * @dev: device | ||
389 | * @addr: address to add | ||
390 | * | ||
391 | * Add a secondary unicast address to the device or increase | ||
392 | * the reference count if it already exists. | ||
393 | */ | ||
394 | int dev_uc_add(struct net_device *dev, unsigned char *addr) | ||
395 | { | ||
396 | int err; | ||
397 | |||
398 | netif_addr_lock_bh(dev); | ||
399 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | ||
400 | NETDEV_HW_ADDR_T_UNICAST); | ||
401 | if (!err) | ||
402 | __dev_set_rx_mode(dev); | ||
403 | netif_addr_unlock_bh(dev); | ||
404 | return err; | ||
405 | } | ||
406 | EXPORT_SYMBOL(dev_uc_add); | ||
407 | |||
408 | /** | ||
409 | * dev_uc_del - Release secondary unicast address. | ||
410 | * @dev: device | ||
411 | * @addr: address to delete | ||
412 | * | ||
413 | * Release reference to a secondary unicast address and remove it | ||
414 | * from the device if the reference count drops to zero. | ||
415 | */ | ||
416 | int dev_uc_del(struct net_device *dev, unsigned char *addr) | ||
417 | { | ||
418 | int err; | ||
419 | |||
420 | netif_addr_lock_bh(dev); | ||
421 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | ||
422 | NETDEV_HW_ADDR_T_UNICAST); | ||
423 | if (!err) | ||
424 | __dev_set_rx_mode(dev); | ||
425 | netif_addr_unlock_bh(dev); | ||
426 | return err; | ||
427 | } | ||
428 | EXPORT_SYMBOL(dev_uc_del); | ||
429 | |||
430 | /** | ||
431 | * dev_uc_sync - Synchronize device's unicast list to another device | ||
432 | * @to: destination device | ||
433 | * @from: source device | ||
434 | * | ||
435 | * Add newly added addresses to the destination device and release | ||
436 | * addresses that have no users left. The source device must be | ||
437 | * locked by netif_tx_lock_bh. | ||
438 | * | ||
439 | * This function is intended to be called from the dev->set_rx_mode | ||
440 | * function of layered software devices. | ||
441 | */ | ||
442 | int dev_uc_sync(struct net_device *to, struct net_device *from) | ||
443 | { | ||
444 | int err = 0; | ||
445 | |||
446 | if (to->addr_len != from->addr_len) | ||
447 | return -EINVAL; | ||
448 | |||
449 | netif_addr_lock_bh(to); | ||
450 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | ||
451 | if (!err) | ||
452 | __dev_set_rx_mode(to); | ||
453 | netif_addr_unlock_bh(to); | ||
454 | return err; | ||
455 | } | ||
456 | EXPORT_SYMBOL(dev_uc_sync); | ||
457 | |||
458 | /** | ||
459 | * dev_uc_unsync - Remove synchronized addresses from the destination device | ||
460 | * @to: destination device | ||
461 | * @from: source device | ||
462 | * | ||
463 | * Remove all addresses that were added to the destination device by | ||
464 | * dev_uc_sync(). This function is intended to be called from the | ||
465 | * dev->stop function of layered software devices. | ||
466 | */ | ||
467 | void dev_uc_unsync(struct net_device *to, struct net_device *from) | ||
468 | { | ||
469 | if (to->addr_len != from->addr_len) | ||
470 | return; | ||
471 | |||
472 | netif_addr_lock_bh(from); | ||
473 | netif_addr_lock(to); | ||
474 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | ||
475 | __dev_set_rx_mode(to); | ||
476 | netif_addr_unlock(to); | ||
477 | netif_addr_unlock_bh(from); | ||
478 | } | ||
479 | EXPORT_SYMBOL(dev_uc_unsync); | ||
480 | |||
481 | /** | ||
482 | * dev_uc_flush - Flush unicast addresses | ||
483 | * @dev: device | ||
484 | * | ||
485 | * Flush unicast addresses. | ||
486 | */ | ||
487 | void dev_uc_flush(struct net_device *dev) | ||
488 | { | ||
489 | netif_addr_lock_bh(dev); | ||
490 | __hw_addr_flush(&dev->uc); | ||
491 | netif_addr_unlock_bh(dev); | ||
492 | } | ||
493 | EXPORT_SYMBOL(dev_uc_flush); | ||
494 | |||
495 | /** | ||
496 | * dev_uc_flush - Init unicast address list | ||
497 | * @dev: device | ||
498 | * | ||
499 | * Init unicast address list. | ||
500 | */ | ||
501 | void dev_uc_init(struct net_device *dev) | ||
502 | { | ||
503 | __hw_addr_init(&dev->uc); | ||
504 | } | ||
505 | EXPORT_SYMBOL(dev_uc_init); | ||
506 | |||
507 | /* | ||
508 | * Multicast list handling functions | ||
509 | */ | ||
510 | |||
511 | static int __dev_mc_add(struct net_device *dev, unsigned char *addr, | ||
512 | bool global) | ||
513 | { | ||
514 | int err; | ||
515 | |||
516 | netif_addr_lock_bh(dev); | ||
517 | err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len, | ||
518 | NETDEV_HW_ADDR_T_MULTICAST, global); | ||
519 | if (!err) | ||
520 | __dev_set_rx_mode(dev); | ||
521 | netif_addr_unlock_bh(dev); | ||
522 | return err; | ||
523 | } | ||
524 | /** | ||
525 | * dev_mc_add - Add a multicast address | ||
526 | * @dev: device | ||
527 | * @addr: address to add | ||
528 | * | ||
529 | * Add a multicast address to the device or increase | ||
530 | * the reference count if it already exists. | ||
531 | */ | ||
532 | int dev_mc_add(struct net_device *dev, unsigned char *addr) | ||
533 | { | ||
534 | return __dev_mc_add(dev, addr, false); | ||
535 | } | ||
536 | EXPORT_SYMBOL(dev_mc_add); | ||
537 | |||
538 | /** | ||
539 | * dev_mc_add_global - Add a global multicast address | ||
540 | * @dev: device | ||
541 | * @addr: address to add | ||
542 | * | ||
543 | * Add a global multicast address to the device. | ||
544 | */ | ||
545 | int dev_mc_add_global(struct net_device *dev, unsigned char *addr) | ||
546 | { | ||
547 | return __dev_mc_add(dev, addr, true); | ||
548 | } | ||
549 | EXPORT_SYMBOL(dev_mc_add_global); | ||
550 | |||
551 | static int __dev_mc_del(struct net_device *dev, unsigned char *addr, | ||
552 | bool global) | ||
553 | { | ||
554 | int err; | ||
555 | |||
556 | netif_addr_lock_bh(dev); | ||
557 | err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len, | ||
558 | NETDEV_HW_ADDR_T_MULTICAST, global); | ||
559 | if (!err) | ||
560 | __dev_set_rx_mode(dev); | ||
561 | netif_addr_unlock_bh(dev); | ||
562 | return err; | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * dev_mc_del - Delete a multicast address. | ||
567 | * @dev: device | ||
568 | * @addr: address to delete | ||
569 | * | ||
570 | * Release reference to a multicast address and remove it | ||
571 | * from the device if the reference count drops to zero. | ||
572 | */ | ||
573 | int dev_mc_del(struct net_device *dev, unsigned char *addr) | ||
574 | { | ||
575 | return __dev_mc_del(dev, addr, false); | ||
576 | } | ||
577 | EXPORT_SYMBOL(dev_mc_del); | ||
578 | |||
579 | /** | ||
580 | * dev_mc_del_global - Delete a global multicast address. | ||
581 | * @dev: device | ||
582 | * @addr: address to delete | ||
583 | * | ||
584 | * Release reference to a multicast address and remove it | ||
585 | * from the device if the reference count drops to zero. | ||
586 | */ | ||
587 | int dev_mc_del_global(struct net_device *dev, unsigned char *addr) | ||
588 | { | ||
589 | return __dev_mc_del(dev, addr, true); | ||
590 | } | ||
591 | EXPORT_SYMBOL(dev_mc_del_global); | ||
592 | |||
593 | /** | ||
594 | * dev_mc_sync - Synchronize device's unicast list to another device | ||
595 | * @to: destination device | ||
596 | * @from: source device | ||
597 | * | ||
598 | * Add newly added addresses to the destination device and release | ||
599 | * addresses that have no users left. The source device must be | ||
600 | * locked by netif_tx_lock_bh. | ||
601 | * | ||
602 | * This function is intended to be called from the dev->set_multicast_list | ||
603 | * or dev->set_rx_mode function of layered software devices. | ||
604 | */ | ||
605 | int dev_mc_sync(struct net_device *to, struct net_device *from) | ||
606 | { | ||
607 | int err = 0; | ||
608 | |||
609 | if (to->addr_len != from->addr_len) | ||
610 | return -EINVAL; | ||
611 | |||
612 | netif_addr_lock_bh(to); | ||
613 | err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); | ||
614 | if (!err) | ||
615 | __dev_set_rx_mode(to); | ||
616 | netif_addr_unlock_bh(to); | ||
617 | return err; | ||
618 | } | ||
619 | EXPORT_SYMBOL(dev_mc_sync); | ||
620 | |||
621 | /** | ||
622 | * dev_mc_unsync - Remove synchronized addresses from the destination device | ||
623 | * @to: destination device | ||
624 | * @from: source device | ||
625 | * | ||
626 | * Remove all addresses that were added to the destination device by | ||
627 | * dev_mc_sync(). This function is intended to be called from the | ||
628 | * dev->stop function of layered software devices. | ||
629 | */ | ||
630 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | ||
631 | { | ||
632 | if (to->addr_len != from->addr_len) | ||
633 | return; | ||
634 | |||
635 | netif_addr_lock_bh(from); | ||
636 | netif_addr_lock(to); | ||
637 | __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); | ||
638 | __dev_set_rx_mode(to); | ||
639 | netif_addr_unlock(to); | ||
640 | netif_addr_unlock_bh(from); | ||
641 | } | ||
642 | EXPORT_SYMBOL(dev_mc_unsync); | ||
643 | |||
644 | /** | ||
645 | * dev_mc_flush - Flush multicast addresses | ||
646 | * @dev: device | ||
647 | * | ||
648 | * Flush multicast addresses. | ||
649 | */ | ||
650 | void dev_mc_flush(struct net_device *dev) | ||
651 | { | ||
652 | netif_addr_lock_bh(dev); | ||
653 | __hw_addr_flush(&dev->mc); | ||
654 | netif_addr_unlock_bh(dev); | ||
655 | } | ||
656 | EXPORT_SYMBOL(dev_mc_flush); | ||
657 | |||
658 | /** | ||
659 | * dev_mc_flush - Init multicast address list | ||
660 | * @dev: device | ||
661 | * | ||
662 | * Init multicast address list. | ||
663 | */ | ||
664 | void dev_mc_init(struct net_device *dev) | ||
665 | { | ||
666 | __hw_addr_init(&dev->mc); | ||
667 | } | ||
668 | EXPORT_SYMBOL(dev_mc_init); | ||
669 | |||
670 | #ifdef CONFIG_PROC_FS | ||
671 | #include <linux/seq_file.h> | ||
672 | |||
673 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | ||
674 | { | ||
675 | struct netdev_hw_addr *ha; | ||
676 | struct net_device *dev = v; | ||
677 | |||
678 | if (v == SEQ_START_TOKEN) | ||
679 | return 0; | ||
680 | |||
681 | netif_addr_lock_bh(dev); | ||
682 | netdev_for_each_mc_addr(ha, dev) { | ||
683 | int i; | ||
684 | |||
685 | seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, | ||
686 | dev->name, ha->refcount, ha->global_use); | ||
687 | |||
688 | for (i = 0; i < dev->addr_len; i++) | ||
689 | seq_printf(seq, "%02x", ha->addr[i]); | ||
690 | |||
691 | seq_putc(seq, '\n'); | ||
692 | } | ||
693 | netif_addr_unlock_bh(dev); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | static const struct seq_operations dev_mc_seq_ops = { | ||
698 | .start = dev_seq_start, | ||
699 | .next = dev_seq_next, | ||
700 | .stop = dev_seq_stop, | ||
701 | .show = dev_mc_seq_show, | ||
702 | }; | ||
703 | |||
704 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | ||
705 | { | ||
706 | return seq_open_net(inode, file, &dev_mc_seq_ops, | ||
707 | sizeof(struct seq_net_private)); | ||
708 | } | ||
709 | |||
710 | static const struct file_operations dev_mc_seq_fops = { | ||
711 | .owner = THIS_MODULE, | ||
712 | .open = dev_mc_seq_open, | ||
713 | .read = seq_read, | ||
714 | .llseek = seq_lseek, | ||
715 | .release = seq_release_net, | ||
716 | }; | ||
717 | |||
718 | #endif | ||
719 | |||
720 | static int __net_init dev_mc_net_init(struct net *net) | ||
721 | { | ||
722 | if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) | ||
723 | return -ENOMEM; | ||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | static void __net_exit dev_mc_net_exit(struct net *net) | ||
728 | { | ||
729 | proc_net_remove(net, "dev_mcast"); | ||
730 | } | ||
731 | |||
732 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | ||
733 | .init = dev_mc_net_init, | ||
734 | .exit = dev_mc_net_exit, | ||
735 | }; | ||
736 | |||
737 | void __init dev_mcast_init(void) | ||
738 | { | ||
739 | register_pernet_subsys(&dev_mc_net_ops); | ||
740 | } | ||
741 | |||
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c deleted file mode 100644 index fd91569e2394..000000000000 --- a/net/core/dev_mcast.c +++ /dev/null | |||
@@ -1,231 +0,0 @@ | |||
1 | /* | ||
2 | * Linux NET3: Multicast List maintenance. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Tim Kordas <tjk@nostromo.eeap.cwru.edu> | ||
6 | * Richard Underwood <richard@wuzz.demon.co.uk> | ||
7 | * | ||
8 | * Stir fried together from the IP multicast and CAP patches above | ||
9 | * Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
10 | * | ||
11 | * Fixes: | ||
12 | * Alan Cox : Update the device on a real delete | ||
13 | * rather than any time but... | ||
14 | * Alan Cox : IFF_ALLMULTI support. | ||
15 | * Alan Cox : New format set_multicast_list() calls. | ||
16 | * Gleb Natapov : Remove dev_mc_lock. | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/system.h> | ||
27 | #include <linux/bitops.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/socket.h> | ||
33 | #include <linux/sockios.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <linux/errno.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/if_ether.h> | ||
38 | #include <linux/inet.h> | ||
39 | #include <linux/netdevice.h> | ||
40 | #include <linux/etherdevice.h> | ||
41 | #include <linux/proc_fs.h> | ||
42 | #include <linux/seq_file.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <net/net_namespace.h> | ||
45 | #include <net/ip.h> | ||
46 | #include <net/route.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <net/sock.h> | ||
49 | #include <net/arp.h> | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Device multicast list maintenance. | ||
54 | * | ||
55 | * This is used both by IP and by the user level maintenance functions. | ||
56 | * Unlike BSD we maintain a usage count on a given multicast address so | ||
57 | * that a casual user application can add/delete multicasts used by | ||
58 | * protocols without doing damage to the protocols when it deletes the | ||
59 | * entries. It also helps IP as it tracks overlapping maps. | ||
60 | * | ||
61 | * Device mc lists are changed by bh at least if IPv6 is enabled, | ||
62 | * so that it must be bh protected. | ||
63 | * | ||
64 | * We block accesses to device mc filters with netif_tx_lock. | ||
65 | */ | ||
66 | |||
67 | /* | ||
68 | * Delete a device level multicast | ||
69 | */ | ||
70 | |||
71 | int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) | ||
72 | { | ||
73 | int err; | ||
74 | |||
75 | netif_addr_lock_bh(dev); | ||
76 | err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, | ||
77 | addr, alen, glbl); | ||
78 | if (!err) { | ||
79 | /* | ||
80 | * We have altered the list, so the card | ||
81 | * loaded filter is now wrong. Fix it | ||
82 | */ | ||
83 | |||
84 | __dev_set_rx_mode(dev); | ||
85 | } | ||
86 | netif_addr_unlock_bh(dev); | ||
87 | return err; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Add a device level multicast | ||
92 | */ | ||
93 | |||
94 | int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) | ||
95 | { | ||
96 | int err; | ||
97 | |||
98 | netif_addr_lock_bh(dev); | ||
99 | if (alen != dev->addr_len) | ||
100 | return -EINVAL; | ||
101 | err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); | ||
102 | if (!err) | ||
103 | __dev_set_rx_mode(dev); | ||
104 | netif_addr_unlock_bh(dev); | ||
105 | return err; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * dev_mc_sync - Synchronize device's multicast list to another device | ||
110 | * @to: destination device | ||
111 | * @from: source device | ||
112 | * | ||
113 | * Add newly added addresses to the destination device and release | ||
114 | * addresses that have no users left. The source device must be | ||
115 | * locked by netif_tx_lock_bh. | ||
116 | * | ||
117 | * This function is intended to be called from the dev->set_multicast_list | ||
118 | * or dev->set_rx_mode function of layered software devices. | ||
119 | */ | ||
120 | int dev_mc_sync(struct net_device *to, struct net_device *from) | ||
121 | { | ||
122 | int err = 0; | ||
123 | |||
124 | netif_addr_lock_bh(to); | ||
125 | err = __dev_addr_sync(&to->mc_list, &to->mc_count, | ||
126 | &from->mc_list, &from->mc_count); | ||
127 | if (!err) | ||
128 | __dev_set_rx_mode(to); | ||
129 | netif_addr_unlock_bh(to); | ||
130 | |||
131 | return err; | ||
132 | } | ||
133 | EXPORT_SYMBOL(dev_mc_sync); | ||
134 | |||
135 | |||
136 | /** | ||
137 | * dev_mc_unsync - Remove synchronized addresses from the destination | ||
138 | * device | ||
139 | * @to: destination device | ||
140 | * @from: source device | ||
141 | * | ||
142 | * Remove all addresses that were added to the destination device by | ||
143 | * dev_mc_sync(). This function is intended to be called from the | ||
144 | * dev->stop function of layered software devices. | ||
145 | */ | ||
146 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | ||
147 | { | ||
148 | netif_addr_lock_bh(from); | ||
149 | netif_addr_lock(to); | ||
150 | |||
151 | __dev_addr_unsync(&to->mc_list, &to->mc_count, | ||
152 | &from->mc_list, &from->mc_count); | ||
153 | __dev_set_rx_mode(to); | ||
154 | |||
155 | netif_addr_unlock(to); | ||
156 | netif_addr_unlock_bh(from); | ||
157 | } | ||
158 | EXPORT_SYMBOL(dev_mc_unsync); | ||
159 | |||
160 | #ifdef CONFIG_PROC_FS | ||
161 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | ||
162 | { | ||
163 | struct dev_addr_list *m; | ||
164 | struct net_device *dev = v; | ||
165 | |||
166 | if (v == SEQ_START_TOKEN) | ||
167 | return 0; | ||
168 | |||
169 | netif_addr_lock_bh(dev); | ||
170 | for (m = dev->mc_list; m; m = m->next) { | ||
171 | int i; | ||
172 | |||
173 | seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, | ||
174 | dev->name, m->dmi_users, m->dmi_gusers); | ||
175 | |||
176 | for (i = 0; i < m->dmi_addrlen; i++) | ||
177 | seq_printf(seq, "%02x", m->dmi_addr[i]); | ||
178 | |||
179 | seq_putc(seq, '\n'); | ||
180 | } | ||
181 | netif_addr_unlock_bh(dev); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static const struct seq_operations dev_mc_seq_ops = { | ||
186 | .start = dev_seq_start, | ||
187 | .next = dev_seq_next, | ||
188 | .stop = dev_seq_stop, | ||
189 | .show = dev_mc_seq_show, | ||
190 | }; | ||
191 | |||
192 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | ||
193 | { | ||
194 | return seq_open_net(inode, file, &dev_mc_seq_ops, | ||
195 | sizeof(struct seq_net_private)); | ||
196 | } | ||
197 | |||
198 | static const struct file_operations dev_mc_seq_fops = { | ||
199 | .owner = THIS_MODULE, | ||
200 | .open = dev_mc_seq_open, | ||
201 | .read = seq_read, | ||
202 | .llseek = seq_lseek, | ||
203 | .release = seq_release_net, | ||
204 | }; | ||
205 | |||
206 | #endif | ||
207 | |||
208 | static int __net_init dev_mc_net_init(struct net *net) | ||
209 | { | ||
210 | if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) | ||
211 | return -ENOMEM; | ||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static void __net_exit dev_mc_net_exit(struct net *net) | ||
216 | { | ||
217 | proc_net_remove(net, "dev_mcast"); | ||
218 | } | ||
219 | |||
220 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | ||
221 | .init = dev_mc_net_init, | ||
222 | .exit = dev_mc_net_exit, | ||
223 | }; | ||
224 | |||
225 | void __init dev_mcast_init(void) | ||
226 | { | ||
227 | register_pernet_subsys(&dev_mc_net_ops); | ||
228 | } | ||
229 | |||
230 | EXPORT_SYMBOL(dev_mc_add); | ||
231 | EXPORT_SYMBOL(dev_mc_delete); | ||
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index f8c874975350..cf208d8042b1 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/percpu.h> | 21 | #include <linux/percpu.h> |
22 | #include <linux/timer.h> | 22 | #include <linux/timer.h> |
23 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
24 | #include <linux/slab.h> | ||
24 | #include <net/genetlink.h> | 25 | #include <net/genetlink.h> |
25 | #include <net/netevent.h> | 26 | #include <net/netevent.h> |
26 | 27 | ||
diff --git a/net/core/dst.c b/net/core/dst.c index cb1b3488b739..9920722cc82b 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/workqueue.h> | 12 | #include <linux/workqueue.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
17 | #include <linux/string.h> | 18 | #include <linux/string.h> |
@@ -43,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0); | |||
43 | */ | 44 | */ |
44 | static struct { | 45 | static struct { |
45 | spinlock_t lock; | 46 | spinlock_t lock; |
46 | struct dst_entry *list; | 47 | struct dst_entry *list; |
47 | unsigned long timer_inc; | 48 | unsigned long timer_inc; |
48 | unsigned long timer_expires; | 49 | unsigned long timer_expires; |
49 | } dst_garbage = { | 50 | } dst_garbage = { |
@@ -51,7 +52,7 @@ static struct { | |||
51 | .timer_inc = DST_GC_MAX, | 52 | .timer_inc = DST_GC_MAX, |
52 | }; | 53 | }; |
53 | static void dst_gc_task(struct work_struct *work); | 54 | static void dst_gc_task(struct work_struct *work); |
54 | static void ___dst_free(struct dst_entry * dst); | 55 | static void ___dst_free(struct dst_entry *dst); |
55 | 56 | ||
56 | static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); | 57 | static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); |
57 | 58 | ||
@@ -135,8 +136,8 @@ loop: | |||
135 | } | 136 | } |
136 | expires = dst_garbage.timer_expires; | 137 | expires = dst_garbage.timer_expires; |
137 | /* | 138 | /* |
138 | * if the next desired timer is more than 4 seconds in the future | 139 | * if the next desired timer is more than 4 seconds in the |
139 | * then round the timer to whole seconds | 140 | * future then round the timer to whole seconds |
140 | */ | 141 | */ |
141 | if (expires > 4*HZ) | 142 | if (expires > 4*HZ) |
142 | expires = round_jiffies_relative(expires); | 143 | expires = round_jiffies_relative(expires); |
@@ -151,7 +152,8 @@ loop: | |||
151 | " expires: %lu elapsed: %lu us\n", | 152 | " expires: %lu elapsed: %lu us\n", |
152 | atomic_read(&dst_total), delayed, work_performed, | 153 | atomic_read(&dst_total), delayed, work_performed, |
153 | expires, | 154 | expires, |
154 | elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC); | 155 | elapsed.tv_sec * USEC_PER_SEC + |
156 | elapsed.tv_nsec / NSEC_PER_USEC); | ||
155 | #endif | 157 | #endif |
156 | } | 158 | } |
157 | 159 | ||
@@ -162,9 +164,9 @@ int dst_discard(struct sk_buff *skb) | |||
162 | } | 164 | } |
163 | EXPORT_SYMBOL(dst_discard); | 165 | EXPORT_SYMBOL(dst_discard); |
164 | 166 | ||
165 | void * dst_alloc(struct dst_ops * ops) | 167 | void *dst_alloc(struct dst_ops *ops) |
166 | { | 168 | { |
167 | struct dst_entry * dst; | 169 | struct dst_entry *dst; |
168 | 170 | ||
169 | if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { | 171 | if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { |
170 | if (ops->gc(ops)) | 172 | if (ops->gc(ops)) |
@@ -184,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops) | |||
184 | atomic_inc(&ops->entries); | 186 | atomic_inc(&ops->entries); |
185 | return dst; | 187 | return dst; |
186 | } | 188 | } |
189 | EXPORT_SYMBOL(dst_alloc); | ||
187 | 190 | ||
188 | static void ___dst_free(struct dst_entry * dst) | 191 | static void ___dst_free(struct dst_entry *dst) |
189 | { | 192 | { |
190 | /* The first case (dev==NULL) is required, when | 193 | /* The first case (dev==NULL) is required, when |
191 | protocol module is unloaded. | 194 | protocol module is unloaded. |
192 | */ | 195 | */ |
193 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) { | 196 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) |
194 | dst->input = dst->output = dst_discard; | 197 | dst->input = dst->output = dst_discard; |
195 | } | ||
196 | dst->obsolete = 2; | 198 | dst->obsolete = 2; |
197 | } | 199 | } |
200 | EXPORT_SYMBOL(__dst_free); | ||
198 | 201 | ||
199 | void __dst_free(struct dst_entry * dst) | 202 | void __dst_free(struct dst_entry *dst) |
200 | { | 203 | { |
201 | spin_lock_bh(&dst_garbage.lock); | 204 | spin_lock_bh(&dst_garbage.lock); |
202 | ___dst_free(dst); | 205 | ___dst_free(dst); |
@@ -261,15 +264,16 @@ again: | |||
261 | } | 264 | } |
262 | return NULL; | 265 | return NULL; |
263 | } | 266 | } |
267 | EXPORT_SYMBOL(dst_destroy); | ||
264 | 268 | ||
265 | void dst_release(struct dst_entry *dst) | 269 | void dst_release(struct dst_entry *dst) |
266 | { | 270 | { |
267 | if (dst) { | 271 | if (dst) { |
268 | int newrefcnt; | 272 | int newrefcnt; |
269 | 273 | ||
270 | smp_mb__before_atomic_dec(); | 274 | smp_mb__before_atomic_dec(); |
271 | newrefcnt = atomic_dec_return(&dst->__refcnt); | 275 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
272 | WARN_ON(newrefcnt < 0); | 276 | WARN_ON(newrefcnt < 0); |
273 | } | 277 | } |
274 | } | 278 | } |
275 | EXPORT_SYMBOL(dst_release); | 279 | EXPORT_SYMBOL(dst_release); |
@@ -282,8 +286,8 @@ EXPORT_SYMBOL(dst_release); | |||
282 | * | 286 | * |
283 | * Commented and originally written by Alexey. | 287 | * Commented and originally written by Alexey. |
284 | */ | 288 | */ |
285 | static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, | 289 | static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, |
286 | int unregister) | 290 | int unregister) |
287 | { | 291 | { |
288 | if (dst->ops->ifdown) | 292 | if (dst->ops->ifdown) |
289 | dst->ops->ifdown(dst, dev, unregister); | 293 | dst->ops->ifdown(dst, dev, unregister); |
@@ -305,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
305 | } | 309 | } |
306 | } | 310 | } |
307 | 311 | ||
308 | static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) | 312 | static int dst_dev_event(struct notifier_block *this, unsigned long event, |
313 | void *ptr) | ||
309 | { | 314 | { |
310 | struct net_device *dev = ptr; | 315 | struct net_device *dev = ptr; |
311 | struct dst_entry *dst, *last = NULL; | 316 | struct dst_entry *dst, *last = NULL; |
@@ -328,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void | |||
328 | last->next = dst; | 333 | last->next = dst; |
329 | else | 334 | else |
330 | dst_busy_list = dst; | 335 | dst_busy_list = dst; |
331 | for (; dst; dst = dst->next) { | 336 | for (; dst; dst = dst->next) |
332 | dst_ifdown(dst, dev, event != NETDEV_DOWN); | 337 | dst_ifdown(dst, dev, event != NETDEV_DOWN); |
333 | } | ||
334 | mutex_unlock(&dst_gc_mutex); | 338 | mutex_unlock(&dst_gc_mutex); |
335 | break; | 339 | break; |
336 | } | 340 | } |
@@ -345,7 +349,3 @@ void __init dst_init(void) | |||
345 | { | 349 | { |
346 | register_netdevice_notifier(&dst_dev_notifier); | 350 | register_netdevice_notifier(&dst_dev_notifier); |
347 | } | 351 | } |
348 | |||
349 | EXPORT_SYMBOL(__dst_free); | ||
350 | EXPORT_SYMBOL(dst_alloc); | ||
351 | EXPORT_SYMBOL(dst_destroy); | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index f4cb6b6299d9..1a7db92037fa 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -18,7 +18,8 @@ | |||
18 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
19 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <asm/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | /* | 24 | /* |
24 | * Some useful ethtool_ops methods that're device independent. | 25 | * Some useful ethtool_ops methods that're device independent. |
@@ -30,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev) | |||
30 | { | 31 | { |
31 | return netif_carrier_ok(dev) ? 1 : 0; | 32 | return netif_carrier_ok(dev) ? 1 : 0; |
32 | } | 33 | } |
34 | EXPORT_SYMBOL(ethtool_op_get_link); | ||
33 | 35 | ||
34 | u32 ethtool_op_get_rx_csum(struct net_device *dev) | 36 | u32 ethtool_op_get_rx_csum(struct net_device *dev) |
35 | { | 37 | { |
@@ -62,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) | |||
62 | 64 | ||
63 | return 0; | 65 | return 0; |
64 | } | 66 | } |
67 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | ||
65 | 68 | ||
66 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | 69 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) |
67 | { | 70 | { |
@@ -72,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | |||
72 | 75 | ||
73 | return 0; | 76 | return 0; |
74 | } | 77 | } |
78 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
75 | 79 | ||
76 | u32 ethtool_op_get_sg(struct net_device *dev) | 80 | u32 ethtool_op_get_sg(struct net_device *dev) |
77 | { | 81 | { |
78 | return (dev->features & NETIF_F_SG) != 0; | 82 | return (dev->features & NETIF_F_SG) != 0; |
79 | } | 83 | } |
84 | EXPORT_SYMBOL(ethtool_op_get_sg); | ||
80 | 85 | ||
81 | int ethtool_op_set_sg(struct net_device *dev, u32 data) | 86 | int ethtool_op_set_sg(struct net_device *dev, u32 data) |
82 | { | 87 | { |
@@ -87,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data) | |||
87 | 92 | ||
88 | return 0; | 93 | return 0; |
89 | } | 94 | } |
95 | EXPORT_SYMBOL(ethtool_op_set_sg); | ||
90 | 96 | ||
91 | u32 ethtool_op_get_tso(struct net_device *dev) | 97 | u32 ethtool_op_get_tso(struct net_device *dev) |
92 | { | 98 | { |
93 | return (dev->features & NETIF_F_TSO) != 0; | 99 | return (dev->features & NETIF_F_TSO) != 0; |
94 | } | 100 | } |
101 | EXPORT_SYMBOL(ethtool_op_get_tso); | ||
95 | 102 | ||
96 | int ethtool_op_set_tso(struct net_device *dev, u32 data) | 103 | int ethtool_op_set_tso(struct net_device *dev, u32 data) |
97 | { | 104 | { |
@@ -102,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data) | |||
102 | 109 | ||
103 | return 0; | 110 | return 0; |
104 | } | 111 | } |
112 | EXPORT_SYMBOL(ethtool_op_set_tso); | ||
105 | 113 | ||
106 | u32 ethtool_op_get_ufo(struct net_device *dev) | 114 | u32 ethtool_op_get_ufo(struct net_device *dev) |
107 | { | 115 | { |
108 | return (dev->features & NETIF_F_UFO) != 0; | 116 | return (dev->features & NETIF_F_UFO) != 0; |
109 | } | 117 | } |
118 | EXPORT_SYMBOL(ethtool_op_get_ufo); | ||
110 | 119 | ||
111 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) | 120 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) |
112 | { | 121 | { |
@@ -116,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data) | |||
116 | dev->features &= ~NETIF_F_UFO; | 125 | dev->features &= ~NETIF_F_UFO; |
117 | return 0; | 126 | return 0; |
118 | } | 127 | } |
128 | EXPORT_SYMBOL(ethtool_op_set_ufo); | ||
119 | 129 | ||
120 | /* the following list of flags are the same as their associated | 130 | /* the following list of flags are the same as their associated |
121 | * NETIF_F_xxx values in include/linux/netdevice.h | 131 | * NETIF_F_xxx values in include/linux/netdevice.h |
122 | */ | 132 | */ |
123 | static const u32 flags_dup_features = | 133 | static const u32 flags_dup_features = |
124 | (ETH_FLAG_LRO | ETH_FLAG_NTUPLE); | 134 | (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); |
125 | 135 | ||
126 | u32 ethtool_op_get_flags(struct net_device *dev) | 136 | u32 ethtool_op_get_flags(struct net_device *dev) |
127 | { | 137 | { |
@@ -132,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev) | |||
132 | 142 | ||
133 | return dev->features & flags_dup_features; | 143 | return dev->features & flags_dup_features; |
134 | } | 144 | } |
145 | EXPORT_SYMBOL(ethtool_op_get_flags); | ||
135 | 146 | ||
136 | int ethtool_op_set_flags(struct net_device *dev, u32 data) | 147 | int ethtool_op_set_flags(struct net_device *dev, u32 data) |
137 | { | 148 | { |
@@ -152,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data) | |||
152 | features &= ~NETIF_F_NTUPLE; | 163 | features &= ~NETIF_F_NTUPLE; |
153 | } | 164 | } |
154 | 165 | ||
166 | if (data & ETH_FLAG_RXHASH) | ||
167 | features |= NETIF_F_RXHASH; | ||
168 | else | ||
169 | features &= ~NETIF_F_RXHASH; | ||
170 | |||
155 | dev->features = features; | 171 | dev->features = features; |
156 | return 0; | 172 | return 0; |
157 | } | 173 | } |
174 | EXPORT_SYMBOL(ethtool_op_set_flags); | ||
158 | 175 | ||
159 | void ethtool_ntuple_flush(struct net_device *dev) | 176 | void ethtool_ntuple_flush(struct net_device *dev) |
160 | { | 177 | { |
@@ -200,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) | |||
200 | return dev->ethtool_ops->set_settings(dev, &cmd); | 217 | return dev->ethtool_ops->set_settings(dev, &cmd); |
201 | } | 218 | } |
202 | 219 | ||
203 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | 220 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, |
221 | void __user *useraddr) | ||
204 | { | 222 | { |
205 | struct ethtool_drvinfo info; | 223 | struct ethtool_drvinfo info; |
206 | const struct ethtool_ops *ops = dev->ethtool_ops; | 224 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -240,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _ | |||
240 | } | 258 | } |
241 | 259 | ||
242 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, | 260 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, |
243 | void __user *useraddr) | 261 | void __user *useraddr) |
244 | { | 262 | { |
245 | struct ethtool_sset_info info; | 263 | struct ethtool_sset_info info; |
246 | const struct ethtool_ops *ops = dev->ethtool_ops; | 264 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -299,7 +317,8 @@ out: | |||
299 | return ret; | 317 | return ret; |
300 | } | 318 | } |
301 | 319 | ||
302 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) | 320 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, |
321 | void __user *useraddr) | ||
303 | { | 322 | { |
304 | struct ethtool_rxnfc cmd; | 323 | struct ethtool_rxnfc cmd; |
305 | 324 | ||
@@ -312,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u | |||
312 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); | 331 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); |
313 | } | 332 | } |
314 | 333 | ||
315 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) | 334 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, |
335 | void __user *useraddr) | ||
316 | { | 336 | { |
317 | struct ethtool_rxnfc info; | 337 | struct ethtool_rxnfc info; |
318 | const struct ethtool_ops *ops = dev->ethtool_ops; | 338 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -357,8 +377,8 @@ err_out: | |||
357 | } | 377 | } |
358 | 378 | ||
359 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | 379 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, |
360 | struct ethtool_rx_ntuple_flow_spec *spec, | 380 | struct ethtool_rx_ntuple_flow_spec *spec, |
361 | struct ethtool_rx_ntuple_flow_spec_container *fsc) | 381 | struct ethtool_rx_ntuple_flow_spec_container *fsc) |
362 | { | 382 | { |
363 | 383 | ||
364 | /* don't add filters forever */ | 384 | /* don't add filters forever */ |
@@ -384,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | |||
384 | list->count++; | 404 | list->count++; |
385 | } | 405 | } |
386 | 406 | ||
387 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr) | 407 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, |
408 | void __user *useraddr) | ||
388 | { | 409 | { |
389 | struct ethtool_rx_ntuple cmd; | 410 | struct ethtool_rx_ntuple cmd; |
390 | const struct ethtool_ops *ops = dev->ethtool_ops; | 411 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -509,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
509 | case UDP_V4_FLOW: | 530 | case UDP_V4_FLOW: |
510 | case SCTP_V4_FLOW: | 531 | case SCTP_V4_FLOW: |
511 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 532 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
512 | fsc->fs.h_u.tcp_ip4_spec.ip4src); | 533 | fsc->fs.h_u.tcp_ip4_spec.ip4src); |
513 | p += ETH_GSTRING_LEN; | 534 | p += ETH_GSTRING_LEN; |
514 | num_strings++; | 535 | num_strings++; |
515 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 536 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
516 | fsc->fs.m_u.tcp_ip4_spec.ip4src); | 537 | fsc->fs.m_u.tcp_ip4_spec.ip4src); |
517 | p += ETH_GSTRING_LEN; | 538 | p += ETH_GSTRING_LEN; |
518 | num_strings++; | 539 | num_strings++; |
519 | sprintf(p, "\tDest IP addr: 0x%x\n", | 540 | sprintf(p, "\tDest IP addr: 0x%x\n", |
520 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); | 541 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); |
521 | p += ETH_GSTRING_LEN; | 542 | p += ETH_GSTRING_LEN; |
522 | num_strings++; | 543 | num_strings++; |
523 | sprintf(p, "\tDest IP mask: 0x%x\n", | 544 | sprintf(p, "\tDest IP mask: 0x%x\n", |
524 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); | 545 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); |
525 | p += ETH_GSTRING_LEN; | 546 | p += ETH_GSTRING_LEN; |
526 | num_strings++; | 547 | num_strings++; |
527 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", | 548 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", |
528 | fsc->fs.h_u.tcp_ip4_spec.psrc, | 549 | fsc->fs.h_u.tcp_ip4_spec.psrc, |
529 | fsc->fs.m_u.tcp_ip4_spec.psrc); | 550 | fsc->fs.m_u.tcp_ip4_spec.psrc); |
530 | p += ETH_GSTRING_LEN; | 551 | p += ETH_GSTRING_LEN; |
531 | num_strings++; | 552 | num_strings++; |
532 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", | 553 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", |
533 | fsc->fs.h_u.tcp_ip4_spec.pdst, | 554 | fsc->fs.h_u.tcp_ip4_spec.pdst, |
534 | fsc->fs.m_u.tcp_ip4_spec.pdst); | 555 | fsc->fs.m_u.tcp_ip4_spec.pdst); |
535 | p += ETH_GSTRING_LEN; | 556 | p += ETH_GSTRING_LEN; |
536 | num_strings++; | 557 | num_strings++; |
537 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 558 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
538 | fsc->fs.h_u.tcp_ip4_spec.tos, | 559 | fsc->fs.h_u.tcp_ip4_spec.tos, |
539 | fsc->fs.m_u.tcp_ip4_spec.tos); | 560 | fsc->fs.m_u.tcp_ip4_spec.tos); |
540 | p += ETH_GSTRING_LEN; | 561 | p += ETH_GSTRING_LEN; |
541 | num_strings++; | 562 | num_strings++; |
542 | break; | 563 | break; |
543 | case AH_ESP_V4_FLOW: | 564 | case AH_ESP_V4_FLOW: |
544 | case ESP_V4_FLOW: | 565 | case ESP_V4_FLOW: |
545 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 566 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
546 | fsc->fs.h_u.ah_ip4_spec.ip4src); | 567 | fsc->fs.h_u.ah_ip4_spec.ip4src); |
547 | p += ETH_GSTRING_LEN; | 568 | p += ETH_GSTRING_LEN; |
548 | num_strings++; | 569 | num_strings++; |
549 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 570 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
550 | fsc->fs.m_u.ah_ip4_spec.ip4src); | 571 | fsc->fs.m_u.ah_ip4_spec.ip4src); |
551 | p += ETH_GSTRING_LEN; | 572 | p += ETH_GSTRING_LEN; |
552 | num_strings++; | 573 | num_strings++; |
553 | sprintf(p, "\tDest IP addr: 0x%x\n", | 574 | sprintf(p, "\tDest IP addr: 0x%x\n", |
554 | fsc->fs.h_u.ah_ip4_spec.ip4dst); | 575 | fsc->fs.h_u.ah_ip4_spec.ip4dst); |
555 | p += ETH_GSTRING_LEN; | 576 | p += ETH_GSTRING_LEN; |
556 | num_strings++; | 577 | num_strings++; |
557 | sprintf(p, "\tDest IP mask: 0x%x\n", | 578 | sprintf(p, "\tDest IP mask: 0x%x\n", |
558 | fsc->fs.m_u.ah_ip4_spec.ip4dst); | 579 | fsc->fs.m_u.ah_ip4_spec.ip4dst); |
559 | p += ETH_GSTRING_LEN; | 580 | p += ETH_GSTRING_LEN; |
560 | num_strings++; | 581 | num_strings++; |
561 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", | 582 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", |
562 | fsc->fs.h_u.ah_ip4_spec.spi, | 583 | fsc->fs.h_u.ah_ip4_spec.spi, |
563 | fsc->fs.m_u.ah_ip4_spec.spi); | 584 | fsc->fs.m_u.ah_ip4_spec.spi); |
564 | p += ETH_GSTRING_LEN; | 585 | p += ETH_GSTRING_LEN; |
565 | num_strings++; | 586 | num_strings++; |
566 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 587 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
567 | fsc->fs.h_u.ah_ip4_spec.tos, | 588 | fsc->fs.h_u.ah_ip4_spec.tos, |
568 | fsc->fs.m_u.ah_ip4_spec.tos); | 589 | fsc->fs.m_u.ah_ip4_spec.tos); |
569 | p += ETH_GSTRING_LEN; | 590 | p += ETH_GSTRING_LEN; |
570 | num_strings++; | 591 | num_strings++; |
571 | break; | 592 | break; |
572 | case IP_USER_FLOW: | 593 | case IP_USER_FLOW: |
573 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 594 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
574 | fsc->fs.h_u.raw_ip4_spec.ip4src); | 595 | fsc->fs.h_u.raw_ip4_spec.ip4src); |
575 | p += ETH_GSTRING_LEN; | 596 | p += ETH_GSTRING_LEN; |
576 | num_strings++; | 597 | num_strings++; |
577 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 598 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
578 | fsc->fs.m_u.raw_ip4_spec.ip4src); | 599 | fsc->fs.m_u.raw_ip4_spec.ip4src); |
579 | p += ETH_GSTRING_LEN; | 600 | p += ETH_GSTRING_LEN; |
580 | num_strings++; | 601 | num_strings++; |
581 | sprintf(p, "\tDest IP addr: 0x%x\n", | 602 | sprintf(p, "\tDest IP addr: 0x%x\n", |
582 | fsc->fs.h_u.raw_ip4_spec.ip4dst); | 603 | fsc->fs.h_u.raw_ip4_spec.ip4dst); |
583 | p += ETH_GSTRING_LEN; | 604 | p += ETH_GSTRING_LEN; |
584 | num_strings++; | 605 | num_strings++; |
585 | sprintf(p, "\tDest IP mask: 0x%x\n", | 606 | sprintf(p, "\tDest IP mask: 0x%x\n", |
586 | fsc->fs.m_u.raw_ip4_spec.ip4dst); | 607 | fsc->fs.m_u.raw_ip4_spec.ip4dst); |
587 | p += ETH_GSTRING_LEN; | 608 | p += ETH_GSTRING_LEN; |
588 | num_strings++; | 609 | num_strings++; |
589 | break; | 610 | break; |
590 | case IPV4_FLOW: | 611 | case IPV4_FLOW: |
591 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 612 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
592 | fsc->fs.h_u.usr_ip4_spec.ip4src); | 613 | fsc->fs.h_u.usr_ip4_spec.ip4src); |
593 | p += ETH_GSTRING_LEN; | 614 | p += ETH_GSTRING_LEN; |
594 | num_strings++; | 615 | num_strings++; |
595 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 616 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
596 | fsc->fs.m_u.usr_ip4_spec.ip4src); | 617 | fsc->fs.m_u.usr_ip4_spec.ip4src); |
597 | p += ETH_GSTRING_LEN; | 618 | p += ETH_GSTRING_LEN; |
598 | num_strings++; | 619 | num_strings++; |
599 | sprintf(p, "\tDest IP addr: 0x%x\n", | 620 | sprintf(p, "\tDest IP addr: 0x%x\n", |
600 | fsc->fs.h_u.usr_ip4_spec.ip4dst); | 621 | fsc->fs.h_u.usr_ip4_spec.ip4dst); |
601 | p += ETH_GSTRING_LEN; | 622 | p += ETH_GSTRING_LEN; |
602 | num_strings++; | 623 | num_strings++; |
603 | sprintf(p, "\tDest IP mask: 0x%x\n", | 624 | sprintf(p, "\tDest IP mask: 0x%x\n", |
604 | fsc->fs.m_u.usr_ip4_spec.ip4dst); | 625 | fsc->fs.m_u.usr_ip4_spec.ip4dst); |
605 | p += ETH_GSTRING_LEN; | 626 | p += ETH_GSTRING_LEN; |
606 | num_strings++; | 627 | num_strings++; |
607 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", | 628 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", |
608 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, | 629 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, |
609 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); | 630 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); |
610 | p += ETH_GSTRING_LEN; | 631 | p += ETH_GSTRING_LEN; |
611 | num_strings++; | 632 | num_strings++; |
612 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 633 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
613 | fsc->fs.h_u.usr_ip4_spec.tos, | 634 | fsc->fs.h_u.usr_ip4_spec.tos, |
614 | fsc->fs.m_u.usr_ip4_spec.tos); | 635 | fsc->fs.m_u.usr_ip4_spec.tos); |
615 | p += ETH_GSTRING_LEN; | 636 | p += ETH_GSTRING_LEN; |
616 | num_strings++; | 637 | num_strings++; |
617 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", | 638 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", |
618 | fsc->fs.h_u.usr_ip4_spec.ip_ver, | 639 | fsc->fs.h_u.usr_ip4_spec.ip_ver, |
619 | fsc->fs.m_u.usr_ip4_spec.ip_ver); | 640 | fsc->fs.m_u.usr_ip4_spec.ip_ver); |
620 | p += ETH_GSTRING_LEN; | 641 | p += ETH_GSTRING_LEN; |
621 | num_strings++; | 642 | num_strings++; |
622 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", | 643 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", |
623 | fsc->fs.h_u.usr_ip4_spec.proto, | 644 | fsc->fs.h_u.usr_ip4_spec.proto, |
624 | fsc->fs.m_u.usr_ip4_spec.proto); | 645 | fsc->fs.m_u.usr_ip4_spec.proto); |
625 | p += ETH_GSTRING_LEN; | 646 | p += ETH_GSTRING_LEN; |
626 | num_strings++; | 647 | num_strings++; |
627 | break; | 648 | break; |
628 | }; | 649 | }; |
629 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", | 650 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", |
630 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); | 651 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); |
631 | p += ETH_GSTRING_LEN; | 652 | p += ETH_GSTRING_LEN; |
632 | num_strings++; | 653 | num_strings++; |
633 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); | 654 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); |
@@ -640,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
640 | sprintf(p, "\tAction: Drop\n"); | 661 | sprintf(p, "\tAction: Drop\n"); |
641 | else | 662 | else |
642 | sprintf(p, "\tAction: Direct to queue %d\n", | 663 | sprintf(p, "\tAction: Direct to queue %d\n", |
643 | fsc->fs.action); | 664 | fsc->fs.action); |
644 | p += ETH_GSTRING_LEN; | 665 | p += ETH_GSTRING_LEN; |
645 | num_strings++; | 666 | num_strings++; |
646 | unknown_filter: | 667 | unknown_filter: |
@@ -852,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) | |||
852 | return ret; | 873 | return ret; |
853 | } | 874 | } |
854 | 875 | ||
855 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) | 876 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, |
877 | void __user *useraddr) | ||
856 | { | 878 | { |
857 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; | 879 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; |
858 | 880 | ||
@@ -866,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void | |||
866 | return 0; | 888 | return 0; |
867 | } | 889 | } |
868 | 890 | ||
869 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) | 891 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, |
892 | void __user *useraddr) | ||
870 | { | 893 | { |
871 | struct ethtool_coalesce coalesce; | 894 | struct ethtool_coalesce coalesce; |
872 | 895 | ||
@@ -970,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr) | |||
970 | 993 | ||
971 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); | 994 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); |
972 | } | 995 | } |
996 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | ||
973 | 997 | ||
974 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) | 998 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) |
975 | { | 999 | { |
@@ -1041,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) | |||
1041 | 1065 | ||
1042 | edata.data = dev->features & NETIF_F_GSO; | 1066 | edata.data = dev->features & NETIF_F_GSO; |
1043 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 1067 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
1044 | return -EFAULT; | 1068 | return -EFAULT; |
1045 | return 0; | 1069 | return 0; |
1046 | } | 1070 | } |
1047 | 1071 | ||
@@ -1064,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr) | |||
1064 | 1088 | ||
1065 | edata.data = dev->features & NETIF_F_GRO; | 1089 | edata.data = dev->features & NETIF_F_GRO; |
1066 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 1090 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
1067 | return -EFAULT; | 1091 | return -EFAULT; |
1068 | return 0; | 1092 | return 0; |
1069 | } | 1093 | } |
1070 | 1094 | ||
@@ -1276,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, | |||
1276 | return actor(dev, edata.data); | 1300 | return actor(dev, edata.data); |
1277 | } | 1301 | } |
1278 | 1302 | ||
1279 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) | 1303 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, |
1304 | char __user *useraddr) | ||
1280 | { | 1305 | { |
1281 | struct ethtool_flash efl; | 1306 | struct ethtool_flash efl; |
1282 | 1307 | ||
@@ -1305,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1305 | if (!dev->ethtool_ops) | 1330 | if (!dev->ethtool_ops) |
1306 | return -EOPNOTSUPP; | 1331 | return -EOPNOTSUPP; |
1307 | 1332 | ||
1308 | if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) | 1333 | if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) |
1309 | return -EFAULT; | 1334 | return -EFAULT; |
1310 | 1335 | ||
1311 | /* Allow some commands to be done by anyone */ | 1336 | /* Allow some commands to be done by anyone */ |
1312 | switch(ethcmd) { | 1337 | switch (ethcmd) { |
1313 | case ETHTOOL_GDRVINFO: | 1338 | case ETHTOOL_GDRVINFO: |
1314 | case ETHTOOL_GMSGLVL: | 1339 | case ETHTOOL_GMSGLVL: |
1315 | case ETHTOOL_GCOALESCE: | 1340 | case ETHTOOL_GCOALESCE: |
@@ -1337,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1337 | return -EPERM; | 1362 | return -EPERM; |
1338 | } | 1363 | } |
1339 | 1364 | ||
1340 | if (dev->ethtool_ops->begin) | 1365 | if (dev->ethtool_ops->begin) { |
1341 | if ((rc = dev->ethtool_ops->begin(dev)) < 0) | 1366 | rc = dev->ethtool_ops->begin(dev); |
1367 | if (rc < 0) | ||
1342 | return rc; | 1368 | return rc; |
1343 | 1369 | } | |
1344 | old_features = dev->features; | 1370 | old_features = dev->features; |
1345 | 1371 | ||
1346 | switch (ethcmd) { | 1372 | switch (ethcmd) { |
@@ -1530,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1530 | 1556 | ||
1531 | return rc; | 1557 | return rc; |
1532 | } | 1558 | } |
1533 | |||
1534 | EXPORT_SYMBOL(ethtool_op_get_link); | ||
1535 | EXPORT_SYMBOL(ethtool_op_get_sg); | ||
1536 | EXPORT_SYMBOL(ethtool_op_get_tso); | ||
1537 | EXPORT_SYMBOL(ethtool_op_set_sg); | ||
1538 | EXPORT_SYMBOL(ethtool_op_set_tso); | ||
1539 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | ||
1540 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | ||
1541 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
1542 | EXPORT_SYMBOL(ethtool_op_set_ufo); | ||
1543 | EXPORT_SYMBOL(ethtool_op_get_ufo); | ||
1544 | EXPORT_SYMBOL(ethtool_op_set_flags); | ||
1545 | EXPORT_SYMBOL(ethtool_op_get_flags); | ||
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 9a24377146bf..42e84e08a1be 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/list.h> | 14 | #include <linux/list.h> |
14 | #include <net/net_namespace.h> | 15 | #include <net/net_namespace.h> |
15 | #include <net/sock.h> | 16 | #include <net/sock.h> |
@@ -38,6 +39,24 @@ int fib_default_rule_add(struct fib_rules_ops *ops, | |||
38 | } | 39 | } |
39 | EXPORT_SYMBOL(fib_default_rule_add); | 40 | EXPORT_SYMBOL(fib_default_rule_add); |
40 | 41 | ||
42 | u32 fib_default_rule_pref(struct fib_rules_ops *ops) | ||
43 | { | ||
44 | struct list_head *pos; | ||
45 | struct fib_rule *rule; | ||
46 | |||
47 | if (!list_empty(&ops->rules_list)) { | ||
48 | pos = ops->rules_list.next; | ||
49 | if (pos->next != &ops->rules_list) { | ||
50 | rule = list_entry(pos->next, struct fib_rule, list); | ||
51 | if (rule->pref) | ||
52 | return rule->pref - 1; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | return 0; | ||
57 | } | ||
58 | EXPORT_SYMBOL(fib_default_rule_pref); | ||
59 | |||
41 | static void notify_rule_change(int event, struct fib_rule *rule, | 60 | static void notify_rule_change(int event, struct fib_rule *rule, |
42 | struct fib_rules_ops *ops, struct nlmsghdr *nlh, | 61 | struct fib_rules_ops *ops, struct nlmsghdr *nlh, |
43 | u32 pid); | 62 | u32 pid); |
@@ -103,12 +122,12 @@ errout: | |||
103 | } | 122 | } |
104 | 123 | ||
105 | struct fib_rules_ops * | 124 | struct fib_rules_ops * |
106 | fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | 125 | fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net) |
107 | { | 126 | { |
108 | struct fib_rules_ops *ops; | 127 | struct fib_rules_ops *ops; |
109 | int err; | 128 | int err; |
110 | 129 | ||
111 | ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL); | 130 | ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); |
112 | if (ops == NULL) | 131 | if (ops == NULL) |
113 | return ERR_PTR(-ENOMEM); | 132 | return ERR_PTR(-ENOMEM); |
114 | 133 | ||
@@ -123,7 +142,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
123 | 142 | ||
124 | return ops; | 143 | return ops; |
125 | } | 144 | } |
126 | |||
127 | EXPORT_SYMBOL_GPL(fib_rules_register); | 145 | EXPORT_SYMBOL_GPL(fib_rules_register); |
128 | 146 | ||
129 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) | 147 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) |
@@ -157,7 +175,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
157 | 175 | ||
158 | call_rcu(&ops->rcu, fib_rules_put_rcu); | 176 | call_rcu(&ops->rcu, fib_rules_put_rcu); |
159 | } | 177 | } |
160 | |||
161 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 178 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
162 | 179 | ||
163 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, | 180 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, |
@@ -220,7 +237,6 @@ out: | |||
220 | 237 | ||
221 | return err; | 238 | return err; |
222 | } | 239 | } |
223 | |||
224 | EXPORT_SYMBOL_GPL(fib_rules_lookup); | 240 | EXPORT_SYMBOL_GPL(fib_rules_lookup); |
225 | 241 | ||
226 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, | 242 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, |
@@ -519,6 +535,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, | |||
519 | return -EMSGSIZE; | 535 | return -EMSGSIZE; |
520 | 536 | ||
521 | frh = nlmsg_data(nlh); | 537 | frh = nlmsg_data(nlh); |
538 | frh->family = ops->family; | ||
522 | frh->table = rule->table; | 539 | frh->table = rule->table; |
523 | NLA_PUT_U32(skb, FRA_TABLE, rule->table); | 540 | NLA_PUT_U32(skb, FRA_TABLE, rule->table); |
524 | frh->res1 = 0; | 541 | frh->res1 = 0; |
@@ -613,7 +630,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) | |||
613 | break; | 630 | break; |
614 | 631 | ||
615 | cb->args[1] = 0; | 632 | cb->args[1] = 0; |
616 | skip: | 633 | skip: |
617 | idx++; | 634 | idx++; |
618 | } | 635 | } |
619 | rcu_read_unlock(); | 636 | rcu_read_unlock(); |
@@ -685,7 +702,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
685 | struct fib_rules_ops *ops; | 702 | struct fib_rules_ops *ops; |
686 | 703 | ||
687 | ASSERT_RTNL(); | 704 | ASSERT_RTNL(); |
688 | rcu_read_lock(); | ||
689 | 705 | ||
690 | switch (event) { | 706 | switch (event) { |
691 | case NETDEV_REGISTER: | 707 | case NETDEV_REGISTER: |
@@ -699,8 +715,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
699 | break; | 715 | break; |
700 | } | 716 | } |
701 | 717 | ||
702 | rcu_read_unlock(); | ||
703 | |||
704 | return NOTIFY_DONE; | 718 | return NOTIFY_DONE; |
705 | } | 719 | } |
706 | 720 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index d38ef7fd50f0..da69fb728d32 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/inet.h> | 25 | #include <linux/inet.h> |
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/if_packet.h> | 27 | #include <linux/if_packet.h> |
28 | #include <linux/gfp.h> | ||
28 | #include <net/ip.h> | 29 | #include <net/ip.h> |
29 | #include <net/protocol.h> | 30 | #include <net/protocol.h> |
30 | #include <net/netlink.h> | 31 | #include <net/netlink.h> |
@@ -301,6 +302,8 @@ load_b: | |||
301 | A = skb->pkt_type; | 302 | A = skb->pkt_type; |
302 | continue; | 303 | continue; |
303 | case SKF_AD_IFINDEX: | 304 | case SKF_AD_IFINDEX: |
305 | if (!skb->dev) | ||
306 | return 0; | ||
304 | A = skb->dev->ifindex; | 307 | A = skb->dev->ifindex; |
305 | continue; | 308 | continue; |
306 | case SKF_AD_MARK: | 309 | case SKF_AD_MARK: |
@@ -309,6 +312,11 @@ load_b: | |||
309 | case SKF_AD_QUEUE: | 312 | case SKF_AD_QUEUE: |
310 | A = skb->queue_mapping; | 313 | A = skb->queue_mapping; |
311 | continue; | 314 | continue; |
315 | case SKF_AD_HATYPE: | ||
316 | if (!skb->dev) | ||
317 | return 0; | ||
318 | A = skb->dev->type; | ||
319 | continue; | ||
312 | case SKF_AD_NLATTR: { | 320 | case SKF_AD_NLATTR: { |
313 | struct nlattr *nla; | 321 | struct nlattr *nla; |
314 | 322 | ||
diff --git a/net/core/flow.c b/net/core/flow.c index 96015871ecea..161900674009 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -26,113 +26,158 @@ | |||
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | 27 | ||
28 | struct flow_cache_entry { | 28 | struct flow_cache_entry { |
29 | struct flow_cache_entry *next; | 29 | union { |
30 | u16 family; | 30 | struct hlist_node hlist; |
31 | u8 dir; | 31 | struct list_head gc_list; |
32 | u32 genid; | 32 | } u; |
33 | struct flowi key; | 33 | u16 family; |
34 | void *object; | 34 | u8 dir; |
35 | atomic_t *object_ref; | 35 | u32 genid; |
36 | struct flowi key; | ||
37 | struct flow_cache_object *object; | ||
36 | }; | 38 | }; |
37 | 39 | ||
38 | atomic_t flow_cache_genid = ATOMIC_INIT(0); | 40 | struct flow_cache_percpu { |
39 | 41 | struct hlist_head *hash_table; | |
40 | static u32 flow_hash_shift; | 42 | int hash_count; |
41 | #define flow_hash_size (1 << flow_hash_shift) | 43 | u32 hash_rnd; |
42 | static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; | 44 | int hash_rnd_recalc; |
43 | 45 | struct tasklet_struct flush_tasklet; | |
44 | #define flow_table(cpu) (per_cpu(flow_tables, cpu)) | 46 | }; |
45 | |||
46 | static struct kmem_cache *flow_cachep __read_mostly; | ||
47 | 47 | ||
48 | static int flow_lwm, flow_hwm; | 48 | struct flow_flush_info { |
49 | struct flow_cache *cache; | ||
50 | atomic_t cpuleft; | ||
51 | struct completion completion; | ||
52 | }; | ||
49 | 53 | ||
50 | struct flow_percpu_info { | 54 | struct flow_cache { |
51 | int hash_rnd_recalc; | 55 | u32 hash_shift; |
52 | u32 hash_rnd; | 56 | unsigned long order; |
53 | int count; | 57 | struct flow_cache_percpu *percpu; |
58 | struct notifier_block hotcpu_notifier; | ||
59 | int low_watermark; | ||
60 | int high_watermark; | ||
61 | struct timer_list rnd_timer; | ||
54 | }; | 62 | }; |
55 | static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 }; | ||
56 | 63 | ||
57 | #define flow_hash_rnd_recalc(cpu) \ | 64 | atomic_t flow_cache_genid = ATOMIC_INIT(0); |
58 | (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) | 65 | static struct flow_cache flow_cache_global; |
59 | #define flow_hash_rnd(cpu) \ | 66 | static struct kmem_cache *flow_cachep; |
60 | (per_cpu(flow_hash_info, cpu).hash_rnd) | ||
61 | #define flow_count(cpu) \ | ||
62 | (per_cpu(flow_hash_info, cpu).count) | ||
63 | 67 | ||
64 | static struct timer_list flow_hash_rnd_timer; | 68 | static DEFINE_SPINLOCK(flow_cache_gc_lock); |
69 | static LIST_HEAD(flow_cache_gc_list); | ||
65 | 70 | ||
66 | #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) | 71 | #define flow_cache_hash_size(cache) (1 << (cache)->hash_shift) |
67 | 72 | #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) | |
68 | struct flow_flush_info { | ||
69 | atomic_t cpuleft; | ||
70 | struct completion completion; | ||
71 | }; | ||
72 | static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL }; | ||
73 | |||
74 | #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu)) | ||
75 | 73 | ||
76 | static void flow_cache_new_hashrnd(unsigned long arg) | 74 | static void flow_cache_new_hashrnd(unsigned long arg) |
77 | { | 75 | { |
76 | struct flow_cache *fc = (void *) arg; | ||
78 | int i; | 77 | int i; |
79 | 78 | ||
80 | for_each_possible_cpu(i) | 79 | for_each_possible_cpu(i) |
81 | flow_hash_rnd_recalc(i) = 1; | 80 | per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; |
82 | 81 | ||
83 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 82 | fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
84 | add_timer(&flow_hash_rnd_timer); | 83 | add_timer(&fc->rnd_timer); |
84 | } | ||
85 | |||
86 | static int flow_entry_valid(struct flow_cache_entry *fle) | ||
87 | { | ||
88 | if (atomic_read(&flow_cache_genid) != fle->genid) | ||
89 | return 0; | ||
90 | if (fle->object && !fle->object->ops->check(fle->object)) | ||
91 | return 0; | ||
92 | return 1; | ||
85 | } | 93 | } |
86 | 94 | ||
87 | static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) | 95 | static void flow_entry_kill(struct flow_cache_entry *fle) |
88 | { | 96 | { |
89 | if (fle->object) | 97 | if (fle->object) |
90 | atomic_dec(fle->object_ref); | 98 | fle->object->ops->delete(fle->object); |
91 | kmem_cache_free(flow_cachep, fle); | 99 | kmem_cache_free(flow_cachep, fle); |
92 | flow_count(cpu)--; | ||
93 | } | 100 | } |
94 | 101 | ||
95 | static void __flow_cache_shrink(int cpu, int shrink_to) | 102 | static void flow_cache_gc_task(struct work_struct *work) |
96 | { | 103 | { |
97 | struct flow_cache_entry *fle, **flp; | 104 | struct list_head gc_list; |
98 | int i; | 105 | struct flow_cache_entry *fce, *n; |
99 | 106 | ||
100 | for (i = 0; i < flow_hash_size; i++) { | 107 | INIT_LIST_HEAD(&gc_list); |
101 | int k = 0; | 108 | spin_lock_bh(&flow_cache_gc_lock); |
109 | list_splice_tail_init(&flow_cache_gc_list, &gc_list); | ||
110 | spin_unlock_bh(&flow_cache_gc_lock); | ||
102 | 111 | ||
103 | flp = &flow_table(cpu)[i]; | 112 | list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) |
104 | while ((fle = *flp) != NULL && k < shrink_to) { | 113 | flow_entry_kill(fce); |
105 | k++; | 114 | } |
106 | flp = &fle->next; | 115 | static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task); |
107 | } | 116 | |
108 | while ((fle = *flp) != NULL) { | 117 | static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, |
109 | *flp = fle->next; | 118 | int deleted, struct list_head *gc_list) |
110 | flow_entry_kill(cpu, fle); | 119 | { |
111 | } | 120 | if (deleted) { |
121 | fcp->hash_count -= deleted; | ||
122 | spin_lock_bh(&flow_cache_gc_lock); | ||
123 | list_splice_tail(gc_list, &flow_cache_gc_list); | ||
124 | spin_unlock_bh(&flow_cache_gc_lock); | ||
125 | schedule_work(&flow_cache_gc_work); | ||
112 | } | 126 | } |
113 | } | 127 | } |
114 | 128 | ||
115 | static void flow_cache_shrink(int cpu) | 129 | static void __flow_cache_shrink(struct flow_cache *fc, |
130 | struct flow_cache_percpu *fcp, | ||
131 | int shrink_to) | ||
116 | { | 132 | { |
117 | int shrink_to = flow_lwm / flow_hash_size; | 133 | struct flow_cache_entry *fle; |
134 | struct hlist_node *entry, *tmp; | ||
135 | LIST_HEAD(gc_list); | ||
136 | int i, deleted = 0; | ||
137 | |||
138 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | ||
139 | int saved = 0; | ||
140 | |||
141 | hlist_for_each_entry_safe(fle, entry, tmp, | ||
142 | &fcp->hash_table[i], u.hlist) { | ||
143 | if (saved < shrink_to && | ||
144 | flow_entry_valid(fle)) { | ||
145 | saved++; | ||
146 | } else { | ||
147 | deleted++; | ||
148 | hlist_del(&fle->u.hlist); | ||
149 | list_add_tail(&fle->u.gc_list, &gc_list); | ||
150 | } | ||
151 | } | ||
152 | } | ||
118 | 153 | ||
119 | __flow_cache_shrink(cpu, shrink_to); | 154 | flow_cache_queue_garbage(fcp, deleted, &gc_list); |
120 | } | 155 | } |
121 | 156 | ||
122 | static void flow_new_hash_rnd(int cpu) | 157 | static void flow_cache_shrink(struct flow_cache *fc, |
158 | struct flow_cache_percpu *fcp) | ||
123 | { | 159 | { |
124 | get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32)); | 160 | int shrink_to = fc->low_watermark / flow_cache_hash_size(fc); |
125 | flow_hash_rnd_recalc(cpu) = 0; | ||
126 | 161 | ||
127 | __flow_cache_shrink(cpu, 0); | 162 | __flow_cache_shrink(fc, fcp, shrink_to); |
128 | } | 163 | } |
129 | 164 | ||
130 | static u32 flow_hash_code(struct flowi *key, int cpu) | 165 | static void flow_new_hash_rnd(struct flow_cache *fc, |
166 | struct flow_cache_percpu *fcp) | ||
167 | { | ||
168 | get_random_bytes(&fcp->hash_rnd, sizeof(u32)); | ||
169 | fcp->hash_rnd_recalc = 0; | ||
170 | __flow_cache_shrink(fc, fcp, 0); | ||
171 | } | ||
172 | |||
173 | static u32 flow_hash_code(struct flow_cache *fc, | ||
174 | struct flow_cache_percpu *fcp, | ||
175 | struct flowi *key) | ||
131 | { | 176 | { |
132 | u32 *k = (u32 *) key; | 177 | u32 *k = (u32 *) key; |
133 | 178 | ||
134 | return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) & | 179 | return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) |
135 | (flow_hash_size - 1)); | 180 | & (flow_cache_hash_size(fc) - 1)); |
136 | } | 181 | } |
137 | 182 | ||
138 | #if (BITS_PER_LONG == 64) | 183 | #if (BITS_PER_LONG == 64) |
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2) | |||
165 | return 0; | 210 | return 0; |
166 | } | 211 | } |
167 | 212 | ||
168 | void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, | 213 | struct flow_cache_object * |
169 | flow_resolve_t resolver) | 214 | flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, |
215 | flow_resolve_t resolver, void *ctx) | ||
170 | { | 216 | { |
171 | struct flow_cache_entry *fle, **head; | 217 | struct flow_cache *fc = &flow_cache_global; |
218 | struct flow_cache_percpu *fcp; | ||
219 | struct flow_cache_entry *fle, *tfle; | ||
220 | struct hlist_node *entry; | ||
221 | struct flow_cache_object *flo; | ||
172 | unsigned int hash; | 222 | unsigned int hash; |
173 | int cpu; | ||
174 | 223 | ||
175 | local_bh_disable(); | 224 | local_bh_disable(); |
176 | cpu = smp_processor_id(); | 225 | fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); |
177 | 226 | ||
178 | fle = NULL; | 227 | fle = NULL; |
228 | flo = NULL; | ||
179 | /* Packet really early in init? Making flow_cache_init a | 229 | /* Packet really early in init? Making flow_cache_init a |
180 | * pre-smp initcall would solve this. --RR */ | 230 | * pre-smp initcall would solve this. --RR */ |
181 | if (!flow_table(cpu)) | 231 | if (!fcp->hash_table) |
182 | goto nocache; | 232 | goto nocache; |
183 | 233 | ||
184 | if (flow_hash_rnd_recalc(cpu)) | 234 | if (fcp->hash_rnd_recalc) |
185 | flow_new_hash_rnd(cpu); | 235 | flow_new_hash_rnd(fc, fcp); |
186 | hash = flow_hash_code(key, cpu); | ||
187 | 236 | ||
188 | head = &flow_table(cpu)[hash]; | 237 | hash = flow_hash_code(fc, fcp, key); |
189 | for (fle = *head; fle; fle = fle->next) { | 238 | hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { |
190 | if (fle->family == family && | 239 | if (tfle->family == family && |
191 | fle->dir == dir && | 240 | tfle->dir == dir && |
192 | flow_key_compare(key, &fle->key) == 0) { | 241 | flow_key_compare(key, &tfle->key) == 0) { |
193 | if (fle->genid == atomic_read(&flow_cache_genid)) { | 242 | fle = tfle; |
194 | void *ret = fle->object; | ||
195 | |||
196 | if (ret) | ||
197 | atomic_inc(fle->object_ref); | ||
198 | local_bh_enable(); | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | break; | 243 | break; |
203 | } | 244 | } |
204 | } | 245 | } |
205 | 246 | ||
206 | if (!fle) { | 247 | if (unlikely(!fle)) { |
207 | if (flow_count(cpu) > flow_hwm) | 248 | if (fcp->hash_count > fc->high_watermark) |
208 | flow_cache_shrink(cpu); | 249 | flow_cache_shrink(fc, fcp); |
209 | 250 | ||
210 | fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); | 251 | fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); |
211 | if (fle) { | 252 | if (fle) { |
212 | fle->next = *head; | ||
213 | *head = fle; | ||
214 | fle->family = family; | 253 | fle->family = family; |
215 | fle->dir = dir; | 254 | fle->dir = dir; |
216 | memcpy(&fle->key, key, sizeof(*key)); | 255 | memcpy(&fle->key, key, sizeof(*key)); |
217 | fle->object = NULL; | 256 | fle->object = NULL; |
218 | flow_count(cpu)++; | 257 | hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); |
258 | fcp->hash_count++; | ||
219 | } | 259 | } |
260 | } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) { | ||
261 | flo = fle->object; | ||
262 | if (!flo) | ||
263 | goto ret_object; | ||
264 | flo = flo->ops->get(flo); | ||
265 | if (flo) | ||
266 | goto ret_object; | ||
267 | } else if (fle->object) { | ||
268 | flo = fle->object; | ||
269 | flo->ops->delete(flo); | ||
270 | fle->object = NULL; | ||
220 | } | 271 | } |
221 | 272 | ||
222 | nocache: | 273 | nocache: |
223 | { | 274 | flo = NULL; |
224 | int err; | 275 | if (fle) { |
225 | void *obj; | 276 | flo = fle->object; |
226 | atomic_t *obj_ref; | 277 | fle->object = NULL; |
227 | |||
228 | err = resolver(net, key, family, dir, &obj, &obj_ref); | ||
229 | |||
230 | if (fle && !err) { | ||
231 | fle->genid = atomic_read(&flow_cache_genid); | ||
232 | |||
233 | if (fle->object) | ||
234 | atomic_dec(fle->object_ref); | ||
235 | |||
236 | fle->object = obj; | ||
237 | fle->object_ref = obj_ref; | ||
238 | if (obj) | ||
239 | atomic_inc(fle->object_ref); | ||
240 | } | ||
241 | local_bh_enable(); | ||
242 | |||
243 | if (err) | ||
244 | obj = ERR_PTR(err); | ||
245 | return obj; | ||
246 | } | 278 | } |
279 | flo = resolver(net, key, family, dir, flo, ctx); | ||
280 | if (fle) { | ||
281 | fle->genid = atomic_read(&flow_cache_genid); | ||
282 | if (!IS_ERR(flo)) | ||
283 | fle->object = flo; | ||
284 | else | ||
285 | fle->genid--; | ||
286 | } else { | ||
287 | if (flo && !IS_ERR(flo)) | ||
288 | flo->ops->delete(flo); | ||
289 | } | ||
290 | ret_object: | ||
291 | local_bh_enable(); | ||
292 | return flo; | ||
247 | } | 293 | } |
248 | 294 | ||
249 | static void flow_cache_flush_tasklet(unsigned long data) | 295 | static void flow_cache_flush_tasklet(unsigned long data) |
250 | { | 296 | { |
251 | struct flow_flush_info *info = (void *)data; | 297 | struct flow_flush_info *info = (void *)data; |
252 | int i; | 298 | struct flow_cache *fc = info->cache; |
253 | int cpu; | 299 | struct flow_cache_percpu *fcp; |
254 | 300 | struct flow_cache_entry *fle; | |
255 | cpu = smp_processor_id(); | 301 | struct hlist_node *entry, *tmp; |
256 | for (i = 0; i < flow_hash_size; i++) { | 302 | LIST_HEAD(gc_list); |
257 | struct flow_cache_entry *fle; | 303 | int i, deleted = 0; |
258 | 304 | ||
259 | fle = flow_table(cpu)[i]; | 305 | fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); |
260 | for (; fle; fle = fle->next) { | 306 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
261 | unsigned genid = atomic_read(&flow_cache_genid); | 307 | hlist_for_each_entry_safe(fle, entry, tmp, |
262 | 308 | &fcp->hash_table[i], u.hlist) { | |
263 | if (!fle->object || fle->genid == genid) | 309 | if (flow_entry_valid(fle)) |
264 | continue; | 310 | continue; |
265 | 311 | ||
266 | fle->object = NULL; | 312 | deleted++; |
267 | atomic_dec(fle->object_ref); | 313 | hlist_del(&fle->u.hlist); |
314 | list_add_tail(&fle->u.gc_list, &gc_list); | ||
268 | } | 315 | } |
269 | } | 316 | } |
270 | 317 | ||
318 | flow_cache_queue_garbage(fcp, deleted, &gc_list); | ||
319 | |||
271 | if (atomic_dec_and_test(&info->cpuleft)) | 320 | if (atomic_dec_and_test(&info->cpuleft)) |
272 | complete(&info->completion); | 321 | complete(&info->completion); |
273 | } | 322 | } |
274 | 323 | ||
275 | static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__)); | ||
276 | static void flow_cache_flush_per_cpu(void *data) | 324 | static void flow_cache_flush_per_cpu(void *data) |
277 | { | 325 | { |
278 | struct flow_flush_info *info = data; | 326 | struct flow_flush_info *info = data; |
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) | |||
280 | struct tasklet_struct *tasklet; | 328 | struct tasklet_struct *tasklet; |
281 | 329 | ||
282 | cpu = smp_processor_id(); | 330 | cpu = smp_processor_id(); |
283 | 331 | tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet; | |
284 | tasklet = flow_flush_tasklet(cpu); | ||
285 | tasklet->data = (unsigned long)info; | 332 | tasklet->data = (unsigned long)info; |
286 | tasklet_schedule(tasklet); | 333 | tasklet_schedule(tasklet); |
287 | } | 334 | } |
@@ -294,6 +341,7 @@ void flow_cache_flush(void) | |||
294 | /* Don't want cpus going down or up during this. */ | 341 | /* Don't want cpus going down or up during this. */ |
295 | get_online_cpus(); | 342 | get_online_cpus(); |
296 | mutex_lock(&flow_flush_sem); | 343 | mutex_lock(&flow_flush_sem); |
344 | info.cache = &flow_cache_global; | ||
297 | atomic_set(&info.cpuleft, num_online_cpus()); | 345 | atomic_set(&info.cpuleft, num_online_cpus()); |
298 | init_completion(&info.completion); | 346 | init_completion(&info.completion); |
299 | 347 | ||
@@ -307,62 +355,75 @@ void flow_cache_flush(void) | |||
307 | put_online_cpus(); | 355 | put_online_cpus(); |
308 | } | 356 | } |
309 | 357 | ||
310 | static void __init flow_cache_cpu_prepare(int cpu) | 358 | static void __init flow_cache_cpu_prepare(struct flow_cache *fc, |
359 | struct flow_cache_percpu *fcp) | ||
311 | { | 360 | { |
312 | struct tasklet_struct *tasklet; | 361 | fcp->hash_table = (struct hlist_head *) |
313 | unsigned long order; | 362 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order); |
314 | 363 | if (!fcp->hash_table) | |
315 | for (order = 0; | 364 | panic("NET: failed to allocate flow cache order %lu\n", fc->order); |
316 | (PAGE_SIZE << order) < | 365 | |
317 | (sizeof(struct flow_cache_entry *)*flow_hash_size); | 366 | fcp->hash_rnd_recalc = 1; |
318 | order++) | 367 | fcp->hash_count = 0; |
319 | /* NOTHING */; | 368 | tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0); |
320 | |||
321 | flow_table(cpu) = (struct flow_cache_entry **) | ||
322 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); | ||
323 | if (!flow_table(cpu)) | ||
324 | panic("NET: failed to allocate flow cache order %lu\n", order); | ||
325 | |||
326 | flow_hash_rnd_recalc(cpu) = 1; | ||
327 | flow_count(cpu) = 0; | ||
328 | |||
329 | tasklet = flow_flush_tasklet(cpu); | ||
330 | tasklet_init(tasklet, flow_cache_flush_tasklet, 0); | ||
331 | } | 369 | } |
332 | 370 | ||
333 | static int flow_cache_cpu(struct notifier_block *nfb, | 371 | static int flow_cache_cpu(struct notifier_block *nfb, |
334 | unsigned long action, | 372 | unsigned long action, |
335 | void *hcpu) | 373 | void *hcpu) |
336 | { | 374 | { |
375 | struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier); | ||
376 | int cpu = (unsigned long) hcpu; | ||
377 | struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); | ||
378 | |||
337 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) | 379 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) |
338 | __flow_cache_shrink((unsigned long)hcpu, 0); | 380 | __flow_cache_shrink(fc, fcp, 0); |
339 | return NOTIFY_OK; | 381 | return NOTIFY_OK; |
340 | } | 382 | } |
341 | 383 | ||
342 | static int __init flow_cache_init(void) | 384 | static int flow_cache_init(struct flow_cache *fc) |
343 | { | 385 | { |
386 | unsigned long order; | ||
344 | int i; | 387 | int i; |
345 | 388 | ||
346 | flow_cachep = kmem_cache_create("flow_cache", | 389 | fc->hash_shift = 10; |
347 | sizeof(struct flow_cache_entry), | 390 | fc->low_watermark = 2 * flow_cache_hash_size(fc); |
348 | 0, SLAB_PANIC, | 391 | fc->high_watermark = 4 * flow_cache_hash_size(fc); |
349 | NULL); | 392 | |
350 | flow_hash_shift = 10; | 393 | for (order = 0; |
351 | flow_lwm = 2 * flow_hash_size; | 394 | (PAGE_SIZE << order) < |
352 | flow_hwm = 4 * flow_hash_size; | 395 | (sizeof(struct hlist_head)*flow_cache_hash_size(fc)); |
396 | order++) | ||
397 | /* NOTHING */; | ||
398 | fc->order = order; | ||
399 | fc->percpu = alloc_percpu(struct flow_cache_percpu); | ||
353 | 400 | ||
354 | setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0); | 401 | setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, |
355 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 402 | (unsigned long) fc); |
356 | add_timer(&flow_hash_rnd_timer); | 403 | fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
404 | add_timer(&fc->rnd_timer); | ||
357 | 405 | ||
358 | for_each_possible_cpu(i) | 406 | for_each_possible_cpu(i) |
359 | flow_cache_cpu_prepare(i); | 407 | flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i)); |
408 | |||
409 | fc->hotcpu_notifier = (struct notifier_block){ | ||
410 | .notifier_call = flow_cache_cpu, | ||
411 | }; | ||
412 | register_hotcpu_notifier(&fc->hotcpu_notifier); | ||
360 | 413 | ||
361 | hotcpu_notifier(flow_cache_cpu, 0); | ||
362 | return 0; | 414 | return 0; |
363 | } | 415 | } |
364 | 416 | ||
365 | module_init(flow_cache_init); | 417 | static int __init flow_cache_init_global(void) |
418 | { | ||
419 | flow_cachep = kmem_cache_create("flow_cache", | ||
420 | sizeof(struct flow_cache_entry), | ||
421 | 0, SLAB_PANIC, NULL); | ||
422 | |||
423 | return flow_cache_init(&flow_cache_global); | ||
424 | } | ||
425 | |||
426 | module_init(flow_cache_init_global); | ||
366 | 427 | ||
367 | EXPORT_SYMBOL(flow_cache_genid); | 428 | EXPORT_SYMBOL(flow_cache_genid); |
368 | EXPORT_SYMBOL(flow_cache_lookup); | 429 | EXPORT_SYMBOL(flow_cache_lookup); |
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 493775f4f2f1..cf8e70392fe0 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/rtnetlink.h> | 32 | #include <linux/rtnetlink.h> |
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/rbtree.h> | 34 | #include <linux/rbtree.h> |
35 | #include <linux/slab.h> | ||
35 | #include <net/sock.h> | 36 | #include <net/sock.h> |
36 | #include <net/gen_stats.h> | 37 | #include <net/gen_stats.h> |
37 | 38 | ||
diff --git a/net/core/iovec.c b/net/core/iovec.c index 16ad45d4882b..1e7f4e91a935 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/slab.h> | ||
24 | #include <linux/net.h> | 23 | #include <linux/net.h> |
25 | #include <linux/in6.h> | 24 | #include <linux/in6.h> |
26 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 5910b555a54a..bdbce2f5875b 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/jiffies.h> | 20 | #include <linux/jiffies.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/slab.h> | ||
23 | #include <linux/workqueue.h> | 22 | #include <linux/workqueue.h> |
24 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
25 | #include <asm/types.h> | 24 | #include <asm/types.h> |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index d102f6d9abdc..bff37908bd55 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * Harald Welte Add neighbour cache statistics like rtstat | 15 | * Harald Welte Add neighbour cache statistics like rtstat |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/slab.h> | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
@@ -771,6 +772,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n) | |||
771 | } | 772 | } |
772 | 773 | ||
773 | static void neigh_invalidate(struct neighbour *neigh) | 774 | static void neigh_invalidate(struct neighbour *neigh) |
775 | __releases(neigh->lock) | ||
776 | __acquires(neigh->lock) | ||
774 | { | 777 | { |
775 | struct sk_buff *skb; | 778 | struct sk_buff *skb; |
776 | 779 | ||
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 099c753c4213..c57c4b228bb5 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -13,9 +13,11 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
16 | #include <linux/slab.h> | ||
16 | #include <net/sock.h> | 17 | #include <net/sock.h> |
17 | #include <linux/rtnetlink.h> | 18 | #include <linux/rtnetlink.h> |
18 | #include <linux/wireless.h> | 19 | #include <linux/wireless.h> |
20 | #include <linux/vmalloc.h> | ||
19 | #include <net/wext.h> | 21 | #include <net/wext.h> |
20 | 22 | ||
21 | #include "net-sysfs.h" | 23 | #include "net-sysfs.h" |
@@ -466,6 +468,304 @@ static struct attribute_group wireless_group = { | |||
466 | }; | 468 | }; |
467 | #endif | 469 | #endif |
468 | 470 | ||
471 | #ifdef CONFIG_RPS | ||
472 | /* | ||
473 | * RX queue sysfs structures and functions. | ||
474 | */ | ||
475 | struct rx_queue_attribute { | ||
476 | struct attribute attr; | ||
477 | ssize_t (*show)(struct netdev_rx_queue *queue, | ||
478 | struct rx_queue_attribute *attr, char *buf); | ||
479 | ssize_t (*store)(struct netdev_rx_queue *queue, | ||
480 | struct rx_queue_attribute *attr, const char *buf, size_t len); | ||
481 | }; | ||
482 | #define to_rx_queue_attr(_attr) container_of(_attr, \ | ||
483 | struct rx_queue_attribute, attr) | ||
484 | |||
485 | #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) | ||
486 | |||
487 | static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, | ||
488 | char *buf) | ||
489 | { | ||
490 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
491 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
492 | |||
493 | if (!attribute->show) | ||
494 | return -EIO; | ||
495 | |||
496 | return attribute->show(queue, attribute, buf); | ||
497 | } | ||
498 | |||
499 | static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
500 | const char *buf, size_t count) | ||
501 | { | ||
502 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
503 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
504 | |||
505 | if (!attribute->store) | ||
506 | return -EIO; | ||
507 | |||
508 | return attribute->store(queue, attribute, buf, count); | ||
509 | } | ||
510 | |||
511 | static struct sysfs_ops rx_queue_sysfs_ops = { | ||
512 | .show = rx_queue_attr_show, | ||
513 | .store = rx_queue_attr_store, | ||
514 | }; | ||
515 | |||
516 | static ssize_t show_rps_map(struct netdev_rx_queue *queue, | ||
517 | struct rx_queue_attribute *attribute, char *buf) | ||
518 | { | ||
519 | struct rps_map *map; | ||
520 | cpumask_var_t mask; | ||
521 | size_t len = 0; | ||
522 | int i; | ||
523 | |||
524 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
525 | return -ENOMEM; | ||
526 | |||
527 | rcu_read_lock(); | ||
528 | map = rcu_dereference(queue->rps_map); | ||
529 | if (map) | ||
530 | for (i = 0; i < map->len; i++) | ||
531 | cpumask_set_cpu(map->cpus[i], mask); | ||
532 | |||
533 | len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask); | ||
534 | if (PAGE_SIZE - len < 3) { | ||
535 | rcu_read_unlock(); | ||
536 | free_cpumask_var(mask); | ||
537 | return -EINVAL; | ||
538 | } | ||
539 | rcu_read_unlock(); | ||
540 | |||
541 | free_cpumask_var(mask); | ||
542 | len += sprintf(buf + len, "\n"); | ||
543 | return len; | ||
544 | } | ||
545 | |||
546 | static void rps_map_release(struct rcu_head *rcu) | ||
547 | { | ||
548 | struct rps_map *map = container_of(rcu, struct rps_map, rcu); | ||
549 | |||
550 | kfree(map); | ||
551 | } | ||
552 | |||
553 | static ssize_t store_rps_map(struct netdev_rx_queue *queue, | ||
554 | struct rx_queue_attribute *attribute, | ||
555 | const char *buf, size_t len) | ||
556 | { | ||
557 | struct rps_map *old_map, *map; | ||
558 | cpumask_var_t mask; | ||
559 | int err, cpu, i; | ||
560 | static DEFINE_SPINLOCK(rps_map_lock); | ||
561 | |||
562 | if (!capable(CAP_NET_ADMIN)) | ||
563 | return -EPERM; | ||
564 | |||
565 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); | ||
569 | if (err) { | ||
570 | free_cpumask_var(mask); | ||
571 | return err; | ||
572 | } | ||
573 | |||
574 | map = kzalloc(max_t(unsigned, | ||
575 | RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), | ||
576 | GFP_KERNEL); | ||
577 | if (!map) { | ||
578 | free_cpumask_var(mask); | ||
579 | return -ENOMEM; | ||
580 | } | ||
581 | |||
582 | i = 0; | ||
583 | for_each_cpu_and(cpu, mask, cpu_online_mask) | ||
584 | map->cpus[i++] = cpu; | ||
585 | |||
586 | if (i) | ||
587 | map->len = i; | ||
588 | else { | ||
589 | kfree(map); | ||
590 | map = NULL; | ||
591 | } | ||
592 | |||
593 | spin_lock(&rps_map_lock); | ||
594 | old_map = queue->rps_map; | ||
595 | rcu_assign_pointer(queue->rps_map, map); | ||
596 | spin_unlock(&rps_map_lock); | ||
597 | |||
598 | if (old_map) | ||
599 | call_rcu(&old_map->rcu, rps_map_release); | ||
600 | |||
601 | free_cpumask_var(mask); | ||
602 | return len; | ||
603 | } | ||
604 | |||
605 | static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | ||
606 | struct rx_queue_attribute *attr, | ||
607 | char *buf) | ||
608 | { | ||
609 | struct rps_dev_flow_table *flow_table; | ||
610 | unsigned int val = 0; | ||
611 | |||
612 | rcu_read_lock(); | ||
613 | flow_table = rcu_dereference(queue->rps_flow_table); | ||
614 | if (flow_table) | ||
615 | val = flow_table->mask + 1; | ||
616 | rcu_read_unlock(); | ||
617 | |||
618 | return sprintf(buf, "%u\n", val); | ||
619 | } | ||
620 | |||
621 | static void rps_dev_flow_table_release_work(struct work_struct *work) | ||
622 | { | ||
623 | struct rps_dev_flow_table *table = container_of(work, | ||
624 | struct rps_dev_flow_table, free_work); | ||
625 | |||
626 | vfree(table); | ||
627 | } | ||
628 | |||
629 | static void rps_dev_flow_table_release(struct rcu_head *rcu) | ||
630 | { | ||
631 | struct rps_dev_flow_table *table = container_of(rcu, | ||
632 | struct rps_dev_flow_table, rcu); | ||
633 | |||
634 | INIT_WORK(&table->free_work, rps_dev_flow_table_release_work); | ||
635 | schedule_work(&table->free_work); | ||
636 | } | ||
637 | |||
638 | static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | ||
639 | struct rx_queue_attribute *attr, | ||
640 | const char *buf, size_t len) | ||
641 | { | ||
642 | unsigned int count; | ||
643 | char *endp; | ||
644 | struct rps_dev_flow_table *table, *old_table; | ||
645 | static DEFINE_SPINLOCK(rps_dev_flow_lock); | ||
646 | |||
647 | if (!capable(CAP_NET_ADMIN)) | ||
648 | return -EPERM; | ||
649 | |||
650 | count = simple_strtoul(buf, &endp, 0); | ||
651 | if (endp == buf) | ||
652 | return -EINVAL; | ||
653 | |||
654 | if (count) { | ||
655 | int i; | ||
656 | |||
657 | if (count > 1<<30) { | ||
658 | /* Enforce a limit to prevent overflow */ | ||
659 | return -EINVAL; | ||
660 | } | ||
661 | count = roundup_pow_of_two(count); | ||
662 | table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count)); | ||
663 | if (!table) | ||
664 | return -ENOMEM; | ||
665 | |||
666 | table->mask = count - 1; | ||
667 | for (i = 0; i < count; i++) | ||
668 | table->flows[i].cpu = RPS_NO_CPU; | ||
669 | } else | ||
670 | table = NULL; | ||
671 | |||
672 | spin_lock(&rps_dev_flow_lock); | ||
673 | old_table = queue->rps_flow_table; | ||
674 | rcu_assign_pointer(queue->rps_flow_table, table); | ||
675 | spin_unlock(&rps_dev_flow_lock); | ||
676 | |||
677 | if (old_table) | ||
678 | call_rcu(&old_table->rcu, rps_dev_flow_table_release); | ||
679 | |||
680 | return len; | ||
681 | } | ||
682 | |||
683 | static struct rx_queue_attribute rps_cpus_attribute = | ||
684 | __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); | ||
685 | |||
686 | |||
687 | static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute = | ||
688 | __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR, | ||
689 | show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); | ||
690 | |||
691 | static struct attribute *rx_queue_default_attrs[] = { | ||
692 | &rps_cpus_attribute.attr, | ||
693 | &rps_dev_flow_table_cnt_attribute.attr, | ||
694 | NULL | ||
695 | }; | ||
696 | |||
697 | static void rx_queue_release(struct kobject *kobj) | ||
698 | { | ||
699 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
700 | struct netdev_rx_queue *first = queue->first; | ||
701 | |||
702 | if (queue->rps_map) | ||
703 | call_rcu(&queue->rps_map->rcu, rps_map_release); | ||
704 | |||
705 | if (queue->rps_flow_table) | ||
706 | call_rcu(&queue->rps_flow_table->rcu, | ||
707 | rps_dev_flow_table_release); | ||
708 | |||
709 | if (atomic_dec_and_test(&first->count)) | ||
710 | kfree(first); | ||
711 | } | ||
712 | |||
713 | static struct kobj_type rx_queue_ktype = { | ||
714 | .sysfs_ops = &rx_queue_sysfs_ops, | ||
715 | .release = rx_queue_release, | ||
716 | .default_attrs = rx_queue_default_attrs, | ||
717 | }; | ||
718 | |||
719 | static int rx_queue_add_kobject(struct net_device *net, int index) | ||
720 | { | ||
721 | struct netdev_rx_queue *queue = net->_rx + index; | ||
722 | struct kobject *kobj = &queue->kobj; | ||
723 | int error = 0; | ||
724 | |||
725 | kobj->kset = net->queues_kset; | ||
726 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, | ||
727 | "rx-%u", index); | ||
728 | if (error) { | ||
729 | kobject_put(kobj); | ||
730 | return error; | ||
731 | } | ||
732 | |||
733 | kobject_uevent(kobj, KOBJ_ADD); | ||
734 | |||
735 | return error; | ||
736 | } | ||
737 | |||
738 | static int rx_queue_register_kobjects(struct net_device *net) | ||
739 | { | ||
740 | int i; | ||
741 | int error = 0; | ||
742 | |||
743 | net->queues_kset = kset_create_and_add("queues", | ||
744 | NULL, &net->dev.kobj); | ||
745 | if (!net->queues_kset) | ||
746 | return -ENOMEM; | ||
747 | for (i = 0; i < net->num_rx_queues; i++) { | ||
748 | error = rx_queue_add_kobject(net, i); | ||
749 | if (error) | ||
750 | break; | ||
751 | } | ||
752 | |||
753 | if (error) | ||
754 | while (--i >= 0) | ||
755 | kobject_put(&net->_rx[i].kobj); | ||
756 | |||
757 | return error; | ||
758 | } | ||
759 | |||
760 | static void rx_queue_remove_kobjects(struct net_device *net) | ||
761 | { | ||
762 | int i; | ||
763 | |||
764 | for (i = 0; i < net->num_rx_queues; i++) | ||
765 | kobject_put(&net->_rx[i].kobj); | ||
766 | kset_unregister(net->queues_kset); | ||
767 | } | ||
768 | #endif /* CONFIG_RPS */ | ||
469 | #endif /* CONFIG_SYSFS */ | 769 | #endif /* CONFIG_SYSFS */ |
470 | 770 | ||
471 | #ifdef CONFIG_HOTPLUG | 771 | #ifdef CONFIG_HOTPLUG |
@@ -529,6 +829,10 @@ void netdev_unregister_kobject(struct net_device * net) | |||
529 | if (!net_eq(dev_net(net), &init_net)) | 829 | if (!net_eq(dev_net(net), &init_net)) |
530 | return; | 830 | return; |
531 | 831 | ||
832 | #ifdef CONFIG_RPS | ||
833 | rx_queue_remove_kobjects(net); | ||
834 | #endif | ||
835 | |||
532 | device_del(dev); | 836 | device_del(dev); |
533 | } | 837 | } |
534 | 838 | ||
@@ -537,6 +841,7 @@ int netdev_register_kobject(struct net_device *net) | |||
537 | { | 841 | { |
538 | struct device *dev = &(net->dev); | 842 | struct device *dev = &(net->dev); |
539 | const struct attribute_group **groups = net->sysfs_groups; | 843 | const struct attribute_group **groups = net->sysfs_groups; |
844 | int error = 0; | ||
540 | 845 | ||
541 | dev->class = &net_class; | 846 | dev->class = &net_class; |
542 | dev->platform_data = net; | 847 | dev->platform_data = net; |
@@ -563,7 +868,19 @@ int netdev_register_kobject(struct net_device *net) | |||
563 | if (!net_eq(dev_net(net), &init_net)) | 868 | if (!net_eq(dev_net(net), &init_net)) |
564 | return 0; | 869 | return 0; |
565 | 870 | ||
566 | return device_add(dev); | 871 | error = device_add(dev); |
872 | if (error) | ||
873 | return error; | ||
874 | |||
875 | #ifdef CONFIG_RPS | ||
876 | error = rx_queue_register_kobjects(net); | ||
877 | if (error) { | ||
878 | device_del(dev); | ||
879 | return error; | ||
880 | } | ||
881 | #endif | ||
882 | |||
883 | return error; | ||
567 | } | 884 | } |
568 | 885 | ||
569 | int netdev_class_create_file(struct class_attribute *class_attr) | 886 | int netdev_class_create_file(struct class_attribute *class_attr) |
diff --git a/net/core/net-traces.c b/net/core/net-traces.c index f1e982c508bb..afa6380ed88a 100644 --- a/net/core/net-traces.c +++ b/net/core/net-traces.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
20 | #include <linux/netlink.h> | 20 | #include <linux/netlink.h> |
21 | #include <linux/net_dropmon.h> | 21 | #include <linux/net_dropmon.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
24 | #include <asm/bitops.h> | 25 | #include <asm/bitops.h> |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index bd8c4712ea24..c988e685433a 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -27,6 +27,51 @@ EXPORT_SYMBOL(init_net); | |||
27 | 27 | ||
28 | #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ | 28 | #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ |
29 | 29 | ||
30 | static void net_generic_release(struct rcu_head *rcu) | ||
31 | { | ||
32 | struct net_generic *ng; | ||
33 | |||
34 | ng = container_of(rcu, struct net_generic, rcu); | ||
35 | kfree(ng); | ||
36 | } | ||
37 | |||
38 | static int net_assign_generic(struct net *net, int id, void *data) | ||
39 | { | ||
40 | struct net_generic *ng, *old_ng; | ||
41 | |||
42 | BUG_ON(!mutex_is_locked(&net_mutex)); | ||
43 | BUG_ON(id == 0); | ||
44 | |||
45 | ng = old_ng = net->gen; | ||
46 | if (old_ng->len >= id) | ||
47 | goto assign; | ||
48 | |||
49 | ng = kzalloc(sizeof(struct net_generic) + | ||
50 | id * sizeof(void *), GFP_KERNEL); | ||
51 | if (ng == NULL) | ||
52 | return -ENOMEM; | ||
53 | |||
54 | /* | ||
55 | * Some synchronisation notes: | ||
56 | * | ||
57 | * The net_generic explores the net->gen array inside rcu | ||
58 | * read section. Besides once set the net->gen->ptr[x] | ||
59 | * pointer never changes (see rules in netns/generic.h). | ||
60 | * | ||
61 | * That said, we simply duplicate this array and schedule | ||
62 | * the old copy for kfree after a grace period. | ||
63 | */ | ||
64 | |||
65 | ng->len = id; | ||
66 | memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); | ||
67 | |||
68 | rcu_assign_pointer(net->gen, ng); | ||
69 | call_rcu(&old_ng->rcu, net_generic_release); | ||
70 | assign: | ||
71 | ng->ptr[id - 1] = data; | ||
72 | return 0; | ||
73 | } | ||
74 | |||
30 | static int ops_init(const struct pernet_operations *ops, struct net *net) | 75 | static int ops_init(const struct pernet_operations *ops, struct net *net) |
31 | { | 76 | { |
32 | int err; | 77 | int err; |
@@ -469,10 +514,10 @@ EXPORT_SYMBOL_GPL(register_pernet_subsys); | |||
469 | * addition run the exit method for all existing network | 514 | * addition run the exit method for all existing network |
470 | * namespaces. | 515 | * namespaces. |
471 | */ | 516 | */ |
472 | void unregister_pernet_subsys(struct pernet_operations *module) | 517 | void unregister_pernet_subsys(struct pernet_operations *ops) |
473 | { | 518 | { |
474 | mutex_lock(&net_mutex); | 519 | mutex_lock(&net_mutex); |
475 | unregister_pernet_operations(module); | 520 | unregister_pernet_operations(ops); |
476 | mutex_unlock(&net_mutex); | 521 | mutex_unlock(&net_mutex); |
477 | } | 522 | } |
478 | EXPORT_SYMBOL_GPL(unregister_pernet_subsys); | 523 | EXPORT_SYMBOL_GPL(unregister_pernet_subsys); |
@@ -526,49 +571,3 @@ void unregister_pernet_device(struct pernet_operations *ops) | |||
526 | mutex_unlock(&net_mutex); | 571 | mutex_unlock(&net_mutex); |
527 | } | 572 | } |
528 | EXPORT_SYMBOL_GPL(unregister_pernet_device); | 573 | EXPORT_SYMBOL_GPL(unregister_pernet_device); |
529 | |||
530 | static void net_generic_release(struct rcu_head *rcu) | ||
531 | { | ||
532 | struct net_generic *ng; | ||
533 | |||
534 | ng = container_of(rcu, struct net_generic, rcu); | ||
535 | kfree(ng); | ||
536 | } | ||
537 | |||
538 | int net_assign_generic(struct net *net, int id, void *data) | ||
539 | { | ||
540 | struct net_generic *ng, *old_ng; | ||
541 | |||
542 | BUG_ON(!mutex_is_locked(&net_mutex)); | ||
543 | BUG_ON(id == 0); | ||
544 | |||
545 | ng = old_ng = net->gen; | ||
546 | if (old_ng->len >= id) | ||
547 | goto assign; | ||
548 | |||
549 | ng = kzalloc(sizeof(struct net_generic) + | ||
550 | id * sizeof(void *), GFP_KERNEL); | ||
551 | if (ng == NULL) | ||
552 | return -ENOMEM; | ||
553 | |||
554 | /* | ||
555 | * Some synchronisation notes: | ||
556 | * | ||
557 | * The net_generic explores the net->gen array inside rcu | ||
558 | * read section. Besides once set the net->gen->ptr[x] | ||
559 | * pointer never changes (see rules in netns/generic.h). | ||
560 | * | ||
561 | * That said, we simply duplicate this array and schedule | ||
562 | * the old copy for kfree after a grace period. | ||
563 | */ | ||
564 | |||
565 | ng->len = id; | ||
566 | memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); | ||
567 | |||
568 | rcu_assign_pointer(net->gen, ng); | ||
569 | call_rcu(&old_ng->rcu, net_generic_release); | ||
570 | assign: | ||
571 | ng->ptr[id - 1] = data; | ||
572 | return 0; | ||
573 | } | ||
574 | EXPORT_SYMBOL_GPL(net_assign_generic); | ||
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 7aa697253765..a58f59b97597 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | #include <linux/slab.h> | ||
25 | #include <net/tcp.h> | 26 | #include <net/tcp.h> |
26 | #include <net/udp.h> | 27 | #include <net/udp.h> |
27 | #include <asm/unaligned.h> | 28 | #include <asm/unaligned.h> |
@@ -614,7 +615,7 @@ void netpoll_print_options(struct netpoll *np) | |||
614 | np->name, np->local_port); | 615 | np->name, np->local_port); |
615 | printk(KERN_INFO "%s: local IP %pI4\n", | 616 | printk(KERN_INFO "%s: local IP %pI4\n", |
616 | np->name, &np->local_ip); | 617 | np->name, &np->local_ip); |
617 | printk(KERN_INFO "%s: interface %s\n", | 618 | printk(KERN_INFO "%s: interface '%s'\n", |
618 | np->name, np->dev_name); | 619 | np->name, np->dev_name); |
619 | printk(KERN_INFO "%s: remote port %d\n", | 620 | printk(KERN_INFO "%s: remote port %d\n", |
620 | np->name, np->remote_port); | 621 | np->name, np->remote_port); |
@@ -661,6 +662,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
661 | if ((delim = strchr(cur, '@')) == NULL) | 662 | if ((delim = strchr(cur, '@')) == NULL) |
662 | goto parse_failed; | 663 | goto parse_failed; |
663 | *delim = 0; | 664 | *delim = 0; |
665 | if (*cur == ' ' || *cur == '\t') | ||
666 | printk(KERN_INFO "%s: warning: whitespace" | ||
667 | "is not allowed\n", np->name); | ||
664 | np->remote_port = simple_strtol(cur, NULL, 10); | 668 | np->remote_port = simple_strtol(cur, NULL, 10); |
665 | cur = delim; | 669 | cur = delim; |
666 | } | 670 | } |
@@ -708,7 +712,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
708 | return 0; | 712 | return 0; |
709 | 713 | ||
710 | parse_failed: | 714 | parse_failed: |
711 | printk(KERN_INFO "%s: couldn't parse config at %s!\n", | 715 | printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", |
712 | np->name, cur); | 716 | np->name, cur); |
713 | return -1; | 717 | return -1; |
714 | } | 718 | } |
@@ -735,7 +739,7 @@ int netpoll_setup(struct netpoll *np) | |||
735 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | 739 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); |
736 | if (!npinfo) { | 740 | if (!npinfo) { |
737 | err = -ENOMEM; | 741 | err = -ENOMEM; |
738 | goto release; | 742 | goto put; |
739 | } | 743 | } |
740 | 744 | ||
741 | npinfo->rx_flags = 0; | 745 | npinfo->rx_flags = 0; |
@@ -845,7 +849,7 @@ int netpoll_setup(struct netpoll *np) | |||
845 | 849 | ||
846 | kfree(npinfo); | 850 | kfree(npinfo); |
847 | } | 851 | } |
848 | 852 | put: | |
849 | dev_put(ndev); | 853 | dev_put(ndev); |
850 | return err; | 854 | return err; |
851 | } | 855 | } |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 43923811bd6a..2ad68da418df 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -169,7 +169,7 @@ | |||
169 | #include <asm/dma.h> | 169 | #include <asm/dma.h> |
170 | #include <asm/div64.h> /* do_div */ | 170 | #include <asm/div64.h> /* do_div */ |
171 | 171 | ||
172 | #define VERSION "2.72" | 172 | #define VERSION "2.73" |
173 | #define IP_NAME_SZ 32 | 173 | #define IP_NAME_SZ 32 |
174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ | 174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ |
175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) | 175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) |
@@ -190,6 +190,7 @@ | |||
190 | #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ | 190 | #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ |
191 | #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ | 191 | #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ |
192 | #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ | 192 | #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ |
193 | #define F_NODE (1<<15) /* Node memory alloc*/ | ||
193 | 194 | ||
194 | /* Thread control flag bits */ | 195 | /* Thread control flag bits */ |
195 | #define T_STOP (1<<0) /* Stop run */ | 196 | #define T_STOP (1<<0) /* Stop run */ |
@@ -372,6 +373,7 @@ struct pktgen_dev { | |||
372 | 373 | ||
373 | u16 queue_map_min; | 374 | u16 queue_map_min; |
374 | u16 queue_map_max; | 375 | u16 queue_map_max; |
376 | int node; /* Memory node */ | ||
375 | 377 | ||
376 | #ifdef CONFIG_XFRM | 378 | #ifdef CONFIG_XFRM |
377 | __u8 ipsmode; /* IPSEC mode (config) */ | 379 | __u8 ipsmode; /* IPSEC mode (config) */ |
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
607 | if (pkt_dev->traffic_class) | 609 | if (pkt_dev->traffic_class) |
608 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); | 610 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); |
609 | 611 | ||
612 | if (pkt_dev->node >= 0) | ||
613 | seq_printf(seq, " node: %d\n", pkt_dev->node); | ||
614 | |||
610 | seq_printf(seq, " Flags: "); | 615 | seq_printf(seq, " Flags: "); |
611 | 616 | ||
612 | if (pkt_dev->flags & F_IPV6) | 617 | if (pkt_dev->flags & F_IPV6) |
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
660 | if (pkt_dev->flags & F_SVID_RND) | 665 | if (pkt_dev->flags & F_SVID_RND) |
661 | seq_printf(seq, "SVID_RND "); | 666 | seq_printf(seq, "SVID_RND "); |
662 | 667 | ||
668 | if (pkt_dev->flags & F_NODE) | ||
669 | seq_printf(seq, "NODE_ALLOC "); | ||
670 | |||
663 | seq_puts(seq, "\n"); | 671 | seq_puts(seq, "\n"); |
664 | 672 | ||
665 | /* not really stopped, more like last-running-at */ | 673 | /* not really stopped, more like last-running-at */ |
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1074 | pkt_dev->dst_mac_count); | 1082 | pkt_dev->dst_mac_count); |
1075 | return count; | 1083 | return count; |
1076 | } | 1084 | } |
1085 | if (!strcmp(name, "node")) { | ||
1086 | len = num_arg(&user_buffer[i], 10, &value); | ||
1087 | if (len < 0) | ||
1088 | return len; | ||
1089 | |||
1090 | i += len; | ||
1091 | |||
1092 | if (node_possible(value)) { | ||
1093 | pkt_dev->node = value; | ||
1094 | sprintf(pg_result, "OK: node=%d", pkt_dev->node); | ||
1095 | } | ||
1096 | else | ||
1097 | sprintf(pg_result, "ERROR: node not possible"); | ||
1098 | return count; | ||
1099 | } | ||
1077 | if (!strcmp(name, "flag")) { | 1100 | if (!strcmp(name, "flag")) { |
1078 | char f[32]; | 1101 | char f[32]; |
1079 | memset(f, 0, 32); | 1102 | memset(f, 0, 32); |
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1166 | else if (strcmp(f, "!IPV6") == 0) | 1189 | else if (strcmp(f, "!IPV6") == 0) |
1167 | pkt_dev->flags &= ~F_IPV6; | 1190 | pkt_dev->flags &= ~F_IPV6; |
1168 | 1191 | ||
1192 | else if (strcmp(f, "NODE_ALLOC") == 0) | ||
1193 | pkt_dev->flags |= F_NODE; | ||
1194 | |||
1195 | else if (strcmp(f, "!NODE_ALLOC") == 0) | ||
1196 | pkt_dev->flags &= ~F_NODE; | ||
1197 | |||
1169 | else { | 1198 | else { |
1170 | sprintf(pg_result, | 1199 | sprintf(pg_result, |
1171 | "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", | 1200 | "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", |
1172 | f, | 1201 | f, |
1173 | "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " | 1202 | "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " |
1174 | "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n"); | 1203 | "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n"); |
1175 | return count; | 1204 | return count; |
1176 | } | 1205 | } |
1177 | sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); | 1206 | sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); |
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2572 | mod_cur_headers(pkt_dev); | 2601 | mod_cur_headers(pkt_dev); |
2573 | 2602 | ||
2574 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2603 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2575 | skb = __netdev_alloc_skb(odev, | 2604 | |
2576 | pkt_dev->cur_pkt_size + 64 | 2605 | if (pkt_dev->flags & F_NODE) { |
2577 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | 2606 | int node; |
2607 | |||
2608 | if (pkt_dev->node >= 0) | ||
2609 | node = pkt_dev->node; | ||
2610 | else | ||
2611 | node = numa_node_id(); | ||
2612 | |||
2613 | skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64 | ||
2614 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node); | ||
2615 | if (likely(skb)) { | ||
2616 | skb_reserve(skb, NET_SKB_PAD); | ||
2617 | skb->dev = odev; | ||
2618 | } | ||
2619 | } | ||
2620 | else | ||
2621 | skb = __netdev_alloc_skb(odev, | ||
2622 | pkt_dev->cur_pkt_size + 64 | ||
2623 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2624 | |||
2578 | if (!skb) { | 2625 | if (!skb) { |
2579 | sprintf(pkt_dev->result, "No memory"); | 2626 | sprintf(pkt_dev->result, "No memory"); |
2580 | return NULL; | 2627 | return NULL; |
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) | |||
3674 | pkt_dev->svlan_p = 0; | 3721 | pkt_dev->svlan_p = 0; |
3675 | pkt_dev->svlan_cfi = 0; | 3722 | pkt_dev->svlan_cfi = 0; |
3676 | pkt_dev->svlan_id = 0xffff; | 3723 | pkt_dev->svlan_id = 0xffff; |
3724 | pkt_dev->node = -1; | ||
3677 | 3725 | ||
3678 | err = pktgen_setup_dev(pkt_dev, ifname); | 3726 | err = pktgen_setup_dev(pkt_dev, ifname); |
3679 | if (err) | 3727 | if (err) |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4568120d8533..23a71cb21273 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -98,7 +98,7 @@ int lockdep_rtnl_is_held(void) | |||
98 | EXPORT_SYMBOL(lockdep_rtnl_is_held); | 98 | EXPORT_SYMBOL(lockdep_rtnl_is_held); |
99 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 99 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
100 | 100 | ||
101 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; | 101 | static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; |
102 | 102 | ||
103 | static inline int rtm_msgindex(int msgtype) | 103 | static inline int rtm_msgindex(int msgtype) |
104 | { | 104 | { |
@@ -118,7 +118,11 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) | |||
118 | { | 118 | { |
119 | struct rtnl_link *tab; | 119 | struct rtnl_link *tab; |
120 | 120 | ||
121 | tab = rtnl_msg_handlers[protocol]; | 121 | if (protocol <= RTNL_FAMILY_MAX) |
122 | tab = rtnl_msg_handlers[protocol]; | ||
123 | else | ||
124 | tab = NULL; | ||
125 | |||
122 | if (tab == NULL || tab[msgindex].doit == NULL) | 126 | if (tab == NULL || tab[msgindex].doit == NULL) |
123 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 127 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
124 | 128 | ||
@@ -129,7 +133,11 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) | |||
129 | { | 133 | { |
130 | struct rtnl_link *tab; | 134 | struct rtnl_link *tab; |
131 | 135 | ||
132 | tab = rtnl_msg_handlers[protocol]; | 136 | if (protocol <= RTNL_FAMILY_MAX) |
137 | tab = rtnl_msg_handlers[protocol]; | ||
138 | else | ||
139 | tab = NULL; | ||
140 | |||
133 | if (tab == NULL || tab[msgindex].dumpit == NULL) | 141 | if (tab == NULL || tab[msgindex].dumpit == NULL) |
134 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 142 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
135 | 143 | ||
@@ -159,7 +167,7 @@ int __rtnl_register(int protocol, int msgtype, | |||
159 | struct rtnl_link *tab; | 167 | struct rtnl_link *tab; |
160 | int msgindex; | 168 | int msgindex; |
161 | 169 | ||
162 | BUG_ON(protocol < 0 || protocol >= NPROTO); | 170 | BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); |
163 | msgindex = rtm_msgindex(msgtype); | 171 | msgindex = rtm_msgindex(msgtype); |
164 | 172 | ||
165 | tab = rtnl_msg_handlers[protocol]; | 173 | tab = rtnl_msg_handlers[protocol]; |
@@ -211,7 +219,7 @@ int rtnl_unregister(int protocol, int msgtype) | |||
211 | { | 219 | { |
212 | int msgindex; | 220 | int msgindex; |
213 | 221 | ||
214 | BUG_ON(protocol < 0 || protocol >= NPROTO); | 222 | BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); |
215 | msgindex = rtm_msgindex(msgtype); | 223 | msgindex = rtm_msgindex(msgtype); |
216 | 224 | ||
217 | if (rtnl_msg_handlers[protocol] == NULL) | 225 | if (rtnl_msg_handlers[protocol] == NULL) |
@@ -233,7 +241,7 @@ EXPORT_SYMBOL_GPL(rtnl_unregister); | |||
233 | */ | 241 | */ |
234 | void rtnl_unregister_all(int protocol) | 242 | void rtnl_unregister_all(int protocol) |
235 | { | 243 | { |
236 | BUG_ON(protocol < 0 || protocol >= NPROTO); | 244 | BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); |
237 | 245 | ||
238 | kfree(rtnl_msg_handlers[protocol]); | 246 | kfree(rtnl_msg_handlers[protocol]); |
239 | rtnl_msg_handlers[protocol] = NULL; | 247 | rtnl_msg_handlers[protocol] = NULL; |
@@ -600,7 +608,41 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
600 | 608 | ||
601 | a->rx_compressed = b->rx_compressed; | 609 | a->rx_compressed = b->rx_compressed; |
602 | a->tx_compressed = b->tx_compressed; | 610 | a->tx_compressed = b->tx_compressed; |
603 | }; | 611 | } |
612 | |||
613 | static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) | ||
614 | { | ||
615 | struct rtnl_link_stats64 a; | ||
616 | |||
617 | a.rx_packets = b->rx_packets; | ||
618 | a.tx_packets = b->tx_packets; | ||
619 | a.rx_bytes = b->rx_bytes; | ||
620 | a.tx_bytes = b->tx_bytes; | ||
621 | a.rx_errors = b->rx_errors; | ||
622 | a.tx_errors = b->tx_errors; | ||
623 | a.rx_dropped = b->rx_dropped; | ||
624 | a.tx_dropped = b->tx_dropped; | ||
625 | |||
626 | a.multicast = b->multicast; | ||
627 | a.collisions = b->collisions; | ||
628 | |||
629 | a.rx_length_errors = b->rx_length_errors; | ||
630 | a.rx_over_errors = b->rx_over_errors; | ||
631 | a.rx_crc_errors = b->rx_crc_errors; | ||
632 | a.rx_frame_errors = b->rx_frame_errors; | ||
633 | a.rx_fifo_errors = b->rx_fifo_errors; | ||
634 | a.rx_missed_errors = b->rx_missed_errors; | ||
635 | |||
636 | a.tx_aborted_errors = b->tx_aborted_errors; | ||
637 | a.tx_carrier_errors = b->tx_carrier_errors; | ||
638 | a.tx_fifo_errors = b->tx_fifo_errors; | ||
639 | a.tx_heartbeat_errors = b->tx_heartbeat_errors; | ||
640 | a.tx_window_errors = b->tx_window_errors; | ||
641 | |||
642 | a.rx_compressed = b->rx_compressed; | ||
643 | a.tx_compressed = b->tx_compressed; | ||
644 | memcpy(v, &a, sizeof(a)); | ||
645 | } | ||
604 | 646 | ||
605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | 647 | static inline int rtnl_vfinfo_size(const struct net_device *dev) |
606 | { | 648 | { |
@@ -619,6 +661,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
619 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ | 661 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ |
620 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) | 662 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) |
621 | + nla_total_size(sizeof(struct rtnl_link_stats)) | 663 | + nla_total_size(sizeof(struct rtnl_link_stats)) |
664 | + nla_total_size(sizeof(struct rtnl_link_stats64)) | ||
622 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | 665 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
623 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ | 666 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ |
624 | + nla_total_size(4) /* IFLA_TXQLEN */ | 667 | + nla_total_size(4) /* IFLA_TXQLEN */ |
@@ -698,6 +741,12 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
698 | stats = dev_get_stats(dev); | 741 | stats = dev_get_stats(dev); |
699 | copy_rtnl_link_stats(nla_data(attr), stats); | 742 | copy_rtnl_link_stats(nla_data(attr), stats); |
700 | 743 | ||
744 | attr = nla_reserve(skb, IFLA_STATS64, | ||
745 | sizeof(struct rtnl_link_stats64)); | ||
746 | if (attr == NULL) | ||
747 | goto nla_put_failure; | ||
748 | copy_rtnl_link_stats64(nla_data(attr), stats); | ||
749 | |||
701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | 750 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { |
702 | int i; | 751 | int i; |
703 | struct ifla_vf_info ivi; | 752 | struct ifla_vf_info ivi; |
@@ -1270,10 +1319,11 @@ replay: | |||
1270 | err = ops->newlink(net, dev, tb, data); | 1319 | err = ops->newlink(net, dev, tb, data); |
1271 | else | 1320 | else |
1272 | err = register_netdevice(dev); | 1321 | err = register_netdevice(dev); |
1273 | if (err < 0 && !IS_ERR(dev)) { | 1322 | |
1323 | if (err < 0 && !IS_ERR(dev)) | ||
1274 | free_netdev(dev); | 1324 | free_netdev(dev); |
1325 | if (err < 0) | ||
1275 | goto out; | 1326 | goto out; |
1276 | } | ||
1277 | 1327 | ||
1278 | err = rtnl_configure_link(dev, ifm); | 1328 | err = rtnl_configure_link(dev, ifm); |
1279 | if (err < 0) | 1329 | if (err < 0) |
@@ -1335,7 +1385,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) | |||
1335 | 1385 | ||
1336 | if (s_idx == 0) | 1386 | if (s_idx == 0) |
1337 | s_idx = 1; | 1387 | s_idx = 1; |
1338 | for (idx = 1; idx < NPROTO; idx++) { | 1388 | for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { |
1339 | int type = cb->nlh->nlmsg_type-RTM_BASE; | 1389 | int type = cb->nlh->nlmsg_type-RTM_BASE; |
1340 | if (idx < s_idx || idx == PF_PACKET) | 1390 | if (idx < s_idx || idx == PF_PACKET) |
1341 | continue; | 1391 | continue; |
@@ -1403,9 +1453,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1403 | return 0; | 1453 | return 0; |
1404 | 1454 | ||
1405 | family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; | 1455 | family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; |
1406 | if (family >= NPROTO) | ||
1407 | return -EAFNOSUPPORT; | ||
1408 | |||
1409 | sz_idx = type>>2; | 1456 | sz_idx = type>>2; |
1410 | kind = type&3; | 1457 | kind = type&3; |
1411 | 1458 | ||
@@ -1473,6 +1520,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
1473 | case NETDEV_POST_INIT: | 1520 | case NETDEV_POST_INIT: |
1474 | case NETDEV_REGISTER: | 1521 | case NETDEV_REGISTER: |
1475 | case NETDEV_CHANGE: | 1522 | case NETDEV_CHANGE: |
1523 | case NETDEV_PRE_TYPE_CHANGE: | ||
1476 | case NETDEV_GOING_DOWN: | 1524 | case NETDEV_GOING_DOWN: |
1477 | case NETDEV_UNREGISTER: | 1525 | case NETDEV_UNREGISTER: |
1478 | case NETDEV_UNREGISTER_BATCH: | 1526 | case NETDEV_UNREGISTER_BATCH: |
diff --git a/net/core/scm.c b/net/core/scm.c index 9b264634acfd..b88f6f9d0b97 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | #include <linux/pid.h> | 27 | #include <linux/pid.h> |
28 | #include <linux/nsproxy.h> | 28 | #include <linux/nsproxy.h> |
29 | #include <linux/slab.h> | ||
29 | 30 | ||
30 | #include <asm/system.h> | 31 | #include <asm/system.h> |
31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 93c4e060c91e..8b9c109166a7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -117,7 +117,7 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = { | |||
117 | * | 117 | * |
118 | * Out of line support code for skb_put(). Not user callable. | 118 | * Out of line support code for skb_put(). Not user callable. |
119 | */ | 119 | */ |
120 | void skb_over_panic(struct sk_buff *skb, int sz, void *here) | 120 | static void skb_over_panic(struct sk_buff *skb, int sz, void *here) |
121 | { | 121 | { |
122 | printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " | 122 | printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " |
123 | "data:%p tail:%#lx end:%#lx dev:%s\n", | 123 | "data:%p tail:%#lx end:%#lx dev:%s\n", |
@@ -126,7 +126,6 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here) | |||
126 | skb->dev ? skb->dev->name : "<NULL>"); | 126 | skb->dev ? skb->dev->name : "<NULL>"); |
127 | BUG(); | 127 | BUG(); |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(skb_over_panic); | ||
130 | 129 | ||
131 | /** | 130 | /** |
132 | * skb_under_panic - private function | 131 | * skb_under_panic - private function |
@@ -137,7 +136,7 @@ EXPORT_SYMBOL(skb_over_panic); | |||
137 | * Out of line support code for skb_push(). Not user callable. | 136 | * Out of line support code for skb_push(). Not user callable. |
138 | */ | 137 | */ |
139 | 138 | ||
140 | void skb_under_panic(struct sk_buff *skb, int sz, void *here) | 139 | static void skb_under_panic(struct sk_buff *skb, int sz, void *here) |
141 | { | 140 | { |
142 | printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " | 141 | printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " |
143 | "data:%p tail:%#lx end:%#lx dev:%s\n", | 142 | "data:%p tail:%#lx end:%#lx dev:%s\n", |
@@ -146,7 +145,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) | |||
146 | skb->dev ? skb->dev->name : "<NULL>"); | 145 | skb->dev ? skb->dev->name : "<NULL>"); |
147 | BUG(); | 146 | BUG(); |
148 | } | 147 | } |
149 | EXPORT_SYMBOL(skb_under_panic); | ||
150 | 148 | ||
151 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few | 149 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few |
152 | * 'private' fields and also do memory statistics to find all the | 150 | * 'private' fields and also do memory statistics to find all the |
@@ -534,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
534 | new->network_header = old->network_header; | 532 | new->network_header = old->network_header; |
535 | new->mac_header = old->mac_header; | 533 | new->mac_header = old->mac_header; |
536 | skb_dst_set(new, dst_clone(skb_dst(old))); | 534 | skb_dst_set(new, dst_clone(skb_dst(old))); |
535 | new->rxhash = old->rxhash; | ||
537 | #ifdef CONFIG_XFRM | 536 | #ifdef CONFIG_XFRM |
538 | new->sp = secpath_get(old->sp); | 537 | new->sp = secpath_get(old->sp); |
539 | #endif | 538 | #endif |
@@ -581,6 +580,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
581 | C(len); | 580 | C(len); |
582 | C(data_len); | 581 | C(data_len); |
583 | C(mac_len); | 582 | C(mac_len); |
583 | C(rxhash); | ||
584 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 584 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
585 | n->cloned = 1; | 585 | n->cloned = 1; |
586 | n->nohdr = 0; | 586 | n->nohdr = 0; |
@@ -1051,7 +1051,7 @@ EXPORT_SYMBOL(skb_push); | |||
1051 | */ | 1051 | */ |
1052 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) | 1052 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) |
1053 | { | 1053 | { |
1054 | return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); | 1054 | return skb_pull_inline(skb, len); |
1055 | } | 1055 | } |
1056 | EXPORT_SYMBOL(skb_pull); | 1056 | EXPORT_SYMBOL(skb_pull); |
1057 | 1057 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index c5812bbc2cc9..94c4affdda9b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -327,6 +327,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) | |||
327 | 327 | ||
328 | skb->dev = NULL; | 328 | skb->dev = NULL; |
329 | 329 | ||
330 | if (sk_rcvqueues_full(sk, skb)) { | ||
331 | atomic_inc(&sk->sk_drops); | ||
332 | goto discard_and_relse; | ||
333 | } | ||
330 | if (nested) | 334 | if (nested) |
331 | bh_lock_sock_nested(sk); | 335 | bh_lock_sock_nested(sk); |
332 | else | 336 | else |
@@ -364,11 +368,11 @@ EXPORT_SYMBOL(sk_reset_txq); | |||
364 | 368 | ||
365 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) | 369 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) |
366 | { | 370 | { |
367 | struct dst_entry *dst = sk->sk_dst_cache; | 371 | struct dst_entry *dst = __sk_dst_get(sk); |
368 | 372 | ||
369 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | 373 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { |
370 | sk_tx_queue_clear(sk); | 374 | sk_tx_queue_clear(sk); |
371 | sk->sk_dst_cache = NULL; | 375 | rcu_assign_pointer(sk->sk_dst_cache, NULL); |
372 | dst_release(dst); | 376 | dst_release(dst); |
373 | return NULL; | 377 | return NULL; |
374 | } | 378 | } |
@@ -1157,7 +1161,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1157 | skb_queue_head_init(&newsk->sk_async_wait_queue); | 1161 | skb_queue_head_init(&newsk->sk_async_wait_queue); |
1158 | #endif | 1162 | #endif |
1159 | 1163 | ||
1160 | rwlock_init(&newsk->sk_dst_lock); | 1164 | spin_lock_init(&newsk->sk_dst_lock); |
1161 | rwlock_init(&newsk->sk_callback_lock); | 1165 | rwlock_init(&newsk->sk_callback_lock); |
1162 | lockdep_set_class_and_name(&newsk->sk_callback_lock, | 1166 | lockdep_set_class_and_name(&newsk->sk_callback_lock, |
1163 | af_callback_keys + newsk->sk_family, | 1167 | af_callback_keys + newsk->sk_family, |
@@ -1207,7 +1211,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1207 | */ | 1211 | */ |
1208 | sk_refcnt_debug_inc(newsk); | 1212 | sk_refcnt_debug_inc(newsk); |
1209 | sk_set_socket(newsk, NULL); | 1213 | sk_set_socket(newsk, NULL); |
1210 | newsk->sk_sleep = NULL; | 1214 | newsk->sk_wq = NULL; |
1211 | 1215 | ||
1212 | if (newsk->sk_prot->sockets_allocated) | 1216 | if (newsk->sk_prot->sockets_allocated) |
1213 | percpu_counter_inc(newsk->sk_prot->sockets_allocated); | 1217 | percpu_counter_inc(newsk->sk_prot->sockets_allocated); |
@@ -1395,7 +1399,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1395 | if (signal_pending(current)) | 1399 | if (signal_pending(current)) |
1396 | break; | 1400 | break; |
1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1401 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1398 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1402 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) | 1403 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) |
1400 | break; | 1404 | break; |
1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) | 1405 | if (sk->sk_shutdown & SEND_SHUTDOWN) |
@@ -1404,7 +1408,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1404 | break; | 1408 | break; |
1405 | timeo = schedule_timeout(timeo); | 1409 | timeo = schedule_timeout(timeo); |
1406 | } | 1410 | } |
1407 | finish_wait(sk->sk_sleep, &wait); | 1411 | finish_wait(sk_sleep(sk), &wait); |
1408 | return timeo; | 1412 | return timeo; |
1409 | } | 1413 | } |
1410 | 1414 | ||
@@ -1570,11 +1574,11 @@ int sk_wait_data(struct sock *sk, long *timeo) | |||
1570 | int rc; | 1574 | int rc; |
1571 | DEFINE_WAIT(wait); | 1575 | DEFINE_WAIT(wait); |
1572 | 1576 | ||
1573 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1577 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1578 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); | 1579 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); |
1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1580 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1577 | finish_wait(sk->sk_sleep, &wait); | 1581 | finish_wait(sk_sleep(sk), &wait); |
1578 | return rc; | 1582 | return rc; |
1579 | } | 1583 | } |
1580 | EXPORT_SYMBOL(sk_wait_data); | 1584 | EXPORT_SYMBOL(sk_wait_data); |
@@ -1796,41 +1800,53 @@ EXPORT_SYMBOL(sock_no_sendpage); | |||
1796 | 1800 | ||
1797 | static void sock_def_wakeup(struct sock *sk) | 1801 | static void sock_def_wakeup(struct sock *sk) |
1798 | { | 1802 | { |
1799 | read_lock(&sk->sk_callback_lock); | 1803 | struct socket_wq *wq; |
1800 | if (sk_has_sleeper(sk)) | 1804 | |
1801 | wake_up_interruptible_all(sk->sk_sleep); | 1805 | rcu_read_lock(); |
1802 | read_unlock(&sk->sk_callback_lock); | 1806 | wq = rcu_dereference(sk->sk_wq); |
1807 | if (wq_has_sleeper(wq)) | ||
1808 | wake_up_interruptible_all(&wq->wait); | ||
1809 | rcu_read_unlock(); | ||
1803 | } | 1810 | } |
1804 | 1811 | ||
1805 | static void sock_def_error_report(struct sock *sk) | 1812 | static void sock_def_error_report(struct sock *sk) |
1806 | { | 1813 | { |
1807 | read_lock(&sk->sk_callback_lock); | 1814 | struct socket_wq *wq; |
1808 | if (sk_has_sleeper(sk)) | 1815 | |
1809 | wake_up_interruptible_poll(sk->sk_sleep, POLLERR); | 1816 | rcu_read_lock(); |
1817 | wq = rcu_dereference(sk->sk_wq); | ||
1818 | if (wq_has_sleeper(wq)) | ||
1819 | wake_up_interruptible_poll(&wq->wait, POLLERR); | ||
1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); | 1820 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); |
1811 | read_unlock(&sk->sk_callback_lock); | 1821 | rcu_read_unlock(); |
1812 | } | 1822 | } |
1813 | 1823 | ||
1814 | static void sock_def_readable(struct sock *sk, int len) | 1824 | static void sock_def_readable(struct sock *sk, int len) |
1815 | { | 1825 | { |
1816 | read_lock(&sk->sk_callback_lock); | 1826 | struct socket_wq *wq; |
1817 | if (sk_has_sleeper(sk)) | 1827 | |
1818 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | | 1828 | rcu_read_lock(); |
1829 | wq = rcu_dereference(sk->sk_wq); | ||
1830 | if (wq_has_sleeper(wq)) | ||
1831 | wake_up_interruptible_sync_poll(&wq->wait, POLLIN | | ||
1819 | POLLRDNORM | POLLRDBAND); | 1832 | POLLRDNORM | POLLRDBAND); |
1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | 1833 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); |
1821 | read_unlock(&sk->sk_callback_lock); | 1834 | rcu_read_unlock(); |
1822 | } | 1835 | } |
1823 | 1836 | ||
1824 | static void sock_def_write_space(struct sock *sk) | 1837 | static void sock_def_write_space(struct sock *sk) |
1825 | { | 1838 | { |
1826 | read_lock(&sk->sk_callback_lock); | 1839 | struct socket_wq *wq; |
1840 | |||
1841 | rcu_read_lock(); | ||
1827 | 1842 | ||
1828 | /* Do not wake up a writer until he can make "significant" | 1843 | /* Do not wake up a writer until he can make "significant" |
1829 | * progress. --DaveM | 1844 | * progress. --DaveM |
1830 | */ | 1845 | */ |
1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { | 1846 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { |
1832 | if (sk_has_sleeper(sk)) | 1847 | wq = rcu_dereference(sk->sk_wq); |
1833 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | | 1848 | if (wq_has_sleeper(wq)) |
1849 | wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | | ||
1834 | POLLWRNORM | POLLWRBAND); | 1850 | POLLWRNORM | POLLWRBAND); |
1835 | 1851 | ||
1836 | /* Should agree with poll, otherwise some programs break */ | 1852 | /* Should agree with poll, otherwise some programs break */ |
@@ -1838,7 +1854,7 @@ static void sock_def_write_space(struct sock *sk) | |||
1838 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 1854 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
1839 | } | 1855 | } |
1840 | 1856 | ||
1841 | read_unlock(&sk->sk_callback_lock); | 1857 | rcu_read_unlock(); |
1842 | } | 1858 | } |
1843 | 1859 | ||
1844 | static void sock_def_destruct(struct sock *sk) | 1860 | static void sock_def_destruct(struct sock *sk) |
@@ -1885,7 +1901,6 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1885 | sk->sk_allocation = GFP_KERNEL; | 1901 | sk->sk_allocation = GFP_KERNEL; |
1886 | sk->sk_rcvbuf = sysctl_rmem_default; | 1902 | sk->sk_rcvbuf = sysctl_rmem_default; |
1887 | sk->sk_sndbuf = sysctl_wmem_default; | 1903 | sk->sk_sndbuf = sysctl_wmem_default; |
1888 | sk->sk_backlog.limit = sk->sk_rcvbuf << 1; | ||
1889 | sk->sk_state = TCP_CLOSE; | 1904 | sk->sk_state = TCP_CLOSE; |
1890 | sk_set_socket(sk, sock); | 1905 | sk_set_socket(sk, sock); |
1891 | 1906 | ||
@@ -1893,12 +1908,12 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1893 | 1908 | ||
1894 | if (sock) { | 1909 | if (sock) { |
1895 | sk->sk_type = sock->type; | 1910 | sk->sk_type = sock->type; |
1896 | sk->sk_sleep = &sock->wait; | 1911 | sk->sk_wq = sock->wq; |
1897 | sock->sk = sk; | 1912 | sock->sk = sk; |
1898 | } else | 1913 | } else |
1899 | sk->sk_sleep = NULL; | 1914 | sk->sk_wq = NULL; |
1900 | 1915 | ||
1901 | rwlock_init(&sk->sk_dst_lock); | 1916 | spin_lock_init(&sk->sk_dst_lock); |
1902 | rwlock_init(&sk->sk_callback_lock); | 1917 | rwlock_init(&sk->sk_callback_lock); |
1903 | lockdep_set_class_and_name(&sk->sk_callback_lock, | 1918 | lockdep_set_class_and_name(&sk->sk_callback_lock, |
1904 | af_callback_keys + sk->sk_family, | 1919 | af_callback_keys + sk->sk_family, |
diff --git a/net/core/stream.c b/net/core/stream.c index a37debfeb1b2..cc196f42b8d8 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
@@ -28,15 +28,19 @@ | |||
28 | void sk_stream_write_space(struct sock *sk) | 28 | void sk_stream_write_space(struct sock *sk) |
29 | { | 29 | { |
30 | struct socket *sock = sk->sk_socket; | 30 | struct socket *sock = sk->sk_socket; |
31 | struct socket_wq *wq; | ||
31 | 32 | ||
32 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { | 33 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { |
33 | clear_bit(SOCK_NOSPACE, &sock->flags); | 34 | clear_bit(SOCK_NOSPACE, &sock->flags); |
34 | 35 | ||
35 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 36 | rcu_read_lock(); |
36 | wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | | 37 | wq = rcu_dereference(sk->sk_wq); |
38 | if (wq_has_sleeper(wq)) | ||
39 | wake_up_interruptible_poll(&wq->wait, POLLOUT | | ||
37 | POLLWRNORM | POLLWRBAND); | 40 | POLLWRNORM | POLLWRBAND); |
38 | if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) | 41 | if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) |
39 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); | 42 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); |
43 | rcu_read_unlock(); | ||
40 | } | 44 | } |
41 | } | 45 | } |
42 | 46 | ||
@@ -66,13 +70,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) | |||
66 | if (signal_pending(tsk)) | 70 | if (signal_pending(tsk)) |
67 | return sock_intr_errno(*timeo_p); | 71 | return sock_intr_errno(*timeo_p); |
68 | 72 | ||
69 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 73 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
70 | sk->sk_write_pending++; | 74 | sk->sk_write_pending++; |
71 | done = sk_wait_event(sk, timeo_p, | 75 | done = sk_wait_event(sk, timeo_p, |
72 | !sk->sk_err && | 76 | !sk->sk_err && |
73 | !((1 << sk->sk_state) & | 77 | !((1 << sk->sk_state) & |
74 | ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); | 78 | ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); |
75 | finish_wait(sk->sk_sleep, &wait); | 79 | finish_wait(sk_sleep(sk), &wait); |
76 | sk->sk_write_pending--; | 80 | sk->sk_write_pending--; |
77 | } while (!done); | 81 | } while (!done); |
78 | return 0; | 82 | return 0; |
@@ -96,13 +100,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout) | |||
96 | DEFINE_WAIT(wait); | 100 | DEFINE_WAIT(wait); |
97 | 101 | ||
98 | do { | 102 | do { |
99 | prepare_to_wait(sk->sk_sleep, &wait, | 103 | prepare_to_wait(sk_sleep(sk), &wait, |
100 | TASK_INTERRUPTIBLE); | 104 | TASK_INTERRUPTIBLE); |
101 | if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) | 105 | if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) |
102 | break; | 106 | break; |
103 | } while (!signal_pending(current) && timeout); | 107 | } while (!signal_pending(current) && timeout); |
104 | 108 | ||
105 | finish_wait(sk->sk_sleep, &wait); | 109 | finish_wait(sk_sleep(sk), &wait); |
106 | } | 110 | } |
107 | } | 111 | } |
108 | 112 | ||
@@ -126,7 +130,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
126 | while (1) { | 130 | while (1) { |
127 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 131 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); |
128 | 132 | ||
129 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 133 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
130 | 134 | ||
131 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 135 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
132 | goto do_error; | 136 | goto do_error; |
@@ -157,7 +161,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
157 | *timeo_p = current_timeo; | 161 | *timeo_p = current_timeo; |
158 | } | 162 | } |
159 | out: | 163 | out: |
160 | finish_wait(sk->sk_sleep, &wait); | 164 | finish_wait(sk_sleep(sk), &wait); |
161 | return err; | 165 | return err; |
162 | 166 | ||
163 | do_error: | 167 | do_error: |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 06124872af5b..dcc7d25996ab 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -11,11 +11,72 @@ | |||
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/ratelimit.h> | 13 | #include <linux/ratelimit.h> |
14 | #include <linux/vmalloc.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/slab.h> | ||
15 | 17 | ||
16 | #include <net/ip.h> | 18 | #include <net/ip.h> |
17 | #include <net/sock.h> | 19 | #include <net/sock.h> |
18 | 20 | ||
21 | #ifdef CONFIG_RPS | ||
22 | static int rps_sock_flow_sysctl(ctl_table *table, int write, | ||
23 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
24 | { | ||
25 | unsigned int orig_size, size; | ||
26 | int ret, i; | ||
27 | ctl_table tmp = { | ||
28 | .data = &size, | ||
29 | .maxlen = sizeof(size), | ||
30 | .mode = table->mode | ||
31 | }; | ||
32 | struct rps_sock_flow_table *orig_sock_table, *sock_table; | ||
33 | static DEFINE_MUTEX(sock_flow_mutex); | ||
34 | |||
35 | mutex_lock(&sock_flow_mutex); | ||
36 | |||
37 | orig_sock_table = rps_sock_flow_table; | ||
38 | size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; | ||
39 | |||
40 | ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); | ||
41 | |||
42 | if (write) { | ||
43 | if (size) { | ||
44 | if (size > 1<<30) { | ||
45 | /* Enforce limit to prevent overflow */ | ||
46 | mutex_unlock(&sock_flow_mutex); | ||
47 | return -EINVAL; | ||
48 | } | ||
49 | size = roundup_pow_of_two(size); | ||
50 | if (size != orig_size) { | ||
51 | sock_table = | ||
52 | vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size)); | ||
53 | if (!sock_table) { | ||
54 | mutex_unlock(&sock_flow_mutex); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | |||
58 | sock_table->mask = size - 1; | ||
59 | } else | ||
60 | sock_table = orig_sock_table; | ||
61 | |||
62 | for (i = 0; i < size; i++) | ||
63 | sock_table->ents[i] = RPS_NO_CPU; | ||
64 | } else | ||
65 | sock_table = NULL; | ||
66 | |||
67 | if (sock_table != orig_sock_table) { | ||
68 | rcu_assign_pointer(rps_sock_flow_table, sock_table); | ||
69 | synchronize_rcu(); | ||
70 | vfree(orig_sock_table); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | mutex_unlock(&sock_flow_mutex); | ||
75 | |||
76 | return ret; | ||
77 | } | ||
78 | #endif /* CONFIG_RPS */ | ||
79 | |||
19 | static struct ctl_table net_core_table[] = { | 80 | static struct ctl_table net_core_table[] = { |
20 | #ifdef CONFIG_NET | 81 | #ifdef CONFIG_NET |
21 | { | 82 | { |
@@ -81,6 +142,14 @@ static struct ctl_table net_core_table[] = { | |||
81 | .mode = 0644, | 142 | .mode = 0644, |
82 | .proc_handler = proc_dointvec | 143 | .proc_handler = proc_dointvec |
83 | }, | 144 | }, |
145 | #ifdef CONFIG_RPS | ||
146 | { | ||
147 | .procname = "rps_sock_flow_entries", | ||
148 | .maxlen = sizeof(int), | ||
149 | .mode = 0644, | ||
150 | .proc_handler = rps_sock_flow_sysctl | ||
151 | }, | ||
152 | #endif | ||
84 | #endif /* CONFIG_NET */ | 153 | #endif /* CONFIG_NET */ |
85 | { | 154 | { |
86 | .procname = "netdev_budget", | 155 | .procname = "netdev_budget", |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 813e399220a7..19ac2b985485 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/netlink.h> | 21 | #include <linux/netlink.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/netlink.h> | 23 | #include <net/netlink.h> |
23 | #include <net/rtnetlink.h> | 24 | #include <net/rtnetlink.h> |
24 | #include <linux/dcbnl.h> | 25 | #include <linux/dcbnl.h> |
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index 49d27c556bec..36479ca61e03 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/slab.h> | ||
15 | |||
14 | #include "ccid.h" | 16 | #include "ccid.h" |
15 | #include "ccids/lib/tfrc.h" | 17 | #include "ccids/lib/tfrc.h" |
16 | 18 | ||
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index a47a8c918ee8..9b3ae9922be1 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -23,6 +23,7 @@ | |||
23 | /* | 23 | /* |
24 | * This implementation should follow RFC 4341 | 24 | * This implementation should follow RFC 4341 |
25 | */ | 25 | */ |
26 | #include <linux/slab.h> | ||
26 | #include "../feat.h" | 27 | #include "../feat.h" |
27 | #include "../ccid.h" | 28 | #include "../ccid.h" |
28 | #include "../dccp.h" | 29 | #include "../dccp.h" |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index bcd7632299f5..d3235899c7e3 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
208 | goto restart_timer; | 208 | goto restart_timer; |
209 | } | 209 | } |
210 | 210 | ||
211 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, | 211 | ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk, |
212 | ccid3_tx_state_name(hc->tx_state)); | 212 | ccid3_tx_state_name(hc->tx_state)); |
213 | 213 | ||
214 | if (hc->tx_state == TFRC_SSTATE_FBACK) | 214 | if (hc->tx_state == TFRC_SSTATE_FBACK) |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 5ef32c2f0d6a..a10a61a1ded2 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -189,7 +189,7 @@ enum { | |||
189 | #define DCCP_MIB_MAX __DCCP_MIB_MAX | 189 | #define DCCP_MIB_MAX __DCCP_MIB_MAX |
190 | struct dccp_mib { | 190 | struct dccp_mib { |
191 | unsigned long mibs[DCCP_MIB_MAX]; | 191 | unsigned long mibs[DCCP_MIB_MAX]; |
192 | } __SNMP_MIB_ALIGN__; | 192 | }; |
193 | 193 | ||
194 | DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); | 194 | DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); |
195 | #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) | 195 | #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) |
@@ -223,7 +223,7 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb) | |||
223 | skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); | 223 | skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); |
224 | } | 224 | } |
225 | 225 | ||
226 | extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | 226 | extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb); |
227 | 227 | ||
228 | extern int dccp_retransmit_skb(struct sock *sk); | 228 | extern int dccp_retransmit_skb(struct sock *sk); |
229 | 229 | ||
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 972b8dc918d6..df7dd26cf07e 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * 2 of the License, or (at your option) any later version. | 22 | * 2 of the License, or (at your option) any later version. |
23 | */ | 23 | */ |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/slab.h> | ||
25 | #include "ccid.h" | 26 | #include "ccid.h" |
26 | #include "feat.h" | 27 | #include "feat.h" |
27 | 28 | ||
diff --git a/net/dccp/input.c b/net/dccp/input.c index 7648f316310f..58f7bc156850 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/dccp.h> | 13 | #include <linux/dccp.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | ||
15 | 16 | ||
16 | #include <net/sock.h> | 17 | #include <net/sock.h> |
17 | 18 | ||
@@ -414,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, | |||
414 | if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, | 415 | if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, |
415 | dp->dccps_awl, dp->dccps_awh)) { | 416 | dp->dccps_awl, dp->dccps_awh)) { |
416 | dccp_pr_debug("invalid ackno: S.AWL=%llu, " | 417 | dccp_pr_debug("invalid ackno: S.AWL=%llu, " |
417 | "P.ackno=%llu, S.AWH=%llu \n", | 418 | "P.ackno=%llu, S.AWH=%llu\n", |
418 | (unsigned long long)dp->dccps_awl, | 419 | (unsigned long long)dp->dccps_awl, |
419 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, | 420 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, |
420 | (unsigned long long)dp->dccps_awh); | 421 | (unsigned long long)dp->dccps_awh); |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index b195c4feaa0a..d9b11ef8694c 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/dccp.h> | 13 | #include <linux/dccp.h> |
14 | #include <linux/icmp.h> | 14 | #include <linux/icmp.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
17 | #include <linux/random.h> | 18 | #include <linux/random.h> |
@@ -348,7 +349,7 @@ static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb, | |||
348 | return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); | 349 | return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); |
349 | } | 350 | } |
350 | 351 | ||
351 | void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) | 352 | void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) |
352 | { | 353 | { |
353 | const struct inet_sock *inet = inet_sk(sk); | 354 | const struct inet_sock *inet = inet_sk(sk); |
354 | struct dccp_hdr *dh = dccp_hdr(skb); | 355 | struct dccp_hdr *dh = dccp_hdr(skb); |
@@ -998,11 +999,11 @@ static struct inet_protosw dccp_v4_protosw = { | |||
998 | 999 | ||
999 | static int __net_init dccp_v4_init_net(struct net *net) | 1000 | static int __net_init dccp_v4_init_net(struct net *net) |
1000 | { | 1001 | { |
1001 | int err; | 1002 | if (dccp_hashinfo.bhash == NULL) |
1003 | return -ESOCKTNOSUPPORT; | ||
1002 | 1004 | ||
1003 | err = inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, | 1005 | return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, |
1004 | SOCK_DCCP, IPPROTO_DCCP, net); | 1006 | SOCK_DCCP, IPPROTO_DCCP, net); |
1005 | return err; | ||
1006 | } | 1007 | } |
1007 | 1008 | ||
1008 | static void __net_exit dccp_v4_exit_net(struct net *net) | 1009 | static void __net_exit dccp_v4_exit_net(struct net *net) |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 1aec6349e858..091698899594 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/random.h> | 16 | #include <linux/random.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/xfrm.h> | 18 | #include <linux/xfrm.h> |
18 | 19 | ||
19 | #include <net/addrconf.h> | 20 | #include <net/addrconf.h> |
@@ -59,8 +60,7 @@ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, | |||
59 | return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); | 60 | return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); |
60 | } | 61 | } |
61 | 62 | ||
62 | static inline void dccp_v6_send_check(struct sock *sk, int unused_value, | 63 | static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) |
63 | struct sk_buff *skb) | ||
64 | { | 64 | { |
65 | struct ipv6_pinfo *np = inet6_sk(sk); | 65 | struct ipv6_pinfo *np = inet6_sk(sk); |
66 | struct dccp_hdr *dh = dccp_hdr(skb); | 66 | struct dccp_hdr *dh = dccp_hdr(skb); |
@@ -292,7 +292,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
292 | &ireq6->loc_addr, | 292 | &ireq6->loc_addr, |
293 | &ireq6->rmt_addr); | 293 | &ireq6->rmt_addr); |
294 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 294 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
295 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 295 | err = ip6_xmit(sk, skb, &fl, opt); |
296 | err = net_xmit_eval(err); | 296 | err = net_xmit_eval(err); |
297 | } | 297 | } |
298 | 298 | ||
@@ -347,7 +347,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
347 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { | 347 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { |
348 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { | 348 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { |
349 | skb_dst_set(skb, dst); | 349 | skb_dst_set(skb, dst); |
350 | ip6_xmit(ctl_sk, skb, &fl, NULL, 0); | 350 | ip6_xmit(ctl_sk, skb, &fl, NULL); |
351 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | 351 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); |
352 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | 352 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); |
353 | return; | 353 | return; |
@@ -1191,11 +1191,11 @@ static struct inet_protosw dccp_v6_protosw = { | |||
1191 | 1191 | ||
1192 | static int __net_init dccp_v6_init_net(struct net *net) | 1192 | static int __net_init dccp_v6_init_net(struct net *net) |
1193 | { | 1193 | { |
1194 | int err; | 1194 | if (dccp_hashinfo.bhash == NULL) |
1195 | return -ESOCKTNOSUPPORT; | ||
1195 | 1196 | ||
1196 | err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, | 1197 | return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, |
1197 | SOCK_DCCP, IPPROTO_DCCP, net); | 1198 | SOCK_DCCP, IPPROTO_DCCP, net); |
1198 | return err; | ||
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | static void __net_exit dccp_v6_exit_net(struct net *net) | 1201 | static void __net_exit dccp_v6_exit_net(struct net *net) |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 0d508c359fa9..128b089d3aef 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/dccp.h> | 13 | #include <linux/dccp.h> |
14 | #include <linux/gfp.h> | ||
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
16 | #include <linux/timer.h> | 17 | #include <linux/timer.h> |
diff --git a/net/dccp/output.c b/net/dccp/output.c index d6bb753bf6ad..aadbdb58758b 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/dccp.h> | 13 | #include <linux/dccp.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/slab.h> | ||
16 | 17 | ||
17 | #include <net/inet_sock.h> | 18 | #include <net/inet_sock.h> |
18 | #include <net/sock.h> | 19 | #include <net/sock.h> |
@@ -128,14 +129,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
128 | break; | 129 | break; |
129 | } | 130 | } |
130 | 131 | ||
131 | icsk->icsk_af_ops->send_check(sk, 0, skb); | 132 | icsk->icsk_af_ops->send_check(sk, skb); |
132 | 133 | ||
133 | if (set_ack) | 134 | if (set_ack) |
134 | dccp_event_ack_sent(sk); | 135 | dccp_event_ack_sent(sk); |
135 | 136 | ||
136 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 137 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
137 | 138 | ||
138 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 139 | err = icsk->icsk_af_ops->queue_xmit(skb); |
139 | return net_xmit_eval(err); | 140 | return net_xmit_eval(err); |
140 | } | 141 | } |
141 | return -ENOBUFS; | 142 | return -ENOBUFS; |
@@ -194,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss); | |||
194 | 195 | ||
195 | void dccp_write_space(struct sock *sk) | 196 | void dccp_write_space(struct sock *sk) |
196 | { | 197 | { |
197 | read_lock(&sk->sk_callback_lock); | 198 | struct socket_wq *wq; |
198 | 199 | ||
199 | if (sk_has_sleeper(sk)) | 200 | rcu_read_lock(); |
200 | wake_up_interruptible(sk->sk_sleep); | 201 | wq = rcu_dereference(sk->sk_wq); |
202 | if (wq_has_sleeper(wq)) | ||
203 | wake_up_interruptible(&wq->wait); | ||
201 | /* Should agree with poll, otherwise some programs break */ | 204 | /* Should agree with poll, otherwise some programs break */ |
202 | if (sock_writeable(sk)) | 205 | if (sock_writeable(sk)) |
203 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 206 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
204 | 207 | ||
205 | read_unlock(&sk->sk_callback_lock); | 208 | rcu_read_unlock(); |
206 | } | 209 | } |
207 | 210 | ||
208 | /** | 211 | /** |
@@ -224,7 +227,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) | |||
224 | dccp_pr_debug("delayed send by %d msec\n", delay); | 227 | dccp_pr_debug("delayed send by %d msec\n", delay); |
225 | jiffdelay = msecs_to_jiffies(delay); | 228 | jiffdelay = msecs_to_jiffies(delay); |
226 | 229 | ||
227 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 230 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
228 | 231 | ||
229 | sk->sk_write_pending++; | 232 | sk->sk_write_pending++; |
230 | release_sock(sk); | 233 | release_sock(sk); |
@@ -240,7 +243,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) | |||
240 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); | 243 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
241 | } while ((delay = rc) > 0); | 244 | } while ((delay = rc) > 0); |
242 | out: | 245 | out: |
243 | finish_wait(sk->sk_sleep, &wait); | 246 | finish_wait(sk_sleep(sk), &wait); |
244 | return rc; | 247 | return rc; |
245 | 248 | ||
246 | do_error: | 249 | do_error: |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index f5b3464f1242..078e48d442fd 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/kfifo.h> | 31 | #include <linux/kfifo.h> |
32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
33 | #include <linux/gfp.h> | ||
33 | #include <net/net_namespace.h> | 34 | #include <net/net_namespace.h> |
34 | 35 | ||
35 | #include "dccp.h" | 36 | #include "dccp.h" |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 0ef7061920c0..b03ecf6b2bb0 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/if_arp.h> | 20 | #include <linux/if_arp.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/random.h> | 22 | #include <linux/random.h> |
23 | #include <linux/slab.h> | ||
23 | #include <net/checksum.h> | 24 | #include <net/checksum.h> |
24 | 25 | ||
25 | #include <net/inet_sock.h> | 26 | #include <net/inet_sock.h> |
@@ -311,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, | |||
311 | unsigned int mask; | 312 | unsigned int mask; |
312 | struct sock *sk = sock->sk; | 313 | struct sock *sk = sock->sk; |
313 | 314 | ||
314 | sock_poll_wait(file, sk->sk_sleep, wait); | 315 | sock_poll_wait(file, sk_sleep(sk), wait); |
315 | if (sk->sk_state == DCCP_LISTEN) | 316 | if (sk->sk_state == DCCP_LISTEN) |
316 | return inet_csk_listen_poll(sk); | 317 | return inet_csk_listen_poll(sk); |
317 | 318 | ||
@@ -1036,7 +1037,7 @@ static int __init dccp_init(void) | |||
1036 | FIELD_SIZEOF(struct sk_buff, cb)); | 1037 | FIELD_SIZEOF(struct sk_buff, cb)); |
1037 | rc = percpu_counter_init(&dccp_orphan_count, 0); | 1038 | rc = percpu_counter_init(&dccp_orphan_count, 0); |
1038 | if (rc) | 1039 | if (rc) |
1039 | goto out; | 1040 | goto out_fail; |
1040 | rc = -ENOBUFS; | 1041 | rc = -ENOBUFS; |
1041 | inet_hashinfo_init(&dccp_hashinfo); | 1042 | inet_hashinfo_init(&dccp_hashinfo); |
1042 | dccp_hashinfo.bind_bucket_cachep = | 1043 | dccp_hashinfo.bind_bucket_cachep = |
@@ -1125,8 +1126,9 @@ static int __init dccp_init(void) | |||
1125 | goto out_sysctl_exit; | 1126 | goto out_sysctl_exit; |
1126 | 1127 | ||
1127 | dccp_timestamping_init(); | 1128 | dccp_timestamping_init(); |
1128 | out: | 1129 | |
1129 | return rc; | 1130 | return 0; |
1131 | |||
1130 | out_sysctl_exit: | 1132 | out_sysctl_exit: |
1131 | dccp_sysctl_exit(); | 1133 | dccp_sysctl_exit(); |
1132 | out_ackvec_exit: | 1134 | out_ackvec_exit: |
@@ -1135,18 +1137,19 @@ out_free_dccp_mib: | |||
1135 | dccp_mib_exit(); | 1137 | dccp_mib_exit(); |
1136 | out_free_dccp_bhash: | 1138 | out_free_dccp_bhash: |
1137 | free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); | 1139 | free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); |
1138 | dccp_hashinfo.bhash = NULL; | ||
1139 | out_free_dccp_locks: | 1140 | out_free_dccp_locks: |
1140 | inet_ehash_locks_free(&dccp_hashinfo); | 1141 | inet_ehash_locks_free(&dccp_hashinfo); |
1141 | out_free_dccp_ehash: | 1142 | out_free_dccp_ehash: |
1142 | free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); | 1143 | free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); |
1143 | dccp_hashinfo.ehash = NULL; | ||
1144 | out_free_bind_bucket_cachep: | 1144 | out_free_bind_bucket_cachep: |
1145 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); | 1145 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); |
1146 | dccp_hashinfo.bind_bucket_cachep = NULL; | ||
1147 | out_free_percpu: | 1146 | out_free_percpu: |
1148 | percpu_counter_destroy(&dccp_orphan_count); | 1147 | percpu_counter_destroy(&dccp_orphan_count); |
1149 | goto out; | 1148 | out_fail: |
1149 | dccp_hashinfo.bhash = NULL; | ||
1150 | dccp_hashinfo.ehash = NULL; | ||
1151 | dccp_hashinfo.bind_bucket_cachep = NULL; | ||
1152 | return rc; | ||
1150 | } | 1153 | } |
1151 | 1154 | ||
1152 | static void __exit dccp_fini(void) | 1155 | static void __exit dccp_fini(void) |
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index bbfeb5eae46a..1a9aa05d4dc4 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
38 | 38 | ||
39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { | 39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
40 | if (icsk->icsk_retransmits != 0) | 40 | if (icsk->icsk_retransmits != 0) |
41 | dst_negative_advice(&sk->sk_dst_cache, sk); | 41 | dst_negative_advice(sk); |
42 | retry_until = icsk->icsk_syn_retries ? | 42 | retry_until = icsk->icsk_syn_retries ? |
43 | : sysctl_dccp_request_retries; | 43 | : sysctl_dccp_request_retries; |
44 | } else { | 44 | } else { |
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
63 | Golden words :-). | 63 | Golden words :-). |
64 | */ | 64 | */ |
65 | 65 | ||
66 | dst_negative_advice(&sk->sk_dst_cache, sk); | 66 | dst_negative_advice(sk); |
67 | } | 67 | } |
68 | 68 | ||
69 | retry_until = sysctl_dccp_retries2; | 69 | retry_until = sysctl_dccp_retries2; |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 2b494fac9468..d6b93d19790f 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -446,7 +446,7 @@ static void dn_destruct(struct sock *sk) | |||
446 | skb_queue_purge(&scp->other_xmit_queue); | 446 | skb_queue_purge(&scp->other_xmit_queue); |
447 | skb_queue_purge(&scp->other_receive_queue); | 447 | skb_queue_purge(&scp->other_receive_queue); |
448 | 448 | ||
449 | dst_release(xchg(&sk->sk_dst_cache, NULL)); | 449 | dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); |
450 | } | 450 | } |
451 | 451 | ||
452 | static int dn_memory_pressure; | 452 | static int dn_memory_pressure; |
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) | |||
832 | scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); | 832 | scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); |
833 | dn_send_conn_conf(sk, allocation); | 833 | dn_send_conn_conf(sk, allocation); |
834 | 834 | ||
835 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 835 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
836 | for(;;) { | 836 | for(;;) { |
837 | release_sock(sk); | 837 | release_sock(sk); |
838 | if (scp->state == DN_CC) | 838 | if (scp->state == DN_CC) |
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) | |||
850 | err = -EAGAIN; | 850 | err = -EAGAIN; |
851 | if (!*timeo) | 851 | if (!*timeo) |
852 | break; | 852 | break; |
853 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 853 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
854 | } | 854 | } |
855 | finish_wait(sk->sk_sleep, &wait); | 855 | finish_wait(sk_sleep(sk), &wait); |
856 | if (err == 0) { | 856 | if (err == 0) { |
857 | sk->sk_socket->state = SS_CONNECTED; | 857 | sk->sk_socket->state = SS_CONNECTED; |
858 | } else if (scp->state != DN_CC) { | 858 | } else if (scp->state != DN_CC) { |
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo) | |||
873 | if (!*timeo) | 873 | if (!*timeo) |
874 | return -EALREADY; | 874 | return -EALREADY; |
875 | 875 | ||
876 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 876 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
877 | for(;;) { | 877 | for(;;) { |
878 | release_sock(sk); | 878 | release_sock(sk); |
879 | if (scp->state == DN_CI || scp->state == DN_CC) | 879 | if (scp->state == DN_CI || scp->state == DN_CC) |
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo) | |||
891 | err = -ETIMEDOUT; | 891 | err = -ETIMEDOUT; |
892 | if (!*timeo) | 892 | if (!*timeo) |
893 | break; | 893 | break; |
894 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 894 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
895 | } | 895 | } |
896 | finish_wait(sk->sk_sleep, &wait); | 896 | finish_wait(sk_sleep(sk), &wait); |
897 | out: | 897 | out: |
898 | if (err == 0) { | 898 | if (err == 0) { |
899 | sk->sk_socket->state = SS_CONNECTED; | 899 | sk->sk_socket->state = SS_CONNECTED; |
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) | |||
1040 | struct sk_buff *skb = NULL; | 1040 | struct sk_buff *skb = NULL; |
1041 | int err = 0; | 1041 | int err = 0; |
1042 | 1042 | ||
1043 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1043 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1044 | for(;;) { | 1044 | for(;;) { |
1045 | release_sock(sk); | 1045 | release_sock(sk); |
1046 | skb = skb_dequeue(&sk->sk_receive_queue); | 1046 | skb = skb_dequeue(&sk->sk_receive_queue); |
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) | |||
1060 | err = -EAGAIN; | 1060 | err = -EAGAIN; |
1061 | if (!*timeo) | 1061 | if (!*timeo) |
1062 | break; | 1062 | break; |
1063 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1063 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1064 | } | 1064 | } |
1065 | finish_wait(sk->sk_sleep, &wait); | 1065 | finish_wait(sk_sleep(sk), &wait); |
1066 | 1066 | ||
1067 | return skb == NULL ? ERR_PTR(err) : skb; | 1067 | return skb == NULL ? ERR_PTR(err) : skb; |
1068 | } | 1068 | } |
@@ -1105,7 +1105,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1105 | release_sock(sk); | 1105 | release_sock(sk); |
1106 | 1106 | ||
1107 | dst = skb_dst(skb); | 1107 | dst = skb_dst(skb); |
1108 | dst_release(xchg(&newsk->sk_dst_cache, dst)); | 1108 | sk_dst_set(newsk, dst); |
1109 | skb_dst_set(skb, NULL); | 1109 | skb_dst_set(skb, NULL); |
1110 | 1110 | ||
1111 | DN_SK(newsk)->state = DN_CR; | 1111 | DN_SK(newsk)->state = DN_CR; |
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1746 | goto out; | 1746 | goto out; |
1747 | } | 1747 | } |
1748 | 1748 | ||
1749 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1749 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1750 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1750 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); | 1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); |
1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1753 | finish_wait(sk->sk_sleep, &wait); | 1753 | finish_wait(sk_sleep(sk), &wait); |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | skb_queue_walk_safe(queue, skb, n) { | 1756 | skb_queue_walk_safe(queue, skb, n) { |
@@ -1956,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1956 | } | 1956 | } |
1957 | 1957 | ||
1958 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) | 1958 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) |
1959 | dst_negative_advice(&sk->sk_dst_cache, sk); | 1959 | dst_negative_advice(sk); |
1960 | 1960 | ||
1961 | mss = scp->segsize_rem; | 1961 | mss = scp->segsize_rem; |
1962 | fctype = scp->services_rem & NSP_FC_MASK; | 1962 | fctype = scp->services_rem & NSP_FC_MASK; |
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
2003 | goto out; | 2003 | goto out; |
2004 | } | 2004 | } |
2005 | 2005 | ||
2006 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 2006 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2007 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2007 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
2008 | sk_wait_event(sk, &timeo, | 2008 | sk_wait_event(sk, &timeo, |
2009 | !dn_queue_too_long(scp, queue, flags)); | 2009 | !dn_queue_too_long(scp, queue, flags)); |
2010 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2010 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
2011 | finish_wait(sk->sk_sleep, &wait); | 2011 | finish_wait(sk_sleep(sk), &wait); |
2012 | continue; | 2012 | continue; |
2013 | } | 2013 | } |
2014 | 2014 | ||
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 238af093495b..615dbe3b43f9 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/sysctl.h> | 41 | #include <linux/sysctl.h> |
42 | #include <linux/notifier.h> | 42 | #include <linux/notifier.h> |
43 | #include <linux/slab.h> | ||
43 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
44 | #include <asm/system.h> | 45 | #include <asm/system.h> |
45 | #include <net/net_namespace.h> | 46 | #include <net/net_namespace.h> |
@@ -349,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de | |||
349 | if (dn_db->dev->type == ARPHRD_ETHER) { | 350 | if (dn_db->dev->type == ARPHRD_ETHER) { |
350 | if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { | 351 | if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { |
351 | dn_dn2eth(mac_addr, ifa1->ifa_local); | 352 | dn_dn2eth(mac_addr, ifa1->ifa_local); |
352 | dev_mc_delete(dev, mac_addr, ETH_ALEN, 0); | 353 | dev_mc_del(dev, mac_addr); |
353 | } | 354 | } |
354 | } | 355 | } |
355 | 356 | ||
@@ -380,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) | |||
380 | if (dev->type == ARPHRD_ETHER) { | 381 | if (dev->type == ARPHRD_ETHER) { |
381 | if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { | 382 | if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { |
382 | dn_dn2eth(mac_addr, ifa->ifa_local); | 383 | dn_dn2eth(mac_addr, ifa->ifa_local); |
383 | dev_mc_add(dev, mac_addr, ETH_ALEN, 0); | 384 | dev_mc_add(dev, mac_addr); |
384 | } | 385 | } |
385 | } | 386 | } |
386 | 387 | ||
@@ -1000,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev) | |||
1000 | struct dn_dev *dn_db = dev->dn_ptr; | 1001 | struct dn_dev *dn_db = dev->dn_ptr; |
1001 | 1002 | ||
1002 | if (dn_db->parms.forwarding == 0) | 1003 | if (dn_db->parms.forwarding == 0) |
1003 | dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); | 1004 | dev_mc_add(dev, dn_rt_all_end_mcast); |
1004 | else | 1005 | else |
1005 | dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); | 1006 | dev_mc_add(dev, dn_rt_all_rt_mcast); |
1006 | 1007 | ||
1007 | dn_db->use_long = 1; | 1008 | dn_db->use_long = 1; |
1008 | 1009 | ||
@@ -1014,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev) | |||
1014 | struct dn_dev *dn_db = dev->dn_ptr; | 1015 | struct dn_dev *dn_db = dev->dn_ptr; |
1015 | 1016 | ||
1016 | if (dn_db->parms.forwarding == 0) | 1017 | if (dn_db->parms.forwarding == 0) |
1017 | dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); | 1018 | dev_mc_del(dev, dn_rt_all_end_mcast); |
1018 | else | 1019 | else |
1019 | dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); | 1020 | dev_mc_del(dev, dn_rt_all_rt_mcast); |
1020 | } | 1021 | } |
1021 | 1022 | ||
1022 | static void dn_dev_set_timer(struct net_device *dev); | 1023 | static void dn_dev_set_timer(struct net_device *dev); |
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index e9d48700e83a..4ab96c15166d 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/net.h> | 21 | #include <linux/net.h> |
22 | #include <linux/socket.h> | 22 | #include <linux/socket.h> |
23 | #include <linux/slab.h> | ||
23 | #include <linux/sockios.h> | 24 | #include <linux/sockios.h> |
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
25 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 794b5bf95af1..deb723dba44b 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/socket.h> | 29 | #include <linux/socket.h> |
30 | #include <linux/if_arp.h> | 30 | #include <linux/if_arp.h> |
31 | #include <linux/slab.h> | ||
31 | #include <linux/if_ether.h> | 32 | #include <linux/if_ether.h> |
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/proc_fs.h> | 34 | #include <linux/proc_fs.h> |
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 932408dca86d..25a37299bc65 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/netdevice.h> | 57 | #include <linux/netdevice.h> |
58 | #include <linux/inet.h> | 58 | #include <linux/inet.h> |
59 | #include <linux/route.h> | 59 | #include <linux/route.h> |
60 | #include <linux/slab.h> | ||
60 | #include <net/sock.h> | 61 | #include <net/sock.h> |
61 | #include <net/tcp_states.h> | 62 | #include <net/tcp_states.h> |
62 | #include <asm/system.h> | 63 | #include <asm/system.h> |
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index a65e929ce76c..baeb1eaf011b 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/netdevice.h> | 50 | #include <linux/netdevice.h> |
51 | #include <linux/inet.h> | 51 | #include <linux/inet.h> |
52 | #include <linux/route.h> | 52 | #include <linux/route.h> |
53 | #include <linux/slab.h> | ||
53 | #include <net/sock.h> | 54 | #include <net/sock.h> |
54 | #include <asm/system.h> | 55 | #include <asm/system.h> |
55 | #include <linux/fcntl.h> | 56 | #include <linux/fcntl.h> |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index a7bf03ca0a36..70ebe74027d5 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -66,6 +66,7 @@ | |||
66 | #include <linux/inet.h> | 66 | #include <linux/inet.h> |
67 | #include <linux/route.h> | 67 | #include <linux/route.h> |
68 | #include <linux/in_route.h> | 68 | #include <linux/in_route.h> |
69 | #include <linux/slab.h> | ||
69 | #include <net/sock.h> | 70 | #include <net/sock.h> |
70 | #include <linux/mm.h> | 71 | #include <linux/mm.h> |
71 | #include <linux/proc_fs.h> | 72 | #include <linux/proc_fs.h> |
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 7466c546f286..48fdf10be7a1 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
@@ -196,7 +196,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
196 | { | 196 | { |
197 | struct dn_fib_rule *r = (struct dn_fib_rule *)rule; | 197 | struct dn_fib_rule *r = (struct dn_fib_rule *)rule; |
198 | 198 | ||
199 | frh->family = AF_DECnet; | ||
200 | frh->dst_len = r->dst_len; | 199 | frh->dst_len = r->dst_len; |
201 | frh->src_len = r->src_len; | 200 | frh->src_len = r->src_len; |
202 | frh->tos = 0; | 201 | frh->tos = 0; |
@@ -212,29 +211,12 @@ nla_put_failure: | |||
212 | return -ENOBUFS; | 211 | return -ENOBUFS; |
213 | } | 212 | } |
214 | 213 | ||
215 | static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops) | ||
216 | { | ||
217 | struct list_head *pos; | ||
218 | struct fib_rule *rule; | ||
219 | |||
220 | if (!list_empty(&dn_fib_rules_ops->rules_list)) { | ||
221 | pos = dn_fib_rules_ops->rules_list.next; | ||
222 | if (pos->next != &dn_fib_rules_ops->rules_list) { | ||
223 | rule = list_entry(pos->next, struct fib_rule, list); | ||
224 | if (rule->pref) | ||
225 | return rule->pref - 1; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) | 214 | static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) |
233 | { | 215 | { |
234 | dn_rt_cache_flush(-1); | 216 | dn_rt_cache_flush(-1); |
235 | } | 217 | } |
236 | 218 | ||
237 | static struct fib_rules_ops dn_fib_rules_ops_template = { | 219 | static const struct fib_rules_ops __net_initdata dn_fib_rules_ops_template = { |
238 | .family = AF_DECnet, | 220 | .family = AF_DECnet, |
239 | .rule_size = sizeof(struct dn_fib_rule), | 221 | .rule_size = sizeof(struct dn_fib_rule), |
240 | .addr_size = sizeof(u16), | 222 | .addr_size = sizeof(u16), |
@@ -243,7 +225,7 @@ static struct fib_rules_ops dn_fib_rules_ops_template = { | |||
243 | .configure = dn_fib_rule_configure, | 225 | .configure = dn_fib_rule_configure, |
244 | .compare = dn_fib_rule_compare, | 226 | .compare = dn_fib_rule_compare, |
245 | .fill = dn_fib_rule_fill, | 227 | .fill = dn_fib_rule_fill, |
246 | .default_pref = dn_fib_rule_default_pref, | 228 | .default_pref = fib_default_rule_pref, |
247 | .flush_cache = dn_fib_rule_flush_cache, | 229 | .flush_cache = dn_fib_rule_flush_cache, |
248 | .nlgroup = RTNLGRP_DECnet_RULE, | 230 | .nlgroup = RTNLGRP_DECnet_RULE, |
249 | .policy = dn_fib_rule_policy, | 231 | .policy = dn_fib_rule_policy, |
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index b9a33bb5e9cc..f2abd3755690 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/net.h> | 16 | #include <linux/net.h> |
17 | #include <linux/socket.h> | 17 | #include <linux/socket.h> |
18 | #include <linux/slab.h> | ||
18 | #include <linux/sockios.h> | 19 | #include <linux/sockios.h> |
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 6d2bd3202048..64a7f39e069f 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
18 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
19 | #include <linux/netfilter.h> | 20 | #include <linux/netfilter.h> |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 71489f69a42c..6112a12578b2 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/slab.h> | ||
14 | #include <net/dsa.h> | 15 | #include <net/dsa.h> |
15 | #include "dsa_priv.h" | 16 | #include "dsa_priv.h" |
16 | 17 | ||
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 2175e6d5cc8d..8fdca56bb08f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev) | |||
67 | return -ENETDOWN; | 67 | return -ENETDOWN; |
68 | 68 | ||
69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { | 69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { |
70 | err = dev_unicast_add(master, dev->dev_addr); | 70 | err = dev_uc_add(master, dev->dev_addr); |
71 | if (err < 0) | 71 | if (err < 0) |
72 | goto out; | 72 | goto out; |
73 | } | 73 | } |
@@ -90,7 +90,7 @@ clear_allmulti: | |||
90 | dev_set_allmulti(master, -1); | 90 | dev_set_allmulti(master, -1); |
91 | del_unicast: | 91 | del_unicast: |
92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
93 | dev_unicast_delete(master, dev->dev_addr); | 93 | dev_uc_del(master, dev->dev_addr); |
94 | out: | 94 | out: |
95 | return err; | 95 | return err; |
96 | } | 96 | } |
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev) | |||
101 | struct net_device *master = p->parent->dst->master_netdev; | 101 | struct net_device *master = p->parent->dst->master_netdev; |
102 | 102 | ||
103 | dev_mc_unsync(master, dev); | 103 | dev_mc_unsync(master, dev); |
104 | dev_unicast_unsync(master, dev); | 104 | dev_uc_unsync(master, dev); |
105 | if (dev->flags & IFF_ALLMULTI) | 105 | if (dev->flags & IFF_ALLMULTI) |
106 | dev_set_allmulti(master, -1); | 106 | dev_set_allmulti(master, -1); |
107 | if (dev->flags & IFF_PROMISC) | 107 | if (dev->flags & IFF_PROMISC) |
108 | dev_set_promiscuity(master, -1); | 108 | dev_set_promiscuity(master, -1); |
109 | 109 | ||
110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
111 | dev_unicast_delete(master, dev->dev_addr); | 111 | dev_uc_del(master, dev->dev_addr); |
112 | 112 | ||
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev) | |||
130 | struct net_device *master = p->parent->dst->master_netdev; | 130 | struct net_device *master = p->parent->dst->master_netdev; |
131 | 131 | ||
132 | dev_mc_sync(master, dev); | 132 | dev_mc_sync(master, dev); |
133 | dev_unicast_sync(master, dev); | 133 | dev_uc_sync(master, dev); |
134 | } | 134 | } |
135 | 135 | ||
136 | static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | 136 | static int dsa_slave_set_mac_address(struct net_device *dev, void *a) |
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | |||
147 | goto out; | 147 | goto out; |
148 | 148 | ||
149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { | 149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { |
150 | err = dev_unicast_add(master, addr->sa_data); | 150 | err = dev_uc_add(master, addr->sa_data); |
151 | if (err < 0) | 151 | if (err < 0) |
152 | return err; | 152 | return err; |
153 | } | 153 | } |
154 | 154 | ||
155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
156 | dev_unicast_delete(master, dev->dev_addr); | 156 | dev_uc_del(master, dev->dev_addr); |
157 | 157 | ||
158 | out: | 158 | out: |
159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index cdf2d28a0297..98dfe80b4538 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/etherdevice.h> | 11 | #include <linux/etherdevice.h> |
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
14 | #include <linux/slab.h> | ||
14 | #include "dsa_priv.h" | 15 | #include "dsa_priv.h" |
15 | 16 | ||
16 | #define DSA_HLEN 4 | 17 | #define DSA_HLEN 4 |
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 8f53948cff4f..6f383322ad25 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/etherdevice.h> | 11 | #include <linux/etherdevice.h> |
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
14 | #include <linux/slab.h> | ||
14 | #include "dsa_priv.h" | 15 | #include "dsa_priv.h" |
15 | 16 | ||
16 | #define DSA_HLEN 4 | 17 | #define DSA_HLEN 4 |
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index a85c829853c0..d6d7d0add3cb 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/etherdevice.h> | 11 | #include <linux/etherdevice.h> |
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
14 | #include <linux/slab.h> | ||
14 | #include "dsa_priv.h" | 15 | #include "dsa_priv.h" |
15 | 16 | ||
16 | netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) | 17 | netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) |
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 29b4931aae52..2a5a8053e000 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/wireless.h> | 30 | #include <linux/wireless.h> |
31 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
32 | #include <linux/udp.h> | 32 | #include <linux/udp.h> |
33 | #include <linux/slab.h> | ||
33 | #include <net/sock.h> | 34 | #include <net/sock.h> |
34 | #include <net/inet_common.h> | 35 | #include <net/inet_common.h> |
35 | #include <linux/stat.h> | 36 | #include <linux/stat.h> |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 205a1c12f3c0..61ec0329316c 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb) | |||
136 | default: | 136 | default: |
137 | printk(KERN_DEBUG | 137 | printk(KERN_DEBUG |
138 | "%s: unable to resolve type %X addresses.\n", | 138 | "%s: unable to resolve type %X addresses.\n", |
139 | dev->name, (int)eth->h_proto); | 139 | dev->name, ntohs(eth->h_proto)); |
140 | 140 | ||
141 | memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); | 141 | memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); |
142 | break; | 142 | break; |
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
162 | 162 | ||
163 | skb->dev = dev; | 163 | skb->dev = dev; |
164 | skb_reset_mac_header(skb); | 164 | skb_reset_mac_header(skb); |
165 | skb_pull(skb, ETH_HLEN); | 165 | skb_pull_inline(skb, ETH_HLEN); |
166 | eth = eth_hdr(skb); | 166 | eth = eth_hdr(skb); |
167 | 167 | ||
168 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { | 168 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { |
diff --git a/net/ethernet/pe2.c b/net/ethernet/pe2.c index d60e15d9365e..eb00796758c3 100644 --- a/net/ethernet/pe2.c +++ b/net/ethernet/pe2.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
4 | #include <linux/netdevice.h> | 4 | #include <linux/netdevice.h> |
5 | #include <linux/skbuff.h> | 5 | #include <linux/skbuff.h> |
6 | #include <linux/slab.h> | ||
6 | 7 | ||
7 | #include <net/datalink.h> | 8 | #include <net/datalink.h> |
8 | 9 | ||
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index bad1c49fd960..93c91b633a56 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/if.h> | 28 | #include <linux/if.h> |
29 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ | 29 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ |
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <linux/slab.h> | ||
31 | #include <net/datalink.h> | 32 | #include <net/datalink.h> |
32 | #include <net/psnap.h> | 33 | #include <net/psnap.h> |
33 | #include <net/sock.h> | 34 | #include <net/sock.h> |
@@ -126,6 +127,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, | |||
126 | { | 127 | { |
127 | struct sock *sk = sock->sk; | 128 | struct sock *sk = sock->sk; |
128 | 129 | ||
130 | if (addr_len < sizeof(uaddr->sa_family)) | ||
131 | return -EINVAL; | ||
132 | |||
129 | if (uaddr->sa_family == AF_UNSPEC) | 133 | if (uaddr->sa_family == AF_UNSPEC) |
130 | return sk->sk_prot->disconnect(sk, flags); | 134 | return sk->sk_prot->disconnect(sk, flags); |
131 | 135 | ||
@@ -147,6 +151,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, | |||
147 | dev_load(sock_net(sk), ifr.ifr_name); | 151 | dev_load(sock_net(sk), ifr.ifr_name); |
148 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); | 152 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); |
149 | 153 | ||
154 | if (!dev) | ||
155 | return -ENODEV; | ||
156 | |||
150 | if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) | 157 | if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) |
151 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); | 158 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); |
152 | 159 | ||
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 9aac5aee1575..1a3334c2609a 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/if_arp.h> | 26 | #include <linux/if_arp.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/sock.h> | 29 | #include <net/sock.h> |
29 | #include <net/af_ieee802154.h> | 30 | #include <net/af_ieee802154.h> |
30 | #include <net/ieee802154.h> | 31 | #include <net/ieee802154.h> |
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index 33137b99e471..c8097ae2482f 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/gfp.h> | ||
26 | #include <net/genetlink.h> | 27 | #include <net/genetlink.h> |
27 | #include <linux/nl802154.h> | 28 | #include <linux/nl802154.h> |
28 | 29 | ||
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 135c1678fb11..71ee1108d4f8 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * Maxim Osipov <maxim.osipov@siemens.com> | 22 | * Maxim Osipov <maxim.osipov@siemens.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/gfp.h> | ||
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/if_arp.h> | 27 | #include <linux/if_arp.h> |
27 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 199a2d9d12f9..ed0eab39f531 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/slab.h> | ||
26 | #include <net/netlink.h> | 27 | #include <net/netlink.h> |
27 | #include <net/genetlink.h> | 28 | #include <net/genetlink.h> |
28 | #include <net/wpan-phy.h> | 29 | #include <net/wpan-phy.h> |
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 9c9b85c00033..10970ca85748 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/if_arp.h> | 26 | #include <linux/if_arp.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/sock.h> | 29 | #include <net/sock.h> |
29 | #include <net/af_ieee802154.h> | 30 | #include <net/af_ieee802154.h> |
30 | 31 | ||
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c index 268691256a6d..3d803a1b9fb6 100644 --- a/net/ieee802154/wpan-class.c +++ b/net/ieee802154/wpan-class.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/slab.h> | ||
19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 0c94a1ac2946..8e3a1fd938ab 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -250,6 +250,20 @@ config IP_MROUTE | |||
250 | <file:Documentation/networking/multicast.txt>. If you haven't heard | 250 | <file:Documentation/networking/multicast.txt>. If you haven't heard |
251 | about it, you don't need it. | 251 | about it, you don't need it. |
252 | 252 | ||
253 | config IP_MROUTE_MULTIPLE_TABLES | ||
254 | bool "IP: multicast policy routing" | ||
255 | depends on IP_MROUTE && IP_ADVANCED_ROUTER | ||
256 | select FIB_RULES | ||
257 | help | ||
258 | Normally, a multicast router runs a userspace daemon and decides | ||
259 | what to do with a multicast packet based on the source and | ||
260 | destination addresses. If you say Y here, the multicast router | ||
261 | will also be able to take interfaces and packet marks into | ||
262 | account and run multiple instances of userspace daemons | ||
263 | simultaneously, each one handling a single table. | ||
264 | |||
265 | If unsure, say N. | ||
266 | |||
253 | config IP_PIMSM_V1 | 267 | config IP_PIMSM_V1 |
254 | bool "IP: PIM-SM version 1 support" | 268 | bool "IP: PIM-SM version 1 support" |
255 | depends on IP_MROUTE | 269 | depends on IP_MROUTE |
@@ -587,9 +601,15 @@ choice | |||
587 | config DEFAULT_HTCP | 601 | config DEFAULT_HTCP |
588 | bool "Htcp" if TCP_CONG_HTCP=y | 602 | bool "Htcp" if TCP_CONG_HTCP=y |
589 | 603 | ||
604 | config DEFAULT_HYBLA | ||
605 | bool "Hybla" if TCP_CONG_HYBLA=y | ||
606 | |||
590 | config DEFAULT_VEGAS | 607 | config DEFAULT_VEGAS |
591 | bool "Vegas" if TCP_CONG_VEGAS=y | 608 | bool "Vegas" if TCP_CONG_VEGAS=y |
592 | 609 | ||
610 | config DEFAULT_VENO | ||
611 | bool "Veno" if TCP_CONG_VENO=y | ||
612 | |||
593 | config DEFAULT_WESTWOOD | 613 | config DEFAULT_WESTWOOD |
594 | bool "Westwood" if TCP_CONG_WESTWOOD=y | 614 | bool "Westwood" if TCP_CONG_WESTWOOD=y |
595 | 615 | ||
@@ -610,8 +630,10 @@ config DEFAULT_TCP_CONG | |||
610 | default "bic" if DEFAULT_BIC | 630 | default "bic" if DEFAULT_BIC |
611 | default "cubic" if DEFAULT_CUBIC | 631 | default "cubic" if DEFAULT_CUBIC |
612 | default "htcp" if DEFAULT_HTCP | 632 | default "htcp" if DEFAULT_HTCP |
633 | default "hybla" if DEFAULT_HYBLA | ||
613 | default "vegas" if DEFAULT_VEGAS | 634 | default "vegas" if DEFAULT_VEGAS |
614 | default "westwood" if DEFAULT_WESTWOOD | 635 | default "westwood" if DEFAULT_WESTWOOD |
636 | default "veno" if DEFAULT_VENO | ||
615 | default "reno" if DEFAULT_RENO | 637 | default "reno" if DEFAULT_RENO |
616 | default "cubic" | 638 | default "cubic" |
617 | 639 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 33b7dffa7732..c6c43bcd1c6f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -86,6 +86,7 @@ | |||
86 | #include <linux/poll.h> | 86 | #include <linux/poll.h> |
87 | #include <linux/netfilter_ipv4.h> | 87 | #include <linux/netfilter_ipv4.h> |
88 | #include <linux/random.h> | 88 | #include <linux/random.h> |
89 | #include <linux/slab.h> | ||
89 | 90 | ||
90 | #include <asm/uaccess.h> | 91 | #include <asm/uaccess.h> |
91 | #include <asm/system.h> | 92 | #include <asm/system.h> |
@@ -153,7 +154,7 @@ void inet_sock_destruct(struct sock *sk) | |||
153 | WARN_ON(sk->sk_forward_alloc); | 154 | WARN_ON(sk->sk_forward_alloc); |
154 | 155 | ||
155 | kfree(inet->opt); | 156 | kfree(inet->opt); |
156 | dst_release(sk->sk_dst_cache); | 157 | dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); |
157 | sk_refcnt_debug_dec(sk); | 158 | sk_refcnt_debug_dec(sk); |
158 | } | 159 | } |
159 | EXPORT_SYMBOL(inet_sock_destruct); | 160 | EXPORT_SYMBOL(inet_sock_destruct); |
@@ -418,6 +419,8 @@ int inet_release(struct socket *sock) | |||
418 | if (sk) { | 419 | if (sk) { |
419 | long timeout; | 420 | long timeout; |
420 | 421 | ||
422 | sock_rps_reset_flow(sk); | ||
423 | |||
421 | /* Applications forget to leave groups before exiting */ | 424 | /* Applications forget to leave groups before exiting */ |
422 | ip_mc_drop_socket(sk); | 425 | ip_mc_drop_socket(sk); |
423 | 426 | ||
@@ -530,6 +533,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, | |||
530 | { | 533 | { |
531 | struct sock *sk = sock->sk; | 534 | struct sock *sk = sock->sk; |
532 | 535 | ||
536 | if (addr_len < sizeof(uaddr->sa_family)) | ||
537 | return -EINVAL; | ||
533 | if (uaddr->sa_family == AF_UNSPEC) | 538 | if (uaddr->sa_family == AF_UNSPEC) |
534 | return sk->sk_prot->disconnect(sk, flags); | 539 | return sk->sk_prot->disconnect(sk, flags); |
535 | 540 | ||
@@ -543,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo) | |||
543 | { | 548 | { |
544 | DEFINE_WAIT(wait); | 549 | DEFINE_WAIT(wait); |
545 | 550 | ||
546 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 551 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
547 | 552 | ||
548 | /* Basic assumption: if someone sets sk->sk_err, he _must_ | 553 | /* Basic assumption: if someone sets sk->sk_err, he _must_ |
549 | * change state of the socket from TCP_SYN_*. | 554 | * change state of the socket from TCP_SYN_*. |
@@ -556,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo) | |||
556 | lock_sock(sk); | 561 | lock_sock(sk); |
557 | if (signal_pending(current) || !timeo) | 562 | if (signal_pending(current) || !timeo) |
558 | break; | 563 | break; |
559 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 564 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
560 | } | 565 | } |
561 | finish_wait(sk->sk_sleep, &wait); | 566 | finish_wait(sk_sleep(sk), &wait); |
562 | return timeo; | 567 | return timeo; |
563 | } | 568 | } |
564 | 569 | ||
@@ -573,6 +578,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, | |||
573 | int err; | 578 | int err; |
574 | long timeo; | 579 | long timeo; |
575 | 580 | ||
581 | if (addr_len < sizeof(uaddr->sa_family)) | ||
582 | return -EINVAL; | ||
583 | |||
576 | lock_sock(sk); | 584 | lock_sock(sk); |
577 | 585 | ||
578 | if (uaddr->sa_family == AF_UNSPEC) { | 586 | if (uaddr->sa_family == AF_UNSPEC) { |
@@ -714,6 +722,8 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
714 | { | 722 | { |
715 | struct sock *sk = sock->sk; | 723 | struct sock *sk = sock->sk; |
716 | 724 | ||
725 | sock_rps_record_flow(sk); | ||
726 | |||
717 | /* We may need to bind the socket. */ | 727 | /* We may need to bind the socket. */ |
718 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) | 728 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
719 | return -EAGAIN; | 729 | return -EAGAIN; |
@@ -722,12 +732,13 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
722 | } | 732 | } |
723 | EXPORT_SYMBOL(inet_sendmsg); | 733 | EXPORT_SYMBOL(inet_sendmsg); |
724 | 734 | ||
725 | |||
726 | static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, | 735 | static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, |
727 | size_t size, int flags) | 736 | size_t size, int flags) |
728 | { | 737 | { |
729 | struct sock *sk = sock->sk; | 738 | struct sock *sk = sock->sk; |
730 | 739 | ||
740 | sock_rps_record_flow(sk); | ||
741 | |||
731 | /* We may need to bind the socket. */ | 742 | /* We may need to bind the socket. */ |
732 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) | 743 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
733 | return -EAGAIN; | 744 | return -EAGAIN; |
@@ -737,6 +748,22 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, | |||
737 | return sock_no_sendpage(sock, page, offset, size, flags); | 748 | return sock_no_sendpage(sock, page, offset, size, flags); |
738 | } | 749 | } |
739 | 750 | ||
751 | int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | ||
752 | size_t size, int flags) | ||
753 | { | ||
754 | struct sock *sk = sock->sk; | ||
755 | int addr_len = 0; | ||
756 | int err; | ||
757 | |||
758 | sock_rps_record_flow(sk); | ||
759 | |||
760 | err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, | ||
761 | flags & ~MSG_DONTWAIT, &addr_len); | ||
762 | if (err >= 0) | ||
763 | msg->msg_namelen = addr_len; | ||
764 | return err; | ||
765 | } | ||
766 | EXPORT_SYMBOL(inet_recvmsg); | ||
740 | 767 | ||
741 | int inet_shutdown(struct socket *sock, int how) | 768 | int inet_shutdown(struct socket *sock, int how) |
742 | { | 769 | { |
@@ -866,7 +893,7 @@ const struct proto_ops inet_stream_ops = { | |||
866 | .setsockopt = sock_common_setsockopt, | 893 | .setsockopt = sock_common_setsockopt, |
867 | .getsockopt = sock_common_getsockopt, | 894 | .getsockopt = sock_common_getsockopt, |
868 | .sendmsg = tcp_sendmsg, | 895 | .sendmsg = tcp_sendmsg, |
869 | .recvmsg = sock_common_recvmsg, | 896 | .recvmsg = inet_recvmsg, |
870 | .mmap = sock_no_mmap, | 897 | .mmap = sock_no_mmap, |
871 | .sendpage = tcp_sendpage, | 898 | .sendpage = tcp_sendpage, |
872 | .splice_read = tcp_splice_read, | 899 | .splice_read = tcp_splice_read, |
@@ -893,7 +920,7 @@ const struct proto_ops inet_dgram_ops = { | |||
893 | .setsockopt = sock_common_setsockopt, | 920 | .setsockopt = sock_common_setsockopt, |
894 | .getsockopt = sock_common_getsockopt, | 921 | .getsockopt = sock_common_getsockopt, |
895 | .sendmsg = inet_sendmsg, | 922 | .sendmsg = inet_sendmsg, |
896 | .recvmsg = sock_common_recvmsg, | 923 | .recvmsg = inet_recvmsg, |
897 | .mmap = sock_no_mmap, | 924 | .mmap = sock_no_mmap, |
898 | .sendpage = inet_sendpage, | 925 | .sendpage = inet_sendpage, |
899 | #ifdef CONFIG_COMPAT | 926 | #ifdef CONFIG_COMPAT |
@@ -923,7 +950,7 @@ static const struct proto_ops inet_sockraw_ops = { | |||
923 | .setsockopt = sock_common_setsockopt, | 950 | .setsockopt = sock_common_setsockopt, |
924 | .getsockopt = sock_common_getsockopt, | 951 | .getsockopt = sock_common_getsockopt, |
925 | .sendmsg = inet_sendmsg, | 952 | .sendmsg = inet_sendmsg, |
926 | .recvmsg = sock_common_recvmsg, | 953 | .recvmsg = inet_recvmsg, |
927 | .mmap = sock_no_mmap, | 954 | .mmap = sock_no_mmap, |
928 | .sendpage = inet_sendpage, | 955 | .sendpage = inet_sendpage, |
929 | #ifdef CONFIG_COMPAT | 956 | #ifdef CONFIG_COMPAT |
@@ -1296,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1296 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) | 1323 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) |
1297 | goto out_unlock; | 1324 | goto out_unlock; |
1298 | 1325 | ||
1299 | id = ntohl(*(u32 *)&iph->id); | 1326 | id = ntohl(*(__be32 *)&iph->id); |
1300 | flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); | 1327 | flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); |
1301 | id >>= 16; | 1328 | id >>= 16; |
1302 | 1329 | ||
1303 | for (p = *head; p; p = p->next) { | 1330 | for (p = *head; p; p = p->next) { |
@@ -1310,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1310 | 1337 | ||
1311 | if ((iph->protocol ^ iph2->protocol) | | 1338 | if ((iph->protocol ^ iph2->protocol) | |
1312 | (iph->tos ^ iph2->tos) | | 1339 | (iph->tos ^ iph2->tos) | |
1313 | (iph->saddr ^ iph2->saddr) | | 1340 | ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | |
1314 | (iph->daddr ^ iph2->daddr)) { | 1341 | ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { |
1315 | NAPI_GRO_CB(p)->same_flow = 0; | 1342 | NAPI_GRO_CB(p)->same_flow = 0; |
1316 | continue; | 1343 | continue; |
1317 | } | 1344 | } |
@@ -1401,10 +1428,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field); | |||
1401 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) | 1428 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) |
1402 | { | 1429 | { |
1403 | BUG_ON(ptr == NULL); | 1430 | BUG_ON(ptr == NULL); |
1404 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); | 1431 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long)); |
1405 | if (!ptr[0]) | 1432 | if (!ptr[0]) |
1406 | goto err0; | 1433 | goto err0; |
1407 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); | 1434 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long)); |
1408 | if (!ptr[1]) | 1435 | if (!ptr[1]) |
1409 | goto err1; | 1436 | goto err1; |
1410 | return 0; | 1437 | return 0; |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 987b47dc69ad..880a5ec6dce0 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <crypto/hash.h> | 1 | #include <crypto/hash.h> |
2 | #include <linux/err.h> | 2 | #include <linux/err.h> |
3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
4 | #include <linux/slab.h> | ||
4 | #include <net/ip.h> | 5 | #include <net/ip.h> |
5 | #include <net/xfrm.h> | 6 | #include <net/xfrm.h> |
6 | #include <net/ah.h> | 7 | #include <net/ah.h> |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index c4dd13542802..6e747065c202 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -98,6 +98,7 @@ | |||
98 | #include <linux/net.h> | 98 | #include <linux/net.h> |
99 | #include <linux/rcupdate.h> | 99 | #include <linux/rcupdate.h> |
100 | #include <linux/jhash.h> | 100 | #include <linux/jhash.h> |
101 | #include <linux/slab.h> | ||
101 | #ifdef CONFIG_SYSCTL | 102 | #ifdef CONFIG_SYSCTL |
102 | #include <linux/sysctl.h> | 103 | #include <linux/sysctl.h> |
103 | #endif | 104 | #endif |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 1e029dc75455..c97cd9ff697e 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/string.h> | 44 | #include <linux/string.h> |
45 | #include <linux/jhash.h> | 45 | #include <linux/jhash.h> |
46 | #include <linux/audit.h> | 46 | #include <linux/audit.h> |
47 | #include <linux/slab.h> | ||
47 | #include <net/ip.h> | 48 | #include <net/ip.h> |
48 | #include <net/icmp.h> | 49 | #include <net/icmp.h> |
49 | #include <net/tcp.h> | 50 | #include <net/tcp.h> |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 51ca946e3392..382bc768ed56 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/notifier.h> | 50 | #include <linux/notifier.h> |
51 | #include <linux/inetdevice.h> | 51 | #include <linux/inetdevice.h> |
52 | #include <linux/igmp.h> | 52 | #include <linux/igmp.h> |
53 | #include <linux/slab.h> | ||
53 | #ifdef CONFIG_SYSCTL | 54 | #ifdef CONFIG_SYSCTL |
54 | #include <linux/sysctl.h> | 55 | #include <linux/sysctl.h> |
55 | #endif | 56 | #endif |
@@ -1095,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1095 | case NETDEV_DOWN: | 1096 | case NETDEV_DOWN: |
1096 | ip_mc_down(in_dev); | 1097 | ip_mc_down(in_dev); |
1097 | break; | 1098 | break; |
1098 | case NETDEV_BONDING_OLDTYPE: | 1099 | case NETDEV_PRE_TYPE_CHANGE: |
1099 | ip_mc_unmap(in_dev); | 1100 | ip_mc_unmap(in_dev); |
1100 | break; | 1101 | break; |
1101 | case NETDEV_BONDING_NEWTYPE: | 1102 | case NETDEV_POST_TYPE_CHANGE: |
1102 | ip_mc_remap(in_dev); | 1103 | ip_mc_remap(in_dev); |
1103 | break; | 1104 | break; |
1104 | case NETDEV_CHANGEMTU: | 1105 | case NETDEV_CHANGEMTU: |
@@ -1194,7 +1195,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | |||
1194 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 1195 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
1195 | if (idx < s_idx) | 1196 | if (idx < s_idx) |
1196 | goto cont; | 1197 | goto cont; |
1197 | if (idx > s_idx) | 1198 | if (h > s_h || idx > s_idx) |
1198 | s_ip_idx = 0; | 1199 | s_ip_idx = 0; |
1199 | in_dev = __in_dev_get_rcu(dev); | 1200 | in_dev = __in_dev_get_rcu(dev); |
1200 | if (!in_dev) | 1201 | if (!in_dev) |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 9b3e28ed5240..4f0ed458c883 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/list.h> | 36 | #include <linux/list.h> |
37 | #include <linux/slab.h> | ||
37 | 38 | ||
38 | #include <net/ip.h> | 39 | #include <net/ip.h> |
39 | #include <net/protocol.h> | 40 | #include <net/protocol.h> |
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 14972017b9c2..4ed7e0dea1bc 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/skbuff.h> | 32 | #include <linux/skbuff.h> |
33 | #include <linux/netlink.h> | 33 | #include <linux/netlink.h> |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/slab.h> | ||
35 | 36 | ||
36 | #include <net/net_namespace.h> | 37 | #include <net/net_namespace.h> |
37 | #include <net/ip.h> | 38 | #include <net/ip.h> |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index ca2d07b1c706..76daeb5ff564 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -213,7 +213,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
213 | { | 213 | { |
214 | struct fib4_rule *rule4 = (struct fib4_rule *) rule; | 214 | struct fib4_rule *rule4 = (struct fib4_rule *) rule; |
215 | 215 | ||
216 | frh->family = AF_INET; | ||
217 | frh->dst_len = rule4->dst_len; | 216 | frh->dst_len = rule4->dst_len; |
218 | frh->src_len = rule4->src_len; | 217 | frh->src_len = rule4->src_len; |
219 | frh->tos = rule4->tos; | 218 | frh->tos = rule4->tos; |
@@ -234,23 +233,6 @@ nla_put_failure: | |||
234 | return -ENOBUFS; | 233 | return -ENOBUFS; |
235 | } | 234 | } |
236 | 235 | ||
237 | static u32 fib4_rule_default_pref(struct fib_rules_ops *ops) | ||
238 | { | ||
239 | struct list_head *pos; | ||
240 | struct fib_rule *rule; | ||
241 | |||
242 | if (!list_empty(&ops->rules_list)) { | ||
243 | pos = ops->rules_list.next; | ||
244 | if (pos->next != &ops->rules_list) { | ||
245 | rule = list_entry(pos->next, struct fib_rule, list); | ||
246 | if (rule->pref) | ||
247 | return rule->pref - 1; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) | 236 | static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) |
255 | { | 237 | { |
256 | return nla_total_size(4) /* dst */ | 238 | return nla_total_size(4) /* dst */ |
@@ -263,7 +245,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops) | |||
263 | rt_cache_flush(ops->fro_net, -1); | 245 | rt_cache_flush(ops->fro_net, -1); |
264 | } | 246 | } |
265 | 247 | ||
266 | static struct fib_rules_ops fib4_rules_ops_template = { | 248 | static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { |
267 | .family = AF_INET, | 249 | .family = AF_INET, |
268 | .rule_size = sizeof(struct fib4_rule), | 250 | .rule_size = sizeof(struct fib4_rule), |
269 | .addr_size = sizeof(u32), | 251 | .addr_size = sizeof(u32), |
@@ -272,7 +254,7 @@ static struct fib_rules_ops fib4_rules_ops_template = { | |||
272 | .configure = fib4_rule_configure, | 254 | .configure = fib4_rule_configure, |
273 | .compare = fib4_rule_compare, | 255 | .compare = fib4_rule_compare, |
274 | .fill = fib4_rule_fill, | 256 | .fill = fib4_rule_fill, |
275 | .default_pref = fib4_rule_default_pref, | 257 | .default_pref = fib_default_rule_pref, |
276 | .nlmsg_payload = fib4_rule_nlmsg_payload, | 258 | .nlmsg_payload = fib4_rule_nlmsg_payload, |
277 | .flush_cache = fib4_rule_flush_cache, | 259 | .flush_cache = fib4_rule_flush_cache, |
278 | .nlgroup = RTNLGRP_IPV4_RULE, | 260 | .nlgroup = RTNLGRP_IPV4_RULE, |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 1af0ea0fb6a2..20f09c5b31e8 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/proc_fs.h> | 32 | #include <linux/proc_fs.h> |
33 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/slab.h> | ||
35 | 36 | ||
36 | #include <net/arp.h> | 37 | #include <net/arp.h> |
37 | #include <net/ip.h> | 38 | #include <net/ip.h> |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index af5d89792860..c98f115fb0fd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -71,6 +71,7 @@ | |||
71 | #include <linux/netlink.h> | 71 | #include <linux/netlink.h> |
72 | #include <linux/init.h> | 72 | #include <linux/init.h> |
73 | #include <linux/list.h> | 73 | #include <linux/list.h> |
74 | #include <linux/slab.h> | ||
74 | #include <net/net_namespace.h> | 75 | #include <net/net_namespace.h> |
75 | #include <net/ip.h> | 76 | #include <net/ip.h> |
76 | #include <net/protocol.h> | 77 | #include <net/protocol.h> |
@@ -208,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i) | |||
208 | { | 209 | { |
209 | struct node *ret = tnode_get_child(tn, i); | 210 | struct node *ret = tnode_get_child(tn, i); |
210 | 211 | ||
211 | return rcu_dereference(ret); | 212 | return rcu_dereference_check(ret, |
213 | rcu_read_lock_held() || | ||
214 | lockdep_rtnl_is_held()); | ||
212 | } | 215 | } |
213 | 216 | ||
214 | static inline int tnode_child_length(const struct tnode *tn) | 217 | static inline int tnode_child_length(const struct tnode *tn) |
@@ -961,7 +964,9 @@ fib_find_node(struct trie *t, u32 key) | |||
961 | struct node *n; | 964 | struct node *n; |
962 | 965 | ||
963 | pos = 0; | 966 | pos = 0; |
964 | n = rcu_dereference(t->trie); | 967 | n = rcu_dereference_check(t->trie, |
968 | rcu_read_lock_held() || | ||
969 | lockdep_rtnl_is_held()); | ||
965 | 970 | ||
966 | while (n != NULL && NODE_TYPE(n) == T_TNODE) { | 971 | while (n != NULL && NODE_TYPE(n) == T_TNODE) { |
967 | tn = (struct tnode *) n; | 972 | tn = (struct tnode *) n; |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 4b4c2bcd15db..f3d339f728b0 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -74,6 +74,7 @@ | |||
74 | #include <linux/netdevice.h> | 74 | #include <linux/netdevice.h> |
75 | #include <linux/string.h> | 75 | #include <linux/string.h> |
76 | #include <linux/netfilter_ipv4.h> | 76 | #include <linux/netfilter_ipv4.h> |
77 | #include <linux/slab.h> | ||
77 | #include <net/snmp.h> | 78 | #include <net/snmp.h> |
78 | #include <net/ip.h> | 79 | #include <net/ip.h> |
79 | #include <net/route.h> | 80 | #include <net/route.h> |
@@ -330,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
330 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, | 331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, |
331 | icmp_param->data_len+icmp_param->head_len, | 332 | icmp_param->data_len+icmp_param->head_len, |
332 | icmp_param->head_len, | 333 | icmp_param->head_len, |
333 | ipc, rt, MSG_DONTWAIT) < 0) | 334 | ipc, rt, MSG_DONTWAIT) < 0) { |
335 | ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); | ||
334 | ip_flush_pending_frames(sk); | 336 | ip_flush_pending_frames(sk); |
335 | else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { | 337 | } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { |
336 | struct icmphdr *icmph = icmp_hdr(skb); | 338 | struct icmphdr *icmph = icmp_hdr(skb); |
337 | __wsum csum = 0; | 339 | __wsum csum = 0; |
338 | struct sk_buff *skb1; | 340 | struct sk_buff *skb1; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 63bf298ca109..5fff865a4fa7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -71,6 +71,7 @@ | |||
71 | */ | 71 | */ |
72 | 72 | ||
73 | #include <linux/module.h> | 73 | #include <linux/module.h> |
74 | #include <linux/slab.h> | ||
74 | #include <asm/uaccess.h> | 75 | #include <asm/uaccess.h> |
75 | #include <asm/system.h> | 76 | #include <asm/system.h> |
76 | #include <linux/types.h> | 77 | #include <linux/types.h> |
@@ -997,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) | |||
997 | --ANK | 998 | --ANK |
998 | */ | 999 | */ |
999 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1000 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1000 | dev_mc_add(dev, buf, dev->addr_len, 0); | 1001 | dev_mc_add(dev, buf); |
1001 | } | 1002 | } |
1002 | 1003 | ||
1003 | /* | 1004 | /* |
@@ -1010,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr) | |||
1010 | struct net_device *dev = in_dev->dev; | 1011 | struct net_device *dev = in_dev->dev; |
1011 | 1012 | ||
1012 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1013 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1013 | dev_mc_delete(dev, buf, dev->addr_len, 0); | 1014 | dev_mc_del(dev, buf); |
1014 | } | 1015 | } |
1015 | 1016 | ||
1016 | #ifdef CONFIG_IP_MULTICAST | 1017 | #ifdef CONFIG_IP_MULTICAST |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 8da6429269dd..e0a3e3537b14 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -234,7 +234,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |||
234 | * having to remove and re-insert us on the wait queue. | 234 | * having to remove and re-insert us on the wait queue. |
235 | */ | 235 | */ |
236 | for (;;) { | 236 | for (;;) { |
237 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | 237 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
238 | TASK_INTERRUPTIBLE); | 238 | TASK_INTERRUPTIBLE); |
239 | release_sock(sk); | 239 | release_sock(sk); |
240 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) | 240 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) |
@@ -253,7 +253,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |||
253 | if (!timeo) | 253 | if (!timeo) |
254 | break; | 254 | break; |
255 | } | 255 | } |
256 | finish_wait(sk->sk_sleep, &wait); | 256 | finish_wait(sk_sleep(sk), &wait); |
257 | return err; | 257 | return err; |
258 | } | 258 | } |
259 | 259 | ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 1aaa8110d84b..e5fa2ddce320 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/fcntl.h> | 15 | #include <linux/fcntl.h> |
16 | #include <linux/random.h> | 16 | #include <linux/random.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/cache.h> | 18 | #include <linux/cache.h> |
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
19 | #include <linux/time.h> | 20 | #include <linux/time.h> |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index eaf3e2c8646a..a2ca6aed763b 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/random.h> | 19 | #include <linux/random.h> |
20 | #include <linux/skbuff.h> | 20 | #include <linux/skbuff.h> |
21 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <net/inet_frag.h> | 24 | #include <net/inet_frag.h> |
24 | 25 | ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index cc94cc2d8b2d..c5af909cf701 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/kmemcheck.h> | 12 | #include <linux/kmemcheck.h> |
13 | #include <linux/slab.h> | ||
13 | #include <net/inet_hashtables.h> | 14 | #include <net/inet_hashtables.h> |
14 | #include <net/inet_timewait_sock.h> | 15 | #include <net/inet_timewait_sock.h> |
15 | #include <net/ip.h> | 16 | #include <net/ip.h> |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index a2991bc8e32e..af10942b326c 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/ip.h> | 25 | #include <linux/ip.h> |
26 | #include <linux/icmp.h> | 26 | #include <linux/icmp.h> |
27 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/sock.h> | 29 | #include <net/sock.h> |
29 | #include <net/ip.h> | 30 | #include <net/ip.h> |
30 | #include <net/tcp.h> | 31 | #include <net/tcp.h> |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b59430bc041c..75347ea70ea0 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/netdevice.h> | 32 | #include <linux/netdevice.h> |
33 | #include <linux/jhash.h> | 33 | #include <linux/jhash.h> |
34 | #include <linux/random.h> | 34 | #include <linux/random.h> |
35 | #include <linux/slab.h> | ||
35 | #include <net/route.h> | 36 | #include <net/route.h> |
36 | #include <net/dst.h> | 37 | #include <net/dst.h> |
37 | #include <net/sock.h> | 38 | #include <net/sock.h> |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f47c9f76754b..fe381d12ecdd 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/slab.h> | ||
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <linux/skbuff.h> | 19 | #include <linux/skbuff.h> |
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
@@ -810,11 +811,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
810 | tunnel->err_count = 0; | 811 | tunnel->err_count = 0; |
811 | } | 812 | } |
812 | 813 | ||
813 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; | 814 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len; |
814 | 815 | ||
815 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| | 816 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| |
816 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { | 817 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { |
817 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 818 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
819 | if (max_headroom > dev->needed_headroom) | ||
820 | dev->needed_headroom = max_headroom; | ||
818 | if (!new_skb) { | 821 | if (!new_skb) { |
819 | ip_rt_put(rt); | 822 | ip_rt_put(rt); |
820 | txq->tx_dropped++; | 823 | txq->tx_dropped++; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index c29de9879fda..f8ab7a380d4a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -119,6 +119,7 @@ | |||
119 | #include <linux/kernel.h> | 119 | #include <linux/kernel.h> |
120 | #include <linux/string.h> | 120 | #include <linux/string.h> |
121 | #include <linux/errno.h> | 121 | #include <linux/errno.h> |
122 | #include <linux/slab.h> | ||
122 | 123 | ||
123 | #include <linux/net.h> | 124 | #include <linux/net.h> |
124 | #include <linux/socket.h> | 125 | #include <linux/socket.h> |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 94bf105ef3c9..4c09a31fd140 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
15 | #include <asm/uaccess.h> | 16 | #include <asm/uaccess.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3451799e3dbf..f0392191740b 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/string.h> | 51 | #include <linux/string.h> |
52 | #include <linux/errno.h> | 52 | #include <linux/errno.h> |
53 | #include <linux/highmem.h> | 53 | #include <linux/highmem.h> |
54 | #include <linux/slab.h> | ||
54 | 55 | ||
55 | #include <linux/socket.h> | 56 | #include <linux/socket.h> |
56 | #include <linux/sockios.h> | 57 | #include <linux/sockios.h> |
@@ -119,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) | |||
119 | newskb->pkt_type = PACKET_LOOPBACK; | 120 | newskb->pkt_type = PACKET_LOOPBACK; |
120 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 121 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
121 | WARN_ON(!skb_dst(newskb)); | 122 | WARN_ON(!skb_dst(newskb)); |
122 | netif_rx(newskb); | 123 | netif_rx_ni(newskb); |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
125 | 126 | ||
@@ -310,7 +311,7 @@ int ip_output(struct sk_buff *skb) | |||
310 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 311 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
311 | } | 312 | } |
312 | 313 | ||
313 | int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | 314 | int ip_queue_xmit(struct sk_buff *skb) |
314 | { | 315 | { |
315 | struct sock *sk = skb->sk; | 316 | struct sock *sk = skb->sk; |
316 | struct inet_sock *inet = inet_sk(sk); | 317 | struct inet_sock *inet = inet_sk(sk); |
@@ -369,7 +370,7 @@ packet_routed: | |||
369 | skb_reset_network_header(skb); | 370 | skb_reset_network_header(skb); |
370 | iph = ip_hdr(skb); | 371 | iph = ip_hdr(skb); |
371 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); | 372 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); |
372 | if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) | 373 | if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df) |
373 | iph->frag_off = htons(IP_DF); | 374 | iph->frag_off = htons(IP_DF); |
374 | else | 375 | else |
375 | iph->frag_off = 0; | 376 | iph->frag_off = 0; |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 644dc43a55de..ce231780a2b1 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/icmp.h> | 23 | #include <linux/icmp.h> |
24 | #include <linux/inetdevice.h> | 24 | #include <linux/inetdevice.h> |
25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
26 | #include <linux/slab.h> | ||
26 | #include <net/sock.h> | 27 | #include <net/sock.h> |
27 | #include <net/ip.h> | 28 | #include <net/ip.h> |
28 | #include <net/icmp.h> | 29 | #include <net/icmp.h> |
@@ -286,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
286 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | 287 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, |
287 | __be16 port, u32 info, u8 *payload) | 288 | __be16 port, u32 info, u8 *payload) |
288 | { | 289 | { |
289 | struct inet_sock *inet = inet_sk(sk); | ||
290 | struct sock_exterr_skb *serr; | 290 | struct sock_exterr_skb *serr; |
291 | 291 | ||
292 | if (!inet->recverr) | ||
293 | return; | ||
294 | |||
295 | skb = skb_clone(skb, GFP_ATOMIC); | 292 | skb = skb_clone(skb, GFP_ATOMIC); |
296 | if (!skb) | 293 | if (!skb) |
297 | return; | 294 | return; |
@@ -957,6 +954,22 @@ e_inval: | |||
957 | return -EINVAL; | 954 | return -EINVAL; |
958 | } | 955 | } |
959 | 956 | ||
957 | /** | ||
958 | * ip_queue_rcv_skb - Queue an skb into sock receive queue | ||
959 | * @sk: socket | ||
960 | * @skb: buffer | ||
961 | * | ||
962 | * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option | ||
963 | * is not set, we drop skb dst entry now, while dst cache line is hot. | ||
964 | */ | ||
965 | int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
966 | { | ||
967 | if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO)) | ||
968 | skb_dst_drop(skb); | ||
969 | return sock_queue_rcv_skb(sk, skb); | ||
970 | } | ||
971 | EXPORT_SYMBOL(ip_queue_rcv_skb); | ||
972 | |||
960 | int ip_setsockopt(struct sock *sk, int level, | 973 | int ip_setsockopt(struct sock *sk, int level, |
961 | int optname, char __user *optval, unsigned int optlen) | 974 | int optname, char __user *optval, unsigned int optlen) |
962 | { | 975 | { |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 10a6a604bf32..b9d84e800cf4 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <linux/root_dev.h> | 53 | #include <linux/root_dev.h> |
54 | #include <linux/delay.h> | 54 | #include <linux/delay.h> |
55 | #include <linux/nfs_fs.h> | 55 | #include <linux/nfs_fs.h> |
56 | #include <linux/slab.h> | ||
56 | #include <net/net_namespace.h> | 57 | #include <net/net_namespace.h> |
57 | #include <net/arp.h> | 58 | #include <net/arp.h> |
58 | #include <net/ip.h> | 59 | #include <net/ip.h> |
@@ -187,6 +188,16 @@ struct ic_device { | |||
187 | static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ | 188 | static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ |
188 | static struct net_device *ic_dev __initdata = NULL; /* Selected device */ | 189 | static struct net_device *ic_dev __initdata = NULL; /* Selected device */ |
189 | 190 | ||
191 | static bool __init ic_device_match(struct net_device *dev) | ||
192 | { | ||
193 | if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : | ||
194 | (!(dev->flags & IFF_LOOPBACK) && | ||
195 | (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) && | ||
196 | strncmp(dev->name, "dummy", 5))) | ||
197 | return true; | ||
198 | return false; | ||
199 | } | ||
200 | |||
190 | static int __init ic_open_devs(void) | 201 | static int __init ic_open_devs(void) |
191 | { | 202 | { |
192 | struct ic_device *d, **last; | 203 | struct ic_device *d, **last; |
@@ -207,10 +218,7 @@ static int __init ic_open_devs(void) | |||
207 | for_each_netdev(&init_net, dev) { | 218 | for_each_netdev(&init_net, dev) { |
208 | if (dev->flags & IFF_LOOPBACK) | 219 | if (dev->flags & IFF_LOOPBACK) |
209 | continue; | 220 | continue; |
210 | if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : | 221 | if (ic_device_match(dev)) { |
211 | (!(dev->flags & IFF_LOOPBACK) && | ||
212 | (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) && | ||
213 | strncmp(dev->name, "dummy", 5))) { | ||
214 | int able = 0; | 222 | int able = 0; |
215 | if (dev->mtu >= 364) | 223 | if (dev->mtu >= 364) |
216 | able |= IC_BOOTP; | 224 | able |= IC_BOOTP; |
@@ -228,7 +236,7 @@ static int __init ic_open_devs(void) | |||
228 | } | 236 | } |
229 | if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { | 237 | if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { |
230 | rtnl_unlock(); | 238 | rtnl_unlock(); |
231 | return -1; | 239 | return -ENOMEM; |
232 | } | 240 | } |
233 | d->dev = dev; | 241 | d->dev = dev; |
234 | *last = d; | 242 | *last = d; |
@@ -253,7 +261,7 @@ static int __init ic_open_devs(void) | |||
253 | printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); | 261 | printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); |
254 | else | 262 | else |
255 | printk(KERN_ERR "IP-Config: No network devices available.\n"); | 263 | printk(KERN_ERR "IP-Config: No network devices available.\n"); |
256 | return -1; | 264 | return -ENODEV; |
257 | } | 265 | } |
258 | return 0; | 266 | return 0; |
259 | } | 267 | } |
@@ -968,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
968 | /* Is it a reply for the device we are configuring? */ | 976 | /* Is it a reply for the device we are configuring? */ |
969 | if (b->xid != ic_dev_xid) { | 977 | if (b->xid != ic_dev_xid) { |
970 | if (net_ratelimit()) | 978 | if (net_ratelimit()) |
971 | printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n"); | 979 | printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n"); |
972 | goto drop_unlock; | 980 | goto drop_unlock; |
973 | } | 981 | } |
974 | 982 | ||
@@ -1303,6 +1311,32 @@ __be32 __init root_nfs_parse_addr(char *name) | |||
1303 | return addr; | 1311 | return addr; |
1304 | } | 1312 | } |
1305 | 1313 | ||
1314 | #define DEVICE_WAIT_MAX 12 /* 12 seconds */ | ||
1315 | |||
1316 | static int __init wait_for_devices(void) | ||
1317 | { | ||
1318 | int i; | ||
1319 | |||
1320 | msleep(CONF_PRE_OPEN); | ||
1321 | for (i = 0; i < DEVICE_WAIT_MAX; i++) { | ||
1322 | struct net_device *dev; | ||
1323 | int found = 0; | ||
1324 | |||
1325 | rtnl_lock(); | ||
1326 | for_each_netdev(&init_net, dev) { | ||
1327 | if (ic_device_match(dev)) { | ||
1328 | found = 1; | ||
1329 | break; | ||
1330 | } | ||
1331 | } | ||
1332 | rtnl_unlock(); | ||
1333 | if (found) | ||
1334 | return 0; | ||
1335 | ssleep(1); | ||
1336 | } | ||
1337 | return -ENODEV; | ||
1338 | } | ||
1339 | |||
1306 | /* | 1340 | /* |
1307 | * IP Autoconfig dispatcher. | 1341 | * IP Autoconfig dispatcher. |
1308 | */ | 1342 | */ |
@@ -1313,6 +1347,7 @@ static int __init ip_auto_config(void) | |||
1313 | #ifdef IPCONFIG_DYNAMIC | 1347 | #ifdef IPCONFIG_DYNAMIC |
1314 | int retries = CONF_OPEN_RETRIES; | 1348 | int retries = CONF_OPEN_RETRIES; |
1315 | #endif | 1349 | #endif |
1350 | int err; | ||
1316 | 1351 | ||
1317 | #ifdef CONFIG_PROC_FS | 1352 | #ifdef CONFIG_PROC_FS |
1318 | proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); | 1353 | proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); |
@@ -1325,12 +1360,15 @@ static int __init ip_auto_config(void) | |||
1325 | #ifdef IPCONFIG_DYNAMIC | 1360 | #ifdef IPCONFIG_DYNAMIC |
1326 | try_try_again: | 1361 | try_try_again: |
1327 | #endif | 1362 | #endif |
1328 | /* Give hardware a chance to settle */ | 1363 | /* Wait for devices to appear */ |
1329 | msleep(CONF_PRE_OPEN); | 1364 | err = wait_for_devices(); |
1365 | if (err) | ||
1366 | return err; | ||
1330 | 1367 | ||
1331 | /* Setup all network devices */ | 1368 | /* Setup all network devices */ |
1332 | if (ic_open_devs() < 0) | 1369 | err = ic_open_devs(); |
1333 | return -1; | 1370 | if (err) |
1371 | return err; | ||
1334 | 1372 | ||
1335 | /* Give drivers a chance to settle */ | 1373 | /* Give drivers a chance to settle */ |
1336 | ssleep(CONF_POST_OPEN); | 1374 | ssleep(CONF_POST_OPEN); |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 2f302d3ac9a3..0b27b14dcc9d 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -95,6 +95,7 @@ | |||
95 | #include <linux/module.h> | 95 | #include <linux/module.h> |
96 | #include <linux/types.h> | 96 | #include <linux/types.h> |
97 | #include <linux/kernel.h> | 97 | #include <linux/kernel.h> |
98 | #include <linux/slab.h> | ||
98 | #include <asm/uaccess.h> | 99 | #include <asm/uaccess.h> |
99 | #include <linux/skbuff.h> | 100 | #include <linux/skbuff.h> |
100 | #include <linux/netdevice.h> | 101 | #include <linux/netdevice.h> |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8582e12e4a62..eddfd12f55b8 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/mroute.h> | 47 | #include <linux/mroute.h> |
48 | #include <linux/init.h> | 48 | #include <linux/init.h> |
49 | #include <linux/if_ether.h> | 49 | #include <linux/if_ether.h> |
50 | #include <linux/slab.h> | ||
50 | #include <net/net_namespace.h> | 51 | #include <net/net_namespace.h> |
51 | #include <net/ip.h> | 52 | #include <net/ip.h> |
52 | #include <net/protocol.h> | 53 | #include <net/protocol.h> |
@@ -62,11 +63,40 @@ | |||
62 | #include <net/ipip.h> | 63 | #include <net/ipip.h> |
63 | #include <net/checksum.h> | 64 | #include <net/checksum.h> |
64 | #include <net/netlink.h> | 65 | #include <net/netlink.h> |
66 | #include <net/fib_rules.h> | ||
65 | 67 | ||
66 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) | 68 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) |
67 | #define CONFIG_IP_PIMSM 1 | 69 | #define CONFIG_IP_PIMSM 1 |
68 | #endif | 70 | #endif |
69 | 71 | ||
72 | struct mr_table { | ||
73 | struct list_head list; | ||
74 | #ifdef CONFIG_NET_NS | ||
75 | struct net *net; | ||
76 | #endif | ||
77 | u32 id; | ||
78 | struct sock *mroute_sk; | ||
79 | struct timer_list ipmr_expire_timer; | ||
80 | struct list_head mfc_unres_queue; | ||
81 | struct list_head mfc_cache_array[MFC_LINES]; | ||
82 | struct vif_device vif_table[MAXVIFS]; | ||
83 | int maxvif; | ||
84 | atomic_t cache_resolve_queue_len; | ||
85 | int mroute_do_assert; | ||
86 | int mroute_do_pim; | ||
87 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) | ||
88 | int mroute_reg_vif_num; | ||
89 | #endif | ||
90 | }; | ||
91 | |||
92 | struct ipmr_rule { | ||
93 | struct fib_rule common; | ||
94 | }; | ||
95 | |||
96 | struct ipmr_result { | ||
97 | struct mr_table *mrt; | ||
98 | }; | ||
99 | |||
70 | /* Big lock, protecting vif table, mrt cache and mroute socket state. | 100 | /* Big lock, protecting vif table, mrt cache and mroute socket state. |
71 | Note that the changes are semaphored via rtnl_lock. | 101 | Note that the changes are semaphored via rtnl_lock. |
72 | */ | 102 | */ |
@@ -77,9 +107,7 @@ static DEFINE_RWLOCK(mrt_lock); | |||
77 | * Multicast router control variables | 107 | * Multicast router control variables |
78 | */ | 108 | */ |
79 | 109 | ||
80 | #define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) | 110 | #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) |
81 | |||
82 | static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ | ||
83 | 111 | ||
84 | /* Special spinlock for queue of unresolved entries */ | 112 | /* Special spinlock for queue of unresolved entries */ |
85 | static DEFINE_SPINLOCK(mfc_unres_lock); | 113 | static DEFINE_SPINLOCK(mfc_unres_lock); |
@@ -94,12 +122,215 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
94 | 122 | ||
95 | static struct kmem_cache *mrt_cachep __read_mostly; | 123 | static struct kmem_cache *mrt_cachep __read_mostly; |
96 | 124 | ||
97 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); | 125 | static struct mr_table *ipmr_new_table(struct net *net, u32 id); |
98 | static int ipmr_cache_report(struct net *net, | 126 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, |
127 | struct sk_buff *skb, struct mfc_cache *cache, | ||
128 | int local); | ||
129 | static int ipmr_cache_report(struct mr_table *mrt, | ||
99 | struct sk_buff *pkt, vifi_t vifi, int assert); | 130 | struct sk_buff *pkt, vifi_t vifi, int assert); |
100 | static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); | 131 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
132 | struct mfc_cache *c, struct rtmsg *rtm); | ||
133 | static void ipmr_expire_process(unsigned long arg); | ||
134 | |||
135 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||
136 | #define ipmr_for_each_table(mrt, net) \ | ||
137 | list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) | ||
138 | |||
139 | static struct mr_table *ipmr_get_table(struct net *net, u32 id) | ||
140 | { | ||
141 | struct mr_table *mrt; | ||
142 | |||
143 | ipmr_for_each_table(mrt, net) { | ||
144 | if (mrt->id == id) | ||
145 | return mrt; | ||
146 | } | ||
147 | return NULL; | ||
148 | } | ||
149 | |||
150 | static int ipmr_fib_lookup(struct net *net, struct flowi *flp, | ||
151 | struct mr_table **mrt) | ||
152 | { | ||
153 | struct ipmr_result res; | ||
154 | struct fib_lookup_arg arg = { .result = &res, }; | ||
155 | int err; | ||
156 | |||
157 | err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg); | ||
158 | if (err < 0) | ||
159 | return err; | ||
160 | *mrt = res.mrt; | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, | ||
165 | int flags, struct fib_lookup_arg *arg) | ||
166 | { | ||
167 | struct ipmr_result *res = arg->result; | ||
168 | struct mr_table *mrt; | ||
169 | |||
170 | switch (rule->action) { | ||
171 | case FR_ACT_TO_TBL: | ||
172 | break; | ||
173 | case FR_ACT_UNREACHABLE: | ||
174 | return -ENETUNREACH; | ||
175 | case FR_ACT_PROHIBIT: | ||
176 | return -EACCES; | ||
177 | case FR_ACT_BLACKHOLE: | ||
178 | default: | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | |||
182 | mrt = ipmr_get_table(rule->fr_net, rule->table); | ||
183 | if (mrt == NULL) | ||
184 | return -EAGAIN; | ||
185 | res->mrt = mrt; | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) | ||
190 | { | ||
191 | return 1; | ||
192 | } | ||
193 | |||
194 | static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { | ||
195 | FRA_GENERIC_POLICY, | ||
196 | }; | ||
197 | |||
198 | static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | ||
199 | struct fib_rule_hdr *frh, struct nlattr **tb) | ||
200 | { | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | ||
205 | struct nlattr **tb) | ||
206 | { | ||
207 | return 1; | ||
208 | } | ||
209 | |||
210 | static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | ||
211 | struct fib_rule_hdr *frh) | ||
212 | { | ||
213 | frh->dst_len = 0; | ||
214 | frh->src_len = 0; | ||
215 | frh->tos = 0; | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { | ||
220 | .family = RTNL_FAMILY_IPMR, | ||
221 | .rule_size = sizeof(struct ipmr_rule), | ||
222 | .addr_size = sizeof(u32), | ||
223 | .action = ipmr_rule_action, | ||
224 | .match = ipmr_rule_match, | ||
225 | .configure = ipmr_rule_configure, | ||
226 | .compare = ipmr_rule_compare, | ||
227 | .default_pref = fib_default_rule_pref, | ||
228 | .fill = ipmr_rule_fill, | ||
229 | .nlgroup = RTNLGRP_IPV4_RULE, | ||
230 | .policy = ipmr_rule_policy, | ||
231 | .owner = THIS_MODULE, | ||
232 | }; | ||
233 | |||
234 | static int __net_init ipmr_rules_init(struct net *net) | ||
235 | { | ||
236 | struct fib_rules_ops *ops; | ||
237 | struct mr_table *mrt; | ||
238 | int err; | ||
239 | |||
240 | ops = fib_rules_register(&ipmr_rules_ops_template, net); | ||
241 | if (IS_ERR(ops)) | ||
242 | return PTR_ERR(ops); | ||
243 | |||
244 | INIT_LIST_HEAD(&net->ipv4.mr_tables); | ||
245 | |||
246 | mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); | ||
247 | if (mrt == NULL) { | ||
248 | err = -ENOMEM; | ||
249 | goto err1; | ||
250 | } | ||
101 | 251 | ||
102 | static struct timer_list ipmr_expire_timer; | 252 | err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); |
253 | if (err < 0) | ||
254 | goto err2; | ||
255 | |||
256 | net->ipv4.mr_rules_ops = ops; | ||
257 | return 0; | ||
258 | |||
259 | err2: | ||
260 | kfree(mrt); | ||
261 | err1: | ||
262 | fib_rules_unregister(ops); | ||
263 | return err; | ||
264 | } | ||
265 | |||
266 | static void __net_exit ipmr_rules_exit(struct net *net) | ||
267 | { | ||
268 | struct mr_table *mrt, *next; | ||
269 | |||
270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) | ||
271 | kfree(mrt); | ||
272 | fib_rules_unregister(net->ipv4.mr_rules_ops); | ||
273 | } | ||
274 | #else | ||
275 | #define ipmr_for_each_table(mrt, net) \ | ||
276 | for (mrt = net->ipv4.mrt; mrt; mrt = NULL) | ||
277 | |||
278 | static struct mr_table *ipmr_get_table(struct net *net, u32 id) | ||
279 | { | ||
280 | return net->ipv4.mrt; | ||
281 | } | ||
282 | |||
283 | static int ipmr_fib_lookup(struct net *net, struct flowi *flp, | ||
284 | struct mr_table **mrt) | ||
285 | { | ||
286 | *mrt = net->ipv4.mrt; | ||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static int __net_init ipmr_rules_init(struct net *net) | ||
291 | { | ||
292 | net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); | ||
293 | return net->ipv4.mrt ? 0 : -ENOMEM; | ||
294 | } | ||
295 | |||
296 | static void __net_exit ipmr_rules_exit(struct net *net) | ||
297 | { | ||
298 | kfree(net->ipv4.mrt); | ||
299 | } | ||
300 | #endif | ||
301 | |||
302 | static struct mr_table *ipmr_new_table(struct net *net, u32 id) | ||
303 | { | ||
304 | struct mr_table *mrt; | ||
305 | unsigned int i; | ||
306 | |||
307 | mrt = ipmr_get_table(net, id); | ||
308 | if (mrt != NULL) | ||
309 | return mrt; | ||
310 | |||
311 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); | ||
312 | if (mrt == NULL) | ||
313 | return NULL; | ||
314 | write_pnet(&mrt->net, net); | ||
315 | mrt->id = id; | ||
316 | |||
317 | /* Forwarding cache */ | ||
318 | for (i = 0; i < MFC_LINES; i++) | ||
319 | INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); | ||
320 | |||
321 | INIT_LIST_HEAD(&mrt->mfc_unres_queue); | ||
322 | |||
323 | setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, | ||
324 | (unsigned long)mrt); | ||
325 | |||
326 | #ifdef CONFIG_IP_PIMSM | ||
327 | mrt->mroute_reg_vif_num = -1; | ||
328 | #endif | ||
329 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||
330 | list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); | ||
331 | #endif | ||
332 | return mrt; | ||
333 | } | ||
103 | 334 | ||
104 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ | 335 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ |
105 | 336 | ||
@@ -200,12 +431,22 @@ failure: | |||
200 | static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 431 | static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) |
201 | { | 432 | { |
202 | struct net *net = dev_net(dev); | 433 | struct net *net = dev_net(dev); |
434 | struct mr_table *mrt; | ||
435 | struct flowi fl = { | ||
436 | .oif = dev->ifindex, | ||
437 | .iif = skb->skb_iif, | ||
438 | .mark = skb->mark, | ||
439 | }; | ||
440 | int err; | ||
441 | |||
442 | err = ipmr_fib_lookup(net, &fl, &mrt); | ||
443 | if (err < 0) | ||
444 | return err; | ||
203 | 445 | ||
204 | read_lock(&mrt_lock); | 446 | read_lock(&mrt_lock); |
205 | dev->stats.tx_bytes += skb->len; | 447 | dev->stats.tx_bytes += skb->len; |
206 | dev->stats.tx_packets++; | 448 | dev->stats.tx_packets++; |
207 | ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, | 449 | ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); |
208 | IGMPMSG_WHOLEPKT); | ||
209 | read_unlock(&mrt_lock); | 450 | read_unlock(&mrt_lock); |
210 | kfree_skb(skb); | 451 | kfree_skb(skb); |
211 | return NETDEV_TX_OK; | 452 | return NETDEV_TX_OK; |
@@ -225,12 +466,18 @@ static void reg_vif_setup(struct net_device *dev) | |||
225 | dev->features |= NETIF_F_NETNS_LOCAL; | 466 | dev->features |= NETIF_F_NETNS_LOCAL; |
226 | } | 467 | } |
227 | 468 | ||
228 | static struct net_device *ipmr_reg_vif(struct net *net) | 469 | static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) |
229 | { | 470 | { |
230 | struct net_device *dev; | 471 | struct net_device *dev; |
231 | struct in_device *in_dev; | 472 | struct in_device *in_dev; |
473 | char name[IFNAMSIZ]; | ||
232 | 474 | ||
233 | dev = alloc_netdev(0, "pimreg", reg_vif_setup); | 475 | if (mrt->id == RT_TABLE_DEFAULT) |
476 | sprintf(name, "pimreg"); | ||
477 | else | ||
478 | sprintf(name, "pimreg%u", mrt->id); | ||
479 | |||
480 | dev = alloc_netdev(0, name, reg_vif_setup); | ||
234 | 481 | ||
235 | if (dev == NULL) | 482 | if (dev == NULL) |
236 | return NULL; | 483 | return NULL; |
@@ -275,17 +522,17 @@ failure: | |||
275 | * @notify: Set to 1, if the caller is a notifier_call | 522 | * @notify: Set to 1, if the caller is a notifier_call |
276 | */ | 523 | */ |
277 | 524 | ||
278 | static int vif_delete(struct net *net, int vifi, int notify, | 525 | static int vif_delete(struct mr_table *mrt, int vifi, int notify, |
279 | struct list_head *head) | 526 | struct list_head *head) |
280 | { | 527 | { |
281 | struct vif_device *v; | 528 | struct vif_device *v; |
282 | struct net_device *dev; | 529 | struct net_device *dev; |
283 | struct in_device *in_dev; | 530 | struct in_device *in_dev; |
284 | 531 | ||
285 | if (vifi < 0 || vifi >= net->ipv4.maxvif) | 532 | if (vifi < 0 || vifi >= mrt->maxvif) |
286 | return -EADDRNOTAVAIL; | 533 | return -EADDRNOTAVAIL; |
287 | 534 | ||
288 | v = &net->ipv4.vif_table[vifi]; | 535 | v = &mrt->vif_table[vifi]; |
289 | 536 | ||
290 | write_lock_bh(&mrt_lock); | 537 | write_lock_bh(&mrt_lock); |
291 | dev = v->dev; | 538 | dev = v->dev; |
@@ -297,17 +544,17 @@ static int vif_delete(struct net *net, int vifi, int notify, | |||
297 | } | 544 | } |
298 | 545 | ||
299 | #ifdef CONFIG_IP_PIMSM | 546 | #ifdef CONFIG_IP_PIMSM |
300 | if (vifi == net->ipv4.mroute_reg_vif_num) | 547 | if (vifi == mrt->mroute_reg_vif_num) |
301 | net->ipv4.mroute_reg_vif_num = -1; | 548 | mrt->mroute_reg_vif_num = -1; |
302 | #endif | 549 | #endif |
303 | 550 | ||
304 | if (vifi+1 == net->ipv4.maxvif) { | 551 | if (vifi+1 == mrt->maxvif) { |
305 | int tmp; | 552 | int tmp; |
306 | for (tmp=vifi-1; tmp>=0; tmp--) { | 553 | for (tmp=vifi-1; tmp>=0; tmp--) { |
307 | if (VIF_EXISTS(net, tmp)) | 554 | if (VIF_EXISTS(mrt, tmp)) |
308 | break; | 555 | break; |
309 | } | 556 | } |
310 | net->ipv4.maxvif = tmp+1; | 557 | mrt->maxvif = tmp+1; |
311 | } | 558 | } |
312 | 559 | ||
313 | write_unlock_bh(&mrt_lock); | 560 | write_unlock_bh(&mrt_lock); |
@@ -328,7 +575,6 @@ static int vif_delete(struct net *net, int vifi, int notify, | |||
328 | 575 | ||
329 | static inline void ipmr_cache_free(struct mfc_cache *c) | 576 | static inline void ipmr_cache_free(struct mfc_cache *c) |
330 | { | 577 | { |
331 | release_net(mfc_net(c)); | ||
332 | kmem_cache_free(mrt_cachep, c); | 578 | kmem_cache_free(mrt_cachep, c); |
333 | } | 579 | } |
334 | 580 | ||
@@ -336,13 +582,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c) | |||
336 | and reporting error to netlink readers. | 582 | and reporting error to netlink readers. |
337 | */ | 583 | */ |
338 | 584 | ||
339 | static void ipmr_destroy_unres(struct mfc_cache *c) | 585 | static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) |
340 | { | 586 | { |
587 | struct net *net = read_pnet(&mrt->net); | ||
341 | struct sk_buff *skb; | 588 | struct sk_buff *skb; |
342 | struct nlmsgerr *e; | 589 | struct nlmsgerr *e; |
343 | struct net *net = mfc_net(c); | ||
344 | 590 | ||
345 | atomic_dec(&net->ipv4.cache_resolve_queue_len); | 591 | atomic_dec(&mrt->cache_resolve_queue_len); |
346 | 592 | ||
347 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { | 593 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { |
348 | if (ip_hdr(skb)->version == 0) { | 594 | if (ip_hdr(skb)->version == 0) { |
@@ -363,42 +609,40 @@ static void ipmr_destroy_unres(struct mfc_cache *c) | |||
363 | } | 609 | } |
364 | 610 | ||
365 | 611 | ||
366 | /* Single timer process for all the unresolved queue. */ | 612 | /* Timer process for the unresolved queue. */ |
367 | 613 | ||
368 | static void ipmr_expire_process(unsigned long dummy) | 614 | static void ipmr_expire_process(unsigned long arg) |
369 | { | 615 | { |
616 | struct mr_table *mrt = (struct mr_table *)arg; | ||
370 | unsigned long now; | 617 | unsigned long now; |
371 | unsigned long expires; | 618 | unsigned long expires; |
372 | struct mfc_cache *c, **cp; | 619 | struct mfc_cache *c, *next; |
373 | 620 | ||
374 | if (!spin_trylock(&mfc_unres_lock)) { | 621 | if (!spin_trylock(&mfc_unres_lock)) { |
375 | mod_timer(&ipmr_expire_timer, jiffies+HZ/10); | 622 | mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); |
376 | return; | 623 | return; |
377 | } | 624 | } |
378 | 625 | ||
379 | if (mfc_unres_queue == NULL) | 626 | if (list_empty(&mrt->mfc_unres_queue)) |
380 | goto out; | 627 | goto out; |
381 | 628 | ||
382 | now = jiffies; | 629 | now = jiffies; |
383 | expires = 10*HZ; | 630 | expires = 10*HZ; |
384 | cp = &mfc_unres_queue; | ||
385 | 631 | ||
386 | while ((c=*cp) != NULL) { | 632 | list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { |
387 | if (time_after(c->mfc_un.unres.expires, now)) { | 633 | if (time_after(c->mfc_un.unres.expires, now)) { |
388 | unsigned long interval = c->mfc_un.unres.expires - now; | 634 | unsigned long interval = c->mfc_un.unres.expires - now; |
389 | if (interval < expires) | 635 | if (interval < expires) |
390 | expires = interval; | 636 | expires = interval; |
391 | cp = &c->next; | ||
392 | continue; | 637 | continue; |
393 | } | 638 | } |
394 | 639 | ||
395 | *cp = c->next; | 640 | list_del(&c->list); |
396 | 641 | ipmr_destroy_unres(mrt, c); | |
397 | ipmr_destroy_unres(c); | ||
398 | } | 642 | } |
399 | 643 | ||
400 | if (mfc_unres_queue != NULL) | 644 | if (!list_empty(&mrt->mfc_unres_queue)) |
401 | mod_timer(&ipmr_expire_timer, jiffies + expires); | 645 | mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); |
402 | 646 | ||
403 | out: | 647 | out: |
404 | spin_unlock(&mfc_unres_lock); | 648 | spin_unlock(&mfc_unres_lock); |
@@ -406,17 +650,17 @@ out: | |||
406 | 650 | ||
407 | /* Fill oifs list. It is called under write locked mrt_lock. */ | 651 | /* Fill oifs list. It is called under write locked mrt_lock. */ |
408 | 652 | ||
409 | static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) | 653 | static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, |
654 | unsigned char *ttls) | ||
410 | { | 655 | { |
411 | int vifi; | 656 | int vifi; |
412 | struct net *net = mfc_net(cache); | ||
413 | 657 | ||
414 | cache->mfc_un.res.minvif = MAXVIFS; | 658 | cache->mfc_un.res.minvif = MAXVIFS; |
415 | cache->mfc_un.res.maxvif = 0; | 659 | cache->mfc_un.res.maxvif = 0; |
416 | memset(cache->mfc_un.res.ttls, 255, MAXVIFS); | 660 | memset(cache->mfc_un.res.ttls, 255, MAXVIFS); |
417 | 661 | ||
418 | for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { | 662 | for (vifi = 0; vifi < mrt->maxvif; vifi++) { |
419 | if (VIF_EXISTS(net, vifi) && | 663 | if (VIF_EXISTS(mrt, vifi) && |
420 | ttls[vifi] && ttls[vifi] < 255) { | 664 | ttls[vifi] && ttls[vifi] < 255) { |
421 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; | 665 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; |
422 | if (cache->mfc_un.res.minvif > vifi) | 666 | if (cache->mfc_un.res.minvif > vifi) |
@@ -427,16 +671,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) | |||
427 | } | 671 | } |
428 | } | 672 | } |
429 | 673 | ||
430 | static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | 674 | static int vif_add(struct net *net, struct mr_table *mrt, |
675 | struct vifctl *vifc, int mrtsock) | ||
431 | { | 676 | { |
432 | int vifi = vifc->vifc_vifi; | 677 | int vifi = vifc->vifc_vifi; |
433 | struct vif_device *v = &net->ipv4.vif_table[vifi]; | 678 | struct vif_device *v = &mrt->vif_table[vifi]; |
434 | struct net_device *dev; | 679 | struct net_device *dev; |
435 | struct in_device *in_dev; | 680 | struct in_device *in_dev; |
436 | int err; | 681 | int err; |
437 | 682 | ||
438 | /* Is vif busy ? */ | 683 | /* Is vif busy ? */ |
439 | if (VIF_EXISTS(net, vifi)) | 684 | if (VIF_EXISTS(mrt, vifi)) |
440 | return -EADDRINUSE; | 685 | return -EADDRINUSE; |
441 | 686 | ||
442 | switch (vifc->vifc_flags) { | 687 | switch (vifc->vifc_flags) { |
@@ -446,9 +691,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | |||
446 | * Special Purpose VIF in PIM | 691 | * Special Purpose VIF in PIM |
447 | * All the packets will be sent to the daemon | 692 | * All the packets will be sent to the daemon |
448 | */ | 693 | */ |
449 | if (net->ipv4.mroute_reg_vif_num >= 0) | 694 | if (mrt->mroute_reg_vif_num >= 0) |
450 | return -EADDRINUSE; | 695 | return -EADDRINUSE; |
451 | dev = ipmr_reg_vif(net); | 696 | dev = ipmr_reg_vif(net, mrt); |
452 | if (!dev) | 697 | if (!dev) |
453 | return -ENOBUFS; | 698 | return -ENOBUFS; |
454 | err = dev_set_allmulti(dev, 1); | 699 | err = dev_set_allmulti(dev, 1); |
@@ -524,49 +769,47 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | |||
524 | v->dev = dev; | 769 | v->dev = dev; |
525 | #ifdef CONFIG_IP_PIMSM | 770 | #ifdef CONFIG_IP_PIMSM |
526 | if (v->flags&VIFF_REGISTER) | 771 | if (v->flags&VIFF_REGISTER) |
527 | net->ipv4.mroute_reg_vif_num = vifi; | 772 | mrt->mroute_reg_vif_num = vifi; |
528 | #endif | 773 | #endif |
529 | if (vifi+1 > net->ipv4.maxvif) | 774 | if (vifi+1 > mrt->maxvif) |
530 | net->ipv4.maxvif = vifi+1; | 775 | mrt->maxvif = vifi+1; |
531 | write_unlock_bh(&mrt_lock); | 776 | write_unlock_bh(&mrt_lock); |
532 | return 0; | 777 | return 0; |
533 | } | 778 | } |
534 | 779 | ||
535 | static struct mfc_cache *ipmr_cache_find(struct net *net, | 780 | static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, |
536 | __be32 origin, | 781 | __be32 origin, |
537 | __be32 mcastgrp) | 782 | __be32 mcastgrp) |
538 | { | 783 | { |
539 | int line = MFC_HASH(mcastgrp, origin); | 784 | int line = MFC_HASH(mcastgrp, origin); |
540 | struct mfc_cache *c; | 785 | struct mfc_cache *c; |
541 | 786 | ||
542 | for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { | 787 | list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { |
543 | if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) | 788 | if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) |
544 | break; | 789 | return c; |
545 | } | 790 | } |
546 | return c; | 791 | return NULL; |
547 | } | 792 | } |
548 | 793 | ||
549 | /* | 794 | /* |
550 | * Allocate a multicast cache entry | 795 | * Allocate a multicast cache entry |
551 | */ | 796 | */ |
552 | static struct mfc_cache *ipmr_cache_alloc(struct net *net) | 797 | static struct mfc_cache *ipmr_cache_alloc(void) |
553 | { | 798 | { |
554 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); | 799 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); |
555 | if (c == NULL) | 800 | if (c == NULL) |
556 | return NULL; | 801 | return NULL; |
557 | c->mfc_un.res.minvif = MAXVIFS; | 802 | c->mfc_un.res.minvif = MAXVIFS; |
558 | mfc_net_set(c, net); | ||
559 | return c; | 803 | return c; |
560 | } | 804 | } |
561 | 805 | ||
562 | static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) | 806 | static struct mfc_cache *ipmr_cache_alloc_unres(void) |
563 | { | 807 | { |
564 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); | 808 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); |
565 | if (c == NULL) | 809 | if (c == NULL) |
566 | return NULL; | 810 | return NULL; |
567 | skb_queue_head_init(&c->mfc_un.unres.unresolved); | 811 | skb_queue_head_init(&c->mfc_un.unres.unresolved); |
568 | c->mfc_un.unres.expires = jiffies + 10*HZ; | 812 | c->mfc_un.unres.expires = jiffies + 10*HZ; |
569 | mfc_net_set(c, net); | ||
570 | return c; | 813 | return c; |
571 | } | 814 | } |
572 | 815 | ||
@@ -574,7 +817,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) | |||
574 | * A cache entry has gone into a resolved state from queued | 817 | * A cache entry has gone into a resolved state from queued |
575 | */ | 818 | */ |
576 | 819 | ||
577 | static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | 820 | static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, |
821 | struct mfc_cache *uc, struct mfc_cache *c) | ||
578 | { | 822 | { |
579 | struct sk_buff *skb; | 823 | struct sk_buff *skb; |
580 | struct nlmsgerr *e; | 824 | struct nlmsgerr *e; |
@@ -587,7 +831,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
587 | if (ip_hdr(skb)->version == 0) { | 831 | if (ip_hdr(skb)->version == 0) { |
588 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); | 832 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); |
589 | 833 | ||
590 | if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { | 834 | if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { |
591 | nlh->nlmsg_len = (skb_tail_pointer(skb) - | 835 | nlh->nlmsg_len = (skb_tail_pointer(skb) - |
592 | (u8 *)nlh); | 836 | (u8 *)nlh); |
593 | } else { | 837 | } else { |
@@ -599,9 +843,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
599 | memset(&e->msg, 0, sizeof(e->msg)); | 843 | memset(&e->msg, 0, sizeof(e->msg)); |
600 | } | 844 | } |
601 | 845 | ||
602 | rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid); | 846 | rtnl_unicast(skb, net, NETLINK_CB(skb).pid); |
603 | } else | 847 | } else |
604 | ip_mr_forward(skb, c, 0); | 848 | ip_mr_forward(net, mrt, skb, c, 0); |
605 | } | 849 | } |
606 | } | 850 | } |
607 | 851 | ||
@@ -612,7 +856,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
612 | * Called under mrt_lock. | 856 | * Called under mrt_lock. |
613 | */ | 857 | */ |
614 | 858 | ||
615 | static int ipmr_cache_report(struct net *net, | 859 | static int ipmr_cache_report(struct mr_table *mrt, |
616 | struct sk_buff *pkt, vifi_t vifi, int assert) | 860 | struct sk_buff *pkt, vifi_t vifi, int assert) |
617 | { | 861 | { |
618 | struct sk_buff *skb; | 862 | struct sk_buff *skb; |
@@ -645,7 +889,7 @@ static int ipmr_cache_report(struct net *net, | |||
645 | memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); | 889 | memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); |
646 | msg->im_msgtype = IGMPMSG_WHOLEPKT; | 890 | msg->im_msgtype = IGMPMSG_WHOLEPKT; |
647 | msg->im_mbz = 0; | 891 | msg->im_mbz = 0; |
648 | msg->im_vif = net->ipv4.mroute_reg_vif_num; | 892 | msg->im_vif = mrt->mroute_reg_vif_num; |
649 | ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; | 893 | ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; |
650 | ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + | 894 | ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + |
651 | sizeof(struct iphdr)); | 895 | sizeof(struct iphdr)); |
@@ -677,7 +921,7 @@ static int ipmr_cache_report(struct net *net, | |||
677 | skb->transport_header = skb->network_header; | 921 | skb->transport_header = skb->network_header; |
678 | } | 922 | } |
679 | 923 | ||
680 | if (net->ipv4.mroute_sk == NULL) { | 924 | if (mrt->mroute_sk == NULL) { |
681 | kfree_skb(skb); | 925 | kfree_skb(skb); |
682 | return -EINVAL; | 926 | return -EINVAL; |
683 | } | 927 | } |
@@ -685,7 +929,7 @@ static int ipmr_cache_report(struct net *net, | |||
685 | /* | 929 | /* |
686 | * Deliver to mrouted | 930 | * Deliver to mrouted |
687 | */ | 931 | */ |
688 | ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); | 932 | ret = sock_queue_rcv_skb(mrt->mroute_sk, skb); |
689 | if (ret < 0) { | 933 | if (ret < 0) { |
690 | if (net_ratelimit()) | 934 | if (net_ratelimit()) |
691 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); | 935 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); |
@@ -700,27 +944,29 @@ static int ipmr_cache_report(struct net *net, | |||
700 | */ | 944 | */ |
701 | 945 | ||
702 | static int | 946 | static int |
703 | ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | 947 | ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) |
704 | { | 948 | { |
949 | bool found = false; | ||
705 | int err; | 950 | int err; |
706 | struct mfc_cache *c; | 951 | struct mfc_cache *c; |
707 | const struct iphdr *iph = ip_hdr(skb); | 952 | const struct iphdr *iph = ip_hdr(skb); |
708 | 953 | ||
709 | spin_lock_bh(&mfc_unres_lock); | 954 | spin_lock_bh(&mfc_unres_lock); |
710 | for (c=mfc_unres_queue; c; c=c->next) { | 955 | list_for_each_entry(c, &mrt->mfc_unres_queue, list) { |
711 | if (net_eq(mfc_net(c), net) && | 956 | if (c->mfc_mcastgrp == iph->daddr && |
712 | c->mfc_mcastgrp == iph->daddr && | 957 | c->mfc_origin == iph->saddr) { |
713 | c->mfc_origin == iph->saddr) | 958 | found = true; |
714 | break; | 959 | break; |
960 | } | ||
715 | } | 961 | } |
716 | 962 | ||
717 | if (c == NULL) { | 963 | if (!found) { |
718 | /* | 964 | /* |
719 | * Create a new entry if allowable | 965 | * Create a new entry if allowable |
720 | */ | 966 | */ |
721 | 967 | ||
722 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || | 968 | if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || |
723 | (c = ipmr_cache_alloc_unres(net)) == NULL) { | 969 | (c = ipmr_cache_alloc_unres()) == NULL) { |
724 | spin_unlock_bh(&mfc_unres_lock); | 970 | spin_unlock_bh(&mfc_unres_lock); |
725 | 971 | ||
726 | kfree_skb(skb); | 972 | kfree_skb(skb); |
@@ -737,7 +983,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
737 | /* | 983 | /* |
738 | * Reflect first query at mrouted. | 984 | * Reflect first query at mrouted. |
739 | */ | 985 | */ |
740 | err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); | 986 | err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); |
741 | if (err < 0) { | 987 | if (err < 0) { |
742 | /* If the report failed throw the cache entry | 988 | /* If the report failed throw the cache entry |
743 | out - Brad Parker | 989 | out - Brad Parker |
@@ -749,11 +995,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
749 | return err; | 995 | return err; |
750 | } | 996 | } |
751 | 997 | ||
752 | atomic_inc(&net->ipv4.cache_resolve_queue_len); | 998 | atomic_inc(&mrt->cache_resolve_queue_len); |
753 | c->next = mfc_unres_queue; | 999 | list_add(&c->list, &mrt->mfc_unres_queue); |
754 | mfc_unres_queue = c; | ||
755 | 1000 | ||
756 | mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); | 1001 | mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); |
757 | } | 1002 | } |
758 | 1003 | ||
759 | /* | 1004 | /* |
@@ -775,19 +1020,18 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
775 | * MFC cache manipulation by user space mroute daemon | 1020 | * MFC cache manipulation by user space mroute daemon |
776 | */ | 1021 | */ |
777 | 1022 | ||
778 | static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) | 1023 | static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) |
779 | { | 1024 | { |
780 | int line; | 1025 | int line; |
781 | struct mfc_cache *c, **cp; | 1026 | struct mfc_cache *c, *next; |
782 | 1027 | ||
783 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 1028 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
784 | 1029 | ||
785 | for (cp = &net->ipv4.mfc_cache_array[line]; | 1030 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { |
786 | (c = *cp) != NULL; cp = &c->next) { | ||
787 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 1031 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
788 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { | 1032 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { |
789 | write_lock_bh(&mrt_lock); | 1033 | write_lock_bh(&mrt_lock); |
790 | *cp = c->next; | 1034 | list_del(&c->list); |
791 | write_unlock_bh(&mrt_lock); | 1035 | write_unlock_bh(&mrt_lock); |
792 | 1036 | ||
793 | ipmr_cache_free(c); | 1037 | ipmr_cache_free(c); |
@@ -797,24 +1041,30 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) | |||
797 | return -ENOENT; | 1041 | return -ENOENT; |
798 | } | 1042 | } |
799 | 1043 | ||
800 | static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | 1044 | static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, |
1045 | struct mfcctl *mfc, int mrtsock) | ||
801 | { | 1046 | { |
1047 | bool found = false; | ||
802 | int line; | 1048 | int line; |
803 | struct mfc_cache *uc, *c, **cp; | 1049 | struct mfc_cache *uc, *c; |
1050 | |||
1051 | if (mfc->mfcc_parent >= MAXVIFS) | ||
1052 | return -ENFILE; | ||
804 | 1053 | ||
805 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 1054 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
806 | 1055 | ||
807 | for (cp = &net->ipv4.mfc_cache_array[line]; | 1056 | list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { |
808 | (c = *cp) != NULL; cp = &c->next) { | ||
809 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 1057 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
810 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) | 1058 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { |
1059 | found = true; | ||
811 | break; | 1060 | break; |
1061 | } | ||
812 | } | 1062 | } |
813 | 1063 | ||
814 | if (c != NULL) { | 1064 | if (found) { |
815 | write_lock_bh(&mrt_lock); | 1065 | write_lock_bh(&mrt_lock); |
816 | c->mfc_parent = mfc->mfcc_parent; | 1066 | c->mfc_parent = mfc->mfcc_parent; |
817 | ipmr_update_thresholds(c, mfc->mfcc_ttls); | 1067 | ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); |
818 | if (!mrtsock) | 1068 | if (!mrtsock) |
819 | c->mfc_flags |= MFC_STATIC; | 1069 | c->mfc_flags |= MFC_STATIC; |
820 | write_unlock_bh(&mrt_lock); | 1070 | write_unlock_bh(&mrt_lock); |
@@ -824,43 +1074,42 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
824 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) | 1074 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) |
825 | return -EINVAL; | 1075 | return -EINVAL; |
826 | 1076 | ||
827 | c = ipmr_cache_alloc(net); | 1077 | c = ipmr_cache_alloc(); |
828 | if (c == NULL) | 1078 | if (c == NULL) |
829 | return -ENOMEM; | 1079 | return -ENOMEM; |
830 | 1080 | ||
831 | c->mfc_origin = mfc->mfcc_origin.s_addr; | 1081 | c->mfc_origin = mfc->mfcc_origin.s_addr; |
832 | c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; | 1082 | c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; |
833 | c->mfc_parent = mfc->mfcc_parent; | 1083 | c->mfc_parent = mfc->mfcc_parent; |
834 | ipmr_update_thresholds(c, mfc->mfcc_ttls); | 1084 | ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); |
835 | if (!mrtsock) | 1085 | if (!mrtsock) |
836 | c->mfc_flags |= MFC_STATIC; | 1086 | c->mfc_flags |= MFC_STATIC; |
837 | 1087 | ||
838 | write_lock_bh(&mrt_lock); | 1088 | write_lock_bh(&mrt_lock); |
839 | c->next = net->ipv4.mfc_cache_array[line]; | 1089 | list_add(&c->list, &mrt->mfc_cache_array[line]); |
840 | net->ipv4.mfc_cache_array[line] = c; | ||
841 | write_unlock_bh(&mrt_lock); | 1090 | write_unlock_bh(&mrt_lock); |
842 | 1091 | ||
843 | /* | 1092 | /* |
844 | * Check to see if we resolved a queued list. If so we | 1093 | * Check to see if we resolved a queued list. If so we |
845 | * need to send on the frames and tidy up. | 1094 | * need to send on the frames and tidy up. |
846 | */ | 1095 | */ |
1096 | found = false; | ||
847 | spin_lock_bh(&mfc_unres_lock); | 1097 | spin_lock_bh(&mfc_unres_lock); |
848 | for (cp = &mfc_unres_queue; (uc=*cp) != NULL; | 1098 | list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { |
849 | cp = &uc->next) { | 1099 | if (uc->mfc_origin == c->mfc_origin && |
850 | if (net_eq(mfc_net(uc), net) && | ||
851 | uc->mfc_origin == c->mfc_origin && | ||
852 | uc->mfc_mcastgrp == c->mfc_mcastgrp) { | 1100 | uc->mfc_mcastgrp == c->mfc_mcastgrp) { |
853 | *cp = uc->next; | 1101 | list_del(&uc->list); |
854 | atomic_dec(&net->ipv4.cache_resolve_queue_len); | 1102 | atomic_dec(&mrt->cache_resolve_queue_len); |
1103 | found = true; | ||
855 | break; | 1104 | break; |
856 | } | 1105 | } |
857 | } | 1106 | } |
858 | if (mfc_unres_queue == NULL) | 1107 | if (list_empty(&mrt->mfc_unres_queue)) |
859 | del_timer(&ipmr_expire_timer); | 1108 | del_timer(&mrt->ipmr_expire_timer); |
860 | spin_unlock_bh(&mfc_unres_lock); | 1109 | spin_unlock_bh(&mfc_unres_lock); |
861 | 1110 | ||
862 | if (uc) { | 1111 | if (found) { |
863 | ipmr_cache_resolve(uc, c); | 1112 | ipmr_cache_resolve(net, mrt, uc, c); |
864 | ipmr_cache_free(uc); | 1113 | ipmr_cache_free(uc); |
865 | } | 1114 | } |
866 | return 0; | 1115 | return 0; |
@@ -870,53 +1119,41 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
870 | * Close the multicast socket, and clear the vif tables etc | 1119 | * Close the multicast socket, and clear the vif tables etc |
871 | */ | 1120 | */ |
872 | 1121 | ||
873 | static void mroute_clean_tables(struct net *net) | 1122 | static void mroute_clean_tables(struct mr_table *mrt) |
874 | { | 1123 | { |
875 | int i; | 1124 | int i; |
876 | LIST_HEAD(list); | 1125 | LIST_HEAD(list); |
1126 | struct mfc_cache *c, *next; | ||
877 | 1127 | ||
878 | /* | 1128 | /* |
879 | * Shut down all active vif entries | 1129 | * Shut down all active vif entries |
880 | */ | 1130 | */ |
881 | for (i = 0; i < net->ipv4.maxvif; i++) { | 1131 | for (i = 0; i < mrt->maxvif; i++) { |
882 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) | 1132 | if (!(mrt->vif_table[i].flags&VIFF_STATIC)) |
883 | vif_delete(net, i, 0, &list); | 1133 | vif_delete(mrt, i, 0, &list); |
884 | } | 1134 | } |
885 | unregister_netdevice_many(&list); | 1135 | unregister_netdevice_many(&list); |
886 | 1136 | ||
887 | /* | 1137 | /* |
888 | * Wipe the cache | 1138 | * Wipe the cache |
889 | */ | 1139 | */ |
890 | for (i=0; i<MFC_LINES; i++) { | 1140 | for (i = 0; i < MFC_LINES; i++) { |
891 | struct mfc_cache *c, **cp; | 1141 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { |
892 | 1142 | if (c->mfc_flags&MFC_STATIC) | |
893 | cp = &net->ipv4.mfc_cache_array[i]; | ||
894 | while ((c = *cp) != NULL) { | ||
895 | if (c->mfc_flags&MFC_STATIC) { | ||
896 | cp = &c->next; | ||
897 | continue; | 1143 | continue; |
898 | } | ||
899 | write_lock_bh(&mrt_lock); | 1144 | write_lock_bh(&mrt_lock); |
900 | *cp = c->next; | 1145 | list_del(&c->list); |
901 | write_unlock_bh(&mrt_lock); | 1146 | write_unlock_bh(&mrt_lock); |
902 | 1147 | ||
903 | ipmr_cache_free(c); | 1148 | ipmr_cache_free(c); |
904 | } | 1149 | } |
905 | } | 1150 | } |
906 | 1151 | ||
907 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { | 1152 | if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { |
908 | struct mfc_cache *c, **cp; | ||
909 | |||
910 | spin_lock_bh(&mfc_unres_lock); | 1153 | spin_lock_bh(&mfc_unres_lock); |
911 | cp = &mfc_unres_queue; | 1154 | list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { |
912 | while ((c = *cp) != NULL) { | 1155 | list_del(&c->list); |
913 | if (!net_eq(mfc_net(c), net)) { | 1156 | ipmr_destroy_unres(mrt, c); |
914 | cp = &c->next; | ||
915 | continue; | ||
916 | } | ||
917 | *cp = c->next; | ||
918 | |||
919 | ipmr_destroy_unres(c); | ||
920 | } | 1157 | } |
921 | spin_unlock_bh(&mfc_unres_lock); | 1158 | spin_unlock_bh(&mfc_unres_lock); |
922 | } | 1159 | } |
@@ -925,16 +1162,19 @@ static void mroute_clean_tables(struct net *net) | |||
925 | static void mrtsock_destruct(struct sock *sk) | 1162 | static void mrtsock_destruct(struct sock *sk) |
926 | { | 1163 | { |
927 | struct net *net = sock_net(sk); | 1164 | struct net *net = sock_net(sk); |
1165 | struct mr_table *mrt; | ||
928 | 1166 | ||
929 | rtnl_lock(); | 1167 | rtnl_lock(); |
930 | if (sk == net->ipv4.mroute_sk) { | 1168 | ipmr_for_each_table(mrt, net) { |
931 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; | 1169 | if (sk == mrt->mroute_sk) { |
1170 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; | ||
932 | 1171 | ||
933 | write_lock_bh(&mrt_lock); | 1172 | write_lock_bh(&mrt_lock); |
934 | net->ipv4.mroute_sk = NULL; | 1173 | mrt->mroute_sk = NULL; |
935 | write_unlock_bh(&mrt_lock); | 1174 | write_unlock_bh(&mrt_lock); |
936 | 1175 | ||
937 | mroute_clean_tables(net); | 1176 | mroute_clean_tables(mrt); |
1177 | } | ||
938 | } | 1178 | } |
939 | rtnl_unlock(); | 1179 | rtnl_unlock(); |
940 | } | 1180 | } |
@@ -952,9 +1192,14 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
952 | struct vifctl vif; | 1192 | struct vifctl vif; |
953 | struct mfcctl mfc; | 1193 | struct mfcctl mfc; |
954 | struct net *net = sock_net(sk); | 1194 | struct net *net = sock_net(sk); |
1195 | struct mr_table *mrt; | ||
1196 | |||
1197 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | ||
1198 | if (mrt == NULL) | ||
1199 | return -ENOENT; | ||
955 | 1200 | ||
956 | if (optname != MRT_INIT) { | 1201 | if (optname != MRT_INIT) { |
957 | if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) | 1202 | if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN)) |
958 | return -EACCES; | 1203 | return -EACCES; |
959 | } | 1204 | } |
960 | 1205 | ||
@@ -967,7 +1212,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
967 | return -ENOPROTOOPT; | 1212 | return -ENOPROTOOPT; |
968 | 1213 | ||
969 | rtnl_lock(); | 1214 | rtnl_lock(); |
970 | if (net->ipv4.mroute_sk) { | 1215 | if (mrt->mroute_sk) { |
971 | rtnl_unlock(); | 1216 | rtnl_unlock(); |
972 | return -EADDRINUSE; | 1217 | return -EADDRINUSE; |
973 | } | 1218 | } |
@@ -975,7 +1220,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
975 | ret = ip_ra_control(sk, 1, mrtsock_destruct); | 1220 | ret = ip_ra_control(sk, 1, mrtsock_destruct); |
976 | if (ret == 0) { | 1221 | if (ret == 0) { |
977 | write_lock_bh(&mrt_lock); | 1222 | write_lock_bh(&mrt_lock); |
978 | net->ipv4.mroute_sk = sk; | 1223 | mrt->mroute_sk = sk; |
979 | write_unlock_bh(&mrt_lock); | 1224 | write_unlock_bh(&mrt_lock); |
980 | 1225 | ||
981 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; | 1226 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; |
@@ -983,7 +1228,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
983 | rtnl_unlock(); | 1228 | rtnl_unlock(); |
984 | return ret; | 1229 | return ret; |
985 | case MRT_DONE: | 1230 | case MRT_DONE: |
986 | if (sk != net->ipv4.mroute_sk) | 1231 | if (sk != mrt->mroute_sk) |
987 | return -EACCES; | 1232 | return -EACCES; |
988 | return ip_ra_control(sk, 0, NULL); | 1233 | return ip_ra_control(sk, 0, NULL); |
989 | case MRT_ADD_VIF: | 1234 | case MRT_ADD_VIF: |
@@ -996,9 +1241,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
996 | return -ENFILE; | 1241 | return -ENFILE; |
997 | rtnl_lock(); | 1242 | rtnl_lock(); |
998 | if (optname == MRT_ADD_VIF) { | 1243 | if (optname == MRT_ADD_VIF) { |
999 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); | 1244 | ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk); |
1000 | } else { | 1245 | } else { |
1001 | ret = vif_delete(net, vif.vifc_vifi, 0, NULL); | 1246 | ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); |
1002 | } | 1247 | } |
1003 | rtnl_unlock(); | 1248 | rtnl_unlock(); |
1004 | return ret; | 1249 | return ret; |
@@ -1015,9 +1260,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1015 | return -EFAULT; | 1260 | return -EFAULT; |
1016 | rtnl_lock(); | 1261 | rtnl_lock(); |
1017 | if (optname == MRT_DEL_MFC) | 1262 | if (optname == MRT_DEL_MFC) |
1018 | ret = ipmr_mfc_delete(net, &mfc); | 1263 | ret = ipmr_mfc_delete(mrt, &mfc); |
1019 | else | 1264 | else |
1020 | ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); | 1265 | ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk); |
1021 | rtnl_unlock(); | 1266 | rtnl_unlock(); |
1022 | return ret; | 1267 | return ret; |
1023 | /* | 1268 | /* |
@@ -1028,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1028 | int v; | 1273 | int v; |
1029 | if (get_user(v,(int __user *)optval)) | 1274 | if (get_user(v,(int __user *)optval)) |
1030 | return -EFAULT; | 1275 | return -EFAULT; |
1031 | net->ipv4.mroute_do_assert = (v) ? 1 : 0; | 1276 | mrt->mroute_do_assert = (v) ? 1 : 0; |
1032 | return 0; | 1277 | return 0; |
1033 | } | 1278 | } |
1034 | #ifdef CONFIG_IP_PIMSM | 1279 | #ifdef CONFIG_IP_PIMSM |
@@ -1042,14 +1287,35 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1042 | 1287 | ||
1043 | rtnl_lock(); | 1288 | rtnl_lock(); |
1044 | ret = 0; | 1289 | ret = 0; |
1045 | if (v != net->ipv4.mroute_do_pim) { | 1290 | if (v != mrt->mroute_do_pim) { |
1046 | net->ipv4.mroute_do_pim = v; | 1291 | mrt->mroute_do_pim = v; |
1047 | net->ipv4.mroute_do_assert = v; | 1292 | mrt->mroute_do_assert = v; |
1048 | } | 1293 | } |
1049 | rtnl_unlock(); | 1294 | rtnl_unlock(); |
1050 | return ret; | 1295 | return ret; |
1051 | } | 1296 | } |
1052 | #endif | 1297 | #endif |
1298 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||
1299 | case MRT_TABLE: | ||
1300 | { | ||
1301 | u32 v; | ||
1302 | |||
1303 | if (optlen != sizeof(u32)) | ||
1304 | return -EINVAL; | ||
1305 | if (get_user(v, (u32 __user *)optval)) | ||
1306 | return -EFAULT; | ||
1307 | if (sk == mrt->mroute_sk) | ||
1308 | return -EBUSY; | ||
1309 | |||
1310 | rtnl_lock(); | ||
1311 | ret = 0; | ||
1312 | if (!ipmr_new_table(net, v)) | ||
1313 | ret = -ENOMEM; | ||
1314 | raw_sk(sk)->ipmr_table = v; | ||
1315 | rtnl_unlock(); | ||
1316 | return ret; | ||
1317 | } | ||
1318 | #endif | ||
1053 | /* | 1319 | /* |
1054 | * Spurious command, or MRT_VERSION which you cannot | 1320 | * Spurious command, or MRT_VERSION which you cannot |
1055 | * set. | 1321 | * set. |
@@ -1068,6 +1334,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1068 | int olr; | 1334 | int olr; |
1069 | int val; | 1335 | int val; |
1070 | struct net *net = sock_net(sk); | 1336 | struct net *net = sock_net(sk); |
1337 | struct mr_table *mrt; | ||
1338 | |||
1339 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | ||
1340 | if (mrt == NULL) | ||
1341 | return -ENOENT; | ||
1071 | 1342 | ||
1072 | if (optname != MRT_VERSION && | 1343 | if (optname != MRT_VERSION && |
1073 | #ifdef CONFIG_IP_PIMSM | 1344 | #ifdef CONFIG_IP_PIMSM |
@@ -1089,10 +1360,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1089 | val = 0x0305; | 1360 | val = 0x0305; |
1090 | #ifdef CONFIG_IP_PIMSM | 1361 | #ifdef CONFIG_IP_PIMSM |
1091 | else if (optname == MRT_PIM) | 1362 | else if (optname == MRT_PIM) |
1092 | val = net->ipv4.mroute_do_pim; | 1363 | val = mrt->mroute_do_pim; |
1093 | #endif | 1364 | #endif |
1094 | else | 1365 | else |
1095 | val = net->ipv4.mroute_do_assert; | 1366 | val = mrt->mroute_do_assert; |
1096 | if (copy_to_user(optval, &val, olr)) | 1367 | if (copy_to_user(optval, &val, olr)) |
1097 | return -EFAULT; | 1368 | return -EFAULT; |
1098 | return 0; | 1369 | return 0; |
@@ -1109,16 +1380,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1109 | struct vif_device *vif; | 1380 | struct vif_device *vif; |
1110 | struct mfc_cache *c; | 1381 | struct mfc_cache *c; |
1111 | struct net *net = sock_net(sk); | 1382 | struct net *net = sock_net(sk); |
1383 | struct mr_table *mrt; | ||
1384 | |||
1385 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | ||
1386 | if (mrt == NULL) | ||
1387 | return -ENOENT; | ||
1112 | 1388 | ||
1113 | switch (cmd) { | 1389 | switch (cmd) { |
1114 | case SIOCGETVIFCNT: | 1390 | case SIOCGETVIFCNT: |
1115 | if (copy_from_user(&vr, arg, sizeof(vr))) | 1391 | if (copy_from_user(&vr, arg, sizeof(vr))) |
1116 | return -EFAULT; | 1392 | return -EFAULT; |
1117 | if (vr.vifi >= net->ipv4.maxvif) | 1393 | if (vr.vifi >= mrt->maxvif) |
1118 | return -EINVAL; | 1394 | return -EINVAL; |
1119 | read_lock(&mrt_lock); | 1395 | read_lock(&mrt_lock); |
1120 | vif = &net->ipv4.vif_table[vr.vifi]; | 1396 | vif = &mrt->vif_table[vr.vifi]; |
1121 | if (VIF_EXISTS(net, vr.vifi)) { | 1397 | if (VIF_EXISTS(mrt, vr.vifi)) { |
1122 | vr.icount = vif->pkt_in; | 1398 | vr.icount = vif->pkt_in; |
1123 | vr.ocount = vif->pkt_out; | 1399 | vr.ocount = vif->pkt_out; |
1124 | vr.ibytes = vif->bytes_in; | 1400 | vr.ibytes = vif->bytes_in; |
@@ -1136,7 +1412,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1136 | return -EFAULT; | 1412 | return -EFAULT; |
1137 | 1413 | ||
1138 | read_lock(&mrt_lock); | 1414 | read_lock(&mrt_lock); |
1139 | c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); | 1415 | c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); |
1140 | if (c) { | 1416 | if (c) { |
1141 | sr.pktcnt = c->mfc_un.res.pkt; | 1417 | sr.pktcnt = c->mfc_un.res.pkt; |
1142 | sr.bytecnt = c->mfc_un.res.bytes; | 1418 | sr.bytecnt = c->mfc_un.res.bytes; |
@@ -1159,16 +1435,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1159 | { | 1435 | { |
1160 | struct net_device *dev = ptr; | 1436 | struct net_device *dev = ptr; |
1161 | struct net *net = dev_net(dev); | 1437 | struct net *net = dev_net(dev); |
1438 | struct mr_table *mrt; | ||
1162 | struct vif_device *v; | 1439 | struct vif_device *v; |
1163 | int ct; | 1440 | int ct; |
1164 | LIST_HEAD(list); | 1441 | LIST_HEAD(list); |
1165 | 1442 | ||
1166 | if (event != NETDEV_UNREGISTER) | 1443 | if (event != NETDEV_UNREGISTER) |
1167 | return NOTIFY_DONE; | 1444 | return NOTIFY_DONE; |
1168 | v = &net->ipv4.vif_table[0]; | 1445 | |
1169 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { | 1446 | ipmr_for_each_table(mrt, net) { |
1170 | if (v->dev == dev) | 1447 | v = &mrt->vif_table[0]; |
1171 | vif_delete(net, ct, 1, &list); | 1448 | for (ct = 0; ct < mrt->maxvif; ct++, v++) { |
1449 | if (v->dev == dev) | ||
1450 | vif_delete(mrt, ct, 1, &list); | ||
1451 | } | ||
1172 | } | 1452 | } |
1173 | unregister_netdevice_many(&list); | 1453 | unregister_netdevice_many(&list); |
1174 | return NOTIFY_DONE; | 1454 | return NOTIFY_DONE; |
@@ -1227,11 +1507,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1227 | * Processing handlers for ipmr_forward | 1507 | * Processing handlers for ipmr_forward |
1228 | */ | 1508 | */ |
1229 | 1509 | ||
1230 | static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | 1510 | static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, |
1511 | struct sk_buff *skb, struct mfc_cache *c, int vifi) | ||
1231 | { | 1512 | { |
1232 | struct net *net = mfc_net(c); | ||
1233 | const struct iphdr *iph = ip_hdr(skb); | 1513 | const struct iphdr *iph = ip_hdr(skb); |
1234 | struct vif_device *vif = &net->ipv4.vif_table[vifi]; | 1514 | struct vif_device *vif = &mrt->vif_table[vifi]; |
1235 | struct net_device *dev; | 1515 | struct net_device *dev; |
1236 | struct rtable *rt; | 1516 | struct rtable *rt; |
1237 | int encap = 0; | 1517 | int encap = 0; |
@@ -1245,7 +1525,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1245 | vif->bytes_out += skb->len; | 1525 | vif->bytes_out += skb->len; |
1246 | vif->dev->stats.tx_bytes += skb->len; | 1526 | vif->dev->stats.tx_bytes += skb->len; |
1247 | vif->dev->stats.tx_packets++; | 1527 | vif->dev->stats.tx_packets++; |
1248 | ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); | 1528 | ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); |
1249 | goto out_free; | 1529 | goto out_free; |
1250 | } | 1530 | } |
1251 | #endif | 1531 | #endif |
@@ -1328,12 +1608,12 @@ out_free: | |||
1328 | return; | 1608 | return; |
1329 | } | 1609 | } |
1330 | 1610 | ||
1331 | static int ipmr_find_vif(struct net_device *dev) | 1611 | static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) |
1332 | { | 1612 | { |
1333 | struct net *net = dev_net(dev); | ||
1334 | int ct; | 1613 | int ct; |
1335 | for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { | 1614 | |
1336 | if (net->ipv4.vif_table[ct].dev == dev) | 1615 | for (ct = mrt->maxvif-1; ct >= 0; ct--) { |
1616 | if (mrt->vif_table[ct].dev == dev) | ||
1337 | break; | 1617 | break; |
1338 | } | 1618 | } |
1339 | return ct; | 1619 | return ct; |
@@ -1341,11 +1621,12 @@ static int ipmr_find_vif(struct net_device *dev) | |||
1341 | 1621 | ||
1342 | /* "local" means that we should preserve one skb (for local delivery) */ | 1622 | /* "local" means that we should preserve one skb (for local delivery) */ |
1343 | 1623 | ||
1344 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local) | 1624 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, |
1625 | struct sk_buff *skb, struct mfc_cache *cache, | ||
1626 | int local) | ||
1345 | { | 1627 | { |
1346 | int psend = -1; | 1628 | int psend = -1; |
1347 | int vif, ct; | 1629 | int vif, ct; |
1348 | struct net *net = mfc_net(cache); | ||
1349 | 1630 | ||
1350 | vif = cache->mfc_parent; | 1631 | vif = cache->mfc_parent; |
1351 | cache->mfc_un.res.pkt++; | 1632 | cache->mfc_un.res.pkt++; |
@@ -1354,7 +1635,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1354 | /* | 1635 | /* |
1355 | * Wrong interface: drop packet and (maybe) send PIM assert. | 1636 | * Wrong interface: drop packet and (maybe) send PIM assert. |
1356 | */ | 1637 | */ |
1357 | if (net->ipv4.vif_table[vif].dev != skb->dev) { | 1638 | if (mrt->vif_table[vif].dev != skb->dev) { |
1358 | int true_vifi; | 1639 | int true_vifi; |
1359 | 1640 | ||
1360 | if (skb_rtable(skb)->fl.iif == 0) { | 1641 | if (skb_rtable(skb)->fl.iif == 0) { |
@@ -1373,26 +1654,26 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1373 | } | 1654 | } |
1374 | 1655 | ||
1375 | cache->mfc_un.res.wrong_if++; | 1656 | cache->mfc_un.res.wrong_if++; |
1376 | true_vifi = ipmr_find_vif(skb->dev); | 1657 | true_vifi = ipmr_find_vif(mrt, skb->dev); |
1377 | 1658 | ||
1378 | if (true_vifi >= 0 && net->ipv4.mroute_do_assert && | 1659 | if (true_vifi >= 0 && mrt->mroute_do_assert && |
1379 | /* pimsm uses asserts, when switching from RPT to SPT, | 1660 | /* pimsm uses asserts, when switching from RPT to SPT, |
1380 | so that we cannot check that packet arrived on an oif. | 1661 | so that we cannot check that packet arrived on an oif. |
1381 | It is bad, but otherwise we would need to move pretty | 1662 | It is bad, but otherwise we would need to move pretty |
1382 | large chunk of pimd to kernel. Ough... --ANK | 1663 | large chunk of pimd to kernel. Ough... --ANK |
1383 | */ | 1664 | */ |
1384 | (net->ipv4.mroute_do_pim || | 1665 | (mrt->mroute_do_pim || |
1385 | cache->mfc_un.res.ttls[true_vifi] < 255) && | 1666 | cache->mfc_un.res.ttls[true_vifi] < 255) && |
1386 | time_after(jiffies, | 1667 | time_after(jiffies, |
1387 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { | 1668 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { |
1388 | cache->mfc_un.res.last_assert = jiffies; | 1669 | cache->mfc_un.res.last_assert = jiffies; |
1389 | ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); | 1670 | ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); |
1390 | } | 1671 | } |
1391 | goto dont_forward; | 1672 | goto dont_forward; |
1392 | } | 1673 | } |
1393 | 1674 | ||
1394 | net->ipv4.vif_table[vif].pkt_in++; | 1675 | mrt->vif_table[vif].pkt_in++; |
1395 | net->ipv4.vif_table[vif].bytes_in += skb->len; | 1676 | mrt->vif_table[vif].bytes_in += skb->len; |
1396 | 1677 | ||
1397 | /* | 1678 | /* |
1398 | * Forward the frame | 1679 | * Forward the frame |
@@ -1402,7 +1683,8 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1402 | if (psend != -1) { | 1683 | if (psend != -1) { |
1403 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 1684 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1404 | if (skb2) | 1685 | if (skb2) |
1405 | ipmr_queue_xmit(skb2, cache, psend); | 1686 | ipmr_queue_xmit(net, mrt, skb2, cache, |
1687 | psend); | ||
1406 | } | 1688 | } |
1407 | psend = ct; | 1689 | psend = ct; |
1408 | } | 1690 | } |
@@ -1411,9 +1693,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1411 | if (local) { | 1693 | if (local) { |
1412 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 1694 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1413 | if (skb2) | 1695 | if (skb2) |
1414 | ipmr_queue_xmit(skb2, cache, psend); | 1696 | ipmr_queue_xmit(net, mrt, skb2, cache, psend); |
1415 | } else { | 1697 | } else { |
1416 | ipmr_queue_xmit(skb, cache, psend); | 1698 | ipmr_queue_xmit(net, mrt, skb, cache, psend); |
1417 | return 0; | 1699 | return 0; |
1418 | } | 1700 | } |
1419 | } | 1701 | } |
@@ -1434,6 +1716,8 @@ int ip_mr_input(struct sk_buff *skb) | |||
1434 | struct mfc_cache *cache; | 1716 | struct mfc_cache *cache; |
1435 | struct net *net = dev_net(skb->dev); | 1717 | struct net *net = dev_net(skb->dev); |
1436 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; | 1718 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; |
1719 | struct mr_table *mrt; | ||
1720 | int err; | ||
1437 | 1721 | ||
1438 | /* Packet is looped back after forward, it should not be | 1722 | /* Packet is looped back after forward, it should not be |
1439 | forwarded second time, but still can be delivered locally. | 1723 | forwarded second time, but still can be delivered locally. |
@@ -1441,6 +1725,10 @@ int ip_mr_input(struct sk_buff *skb) | |||
1441 | if (IPCB(skb)->flags&IPSKB_FORWARDED) | 1725 | if (IPCB(skb)->flags&IPSKB_FORWARDED) |
1442 | goto dont_forward; | 1726 | goto dont_forward; |
1443 | 1727 | ||
1728 | err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt); | ||
1729 | if (err < 0) | ||
1730 | return err; | ||
1731 | |||
1444 | if (!local) { | 1732 | if (!local) { |
1445 | if (IPCB(skb)->opt.router_alert) { | 1733 | if (IPCB(skb)->opt.router_alert) { |
1446 | if (ip_call_ra_chain(skb)) | 1734 | if (ip_call_ra_chain(skb)) |
@@ -1453,9 +1741,9 @@ int ip_mr_input(struct sk_buff *skb) | |||
1453 | that we can forward NO IGMP messages. | 1741 | that we can forward NO IGMP messages. |
1454 | */ | 1742 | */ |
1455 | read_lock(&mrt_lock); | 1743 | read_lock(&mrt_lock); |
1456 | if (net->ipv4.mroute_sk) { | 1744 | if (mrt->mroute_sk) { |
1457 | nf_reset(skb); | 1745 | nf_reset(skb); |
1458 | raw_rcv(net->ipv4.mroute_sk, skb); | 1746 | raw_rcv(mrt->mroute_sk, skb); |
1459 | read_unlock(&mrt_lock); | 1747 | read_unlock(&mrt_lock); |
1460 | return 0; | 1748 | return 0; |
1461 | } | 1749 | } |
@@ -1464,7 +1752,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1464 | } | 1752 | } |
1465 | 1753 | ||
1466 | read_lock(&mrt_lock); | 1754 | read_lock(&mrt_lock); |
1467 | cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | 1755 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
1468 | 1756 | ||
1469 | /* | 1757 | /* |
1470 | * No usable cache entry | 1758 | * No usable cache entry |
@@ -1482,19 +1770,19 @@ int ip_mr_input(struct sk_buff *skb) | |||
1482 | skb = skb2; | 1770 | skb = skb2; |
1483 | } | 1771 | } |
1484 | 1772 | ||
1485 | vif = ipmr_find_vif(skb->dev); | 1773 | vif = ipmr_find_vif(mrt, skb->dev); |
1486 | if (vif >= 0) { | 1774 | if (vif >= 0) { |
1487 | int err = ipmr_cache_unresolved(net, vif, skb); | 1775 | int err2 = ipmr_cache_unresolved(mrt, vif, skb); |
1488 | read_unlock(&mrt_lock); | 1776 | read_unlock(&mrt_lock); |
1489 | 1777 | ||
1490 | return err; | 1778 | return err2; |
1491 | } | 1779 | } |
1492 | read_unlock(&mrt_lock); | 1780 | read_unlock(&mrt_lock); |
1493 | kfree_skb(skb); | 1781 | kfree_skb(skb); |
1494 | return -ENODEV; | 1782 | return -ENODEV; |
1495 | } | 1783 | } |
1496 | 1784 | ||
1497 | ip_mr_forward(skb, cache, local); | 1785 | ip_mr_forward(net, mrt, skb, cache, local); |
1498 | 1786 | ||
1499 | read_unlock(&mrt_lock); | 1787 | read_unlock(&mrt_lock); |
1500 | 1788 | ||
@@ -1511,11 +1799,11 @@ dont_forward: | |||
1511 | } | 1799 | } |
1512 | 1800 | ||
1513 | #ifdef CONFIG_IP_PIMSM | 1801 | #ifdef CONFIG_IP_PIMSM |
1514 | static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | 1802 | static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, |
1803 | unsigned int pimlen) | ||
1515 | { | 1804 | { |
1516 | struct net_device *reg_dev = NULL; | 1805 | struct net_device *reg_dev = NULL; |
1517 | struct iphdr *encap; | 1806 | struct iphdr *encap; |
1518 | struct net *net = dev_net(skb->dev); | ||
1519 | 1807 | ||
1520 | encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); | 1808 | encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); |
1521 | /* | 1809 | /* |
@@ -1530,8 +1818,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1530 | return 1; | 1818 | return 1; |
1531 | 1819 | ||
1532 | read_lock(&mrt_lock); | 1820 | read_lock(&mrt_lock); |
1533 | if (net->ipv4.mroute_reg_vif_num >= 0) | 1821 | if (mrt->mroute_reg_vif_num >= 0) |
1534 | reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev; | 1822 | reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; |
1535 | if (reg_dev) | 1823 | if (reg_dev) |
1536 | dev_hold(reg_dev); | 1824 | dev_hold(reg_dev); |
1537 | read_unlock(&mrt_lock); | 1825 | read_unlock(&mrt_lock); |
@@ -1566,17 +1854,21 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
1566 | { | 1854 | { |
1567 | struct igmphdr *pim; | 1855 | struct igmphdr *pim; |
1568 | struct net *net = dev_net(skb->dev); | 1856 | struct net *net = dev_net(skb->dev); |
1857 | struct mr_table *mrt; | ||
1569 | 1858 | ||
1570 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) | 1859 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) |
1571 | goto drop; | 1860 | goto drop; |
1572 | 1861 | ||
1573 | pim = igmp_hdr(skb); | 1862 | pim = igmp_hdr(skb); |
1574 | 1863 | ||
1575 | if (!net->ipv4.mroute_do_pim || | 1864 | if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) |
1865 | goto drop; | ||
1866 | |||
1867 | if (!mrt->mroute_do_pim || | ||
1576 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) | 1868 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) |
1577 | goto drop; | 1869 | goto drop; |
1578 | 1870 | ||
1579 | if (__pim_rcv(skb, sizeof(*pim))) { | 1871 | if (__pim_rcv(mrt, skb, sizeof(*pim))) { |
1580 | drop: | 1872 | drop: |
1581 | kfree_skb(skb); | 1873 | kfree_skb(skb); |
1582 | } | 1874 | } |
@@ -1588,6 +1880,8 @@ drop: | |||
1588 | static int pim_rcv(struct sk_buff * skb) | 1880 | static int pim_rcv(struct sk_buff * skb) |
1589 | { | 1881 | { |
1590 | struct pimreghdr *pim; | 1882 | struct pimreghdr *pim; |
1883 | struct net *net = dev_net(skb->dev); | ||
1884 | struct mr_table *mrt; | ||
1591 | 1885 | ||
1592 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) | 1886 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) |
1593 | goto drop; | 1887 | goto drop; |
@@ -1599,7 +1893,10 @@ static int pim_rcv(struct sk_buff * skb) | |||
1599 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) | 1893 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) |
1600 | goto drop; | 1894 | goto drop; |
1601 | 1895 | ||
1602 | if (__pim_rcv(skb, sizeof(*pim))) { | 1896 | if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) |
1897 | goto drop; | ||
1898 | |||
1899 | if (__pim_rcv(mrt, skb, sizeof(*pim))) { | ||
1603 | drop: | 1900 | drop: |
1604 | kfree_skb(skb); | 1901 | kfree_skb(skb); |
1605 | } | 1902 | } |
@@ -1607,29 +1904,31 @@ drop: | |||
1607 | } | 1904 | } |
1608 | #endif | 1905 | #endif |
1609 | 1906 | ||
1610 | static int | 1907 | static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
1611 | ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | 1908 | struct mfc_cache *c, struct rtmsg *rtm) |
1612 | { | 1909 | { |
1613 | int ct; | 1910 | int ct; |
1614 | struct rtnexthop *nhp; | 1911 | struct rtnexthop *nhp; |
1615 | struct net *net = mfc_net(c); | ||
1616 | struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev; | ||
1617 | u8 *b = skb_tail_pointer(skb); | 1912 | u8 *b = skb_tail_pointer(skb); |
1618 | struct rtattr *mp_head; | 1913 | struct rtattr *mp_head; |
1619 | 1914 | ||
1620 | if (dev) | 1915 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1621 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1916 | if (c->mfc_parent > MAXVIFS) |
1917 | return -ENOENT; | ||
1918 | |||
1919 | if (VIF_EXISTS(mrt, c->mfc_parent)) | ||
1920 | RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); | ||
1622 | 1921 | ||
1623 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1922 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1624 | 1923 | ||
1625 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1924 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1626 | if (c->mfc_un.res.ttls[ct] < 255) { | 1925 | if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1627 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1926 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1628 | goto rtattr_failure; | 1927 | goto rtattr_failure; |
1629 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1928 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
1630 | nhp->rtnh_flags = 0; | 1929 | nhp->rtnh_flags = 0; |
1631 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; | 1930 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; |
1632 | nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex; | 1931 | nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; |
1633 | nhp->rtnh_len = sizeof(*nhp); | 1932 | nhp->rtnh_len = sizeof(*nhp); |
1634 | } | 1933 | } |
1635 | } | 1934 | } |
@@ -1647,11 +1946,16 @@ int ipmr_get_route(struct net *net, | |||
1647 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) | 1946 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) |
1648 | { | 1947 | { |
1649 | int err; | 1948 | int err; |
1949 | struct mr_table *mrt; | ||
1650 | struct mfc_cache *cache; | 1950 | struct mfc_cache *cache; |
1651 | struct rtable *rt = skb_rtable(skb); | 1951 | struct rtable *rt = skb_rtable(skb); |
1652 | 1952 | ||
1953 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||
1954 | if (mrt == NULL) | ||
1955 | return -ENOENT; | ||
1956 | |||
1653 | read_lock(&mrt_lock); | 1957 | read_lock(&mrt_lock); |
1654 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); | 1958 | cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); |
1655 | 1959 | ||
1656 | if (cache == NULL) { | 1960 | if (cache == NULL) { |
1657 | struct sk_buff *skb2; | 1961 | struct sk_buff *skb2; |
@@ -1665,7 +1969,7 @@ int ipmr_get_route(struct net *net, | |||
1665 | } | 1969 | } |
1666 | 1970 | ||
1667 | dev = skb->dev; | 1971 | dev = skb->dev; |
1668 | if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) { | 1972 | if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) { |
1669 | read_unlock(&mrt_lock); | 1973 | read_unlock(&mrt_lock); |
1670 | return -ENODEV; | 1974 | return -ENODEV; |
1671 | } | 1975 | } |
@@ -1682,24 +1986,107 @@ int ipmr_get_route(struct net *net, | |||
1682 | iph->saddr = rt->rt_src; | 1986 | iph->saddr = rt->rt_src; |
1683 | iph->daddr = rt->rt_dst; | 1987 | iph->daddr = rt->rt_dst; |
1684 | iph->version = 0; | 1988 | iph->version = 0; |
1685 | err = ipmr_cache_unresolved(net, vif, skb2); | 1989 | err = ipmr_cache_unresolved(mrt, vif, skb2); |
1686 | read_unlock(&mrt_lock); | 1990 | read_unlock(&mrt_lock); |
1687 | return err; | 1991 | return err; |
1688 | } | 1992 | } |
1689 | 1993 | ||
1690 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) | 1994 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) |
1691 | cache->mfc_flags |= MFC_NOTIFY; | 1995 | cache->mfc_flags |= MFC_NOTIFY; |
1692 | err = ipmr_fill_mroute(skb, cache, rtm); | 1996 | err = __ipmr_fill_mroute(mrt, skb, cache, rtm); |
1693 | read_unlock(&mrt_lock); | 1997 | read_unlock(&mrt_lock); |
1694 | return err; | 1998 | return err; |
1695 | } | 1999 | } |
1696 | 2000 | ||
2001 | static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | ||
2002 | u32 pid, u32 seq, struct mfc_cache *c) | ||
2003 | { | ||
2004 | struct nlmsghdr *nlh; | ||
2005 | struct rtmsg *rtm; | ||
2006 | |||
2007 | nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); | ||
2008 | if (nlh == NULL) | ||
2009 | return -EMSGSIZE; | ||
2010 | |||
2011 | rtm = nlmsg_data(nlh); | ||
2012 | rtm->rtm_family = RTNL_FAMILY_IPMR; | ||
2013 | rtm->rtm_dst_len = 32; | ||
2014 | rtm->rtm_src_len = 32; | ||
2015 | rtm->rtm_tos = 0; | ||
2016 | rtm->rtm_table = mrt->id; | ||
2017 | NLA_PUT_U32(skb, RTA_TABLE, mrt->id); | ||
2018 | rtm->rtm_type = RTN_MULTICAST; | ||
2019 | rtm->rtm_scope = RT_SCOPE_UNIVERSE; | ||
2020 | rtm->rtm_protocol = RTPROT_UNSPEC; | ||
2021 | rtm->rtm_flags = 0; | ||
2022 | |||
2023 | NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); | ||
2024 | NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); | ||
2025 | |||
2026 | if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) | ||
2027 | goto nla_put_failure; | ||
2028 | |||
2029 | return nlmsg_end(skb, nlh); | ||
2030 | |||
2031 | nla_put_failure: | ||
2032 | nlmsg_cancel(skb, nlh); | ||
2033 | return -EMSGSIZE; | ||
2034 | } | ||
2035 | |||
2036 | static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | ||
2037 | { | ||
2038 | struct net *net = sock_net(skb->sk); | ||
2039 | struct mr_table *mrt; | ||
2040 | struct mfc_cache *mfc; | ||
2041 | unsigned int t = 0, s_t; | ||
2042 | unsigned int h = 0, s_h; | ||
2043 | unsigned int e = 0, s_e; | ||
2044 | |||
2045 | s_t = cb->args[0]; | ||
2046 | s_h = cb->args[1]; | ||
2047 | s_e = cb->args[2]; | ||
2048 | |||
2049 | read_lock(&mrt_lock); | ||
2050 | ipmr_for_each_table(mrt, net) { | ||
2051 | if (t < s_t) | ||
2052 | goto next_table; | ||
2053 | if (t > s_t) | ||
2054 | s_h = 0; | ||
2055 | for (h = s_h; h < MFC_LINES; h++) { | ||
2056 | list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) { | ||
2057 | if (e < s_e) | ||
2058 | goto next_entry; | ||
2059 | if (ipmr_fill_mroute(mrt, skb, | ||
2060 | NETLINK_CB(cb->skb).pid, | ||
2061 | cb->nlh->nlmsg_seq, | ||
2062 | mfc) < 0) | ||
2063 | goto done; | ||
2064 | next_entry: | ||
2065 | e++; | ||
2066 | } | ||
2067 | e = s_e = 0; | ||
2068 | } | ||
2069 | s_h = 0; | ||
2070 | next_table: | ||
2071 | t++; | ||
2072 | } | ||
2073 | done: | ||
2074 | read_unlock(&mrt_lock); | ||
2075 | |||
2076 | cb->args[2] = e; | ||
2077 | cb->args[1] = h; | ||
2078 | cb->args[0] = t; | ||
2079 | |||
2080 | return skb->len; | ||
2081 | } | ||
2082 | |||
1697 | #ifdef CONFIG_PROC_FS | 2083 | #ifdef CONFIG_PROC_FS |
1698 | /* | 2084 | /* |
1699 | * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif | 2085 | * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif |
1700 | */ | 2086 | */ |
1701 | struct ipmr_vif_iter { | 2087 | struct ipmr_vif_iter { |
1702 | struct seq_net_private p; | 2088 | struct seq_net_private p; |
2089 | struct mr_table *mrt; | ||
1703 | int ct; | 2090 | int ct; |
1704 | }; | 2091 | }; |
1705 | 2092 | ||
@@ -1707,11 +2094,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, | |||
1707 | struct ipmr_vif_iter *iter, | 2094 | struct ipmr_vif_iter *iter, |
1708 | loff_t pos) | 2095 | loff_t pos) |
1709 | { | 2096 | { |
1710 | for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) { | 2097 | struct mr_table *mrt = iter->mrt; |
1711 | if (!VIF_EXISTS(net, iter->ct)) | 2098 | |
2099 | for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { | ||
2100 | if (!VIF_EXISTS(mrt, iter->ct)) | ||
1712 | continue; | 2101 | continue; |
1713 | if (pos-- == 0) | 2102 | if (pos-- == 0) |
1714 | return &net->ipv4.vif_table[iter->ct]; | 2103 | return &mrt->vif_table[iter->ct]; |
1715 | } | 2104 | } |
1716 | return NULL; | 2105 | return NULL; |
1717 | } | 2106 | } |
@@ -1719,7 +2108,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, | |||
1719 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) | 2108 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) |
1720 | __acquires(mrt_lock) | 2109 | __acquires(mrt_lock) |
1721 | { | 2110 | { |
2111 | struct ipmr_vif_iter *iter = seq->private; | ||
1722 | struct net *net = seq_file_net(seq); | 2112 | struct net *net = seq_file_net(seq); |
2113 | struct mr_table *mrt; | ||
2114 | |||
2115 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||
2116 | if (mrt == NULL) | ||
2117 | return ERR_PTR(-ENOENT); | ||
2118 | |||
2119 | iter->mrt = mrt; | ||
1723 | 2120 | ||
1724 | read_lock(&mrt_lock); | 2121 | read_lock(&mrt_lock); |
1725 | return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) | 2122 | return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) |
@@ -1730,15 +2127,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1730 | { | 2127 | { |
1731 | struct ipmr_vif_iter *iter = seq->private; | 2128 | struct ipmr_vif_iter *iter = seq->private; |
1732 | struct net *net = seq_file_net(seq); | 2129 | struct net *net = seq_file_net(seq); |
2130 | struct mr_table *mrt = iter->mrt; | ||
1733 | 2131 | ||
1734 | ++*pos; | 2132 | ++*pos; |
1735 | if (v == SEQ_START_TOKEN) | 2133 | if (v == SEQ_START_TOKEN) |
1736 | return ipmr_vif_seq_idx(net, iter, 0); | 2134 | return ipmr_vif_seq_idx(net, iter, 0); |
1737 | 2135 | ||
1738 | while (++iter->ct < net->ipv4.maxvif) { | 2136 | while (++iter->ct < mrt->maxvif) { |
1739 | if (!VIF_EXISTS(net, iter->ct)) | 2137 | if (!VIF_EXISTS(mrt, iter->ct)) |
1740 | continue; | 2138 | continue; |
1741 | return &net->ipv4.vif_table[iter->ct]; | 2139 | return &mrt->vif_table[iter->ct]; |
1742 | } | 2140 | } |
1743 | return NULL; | 2141 | return NULL; |
1744 | } | 2142 | } |
@@ -1751,7 +2149,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) | |||
1751 | 2149 | ||
1752 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | 2150 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) |
1753 | { | 2151 | { |
1754 | struct net *net = seq_file_net(seq); | 2152 | struct ipmr_vif_iter *iter = seq->private; |
2153 | struct mr_table *mrt = iter->mrt; | ||
1755 | 2154 | ||
1756 | if (v == SEQ_START_TOKEN) { | 2155 | if (v == SEQ_START_TOKEN) { |
1757 | seq_puts(seq, | 2156 | seq_puts(seq, |
@@ -1762,7 +2161,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | |||
1762 | 2161 | ||
1763 | seq_printf(seq, | 2162 | seq_printf(seq, |
1764 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", | 2163 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", |
1765 | vif - net->ipv4.vif_table, | 2164 | vif - mrt->vif_table, |
1766 | name, vif->bytes_in, vif->pkt_in, | 2165 | name, vif->bytes_in, vif->pkt_in, |
1767 | vif->bytes_out, vif->pkt_out, | 2166 | vif->bytes_out, vif->pkt_out, |
1768 | vif->flags, vif->local, vif->remote); | 2167 | vif->flags, vif->local, vif->remote); |
@@ -1793,7 +2192,8 @@ static const struct file_operations ipmr_vif_fops = { | |||
1793 | 2192 | ||
1794 | struct ipmr_mfc_iter { | 2193 | struct ipmr_mfc_iter { |
1795 | struct seq_net_private p; | 2194 | struct seq_net_private p; |
1796 | struct mfc_cache **cache; | 2195 | struct mr_table *mrt; |
2196 | struct list_head *cache; | ||
1797 | int ct; | 2197 | int ct; |
1798 | }; | 2198 | }; |
1799 | 2199 | ||
@@ -1801,22 +2201,22 @@ struct ipmr_mfc_iter { | |||
1801 | static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, | 2201 | static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, |
1802 | struct ipmr_mfc_iter *it, loff_t pos) | 2202 | struct ipmr_mfc_iter *it, loff_t pos) |
1803 | { | 2203 | { |
2204 | struct mr_table *mrt = it->mrt; | ||
1804 | struct mfc_cache *mfc; | 2205 | struct mfc_cache *mfc; |
1805 | 2206 | ||
1806 | it->cache = net->ipv4.mfc_cache_array; | ||
1807 | read_lock(&mrt_lock); | 2207 | read_lock(&mrt_lock); |
1808 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) | 2208 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { |
1809 | for (mfc = net->ipv4.mfc_cache_array[it->ct]; | 2209 | it->cache = &mrt->mfc_cache_array[it->ct]; |
1810 | mfc; mfc = mfc->next) | 2210 | list_for_each_entry(mfc, it->cache, list) |
1811 | if (pos-- == 0) | 2211 | if (pos-- == 0) |
1812 | return mfc; | 2212 | return mfc; |
2213 | } | ||
1813 | read_unlock(&mrt_lock); | 2214 | read_unlock(&mrt_lock); |
1814 | 2215 | ||
1815 | it->cache = &mfc_unres_queue; | ||
1816 | spin_lock_bh(&mfc_unres_lock); | 2216 | spin_lock_bh(&mfc_unres_lock); |
1817 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 2217 | it->cache = &mrt->mfc_unres_queue; |
1818 | if (net_eq(mfc_net(mfc), net) && | 2218 | list_for_each_entry(mfc, it->cache, list) |
1819 | pos-- == 0) | 2219 | if (pos-- == 0) |
1820 | return mfc; | 2220 | return mfc; |
1821 | spin_unlock_bh(&mfc_unres_lock); | 2221 | spin_unlock_bh(&mfc_unres_lock); |
1822 | 2222 | ||
@@ -1829,7 +2229,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | |||
1829 | { | 2229 | { |
1830 | struct ipmr_mfc_iter *it = seq->private; | 2230 | struct ipmr_mfc_iter *it = seq->private; |
1831 | struct net *net = seq_file_net(seq); | 2231 | struct net *net = seq_file_net(seq); |
2232 | struct mr_table *mrt; | ||
1832 | 2233 | ||
2234 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||
2235 | if (mrt == NULL) | ||
2236 | return ERR_PTR(-ENOENT); | ||
2237 | |||
2238 | it->mrt = mrt; | ||
1833 | it->cache = NULL; | 2239 | it->cache = NULL; |
1834 | it->ct = 0; | 2240 | it->ct = 0; |
1835 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) | 2241 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) |
@@ -1841,37 +2247,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1841 | struct mfc_cache *mfc = v; | 2247 | struct mfc_cache *mfc = v; |
1842 | struct ipmr_mfc_iter *it = seq->private; | 2248 | struct ipmr_mfc_iter *it = seq->private; |
1843 | struct net *net = seq_file_net(seq); | 2249 | struct net *net = seq_file_net(seq); |
2250 | struct mr_table *mrt = it->mrt; | ||
1844 | 2251 | ||
1845 | ++*pos; | 2252 | ++*pos; |
1846 | 2253 | ||
1847 | if (v == SEQ_START_TOKEN) | 2254 | if (v == SEQ_START_TOKEN) |
1848 | return ipmr_mfc_seq_idx(net, seq->private, 0); | 2255 | return ipmr_mfc_seq_idx(net, seq->private, 0); |
1849 | 2256 | ||
1850 | if (mfc->next) | 2257 | if (mfc->list.next != it->cache) |
1851 | return mfc->next; | 2258 | return list_entry(mfc->list.next, struct mfc_cache, list); |
1852 | 2259 | ||
1853 | if (it->cache == &mfc_unres_queue) | 2260 | if (it->cache == &mrt->mfc_unres_queue) |
1854 | goto end_of_list; | 2261 | goto end_of_list; |
1855 | 2262 | ||
1856 | BUG_ON(it->cache != net->ipv4.mfc_cache_array); | 2263 | BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]); |
1857 | 2264 | ||
1858 | while (++it->ct < MFC_LINES) { | 2265 | while (++it->ct < MFC_LINES) { |
1859 | mfc = net->ipv4.mfc_cache_array[it->ct]; | 2266 | it->cache = &mrt->mfc_cache_array[it->ct]; |
1860 | if (mfc) | 2267 | if (list_empty(it->cache)) |
1861 | return mfc; | 2268 | continue; |
2269 | return list_first_entry(it->cache, struct mfc_cache, list); | ||
1862 | } | 2270 | } |
1863 | 2271 | ||
1864 | /* exhausted cache_array, show unresolved */ | 2272 | /* exhausted cache_array, show unresolved */ |
1865 | read_unlock(&mrt_lock); | 2273 | read_unlock(&mrt_lock); |
1866 | it->cache = &mfc_unres_queue; | 2274 | it->cache = &mrt->mfc_unres_queue; |
1867 | it->ct = 0; | 2275 | it->ct = 0; |
1868 | 2276 | ||
1869 | spin_lock_bh(&mfc_unres_lock); | 2277 | spin_lock_bh(&mfc_unres_lock); |
1870 | mfc = mfc_unres_queue; | 2278 | if (!list_empty(it->cache)) |
1871 | while (mfc && !net_eq(mfc_net(mfc), net)) | 2279 | return list_first_entry(it->cache, struct mfc_cache, list); |
1872 | mfc = mfc->next; | ||
1873 | if (mfc) | ||
1874 | return mfc; | ||
1875 | 2280 | ||
1876 | end_of_list: | 2281 | end_of_list: |
1877 | spin_unlock_bh(&mfc_unres_lock); | 2282 | spin_unlock_bh(&mfc_unres_lock); |
@@ -1883,18 +2288,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1883 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) | 2288 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) |
1884 | { | 2289 | { |
1885 | struct ipmr_mfc_iter *it = seq->private; | 2290 | struct ipmr_mfc_iter *it = seq->private; |
1886 | struct net *net = seq_file_net(seq); | 2291 | struct mr_table *mrt = it->mrt; |
1887 | 2292 | ||
1888 | if (it->cache == &mfc_unres_queue) | 2293 | if (it->cache == &mrt->mfc_unres_queue) |
1889 | spin_unlock_bh(&mfc_unres_lock); | 2294 | spin_unlock_bh(&mfc_unres_lock); |
1890 | else if (it->cache == net->ipv4.mfc_cache_array) | 2295 | else if (it->cache == &mrt->mfc_cache_array[it->ct]) |
1891 | read_unlock(&mrt_lock); | 2296 | read_unlock(&mrt_lock); |
1892 | } | 2297 | } |
1893 | 2298 | ||
1894 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | 2299 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) |
1895 | { | 2300 | { |
1896 | int n; | 2301 | int n; |
1897 | struct net *net = seq_file_net(seq); | ||
1898 | 2302 | ||
1899 | if (v == SEQ_START_TOKEN) { | 2303 | if (v == SEQ_START_TOKEN) { |
1900 | seq_puts(seq, | 2304 | seq_puts(seq, |
@@ -1902,20 +2306,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
1902 | } else { | 2306 | } else { |
1903 | const struct mfc_cache *mfc = v; | 2307 | const struct mfc_cache *mfc = v; |
1904 | const struct ipmr_mfc_iter *it = seq->private; | 2308 | const struct ipmr_mfc_iter *it = seq->private; |
2309 | const struct mr_table *mrt = it->mrt; | ||
1905 | 2310 | ||
1906 | seq_printf(seq, "%08lX %08lX %-3hd", | 2311 | seq_printf(seq, "%08X %08X %-3hd", |
1907 | (unsigned long) mfc->mfc_mcastgrp, | 2312 | (__force u32) mfc->mfc_mcastgrp, |
1908 | (unsigned long) mfc->mfc_origin, | 2313 | (__force u32) mfc->mfc_origin, |
1909 | mfc->mfc_parent); | 2314 | mfc->mfc_parent); |
1910 | 2315 | ||
1911 | if (it->cache != &mfc_unres_queue) { | 2316 | if (it->cache != &mrt->mfc_unres_queue) { |
1912 | seq_printf(seq, " %8lu %8lu %8lu", | 2317 | seq_printf(seq, " %8lu %8lu %8lu", |
1913 | mfc->mfc_un.res.pkt, | 2318 | mfc->mfc_un.res.pkt, |
1914 | mfc->mfc_un.res.bytes, | 2319 | mfc->mfc_un.res.bytes, |
1915 | mfc->mfc_un.res.wrong_if); | 2320 | mfc->mfc_un.res.wrong_if); |
1916 | for (n = mfc->mfc_un.res.minvif; | 2321 | for (n = mfc->mfc_un.res.minvif; |
1917 | n < mfc->mfc_un.res.maxvif; n++ ) { | 2322 | n < mfc->mfc_un.res.maxvif; n++ ) { |
1918 | if (VIF_EXISTS(net, n) && | 2323 | if (VIF_EXISTS(mrt, n) && |
1919 | mfc->mfc_un.res.ttls[n] < 255) | 2324 | mfc->mfc_un.res.ttls[n] < 255) |
1920 | seq_printf(seq, | 2325 | seq_printf(seq, |
1921 | " %2d:%-3d", | 2326 | " %2d:%-3d", |
@@ -1967,27 +2372,11 @@ static const struct net_protocol pim_protocol = { | |||
1967 | */ | 2372 | */ |
1968 | static int __net_init ipmr_net_init(struct net *net) | 2373 | static int __net_init ipmr_net_init(struct net *net) |
1969 | { | 2374 | { |
1970 | int err = 0; | 2375 | int err; |
1971 | 2376 | ||
1972 | net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), | 2377 | err = ipmr_rules_init(net); |
1973 | GFP_KERNEL); | 2378 | if (err < 0) |
1974 | if (!net->ipv4.vif_table) { | ||
1975 | err = -ENOMEM; | ||
1976 | goto fail; | 2379 | goto fail; |
1977 | } | ||
1978 | |||
1979 | /* Forwarding cache */ | ||
1980 | net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, | ||
1981 | sizeof(struct mfc_cache *), | ||
1982 | GFP_KERNEL); | ||
1983 | if (!net->ipv4.mfc_cache_array) { | ||
1984 | err = -ENOMEM; | ||
1985 | goto fail_mfc_cache; | ||
1986 | } | ||
1987 | |||
1988 | #ifdef CONFIG_IP_PIMSM | ||
1989 | net->ipv4.mroute_reg_vif_num = -1; | ||
1990 | #endif | ||
1991 | 2380 | ||
1992 | #ifdef CONFIG_PROC_FS | 2381 | #ifdef CONFIG_PROC_FS |
1993 | err = -ENOMEM; | 2382 | err = -ENOMEM; |
@@ -2002,10 +2391,8 @@ static int __net_init ipmr_net_init(struct net *net) | |||
2002 | proc_cache_fail: | 2391 | proc_cache_fail: |
2003 | proc_net_remove(net, "ip_mr_vif"); | 2392 | proc_net_remove(net, "ip_mr_vif"); |
2004 | proc_vif_fail: | 2393 | proc_vif_fail: |
2005 | kfree(net->ipv4.mfc_cache_array); | 2394 | ipmr_rules_exit(net); |
2006 | #endif | 2395 | #endif |
2007 | fail_mfc_cache: | ||
2008 | kfree(net->ipv4.vif_table); | ||
2009 | fail: | 2396 | fail: |
2010 | return err; | 2397 | return err; |
2011 | } | 2398 | } |
@@ -2016,8 +2403,7 @@ static void __net_exit ipmr_net_exit(struct net *net) | |||
2016 | proc_net_remove(net, "ip_mr_cache"); | 2403 | proc_net_remove(net, "ip_mr_cache"); |
2017 | proc_net_remove(net, "ip_mr_vif"); | 2404 | proc_net_remove(net, "ip_mr_vif"); |
2018 | #endif | 2405 | #endif |
2019 | kfree(net->ipv4.mfc_cache_array); | 2406 | ipmr_rules_exit(net); |
2020 | kfree(net->ipv4.vif_table); | ||
2021 | } | 2407 | } |
2022 | 2408 | ||
2023 | static struct pernet_operations ipmr_net_ops = { | 2409 | static struct pernet_operations ipmr_net_ops = { |
@@ -2040,7 +2426,6 @@ int __init ip_mr_init(void) | |||
2040 | if (err) | 2426 | if (err) |
2041 | goto reg_pernet_fail; | 2427 | goto reg_pernet_fail; |
2042 | 2428 | ||
2043 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | ||
2044 | err = register_netdevice_notifier(&ip_mr_notifier); | 2429 | err = register_netdevice_notifier(&ip_mr_notifier); |
2045 | if (err) | 2430 | if (err) |
2046 | goto reg_notif_fail; | 2431 | goto reg_notif_fail; |
@@ -2051,6 +2436,7 @@ int __init ip_mr_init(void) | |||
2051 | goto add_proto_fail; | 2436 | goto add_proto_fail; |
2052 | } | 2437 | } |
2053 | #endif | 2438 | #endif |
2439 | rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute); | ||
2054 | return 0; | 2440 | return 0; |
2055 | 2441 | ||
2056 | #ifdef CONFIG_IP_PIMSM_V2 | 2442 | #ifdef CONFIG_IP_PIMSM_V2 |
@@ -2058,7 +2444,6 @@ add_proto_fail: | |||
2058 | unregister_netdevice_notifier(&ip_mr_notifier); | 2444 | unregister_netdevice_notifier(&ip_mr_notifier); |
2059 | #endif | 2445 | #endif |
2060 | reg_notif_fail: | 2446 | reg_notif_fail: |
2061 | del_timer(&ipmr_expire_timer); | ||
2062 | unregister_pernet_subsys(&ipmr_net_ops); | 2447 | unregister_pernet_subsys(&ipmr_net_ops); |
2063 | reg_pernet_fail: | 2448 | reg_pernet_fail: |
2064 | kmem_cache_destroy(mrt_cachep); | 2449 | kmem_cache_destroy(mrt_cachep); |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index c14623fc4d5e..82fb43c5c59e 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/netfilter_ipv4.h> | 4 | #include <linux/netfilter_ipv4.h> |
5 | #include <linux/ip.h> | 5 | #include <linux/ip.h> |
6 | #include <linux/skbuff.h> | 6 | #include <linux/skbuff.h> |
7 | #include <linux/gfp.h> | ||
7 | #include <net/route.h> | 8 | #include <net/route.h> |
8 | #include <net/xfrm.h> | 9 | #include <net/xfrm.h> |
9 | #include <net/ip.h> | 10 | #include <net/ip.h> |
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index bfe26f32b930..79ca5e70d497 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/netfilter/x_tables.h> | 9 | #include <linux/netfilter/x_tables.h> |
10 | #include <linux/netfilter_arp/arp_tables.h> | 10 | #include <linux/netfilter_arp/arp_tables.h> |
11 | #include <linux/slab.h> | ||
11 | 12 | ||
12 | MODULE_LICENSE("GPL"); | 13 | MODULE_LICENSE("GPL"); |
13 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); | 14 | MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); |
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 2855f1f38cbc..e2787048aa0a 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | #include <linux/net.h> | 27 | #include <linux/net.h> |
28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
29 | #include <linux/slab.h> | ||
29 | #include <net/net_namespace.h> | 30 | #include <net/net_namespace.h> |
30 | #include <net/sock.h> | 31 | #include <net/sock.h> |
31 | #include <net/route.h> | 32 | #include <net/route.h> |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 0886f96c736b..a992dc826f1c 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/jhash.h> | 14 | #include <linux/jhash.h> |
15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/ip.h> | 18 | #include <linux/ip.h> |
18 | #include <linux/tcp.h> | 19 | #include <linux/tcp.h> |
19 | #include <linux/udp.h> | 20 | #include <linux/udp.h> |
@@ -87,7 +88,7 @@ clusterip_config_entry_put(struct clusterip_config *c) | |||
87 | list_del(&c->list); | 88 | list_del(&c->list); |
88 | write_unlock_bh(&clusterip_lock); | 89 | write_unlock_bh(&clusterip_lock); |
89 | 90 | ||
90 | dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); | 91 | dev_mc_del(c->dev, c->clustermac); |
91 | dev_put(c->dev); | 92 | dev_put(c->dev); |
92 | 93 | ||
93 | /* In case anyone still accesses the file, the open/close | 94 | /* In case anyone still accesses the file, the open/close |
@@ -396,7 +397,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) | |||
396 | dev_put(dev); | 397 | dev_put(dev); |
397 | return false; | 398 | return false; |
398 | } | 399 | } |
399 | dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); | 400 | dev_mc_add(config->dev, config->clustermac); |
400 | } | 401 | } |
401 | } | 402 | } |
402 | cipinfo->config = config; | 403 | cipinfo->config = config; |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 5113b8f1a379..a0e8bcf04159 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/ip.h> | 16 | #include <linux/ip.h> |
16 | #include <linux/udp.h> | 17 | #include <linux/udp.h> |
17 | #include <linux/icmp.h> | 18 | #include <linux/icmp.h> |
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 09a5d3f7cc41..0dbe697f164f 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/socket.h> | 35 | #include <linux/socket.h> |
36 | #include <linux/slab.h> | ||
36 | #include <linux/skbuff.h> | 37 | #include <linux/skbuff.h> |
37 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
38 | #include <linux/timer.h> | 39 | #include <linux/timer.h> |
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index c8dc9800d620..55392466daa4 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/netfilter_ipv4/ip_tables.h> | 15 | #include <linux/netfilter_ipv4/ip_tables.h> |
16 | #include <linux/slab.h> | ||
16 | #include <net/ip.h> | 17 | #include <net/ip.h> |
17 | 18 | ||
18 | MODULE_LICENSE("GPL"); | 19 | MODULE_LICENSE("GPL"); |
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index b9b83464cbf4..294a2a32f293 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/netfilter_ipv4/ip_tables.h> | 12 | #include <linux/netfilter_ipv4/ip_tables.h> |
13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | ||
15 | #include <net/sock.h> | 16 | #include <net/sock.h> |
16 | #include <net/route.h> | 17 | #include <net/route.h> |
17 | #include <linux/ip.h> | 18 | #include <linux/ip.h> |
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 06fb9d11953c..07fb710cd722 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c | |||
@@ -5,6 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
7 | #include <linux/netfilter_ipv4/ip_tables.h> | 7 | #include <linux/netfilter_ipv4/ip_tables.h> |
8 | #include <linux/slab.h> | ||
8 | #include <net/ip.h> | 9 | #include <net/ip.h> |
9 | 10 | ||
10 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) | 11 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) |
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index cce2f64e6f21..be45bdc4c602 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/netfilter_ipv4/ip_tables.h> | 19 | #include <linux/netfilter_ipv4/ip_tables.h> |
20 | #include <linux/slab.h> | ||
20 | #include <net/ip.h> | 21 | #include <net/ip.h> |
21 | 22 | ||
22 | MODULE_LICENSE("GPL"); | 23 | MODULE_LICENSE("GPL"); |
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 4595281c2863..4f8bddb760c9 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/timer.h> | 13 | #include <linux/timer.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/gfp.h> | ||
15 | #include <net/checksum.h> | 16 | #include <net/checksum.h> |
16 | #include <net/icmp.h> | 17 | #include <net/icmp.h> |
17 | #include <net/ip.h> | 18 | #include <net/ip.h> |
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index 4b6af4bb1f50..4a0c6b548eee 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/gfp.h> | ||
11 | #include <linux/kmod.h> | 12 | #include <linux/kmod.h> |
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <linux/timer.h> | 14 | #include <linux/timer.h> |
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index ab74cc0535e2..26de2c1f7fab 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kmod.h> | 15 | #include <linux/kmod.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
18 | #include <linux/slab.h> | ||
18 | #include <net/checksum.h> | 19 | #include <net/checksum.h> |
19 | #include <net/route.h> | 20 | #include <net/route.h> |
20 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index 0b9c7ce3d6c5..4d85b6e55f29 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/moduleparam.h> | 43 | #include <linux/moduleparam.h> |
44 | #include <linux/types.h> | 44 | #include <linux/types.h> |
45 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
46 | #include <linux/slab.h> | ||
46 | #include <linux/in.h> | 47 | #include <linux/in.h> |
47 | #include <linux/ip.h> | 48 | #include <linux/ip.h> |
48 | #include <linux/udp.h> | 49 | #include <linux/udp.h> |
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 5678e9562c15..c39c9cf6bee6 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #include <linux/icmp.h> | 9 | #include <linux/icmp.h> |
10 | #include <linux/gfp.h> | ||
10 | #include <linux/ip.h> | 11 | #include <linux/ip.h> |
11 | #include <linux/netfilter.h> | 12 | #include <linux/netfilter.h> |
12 | #include <linux/netfilter_ipv4.h> | 13 | #include <linux/netfilter_ipv4.h> |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4f1f337f4337..3dc9914c1dce 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
251 | SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), | 251 | SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), |
252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), | 252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), |
253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), | 253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), |
254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), | ||
254 | SNMP_MIB_SENTINEL | 255 | SNMP_MIB_SENTINEL |
255 | }; | 256 | }; |
256 | 257 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ce154b47f1da..52ef5af78a45 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -60,7 +60,6 @@ | |||
60 | #include <net/net_namespace.h> | 60 | #include <net/net_namespace.h> |
61 | #include <net/dst.h> | 61 | #include <net/dst.h> |
62 | #include <net/sock.h> | 62 | #include <net/sock.h> |
63 | #include <linux/gfp.h> | ||
64 | #include <linux/ip.h> | 63 | #include <linux/ip.h> |
65 | #include <linux/net.h> | 64 | #include <linux/net.h> |
66 | #include <net/ip.h> | 65 | #include <net/ip.h> |
@@ -291,7 +290,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) | |||
291 | { | 290 | { |
292 | /* Charge it to the socket. */ | 291 | /* Charge it to the socket. */ |
293 | 292 | ||
294 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 293 | if (ip_queue_rcv_skb(sk, skb) < 0) { |
295 | kfree_skb(skb); | 294 | kfree_skb(skb); |
296 | return NET_RX_DROP; | 295 | return NET_RX_DROP; |
297 | } | 296 | } |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d9b40248b97f..a947428ef0ae 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -90,6 +90,7 @@ | |||
90 | #include <linux/jhash.h> | 90 | #include <linux/jhash.h> |
91 | #include <linux/rcupdate.h> | 91 | #include <linux/rcupdate.h> |
92 | #include <linux/times.h> | 92 | #include <linux/times.h> |
93 | #include <linux/slab.h> | ||
93 | #include <net/dst.h> | 94 | #include <net/dst.h> |
94 | #include <net/net_namespace.h> | 95 | #include <net/net_namespace.h> |
95 | #include <net/protocol.h> | 96 | #include <net/protocol.h> |
@@ -257,10 +258,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); | |||
257 | (__raw_get_cpu_var(rt_cache_stat).field++) | 258 | (__raw_get_cpu_var(rt_cache_stat).field++) |
258 | 259 | ||
259 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, | 260 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, |
260 | int genid) | 261 | int genid) |
261 | { | 262 | { |
262 | return jhash_3words((__force u32)(__be32)(daddr), | 263 | return jhash_3words((__force u32)daddr, (__force u32)saddr, |
263 | (__force u32)(__be32)(saddr), | ||
264 | idx, genid) | 264 | idx, genid) |
265 | & rt_hash_mask; | 265 | & rt_hash_mask; |
266 | } | 266 | } |
@@ -377,12 +377,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) | |||
377 | struct rtable *r = v; | 377 | struct rtable *r = v; |
378 | int len; | 378 | int len; |
379 | 379 | ||
380 | seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t" | 380 | seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" |
381 | "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", | 381 | "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", |
382 | r->u.dst.dev ? r->u.dst.dev->name : "*", | 382 | r->u.dst.dev ? r->u.dst.dev->name : "*", |
383 | (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway, | 383 | (__force u32)r->rt_dst, |
384 | (__force u32)r->rt_gateway, | ||
384 | r->rt_flags, atomic_read(&r->u.dst.__refcnt), | 385 | r->rt_flags, atomic_read(&r->u.dst.__refcnt), |
385 | r->u.dst.__use, 0, (unsigned long)r->rt_src, | 386 | r->u.dst.__use, 0, (__force u32)r->rt_src, |
386 | (dst_metric(&r->u.dst, RTAX_ADVMSS) ? | 387 | (dst_metric(&r->u.dst, RTAX_ADVMSS) ? |
387 | (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), | 388 | (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), |
388 | dst_metric(&r->u.dst, RTAX_WINDOW), | 389 | dst_metric(&r->u.dst, RTAX_WINDOW), |
@@ -684,18 +685,17 @@ static inline bool rt_caching(const struct net *net) | |||
684 | static inline bool compare_hash_inputs(const struct flowi *fl1, | 685 | static inline bool compare_hash_inputs(const struct flowi *fl1, |
685 | const struct flowi *fl2) | 686 | const struct flowi *fl2) |
686 | { | 687 | { |
687 | return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | 688 | return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | |
688 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | | 689 | ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | |
689 | (fl1->iif ^ fl2->iif)) == 0); | 690 | (fl1->iif ^ fl2->iif)) == 0); |
690 | } | 691 | } |
691 | 692 | ||
692 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | 693 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) |
693 | { | 694 | { |
694 | return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | 695 | return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | |
695 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | | 696 | ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | |
696 | (fl1->mark ^ fl2->mark) | | 697 | (fl1->mark ^ fl2->mark) | |
697 | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ | 698 | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) | |
698 | *(u16 *)&fl2->nl_u.ip4_u.tos) | | ||
699 | (fl1->oif ^ fl2->oif) | | 699 | (fl1->oif ^ fl2->oif) | |
700 | (fl1->iif ^ fl2->iif)) == 0; | 700 | (fl1->iif ^ fl2->iif)) == 0; |
701 | } | 701 | } |
@@ -932,10 +932,8 @@ static void rt_secret_rebuild_oneshot(struct net *net) | |||
932 | { | 932 | { |
933 | del_timer_sync(&net->ipv4.rt_secret_timer); | 933 | del_timer_sync(&net->ipv4.rt_secret_timer); |
934 | rt_cache_invalidate(net); | 934 | rt_cache_invalidate(net); |
935 | if (ip_rt_secret_interval) { | 935 | if (ip_rt_secret_interval) |
936 | net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval; | 936 | mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); |
937 | add_timer(&net->ipv4.rt_secret_timer); | ||
938 | } | ||
939 | } | 937 | } |
940 | 938 | ||
941 | static void rt_emergency_hash_rebuild(struct net *net) | 939 | static void rt_emergency_hash_rebuild(struct net *net) |
@@ -1099,7 +1097,7 @@ static int slow_chain_length(const struct rtable *head) | |||
1099 | } | 1097 | } |
1100 | 1098 | ||
1101 | static int rt_intern_hash(unsigned hash, struct rtable *rt, | 1099 | static int rt_intern_hash(unsigned hash, struct rtable *rt, |
1102 | struct rtable **rp, struct sk_buff *skb) | 1100 | struct rtable **rp, struct sk_buff *skb, int ifindex) |
1103 | { | 1101 | { |
1104 | struct rtable *rth, **rthp; | 1102 | struct rtable *rth, **rthp; |
1105 | unsigned long now; | 1103 | unsigned long now; |
@@ -1214,11 +1212,16 @@ restart: | |||
1214 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { | 1212 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { |
1215 | struct net *net = dev_net(rt->u.dst.dev); | 1213 | struct net *net = dev_net(rt->u.dst.dev); |
1216 | int num = ++net->ipv4.current_rt_cache_rebuild_count; | 1214 | int num = ++net->ipv4.current_rt_cache_rebuild_count; |
1217 | if (!rt_caching(dev_net(rt->u.dst.dev))) { | 1215 | if (!rt_caching(net)) { |
1218 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", | 1216 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", |
1219 | rt->u.dst.dev->name, num); | 1217 | rt->u.dst.dev->name, num); |
1220 | } | 1218 | } |
1221 | rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); | 1219 | rt_emergency_hash_rebuild(net); |
1220 | spin_unlock_bh(rt_hash_lock_addr(hash)); | ||
1221 | |||
1222 | hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | ||
1223 | ifindex, rt_genid(net)); | ||
1224 | goto restart; | ||
1222 | } | 1225 | } |
1223 | } | 1226 | } |
1224 | 1227 | ||
@@ -1443,7 +1446,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1443 | dev_hold(rt->u.dst.dev); | 1446 | dev_hold(rt->u.dst.dev); |
1444 | if (rt->idev) | 1447 | if (rt->idev) |
1445 | in_dev_hold(rt->idev); | 1448 | in_dev_hold(rt->idev); |
1446 | rt->u.dst.obsolete = 0; | 1449 | rt->u.dst.obsolete = -1; |
1447 | rt->u.dst.lastuse = jiffies; | 1450 | rt->u.dst.lastuse = jiffies; |
1448 | rt->u.dst.path = &rt->u.dst; | 1451 | rt->u.dst.path = &rt->u.dst; |
1449 | rt->u.dst.neighbour = NULL; | 1452 | rt->u.dst.neighbour = NULL; |
@@ -1479,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1479 | &netevent); | 1482 | &netevent); |
1480 | 1483 | ||
1481 | rt_del(hash, rth); | 1484 | rt_del(hash, rth); |
1482 | if (!rt_intern_hash(hash, rt, &rt, NULL)) | 1485 | if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif)) |
1483 | ip_rt_put(rt); | 1486 | ip_rt_put(rt); |
1484 | goto do_next; | 1487 | goto do_next; |
1485 | } | 1488 | } |
@@ -1508,11 +1511,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1508 | struct dst_entry *ret = dst; | 1511 | struct dst_entry *ret = dst; |
1509 | 1512 | ||
1510 | if (rt) { | 1513 | if (rt) { |
1511 | if (dst->obsolete) { | 1514 | if (dst->obsolete > 0) { |
1512 | ip_rt_put(rt); | 1515 | ip_rt_put(rt); |
1513 | ret = NULL; | 1516 | ret = NULL; |
1514 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || | 1517 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || |
1515 | rt->u.dst.expires) { | 1518 | (rt->u.dst.expires && |
1519 | time_after_eq(jiffies, rt->u.dst.expires))) { | ||
1516 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | 1520 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, |
1517 | rt->fl.oif, | 1521 | rt->fl.oif, |
1518 | rt_genid(dev_net(dst->dev))); | 1522 | rt_genid(dev_net(dst->dev))); |
@@ -1728,7 +1732,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) | |||
1728 | 1732 | ||
1729 | static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | 1733 | static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) |
1730 | { | 1734 | { |
1731 | return NULL; | 1735 | if (rt_is_expired((struct rtable *)dst)) |
1736 | return NULL; | ||
1737 | return dst; | ||
1732 | } | 1738 | } |
1733 | 1739 | ||
1734 | static void ipv4_dst_destroy(struct dst_entry *dst) | 1740 | static void ipv4_dst_destroy(struct dst_entry *dst) |
@@ -1890,7 +1896,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1890 | if (!rth) | 1896 | if (!rth) |
1891 | goto e_nobufs; | 1897 | goto e_nobufs; |
1892 | 1898 | ||
1893 | rth->u.dst.output= ip_rt_bug; | 1899 | rth->u.dst.output = ip_rt_bug; |
1900 | rth->u.dst.obsolete = -1; | ||
1894 | 1901 | ||
1895 | atomic_set(&rth->u.dst.__refcnt, 1); | 1902 | atomic_set(&rth->u.dst.__refcnt, 1); |
1896 | rth->u.dst.flags= DST_HOST; | 1903 | rth->u.dst.flags= DST_HOST; |
@@ -1929,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1929 | 1936 | ||
1930 | in_dev_put(in_dev); | 1937 | in_dev_put(in_dev); |
1931 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1938 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1932 | return rt_intern_hash(hash, rth, NULL, skb); | 1939 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); |
1933 | 1940 | ||
1934 | e_nobufs: | 1941 | e_nobufs: |
1935 | in_dev_put(in_dev); | 1942 | in_dev_put(in_dev); |
@@ -2056,6 +2063,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
2056 | rth->fl.oif = 0; | 2063 | rth->fl.oif = 0; |
2057 | rth->rt_spec_dst= spec_dst; | 2064 | rth->rt_spec_dst= spec_dst; |
2058 | 2065 | ||
2066 | rth->u.dst.obsolete = -1; | ||
2059 | rth->u.dst.input = ip_forward; | 2067 | rth->u.dst.input = ip_forward; |
2060 | rth->u.dst.output = ip_output; | 2068 | rth->u.dst.output = ip_output; |
2061 | rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); | 2069 | rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); |
@@ -2095,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
2095 | /* put it into the cache */ | 2103 | /* put it into the cache */ |
2096 | hash = rt_hash(daddr, saddr, fl->iif, | 2104 | hash = rt_hash(daddr, saddr, fl->iif, |
2097 | rt_genid(dev_net(rth->u.dst.dev))); | 2105 | rt_genid(dev_net(rth->u.dst.dev))); |
2098 | return rt_intern_hash(hash, rth, NULL, skb); | 2106 | return rt_intern_hash(hash, rth, NULL, skb, fl->iif); |
2099 | } | 2107 | } |
2100 | 2108 | ||
2101 | /* | 2109 | /* |
@@ -2220,6 +2228,7 @@ local_input: | |||
2220 | goto e_nobufs; | 2228 | goto e_nobufs; |
2221 | 2229 | ||
2222 | rth->u.dst.output= ip_rt_bug; | 2230 | rth->u.dst.output= ip_rt_bug; |
2231 | rth->u.dst.obsolete = -1; | ||
2223 | rth->rt_genid = rt_genid(net); | 2232 | rth->rt_genid = rt_genid(net); |
2224 | 2233 | ||
2225 | atomic_set(&rth->u.dst.__refcnt, 1); | 2234 | atomic_set(&rth->u.dst.__refcnt, 1); |
@@ -2251,7 +2260,7 @@ local_input: | |||
2251 | } | 2260 | } |
2252 | rth->rt_type = res.type; | 2261 | rth->rt_type = res.type; |
2253 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); | 2262 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); |
2254 | err = rt_intern_hash(hash, rth, NULL, skb); | 2263 | err = rt_intern_hash(hash, rth, NULL, skb, fl.iif); |
2255 | goto done; | 2264 | goto done; |
2256 | 2265 | ||
2257 | no_route: | 2266 | no_route: |
@@ -2309,8 +2318,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2309 | rcu_read_lock(); | 2318 | rcu_read_lock(); |
2310 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 2319 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
2311 | rth = rcu_dereference(rth->u.dst.rt_next)) { | 2320 | rth = rcu_dereference(rth->u.dst.rt_next)) { |
2312 | if (((rth->fl.fl4_dst ^ daddr) | | 2321 | if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | |
2313 | (rth->fl.fl4_src ^ saddr) | | 2322 | ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | |
2314 | (rth->fl.iif ^ iif) | | 2323 | (rth->fl.iif ^ iif) | |
2315 | rth->fl.oif | | 2324 | rth->fl.oif | |
2316 | (rth->fl.fl4_tos ^ tos)) == 0 && | 2325 | (rth->fl.fl4_tos ^ tos)) == 0 && |
@@ -2446,6 +2455,7 @@ static int __mkroute_output(struct rtable **result, | |||
2446 | rth->rt_spec_dst= fl->fl4_src; | 2455 | rth->rt_spec_dst= fl->fl4_src; |
2447 | 2456 | ||
2448 | rth->u.dst.output=ip_output; | 2457 | rth->u.dst.output=ip_output; |
2458 | rth->u.dst.obsolete = -1; | ||
2449 | rth->rt_genid = rt_genid(dev_net(dev_out)); | 2459 | rth->rt_genid = rt_genid(dev_net(dev_out)); |
2450 | 2460 | ||
2451 | RT_CACHE_STAT_INC(out_slow_tot); | 2461 | RT_CACHE_STAT_INC(out_slow_tot); |
@@ -2497,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp, | |||
2497 | if (err == 0) { | 2507 | if (err == 0) { |
2498 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, | 2508 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, |
2499 | rt_genid(dev_net(dev_out))); | 2509 | rt_genid(dev_net(dev_out))); |
2500 | err = rt_intern_hash(hash, rth, rp, NULL); | 2510 | err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif); |
2501 | } | 2511 | } |
2502 | 2512 | ||
2503 | return err; | 2513 | return err; |
@@ -3103,22 +3113,20 @@ static void rt_secret_reschedule(int old) | |||
3103 | rtnl_lock(); | 3113 | rtnl_lock(); |
3104 | for_each_net(net) { | 3114 | for_each_net(net) { |
3105 | int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); | 3115 | int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); |
3116 | long time; | ||
3106 | 3117 | ||
3107 | if (!new) | 3118 | if (!new) |
3108 | continue; | 3119 | continue; |
3109 | 3120 | ||
3110 | if (deleted) { | 3121 | if (deleted) { |
3111 | long time = net->ipv4.rt_secret_timer.expires - jiffies; | 3122 | time = net->ipv4.rt_secret_timer.expires - jiffies; |
3112 | 3123 | ||
3113 | if (time <= 0 || (time += diff) <= 0) | 3124 | if (time <= 0 || (time += diff) <= 0) |
3114 | time = 0; | 3125 | time = 0; |
3115 | |||
3116 | net->ipv4.rt_secret_timer.expires = time; | ||
3117 | } else | 3126 | } else |
3118 | net->ipv4.rt_secret_timer.expires = new; | 3127 | time = new; |
3119 | 3128 | ||
3120 | net->ipv4.rt_secret_timer.expires += jiffies; | 3129 | mod_timer(&net->ipv4.rt_secret_timer, jiffies + time); |
3121 | add_timer(&net->ipv4.rt_secret_timer); | ||
3122 | } | 3130 | } |
3123 | rtnl_unlock(); | 3131 | rtnl_unlock(); |
3124 | } | 3132 | } |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index c1bc074f61b7..1cd5c15174b8 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/inetdevice.h> | 12 | #include <linux/inetdevice.h> |
13 | #include <linux/seqlock.h> | 13 | #include <linux/seqlock.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/slab.h> | ||
15 | #include <net/snmp.h> | 16 | #include <net/snmp.h> |
16 | #include <net/icmp.h> | 17 | #include <net/icmp.h> |
17 | #include <net/ip.h> | 18 | #include <net/ip.h> |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5901010fad55..8ce29747ad9b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -265,6 +265,7 @@ | |||
265 | #include <linux/err.h> | 265 | #include <linux/err.h> |
266 | #include <linux/crypto.h> | 266 | #include <linux/crypto.h> |
267 | #include <linux/time.h> | 267 | #include <linux/time.h> |
268 | #include <linux/slab.h> | ||
268 | 269 | ||
269 | #include <net/icmp.h> | 270 | #include <net/icmp.h> |
270 | #include <net/tcp.h> | 271 | #include <net/tcp.h> |
@@ -377,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
377 | struct sock *sk = sock->sk; | 378 | struct sock *sk = sock->sk; |
378 | struct tcp_sock *tp = tcp_sk(sk); | 379 | struct tcp_sock *tp = tcp_sk(sk); |
379 | 380 | ||
380 | sock_poll_wait(file, sk->sk_sleep, wait); | 381 | sock_poll_wait(file, sk_sleep(sk), wait); |
381 | if (sk->sk_state == TCP_LISTEN) | 382 | if (sk->sk_state == TCP_LISTEN) |
382 | return inet_csk_listen_poll(sk); | 383 | return inet_csk_listen_poll(sk); |
383 | 384 | ||
@@ -429,7 +430,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
429 | if (tp->urg_seq == tp->copied_seq && | 430 | if (tp->urg_seq == tp->copied_seq && |
430 | !sock_flag(sk, SOCK_URGINLINE) && | 431 | !sock_flag(sk, SOCK_URGINLINE) && |
431 | tp->urg_data) | 432 | tp->urg_data) |
432 | target--; | 433 | target++; |
433 | 434 | ||
434 | /* Potential race condition. If read of tp below will | 435 | /* Potential race condition. If read of tp below will |
435 | * escape above sk->sk_state, we can be illegally awaken | 436 | * escape above sk->sk_state, we can be illegally awaken |
@@ -1254,6 +1255,39 @@ static void tcp_prequeue_process(struct sock *sk) | |||
1254 | tp->ucopy.memory = 0; | 1255 | tp->ucopy.memory = 0; |
1255 | } | 1256 | } |
1256 | 1257 | ||
1258 | #ifdef CONFIG_NET_DMA | ||
1259 | static void tcp_service_net_dma(struct sock *sk, bool wait) | ||
1260 | { | ||
1261 | dma_cookie_t done, used; | ||
1262 | dma_cookie_t last_issued; | ||
1263 | struct tcp_sock *tp = tcp_sk(sk); | ||
1264 | |||
1265 | if (!tp->ucopy.dma_chan) | ||
1266 | return; | ||
1267 | |||
1268 | last_issued = tp->ucopy.dma_cookie; | ||
1269 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1270 | |||
1271 | do { | ||
1272 | if (dma_async_memcpy_complete(tp->ucopy.dma_chan, | ||
1273 | last_issued, &done, | ||
1274 | &used) == DMA_SUCCESS) { | ||
1275 | /* Safe to free early-copied skbs now */ | ||
1276 | __skb_queue_purge(&sk->sk_async_wait_queue); | ||
1277 | break; | ||
1278 | } else { | ||
1279 | struct sk_buff *skb; | ||
1280 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | ||
1281 | (dma_async_is_complete(skb->dma_cookie, done, | ||
1282 | used) == DMA_SUCCESS)) { | ||
1283 | __skb_dequeue(&sk->sk_async_wait_queue); | ||
1284 | kfree_skb(skb); | ||
1285 | } | ||
1286 | } | ||
1287 | } while (wait); | ||
1288 | } | ||
1289 | #endif | ||
1290 | |||
1257 | static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) | 1291 | static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) |
1258 | { | 1292 | { |
1259 | struct sk_buff *skb; | 1293 | struct sk_buff *skb; |
@@ -1335,6 +1369,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, | |||
1335 | sk_eat_skb(sk, skb, 0); | 1369 | sk_eat_skb(sk, skb, 0); |
1336 | if (!desc->count) | 1370 | if (!desc->count) |
1337 | break; | 1371 | break; |
1372 | tp->copied_seq = seq; | ||
1338 | } | 1373 | } |
1339 | tp->copied_seq = seq; | 1374 | tp->copied_seq = seq; |
1340 | 1375 | ||
@@ -1546,6 +1581,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1546 | /* __ Set realtime policy in scheduler __ */ | 1581 | /* __ Set realtime policy in scheduler __ */ |
1547 | } | 1582 | } |
1548 | 1583 | ||
1584 | #ifdef CONFIG_NET_DMA | ||
1585 | if (tp->ucopy.dma_chan) | ||
1586 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1587 | #endif | ||
1549 | if (copied >= target) { | 1588 | if (copied >= target) { |
1550 | /* Do not sleep, just process backlog. */ | 1589 | /* Do not sleep, just process backlog. */ |
1551 | release_sock(sk); | 1590 | release_sock(sk); |
@@ -1554,6 +1593,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1554 | sk_wait_data(sk, &timeo); | 1593 | sk_wait_data(sk, &timeo); |
1555 | 1594 | ||
1556 | #ifdef CONFIG_NET_DMA | 1595 | #ifdef CONFIG_NET_DMA |
1596 | tcp_service_net_dma(sk, false); /* Don't block */ | ||
1557 | tp->ucopy.wakeup = 0; | 1597 | tp->ucopy.wakeup = 0; |
1558 | #endif | 1598 | #endif |
1559 | 1599 | ||
@@ -1633,6 +1673,9 @@ do_prequeue: | |||
1633 | copied = -EFAULT; | 1673 | copied = -EFAULT; |
1634 | break; | 1674 | break; |
1635 | } | 1675 | } |
1676 | |||
1677 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1678 | |||
1636 | if ((offset + used) == skb->len) | 1679 | if ((offset + used) == skb->len) |
1637 | copied_early = 1; | 1680 | copied_early = 1; |
1638 | 1681 | ||
@@ -1702,27 +1745,9 @@ skip_copy: | |||
1702 | } | 1745 | } |
1703 | 1746 | ||
1704 | #ifdef CONFIG_NET_DMA | 1747 | #ifdef CONFIG_NET_DMA |
1705 | if (tp->ucopy.dma_chan) { | 1748 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ |
1706 | dma_cookie_t done, used; | 1749 | tp->ucopy.dma_chan = NULL; |
1707 | |||
1708 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | ||
1709 | |||
1710 | while (dma_async_memcpy_complete(tp->ucopy.dma_chan, | ||
1711 | tp->ucopy.dma_cookie, &done, | ||
1712 | &used) == DMA_IN_PROGRESS) { | ||
1713 | /* do partial cleanup of sk_async_wait_queue */ | ||
1714 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | ||
1715 | (dma_async_is_complete(skb->dma_cookie, done, | ||
1716 | used) == DMA_SUCCESS)) { | ||
1717 | __skb_dequeue(&sk->sk_async_wait_queue); | ||
1718 | kfree_skb(skb); | ||
1719 | } | ||
1720 | } | ||
1721 | 1750 | ||
1722 | /* Safe to free early-copied skbs now */ | ||
1723 | __skb_queue_purge(&sk->sk_async_wait_queue); | ||
1724 | tp->ucopy.dma_chan = NULL; | ||
1725 | } | ||
1726 | if (tp->ucopy.pinned_list) { | 1751 | if (tp->ucopy.pinned_list) { |
1727 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); | 1752 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); |
1728 | tp->ucopy.pinned_list = NULL; | 1753 | tp->ucopy.pinned_list = NULL; |
@@ -2273,7 +2298,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
2273 | if (sock_flag(sk, SOCK_KEEPOPEN) && | 2298 | if (sock_flag(sk, SOCK_KEEPOPEN) && |
2274 | !((1 << sk->sk_state) & | 2299 | !((1 << sk->sk_state) & |
2275 | (TCPF_CLOSE | TCPF_LISTEN))) { | 2300 | (TCPF_CLOSE | TCPF_LISTEN))) { |
2276 | __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp; | 2301 | u32 elapsed = keepalive_time_elapsed(tp); |
2277 | if (tp->keepalive_time > elapsed) | 2302 | if (tp->keepalive_time > elapsed) |
2278 | elapsed = tp->keepalive_time - elapsed; | 2303 | elapsed = tp->keepalive_time - elapsed; |
2279 | else | 2304 | else |
@@ -2696,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2696 | struct tcphdr *th2; | 2721 | struct tcphdr *th2; |
2697 | unsigned int len; | 2722 | unsigned int len; |
2698 | unsigned int thlen; | 2723 | unsigned int thlen; |
2699 | unsigned int flags; | 2724 | __be32 flags; |
2700 | unsigned int mss = 1; | 2725 | unsigned int mss = 1; |
2701 | unsigned int hlen; | 2726 | unsigned int hlen; |
2702 | unsigned int off; | 2727 | unsigned int off; |
@@ -2746,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2746 | 2771 | ||
2747 | found: | 2772 | found: |
2748 | flush = NAPI_GRO_CB(p)->flush; | 2773 | flush = NAPI_GRO_CB(p)->flush; |
2749 | flush |= flags & TCP_FLAG_CWR; | 2774 | flush |= (__force int)(flags & TCP_FLAG_CWR); |
2750 | flush |= (flags ^ tcp_flag_word(th2)) & | 2775 | flush |= (__force int)((flags ^ tcp_flag_word(th2)) & |
2751 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); | 2776 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); |
2752 | flush |= th->ack_seq ^ th2->ack_seq; | 2777 | flush |= (__force int)(th->ack_seq ^ th2->ack_seq); |
2753 | for (i = sizeof(*th); i < thlen; i += 4) | 2778 | for (i = sizeof(*th); i < thlen; i += 4) |
2754 | flush |= *(u32 *)((u8 *)th + i) ^ | 2779 | flush |= *(u32 *)((u8 *)th + i) ^ |
2755 | *(u32 *)((u8 *)th2 + i); | 2780 | *(u32 *)((u8 *)th2 + i); |
@@ -2770,8 +2795,9 @@ found: | |||
2770 | 2795 | ||
2771 | out_check_final: | 2796 | out_check_final: |
2772 | flush = len < mss; | 2797 | flush = len < mss; |
2773 | flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | | 2798 | flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | |
2774 | TCP_FLAG_SYN | TCP_FLAG_FIN); | 2799 | TCP_FLAG_RST | TCP_FLAG_SYN | |
2800 | TCP_FLAG_FIN)); | ||
2775 | 2801 | ||
2776 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) | 2802 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) |
2777 | pp = head; | 2803 | pp = head; |
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 6428b342b164..0ec9bd0ae94f 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/list.h> | 12 | #include <linux/list.h> |
13 | #include <linux/gfp.h> | ||
13 | #include <net/tcp.h> | 14 | #include <net/tcp.h> |
14 | 15 | ||
15 | int sysctl_tcp_max_ssthresh = 0; | 16 | int sysctl_tcp_max_ssthresh = 0; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 788851ca8c5d..e82162c211bf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -62,6 +62,7 @@ | |||
62 | */ | 62 | */ |
63 | 63 | ||
64 | #include <linux/mm.h> | 64 | #include <linux/mm.h> |
65 | #include <linux/slab.h> | ||
65 | #include <linux/module.h> | 66 | #include <linux/module.h> |
66 | #include <linux/sysctl.h> | 67 | #include <linux/sysctl.h> |
67 | #include <linux/kernel.h> | 68 | #include <linux/kernel.h> |
@@ -2511,6 +2512,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets) | |||
2511 | int err; | 2512 | int err; |
2512 | unsigned int mss; | 2513 | unsigned int mss; |
2513 | 2514 | ||
2515 | if (packets == 0) | ||
2516 | return; | ||
2517 | |||
2514 | WARN_ON(packets > tp->packets_out); | 2518 | WARN_ON(packets > tp->packets_out); |
2515 | if (tp->lost_skb_hint) { | 2519 | if (tp->lost_skb_hint) { |
2516 | skb = tp->lost_skb_hint; | 2520 | skb = tp->lost_skb_hint; |
@@ -3706,7 +3710,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
3706 | } | 3710 | } |
3707 | 3711 | ||
3708 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) | 3712 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) |
3709 | dst_confirm(sk->sk_dst_cache); | 3713 | dst_confirm(__sk_dst_get(sk)); |
3710 | 3714 | ||
3711 | return 1; | 3715 | return 1; |
3712 | 3716 | ||
@@ -4315,7 +4319,7 @@ static void tcp_ofo_queue(struct sock *sk) | |||
4315 | } | 4319 | } |
4316 | 4320 | ||
4317 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { | 4321 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { |
4318 | SOCK_DEBUG(sk, "ofo packet was already received \n"); | 4322 | SOCK_DEBUG(sk, "ofo packet was already received\n"); |
4319 | __skb_unlink(skb, &tp->out_of_order_queue); | 4323 | __skb_unlink(skb, &tp->out_of_order_queue); |
4320 | __kfree_skb(skb); | 4324 | __kfree_skb(skb); |
4321 | continue; | 4325 | continue; |
@@ -4363,6 +4367,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | |||
4363 | if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) | 4367 | if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) |
4364 | goto drop; | 4368 | goto drop; |
4365 | 4369 | ||
4370 | skb_dst_drop(skb); | ||
4366 | __skb_pull(skb, th->doff * 4); | 4371 | __skb_pull(skb, th->doff * 4); |
4367 | 4372 | ||
4368 | TCP_ECN_accept_cwr(tp, skb); | 4373 | TCP_ECN_accept_cwr(tp, skb); |
@@ -5829,7 +5834,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5829 | if (tp->snd_una == tp->write_seq) { | 5834 | if (tp->snd_una == tp->write_seq) { |
5830 | tcp_set_state(sk, TCP_FIN_WAIT2); | 5835 | tcp_set_state(sk, TCP_FIN_WAIT2); |
5831 | sk->sk_shutdown |= SEND_SHUTDOWN; | 5836 | sk->sk_shutdown |= SEND_SHUTDOWN; |
5832 | dst_confirm(sk->sk_dst_cache); | 5837 | dst_confirm(__sk_dst_get(sk)); |
5833 | 5838 | ||
5834 | if (!sock_flag(sk, SOCK_DEAD)) | 5839 | if (!sock_flag(sk, SOCK_DEAD)) |
5835 | /* Wake up lingering close() */ | 5840 | /* Wake up lingering close() */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8d51d39ad1bb..771f8146a2e5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/jhash.h> | 60 | #include <linux/jhash.h> |
61 | #include <linux/init.h> | 61 | #include <linux/init.h> |
62 | #include <linux/times.h> | 62 | #include <linux/times.h> |
63 | #include <linux/slab.h> | ||
63 | 64 | ||
64 | #include <net/net_namespace.h> | 65 | #include <net/net_namespace.h> |
65 | #include <net/icmp.h> | 66 | #include <net/icmp.h> |
@@ -370,6 +371,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
370 | if (sk->sk_state == TCP_CLOSE) | 371 | if (sk->sk_state == TCP_CLOSE) |
371 | goto out; | 372 | goto out; |
372 | 373 | ||
374 | if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { | ||
375 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | ||
376 | goto out; | ||
377 | } | ||
378 | |||
373 | icsk = inet_csk(sk); | 379 | icsk = inet_csk(sk); |
374 | tp = tcp_sk(sk); | 380 | tp = tcp_sk(sk); |
375 | seq = ntohl(th->seq); | 381 | seq = ntohl(th->seq); |
@@ -513,26 +519,31 @@ out: | |||
513 | sock_put(sk); | 519 | sock_put(sk); |
514 | } | 520 | } |
515 | 521 | ||
516 | /* This routine computes an IPv4 TCP checksum. */ | 522 | static void __tcp_v4_send_check(struct sk_buff *skb, |
517 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | 523 | __be32 saddr, __be32 daddr) |
518 | { | 524 | { |
519 | struct inet_sock *inet = inet_sk(sk); | ||
520 | struct tcphdr *th = tcp_hdr(skb); | 525 | struct tcphdr *th = tcp_hdr(skb); |
521 | 526 | ||
522 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 527 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
523 | th->check = ~tcp_v4_check(len, inet->inet_saddr, | 528 | th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); |
524 | inet->inet_daddr, 0); | ||
525 | skb->csum_start = skb_transport_header(skb) - skb->head; | 529 | skb->csum_start = skb_transport_header(skb) - skb->head; |
526 | skb->csum_offset = offsetof(struct tcphdr, check); | 530 | skb->csum_offset = offsetof(struct tcphdr, check); |
527 | } else { | 531 | } else { |
528 | th->check = tcp_v4_check(len, inet->inet_saddr, | 532 | th->check = tcp_v4_check(skb->len, saddr, daddr, |
529 | inet->inet_daddr, | ||
530 | csum_partial(th, | 533 | csum_partial(th, |
531 | th->doff << 2, | 534 | th->doff << 2, |
532 | skb->csum)); | 535 | skb->csum)); |
533 | } | 536 | } |
534 | } | 537 | } |
535 | 538 | ||
539 | /* This routine computes an IPv4 TCP checksum. */ | ||
540 | void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) | ||
541 | { | ||
542 | struct inet_sock *inet = inet_sk(sk); | ||
543 | |||
544 | __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); | ||
545 | } | ||
546 | |||
536 | int tcp_v4_gso_send_check(struct sk_buff *skb) | 547 | int tcp_v4_gso_send_check(struct sk_buff *skb) |
537 | { | 548 | { |
538 | const struct iphdr *iph; | 549 | const struct iphdr *iph; |
@@ -545,10 +556,8 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) | |||
545 | th = tcp_hdr(skb); | 556 | th = tcp_hdr(skb); |
546 | 557 | ||
547 | th->check = 0; | 558 | th->check = 0; |
548 | th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0); | ||
549 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
550 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
551 | skb->ip_summed = CHECKSUM_PARTIAL; | 559 | skb->ip_summed = CHECKSUM_PARTIAL; |
560 | __tcp_v4_send_check(skb, iph->saddr, iph->daddr); | ||
552 | return 0; | 561 | return 0; |
553 | } | 562 | } |
554 | 563 | ||
@@ -757,13 +766,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, | |||
757 | skb = tcp_make_synack(sk, dst, req, rvp); | 766 | skb = tcp_make_synack(sk, dst, req, rvp); |
758 | 767 | ||
759 | if (skb) { | 768 | if (skb) { |
760 | struct tcphdr *th = tcp_hdr(skb); | 769 | __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); |
761 | |||
762 | th->check = tcp_v4_check(skb->len, | ||
763 | ireq->loc_addr, | ||
764 | ireq->rmt_addr, | ||
765 | csum_partial(th, skb->len, | ||
766 | skb->csum)); | ||
767 | 770 | ||
768 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | 771 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
769 | ireq->rmt_addr, | 772 | ireq->rmt_addr, |
@@ -1283,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1283 | goto drop_and_release; | 1286 | goto drop_and_release; |
1284 | 1287 | ||
1285 | /* Secret recipe starts with IP addresses */ | 1288 | /* Secret recipe starts with IP addresses */ |
1286 | *mess++ ^= daddr; | 1289 | *mess++ ^= (__force u32)daddr; |
1287 | *mess++ ^= saddr; | 1290 | *mess++ ^= (__force u32)saddr; |
1288 | 1291 | ||
1289 | /* plus variable length Initiator Cookie */ | 1292 | /* plus variable length Initiator Cookie */ |
1290 | c = (u8 *)mess; | 1293 | c = (u8 *)mess; |
@@ -1651,15 +1654,15 @@ int tcp_v4_rcv(struct sk_buff *skb) | |||
1651 | if (!sk) | 1654 | if (!sk) |
1652 | goto no_tcp_socket; | 1655 | goto no_tcp_socket; |
1653 | 1656 | ||
1657 | process: | ||
1658 | if (sk->sk_state == TCP_TIME_WAIT) | ||
1659 | goto do_time_wait; | ||
1660 | |||
1654 | if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { | 1661 | if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { |
1655 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | 1662 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); |
1656 | goto discard_and_relse; | 1663 | goto discard_and_relse; |
1657 | } | 1664 | } |
1658 | 1665 | ||
1659 | process: | ||
1660 | if (sk->sk_state == TCP_TIME_WAIT) | ||
1661 | goto do_time_wait; | ||
1662 | |||
1663 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | 1666 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) |
1664 | goto discard_and_relse; | 1667 | goto discard_and_relse; |
1665 | nf_reset(skb); | 1668 | nf_reset(skb); |
@@ -1669,6 +1672,8 @@ process: | |||
1669 | 1672 | ||
1670 | skb->dev = NULL; | 1673 | skb->dev = NULL; |
1671 | 1674 | ||
1675 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1676 | |||
1672 | bh_lock_sock_nested(sk); | 1677 | bh_lock_sock_nested(sk); |
1673 | ret = 0; | 1678 | ret = 0; |
1674 | if (!sock_owned_by_user(sk)) { | 1679 | if (!sock_owned_by_user(sk)) { |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4199bc6915c5..794c2e122a41 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/slab.h> | ||
23 | #include <linux/sysctl.h> | 24 | #include <linux/sysctl.h> |
24 | #include <linux/workqueue.h> | 25 | #include <linux/workqueue.h> |
25 | #include <net/tcp.h> | 26 | #include <net/tcp.h> |
@@ -671,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
671 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && | 672 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && |
672 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { | 673 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { |
673 | inet_rsk(req)->acked = 1; | 674 | inet_rsk(req)->acked = 1; |
675 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); | ||
674 | return NULL; | 676 | return NULL; |
675 | } | 677 | } |
676 | 678 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f181b78f2385..5db3a2c6cb33 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <net/tcp.h> | 37 | #include <net/tcp.h> |
38 | 38 | ||
39 | #include <linux/compiler.h> | 39 | #include <linux/compiler.h> |
40 | #include <linux/gfp.h> | ||
40 | #include <linux/module.h> | 41 | #include <linux/module.h> |
41 | 42 | ||
42 | /* People can turn this off for buggy TCP's found in printers etc. */ | 43 | /* People can turn this off for buggy TCP's found in printers etc. */ |
@@ -349,6 +350,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, | |||
349 | */ | 350 | */ |
350 | static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) | 351 | static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) |
351 | { | 352 | { |
353 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
352 | skb->csum = 0; | 354 | skb->csum = 0; |
353 | 355 | ||
354 | TCP_SKB_CB(skb)->flags = flags; | 356 | TCP_SKB_CB(skb)->flags = flags; |
@@ -859,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
859 | th->urg_ptr = htons(tp->snd_up - tcb->seq); | 861 | th->urg_ptr = htons(tp->snd_up - tcb->seq); |
860 | th->urg = 1; | 862 | th->urg = 1; |
861 | } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { | 863 | } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { |
862 | th->urg_ptr = 0xFFFF; | 864 | th->urg_ptr = htons(0xFFFF); |
863 | th->urg = 1; | 865 | th->urg = 1; |
864 | } | 866 | } |
865 | } | 867 | } |
@@ -877,7 +879,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
877 | } | 879 | } |
878 | #endif | 880 | #endif |
879 | 881 | ||
880 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); | 882 | icsk->icsk_af_ops->send_check(sk, skb); |
881 | 883 | ||
882 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) | 884 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) |
883 | tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); | 885 | tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); |
@@ -886,9 +888,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
886 | tcp_event_data_sent(tp, skb, sk); | 888 | tcp_event_data_sent(tp, skb, sk); |
887 | 889 | ||
888 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) | 890 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) |
889 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); | 891 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, |
892 | tcp_skb_pcount(skb)); | ||
890 | 893 | ||
891 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 894 | err = icsk->icsk_af_ops->queue_xmit(skb); |
892 | if (likely(err <= 0)) | 895 | if (likely(err <= 0)) |
893 | return err; | 896 | return err; |
894 | 897 | ||
@@ -2483,7 +2486,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2483 | *tail-- ^= TCP_SKB_CB(skb)->seq + 1; | 2486 | *tail-- ^= TCP_SKB_CB(skb)->seq + 1; |
2484 | 2487 | ||
2485 | /* recommended */ | 2488 | /* recommended */ |
2486 | *tail-- ^= ((th->dest << 16) | th->source); | 2489 | *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); |
2487 | *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ | 2490 | *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ |
2488 | 2491 | ||
2489 | sha_transform((__u32 *)&xvp->cookie_bakery[0], | 2492 | sha_transform((__u32 *)&xvp->cookie_bakery[0], |
@@ -2501,7 +2504,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2501 | th->window = htons(min(req->rcv_wnd, 65535U)); | 2504 | th->window = htons(min(req->rcv_wnd, 65535U)); |
2502 | tcp_options_write((__be32 *)(th + 1), tp, &opts); | 2505 | tcp_options_write((__be32 *)(th + 1), tp, &opts); |
2503 | th->doff = (tcp_header_size >> 2); | 2506 | th->doff = (tcp_header_size >> 2); |
2504 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); | 2507 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); |
2505 | 2508 | ||
2506 | #ifdef CONFIG_TCP_MD5SIG | 2509 | #ifdef CONFIG_TCP_MD5SIG |
2507 | /* Okay, we have all we need - do the md5 hash if needed */ | 2510 | /* Okay, we have all we need - do the md5 hash if needed */ |
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 9bc805df95d2..f8efada580e8 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/kprobes.h> | 22 | #include <linux/kprobes.h> |
23 | #include <linux/socket.h> | 23 | #include <linux/socket.h> |
24 | #include <linux/tcp.h> | 24 | #include <linux/tcp.h> |
25 | #include <linux/slab.h> | ||
25 | #include <linux/proc_fs.h> | 26 | #include <linux/proc_fs.h> |
26 | #include <linux/module.h> | 27 | #include <linux/module.h> |
27 | #include <linux/ktime.h> | 28 | #include <linux/ktime.h> |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index a17629b8912e..440a5c6004f6 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -19,6 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/gfp.h> | ||
22 | #include <net/tcp.h> | 23 | #include <net/tcp.h> |
23 | 24 | ||
24 | int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; | 25 | int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; |
@@ -134,7 +135,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) | |||
134 | } | 135 | } |
135 | 136 | ||
136 | /* This function calculates a "timeout" which is equivalent to the timeout of a | 137 | /* This function calculates a "timeout" which is equivalent to the timeout of a |
137 | * TCP connection after "boundary" unsucessful, exponentially backed-off | 138 | * TCP connection after "boundary" unsuccessful, exponentially backed-off |
138 | * retransmissions with an initial RTO of TCP_RTO_MIN. | 139 | * retransmissions with an initial RTO of TCP_RTO_MIN. |
139 | */ | 140 | */ |
140 | static bool retransmits_timed_out(struct sock *sk, | 141 | static bool retransmits_timed_out(struct sock *sk, |
@@ -171,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
171 | 172 | ||
172 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 173 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
173 | if (icsk->icsk_retransmits) | 174 | if (icsk->icsk_retransmits) |
174 | dst_negative_advice(&sk->sk_dst_cache, sk); | 175 | dst_negative_advice(sk); |
175 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 176 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
176 | } else { | 177 | } else { |
177 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { | 178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { |
178 | /* Black hole detection */ | 179 | /* Black hole detection */ |
179 | tcp_mtu_probing(icsk, sk); | 180 | tcp_mtu_probing(icsk, sk); |
180 | 181 | ||
181 | dst_negative_advice(&sk->sk_dst_cache, sk); | 182 | dst_negative_advice(sk); |
182 | } | 183 | } |
183 | 184 | ||
184 | retry_until = sysctl_tcp_retries2; | 185 | retry_until = sysctl_tcp_retries2; |
@@ -516,7 +517,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
516 | struct sock *sk = (struct sock *) data; | 517 | struct sock *sk = (struct sock *) data; |
517 | struct inet_connection_sock *icsk = inet_csk(sk); | 518 | struct inet_connection_sock *icsk = inet_csk(sk); |
518 | struct tcp_sock *tp = tcp_sk(sk); | 519 | struct tcp_sock *tp = tcp_sk(sk); |
519 | __u32 elapsed; | 520 | u32 elapsed; |
520 | 521 | ||
521 | /* Only process if socket is not in use. */ | 522 | /* Only process if socket is not in use. */ |
522 | bh_lock_sock(sk); | 523 | bh_lock_sock(sk); |
@@ -553,7 +554,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
553 | if (tp->packets_out || tcp_send_head(sk)) | 554 | if (tp->packets_out || tcp_send_head(sk)) |
554 | goto resched; | 555 | goto resched; |
555 | 556 | ||
556 | elapsed = tcp_time_stamp - tp->rcv_tstamp; | 557 | elapsed = keepalive_time_elapsed(tp); |
557 | 558 | ||
558 | if (elapsed >= keepalive_time_when(tp)) { | 559 | if (elapsed >= keepalive_time_when(tp)) { |
559 | if (icsk->icsk_probes_out >= keepalive_probes(tp)) { | 560 | if (icsk->icsk_probes_out >= keepalive_probes(tp)) { |
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c index 3959e0ca456a..3b3813cc80b9 100644 --- a/net/ipv4/tunnel4.c +++ b/net/ipv4/tunnel4.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/mutex.h> | 8 | #include <linux/mutex.h> |
9 | #include <linux/netdevice.h> | 9 | #include <linux/netdevice.h> |
10 | #include <linux/skbuff.h> | 10 | #include <linux/skbuff.h> |
11 | #include <linux/slab.h> | ||
11 | #include <net/icmp.h> | 12 | #include <net/icmp.h> |
12 | #include <net/ip.h> | 13 | #include <net/ip.h> |
13 | #include <net/protocol.h> | 14 | #include <net/protocol.h> |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7af756d0f931..4560b291180b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -95,6 +95,7 @@ | |||
95 | #include <linux/mm.h> | 95 | #include <linux/mm.h> |
96 | #include <linux/inet.h> | 96 | #include <linux/inet.h> |
97 | #include <linux/netdevice.h> | 97 | #include <linux/netdevice.h> |
98 | #include <linux/slab.h> | ||
98 | #include <net/tcp_states.h> | 99 | #include <net/tcp_states.h> |
99 | #include <linux/skbuff.h> | 100 | #include <linux/skbuff.h> |
100 | #include <linux/proc_fs.h> | 101 | #include <linux/proc_fs.h> |
@@ -306,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | |||
306 | static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, | 307 | static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, |
307 | unsigned int port) | 308 | unsigned int port) |
308 | { | 309 | { |
309 | return jhash_1word(saddr, net_hash_mix(net)) ^ port; | 310 | return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; |
310 | } | 311 | } |
311 | 312 | ||
312 | int udp_v4_get_port(struct sock *sk, unsigned short snum) | 313 | int udp_v4_get_port(struct sock *sk, unsigned short snum) |
313 | { | 314 | { |
314 | unsigned int hash2_nulladdr = | 315 | unsigned int hash2_nulladdr = |
315 | udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum); | 316 | udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); |
316 | unsigned int hash2_partial = | 317 | unsigned int hash2_partial = |
317 | udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); | 318 | udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); |
318 | 319 | ||
@@ -465,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
465 | daddr, hnum, dif, | 466 | daddr, hnum, dif, |
466 | hslot2, slot2); | 467 | hslot2, slot2); |
467 | if (!result) { | 468 | if (!result) { |
468 | hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum); | 469 | hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); |
469 | slot2 = hash2 & udptable->mask; | 470 | slot2 = hash2 & udptable->mask; |
470 | hslot2 = &udptable->hash2[slot2]; | 471 | hslot2 = &udptable->hash2[slot2]; |
471 | if (hslot->count < hslot2->count) | 472 | if (hslot->count < hslot2->count) |
472 | goto begin; | 473 | goto begin; |
473 | 474 | ||
474 | result = udp4_lib_lookup2(net, INADDR_ANY, sport, | 475 | result = udp4_lib_lookup2(net, saddr, sport, |
475 | daddr, hnum, dif, | 476 | htonl(INADDR_ANY), hnum, dif, |
476 | hslot2, slot2); | 477 | hslot2, slot2); |
477 | } | 478 | } |
478 | rcu_read_unlock(); | 479 | rcu_read_unlock(); |
@@ -1061,10 +1062,10 @@ static unsigned int first_packet_length(struct sock *sk) | |||
1061 | spin_unlock_bh(&rcvq->lock); | 1062 | spin_unlock_bh(&rcvq->lock); |
1062 | 1063 | ||
1063 | if (!skb_queue_empty(&list_kill)) { | 1064 | if (!skb_queue_empty(&list_kill)) { |
1064 | lock_sock(sk); | 1065 | lock_sock_bh(sk); |
1065 | __skb_queue_purge(&list_kill); | 1066 | __skb_queue_purge(&list_kill); |
1066 | sk_mem_reclaim_partial(sk); | 1067 | sk_mem_reclaim_partial(sk); |
1067 | release_sock(sk); | 1068 | unlock_sock_bh(sk); |
1068 | } | 1069 | } |
1069 | return res; | 1070 | return res; |
1070 | } | 1071 | } |
@@ -1195,10 +1196,10 @@ out: | |||
1195 | return err; | 1196 | return err; |
1196 | 1197 | ||
1197 | csum_copy_err: | 1198 | csum_copy_err: |
1198 | lock_sock(sk); | 1199 | lock_sock_bh(sk); |
1199 | if (!skb_kill_datagram(sk, skb, flags)) | 1200 | if (!skb_kill_datagram(sk, skb, flags)) |
1200 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1201 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1201 | release_sock(sk); | 1202 | unlock_sock_bh(sk); |
1202 | 1203 | ||
1203 | if (noblock) | 1204 | if (noblock) |
1204 | return -EAGAIN; | 1205 | return -EAGAIN; |
@@ -1216,6 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags) | |||
1216 | sk->sk_state = TCP_CLOSE; | 1217 | sk->sk_state = TCP_CLOSE; |
1217 | inet->inet_daddr = 0; | 1218 | inet->inet_daddr = 0; |
1218 | inet->inet_dport = 0; | 1219 | inet->inet_dport = 0; |
1220 | sock_rps_save_rxhash(sk, 0); | ||
1219 | sk->sk_bound_dev_if = 0; | 1221 | sk->sk_bound_dev_if = 0; |
1220 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 1222 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
1221 | inet_reset_saddr(sk); | 1223 | inet_reset_saddr(sk); |
@@ -1257,8 +1259,12 @@ EXPORT_SYMBOL(udp_lib_unhash); | |||
1257 | 1259 | ||
1258 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1260 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1259 | { | 1261 | { |
1260 | int rc = sock_queue_rcv_skb(sk, skb); | 1262 | int rc; |
1263 | |||
1264 | if (inet_sk(sk)->inet_daddr) | ||
1265 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1261 | 1266 | ||
1267 | rc = ip_queue_rcv_skb(sk, skb); | ||
1262 | if (rc < 0) { | 1268 | if (rc < 0) { |
1263 | int is_udplite = IS_UDPLITE(sk); | 1269 | int is_udplite = IS_UDPLITE(sk); |
1264 | 1270 | ||
@@ -1366,6 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1366 | goto drop; | 1372 | goto drop; |
1367 | } | 1373 | } |
1368 | 1374 | ||
1375 | |||
1376 | if (sk_rcvqueues_full(sk, skb)) | ||
1377 | goto drop; | ||
1378 | |||
1369 | rc = 0; | 1379 | rc = 0; |
1370 | 1380 | ||
1371 | bh_lock_sock(sk); | 1381 | bh_lock_sock(sk); |
@@ -1614,9 +1624,9 @@ int udp_rcv(struct sk_buff *skb) | |||
1614 | 1624 | ||
1615 | void udp_destroy_sock(struct sock *sk) | 1625 | void udp_destroy_sock(struct sock *sk) |
1616 | { | 1626 | { |
1617 | lock_sock(sk); | 1627 | lock_sock_bh(sk); |
1618 | udp_flush_pending_frames(sk); | 1628 | udp_flush_pending_frames(sk); |
1619 | release_sock(sk); | 1629 | unlock_sock_bh(sk); |
1620 | } | 1630 | } |
1621 | 1631 | ||
1622 | /* | 1632 | /* |
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index f9f922a0ba88..c791bb63203f 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/string.h> | 14 | #include <linux/string.h> |
14 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 3444f3b34eca..6f368413eb0e 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> | 4 | * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/gfp.h> | ||
7 | #include <linux/init.h> | 8 | #include <linux/init.h> |
8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index e4a1483fba77..1705476670ef 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net, | |||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct dst_entry * | ||
63 | __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | ||
64 | { | ||
65 | struct dst_entry *dst; | ||
66 | |||
67 | read_lock_bh(&policy->lock); | ||
68 | for (dst = policy->bundles; dst; dst = dst->next) { | ||
69 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | ||
70 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ | ||
71 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && | ||
72 | xdst->u.rt.fl.fl4_src == fl->fl4_src && | ||
73 | xdst->u.rt.fl.fl4_tos == fl->fl4_tos && | ||
74 | xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { | ||
75 | dst_clone(dst); | ||
76 | break; | ||
77 | } | ||
78 | } | ||
79 | read_unlock_bh(&policy->lock); | ||
80 | return dst; | ||
81 | } | ||
82 | |||
83 | static int xfrm4_get_tos(struct flowi *fl) | 62 | static int xfrm4_get_tos(struct flowi *fl) |
84 | { | 63 | { |
85 | return fl->fl4_tos; | 64 | return fl->fl4_tos; |
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | |||
259 | .dst_ops = &xfrm4_dst_ops, | 238 | .dst_ops = &xfrm4_dst_ops, |
260 | .dst_lookup = xfrm4_dst_lookup, | 239 | .dst_lookup = xfrm4_dst_lookup, |
261 | .get_saddr = xfrm4_get_saddr, | 240 | .get_saddr = xfrm4_get_saddr, |
262 | .find_bundle = __xfrm4_find_bundle, | ||
263 | .decode_session = _decode_session4, | 241 | .decode_session = _decode_session4, |
264 | .get_tos = xfrm4_get_tos, | 242 | .get_tos = xfrm4_get_tos, |
265 | .init_path = xfrm4_init_path, | 243 | .init_path = xfrm4_init_path, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6cf3ee14ace3..3984f52181f4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <linux/route.h> | 53 | #include <linux/route.h> |
54 | #include <linux/inetdevice.h> | 54 | #include <linux/inetdevice.h> |
55 | #include <linux/init.h> | 55 | #include <linux/init.h> |
56 | #include <linux/slab.h> | ||
56 | #ifdef CONFIG_SYSCTL | 57 | #ifdef CONFIG_SYSCTL |
57 | #include <linux/sysctl.h> | 58 | #include <linux/sysctl.h> |
58 | #endif | 59 | #endif |
@@ -81,7 +82,7 @@ | |||
81 | #include <linux/random.h> | 82 | #include <linux/random.h> |
82 | #endif | 83 | #endif |
83 | 84 | ||
84 | #include <asm/uaccess.h> | 85 | #include <linux/uaccess.h> |
85 | #include <asm/unaligned.h> | 86 | #include <asm/unaligned.h> |
86 | 87 | ||
87 | #include <linux/proc_fs.h> | 88 | #include <linux/proc_fs.h> |
@@ -97,7 +98,11 @@ | |||
97 | #endif | 98 | #endif |
98 | 99 | ||
99 | #define INFINITY_LIFE_TIME 0xFFFFFFFF | 100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF |
100 | #define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b))) | 101 | #define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) |
102 | |||
103 | #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) | ||
104 | #define ADDRCONF_TIMER_FUZZ (HZ / 4) | ||
105 | #define ADDRCONF_TIMER_FUZZ_MAX (HZ) | ||
101 | 106 | ||
102 | #ifdef CONFIG_SYSCTL | 107 | #ifdef CONFIG_SYSCTL |
103 | static void addrconf_sysctl_register(struct inet6_dev *idev); | 108 | static void addrconf_sysctl_register(struct inet6_dev *idev); |
@@ -126,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev); | |||
126 | /* | 131 | /* |
127 | * Configured unicast address hash table | 132 | * Configured unicast address hash table |
128 | */ | 133 | */ |
129 | static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE]; | 134 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; |
130 | static DEFINE_RWLOCK(addrconf_hash_lock); | 135 | static DEFINE_SPINLOCK(addrconf_hash_lock); |
131 | 136 | ||
132 | static void addrconf_verify(unsigned long); | 137 | static void addrconf_verify(unsigned long); |
133 | 138 | ||
@@ -137,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock); | |||
137 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); | 142 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); |
138 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); | 143 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); |
139 | 144 | ||
140 | static void addrconf_bonding_change(struct net_device *dev, | 145 | static void addrconf_type_change(struct net_device *dev, |
141 | unsigned long event); | 146 | unsigned long event); |
142 | static int addrconf_ifdown(struct net_device *dev, int how); | 147 | static int addrconf_ifdown(struct net_device *dev, int how); |
143 | 148 | ||
144 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); | 149 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); |
@@ -151,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | |||
151 | 156 | ||
152 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, | 157 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, |
153 | struct prefix_info *pinfo); | 158 | struct prefix_info *pinfo); |
154 | static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 159 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
155 | struct net_device *dev); | 160 | struct net_device *dev); |
156 | 161 | ||
157 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | 162 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); |
158 | 163 | ||
@@ -249,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp) | |||
249 | __in6_ifa_put(ifp); | 254 | __in6_ifa_put(ifp); |
250 | } | 255 | } |
251 | 256 | ||
252 | enum addrconf_timer_t | 257 | enum addrconf_timer_t { |
253 | { | ||
254 | AC_NONE, | 258 | AC_NONE, |
255 | AC_DAD, | 259 | AC_DAD, |
256 | AC_RS, | 260 | AC_RS, |
@@ -270,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp, | |||
270 | case AC_RS: | 274 | case AC_RS: |
271 | ifp->timer.function = addrconf_rs_timer; | 275 | ifp->timer.function = addrconf_rs_timer; |
272 | break; | 276 | break; |
273 | default:; | 277 | default: |
278 | break; | ||
274 | } | 279 | } |
275 | ifp->timer.expires = jiffies + when; | 280 | ifp->timer.expires = jiffies + when; |
276 | add_timer(&ifp->timer); | 281 | add_timer(&ifp->timer); |
@@ -317,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
317 | { | 322 | { |
318 | struct net_device *dev = idev->dev; | 323 | struct net_device *dev = idev->dev; |
319 | 324 | ||
320 | WARN_ON(idev->addr_list != NULL); | 325 | WARN_ON(!list_empty(&idev->addr_list)); |
321 | WARN_ON(idev->mc_list != NULL); | 326 | WARN_ON(idev->mc_list != NULL); |
322 | 327 | ||
323 | #ifdef NET_REFCNT_DEBUG | 328 | #ifdef NET_REFCNT_DEBUG |
@@ -325,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
325 | #endif | 330 | #endif |
326 | dev_put(dev); | 331 | dev_put(dev); |
327 | if (!idev->dead) { | 332 | if (!idev->dead) { |
328 | printk("Freeing alive inet6 device %p\n", idev); | 333 | pr_warning("Freeing alive inet6 device %p\n", idev); |
329 | return; | 334 | return; |
330 | } | 335 | } |
331 | snmp6_free_dev(idev); | 336 | snmp6_free_dev(idev); |
@@ -350,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
350 | 355 | ||
351 | rwlock_init(&ndev->lock); | 356 | rwlock_init(&ndev->lock); |
352 | ndev->dev = dev; | 357 | ndev->dev = dev; |
358 | INIT_LIST_HEAD(&ndev->addr_list); | ||
359 | |||
353 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); | 360 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); |
354 | ndev->cnf.mtu6 = dev->mtu; | 361 | ndev->cnf.mtu6 = dev->mtu; |
355 | ndev->cnf.sysctl = NULL; | 362 | ndev->cnf.sysctl = NULL; |
@@ -401,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
401 | #endif | 408 | #endif |
402 | 409 | ||
403 | #ifdef CONFIG_IPV6_PRIVACY | 410 | #ifdef CONFIG_IPV6_PRIVACY |
411 | INIT_LIST_HEAD(&ndev->tempaddr_list); | ||
404 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); | 412 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); |
405 | if ((dev->flags&IFF_LOOPBACK) || | 413 | if ((dev->flags&IFF_LOOPBACK) || |
406 | dev->type == ARPHRD_TUNNEL || | 414 | dev->type == ARPHRD_TUNNEL || |
@@ -438,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev) | |||
438 | 446 | ||
439 | ASSERT_RTNL(); | 447 | ASSERT_RTNL(); |
440 | 448 | ||
441 | if ((idev = __in6_dev_get(dev)) == NULL) { | 449 | idev = __in6_dev_get(dev); |
442 | if ((idev = ipv6_add_dev(dev)) == NULL) | 450 | if (!idev) { |
451 | idev = ipv6_add_dev(dev); | ||
452 | if (!idev) | ||
443 | return NULL; | 453 | return NULL; |
444 | } | 454 | } |
445 | 455 | ||
@@ -465,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev) | |||
465 | else | 475 | else |
466 | ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); | 476 | ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); |
467 | } | 477 | } |
468 | for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) { | 478 | |
479 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | ||
469 | if (ifa->flags&IFA_F_TENTATIVE) | 480 | if (ifa->flags&IFA_F_TENTATIVE) |
470 | continue; | 481 | continue; |
471 | if (idev->cnf.forwarding) | 482 | if (idev->cnf.forwarding) |
@@ -522,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
522 | } | 533 | } |
523 | #endif | 534 | #endif |
524 | 535 | ||
525 | /* Nobody refers to this ifaddr, destroy it */ | 536 | static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head) |
537 | { | ||
538 | struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu); | ||
539 | kfree(ifp); | ||
540 | } | ||
526 | 541 | ||
542 | /* Nobody refers to this ifaddr, destroy it */ | ||
527 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | 543 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) |
528 | { | 544 | { |
529 | WARN_ON(ifp->if_next != NULL); | 545 | WARN_ON(!hlist_unhashed(&ifp->addr_lst)); |
530 | WARN_ON(ifp->lst_next != NULL); | ||
531 | 546 | ||
532 | #ifdef NET_REFCNT_DEBUG | 547 | #ifdef NET_REFCNT_DEBUG |
533 | printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); | 548 | printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); |
@@ -536,54 +551,46 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
536 | in6_dev_put(ifp->idev); | 551 | in6_dev_put(ifp->idev); |
537 | 552 | ||
538 | if (del_timer(&ifp->timer)) | 553 | if (del_timer(&ifp->timer)) |
539 | printk("Timer is still running, when freeing ifa=%p\n", ifp); | 554 | pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); |
540 | 555 | ||
541 | if (!ifp->dead) { | 556 | if (!ifp->dead) { |
542 | printk("Freeing alive inet6 address %p\n", ifp); | 557 | pr_warning("Freeing alive inet6 address %p\n", ifp); |
543 | return; | 558 | return; |
544 | } | 559 | } |
545 | dst_release(&ifp->rt->u.dst); | 560 | dst_release(&ifp->rt->u.dst); |
546 | 561 | ||
547 | kfree(ifp); | 562 | call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); |
548 | } | 563 | } |
549 | 564 | ||
550 | static void | 565 | static void |
551 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) | 566 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) |
552 | { | 567 | { |
553 | struct inet6_ifaddr *ifa, **ifap; | 568 | struct list_head *p; |
554 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); | 569 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); |
555 | 570 | ||
556 | /* | 571 | /* |
557 | * Each device address list is sorted in order of scope - | 572 | * Each device address list is sorted in order of scope - |
558 | * global before linklocal. | 573 | * global before linklocal. |
559 | */ | 574 | */ |
560 | for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; | 575 | list_for_each(p, &idev->addr_list) { |
561 | ifap = &ifa->if_next) { | 576 | struct inet6_ifaddr *ifa |
577 | = list_entry(p, struct inet6_ifaddr, if_list); | ||
562 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) | 578 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) |
563 | break; | 579 | break; |
564 | } | 580 | } |
565 | 581 | ||
566 | ifp->if_next = *ifap; | 582 | list_add_tail(&ifp->if_list, p); |
567 | *ifap = ifp; | ||
568 | } | 583 | } |
569 | 584 | ||
570 | /* | 585 | static u32 ipv6_addr_hash(const struct in6_addr *addr) |
571 | * Hash function taken from net_alias.c | ||
572 | */ | ||
573 | static u8 ipv6_addr_hash(const struct in6_addr *addr) | ||
574 | { | 586 | { |
575 | __u32 word; | ||
576 | |||
577 | /* | 587 | /* |
578 | * We perform the hash function over the last 64 bits of the address | 588 | * We perform the hash function over the last 64 bits of the address |
579 | * This will include the IEEE address token on links that support it. | 589 | * This will include the IEEE address token on links that support it. |
580 | */ | 590 | */ |
581 | 591 | return jhash_2words((__force u32)addr->s6_addr32[2], | |
582 | word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); | 592 | (__force u32)addr->s6_addr32[3], 0) |
583 | word ^= (word >> 16); | 593 | & (IN6_ADDR_HSIZE - 1); |
584 | word ^= (word >> 8); | ||
585 | |||
586 | return ((word ^ (word >> 4)) & 0x0f); | ||
587 | } | 594 | } |
588 | 595 | ||
589 | /* On success it returns ifp with increased reference count */ | 596 | /* On success it returns ifp with increased reference count */ |
@@ -594,7 +601,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
594 | { | 601 | { |
595 | struct inet6_ifaddr *ifa = NULL; | 602 | struct inet6_ifaddr *ifa = NULL; |
596 | struct rt6_info *rt; | 603 | struct rt6_info *rt; |
597 | int hash; | 604 | unsigned int hash; |
598 | int err = 0; | 605 | int err = 0; |
599 | int addr_type = ipv6_addr_type(addr); | 606 | int addr_type = ipv6_addr_type(addr); |
600 | 607 | ||
@@ -615,7 +622,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
615 | goto out2; | 622 | goto out2; |
616 | } | 623 | } |
617 | 624 | ||
618 | write_lock(&addrconf_hash_lock); | 625 | spin_lock(&addrconf_hash_lock); |
619 | 626 | ||
620 | /* Ignore adding duplicate addresses on an interface */ | 627 | /* Ignore adding duplicate addresses on an interface */ |
621 | if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { | 628 | if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { |
@@ -642,6 +649,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
642 | 649 | ||
643 | spin_lock_init(&ifa->lock); | 650 | spin_lock_init(&ifa->lock); |
644 | init_timer(&ifa->timer); | 651 | init_timer(&ifa->timer); |
652 | INIT_HLIST_NODE(&ifa->addr_lst); | ||
645 | ifa->timer.data = (unsigned long) ifa; | 653 | ifa->timer.data = (unsigned long) ifa; |
646 | ifa->scope = scope; | 654 | ifa->scope = scope; |
647 | ifa->prefix_len = pfxlen; | 655 | ifa->prefix_len = pfxlen; |
@@ -668,10 +676,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
668 | /* Add to big hash table */ | 676 | /* Add to big hash table */ |
669 | hash = ipv6_addr_hash(addr); | 677 | hash = ipv6_addr_hash(addr); |
670 | 678 | ||
671 | ifa->lst_next = inet6_addr_lst[hash]; | 679 | hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]); |
672 | inet6_addr_lst[hash] = ifa; | 680 | spin_unlock(&addrconf_hash_lock); |
673 | in6_ifa_hold(ifa); | ||
674 | write_unlock(&addrconf_hash_lock); | ||
675 | 681 | ||
676 | write_lock(&idev->lock); | 682 | write_lock(&idev->lock); |
677 | /* Add to inet6_dev unicast addr list. */ | 683 | /* Add to inet6_dev unicast addr list. */ |
@@ -679,8 +685,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
679 | 685 | ||
680 | #ifdef CONFIG_IPV6_PRIVACY | 686 | #ifdef CONFIG_IPV6_PRIVACY |
681 | if (ifa->flags&IFA_F_TEMPORARY) { | 687 | if (ifa->flags&IFA_F_TEMPORARY) { |
682 | ifa->tmp_next = idev->tempaddr_list; | 688 | list_add(&ifa->tmp_list, &idev->tempaddr_list); |
683 | idev->tempaddr_list = ifa; | ||
684 | in6_ifa_hold(ifa); | 689 | in6_ifa_hold(ifa); |
685 | } | 690 | } |
686 | #endif | 691 | #endif |
@@ -699,7 +704,7 @@ out2: | |||
699 | 704 | ||
700 | return ifa; | 705 | return ifa; |
701 | out: | 706 | out: |
702 | write_unlock(&addrconf_hash_lock); | 707 | spin_unlock(&addrconf_hash_lock); |
703 | goto out2; | 708 | goto out2; |
704 | } | 709 | } |
705 | 710 | ||
@@ -707,7 +712,7 @@ out: | |||
707 | 712 | ||
708 | static void ipv6_del_addr(struct inet6_ifaddr *ifp) | 713 | static void ipv6_del_addr(struct inet6_ifaddr *ifp) |
709 | { | 714 | { |
710 | struct inet6_ifaddr *ifa, **ifap; | 715 | struct inet6_ifaddr *ifa, *ifn; |
711 | struct inet6_dev *idev = ifp->idev; | 716 | struct inet6_dev *idev = ifp->idev; |
712 | int hash; | 717 | int hash; |
713 | int deleted = 0, onlink = 0; | 718 | int deleted = 0, onlink = 0; |
@@ -717,42 +722,27 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
717 | 722 | ||
718 | ifp->dead = 1; | 723 | ifp->dead = 1; |
719 | 724 | ||
720 | write_lock_bh(&addrconf_hash_lock); | 725 | spin_lock_bh(&addrconf_hash_lock); |
721 | for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL; | 726 | hlist_del_init_rcu(&ifp->addr_lst); |
722 | ifap = &ifa->lst_next) { | 727 | spin_unlock_bh(&addrconf_hash_lock); |
723 | if (ifa == ifp) { | ||
724 | *ifap = ifa->lst_next; | ||
725 | __in6_ifa_put(ifp); | ||
726 | ifa->lst_next = NULL; | ||
727 | break; | ||
728 | } | ||
729 | } | ||
730 | write_unlock_bh(&addrconf_hash_lock); | ||
731 | 728 | ||
732 | write_lock_bh(&idev->lock); | 729 | write_lock_bh(&idev->lock); |
733 | #ifdef CONFIG_IPV6_PRIVACY | 730 | #ifdef CONFIG_IPV6_PRIVACY |
734 | if (ifp->flags&IFA_F_TEMPORARY) { | 731 | if (ifp->flags&IFA_F_TEMPORARY) { |
735 | for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL; | 732 | list_del(&ifp->tmp_list); |
736 | ifap = &ifa->tmp_next) { | 733 | if (ifp->ifpub) { |
737 | if (ifa == ifp) { | 734 | in6_ifa_put(ifp->ifpub); |
738 | *ifap = ifa->tmp_next; | 735 | ifp->ifpub = NULL; |
739 | if (ifp->ifpub) { | ||
740 | in6_ifa_put(ifp->ifpub); | ||
741 | ifp->ifpub = NULL; | ||
742 | } | ||
743 | __in6_ifa_put(ifp); | ||
744 | ifa->tmp_next = NULL; | ||
745 | break; | ||
746 | } | ||
747 | } | 736 | } |
737 | __in6_ifa_put(ifp); | ||
748 | } | 738 | } |
749 | #endif | 739 | #endif |
750 | 740 | ||
751 | for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) { | 741 | list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) { |
752 | if (ifa == ifp) { | 742 | if (ifa == ifp) { |
753 | *ifap = ifa->if_next; | 743 | list_del_init(&ifp->if_list); |
754 | __in6_ifa_put(ifp); | 744 | __in6_ifa_put(ifp); |
755 | ifa->if_next = NULL; | 745 | |
756 | if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) | 746 | if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) |
757 | break; | 747 | break; |
758 | deleted = 1; | 748 | deleted = 1; |
@@ -785,7 +775,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
785 | } | 775 | } |
786 | } | 776 | } |
787 | } | 777 | } |
788 | ifap = &ifa->if_next; | ||
789 | } | 778 | } |
790 | write_unlock_bh(&idev->lock); | 779 | write_unlock_bh(&idev->lock); |
791 | 780 | ||
@@ -1164,7 +1153,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, | |||
1164 | continue; | 1153 | continue; |
1165 | 1154 | ||
1166 | read_lock_bh(&idev->lock); | 1155 | read_lock_bh(&idev->lock); |
1167 | for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) { | 1156 | list_for_each_entry(score->ifa, &idev->addr_list, if_list) { |
1168 | int i; | 1157 | int i; |
1169 | 1158 | ||
1170 | /* | 1159 | /* |
@@ -1242,7 +1231,6 @@ try_nextdev: | |||
1242 | in6_ifa_put(hiscore->ifa); | 1231 | in6_ifa_put(hiscore->ifa); |
1243 | return 0; | 1232 | return 0; |
1244 | } | 1233 | } |
1245 | |||
1246 | EXPORT_SYMBOL(ipv6_dev_get_saddr); | 1234 | EXPORT_SYMBOL(ipv6_dev_get_saddr); |
1247 | 1235 | ||
1248 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | 1236 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, |
@@ -1252,12 +1240,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | |||
1252 | int err = -EADDRNOTAVAIL; | 1240 | int err = -EADDRNOTAVAIL; |
1253 | 1241 | ||
1254 | rcu_read_lock(); | 1242 | rcu_read_lock(); |
1255 | if ((idev = __in6_dev_get(dev)) != NULL) { | 1243 | idev = __in6_dev_get(dev); |
1244 | if (idev) { | ||
1256 | struct inet6_ifaddr *ifp; | 1245 | struct inet6_ifaddr *ifp; |
1257 | 1246 | ||
1258 | read_lock_bh(&idev->lock); | 1247 | read_lock_bh(&idev->lock); |
1259 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { | 1248 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
1260 | if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { | 1249 | if (ifp->scope == IFA_LINK && |
1250 | !(ifp->flags & banned_flags)) { | ||
1261 | ipv6_addr_copy(addr, &ifp->addr); | 1251 | ipv6_addr_copy(addr, &ifp->addr); |
1262 | err = 0; | 1252 | err = 0; |
1263 | break; | 1253 | break; |
@@ -1275,7 +1265,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1275 | struct inet6_ifaddr *ifp; | 1265 | struct inet6_ifaddr *ifp; |
1276 | 1266 | ||
1277 | read_lock_bh(&idev->lock); | 1267 | read_lock_bh(&idev->lock); |
1278 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) | 1268 | list_for_each_entry(ifp, &idev->addr_list, if_list) |
1279 | cnt++; | 1269 | cnt++; |
1280 | read_unlock_bh(&idev->lock); | 1270 | read_unlock_bh(&idev->lock); |
1281 | return cnt; | 1271 | return cnt; |
@@ -1284,11 +1274,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1284 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | 1274 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, |
1285 | struct net_device *dev, int strict) | 1275 | struct net_device *dev, int strict) |
1286 | { | 1276 | { |
1287 | struct inet6_ifaddr * ifp; | 1277 | struct inet6_ifaddr *ifp = NULL; |
1288 | u8 hash = ipv6_addr_hash(addr); | 1278 | struct hlist_node *node; |
1279 | unsigned int hash = ipv6_addr_hash(addr); | ||
1289 | 1280 | ||
1290 | read_lock_bh(&addrconf_hash_lock); | 1281 | rcu_read_lock_bh(); |
1291 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1282 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1292 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1283 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1293 | continue; | 1284 | continue; |
1294 | if (ipv6_addr_equal(&ifp->addr, addr) && | 1285 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -1298,27 +1289,28 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | |||
1298 | break; | 1289 | break; |
1299 | } | 1290 | } |
1300 | } | 1291 | } |
1301 | read_unlock_bh(&addrconf_hash_lock); | 1292 | rcu_read_unlock_bh(); |
1293 | |||
1302 | return ifp != NULL; | 1294 | return ifp != NULL; |
1303 | } | 1295 | } |
1304 | EXPORT_SYMBOL(ipv6_chk_addr); | 1296 | EXPORT_SYMBOL(ipv6_chk_addr); |
1305 | 1297 | ||
1306 | static | 1298 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
1307 | int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 1299 | struct net_device *dev) |
1308 | struct net_device *dev) | ||
1309 | { | 1300 | { |
1310 | struct inet6_ifaddr * ifp; | 1301 | unsigned int hash = ipv6_addr_hash(addr); |
1311 | u8 hash = ipv6_addr_hash(addr); | 1302 | struct inet6_ifaddr *ifp; |
1303 | struct hlist_node *node; | ||
1312 | 1304 | ||
1313 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1305 | hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1314 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1306 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1315 | continue; | 1307 | continue; |
1316 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1308 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
1317 | if (dev == NULL || ifp->idev->dev == dev) | 1309 | if (dev == NULL || ifp->idev->dev == dev) |
1318 | break; | 1310 | return true; |
1319 | } | 1311 | } |
1320 | } | 1312 | } |
1321 | return ifp != NULL; | 1313 | return false; |
1322 | } | 1314 | } |
1323 | 1315 | ||
1324 | int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) | 1316 | int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) |
@@ -1332,7 +1324,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) | |||
1332 | idev = __in6_dev_get(dev); | 1324 | idev = __in6_dev_get(dev); |
1333 | if (idev) { | 1325 | if (idev) { |
1334 | read_lock_bh(&idev->lock); | 1326 | read_lock_bh(&idev->lock); |
1335 | for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { | 1327 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
1336 | onlink = ipv6_prefix_equal(addr, &ifa->addr, | 1328 | onlink = ipv6_prefix_equal(addr, &ifa->addr, |
1337 | ifa->prefix_len); | 1329 | ifa->prefix_len); |
1338 | if (onlink) | 1330 | if (onlink) |
@@ -1349,24 +1341,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix); | |||
1349 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, | 1341 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, |
1350 | struct net_device *dev, int strict) | 1342 | struct net_device *dev, int strict) |
1351 | { | 1343 | { |
1352 | struct inet6_ifaddr * ifp; | 1344 | struct inet6_ifaddr *ifp, *result = NULL; |
1353 | u8 hash = ipv6_addr_hash(addr); | 1345 | unsigned int hash = ipv6_addr_hash(addr); |
1346 | struct hlist_node *node; | ||
1354 | 1347 | ||
1355 | read_lock_bh(&addrconf_hash_lock); | 1348 | rcu_read_lock_bh(); |
1356 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1349 | hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1357 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1350 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1358 | continue; | 1351 | continue; |
1359 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1352 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
1360 | if (dev == NULL || ifp->idev->dev == dev || | 1353 | if (dev == NULL || ifp->idev->dev == dev || |
1361 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { | 1354 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { |
1355 | result = ifp; | ||
1362 | in6_ifa_hold(ifp); | 1356 | in6_ifa_hold(ifp); |
1363 | break; | 1357 | break; |
1364 | } | 1358 | } |
1365 | } | 1359 | } |
1366 | } | 1360 | } |
1367 | read_unlock_bh(&addrconf_hash_lock); | 1361 | rcu_read_unlock_bh(); |
1368 | 1362 | ||
1369 | return ifp; | 1363 | return result; |
1370 | } | 1364 | } |
1371 | 1365 | ||
1372 | /* Gets referenced address, destroys ifaddr */ | 1366 | /* Gets referenced address, destroys ifaddr */ |
@@ -1380,6 +1374,8 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) | |||
1380 | if (dad_failed) | 1374 | if (dad_failed) |
1381 | ifp->flags |= IFA_F_DADFAILED; | 1375 | ifp->flags |= IFA_F_DADFAILED; |
1382 | spin_unlock_bh(&ifp->lock); | 1376 | spin_unlock_bh(&ifp->lock); |
1377 | if (dad_failed) | ||
1378 | ipv6_ifa_notify(0, ifp); | ||
1383 | in6_ifa_put(ifp); | 1379 | in6_ifa_put(ifp); |
1384 | #ifdef CONFIG_IPV6_PRIVACY | 1380 | #ifdef CONFIG_IPV6_PRIVACY |
1385 | } else if (ifp->flags&IFA_F_TEMPORARY) { | 1381 | } else if (ifp->flags&IFA_F_TEMPORARY) { |
@@ -1567,7 +1563,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) | |||
1567 | struct inet6_ifaddr *ifp; | 1563 | struct inet6_ifaddr *ifp; |
1568 | 1564 | ||
1569 | read_lock_bh(&idev->lock); | 1565 | read_lock_bh(&idev->lock); |
1570 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { | 1566 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
1571 | if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { | 1567 | if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { |
1572 | memcpy(eui, ifp->addr.s6_addr+8, 8); | 1568 | memcpy(eui, ifp->addr.s6_addr+8, 8); |
1573 | err = 0; | 1569 | err = 0; |
@@ -1735,7 +1731,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | |||
1735 | 1731 | ||
1736 | ASSERT_RTNL(); | 1732 | ASSERT_RTNL(); |
1737 | 1733 | ||
1738 | if ((idev = ipv6_find_idev(dev)) == NULL) | 1734 | idev = ipv6_find_idev(dev); |
1735 | if (!idev) | ||
1739 | return NULL; | 1736 | return NULL; |
1740 | 1737 | ||
1741 | /* Add default multicast route */ | 1738 | /* Add default multicast route */ |
@@ -1968,7 +1965,7 @@ ok: | |||
1968 | #ifdef CONFIG_IPV6_PRIVACY | 1965 | #ifdef CONFIG_IPV6_PRIVACY |
1969 | read_lock_bh(&in6_dev->lock); | 1966 | read_lock_bh(&in6_dev->lock); |
1970 | /* update all temporary addresses in the list */ | 1967 | /* update all temporary addresses in the list */ |
1971 | for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) { | 1968 | list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { |
1972 | /* | 1969 | /* |
1973 | * When adjusting the lifetimes of an existing | 1970 | * When adjusting the lifetimes of an existing |
1974 | * temporary address, only lower the lifetimes. | 1971 | * temporary address, only lower the lifetimes. |
@@ -2171,7 +2168,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2171 | return -ENXIO; | 2168 | return -ENXIO; |
2172 | 2169 | ||
2173 | read_lock_bh(&idev->lock); | 2170 | read_lock_bh(&idev->lock); |
2174 | for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) { | 2171 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2175 | if (ifp->prefix_len == plen && | 2172 | if (ifp->prefix_len == plen && |
2176 | ipv6_addr_equal(pfx, &ifp->addr)) { | 2173 | ipv6_addr_equal(pfx, &ifp->addr)) { |
2177 | in6_ifa_hold(ifp); | 2174 | in6_ifa_hold(ifp); |
@@ -2182,7 +2179,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2182 | /* If the last address is deleted administratively, | 2179 | /* If the last address is deleted administratively, |
2183 | disable IPv6 on this interface. | 2180 | disable IPv6 on this interface. |
2184 | */ | 2181 | */ |
2185 | if (idev->addr_list == NULL) | 2182 | if (list_empty(&idev->addr_list)) |
2186 | addrconf_ifdown(idev->dev, 1); | 2183 | addrconf_ifdown(idev->dev, 1); |
2187 | return 0; | 2184 | return 0; |
2188 | } | 2185 | } |
@@ -2443,7 +2440,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) | |||
2443 | 2440 | ||
2444 | ASSERT_RTNL(); | 2441 | ASSERT_RTNL(); |
2445 | 2442 | ||
2446 | if ((idev = addrconf_add_dev(dev)) == NULL) { | 2443 | idev = addrconf_add_dev(dev); |
2444 | if (!idev) { | ||
2447 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); | 2445 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); |
2448 | return; | 2446 | return; |
2449 | } | 2447 | } |
@@ -2458,7 +2456,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2458 | int run_pending = 0; | 2456 | int run_pending = 0; |
2459 | int err; | 2457 | int err; |
2460 | 2458 | ||
2461 | switch(event) { | 2459 | switch (event) { |
2462 | case NETDEV_REGISTER: | 2460 | case NETDEV_REGISTER: |
2463 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { | 2461 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { |
2464 | idev = ipv6_add_dev(dev); | 2462 | idev = ipv6_add_dev(dev); |
@@ -2466,6 +2464,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2466 | return notifier_from_errno(-ENOMEM); | 2464 | return notifier_from_errno(-ENOMEM); |
2467 | } | 2465 | } |
2468 | break; | 2466 | break; |
2467 | |||
2469 | case NETDEV_UP: | 2468 | case NETDEV_UP: |
2470 | case NETDEV_CHANGE: | 2469 | case NETDEV_CHANGE: |
2471 | if (dev->flags & IFF_SLAVE) | 2470 | if (dev->flags & IFF_SLAVE) |
@@ -2495,10 +2494,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2495 | } | 2494 | } |
2496 | 2495 | ||
2497 | if (idev) { | 2496 | if (idev) { |
2498 | if (idev->if_flags & IF_READY) { | 2497 | if (idev->if_flags & IF_READY) |
2499 | /* device is already configured. */ | 2498 | /* device is already configured. */ |
2500 | break; | 2499 | break; |
2501 | } | ||
2502 | idev->if_flags |= IF_READY; | 2500 | idev->if_flags |= IF_READY; |
2503 | } | 2501 | } |
2504 | 2502 | ||
@@ -2510,7 +2508,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2510 | run_pending = 1; | 2508 | run_pending = 1; |
2511 | } | 2509 | } |
2512 | 2510 | ||
2513 | switch(dev->type) { | 2511 | switch (dev->type) { |
2514 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2512 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) |
2515 | case ARPHRD_SIT: | 2513 | case ARPHRD_SIT: |
2516 | addrconf_sit_config(dev); | 2514 | addrconf_sit_config(dev); |
@@ -2527,25 +2525,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2527 | addrconf_dev_config(dev); | 2525 | addrconf_dev_config(dev); |
2528 | break; | 2526 | break; |
2529 | } | 2527 | } |
2528 | |||
2530 | if (idev) { | 2529 | if (idev) { |
2531 | if (run_pending) | 2530 | if (run_pending) |
2532 | addrconf_dad_run(idev); | 2531 | addrconf_dad_run(idev); |
2533 | 2532 | ||
2534 | /* If the MTU changed during the interface down, when the | 2533 | /* |
2535 | interface up, the changed MTU must be reflected in the | 2534 | * If the MTU changed during the interface down, |
2536 | idev as well as routers. | 2535 | * when the interface up, the changed MTU must be |
2536 | * reflected in the idev as well as routers. | ||
2537 | */ | 2537 | */ |
2538 | if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) { | 2538 | if (idev->cnf.mtu6 != dev->mtu && |
2539 | dev->mtu >= IPV6_MIN_MTU) { | ||
2539 | rt6_mtu_change(dev, dev->mtu); | 2540 | rt6_mtu_change(dev, dev->mtu); |
2540 | idev->cnf.mtu6 = dev->mtu; | 2541 | idev->cnf.mtu6 = dev->mtu; |
2541 | } | 2542 | } |
2542 | idev->tstamp = jiffies; | 2543 | idev->tstamp = jiffies; |
2543 | inet6_ifinfo_notify(RTM_NEWLINK, idev); | 2544 | inet6_ifinfo_notify(RTM_NEWLINK, idev); |
2544 | /* If the changed mtu during down is lower than IPV6_MIN_MTU | 2545 | |
2545 | stop IPv6 on this interface. | 2546 | /* |
2547 | * If the changed mtu during down is lower than | ||
2548 | * IPV6_MIN_MTU stop IPv6 on this interface. | ||
2546 | */ | 2549 | */ |
2547 | if (dev->mtu < IPV6_MIN_MTU) | 2550 | if (dev->mtu < IPV6_MIN_MTU) |
2548 | addrconf_ifdown(dev, event != NETDEV_DOWN); | 2551 | addrconf_ifdown(dev, 1); |
2549 | } | 2552 | } |
2550 | break; | 2553 | break; |
2551 | 2554 | ||
@@ -2562,7 +2565,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2562 | break; | 2565 | break; |
2563 | } | 2566 | } |
2564 | 2567 | ||
2565 | /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */ | 2568 | /* |
2569 | * MTU falled under IPV6_MIN_MTU. | ||
2570 | * Stop IPv6 on this interface. | ||
2571 | */ | ||
2566 | 2572 | ||
2567 | case NETDEV_DOWN: | 2573 | case NETDEV_DOWN: |
2568 | case NETDEV_UNREGISTER: | 2574 | case NETDEV_UNREGISTER: |
@@ -2582,9 +2588,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2582 | return notifier_from_errno(err); | 2588 | return notifier_from_errno(err); |
2583 | } | 2589 | } |
2584 | break; | 2590 | break; |
2585 | case NETDEV_BONDING_OLDTYPE: | 2591 | |
2586 | case NETDEV_BONDING_NEWTYPE: | 2592 | case NETDEV_PRE_TYPE_CHANGE: |
2587 | addrconf_bonding_change(dev, event); | 2593 | case NETDEV_POST_TYPE_CHANGE: |
2594 | addrconf_type_change(dev, event); | ||
2588 | break; | 2595 | break; |
2589 | } | 2596 | } |
2590 | 2597 | ||
@@ -2596,28 +2603,27 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2596 | */ | 2603 | */ |
2597 | static struct notifier_block ipv6_dev_notf = { | 2604 | static struct notifier_block ipv6_dev_notf = { |
2598 | .notifier_call = addrconf_notify, | 2605 | .notifier_call = addrconf_notify, |
2599 | .priority = 0 | ||
2600 | }; | 2606 | }; |
2601 | 2607 | ||
2602 | static void addrconf_bonding_change(struct net_device *dev, unsigned long event) | 2608 | static void addrconf_type_change(struct net_device *dev, unsigned long event) |
2603 | { | 2609 | { |
2604 | struct inet6_dev *idev; | 2610 | struct inet6_dev *idev; |
2605 | ASSERT_RTNL(); | 2611 | ASSERT_RTNL(); |
2606 | 2612 | ||
2607 | idev = __in6_dev_get(dev); | 2613 | idev = __in6_dev_get(dev); |
2608 | 2614 | ||
2609 | if (event == NETDEV_BONDING_NEWTYPE) | 2615 | if (event == NETDEV_POST_TYPE_CHANGE) |
2610 | ipv6_mc_remap(idev); | 2616 | ipv6_mc_remap(idev); |
2611 | else if (event == NETDEV_BONDING_OLDTYPE) | 2617 | else if (event == NETDEV_PRE_TYPE_CHANGE) |
2612 | ipv6_mc_unmap(idev); | 2618 | ipv6_mc_unmap(idev); |
2613 | } | 2619 | } |
2614 | 2620 | ||
2615 | static int addrconf_ifdown(struct net_device *dev, int how) | 2621 | static int addrconf_ifdown(struct net_device *dev, int how) |
2616 | { | 2622 | { |
2617 | struct inet6_dev *idev; | ||
2618 | struct inet6_ifaddr *ifa, *keep_list, **bifa; | ||
2619 | struct net *net = dev_net(dev); | 2623 | struct net *net = dev_net(dev); |
2620 | int i; | 2624 | struct inet6_dev *idev; |
2625 | struct inet6_ifaddr *ifa; | ||
2626 | LIST_HEAD(keep_list); | ||
2621 | 2627 | ||
2622 | ASSERT_RTNL(); | 2628 | ASSERT_RTNL(); |
2623 | 2629 | ||
@@ -2628,8 +2634,9 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2628 | if (idev == NULL) | 2634 | if (idev == NULL) |
2629 | return -ENODEV; | 2635 | return -ENODEV; |
2630 | 2636 | ||
2631 | /* Step 1: remove reference to ipv6 device from parent device. | 2637 | /* |
2632 | Do not dev_put! | 2638 | * Step 1: remove reference to ipv6 device from parent device. |
2639 | * Do not dev_put! | ||
2633 | */ | 2640 | */ |
2634 | if (how) { | 2641 | if (how) { |
2635 | idev->dead = 1; | 2642 | idev->dead = 1; |
@@ -2642,40 +2649,21 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2642 | 2649 | ||
2643 | } | 2650 | } |
2644 | 2651 | ||
2645 | /* Step 2: clear hash table */ | ||
2646 | for (i=0; i<IN6_ADDR_HSIZE; i++) { | ||
2647 | bifa = &inet6_addr_lst[i]; | ||
2648 | |||
2649 | write_lock_bh(&addrconf_hash_lock); | ||
2650 | while ((ifa = *bifa) != NULL) { | ||
2651 | if (ifa->idev == idev && | ||
2652 | (how || !(ifa->flags&IFA_F_PERMANENT) || | ||
2653 | ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { | ||
2654 | *bifa = ifa->lst_next; | ||
2655 | ifa->lst_next = NULL; | ||
2656 | __in6_ifa_put(ifa); | ||
2657 | continue; | ||
2658 | } | ||
2659 | bifa = &ifa->lst_next; | ||
2660 | } | ||
2661 | write_unlock_bh(&addrconf_hash_lock); | ||
2662 | } | ||
2663 | |||
2664 | write_lock_bh(&idev->lock); | 2652 | write_lock_bh(&idev->lock); |
2665 | 2653 | ||
2666 | /* Step 3: clear flags for stateless addrconf */ | 2654 | /* Step 2: clear flags for stateless addrconf */ |
2667 | if (!how) | 2655 | if (!how) |
2668 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); | 2656 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); |
2669 | 2657 | ||
2670 | /* Step 4: clear address list */ | ||
2671 | #ifdef CONFIG_IPV6_PRIVACY | 2658 | #ifdef CONFIG_IPV6_PRIVACY |
2672 | if (how && del_timer(&idev->regen_timer)) | 2659 | if (how && del_timer(&idev->regen_timer)) |
2673 | in6_dev_put(idev); | 2660 | in6_dev_put(idev); |
2674 | 2661 | ||
2675 | /* clear tempaddr list */ | 2662 | /* Step 3: clear tempaddr list */ |
2676 | while ((ifa = idev->tempaddr_list) != NULL) { | 2663 | while (!list_empty(&idev->tempaddr_list)) { |
2677 | idev->tempaddr_list = ifa->tmp_next; | 2664 | ifa = list_first_entry(&idev->tempaddr_list, |
2678 | ifa->tmp_next = NULL; | 2665 | struct inet6_ifaddr, tmp_list); |
2666 | list_del(&ifa->tmp_list); | ||
2679 | ifa->dead = 1; | 2667 | ifa->dead = 1; |
2680 | write_unlock_bh(&idev->lock); | 2668 | write_unlock_bh(&idev->lock); |
2681 | spin_lock_bh(&ifa->lock); | 2669 | spin_lock_bh(&ifa->lock); |
@@ -2689,23 +2677,18 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2689 | write_lock_bh(&idev->lock); | 2677 | write_lock_bh(&idev->lock); |
2690 | } | 2678 | } |
2691 | #endif | 2679 | #endif |
2692 | keep_list = NULL; | ||
2693 | bifa = &keep_list; | ||
2694 | while ((ifa = idev->addr_list) != NULL) { | ||
2695 | idev->addr_list = ifa->if_next; | ||
2696 | ifa->if_next = NULL; | ||
2697 | 2680 | ||
2681 | while (!list_empty(&idev->addr_list)) { | ||
2682 | ifa = list_first_entry(&idev->addr_list, | ||
2683 | struct inet6_ifaddr, if_list); | ||
2698 | addrconf_del_timer(ifa); | 2684 | addrconf_del_timer(ifa); |
2699 | 2685 | ||
2700 | /* If just doing link down, and address is permanent | 2686 | /* If just doing link down, and address is permanent |
2701 | and not link-local, then retain it. */ | 2687 | and not link-local, then retain it. */ |
2702 | if (how == 0 && | 2688 | if (!how && |
2703 | (ifa->flags&IFA_F_PERMANENT) && | 2689 | (ifa->flags&IFA_F_PERMANENT) && |
2704 | !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { | 2690 | !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { |
2705 | 2691 | list_move_tail(&ifa->if_list, &keep_list); | |
2706 | /* Move to holding list */ | ||
2707 | *bifa = ifa; | ||
2708 | bifa = &ifa->if_next; | ||
2709 | 2692 | ||
2710 | /* If not doing DAD on this address, just keep it. */ | 2693 | /* If not doing DAD on this address, just keep it. */ |
2711 | if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || | 2694 | if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || |
@@ -2720,24 +2703,32 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2720 | /* Flag it for later restoration when link comes up */ | 2703 | /* Flag it for later restoration when link comes up */ |
2721 | ifa->flags |= IFA_F_TENTATIVE; | 2704 | ifa->flags |= IFA_F_TENTATIVE; |
2722 | in6_ifa_hold(ifa); | 2705 | in6_ifa_hold(ifa); |
2706 | write_unlock_bh(&idev->lock); | ||
2723 | } else { | 2707 | } else { |
2708 | list_del(&ifa->if_list); | ||
2724 | ifa->dead = 1; | 2709 | ifa->dead = 1; |
2710 | write_unlock_bh(&idev->lock); | ||
2711 | |||
2712 | /* clear hash table */ | ||
2713 | spin_lock_bh(&addrconf_hash_lock); | ||
2714 | hlist_del_init_rcu(&ifa->addr_lst); | ||
2715 | spin_unlock_bh(&addrconf_hash_lock); | ||
2725 | } | 2716 | } |
2726 | write_unlock_bh(&idev->lock); | ||
2727 | 2717 | ||
2728 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2718 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2729 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); | 2719 | if (ifa->dead) |
2720 | atomic_notifier_call_chain(&inet6addr_chain, | ||
2721 | NETDEV_DOWN, ifa); | ||
2730 | in6_ifa_put(ifa); | 2722 | in6_ifa_put(ifa); |
2731 | 2723 | ||
2732 | write_lock_bh(&idev->lock); | 2724 | write_lock_bh(&idev->lock); |
2733 | } | 2725 | } |
2734 | 2726 | ||
2735 | idev->addr_list = keep_list; | 2727 | list_splice(&keep_list, &idev->addr_list); |
2736 | 2728 | ||
2737 | write_unlock_bh(&idev->lock); | 2729 | write_unlock_bh(&idev->lock); |
2738 | 2730 | ||
2739 | /* Step 5: Discard multicast list */ | 2731 | /* Step 5: Discard multicast list */ |
2740 | |||
2741 | if (how) | 2732 | if (how) |
2742 | ipv6_mc_destroy_dev(idev); | 2733 | ipv6_mc_destroy_dev(idev); |
2743 | else | 2734 | else |
@@ -2745,8 +2736,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2745 | 2736 | ||
2746 | idev->tstamp = jiffies; | 2737 | idev->tstamp = jiffies; |
2747 | 2738 | ||
2748 | /* Shot the device (if unregistered) */ | 2739 | /* Last: Shot the device (if unregistered) */ |
2749 | |||
2750 | if (how) { | 2740 | if (how) { |
2751 | addrconf_sysctl_unregister(idev); | 2741 | addrconf_sysctl_unregister(idev); |
2752 | neigh_parms_release(&nd_tbl, idev->nd_parms); | 2742 | neigh_parms_release(&nd_tbl, idev->nd_parms); |
@@ -2857,7 +2847,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2857 | * Optimistic nodes can start receiving | 2847 | * Optimistic nodes can start receiving |
2858 | * Frames right away | 2848 | * Frames right away |
2859 | */ | 2849 | */ |
2860 | if(ifp->flags & IFA_F_OPTIMISTIC) | 2850 | if (ifp->flags & IFA_F_OPTIMISTIC) |
2861 | ip6_ins_rt(ifp->rt); | 2851 | ip6_ins_rt(ifp->rt); |
2862 | 2852 | ||
2863 | addrconf_dad_kick(ifp); | 2853 | addrconf_dad_kick(ifp); |
@@ -2907,7 +2897,7 @@ out: | |||
2907 | 2897 | ||
2908 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | 2898 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) |
2909 | { | 2899 | { |
2910 | struct net_device * dev = ifp->idev->dev; | 2900 | struct net_device *dev = ifp->idev->dev; |
2911 | 2901 | ||
2912 | /* | 2902 | /* |
2913 | * Configure the address for reception. Now it is valid. | 2903 | * Configure the address for reception. Now it is valid. |
@@ -2938,11 +2928,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
2938 | } | 2928 | } |
2939 | } | 2929 | } |
2940 | 2930 | ||
2941 | static void addrconf_dad_run(struct inet6_dev *idev) { | 2931 | static void addrconf_dad_run(struct inet6_dev *idev) |
2932 | { | ||
2942 | struct inet6_ifaddr *ifp; | 2933 | struct inet6_ifaddr *ifp; |
2943 | 2934 | ||
2944 | read_lock_bh(&idev->lock); | 2935 | read_lock_bh(&idev->lock); |
2945 | for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { | 2936 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2946 | spin_lock(&ifp->lock); | 2937 | spin_lock(&ifp->lock); |
2947 | if (!(ifp->flags & IFA_F_TENTATIVE)) { | 2938 | if (!(ifp->flags & IFA_F_TENTATIVE)) { |
2948 | spin_unlock(&ifp->lock); | 2939 | spin_unlock(&ifp->lock); |
@@ -2967,36 +2958,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq) | |||
2967 | struct net *net = seq_file_net(seq); | 2958 | struct net *net = seq_file_net(seq); |
2968 | 2959 | ||
2969 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { | 2960 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { |
2970 | ifa = inet6_addr_lst[state->bucket]; | 2961 | struct hlist_node *n; |
2971 | 2962 | hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket], | |
2972 | while (ifa && !net_eq(dev_net(ifa->idev->dev), net)) | 2963 | addr_lst) |
2973 | ifa = ifa->lst_next; | 2964 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2974 | if (ifa) | 2965 | return ifa; |
2975 | break; | ||
2976 | } | 2966 | } |
2977 | return ifa; | 2967 | return NULL; |
2978 | } | 2968 | } |
2979 | 2969 | ||
2980 | static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) | 2970 | static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, |
2971 | struct inet6_ifaddr *ifa) | ||
2981 | { | 2972 | { |
2982 | struct if6_iter_state *state = seq->private; | 2973 | struct if6_iter_state *state = seq->private; |
2983 | struct net *net = seq_file_net(seq); | 2974 | struct net *net = seq_file_net(seq); |
2975 | struct hlist_node *n = &ifa->addr_lst; | ||
2984 | 2976 | ||
2985 | ifa = ifa->lst_next; | 2977 | hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) |
2986 | try_again: | 2978 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2987 | if (ifa) { | 2979 | return ifa; |
2988 | if (!net_eq(dev_net(ifa->idev->dev), net)) { | ||
2989 | ifa = ifa->lst_next; | ||
2990 | goto try_again; | ||
2991 | } | ||
2992 | } | ||
2993 | 2980 | ||
2994 | if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) { | 2981 | while (++state->bucket < IN6_ADDR_HSIZE) { |
2995 | ifa = inet6_addr_lst[state->bucket]; | 2982 | hlist_for_each_entry_rcu_bh(ifa, n, |
2996 | goto try_again; | 2983 | &inet6_addr_lst[state->bucket], addr_lst) { |
2984 | if (net_eq(dev_net(ifa->idev->dev), net)) | ||
2985 | return ifa; | ||
2986 | } | ||
2997 | } | 2987 | } |
2998 | 2988 | ||
2999 | return ifa; | 2989 | return NULL; |
3000 | } | 2990 | } |
3001 | 2991 | ||
3002 | static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | 2992 | static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) |
@@ -3004,15 +2994,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | |||
3004 | struct inet6_ifaddr *ifa = if6_get_first(seq); | 2994 | struct inet6_ifaddr *ifa = if6_get_first(seq); |
3005 | 2995 | ||
3006 | if (ifa) | 2996 | if (ifa) |
3007 | while(pos && (ifa = if6_get_next(seq, ifa)) != NULL) | 2997 | while (pos && (ifa = if6_get_next(seq, ifa)) != NULL) |
3008 | --pos; | 2998 | --pos; |
3009 | return pos ? NULL : ifa; | 2999 | return pos ? NULL : ifa; |
3010 | } | 3000 | } |
3011 | 3001 | ||
3012 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) | 3002 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) |
3013 | __acquires(addrconf_hash_lock) | 3003 | __acquires(rcu_bh) |
3014 | { | 3004 | { |
3015 | read_lock_bh(&addrconf_hash_lock); | 3005 | rcu_read_lock_bh(); |
3016 | return if6_get_idx(seq, *pos); | 3006 | return if6_get_idx(seq, *pos); |
3017 | } | 3007 | } |
3018 | 3008 | ||
@@ -3026,9 +3016,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
3026 | } | 3016 | } |
3027 | 3017 | ||
3028 | static void if6_seq_stop(struct seq_file *seq, void *v) | 3018 | static void if6_seq_stop(struct seq_file *seq, void *v) |
3029 | __releases(addrconf_hash_lock) | 3019 | __releases(rcu_bh) |
3030 | { | 3020 | { |
3031 | read_unlock_bh(&addrconf_hash_lock); | 3021 | rcu_read_unlock_bh(); |
3032 | } | 3022 | } |
3033 | 3023 | ||
3034 | static int if6_seq_show(struct seq_file *seq, void *v) | 3024 | static int if6_seq_show(struct seq_file *seq, void *v) |
@@ -3098,10 +3088,12 @@ void if6_proc_exit(void) | |||
3098 | int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | 3088 | int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) |
3099 | { | 3089 | { |
3100 | int ret = 0; | 3090 | int ret = 0; |
3101 | struct inet6_ifaddr * ifp; | 3091 | struct inet6_ifaddr *ifp = NULL; |
3102 | u8 hash = ipv6_addr_hash(addr); | 3092 | struct hlist_node *n; |
3103 | read_lock_bh(&addrconf_hash_lock); | 3093 | unsigned int hash = ipv6_addr_hash(addr); |
3104 | for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { | 3094 | |
3095 | rcu_read_lock_bh(); | ||
3096 | hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) { | ||
3105 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 3097 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
3106 | continue; | 3098 | continue; |
3107 | if (ipv6_addr_equal(&ifp->addr, addr) && | 3099 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -3110,7 +3102,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3110 | break; | 3102 | break; |
3111 | } | 3103 | } |
3112 | } | 3104 | } |
3113 | read_unlock_bh(&addrconf_hash_lock); | 3105 | rcu_read_unlock_bh(); |
3114 | return ret; | 3106 | return ret; |
3115 | } | 3107 | } |
3116 | #endif | 3108 | #endif |
@@ -3121,43 +3113,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3121 | 3113 | ||
3122 | static void addrconf_verify(unsigned long foo) | 3114 | static void addrconf_verify(unsigned long foo) |
3123 | { | 3115 | { |
3116 | unsigned long now, next, next_sec, next_sched; | ||
3124 | struct inet6_ifaddr *ifp; | 3117 | struct inet6_ifaddr *ifp; |
3125 | unsigned long now, next; | 3118 | struct hlist_node *node; |
3126 | int i; | 3119 | int i; |
3127 | 3120 | ||
3128 | spin_lock_bh(&addrconf_verify_lock); | 3121 | rcu_read_lock_bh(); |
3122 | spin_lock(&addrconf_verify_lock); | ||
3129 | now = jiffies; | 3123 | now = jiffies; |
3130 | next = now + ADDR_CHECK_FREQUENCY; | 3124 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
3131 | 3125 | ||
3132 | del_timer(&addr_chk_timer); | 3126 | del_timer(&addr_chk_timer); |
3133 | 3127 | ||
3134 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 3128 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
3135 | |||
3136 | restart: | 3129 | restart: |
3137 | read_lock(&addrconf_hash_lock); | 3130 | hlist_for_each_entry_rcu_bh(ifp, node, |
3138 | for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) { | 3131 | &inet6_addr_lst[i], addr_lst) { |
3139 | unsigned long age; | 3132 | unsigned long age; |
3140 | #ifdef CONFIG_IPV6_PRIVACY | ||
3141 | unsigned long regen_advance; | ||
3142 | #endif | ||
3143 | 3133 | ||
3144 | if (ifp->flags & IFA_F_PERMANENT) | 3134 | if (ifp->flags & IFA_F_PERMANENT) |
3145 | continue; | 3135 | continue; |
3146 | 3136 | ||
3147 | spin_lock(&ifp->lock); | 3137 | spin_lock(&ifp->lock); |
3148 | age = (now - ifp->tstamp) / HZ; | 3138 | /* We try to batch several events at once. */ |
3149 | 3139 | age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; | |
3150 | #ifdef CONFIG_IPV6_PRIVACY | ||
3151 | regen_advance = ifp->idev->cnf.regen_max_retry * | ||
3152 | ifp->idev->cnf.dad_transmits * | ||
3153 | ifp->idev->nd_parms->retrans_time / HZ; | ||
3154 | #endif | ||
3155 | 3140 | ||
3156 | if (ifp->valid_lft != INFINITY_LIFE_TIME && | 3141 | if (ifp->valid_lft != INFINITY_LIFE_TIME && |
3157 | age >= ifp->valid_lft) { | 3142 | age >= ifp->valid_lft) { |
3158 | spin_unlock(&ifp->lock); | 3143 | spin_unlock(&ifp->lock); |
3159 | in6_ifa_hold(ifp); | 3144 | in6_ifa_hold(ifp); |
3160 | read_unlock(&addrconf_hash_lock); | ||
3161 | ipv6_del_addr(ifp); | 3145 | ipv6_del_addr(ifp); |
3162 | goto restart; | 3146 | goto restart; |
3163 | } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { | 3147 | } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { |
@@ -3179,7 +3163,6 @@ restart: | |||
3179 | 3163 | ||
3180 | if (deprecate) { | 3164 | if (deprecate) { |
3181 | in6_ifa_hold(ifp); | 3165 | in6_ifa_hold(ifp); |
3182 | read_unlock(&addrconf_hash_lock); | ||
3183 | 3166 | ||
3184 | ipv6_ifa_notify(0, ifp); | 3167 | ipv6_ifa_notify(0, ifp); |
3185 | in6_ifa_put(ifp); | 3168 | in6_ifa_put(ifp); |
@@ -3188,6 +3171,10 @@ restart: | |||
3188 | #ifdef CONFIG_IPV6_PRIVACY | 3171 | #ifdef CONFIG_IPV6_PRIVACY |
3189 | } else if ((ifp->flags&IFA_F_TEMPORARY) && | 3172 | } else if ((ifp->flags&IFA_F_TEMPORARY) && |
3190 | !(ifp->flags&IFA_F_TENTATIVE)) { | 3173 | !(ifp->flags&IFA_F_TENTATIVE)) { |
3174 | unsigned long regen_advance = ifp->idev->cnf.regen_max_retry * | ||
3175 | ifp->idev->cnf.dad_transmits * | ||
3176 | ifp->idev->nd_parms->retrans_time / HZ; | ||
3177 | |||
3191 | if (age >= ifp->prefered_lft - regen_advance) { | 3178 | if (age >= ifp->prefered_lft - regen_advance) { |
3192 | struct inet6_ifaddr *ifpub = ifp->ifpub; | 3179 | struct inet6_ifaddr *ifpub = ifp->ifpub; |
3193 | if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) | 3180 | if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) |
@@ -3197,7 +3184,7 @@ restart: | |||
3197 | in6_ifa_hold(ifp); | 3184 | in6_ifa_hold(ifp); |
3198 | in6_ifa_hold(ifpub); | 3185 | in6_ifa_hold(ifpub); |
3199 | spin_unlock(&ifp->lock); | 3186 | spin_unlock(&ifp->lock); |
3200 | read_unlock(&addrconf_hash_lock); | 3187 | |
3201 | spin_lock(&ifpub->lock); | 3188 | spin_lock(&ifpub->lock); |
3202 | ifpub->regen_count = 0; | 3189 | ifpub->regen_count = 0; |
3203 | spin_unlock(&ifpub->lock); | 3190 | spin_unlock(&ifpub->lock); |
@@ -3217,12 +3204,26 @@ restart: | |||
3217 | spin_unlock(&ifp->lock); | 3204 | spin_unlock(&ifp->lock); |
3218 | } | 3205 | } |
3219 | } | 3206 | } |
3220 | read_unlock(&addrconf_hash_lock); | ||
3221 | } | 3207 | } |
3222 | 3208 | ||
3223 | addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next; | 3209 | next_sec = round_jiffies_up(next); |
3210 | next_sched = next; | ||
3211 | |||
3212 | /* If rounded timeout is accurate enough, accept it. */ | ||
3213 | if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ)) | ||
3214 | next_sched = next_sec; | ||
3215 | |||
3216 | /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */ | ||
3217 | if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX)) | ||
3218 | next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX; | ||
3219 | |||
3220 | ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", | ||
3221 | now, next, next_sec, next_sched)); | ||
3222 | |||
3223 | addr_chk_timer.expires = next_sched; | ||
3224 | add_timer(&addr_chk_timer); | 3224 | add_timer(&addr_chk_timer); |
3225 | spin_unlock_bh(&addrconf_verify_lock); | 3225 | spin_unlock(&addrconf_verify_lock); |
3226 | rcu_read_unlock_bh(); | ||
3226 | } | 3227 | } |
3227 | 3228 | ||
3228 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) | 3229 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) |
@@ -3512,8 +3513,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, | |||
3512 | return nlmsg_end(skb, nlh); | 3513 | return nlmsg_end(skb, nlh); |
3513 | } | 3514 | } |
3514 | 3515 | ||
3515 | enum addr_type_t | 3516 | enum addr_type_t { |
3516 | { | ||
3517 | UNICAST_ADDR, | 3517 | UNICAST_ADDR, |
3518 | MULTICAST_ADDR, | 3518 | MULTICAST_ADDR, |
3519 | ANYCAST_ADDR, | 3519 | ANYCAST_ADDR, |
@@ -3524,7 +3524,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3524 | struct netlink_callback *cb, enum addr_type_t type, | 3524 | struct netlink_callback *cb, enum addr_type_t type, |
3525 | int s_ip_idx, int *p_ip_idx) | 3525 | int s_ip_idx, int *p_ip_idx) |
3526 | { | 3526 | { |
3527 | struct inet6_ifaddr *ifa; | ||
3528 | struct ifmcaddr6 *ifmca; | 3527 | struct ifmcaddr6 *ifmca; |
3529 | struct ifacaddr6 *ifaca; | 3528 | struct ifacaddr6 *ifaca; |
3530 | int err = 1; | 3529 | int err = 1; |
@@ -3532,11 +3531,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3532 | 3531 | ||
3533 | read_lock_bh(&idev->lock); | 3532 | read_lock_bh(&idev->lock); |
3534 | switch (type) { | 3533 | switch (type) { |
3535 | case UNICAST_ADDR: | 3534 | case UNICAST_ADDR: { |
3535 | struct inet6_ifaddr *ifa; | ||
3536 | |||
3536 | /* unicast address incl. temp addr */ | 3537 | /* unicast address incl. temp addr */ |
3537 | for (ifa = idev->addr_list; ifa; | 3538 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
3538 | ifa = ifa->if_next, ip_idx++) { | 3539 | if (++ip_idx < s_ip_idx) |
3539 | if (ip_idx < s_ip_idx) | ||
3540 | continue; | 3540 | continue; |
3541 | err = inet6_fill_ifaddr(skb, ifa, | 3541 | err = inet6_fill_ifaddr(skb, ifa, |
3542 | NETLINK_CB(cb->skb).pid, | 3542 | NETLINK_CB(cb->skb).pid, |
@@ -3547,6 +3547,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3547 | break; | 3547 | break; |
3548 | } | 3548 | } |
3549 | break; | 3549 | break; |
3550 | } | ||
3550 | case MULTICAST_ADDR: | 3551 | case MULTICAST_ADDR: |
3551 | /* multicast address */ | 3552 | /* multicast address */ |
3552 | for (ifmca = idev->mc_list; ifmca; | 3553 | for (ifmca = idev->mc_list; ifmca; |
@@ -3608,10 +3609,11 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
3608 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { | 3609 | hlist_for_each_entry_rcu(dev, node, head, index_hlist) { |
3609 | if (idx < s_idx) | 3610 | if (idx < s_idx) |
3610 | goto cont; | 3611 | goto cont; |
3611 | if (idx > s_idx) | 3612 | if (h > s_h || idx > s_idx) |
3612 | s_ip_idx = 0; | 3613 | s_ip_idx = 0; |
3613 | ip_idx = 0; | 3614 | ip_idx = 0; |
3614 | if ((idev = __in6_dev_get(dev)) == NULL) | 3615 | idev = __in6_dev_get(dev); |
3616 | if (!idev) | ||
3615 | goto cont; | 3617 | goto cont; |
3616 | 3618 | ||
3617 | if (in6_dump_addrs(idev, skb, cb, type, | 3619 | if (in6_dump_addrs(idev, skb, cb, type, |
@@ -3678,12 +3680,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3678 | if (ifm->ifa_index) | 3680 | if (ifm->ifa_index) |
3679 | dev = __dev_get_by_index(net, ifm->ifa_index); | 3681 | dev = __dev_get_by_index(net, ifm->ifa_index); |
3680 | 3682 | ||
3681 | if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { | 3683 | ifa = ipv6_get_ifaddr(net, addr, dev, 1); |
3684 | if (!ifa) { | ||
3682 | err = -EADDRNOTAVAIL; | 3685 | err = -EADDRNOTAVAIL; |
3683 | goto errout; | 3686 | goto errout; |
3684 | } | 3687 | } |
3685 | 3688 | ||
3686 | if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) { | 3689 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL); |
3690 | if (!skb) { | ||
3687 | err = -ENOBUFS; | 3691 | err = -ENOBUFS; |
3688 | goto errout_ifa; | 3692 | goto errout_ifa; |
3689 | } | 3693 | } |
@@ -3808,7 +3812,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, | |||
3808 | static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, | 3812 | static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, |
3809 | int bytes) | 3813 | int bytes) |
3810 | { | 3814 | { |
3811 | switch(attrtype) { | 3815 | switch (attrtype) { |
3812 | case IFLA_INET6_STATS: | 3816 | case IFLA_INET6_STATS: |
3813 | __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); | 3817 | __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); |
3814 | break; | 3818 | break; |
@@ -4044,7 +4048,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4044 | addrconf_leave_anycast(ifp); | 4048 | addrconf_leave_anycast(ifp); |
4045 | addrconf_leave_solict(ifp->idev, &ifp->addr); | 4049 | addrconf_leave_solict(ifp->idev, &ifp->addr); |
4046 | dst_hold(&ifp->rt->u.dst); | 4050 | dst_hold(&ifp->rt->u.dst); |
4047 | if (ip6_del_rt(ifp->rt)) | 4051 | |
4052 | if (ifp->dead && ip6_del_rt(ifp->rt)) | ||
4048 | dst_free(&ifp->rt->u.dst); | 4053 | dst_free(&ifp->rt->u.dst); |
4049 | break; | 4054 | break; |
4050 | } | 4055 | } |
@@ -4160,211 +4165,211 @@ static struct addrconf_sysctl_table | |||
4160 | .sysctl_header = NULL, | 4165 | .sysctl_header = NULL, |
4161 | .addrconf_vars = { | 4166 | .addrconf_vars = { |
4162 | { | 4167 | { |
4163 | .procname = "forwarding", | 4168 | .procname = "forwarding", |
4164 | .data = &ipv6_devconf.forwarding, | 4169 | .data = &ipv6_devconf.forwarding, |
4165 | .maxlen = sizeof(int), | 4170 | .maxlen = sizeof(int), |
4166 | .mode = 0644, | 4171 | .mode = 0644, |
4167 | .proc_handler = addrconf_sysctl_forward, | 4172 | .proc_handler = addrconf_sysctl_forward, |
4168 | }, | 4173 | }, |
4169 | { | 4174 | { |
4170 | .procname = "hop_limit", | 4175 | .procname = "hop_limit", |
4171 | .data = &ipv6_devconf.hop_limit, | 4176 | .data = &ipv6_devconf.hop_limit, |
4172 | .maxlen = sizeof(int), | 4177 | .maxlen = sizeof(int), |
4173 | .mode = 0644, | 4178 | .mode = 0644, |
4174 | .proc_handler = proc_dointvec, | 4179 | .proc_handler = proc_dointvec, |
4175 | }, | 4180 | }, |
4176 | { | 4181 | { |
4177 | .procname = "mtu", | 4182 | .procname = "mtu", |
4178 | .data = &ipv6_devconf.mtu6, | 4183 | .data = &ipv6_devconf.mtu6, |
4179 | .maxlen = sizeof(int), | 4184 | .maxlen = sizeof(int), |
4180 | .mode = 0644, | 4185 | .mode = 0644, |
4181 | .proc_handler = proc_dointvec, | 4186 | .proc_handler = proc_dointvec, |
4182 | }, | 4187 | }, |
4183 | { | 4188 | { |
4184 | .procname = "accept_ra", | 4189 | .procname = "accept_ra", |
4185 | .data = &ipv6_devconf.accept_ra, | 4190 | .data = &ipv6_devconf.accept_ra, |
4186 | .maxlen = sizeof(int), | 4191 | .maxlen = sizeof(int), |
4187 | .mode = 0644, | 4192 | .mode = 0644, |
4188 | .proc_handler = proc_dointvec, | 4193 | .proc_handler = proc_dointvec, |
4189 | }, | 4194 | }, |
4190 | { | 4195 | { |
4191 | .procname = "accept_redirects", | 4196 | .procname = "accept_redirects", |
4192 | .data = &ipv6_devconf.accept_redirects, | 4197 | .data = &ipv6_devconf.accept_redirects, |
4193 | .maxlen = sizeof(int), | 4198 | .maxlen = sizeof(int), |
4194 | .mode = 0644, | 4199 | .mode = 0644, |
4195 | .proc_handler = proc_dointvec, | 4200 | .proc_handler = proc_dointvec, |
4196 | }, | 4201 | }, |
4197 | { | 4202 | { |
4198 | .procname = "autoconf", | 4203 | .procname = "autoconf", |
4199 | .data = &ipv6_devconf.autoconf, | 4204 | .data = &ipv6_devconf.autoconf, |
4200 | .maxlen = sizeof(int), | 4205 | .maxlen = sizeof(int), |
4201 | .mode = 0644, | 4206 | .mode = 0644, |
4202 | .proc_handler = proc_dointvec, | 4207 | .proc_handler = proc_dointvec, |
4203 | }, | 4208 | }, |
4204 | { | 4209 | { |
4205 | .procname = "dad_transmits", | 4210 | .procname = "dad_transmits", |
4206 | .data = &ipv6_devconf.dad_transmits, | 4211 | .data = &ipv6_devconf.dad_transmits, |
4207 | .maxlen = sizeof(int), | 4212 | .maxlen = sizeof(int), |
4208 | .mode = 0644, | 4213 | .mode = 0644, |
4209 | .proc_handler = proc_dointvec, | 4214 | .proc_handler = proc_dointvec, |
4210 | }, | 4215 | }, |
4211 | { | 4216 | { |
4212 | .procname = "router_solicitations", | 4217 | .procname = "router_solicitations", |
4213 | .data = &ipv6_devconf.rtr_solicits, | 4218 | .data = &ipv6_devconf.rtr_solicits, |
4214 | .maxlen = sizeof(int), | 4219 | .maxlen = sizeof(int), |
4215 | .mode = 0644, | 4220 | .mode = 0644, |
4216 | .proc_handler = proc_dointvec, | 4221 | .proc_handler = proc_dointvec, |
4217 | }, | 4222 | }, |
4218 | { | 4223 | { |
4219 | .procname = "router_solicitation_interval", | 4224 | .procname = "router_solicitation_interval", |
4220 | .data = &ipv6_devconf.rtr_solicit_interval, | 4225 | .data = &ipv6_devconf.rtr_solicit_interval, |
4221 | .maxlen = sizeof(int), | 4226 | .maxlen = sizeof(int), |
4222 | .mode = 0644, | 4227 | .mode = 0644, |
4223 | .proc_handler = proc_dointvec_jiffies, | 4228 | .proc_handler = proc_dointvec_jiffies, |
4224 | }, | 4229 | }, |
4225 | { | 4230 | { |
4226 | .procname = "router_solicitation_delay", | 4231 | .procname = "router_solicitation_delay", |
4227 | .data = &ipv6_devconf.rtr_solicit_delay, | 4232 | .data = &ipv6_devconf.rtr_solicit_delay, |
4228 | .maxlen = sizeof(int), | 4233 | .maxlen = sizeof(int), |
4229 | .mode = 0644, | 4234 | .mode = 0644, |
4230 | .proc_handler = proc_dointvec_jiffies, | 4235 | .proc_handler = proc_dointvec_jiffies, |
4231 | }, | 4236 | }, |
4232 | { | 4237 | { |
4233 | .procname = "force_mld_version", | 4238 | .procname = "force_mld_version", |
4234 | .data = &ipv6_devconf.force_mld_version, | 4239 | .data = &ipv6_devconf.force_mld_version, |
4235 | .maxlen = sizeof(int), | 4240 | .maxlen = sizeof(int), |
4236 | .mode = 0644, | 4241 | .mode = 0644, |
4237 | .proc_handler = proc_dointvec, | 4242 | .proc_handler = proc_dointvec, |
4238 | }, | 4243 | }, |
4239 | #ifdef CONFIG_IPV6_PRIVACY | 4244 | #ifdef CONFIG_IPV6_PRIVACY |
4240 | { | 4245 | { |
4241 | .procname = "use_tempaddr", | 4246 | .procname = "use_tempaddr", |
4242 | .data = &ipv6_devconf.use_tempaddr, | 4247 | .data = &ipv6_devconf.use_tempaddr, |
4243 | .maxlen = sizeof(int), | 4248 | .maxlen = sizeof(int), |
4244 | .mode = 0644, | 4249 | .mode = 0644, |
4245 | .proc_handler = proc_dointvec, | 4250 | .proc_handler = proc_dointvec, |
4246 | }, | 4251 | }, |
4247 | { | 4252 | { |
4248 | .procname = "temp_valid_lft", | 4253 | .procname = "temp_valid_lft", |
4249 | .data = &ipv6_devconf.temp_valid_lft, | 4254 | .data = &ipv6_devconf.temp_valid_lft, |
4250 | .maxlen = sizeof(int), | 4255 | .maxlen = sizeof(int), |
4251 | .mode = 0644, | 4256 | .mode = 0644, |
4252 | .proc_handler = proc_dointvec, | 4257 | .proc_handler = proc_dointvec, |
4253 | }, | 4258 | }, |
4254 | { | 4259 | { |
4255 | .procname = "temp_prefered_lft", | 4260 | .procname = "temp_prefered_lft", |
4256 | .data = &ipv6_devconf.temp_prefered_lft, | 4261 | .data = &ipv6_devconf.temp_prefered_lft, |
4257 | .maxlen = sizeof(int), | 4262 | .maxlen = sizeof(int), |
4258 | .mode = 0644, | 4263 | .mode = 0644, |
4259 | .proc_handler = proc_dointvec, | 4264 | .proc_handler = proc_dointvec, |
4260 | }, | 4265 | }, |
4261 | { | 4266 | { |
4262 | .procname = "regen_max_retry", | 4267 | .procname = "regen_max_retry", |
4263 | .data = &ipv6_devconf.regen_max_retry, | 4268 | .data = &ipv6_devconf.regen_max_retry, |
4264 | .maxlen = sizeof(int), | 4269 | .maxlen = sizeof(int), |
4265 | .mode = 0644, | 4270 | .mode = 0644, |
4266 | .proc_handler = proc_dointvec, | 4271 | .proc_handler = proc_dointvec, |
4267 | }, | 4272 | }, |
4268 | { | 4273 | { |
4269 | .procname = "max_desync_factor", | 4274 | .procname = "max_desync_factor", |
4270 | .data = &ipv6_devconf.max_desync_factor, | 4275 | .data = &ipv6_devconf.max_desync_factor, |
4271 | .maxlen = sizeof(int), | 4276 | .maxlen = sizeof(int), |
4272 | .mode = 0644, | 4277 | .mode = 0644, |
4273 | .proc_handler = proc_dointvec, | 4278 | .proc_handler = proc_dointvec, |
4274 | }, | 4279 | }, |
4275 | #endif | 4280 | #endif |
4276 | { | 4281 | { |
4277 | .procname = "max_addresses", | 4282 | .procname = "max_addresses", |
4278 | .data = &ipv6_devconf.max_addresses, | 4283 | .data = &ipv6_devconf.max_addresses, |
4279 | .maxlen = sizeof(int), | 4284 | .maxlen = sizeof(int), |
4280 | .mode = 0644, | 4285 | .mode = 0644, |
4281 | .proc_handler = proc_dointvec, | 4286 | .proc_handler = proc_dointvec, |
4282 | }, | 4287 | }, |
4283 | { | 4288 | { |
4284 | .procname = "accept_ra_defrtr", | 4289 | .procname = "accept_ra_defrtr", |
4285 | .data = &ipv6_devconf.accept_ra_defrtr, | 4290 | .data = &ipv6_devconf.accept_ra_defrtr, |
4286 | .maxlen = sizeof(int), | 4291 | .maxlen = sizeof(int), |
4287 | .mode = 0644, | 4292 | .mode = 0644, |
4288 | .proc_handler = proc_dointvec, | 4293 | .proc_handler = proc_dointvec, |
4289 | }, | 4294 | }, |
4290 | { | 4295 | { |
4291 | .procname = "accept_ra_pinfo", | 4296 | .procname = "accept_ra_pinfo", |
4292 | .data = &ipv6_devconf.accept_ra_pinfo, | 4297 | .data = &ipv6_devconf.accept_ra_pinfo, |
4293 | .maxlen = sizeof(int), | 4298 | .maxlen = sizeof(int), |
4294 | .mode = 0644, | 4299 | .mode = 0644, |
4295 | .proc_handler = proc_dointvec, | 4300 | .proc_handler = proc_dointvec, |
4296 | }, | 4301 | }, |
4297 | #ifdef CONFIG_IPV6_ROUTER_PREF | 4302 | #ifdef CONFIG_IPV6_ROUTER_PREF |
4298 | { | 4303 | { |
4299 | .procname = "accept_ra_rtr_pref", | 4304 | .procname = "accept_ra_rtr_pref", |
4300 | .data = &ipv6_devconf.accept_ra_rtr_pref, | 4305 | .data = &ipv6_devconf.accept_ra_rtr_pref, |
4301 | .maxlen = sizeof(int), | 4306 | .maxlen = sizeof(int), |
4302 | .mode = 0644, | 4307 | .mode = 0644, |
4303 | .proc_handler = proc_dointvec, | 4308 | .proc_handler = proc_dointvec, |
4304 | }, | 4309 | }, |
4305 | { | 4310 | { |
4306 | .procname = "router_probe_interval", | 4311 | .procname = "router_probe_interval", |
4307 | .data = &ipv6_devconf.rtr_probe_interval, | 4312 | .data = &ipv6_devconf.rtr_probe_interval, |
4308 | .maxlen = sizeof(int), | 4313 | .maxlen = sizeof(int), |
4309 | .mode = 0644, | 4314 | .mode = 0644, |
4310 | .proc_handler = proc_dointvec_jiffies, | 4315 | .proc_handler = proc_dointvec_jiffies, |
4311 | }, | 4316 | }, |
4312 | #ifdef CONFIG_IPV6_ROUTE_INFO | 4317 | #ifdef CONFIG_IPV6_ROUTE_INFO |
4313 | { | 4318 | { |
4314 | .procname = "accept_ra_rt_info_max_plen", | 4319 | .procname = "accept_ra_rt_info_max_plen", |
4315 | .data = &ipv6_devconf.accept_ra_rt_info_max_plen, | 4320 | .data = &ipv6_devconf.accept_ra_rt_info_max_plen, |
4316 | .maxlen = sizeof(int), | 4321 | .maxlen = sizeof(int), |
4317 | .mode = 0644, | 4322 | .mode = 0644, |
4318 | .proc_handler = proc_dointvec, | 4323 | .proc_handler = proc_dointvec, |
4319 | }, | 4324 | }, |
4320 | #endif | 4325 | #endif |
4321 | #endif | 4326 | #endif |
4322 | { | 4327 | { |
4323 | .procname = "proxy_ndp", | 4328 | .procname = "proxy_ndp", |
4324 | .data = &ipv6_devconf.proxy_ndp, | 4329 | .data = &ipv6_devconf.proxy_ndp, |
4325 | .maxlen = sizeof(int), | 4330 | .maxlen = sizeof(int), |
4326 | .mode = 0644, | 4331 | .mode = 0644, |
4327 | .proc_handler = proc_dointvec, | 4332 | .proc_handler = proc_dointvec, |
4328 | }, | 4333 | }, |
4329 | { | 4334 | { |
4330 | .procname = "accept_source_route", | 4335 | .procname = "accept_source_route", |
4331 | .data = &ipv6_devconf.accept_source_route, | 4336 | .data = &ipv6_devconf.accept_source_route, |
4332 | .maxlen = sizeof(int), | 4337 | .maxlen = sizeof(int), |
4333 | .mode = 0644, | 4338 | .mode = 0644, |
4334 | .proc_handler = proc_dointvec, | 4339 | .proc_handler = proc_dointvec, |
4335 | }, | 4340 | }, |
4336 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 4341 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
4337 | { | 4342 | { |
4338 | .procname = "optimistic_dad", | 4343 | .procname = "optimistic_dad", |
4339 | .data = &ipv6_devconf.optimistic_dad, | 4344 | .data = &ipv6_devconf.optimistic_dad, |
4340 | .maxlen = sizeof(int), | 4345 | .maxlen = sizeof(int), |
4341 | .mode = 0644, | 4346 | .mode = 0644, |
4342 | .proc_handler = proc_dointvec, | 4347 | .proc_handler = proc_dointvec, |
4343 | 4348 | ||
4344 | }, | 4349 | }, |
4345 | #endif | 4350 | #endif |
4346 | #ifdef CONFIG_IPV6_MROUTE | 4351 | #ifdef CONFIG_IPV6_MROUTE |
4347 | { | 4352 | { |
4348 | .procname = "mc_forwarding", | 4353 | .procname = "mc_forwarding", |
4349 | .data = &ipv6_devconf.mc_forwarding, | 4354 | .data = &ipv6_devconf.mc_forwarding, |
4350 | .maxlen = sizeof(int), | 4355 | .maxlen = sizeof(int), |
4351 | .mode = 0444, | 4356 | .mode = 0444, |
4352 | .proc_handler = proc_dointvec, | 4357 | .proc_handler = proc_dointvec, |
4353 | }, | 4358 | }, |
4354 | #endif | 4359 | #endif |
4355 | { | 4360 | { |
4356 | .procname = "disable_ipv6", | 4361 | .procname = "disable_ipv6", |
4357 | .data = &ipv6_devconf.disable_ipv6, | 4362 | .data = &ipv6_devconf.disable_ipv6, |
4358 | .maxlen = sizeof(int), | 4363 | .maxlen = sizeof(int), |
4359 | .mode = 0644, | 4364 | .mode = 0644, |
4360 | .proc_handler = addrconf_sysctl_disable, | 4365 | .proc_handler = addrconf_sysctl_disable, |
4361 | }, | 4366 | }, |
4362 | { | 4367 | { |
4363 | .procname = "accept_dad", | 4368 | .procname = "accept_dad", |
4364 | .data = &ipv6_devconf.accept_dad, | 4369 | .data = &ipv6_devconf.accept_dad, |
4365 | .maxlen = sizeof(int), | 4370 | .maxlen = sizeof(int), |
4366 | .mode = 0644, | 4371 | .mode = 0644, |
4367 | .proc_handler = proc_dointvec, | 4372 | .proc_handler = proc_dointvec, |
4368 | }, | 4373 | }, |
4369 | { | 4374 | { |
4370 | .procname = "force_tllao", | 4375 | .procname = "force_tllao", |
@@ -4400,8 +4405,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, | |||
4400 | if (t == NULL) | 4405 | if (t == NULL) |
4401 | goto out; | 4406 | goto out; |
4402 | 4407 | ||
4403 | for (i=0; t->addrconf_vars[i].data; i++) { | 4408 | for (i = 0; t->addrconf_vars[i].data; i++) { |
4404 | t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; | 4409 | t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf; |
4405 | t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ | 4410 | t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ |
4406 | t->addrconf_vars[i].extra2 = net; | 4411 | t->addrconf_vars[i].extra2 = net; |
4407 | } | 4412 | } |
@@ -4538,14 +4543,12 @@ int register_inet6addr_notifier(struct notifier_block *nb) | |||
4538 | { | 4543 | { |
4539 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | 4544 | return atomic_notifier_chain_register(&inet6addr_chain, nb); |
4540 | } | 4545 | } |
4541 | |||
4542 | EXPORT_SYMBOL(register_inet6addr_notifier); | 4546 | EXPORT_SYMBOL(register_inet6addr_notifier); |
4543 | 4547 | ||
4544 | int unregister_inet6addr_notifier(struct notifier_block *nb) | 4548 | int unregister_inet6addr_notifier(struct notifier_block *nb) |
4545 | { | 4549 | { |
4546 | return atomic_notifier_chain_unregister(&inet6addr_chain,nb); | 4550 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); |
4547 | } | 4551 | } |
4548 | |||
4549 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | 4552 | EXPORT_SYMBOL(unregister_inet6addr_notifier); |
4550 | 4553 | ||
4551 | /* | 4554 | /* |
@@ -4554,11 +4557,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier); | |||
4554 | 4557 | ||
4555 | int __init addrconf_init(void) | 4558 | int __init addrconf_init(void) |
4556 | { | 4559 | { |
4557 | int err; | 4560 | int i, err; |
4558 | 4561 | ||
4559 | if ((err = ipv6_addr_label_init()) < 0) { | 4562 | err = ipv6_addr_label_init(); |
4560 | printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n", | 4563 | if (err < 0) { |
4561 | err); | 4564 | printk(KERN_CRIT "IPv6 Addrconf:" |
4565 | " cannot initialize default policy table: %d.\n", err); | ||
4562 | return err; | 4566 | return err; |
4563 | } | 4567 | } |
4564 | 4568 | ||
@@ -4589,6 +4593,9 @@ int __init addrconf_init(void) | |||
4589 | if (err) | 4593 | if (err) |
4590 | goto errlo; | 4594 | goto errlo; |
4591 | 4595 | ||
4596 | for (i = 0; i < IN6_ADDR_HSIZE; i++) | ||
4597 | INIT_HLIST_HEAD(&inet6_addr_lst[i]); | ||
4598 | |||
4592 | register_netdevice_notifier(&ipv6_dev_notf); | 4599 | register_netdevice_notifier(&ipv6_dev_notf); |
4593 | 4600 | ||
4594 | addrconf_verify(0); | 4601 | addrconf_verify(0); |
@@ -4617,7 +4624,6 @@ errlo: | |||
4617 | 4624 | ||
4618 | void addrconf_cleanup(void) | 4625 | void addrconf_cleanup(void) |
4619 | { | 4626 | { |
4620 | struct inet6_ifaddr *ifa; | ||
4621 | struct net_device *dev; | 4627 | struct net_device *dev; |
4622 | int i; | 4628 | int i; |
4623 | 4629 | ||
@@ -4637,20 +4643,10 @@ void addrconf_cleanup(void) | |||
4637 | /* | 4643 | /* |
4638 | * Check hash table. | 4644 | * Check hash table. |
4639 | */ | 4645 | */ |
4640 | write_lock_bh(&addrconf_hash_lock); | 4646 | spin_lock_bh(&addrconf_hash_lock); |
4641 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 4647 | for (i = 0; i < IN6_ADDR_HSIZE; i++) |
4642 | for (ifa=inet6_addr_lst[i]; ifa; ) { | 4648 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); |
4643 | struct inet6_ifaddr *bifa; | 4649 | spin_unlock_bh(&addrconf_hash_lock); |
4644 | |||
4645 | bifa = ifa; | ||
4646 | ifa = ifa->lst_next; | ||
4647 | printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa); | ||
4648 | /* Do not free it; something is wrong. | ||
4649 | Now we can investigate it with debugger. | ||
4650 | */ | ||
4651 | } | ||
4652 | } | ||
4653 | write_unlock_bh(&addrconf_hash_lock); | ||
4654 | 4650 | ||
4655 | del_timer(&addr_chk_timer); | 4651 | del_timer(&addr_chk_timer); |
4656 | rtnl_unlock(); | 4652 | rtnl_unlock(); |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 6ff73c4c126a..ae404c9a746c 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/rcupdate.h> | 14 | #include <linux/rcupdate.h> |
15 | #include <linux/in6.h> | 15 | #include <linux/in6.h> |
16 | #include <linux/slab.h> | ||
16 | #include <net/addrconf.h> | 17 | #include <net/addrconf.h> |
17 | #include <linux/if_addrlabel.h> | 18 | #include <linux/if_addrlabel.h> |
18 | #include <linux/netlink.h> | 19 | #include <linux/netlink.h> |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 37d14e735c27..d2df3144429b 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
37 | #include <linux/stat.h> | 37 | #include <linux/stat.h> |
38 | #include <linux/init.h> | 38 | #include <linux/init.h> |
39 | #include <linux/slab.h> | ||
39 | 40 | ||
40 | #include <linux/inet.h> | 41 | #include <linux/inet.h> |
41 | #include <linux/netdevice.h> | 42 | #include <linux/netdevice.h> |
@@ -416,6 +417,9 @@ void inet6_destroy_sock(struct sock *sk) | |||
416 | if ((skb = xchg(&np->pktoptions, NULL)) != NULL) | 417 | if ((skb = xchg(&np->pktoptions, NULL)) != NULL) |
417 | kfree_skb(skb); | 418 | kfree_skb(skb); |
418 | 419 | ||
420 | if ((skb = xchg(&np->rxpmtu, NULL)) != NULL) | ||
421 | kfree_skb(skb); | ||
422 | |||
419 | /* Free flowlabels */ | 423 | /* Free flowlabels */ |
420 | fl6_free_socklist(sk); | 424 | fl6_free_socklist(sk); |
421 | 425 | ||
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 5ac89025f9de..ee82d4ef26ce 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <crypto/hash.h> | 27 | #include <crypto/hash.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/slab.h> | ||
29 | #include <net/ip.h> | 30 | #include <net/ip.h> |
30 | #include <net/ah.h> | 31 | #include <net/ah.h> |
31 | #include <linux/crypto.h> | 32 | #include <linux/crypto.h> |
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index c4f6ca32fa74..b5b07054508a 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/proc_fs.h> | 30 | #include <linux/proc_fs.h> |
31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
32 | #include <linux/slab.h> | ||
32 | 33 | ||
33 | #include <net/net_namespace.h> | 34 | #include <net/net_namespace.h> |
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index e6f9cdf780fe..5959230bc6c1 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/in6.h> | 21 | #include <linux/in6.h> |
22 | #include <linux/ipv6.h> | 22 | #include <linux/ipv6.h> |
23 | #include <linux/route.h> | 23 | #include <linux/route.h> |
24 | #include <linux/slab.h> | ||
24 | 25 | ||
25 | #include <net/ipv6.h> | 26 | #include <net/ipv6.h> |
26 | #include <net/ndisc.h> | 27 | #include <net/ndisc.h> |
@@ -277,6 +278,45 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) | |||
277 | kfree_skb(skb); | 278 | kfree_skb(skb); |
278 | } | 279 | } |
279 | 280 | ||
281 | void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu) | ||
282 | { | ||
283 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
284 | struct ipv6hdr *iph; | ||
285 | struct sk_buff *skb; | ||
286 | struct ip6_mtuinfo *mtu_info; | ||
287 | |||
288 | if (!np->rxopt.bits.rxpmtu) | ||
289 | return; | ||
290 | |||
291 | skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); | ||
292 | if (!skb) | ||
293 | return; | ||
294 | |||
295 | skb_put(skb, sizeof(struct ipv6hdr)); | ||
296 | skb_reset_network_header(skb); | ||
297 | iph = ipv6_hdr(skb); | ||
298 | ipv6_addr_copy(&iph->daddr, &fl->fl6_dst); | ||
299 | |||
300 | mtu_info = IP6CBMTU(skb); | ||
301 | if (!mtu_info) { | ||
302 | kfree_skb(skb); | ||
303 | return; | ||
304 | } | ||
305 | |||
306 | mtu_info->ip6m_mtu = mtu; | ||
307 | mtu_info->ip6m_addr.sin6_family = AF_INET6; | ||
308 | mtu_info->ip6m_addr.sin6_port = 0; | ||
309 | mtu_info->ip6m_addr.sin6_flowinfo = 0; | ||
310 | mtu_info->ip6m_addr.sin6_scope_id = fl->oif; | ||
311 | ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr); | ||
312 | |||
313 | __skb_pull(skb, skb_tail_pointer(skb) - skb->data); | ||
314 | skb_reset_transport_header(skb); | ||
315 | |||
316 | skb = xchg(&np->rxpmtu, skb); | ||
317 | kfree_skb(skb); | ||
318 | } | ||
319 | |||
280 | /* | 320 | /* |
281 | * Handle MSG_ERRQUEUE | 321 | * Handle MSG_ERRQUEUE |
282 | */ | 322 | */ |
@@ -380,6 +420,54 @@ out: | |||
380 | return err; | 420 | return err; |
381 | } | 421 | } |
382 | 422 | ||
423 | /* | ||
424 | * Handle IPV6_RECVPATHMTU | ||
425 | */ | ||
426 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) | ||
427 | { | ||
428 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
429 | struct sk_buff *skb; | ||
430 | struct sockaddr_in6 *sin; | ||
431 | struct ip6_mtuinfo mtu_info; | ||
432 | int err; | ||
433 | int copied; | ||
434 | |||
435 | err = -EAGAIN; | ||
436 | skb = xchg(&np->rxpmtu, NULL); | ||
437 | if (skb == NULL) | ||
438 | goto out; | ||
439 | |||
440 | copied = skb->len; | ||
441 | if (copied > len) { | ||
442 | msg->msg_flags |= MSG_TRUNC; | ||
443 | copied = len; | ||
444 | } | ||
445 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
446 | if (err) | ||
447 | goto out_free_skb; | ||
448 | |||
449 | sock_recv_timestamp(msg, sk, skb); | ||
450 | |||
451 | memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info)); | ||
452 | |||
453 | sin = (struct sockaddr_in6 *)msg->msg_name; | ||
454 | if (sin) { | ||
455 | sin->sin6_family = AF_INET6; | ||
456 | sin->sin6_flowinfo = 0; | ||
457 | sin->sin6_port = 0; | ||
458 | sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; | ||
459 | ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr); | ||
460 | } | ||
461 | |||
462 | put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); | ||
463 | |||
464 | err = copied; | ||
465 | |||
466 | out_free_skb: | ||
467 | kfree_skb(skb); | ||
468 | out: | ||
469 | return err; | ||
470 | } | ||
383 | 471 | ||
384 | 472 | ||
385 | int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | 473 | int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) |
@@ -496,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | |||
496 | int datagram_send_ctl(struct net *net, | 584 | int datagram_send_ctl(struct net *net, |
497 | struct msghdr *msg, struct flowi *fl, | 585 | struct msghdr *msg, struct flowi *fl, |
498 | struct ipv6_txoptions *opt, | 586 | struct ipv6_txoptions *opt, |
499 | int *hlimit, int *tclass) | 587 | int *hlimit, int *tclass, int *dontfrag) |
500 | { | 588 | { |
501 | struct in6_pktinfo *src_info; | 589 | struct in6_pktinfo *src_info; |
502 | struct cmsghdr *cmsg; | 590 | struct cmsghdr *cmsg; |
@@ -736,6 +824,25 @@ int datagram_send_ctl(struct net *net, | |||
736 | 824 | ||
737 | break; | 825 | break; |
738 | } | 826 | } |
827 | |||
828 | case IPV6_DONTFRAG: | ||
829 | { | ||
830 | int df; | ||
831 | |||
832 | err = -EINVAL; | ||
833 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { | ||
834 | goto exit_f; | ||
835 | } | ||
836 | |||
837 | df = *(int *)CMSG_DATA(cmsg); | ||
838 | if (df < 0 || df > 1) | ||
839 | goto exit_f; | ||
840 | |||
841 | err = 0; | ||
842 | *dontfrag = df; | ||
843 | |||
844 | break; | ||
845 | } | ||
739 | default: | 846 | default: |
740 | LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", | 847 | LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", |
741 | cmsg->cmsg_type); | 848 | cmsg->cmsg_type); |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 074f2c084f9f..8a659f92d17a 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
30 | #include <linux/in6.h> | 30 | #include <linux/in6.h> |
31 | #include <linux/icmpv6.h> | 31 | #include <linux/icmpv6.h> |
32 | #include <linux/slab.h> | ||
32 | 33 | ||
33 | #include <net/dst.h> | 34 | #include <net/dst.h> |
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 5e463c43fcc2..8e44f8f9c188 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -208,7 +208,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
208 | { | 208 | { |
209 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; | 209 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; |
210 | 210 | ||
211 | frh->family = AF_INET6; | ||
212 | frh->dst_len = rule6->dst.plen; | 211 | frh->dst_len = rule6->dst.plen; |
213 | frh->src_len = rule6->src.plen; | 212 | frh->src_len = rule6->src.plen; |
214 | frh->tos = rule6->tclass; | 213 | frh->tos = rule6->tclass; |
@@ -238,7 +237,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) | |||
238 | + nla_total_size(16); /* src */ | 237 | + nla_total_size(16); /* src */ |
239 | } | 238 | } |
240 | 239 | ||
241 | static struct fib_rules_ops fib6_rules_ops_template = { | 240 | static const struct fib_rules_ops __net_initdata fib6_rules_ops_template = { |
242 | .family = AF_INET6, | 241 | .family = AF_INET6, |
243 | .rule_size = sizeof(struct fib6_rule), | 242 | .rule_size = sizeof(struct fib6_rule), |
244 | .addr_size = sizeof(struct in6_addr), | 243 | .addr_size = sizeof(struct in6_addr), |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index eb9abe24bdf0..ce7992982557 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/init.h> | 41 | #include <linux/init.h> |
42 | #include <linux/netfilter.h> | 42 | #include <linux/netfilter.h> |
43 | #include <linux/slab.h> | ||
43 | 44 | ||
44 | #ifdef CONFIG_SYSCTL | 45 | #ifdef CONFIG_SYSCTL |
45 | #include <linux/sysctl.h> | 46 | #include <linux/sysctl.h> |
@@ -480,8 +481,9 @@ route_done: | |||
480 | len + sizeof(struct icmp6hdr), | 481 | len + sizeof(struct icmp6hdr), |
481 | sizeof(struct icmp6hdr), hlimit, | 482 | sizeof(struct icmp6hdr), hlimit, |
482 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
483 | MSG_DONTWAIT); | 484 | MSG_DONTWAIT, np->dontfrag); |
484 | if (err) { | 485 | if (err) { |
486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | ||
485 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
486 | goto out_put; | 488 | goto out_put; |
487 | } | 489 | } |
@@ -559,9 +561,11 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
559 | 561 | ||
560 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), | 562 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), |
561 | sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl, | 563 | sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl, |
562 | (struct rt6_info*)dst, MSG_DONTWAIT); | 564 | (struct rt6_info*)dst, MSG_DONTWAIT, |
565 | np->dontfrag); | ||
563 | 566 | ||
564 | if (err) { | 567 | if (err) { |
568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | ||
565 | ip6_flush_pending_frames(sk); | 569 | ip6_flush_pending_frames(sk); |
566 | goto out_put; | 570 | goto out_put; |
567 | } | 571 | } |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 3516e6fe2e56..0c5e3c3b7fd5 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/in6.h> | 17 | #include <linux/in6.h> |
18 | #include <linux/ipv6.h> | 18 | #include <linux/ipv6.h> |
19 | #include <linux/jhash.h> | 19 | #include <linux/jhash.h> |
20 | #include <linux/slab.h> | ||
20 | 21 | ||
21 | #include <net/addrconf.h> | 22 | #include <net/addrconf.h> |
22 | #include <net/inet_connection_sock.h> | 23 | #include <net/inet_connection_sock.h> |
@@ -177,7 +178,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | |||
177 | return dst; | 178 | return dst; |
178 | } | 179 | } |
179 | 180 | ||
180 | int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | 181 | int inet6_csk_xmit(struct sk_buff *skb) |
181 | { | 182 | { |
182 | struct sock *sk = skb->sk; | 183 | struct sock *sk = skb->sk; |
183 | struct inet_sock *inet = inet_sk(sk); | 184 | struct inet_sock *inet = inet_sk(sk); |
@@ -233,7 +234,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
233 | /* Restore final destination back after routing done */ | 234 | /* Restore final destination back after routing done */ |
234 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 235 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
235 | 236 | ||
236 | return ip6_xmit(sk, skb, &fl, np->opt, 0); | 237 | return ip6_xmit(sk, skb, &fl, np->opt); |
237 | } | 238 | } |
238 | 239 | ||
239 | EXPORT_SYMBOL_GPL(inet6_csk_xmit); | 240 | EXPORT_SYMBOL_GPL(inet6_csk_xmit); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 2f9847924fa5..92a122b7795d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/in6.h> | 26 | #include <linux/in6.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/slab.h> | ||
29 | 30 | ||
30 | #ifdef CONFIG_PROC_FS | 31 | #ifdef CONFIG_PROC_FS |
31 | #include <linux/proc_fs.h> | 32 | #include <linux/proc_fs.h> |
@@ -127,12 +128,24 @@ static __inline__ u32 fib6_new_sernum(void) | |||
127 | /* | 128 | /* |
128 | * test bit | 129 | * test bit |
129 | */ | 130 | */ |
131 | #if defined(__LITTLE_ENDIAN) | ||
132 | # define BITOP_BE32_SWIZZLE (0x1F & ~7) | ||
133 | #else | ||
134 | # define BITOP_BE32_SWIZZLE 0 | ||
135 | #endif | ||
130 | 136 | ||
131 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) | 137 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) |
132 | { | 138 | { |
133 | __be32 *addr = token; | 139 | __be32 *addr = token; |
134 | 140 | /* | |
135 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; | 141 | * Here, |
142 | * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) | ||
143 | * is optimized version of | ||
144 | * htonl(1 << ((~fn_bit)&0x1F)) | ||
145 | * See include/asm-generic/bitops/le.h. | ||
146 | */ | ||
147 | return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & | ||
148 | addr[fn_bit >> 5]; | ||
136 | } | 149 | } |
137 | 150 | ||
138 | static __inline__ struct fib6_node * node_alloc(void) | 151 | static __inline__ struct fib6_node * node_alloc(void) |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index e41eba8aacf1..13654686aeab 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/route.h> | 20 | #include <linux/route.h> |
21 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
22 | #include <linux/seq_file.h> | 22 | #include <linux/seq_file.h> |
23 | #include <linux/slab.h> | ||
23 | 24 | ||
24 | #include <net/net_namespace.h> | 25 | #include <net/net_namespace.h> |
25 | #include <net/sock.h> | 26 | #include <net/sock.h> |
@@ -359,7 +360,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, | |||
359 | msg.msg_control = (void*)(fl->opt+1); | 360 | msg.msg_control = (void*)(fl->opt+1); |
360 | flowi.oif = 0; | 361 | flowi.oif = 0; |
361 | 362 | ||
362 | err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk); | 363 | err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, |
364 | &junk, &junk); | ||
363 | if (err) | 365 | if (err) |
364 | goto done; | 366 | goto done; |
365 | err = -EINVAL; | 367 | err = -EINVAL; |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index e28f9203deca..6aa7ee1295c2 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/in6.h> | 28 | #include <linux/in6.h> |
29 | #include <linux/icmpv6.h> | 29 | #include <linux/icmpv6.h> |
30 | #include <linux/mroute6.h> | 30 | #include <linux/mroute6.h> |
31 | #include <linux/slab.h> | ||
31 | 32 | ||
32 | #include <linux/netfilter.h> | 33 | #include <linux/netfilter.h> |
33 | #include <linux/netfilter_ipv6.h> | 34 | #include <linux/netfilter_ipv6.h> |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index dabf108ad811..e7a5f17d5e95 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/tcp.h> | 37 | #include <linux/tcp.h> |
38 | #include <linux/route.h> | 38 | #include <linux/route.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/slab.h> | ||
40 | 41 | ||
41 | #include <linux/netfilter.h> | 42 | #include <linux/netfilter.h> |
42 | #include <linux/netfilter_ipv6.h> | 43 | #include <linux/netfilter_ipv6.h> |
@@ -107,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | |||
107 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 108 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
108 | WARN_ON(!skb_dst(newskb)); | 109 | WARN_ON(!skb_dst(newskb)); |
109 | 110 | ||
110 | netif_rx(newskb); | 111 | netif_rx_ni(newskb); |
111 | return 0; | 112 | return 0; |
112 | } | 113 | } |
113 | 114 | ||
@@ -180,11 +181,11 @@ int ip6_output(struct sk_buff *skb) | |||
180 | } | 181 | } |
181 | 182 | ||
182 | /* | 183 | /* |
183 | * xmit an sk_buff (used by TCP) | 184 | * xmit an sk_buff (used by TCP, SCTP and DCCP) |
184 | */ | 185 | */ |
185 | 186 | ||
186 | int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | 187 | int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, |
187 | struct ipv6_txoptions *opt, int ipfragok) | 188 | struct ipv6_txoptions *opt) |
188 | { | 189 | { |
189 | struct net *net = sock_net(sk); | 190 | struct net *net = sock_net(sk); |
190 | struct ipv6_pinfo *np = inet6_sk(sk); | 191 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -217,8 +218,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
217 | } | 218 | } |
218 | kfree_skb(skb); | 219 | kfree_skb(skb); |
219 | skb = skb2; | 220 | skb = skb2; |
220 | if (sk) | 221 | skb_set_owner_w(skb, sk); |
221 | skb_set_owner_w(skb, sk); | ||
222 | } | 222 | } |
223 | if (opt->opt_flen) | 223 | if (opt->opt_flen) |
224 | ipv6_push_frag_opts(skb, opt, &proto); | 224 | ipv6_push_frag_opts(skb, opt, &proto); |
@@ -230,10 +230,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
230 | skb_reset_network_header(skb); | 230 | skb_reset_network_header(skb); |
231 | hdr = ipv6_hdr(skb); | 231 | hdr = ipv6_hdr(skb); |
232 | 232 | ||
233 | /* Allow local fragmentation. */ | ||
234 | if (ipfragok) | ||
235 | skb->local_df = 1; | ||
236 | |||
237 | /* | 233 | /* |
238 | * Fill in the IPv6 header | 234 | * Fill in the IPv6 header |
239 | */ | 235 | */ |
@@ -628,7 +624,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
628 | /* We must not fragment if the socket is set to force MTU discovery | 624 | /* We must not fragment if the socket is set to force MTU discovery |
629 | * or if the skb it not generated by a local socket. | 625 | * or if the skb it not generated by a local socket. |
630 | */ | 626 | */ |
631 | if (!skb->local_df) { | 627 | if (!skb->local_df && skb->len > mtu) { |
632 | skb->dev = skb_dst(skb)->dev; | 628 | skb->dev = skb_dst(skb)->dev; |
633 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 629 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
634 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 630 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
@@ -1108,7 +1104,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1108 | int offset, int len, int odd, struct sk_buff *skb), | 1104 | int offset, int len, int odd, struct sk_buff *skb), |
1109 | void *from, int length, int transhdrlen, | 1105 | void *from, int length, int transhdrlen, |
1110 | int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl, | 1106 | int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl, |
1111 | struct rt6_info *rt, unsigned int flags) | 1107 | struct rt6_info *rt, unsigned int flags, int dontfrag) |
1112 | { | 1108 | { |
1113 | struct inet_sock *inet = inet_sk(sk); | 1109 | struct inet_sock *inet = inet_sk(sk); |
1114 | struct ipv6_pinfo *np = inet6_sk(sk); | 1110 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -1222,15 +1218,23 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1222 | */ | 1218 | */ |
1223 | 1219 | ||
1224 | inet->cork.length += length; | 1220 | inet->cork.length += length; |
1225 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && | 1221 | if (length > mtu) { |
1226 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 1222 | int proto = sk->sk_protocol; |
1223 | if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ | ||
1224 | ipv6_local_rxpmtu(sk, fl, mtu-exthdrlen); | ||
1225 | return -EMSGSIZE; | ||
1226 | } | ||
1227 | 1227 | ||
1228 | err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, | 1228 | if (proto == IPPROTO_UDP && |
1229 | fragheaderlen, transhdrlen, mtu, | 1229 | (rt->u.dst.dev->features & NETIF_F_UFO)) { |
1230 | flags); | 1230 | |
1231 | if (err) | 1231 | err = ip6_ufo_append_data(sk, getfrag, from, length, |
1232 | goto error; | 1232 | hh_len, fragheaderlen, |
1233 | return 0; | 1233 | transhdrlen, mtu, flags); |
1234 | if (err) | ||
1235 | goto error; | ||
1236 | return 0; | ||
1237 | } | ||
1234 | } | 1238 | } |
1235 | 1239 | ||
1236 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) | 1240 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 138980eec214..2599870747ec 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/route.h> | 37 | #include <linux/route.h> |
38 | #include <linux/rtnetlink.h> | 38 | #include <linux/rtnetlink.h> |
39 | #include <linux/netfilter_ipv6.h> | 39 | #include <linux/netfilter_ipv6.h> |
40 | #include <linux/slab.h> | ||
40 | 41 | ||
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
42 | #include <asm/atomic.h> | 43 | #include <asm/atomic.h> |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 52e0f74fdfe0..3e333268db89 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
34 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/slab.h> | ||
36 | #include <net/protocol.h> | 37 | #include <net/protocol.h> |
37 | #include <linux/skbuff.h> | 38 | #include <linux/skbuff.h> |
38 | #include <net/sock.h> | 39 | #include <net/sock.h> |
@@ -1113,6 +1114,9 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock) | |||
1113 | unsigned char ttls[MAXMIFS]; | 1114 | unsigned char ttls[MAXMIFS]; |
1114 | int i; | 1115 | int i; |
1115 | 1116 | ||
1117 | if (mfc->mf6cc_parent >= MAXMIFS) | ||
1118 | return -ENFILE; | ||
1119 | |||
1116 | memset(ttls, 255, MAXMIFS); | 1120 | memset(ttls, 255, MAXMIFS); |
1117 | for (i = 0; i < MAXMIFS; i++) { | 1121 | for (i = 0; i < MAXMIFS; i++) { |
1118 | if (IF_ISSET(i, &mfc->mf6cc_ifset)) | 1122 | if (IF_ISSET(i, &mfc->mf6cc_ifset)) |
@@ -1692,17 +1696,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) | |||
1692 | int ct; | 1696 | int ct; |
1693 | struct rtnexthop *nhp; | 1697 | struct rtnexthop *nhp; |
1694 | struct net *net = mfc6_net(c); | 1698 | struct net *net = mfc6_net(c); |
1695 | struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev; | ||
1696 | u8 *b = skb_tail_pointer(skb); | 1699 | u8 *b = skb_tail_pointer(skb); |
1697 | struct rtattr *mp_head; | 1700 | struct rtattr *mp_head; |
1698 | 1701 | ||
1699 | if (dev) | 1702 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1700 | RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); | 1703 | if (c->mf6c_parent > MAXMIFS) |
1704 | return -ENOENT; | ||
1705 | |||
1706 | if (MIF_EXISTS(net, c->mf6c_parent)) | ||
1707 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); | ||
1701 | 1708 | ||
1702 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1709 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1703 | 1710 | ||
1704 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1711 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1705 | if (c->mfc_un.res.ttls[ct] < 255) { | 1712 | if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1706 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1713 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1707 | goto rtattr_failure; | 1714 | goto rtattr_failure; |
1708 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1715 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 430454ee5ead..bd43f0152c21 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/init.h> | 36 | #include <linux/init.h> |
37 | #include <linux/sysctl.h> | 37 | #include <linux/sysctl.h> |
38 | #include <linux/netfilter.h> | 38 | #include <linux/netfilter.h> |
39 | #include <linux/slab.h> | ||
39 | 40 | ||
40 | #include <net/sock.h> | 41 | #include <net/sock.h> |
41 | #include <net/snmp.h> | 42 | #include <net/snmp.h> |
@@ -113,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
113 | } | 114 | } |
114 | opt = xchg(&inet6_sk(sk)->opt, opt); | 115 | opt = xchg(&inet6_sk(sk)->opt, opt); |
115 | } else { | 116 | } else { |
116 | write_lock(&sk->sk_dst_lock); | 117 | spin_lock(&sk->sk_dst_lock); |
117 | opt = xchg(&inet6_sk(sk)->opt, opt); | 118 | opt = xchg(&inet6_sk(sk)->opt, opt); |
118 | write_unlock(&sk->sk_dst_lock); | 119 | spin_unlock(&sk->sk_dst_lock); |
119 | } | 120 | } |
120 | sk_dst_reset(sk); | 121 | sk_dst_reset(sk); |
121 | 122 | ||
@@ -336,6 +337,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
336 | retv = 0; | 337 | retv = 0; |
337 | break; | 338 | break; |
338 | 339 | ||
340 | case IPV6_RECVPATHMTU: | ||
341 | if (optlen < sizeof(int)) | ||
342 | goto e_inval; | ||
343 | np->rxopt.bits.rxpmtu = valbool; | ||
344 | retv = 0; | ||
345 | break; | ||
346 | |||
339 | case IPV6_HOPOPTS: | 347 | case IPV6_HOPOPTS: |
340 | case IPV6_RTHDRDSTOPTS: | 348 | case IPV6_RTHDRDSTOPTS: |
341 | case IPV6_RTHDR: | 349 | case IPV6_RTHDR: |
@@ -450,7 +458,8 @@ sticky_done: | |||
450 | msg.msg_controllen = optlen; | 458 | msg.msg_controllen = optlen; |
451 | msg.msg_control = (void*)(opt+1); | 459 | msg.msg_control = (void*)(opt+1); |
452 | 460 | ||
453 | retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk); | 461 | retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk, |
462 | &junk); | ||
454 | if (retv) | 463 | if (retv) |
455 | goto done; | 464 | goto done; |
456 | update: | 465 | update: |
@@ -766,6 +775,17 @@ pref_skip_coa: | |||
766 | 775 | ||
767 | break; | 776 | break; |
768 | } | 777 | } |
778 | case IPV6_MINHOPCOUNT: | ||
779 | if (optlen < sizeof(int)) | ||
780 | goto e_inval; | ||
781 | if (val < 0 || val > 255) | ||
782 | goto e_inval; | ||
783 | np->min_hopcount = val; | ||
784 | break; | ||
785 | case IPV6_DONTFRAG: | ||
786 | np->dontfrag = valbool; | ||
787 | retv = 0; | ||
788 | break; | ||
769 | } | 789 | } |
770 | 790 | ||
771 | release_sock(sk); | 791 | release_sock(sk); |
@@ -970,14 +990,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
970 | case IPV6_MTU: | 990 | case IPV6_MTU: |
971 | { | 991 | { |
972 | struct dst_entry *dst; | 992 | struct dst_entry *dst; |
993 | |||
973 | val = 0; | 994 | val = 0; |
974 | lock_sock(sk); | 995 | rcu_read_lock(); |
975 | dst = sk_dst_get(sk); | 996 | dst = __sk_dst_get(sk); |
976 | if (dst) { | 997 | if (dst) |
977 | val = dst_mtu(dst); | 998 | val = dst_mtu(dst); |
978 | dst_release(dst); | 999 | rcu_read_unlock(); |
979 | } | ||
980 | release_sock(sk); | ||
981 | if (!val) | 1000 | if (!val) |
982 | return -ENOTCONN; | 1001 | return -ENOTCONN; |
983 | break; | 1002 | break; |
@@ -1055,6 +1074,38 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1055 | val = np->rxopt.bits.rxflow; | 1074 | val = np->rxopt.bits.rxflow; |
1056 | break; | 1075 | break; |
1057 | 1076 | ||
1077 | case IPV6_RECVPATHMTU: | ||
1078 | val = np->rxopt.bits.rxpmtu; | ||
1079 | break; | ||
1080 | |||
1081 | case IPV6_PATHMTU: | ||
1082 | { | ||
1083 | struct dst_entry *dst; | ||
1084 | struct ip6_mtuinfo mtuinfo; | ||
1085 | |||
1086 | if (len < sizeof(mtuinfo)) | ||
1087 | return -EINVAL; | ||
1088 | |||
1089 | len = sizeof(mtuinfo); | ||
1090 | memset(&mtuinfo, 0, sizeof(mtuinfo)); | ||
1091 | |||
1092 | rcu_read_lock(); | ||
1093 | dst = __sk_dst_get(sk); | ||
1094 | if (dst) | ||
1095 | mtuinfo.ip6m_mtu = dst_mtu(dst); | ||
1096 | rcu_read_unlock(); | ||
1097 | if (!mtuinfo.ip6m_mtu) | ||
1098 | return -ENOTCONN; | ||
1099 | |||
1100 | if (put_user(len, optlen)) | ||
1101 | return -EFAULT; | ||
1102 | if (copy_to_user(optval, &mtuinfo, len)) | ||
1103 | return -EFAULT; | ||
1104 | |||
1105 | return 0; | ||
1106 | break; | ||
1107 | } | ||
1108 | |||
1058 | case IPV6_UNICAST_HOPS: | 1109 | case IPV6_UNICAST_HOPS: |
1059 | case IPV6_MULTICAST_HOPS: | 1110 | case IPV6_MULTICAST_HOPS: |
1060 | { | 1111 | { |
@@ -1065,12 +1116,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1065 | else | 1116 | else |
1066 | val = np->mcast_hops; | 1117 | val = np->mcast_hops; |
1067 | 1118 | ||
1068 | dst = sk_dst_get(sk); | 1119 | if (val < 0) { |
1069 | if (dst) { | 1120 | rcu_read_lock(); |
1070 | if (val < 0) | 1121 | dst = __sk_dst_get(sk); |
1122 | if (dst) | ||
1071 | val = ip6_dst_hoplimit(dst); | 1123 | val = ip6_dst_hoplimit(dst); |
1072 | dst_release(dst); | 1124 | rcu_read_unlock(); |
1073 | } | 1125 | } |
1126 | |||
1074 | if (val < 0) | 1127 | if (val < 0) |
1075 | val = sock_net(sk)->ipv6.devconf_all->hop_limit; | 1128 | val = sock_net(sk)->ipv6.devconf_all->hop_limit; |
1076 | break; | 1129 | break; |
@@ -1114,6 +1167,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1114 | val |= IPV6_PREFER_SRC_HOME; | 1167 | val |= IPV6_PREFER_SRC_HOME; |
1115 | break; | 1168 | break; |
1116 | 1169 | ||
1170 | case IPV6_MINHOPCOUNT: | ||
1171 | val = np->min_hopcount; | ||
1172 | break; | ||
1173 | |||
1174 | case IPV6_DONTFRAG: | ||
1175 | val = np->dontfrag; | ||
1176 | break; | ||
1177 | |||
1117 | default: | 1178 | default: |
1118 | return -ENOPROTOOPT; | 1179 | return -ENOPROTOOPT; |
1119 | } | 1180 | } |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index bcd971915969..006aee683a0f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -43,6 +43,8 @@ | |||
43 | #include <linux/init.h> | 43 | #include <linux/init.h> |
44 | #include <linux/proc_fs.h> | 44 | #include <linux/proc_fs.h> |
45 | #include <linux/seq_file.h> | 45 | #include <linux/seq_file.h> |
46 | #include <linux/slab.h> | ||
47 | #include <net/mld.h> | ||
46 | 48 | ||
47 | #include <linux/netfilter.h> | 49 | #include <linux/netfilter.h> |
48 | #include <linux/netfilter_ipv6.h> | 50 | #include <linux/netfilter_ipv6.h> |
@@ -70,54 +72,11 @@ | |||
70 | #define MDBG(x) | 72 | #define MDBG(x) |
71 | #endif | 73 | #endif |
72 | 74 | ||
73 | /* | 75 | /* Ensure that we have struct in6_addr aligned on 32bit word. */ |
74 | * These header formats should be in a separate include file, but icmpv6.h | 76 | static void *__mld2_query_bugs[] __attribute__((__unused__)) = { |
75 | * doesn't have in6_addr defined in all cases, there is no __u128, and no | 77 | BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), |
76 | * other files reference these. | 78 | BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4), |
77 | * | 79 | BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4) |
78 | * +-DLS 4/14/03 | ||
79 | */ | ||
80 | |||
81 | /* Multicast Listener Discovery version 2 headers */ | ||
82 | |||
83 | struct mld2_grec { | ||
84 | __u8 grec_type; | ||
85 | __u8 grec_auxwords; | ||
86 | __be16 grec_nsrcs; | ||
87 | struct in6_addr grec_mca; | ||
88 | struct in6_addr grec_src[0]; | ||
89 | }; | ||
90 | |||
91 | struct mld2_report { | ||
92 | __u8 type; | ||
93 | __u8 resv1; | ||
94 | __sum16 csum; | ||
95 | __be16 resv2; | ||
96 | __be16 ngrec; | ||
97 | struct mld2_grec grec[0]; | ||
98 | }; | ||
99 | |||
100 | struct mld2_query { | ||
101 | __u8 type; | ||
102 | __u8 code; | ||
103 | __sum16 csum; | ||
104 | __be16 mrc; | ||
105 | __be16 resv1; | ||
106 | struct in6_addr mca; | ||
107 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
108 | __u8 qrv:3, | ||
109 | suppress:1, | ||
110 | resv2:4; | ||
111 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
112 | __u8 resv2:4, | ||
113 | suppress:1, | ||
114 | qrv:3; | ||
115 | #else | ||
116 | #error "Please fix <asm/byteorder.h>" | ||
117 | #endif | ||
118 | __u8 qqic; | ||
119 | __be16 nsrcs; | ||
120 | struct in6_addr srcs[0]; | ||
121 | }; | 80 | }; |
122 | 81 | ||
123 | static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; | 82 | static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; |
@@ -156,14 +115,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, | |||
156 | ((idev)->mc_v1_seen && \ | 115 | ((idev)->mc_v1_seen && \ |
157 | time_before(jiffies, (idev)->mc_v1_seen))) | 116 | time_before(jiffies, (idev)->mc_v1_seen))) |
158 | 117 | ||
159 | #define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) | ||
160 | #define MLDV2_EXP(thresh, nbmant, nbexp, value) \ | ||
161 | ((value) < (thresh) ? (value) : \ | ||
162 | ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ | ||
163 | (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) | ||
164 | |||
165 | #define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) | ||
166 | |||
167 | #define IPV6_MLD_MAX_MSF 64 | 118 | #define IPV6_MLD_MAX_MSF 64 |
168 | 119 | ||
169 | int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; | 120 | int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; |
@@ -714,7 +665,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) | |||
714 | if (!(mc->mca_flags&MAF_LOADED)) { | 665 | if (!(mc->mca_flags&MAF_LOADED)) { |
715 | mc->mca_flags |= MAF_LOADED; | 666 | mc->mca_flags |= MAF_LOADED; |
716 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) | 667 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) |
717 | dev_mc_add(dev, buf, dev->addr_len, 0); | 668 | dev_mc_add(dev, buf); |
718 | } | 669 | } |
719 | spin_unlock_bh(&mc->mca_lock); | 670 | spin_unlock_bh(&mc->mca_lock); |
720 | 671 | ||
@@ -740,7 +691,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc) | |||
740 | if (mc->mca_flags&MAF_LOADED) { | 691 | if (mc->mca_flags&MAF_LOADED) { |
741 | mc->mca_flags &= ~MAF_LOADED; | 692 | mc->mca_flags &= ~MAF_LOADED; |
742 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) | 693 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) |
743 | dev_mc_delete(dev, buf, dev->addr_len, 0); | 694 | dev_mc_del(dev, buf); |
744 | } | 695 | } |
745 | 696 | ||
746 | if (mc->mca_flags & MAF_NOREPORT) | 697 | if (mc->mca_flags & MAF_NOREPORT) |
@@ -1160,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1160 | struct in6_addr *group; | 1111 | struct in6_addr *group; |
1161 | unsigned long max_delay; | 1112 | unsigned long max_delay; |
1162 | struct inet6_dev *idev; | 1113 | struct inet6_dev *idev; |
1163 | struct icmp6hdr *hdr; | 1114 | struct mld_msg *mld; |
1164 | int group_type; | 1115 | int group_type; |
1165 | int mark = 0; | 1116 | int mark = 0; |
1166 | int len; | 1117 | int len; |
@@ -1181,8 +1132,8 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1181 | if (idev == NULL) | 1132 | if (idev == NULL) |
1182 | return 0; | 1133 | return 0; |
1183 | 1134 | ||
1184 | hdr = icmp6_hdr(skb); | 1135 | mld = (struct mld_msg *)icmp6_hdr(skb); |
1185 | group = (struct in6_addr *) (hdr + 1); | 1136 | group = &mld->mld_mca; |
1186 | group_type = ipv6_addr_type(group); | 1137 | group_type = ipv6_addr_type(group); |
1187 | 1138 | ||
1188 | if (group_type != IPV6_ADDR_ANY && | 1139 | if (group_type != IPV6_ADDR_ANY && |
@@ -1196,7 +1147,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1196 | /* MLDv1 router present */ | 1147 | /* MLDv1 router present */ |
1197 | 1148 | ||
1198 | /* Translate milliseconds to jiffies */ | 1149 | /* Translate milliseconds to jiffies */ |
1199 | max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000; | 1150 | max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000; |
1200 | 1151 | ||
1201 | switchback = (idev->mc_qrv + 1) * max_delay; | 1152 | switchback = (idev->mc_qrv + 1) * max_delay; |
1202 | idev->mc_v1_seen = jiffies + switchback; | 1153 | idev->mc_v1_seen = jiffies + switchback; |
@@ -1215,14 +1166,14 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1215 | return -EINVAL; | 1166 | return -EINVAL; |
1216 | } | 1167 | } |
1217 | mlh2 = (struct mld2_query *)skb_transport_header(skb); | 1168 | mlh2 = (struct mld2_query *)skb_transport_header(skb); |
1218 | max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000; | 1169 | max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; |
1219 | if (!max_delay) | 1170 | if (!max_delay) |
1220 | max_delay = 1; | 1171 | max_delay = 1; |
1221 | idev->mc_maxdelay = max_delay; | 1172 | idev->mc_maxdelay = max_delay; |
1222 | if (mlh2->qrv) | 1173 | if (mlh2->mld2q_qrv) |
1223 | idev->mc_qrv = mlh2->qrv; | 1174 | idev->mc_qrv = mlh2->mld2q_qrv; |
1224 | if (group_type == IPV6_ADDR_ANY) { /* general query */ | 1175 | if (group_type == IPV6_ADDR_ANY) { /* general query */ |
1225 | if (mlh2->nsrcs) { | 1176 | if (mlh2->mld2q_nsrcs) { |
1226 | in6_dev_put(idev); | 1177 | in6_dev_put(idev); |
1227 | return -EINVAL; /* no sources allowed */ | 1178 | return -EINVAL; /* no sources allowed */ |
1228 | } | 1179 | } |
@@ -1231,9 +1182,9 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1231 | return 0; | 1182 | return 0; |
1232 | } | 1183 | } |
1233 | /* mark sources to include, if group & source-specific */ | 1184 | /* mark sources to include, if group & source-specific */ |
1234 | if (mlh2->nsrcs != 0) { | 1185 | if (mlh2->mld2q_nsrcs != 0) { |
1235 | if (!pskb_may_pull(skb, srcs_offset + | 1186 | if (!pskb_may_pull(skb, srcs_offset + |
1236 | ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) { | 1187 | ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) { |
1237 | in6_dev_put(idev); | 1188 | in6_dev_put(idev); |
1238 | return -EINVAL; | 1189 | return -EINVAL; |
1239 | } | 1190 | } |
@@ -1269,7 +1220,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1269 | ma->mca_flags &= ~MAF_GSQUERY; | 1220 | ma->mca_flags &= ~MAF_GSQUERY; |
1270 | } | 1221 | } |
1271 | if (!(ma->mca_flags & MAF_GSQUERY) || | 1222 | if (!(ma->mca_flags & MAF_GSQUERY) || |
1272 | mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs)) | 1223 | mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs)) |
1273 | igmp6_group_queried(ma, max_delay); | 1224 | igmp6_group_queried(ma, max_delay); |
1274 | spin_unlock_bh(&ma->mca_lock); | 1225 | spin_unlock_bh(&ma->mca_lock); |
1275 | break; | 1226 | break; |
@@ -1285,9 +1236,8 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1285 | int igmp6_event_report(struct sk_buff *skb) | 1236 | int igmp6_event_report(struct sk_buff *skb) |
1286 | { | 1237 | { |
1287 | struct ifmcaddr6 *ma; | 1238 | struct ifmcaddr6 *ma; |
1288 | struct in6_addr *addrp; | ||
1289 | struct inet6_dev *idev; | 1239 | struct inet6_dev *idev; |
1290 | struct icmp6hdr *hdr; | 1240 | struct mld_msg *mld; |
1291 | int addr_type; | 1241 | int addr_type; |
1292 | 1242 | ||
1293 | /* Our own report looped back. Ignore it. */ | 1243 | /* Our own report looped back. Ignore it. */ |
@@ -1299,10 +1249,10 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1299 | skb->pkt_type != PACKET_BROADCAST) | 1249 | skb->pkt_type != PACKET_BROADCAST) |
1300 | return 0; | 1250 | return 0; |
1301 | 1251 | ||
1302 | if (!pskb_may_pull(skb, sizeof(struct in6_addr))) | 1252 | if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr))) |
1303 | return -EINVAL; | 1253 | return -EINVAL; |
1304 | 1254 | ||
1305 | hdr = icmp6_hdr(skb); | 1255 | mld = (struct mld_msg *)icmp6_hdr(skb); |
1306 | 1256 | ||
1307 | /* Drop reports with not link local source */ | 1257 | /* Drop reports with not link local source */ |
1308 | addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); | 1258 | addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); |
@@ -1310,8 +1260,6 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1310 | !(addr_type&IPV6_ADDR_LINKLOCAL)) | 1260 | !(addr_type&IPV6_ADDR_LINKLOCAL)) |
1311 | return -EINVAL; | 1261 | return -EINVAL; |
1312 | 1262 | ||
1313 | addrp = (struct in6_addr *) (hdr + 1); | ||
1314 | |||
1315 | idev = in6_dev_get(skb->dev); | 1263 | idev = in6_dev_get(skb->dev); |
1316 | if (idev == NULL) | 1264 | if (idev == NULL) |
1317 | return -ENODEV; | 1265 | return -ENODEV; |
@@ -1322,7 +1270,7 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1322 | 1270 | ||
1323 | read_lock_bh(&idev->lock); | 1271 | read_lock_bh(&idev->lock); |
1324 | for (ma = idev->mc_list; ma; ma=ma->next) { | 1272 | for (ma = idev->mc_list; ma; ma=ma->next) { |
1325 | if (ipv6_addr_equal(&ma->mca_addr, addrp)) { | 1273 | if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) { |
1326 | spin_lock(&ma->mca_lock); | 1274 | spin_lock(&ma->mca_lock); |
1327 | if (del_timer(&ma->mca_timer)) | 1275 | if (del_timer(&ma->mca_timer)) |
1328 | atomic_dec(&ma->mca_refcnt); | 1276 | atomic_dec(&ma->mca_refcnt); |
@@ -1431,11 +1379,11 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) | |||
1431 | skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); | 1379 | skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); |
1432 | skb_put(skb, sizeof(*pmr)); | 1380 | skb_put(skb, sizeof(*pmr)); |
1433 | pmr = (struct mld2_report *)skb_transport_header(skb); | 1381 | pmr = (struct mld2_report *)skb_transport_header(skb); |
1434 | pmr->type = ICMPV6_MLD2_REPORT; | 1382 | pmr->mld2r_type = ICMPV6_MLD2_REPORT; |
1435 | pmr->resv1 = 0; | 1383 | pmr->mld2r_resv1 = 0; |
1436 | pmr->csum = 0; | 1384 | pmr->mld2r_cksum = 0; |
1437 | pmr->resv2 = 0; | 1385 | pmr->mld2r_resv2 = 0; |
1438 | pmr->ngrec = 0; | 1386 | pmr->mld2r_ngrec = 0; |
1439 | return skb; | 1387 | return skb; |
1440 | } | 1388 | } |
1441 | 1389 | ||
@@ -1457,9 +1405,10 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1457 | mldlen = skb->tail - skb->transport_header; | 1405 | mldlen = skb->tail - skb->transport_header; |
1458 | pip6->payload_len = htons(payload_len); | 1406 | pip6->payload_len = htons(payload_len); |
1459 | 1407 | ||
1460 | pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, | 1408 | pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, |
1461 | IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), | 1409 | IPPROTO_ICMPV6, |
1462 | mldlen, 0)); | 1410 | csum_partial(skb_transport_header(skb), |
1411 | mldlen, 0)); | ||
1463 | 1412 | ||
1464 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | 1413 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); |
1465 | 1414 | ||
@@ -1520,7 +1469,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1520 | pgr->grec_nsrcs = 0; | 1469 | pgr->grec_nsrcs = 0; |
1521 | pgr->grec_mca = pmc->mca_addr; /* structure copy */ | 1470 | pgr->grec_mca = pmc->mca_addr; /* structure copy */ |
1522 | pmr = (struct mld2_report *)skb_transport_header(skb); | 1471 | pmr = (struct mld2_report *)skb_transport_header(skb); |
1523 | pmr->ngrec = htons(ntohs(pmr->ngrec)+1); | 1472 | pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1); |
1524 | *ppgr = pgr; | 1473 | *ppgr = pgr; |
1525 | return skb; | 1474 | return skb; |
1526 | } | 1475 | } |
@@ -1556,7 +1505,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1556 | 1505 | ||
1557 | /* EX and TO_EX get a fresh packet, if needed */ | 1506 | /* EX and TO_EX get a fresh packet, if needed */ |
1558 | if (truncate) { | 1507 | if (truncate) { |
1559 | if (pmr && pmr->ngrec && | 1508 | if (pmr && pmr->mld2r_ngrec && |
1560 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { | 1509 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { |
1561 | if (skb) | 1510 | if (skb) |
1562 | mld_sendpack(skb); | 1511 | mld_sendpack(skb); |
@@ -1769,9 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1769 | struct sock *sk = net->ipv6.igmp_sk; | 1718 | struct sock *sk = net->ipv6.igmp_sk; |
1770 | struct inet6_dev *idev; | 1719 | struct inet6_dev *idev; |
1771 | struct sk_buff *skb; | 1720 | struct sk_buff *skb; |
1772 | struct icmp6hdr *hdr; | 1721 | struct mld_msg *hdr; |
1773 | const struct in6_addr *snd_addr, *saddr; | 1722 | const struct in6_addr *snd_addr, *saddr; |
1774 | struct in6_addr *addrp; | ||
1775 | struct in6_addr addr_buf; | 1723 | struct in6_addr addr_buf; |
1776 | int err, len, payload_len, full_len; | 1724 | int err, len, payload_len, full_len; |
1777 | u8 ra[8] = { IPPROTO_ICMPV6, 0, | 1725 | u8 ra[8] = { IPPROTO_ICMPV6, 0, |
@@ -1819,16 +1767,14 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1819 | 1767 | ||
1820 | memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); | 1768 | memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); |
1821 | 1769 | ||
1822 | hdr = (struct icmp6hdr *) skb_put(skb, sizeof(struct icmp6hdr)); | 1770 | hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg)); |
1823 | memset(hdr, 0, sizeof(struct icmp6hdr)); | 1771 | memset(hdr, 0, sizeof(struct mld_msg)); |
1824 | hdr->icmp6_type = type; | 1772 | hdr->mld_type = type; |
1773 | ipv6_addr_copy(&hdr->mld_mca, addr); | ||
1825 | 1774 | ||
1826 | addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr)); | 1775 | hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len, |
1827 | ipv6_addr_copy(addrp, addr); | 1776 | IPPROTO_ICMPV6, |
1828 | 1777 | csum_partial(hdr, len, 0)); | |
1829 | hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len, | ||
1830 | IPPROTO_ICMPV6, | ||
1831 | csum_partial(hdr, len, 0)); | ||
1832 | 1778 | ||
1833 | idev = in6_dev_get(skb->dev); | 1779 | idev = in6_dev_get(skb->dev); |
1834 | 1780 | ||
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 8bcc4b7db3bf..da0a4d2adc69 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #include <linux/route.h> | 59 | #include <linux/route.h> |
60 | #include <linux/init.h> | 60 | #include <linux/init.h> |
61 | #include <linux/rcupdate.h> | 61 | #include <linux/rcupdate.h> |
62 | #include <linux/slab.h> | ||
62 | #ifdef CONFIG_SYSCTL | 63 | #ifdef CONFIG_SYSCTL |
63 | #include <linux/sysctl.h> | 64 | #include <linux/sysctl.h> |
64 | #endif | 65 | #endif |
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 7854052be60b..6a68a74d14a3 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/proc_fs.h> | 25 | #include <linux/proc_fs.h> |
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/net_namespace.h> | 29 | #include <net/net_namespace.h> |
29 | #include <net/sock.h> | 30 | #include <net/sock.h> |
30 | #include <net/ipv6.h> | 31 | #include <net/ipv6.h> |
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index dd8afbaf00a8..39b50c3768e8 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * 2 of the License, or (at your option) any later version. | 15 | * 2 of the License, or (at your option) any later version. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/gfp.h> | ||
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/skbuff.h> | 20 | #include <linux/skbuff.h> |
20 | #include <linux/icmpv6.h> | 21 | #include <linux/icmpv6.h> |
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index cbe8dec9744b..e60677519e40 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c | |||
@@ -141,11 +141,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | /* Step to the next */ | 143 | /* Step to the next */ |
144 | pr_debug("len%04X \n", optlen); | 144 | pr_debug("len%04X\n", optlen); |
145 | 145 | ||
146 | if ((ptr > skb->len - optlen || hdrlen < optlen) && | 146 | if ((ptr > skb->len - optlen || hdrlen < optlen) && |
147 | temp < optinfo->optsnr - 1) { | 147 | temp < optinfo->optsnr - 1) { |
148 | pr_debug("new pointer is too large! \n"); | 148 | pr_debug("new pointer is too large!\n"); |
149 | break; | 149 | break; |
150 | } | 150 | } |
151 | ptr += optlen; | 151 | ptr += optlen; |
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index 36b72cafc227..d6fc9aff3163 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/netfilter_ipv6/ip6_tables.h> | 14 | #include <linux/netfilter_ipv6/ip6_tables.h> |
15 | #include <linux/slab.h> | ||
15 | 16 | ||
16 | MODULE_LICENSE("GPL"); | 17 | MODULE_LICENSE("GPL"); |
17 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 18 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 7844e557c0ec..6a102b57f356 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/netfilter_ipv6/ip6_tables.h> | 12 | #include <linux/netfilter_ipv6/ip6_tables.h> |
13 | #include <linux/slab.h> | ||
13 | 14 | ||
14 | MODULE_LICENSE("GPL"); | 15 | MODULE_LICENSE("GPL"); |
15 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 16 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index aef31a29de9e..5b9926a011bd 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c | |||
@@ -5,6 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
7 | #include <linux/netfilter_ipv6/ip6_tables.h> | 7 | #include <linux/netfilter_ipv6/ip6_tables.h> |
8 | #include <linux/slab.h> | ||
8 | 9 | ||
9 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) | 10 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) |
10 | 11 | ||
@@ -13,7 +14,7 @@ static const struct xt_table packet_raw = { | |||
13 | .valid_hooks = RAW_VALID_HOOKS, | 14 | .valid_hooks = RAW_VALID_HOOKS, |
14 | .me = THIS_MODULE, | 15 | .me = THIS_MODULE, |
15 | .af = NFPROTO_IPV6, | 16 | .af = NFPROTO_IPV6, |
16 | .priority = NF_IP6_PRI_FIRST, | 17 | .priority = NF_IP6_PRI_RAW, |
17 | }; | 18 | }; |
18 | 19 | ||
19 | /* The work comes in here from netfilter.c. */ | 20 | /* The work comes in here from netfilter.c. */ |
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c index 0824d865aa9b..91aa2b4d83c9 100644 --- a/net/ipv6/netfilter/ip6table_security.c +++ b/net/ipv6/netfilter/ip6table_security.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/netfilter_ipv6/ip6_tables.h> | 19 | #include <linux/netfilter_ipv6/ip6_tables.h> |
20 | #include <linux/slab.h> | ||
20 | 21 | ||
21 | MODULE_LICENSE("GPL"); | 22 | MODULE_LICENSE("GPL"); |
22 | MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); | 23 | MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index f1171b744650..dd5b9bd61c62 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/ipv6.h> | 27 | #include <linux/ipv6.h> |
28 | #include <linux/icmpv6.h> | 28 | #include <linux/icmpv6.h> |
29 | #include <linux/random.h> | 29 | #include <linux/random.h> |
30 | #include <linux/slab.h> | ||
30 | 31 | ||
31 | #include <net/sock.h> | 32 | #include <net/sock.h> |
32 | #include <net/snmp.h> | 33 | #include <net/snmp.h> |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 58344c0fbd13..458eabfbe130 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = { | |||
97 | SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), | 97 | SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), |
98 | SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), | 98 | SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), |
99 | SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), | 99 | SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), |
100 | SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS), | ||
100 | SNMP_MIB_SENTINEL | 101 | SNMP_MIB_SENTINEL |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ed31c37c6e39..0e3d2dd92078 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/socket.h> | 23 | #include <linux/socket.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/sockios.h> | 25 | #include <linux/sockios.h> |
25 | #include <linux/net.h> | 26 | #include <linux/net.h> |
26 | #include <linux/in6.h> | 27 | #include <linux/in6.h> |
@@ -380,7 +381,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) | |||
380 | } | 381 | } |
381 | 382 | ||
382 | /* Charge it to the socket. */ | 383 | /* Charge it to the socket. */ |
383 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 384 | if (ip_queue_rcv_skb(sk, skb) < 0) { |
384 | kfree_skb(skb); | 385 | kfree_skb(skb); |
385 | return NET_RX_DROP; | 386 | return NET_RX_DROP; |
386 | } | 387 | } |
@@ -460,6 +461,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
460 | if (flags & MSG_ERRQUEUE) | 461 | if (flags & MSG_ERRQUEUE) |
461 | return ipv6_recv_error(sk, msg, len); | 462 | return ipv6_recv_error(sk, msg, len); |
462 | 463 | ||
464 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | ||
465 | return ipv6_recv_rxpmtu(sk, msg, len); | ||
466 | |||
463 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 467 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
464 | if (!skb) | 468 | if (!skb) |
465 | goto out; | 469 | goto out; |
@@ -732,6 +736,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
732 | int addr_len = msg->msg_namelen; | 736 | int addr_len = msg->msg_namelen; |
733 | int hlimit = -1; | 737 | int hlimit = -1; |
734 | int tclass = -1; | 738 | int tclass = -1; |
739 | int dontfrag = -1; | ||
735 | u16 proto; | 740 | u16 proto; |
736 | int err; | 741 | int err; |
737 | 742 | ||
@@ -810,7 +815,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
810 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 815 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
811 | opt->tot_len = sizeof(struct ipv6_txoptions); | 816 | opt->tot_len = sizeof(struct ipv6_txoptions); |
812 | 817 | ||
813 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); | 818 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, |
819 | &tclass, &dontfrag); | ||
814 | if (err < 0) { | 820 | if (err < 0) { |
815 | fl6_sock_release(flowlabel); | 821 | fl6_sock_release(flowlabel); |
816 | return err; | 822 | return err; |
@@ -879,6 +885,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
879 | if (tclass < 0) | 885 | if (tclass < 0) |
880 | tclass = np->tclass; | 886 | tclass = np->tclass; |
881 | 887 | ||
888 | if (dontfrag < 0) | ||
889 | dontfrag = np->dontfrag; | ||
890 | |||
882 | if (msg->msg_flags&MSG_CONFIRM) | 891 | if (msg->msg_flags&MSG_CONFIRM) |
883 | goto do_confirm; | 892 | goto do_confirm; |
884 | 893 | ||
@@ -889,7 +898,7 @@ back_from_confirm: | |||
889 | lock_sock(sk); | 898 | lock_sock(sk); |
890 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, | 899 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, |
891 | len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, | 900 | len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, |
892 | msg->msg_flags); | 901 | msg->msg_flags, dontfrag); |
893 | 902 | ||
894 | if (err) | 903 | if (err) |
895 | ip6_flush_pending_frames(sk); | 904 | ip6_flush_pending_frames(sk); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index a555156e9779..6d4292ff5854 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/random.h> | 41 | #include <linux/random.h> |
42 | #include <linux/jhash.h> | 42 | #include <linux/jhash.h> |
43 | #include <linux/skbuff.h> | 43 | #include <linux/skbuff.h> |
44 | #include <linux/slab.h> | ||
44 | 45 | ||
45 | #include <net/sock.h> | 46 | #include <net/sock.h> |
46 | #include <net/snmp.h> | 47 | #include <net/snmp.h> |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 52cd3eff31dc..05ebd7833043 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/proc_fs.h> | 40 | #include <linux/proc_fs.h> |
41 | #include <linux/seq_file.h> | 41 | #include <linux/seq_file.h> |
42 | #include <linux/nsproxy.h> | 42 | #include <linux/nsproxy.h> |
43 | #include <linux/slab.h> | ||
43 | #include <net/net_namespace.h> | 44 | #include <net/net_namespace.h> |
44 | #include <net/snmp.h> | 45 | #include <net/snmp.h> |
45 | #include <net/ipv6.h> | 46 | #include <net/ipv6.h> |
@@ -814,7 +815,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, | |||
814 | { | 815 | { |
815 | int flags = 0; | 816 | int flags = 0; |
816 | 817 | ||
817 | if (rt6_need_strict(&fl->fl6_dst)) | 818 | if (fl->oif || rt6_need_strict(&fl->fl6_dst)) |
818 | flags |= RT6_LOOKUP_F_IFACE; | 819 | flags |= RT6_LOOKUP_F_IFACE; |
819 | 820 | ||
820 | if (!ipv6_addr_any(&fl->fl6_src)) | 821 | if (!ipv6_addr_any(&fl->fl6_src)) |
@@ -879,7 +880,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
879 | 880 | ||
880 | rt = (struct rt6_info *) dst; | 881 | rt = (struct rt6_info *) dst; |
881 | 882 | ||
882 | if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) | 883 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) |
883 | return dst; | 884 | return dst; |
884 | 885 | ||
885 | return NULL; | 886 | return NULL; |
@@ -890,12 +891,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) | |||
890 | struct rt6_info *rt = (struct rt6_info *) dst; | 891 | struct rt6_info *rt = (struct rt6_info *) dst; |
891 | 892 | ||
892 | if (rt) { | 893 | if (rt) { |
893 | if (rt->rt6i_flags & RTF_CACHE) | 894 | if (rt->rt6i_flags & RTF_CACHE) { |
894 | ip6_del_rt(rt); | 895 | if (rt6_check_expired(rt)) { |
895 | else | 896 | ip6_del_rt(rt); |
897 | dst = NULL; | ||
898 | } | ||
899 | } else { | ||
896 | dst_release(dst); | 900 | dst_release(dst); |
901 | dst = NULL; | ||
902 | } | ||
897 | } | 903 | } |
898 | return NULL; | 904 | return dst; |
899 | } | 905 | } |
900 | 906 | ||
901 | static void ip6_link_failure(struct sk_buff *skb) | 907 | static void ip6_link_failure(struct sk_buff *skb) |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index b1eea811be48..5abae10cd884 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/if_arp.h> | 29 | #include <linux/if_arp.h> |
30 | #include <linux/icmp.h> | 30 | #include <linux/icmp.h> |
31 | #include <linux/slab.h> | ||
31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/netfilter_ipv4.h> | 34 | #include <linux/netfilter_ipv4.h> |
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index f841d93bf987..fa1d8f4e0051 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/sysctl.h> | 9 | #include <linux/sysctl.h> |
10 | #include <linux/in6.h> | 10 | #include <linux/in6.h> |
11 | #include <linux/ipv6.h> | 11 | #include <linux/ipv6.h> |
12 | #include <linux/slab.h> | ||
12 | #include <net/ndisc.h> | 13 | #include <net/ndisc.h> |
13 | #include <net/ipv6.h> | 14 | #include <net/ipv6.h> |
14 | #include <net/addrconf.h> | 15 | #include <net/addrconf.h> |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6dbba80d31..6603511e3673 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/jhash.h> | 38 | #include <linux/jhash.h> |
39 | #include <linux/ipsec.h> | 39 | #include <linux/ipsec.h> |
40 | #include <linux/times.h> | 40 | #include <linux/times.h> |
41 | #include <linux/slab.h> | ||
41 | 42 | ||
42 | #include <linux/ipv6.h> | 43 | #include <linux/ipv6.h> |
43 | #include <linux/icmpv6.h> | 44 | #include <linux/icmpv6.h> |
@@ -74,6 +75,9 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
74 | struct request_sock *req); | 75 | struct request_sock *req); |
75 | 76 | ||
76 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | 77 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
78 | static void __tcp_v6_send_check(struct sk_buff *skb, | ||
79 | struct in6_addr *saddr, | ||
80 | struct in6_addr *daddr); | ||
77 | 81 | ||
78 | static const struct inet_connection_sock_af_ops ipv6_mapped; | 82 | static const struct inet_connection_sock_af_ops ipv6_mapped; |
79 | static const struct inet_connection_sock_af_ops ipv6_specific; | 83 | static const struct inet_connection_sock_af_ops ipv6_specific; |
@@ -349,6 +353,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
349 | if (sk->sk_state == TCP_CLOSE) | 353 | if (sk->sk_state == TCP_CLOSE) |
350 | goto out; | 354 | goto out; |
351 | 355 | ||
356 | if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { | ||
357 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | ||
358 | goto out; | ||
359 | } | ||
360 | |||
352 | tp = tcp_sk(sk); | 361 | tp = tcp_sk(sk); |
353 | seq = ntohl(th->seq); | 362 | seq = ntohl(th->seq); |
354 | if (sk->sk_state != TCP_LISTEN && | 363 | if (sk->sk_state != TCP_LISTEN && |
@@ -502,14 +511,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
502 | 511 | ||
503 | skb = tcp_make_synack(sk, dst, req, rvp); | 512 | skb = tcp_make_synack(sk, dst, req, rvp); |
504 | if (skb) { | 513 | if (skb) { |
505 | struct tcphdr *th = tcp_hdr(skb); | 514 | __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); |
506 | |||
507 | th->check = tcp_v6_check(skb->len, | ||
508 | &treq->loc_addr, &treq->rmt_addr, | ||
509 | csum_partial(th, skb->len, skb->csum)); | ||
510 | 515 | ||
511 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); | 516 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); |
512 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 517 | err = ip6_xmit(sk, skb, &fl, opt); |
513 | err = net_xmit_eval(err); | 518 | err = net_xmit_eval(err); |
514 | } | 519 | } |
515 | 520 | ||
@@ -917,22 +922,29 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { | |||
917 | .twsk_destructor= tcp_twsk_destructor, | 922 | .twsk_destructor= tcp_twsk_destructor, |
918 | }; | 923 | }; |
919 | 924 | ||
920 | static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | 925 | static void __tcp_v6_send_check(struct sk_buff *skb, |
926 | struct in6_addr *saddr, struct in6_addr *daddr) | ||
921 | { | 927 | { |
922 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
923 | struct tcphdr *th = tcp_hdr(skb); | 928 | struct tcphdr *th = tcp_hdr(skb); |
924 | 929 | ||
925 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 930 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
926 | th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); | 931 | th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0); |
927 | skb->csum_start = skb_transport_header(skb) - skb->head; | 932 | skb->csum_start = skb_transport_header(skb) - skb->head; |
928 | skb->csum_offset = offsetof(struct tcphdr, check); | 933 | skb->csum_offset = offsetof(struct tcphdr, check); |
929 | } else { | 934 | } else { |
930 | th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, | 935 | th->check = tcp_v6_check(skb->len, saddr, daddr, |
931 | csum_partial(th, th->doff<<2, | 936 | csum_partial(th, th->doff << 2, |
932 | skb->csum)); | 937 | skb->csum)); |
933 | } | 938 | } |
934 | } | 939 | } |
935 | 940 | ||
941 | static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) | ||
942 | { | ||
943 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
944 | |||
945 | __tcp_v6_send_check(skb, &np->saddr, &np->daddr); | ||
946 | } | ||
947 | |||
936 | static int tcp_v6_gso_send_check(struct sk_buff *skb) | 948 | static int tcp_v6_gso_send_check(struct sk_buff *skb) |
937 | { | 949 | { |
938 | struct ipv6hdr *ipv6h; | 950 | struct ipv6hdr *ipv6h; |
@@ -945,11 +957,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) | |||
945 | th = tcp_hdr(skb); | 957 | th = tcp_hdr(skb); |
946 | 958 | ||
947 | th->check = 0; | 959 | th->check = 0; |
948 | th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
949 | IPPROTO_TCP, 0); | ||
950 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
951 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
952 | skb->ip_summed = CHECKSUM_PARTIAL; | 960 | skb->ip_summed = CHECKSUM_PARTIAL; |
961 | __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); | ||
953 | return 0; | 962 | return 0; |
954 | } | 963 | } |
955 | 964 | ||
@@ -1014,7 +1023,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1014 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); | 1023 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); |
1015 | 1024 | ||
1016 | t1 = (struct tcphdr *) skb_push(buff, tot_len); | 1025 | t1 = (struct tcphdr *) skb_push(buff, tot_len); |
1017 | skb_reset_transport_header(skb); | 1026 | skb_reset_transport_header(buff); |
1018 | 1027 | ||
1019 | /* Swap the send and the receive. */ | 1028 | /* Swap the send and the receive. */ |
1020 | memset(t1, 0, sizeof(*t1)); | 1029 | memset(t1, 0, sizeof(*t1)); |
@@ -1046,15 +1055,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1046 | } | 1055 | } |
1047 | #endif | 1056 | #endif |
1048 | 1057 | ||
1049 | buff->csum = csum_partial(t1, tot_len, 0); | ||
1050 | |||
1051 | memset(&fl, 0, sizeof(fl)); | 1058 | memset(&fl, 0, sizeof(fl)); |
1052 | ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); | 1059 | ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); |
1053 | ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); | 1060 | ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); |
1054 | 1061 | ||
1055 | t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, | 1062 | buff->ip_summed = CHECKSUM_PARTIAL; |
1056 | tot_len, IPPROTO_TCP, | 1063 | buff->csum = 0; |
1057 | buff->csum); | 1064 | |
1065 | __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst); | ||
1058 | 1066 | ||
1059 | fl.proto = IPPROTO_TCP; | 1067 | fl.proto = IPPROTO_TCP; |
1060 | fl.oif = inet6_iif(skb); | 1068 | fl.oif = inet6_iif(skb); |
@@ -1069,7 +1077,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1069 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { | 1077 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { |
1070 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { | 1078 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { |
1071 | skb_dst_set(buff, dst); | 1079 | skb_dst_set(buff, dst); |
1072 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); | 1080 | ip6_xmit(ctl_sk, buff, &fl, NULL); |
1073 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); | 1081 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
1074 | if (rst) | 1082 | if (rst) |
1075 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); | 1083 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); |
@@ -1232,12 +1240,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1232 | goto drop_and_free; | 1240 | goto drop_and_free; |
1233 | 1241 | ||
1234 | /* Secret recipe starts with IP addresses */ | 1242 | /* Secret recipe starts with IP addresses */ |
1235 | d = &ipv6_hdr(skb)->daddr.s6_addr32[0]; | 1243 | d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0]; |
1236 | *mess++ ^= *d++; | 1244 | *mess++ ^= *d++; |
1237 | *mess++ ^= *d++; | 1245 | *mess++ ^= *d++; |
1238 | *mess++ ^= *d++; | 1246 | *mess++ ^= *d++; |
1239 | *mess++ ^= *d++; | 1247 | *mess++ ^= *d++; |
1240 | d = &ipv6_hdr(skb)->saddr.s6_addr32[0]; | 1248 | d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0]; |
1241 | *mess++ ^= *d++; | 1249 | *mess++ ^= *d++; |
1242 | *mess++ ^= *d++; | 1250 | *mess++ ^= *d++; |
1243 | *mess++ ^= *d++; | 1251 | *mess++ ^= *d++; |
@@ -1675,6 +1683,7 @@ ipv6_pktoptions: | |||
1675 | static int tcp_v6_rcv(struct sk_buff *skb) | 1683 | static int tcp_v6_rcv(struct sk_buff *skb) |
1676 | { | 1684 | { |
1677 | struct tcphdr *th; | 1685 | struct tcphdr *th; |
1686 | struct ipv6hdr *hdr; | ||
1678 | struct sock *sk; | 1687 | struct sock *sk; |
1679 | int ret; | 1688 | int ret; |
1680 | struct net *net = dev_net(skb->dev); | 1689 | struct net *net = dev_net(skb->dev); |
@@ -1701,12 +1710,13 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1701 | goto bad_packet; | 1710 | goto bad_packet; |
1702 | 1711 | ||
1703 | th = tcp_hdr(skb); | 1712 | th = tcp_hdr(skb); |
1713 | hdr = ipv6_hdr(skb); | ||
1704 | TCP_SKB_CB(skb)->seq = ntohl(th->seq); | 1714 | TCP_SKB_CB(skb)->seq = ntohl(th->seq); |
1705 | TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + | 1715 | TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + |
1706 | skb->len - th->doff*4); | 1716 | skb->len - th->doff*4); |
1707 | TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); | 1717 | TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); |
1708 | TCP_SKB_CB(skb)->when = 0; | 1718 | TCP_SKB_CB(skb)->when = 0; |
1709 | TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); | 1719 | TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr); |
1710 | TCP_SKB_CB(skb)->sacked = 0; | 1720 | TCP_SKB_CB(skb)->sacked = 0; |
1711 | 1721 | ||
1712 | sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); | 1722 | sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); |
@@ -1717,6 +1727,11 @@ process: | |||
1717 | if (sk->sk_state == TCP_TIME_WAIT) | 1727 | if (sk->sk_state == TCP_TIME_WAIT) |
1718 | goto do_time_wait; | 1728 | goto do_time_wait; |
1719 | 1729 | ||
1730 | if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { | ||
1731 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | ||
1732 | goto discard_and_relse; | ||
1733 | } | ||
1734 | |||
1720 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 1735 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1721 | goto discard_and_relse; | 1736 | goto discard_and_relse; |
1722 | 1737 | ||
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index e17bc1dfc1a4..fc3c86a47452 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/ipv6.h> | 29 | #include <net/ipv6.h> |
29 | #include <net/protocol.h> | 30 | #include <net/protocol.h> |
30 | #include <net/xfrm.h> | 31 | #include <net/xfrm.h> |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3c0c9c755c92..79359c8380bc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/skbuff.h> | 36 | #include <linux/skbuff.h> |
37 | #include <linux/slab.h> | ||
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
38 | 39 | ||
39 | #include <net/ndisc.h> | 40 | #include <net/ndisc.h> |
@@ -90,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net, | |||
90 | if (ipv6_addr_any(addr6)) | 91 | if (ipv6_addr_any(addr6)) |
91 | hash = jhash_1word(0, mix); | 92 | hash = jhash_1word(0, mix); |
92 | else if (ipv6_addr_v4mapped(addr6)) | 93 | else if (ipv6_addr_v4mapped(addr6)) |
93 | hash = jhash_1word(addr6->s6_addr32[3], mix); | 94 | hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); |
94 | else | 95 | else |
95 | hash = jhash2(addr6->s6_addr32, 4, mix); | 96 | hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); |
96 | 97 | ||
97 | return hash ^ port; | 98 | return hash ^ port; |
98 | } | 99 | } |
@@ -258,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net, | |||
258 | if (hslot->count < hslot2->count) | 259 | if (hslot->count < hslot2->count) |
259 | goto begin; | 260 | goto begin; |
260 | 261 | ||
261 | result = udp6_lib_lookup2(net, &in6addr_any, sport, | 262 | result = udp6_lib_lookup2(net, saddr, sport, |
262 | daddr, hnum, dif, | 263 | &in6addr_any, hnum, dif, |
263 | hslot2, slot2); | 264 | hslot2, slot2); |
264 | } | 265 | } |
265 | rcu_read_unlock(); | 266 | rcu_read_unlock(); |
@@ -334,6 +335,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
334 | if (flags & MSG_ERRQUEUE) | 335 | if (flags & MSG_ERRQUEUE) |
335 | return ipv6_recv_error(sk, msg, len); | 336 | return ipv6_recv_error(sk, msg, len); |
336 | 337 | ||
338 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | ||
339 | return ipv6_recv_rxpmtu(sk, msg, len); | ||
340 | |||
337 | try_again: | 341 | try_again: |
338 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | 342 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
339 | &peeked, &err); | 343 | &peeked, &err); |
@@ -420,7 +424,7 @@ out: | |||
420 | return err; | 424 | return err; |
421 | 425 | ||
422 | csum_copy_err: | 426 | csum_copy_err: |
423 | lock_sock(sk); | 427 | lock_sock_bh(sk); |
424 | if (!skb_kill_datagram(sk, skb, flags)) { | 428 | if (!skb_kill_datagram(sk, skb, flags)) { |
425 | if (is_udp4) | 429 | if (is_udp4) |
426 | UDP_INC_STATS_USER(sock_net(sk), | 430 | UDP_INC_STATS_USER(sock_net(sk), |
@@ -429,7 +433,7 @@ csum_copy_err: | |||
429 | UDP6_INC_STATS_USER(sock_net(sk), | 433 | UDP6_INC_STATS_USER(sock_net(sk), |
430 | UDP_MIB_INERRORS, is_udplite); | 434 | UDP_MIB_INERRORS, is_udplite); |
431 | } | 435 | } |
432 | release_sock(sk); | 436 | unlock_sock_bh(sk); |
433 | 437 | ||
434 | if (flags & MSG_DONTWAIT) | 438 | if (flags & MSG_DONTWAIT) |
435 | return -EAGAIN; | 439 | return -EAGAIN; |
@@ -510,7 +514,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
510 | goto drop; | 514 | goto drop; |
511 | } | 515 | } |
512 | 516 | ||
513 | if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) { | 517 | if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) { |
514 | /* Note that an ENOMEM error is charged twice */ | 518 | /* Note that an ENOMEM error is charged twice */ |
515 | if (rc == -ENOMEM) | 519 | if (rc == -ENOMEM) |
516 | UDP6_INC_STATS_BH(sock_net(sk), | 520 | UDP6_INC_STATS_BH(sock_net(sk), |
@@ -580,6 +584,10 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
580 | 584 | ||
581 | sk = stack[i]; | 585 | sk = stack[i]; |
582 | if (skb1) { | 586 | if (skb1) { |
587 | if (sk_rcvqueues_full(sk, skb)) { | ||
588 | kfree_skb(skb1); | ||
589 | goto drop; | ||
590 | } | ||
583 | bh_lock_sock(sk); | 591 | bh_lock_sock(sk); |
584 | if (!sock_owned_by_user(sk)) | 592 | if (!sock_owned_by_user(sk)) |
585 | udpv6_queue_rcv_skb(sk, skb1); | 593 | udpv6_queue_rcv_skb(sk, skb1); |
@@ -755,6 +763,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
755 | 763 | ||
756 | /* deliver */ | 764 | /* deliver */ |
757 | 765 | ||
766 | if (sk_rcvqueues_full(sk, skb)) { | ||
767 | sock_put(sk); | ||
768 | goto discard; | ||
769 | } | ||
758 | bh_lock_sock(sk); | 770 | bh_lock_sock(sk); |
759 | if (!sock_owned_by_user(sk)) | 771 | if (!sock_owned_by_user(sk)) |
760 | udpv6_queue_rcv_skb(sk, skb); | 772 | udpv6_queue_rcv_skb(sk, skb); |
@@ -918,6 +930,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
918 | int ulen = len; | 930 | int ulen = len; |
919 | int hlimit = -1; | 931 | int hlimit = -1; |
920 | int tclass = -1; | 932 | int tclass = -1; |
933 | int dontfrag = -1; | ||
921 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; | 934 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; |
922 | int err; | 935 | int err; |
923 | int connected = 0; | 936 | int connected = 0; |
@@ -1048,7 +1061,8 @@ do_udp_sendmsg: | |||
1048 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 1061 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
1049 | opt->tot_len = sizeof(*opt); | 1062 | opt->tot_len = sizeof(*opt); |
1050 | 1063 | ||
1051 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); | 1064 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, |
1065 | &tclass, &dontfrag); | ||
1052 | if (err < 0) { | 1066 | if (err < 0) { |
1053 | fl6_sock_release(flowlabel); | 1067 | fl6_sock_release(flowlabel); |
1054 | return err; | 1068 | return err; |
@@ -1119,6 +1133,9 @@ do_udp_sendmsg: | |||
1119 | if (tclass < 0) | 1133 | if (tclass < 0) |
1120 | tclass = np->tclass; | 1134 | tclass = np->tclass; |
1121 | 1135 | ||
1136 | if (dontfrag < 0) | ||
1137 | dontfrag = np->dontfrag; | ||
1138 | |||
1122 | if (msg->msg_flags&MSG_CONFIRM) | 1139 | if (msg->msg_flags&MSG_CONFIRM) |
1123 | goto do_confirm; | 1140 | goto do_confirm; |
1124 | back_from_confirm: | 1141 | back_from_confirm: |
@@ -1142,7 +1159,7 @@ do_append_data: | |||
1142 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, | 1159 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, |
1143 | sizeof(struct udphdr), hlimit, tclass, opt, &fl, | 1160 | sizeof(struct udphdr), hlimit, tclass, opt, &fl, |
1144 | (struct rt6_info*)dst, | 1161 | (struct rt6_info*)dst, |
1145 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); | 1162 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); |
1146 | if (err) | 1163 | if (err) |
1147 | udp_v6_flush_pending_frames(sk); | 1164 | udp_v6_flush_pending_frames(sk); |
1148 | else if (!corkreq) | 1165 | else if (!corkreq) |
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 3927832227b9..b809812c8d30 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> | 5 | * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/gfp.h> | ||
8 | #include <linux/init.h> | 9 | #include <linux/init.h> |
9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ae181651c75a..4a0e77e14468 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net, | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct dst_entry * | ||
71 | __xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | ||
72 | { | ||
73 | struct dst_entry *dst; | ||
74 | |||
75 | /* Still not clear if we should set fl->fl6_{src,dst}... */ | ||
76 | read_lock_bh(&policy->lock); | ||
77 | for (dst = policy->bundles; dst; dst = dst->next) { | ||
78 | struct xfrm_dst *xdst = (struct xfrm_dst*)dst; | ||
79 | struct in6_addr fl_dst_prefix, fl_src_prefix; | ||
80 | |||
81 | ipv6_addr_prefix(&fl_dst_prefix, | ||
82 | &fl->fl6_dst, | ||
83 | xdst->u.rt6.rt6i_dst.plen); | ||
84 | ipv6_addr_prefix(&fl_src_prefix, | ||
85 | &fl->fl6_src, | ||
86 | xdst->u.rt6.rt6i_src.plen); | ||
87 | if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && | ||
88 | ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && | ||
89 | xfrm_bundle_ok(policy, xdst, fl, AF_INET6, | ||
90 | (xdst->u.rt6.rt6i_dst.plen != 128 || | ||
91 | xdst->u.rt6.rt6i_src.plen != 128))) { | ||
92 | dst_clone(dst); | ||
93 | break; | ||
94 | } | ||
95 | } | ||
96 | read_unlock_bh(&policy->lock); | ||
97 | return dst; | ||
98 | } | ||
99 | |||
100 | static int xfrm6_get_tos(struct flowi *fl) | 70 | static int xfrm6_get_tos(struct flowi *fl) |
101 | { | 71 | { |
102 | return 0; | 72 | return 0; |
@@ -124,7 +94,7 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
124 | xdst->u.dst.dev = dev; | 94 | xdst->u.dst.dev = dev; |
125 | dev_hold(dev); | 95 | dev_hold(dev); |
126 | 96 | ||
127 | xdst->u.rt6.rt6i_idev = in6_dev_get(rt->u.dst.dev); | 97 | xdst->u.rt6.rt6i_idev = in6_dev_get(dev); |
128 | if (!xdst->u.rt6.rt6i_idev) | 98 | if (!xdst->u.rt6.rt6i_idev) |
129 | return -ENODEV; | 99 | return -ENODEV; |
130 | 100 | ||
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { | |||
291 | .dst_ops = &xfrm6_dst_ops, | 261 | .dst_ops = &xfrm6_dst_ops, |
292 | .dst_lookup = xfrm6_dst_lookup, | 262 | .dst_lookup = xfrm6_dst_lookup, |
293 | .get_saddr = xfrm6_get_saddr, | 263 | .get_saddr = xfrm6_get_saddr, |
294 | .find_bundle = __xfrm6_find_bundle, | ||
295 | .decode_session = _decode_session6, | 264 | .decode_session = _decode_session6, |
296 | .get_tos = xfrm6_get_tos, | 265 | .get_tos = xfrm6_get_tos, |
297 | .init_path = xfrm6_init_path, | 266 | .init_path = xfrm6_init_path, |
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index fa85a7d22dc4..2ce3a8278f26 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/xfrm.h> | 25 | #include <linux/xfrm.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/rculist.h> | 27 | #include <linux/rculist.h> |
27 | #include <net/ip.h> | 28 | #include <net/ip.h> |
28 | #include <net/xfrm.h> | 29 | #include <net/xfrm.h> |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index f9759b54a6de..da3d21c41d90 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/net.h> | 40 | #include <linux/net.h> |
41 | #include <linux/netdevice.h> | 41 | #include <linux/netdevice.h> |
42 | #include <linux/uio.h> | 42 | #include <linux/uio.h> |
43 | #include <linux/slab.h> | ||
43 | #include <linux/skbuff.h> | 44 | #include <linux/skbuff.h> |
44 | #include <linux/smp_lock.h> | 45 | #include <linux/smp_lock.h> |
45 | #include <linux/socket.h> | 46 | #include <linux/socket.h> |
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c index e16c11423527..30f4519b092f 100644 --- a/net/ipx/ipx_route.c +++ b/net/ipx/ipx_route.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
11 | #include <linux/route.h> | 11 | #include <linux/route.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
13 | 14 | ||
14 | #include <net/ipx.h> | 15 | #include <net/ipx.h> |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 10093aab6173..79986a674f6e 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/smp_lock.h> | 48 | #include <linux/smp_lock.h> |
49 | #include <linux/socket.h> | 49 | #include <linux/socket.h> |
50 | #include <linux/sockios.h> | 50 | #include <linux/sockios.h> |
51 | #include <linux/slab.h> | ||
51 | #include <linux/init.h> | 52 | #include <linux/init.h> |
52 | #include <linux/net.h> | 53 | #include <linux/net.h> |
53 | #include <linux/irda.h> | 54 | #include <linux/irda.h> |
@@ -346,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
346 | self->tx_flow = flow; | 347 | self->tx_flow = flow; |
347 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", | 348 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", |
348 | __func__); | 349 | __func__); |
349 | wake_up_interruptible(sk->sk_sleep); | 350 | wake_up_interruptible(sk_sleep(sk)); |
350 | break; | 351 | break; |
351 | default: | 352 | default: |
352 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); | 353 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); |
@@ -899,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
899 | if (flags & O_NONBLOCK) | 900 | if (flags & O_NONBLOCK) |
900 | goto out; | 901 | goto out; |
901 | 902 | ||
902 | err = wait_event_interruptible(*(sk->sk_sleep), | 903 | err = wait_event_interruptible(*(sk_sleep(sk)), |
903 | skb_peek(&sk->sk_receive_queue)); | 904 | skb_peek(&sk->sk_receive_queue)); |
904 | if (err) | 905 | if (err) |
905 | goto out; | 906 | goto out; |
@@ -1065,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1065 | goto out; | 1066 | goto out; |
1066 | 1067 | ||
1067 | err = -ERESTARTSYS; | 1068 | err = -ERESTARTSYS; |
1068 | if (wait_event_interruptible(*(sk->sk_sleep), | 1069 | if (wait_event_interruptible(*(sk_sleep(sk)), |
1069 | (sk->sk_state != TCP_SYN_SENT))) | 1070 | (sk->sk_state != TCP_SYN_SENT))) |
1070 | goto out; | 1071 | goto out; |
1071 | 1072 | ||
@@ -1317,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1317 | 1318 | ||
1318 | /* Check if IrTTP is wants us to slow down */ | 1319 | /* Check if IrTTP is wants us to slow down */ |
1319 | 1320 | ||
1320 | if (wait_event_interruptible(*(sk->sk_sleep), | 1321 | if (wait_event_interruptible(*(sk_sleep(sk)), |
1321 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { | 1322 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { |
1322 | err = -ERESTARTSYS; | 1323 | err = -ERESTARTSYS; |
1323 | goto out; | 1324 | goto out; |
@@ -1476,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1476 | if (copied >= target) | 1477 | if (copied >= target) |
1477 | break; | 1478 | break; |
1478 | 1479 | ||
1479 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1480 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1480 | 1481 | ||
1481 | /* | 1482 | /* |
1482 | * POSIX 1003.1g mandates this order. | 1483 | * POSIX 1003.1g mandates this order. |
@@ -1496,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1496 | /* Wait process until data arrives */ | 1497 | /* Wait process until data arrives */ |
1497 | schedule(); | 1498 | schedule(); |
1498 | 1499 | ||
1499 | finish_wait(sk->sk_sleep, &wait); | 1500 | finish_wait(sk_sleep(sk), &wait); |
1500 | 1501 | ||
1501 | if (err) | 1502 | if (err) |
1502 | goto out; | 1503 | goto out; |
@@ -1786,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1786 | IRDA_DEBUG(4, "%s()\n", __func__); | 1787 | IRDA_DEBUG(4, "%s()\n", __func__); |
1787 | 1788 | ||
1788 | lock_kernel(); | 1789 | lock_kernel(); |
1789 | poll_wait(file, sk->sk_sleep, wait); | 1790 | poll_wait(file, sk_sleep(sk), wait); |
1790 | mask = 0; | 1791 | mask = 0; |
1791 | 1792 | ||
1792 | /* Exceptional events? */ | 1793 | /* Exceptional events? */ |
diff --git a/net/irda/discovery.c b/net/irda/discovery.c index a6f99b5a1499..c1c8ae939126 100644 --- a/net/irda/discovery.c +++ b/net/irda/discovery.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/socket.h> | 34 | #include <linux/socket.h> |
35 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
36 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
37 | #include <linux/slab.h> | ||
37 | 38 | ||
38 | #include <net/irda/irda.h> | 39 | #include <net/irda/irda.h> |
39 | #include <net/irda/irlmp.h> | 40 | #include <net/irda/irlmp.h> |
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c index 018c92941aba..e97082017f4f 100644 --- a/net/irda/ircomm/ircomm_core.c +++ b/net/irda/ircomm/ircomm_core.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
34 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/slab.h> | ||
36 | 37 | ||
37 | #include <net/irda/irda.h> | 38 | #include <net/irda/irda.h> |
38 | #include <net/irda/irmod.h> | 39 | #include <net/irda/irmod.h> |
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c index 7ba96618660e..08fb54dc8c41 100644 --- a/net/irda/ircomm/ircomm_lmp.c +++ b/net/irda/ircomm/ircomm_lmp.c | |||
@@ -31,6 +31,7 @@ | |||
31 | ********************************************************************/ | 31 | ********************************************************************/ |
32 | 32 | ||
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/gfp.h> | ||
34 | 35 | ||
35 | #include <net/irda/irda.h> | 36 | #include <net/irda/irda.h> |
36 | #include <net/irda/irlmp.h> | 37 | #include <net/irda/irlmp.h> |
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index d57aefd9fe77..8b915f3ac3b9 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c | |||
@@ -28,6 +28,7 @@ | |||
28 | * | 28 | * |
29 | ********************************************************************/ | 29 | ********************************************************************/ |
30 | 30 | ||
31 | #include <linux/gfp.h> | ||
31 | #include <linux/workqueue.h> | 32 | #include <linux/workqueue.h> |
32 | #include <linux/interrupt.h> | 33 | #include <linux/interrupt.h> |
33 | 34 | ||
@@ -474,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) | |||
474 | /* Check if any of the settings have changed */ | 475 | /* Check if any of the settings have changed */ |
475 | if (dce & 0x0f) { | 476 | if (dce & 0x0f) { |
476 | if (dce & IRCOMM_DELTA_CTS) { | 477 | if (dce & IRCOMM_DELTA_CTS) { |
477 | IRDA_DEBUG(2, "%s(), CTS \n", __func__ ); | 478 | IRDA_DEBUG(2, "%s(), CTS\n", __func__ ); |
478 | } | 479 | } |
479 | } | 480 | } |
480 | 481 | ||
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 8b85d774e47f..faa82ca2dfdc 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
35 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
36 | #include <linux/slab.h> | ||
36 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
37 | #include <linux/seq_file.h> | 38 | #include <linux/seq_file.h> |
38 | #include <linux/termios.h> | 39 | #include <linux/termios.h> |
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index bf92e1473447..25cc2e695158 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/tty.h> | 41 | #include <linux/tty.h> |
42 | #include <linux/kmod.h> | 42 | #include <linux/kmod.h> |
43 | #include <linux/spinlock.h> | 43 | #include <linux/spinlock.h> |
44 | #include <linux/slab.h> | ||
44 | 45 | ||
45 | #include <asm/ioctls.h> | 46 | #include <asm/ioctls.h> |
46 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 294e34d3517c..79a1e5a23e10 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/slab.h> | ||
34 | 35 | ||
35 | #include <asm/byteorder.h> | 36 | #include <asm/byteorder.h> |
36 | #include <asm/unaligned.h> | 37 | #include <asm/unaligned.h> |
diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c index a301cbd93785..703774e29e32 100644 --- a/net/irda/iriap_event.c +++ b/net/irda/iriap_event.c | |||
@@ -24,6 +24,8 @@ | |||
24 | * | 24 | * |
25 | ********************************************************************/ | 25 | ********************************************************************/ |
26 | 26 | ||
27 | #include <linux/slab.h> | ||
28 | |||
27 | #include <net/irda/irda.h> | 29 | #include <net/irda/irda.h> |
28 | #include <net/irda/irlmp.h> | 30 | #include <net/irda/irlmp.h> |
29 | #include <net/irda/iriap.h> | 31 | #include <net/irda/iriap.h> |
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index 99ebb96f1386..f07ed9fd5792 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * | 22 | * |
23 | ********************************************************************/ | 23 | ********************************************************************/ |
24 | 24 | ||
25 | #include <linux/slab.h> | ||
25 | #include <linux/string.h> | 26 | #include <linux/string.h> |
26 | #include <linux/socket.h> | 27 | #include <linux/socket.h> |
27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c index 42f7d960d055..7ed3af957935 100644 --- a/net/irda/irlan/irlan_client.c +++ b/net/irda/irlan/irlan_client.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/slab.h> | ||
31 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/netdevice.h> | 34 | #include <linux/netdevice.h> |
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index e486dc89ea59..a788f9e9427d 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/gfp.h> | ||
30 | #include <linux/init.h> | 31 | #include <linux/init.h> |
31 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
32 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c index 3f81f81b2dfa..5cf5e6c872bb 100644 --- a/net/irda/irlan/irlan_provider.c +++ b/net/irda/irlan/irlan_provider.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/init.h> | 34 | #include <linux/init.h> |
35 | #include <linux/random.h> | 35 | #include <linux/random.h> |
36 | #include <linux/bitops.h> | 36 | #include <linux/bitops.h> |
37 | #include <linux/slab.h> | ||
37 | 38 | ||
38 | #include <asm/system.h> | 39 | #include <asm/system.h> |
39 | #include <asm/byteorder.h> | 40 | #include <asm/byteorder.h> |
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index 94a9884d7146..d434c8880745 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
32 | #include <linux/slab.h> | ||
32 | 33 | ||
33 | #include <net/irda/irda.h> | 34 | #include <net/irda/irda.h> |
34 | #include <net/irda/irlap_event.h> | 35 | #include <net/irda/irlap_event.h> |
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 7af2e74deda8..688222cbf55b 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/if_ether.h> | 29 | #include <linux/if_ether.h> |
30 | #include <linux/netdevice.h> | 30 | #include <linux/netdevice.h> |
31 | #include <linux/irda.h> | 31 | #include <linux/irda.h> |
32 | #include <linux/slab.h> | ||
32 | 33 | ||
33 | #include <net/pkt_sched.h> | 34 | #include <net/pkt_sched.h> |
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c index b26dee784aba..df18ab4b6c5e 100644 --- a/net/irda/irnet/irnet_irda.c +++ b/net/irda/irnet/irnet_irda.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include "irnet_irda.h" /* Private header */ | 11 | #include "irnet_irda.h" /* Private header */ |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/seq_file.h> | 13 | #include <linux/seq_file.h> |
14 | #include <linux/slab.h> | ||
14 | #include <asm/unaligned.h> | 15 | #include <asm/unaligned.h> |
15 | 16 | ||
16 | /* | 17 | /* |
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 6b3602de359a..6a1a202710c5 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/smp_lock.h> | 18 | #include <linux/smp_lock.h> |
18 | #include "irnet_ppp.h" /* Private header */ | 19 | #include "irnet_ppp.h" /* Private header */ |
19 | /* Please put other headers in irnet.h - Thanks */ | 20 | /* Please put other headers in irnet.h - Thanks */ |
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c index 69b5b75f5431..6c7c4b92e4f8 100644 --- a/net/irda/irnetlink.c +++ b/net/irda/irnetlink.c | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/socket.h> | 16 | #include <linux/socket.h> |
17 | #include <linux/irda.h> | 17 | #include <linux/irda.h> |
18 | #include <linux/gfp.h> | ||
18 | #include <net/net_namespace.h> | 19 | #include <net/net_namespace.h> |
19 | #include <net/sock.h> | 20 | #include <net/sock.h> |
20 | #include <net/irda/irda.h> | 21 | #include <net/irda/irda.h> |
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index ba01938becb5..849aaf0dabb5 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c | |||
@@ -192,6 +192,7 @@ | |||
192 | * Jean II | 192 | * Jean II |
193 | */ | 193 | */ |
194 | #include <linux/module.h> | 194 | #include <linux/module.h> |
195 | #include <linux/slab.h> | ||
195 | 196 | ||
196 | #include <net/irda/irda.h> | 197 | #include <net/irda/irda.h> |
197 | #include <net/irda/irqueue.h> | 198 | #include <net/irda/irqueue.h> |
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 9cb79f95bf63..47db1d8a0d92 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/fs.h> | 29 | #include <linux/fs.h> |
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/slab.h> | ||
31 | 32 | ||
32 | #include <asm/byteorder.h> | 33 | #include <asm/byteorder.h> |
33 | #include <asm/unaligned.h> | 34 | #include <asm/unaligned.h> |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index c18286a2167b..8be324fe08b9 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -59,7 +59,7 @@ do { \ | |||
59 | DEFINE_WAIT(__wait); \ | 59 | DEFINE_WAIT(__wait); \ |
60 | long __timeo = timeo; \ | 60 | long __timeo = timeo; \ |
61 | ret = 0; \ | 61 | ret = 0; \ |
62 | prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \ | 62 | prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \ |
63 | while (!(condition)) { \ | 63 | while (!(condition)) { \ |
64 | if (!__timeo) { \ | 64 | if (!__timeo) { \ |
65 | ret = -EAGAIN; \ | 65 | ret = -EAGAIN; \ |
@@ -76,7 +76,7 @@ do { \ | |||
76 | if (ret) \ | 76 | if (ret) \ |
77 | break; \ | 77 | break; \ |
78 | } \ | 78 | } \ |
79 | finish_wait(sk->sk_sleep, &__wait); \ | 79 | finish_wait(sk_sleep(sk), &__wait); \ |
80 | } while (0) | 80 | } while (0) |
81 | 81 | ||
82 | #define iucv_sock_wait(sk, condition, timeo) \ | 82 | #define iucv_sock_wait(sk, condition, timeo) \ |
@@ -305,11 +305,14 @@ static inline int iucv_below_msglim(struct sock *sk) | |||
305 | */ | 305 | */ |
306 | static void iucv_sock_wake_msglim(struct sock *sk) | 306 | static void iucv_sock_wake_msglim(struct sock *sk) |
307 | { | 307 | { |
308 | read_lock(&sk->sk_callback_lock); | 308 | struct socket_wq *wq; |
309 | if (sk_has_sleeper(sk)) | 309 | |
310 | wake_up_interruptible_all(sk->sk_sleep); | 310 | rcu_read_lock(); |
311 | wq = rcu_dereference(sk->sk_wq); | ||
312 | if (wq_has_sleeper(wq)) | ||
313 | wake_up_interruptible_all(&wq->wait); | ||
311 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 314 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
312 | read_unlock(&sk->sk_callback_lock); | 315 | rcu_read_unlock(); |
313 | } | 316 | } |
314 | 317 | ||
315 | /* Timers */ | 318 | /* Timers */ |
@@ -795,7 +798,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock, | |||
795 | timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); | 798 | timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); |
796 | 799 | ||
797 | /* Wait for an incoming connection */ | 800 | /* Wait for an incoming connection */ |
798 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 801 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
799 | while (!(nsk = iucv_accept_dequeue(sk, newsock))) { | 802 | while (!(nsk = iucv_accept_dequeue(sk, newsock))) { |
800 | set_current_state(TASK_INTERRUPTIBLE); | 803 | set_current_state(TASK_INTERRUPTIBLE); |
801 | if (!timeo) { | 804 | if (!timeo) { |
@@ -819,7 +822,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock, | |||
819 | } | 822 | } |
820 | 823 | ||
821 | set_current_state(TASK_RUNNING); | 824 | set_current_state(TASK_RUNNING); |
822 | remove_wait_queue(sk->sk_sleep, &wait); | 825 | remove_wait_queue(sk_sleep(sk), &wait); |
823 | 826 | ||
824 | if (err) | 827 | if (err) |
825 | goto done; | 828 | goto done; |
@@ -1269,7 +1272,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, | |||
1269 | struct sock *sk = sock->sk; | 1272 | struct sock *sk = sock->sk; |
1270 | unsigned int mask = 0; | 1273 | unsigned int mask = 0; |
1271 | 1274 | ||
1272 | sock_poll_wait(file, sk->sk_sleep, wait); | 1275 | sock_poll_wait(file, sk_sleep(sk), wait); |
1273 | 1276 | ||
1274 | if (sk->sk_state == IUCV_LISTEN) | 1277 | if (sk->sk_state == IUCV_LISTEN) |
1275 | return iucv_accept_poll(sk); | 1278 | return iucv_accept_poll(sk); |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 368707882647..ba9a3fcc2fed 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/in6.h> | 26 | #include <linux/in6.h> |
27 | #include <linux/proc_fs.h> | 27 | #include <linux/proc_fs.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/slab.h> | ||
29 | #include <net/net_namespace.h> | 30 | #include <net/net_namespace.h> |
30 | #include <net/netns/generic.h> | 31 | #include <net/netns/generic.h> |
31 | #include <net/xfrm.h> | 32 | #include <net/xfrm.h> |
@@ -2129,10 +2130,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c | |||
2129 | int err; | 2130 | int err; |
2130 | 2131 | ||
2131 | out_skb = pfkey_xfrm_policy2msg_prep(xp); | 2132 | out_skb = pfkey_xfrm_policy2msg_prep(xp); |
2132 | if (IS_ERR(out_skb)) { | 2133 | if (IS_ERR(out_skb)) |
2133 | err = PTR_ERR(out_skb); | 2134 | return PTR_ERR(out_skb); |
2134 | goto out; | 2135 | |
2135 | } | ||
2136 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); | 2136 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); |
2137 | if (err < 0) | 2137 | if (err < 0) |
2138 | return err; | 2138 | return err; |
@@ -2148,7 +2148,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c | |||
2148 | out_hdr->sadb_msg_seq = c->seq; | 2148 | out_hdr->sadb_msg_seq = c->seq; |
2149 | out_hdr->sadb_msg_pid = c->pid; | 2149 | out_hdr->sadb_msg_pid = c->pid; |
2150 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); | 2150 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); |
2151 | out: | ||
2152 | return 0; | 2151 | return 0; |
2153 | 2152 | ||
2154 | } | 2153 | } |
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig new file mode 100644 index 000000000000..4b1e71751e10 --- /dev/null +++ b/net/l2tp/Kconfig | |||
@@ -0,0 +1,107 @@ | |||
1 | # | ||
2 | # Layer Two Tunneling Protocol (L2TP) | ||
3 | # | ||
4 | |||
5 | menuconfig L2TP | ||
6 | tristate "Layer Two Tunneling Protocol (L2TP)" | ||
7 | depends on INET | ||
8 | ---help--- | ||
9 | Layer Two Tunneling Protocol | ||
10 | |||
11 | From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>. | ||
12 | |||
13 | L2TP facilitates the tunneling of packets across an | ||
14 | intervening network in a way that is as transparent as | ||
15 | possible to both end-users and applications. | ||
16 | |||
17 | L2TP is often used to tunnel PPP traffic over IP | ||
18 | tunnels. One IP tunnel may carry thousands of individual PPP | ||
19 | connections. L2TP is also used as a VPN protocol, popular | ||
20 | with home workers to connect to their offices. | ||
21 | |||
22 | L2TPv3 allows other protocols as well as PPP to be carried | ||
23 | over L2TP tunnels. L2TPv3 is defined in RFC 3931 | ||
24 | <http://www.ietf.org/rfc/rfc3931.txt>. | ||
25 | |||
26 | The kernel component handles only L2TP data packets: a | ||
27 | userland daemon handles the L2TP control protocol (tunnel | ||
28 | and session setup). One such daemon is OpenL2TP | ||
29 | (http://openl2tp.org/). | ||
30 | |||
31 | If you don't need L2TP, say N. To compile all L2TP code as | ||
32 | modules, choose M here. | ||
33 | |||
34 | config L2TP_DEBUGFS | ||
35 | tristate "L2TP debugfs support" | ||
36 | depends on L2TP && DEBUG_FS | ||
37 | help | ||
38 | Support for l2tp directory in debugfs filesystem. This may be | ||
39 | used to dump internal state of the l2tp drivers for problem | ||
40 | analysis. | ||
41 | |||
42 | If unsure, say 'Y'. | ||
43 | |||
44 | To compile this driver as a module, choose M here. The module | ||
45 | will be called l2tp_debugfs. | ||
46 | |||
47 | config L2TP_V3 | ||
48 | bool "L2TPv3 support (EXPERIMENTAL)" | ||
49 | depends on EXPERIMENTAL && L2TP | ||
50 | help | ||
51 | Layer Two Tunneling Protocol Version 3 | ||
52 | |||
53 | From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>. | ||
54 | |||
55 | The Layer Two Tunneling Protocol (L2TP) provides a dynamic | ||
56 | mechanism for tunneling Layer 2 (L2) "circuits" across a | ||
57 | packet-oriented data network (e.g., over IP). L2TP, as | ||
58 | originally defined in RFC 2661, is a standard method for | ||
59 | tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions. | ||
60 | L2TP has since been adopted for tunneling a number of other | ||
61 | L2 protocols, including ATM, Frame Relay, HDLC and even raw | ||
62 | ethernet frames. | ||
63 | |||
64 | If you are connecting to L2TPv3 equipment, or you want to | ||
65 | tunnel raw ethernet frames using L2TP, say Y here. If | ||
66 | unsure, say N. | ||
67 | |||
68 | config L2TP_IP | ||
69 | tristate "L2TP IP encapsulation for L2TPv3" | ||
70 | depends on L2TP_V3 | ||
71 | help | ||
72 | Support for L2TP-over-IP socket family. | ||
73 | |||
74 | The L2TPv3 protocol defines two possible encapsulations for | ||
75 | L2TP frames, namely UDP and plain IP (without UDP). This | ||
76 | driver provides a new L2TPIP socket family with which | ||
77 | userspace L2TPv3 daemons may create L2TP/IP tunnel sockets | ||
78 | when UDP encapsulation is not required. When L2TP is carried | ||
79 | in IP packets, it uses IP protocol number 115, so this protocol | ||
80 | must be enabled in firewalls. | ||
81 | |||
82 | To compile this driver as a module, choose M here. The module | ||
83 | will be called l2tp_ip. | ||
84 | |||
85 | config L2TP_ETH | ||
86 | tristate "L2TP ethernet pseudowire support for L2TPv3" | ||
87 | depends on L2TP_V3 | ||
88 | help | ||
89 | Support for carrying raw ethernet frames over L2TPv3. | ||
90 | |||
91 | From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>. | ||
92 | |||
93 | The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be | ||
94 | used as a control protocol and for data encapsulation to set | ||
95 | up Pseudowires for transporting layer 2 Packet Data Units | ||
96 | across an IP network [RFC3931]. | ||
97 | |||
98 | This driver provides an ethernet virtual interface for each | ||
99 | L2TP ethernet pseudowire instance. Standard Linux tools may | ||
100 | be used to assign an IP address to the local virtual | ||
101 | interface, or add the interface to a bridge. | ||
102 | |||
103 | If you are using L2TPv3, you will almost certainly want to | ||
104 | enable this option. | ||
105 | |||
106 | To compile this driver as a module, choose M here. The module | ||
107 | will be called l2tp_eth. | ||
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile new file mode 100644 index 000000000000..110e7bc2de5e --- /dev/null +++ b/net/l2tp/Makefile | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # Makefile for the L2TP. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_L2TP) += l2tp_core.o | ||
6 | |||
7 | # Build l2tp as modules if L2TP is M | ||
8 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o | ||
9 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o | ||
10 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o | ||
11 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o | ||
12 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c new file mode 100644 index 000000000000..1712af1c7b3f --- /dev/null +++ b/net/l2tp/l2tp_core.c | |||
@@ -0,0 +1,1666 @@ | |||
1 | /* | ||
2 | * L2TP core. | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This file contains some code of the original L2TPv2 pppol2tp | ||
7 | * driver, which has the following copyright: | ||
8 | * | ||
9 | * Authors: Martijn van Oosterhout <kleptog@svana.org> | ||
10 | * James Chapman (jchapman@katalix.com) | ||
11 | * Contributors: | ||
12 | * Michal Ostrowski <mostrows@speakeasy.net> | ||
13 | * Arnaldo Carvalho de Melo <acme@xconectiva.com.br> | ||
14 | * David S. Miller (davem@redhat.com) | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License version 2 as | ||
18 | * published by the Free Software Foundation. | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/rculist.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/kthread.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/errno.h> | ||
33 | #include <linux/jiffies.h> | ||
34 | |||
35 | #include <linux/netdevice.h> | ||
36 | #include <linux/net.h> | ||
37 | #include <linux/inetdevice.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | #include <linux/udp.h> | ||
43 | #include <linux/l2tp.h> | ||
44 | #include <linux/hash.h> | ||
45 | #include <linux/sort.h> | ||
46 | #include <linux/file.h> | ||
47 | #include <linux/nsproxy.h> | ||
48 | #include <net/net_namespace.h> | ||
49 | #include <net/netns/generic.h> | ||
50 | #include <net/dst.h> | ||
51 | #include <net/ip.h> | ||
52 | #include <net/udp.h> | ||
53 | #include <net/inet_common.h> | ||
54 | #include <net/xfrm.h> | ||
55 | #include <net/protocol.h> | ||
56 | |||
57 | #include <asm/byteorder.h> | ||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | #include "l2tp_core.h" | ||
61 | |||
62 | #define L2TP_DRV_VERSION "V2.0" | ||
63 | |||
64 | /* L2TP header constants */ | ||
65 | #define L2TP_HDRFLAG_T 0x8000 | ||
66 | #define L2TP_HDRFLAG_L 0x4000 | ||
67 | #define L2TP_HDRFLAG_S 0x0800 | ||
68 | #define L2TP_HDRFLAG_O 0x0200 | ||
69 | #define L2TP_HDRFLAG_P 0x0100 | ||
70 | |||
71 | #define L2TP_HDR_VER_MASK 0x000F | ||
72 | #define L2TP_HDR_VER_2 0x0002 | ||
73 | #define L2TP_HDR_VER_3 0x0003 | ||
74 | |||
75 | /* L2TPv3 default L2-specific sublayer */ | ||
76 | #define L2TP_SLFLAG_S 0x40000000 | ||
77 | #define L2TP_SL_SEQ_MASK 0x00ffffff | ||
78 | |||
79 | #define L2TP_HDR_SIZE_SEQ 10 | ||
80 | #define L2TP_HDR_SIZE_NOSEQ 6 | ||
81 | |||
82 | /* Default trace flags */ | ||
83 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 | ||
84 | |||
85 | #define PRINTK(_mask, _type, _lvl, _fmt, args...) \ | ||
86 | do { \ | ||
87 | if ((_mask) & (_type)) \ | ||
88 | printk(_lvl "L2TP: " _fmt, ##args); \ | ||
89 | } while (0) | ||
90 | |||
91 | /* Private data stored for received packets in the skb. | ||
92 | */ | ||
93 | struct l2tp_skb_cb { | ||
94 | u32 ns; | ||
95 | u16 has_seq; | ||
96 | u16 length; | ||
97 | unsigned long expires; | ||
98 | }; | ||
99 | |||
100 | #define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)]) | ||
101 | |||
102 | static atomic_t l2tp_tunnel_count; | ||
103 | static atomic_t l2tp_session_count; | ||
104 | |||
105 | /* per-net private data for this module */ | ||
106 | static unsigned int l2tp_net_id; | ||
107 | struct l2tp_net { | ||
108 | struct list_head l2tp_tunnel_list; | ||
109 | spinlock_t l2tp_tunnel_list_lock; | ||
110 | struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; | ||
111 | spinlock_t l2tp_session_hlist_lock; | ||
112 | }; | ||
113 | |||
114 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | ||
115 | { | ||
116 | BUG_ON(!net); | ||
117 | |||
118 | return net_generic(net, l2tp_net_id); | ||
119 | } | ||
120 | |||
121 | /* Session hash global list for L2TPv3. | ||
122 | * The session_id SHOULD be random according to RFC3931, but several | ||
123 | * L2TP implementations use incrementing session_ids. So we do a real | ||
124 | * hash on the session_id, rather than a simple bitmask. | ||
125 | */ | ||
126 | static inline struct hlist_head * | ||
127 | l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | ||
128 | { | ||
129 | return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)]; | ||
130 | |||
131 | } | ||
132 | |||
133 | /* Lookup a session by id in the global session list | ||
134 | */ | ||
135 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | ||
136 | { | ||
137 | struct l2tp_net *pn = l2tp_pernet(net); | ||
138 | struct hlist_head *session_list = | ||
139 | l2tp_session_id_hash_2(pn, session_id); | ||
140 | struct l2tp_session *session; | ||
141 | struct hlist_node *walk; | ||
142 | |||
143 | rcu_read_lock_bh(); | ||
144 | hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) { | ||
145 | if (session->session_id == session_id) { | ||
146 | rcu_read_unlock_bh(); | ||
147 | return session; | ||
148 | } | ||
149 | } | ||
150 | rcu_read_unlock_bh(); | ||
151 | |||
152 | return NULL; | ||
153 | } | ||
154 | |||
155 | /* Session hash list. | ||
156 | * The session_id SHOULD be random according to RFC2661, but several | ||
157 | * L2TP implementations (Cisco and Microsoft) use incrementing | ||
158 | * session_ids. So we do a real hash on the session_id, rather than a | ||
159 | * simple bitmask. | ||
160 | */ | ||
161 | static inline struct hlist_head * | ||
162 | l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) | ||
163 | { | ||
164 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; | ||
165 | } | ||
166 | |||
167 | /* Lookup a session by id | ||
168 | */ | ||
169 | struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id) | ||
170 | { | ||
171 | struct hlist_head *session_list; | ||
172 | struct l2tp_session *session; | ||
173 | struct hlist_node *walk; | ||
174 | |||
175 | /* In L2TPv3, session_ids are unique over all tunnels and we | ||
176 | * sometimes need to look them up before we know the | ||
177 | * tunnel. | ||
178 | */ | ||
179 | if (tunnel == NULL) | ||
180 | return l2tp_session_find_2(net, session_id); | ||
181 | |||
182 | session_list = l2tp_session_id_hash(tunnel, session_id); | ||
183 | read_lock_bh(&tunnel->hlist_lock); | ||
184 | hlist_for_each_entry(session, walk, session_list, hlist) { | ||
185 | if (session->session_id == session_id) { | ||
186 | read_unlock_bh(&tunnel->hlist_lock); | ||
187 | return session; | ||
188 | } | ||
189 | } | ||
190 | read_unlock_bh(&tunnel->hlist_lock); | ||
191 | |||
192 | return NULL; | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(l2tp_session_find); | ||
195 | |||
196 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) | ||
197 | { | ||
198 | int hash; | ||
199 | struct hlist_node *walk; | ||
200 | struct l2tp_session *session; | ||
201 | int count = 0; | ||
202 | |||
203 | read_lock_bh(&tunnel->hlist_lock); | ||
204 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
205 | hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) { | ||
206 | if (++count > nth) { | ||
207 | read_unlock_bh(&tunnel->hlist_lock); | ||
208 | return session; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
213 | read_unlock_bh(&tunnel->hlist_lock); | ||
214 | |||
215 | return NULL; | ||
216 | } | ||
217 | EXPORT_SYMBOL_GPL(l2tp_session_find_nth); | ||
218 | |||
219 | /* Lookup a session by interface name. | ||
220 | * This is very inefficient but is only used by management interfaces. | ||
221 | */ | ||
222 | struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) | ||
223 | { | ||
224 | struct l2tp_net *pn = l2tp_pernet(net); | ||
225 | int hash; | ||
226 | struct hlist_node *walk; | ||
227 | struct l2tp_session *session; | ||
228 | |||
229 | rcu_read_lock_bh(); | ||
230 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { | ||
231 | hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) { | ||
232 | if (!strcmp(session->ifname, ifname)) { | ||
233 | rcu_read_unlock_bh(); | ||
234 | return session; | ||
235 | } | ||
236 | } | ||
237 | } | ||
238 | |||
239 | rcu_read_unlock_bh(); | ||
240 | |||
241 | return NULL; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); | ||
244 | |||
245 | /* Lookup a tunnel by id | ||
246 | */ | ||
247 | struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) | ||
248 | { | ||
249 | struct l2tp_tunnel *tunnel; | ||
250 | struct l2tp_net *pn = l2tp_pernet(net); | ||
251 | |||
252 | rcu_read_lock_bh(); | ||
253 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
254 | if (tunnel->tunnel_id == tunnel_id) { | ||
255 | rcu_read_unlock_bh(); | ||
256 | return tunnel; | ||
257 | } | ||
258 | } | ||
259 | rcu_read_unlock_bh(); | ||
260 | |||
261 | return NULL; | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find); | ||
264 | |||
265 | struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth) | ||
266 | { | ||
267 | struct l2tp_net *pn = l2tp_pernet(net); | ||
268 | struct l2tp_tunnel *tunnel; | ||
269 | int count = 0; | ||
270 | |||
271 | rcu_read_lock_bh(); | ||
272 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
273 | if (++count > nth) { | ||
274 | rcu_read_unlock_bh(); | ||
275 | return tunnel; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | rcu_read_unlock_bh(); | ||
280 | |||
281 | return NULL; | ||
282 | } | ||
283 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); | ||
284 | |||
285 | /***************************************************************************** | ||
286 | * Receive data handling | ||
287 | *****************************************************************************/ | ||
288 | |||
289 | /* Queue a skb in order. We come here only if the skb has an L2TP sequence | ||
290 | * number. | ||
291 | */ | ||
292 | static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb) | ||
293 | { | ||
294 | struct sk_buff *skbp; | ||
295 | struct sk_buff *tmp; | ||
296 | u32 ns = L2TP_SKB_CB(skb)->ns; | ||
297 | |||
298 | spin_lock_bh(&session->reorder_q.lock); | ||
299 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { | ||
300 | if (L2TP_SKB_CB(skbp)->ns > ns) { | ||
301 | __skb_queue_before(&session->reorder_q, skbp, skb); | ||
302 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
303 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", | ||
304 | session->name, ns, L2TP_SKB_CB(skbp)->ns, | ||
305 | skb_queue_len(&session->reorder_q)); | ||
306 | session->stats.rx_oos_packets++; | ||
307 | goto out; | ||
308 | } | ||
309 | } | ||
310 | |||
311 | __skb_queue_tail(&session->reorder_q, skb); | ||
312 | |||
313 | out: | ||
314 | spin_unlock_bh(&session->reorder_q.lock); | ||
315 | } | ||
316 | |||
317 | /* Dequeue a single skb. | ||
318 | */ | ||
319 | static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb) | ||
320 | { | ||
321 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
322 | int length = L2TP_SKB_CB(skb)->length; | ||
323 | |||
324 | /* We're about to requeue the skb, so return resources | ||
325 | * to its current owner (a socket receive buffer). | ||
326 | */ | ||
327 | skb_orphan(skb); | ||
328 | |||
329 | tunnel->stats.rx_packets++; | ||
330 | tunnel->stats.rx_bytes += length; | ||
331 | session->stats.rx_packets++; | ||
332 | session->stats.rx_bytes += length; | ||
333 | |||
334 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
335 | /* Bump our Nr */ | ||
336 | session->nr++; | ||
337 | if (tunnel->version == L2TP_HDR_VER_2) | ||
338 | session->nr &= 0xffff; | ||
339 | else | ||
340 | session->nr &= 0xffffff; | ||
341 | |||
342 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
343 | "%s: updated nr to %hu\n", session->name, session->nr); | ||
344 | } | ||
345 | |||
346 | /* call private receive handler */ | ||
347 | if (session->recv_skb != NULL) | ||
348 | (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length); | ||
349 | else | ||
350 | kfree_skb(skb); | ||
351 | |||
352 | if (session->deref) | ||
353 | (*session->deref)(session); | ||
354 | } | ||
355 | |||
356 | /* Dequeue skbs from the session's reorder_q, subject to packet order. | ||
357 | * Skbs that have been in the queue for too long are simply discarded. | ||
358 | */ | ||
359 | static void l2tp_recv_dequeue(struct l2tp_session *session) | ||
360 | { | ||
361 | struct sk_buff *skb; | ||
362 | struct sk_buff *tmp; | ||
363 | |||
364 | /* If the pkt at the head of the queue has the nr that we | ||
365 | * expect to send up next, dequeue it and any other | ||
366 | * in-sequence packets behind it. | ||
367 | */ | ||
368 | spin_lock_bh(&session->reorder_q.lock); | ||
369 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { | ||
370 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { | ||
371 | session->stats.rx_seq_discards++; | ||
372 | session->stats.rx_errors++; | ||
373 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
374 | "%s: oos pkt %u len %d discarded (too old), " | ||
375 | "waiting for %u, reorder_q_len=%d\n", | ||
376 | session->name, L2TP_SKB_CB(skb)->ns, | ||
377 | L2TP_SKB_CB(skb)->length, session->nr, | ||
378 | skb_queue_len(&session->reorder_q)); | ||
379 | __skb_unlink(skb, &session->reorder_q); | ||
380 | kfree_skb(skb); | ||
381 | if (session->deref) | ||
382 | (*session->deref)(session); | ||
383 | continue; | ||
384 | } | ||
385 | |||
386 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
387 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | ||
388 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
389 | "%s: holding oos pkt %u len %d, " | ||
390 | "waiting for %u, reorder_q_len=%d\n", | ||
391 | session->name, L2TP_SKB_CB(skb)->ns, | ||
392 | L2TP_SKB_CB(skb)->length, session->nr, | ||
393 | skb_queue_len(&session->reorder_q)); | ||
394 | goto out; | ||
395 | } | ||
396 | } | ||
397 | __skb_unlink(skb, &session->reorder_q); | ||
398 | |||
399 | /* Process the skb. We release the queue lock while we | ||
400 | * do so to let other contexts process the queue. | ||
401 | */ | ||
402 | spin_unlock_bh(&session->reorder_q.lock); | ||
403 | l2tp_recv_dequeue_skb(session, skb); | ||
404 | spin_lock_bh(&session->reorder_q.lock); | ||
405 | } | ||
406 | |||
407 | out: | ||
408 | spin_unlock_bh(&session->reorder_q.lock); | ||
409 | } | ||
410 | |||
411 | static inline int l2tp_verify_udp_checksum(struct sock *sk, | ||
412 | struct sk_buff *skb) | ||
413 | { | ||
414 | struct udphdr *uh = udp_hdr(skb); | ||
415 | u16 ulen = ntohs(uh->len); | ||
416 | struct inet_sock *inet; | ||
417 | __wsum psum; | ||
418 | |||
419 | if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) | ||
420 | return 0; | ||
421 | |||
422 | inet = inet_sk(sk); | ||
423 | psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, | ||
424 | IPPROTO_UDP, 0); | ||
425 | |||
426 | if ((skb->ip_summed == CHECKSUM_COMPLETE) && | ||
427 | !csum_fold(csum_add(psum, skb->csum))) | ||
428 | return 0; | ||
429 | |||
430 | skb->csum = psum; | ||
431 | |||
432 | return __skb_checksum_complete(skb); | ||
433 | } | ||
434 | |||
435 | /* Do receive processing of L2TP data frames. We handle both L2TPv2 | ||
436 | * and L2TPv3 data frames here. | ||
437 | * | ||
438 | * L2TPv2 Data Message Header | ||
439 | * | ||
440 | * 0 1 2 3 | ||
441 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
442 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
443 | * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) | | ||
444 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
445 | * | Tunnel ID | Session ID | | ||
446 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
447 | * | Ns (opt) | Nr (opt) | | ||
448 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
449 | * | Offset Size (opt) | Offset pad... (opt) | ||
450 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
451 | * | ||
452 | * Data frames are marked by T=0. All other fields are the same as | ||
453 | * those in L2TP control frames. | ||
454 | * | ||
455 | * L2TPv3 Data Message Header | ||
456 | * | ||
457 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
458 | * | L2TP Session Header | | ||
459 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
460 | * | L2-Specific Sublayer | | ||
461 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
462 | * | Tunnel Payload ... | ||
463 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
464 | * | ||
465 | * L2TPv3 Session Header Over IP | ||
466 | * | ||
467 | * 0 1 2 3 | ||
468 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
469 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
470 | * | Session ID | | ||
471 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
472 | * | Cookie (optional, maximum 64 bits)... | ||
473 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
474 | * | | ||
475 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
476 | * | ||
477 | * L2TPv3 L2-Specific Sublayer Format | ||
478 | * | ||
479 | * 0 1 2 3 | ||
480 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
481 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
482 | * |x|S|x|x|x|x|x|x| Sequence Number | | ||
483 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
484 | * | ||
485 | * Cookie value, sublayer format and offset (pad) are negotiated with | ||
486 | * the peer when the session is set up. Unlike L2TPv2, we do not need | ||
487 | * to parse the packet header to determine if optional fields are | ||
488 | * present. | ||
489 | * | ||
490 | * Caller must already have parsed the frame and determined that it is | ||
491 | * a data (not control) frame before coming here. Fields up to the | ||
492 | * session-id have already been parsed and ptr points to the data | ||
493 | * after the session-id. | ||
494 | */ | ||
495 | void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | ||
496 | unsigned char *ptr, unsigned char *optr, u16 hdrflags, | ||
497 | int length, int (*payload_hook)(struct sk_buff *skb)) | ||
498 | { | ||
499 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
500 | int offset; | ||
501 | u32 ns, nr; | ||
502 | |||
503 | /* The ref count is increased since we now hold a pointer to | ||
504 | * the session. Take care to decrement the refcnt when exiting | ||
505 | * this function from now on... | ||
506 | */ | ||
507 | l2tp_session_inc_refcount(session); | ||
508 | if (session->ref) | ||
509 | (*session->ref)(session); | ||
510 | |||
511 | /* Parse and check optional cookie */ | ||
512 | if (session->peer_cookie_len > 0) { | ||
513 | if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { | ||
514 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
515 | "%s: cookie mismatch (%u/%u). Discarding.\n", | ||
516 | tunnel->name, tunnel->tunnel_id, session->session_id); | ||
517 | session->stats.rx_cookie_discards++; | ||
518 | goto discard; | ||
519 | } | ||
520 | ptr += session->peer_cookie_len; | ||
521 | } | ||
522 | |||
523 | /* Handle the optional sequence numbers. Sequence numbers are | ||
524 | * in different places for L2TPv2 and L2TPv3. | ||
525 | * | ||
526 | * If we are the LAC, enable/disable sequence numbers under | ||
527 | * the control of the LNS. If no sequence numbers present but | ||
528 | * we were expecting them, discard frame. | ||
529 | */ | ||
530 | ns = nr = 0; | ||
531 | L2TP_SKB_CB(skb)->has_seq = 0; | ||
532 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
533 | if (hdrflags & L2TP_HDRFLAG_S) { | ||
534 | ns = ntohs(*(__be16 *) ptr); | ||
535 | ptr += 2; | ||
536 | nr = ntohs(*(__be16 *) ptr); | ||
537 | ptr += 2; | ||
538 | |||
539 | /* Store L2TP info in the skb */ | ||
540 | L2TP_SKB_CB(skb)->ns = ns; | ||
541 | L2TP_SKB_CB(skb)->has_seq = 1; | ||
542 | |||
543 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
544 | "%s: recv data ns=%u, nr=%u, session nr=%u\n", | ||
545 | session->name, ns, nr, session->nr); | ||
546 | } | ||
547 | } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { | ||
548 | u32 l2h = ntohl(*(__be32 *) ptr); | ||
549 | |||
550 | if (l2h & 0x40000000) { | ||
551 | ns = l2h & 0x00ffffff; | ||
552 | |||
553 | /* Store L2TP info in the skb */ | ||
554 | L2TP_SKB_CB(skb)->ns = ns; | ||
555 | L2TP_SKB_CB(skb)->has_seq = 1; | ||
556 | |||
557 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
558 | "%s: recv data ns=%u, session nr=%u\n", | ||
559 | session->name, ns, session->nr); | ||
560 | } | ||
561 | } | ||
562 | |||
563 | /* Advance past L2-specific header, if present */ | ||
564 | ptr += session->l2specific_len; | ||
565 | |||
566 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
567 | /* Received a packet with sequence numbers. If we're the LNS, | ||
568 | * check if we are sending sequence numbers and if not, | ||
569 | * configure it so. | ||
570 | */ | ||
571 | if ((!session->lns_mode) && (!session->send_seq)) { | ||
572 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, | ||
573 | "%s: requested to enable seq numbers by LNS\n", | ||
574 | session->name); | ||
575 | session->send_seq = -1; | ||
576 | l2tp_session_set_header_len(session, tunnel->version); | ||
577 | } | ||
578 | } else { | ||
579 | /* No sequence numbers. | ||
580 | * If user has configured mandatory sequence numbers, discard. | ||
581 | */ | ||
582 | if (session->recv_seq) { | ||
583 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, | ||
584 | "%s: recv data has no seq numbers when required. " | ||
585 | "Discarding\n", session->name); | ||
586 | session->stats.rx_seq_discards++; | ||
587 | goto discard; | ||
588 | } | ||
589 | |||
590 | /* If we're the LAC and we're sending sequence numbers, the | ||
591 | * LNS has requested that we no longer send sequence numbers. | ||
592 | * If we're the LNS and we're sending sequence numbers, the | ||
593 | * LAC is broken. Discard the frame. | ||
594 | */ | ||
595 | if ((!session->lns_mode) && (session->send_seq)) { | ||
596 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, | ||
597 | "%s: requested to disable seq numbers by LNS\n", | ||
598 | session->name); | ||
599 | session->send_seq = 0; | ||
600 | l2tp_session_set_header_len(session, tunnel->version); | ||
601 | } else if (session->send_seq) { | ||
602 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, | ||
603 | "%s: recv data has no seq numbers when required. " | ||
604 | "Discarding\n", session->name); | ||
605 | session->stats.rx_seq_discards++; | ||
606 | goto discard; | ||
607 | } | ||
608 | } | ||
609 | |||
610 | /* Session data offset is handled differently for L2TPv2 and | ||
611 | * L2TPv3. For L2TPv2, there is an optional 16-bit value in | ||
612 | * the header. For L2TPv3, the offset is negotiated using AVPs | ||
613 | * in the session setup control protocol. | ||
614 | */ | ||
615 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
616 | /* If offset bit set, skip it. */ | ||
617 | if (hdrflags & L2TP_HDRFLAG_O) { | ||
618 | offset = ntohs(*(__be16 *)ptr); | ||
619 | ptr += 2 + offset; | ||
620 | } | ||
621 | } else | ||
622 | ptr += session->offset; | ||
623 | |||
624 | offset = ptr - optr; | ||
625 | if (!pskb_may_pull(skb, offset)) | ||
626 | goto discard; | ||
627 | |||
628 | __skb_pull(skb, offset); | ||
629 | |||
630 | /* If caller wants to process the payload before we queue the | ||
631 | * packet, do so now. | ||
632 | */ | ||
633 | if (payload_hook) | ||
634 | if ((*payload_hook)(skb)) | ||
635 | goto discard; | ||
636 | |||
637 | /* Prepare skb for adding to the session's reorder_q. Hold | ||
638 | * packets for max reorder_timeout or 1 second if not | ||
639 | * reordering. | ||
640 | */ | ||
641 | L2TP_SKB_CB(skb)->length = length; | ||
642 | L2TP_SKB_CB(skb)->expires = jiffies + | ||
643 | (session->reorder_timeout ? session->reorder_timeout : HZ); | ||
644 | |||
645 | /* Add packet to the session's receive queue. Reordering is done here, if | ||
646 | * enabled. Saved L2TP protocol info is stored in skb->sb[]. | ||
647 | */ | ||
648 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
649 | if (session->reorder_timeout != 0) { | ||
650 | /* Packet reordering enabled. Add skb to session's | ||
651 | * reorder queue, in order of ns. | ||
652 | */ | ||
653 | l2tp_recv_queue_skb(session, skb); | ||
654 | } else { | ||
655 | /* Packet reordering disabled. Discard out-of-sequence | ||
656 | * packets | ||
657 | */ | ||
658 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | ||
659 | session->stats.rx_seq_discards++; | ||
660 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
661 | "%s: oos pkt %u len %d discarded, " | ||
662 | "waiting for %u, reorder_q_len=%d\n", | ||
663 | session->name, L2TP_SKB_CB(skb)->ns, | ||
664 | L2TP_SKB_CB(skb)->length, session->nr, | ||
665 | skb_queue_len(&session->reorder_q)); | ||
666 | goto discard; | ||
667 | } | ||
668 | skb_queue_tail(&session->reorder_q, skb); | ||
669 | } | ||
670 | } else { | ||
671 | /* No sequence numbers. Add the skb to the tail of the | ||
672 | * reorder queue. This ensures that it will be | ||
673 | * delivered after all previous sequenced skbs. | ||
674 | */ | ||
675 | skb_queue_tail(&session->reorder_q, skb); | ||
676 | } | ||
677 | |||
678 | /* Try to dequeue as many skbs from reorder_q as we can. */ | ||
679 | l2tp_recv_dequeue(session); | ||
680 | |||
681 | l2tp_session_dec_refcount(session); | ||
682 | |||
683 | return; | ||
684 | |||
685 | discard: | ||
686 | session->stats.rx_errors++; | ||
687 | kfree_skb(skb); | ||
688 | |||
689 | if (session->deref) | ||
690 | (*session->deref)(session); | ||
691 | |||
692 | l2tp_session_dec_refcount(session); | ||
693 | } | ||
694 | EXPORT_SYMBOL(l2tp_recv_common); | ||
695 | |||
/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on.
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded. All such packets are passed up to userspace to deal with.
 *
 * @tunnel:       tunnel context whose socket received the datagram.
 * @skb:          received skb; data still points at the UDP header on entry.
 * @payload_hook: optional per-tunnel hook, forwarded to l2tp_recv_common().
 */
int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
		       int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	int offset;
	u16 version;
	int length;

	/* Verify the UDP checksum (if any) before parsing the payload. */
	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
		goto discard_bad_csum;

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? Need at least the largest fixed L2TP header. */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
		goto error;
	}

	/* Point to L2TP header. optr remembers the header start so that
	 * l2tp_recv_common() can compute the final payload offset.
	 */
	optr = ptr = skb->data;

	/* Trace packet contents, if enabled. Dump at most 32 bytes. */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		printk(KERN_DEBUG "%s: recv: ", tunnel->name);

		offset = 0;
		do {
			printk(" %02X", ptr[offset]);
		} while (++offset < length);

		printk("\n");
	}

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
		       "%s: recv protocol version mismatch: got %d expected %d\n",
		       tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv control packet, len=%d\n", tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		ptr += 2;	/* skip reserved bits */
		/* L2TPv3 data packets carry only a 32-bit session id;
		 * the tunnel is identified by the receiving socket.
		 */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
	if (!session || !session->recv_skb) {
		/* Not found? Pass to userspace to deal with */
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
		       "%s: no session found (%u/%u). Passing up.\n",
		       tunnel->name, tunnel_id, session_id);
		goto error;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);

	return 0;

discard_bad_csum:
	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
	tunnel->stats.rx_errors++;
	kfree_skb(skb);

	return 0;

error:
	/* Put UDP header back so userspace sees the original datagram. */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
816 | |||
817 | /* UDP encapsulation receive handler. See net/ipv4/udp.c. | ||
818 | * Return codes: | ||
819 | * 0 : success. | ||
820 | * <0: error | ||
821 | * >0: skb should be passed up to userspace as UDP. | ||
822 | */ | ||
823 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | ||
824 | { | ||
825 | struct l2tp_tunnel *tunnel; | ||
826 | |||
827 | tunnel = l2tp_sock_to_tunnel(sk); | ||
828 | if (tunnel == NULL) | ||
829 | goto pass_up; | ||
830 | |||
831 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
832 | "%s: received %d bytes\n", tunnel->name, skb->len); | ||
833 | |||
834 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) | ||
835 | goto pass_up_put; | ||
836 | |||
837 | sock_put(sk); | ||
838 | return 0; | ||
839 | |||
840 | pass_up_put: | ||
841 | sock_put(sk); | ||
842 | pass_up: | ||
843 | return 1; | ||
844 | } | ||
845 | EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv); | ||
846 | |||
847 | /************************************************************************ | ||
848 | * Transmit handling | ||
849 | ***********************************************************************/ | ||
850 | |||
851 | /* Build an L2TP header for the session into the buffer provided. | ||
852 | */ | ||
853 | static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) | ||
854 | { | ||
855 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
856 | __be16 *bufp = buf; | ||
857 | __be16 *optr = buf; | ||
858 | u16 flags = L2TP_HDR_VER_2; | ||
859 | u32 tunnel_id = tunnel->peer_tunnel_id; | ||
860 | u32 session_id = session->peer_session_id; | ||
861 | |||
862 | if (session->send_seq) | ||
863 | flags |= L2TP_HDRFLAG_S; | ||
864 | |||
865 | /* Setup L2TP header. */ | ||
866 | *bufp++ = htons(flags); | ||
867 | *bufp++ = htons(tunnel_id); | ||
868 | *bufp++ = htons(session_id); | ||
869 | if (session->send_seq) { | ||
870 | *bufp++ = htons(session->ns); | ||
871 | *bufp++ = 0; | ||
872 | session->ns++; | ||
873 | session->ns &= 0xffff; | ||
874 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
875 | "%s: updated ns to %u\n", session->name, session->ns); | ||
876 | } | ||
877 | |||
878 | return bufp - optr; | ||
879 | } | ||
880 | |||
/* Build an L2TPv3 data header for the session into the buffer provided.
 * Layout: [4-byte flags/reserved, UDP encap only] [4-byte session id]
 * [optional cookie] [optional L2-specific sublayer] [optional pad/offset].
 * Returns the number of bytes written.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			/* Default L2-specific sublayer: S bit (0x40000000)
			 * plus a 24-bit Ns when sequencing is enabled.
			 */
			u32 l2h = 0;
			if (session->send_seq) {
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: updated ns to %u\n", session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		/* Advance by the configured length even if the type wrote
		 * nothing, so the payload lands where the peer expects it.
		 */
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}
924 | |||
925 | int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len) | ||
926 | { | ||
927 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
928 | unsigned int len = skb->len; | ||
929 | int error; | ||
930 | |||
931 | /* Debug */ | ||
932 | if (session->send_seq) | ||
933 | PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
934 | "%s: send %Zd bytes, ns=%u\n", session->name, | ||
935 | data_len, session->ns - 1); | ||
936 | else | ||
937 | PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
938 | "%s: send %Zd bytes\n", session->name, data_len); | ||
939 | |||
940 | if (session->debug & L2TP_MSG_DATA) { | ||
941 | int i; | ||
942 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
943 | unsigned char *datap = skb->data + uhlen; | ||
944 | |||
945 | printk(KERN_DEBUG "%s: xmit:", session->name); | ||
946 | for (i = 0; i < (len - uhlen); i++) { | ||
947 | printk(" %02X", *datap++); | ||
948 | if (i == 31) { | ||
949 | printk(" ..."); | ||
950 | break; | ||
951 | } | ||
952 | } | ||
953 | printk("\n"); | ||
954 | } | ||
955 | |||
956 | /* Queue the packet to IP for output */ | ||
957 | skb->local_df = 1; | ||
958 | error = ip_queue_xmit(skb); | ||
959 | |||
960 | /* Update stats */ | ||
961 | if (error >= 0) { | ||
962 | tunnel->stats.tx_packets++; | ||
963 | tunnel->stats.tx_bytes += len; | ||
964 | session->stats.tx_packets++; | ||
965 | session->stats.tx_bytes += len; | ||
966 | } else { | ||
967 | tunnel->stats.tx_errors++; | ||
968 | session->stats.tx_errors++; | ||
969 | } | ||
970 | |||
971 | return 0; | ||
972 | } | ||
973 | EXPORT_SYMBOL_GPL(l2tp_xmit_core); | ||
974 | |||
/* skb destructor, automatically called when the skb is freed.
 * Releases the tunnel-socket reference taken by l2tp_skb_set_owner_w().
 */
static void l2tp_sock_wfree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
981 | |||
/* For data skbs that we transmit, we associate with the tunnel socket
 * but don't do accounting (unlike skb_set_owner_w(), no wmem charge).
 * The reference taken here is dropped by l2tp_sock_wfree() when the skb
 * is freed.
 */
static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = l2tp_sock_wfree;
}
991 | |||
992 | /* If caller requires the skb to have a ppp header, the header must be | ||
993 | * inserted in the skb data before calling this function. | ||
994 | */ | ||
995 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len) | ||
996 | { | ||
997 | int data_len = skb->len; | ||
998 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
999 | struct sock *sk = tunnel->sock; | ||
1000 | struct udphdr *uh; | ||
1001 | struct inet_sock *inet; | ||
1002 | __wsum csum; | ||
1003 | int old_headroom; | ||
1004 | int new_headroom; | ||
1005 | int headroom; | ||
1006 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
1007 | int udp_len; | ||
1008 | |||
1009 | /* Check that there's enough headroom in the skb to insert IP, | ||
1010 | * UDP and L2TP headers. If not enough, expand it to | ||
1011 | * make room. Adjust truesize. | ||
1012 | */ | ||
1013 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | ||
1014 | uhlen + hdr_len; | ||
1015 | old_headroom = skb_headroom(skb); | ||
1016 | if (skb_cow_head(skb, headroom)) | ||
1017 | goto abort; | ||
1018 | |||
1019 | new_headroom = skb_headroom(skb); | ||
1020 | skb_orphan(skb); | ||
1021 | skb->truesize += new_headroom - old_headroom; | ||
1022 | |||
1023 | /* Setup L2TP header */ | ||
1024 | session->build_header(session, __skb_push(skb, hdr_len)); | ||
1025 | |||
1026 | /* Reset skb netfilter state */ | ||
1027 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
1028 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | ||
1029 | IPSKB_REROUTED); | ||
1030 | nf_reset(skb); | ||
1031 | |||
1032 | /* Get routing info from the tunnel socket */ | ||
1033 | skb_dst_drop(skb); | ||
1034 | skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); | ||
1035 | |||
1036 | switch (tunnel->encap) { | ||
1037 | case L2TP_ENCAPTYPE_UDP: | ||
1038 | /* Setup UDP header */ | ||
1039 | inet = inet_sk(sk); | ||
1040 | __skb_push(skb, sizeof(*uh)); | ||
1041 | skb_reset_transport_header(skb); | ||
1042 | uh = udp_hdr(skb); | ||
1043 | uh->source = inet->inet_sport; | ||
1044 | uh->dest = inet->inet_dport; | ||
1045 | udp_len = uhlen + hdr_len + data_len; | ||
1046 | uh->len = htons(udp_len); | ||
1047 | uh->check = 0; | ||
1048 | |||
1049 | /* Calculate UDP checksum if configured to do so */ | ||
1050 | if (sk->sk_no_check == UDP_CSUM_NOXMIT) | ||
1051 | skb->ip_summed = CHECKSUM_NONE; | ||
1052 | else if ((skb_dst(skb) && skb_dst(skb)->dev) && | ||
1053 | (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) { | ||
1054 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
1055 | csum = skb_checksum(skb, 0, udp_len, 0); | ||
1056 | uh->check = csum_tcpudp_magic(inet->inet_saddr, | ||
1057 | inet->inet_daddr, | ||
1058 | udp_len, IPPROTO_UDP, csum); | ||
1059 | if (uh->check == 0) | ||
1060 | uh->check = CSUM_MANGLED_0; | ||
1061 | } else { | ||
1062 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
1063 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
1064 | skb->csum_offset = offsetof(struct udphdr, check); | ||
1065 | uh->check = ~csum_tcpudp_magic(inet->inet_saddr, | ||
1066 | inet->inet_daddr, | ||
1067 | udp_len, IPPROTO_UDP, 0); | ||
1068 | } | ||
1069 | break; | ||
1070 | |||
1071 | case L2TP_ENCAPTYPE_IP: | ||
1072 | break; | ||
1073 | } | ||
1074 | |||
1075 | l2tp_skb_set_owner_w(skb, sk); | ||
1076 | |||
1077 | l2tp_xmit_core(session, skb, data_len); | ||
1078 | |||
1079 | abort: | ||
1080 | return 0; | ||
1081 | } | ||
1082 | EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | ||
1083 | |||
1084 | /***************************************************************************** | ||
1085 | * Tinnel and session create/destroy. | ||
1086 | *****************************************************************************/ | ||
1087 | |||
/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 * Runs when the tunnel socket is destroyed: closes all sessions, detaches
 * the encap hooks, restores the original sk_destruct and drops the
 * tunnel's socket reference.
 */
void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = sk->sk_user_data;
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	l2tp_tunnel_closeall(tunnel);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	/* We're finished with the socket; drop the reference taken at
	 * tunnel creation. May free the tunnel context.
	 */
	l2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}
EXPORT_SYMBOL(l2tp_tunnel_destruct);
1132 | |||
/* When the tunnel is closed, all the attached sessions need to go too.
 * Walks every hash bucket, unhooks each session and invokes its close
 * and deref callbacks. The hlist lock is dropped around the callbacks,
 * so the walk restarts from the bucket head after each removal.
 */
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			/* L2TPv3 sessions also live on a global hash;
			 * unlink under the pernet lock and wait for any
			 * RCU readers before continuing.
			 */
			if (tunnel->version != L2TP_HDR_VER_2) {
				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

				spin_lock_bh(&pn->l2tp_session_hlist_lock);
				hlist_del_init_rcu(&session->global_hlist);
				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
				synchronize_rcu();
			}

			if (session->session_close != NULL)
				(*session->session_close)(session);

			if (session->deref != NULL)
				(*session->deref)(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1197 | |||
1198 | /* Really kill the tunnel. | ||
1199 | * Come here only when all sessions have been cleared from the tunnel. | ||
1200 | */ | ||
1201 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | ||
1202 | { | ||
1203 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1204 | |||
1205 | BUG_ON(atomic_read(&tunnel->ref_count) != 0); | ||
1206 | BUG_ON(tunnel->sock != NULL); | ||
1207 | |||
1208 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1209 | "%s: free...\n", tunnel->name); | ||
1210 | |||
1211 | /* Remove from tunnel list */ | ||
1212 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1213 | list_del_rcu(&tunnel->list); | ||
1214 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1215 | synchronize_rcu(); | ||
1216 | |||
1217 | atomic_dec(&l2tp_tunnel_count); | ||
1218 | kfree(tunnel); | ||
1219 | } | ||
1220 | EXPORT_SYMBOL_GPL(l2tp_tunnel_free); | ||
1221 | |||
/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 * On success *sockp holds the bound+connected socket; on failure the
 * shared "out" path releases any partially-created socket and clears
 * *sockp. Returns 0 or a negative errno.
 */
static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
{
	int err = -EINVAL;
	struct sockaddr_in udp_addr;
	struct sockaddr_l2tpip ip_addr;
	struct socket *sock = NULL;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
		if (err < 0)
			goto out;

		sock = *sockp;

		/* Bind the local end... */
		memset(&udp_addr, 0, sizeof(udp_addr));
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->local_ip;
		udp_addr.sin_port = htons(cfg->local_udp_port);
		err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
		if (err < 0)
			goto out;

		/* ...then connect to the peer so recv is filtered for us. */
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = htons(cfg->peer_udp_port);
		err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
		if (err < 0)
			goto out;

		if (!cfg->use_udp_checksums)
			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;

		break;

	case L2TP_ENCAPTYPE_IP:
		/* L2TPv3 IP encapsulation: connection ids take the place
		 * of UDP ports in the L2TPIP sockaddr.
		 */
		err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
		if (err < 0)
			goto out;

		sock = *sockp;

		memset(&ip_addr, 0, sizeof(ip_addr));
		ip_addr.l2tp_family = AF_INET;
		ip_addr.l2tp_addr = cfg->local_ip;
		ip_addr.l2tp_conn_id = tunnel_id;
		err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
		if (err < 0)
			goto out;

		ip_addr.l2tp_family = AF_INET;
		ip_addr.l2tp_addr = cfg->peer_ip;
		ip_addr.l2tp_conn_id = peer_tunnel_id;
		err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
		if (err < 0)
			goto out;

		break;

	default:
		goto out;
	}

out:
	/* On any failure after creation, release the half-built socket. */
	if ((err < 0) && sock) {
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1297 | |||
1298 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) | ||
1299 | { | ||
1300 | struct l2tp_tunnel *tunnel = NULL; | ||
1301 | int err; | ||
1302 | struct socket *sock = NULL; | ||
1303 | struct sock *sk = NULL; | ||
1304 | struct l2tp_net *pn; | ||
1305 | enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP; | ||
1306 | |||
1307 | /* Get the tunnel socket from the fd, which was opened by | ||
1308 | * the userspace L2TP daemon. If not specified, create a | ||
1309 | * kernel socket. | ||
1310 | */ | ||
1311 | if (fd < 0) { | ||
1312 | err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock); | ||
1313 | if (err < 0) | ||
1314 | goto err; | ||
1315 | } else { | ||
1316 | err = -EBADF; | ||
1317 | sock = sockfd_lookup(fd, &err); | ||
1318 | if (!sock) { | ||
1319 | printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", | ||
1320 | tunnel_id, fd, err); | ||
1321 | goto err; | ||
1322 | } | ||
1323 | } | ||
1324 | |||
1325 | sk = sock->sk; | ||
1326 | |||
1327 | if (cfg != NULL) | ||
1328 | encap = cfg->encap; | ||
1329 | |||
1330 | /* Quick sanity checks */ | ||
1331 | switch (encap) { | ||
1332 | case L2TP_ENCAPTYPE_UDP: | ||
1333 | err = -EPROTONOSUPPORT; | ||
1334 | if (sk->sk_protocol != IPPROTO_UDP) { | ||
1335 | printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | ||
1336 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); | ||
1337 | goto err; | ||
1338 | } | ||
1339 | break; | ||
1340 | case L2TP_ENCAPTYPE_IP: | ||
1341 | err = -EPROTONOSUPPORT; | ||
1342 | if (sk->sk_protocol != IPPROTO_L2TP) { | ||
1343 | printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | ||
1344 | tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); | ||
1345 | goto err; | ||
1346 | } | ||
1347 | break; | ||
1348 | } | ||
1349 | |||
1350 | /* Check if this socket has already been prepped */ | ||
1351 | tunnel = (struct l2tp_tunnel *)sk->sk_user_data; | ||
1352 | if (tunnel != NULL) { | ||
1353 | /* This socket has already been prepped */ | ||
1354 | err = -EBUSY; | ||
1355 | goto err; | ||
1356 | } | ||
1357 | |||
1358 | tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL); | ||
1359 | if (tunnel == NULL) { | ||
1360 | err = -ENOMEM; | ||
1361 | goto err; | ||
1362 | } | ||
1363 | |||
1364 | tunnel->version = version; | ||
1365 | tunnel->tunnel_id = tunnel_id; | ||
1366 | tunnel->peer_tunnel_id = peer_tunnel_id; | ||
1367 | tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS; | ||
1368 | |||
1369 | tunnel->magic = L2TP_TUNNEL_MAGIC; | ||
1370 | sprintf(&tunnel->name[0], "tunl %u", tunnel_id); | ||
1371 | rwlock_init(&tunnel->hlist_lock); | ||
1372 | |||
1373 | /* The net we belong to */ | ||
1374 | tunnel->l2tp_net = net; | ||
1375 | pn = l2tp_pernet(net); | ||
1376 | |||
1377 | if (cfg != NULL) | ||
1378 | tunnel->debug = cfg->debug; | ||
1379 | |||
1380 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | ||
1381 | tunnel->encap = encap; | ||
1382 | if (encap == L2TP_ENCAPTYPE_UDP) { | ||
1383 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | ||
1384 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; | ||
1385 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; | ||
1386 | } | ||
1387 | |||
1388 | sk->sk_user_data = tunnel; | ||
1389 | |||
1390 | /* Hook on the tunnel socket destructor so that we can cleanup | ||
1391 | * if the tunnel socket goes away. | ||
1392 | */ | ||
1393 | tunnel->old_sk_destruct = sk->sk_destruct; | ||
1394 | sk->sk_destruct = &l2tp_tunnel_destruct; | ||
1395 | tunnel->sock = sk; | ||
1396 | sk->sk_allocation = GFP_ATOMIC; | ||
1397 | |||
1398 | /* Add tunnel to our list */ | ||
1399 | INIT_LIST_HEAD(&tunnel->list); | ||
1400 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1401 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); | ||
1402 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1403 | synchronize_rcu(); | ||
1404 | atomic_inc(&l2tp_tunnel_count); | ||
1405 | |||
1406 | /* Bump the reference count. The tunnel context is deleted | ||
1407 | * only when this drops to zero. | ||
1408 | */ | ||
1409 | l2tp_tunnel_inc_refcount(tunnel); | ||
1410 | |||
1411 | err = 0; | ||
1412 | err: | ||
1413 | if (tunnelp) | ||
1414 | *tunnelp = tunnel; | ||
1415 | |||
1416 | /* If tunnel's socket was created by the kernel, it doesn't | ||
1417 | * have a file. | ||
1418 | */ | ||
1419 | if (sock && sock->file) | ||
1420 | sockfd_put(sock); | ||
1421 | |||
1422 | return err; | ||
1423 | } | ||
1424 | EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | ||
1425 | |||
/* This function is used by the netlink TUNNEL_DELETE command.
 * Shuts down the tunnel socket; the tunnel context itself is torn down
 * later via the normal socket-close path (l2tp_tunnel_destruct).
 * Returns 0 or a negative errno from the shutdown/release call.
 */
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	int err = 0;
	struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;

	/* Force the tunnel socket to close. This will eventually
	 * cause the tunnel to be deleted via the normal socket close
	 * mechanisms when userspace closes the tunnel socket.
	 */
	if (sock != NULL) {
		err = inet_shutdown(sock, 2);

		/* If the tunnel's socket was created by the kernel,
		 * close the socket here since the socket was not
		 * created by userspace.
		 */
		if (sock->file == NULL)
			err = inet_release(sock);
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1451 | |||
/* Really kill the session.
 * Called when the session refcount drops to zero: unlinks the session
 * from the tunnel (and global L2TPv3) hashes, drops the tunnel socket
 * and tunnel references, then frees the memory.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel;

	BUG_ON(atomic_read(&session->ref_count) != 0);

	tunnel = session->tunnel;
	if (tunnel != NULL) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* Delete the session from the hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Unlink from the global hash if not L2TPv2 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}

		/* session_id 0 is reserved and not counted. */
		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);

		/* Drop the tunnel-socket reference held by the session. */
		sock_put(tunnel->sock);

		/* This will delete the tunnel context if this
		 * is the last session on the tunnel.
		 */
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);

	return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);
1496 | |||
1497 | /* This function is used by the netlink SESSION_DELETE command and by | ||
1498 | pseudowire modules. | ||
1499 | */ | ||
1500 | int l2tp_session_delete(struct l2tp_session *session) | ||
1501 | { | ||
1502 | if (session->session_close != NULL) | ||
1503 | (*session->session_close)(session); | ||
1504 | |||
1505 | l2tp_session_dec_refcount(session); | ||
1506 | |||
1507 | return 0; | ||
1508 | } | ||
1509 | EXPORT_SYMBOL_GPL(l2tp_session_delete); | ||
1510 | |||
1511 | |||
1512 | /* We come here whenever a session's send_seq, cookie_len or | ||
1513 | * l2specific_len parameters are set. | ||
1514 | */ | ||
1515 | void l2tp_session_set_header_len(struct l2tp_session *session, int version) | ||
1516 | { | ||
1517 | if (version == L2TP_HDR_VER_2) { | ||
1518 | session->hdr_len = 6; | ||
1519 | if (session->send_seq) | ||
1520 | session->hdr_len += 4; | ||
1521 | } else { | ||
1522 | session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset; | ||
1523 | if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) | ||
1524 | session->hdr_len += 4; | ||
1525 | } | ||
1526 | |||
1527 | } | ||
1528 | EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); | ||
1529 | |||
1530 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
1531 | { | ||
1532 | struct l2tp_session *session; | ||
1533 | |||
1534 | session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); | ||
1535 | if (session != NULL) { | ||
1536 | session->magic = L2TP_SESSION_MAGIC; | ||
1537 | session->tunnel = tunnel; | ||
1538 | |||
1539 | session->session_id = session_id; | ||
1540 | session->peer_session_id = peer_session_id; | ||
1541 | session->nr = 1; | ||
1542 | |||
1543 | sprintf(&session->name[0], "sess %u/%u", | ||
1544 | tunnel->tunnel_id, session->session_id); | ||
1545 | |||
1546 | skb_queue_head_init(&session->reorder_q); | ||
1547 | |||
1548 | INIT_HLIST_NODE(&session->hlist); | ||
1549 | INIT_HLIST_NODE(&session->global_hlist); | ||
1550 | |||
1551 | /* Inherit debug options from tunnel */ | ||
1552 | session->debug = tunnel->debug; | ||
1553 | |||
1554 | if (cfg) { | ||
1555 | session->pwtype = cfg->pw_type; | ||
1556 | session->debug = cfg->debug; | ||
1557 | session->mtu = cfg->mtu; | ||
1558 | session->mru = cfg->mru; | ||
1559 | session->send_seq = cfg->send_seq; | ||
1560 | session->recv_seq = cfg->recv_seq; | ||
1561 | session->lns_mode = cfg->lns_mode; | ||
1562 | session->reorder_timeout = cfg->reorder_timeout; | ||
1563 | session->offset = cfg->offset; | ||
1564 | session->l2specific_type = cfg->l2specific_type; | ||
1565 | session->l2specific_len = cfg->l2specific_len; | ||
1566 | session->cookie_len = cfg->cookie_len; | ||
1567 | memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len); | ||
1568 | session->peer_cookie_len = cfg->peer_cookie_len; | ||
1569 | memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); | ||
1570 | } | ||
1571 | |||
1572 | if (tunnel->version == L2TP_HDR_VER_2) | ||
1573 | session->build_header = l2tp_build_l2tpv2_header; | ||
1574 | else | ||
1575 | session->build_header = l2tp_build_l2tpv3_header; | ||
1576 | |||
1577 | l2tp_session_set_header_len(session, tunnel->version); | ||
1578 | |||
1579 | /* Bump the reference count. The session context is deleted | ||
1580 | * only when this drops to zero. | ||
1581 | */ | ||
1582 | l2tp_session_inc_refcount(session); | ||
1583 | l2tp_tunnel_inc_refcount(tunnel); | ||
1584 | |||
1585 | /* Ensure tunnel socket isn't deleted */ | ||
1586 | sock_hold(tunnel->sock); | ||
1587 | |||
1588 | /* Add session to the tunnel's hash list */ | ||
1589 | write_lock_bh(&tunnel->hlist_lock); | ||
1590 | hlist_add_head(&session->hlist, | ||
1591 | l2tp_session_id_hash(tunnel, session_id)); | ||
1592 | write_unlock_bh(&tunnel->hlist_lock); | ||
1593 | |||
1594 | /* And to the global session list if L2TPv3 */ | ||
1595 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1596 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1597 | |||
1598 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1599 | hlist_add_head_rcu(&session->global_hlist, | ||
1600 | l2tp_session_id_hash_2(pn, session_id)); | ||
1601 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1602 | synchronize_rcu(); | ||
1603 | } | ||
1604 | |||
1605 | /* Ignore management session in session count value */ | ||
1606 | if (session->session_id != 0) | ||
1607 | atomic_inc(&l2tp_session_count); | ||
1608 | } | ||
1609 | |||
1610 | return session; | ||
1611 | } | ||
1612 | EXPORT_SYMBOL_GPL(l2tp_session_create); | ||
1613 | |||
1614 | /***************************************************************************** | ||
1615 | * Init and cleanup | ||
1616 | *****************************************************************************/ | ||
1617 | |||
1618 | static __net_init int l2tp_init_net(struct net *net) | ||
1619 | { | ||
1620 | struct l2tp_net *pn = net_generic(net, l2tp_net_id); | ||
1621 | int hash; | ||
1622 | |||
1623 | INIT_LIST_HEAD(&pn->l2tp_tunnel_list); | ||
1624 | spin_lock_init(&pn->l2tp_tunnel_list_lock); | ||
1625 | |||
1626 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) | ||
1627 | INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); | ||
1628 | |||
1629 | spin_lock_init(&pn->l2tp_session_hlist_lock); | ||
1630 | |||
1631 | return 0; | ||
1632 | } | ||
1633 | |||
/* Registered via register_pernet_device(); .size makes the pernet core
 * allocate a zeroed struct l2tp_net per namespace, retrievable with
 * net_generic(net, l2tp_net_id).
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1639 | |||
1640 | static int __init l2tp_init(void) | ||
1641 | { | ||
1642 | int rc = 0; | ||
1643 | |||
1644 | rc = register_pernet_device(&l2tp_net_ops); | ||
1645 | if (rc) | ||
1646 | goto out; | ||
1647 | |||
1648 | printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION); | ||
1649 | |||
1650 | out: | ||
1651 | return rc; | ||
1652 | } | ||
1653 | |||
/* Module unload: unregister the pernet ops, tearing down per-namespace
 * L2TP state.
 */
static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
}
1658 | |||
1659 | module_init(l2tp_init); | ||
1660 | module_exit(l2tp_exit); | ||
1661 | |||
1662 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
1663 | MODULE_DESCRIPTION("L2TP core"); | ||
1664 | MODULE_LICENSE("GPL"); | ||
1665 | MODULE_VERSION(L2TP_DRV_VERSION); | ||
1666 | |||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h new file mode 100644 index 000000000000..f0f318edd3f1 --- /dev/null +++ b/net/l2tp/l2tp_core.h | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * L2TP internal definitions. | ||
3 | * | ||
4 | * Copyright (c) 2008,2009 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef _L2TP_CORE_H_ | ||
12 | #define _L2TP_CORE_H_ | ||
13 | |||
14 | /* Just some random numbers */ | ||
15 | #define L2TP_TUNNEL_MAGIC 0x42114DDA | ||
16 | #define L2TP_SESSION_MAGIC 0x0C04EB7D | ||
17 | |||
18 | /* Per tunnel, session hash table size */ | ||
19 | #define L2TP_HASH_BITS 4 | ||
20 | #define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS) | ||
21 | |||
22 | /* System-wide, session hash table size */ | ||
23 | #define L2TP_HASH_BITS_2 8 | ||
24 | #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2) | ||
25 | |||
26 | /* Debug message categories for the DEBUG socket option */ | ||
27 | enum { | ||
28 | L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if | ||
29 | * compiled in) */ | ||
30 | L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel | ||
31 | * interface */ | ||
32 | L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ | ||
33 | L2TP_MSG_DATA = (1 << 3), /* data packets */ | ||
34 | }; | ||
35 | |||
36 | struct sk_buff; | ||
37 | |||
/* Traffic counters, kept both per tunnel and per session. 64-bit so
 * they do not wrap quickly on busy long-lived links; reported via
 * debugfs (see l2tp_debugfs.c).
 */
struct l2tp_stats {
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_errors;
	u64 rx_packets;
	u64 rx_bytes;
	u64 rx_seq_discards;	/* presumably drops from sequence checks — confirm in rx path */
	u64 rx_oos_packets;	/* out-of-sequence arrivals */
	u64 rx_errors;
	u64 rx_cookie_discards;	/* presumably drops from cookie mismatch — confirm in rx path */
};
49 | |||
50 | struct l2tp_tunnel; | ||
51 | |||
/* Session configuration, passed to l2tp_session_create(). The values
 * are copied into the new struct l2tp_session. (Note: this is the
 * config carrier, not the session context itself.)
 */
struct l2tp_session_cfg {
	enum l2tp_pwtype pw_type;
	unsigned data_seq:2;	/* data sequencing level
				 * 0 => none, 1 => IP only,
				 * 2 => all
				 */
	unsigned recv_seq:1;	/* expect receive packets with
				 * sequence numbers? */
	unsigned send_seq:1;	/* send packets with sequence
				 * numbers? */
	unsigned lns_mode:1;	/* behave as LNS? LAC enables
				 * sequence numbers under
				 * control of LNS. */
	int debug;		/* bitmask of debug message
				 * categories */
	u16 vlan_id;		/* VLAN pseudowire only */
	u16 offset;		/* offset to payload */
	u16 l2specific_len;	/* Layer 2 specific length */
	u16 l2specific_type;	/* Layer 2 specific type */
	u8 cookie[8];		/* optional cookie */
	int cookie_len;		/* 0, 4 or 8 bytes */
	u8 peer_cookie[8];	/* peer's cookie */
	int peer_cookie_len;	/* 0, 4 or 8 bytes */
	int reorder_timeout;	/* configured reorder timeout
				 * (in jiffies) */
	int mtu;
	int mru;
	char *ifname;
};
84 | |||
/* A session: one pseudowire inside a tunnel. Contains the information
 * needed to demultiplex incoming packets and build outgoing headers,
 * followed by priv[] for pseudowire-private data (l2tp_session_priv()).
 */
struct l2tp_session {
	int magic;		/* should be L2TP_SESSION_MAGIC */

	struct l2tp_tunnel *tunnel;	/* back pointer to tunnel
					 * context */
	u32 session_id;
	u32 peer_session_id;
	u8 cookie[8];
	int cookie_len;
	u8 peer_cookie[8];
	int peer_cookie_len;
	u16 offset;		/* offset from end of L2TP header
				   to beginning of data */
	u16 l2specific_len;
	u16 l2specific_type;
	u16 hdr_len;		/* computed by l2tp_session_set_header_len() */
	u32 nr;			/* session NR state (receive) */
	u32 ns;			/* session NR state (send) */
	struct sk_buff_head reorder_q;	/* receive reorder queue */
	struct hlist_node hlist;	/* node in the tunnel's session hash */
	atomic_t ref_count;

	char name[32];		/* for logging ("sess %u/%u") */
	char ifname[IFNAMSIZ];
	unsigned data_seq:2;	/* data sequencing level
				 * 0 => none, 1 => IP only,
				 * 2 => all
				 */
	unsigned recv_seq:1;	/* expect receive packets with
				 * sequence numbers? */
	unsigned send_seq:1;	/* send packets with sequence
				 * numbers? */
	unsigned lns_mode:1;	/* behave as LNS? LAC enables
				 * sequence numbers under
				 * control of LNS. */
	int debug;		/* bitmask of debug message
				 * categories */
	int reorder_timeout;	/* configured reorder timeout
				 * (in jiffies) */
	int mtu;
	int mru;
	enum l2tp_pwtype pwtype;
	struct l2tp_stats stats;
	struct hlist_node global_hlist;	/* node in the per-net global
					 * session hash (L2TPv3 only) */

	/* Pseudowire hooks. build_header is assigned by
	 * l2tp_session_create(); session_close is NULL-checked before
	 * use (see l2tp_session_delete()).
	 */
	int (*build_header)(struct l2tp_session *session, void *buf);
	void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
	void (*session_close)(struct l2tp_session *session);
	void (*ref)(struct l2tp_session *session);
	void (*deref)(struct l2tp_session *session);
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	void (*show)(struct seq_file *m, void *priv);
#endif
	uint8_t priv[0];	/* private data, sized by the priv_size
				 * argument of l2tp_session_create() */
};
141 | |||
/* Tunnel configuration, passed to l2tp_tunnel_create(). (Note: this
 * is the config carrier; the tunnel context itself is struct
 * l2tp_tunnel below.)
 */
struct l2tp_tunnel_cfg {
	int debug;		/* bitmask of debug message
				 * categories */
	enum l2tp_encap_type encap;

	/* Used only for kernel-created sockets */
	struct in_addr local_ip;
	struct in_addr peer_ip;
	u16 local_udp_port;
	u16 peer_udp_port;
	unsigned int use_udp_checksums:1;
};
157 | |||
/* Tunnel context. Owns the per-tunnel session hash table (so incoming
 * packets can be sorted to their session) and the parent socket.
 * priv[] carries encapsulation-private data (l2tp_tunnel_priv()).
 */
struct l2tp_tunnel {
	int magic;		/* Should be L2TP_TUNNEL_MAGIC */
	rwlock_t hlist_lock;	/* protect session_hlist */
	struct hlist_head session_hlist[L2TP_HASH_SIZE];
				/* hashed list of sessions,
				 * hashed by id */
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int version;		/* 2=>L2TPv2, 3=>L2TPv3 */

	char name[20];		/* for logging */
	int debug;		/* bitmask of debug message
				 * categories */
	enum l2tp_encap_type encap;
	struct l2tp_stats stats;

	struct list_head list;	/* Keep a list of all tunnels */
	struct net *l2tp_net;	/* the net we belong to */

	atomic_t ref_count;	/* one per session; tunnel freed at zero */
#ifdef CONFIG_DEBUG_FS
	/* NOTE(review): guarded by CONFIG_DEBUG_FS while the session
	 * show hook uses CONFIG_L2TP_DEBUGFS(_MODULE) — confirm the
	 * asymmetry is intentional; the guard changes struct layout.
	 */
	void (*show)(struct seq_file *m, void *arg);
#endif
	int (*recv_payload_hook)(struct sk_buff *skb);
	void (*old_sk_destruct)(struct sock *);
	struct sock *sock;	/* Parent socket */
	int fd;			/* presumably the userspace socket fd
				 * passed to l2tp_tunnel_create() — confirm */

	uint8_t priv[0];	/* private data */
};
188 | |||
189 | struct l2tp_nl_cmd_ops { | ||
190 | int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | ||
191 | int (*session_delete)(struct l2tp_session *session); | ||
192 | }; | ||
193 | |||
/* Accessors for the variable-length private areas that trail the
 * tunnel and session structs; for sessions the area is sized by the
 * priv_size argument of l2tp_session_create().
 */
static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
{
	return &tunnel->priv[0];
}

static inline void *l2tp_session_priv(struct l2tp_session *session)
{
	return &session->priv[0];
}
203 | |||
204 | static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) | ||
205 | { | ||
206 | struct l2tp_tunnel *tunnel; | ||
207 | |||
208 | if (sk == NULL) | ||
209 | return NULL; | ||
210 | |||
211 | sock_hold(sk); | ||
212 | tunnel = (struct l2tp_tunnel *)(sk->sk_user_data); | ||
213 | if (tunnel == NULL) { | ||
214 | sock_put(sk); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
219 | |||
220 | out: | ||
221 | return tunnel; | ||
222 | } | ||
223 | |||
224 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); | ||
225 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); | ||
226 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); | ||
227 | extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); | ||
228 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); | ||
229 | |||
230 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); | ||
231 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | ||
232 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | ||
233 | extern int l2tp_session_delete(struct l2tp_session *session); | ||
234 | extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
235 | extern void l2tp_session_free(struct l2tp_session *session); | ||
236 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); | ||
237 | extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb)); | ||
238 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | ||
239 | |||
240 | extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len); | ||
241 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); | ||
242 | extern void l2tp_tunnel_destruct(struct sock *sk); | ||
243 | extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
244 | extern void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
245 | |||
246 | extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); | ||
247 | extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); | ||
248 | |||
/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel; the tunnel is freed when the count drops to zero.
 */
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}

static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}

/* With L2TP_REFCNT_DEBUG defined, every get/put logs its call site so
 * refcount imbalances can be traced.
 */
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_tunnel_inc_refcount(_t) do { \
	printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
	l2tp_tunnel_inc_refcount_1(_t); \
} while (0)
#define l2tp_tunnel_dec_refcount(_t) do { \
	printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
	l2tp_tunnel_dec_refcount_1(_t); \
} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif
275 | |||
/* Session reference counts. Incremented when code obtains a reference
 * to a session; the session is freed (l2tp_session_free()) when the
 * count drops to zero.
 */
static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session)
{
	atomic_inc(&session->ref_count);
}

static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
{
	if (atomic_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}

/* As for tunnels, L2TP_REFCNT_DEBUG logs every get/put call site. */
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_session_inc_refcount(_s) do { \
	printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
	l2tp_session_inc_refcount_1(_s); \
} while (0)
#define l2tp_session_dec_refcount(_s) do { \
	printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
	l2tp_session_dec_refcount_1(_s); \
} while (0)
#else
#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
#endif
303 | |||
304 | #endif /* _L2TP_CORE_H_ */ | ||
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c new file mode 100644 index 000000000000..104ec3b283d4 --- /dev/null +++ b/net/l2tp/l2tp_debugfs.c | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * L2TP subsystem debugfs | ||
3 | * | ||
4 | * Copyright (c) 2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/socket.h> | ||
15 | #include <linux/hash.h> | ||
16 | #include <linux/l2tp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include <net/sock.h> | ||
22 | #include <net/ip.h> | ||
23 | #include <net/icmp.h> | ||
24 | #include <net/udp.h> | ||
25 | #include <net/inet_common.h> | ||
26 | #include <net/inet_hashtables.h> | ||
27 | #include <net/tcp_states.h> | ||
28 | #include <net/protocol.h> | ||
29 | #include <net/xfrm.h> | ||
30 | #include <net/net_namespace.h> | ||
31 | #include <net/netns/generic.h> | ||
32 | |||
33 | #include "l2tp_core.h" | ||
34 | |||
static struct dentry *rootdir;		/* debugfs "l2tp" directory */
static struct dentry *tunnels;		/* "tunnels" file under rootdir */

/* Iterator state for the "tunnels" seq_file: walks every tunnel in
 * the namespace, and within each tunnel every session.
 */
struct l2tp_dfs_seq_data {
	struct net *net;		/* namespace resolved at open time */
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};
45 | |||
46 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) | ||
47 | { | ||
48 | pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); | ||
49 | pd->tunnel_idx++; | ||
50 | } | ||
51 | |||
52 | static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) | ||
53 | { | ||
54 | pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); | ||
55 | pd->session_idx++; | ||
56 | |||
57 | if (pd->session == NULL) { | ||
58 | pd->session_idx = 0; | ||
59 | l2tp_dfs_next_tunnel(pd); | ||
60 | } | ||
61 | |||
62 | } | ||
63 | |||
/* seq_file ->start: return SEQ_START_TOKEN at position 0 so ->show
 * emits the header; otherwise step the iterator held in m->private
 * and return it, or NULL once both lists are exhausted.
 */
static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
{
	struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;

	/* No current tunnel: fetch the first one; otherwise step to
	 * the next session (which rolls over to the next tunnel when
	 * the current one is exhausted).
	 */
	if (pd->tunnel == NULL)
		l2tp_dfs_next_tunnel(pd);
	else
		l2tp_dfs_next_session(pd);

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
87 | |||
88 | |||
/* seq_file ->next: iteration state lives in ->start (pd persists in
 * m->private), so just bump the position and return NULL to end this
 * cycle.
 */
static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

/* seq_file ->stop: no per-cycle resources to release. */
static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
99 | |||
100 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | ||
101 | { | ||
102 | struct l2tp_tunnel *tunnel = v; | ||
103 | int session_count = 0; | ||
104 | int hash; | ||
105 | struct hlist_node *walk; | ||
106 | struct hlist_node *tmp; | ||
107 | |||
108 | read_lock_bh(&tunnel->hlist_lock); | ||
109 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
110 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { | ||
111 | struct l2tp_session *session; | ||
112 | |||
113 | session = hlist_entry(walk, struct l2tp_session, hlist); | ||
114 | if (session->session_id == 0) | ||
115 | continue; | ||
116 | |||
117 | session_count++; | ||
118 | } | ||
119 | } | ||
120 | read_unlock_bh(&tunnel->hlist_lock); | ||
121 | |||
122 | seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); | ||
123 | if (tunnel->sock) { | ||
124 | struct inet_sock *inet = inet_sk(tunnel->sock); | ||
125 | seq_printf(m, " from %pI4 to %pI4\n", | ||
126 | &inet->inet_saddr, &inet->inet_daddr); | ||
127 | if (tunnel->encap == L2TP_ENCAPTYPE_UDP) | ||
128 | seq_printf(m, " source port %hu, dest port %hu\n", | ||
129 | ntohs(inet->inet_sport), ntohs(inet->inet_dport)); | ||
130 | } | ||
131 | seq_printf(m, " L2TPv%d, %s\n", tunnel->version, | ||
132 | tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" : | ||
133 | tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" : | ||
134 | ""); | ||
135 | seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count, | ||
136 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, | ||
137 | atomic_read(&tunnel->ref_count)); | ||
138 | |||
139 | seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", | ||
140 | tunnel->debug, | ||
141 | (unsigned long long)tunnel->stats.tx_packets, | ||
142 | (unsigned long long)tunnel->stats.tx_bytes, | ||
143 | (unsigned long long)tunnel->stats.tx_errors, | ||
144 | (unsigned long long)tunnel->stats.rx_packets, | ||
145 | (unsigned long long)tunnel->stats.rx_bytes, | ||
146 | (unsigned long long)tunnel->stats.rx_errors); | ||
147 | |||
148 | if (tunnel->show != NULL) | ||
149 | tunnel->show(m, tunnel); | ||
150 | } | ||
151 | |||
152 | static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) | ||
153 | { | ||
154 | struct l2tp_session *session = v; | ||
155 | |||
156 | seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id, | ||
157 | session->peer_session_id, | ||
158 | session->pwtype == L2TP_PWTYPE_ETH ? "ETH" : | ||
159 | session->pwtype == L2TP_PWTYPE_PPP ? "PPP" : | ||
160 | ""); | ||
161 | if (session->send_seq || session->recv_seq) | ||
162 | seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns); | ||
163 | seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count)); | ||
164 | seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n", | ||
165 | session->mtu, session->mru, | ||
166 | session->recv_seq ? 'R' : '-', | ||
167 | session->send_seq ? 'S' : '-', | ||
168 | session->data_seq == 1 ? "IPSEQ" : | ||
169 | session->data_seq == 2 ? "DATASEQ" : "-", | ||
170 | session->lns_mode ? "LNS" : "LAC", | ||
171 | session->debug, | ||
172 | jiffies_to_msecs(session->reorder_timeout)); | ||
173 | seq_printf(m, " offset %hu l2specific %hu/%hu\n", | ||
174 | session->offset, session->l2specific_type, session->l2specific_len); | ||
175 | if (session->cookie_len) { | ||
176 | seq_printf(m, " cookie %02x%02x%02x%02x", | ||
177 | session->cookie[0], session->cookie[1], | ||
178 | session->cookie[2], session->cookie[3]); | ||
179 | if (session->cookie_len == 8) | ||
180 | seq_printf(m, "%02x%02x%02x%02x", | ||
181 | session->cookie[4], session->cookie[5], | ||
182 | session->cookie[6], session->cookie[7]); | ||
183 | seq_printf(m, "\n"); | ||
184 | } | ||
185 | if (session->peer_cookie_len) { | ||
186 | seq_printf(m, " peer cookie %02x%02x%02x%02x", | ||
187 | session->peer_cookie[0], session->peer_cookie[1], | ||
188 | session->peer_cookie[2], session->peer_cookie[3]); | ||
189 | if (session->peer_cookie_len == 8) | ||
190 | seq_printf(m, "%02x%02x%02x%02x", | ||
191 | session->peer_cookie[4], session->peer_cookie[5], | ||
192 | session->peer_cookie[6], session->peer_cookie[7]); | ||
193 | seq_printf(m, "\n"); | ||
194 | } | ||
195 | |||
196 | seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", | ||
197 | session->nr, session->ns, | ||
198 | (unsigned long long)session->stats.tx_packets, | ||
199 | (unsigned long long)session->stats.tx_bytes, | ||
200 | (unsigned long long)session->stats.tx_errors, | ||
201 | (unsigned long long)session->stats.rx_packets, | ||
202 | (unsigned long long)session->stats.rx_bytes, | ||
203 | (unsigned long long)session->stats.rx_errors); | ||
204 | |||
205 | if (session->show != NULL) | ||
206 | session->show(m, session); | ||
207 | } | ||
208 | |||
/* seq_file ->show: emit the column legend for the SEQ_START_TOKEN
 * record, then either a tunnel or a session record depending on the
 * iterator state.
 */
static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
{
	struct l2tp_dfs_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n");
		seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n");
		seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, " SESSION ID, peer ID, PWTYPE\n");
		seq_puts(m, " refcnt cnt\n");
		seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n");
		seq_puts(m, " [ cookie ]\n");
		seq_puts(m, " [ peer cookie ]\n");
		seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
		seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context */
	if (pd->session == NULL)
		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
	else
		l2tp_dfs_seq_session_show(m, pd->session);

out:
	return 0;
}
238 | |||
239 | static const struct seq_operations l2tp_dfs_seq_ops = { | ||
240 | .start = l2tp_dfs_seq_start, | ||
241 | .next = l2tp_dfs_seq_next, | ||
242 | .stop = l2tp_dfs_seq_stop, | ||
243 | .show = l2tp_dfs_seq_show, | ||
244 | }; | ||
245 | |||
246 | static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) | ||
247 | { | ||
248 | struct l2tp_dfs_seq_data *pd; | ||
249 | struct seq_file *seq; | ||
250 | int rc = -ENOMEM; | ||
251 | |||
252 | pd = kzalloc(GFP_KERNEL, sizeof(*pd)); | ||
253 | if (pd == NULL) | ||
254 | goto out; | ||
255 | |||
256 | /* Derive the network namespace from the pid opening the | ||
257 | * file. | ||
258 | */ | ||
259 | pd->net = get_net_ns_by_pid(current->pid); | ||
260 | if (IS_ERR(pd->net)) { | ||
261 | rc = -PTR_ERR(pd->net); | ||
262 | goto err_free_pd; | ||
263 | } | ||
264 | |||
265 | rc = seq_open(file, &l2tp_dfs_seq_ops); | ||
266 | if (rc) | ||
267 | goto err_free_net; | ||
268 | |||
269 | seq = file->private_data; | ||
270 | seq->private = pd; | ||
271 | |||
272 | out: | ||
273 | return rc; | ||
274 | |||
275 | err_free_net: | ||
276 | put_net(pd->net); | ||
277 | err_free_pd: | ||
278 | kfree(pd); | ||
279 | goto out; | ||
280 | } | ||
281 | |||
282 | static int l2tp_dfs_seq_release(struct inode *inode, struct file *file) | ||
283 | { | ||
284 | struct l2tp_dfs_seq_data *pd; | ||
285 | struct seq_file *seq; | ||
286 | |||
287 | seq = file->private_data; | ||
288 | pd = seq->private; | ||
289 | if (pd->net) | ||
290 | put_net(pd->net); | ||
291 | kfree(pd); | ||
292 | seq_release(inode, file); | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | static const struct file_operations l2tp_dfs_fops = { | ||
298 | .owner = THIS_MODULE, | ||
299 | .open = l2tp_dfs_seq_open, | ||
300 | .read = seq_read, | ||
301 | .llseek = seq_lseek, | ||
302 | .release = l2tp_dfs_seq_release, | ||
303 | }; | ||
304 | |||
/* Module load: create the debugfs "l2tp" directory and its "tunnels"
 * file. Returns 0 on success or a negative errno (logged as a
 * warning).
 */
static int __init l2tp_debugfs_init(void)
{
	int rc = 0;

	rootdir = debugfs_create_dir("l2tp", NULL);
	if (IS_ERR(rootdir)) {
		rc = PTR_ERR(rootdir);
		rootdir = NULL;
		goto out;
	}

	/* NOTE(review): debugfs_create_file() returning NULL is mapped
	 * to -EIO; rootdir is not removed on this path — presumably
	 * acceptable since a failed init prevents module load, but
	 * confirm.
	 */
	tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops);
	if (tunnels == NULL)
		rc = -EIO;

	printk(KERN_INFO "L2TP debugfs support\n");

out:
	if (rc)
		printk(KERN_WARNING "l2tp debugfs: unable to init\n");

	return rc;
}
328 | |||
/* Module unload: remove the "tunnels" file, then the "l2tp" dir. */
static void __exit l2tp_debugfs_exit(void)
{
	debugfs_remove(tunnels);
	debugfs_remove(rootdir);
}
334 | |||
335 | module_init(l2tp_debugfs_init); | ||
336 | module_exit(l2tp_debugfs_exit); | ||
337 | |||
338 | MODULE_LICENSE("GPL"); | ||
339 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
340 | MODULE_DESCRIPTION("L2TP debugfs driver"); | ||
341 | MODULE_VERSION("1.0"); | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c new file mode 100644 index 000000000000..58c6c4cda73b --- /dev/null +++ b/net/l2tp/l2tp_eth.c | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * L2TPv3 ethernet pseudowire driver | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/socket.h> | ||
15 | #include <linux/hash.h> | ||
16 | #include <linux/l2tp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/ip.h> | ||
22 | #include <net/icmp.h> | ||
23 | #include <net/udp.h> | ||
24 | #include <net/inet_common.h> | ||
25 | #include <net/inet_hashtables.h> | ||
26 | #include <net/tcp_states.h> | ||
27 | #include <net/protocol.h> | ||
28 | #include <net/xfrm.h> | ||
29 | #include <net/net_namespace.h> | ||
30 | #include <net/netns/generic.h> | ||
31 | |||
32 | #include "l2tp_core.h" | ||
33 | |||
34 | /* Default device name. May be overridden by name specified by user */ | ||
35 | #define L2TP_ETH_DEV_NAME "l2tpeth%d" | ||
36 | |||
37 | /* via netdev_priv() */ | ||
38 | struct l2tp_eth { | ||
39 | struct net_device *dev; | ||
40 | struct sock *tunnel_sock; | ||
41 | struct l2tp_session *session; | ||
42 | struct list_head list; | ||
43 | }; | ||
44 | |||
45 | /* via l2tp_session_priv() */ | ||
46 | struct l2tp_eth_sess { | ||
47 | struct net_device *dev; | ||
48 | }; | ||
49 | |||
50 | /* per-net private data for this module */ | ||
51 | static unsigned int l2tp_eth_net_id; | ||
52 | struct l2tp_eth_net { | ||
53 | struct list_head l2tp_eth_dev_list; | ||
54 | spinlock_t l2tp_eth_lock; | ||
55 | }; | ||
56 | |||
57 | static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) | ||
58 | { | ||
59 | return net_generic(net, l2tp_eth_net_id); | ||
60 | } | ||
61 | |||
62 | static int l2tp_eth_dev_init(struct net_device *dev) | ||
63 | { | ||
64 | struct l2tp_eth *priv = netdev_priv(dev); | ||
65 | |||
66 | priv->dev = dev; | ||
67 | random_ether_addr(dev->dev_addr); | ||
68 | memset(&dev->broadcast[0], 0xff, 6); | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static void l2tp_eth_dev_uninit(struct net_device *dev) | ||
74 | { | ||
75 | struct l2tp_eth *priv = netdev_priv(dev); | ||
76 | struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); | ||
77 | |||
78 | spin_lock(&pn->l2tp_eth_lock); | ||
79 | list_del_init(&priv->list); | ||
80 | spin_unlock(&pn->l2tp_eth_lock); | ||
81 | dev_put(dev); | ||
82 | } | ||
83 | |||
84 | static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) | ||
85 | { | ||
86 | struct l2tp_eth *priv = netdev_priv(dev); | ||
87 | struct l2tp_session *session = priv->session; | ||
88 | |||
89 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
90 | |||
91 | dev->stats.tx_bytes += skb->len; | ||
92 | dev->stats.tx_packets++; | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static struct net_device_ops l2tp_eth_netdev_ops = { | ||
98 | .ndo_init = l2tp_eth_dev_init, | ||
99 | .ndo_uninit = l2tp_eth_dev_uninit, | ||
100 | .ndo_start_xmit = l2tp_eth_dev_xmit, | ||
101 | }; | ||
102 | |||
103 | static void l2tp_eth_dev_setup(struct net_device *dev) | ||
104 | { | ||
105 | ether_setup(dev); | ||
106 | |||
107 | dev->netdev_ops = &l2tp_eth_netdev_ops; | ||
108 | dev->destructor = free_netdev; | ||
109 | } | ||
110 | |||
111 | static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | ||
112 | { | ||
113 | struct l2tp_eth_sess *spriv = l2tp_session_priv(session); | ||
114 | struct net_device *dev = spriv->dev; | ||
115 | |||
116 | if (session->debug & L2TP_MSG_DATA) { | ||
117 | unsigned int length; | ||
118 | int offset; | ||
119 | u8 *ptr = skb->data; | ||
120 | |||
121 | length = min(32u, skb->len); | ||
122 | if (!pskb_may_pull(skb, length)) | ||
123 | goto error; | ||
124 | |||
125 | printk(KERN_DEBUG "%s: eth recv: ", session->name); | ||
126 | |||
127 | offset = 0; | ||
128 | do { | ||
129 | printk(" %02X", ptr[offset]); | ||
130 | } while (++offset < length); | ||
131 | |||
132 | printk("\n"); | ||
133 | } | ||
134 | |||
135 | if (data_len < ETH_HLEN) | ||
136 | goto error; | ||
137 | |||
138 | secpath_reset(skb); | ||
139 | |||
140 | /* checksums verified by L2TP */ | ||
141 | skb->ip_summed = CHECKSUM_NONE; | ||
142 | |||
143 | skb_dst_drop(skb); | ||
144 | nf_reset(skb); | ||
145 | |||
146 | if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { | ||
147 | dev->last_rx = jiffies; | ||
148 | dev->stats.rx_packets++; | ||
149 | dev->stats.rx_bytes += data_len; | ||
150 | } else | ||
151 | dev->stats.rx_errors++; | ||
152 | |||
153 | return; | ||
154 | |||
155 | error: | ||
156 | dev->stats.rx_errors++; | ||
157 | kfree_skb(skb); | ||
158 | } | ||
159 | |||
160 | static void l2tp_eth_delete(struct l2tp_session *session) | ||
161 | { | ||
162 | struct l2tp_eth_sess *spriv; | ||
163 | struct net_device *dev; | ||
164 | |||
165 | if (session) { | ||
166 | spriv = l2tp_session_priv(session); | ||
167 | dev = spriv->dev; | ||
168 | if (dev) { | ||
169 | unregister_netdev(dev); | ||
170 | spriv->dev = NULL; | ||
171 | } | ||
172 | } | ||
173 | } | ||
174 | |||
175 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
176 | static void l2tp_eth_show(struct seq_file *m, void *arg) | ||
177 | { | ||
178 | struct l2tp_session *session = arg; | ||
179 | struct l2tp_eth_sess *spriv = l2tp_session_priv(session); | ||
180 | struct net_device *dev = spriv->dev; | ||
181 | |||
182 | seq_printf(m, " interface %s\n", dev->name); | ||
183 | } | ||
184 | #endif | ||
185 | |||
186 | static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
187 | { | ||
188 | struct net_device *dev; | ||
189 | char name[IFNAMSIZ]; | ||
190 | struct l2tp_tunnel *tunnel; | ||
191 | struct l2tp_session *session; | ||
192 | struct l2tp_eth *priv; | ||
193 | struct l2tp_eth_sess *spriv; | ||
194 | int rc; | ||
195 | struct l2tp_eth_net *pn; | ||
196 | |||
197 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
198 | if (!tunnel) { | ||
199 | rc = -ENODEV; | ||
200 | goto out; | ||
201 | } | ||
202 | |||
203 | session = l2tp_session_find(net, tunnel, session_id); | ||
204 | if (session) { | ||
205 | rc = -EEXIST; | ||
206 | goto out; | ||
207 | } | ||
208 | |||
209 | if (cfg->ifname) { | ||
210 | dev = dev_get_by_name(net, cfg->ifname); | ||
211 | if (dev) { | ||
212 | dev_put(dev); | ||
213 | rc = -EEXIST; | ||
214 | goto out; | ||
215 | } | ||
216 | strlcpy(name, cfg->ifname, IFNAMSIZ); | ||
217 | } else | ||
218 | strcpy(name, L2TP_ETH_DEV_NAME); | ||
219 | |||
220 | session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, | ||
221 | peer_session_id, cfg); | ||
222 | if (!session) { | ||
223 | rc = -ENOMEM; | ||
224 | goto out; | ||
225 | } | ||
226 | |||
227 | dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup); | ||
228 | if (!dev) { | ||
229 | rc = -ENOMEM; | ||
230 | goto out_del_session; | ||
231 | } | ||
232 | |||
233 | dev_net_set(dev, net); | ||
234 | if (session->mtu == 0) | ||
235 | session->mtu = dev->mtu - session->hdr_len; | ||
236 | dev->mtu = session->mtu; | ||
237 | dev->needed_headroom += session->hdr_len; | ||
238 | |||
239 | priv = netdev_priv(dev); | ||
240 | priv->dev = dev; | ||
241 | priv->session = session; | ||
242 | INIT_LIST_HEAD(&priv->list); | ||
243 | |||
244 | priv->tunnel_sock = tunnel->sock; | ||
245 | session->recv_skb = l2tp_eth_dev_recv; | ||
246 | session->session_close = l2tp_eth_delete; | ||
247 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
248 | session->show = l2tp_eth_show; | ||
249 | #endif | ||
250 | |||
251 | spriv = l2tp_session_priv(session); | ||
252 | spriv->dev = dev; | ||
253 | |||
254 | rc = register_netdev(dev); | ||
255 | if (rc < 0) | ||
256 | goto out_del_dev; | ||
257 | |||
258 | /* Must be done after register_netdev() */ | ||
259 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | ||
260 | |||
261 | dev_hold(dev); | ||
262 | pn = l2tp_eth_pernet(dev_net(dev)); | ||
263 | spin_lock(&pn->l2tp_eth_lock); | ||
264 | list_add(&priv->list, &pn->l2tp_eth_dev_list); | ||
265 | spin_unlock(&pn->l2tp_eth_lock); | ||
266 | |||
267 | return 0; | ||
268 | |||
269 | out_del_dev: | ||
270 | free_netdev(dev); | ||
271 | out_del_session: | ||
272 | l2tp_session_delete(session); | ||
273 | out: | ||
274 | return rc; | ||
275 | } | ||
276 | |||
277 | static __net_init int l2tp_eth_init_net(struct net *net) | ||
278 | { | ||
279 | struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); | ||
280 | |||
281 | INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); | ||
282 | spin_lock_init(&pn->l2tp_eth_lock); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static __net_initdata struct pernet_operations l2tp_eth_net_ops = { | ||
288 | .init = l2tp_eth_init_net, | ||
289 | .id = &l2tp_eth_net_id, | ||
290 | .size = sizeof(struct l2tp_eth_net), | ||
291 | }; | ||
292 | |||
293 | |||
294 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { | ||
295 | .session_create = l2tp_eth_create, | ||
296 | .session_delete = l2tp_session_delete, | ||
297 | }; | ||
298 | |||
299 | |||
300 | static int __init l2tp_eth_init(void) | ||
301 | { | ||
302 | int err = 0; | ||
303 | |||
304 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); | ||
305 | if (err) | ||
306 | goto out; | ||
307 | |||
308 | err = register_pernet_device(&l2tp_eth_net_ops); | ||
309 | if (err) | ||
310 | goto out_unreg; | ||
311 | |||
312 | printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | out_unreg: | ||
317 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
318 | out: | ||
319 | return err; | ||
320 | } | ||
321 | |||
322 | static void __exit l2tp_eth_exit(void) | ||
323 | { | ||
324 | unregister_pernet_device(&l2tp_eth_net_ops); | ||
325 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
326 | } | ||
327 | |||
328 | module_init(l2tp_eth_init); | ||
329 | module_exit(l2tp_eth_exit); | ||
330 | |||
331 | MODULE_LICENSE("GPL"); | ||
332 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
333 | MODULE_DESCRIPTION("L2TP ethernet pseudowire driver"); | ||
334 | MODULE_VERSION("1.0"); | ||
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c new file mode 100644 index 000000000000..0852512d392c --- /dev/null +++ b/net/l2tp/l2tp_ip.c | |||
@@ -0,0 +1,679 @@ | |||
1 | /* | ||
2 | * L2TPv3 IP encapsulation support | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/icmp.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/random.h> | ||
16 | #include <linux/socket.h> | ||
17 | #include <linux/l2tp.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <net/sock.h> | ||
20 | #include <net/ip.h> | ||
21 | #include <net/icmp.h> | ||
22 | #include <net/udp.h> | ||
23 | #include <net/inet_common.h> | ||
24 | #include <net/inet_hashtables.h> | ||
25 | #include <net/tcp_states.h> | ||
26 | #include <net/protocol.h> | ||
27 | #include <net/xfrm.h> | ||
28 | |||
29 | #include "l2tp_core.h" | ||
30 | |||
31 | struct l2tp_ip_sock { | ||
32 | /* inet_sock has to be the first member of l2tp_ip_sock */ | ||
33 | struct inet_sock inet; | ||
34 | |||
35 | __u32 conn_id; | ||
36 | __u32 peer_conn_id; | ||
37 | |||
38 | __u64 tx_packets; | ||
39 | __u64 tx_bytes; | ||
40 | __u64 tx_errors; | ||
41 | __u64 rx_packets; | ||
42 | __u64 rx_bytes; | ||
43 | __u64 rx_errors; | ||
44 | }; | ||
45 | |||
46 | static DEFINE_RWLOCK(l2tp_ip_lock); | ||
47 | static struct hlist_head l2tp_ip_table; | ||
48 | static struct hlist_head l2tp_ip_bind_table; | ||
49 | |||
50 | static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) | ||
51 | { | ||
52 | return (struct l2tp_ip_sock *)sk; | ||
53 | } | ||
54 | |||
55 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | ||
56 | { | ||
57 | struct hlist_node *node; | ||
58 | struct sock *sk; | ||
59 | |||
60 | sk_for_each_bound(sk, node, &l2tp_ip_bind_table) { | ||
61 | struct inet_sock *inet = inet_sk(sk); | ||
62 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); | ||
63 | |||
64 | if (l2tp == NULL) | ||
65 | continue; | ||
66 | |||
67 | if ((l2tp->conn_id == tunnel_id) && | ||
68 | #ifdef CONFIG_NET_NS | ||
69 | (sk->sk_net == net) && | ||
70 | #endif | ||
71 | !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && | ||
72 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) | ||
73 | goto found; | ||
74 | } | ||
75 | |||
76 | sk = NULL; | ||
77 | found: | ||
78 | return sk; | ||
79 | } | ||
80 | |||
81 | static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | ||
82 | { | ||
83 | struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); | ||
84 | if (sk) | ||
85 | sock_hold(sk); | ||
86 | |||
87 | return sk; | ||
88 | } | ||
89 | |||
90 | /* When processing receive frames, there are two cases to | ||
91 | * consider. Data frames consist of a non-zero session-id and an | ||
92 | * optional cookie. Control frames consist of a regular L2TP header | ||
93 | * preceded by 32-bits of zeros. | ||
94 | * | ||
95 | * L2TPv3 Session Header Over IP | ||
96 | * | ||
97 | * 0 1 2 3 | ||
98 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
99 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
100 | * | Session ID | | ||
101 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
102 | * | Cookie (optional, maximum 64 bits)... | ||
103 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
104 | * | | ||
105 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
106 | * | ||
107 | * L2TPv3 Control Message Header Over IP | ||
108 | * | ||
109 | * 0 1 2 3 | ||
110 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
111 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
112 | * | (32 bits of zeros) | | ||
113 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
114 | * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | | ||
115 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
116 | * | Control Connection ID | | ||
117 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
118 | * | Ns | Nr | | ||
119 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
120 | * | ||
121 | * All control frames are passed to userspace. | ||
122 | */ | ||
123 | static int l2tp_ip_recv(struct sk_buff *skb) | ||
124 | { | ||
125 | struct sock *sk; | ||
126 | u32 session_id; | ||
127 | u32 tunnel_id; | ||
128 | unsigned char *ptr, *optr; | ||
129 | struct l2tp_session *session; | ||
130 | struct l2tp_tunnel *tunnel = NULL; | ||
131 | int length; | ||
132 | int offset; | ||
133 | |||
134 | /* Point to L2TP header */ | ||
135 | optr = ptr = skb->data; | ||
136 | |||
137 | if (!pskb_may_pull(skb, 4)) | ||
138 | goto discard; | ||
139 | |||
140 | session_id = ntohl(*((__be32 *) ptr)); | ||
141 | ptr += 4; | ||
142 | |||
143 | /* RFC3931: L2TP/IP packets have the first 4 bytes containing | ||
144 | * the session_id. If it is 0, the packet is a L2TP control | ||
145 | * frame and the session_id value can be discarded. | ||
146 | */ | ||
147 | if (session_id == 0) { | ||
148 | __skb_pull(skb, 4); | ||
149 | goto pass_up; | ||
150 | } | ||
151 | |||
152 | /* Ok, this is a data packet. Lookup the session. */ | ||
153 | session = l2tp_session_find(&init_net, NULL, session_id); | ||
154 | if (session == NULL) | ||
155 | goto discard; | ||
156 | |||
157 | tunnel = session->tunnel; | ||
158 | if (tunnel == NULL) | ||
159 | goto discard; | ||
160 | |||
161 | /* Trace packet contents, if enabled */ | ||
162 | if (tunnel->debug & L2TP_MSG_DATA) { | ||
163 | length = min(32u, skb->len); | ||
164 | if (!pskb_may_pull(skb, length)) | ||
165 | goto discard; | ||
166 | |||
167 | printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); | ||
168 | |||
169 | offset = 0; | ||
170 | do { | ||
171 | printk(" %02X", ptr[offset]); | ||
172 | } while (++offset < length); | ||
173 | |||
174 | printk("\n"); | ||
175 | } | ||
176 | |||
177 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); | ||
178 | |||
179 | return 0; | ||
180 | |||
181 | pass_up: | ||
182 | /* Get the tunnel_id from the L2TP header */ | ||
183 | if (!pskb_may_pull(skb, 12)) | ||
184 | goto discard; | ||
185 | |||
186 | if ((skb->data[0] & 0xc0) != 0xc0) | ||
187 | goto discard; | ||
188 | |||
189 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); | ||
190 | tunnel = l2tp_tunnel_find(&init_net, tunnel_id); | ||
191 | if (tunnel != NULL) | ||
192 | sk = tunnel->sock; | ||
193 | else { | ||
194 | struct iphdr *iph = (struct iphdr *) skb_network_header(skb); | ||
195 | |||
196 | read_lock_bh(&l2tp_ip_lock); | ||
197 | sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id); | ||
198 | read_unlock_bh(&l2tp_ip_lock); | ||
199 | } | ||
200 | |||
201 | if (sk == NULL) | ||
202 | goto discard; | ||
203 | |||
204 | sock_hold(sk); | ||
205 | |||
206 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | ||
207 | goto discard_put; | ||
208 | |||
209 | nf_reset(skb); | ||
210 | |||
211 | return sk_receive_skb(sk, skb, 1); | ||
212 | |||
213 | discard_put: | ||
214 | sock_put(sk); | ||
215 | |||
216 | discard: | ||
217 | kfree_skb(skb); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static int l2tp_ip_open(struct sock *sk) | ||
222 | { | ||
223 | /* Prevent autobind. We don't have ports. */ | ||
224 | inet_sk(sk)->inet_num = IPPROTO_L2TP; | ||
225 | |||
226 | write_lock_bh(&l2tp_ip_lock); | ||
227 | sk_add_node(sk, &l2tp_ip_table); | ||
228 | write_unlock_bh(&l2tp_ip_lock); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static void l2tp_ip_close(struct sock *sk, long timeout) | ||
234 | { | ||
235 | write_lock_bh(&l2tp_ip_lock); | ||
236 | hlist_del_init(&sk->sk_bind_node); | ||
237 | hlist_del_init(&sk->sk_node); | ||
238 | write_unlock_bh(&l2tp_ip_lock); | ||
239 | sk_common_release(sk); | ||
240 | } | ||
241 | |||
242 | static void l2tp_ip_destroy_sock(struct sock *sk) | ||
243 | { | ||
244 | struct sk_buff *skb; | ||
245 | |||
246 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | ||
247 | kfree_skb(skb); | ||
248 | |||
249 | sk_refcnt_debug_dec(sk); | ||
250 | } | ||
251 | |||
252 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
253 | { | ||
254 | struct inet_sock *inet = inet_sk(sk); | ||
255 | struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; | ||
256 | int ret = -EINVAL; | ||
257 | int chk_addr_ret; | ||
258 | |||
259 | ret = -EADDRINUSE; | ||
260 | read_lock_bh(&l2tp_ip_lock); | ||
261 | if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) | ||
262 | goto out_in_use; | ||
263 | |||
264 | read_unlock_bh(&l2tp_ip_lock); | ||
265 | |||
266 | lock_sock(sk); | ||
267 | if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) | ||
268 | goto out; | ||
269 | |||
270 | chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr); | ||
271 | ret = -EADDRNOTAVAIL; | ||
272 | if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && | ||
273 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) | ||
274 | goto out; | ||
275 | |||
276 | inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; | ||
277 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | ||
278 | inet->inet_saddr = 0; /* Use device */ | ||
279 | sk_dst_reset(sk); | ||
280 | |||
281 | l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; | ||
282 | |||
283 | write_lock_bh(&l2tp_ip_lock); | ||
284 | sk_add_bind_node(sk, &l2tp_ip_bind_table); | ||
285 | sk_del_node_init(sk); | ||
286 | write_unlock_bh(&l2tp_ip_lock); | ||
287 | ret = 0; | ||
288 | out: | ||
289 | release_sock(sk); | ||
290 | |||
291 | return ret; | ||
292 | |||
293 | out_in_use: | ||
294 | read_unlock_bh(&l2tp_ip_lock); | ||
295 | |||
296 | return ret; | ||
297 | } | ||
298 | |||
299 | static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
300 | { | ||
301 | int rc; | ||
302 | struct inet_sock *inet = inet_sk(sk); | ||
303 | struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; | ||
304 | struct rtable *rt; | ||
305 | __be32 saddr; | ||
306 | int oif; | ||
307 | |||
308 | rc = -EINVAL; | ||
309 | if (addr_len < sizeof(*lsa)) | ||
310 | goto out; | ||
311 | |||
312 | rc = -EAFNOSUPPORT; | ||
313 | if (lsa->l2tp_family != AF_INET) | ||
314 | goto out; | ||
315 | |||
316 | sk_dst_reset(sk); | ||
317 | |||
318 | oif = sk->sk_bound_dev_if; | ||
319 | saddr = inet->inet_saddr; | ||
320 | |||
321 | rc = -EINVAL; | ||
322 | if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) | ||
323 | goto out; | ||
324 | |||
325 | rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr, | ||
326 | RT_CONN_FLAGS(sk), oif, | ||
327 | IPPROTO_L2TP, | ||
328 | 0, 0, sk, 1); | ||
329 | if (rc) { | ||
330 | if (rc == -ENETUNREACH) | ||
331 | IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); | ||
332 | goto out; | ||
333 | } | ||
334 | |||
335 | rc = -ENETUNREACH; | ||
336 | if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { | ||
337 | ip_rt_put(rt); | ||
338 | goto out; | ||
339 | } | ||
340 | |||
341 | l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; | ||
342 | |||
343 | if (!inet->inet_saddr) | ||
344 | inet->inet_saddr = rt->rt_src; | ||
345 | if (!inet->inet_rcv_saddr) | ||
346 | inet->inet_rcv_saddr = rt->rt_src; | ||
347 | inet->inet_daddr = rt->rt_dst; | ||
348 | sk->sk_state = TCP_ESTABLISHED; | ||
349 | inet->inet_id = jiffies; | ||
350 | |||
351 | sk_dst_set(sk, &rt->u.dst); | ||
352 | |||
353 | write_lock_bh(&l2tp_ip_lock); | ||
354 | hlist_del_init(&sk->sk_bind_node); | ||
355 | sk_add_bind_node(sk, &l2tp_ip_bind_table); | ||
356 | write_unlock_bh(&l2tp_ip_lock); | ||
357 | |||
358 | rc = 0; | ||
359 | out: | ||
360 | return rc; | ||
361 | } | ||
362 | |||
363 | static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, | ||
364 | int *uaddr_len, int peer) | ||
365 | { | ||
366 | struct sock *sk = sock->sk; | ||
367 | struct inet_sock *inet = inet_sk(sk); | ||
368 | struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); | ||
369 | struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; | ||
370 | |||
371 | memset(lsa, 0, sizeof(*lsa)); | ||
372 | lsa->l2tp_family = AF_INET; | ||
373 | if (peer) { | ||
374 | if (!inet->inet_dport) | ||
375 | return -ENOTCONN; | ||
376 | lsa->l2tp_conn_id = lsk->peer_conn_id; | ||
377 | lsa->l2tp_addr.s_addr = inet->inet_daddr; | ||
378 | } else { | ||
379 | __be32 addr = inet->inet_rcv_saddr; | ||
380 | if (!addr) | ||
381 | addr = inet->inet_saddr; | ||
382 | lsa->l2tp_conn_id = lsk->conn_id; | ||
383 | lsa->l2tp_addr.s_addr = addr; | ||
384 | } | ||
385 | *uaddr_len = sizeof(*lsa); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
390 | { | ||
391 | int rc; | ||
392 | |||
393 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | ||
394 | goto drop; | ||
395 | |||
396 | nf_reset(skb); | ||
397 | |||
398 | /* Charge it to the socket, dropping if the queue is full. */ | ||
399 | rc = sock_queue_rcv_skb(sk, skb); | ||
400 | if (rc < 0) | ||
401 | goto drop; | ||
402 | |||
403 | return 0; | ||
404 | |||
405 | drop: | ||
406 | IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); | ||
407 | kfree_skb(skb); | ||
408 | return -1; | ||
409 | } | ||
410 | |||
411 | /* Userspace will call sendmsg() on the tunnel socket to send L2TP | ||
412 | * control frames. | ||
413 | */ | ||
414 | static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) | ||
415 | { | ||
416 | struct sk_buff *skb; | ||
417 | int rc; | ||
418 | struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); | ||
419 | struct inet_sock *inet = inet_sk(sk); | ||
420 | struct ip_options *opt = inet->opt; | ||
421 | struct rtable *rt = NULL; | ||
422 | int connected = 0; | ||
423 | __be32 daddr; | ||
424 | |||
425 | if (sock_flag(sk, SOCK_DEAD)) | ||
426 | return -ENOTCONN; | ||
427 | |||
428 | /* Get and verify the address. */ | ||
429 | if (msg->msg_name) { | ||
430 | struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name; | ||
431 | if (msg->msg_namelen < sizeof(*lip)) | ||
432 | return -EINVAL; | ||
433 | |||
434 | if (lip->l2tp_family != AF_INET) { | ||
435 | if (lip->l2tp_family != AF_UNSPEC) | ||
436 | return -EAFNOSUPPORT; | ||
437 | } | ||
438 | |||
439 | daddr = lip->l2tp_addr.s_addr; | ||
440 | } else { | ||
441 | if (sk->sk_state != TCP_ESTABLISHED) | ||
442 | return -EDESTADDRREQ; | ||
443 | |||
444 | daddr = inet->inet_daddr; | ||
445 | connected = 1; | ||
446 | } | ||
447 | |||
448 | /* Allocate a socket buffer */ | ||
449 | rc = -ENOMEM; | ||
450 | skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + | ||
451 | 4 + len, 0, GFP_KERNEL); | ||
452 | if (!skb) | ||
453 | goto error; | ||
454 | |||
455 | /* Reserve space for headers, putting IP header on 4-byte boundary. */ | ||
456 | skb_reserve(skb, 2 + NET_SKB_PAD); | ||
457 | skb_reset_network_header(skb); | ||
458 | skb_reserve(skb, sizeof(struct iphdr)); | ||
459 | skb_reset_transport_header(skb); | ||
460 | |||
461 | /* Insert 0 session_id */ | ||
462 | *((__be32 *) skb_put(skb, 4)) = 0; | ||
463 | |||
464 | /* Copy user data into skb */ | ||
465 | rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | ||
466 | if (rc < 0) { | ||
467 | kfree_skb(skb); | ||
468 | goto error; | ||
469 | } | ||
470 | |||
471 | if (connected) | ||
472 | rt = (struct rtable *) __sk_dst_check(sk, 0); | ||
473 | |||
474 | if (rt == NULL) { | ||
475 | /* Use correct destination address if we have options. */ | ||
476 | if (opt && opt->srr) | ||
477 | daddr = opt->faddr; | ||
478 | |||
479 | { | ||
480 | struct flowi fl = { .oif = sk->sk_bound_dev_if, | ||
481 | .nl_u = { .ip4_u = { | ||
482 | .daddr = daddr, | ||
483 | .saddr = inet->inet_saddr, | ||
484 | .tos = RT_CONN_FLAGS(sk) } }, | ||
485 | .proto = sk->sk_protocol, | ||
486 | .flags = inet_sk_flowi_flags(sk), | ||
487 | .uli_u = { .ports = { | ||
488 | .sport = inet->inet_sport, | ||
489 | .dport = inet->inet_dport } } }; | ||
490 | |||
491 | /* If this fails, retransmit mechanism of transport layer will | ||
492 | * keep trying until route appears or the connection times | ||
493 | * itself out. | ||
494 | */ | ||
495 | security_sk_classify_flow(sk, &fl); | ||
496 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) | ||
497 | goto no_route; | ||
498 | } | ||
499 | sk_setup_caps(sk, &rt->u.dst); | ||
500 | } | ||
501 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | ||
502 | |||
503 | /* Queue the packet to IP for output */ | ||
504 | rc = ip_queue_xmit(skb); | ||
505 | |||
506 | error: | ||
507 | /* Update stats */ | ||
508 | if (rc >= 0) { | ||
509 | lsa->tx_packets++; | ||
510 | lsa->tx_bytes += len; | ||
511 | rc = len; | ||
512 | } else { | ||
513 | lsa->tx_errors++; | ||
514 | } | ||
515 | |||
516 | return rc; | ||
517 | |||
518 | no_route: | ||
519 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | ||
520 | kfree_skb(skb); | ||
521 | return -EHOSTUNREACH; | ||
522 | } | ||
523 | |||
524 | static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | ||
525 | size_t len, int noblock, int flags, int *addr_len) | ||
526 | { | ||
527 | struct inet_sock *inet = inet_sk(sk); | ||
528 | struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); | ||
529 | size_t copied = 0; | ||
530 | int err = -EOPNOTSUPP; | ||
531 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | ||
532 | struct sk_buff *skb; | ||
533 | |||
534 | if (flags & MSG_OOB) | ||
535 | goto out; | ||
536 | |||
537 | if (addr_len) | ||
538 | *addr_len = sizeof(*sin); | ||
539 | |||
540 | skb = skb_recv_datagram(sk, flags, noblock, &err); | ||
541 | if (!skb) | ||
542 | goto out; | ||
543 | |||
544 | copied = skb->len; | ||
545 | if (len < copied) { | ||
546 | msg->msg_flags |= MSG_TRUNC; | ||
547 | copied = len; | ||
548 | } | ||
549 | |||
550 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
551 | if (err) | ||
552 | goto done; | ||
553 | |||
554 | sock_recv_timestamp(msg, sk, skb); | ||
555 | |||
556 | /* Copy the address. */ | ||
557 | if (sin) { | ||
558 | sin->sin_family = AF_INET; | ||
559 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | ||
560 | sin->sin_port = 0; | ||
561 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
562 | } | ||
563 | if (inet->cmsg_flags) | ||
564 | ip_cmsg_recv(msg, skb); | ||
565 | if (flags & MSG_TRUNC) | ||
566 | copied = skb->len; | ||
567 | done: | ||
568 | skb_free_datagram(sk, skb); | ||
569 | out: | ||
570 | if (err) { | ||
571 | lsk->rx_errors++; | ||
572 | return err; | ||
573 | } | ||
574 | |||
575 | lsk->rx_packets++; | ||
576 | lsk->rx_bytes += copied; | ||
577 | |||
578 | return copied; | ||
579 | } | ||
580 | |||
581 | struct proto l2tp_ip_prot = { | ||
582 | .name = "L2TP/IP", | ||
583 | .owner = THIS_MODULE, | ||
584 | .init = l2tp_ip_open, | ||
585 | .close = l2tp_ip_close, | ||
586 | .bind = l2tp_ip_bind, | ||
587 | .connect = l2tp_ip_connect, | ||
588 | .disconnect = udp_disconnect, | ||
589 | .ioctl = udp_ioctl, | ||
590 | .destroy = l2tp_ip_destroy_sock, | ||
591 | .setsockopt = ip_setsockopt, | ||
592 | .getsockopt = ip_getsockopt, | ||
593 | .sendmsg = l2tp_ip_sendmsg, | ||
594 | .recvmsg = l2tp_ip_recvmsg, | ||
595 | .backlog_rcv = l2tp_ip_backlog_recv, | ||
596 | .hash = inet_hash, | ||
597 | .unhash = inet_unhash, | ||
598 | .obj_size = sizeof(struct l2tp_ip_sock), | ||
599 | #ifdef CONFIG_COMPAT | ||
600 | .compat_setsockopt = compat_ip_setsockopt, | ||
601 | .compat_getsockopt = compat_ip_getsockopt, | ||
602 | #endif | ||
603 | }; | ||
604 | |||
605 | static const struct proto_ops l2tp_ip_ops = { | ||
606 | .family = PF_INET, | ||
607 | .owner = THIS_MODULE, | ||
608 | .release = inet_release, | ||
609 | .bind = inet_bind, | ||
610 | .connect = inet_dgram_connect, | ||
611 | .socketpair = sock_no_socketpair, | ||
612 | .accept = sock_no_accept, | ||
613 | .getname = l2tp_ip_getname, | ||
614 | .poll = datagram_poll, | ||
615 | .ioctl = inet_ioctl, | ||
616 | .listen = sock_no_listen, | ||
617 | .shutdown = inet_shutdown, | ||
618 | .setsockopt = sock_common_setsockopt, | ||
619 | .getsockopt = sock_common_getsockopt, | ||
620 | .sendmsg = inet_sendmsg, | ||
621 | .recvmsg = sock_common_recvmsg, | ||
622 | .mmap = sock_no_mmap, | ||
623 | .sendpage = sock_no_sendpage, | ||
624 | #ifdef CONFIG_COMPAT | ||
625 | .compat_setsockopt = compat_sock_common_setsockopt, | ||
626 | .compat_getsockopt = compat_sock_common_getsockopt, | ||
627 | #endif | ||
628 | }; | ||
629 | |||
630 | static struct inet_protosw l2tp_ip_protosw = { | ||
631 | .type = SOCK_DGRAM, | ||
632 | .protocol = IPPROTO_L2TP, | ||
633 | .prot = &l2tp_ip_prot, | ||
634 | .ops = &l2tp_ip_ops, | ||
635 | .no_check = 0, | ||
636 | }; | ||
637 | |||
638 | static struct net_protocol l2tp_ip_protocol __read_mostly = { | ||
639 | .handler = l2tp_ip_recv, | ||
640 | }; | ||
641 | |||
642 | static int __init l2tp_ip_init(void) | ||
643 | { | ||
644 | int err; | ||
645 | |||
646 | printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n"); | ||
647 | |||
648 | err = proto_register(&l2tp_ip_prot, 1); | ||
649 | if (err != 0) | ||
650 | goto out; | ||
651 | |||
652 | err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); | ||
653 | if (err) | ||
654 | goto out1; | ||
655 | |||
656 | inet_register_protosw(&l2tp_ip_protosw); | ||
657 | return 0; | ||
658 | |||
659 | out1: | ||
660 | proto_unregister(&l2tp_ip_prot); | ||
661 | out: | ||
662 | return err; | ||
663 | } | ||
664 | |||
665 | static void __exit l2tp_ip_exit(void) | ||
666 | { | ||
667 | inet_unregister_protosw(&l2tp_ip_protosw); | ||
668 | inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); | ||
669 | proto_unregister(&l2tp_ip_prot); | ||
670 | } | ||
671 | |||
672 | module_init(l2tp_ip_init); | ||
673 | module_exit(l2tp_ip_exit); | ||
674 | |||
675 | MODULE_LICENSE("GPL"); | ||
676 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
677 | MODULE_DESCRIPTION("L2TP over IP"); | ||
678 | MODULE_VERSION("1.0"); | ||
679 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); | ||
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c new file mode 100644 index 000000000000..4c1e540732d7 --- /dev/null +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -0,0 +1,840 @@ | |||
1 | /* | ||
2 | * L2TP netlink layer, for management | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * Partly based on the IrDA netlink implementation | ||
7 | * (see net/irda/irnetlink.c) which is: | ||
8 | * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org> | ||
9 | * which is in turn partly based on the wireless netlink code: | ||
10 | * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <net/sock.h> | ||
18 | #include <net/genetlink.h> | ||
19 | #include <net/udp.h> | ||
20 | #include <linux/in.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/socket.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/list.h> | ||
25 | #include <net/net_namespace.h> | ||
26 | |||
27 | #include <linux/l2tp.h> | ||
28 | |||
29 | #include "l2tp_core.h" | ||
30 | |||
31 | |||
32 | static struct genl_family l2tp_nl_family = { | ||
33 | .id = GENL_ID_GENERATE, | ||
34 | .name = L2TP_GENL_NAME, | ||
35 | .version = L2TP_GENL_VERSION, | ||
36 | .hdrsize = 0, | ||
37 | .maxattr = L2TP_ATTR_MAX, | ||
38 | }; | ||
39 | |||
40 | /* Accessed under genl lock */ | ||
41 | static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; | ||
42 | |||
43 | static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) | ||
44 | { | ||
45 | u32 tunnel_id; | ||
46 | u32 session_id; | ||
47 | char *ifname; | ||
48 | struct l2tp_tunnel *tunnel; | ||
49 | struct l2tp_session *session = NULL; | ||
50 | struct net *net = genl_info_net(info); | ||
51 | |||
52 | if (info->attrs[L2TP_ATTR_IFNAME]) { | ||
53 | ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); | ||
54 | session = l2tp_session_find_by_ifname(net, ifname); | ||
55 | } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && | ||
56 | (info->attrs[L2TP_ATTR_CONN_ID])) { | ||
57 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
58 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | ||
59 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
60 | if (tunnel) | ||
61 | session = l2tp_session_find(net, tunnel, session_id); | ||
62 | } | ||
63 | |||
64 | return session; | ||
65 | } | ||
66 | |||
67 | static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) | ||
68 | { | ||
69 | struct sk_buff *msg; | ||
70 | void *hdr; | ||
71 | int ret = -ENOBUFS; | ||
72 | |||
73 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
74 | if (!msg) { | ||
75 | ret = -ENOMEM; | ||
76 | goto out; | ||
77 | } | ||
78 | |||
79 | hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, | ||
80 | &l2tp_nl_family, 0, L2TP_CMD_NOOP); | ||
81 | if (!hdr) { /* genlmsg_put() returns NULL on failure, not ERR_PTR() */ | ||
82 | ret = -EMSGSIZE; | ||
83 | goto err_out; | ||
84 | } | ||
85 | |||
86 | genlmsg_end(msg, hdr); | ||
87 | |||
88 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
89 | |||
90 | err_out: | ||
91 | nlmsg_free(msg); | ||
92 | |||
93 | out: | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info) | ||
98 | { | ||
99 | u32 tunnel_id; | ||
100 | u32 peer_tunnel_id; | ||
101 | int proto_version; | ||
102 | int fd; | ||
103 | int ret = 0; | ||
104 | struct l2tp_tunnel_cfg cfg = { 0, }; | ||
105 | struct l2tp_tunnel *tunnel; | ||
106 | struct net *net = genl_info_net(info); | ||
107 | |||
108 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
109 | ret = -EINVAL; | ||
110 | goto out; | ||
111 | } | ||
112 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
113 | |||
114 | if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) { | ||
115 | ret = -EINVAL; | ||
116 | goto out; | ||
117 | } | ||
118 | peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]); | ||
119 | |||
120 | if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) { | ||
121 | ret = -EINVAL; | ||
122 | goto out; | ||
123 | } | ||
124 | proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]); | ||
125 | |||
126 | if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) { | ||
127 | ret = -EINVAL; | ||
128 | goto out; | ||
129 | } | ||
130 | cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]); | ||
131 | |||
132 | fd = -1; | ||
133 | if (info->attrs[L2TP_ATTR_FD]) { | ||
134 | fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); | ||
135 | } else { | ||
136 | if (info->attrs[L2TP_ATTR_IP_SADDR]) | ||
137 | cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); | ||
138 | if (info->attrs[L2TP_ATTR_IP_DADDR]) | ||
139 | cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); | ||
140 | if (info->attrs[L2TP_ATTR_UDP_SPORT]) | ||
141 | cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); | ||
142 | if (info->attrs[L2TP_ATTR_UDP_DPORT]) | ||
143 | cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); | ||
144 | if (info->attrs[L2TP_ATTR_UDP_CSUM]) | ||
145 | cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]); | ||
146 | } | ||
147 | |||
148 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
149 | cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
150 | |||
151 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
152 | if (tunnel != NULL) { | ||
153 | ret = -EEXIST; | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | ret = -EINVAL; | ||
158 | switch (cfg.encap) { | ||
159 | case L2TP_ENCAPTYPE_UDP: | ||
160 | case L2TP_ENCAPTYPE_IP: | ||
161 | ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id, | ||
162 | peer_tunnel_id, &cfg, &tunnel); | ||
163 | break; | ||
164 | } | ||
165 | |||
166 | out: | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info) | ||
171 | { | ||
172 | struct l2tp_tunnel *tunnel; | ||
173 | u32 tunnel_id; | ||
174 | int ret = 0; | ||
175 | struct net *net = genl_info_net(info); | ||
176 | |||
177 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
178 | ret = -EINVAL; | ||
179 | goto out; | ||
180 | } | ||
181 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
182 | |||
183 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
184 | if (tunnel == NULL) { | ||
185 | ret = -ENODEV; | ||
186 | goto out; | ||
187 | } | ||
188 | |||
189 | (void) l2tp_tunnel_delete(tunnel); | ||
190 | |||
191 | out: | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info) | ||
196 | { | ||
197 | struct l2tp_tunnel *tunnel; | ||
198 | u32 tunnel_id; | ||
199 | int ret = 0; | ||
200 | struct net *net = genl_info_net(info); | ||
201 | |||
202 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
203 | ret = -EINVAL; | ||
204 | goto out; | ||
205 | } | ||
206 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
207 | |||
208 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
209 | if (tunnel == NULL) { | ||
210 | ret = -ENODEV; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
215 | tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
216 | |||
217 | out: | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | ||
222 | struct l2tp_tunnel *tunnel) | ||
223 | { | ||
224 | void *hdr; | ||
225 | struct nlattr *nest; | ||
226 | struct sock *sk = NULL; | ||
227 | struct inet_sock *inet; | ||
228 | |||
229 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, | ||
230 | L2TP_CMD_TUNNEL_GET); | ||
231 | if (!hdr) /* genlmsg_put() returns NULL on failure, not ERR_PTR() */ | ||
232 | return -EMSGSIZE; | ||
233 | |||
234 | NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); | ||
235 | NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); | ||
236 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); | ||
237 | NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); | ||
238 | NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); | ||
239 | |||
240 | nest = nla_nest_start(skb, L2TP_ATTR_STATS); | ||
241 | if (nest == NULL) | ||
242 | goto nla_put_failure; | ||
243 | |||
244 | NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); | ||
245 | NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); | ||
246 | NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); | ||
247 | NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); | ||
248 | NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); | ||
249 | NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); | ||
250 | NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); | ||
251 | NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); | ||
252 | nla_nest_end(skb, nest); | ||
253 | |||
254 | sk = tunnel->sock; | ||
255 | if (!sk) | ||
256 | goto out; | ||
257 | |||
258 | inet = inet_sk(sk); | ||
259 | |||
260 | switch (tunnel->encap) { | ||
261 | case L2TP_ENCAPTYPE_UDP: | ||
262 | NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); | ||
263 | NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); | ||
264 | NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); | ||
265 | /* NOBREAK */ | ||
266 | case L2TP_ENCAPTYPE_IP: | ||
267 | NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); | ||
268 | NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | out: | ||
273 | return genlmsg_end(skb, hdr); | ||
274 | |||
275 | nla_put_failure: | ||
276 | genlmsg_cancel(skb, hdr); | ||
277 | return -1; | ||
278 | } | ||
279 | |||
280 | static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) | ||
281 | { | ||
282 | struct l2tp_tunnel *tunnel; | ||
283 | struct sk_buff *msg; | ||
284 | u32 tunnel_id; | ||
285 | int ret = -ENOBUFS; | ||
286 | struct net *net = genl_info_net(info); | ||
287 | |||
288 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
289 | ret = -EINVAL; | ||
290 | goto out; | ||
291 | } | ||
292 | |||
293 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
294 | |||
295 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
296 | if (tunnel == NULL) { | ||
297 | ret = -ENODEV; | ||
298 | goto out; | ||
299 | } | ||
300 | |||
301 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
302 | if (!msg) { | ||
303 | ret = -ENOMEM; | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq, | ||
308 | NLM_F_ACK, tunnel); | ||
309 | if (ret < 0) | ||
310 | goto err_out; | ||
311 | |||
312 | return genlmsg_unicast(net, msg, info->snd_pid); | ||
313 | |||
314 | err_out: | ||
315 | nlmsg_free(msg); | ||
316 | |||
317 | out: | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb) | ||
322 | { | ||
323 | int ti = cb->args[0]; | ||
324 | struct l2tp_tunnel *tunnel; | ||
325 | struct net *net = sock_net(skb->sk); | ||
326 | |||
327 | for (;;) { | ||
328 | tunnel = l2tp_tunnel_find_nth(net, ti); | ||
329 | if (tunnel == NULL) | ||
330 | goto out; | ||
331 | |||
332 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid, | ||
333 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
334 | tunnel) <= 0) | ||
335 | goto out; | ||
336 | |||
337 | ti++; | ||
338 | } | ||
339 | |||
340 | out: | ||
341 | cb->args[0] = ti; | ||
342 | |||
343 | return skb->len; | ||
344 | } | ||
345 | |||
346 | static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info) | ||
347 | { | ||
348 | u32 tunnel_id = 0; | ||
349 | u32 session_id; | ||
350 | u32 peer_session_id; | ||
351 | int ret = 0; | ||
352 | struct l2tp_tunnel *tunnel; | ||
353 | struct l2tp_session *session; | ||
354 | struct l2tp_session_cfg cfg = { 0, }; | ||
355 | struct net *net = genl_info_net(info); | ||
356 | |||
357 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
358 | ret = -EINVAL; | ||
359 | goto out; | ||
360 | } | ||
361 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
362 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
363 | if (!tunnel) { | ||
364 | ret = -ENODEV; | ||
365 | goto out; | ||
366 | } | ||
367 | |||
368 | if (!info->attrs[L2TP_ATTR_SESSION_ID]) { | ||
369 | ret = -EINVAL; | ||
370 | goto out; | ||
371 | } | ||
372 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | ||
373 | session = l2tp_session_find(net, tunnel, session_id); | ||
374 | if (session) { | ||
375 | ret = -EEXIST; | ||
376 | goto out; | ||
377 | } | ||
378 | |||
379 | if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { | ||
380 | ret = -EINVAL; | ||
381 | goto out; | ||
382 | } | ||
383 | peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); | ||
384 | |||
385 | if (!info->attrs[L2TP_ATTR_PW_TYPE]) { | ||
386 | ret = -EINVAL; | ||
387 | goto out; | ||
388 | } | ||
389 | cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); | ||
390 | if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { | ||
391 | ret = -EINVAL; | ||
392 | goto out; | ||
393 | } | ||
394 | |||
395 | if (tunnel->version > 2) { | ||
396 | if (info->attrs[L2TP_ATTR_OFFSET]) | ||
397 | cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]); | ||
398 | |||
399 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | ||
400 | cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | ||
401 | |||
402 | cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT; | ||
403 | if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) | ||
404 | cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]); | ||
405 | |||
406 | cfg.l2specific_len = 4; | ||
407 | if (info->attrs[L2TP_ATTR_L2SPEC_LEN]) | ||
408 | cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]); | ||
409 | |||
410 | if (info->attrs[L2TP_ATTR_COOKIE]) { | ||
411 | u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); | ||
412 | if (len > 8) { | ||
413 | ret = -EINVAL; | ||
414 | goto out; | ||
415 | } | ||
416 | cfg.cookie_len = len; | ||
417 | memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); | ||
418 | } | ||
419 | if (info->attrs[L2TP_ATTR_PEER_COOKIE]) { | ||
420 | u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); | ||
421 | if (len > 8) { | ||
422 | ret = -EINVAL; | ||
423 | goto out; | ||
424 | } | ||
425 | cfg.peer_cookie_len = len; | ||
426 | memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); | ||
427 | } | ||
428 | if (info->attrs[L2TP_ATTR_IFNAME]) | ||
429 | cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); | ||
430 | |||
431 | if (info->attrs[L2TP_ATTR_VLAN_ID]) | ||
432 | cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]); | ||
433 | } | ||
434 | |||
435 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
436 | cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
437 | |||
438 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | ||
439 | cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | ||
440 | |||
441 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | ||
442 | cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | ||
443 | |||
444 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | ||
445 | cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | ||
446 | |||
447 | if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) | ||
448 | cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); | ||
449 | |||
450 | if (info->attrs[L2TP_ATTR_MTU]) | ||
451 | cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); | ||
452 | |||
453 | if (info->attrs[L2TP_ATTR_MRU]) | ||
454 | cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); | ||
455 | |||
456 | if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || | ||
457 | (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { | ||
458 | ret = -EPROTONOSUPPORT; | ||
459 | goto out; | ||
460 | } | ||
461 | |||
462 | /* Check that pseudowire-specific params are present */ | ||
463 | switch (cfg.pw_type) { | ||
464 | case L2TP_PWTYPE_NONE: | ||
465 | break; | ||
466 | case L2TP_PWTYPE_ETH_VLAN: | ||
467 | if (!info->attrs[L2TP_ATTR_VLAN_ID]) { | ||
468 | ret = -EINVAL; | ||
469 | goto out; | ||
470 | } | ||
471 | break; | ||
472 | case L2TP_PWTYPE_ETH: | ||
473 | break; | ||
474 | case L2TP_PWTYPE_PPP: | ||
475 | case L2TP_PWTYPE_PPP_AC: | ||
476 | break; | ||
477 | case L2TP_PWTYPE_IP: | ||
478 | default: | ||
479 | ret = -EPROTONOSUPPORT; | ||
480 | break; | ||
481 | } | ||
482 | |||
483 | ret = -EPROTONOSUPPORT; | ||
484 | if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) | ||
485 | ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, | ||
486 | session_id, peer_session_id, &cfg); | ||
487 | |||
488 | out: | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info) | ||
493 | { | ||
494 | int ret = 0; | ||
495 | struct l2tp_session *session; | ||
496 | u16 pw_type; | ||
497 | |||
498 | session = l2tp_nl_session_find(info); | ||
499 | if (session == NULL) { | ||
500 | ret = -ENODEV; | ||
501 | goto out; | ||
502 | } | ||
503 | |||
504 | pw_type = session->pwtype; | ||
505 | if (pw_type < __L2TP_PWTYPE_MAX) | ||
506 | if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) | ||
507 | ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); | ||
508 | |||
509 | out: | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info) | ||
514 | { | ||
515 | int ret = 0; | ||
516 | struct l2tp_session *session; | ||
517 | |||
518 | session = l2tp_nl_session_find(info); | ||
519 | if (session == NULL) { | ||
520 | ret = -ENODEV; | ||
521 | goto out; | ||
522 | } | ||
523 | |||
524 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
525 | session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
526 | |||
527 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | ||
528 | session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | ||
529 | |||
530 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | ||
531 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | ||
532 | |||
533 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | ||
534 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | ||
535 | |||
536 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | ||
537 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | ||
538 | |||
539 | if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) | ||
540 | session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); | ||
541 | |||
542 | if (info->attrs[L2TP_ATTR_MTU]) | ||
543 | session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); | ||
544 | |||
545 | if (info->attrs[L2TP_ATTR_MRU]) | ||
546 | session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); | ||
547 | |||
548 | out: | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | ||
553 | struct l2tp_session *session) | ||
554 | { | ||
555 | void *hdr; | ||
556 | struct nlattr *nest; | ||
557 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
558 | struct sock *sk = NULL; | ||
559 | |||
560 | sk = tunnel->sock; | ||
561 | |||
562 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); | ||
563 | if (!hdr) /* genlmsg_put() returns NULL on failure, not ERR_PTR() */ | ||
564 | return -EMSGSIZE; | ||
565 | |||
566 | NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); | ||
567 | NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); | ||
568 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); | ||
569 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); | ||
570 | NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); | ||
571 | NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); | ||
572 | NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); | ||
573 | if (session->mru) | ||
574 | NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); | ||
575 | |||
576 | if (session->ifname && session->ifname[0]) | ||
577 | NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); | ||
578 | if (session->cookie_len) | ||
579 | NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); | ||
580 | if (session->peer_cookie_len) | ||
581 | NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); | ||
582 | NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); | ||
583 | NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); | ||
584 | NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); | ||
585 | #ifdef CONFIG_XFRM | ||
586 | if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) | ||
587 | NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); | ||
588 | #endif | ||
589 | if (session->reorder_timeout) | ||
590 | NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); | ||
591 | |||
592 | nest = nla_nest_start(skb, L2TP_ATTR_STATS); | ||
593 | if (nest == NULL) | ||
594 | goto nla_put_failure; | ||
595 | NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); | ||
596 | NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); | ||
597 | NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); | ||
598 | NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); | ||
599 | NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); | ||
600 | NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); | ||
601 | NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); | ||
602 | NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); | ||
603 | nla_nest_end(skb, nest); | ||
604 | |||
605 | return genlmsg_end(skb, hdr); | ||
606 | |||
607 | nla_put_failure: | ||
608 | genlmsg_cancel(skb, hdr); | ||
609 | return -1; | ||
610 | } | ||
611 | |||
612 | static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) | ||
613 | { | ||
614 | struct l2tp_session *session; | ||
615 | struct sk_buff *msg; | ||
616 | int ret; | ||
617 | |||
618 | session = l2tp_nl_session_find(info); | ||
619 | if (session == NULL) { | ||
620 | ret = -ENODEV; | ||
621 | goto out; | ||
622 | } | ||
623 | |||
624 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
625 | if (!msg) { | ||
626 | ret = -ENOMEM; | ||
627 | goto out; | ||
628 | } | ||
629 | |||
630 | ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq, | ||
631 | 0, session); | ||
632 | if (ret < 0) | ||
633 | goto err_out; | ||
634 | |||
635 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
636 | |||
637 | err_out: | ||
638 | nlmsg_free(msg); | ||
639 | |||
640 | out: | ||
641 | return ret; | ||
642 | } | ||
643 | |||
644 | static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb) | ||
645 | { | ||
646 | struct net *net = sock_net(skb->sk); | ||
647 | struct l2tp_session *session; | ||
648 | struct l2tp_tunnel *tunnel = NULL; | ||
649 | int ti = cb->args[0]; | ||
650 | int si = cb->args[1]; | ||
651 | |||
652 | for (;;) { | ||
653 | if (tunnel == NULL) { | ||
654 | tunnel = l2tp_tunnel_find_nth(net, ti); | ||
655 | if (tunnel == NULL) | ||
656 | goto out; | ||
657 | } | ||
658 | |||
659 | session = l2tp_session_find_nth(tunnel, si); | ||
660 | if (session == NULL) { | ||
661 | ti++; | ||
662 | tunnel = NULL; | ||
663 | si = 0; | ||
664 | continue; | ||
665 | } | ||
666 | |||
667 | if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid, | ||
668 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
669 | session) <= 0) | ||
670 | break; | ||
671 | |||
672 | si++; | ||
673 | } | ||
674 | |||
675 | out: | ||
676 | cb->args[0] = ti; | ||
677 | cb->args[1] = si; | ||
678 | |||
679 | return skb->len; | ||
680 | } | ||
681 | |||
682 | static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { | ||
683 | [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, }, | ||
684 | [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, }, | ||
685 | [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, }, | ||
686 | [L2TP_ATTR_OFFSET] = { .type = NLA_U16, }, | ||
687 | [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, }, | ||
688 | [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, }, | ||
689 | [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, }, | ||
690 | [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, }, | ||
691 | [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, }, | ||
692 | [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, }, | ||
693 | [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, }, | ||
694 | [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, }, | ||
695 | [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, }, | ||
696 | [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, }, | ||
697 | [L2TP_ATTR_DEBUG] = { .type = NLA_U32, }, | ||
698 | [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, }, | ||
699 | [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, }, | ||
700 | [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, }, | ||
701 | [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, }, | ||
702 | [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, }, | ||
703 | [L2TP_ATTR_FD] = { .type = NLA_U32, }, | ||
704 | [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, }, | ||
705 | [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, }, | ||
706 | [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, }, | ||
707 | [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, }, | ||
708 | [L2TP_ATTR_MTU] = { .type = NLA_U16, }, | ||
709 | [L2TP_ATTR_MRU] = { .type = NLA_U16, }, | ||
710 | [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, | ||
711 | [L2TP_ATTR_IFNAME] = { | ||
712 | .type = NLA_NUL_STRING, | ||
713 | .len = IFNAMSIZ - 1, | ||
714 | }, | ||
715 | [L2TP_ATTR_COOKIE] = { | ||
716 | .type = NLA_BINARY, | ||
717 | .len = 8, | ||
718 | }, | ||
719 | [L2TP_ATTR_PEER_COOKIE] = { | ||
720 | .type = NLA_BINARY, | ||
721 | .len = 8, | ||
722 | }, | ||
723 | }; | ||
724 | |||
725 | static struct genl_ops l2tp_nl_ops[] = { | ||
726 | { | ||
727 | .cmd = L2TP_CMD_NOOP, | ||
728 | .doit = l2tp_nl_cmd_noop, | ||
729 | .policy = l2tp_nl_policy, | ||
730 | /* can be retrieved by unprivileged users */ | ||
731 | }, | ||
732 | { | ||
733 | .cmd = L2TP_CMD_TUNNEL_CREATE, | ||
734 | .doit = l2tp_nl_cmd_tunnel_create, | ||
735 | .policy = l2tp_nl_policy, | ||
736 | .flags = GENL_ADMIN_PERM, | ||
737 | }, | ||
738 | { | ||
739 | .cmd = L2TP_CMD_TUNNEL_DELETE, | ||
740 | .doit = l2tp_nl_cmd_tunnel_delete, | ||
741 | .policy = l2tp_nl_policy, | ||
742 | .flags = GENL_ADMIN_PERM, | ||
743 | }, | ||
744 | { | ||
745 | .cmd = L2TP_CMD_TUNNEL_MODIFY, | ||
746 | .doit = l2tp_nl_cmd_tunnel_modify, | ||
747 | .policy = l2tp_nl_policy, | ||
748 | .flags = GENL_ADMIN_PERM, | ||
749 | }, | ||
750 | { | ||
751 | .cmd = L2TP_CMD_TUNNEL_GET, | ||
752 | .doit = l2tp_nl_cmd_tunnel_get, | ||
753 | .dumpit = l2tp_nl_cmd_tunnel_dump, | ||
754 | .policy = l2tp_nl_policy, | ||
755 | .flags = GENL_ADMIN_PERM, | ||
756 | }, | ||
757 | { | ||
758 | .cmd = L2TP_CMD_SESSION_CREATE, | ||
759 | .doit = l2tp_nl_cmd_session_create, | ||
760 | .policy = l2tp_nl_policy, | ||
761 | .flags = GENL_ADMIN_PERM, | ||
762 | }, | ||
763 | { | ||
764 | .cmd = L2TP_CMD_SESSION_DELETE, | ||
765 | .doit = l2tp_nl_cmd_session_delete, | ||
766 | .policy = l2tp_nl_policy, | ||
767 | .flags = GENL_ADMIN_PERM, | ||
768 | }, | ||
769 | { | ||
770 | .cmd = L2TP_CMD_SESSION_MODIFY, | ||
771 | .doit = l2tp_nl_cmd_session_modify, | ||
772 | .policy = l2tp_nl_policy, | ||
773 | .flags = GENL_ADMIN_PERM, | ||
774 | }, | ||
775 | { | ||
776 | .cmd = L2TP_CMD_SESSION_GET, | ||
777 | .doit = l2tp_nl_cmd_session_get, | ||
778 | .dumpit = l2tp_nl_cmd_session_dump, | ||
779 | .policy = l2tp_nl_policy, | ||
780 | .flags = GENL_ADMIN_PERM, | ||
781 | }, | ||
782 | }; | ||
783 | |||
784 | int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops) | ||
785 | { | ||
786 | int ret; | ||
787 | |||
788 | ret = -EINVAL; | ||
789 | if (pw_type >= __L2TP_PWTYPE_MAX) | ||
790 | goto err; | ||
791 | |||
792 | genl_lock(); | ||
793 | ret = -EBUSY; | ||
794 | if (l2tp_nl_cmd_ops[pw_type]) | ||
795 | goto out; | ||
796 | |||
797 | l2tp_nl_cmd_ops[pw_type] = ops; | ||
798 | ret = 0; /* mark success so -EINVAL/-EBUSY are only returned on failure */ | ||
799 | out: | ||
800 | genl_unlock(); | ||
801 | err: | ||
802 | return ret; /* was "return 0": registration failures were silently ignored */ | ||
803 | } | ||
804 | EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); | ||
805 | |||
806 | void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type) | ||
807 | { | ||
808 | if (pw_type < __L2TP_PWTYPE_MAX) { | ||
809 | genl_lock(); | ||
810 | l2tp_nl_cmd_ops[pw_type] = NULL; | ||
811 | genl_unlock(); | ||
812 | } | ||
813 | } | ||
814 | EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops); | ||
815 | |||
816 | static int l2tp_nl_init(void) | ||
817 | { | ||
818 | int err; | ||
819 | |||
820 | printk(KERN_INFO "L2TP netlink interface\n"); | ||
821 | err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, | ||
822 | ARRAY_SIZE(l2tp_nl_ops)); | ||
823 | |||
824 | return err; | ||
825 | } | ||
826 | |||
827 | static void l2tp_nl_cleanup(void) | ||
828 | { | ||
829 | genl_unregister_family(&l2tp_nl_family); | ||
830 | } | ||
831 | |||
832 | module_init(l2tp_nl_init); | ||
833 | module_exit(l2tp_nl_cleanup); | ||
834 | |||
835 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
836 | MODULE_DESCRIPTION("L2TP netlink"); | ||
837 | MODULE_LICENSE("GPL"); | ||
838 | MODULE_VERSION("1.0"); | ||
839 | MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \ | ||
840 | __stringify(NETLINK_GENERIC) "-type-" "l2tp"); | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c new file mode 100644 index 000000000000..90d82b3f2889 --- /dev/null +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -0,0 +1,1837 @@ | |||
1 | /***************************************************************************** | ||
2 | * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets | ||
3 | * | ||
4 | * PPPoX --- Generic PPP encapsulation socket family | ||
5 | * PPPoL2TP --- PPP over L2TP (RFC 2661) | ||
6 | * | ||
7 | * Version: 2.0.0 | ||
8 | * | ||
9 | * Authors: James Chapman (jchapman@katalix.com) | ||
10 | * | ||
11 | * Based on original work by Martijn van Oosterhout <kleptog@svana.org> | ||
12 | * | ||
13 | * License: | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | /* This driver handles only L2TP data frames; control frames are handled by a | ||
22 | * userspace application. | ||
23 | * | ||
24 | * To send data in an L2TP session, userspace opens a PPPoL2TP socket and | ||
25 | * attaches it to a bound UDP socket with local tunnel_id / session_id and | ||
26 | * peer tunnel_id / session_id set. Data can then be sent or received using | ||
27 | * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket | ||
28 | * can be read or modified using ioctl() or [gs]etsockopt() calls. | ||
29 | * | ||
30 | * When a PPPoL2TP socket is connected with local and peer session_id values | ||
31 | * zero, the socket is treated as a special tunnel management socket. | ||
32 | * | ||
33 | * Here's example userspace code to create a socket for sending/receiving data | ||
34 | * over an L2TP session:- | ||
35 | * | ||
36 | * struct sockaddr_pppol2tp sax; | ||
37 | * int fd; | ||
38 | * int session_fd; | ||
39 | * | ||
40 | * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); | ||
41 | * | ||
42 | * sax.sa_family = AF_PPPOX; | ||
43 | * sax.sa_protocol = PX_PROTO_OL2TP; | ||
44 | * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket | ||
45 | * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; | ||
46 | * sax.pppol2tp.addr.sin_port = addr->sin_port; | ||
47 | * sax.pppol2tp.addr.sin_family = AF_INET; | ||
48 | * sax.pppol2tp.s_tunnel = tunnel_id; | ||
49 | * sax.pppol2tp.s_session = session_id; | ||
50 | * sax.pppol2tp.d_tunnel = peer_tunnel_id; | ||
51 | * sax.pppol2tp.d_session = peer_session_id; | ||
52 | * | ||
53 | * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); | ||
54 | * | ||
55 | * A pppd plugin that allows PPP traffic to be carried over L2TP using | ||
56 | * this driver is available from the OpenL2TP project at | ||
57 | * http://openl2tp.sourceforge.net. | ||
58 | */ | ||
59 | |||
60 | #include <linux/module.h> | ||
61 | #include <linux/string.h> | ||
62 | #include <linux/list.h> | ||
63 | #include <linux/uaccess.h> | ||
64 | |||
65 | #include <linux/kernel.h> | ||
66 | #include <linux/spinlock.h> | ||
67 | #include <linux/kthread.h> | ||
68 | #include <linux/sched.h> | ||
69 | #include <linux/slab.h> | ||
70 | #include <linux/errno.h> | ||
71 | #include <linux/jiffies.h> | ||
72 | |||
73 | #include <linux/netdevice.h> | ||
74 | #include <linux/net.h> | ||
75 | #include <linux/inetdevice.h> | ||
76 | #include <linux/skbuff.h> | ||
77 | #include <linux/init.h> | ||
78 | #include <linux/ip.h> | ||
79 | #include <linux/udp.h> | ||
80 | #include <linux/if_pppox.h> | ||
81 | #include <linux/if_pppol2tp.h> | ||
82 | #include <net/sock.h> | ||
83 | #include <linux/ppp_channel.h> | ||
84 | #include <linux/ppp_defs.h> | ||
85 | #include <linux/if_ppp.h> | ||
86 | #include <linux/file.h> | ||
87 | #include <linux/hash.h> | ||
88 | #include <linux/sort.h> | ||
89 | #include <linux/proc_fs.h> | ||
90 | #include <linux/l2tp.h> | ||
91 | #include <linux/nsproxy.h> | ||
92 | #include <net/net_namespace.h> | ||
93 | #include <net/netns/generic.h> | ||
94 | #include <net/dst.h> | ||
95 | #include <net/ip.h> | ||
96 | #include <net/udp.h> | ||
97 | #include <net/xfrm.h> | ||
98 | |||
99 | #include <asm/byteorder.h> | ||
100 | #include <asm/atomic.h> | ||
101 | |||
102 | #include "l2tp_core.h" | ||
103 | |||
/* Driver version string. */
#define PPPOL2TP_DRV_VERSION "V2.0"

/* Space for UDP, L2TP and PPP headers */
#define PPPOL2TP_HEADER_OVERHEAD 40

/* Conditional debug printk: emit only when _type is enabled in the
 * session/tunnel debug bitmask _mask.
 */
#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
	do { \
		if ((_mask) & (_type)) \
			printk(_lvl "PPPOL2TP: " _fmt, ##args); \
	} while (0)

/* Number of bytes to build transmit L2TP headers.
 * Unfortunately the size is different depending on whether sequence
 * numbers are enabled.
 */
#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
121 | |||
/* Private data of each session. This data lives at the end of struct
 * l2tp_session, referenced via session->priv[]. Instances are carved
 * out by l2tp_session_create(sizeof(struct pppol2tp_session), ...).
 */
struct pppol2tp_session {
	int owner; /* pid that opened the socket */

	struct sock *sock; /* Pointer to the session
			    * PPPoX socket */
	struct sock *tunnel_sock; /* Pointer to the tunnel UDP
				   * socket */
	int flags; /* accessed by PPPIOCGFLAGS.
		    * Unused. */
};
135 | |||
136 | static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); | ||
137 | |||
138 | static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; | ||
139 | static const struct proto_ops pppol2tp_ops; | ||
140 | |||
141 | /* Helpers to obtain tunnel/session contexts from sockets. | ||
142 | */ | ||
143 | static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) | ||
144 | { | ||
145 | struct l2tp_session *session; | ||
146 | |||
147 | if (sk == NULL) | ||
148 | return NULL; | ||
149 | |||
150 | sock_hold(sk); | ||
151 | session = (struct l2tp_session *)(sk->sk_user_data); | ||
152 | if (session == NULL) { | ||
153 | sock_put(sk); | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
158 | |||
159 | out: | ||
160 | return session; | ||
161 | } | ||
162 | |||
163 | /***************************************************************************** | ||
164 | * Receive data handling | ||
165 | *****************************************************************************/ | ||
166 | |||
167 | static int pppol2tp_recv_payload_hook(struct sk_buff *skb) | ||
168 | { | ||
169 | /* Skip PPP header, if present. In testing, Microsoft L2TP clients | ||
170 | * don't send the PPP header (PPP header compression enabled), but | ||
171 | * other clients can include the header. So we cope with both cases | ||
172 | * here. The PPP header is always FF03 when using L2TP. | ||
173 | * | ||
174 | * Note that skb->data[] isn't dereferenced from a u16 ptr here since | ||
175 | * the field may be unaligned. | ||
176 | */ | ||
177 | if (!pskb_may_pull(skb, 2)) | ||
178 | return 1; | ||
179 | |||
180 | if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) | ||
181 | skb_pull(skb, 2); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | /* Receive message. This is the recvmsg for the PPPoL2TP socket. | ||
187 | */ | ||
188 | static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
189 | struct msghdr *msg, size_t len, | ||
190 | int flags) | ||
191 | { | ||
192 | int err; | ||
193 | struct sk_buff *skb; | ||
194 | struct sock *sk = sock->sk; | ||
195 | |||
196 | err = -EIO; | ||
197 | if (sk->sk_state & PPPOX_BOUND) | ||
198 | goto end; | ||
199 | |||
200 | msg->msg_namelen = 0; | ||
201 | |||
202 | err = 0; | ||
203 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | ||
204 | flags & MSG_DONTWAIT, &err); | ||
205 | if (!skb) | ||
206 | goto end; | ||
207 | |||
208 | if (len > skb->len) | ||
209 | len = skb->len; | ||
210 | else if (len < skb->len) | ||
211 | msg->msg_flags |= MSG_TRUNC; | ||
212 | |||
213 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); | ||
214 | if (likely(err == 0)) | ||
215 | err = len; | ||
216 | |||
217 | kfree_skb(skb); | ||
218 | end: | ||
219 | return err; | ||
220 | } | ||
221 | |||
222 | static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | ||
223 | { | ||
224 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
225 | struct sock *sk = NULL; | ||
226 | |||
227 | /* If the socket is bound, send it in to PPP's input queue. Otherwise | ||
228 | * queue it on the session socket. | ||
229 | */ | ||
230 | sk = ps->sock; | ||
231 | if (sk == NULL) | ||
232 | goto no_sock; | ||
233 | |||
234 | if (sk->sk_state & PPPOX_BOUND) { | ||
235 | struct pppox_sock *po; | ||
236 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, | ||
237 | "%s: recv %d byte data frame, passing to ppp\n", | ||
238 | session->name, data_len); | ||
239 | |||
240 | /* We need to forget all info related to the L2TP packet | ||
241 | * gathered in the skb as we are going to reuse the same | ||
242 | * skb for the inner packet. | ||
243 | * Namely we need to: | ||
244 | * - reset xfrm (IPSec) information as it applies to | ||
245 | * the outer L2TP packet and not to the inner one | ||
246 | * - release the dst to force a route lookup on the inner | ||
247 | * IP packet since skb->dst currently points to the dst | ||
248 | * of the UDP tunnel | ||
249 | * - reset netfilter information as it doesn't apply | ||
250 | * to the inner packet either | ||
251 | */ | ||
252 | secpath_reset(skb); | ||
253 | skb_dst_drop(skb); | ||
254 | nf_reset(skb); | ||
255 | |||
256 | po = pppox_sk(sk); | ||
257 | ppp_input(&po->chan, skb); | ||
258 | } else { | ||
259 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
260 | "%s: socket not bound\n", session->name); | ||
261 | |||
262 | /* Not bound. Nothing we can do, so discard. */ | ||
263 | session->stats.rx_errors++; | ||
264 | kfree_skb(skb); | ||
265 | } | ||
266 | |||
267 | return; | ||
268 | |||
269 | no_sock: | ||
270 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
271 | "%s: no socket\n", session->name); | ||
272 | kfree_skb(skb); | ||
273 | } | ||
274 | |||
/* Session "ref" hook: take a reference on the session's PPPoX socket,
 * if one is attached. Paired with pppol2tp_session_sock_put().
 */
static void pppol2tp_session_sock_hold(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	if (ps->sock)
		sock_hold(ps->sock);
}
282 | |||
/* Session "deref" hook: drop the socket reference taken by
 * pppol2tp_session_sock_hold().
 */
static void pppol2tp_session_sock_put(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	if (ps->sock)
		sock_put(ps->sock);
}
290 | |||
291 | /************************************************************************ | ||
292 | * Transmit handling | ||
293 | ***********************************************************************/ | ||
294 | |||
295 | /* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here | ||
296 | * when a user application does a sendmsg() on the session socket. L2TP and | ||
297 | * PPP headers must be inserted into the user's data. | ||
298 | */ | ||
299 | static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | ||
300 | size_t total_len) | ||
301 | { | ||
302 | static const unsigned char ppph[2] = { 0xff, 0x03 }; | ||
303 | struct sock *sk = sock->sk; | ||
304 | struct sk_buff *skb; | ||
305 | int error; | ||
306 | struct l2tp_session *session; | ||
307 | struct l2tp_tunnel *tunnel; | ||
308 | struct pppol2tp_session *ps; | ||
309 | int uhlen; | ||
310 | |||
311 | error = -ENOTCONN; | ||
312 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | ||
313 | goto error; | ||
314 | |||
315 | /* Get session and tunnel contexts */ | ||
316 | error = -EBADF; | ||
317 | session = pppol2tp_sock_to_session(sk); | ||
318 | if (session == NULL) | ||
319 | goto error; | ||
320 | |||
321 | ps = l2tp_session_priv(session); | ||
322 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
323 | if (tunnel == NULL) | ||
324 | goto error_put_sess; | ||
325 | |||
326 | uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
327 | |||
328 | /* Allocate a socket buffer */ | ||
329 | error = -ENOMEM; | ||
330 | skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + | ||
331 | uhlen + session->hdr_len + | ||
332 | sizeof(ppph) + total_len, | ||
333 | 0, GFP_KERNEL); | ||
334 | if (!skb) | ||
335 | goto error_put_sess_tun; | ||
336 | |||
337 | /* Reserve space for headers. */ | ||
338 | skb_reserve(skb, NET_SKB_PAD); | ||
339 | skb_reset_network_header(skb); | ||
340 | skb_reserve(skb, sizeof(struct iphdr)); | ||
341 | skb_reset_transport_header(skb); | ||
342 | skb_reserve(skb, uhlen); | ||
343 | |||
344 | /* Add PPP header */ | ||
345 | skb->data[0] = ppph[0]; | ||
346 | skb->data[1] = ppph[1]; | ||
347 | skb_put(skb, 2); | ||
348 | |||
349 | /* Copy user data into skb */ | ||
350 | error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); | ||
351 | if (error < 0) { | ||
352 | kfree_skb(skb); | ||
353 | goto error_put_sess_tun; | ||
354 | } | ||
355 | skb_put(skb, total_len); | ||
356 | |||
357 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
358 | |||
359 | sock_put(ps->tunnel_sock); | ||
360 | |||
361 | return error; | ||
362 | |||
363 | error_put_sess_tun: | ||
364 | sock_put(ps->tunnel_sock); | ||
365 | error_put_sess: | ||
366 | sock_put(sk); | ||
367 | error: | ||
368 | return error; | ||
369 | } | ||
370 | |||
/* Transmit function called by generic PPP driver. Sends PPP frame
 * over PPPoL2TP socket.
 *
 * This is almost the same as pppol2tp_sendmsg(), but rather than
 * being called with a msghdr from userspace, it is called with a skb
 * from the kernel.
 *
 * The supplied skb from ppp doesn't have enough headroom for the
 * insertion of L2TP, UDP and IP headers so we need to allocate more
 * headroom in the skb. This will create a cloned skb. But we must be
 * careful in the error case because the caller will expect to free
 * the skb it supplied, not our cloned skb. So we take care to always
 * leave the original skb unfreed if we return an error.
 */
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	static const u8 ppph[2] = { 0xff, 0x03 };
	struct sock *sk = (struct sock *) chan->private;
	struct sock *sk_tun;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int old_headroom;
	int new_headroom;

	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto abort;

	/* Get session and tunnel contexts from the socket. Each
	 * successful lookup takes a reference that the labels below
	 * unwind in reverse order.
	 */
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto abort;

	ps = l2tp_session_priv(session);
	sk_tun = ps->tunnel_sock;
	if (sk_tun == NULL)
		goto abort_put_sess;
	tunnel = l2tp_sock_to_tunnel(sk_tun);
	if (tunnel == NULL)
		goto abort_put_sess;

	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, sizeof(ppph)))
		goto abort_put_sess_tun;

	/* skb_cow_head() may have reallocated the head; account for
	 * any extra headroom in truesize.
	 */
	new_headroom = skb_headroom(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup PPP header */
	__skb_push(skb, sizeof(ppph));
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];

	l2tp_xmit_skb(session, skb, session->hdr_len);

	sock_put(sk_tun);
	sock_put(sk);
	/* Tell PPP the frame was consumed. */
	return 1;

abort_put_sess_tun:
	sock_put(sk_tun);
abort_put_sess:
	sock_put(sk);
abort:
	/* Free the original skb */
	kfree_skb(skb);
	return 1;
}
439 | |||
440 | /***************************************************************************** | ||
441 | * Session (and tunnel control) socket create/destroy. | ||
442 | *****************************************************************************/ | ||
443 | |||
/* Called by l2tp_core when a session socket is being closed.
 */
static void pppol2tp_session_close(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct sock *sk = ps->sock;
	struct sk_buff *skb;

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	/* session_id 0 is the special tunnel-management pseudo-session;
	 * it carries no data state to tear down.
	 */
	if (session->session_id == 0)
		goto out;

	if (sk != NULL) {
		lock_sock(sk);

		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
			pppox_unbind_sock(sk);
			sk->sk_state = PPPOX_DEAD;
			sk->sk_state_change(sk);
		}

		/* Purge any queued data */
		skb_queue_purge(&sk->sk_receive_queue);
		skb_queue_purge(&sk->sk_write_queue);
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			/* Each skb on reorder_q holds a socket reference
			 * taken via the session ref hook
			 * (pppol2tp_session_sock_hold); drop it here.
			 */
			sock_put(sk);
		}

		release_sock(sk);
	}

out:
	return;
}
480 | |||
481 | /* Really kill the session socket. (Called from sock_put() if | ||
482 | * refcnt == 0.) | ||
483 | */ | ||
484 | static void pppol2tp_session_destruct(struct sock *sk) | ||
485 | { | ||
486 | struct l2tp_session *session; | ||
487 | |||
488 | if (sk->sk_user_data != NULL) { | ||
489 | session = sk->sk_user_data; | ||
490 | if (session == NULL) | ||
491 | goto out; | ||
492 | |||
493 | sk->sk_user_data = NULL; | ||
494 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
495 | l2tp_session_dec_refcount(session); | ||
496 | } | ||
497 | |||
498 | out: | ||
499 | return; | ||
500 | } | ||
501 | |||
/* Called when the PPPoX socket (session) is closed.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Takes a reference on sk if a session is attached. */
	session = pppol2tp_sock_to_session(sk);

	/* Purge any queued data */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	if (session != NULL) {
		struct sk_buff *skb;
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			/* Drop the per-skb socket reference taken by
			 * the session's ref hook.
			 */
			sock_put(sk);
		}
		/* Drop the reference taken by
		 * pppol2tp_sock_to_session() above.
		 */
		sock_put(sk);
	}

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}
553 | |||
/* Protocol definition for PPPoL2TP sockets; each socket is allocated
 * as a struct pppox_sock.
 */
static struct proto pppol2tp_sk_proto = {
	.name = "PPPOL2TP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};
559 | |||
560 | static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
561 | { | ||
562 | int rc; | ||
563 | |||
564 | rc = l2tp_udp_encap_recv(sk, skb); | ||
565 | if (rc) | ||
566 | kfree_skb(skb); | ||
567 | |||
568 | return NET_RX_SUCCESS; | ||
569 | } | ||
570 | |||
571 | /* socket() handler. Initialize a new struct sock. | ||
572 | */ | ||
573 | static int pppol2tp_create(struct net *net, struct socket *sock) | ||
574 | { | ||
575 | int error = -ENOMEM; | ||
576 | struct sock *sk; | ||
577 | |||
578 | sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto); | ||
579 | if (!sk) | ||
580 | goto out; | ||
581 | |||
582 | sock_init_data(sock, sk); | ||
583 | |||
584 | sock->state = SS_UNCONNECTED; | ||
585 | sock->ops = &pppol2tp_ops; | ||
586 | |||
587 | sk->sk_backlog_rcv = pppol2tp_backlog_recv; | ||
588 | sk->sk_protocol = PX_PROTO_OL2TP; | ||
589 | sk->sk_family = PF_PPPOX; | ||
590 | sk->sk_state = PPPOX_NONE; | ||
591 | sk->sk_type = SOCK_STREAM; | ||
592 | sk->sk_destruct = pppol2tp_session_destruct; | ||
593 | |||
594 | error = 0; | ||
595 | |||
596 | out: | ||
597 | return error; | ||
598 | } | ||
599 | |||
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
/* debugfs "show" hook: print the PPP interface name for a session. */
static void pppol2tp_show(struct seq_file *m, void *arg)
{
	struct l2tp_session *session = arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	/* NOTE(review): pppox_sk(ps->sock) dereferences ps->sock without
	 * a NULL check. If a netlink-created session (no PPPoX socket
	 * attached yet) can reach this hook, this would oops — confirm
	 * against the debugfs callers.
	 */
	if (ps) {
		struct pppox_sock *po = pppox_sk(ps->sock);
		if (po)
			seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
	}
}
#endif
613 | |||
/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket.
 *
 * The sockaddr carries the fd of a bound UDP tunnel socket plus local
 * and peer tunnel/session ids. Both L2TPv2 (struct sockaddr_pppol2tp)
 * and L2TPv3 (struct sockaddr_pppol2tpv3) formats are accepted and are
 * distinguished by sockaddr_len. session_id == peer_session_id == 0
 * marks a tunnel-management socket (no PPP channel is registered).
 */
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
			    int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
	struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	struct dst_entry *dst;
	struct l2tp_session_cfg cfg = { 0, };
	int error = 0;
	u32 tunnel_id, peer_tunnel_id;
	u32 session_id, peer_session_id;
	int ver = 2;
	int fd;

	lock_sock(sk);

	error = -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OL2TP)
		goto end;

	/* Check for already bound sockets */
	error = -EBUSY;
	if (sk->sk_state & PPPOX_CONNECTED)
		goto end;

	/* We don't supporting rebinding anyway */
	error = -EALREADY;
	if (sk->sk_user_data)
		goto end; /* socket is already attached */

	/* Get params from socket address. Handle L2TPv2 and L2TPv3 */
	if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
		fd = sp->pppol2tp.fd;
		tunnel_id = sp->pppol2tp.s_tunnel;
		peer_tunnel_id = sp->pppol2tp.d_tunnel;
		session_id = sp->pppol2tp.s_session;
		peer_session_id = sp->pppol2tp.d_session;
	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
		ver = 3;
		fd = sp3->pppol2tp.fd;
		tunnel_id = sp3->pppol2tp.s_tunnel;
		peer_tunnel_id = sp3->pppol2tp.d_tunnel;
		session_id = sp3->pppol2tp.s_session;
		peer_session_id = sp3->pppol2tp.d_session;
	} else {
		error = -EINVAL;
		goto end; /* bad socket address */
	}

	/* Don't bind if tunnel_id is 0 */
	error = -EINVAL;
	if (tunnel_id == 0)
		goto end;

	tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);

	/* Special case: create tunnel context if session_id and
	 * peer_session_id is 0. Otherwise look up tunnel using supplied
	 * tunnel id.
	 */
	if ((session_id == 0) && (peer_session_id == 0)) {
		if (tunnel == NULL) {
			struct l2tp_tunnel_cfg tcfg = {
				.encap = L2TP_ENCAPTYPE_UDP,
				.debug = 0,
			};
			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
			if (error < 0)
				goto end;
		}
	} else {
		/* Error if we can't find the tunnel */
		error = -ENOENT;
		if (tunnel == NULL)
			goto end;

		/* Error if socket is not prepped */
		if (tunnel->sock == NULL)
			goto end;
	}

	/* First PPP session on this tunnel installs the shared hook
	 * that strips the optional ff 03 PPP header on receive.
	 */
	if (tunnel->recv_payload_hook == NULL)
		tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;

	if (tunnel->peer_tunnel_id == 0) {
		if (ver == 2)
			tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
		else
			tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
	}

	/* Create session if it doesn't already exist. We handle the
	 * case where a session was previously created by the netlink
	 * interface by checking that the session doesn't already have
	 * a socket and its tunnel socket are what we expect. If any
	 * of those checks fail, return EEXIST to the caller.
	 */
	session = l2tp_session_find(sock_net(sk), tunnel, session_id);
	if (session == NULL) {
		/* Default MTU must allow space for UDP/L2TP/PPP
		 * headers.
		 */
		cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;

		/* Allocate and initialize a new session context. */
		session = l2tp_session_create(sizeof(struct pppol2tp_session),
					      tunnel, session_id,
					      peer_session_id, &cfg);
		if (session == NULL) {
			error = -ENOMEM;
			goto end;
		}
	} else {
		ps = l2tp_session_priv(session);
		error = -EEXIST;
		if (ps->sock != NULL)
			goto end;

		/* consistency checks */
		if (ps->tunnel_sock != tunnel->sock)
			goto end;
	}

	/* Associate session with its PPPoL2TP socket */
	ps = l2tp_session_priv(session);
	ps->owner = current->pid;
	ps->sock = sk;
	ps->tunnel_sock = tunnel->sock;

	session->recv_skb = pppol2tp_recv;
	session->session_close = pppol2tp_session_close;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = pppol2tp_show;
#endif

	/* We need to know each time a skb is dropped from the reorder
	 * queue.
	 */
	session->ref = pppol2tp_session_sock_hold;
	session->deref = pppol2tp_session_sock_put;

	/* If PMTU discovery was enabled, use the MTU that was discovered */
	dst = sk_dst_get(sk);
	if (dst != NULL) {
		u32 pmtu = dst_mtu(__sk_dst_get(sk));
		if (pmtu != 0)
			session->mtu = session->mru = pmtu -
				PPPOL2TP_HEADER_OVERHEAD;
		dst_release(dst);
	}

	/* Special case: if source & dest session_id == 0x0000, this
	 * socket is being created to manage the tunnel. Just set up
	 * the internal context for use by ioctl() and sockopt()
	 * handlers.
	 */
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		error = 0;
		goto out_no_ppp;
	}

	/* The only header we need to worry about is the L2TP
	 * header. This size is different depending on whether
	 * sequence numbers are enabled for the data channel.
	 */
	po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;

	po->chan.private = sk;
	po->chan.ops = &pppol2tp_chan_ops;
	po->chan.mtu = session->mtu;

	error = ppp_register_net_channel(sock_net(sk), &po->chan);
	if (error)
		goto end;

out_no_ppp:
	/* This is how we get the session context from the socket. */
	sk->sk_user_data = session;
	sk->sk_state = PPPOX_CONNECTED;
	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

end:
	release_sock(sk);

	return error;
}
808 | |||
809 | #ifdef CONFIG_L2TP_V3 | ||
810 | |||
/* Called when creating sessions via the netlink interface.
 *
 * Looks up the tunnel, verifies no session with this id exists yet,
 * applies default MTU/MRU values and creates the session context. The
 * PPPoX socket is attached later, by pppol2tp_connect().
 * Returns 0 on success, -ENOENT, -EEXIST or -ENOMEM on failure.
 */
static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	int error;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct pppol2tp_session *ps;

	tunnel = l2tp_tunnel_find(net, tunnel_id);

	/* Error if we can't find the tunnel */
	error = -ENOENT;
	if (tunnel == NULL)
		goto out;

	/* Error if tunnel socket is not prepped */
	if (tunnel->sock == NULL)
		goto out;

	/* Check that this session doesn't already exist */
	error = -EEXIST;
	session = l2tp_session_find(net, tunnel, session_id);
	if (session != NULL)
		goto out;

	/* Default MTU values. */
	if (cfg->mtu == 0)
		cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
	if (cfg->mru == 0)
		cfg->mru = cfg->mtu;

	/* Allocate and initialize a new session context. */
	error = -ENOMEM;
	session = l2tp_session_create(sizeof(struct pppol2tp_session),
				      tunnel, session_id,
				      peer_session_id, cfg);
	if (session == NULL)
		goto out;

	ps = l2tp_session_priv(session);
	ps->tunnel_sock = tunnel->sock;

	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

	error = 0;

out:
	return error;
}
862 | |||
/* Called when deleting sessions via the netlink interface.
 */
static int pppol2tp_session_delete(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	/* If no PPPoX socket was ever attached, drop the session's
	 * reference here; otherwise the socket teardown path
	 * (pppol2tp_session_destruct) drops it.
	 */
	if (ps->sock == NULL)
		l2tp_session_dec_refcount(session);

	return 0;
}
874 | |||
875 | #endif /* CONFIG_L2TP_V3 */ | ||
876 | |||
877 | /* getname() support. | ||
878 | */ | ||
879 | static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, | ||
880 | int *usockaddr_len, int peer) | ||
881 | { | ||
882 | int len = 0; | ||
883 | int error = 0; | ||
884 | struct l2tp_session *session; | ||
885 | struct l2tp_tunnel *tunnel; | ||
886 | struct sock *sk = sock->sk; | ||
887 | struct inet_sock *inet; | ||
888 | struct pppol2tp_session *pls; | ||
889 | |||
890 | error = -ENOTCONN; | ||
891 | if (sk == NULL) | ||
892 | goto end; | ||
893 | if (sk->sk_state != PPPOX_CONNECTED) | ||
894 | goto end; | ||
895 | |||
896 | error = -EBADF; | ||
897 | session = pppol2tp_sock_to_session(sk); | ||
898 | if (session == NULL) | ||
899 | goto end; | ||
900 | |||
901 | pls = l2tp_session_priv(session); | ||
902 | tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock); | ||
903 | if (tunnel == NULL) { | ||
904 | error = -EBADF; | ||
905 | goto end_put_sess; | ||
906 | } | ||
907 | |||
908 | inet = inet_sk(sk); | ||
909 | if (tunnel->version == 2) { | ||
910 | struct sockaddr_pppol2tp sp; | ||
911 | len = sizeof(sp); | ||
912 | memset(&sp, 0, len); | ||
913 | sp.sa_family = AF_PPPOX; | ||
914 | sp.sa_protocol = PX_PROTO_OL2TP; | ||
915 | sp.pppol2tp.fd = tunnel->fd; | ||
916 | sp.pppol2tp.pid = pls->owner; | ||
917 | sp.pppol2tp.s_tunnel = tunnel->tunnel_id; | ||
918 | sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; | ||
919 | sp.pppol2tp.s_session = session->session_id; | ||
920 | sp.pppol2tp.d_session = session->peer_session_id; | ||
921 | sp.pppol2tp.addr.sin_family = AF_INET; | ||
922 | sp.pppol2tp.addr.sin_port = inet->inet_dport; | ||
923 | sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; | ||
924 | memcpy(uaddr, &sp, len); | ||
925 | } else if (tunnel->version == 3) { | ||
926 | struct sockaddr_pppol2tpv3 sp; | ||
927 | len = sizeof(sp); | ||
928 | memset(&sp, 0, len); | ||
929 | sp.sa_family = AF_PPPOX; | ||
930 | sp.sa_protocol = PX_PROTO_OL2TP; | ||
931 | sp.pppol2tp.fd = tunnel->fd; | ||
932 | sp.pppol2tp.pid = pls->owner; | ||
933 | sp.pppol2tp.s_tunnel = tunnel->tunnel_id; | ||
934 | sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; | ||
935 | sp.pppol2tp.s_session = session->session_id; | ||
936 | sp.pppol2tp.d_session = session->peer_session_id; | ||
937 | sp.pppol2tp.addr.sin_family = AF_INET; | ||
938 | sp.pppol2tp.addr.sin_port = inet->inet_dport; | ||
939 | sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; | ||
940 | memcpy(uaddr, &sp, len); | ||
941 | } | ||
942 | |||
943 | *usockaddr_len = len; | ||
944 | |||
945 | sock_put(pls->tunnel_sock); | ||
946 | end_put_sess: | ||
947 | sock_put(sk); | ||
948 | error = 0; | ||
949 | |||
950 | end: | ||
951 | return error; | ||
952 | } | ||
953 | |||
954 | /**************************************************************************** | ||
955 | * ioctl() handlers. | ||
956 | * | ||
957 | * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP | ||
958 | * sockets. However, in order to control kernel tunnel features, we allow | ||
959 | * userspace to create a special "tunnel" PPPoX socket which is used for | ||
960 | * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow | ||
961 | * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() | ||
962 | * calls. | ||
963 | ****************************************************************************/ | ||
964 | |||
965 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, | ||
966 | struct l2tp_stats *stats) | ||
967 | { | ||
968 | dest->tx_packets = stats->tx_packets; | ||
969 | dest->tx_bytes = stats->tx_bytes; | ||
970 | dest->tx_errors = stats->tx_errors; | ||
971 | dest->rx_packets = stats->rx_packets; | ||
972 | dest->rx_bytes = stats->rx_bytes; | ||
973 | dest->rx_seq_discards = stats->rx_seq_discards; | ||
974 | dest->rx_oos_packets = stats->rx_oos_packets; | ||
975 | dest->rx_errors = stats->rx_errors; | ||
976 | } | ||
977 | |||
978 | /* Session ioctl helper. | ||
979 | */ | ||
980 | static int pppol2tp_session_ioctl(struct l2tp_session *session, | ||
981 | unsigned int cmd, unsigned long arg) | ||
982 | { | ||
983 | struct ifreq ifr; | ||
984 | int err = 0; | ||
985 | struct sock *sk; | ||
986 | int val = (int) arg; | ||
987 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
988 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
989 | struct pppol2tp_ioc_stats stats; | ||
990 | |||
991 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, | ||
992 | "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", | ||
993 | session->name, cmd, arg); | ||
994 | |||
995 | sk = ps->sock; | ||
996 | sock_hold(sk); | ||
997 | |||
998 | switch (cmd) { | ||
999 | case SIOCGIFMTU: | ||
1000 | err = -ENXIO; | ||
1001 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1002 | break; | ||
1003 | |||
1004 | err = -EFAULT; | ||
1005 | if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) | ||
1006 | break; | ||
1007 | ifr.ifr_mtu = session->mtu; | ||
1008 | if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) | ||
1009 | break; | ||
1010 | |||
1011 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1012 | "%s: get mtu=%d\n", session->name, session->mtu); | ||
1013 | err = 0; | ||
1014 | break; | ||
1015 | |||
1016 | case SIOCSIFMTU: | ||
1017 | err = -ENXIO; | ||
1018 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1019 | break; | ||
1020 | |||
1021 | err = -EFAULT; | ||
1022 | if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) | ||
1023 | break; | ||
1024 | |||
1025 | session->mtu = ifr.ifr_mtu; | ||
1026 | |||
1027 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1028 | "%s: set mtu=%d\n", session->name, session->mtu); | ||
1029 | err = 0; | ||
1030 | break; | ||
1031 | |||
1032 | case PPPIOCGMRU: | ||
1033 | err = -ENXIO; | ||
1034 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1035 | break; | ||
1036 | |||
1037 | err = -EFAULT; | ||
1038 | if (put_user(session->mru, (int __user *) arg)) | ||
1039 | break; | ||
1040 | |||
1041 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1042 | "%s: get mru=%d\n", session->name, session->mru); | ||
1043 | err = 0; | ||
1044 | break; | ||
1045 | |||
1046 | case PPPIOCSMRU: | ||
1047 | err = -ENXIO; | ||
1048 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1049 | break; | ||
1050 | |||
1051 | err = -EFAULT; | ||
1052 | if (get_user(val, (int __user *) arg)) | ||
1053 | break; | ||
1054 | |||
1055 | session->mru = val; | ||
1056 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1057 | "%s: set mru=%d\n", session->name, session->mru); | ||
1058 | err = 0; | ||
1059 | break; | ||
1060 | |||
1061 | case PPPIOCGFLAGS: | ||
1062 | err = -EFAULT; | ||
1063 | if (put_user(ps->flags, (int __user *) arg)) | ||
1064 | break; | ||
1065 | |||
1066 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1067 | "%s: get flags=%d\n", session->name, ps->flags); | ||
1068 | err = 0; | ||
1069 | break; | ||
1070 | |||
1071 | case PPPIOCSFLAGS: | ||
1072 | err = -EFAULT; | ||
1073 | if (get_user(val, (int __user *) arg)) | ||
1074 | break; | ||
1075 | ps->flags = val; | ||
1076 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1077 | "%s: set flags=%d\n", session->name, ps->flags); | ||
1078 | err = 0; | ||
1079 | break; | ||
1080 | |||
1081 | case PPPIOCGL2TPSTATS: | ||
1082 | err = -ENXIO; | ||
1083 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1084 | break; | ||
1085 | |||
1086 | memset(&stats, 0, sizeof(stats)); | ||
1087 | stats.tunnel_id = tunnel->tunnel_id; | ||
1088 | stats.session_id = session->session_id; | ||
1089 | pppol2tp_copy_stats(&stats, &session->stats); | ||
1090 | if (copy_to_user((void __user *) arg, &stats, | ||
1091 | sizeof(stats))) | ||
1092 | break; | ||
1093 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1094 | "%s: get L2TP stats\n", session->name); | ||
1095 | err = 0; | ||
1096 | break; | ||
1097 | |||
1098 | default: | ||
1099 | err = -ENOSYS; | ||
1100 | break; | ||
1101 | } | ||
1102 | |||
1103 | sock_put(sk); | ||
1104 | |||
1105 | return err; | ||
1106 | } | ||
1107 | |||
/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 *
 * Only PPPIOCGL2TPSTATS is handled here; all other commands return
 * -ENOSYS. A reference on the tunnel's socket is held for the duration
 * of the call.
 */
static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk;
	struct pppol2tp_ioc_stats stats;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
	       tunnel->name, cmd, arg);

	/* Pin the tunnel socket while we work on it. */
	sk = tunnel->sock;
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		/* Read the request first: a non-zero session_id means the
		 * caller wants per-session stats through the tunnel socket.
		 */
		if (copy_from_user(&stats, (void __user *) arg,
				   sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		if (stats.session_id != 0) {
			/* resend to session ioctl handler */
			struct l2tp_session *session =
				l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
			/* NOTE(review): l2tp_session_find() appears to return
			 * the session without taking a reference here — confirm
			 * the lifetime guarantees against l2tp_core. */
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		/* Report whether either xfrm policy slot is populated. */
		stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		pppol2tp_copy_stats(&stats, &tunnel->stats);
		if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
1171 | |||
/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 *
 * A PPPoX socket whose session_id and peer_session_id are both zero is
 * the special "tunnel management" socket; its ioctls are redirected to
 * the tunnel helper. Everything else goes to the session helper.
 *
 * Returns 0 for a NULL sk, -EBADF for dead sockets or missing
 * session/tunnel context, -ENOTCONN when unbound, otherwise the
 * helper's result.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		/* l2tp_sock_to_tunnel() took a ref on the tunnel socket;
		 * drop it before releasing the session ref below. */
		sock_put(ps->tunnel_sock);
		goto end_put_sess;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end_put_sess:
	/* Drop the ref taken by pppol2tp_sock_to_session(). */
	sock_put(sk);
end:
	return err;
}
1225 | |||
1226 | /***************************************************************************** | ||
1227 | * setsockopt() / getsockopt() support. | ||
1228 | * | ||
1229 | * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP | ||
1230 | * sockets. In order to control kernel tunnel features, we allow userspace to | ||
1231 | * create a special "tunnel" PPPoX socket which is used for control only. | ||
1232 | * Tunnel PPPoX sockets have session_id == 0 and simply allow the user | ||
1233 | * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls. | ||
1234 | *****************************************************************************/ | ||
1235 | |||
1236 | /* Tunnel setsockopt() helper. | ||
1237 | */ | ||
1238 | static int pppol2tp_tunnel_setsockopt(struct sock *sk, | ||
1239 | struct l2tp_tunnel *tunnel, | ||
1240 | int optname, int val) | ||
1241 | { | ||
1242 | int err = 0; | ||
1243 | |||
1244 | switch (optname) { | ||
1245 | case PPPOL2TP_SO_DEBUG: | ||
1246 | tunnel->debug = val; | ||
1247 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1248 | "%s: set debug=%x\n", tunnel->name, tunnel->debug); | ||
1249 | break; | ||
1250 | |||
1251 | default: | ||
1252 | err = -ENOPROTOOPT; | ||
1253 | break; | ||
1254 | } | ||
1255 | |||
1256 | return err; | ||
1257 | } | ||
1258 | |||
/* Session setsockopt helper.
 *
 * Options: RECVSEQ/SENDSEQ/LNSMODE take a strict 0/1 value (stored as
 * 0/-1 in the session flags), DEBUG takes a bitmask, REORDERTO a
 * timeout in milliseconds (stored in jiffies). Unknown options return
 * -ENOPROTOOPT; out-of-range boolean values return -EINVAL.
 */
static int pppol2tp_session_setsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int val)
{
	int err = 0;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->recv_seq = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set recv_seq=%d\n", session->name, session->recv_seq);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->send_seq = val ? -1 : 0;
		{
			/* Resize the PPP channel header to match whether
			 * sequence numbers will be transmitted.
			 * NOTE(review): assumes ps->sock is non-NULL here —
			 * confirm a session socket is always attached before
			 * setsockopt can reach this point. */
			struct sock *ssk = ps->sock;
			struct pppox_sock *po = pppox_sk(ssk);
			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
		}
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set send_seq=%d\n", session->name, session->send_seq);
		break;

	case PPPOL2TP_SO_LNSMODE:
		if ((val != 0) && (val != 1)) {
			err = -EINVAL;
			break;
		}
		session->lns_mode = val ? -1 : 0;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set lns_mode=%d\n", session->name, session->lns_mode);
		break;

	case PPPOL2TP_SO_DEBUG:
		session->debug = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set debug=%x\n", session->name, session->debug);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* Userspace supplies milliseconds; stored internally in jiffies. */
		session->reorder_timeout = msecs_to_jiffies(val);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
1324 | |||
1325 | /* Main setsockopt() entry point. | ||
1326 | * Does API checks, then calls either the tunnel or session setsockopt | ||
1327 | * handler, according to whether the PPPoL2TP socket is a for a regular | ||
1328 | * session or the special tunnel type. | ||
1329 | */ | ||
1330 | static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, | ||
1331 | char __user *optval, unsigned int optlen) | ||
1332 | { | ||
1333 | struct sock *sk = sock->sk; | ||
1334 | struct l2tp_session *session; | ||
1335 | struct l2tp_tunnel *tunnel; | ||
1336 | struct pppol2tp_session *ps; | ||
1337 | int val; | ||
1338 | int err; | ||
1339 | |||
1340 | if (level != SOL_PPPOL2TP) | ||
1341 | return udp_prot.setsockopt(sk, level, optname, optval, optlen); | ||
1342 | |||
1343 | if (optlen < sizeof(int)) | ||
1344 | return -EINVAL; | ||
1345 | |||
1346 | if (get_user(val, (int __user *)optval)) | ||
1347 | return -EFAULT; | ||
1348 | |||
1349 | err = -ENOTCONN; | ||
1350 | if (sk->sk_user_data == NULL) | ||
1351 | goto end; | ||
1352 | |||
1353 | /* Get session context from the socket */ | ||
1354 | err = -EBADF; | ||
1355 | session = pppol2tp_sock_to_session(sk); | ||
1356 | if (session == NULL) | ||
1357 | goto end; | ||
1358 | |||
1359 | /* Special case: if session_id == 0x0000, treat as operation on tunnel | ||
1360 | */ | ||
1361 | ps = l2tp_session_priv(session); | ||
1362 | if ((session->session_id == 0) && | ||
1363 | (session->peer_session_id == 0)) { | ||
1364 | err = -EBADF; | ||
1365 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1366 | if (tunnel == NULL) | ||
1367 | goto end_put_sess; | ||
1368 | |||
1369 | err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); | ||
1370 | sock_put(ps->tunnel_sock); | ||
1371 | } else | ||
1372 | err = pppol2tp_session_setsockopt(sk, session, optname, val); | ||
1373 | |||
1374 | err = 0; | ||
1375 | |||
1376 | end_put_sess: | ||
1377 | sock_put(sk); | ||
1378 | end: | ||
1379 | return err; | ||
1380 | } | ||
1381 | |||
1382 | /* Tunnel getsockopt helper. Called with sock locked. | ||
1383 | */ | ||
1384 | static int pppol2tp_tunnel_getsockopt(struct sock *sk, | ||
1385 | struct l2tp_tunnel *tunnel, | ||
1386 | int optname, int *val) | ||
1387 | { | ||
1388 | int err = 0; | ||
1389 | |||
1390 | switch (optname) { | ||
1391 | case PPPOL2TP_SO_DEBUG: | ||
1392 | *val = tunnel->debug; | ||
1393 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1394 | "%s: get debug=%x\n", tunnel->name, tunnel->debug); | ||
1395 | break; | ||
1396 | |||
1397 | default: | ||
1398 | err = -ENOPROTOOPT; | ||
1399 | break; | ||
1400 | } | ||
1401 | |||
1402 | return err; | ||
1403 | } | ||
1404 | |||
/* Session getsockopt helper. Called with sock locked.
 *
 * Writes the requested session option into *val and returns 0, or
 * returns -ENOPROTOOPT for unknown options (leaving *val untouched).
 * REORDERTO is converted back from jiffies to milliseconds.
 */
static int pppol2tp_session_getsockopt(struct sock *sk,
				       struct l2tp_session *session,
				       int optname, int *val)
{
	int err = 0;

	switch (optname) {
	case PPPOL2TP_SO_RECVSEQ:
		*val = session->recv_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get recv_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_SENDSEQ:
		*val = session->send_seq;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get send_seq=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_LNSMODE:
		*val = session->lns_mode;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get lns_mode=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_DEBUG:
		*val = session->debug;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get debug=%d\n", session->name, *val);
		break;

	case PPPOL2TP_SO_REORDERTO:
		/* Stored in jiffies; reported to userspace in milliseconds. */
		*val = (int) jiffies_to_msecs(session->reorder_timeout);
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get reorder_timeout=%d\n", session->name, *val);
		break;

	default:
		err = -ENOPROTOOPT;
	}

	return err;
}
1450 | |||
1451 | /* Main getsockopt() entry point. | ||
1452 | * Does API checks, then calls either the tunnel or session getsockopt | ||
1453 | * handler, according to whether the PPPoX socket is a for a regular session | ||
1454 | * or the special tunnel type. | ||
1455 | */ | ||
1456 | static int pppol2tp_getsockopt(struct socket *sock, int level, | ||
1457 | int optname, char __user *optval, int __user *optlen) | ||
1458 | { | ||
1459 | struct sock *sk = sock->sk; | ||
1460 | struct l2tp_session *session; | ||
1461 | struct l2tp_tunnel *tunnel; | ||
1462 | int val, len; | ||
1463 | int err; | ||
1464 | struct pppol2tp_session *ps; | ||
1465 | |||
1466 | if (level != SOL_PPPOL2TP) | ||
1467 | return udp_prot.getsockopt(sk, level, optname, optval, optlen); | ||
1468 | |||
1469 | if (get_user(len, (int __user *) optlen)) | ||
1470 | return -EFAULT; | ||
1471 | |||
1472 | len = min_t(unsigned int, len, sizeof(int)); | ||
1473 | |||
1474 | if (len < 0) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | err = -ENOTCONN; | ||
1478 | if (sk->sk_user_data == NULL) | ||
1479 | goto end; | ||
1480 | |||
1481 | /* Get the session context */ | ||
1482 | err = -EBADF; | ||
1483 | session = pppol2tp_sock_to_session(sk); | ||
1484 | if (session == NULL) | ||
1485 | goto end; | ||
1486 | |||
1487 | /* Special case: if session_id == 0x0000, treat as operation on tunnel */ | ||
1488 | ps = l2tp_session_priv(session); | ||
1489 | if ((session->session_id == 0) && | ||
1490 | (session->peer_session_id == 0)) { | ||
1491 | err = -EBADF; | ||
1492 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1493 | if (tunnel == NULL) | ||
1494 | goto end_put_sess; | ||
1495 | |||
1496 | err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); | ||
1497 | sock_put(ps->tunnel_sock); | ||
1498 | } else | ||
1499 | err = pppol2tp_session_getsockopt(sk, session, optname, &val); | ||
1500 | |||
1501 | err = -EFAULT; | ||
1502 | if (put_user(len, (int __user *) optlen)) | ||
1503 | goto end_put_sess; | ||
1504 | |||
1505 | if (copy_to_user((void __user *) optval, &val, len)) | ||
1506 | goto end_put_sess; | ||
1507 | |||
1508 | err = 0; | ||
1509 | |||
1510 | end_put_sess: | ||
1511 | sock_put(sk); | ||
1512 | end: | ||
1513 | return err; | ||
1514 | } | ||
1515 | |||
1516 | /***************************************************************************** | ||
1517 | * /proc filesystem for debug | ||
1518 | * Since the original pppol2tp driver provided /proc/net/pppol2tp for | ||
1519 | * L2TPv2, we dump only L2TPv2 tunnels and sessions here. | ||
1520 | *****************************************************************************/ | ||
1521 | |||
/* Per-net ID allocated by register_pernet_device() via pppol2tp_net_ops. */
static unsigned int pppol2tp_net_id;
1523 | |||
1524 | #ifdef CONFIG_PROC_FS | ||
1525 | |||
/* Iterator state for the /proc/net/pppol2tp seq_file walk: tracks the
 * current tunnel and the session index within it across show() calls.
 */
struct pppol2tp_seq_data {
	struct seq_net_private p;
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};
1533 | |||
1534 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) | ||
1535 | { | ||
1536 | for (;;) { | ||
1537 | pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); | ||
1538 | pd->tunnel_idx++; | ||
1539 | |||
1540 | if (pd->tunnel == NULL) | ||
1541 | break; | ||
1542 | |||
1543 | /* Ignore L2TPv3 tunnels */ | ||
1544 | if (pd->tunnel->version < 3) | ||
1545 | break; | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) | ||
1550 | { | ||
1551 | pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); | ||
1552 | pd->session_idx++; | ||
1553 | |||
1554 | if (pd->session == NULL) { | ||
1555 | pd->session_idx = 0; | ||
1556 | pppol2tp_next_tunnel(net, pd); | ||
1557 | } | ||
1558 | } | ||
1559 | |||
1560 | static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) | ||
1561 | { | ||
1562 | struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; | ||
1563 | loff_t pos = *offs; | ||
1564 | struct net *net; | ||
1565 | |||
1566 | if (!pos) | ||
1567 | goto out; | ||
1568 | |||
1569 | BUG_ON(m->private == NULL); | ||
1570 | pd = m->private; | ||
1571 | net = seq_file_net(m); | ||
1572 | |||
1573 | if (pd->tunnel == NULL) | ||
1574 | pppol2tp_next_tunnel(net, pd); | ||
1575 | else | ||
1576 | pppol2tp_next_session(net, pd); | ||
1577 | |||
1578 | /* NULL tunnel and session indicates end of list */ | ||
1579 | if ((pd->tunnel == NULL) && (pd->session == NULL)) | ||
1580 | pd = NULL; | ||
1581 | |||
1582 | out: | ||
1583 | return pd; | ||
1584 | } | ||
1585 | |||
1586 | static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
1587 | { | ||
1588 | (*pos)++; | ||
1589 | return NULL; | ||
1590 | } | ||
1591 | |||
/* seq_file stop callback: no per-walk resources to release. */
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
1596 | |||
/* Emit one tunnel's /proc lines: name, whether sk_user_data still points
 * at the tunnel, refcount (minus the iterator's implied one), debug mask
 * and tx/rx counters.
 */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct l2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   (unsigned long long)tunnel->stats.tx_packets,
		   (unsigned long long)tunnel->stats.tx_bytes,
		   (unsigned long long)tunnel->stats.tx_errors,
		   (unsigned long long)tunnel->stats.rx_packets,
		   (unsigned long long)tunnel->stats.rx_bytes,
		   (unsigned long long)tunnel->stats.rx_errors);
}
1614 | |||
/* Emit one session's /proc lines: addressing, state, mtu/mru, sequencing
 * flags, debug mask, reorder timeout, NR/NS and tx/rx counters, plus the
 * PPP interface name when a channel is attached.
 */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct l2tp_session *session = v;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	/* NOTE(review): ps->sock is dereferenced below (sk_state,
	 * sk_user_data) before the "if (po)" guard — looks like this
	 * assumes a session shown here always has a socket attached;
	 * confirm. */
	struct pppox_sock *po = pppox_sk(ps->sock);
	u32 ip = 0;
	u16 port = 0;

	if (tunnel->sock) {
		struct inet_sock *inet = inet_sk(tunnel->sock);
		ip = ntohl(inet->inet_saddr);
		port = ntohs(inet->inet_sport);
	}

	seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name, ip, port,
		   tunnel->tunnel_id,
		   session->session_id,
		   tunnel->peer_tunnel_id,
		   session->peer_session_id,
		   ps->sock->sk_state,
		   (session == ps->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, "   %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   (unsigned long long)session->stats.tx_packets,
		   (unsigned long long)session->stats.tx_bytes,
		   (unsigned long long)session->stats.tx_errors,
		   (unsigned long long)session->stats.rx_packets,
		   (unsigned long long)session->stats.rx_bytes,
		   (unsigned long long)session->stats.rx_errors);

	if (po)
		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
}
1659 | |||
1660 | static int pppol2tp_seq_show(struct seq_file *m, void *v) | ||
1661 | { | ||
1662 | struct pppol2tp_seq_data *pd = v; | ||
1663 | |||
1664 | /* display header on line 1 */ | ||
1665 | if (v == SEQ_START_TOKEN) { | ||
1666 | seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n"); | ||
1667 | seq_puts(m, "TUNNEL name, user-data-ok session-count\n"); | ||
1668 | seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
1669 | seq_puts(m, " SESSION name, addr/port src-tid/sid " | ||
1670 | "dest-tid/sid state user-data-ok\n"); | ||
1671 | seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n"); | ||
1672 | seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
1673 | goto out; | ||
1674 | } | ||
1675 | |||
1676 | /* Show the tunnel or session context. | ||
1677 | */ | ||
1678 | if (pd->session == NULL) | ||
1679 | pppol2tp_seq_tunnel_show(m, pd->tunnel); | ||
1680 | else | ||
1681 | pppol2tp_seq_session_show(m, pd->session); | ||
1682 | |||
1683 | out: | ||
1684 | return 0; | ||
1685 | } | ||
1686 | |||
/* seq_file callbacks backing /proc/net/pppol2tp. */
static const struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};
1693 | |||
/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file.
 *
 * seq_open_net() zero-allocates a pppol2tp_seq_data (and binds it to the
 * opener's network namespace); it is freed by seq_release_net().
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pppol2tp_seq_ops,
			    sizeof(struct pppol2tp_seq_data));
}
1703 | |||
/* file_operations for /proc/net/pppol2tp; standard seq_file plumbing. */
static const struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
1711 | |||
1712 | #endif /* CONFIG_PROC_FS */ | ||
1713 | |||
1714 | /***************************************************************************** | ||
1715 | * Network namespace | ||
1716 | *****************************************************************************/ | ||
1717 | |||
1718 | static __net_init int pppol2tp_init_net(struct net *net) | ||
1719 | { | ||
1720 | struct proc_dir_entry *pde; | ||
1721 | int err = 0; | ||
1722 | |||
1723 | pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); | ||
1724 | if (!pde) { | ||
1725 | err = -ENOMEM; | ||
1726 | goto out; | ||
1727 | } | ||
1728 | |||
1729 | out: | ||
1730 | return err; | ||
1731 | } | ||
1732 | |||
/* Per-namespace teardown: remove the /proc/net/pppol2tp entry. */
static __net_exit void pppol2tp_exit_net(struct net *net)
{
	proc_net_remove(net, "pppol2tp");
}
1737 | |||
/* Pernet hooks: create/destroy the proc entry per network namespace. */
static struct pernet_operations pppol2tp_net_ops = {
	.init = pppol2tp_init_net,
	.exit = pppol2tp_exit_net,
	.id   = &pppol2tp_net_id,
};
1743 | |||
1744 | /***************************************************************************** | ||
1745 | * Init and cleanup | ||
1746 | *****************************************************************************/ | ||
1747 | |||
/* proto_ops for PPPoL2TP sockets: connection-oriented datagram handlers;
 * unsupported operations are stubbed with the sock_no_* helpers, and
 * ioctl goes through pppox (which dispatches to pppol2tp_ioctl).
 */
static const struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};
1767 | |||
/* pppox protocol hooks registered for PX_PROTO_OL2TP. */
static struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};
1772 | |||
1773 | #ifdef CONFIG_L2TP_V3 | ||
1774 | |||
/* Netlink session create/delete callbacks for the PPP pseudowire type. */
static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
	.session_create	= pppol2tp_session_create,
	.session_delete	= pppol2tp_session_delete,
};
1779 | |||
1780 | #endif /* CONFIG_L2TP_V3 */ | ||
1781 | |||
1782 | static int __init pppol2tp_init(void) | ||
1783 | { | ||
1784 | int err; | ||
1785 | |||
1786 | err = register_pernet_device(&pppol2tp_net_ops); | ||
1787 | if (err) | ||
1788 | goto out; | ||
1789 | |||
1790 | err = proto_register(&pppol2tp_sk_proto, 0); | ||
1791 | if (err) | ||
1792 | goto out_unregister_pppol2tp_pernet; | ||
1793 | |||
1794 | err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto); | ||
1795 | if (err) | ||
1796 | goto out_unregister_pppol2tp_proto; | ||
1797 | |||
1798 | #ifdef CONFIG_L2TP_V3 | ||
1799 | err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops); | ||
1800 | if (err) | ||
1801 | goto out_unregister_pppox; | ||
1802 | #endif | ||
1803 | |||
1804 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", | ||
1805 | PPPOL2TP_DRV_VERSION); | ||
1806 | |||
1807 | out: | ||
1808 | return err; | ||
1809 | |||
1810 | #ifdef CONFIG_L2TP_V3 | ||
1811 | out_unregister_pppox: | ||
1812 | unregister_pppox_proto(PX_PROTO_OL2TP); | ||
1813 | #endif | ||
1814 | out_unregister_pppol2tp_proto: | ||
1815 | proto_unregister(&pppol2tp_sk_proto); | ||
1816 | out_unregister_pppol2tp_pernet: | ||
1817 | unregister_pernet_device(&pppol2tp_net_ops); | ||
1818 | goto out; | ||
1819 | } | ||
1820 | |||
/* Module exit: unregister everything in reverse order of pppol2tp_init(). */
static void __exit pppol2tp_exit(void)
{
#ifdef CONFIG_L2TP_V3
	l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
#endif
	unregister_pppox_proto(PX_PROTO_OL2TP);
	proto_unregister(&pppol2tp_sk_proto);
	unregister_pernet_device(&pppol2tp_net_ops);
}
1830 | |||
1831 | module_init(pppol2tp_init); | ||
1832 | module_exit(pppol2tp_exit); | ||
1833 | |||
1834 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
1835 | MODULE_DESCRIPTION("PPP over L2TP over UDP"); | ||
1836 | MODULE_LICENSE("GPL"); | ||
1837 | MODULE_VERSION(PPPOL2TP_DRV_VERSION); | ||
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index bda96d18fd98..d5d8d555c410 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/inet.h> | 29 | #include <linux/inet.h> |
30 | #include <linux/if_arp.h> | 30 | #include <linux/if_arp.h> |
31 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
32 | #include <linux/slab.h> | ||
32 | #include <net/sock.h> | 33 | #include <net/sock.h> |
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | #include <asm/system.h> | 35 | #include <asm/system.h> |
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c index 6762e7c751eb..21904a002449 100644 --- a/net/lapb/lapb_in.c +++ b/net/lapb/lapb_in.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/inet.h> | 27 | #include <linux/inet.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
30 | #include <linux/slab.h> | ||
30 | #include <net/sock.h> | 31 | #include <net/sock.h> |
31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
32 | #include <asm/system.h> | 33 | #include <asm/system.h> |
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index 339cc5f2684f..c75a79540f9f 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/net.h> | 25 | #include <linux/net.h> |
26 | #include <linux/inet.h> | 26 | #include <linux/inet.h> |
27 | #include <linux/skbuff.h> | 27 | #include <linux/skbuff.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/sock.h> | 29 | #include <net/sock.h> |
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #include <asm/system.h> | 31 | #include <asm/system.h> |
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c index b827f47ac133..43a2a7fb327b 100644 --- a/net/lapb/lapb_subr.c +++ b/net/lapb/lapb_subr.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/net.h> | 24 | #include <linux/net.h> |
25 | #include <linux/inet.h> | 25 | #include <linux/inet.h> |
26 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
27 | #include <linux/slab.h> | ||
27 | #include <net/sock.h> | 28 | #include <net/sock.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index e35d907fba2c..023ba820236f 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/rtnetlink.h> | 26 | #include <linux/rtnetlink.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/slab.h> | ||
28 | #include <net/llc.h> | 29 | #include <net/llc.h> |
29 | #include <net/llc_sap.h> | 30 | #include <net/llc_sap.h> |
30 | #include <net/llc_pdu.h> | 31 | #include <net/llc_pdu.h> |
@@ -535,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout) | |||
535 | int rc = 0; | 536 | int rc = 0; |
536 | 537 | ||
537 | while (1) { | 538 | while (1) { |
538 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 539 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
539 | if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) | 540 | if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) |
540 | break; | 541 | break; |
541 | rc = -ERESTARTSYS; | 542 | rc = -ERESTARTSYS; |
@@ -546,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout) | |||
546 | break; | 547 | break; |
547 | rc = 0; | 548 | rc = 0; |
548 | } | 549 | } |
549 | finish_wait(sk->sk_sleep, &wait); | 550 | finish_wait(sk_sleep(sk), &wait); |
550 | return rc; | 551 | return rc; |
551 | } | 552 | } |
552 | 553 | ||
@@ -555,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout) | |||
555 | DEFINE_WAIT(wait); | 556 | DEFINE_WAIT(wait); |
556 | 557 | ||
557 | while (1) { | 558 | while (1) { |
558 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 559 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
559 | if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) | 560 | if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) |
560 | break; | 561 | break; |
561 | if (signal_pending(current) || !timeout) | 562 | if (signal_pending(current) || !timeout) |
562 | break; | 563 | break; |
563 | } | 564 | } |
564 | finish_wait(sk->sk_sleep, &wait); | 565 | finish_wait(sk_sleep(sk), &wait); |
565 | return timeout; | 566 | return timeout; |
566 | } | 567 | } |
567 | 568 | ||
@@ -572,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) | |||
572 | int rc; | 573 | int rc; |
573 | 574 | ||
574 | while (1) { | 575 | while (1) { |
575 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 576 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
576 | rc = 0; | 577 | rc = 0; |
577 | if (sk_wait_event(sk, &timeout, | 578 | if (sk_wait_event(sk, &timeout, |
578 | (sk->sk_shutdown & RCV_SHUTDOWN) || | 579 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
@@ -587,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) | |||
587 | if (!timeout) | 588 | if (!timeout) |
588 | break; | 589 | break; |
589 | } | 590 | } |
590 | finish_wait(sk->sk_sleep, &wait); | 591 | finish_wait(sk_sleep(sk), &wait); |
591 | return rc; | 592 | return rc; |
592 | } | 593 | } |
593 | 594 | ||
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 86d6985b9d49..ea225bd2672c 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * See the GNU General Public License for more details. | 18 | * See the GNU General Public License for more details. |
19 | */ | 19 | */ |
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/llc_conn.h> | 22 | #include <net/llc_conn.h> |
22 | #include <net/llc_sap.h> | 23 | #include <net/llc_sap.h> |
23 | #include <net/sock.h> | 24 | #include <net/sock.h> |
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index a12144da7974..ba137a6a224d 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/slab.h> | ||
16 | #include <net/llc_sap.h> | 17 | #include <net/llc_sap.h> |
17 | #include <net/llc_conn.h> | 18 | #include <net/llc_conn.h> |
18 | #include <net/sock.h> | 19 | #include <net/sock.h> |
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 78167e81dfeb..2bb0ddff8c0f 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c | |||
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = { | |||
144 | 144 | ||
145 | static int __init llc_init(void) | 145 | static int __init llc_init(void) |
146 | { | 146 | { |
147 | struct net_device *dev; | ||
148 | |||
149 | dev = first_net_device(&init_net); | ||
150 | if (dev != NULL) | ||
151 | dev = next_net_device(dev); | ||
152 | |||
153 | dev_add_pack(&llc_packet_type); | 147 | dev_add_pack(&llc_packet_type); |
154 | dev_add_pack(&llc_tr_packet_type); | 148 | dev_add_pack(&llc_tr_packet_type); |
155 | return 0; | 149 | return 0; |
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index a89917130a7b..25c31c0a3fdb 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * | 11 | * |
12 | * See the GNU General Public License for more details. | 12 | * See the GNU General Public License for more details. |
13 | */ | 13 | */ |
14 | #include <linux/gfp.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 57ad974e4d94..f99687439139 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * See the GNU General Public License for more details. | 12 | * See the GNU General Public License for more details. |
13 | */ | 13 | */ |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/slab.h> | ||
15 | #include <net/net_namespace.h> | 16 | #include <net/net_namespace.h> |
16 | #include <net/llc.h> | 17 | #include <net/llc.h> |
17 | #include <net/llc_pdu.h> | 18 | #include <net/llc_pdu.h> |
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index ad6e6e1cf22f..a432f0ec051c 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <net/sock.h> | 23 | #include <net/sock.h> |
24 | #include <net/tcp_states.h> | 24 | #include <net/tcp_states.h> |
25 | #include <linux/llc.h> | 25 | #include <linux/llc.h> |
26 | #include <linux/slab.h> | ||
26 | 27 | ||
27 | static int llc_mac_header_len(unsigned short devtype) | 28 | static int llc_mac_header_len(unsigned short devtype) |
28 | { | 29 | { |
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 83da13339490..e4dae0244d76 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/slab.h> | ||
16 | #include <net/llc.h> | 17 | #include <net/llc.h> |
17 | #include <net/llc_sap.h> | 18 | #include <net/llc_sap.h> |
18 | #include <net/llc_conn.h> | 19 | #include <net/llc_conn.h> |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 1771dd9bd137..6bb9a9a94960 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/ieee80211.h> | 16 | #include <linux/ieee80211.h> |
17 | #include <linux/slab.h> | ||
17 | #include <net/mac80211.h> | 18 | #include <net/mac80211.h> |
18 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
19 | #include "driver-ops.h" | 20 | #include "driver-ops.h" |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 2b6a0c47ed53..c163d0a149f4 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/ieee80211.h> | 16 | #include <linux/ieee80211.h> |
17 | #include <linux/slab.h> | ||
17 | #include <net/mac80211.h> | 18 | #include <net/mac80211.h> |
18 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
19 | #include "driver-ops.h" | 20 | #include "driver-ops.h" |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b575a5066219..ae37270a0633 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/ieee80211.h> | 9 | #include <linux/ieee80211.h> |
10 | #include <linux/nl80211.h> | 10 | #include <linux/nl80211.h> |
11 | #include <linux/rtnetlink.h> | 11 | #include <linux/rtnetlink.h> |
12 | #include <linux/slab.h> | ||
12 | #include <net/net_namespace.h> | 13 | #include <net/net_namespace.h> |
13 | #include <linux/rcupdate.h> | 14 | #include <linux/rcupdate.h> |
14 | #include <net/cfg80211.h> | 15 | #include <net/cfg80211.h> |
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index d12e743cb4e1..97c9e46e859e 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/kobject.h> | 11 | #include <linux/kobject.h> |
12 | #include <linux/slab.h> | ||
12 | #include "ieee80211_i.h" | 13 | #include "ieee80211_i.h" |
13 | #include "key.h" | 14 | #include "key.h" |
14 | #include "debugfs.h" | 15 | #include "debugfs.h" |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 623e6644b80c..20b2998fa0ed 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/rtnetlink.h> | 15 | #include <linux/rtnetlink.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
17 | #include <net/mac80211.h> | 18 | #include <net/mac80211.h> |
18 | #include <net/cfg80211.h> | 19 | #include <net/cfg80211.h> |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 997008e236ff..ee8b63f92f71 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | 86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, |
87 | int mc_count, | 87 | struct netdev_hw_addr_list *mc_list) |
88 | struct dev_addr_list *mc_list) | ||
89 | { | 88 | { |
90 | u64 ret = 0; | 89 | u64 ret = 0; |
91 | 90 | ||
92 | if (local->ops->prepare_multicast) | 91 | if (local->ops->prepare_multicast) |
93 | ret = local->ops->prepare_multicast(&local->hw, mc_count, | 92 | ret = local->ops->prepare_multicast(&local->hw, mc_list); |
94 | mc_list); | ||
95 | 93 | ||
96 | trace_drv_prepare_multicast(local, mc_count, ret); | 94 | trace_drv_prepare_multicast(local, mc_list->count, ret); |
97 | 95 | ||
98 | return ret; | 96 | return ret; |
99 | } | 97 | } |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index d5855ae387e8..b72ee6435fa3 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/if_ether.h> | 17 | #include <linux/if_ether.h> |
17 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
18 | #include <linux/if_arp.h> | 19 | #include <linux/if_arp.h> |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c8077a3647c6..cbaf4981e110 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -665,8 +665,7 @@ struct ieee80211_local { | |||
665 | struct work_struct recalc_smps; | 665 | struct work_struct recalc_smps; |
666 | 666 | ||
667 | /* aggregated multicast list */ | 667 | /* aggregated multicast list */ |
668 | struct dev_addr_list *mc_list; | 668 | struct netdev_hw_addr_list mc_list; |
669 | int mc_count; | ||
670 | 669 | ||
671 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ | 670 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ |
672 | 671 | ||
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b4ec59a8dc03..50deb017fd6e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | #include <linux/slab.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
15 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
@@ -412,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
412 | 413 | ||
413 | netif_addr_lock_bh(dev); | 414 | netif_addr_lock_bh(dev); |
414 | spin_lock_bh(&local->filter_lock); | 415 | spin_lock_bh(&local->filter_lock); |
415 | __dev_addr_unsync(&local->mc_list, &local->mc_count, | 416 | __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len); |
416 | &dev->mc_list, &dev->mc_count); | ||
417 | spin_unlock_bh(&local->filter_lock); | 417 | spin_unlock_bh(&local->filter_lock); |
418 | netif_addr_unlock_bh(dev); | 418 | netif_addr_unlock_bh(dev); |
419 | 419 | ||
@@ -596,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev) | |||
596 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | 596 | sdata->flags ^= IEEE80211_SDATA_PROMISC; |
597 | } | 597 | } |
598 | spin_lock_bh(&local->filter_lock); | 598 | spin_lock_bh(&local->filter_lock); |
599 | __dev_addr_sync(&local->mc_list, &local->mc_count, | 599 | __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); |
600 | &dev->mc_list, &dev->mc_count); | ||
601 | spin_unlock_bh(&local->filter_lock); | 600 | spin_unlock_bh(&local->filter_lock); |
602 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); | 601 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); |
603 | } | 602 | } |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 75705bd41956..8d4b41787dcf 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <linux/rcupdate.h> | 15 | #include <linux/rcupdate.h> |
16 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
17 | #include <linux/slab.h> | ||
17 | #include <net/mac80211.h> | 18 | #include <net/mac80211.h> |
18 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
19 | #include "driver-ops.h" | 20 | #include "driver-ops.h" |
diff --git a/net/mac80211/led.c b/net/mac80211/led.c index 162a643f16b6..063aad944246 100644 --- a/net/mac80211/led.c +++ b/net/mac80211/led.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | /* just for IFNAMSIZ */ | 9 | /* just for IFNAMSIZ */ |
10 | #include <linux/if.h> | 10 | #include <linux/if.h> |
11 | #include <linux/slab.h> | ||
11 | #include "led.h" | 12 | #include "led.h" |
12 | 13 | ||
13 | void ieee80211_led_rx(struct ieee80211_local *local) | 14 | void ieee80211_led_rx(struct ieee80211_local *local) |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 353b6b42d9c5..bd632e1ee2c5 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local) | |||
71 | spin_lock_bh(&local->filter_lock); | 71 | spin_lock_bh(&local->filter_lock); |
72 | changed_flags = local->filter_flags ^ new_flags; | 72 | changed_flags = local->filter_flags ^ new_flags; |
73 | 73 | ||
74 | mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); | 74 | mc = drv_prepare_multicast(local, &local->mc_list); |
75 | spin_unlock_bh(&local->filter_lock); | 75 | spin_unlock_bh(&local->filter_lock); |
76 | 76 | ||
77 | /* be a bit nasty */ | 77 | /* be a bit nasty */ |
@@ -390,6 +390,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
390 | local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; | 390 | local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; |
391 | 391 | ||
392 | INIT_LIST_HEAD(&local->interfaces); | 392 | INIT_LIST_HEAD(&local->interfaces); |
393 | |||
394 | __hw_addr_init(&local->mc_list); | ||
395 | |||
393 | mutex_init(&local->iflist_mtx); | 396 | mutex_init(&local->iflist_mtx); |
394 | mutex_init(&local->scan_mtx); | 397 | mutex_init(&local->scan_mtx); |
395 | 398 | ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 2669fbf8c812..7e93524459fc 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/slab.h> | ||
11 | #include <asm/unaligned.h> | 12 | #include <asm/unaligned.h> |
12 | #include "ieee80211_i.h" | 13 | #include "ieee80211_i.h" |
13 | #include "mesh.h" | 14 | #include "mesh.h" |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 36141d6e701b..d89ed7f2592b 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/slab.h> | ||
10 | #include "mesh.h" | 11 | #include "mesh.h" |
11 | 12 | ||
12 | #ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG | 13 | #ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 2312efe04c62..181ffd6efd81 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/etherdevice.h> | 10 | #include <linux/etherdevice.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/random.h> | 12 | #include <linux/random.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
15 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index c384154ac895..3cd5f7b5d693 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | #include <linux/gfp.h> | ||
9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
10 | #include <linux/random.h> | 11 | #include <linux/random.h> |
11 | #include "ieee80211_i.h" | 12 | #include "ieee80211_i.h" |
@@ -744,7 +745,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
744 | break; | 745 | break; |
745 | default: | 746 | default: |
746 | /* should not get here, PLINK_BLOCKED is dealt with at the | 747 | /* should not get here, PLINK_BLOCKED is dealt with at the |
747 | * beggining of the function | 748 | * beginning of the function |
748 | */ | 749 | */ |
749 | spin_unlock_bh(&sta->lock); | 750 | spin_unlock_bh(&sta->lock); |
750 | break; | 751 | break; |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 17cb8ae912bc..358226f63b81 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/pm_qos_params.h> | 20 | #include <linux/pm_qos_params.h> |
21 | #include <linux/crc32.h> | 21 | #include <linux/crc32.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/mac80211.h> | 23 | #include <net/mac80211.h> |
23 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
24 | 25 | ||
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 0b299d236fa1..6d0bd198af19 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/rtnetlink.h> | 12 | #include <linux/rtnetlink.h> |
13 | #include <linux/slab.h> | ||
13 | #include "rate.h" | 14 | #include "rate.h" |
14 | #include "ieee80211_i.h" | 15 | #include "ieee80211_i.h" |
15 | #include "debugfs.h" | 16 | #include "debugfs.h" |
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 4926d929fd9f..f65ce6dcc8e2 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/debugfs.h> | 50 | #include <linux/debugfs.h> |
51 | #include <linux/random.h> | 51 | #include <linux/random.h> |
52 | #include <linux/ieee80211.h> | 52 | #include <linux/ieee80211.h> |
53 | #include <linux/slab.h> | ||
53 | #include <net/mac80211.h> | 54 | #include <net/mac80211.h> |
54 | #include "rate.h" | 55 | #include "rate.h" |
55 | #include "rc80211_minstrel.h" | 56 | #include "rc80211_minstrel.h" |
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 56d0f24957d9..241e76f3fdf2 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/skbuff.h> | 49 | #include <linux/skbuff.h> |
50 | #include <linux/debugfs.h> | 50 | #include <linux/debugfs.h> |
51 | #include <linux/ieee80211.h> | 51 | #include <linux/ieee80211.h> |
52 | #include <linux/slab.h> | ||
52 | #include <net/mac80211.h> | 53 | #include <net/mac80211.h> |
53 | #include "rc80211_minstrel.h" | 54 | #include "rc80211_minstrel.h" |
54 | 55 | ||
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 2652a374974e..aeda65466f3e 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/debugfs.h> | 15 | #include <linux/debugfs.h> |
16 | #include <linux/slab.h> | ||
16 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
17 | #include "rate.h" | 18 | #include "rate.h" |
18 | #include "mesh.h" | 19 | #include "mesh.h" |
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c index 45667054a5f3..47438b4a9af5 100644 --- a/net/mac80211/rc80211_pid_debugfs.c +++ b/net/mac80211/rc80211_pid_debugfs.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | ||
15 | 16 | ||
16 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
17 | #include "rate.h" | 18 | #include "rate.h" |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e4f325f68fd3..9a08f2c446c6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/jiffies.h> | 12 | #include <linux/jiffies.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
15 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 414651217b49..e14c44195ae9 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
17 | #include <linux/pm_qos_params.h> | 17 | #include <linux/pm_qos_params.h> |
18 | #include <net/sch_generic.h> | 18 | #include <net/sch_generic.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/mac80211.h> | 20 | #include <net/mac80211.h> |
20 | 21 | ||
21 | #include "ieee80211_i.h" | 22 | #include "ieee80211_i.h" |
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 5d745f2d7236..5f3a4113bda1 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/scatterlist.h> | 19 | #include <linux/scatterlist.h> |
20 | #include <linux/slab.h> | ||
20 | #include <asm/unaligned.h> | 21 | #include <asm/unaligned.h> |
21 | 22 | ||
22 | #include <net/mac80211.h> | 23 | #include <net/mac80211.h> |
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 4c7de72c27e7..3dd07600199d 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/if_arp.h> | 19 | #include <linux/if_arp.h> |
20 | #include <linux/etherdevice.h> | 20 | #include <linux/etherdevice.h> |
21 | #include <linux/crc32.h> | 21 | #include <linux/crc32.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/mac80211.h> | 23 | #include <net/mac80211.h> |
23 | #include <asm/unaligned.h> | 24 | #include <asm/unaligned.h> |
24 | 25 | ||
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index f4971cd45c64..0adbcc941ac9 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -9,10 +9,10 @@ | |||
9 | 9 | ||
10 | #include <linux/netdevice.h> | 10 | #include <linux/netdevice.h> |
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/slab.h> | ||
13 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
14 | #include <linux/compiler.h> | 13 | #include <linux/compiler.h> |
15 | #include <linux/ieee80211.h> | 14 | #include <linux/ieee80211.h> |
15 | #include <linux/gfp.h> | ||
16 | #include <asm/unaligned.h> | 16 | #include <asm/unaligned.h> |
17 | #include <net/mac80211.h> | 17 | #include <net/mac80211.h> |
18 | 18 | ||
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 60ec4e4badaa..78b505d33bfb 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/inetdevice.h> | 19 | #include <linux/inetdevice.h> |
20 | #include <linux/proc_fs.h> | 20 | #include <linux/proc_fs.h> |
21 | #include <linux/mutex.h> | 21 | #include <linux/mutex.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/net_namespace.h> | 23 | #include <net/net_namespace.h> |
23 | #include <net/sock.h> | 24 | #include <net/sock.h> |
24 | 25 | ||
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 3c7e42735b60..1cb0e834f8ff 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/in.h> | 27 | #include <linux/in.h> |
28 | #include <linux/ip.h> | 28 | #include <linux/ip.h> |
29 | #include <linux/netfilter.h> | 29 | #include <linux/netfilter.h> |
30 | #include <linux/slab.h> | ||
30 | #include <net/net_namespace.h> | 31 | #include <net/net_namespace.h> |
31 | #include <net/protocol.h> | 32 | #include <net/protocol.h> |
32 | #include <net/tcp.h> | 33 | #include <net/tcp.h> |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 60bb41a8d8d4..d8f7e8ef67b4 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/vmalloc.h> | 33 | #include <linux/vmalloc.h> |
34 | #include <linux/proc_fs.h> /* for proc_net_* */ | 34 | #include <linux/proc_fs.h> /* for proc_net_* */ |
35 | #include <linux/slab.h> | ||
35 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
36 | #include <linux/jhash.h> | 37 | #include <linux/jhash.h> |
37 | #include <linux/random.h> | 38 | #include <linux/random.h> |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 44590887a92c..1cd6e3fd058b 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/tcp.h> | 33 | #include <linux/tcp.h> |
34 | #include <linux/sctp.h> | 34 | #include <linux/sctp.h> |
35 | #include <linux/icmp.h> | 35 | #include <linux/icmp.h> |
36 | #include <linux/slab.h> | ||
36 | 37 | ||
37 | #include <net/ip.h> | 38 | #include <net/ip.h> |
38 | #include <net/tcp.h> | 39 | #include <net/tcp.h> |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7ee9c3426f44..36dc1d88c2fa 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/workqueue.h> | 31 | #include <linux/workqueue.h> |
32 | #include <linux/swap.h> | 32 | #include <linux/swap.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/slab.h> | ||
34 | 35 | ||
35 | #include <linux/netfilter.h> | 36 | #include <linux/netfilter.h> |
36 | #include <linux/netfilter_ipv4.h> | 37 | #include <linux/netfilter_ipv4.h> |
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index fe3e18834b91..95fd0d14200b 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 39 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
40 | 40 | ||
41 | #include <linux/ip.h> | 41 | #include <linux/ip.h> |
42 | #include <linux/slab.h> | ||
42 | #include <linux/module.h> | 43 | #include <linux/module.h> |
43 | #include <linux/kernel.h> | 44 | #include <linux/kernel.h> |
44 | #include <linux/skbuff.h> | 45 | #include <linux/skbuff.h> |
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 702b53ca937c..ff28801962e0 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c | |||
@@ -17,7 +17,6 @@ | |||
17 | 17 | ||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
20 | #include <linux/slab.h> | ||
21 | #include <linux/types.h> | 20 | #include <linux/types.h> |
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 73f38ea98f25..2c7f185dfae4 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/in.h> | 32 | #include <linux/in.h> |
33 | #include <linux/ip.h> | 33 | #include <linux/ip.h> |
34 | #include <linux/netfilter.h> | 34 | #include <linux/netfilter.h> |
35 | #include <linux/gfp.h> | ||
35 | #include <net/protocol.h> | 36 | #include <net/protocol.h> |
36 | #include <net/tcp.h> | 37 | #include <net/tcp.h> |
37 | #include <asm/unaligned.h> | 38 | #include <asm/unaligned.h> |
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 1b9370db2305..94a45213faa6 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 43 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
44 | 44 | ||
45 | #include <linux/ip.h> | 45 | #include <linux/ip.h> |
46 | #include <linux/slab.h> | ||
46 | #include <linux/module.h> | 47 | #include <linux/module.h> |
47 | #include <linux/kernel.h> | 48 | #include <linux/kernel.h> |
48 | #include <linux/skbuff.h> | 49 | #include <linux/skbuff.h> |
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index caa58fa1438a..535dc2b419d8 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/skbuff.h> | 46 | #include <linux/skbuff.h> |
47 | #include <linux/jiffies.h> | 47 | #include <linux/jiffies.h> |
48 | #include <linux/list.h> | 48 | #include <linux/list.h> |
49 | #include <linux/slab.h> | ||
49 | 50 | ||
50 | /* for sysctl */ | 51 | /* for sysctl */ |
51 | #include <linux/fs.h> | 52 | #include <linux/fs.h> |
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 0e584553819d..7fc49f4cf5ad 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
22 | #include <linux/gfp.h> | ||
22 | #include <linux/in.h> | 23 | #include <linux/in.h> |
23 | #include <linux/ip.h> | 24 | #include <linux/ip.h> |
24 | #include <net/protocol.h> | 25 | #include <net/protocol.h> |
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 8e6cfd36e6f0..e6cc174fbc06 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 36 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
37 | 37 | ||
38 | #include <linux/ip.h> | 38 | #include <linux/ip.h> |
39 | #include <linux/slab.h> | ||
39 | #include <linux/module.h> | 40 | #include <linux/module.h> |
40 | #include <linux/kernel.h> | 41 | #include <linux/kernel.h> |
41 | #include <linux/skbuff.h> | 42 | #include <linux/skbuff.h> |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 8fb0ae616761..7ba06939829f 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data) | |||
802 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); | 802 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); |
803 | 803 | ||
804 | while (!kthread_should_stop()) { | 804 | while (!kthread_should_stop()) { |
805 | wait_event_interruptible(*tinfo->sock->sk->sk_sleep, | 805 | wait_event_interruptible(*sk_sleep(tinfo->sock->sk), |
806 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) | 806 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) |
807 | || kthread_should_stop()); | 807 | || kthread_should_stop()); |
808 | 808 | ||
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index 3c115fc19784..30db633f88f1 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/net.h> | 27 | #include <linux/net.h> |
27 | #include <linux/gcd.h> | 28 | #include <linux/gcd.h> |
28 | 29 | ||
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 223b5018c7dc..e450cd6f4eb5 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
18 | 18 | ||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/tcp.h> /* for tcphdr */ | 21 | #include <linux/tcp.h> /* for tcphdr */ |
21 | #include <net/ip.h> | 22 | #include <net/ip.h> |
22 | #include <net/tcp.h> /* for csum_tcpudp_magic */ | 23 | #include <net/tcp.h> /* for csum_tcpudp_magic */ |
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index 018f90db511c..ab81b380eae6 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/netfilter.h> | 11 | #include <linux/netfilter.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
14 | 15 | ||
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c index 07d9d8857e5d..372e80f07a81 100644 --- a/net/netfilter/nf_conntrack_amanda.c +++ b/net/netfilter/nf_conntrack_amanda.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/in.h> | 16 | #include <linux/in.h> |
17 | #include <linux/udp.h> | 17 | #include <linux/udp.h> |
18 | #include <linux/netfilter.h> | 18 | #include <linux/netfilter.h> |
19 | #include <linux/gfp.h> | ||
19 | 20 | ||
20 | #include <net/netfilter/nf_conntrack.h> | 21 | #include <net/netfilter/nf_conntrack.h> |
21 | #include <net/netfilter/nf_conntrack_expect.h> | 22 | #include <net/netfilter/nf_conntrack_expect.h> |
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index d5a9bcd7d61b..f516961a83b4 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
21 | #include <linux/slab.h> | ||
21 | 22 | ||
22 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
23 | #include <net/netfilter/nf_conntrack_core.h> | 24 | #include <net/netfilter/nf_conntrack_core.h> |
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index f0732aa18e4f..2ae3169e7633 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/netfilter.h> | 14 | #include <linux/netfilter.h> |
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/ipv6.h> | 17 | #include <linux/ipv6.h> |
17 | #include <linux/ctype.h> | 18 | #include <linux/ctype.h> |
18 | #include <linux/inet.h> | 19 | #include <linux/inet.h> |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index a1c8dd917e12..a487c8038044 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/inet.h> | 17 | #include <linux/inet.h> |
18 | #include <linux/in.h> | 18 | #include <linux/in.h> |
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/udp.h> | 21 | #include <linux/udp.h> |
21 | #include <linux/tcp.h> | 22 | #include <linux/tcp.h> |
22 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 4509fa6726f8..59e1a4cd4e8b 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/stddef.h> | 17 | #include <linux/stddef.h> |
18 | #include <linux/slab.h> | ||
19 | #include <linux/random.h> | 18 | #include <linux/random.h> |
20 | #include <linux/err.h> | 19 | #include <linux/err.h> |
21 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 8bd98c84f77e..7673930ca342 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/ip.h> | 15 | #include <linux/ip.h> |
16 | #include <linux/tcp.h> | 16 | #include <linux/tcp.h> |
17 | #include <linux/netfilter.h> | 17 | #include <linux/netfilter.h> |
18 | #include <linux/slab.h> | ||
18 | 19 | ||
19 | #include <net/netfilter/nf_conntrack.h> | 20 | #include <net/netfilter/nf_conntrack.h> |
20 | #include <net/netfilter/nf_conntrack_expect.h> | 21 | #include <net/netfilter/nf_conntrack_expect.h> |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2b2af631d2b8..afc52f2ee4ac 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/netlink.h> | 27 | #include <linux/netlink.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/slab.h> | ||
30 | 31 | ||
31 | #include <linux/netfilter.h> | 32 | #include <linux/netfilter.h> |
32 | #include <net/netlink.h> | 33 | #include <net/netlink.h> |
@@ -582,7 +583,9 @@ nla_put_failure: | |||
582 | nlmsg_failure: | 583 | nlmsg_failure: |
583 | kfree_skb(skb); | 584 | kfree_skb(skb); |
584 | errout: | 585 | errout: |
585 | nfnetlink_set_err(net, 0, group, -ENOBUFS); | 586 | if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0) |
587 | return -ENOBUFS; | ||
588 | |||
586 | return 0; | 589 | return 0; |
587 | } | 590 | } |
588 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ | 591 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ |
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 1a4568bf7ea5..a44fa75b5178 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/netfilter.h> | 13 | #include <linux/netfilter.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
17 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 9a2815549375..5292560d6d4a 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/dccp.h> | 17 | #include <linux/dccp.h> |
18 | #include <linux/slab.h> | ||
18 | 19 | ||
19 | #include <net/net_namespace.h> | 20 | #include <net/net_namespace.h> |
20 | #include <net/netns/generic.h> | 21 | #include <net/netns/generic.h> |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index d899b1a69940..cf616e55ca41 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/in.h> | 31 | #include <linux/in.h> |
32 | #include <linux/netdevice.h> | 32 | #include <linux/netdevice.h> |
33 | #include <linux/skbuff.h> | 33 | #include <linux/skbuff.h> |
34 | #include <linux/slab.h> | ||
34 | #include <net/dst.h> | 35 | #include <net/dst.h> |
35 | #include <net/net_namespace.h> | 36 | #include <net/net_namespace.h> |
36 | #include <net/netns/generic.h> | 37 | #include <net/netns/generic.h> |
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index dcfecbb81c46..d9e27734b2a2 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
22 | #include <linux/netfilter.h> | 22 | #include <linux/netfilter.h> |
23 | #include <linux/slab.h> | ||
23 | #include <linux/in.h> | 24 | #include <linux/in.h> |
24 | #include <linux/tcp.h> | 25 | #include <linux/tcp.h> |
25 | #include <net/netfilter/nf_conntrack.h> | 26 | #include <net/netfilter/nf_conntrack.h> |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 8dd75d90efc0..c6cd1b84eddd 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(ct_sip_parse_request); | |||
284 | * tabs, spaces and continuation lines, which are treated as a single whitespace | 284 | * tabs, spaces and continuation lines, which are treated as a single whitespace |
285 | * character. | 285 | * character. |
286 | * | 286 | * |
287 | * Some headers may appear multiple times. A comma seperated list of values is | 287 | * Some headers may appear multiple times. A comma separated list of values is |
288 | * equivalent to multiple headers. | 288 | * equivalent to multiple headers. |
289 | */ | 289 | */ |
290 | static const struct sip_header ct_sip_hdrs[] = { | 290 | static const struct sip_header ct_sip_hdrs[] = { |
@@ -421,7 +421,7 @@ int ct_sip_get_header(const struct nf_conn *ct, const char *dptr, | |||
421 | } | 421 | } |
422 | EXPORT_SYMBOL_GPL(ct_sip_get_header); | 422 | EXPORT_SYMBOL_GPL(ct_sip_get_header); |
423 | 423 | ||
424 | /* Get next header field in a list of comma seperated values */ | 424 | /* Get next header field in a list of comma separated values */ |
425 | static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr, | 425 | static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr, |
426 | unsigned int dataoff, unsigned int datalen, | 426 | unsigned int dataoff, unsigned int datalen, |
427 | enum sip_header_types type, | 427 | enum sip_header_types type, |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 24a42efe62ef..faa8eb3722b9 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/netfilter.h> | 10 | #include <linux/netfilter.h> |
11 | #include <linux/slab.h> | ||
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
13 | #include <linux/proc_fs.h> | 14 | #include <linux/proc_fs.h> |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index ba095fd014e5..c49ef219899e 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/slab.h> | ||
2 | #include <linux/init.h> | 3 | #include <linux/init.h> |
3 | #include <linux/module.h> | 4 | #include <linux/module.h> |
4 | #include <linux/proc_fs.h> | 5 | #include <linux/proc_fs.h> |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 8eb0cc23ada3..6afa3d52ea5f 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -113,9 +113,9 @@ int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, | |||
113 | } | 113 | } |
114 | EXPORT_SYMBOL_GPL(nfnetlink_send); | 114 | EXPORT_SYMBOL_GPL(nfnetlink_send); |
115 | 115 | ||
116 | void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) | 116 | int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error) |
117 | { | 117 | { |
118 | netlink_set_err(net->nfnl, pid, group, error); | 118 | return netlink_set_err(net->nfnl, pid, group, error); |
119 | } | 119 | } |
120 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); | 120 | EXPORT_SYMBOL_GPL(nfnetlink_set_err); |
121 | 121 | ||
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index d9b8fb8ab340..203643fb2c52 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/jhash.h> | 29 | #include <linux/jhash.h> |
30 | #include <linux/random.h> | 30 | #include <linux/random.h> |
31 | #include <linux/slab.h> | ||
31 | #include <net/sock.h> | 32 | #include <net/sock.h> |
32 | #include <net/netfilter/nf_log.h> | 33 | #include <net/netfilter/nf_log.h> |
33 | #include <net/netfilter/nfnetlink_log.h> | 34 | #include <net/netfilter/nfnetlink_log.h> |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7ba4abc405c9..e70a6ef1f4f2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/slab.h> | ||
21 | #include <linux/notifier.h> | 22 | #include <linux/notifier.h> |
22 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
23 | #include <linux/netfilter.h> | 24 | #include <linux/netfilter.h> |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 0a12cedfe9e3..665f5beef6ad 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/slab.h> | ||
25 | #include <net/net_namespace.h> | 26 | #include <net/net_namespace.h> |
26 | 27 | ||
27 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 61c50fa84703..ee18b231b950 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/gfp.h> | ||
10 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
11 | #include <linux/selinux.h> | 12 | #include <linux/selinux.h> |
12 | #include <linux/netfilter_ipv4/ip_tables.h> | 13 | #include <linux/netfilter_ipv4/ip_tables.h> |
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 8ff7843bb921..3271c8e52153 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
24 | #include <linux/netfilter/x_tables.h> | 24 | #include <linux/netfilter/x_tables.h> |
25 | #include <linux/slab.h> | ||
25 | #include <linux/leds.h> | 26 | #include <linux/leds.h> |
26 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
27 | 28 | ||
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 87ae97e5516f..d16d55df4f61 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/jhash.h> | 11 | #include <linux/jhash.h> |
12 | #include <linux/rtnetlink.h> | 12 | #include <linux/rtnetlink.h> |
13 | #include <linux/random.h> | 13 | #include <linux/random.h> |
14 | #include <linux/slab.h> | ||
14 | #include <net/gen_stats.h> | 15 | #include <net/gen_stats.h> |
15 | #include <net/netlink.h> | 16 | #include <net/netlink.h> |
16 | 17 | ||
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 0e357ac9a2a8..c5f4b9919e9a 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/ip.h> | 13 | #include <linux/ip.h> |
14 | #include <linux/gfp.h> | ||
14 | #include <linux/ipv6.h> | 15 | #include <linux/ipv6.h> |
15 | #include <linux/tcp.h> | 16 | #include <linux/tcp.h> |
16 | #include <net/dst.h> | 17 | #include <net/dst.h> |
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 26997ce90e48..388ca4596098 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
18 | #include <linux/ipv6.h> | 18 | #include <linux/ipv6.h> |
19 | #include <linux/jhash.h> | 19 | #include <linux/jhash.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/list.h> | 21 | #include <linux/list.h> |
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | #include <linux/random.h> | 23 | #include <linux/random.h> |
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c index 0989f29ade2e..395af5943ffd 100644 --- a/net/netfilter/xt_dccp.c +++ b/net/netfilter/xt_dccp.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
14 | #include <net/ip.h> | 15 | #include <net/ip.h> |
15 | #include <linux/dccp.h> | 16 | #include <linux/dccp.h> |
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index d952806b6469..215a64835de8 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * xt_hashlimit - Netfilter module to limit the number of packets per time | 2 | * xt_hashlimit - Netfilter module to limit the number of packets per time |
3 | * seperately for each hashbucket (sourceip/sourceport/dstip/dstport) | 3 | * separately for each hashbucket (sourceip/sourceport/dstip/dstport) |
4 | * | 4 | * |
5 | * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> | 5 | * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> |
6 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 | 6 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 |
@@ -493,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p) | |||
493 | case 64 ... 95: | 493 | case 64 ... 95: |
494 | i[2] = maskl(i[2], p - 64); | 494 | i[2] = maskl(i[2], p - 64); |
495 | i[3] = 0; | 495 | i[3] = 0; |
496 | break; | ||
496 | case 96 ... 127: | 497 | case 96 ... 127: |
497 | i[3] = maskl(i[3], p - 96); | 498 | i[3] = maskl(i[3], p - 96); |
498 | break; | 499 | break; |
@@ -879,7 +880,8 @@ static void dl_seq_stop(struct seq_file *s, void *v) | |||
879 | struct xt_hashlimit_htable *htable = s->private; | 880 | struct xt_hashlimit_htable *htable = s->private; |
880 | unsigned int *bucket = (unsigned int *)v; | 881 | unsigned int *bucket = (unsigned int *)v; |
881 | 882 | ||
882 | kfree(bucket); | 883 | if (!IS_ERR(bucket)) |
884 | kfree(bucket); | ||
883 | spin_unlock_bh(&htable->lock); | 885 | spin_unlock_bh(&htable->lock); |
884 | } | 886 | } |
885 | 887 | ||
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index a0ca5339af41..e5d7e1ffb1a4 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/slab.h> | ||
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/skbuff.h> | 11 | #include <linux/skbuff.h> |
11 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index 390b7d09fe51..2d5562498c43 100644 --- a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Sam Johnston <samj@samj.net> | 4 | * Sam Johnston <samj@samj.net> |
5 | */ | 5 | */ |
6 | #include <linux/skbuff.h> | 6 | #include <linux/skbuff.h> |
7 | #include <linux/slab.h> | ||
7 | #include <linux/spinlock.h> | 8 | #include <linux/spinlock.h> |
8 | 9 | ||
9 | #include <linux/netfilter/x_tables.h> | 10 | #include <linux/netfilter/x_tables.h> |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 7073dbb8100c..834b736857cb 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/bitops.h> | 27 | #include <linux/bitops.h> |
28 | #include <linux/skbuff.h> | 28 | #include <linux/skbuff.h> |
29 | #include <linux/inet.h> | 29 | #include <linux/inet.h> |
30 | #include <linux/slab.h> | ||
30 | #include <net/net_namespace.h> | 31 | #include <net/net_namespace.h> |
31 | #include <net/netns/generic.h> | 32 | #include <net/netns/generic.h> |
32 | 33 | ||
@@ -267,7 +268,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
267 | for (i = 0; i < e->nstamps; i++) { | 268 | for (i = 0; i < e->nstamps; i++) { |
268 | if (info->seconds && time_after(time, e->stamps[i])) | 269 | if (info->seconds && time_after(time, e->stamps[i])) |
269 | continue; | 270 | continue; |
270 | if (info->hit_count && ++hits >= info->hit_count) { | 271 | if (!info->hit_count || ++hits >= info->hit_count) { |
271 | ret = !ret; | 272 | ret = !ret; |
272 | break; | 273 | break; |
273 | } | 274 | } |
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c index d8c0f8f1a78e..937ce0633e99 100644 --- a/net/netfilter/xt_statistic.c +++ b/net/netfilter/xt_statistic.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/net.h> | 14 | #include <linux/net.h> |
15 | #include <linux/slab.h> | ||
15 | 16 | ||
16 | #include <linux/netfilter/xt_statistic.h> | 17 | #include <linux/netfilter/xt_statistic.h> |
17 | #include <linux/netfilter/x_tables.h> | 18 | #include <linux/netfilter/x_tables.h> |
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index b4d774111311..96801ffd8af8 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/gfp.h> | ||
10 | #include <linux/init.h> | 11 | #include <linux/init.h> |
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index e639298bc9c8..5f14c8462e30 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/string.h> | 33 | #include <linux/string.h> |
34 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
35 | #include <linux/audit.h> | 35 | #include <linux/audit.h> |
36 | #include <linux/slab.h> | ||
36 | #include <net/sock.h> | 37 | #include <net/sock.h> |
37 | #include <net/netlink.h> | 38 | #include <net/netlink.h> |
38 | #include <net/genetlink.h> | 39 | #include <net/genetlink.h> |
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 0bfeaab88ef5..d37b7f80fa37 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/string.h> | 36 | #include <linux/string.h> |
37 | #include <linux/audit.h> | 37 | #include <linux/audit.h> |
38 | #include <linux/slab.h> | ||
38 | #include <net/netlabel.h> | 39 | #include <net/netlabel.h> |
39 | #include <net/cipso_ipv4.h> | 40 | #include <net/cipso_ipv4.h> |
40 | #include <asm/bug.h> | 41 | #include <asm/bug.h> |
@@ -50,9 +51,12 @@ struct netlbl_domhsh_tbl { | |||
50 | }; | 51 | }; |
51 | 52 | ||
52 | /* Domain hash table */ | 53 | /* Domain hash table */ |
53 | /* XXX - updates should be so rare that having one spinlock for the entire | 54 | /* updates should be so rare that having one spinlock for the entire hash table |
54 | * hash table should be okay */ | 55 | * should be okay */ |
55 | static DEFINE_SPINLOCK(netlbl_domhsh_lock); | 56 | static DEFINE_SPINLOCK(netlbl_domhsh_lock); |
57 | #define netlbl_domhsh_rcu_deref(p) \ | ||
58 | rcu_dereference_check(p, rcu_read_lock_held() || \ | ||
59 | lockdep_is_held(&netlbl_domhsh_lock)) | ||
56 | static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; | 60 | static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; |
57 | static struct netlbl_dom_map *netlbl_domhsh_def = NULL; | 61 | static struct netlbl_dom_map *netlbl_domhsh_def = NULL; |
58 | 62 | ||
@@ -106,7 +110,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) | |||
106 | * Description: | 110 | * Description: |
107 | * This is the hashing function for the domain hash table, it returns the | 111 | * This is the hashing function for the domain hash table, it returns the |
108 | * correct bucket number for the domain. The caller is responsibile for | 112 | * correct bucket number for the domain. The caller is responsibile for |
109 | * calling the rcu_read_[un]lock() functions. | 113 | * ensuring that the hash table is protected with either a RCU read lock or the |
114 | * hash table lock. | ||
110 | * | 115 | * |
111 | */ | 116 | */ |
112 | static u32 netlbl_domhsh_hash(const char *key) | 117 | static u32 netlbl_domhsh_hash(const char *key) |
@@ -120,7 +125,7 @@ static u32 netlbl_domhsh_hash(const char *key) | |||
120 | 125 | ||
121 | for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) | 126 | for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) |
122 | val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; | 127 | val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; |
123 | return val & (rcu_dereference(netlbl_domhsh)->size - 1); | 128 | return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1); |
124 | } | 129 | } |
125 | 130 | ||
126 | /** | 131 | /** |
@@ -130,7 +135,8 @@ static u32 netlbl_domhsh_hash(const char *key) | |||
130 | * Description: | 135 | * Description: |
131 | * Searches the domain hash table and returns a pointer to the hash table | 136 | * Searches the domain hash table and returns a pointer to the hash table |
132 | * entry if found, otherwise NULL is returned. The caller is responsibile for | 137 | * entry if found, otherwise NULL is returned. The caller is responsibile for |
133 | * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). | 138 | * ensuring that the hash table is protected with either a RCU read lock or the |
139 | * hash table lock. | ||
134 | * | 140 | * |
135 | */ | 141 | */ |
136 | static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | 142 | static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) |
@@ -141,7 +147,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
141 | 147 | ||
142 | if (domain != NULL) { | 148 | if (domain != NULL) { |
143 | bkt = netlbl_domhsh_hash(domain); | 149 | bkt = netlbl_domhsh_hash(domain); |
144 | bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; | 150 | bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; |
145 | list_for_each_entry_rcu(iter, bkt_list, list) | 151 | list_for_each_entry_rcu(iter, bkt_list, list) |
146 | if (iter->valid && strcmp(iter->domain, domain) == 0) | 152 | if (iter->valid && strcmp(iter->domain, domain) == 0) |
147 | return iter; | 153 | return iter; |
@@ -159,8 +165,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) | |||
159 | * Searches the domain hash table and returns a pointer to the hash table | 165 | * Searches the domain hash table and returns a pointer to the hash table |
160 | * entry if an exact match is found, if an exact match is not present in the | 166 | * entry if an exact match is found, if an exact match is not present in the |
161 | * hash table then the default entry is returned if valid otherwise NULL is | 167 | * hash table then the default entry is returned if valid otherwise NULL is |
162 | * returned. The caller is responsibile for the rcu hash table locks | 168 | * returned. The caller is responsibile ensuring that the hash table is |
163 | * (i.e. the caller much call rcu_read_[un]lock()). | 169 | * protected with either a RCU read lock or the hash table lock. |
164 | * | 170 | * |
165 | */ | 171 | */ |
166 | static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) | 172 | static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) |
@@ -169,7 +175,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) | |||
169 | 175 | ||
170 | entry = netlbl_domhsh_search(domain); | 176 | entry = netlbl_domhsh_search(domain); |
171 | if (entry == NULL) { | 177 | if (entry == NULL) { |
172 | entry = rcu_dereference(netlbl_domhsh_def); | 178 | entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); |
173 | if (entry != NULL && !entry->valid) | 179 | if (entry != NULL && !entry->valid) |
174 | entry = NULL; | 180 | entry = NULL; |
175 | } | 181 | } |
@@ -306,8 +312,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
306 | struct netlbl_af6list *tmp6; | 312 | struct netlbl_af6list *tmp6; |
307 | #endif /* IPv6 */ | 313 | #endif /* IPv6 */ |
308 | 314 | ||
315 | /* XXX - we can remove this RCU read lock as the spinlock protects the | ||
316 | * entire function, but before we do we need to fixup the | ||
317 | * netlbl_af[4,6]list RCU functions to do "the right thing" with | ||
318 | * respect to rcu_dereference() when only a spinlock is held. */ | ||
309 | rcu_read_lock(); | 319 | rcu_read_lock(); |
310 | |||
311 | spin_lock(&netlbl_domhsh_lock); | 320 | spin_lock(&netlbl_domhsh_lock); |
312 | if (entry->domain != NULL) | 321 | if (entry->domain != NULL) |
313 | entry_old = netlbl_domhsh_search(entry->domain); | 322 | entry_old = netlbl_domhsh_search(entry->domain); |
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 6ce00205f342..1b83e0009d8d 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/types.h> | 32 | #include <linux/types.h> |
33 | #include <linux/slab.h> | ||
33 | #include <linux/audit.h> | 34 | #include <linux/audit.h> |
34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
35 | #include <linux/in6.h> | 36 | #include <linux/in6.h> |
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 8203623e65ad..998e85e895d0 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
35 | #include <linux/in.h> | 35 | #include <linux/in.h> |
36 | #include <linux/in6.h> | 36 | #include <linux/in6.h> |
37 | #include <linux/slab.h> | ||
37 | #include <net/sock.h> | 38 | #include <net/sock.h> |
38 | #include <net/netlink.h> | 39 | #include <net/netlink.h> |
39 | #include <net/genetlink.h> | 40 | #include <net/genetlink.h> |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 852d9d7976b9..a3d64aabe2f7 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/notifier.h> | 43 | #include <linux/notifier.h> |
44 | #include <linux/netdevice.h> | 44 | #include <linux/netdevice.h> |
45 | #include <linux/security.h> | 45 | #include <linux/security.h> |
46 | #include <linux/slab.h> | ||
46 | #include <net/sock.h> | 47 | #include <net/sock.h> |
47 | #include <net/netlink.h> | 48 | #include <net/netlink.h> |
48 | #include <net/genetlink.h> | 49 | #include <net/genetlink.h> |
@@ -114,6 +115,9 @@ struct netlbl_unlhsh_walk_arg { | |||
114 | /* updates should be so rare that having one spinlock for the entire | 115 | /* updates should be so rare that having one spinlock for the entire |
115 | * hash table should be okay */ | 116 | * hash table should be okay */ |
116 | static DEFINE_SPINLOCK(netlbl_unlhsh_lock); | 117 | static DEFINE_SPINLOCK(netlbl_unlhsh_lock); |
118 | #define netlbl_unlhsh_rcu_deref(p) \ | ||
119 | rcu_dereference_check(p, rcu_read_lock_held() || \ | ||
120 | lockdep_is_held(&netlbl_unlhsh_lock)) | ||
117 | static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; | 121 | static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; |
118 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; | 122 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; |
119 | 123 | ||
@@ -235,15 +239,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) | |||
235 | * Description: | 239 | * Description: |
236 | * This is the hashing function for the unlabeled hash table, it returns the | 240 | * This is the hashing function for the unlabeled hash table, it returns the |
237 | * bucket number for the given device/interface. The caller is responsible for | 241 | * bucket number for the given device/interface. The caller is responsible for |
238 | * calling the rcu_read_[un]lock() functions. | 242 | * ensuring that the hash table is protected with either a RCU read lock or |
243 | * the hash table lock. | ||
239 | * | 244 | * |
240 | */ | 245 | */ |
241 | static u32 netlbl_unlhsh_hash(int ifindex) | 246 | static u32 netlbl_unlhsh_hash(int ifindex) |
242 | { | 247 | { |
243 | /* this is taken _almost_ directly from | 248 | return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1); |
244 | * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much | ||
245 | * the same thing */ | ||
246 | return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1); | ||
247 | } | 249 | } |
248 | 250 | ||
249 | /** | 251 | /** |
@@ -253,7 +255,8 @@ static u32 netlbl_unlhsh_hash(int ifindex) | |||
253 | * Description: | 255 | * Description: |
254 | * Searches the unlabeled connection hash table and returns a pointer to the | 256 | * Searches the unlabeled connection hash table and returns a pointer to the |
255 | * interface entry which matches @ifindex, otherwise NULL is returned. The | 257 | * interface entry which matches @ifindex, otherwise NULL is returned. The |
256 | * caller is responsible for calling the rcu_read_[un]lock() functions. | 258 | * caller is responsible for ensuring that the hash table is protected with |
259 | * either a RCU read lock or the hash table lock. | ||
257 | * | 260 | * |
258 | */ | 261 | */ |
259 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) | 262 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) |
@@ -263,7 +266,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) | |||
263 | struct netlbl_unlhsh_iface *iter; | 266 | struct netlbl_unlhsh_iface *iter; |
264 | 267 | ||
265 | bkt = netlbl_unlhsh_hash(ifindex); | 268 | bkt = netlbl_unlhsh_hash(ifindex); |
266 | bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; | 269 | bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; |
267 | list_for_each_entry_rcu(iter, bkt_list, list) | 270 | list_for_each_entry_rcu(iter, bkt_list, list) |
268 | if (iter->valid && iter->ifindex == ifindex) | 271 | if (iter->valid && iter->ifindex == ifindex) |
269 | return iter; | 272 | return iter; |
@@ -272,33 +275,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) | |||
272 | } | 275 | } |
273 | 276 | ||
274 | /** | 277 | /** |
275 | * netlbl_unlhsh_search_iface_def - Search for a matching interface entry | ||
276 | * @ifindex: the network interface | ||
277 | * | ||
278 | * Description: | ||
279 | * Searches the unlabeled connection hash table and returns a pointer to the | ||
280 | * interface entry which matches @ifindex. If an exact match can not be found | ||
281 | * and there is a valid default entry, the default entry is returned, otherwise | ||
282 | * NULL is returned. The caller is responsible for calling the | ||
283 | * rcu_read_[un]lock() functions. | ||
284 | * | ||
285 | */ | ||
286 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) | ||
287 | { | ||
288 | struct netlbl_unlhsh_iface *entry; | ||
289 | |||
290 | entry = netlbl_unlhsh_search_iface(ifindex); | ||
291 | if (entry != NULL) | ||
292 | return entry; | ||
293 | |||
294 | entry = rcu_dereference(netlbl_unlhsh_def); | ||
295 | if (entry != NULL && entry->valid) | ||
296 | return entry; | ||
297 | |||
298 | return NULL; | ||
299 | } | ||
300 | |||
301 | /** | ||
302 | * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table | 278 | * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table |
303 | * @iface: the associated interface entry | 279 | * @iface: the associated interface entry |
304 | * @addr: IPv4 address in network byte order | 280 | * @addr: IPv4 address in network byte order |
@@ -308,8 +284,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) | |||
308 | * Description: | 284 | * Description: |
309 | * Add a new address entry into the unlabeled connection hash table using the | 285 | * Add a new address entry into the unlabeled connection hash table using the |
310 | * interface entry specified by @iface. On success zero is returned, otherwise | 286 | * interface entry specified by @iface. On success zero is returned, otherwise |
311 | * a negative value is returned. The caller is responsible for calling the | 287 | * a negative value is returned. |
312 | * rcu_read_[un]lock() functions. | ||
313 | * | 288 | * |
314 | */ | 289 | */ |
315 | static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, | 290 | static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, |
@@ -349,8 +324,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, | |||
349 | * Description: | 324 | * Description: |
350 | * Add a new address entry into the unlabeled connection hash table using the | 325 | * Add a new address entry into the unlabeled connection hash table using the |
351 | * interface entry specified by @iface. On success zero is returned, otherwise | 326 | * interface entry specified by @iface. On success zero is returned, otherwise |
352 | * a negative value is returned. The caller is responsible for calling the | 327 | * a negative value is returned. |
353 | * rcu_read_[un]lock() functions. | ||
354 | * | 328 | * |
355 | */ | 329 | */ |
356 | static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, | 330 | static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, |
@@ -391,8 +365,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, | |||
391 | * Description: | 365 | * Description: |
392 | * Add a new, empty, interface entry into the unlabeled connection hash table. | 366 | * Add a new, empty, interface entry into the unlabeled connection hash table. |
393 | * On success a pointer to the new interface entry is returned, on failure NULL | 367 | * On success a pointer to the new interface entry is returned, on failure NULL |
394 | * is returned. The caller is responsible for calling the rcu_read_[un]lock() | 368 | * is returned. |
395 | * functions. | ||
396 | * | 369 | * |
397 | */ | 370 | */ |
398 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) | 371 | static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) |
@@ -415,10 +388,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) | |||
415 | if (netlbl_unlhsh_search_iface(ifindex) != NULL) | 388 | if (netlbl_unlhsh_search_iface(ifindex) != NULL) |
416 | goto add_iface_failure; | 389 | goto add_iface_failure; |
417 | list_add_tail_rcu(&iface->list, | 390 | list_add_tail_rcu(&iface->list, |
418 | &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); | 391 | &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); |
419 | } else { | 392 | } else { |
420 | INIT_LIST_HEAD(&iface->list); | 393 | INIT_LIST_HEAD(&iface->list); |
421 | if (rcu_dereference(netlbl_unlhsh_def) != NULL) | 394 | if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) |
422 | goto add_iface_failure; | 395 | goto add_iface_failure; |
423 | rcu_assign_pointer(netlbl_unlhsh_def, iface); | 396 | rcu_assign_pointer(netlbl_unlhsh_def, iface); |
424 | } | 397 | } |
@@ -548,8 +521,7 @@ unlhsh_add_return: | |||
548 | * | 521 | * |
549 | * Description: | 522 | * Description: |
550 | * Remove an IP address entry from the unlabeled connection hash table. | 523 | * Remove an IP address entry from the unlabeled connection hash table. |
551 | * Returns zero on success, negative values on failure. The caller is | 524 | * Returns zero on success, negative values on failure. |
552 | * responsible for calling the rcu_read_[un]lock() functions. | ||
553 | * | 525 | * |
554 | */ | 526 | */ |
555 | static int netlbl_unlhsh_remove_addr4(struct net *net, | 527 | static int netlbl_unlhsh_remove_addr4(struct net *net, |
@@ -611,8 +583,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, | |||
611 | * | 583 | * |
612 | * Description: | 584 | * Description: |
613 | * Remove an IP address entry from the unlabeled connection hash table. | 585 | * Remove an IP address entry from the unlabeled connection hash table. |
614 | * Returns zero on success, negative values on failure. The caller is | 586 | * Returns zero on success, negative values on failure. |
615 | * responsible for calling the rcu_read_[un]lock() functions. | ||
616 | * | 587 | * |
617 | */ | 588 | */ |
618 | static int netlbl_unlhsh_remove_addr6(struct net *net, | 589 | static int netlbl_unlhsh_remove_addr6(struct net *net, |
@@ -1547,8 +1518,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, | |||
1547 | struct netlbl_unlhsh_iface *iface; | 1518 | struct netlbl_unlhsh_iface *iface; |
1548 | 1519 | ||
1549 | rcu_read_lock(); | 1520 | rcu_read_lock(); |
1550 | iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); | 1521 | iface = netlbl_unlhsh_search_iface(skb->skb_iif); |
1551 | if (iface == NULL) | 1522 | if (iface == NULL) |
1523 | iface = rcu_dereference(netlbl_unlhsh_def); | ||
1524 | if (iface == NULL || !iface->valid) | ||
1552 | goto unlabel_getattr_nolabel; | 1525 | goto unlabel_getattr_nolabel; |
1553 | switch (family) { | 1526 | switch (family) { |
1554 | case PF_INET: { | 1527 | case PF_INET: { |
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c index 68706b4e3bf8..a3fd75ac3fa5 100644 --- a/net/netlabel/netlabel_user.c +++ b/net/netlabel/netlabel_user.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/audit.h> | 35 | #include <linux/audit.h> |
36 | #include <linux/tty.h> | 36 | #include <linux/tty.h> |
37 | #include <linux/security.h> | 37 | #include <linux/security.h> |
38 | #include <linux/gfp.h> | ||
38 | #include <net/sock.h> | 39 | #include <net/sock.h> |
39 | #include <net/netlink.h> | 40 | #include <net/netlink.h> |
40 | #include <net/genetlink.h> | 41 | #include <net/genetlink.h> |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 320d0423a240..6464a1972a69 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock) | |||
545 | struct hlist_head *head; | 545 | struct hlist_head *head; |
546 | struct sock *osk; | 546 | struct sock *osk; |
547 | struct hlist_node *node; | 547 | struct hlist_node *node; |
548 | s32 pid = current->tgid; | 548 | s32 pid = task_tgid_vnr(current); |
549 | int err; | 549 | int err; |
550 | static s32 rover = -4097; | 550 | static s32 rover = -4097; |
551 | 551 | ||
@@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, | |||
683 | struct netlink_sock *nlk = nlk_sk(sk); | 683 | struct netlink_sock *nlk = nlk_sk(sk); |
684 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | 684 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
685 | 685 | ||
686 | if (alen < sizeof(addr->sa_family)) | ||
687 | return -EINVAL; | ||
688 | |||
686 | if (addr->sa_family == AF_UNSPEC) { | 689 | if (addr->sa_family == AF_UNSPEC) { |
687 | sk->sk_state = NETLINK_UNCONNECTED; | 690 | sk->sk_state = NETLINK_UNCONNECTED; |
688 | nlk->dst_pid = 0; | 691 | nlk->dst_pid = 0; |
@@ -1093,6 +1096,7 @@ static inline int do_one_set_err(struct sock *sk, | |||
1093 | struct netlink_set_err_data *p) | 1096 | struct netlink_set_err_data *p) |
1094 | { | 1097 | { |
1095 | struct netlink_sock *nlk = nlk_sk(sk); | 1098 | struct netlink_sock *nlk = nlk_sk(sk); |
1099 | int ret = 0; | ||
1096 | 1100 | ||
1097 | if (sk == p->exclude_sk) | 1101 | if (sk == p->exclude_sk) |
1098 | goto out; | 1102 | goto out; |
@@ -1104,10 +1108,15 @@ static inline int do_one_set_err(struct sock *sk, | |||
1104 | !test_bit(p->group - 1, nlk->groups)) | 1108 | !test_bit(p->group - 1, nlk->groups)) |
1105 | goto out; | 1109 | goto out; |
1106 | 1110 | ||
1111 | if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) { | ||
1112 | ret = 1; | ||
1113 | goto out; | ||
1114 | } | ||
1115 | |||
1107 | sk->sk_err = p->code; | 1116 | sk->sk_err = p->code; |
1108 | sk->sk_error_report(sk); | 1117 | sk->sk_error_report(sk); |
1109 | out: | 1118 | out: |
1110 | return 0; | 1119 | return ret; |
1111 | } | 1120 | } |
1112 | 1121 | ||
1113 | /** | 1122 | /** |
@@ -1116,12 +1125,16 @@ out: | |||
1116 | * @pid: the PID of a process that we want to skip (if any) | 1125 | * @pid: the PID of a process that we want to skip (if any) |
1117 | * @groups: the broadcast group that will notice the error | 1126 | * @groups: the broadcast group that will notice the error |
1118 | * @code: error code, must be negative (as usual in kernelspace) | 1127 | * @code: error code, must be negative (as usual in kernelspace) |
1128 | * | ||
1129 | * This function returns the number of broadcast listeners that have set the | ||
1130 | * NETLINK_RECV_NO_ENOBUFS socket option. | ||
1119 | */ | 1131 | */ |
1120 | void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | 1132 | int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) |
1121 | { | 1133 | { |
1122 | struct netlink_set_err_data info; | 1134 | struct netlink_set_err_data info; |
1123 | struct hlist_node *node; | 1135 | struct hlist_node *node; |
1124 | struct sock *sk; | 1136 | struct sock *sk; |
1137 | int ret = 0; | ||
1125 | 1138 | ||
1126 | info.exclude_sk = ssk; | 1139 | info.exclude_sk = ssk; |
1127 | info.pid = pid; | 1140 | info.pid = pid; |
@@ -1132,9 +1145,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | |||
1132 | read_lock(&nl_table_lock); | 1145 | read_lock(&nl_table_lock); |
1133 | 1146 | ||
1134 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) | 1147 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) |
1135 | do_one_set_err(sk, &info); | 1148 | ret += do_one_set_err(sk, &info); |
1136 | 1149 | ||
1137 | read_unlock(&nl_table_lock); | 1150 | read_unlock(&nl_table_lock); |
1151 | return ret; | ||
1138 | } | 1152 | } |
1139 | EXPORT_SYMBOL(netlink_set_err); | 1153 | EXPORT_SYMBOL(netlink_set_err); |
1140 | 1154 | ||
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index a4b6e148c5de..aa4308afcc7f 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/slab.h> | ||
11 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <linux/socket.h> | 14 | #include <linux/socket.h> |
@@ -20,15 +21,17 @@ | |||
20 | 21 | ||
21 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ | 22 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ |
22 | 23 | ||
23 | static inline void genl_lock(void) | 24 | void genl_lock(void) |
24 | { | 25 | { |
25 | mutex_lock(&genl_mutex); | 26 | mutex_lock(&genl_mutex); |
26 | } | 27 | } |
28 | EXPORT_SYMBOL(genl_lock); | ||
27 | 29 | ||
28 | static inline void genl_unlock(void) | 30 | void genl_unlock(void) |
29 | { | 31 | { |
30 | mutex_unlock(&genl_mutex); | 32 | mutex_unlock(&genl_mutex); |
31 | } | 33 | } |
34 | EXPORT_SYMBOL(genl_unlock); | ||
32 | 35 | ||
33 | #define GENL_FAM_TAB_SIZE 16 | 36 | #define GENL_FAM_TAB_SIZE 16 |
34 | #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) | 37 | #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index a249127020a5..06cb02796a0e 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/socket.h> | 16 | #include <linux/socket.h> |
17 | #include <linux/in.h> | 17 | #include <linux/in.h> |
18 | #include <linux/slab.h> | ||
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
19 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
20 | #include <linux/timer.h> | 21 | #include <linux/timer.h> |
@@ -738,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, | |||
738 | DEFINE_WAIT(wait); | 739 | DEFINE_WAIT(wait); |
739 | 740 | ||
740 | for (;;) { | 741 | for (;;) { |
741 | prepare_to_wait(sk->sk_sleep, &wait, | 742 | prepare_to_wait(sk_sleep(sk), &wait, |
742 | TASK_INTERRUPTIBLE); | 743 | TASK_INTERRUPTIBLE); |
743 | if (sk->sk_state != TCP_SYN_SENT) | 744 | if (sk->sk_state != TCP_SYN_SENT) |
744 | break; | 745 | break; |
@@ -751,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, | |||
751 | err = -ERESTARTSYS; | 752 | err = -ERESTARTSYS; |
752 | break; | 753 | break; |
753 | } | 754 | } |
754 | finish_wait(sk->sk_sleep, &wait); | 755 | finish_wait(sk_sleep(sk), &wait); |
755 | if (err) | 756 | if (err) |
756 | goto out_release; | 757 | goto out_release; |
757 | } | 758 | } |
@@ -797,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
797 | * hooked into the SABM we saved | 798 | * hooked into the SABM we saved |
798 | */ | 799 | */ |
799 | for (;;) { | 800 | for (;;) { |
800 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 801 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
801 | skb = skb_dequeue(&sk->sk_receive_queue); | 802 | skb = skb_dequeue(&sk->sk_receive_queue); |
802 | if (skb) | 803 | if (skb) |
803 | break; | 804 | break; |
@@ -815,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
815 | err = -ERESTARTSYS; | 816 | err = -ERESTARTSYS; |
816 | break; | 817 | break; |
817 | } | 818 | } |
818 | finish_wait(sk->sk_sleep, &wait); | 819 | finish_wait(sk_sleep(sk), &wait); |
819 | if (err) | 820 | if (err) |
820 | goto out_release; | 821 | goto out_release; |
821 | 822 | ||
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 7aa11b01b2e2..64e6dde9749d 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/fcntl.h> | 19 | #include <linux/fcntl.h> |
20 | #include <linux/in.h> | 20 | #include <linux/in.h> |
21 | #include <linux/if_ether.h> /* For the statistics structure. */ | 21 | #include <linux/if_ether.h> /* For the statistics structure. */ |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <asm/system.h> | 24 | #include <asm/system.h> |
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c index 68176483617f..6d4ef6d65b3d 100644 --- a/net/netrom/nr_in.c +++ b/net/netrom/nr_in.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/sockios.h> | 17 | #include <linux/sockios.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/ax25.h> | 20 | #include <net/ax25.h> |
20 | #include <linux/inet.h> | 21 | #include <linux/inet.h> |
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c index f324d5df4186..94d4e922af53 100644 --- a/net/netrom/nr_loopback.c +++ b/net/netrom/nr_loopback.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) | 7 | * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) |
8 | */ | 8 | */ |
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
11 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
12 | #include <net/ax25.h> | 13 | #include <net/ax25.h> |
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c index e3e6c44e1890..607fddb4fdbb 100644 --- a/net/netrom/nr_out.c +++ b/net/netrom/nr_out.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/sockios.h> | 17 | #include <linux/sockios.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/ax25.h> | 20 | #include <net/ax25.h> |
20 | #include <linux/inet.h> | 21 | #include <linux/inet.h> |
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 5cc648012f50..44059d0c8dd1 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/sockios.h> | 18 | #include <linux/sockios.h> |
19 | #include <linux/net.h> | 19 | #include <linux/net.h> |
20 | #include <linux/slab.h> | ||
20 | #include <net/ax25.h> | 21 | #include <net/ax25.h> |
21 | #include <linux/inet.h> | 22 | #include <linux/inet.h> |
22 | #include <linux/netdevice.h> | 23 | #include <linux/netdevice.h> |
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c index 04e7d0d2fd8f..6a947ae50dbd 100644 --- a/net/netrom/nr_subr.c +++ b/net/netrom/nr_subr.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/sockios.h> | 16 | #include <linux/sockios.h> |
17 | #include <linux/net.h> | 17 | #include <linux/net.h> |
18 | #include <linux/slab.h> | ||
18 | #include <net/ax25.h> | 19 | #include <net/ax25.h> |
19 | #include <linux/inet.h> | 20 | #include <linux/inet.h> |
20 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1612d417d10c..2078a277e06b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/wireless.h> | 60 | #include <linux/wireless.h> |
61 | #include <linux/kernel.h> | 61 | #include <linux/kernel.h> |
62 | #include <linux/kmod.h> | 62 | #include <linux/kmod.h> |
63 | #include <linux/slab.h> | ||
63 | #include <net/net_namespace.h> | 64 | #include <net/net_namespace.h> |
64 | #include <net/ip.h> | 65 | #include <net/ip.h> |
65 | #include <net/protocol.h> | 66 | #include <net/protocol.h> |
@@ -81,6 +82,7 @@ | |||
81 | #include <linux/mutex.h> | 82 | #include <linux/mutex.h> |
82 | #include <linux/if_vlan.h> | 83 | #include <linux/if_vlan.h> |
83 | #include <linux/virtio_net.h> | 84 | #include <linux/virtio_net.h> |
85 | #include <linux/errqueue.h> | ||
84 | 86 | ||
85 | #ifdef CONFIG_INET | 87 | #ifdef CONFIG_INET |
86 | #include <net/inet_common.h> | 88 | #include <net/inet_common.h> |
@@ -314,6 +316,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk) | |||
314 | 316 | ||
315 | static void packet_sock_destruct(struct sock *sk) | 317 | static void packet_sock_destruct(struct sock *sk) |
316 | { | 318 | { |
319 | skb_queue_purge(&sk->sk_error_queue); | ||
320 | |||
317 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); | 321 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); |
318 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); | 322 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); |
319 | 323 | ||
@@ -482,6 +486,9 @@ retry: | |||
482 | skb->dev = dev; | 486 | skb->dev = dev; |
483 | skb->priority = sk->sk_priority; | 487 | skb->priority = sk->sk_priority; |
484 | skb->mark = sk->sk_mark; | 488 | skb->mark = sk->sk_mark; |
489 | err = sock_tx_timestamp(msg, sk, skb_tx(skb)); | ||
490 | if (err < 0) | ||
491 | goto out_unlock; | ||
485 | 492 | ||
486 | dev_queue_xmit(skb); | 493 | dev_queue_xmit(skb); |
487 | rcu_read_unlock(); | 494 | rcu_read_unlock(); |
@@ -1187,6 +1194,9 @@ static int packet_snd(struct socket *sock, | |||
1187 | err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); | 1194 | err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); |
1188 | if (err) | 1195 | if (err) |
1189 | goto out_free; | 1196 | goto out_free; |
1197 | err = sock_tx_timestamp(msg, sk, skb_tx(skb)); | ||
1198 | if (err < 0) | ||
1199 | goto out_free; | ||
1190 | 1200 | ||
1191 | skb->protocol = proto; | 1201 | skb->protocol = proto; |
1192 | skb->dev = dev; | 1202 | skb->dev = dev; |
@@ -1486,6 +1496,51 @@ out: | |||
1486 | return err; | 1496 | return err; |
1487 | } | 1497 | } |
1488 | 1498 | ||
1499 | static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len) | ||
1500 | { | ||
1501 | struct sock_exterr_skb *serr; | ||
1502 | struct sk_buff *skb, *skb2; | ||
1503 | int copied, err; | ||
1504 | |||
1505 | err = -EAGAIN; | ||
1506 | skb = skb_dequeue(&sk->sk_error_queue); | ||
1507 | if (skb == NULL) | ||
1508 | goto out; | ||
1509 | |||
1510 | copied = skb->len; | ||
1511 | if (copied > len) { | ||
1512 | msg->msg_flags |= MSG_TRUNC; | ||
1513 | copied = len; | ||
1514 | } | ||
1515 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
1516 | if (err) | ||
1517 | goto out_free_skb; | ||
1518 | |||
1519 | sock_recv_timestamp(msg, sk, skb); | ||
1520 | |||
1521 | serr = SKB_EXT_ERR(skb); | ||
1522 | put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP, | ||
1523 | sizeof(serr->ee), &serr->ee); | ||
1524 | |||
1525 | msg->msg_flags |= MSG_ERRQUEUE; | ||
1526 | err = copied; | ||
1527 | |||
1528 | /* Reset and regenerate socket error */ | ||
1529 | spin_lock_bh(&sk->sk_error_queue.lock); | ||
1530 | sk->sk_err = 0; | ||
1531 | if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { | ||
1532 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; | ||
1533 | spin_unlock_bh(&sk->sk_error_queue.lock); | ||
1534 | sk->sk_error_report(sk); | ||
1535 | } else | ||
1536 | spin_unlock_bh(&sk->sk_error_queue.lock); | ||
1537 | |||
1538 | out_free_skb: | ||
1539 | kfree_skb(skb); | ||
1540 | out: | ||
1541 | return err; | ||
1542 | } | ||
1543 | |||
1489 | /* | 1544 | /* |
1490 | * Pull a packet from our receive queue and hand it to the user. | 1545 | * Pull a packet from our receive queue and hand it to the user. |
1491 | * If necessary we block. | 1546 | * If necessary we block. |
@@ -1501,7 +1556,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1501 | int vnet_hdr_len = 0; | 1556 | int vnet_hdr_len = 0; |
1502 | 1557 | ||
1503 | err = -EINVAL; | 1558 | err = -EINVAL; |
1504 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) | 1559 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
1505 | goto out; | 1560 | goto out; |
1506 | 1561 | ||
1507 | #if 0 | 1562 | #if 0 |
@@ -1510,6 +1565,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1510 | return -ENODEV; | 1565 | return -ENODEV; |
1511 | #endif | 1566 | #endif |
1512 | 1567 | ||
1568 | if (flags & MSG_ERRQUEUE) { | ||
1569 | err = packet_recv_error(sk, msg, len); | ||
1570 | goto out; | ||
1571 | } | ||
1572 | |||
1513 | /* | 1573 | /* |
1514 | * Call the generic datagram receiver. This handles all sorts | 1574 | * Call the generic datagram receiver. This handles all sorts |
1515 | * of horrible races and re-entrancy so we can forget about it | 1575 | * of horrible races and re-entrancy so we can forget about it |
@@ -1691,9 +1751,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1691 | if (i->alen != dev->addr_len) | 1751 | if (i->alen != dev->addr_len) |
1692 | return -EINVAL; | 1752 | return -EINVAL; |
1693 | if (what > 0) | 1753 | if (what > 0) |
1694 | return dev_mc_add(dev, i->addr, i->alen, 0); | 1754 | return dev_mc_add(dev, i->addr); |
1695 | else | 1755 | else |
1696 | return dev_mc_delete(dev, i->addr, i->alen, 0); | 1756 | return dev_mc_del(dev, i->addr); |
1697 | break; | 1757 | break; |
1698 | case PACKET_MR_PROMISC: | 1758 | case PACKET_MR_PROMISC: |
1699 | return dev_set_promiscuity(dev, what); | 1759 | return dev_set_promiscuity(dev, what); |
@@ -1705,9 +1765,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1705 | if (i->alen != dev->addr_len) | 1765 | if (i->alen != dev->addr_len) |
1706 | return -EINVAL; | 1766 | return -EINVAL; |
1707 | if (what > 0) | 1767 | if (what > 0) |
1708 | return dev_unicast_add(dev, i->addr); | 1768 | return dev_uc_add(dev, i->addr); |
1709 | else | 1769 | else |
1710 | return dev_unicast_delete(dev, i->addr); | 1770 | return dev_uc_del(dev, i->addr); |
1711 | break; | 1771 | break; |
1712 | default: | 1772 | default: |
1713 | break; | 1773 | break; |
@@ -2168,8 +2228,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, | |||
2168 | case SIOCGIFDSTADDR: | 2228 | case SIOCGIFDSTADDR: |
2169 | case SIOCSIFDSTADDR: | 2229 | case SIOCSIFDSTADDR: |
2170 | case SIOCSIFFLAGS: | 2230 | case SIOCSIFFLAGS: |
2171 | if (!net_eq(sock_net(sk), &init_net)) | ||
2172 | return -ENOIOCTLCMD; | ||
2173 | return inet_dgram_ops.ioctl(sock, cmd, arg); | 2231 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
2174 | #endif | 2232 | #endif |
2175 | 2233 | ||
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index 526d0273991a..73aee7f2fcdc 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/slab.h> | ||
28 | #include <asm/unaligned.h> | 29 | #include <asm/unaligned.h> |
29 | #include <net/sock.h> | 30 | #include <net/sock.h> |
30 | 31 | ||
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 387197b579b1..1bd38db4fe1e 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c | |||
@@ -24,6 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/slab.h> | ||
27 | #include <linux/socket.h> | 28 | #include <linux/socket.h> |
28 | #include <asm/ioctls.h> | 29 | #include <asm/ioctls.h> |
29 | #include <net/sock.h> | 30 | #include <net/sock.h> |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 360cf377693e..af4d38bc3b22 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -23,6 +23,7 @@ | |||
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/slab.h> | ||
26 | #include <linux/socket.h> | 27 | #include <linux/socket.h> |
27 | #include <net/sock.h> | 28 | #include <net/sock.h> |
28 | #include <net/tcp_states.h> | 29 | #include <net/tcp_states.h> |
@@ -663,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock) | |||
663 | if (signal_pending(tsk)) | 664 | if (signal_pending(tsk)) |
664 | return sock_intr_errno(timeo); | 665 | return sock_intr_errno(timeo); |
665 | 666 | ||
666 | prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait, | 667 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
667 | TASK_INTERRUPTIBLE); | 668 | TASK_INTERRUPTIBLE); |
668 | release_sock(sk); | 669 | release_sock(sk); |
669 | timeo = schedule_timeout(timeo); | 670 | timeo = schedule_timeout(timeo); |
670 | lock_sock(sk); | 671 | lock_sock(sk); |
671 | finish_wait(&sk->sk_socket->wait, &wait); | 672 | finish_wait(sk_sleep(sk), &wait); |
672 | } | 673 | } |
673 | 674 | ||
674 | return 0; | 675 | return 0; |
@@ -909,10 +910,10 @@ disabled: | |||
909 | goto out; | 910 | goto out; |
910 | } | 911 | } |
911 | 912 | ||
912 | prepare_to_wait(&sk->sk_socket->wait, &wait, | 913 | prepare_to_wait(sk_sleep(sk), &wait, |
913 | TASK_INTERRUPTIBLE); | 914 | TASK_INTERRUPTIBLE); |
914 | done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); | 915 | done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); |
915 | finish_wait(&sk->sk_socket->wait, &wait); | 916 | finish_wait(sk_sleep(sk), &wait); |
916 | 917 | ||
917 | if (sk->sk_state != TCP_ESTABLISHED) | 918 | if (sk->sk_state != TCP_ESTABLISHED) |
918 | goto disabled; | 919 | goto disabled; |
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index c597cc53a6fb..c33da6576942 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/net.h> | 27 | #include <linux/net.h> |
28 | #include <linux/slab.h> | ||
28 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
29 | #include <linux/phonet.h> | 30 | #include <linux/phonet.h> |
30 | #include <linux/proc_fs.h> | 31 | #include <linux/proc_fs.h> |
@@ -45,9 +46,16 @@ struct phonet_net { | |||
45 | 46 | ||
46 | int phonet_net_id __read_mostly; | 47 | int phonet_net_id __read_mostly; |
47 | 48 | ||
49 | static struct phonet_net *phonet_pernet(struct net *net) | ||
50 | { | ||
51 | BUG_ON(!net); | ||
52 | |||
53 | return net_generic(net, phonet_net_id); | ||
54 | } | ||
55 | |||
48 | struct phonet_device_list *phonet_device_list(struct net *net) | 56 | struct phonet_device_list *phonet_device_list(struct net *net) |
49 | { | 57 | { |
50 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 58 | struct phonet_net *pnn = phonet_pernet(net); |
51 | return &pnn->pndevs; | 59 | return &pnn->pndevs; |
52 | } | 60 | } |
53 | 61 | ||
@@ -107,8 +115,7 @@ static void phonet_device_destroy(struct net_device *dev) | |||
107 | if (pnd) { | 115 | if (pnd) { |
108 | u8 addr; | 116 | u8 addr; |
109 | 117 | ||
110 | for (addr = find_first_bit(pnd->addrs, 64); addr < 64; | 118 | for_each_set_bit(addr, pnd->addrs, 64) |
111 | addr = find_next_bit(pnd->addrs, 64, 1+addr)) | ||
112 | phonet_address_notify(RTM_DELADDR, dev, addr); | 119 | phonet_address_notify(RTM_DELADDR, dev, addr); |
113 | kfree(pnd); | 120 | kfree(pnd); |
114 | } | 121 | } |
@@ -261,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev) | |||
261 | 268 | ||
262 | static void phonet_route_autodel(struct net_device *dev) | 269 | static void phonet_route_autodel(struct net_device *dev) |
263 | { | 270 | { |
264 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | 271 | struct phonet_net *pnn = phonet_pernet(dev_net(dev)); |
265 | unsigned i; | 272 | unsigned i; |
266 | DECLARE_BITMAP(deleted, 64); | 273 | DECLARE_BITMAP(deleted, 64); |
267 | 274 | ||
@@ -313,7 +320,7 @@ static struct notifier_block phonet_device_notifier = { | |||
313 | /* Per-namespace Phonet devices handling */ | 320 | /* Per-namespace Phonet devices handling */ |
314 | static int __net_init phonet_init_net(struct net *net) | 321 | static int __net_init phonet_init_net(struct net *net) |
315 | { | 322 | { |
316 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 323 | struct phonet_net *pnn = phonet_pernet(net); |
317 | 324 | ||
318 | if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) | 325 | if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) |
319 | return -ENOMEM; | 326 | return -ENOMEM; |
@@ -326,7 +333,7 @@ static int __net_init phonet_init_net(struct net *net) | |||
326 | 333 | ||
327 | static void __net_exit phonet_exit_net(struct net *net) | 334 | static void __net_exit phonet_exit_net(struct net *net) |
328 | { | 335 | { |
329 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 336 | struct phonet_net *pnn = phonet_pernet(net); |
330 | struct net_device *dev; | 337 | struct net_device *dev; |
331 | unsigned i; | 338 | unsigned i; |
332 | 339 | ||
@@ -376,7 +383,7 @@ void phonet_device_exit(void) | |||
376 | 383 | ||
377 | int phonet_route_add(struct net_device *dev, u8 daddr) | 384 | int phonet_route_add(struct net_device *dev, u8 daddr) |
378 | { | 385 | { |
379 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | 386 | struct phonet_net *pnn = phonet_pernet(dev_net(dev)); |
380 | struct phonet_routes *routes = &pnn->routes; | 387 | struct phonet_routes *routes = &pnn->routes; |
381 | int err = -EEXIST; | 388 | int err = -EEXIST; |
382 | 389 | ||
@@ -393,7 +400,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr) | |||
393 | 400 | ||
394 | int phonet_route_del(struct net_device *dev, u8 daddr) | 401 | int phonet_route_del(struct net_device *dev, u8 daddr) |
395 | { | 402 | { |
396 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | 403 | struct phonet_net *pnn = phonet_pernet(dev_net(dev)); |
397 | struct phonet_routes *routes = &pnn->routes; | 404 | struct phonet_routes *routes = &pnn->routes; |
398 | 405 | ||
399 | daddr = daddr >> 2; | 406 | daddr = daddr >> 2; |
@@ -413,7 +420,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr) | |||
413 | 420 | ||
414 | struct net_device *phonet_route_get(struct net *net, u8 daddr) | 421 | struct net_device *phonet_route_get(struct net *net, u8 daddr) |
415 | { | 422 | { |
416 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 423 | struct phonet_net *pnn = phonet_pernet(net); |
417 | struct phonet_routes *routes = &pnn->routes; | 424 | struct phonet_routes *routes = &pnn->routes; |
418 | struct net_device *dev; | 425 | struct net_device *dev; |
419 | 426 | ||
@@ -428,7 +435,7 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr) | |||
428 | 435 | ||
429 | struct net_device *phonet_route_output(struct net *net, u8 daddr) | 436 | struct net_device *phonet_route_output(struct net *net, u8 daddr) |
430 | { | 437 | { |
431 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 438 | struct phonet_net *pnn = phonet_pernet(net); |
432 | struct phonet_routes *routes = &pnn->routes; | 439 | struct phonet_routes *routes = &pnn->routes; |
433 | struct net_device *dev; | 440 | struct net_device *dev; |
434 | 441 | ||
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 2e6c7eb8e76a..58b3b1f991ed 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/netlink.h> | 27 | #include <linux/netlink.h> |
28 | #include <linux/phonet.h> | 28 | #include <linux/phonet.h> |
29 | #include <linux/slab.h> | ||
29 | #include <net/sock.h> | 30 | #include <net/sock.h> |
30 | #include <net/phonet/pn_dev.h> | 31 | #include <net/phonet/pn_dev.h> |
31 | 32 | ||
@@ -141,8 +142,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | |||
141 | continue; | 142 | continue; |
142 | 143 | ||
143 | addr_idx = 0; | 144 | addr_idx = 0; |
144 | for (addr = find_first_bit(pnd->addrs, 64); addr < 64; | 145 | for_each_set_bit(addr, pnd->addrs, 64) { |
145 | addr = find_next_bit(pnd->addrs, 64, 1+addr)) { | ||
146 | if (addr_idx++ < addr_start_idx) | 146 | if (addr_idx++ < addr_start_idx) |
147 | continue; | 147 | continue; |
148 | 148 | ||
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 69c8b826a0ce..6e9848bf0370 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * 02110-1301 USA | 23 | * 02110-1301 USA |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/gfp.h> | ||
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
27 | #include <linux/net.h> | 28 | #include <linux/net.h> |
28 | #include <linux/poll.h> | 29 | #include <linux/poll.h> |
@@ -264,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock, | |||
264 | struct pep_sock *pn = pep_sk(sk); | 265 | struct pep_sock *pn = pep_sk(sk); |
265 | unsigned int mask = 0; | 266 | unsigned int mask = 0; |
266 | 267 | ||
267 | poll_wait(file, &sock->wait, wait); | 268 | poll_wait(file, sk_sleep(sk), wait); |
268 | 269 | ||
269 | switch (sk->sk_state) { | 270 | switch (sk->sk_state) { |
270 | case TCP_LISTEN: | 271 | case TCP_LISTEN: |
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 853c52be781f..aebfecbdb841 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/gfp.h> | ||
36 | #include <linux/in.h> | 37 | #include <linux/in.h> |
37 | #include <linux/poll.h> | 38 | #include <linux/poll.h> |
38 | #include <net/sock.h> | 39 | #include <net/sock.h> |
@@ -157,9 +158,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
157 | unsigned int mask = 0; | 158 | unsigned int mask = 0; |
158 | unsigned long flags; | 159 | unsigned long flags; |
159 | 160 | ||
160 | poll_wait(file, sk->sk_sleep, wait); | 161 | poll_wait(file, sk_sleep(sk), wait); |
161 | 162 | ||
162 | poll_wait(file, &rds_poll_waitq, wait); | 163 | if (rs->rs_seen_congestion) |
164 | poll_wait(file, &rds_poll_waitq, wait); | ||
163 | 165 | ||
164 | read_lock_irqsave(&rs->rs_recv_lock, flags); | 166 | read_lock_irqsave(&rs->rs_recv_lock, flags); |
165 | if (!rs->rs_cong_monitor) { | 167 | if (!rs->rs_cong_monitor) { |
@@ -181,6 +183,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
181 | mask |= (POLLOUT | POLLWRNORM); | 183 | mask |= (POLLOUT | POLLWRNORM); |
182 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); | 184 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); |
183 | 185 | ||
186 | /* clear state any time we wake a seen-congested socket */ | ||
187 | if (mask) | ||
188 | rs->rs_seen_congestion = 0; | ||
189 | |||
184 | return mask; | 190 | return mask; |
185 | } | 191 | } |
186 | 192 | ||
@@ -446,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, | |||
446 | struct rds_info_lengths *lens) | 452 | struct rds_info_lengths *lens) |
447 | { | 453 | { |
448 | struct rds_sock *rs; | 454 | struct rds_sock *rs; |
449 | struct sock *sk; | ||
450 | struct rds_incoming *inc; | 455 | struct rds_incoming *inc; |
451 | unsigned long flags; | 456 | unsigned long flags; |
452 | unsigned int total = 0; | 457 | unsigned int total = 0; |
@@ -456,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, | |||
456 | spin_lock_irqsave(&rds_sock_lock, flags); | 461 | spin_lock_irqsave(&rds_sock_lock, flags); |
457 | 462 | ||
458 | list_for_each_entry(rs, &rds_sock_list, rs_item) { | 463 | list_for_each_entry(rs, &rds_sock_list, rs_item) { |
459 | sk = rds_rs_to_sk(rs); | ||
460 | read_lock(&rs->rs_recv_lock); | 464 | read_lock(&rs->rs_recv_lock); |
461 | 465 | ||
462 | /* XXX too lazy to maintain counts.. */ | 466 | /* XXX too lazy to maintain counts.. */ |
diff --git a/net/rds/cong.c b/net/rds/cong.c index 6d06cac2649c..0871a29f0780 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -30,6 +30,7 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/slab.h> | ||
33 | #include <linux/types.h> | 34 | #include <linux/types.h> |
34 | #include <linux/rbtree.h> | 35 | #include <linux/rbtree.h> |
35 | 36 | ||
@@ -218,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map) | |||
218 | spin_lock_irqsave(&rds_cong_lock, flags); | 219 | spin_lock_irqsave(&rds_cong_lock, flags); |
219 | 220 | ||
220 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { | 221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { |
221 | if (conn->c_loopback) | ||
222 | continue; | ||
223 | if (!test_and_set_bit(0, &conn->c_map_queued)) { | 222 | if (!test_and_set_bit(0, &conn->c_map_queued)) { |
224 | rds_stats_inc(s_cong_update_queued); | 223 | rds_stats_inc(s_cong_update_queued); |
225 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 224 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
diff --git a/net/rds/connection.c b/net/rds/connection.c index 278f607ab603..7619b671ca28 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <linux/slab.h> | ||
35 | #include <net/inet_hashtables.h> | 36 | #include <net/inet_hashtables.h> |
36 | 37 | ||
37 | #include "rds.h" | 38 | #include "rds.h" |
diff --git a/net/rds/ib.c b/net/rds/ib.c index 3b8992361042..8f2d6dd7700a 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/inetdevice.h> | 37 | #include <linux/inetdevice.h> |
38 | #include <linux/if_arp.h> | 38 | #include <linux/if_arp.h> |
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | #include <linux/slab.h> | ||
40 | 41 | ||
41 | #include "rds.h" | 42 | #include "rds.h" |
42 | #include "ib.h" | 43 | #include "ib.h" |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 647cb8ffc39b..10ed0d55f759 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/in.h> | 34 | #include <linux/in.h> |
35 | #include <linux/slab.h> | ||
35 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
36 | 37 | ||
37 | #include "rds.h" | 38 | #include "rds.h" |
@@ -203,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) | |||
203 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); | 204 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); |
204 | break; | 205 | break; |
205 | default: | 206 | default: |
206 | rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u " | 207 | rdsdebug("Fatal QP Event %u " |
207 | "- connection %pI4->%pI4, reconnecting\n", | 208 | "- connection %pI4->%pI4, reconnecting\n", |
208 | event->event, &conn->c_laddr, &conn->c_faddr); | 209 | event->event, &conn->c_laddr, &conn->c_faddr); |
210 | rds_conn_drop(conn); | ||
209 | break; | 211 | break; |
210 | } | 212 | } |
211 | } | 213 | } |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 4b0da865a72c..a54cd63f9e35 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | 35 | ||
35 | #include "rds.h" | 36 | #include "rds.h" |
36 | #include "rdma.h" | 37 | #include "rdma.h" |
@@ -234,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) | |||
234 | { | 235 | { |
235 | flush_workqueue(rds_wq); | 236 | flush_workqueue(rds_wq); |
236 | rds_ib_flush_mr_pool(pool, 1); | 237 | rds_ib_flush_mr_pool(pool, 1); |
237 | BUG_ON(atomic_read(&pool->item_count)); | 238 | WARN_ON(atomic_read(&pool->item_count)); |
238 | BUG_ON(atomic_read(&pool->free_pinned)); | 239 | WARN_ON(atomic_read(&pool->free_pinned)); |
239 | kfree(pool); | 240 | kfree(pool); |
240 | } | 241 | } |
241 | 242 | ||
@@ -440,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) | |||
440 | 441 | ||
441 | /* FIXME we need a way to tell a r/w MR | 442 | /* FIXME we need a way to tell a r/w MR |
442 | * from a r/o MR */ | 443 | * from a r/o MR */ |
444 | BUG_ON(in_interrupt()); | ||
443 | set_page_dirty(page); | 445 | set_page_dirty(page); |
444 | put_page(page); | 446 | put_page(page); |
445 | } | 447 | } |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 04dc0d3f3c95..c74e9904a6b2 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
36 | #include <rdma/rdma_cm.h> | 37 | #include <rdma/rdma_cm.h> |
@@ -468,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi | |||
468 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
469 | 470 | ||
470 | rds_ib_stats_inc(s_ib_ack_send_failure); | 471 | rds_ib_stats_inc(s_ib_ack_send_failure); |
471 | /* Need to finesse this later. */ | 472 | |
472 | BUG(); | 473 | rds_ib_conn_error(ic->conn, "sending ack failed\n"); |
473 | } else | 474 | } else |
474 | rds_ib_stats_inc(s_ib_ack_sent); | 475 | rds_ib_stats_inc(s_ib_ack_sent); |
475 | } | 476 | } |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index a10fab6886d1..17fa80803ab0 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
243 | struct rds_message *rm; | 243 | struct rds_message *rm; |
244 | 244 | ||
245 | rm = rds_send_get_message(conn, send->s_op); | 245 | rm = rds_send_get_message(conn, send->s_op); |
246 | if (rm) | 246 | if (rm) { |
247 | if (rm->m_rdma_op) | ||
248 | rds_ib_send_unmap_rdma(ic, rm->m_rdma_op); | ||
247 | rds_ib_send_rdma_complete(rm, wc.status); | 249 | rds_ib_send_rdma_complete(rm, wc.status); |
250 | rds_message_put(rm); | ||
251 | } | ||
248 | } | 252 | } |
249 | 253 | ||
250 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; | 254 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; |
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
482 | BUG_ON(off % RDS_FRAG_SIZE); | 486 | BUG_ON(off % RDS_FRAG_SIZE); |
483 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); | 487 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); |
484 | 488 | ||
489 | /* Do not send cong updates to IB loopback */ | ||
490 | if (conn->c_loopback | ||
491 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | ||
492 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | ||
493 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | ||
494 | } | ||
495 | |||
485 | /* FIXME we may overallocate here */ | 496 | /* FIXME we may overallocate here */ |
486 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) | 497 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) |
487 | i = 1; | 498 | i = 1; |
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
574 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 585 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
575 | adv_credits += posted; | 586 | adv_credits += posted; |
576 | BUG_ON(adv_credits > 255); | 587 | BUG_ON(adv_credits > 255); |
577 | } else if (ic->i_rm != rm) | 588 | } |
578 | BUG(); | ||
579 | 589 | ||
580 | send = &ic->i_sends[pos]; | 590 | send = &ic->i_sends[pos]; |
581 | first = send; | 591 | first = send; |
@@ -714,8 +724,8 @@ add_header: | |||
714 | ic->i_rm = prev->s_rm; | 724 | ic->i_rm = prev->s_rm; |
715 | prev->s_rm = NULL; | 725 | prev->s_rm = NULL; |
716 | } | 726 | } |
717 | /* Finesse this later */ | 727 | |
718 | BUG(); | 728 | rds_ib_conn_error(ic->conn, "ib_post_send failed\n"); |
719 | goto out; | 729 | goto out; |
720 | } | 730 | } |
721 | 731 | ||
diff --git a/net/rds/info.c b/net/rds/info.c index 814a91a6f4a7..c45c4173a44d 100644 --- a/net/rds/info.c +++ b/net/rds/info.c | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | #include <linux/percpu.h> | 33 | #include <linux/percpu.h> |
34 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
35 | #include <linux/slab.h> | ||
35 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
36 | 37 | ||
37 | #include "rds.h" | 38 | #include "rds.h" |
diff --git a/net/rds/iw.c b/net/rds/iw.c index b28fa8525b24..c8f3d3525cb9 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/inetdevice.h> | 37 | #include <linux/inetdevice.h> |
38 | #include <linux/if_arp.h> | 38 | #include <linux/if_arp.h> |
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | #include <linux/slab.h> | ||
40 | 41 | ||
41 | #include "rds.h" | 42 | #include "rds.h" |
42 | #include "iw.h" | 43 | #include "iw.h" |
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 394cf6b4d0aa..a9d951b4fbae 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -32,6 +32,7 @@ | |||
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/in.h> | 34 | #include <linux/in.h> |
35 | #include <linux/slab.h> | ||
35 | #include <linux/vmalloc.h> | 36 | #include <linux/vmalloc.h> |
36 | 37 | ||
37 | #include "rds.h" | 38 | #include "rds.h" |
@@ -156,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data) | |||
156 | case IB_EVENT_QP_REQ_ERR: | 157 | case IB_EVENT_QP_REQ_ERR: |
157 | case IB_EVENT_QP_FATAL: | 158 | case IB_EVENT_QP_FATAL: |
158 | default: | 159 | default: |
159 | rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n", | 160 | rdsdebug("Fatal QP Event %u " |
161 | "- connection %pI4->%pI4, reconnecting\n", | ||
160 | event->event, &conn->c_laddr, | 162 | event->event, &conn->c_laddr, |
161 | &conn->c_faddr); | 163 | &conn->c_faddr); |
164 | rds_conn_drop(conn); | ||
162 | break; | 165 | break; |
163 | } | 166 | } |
164 | } | 167 | } |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 9eda11cca956..13dc1862d862 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | 35 | ||
35 | #include "rds.h" | 36 | #include "rds.h" |
36 | #include "rdma.h" | 37 | #include "rdma.h" |
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index 54af7d6b92da..3d479067d54d 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/pci.h> | 35 | #include <linux/pci.h> |
35 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
36 | #include <rdma/rdma_cm.h> | 37 | #include <rdma/rdma_cm.h> |
@@ -468,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi | |||
468 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
469 | 470 | ||
470 | rds_iw_stats_inc(s_iw_ack_send_failure); | 471 | rds_iw_stats_inc(s_iw_ack_send_failure); |
471 | /* Need to finesse this later. */ | 472 | |
472 | BUG(); | 473 | rds_iw_conn_error(ic->conn, "sending ack failed\n"); |
473 | } else | 474 | } else |
474 | rds_iw_stats_inc(s_iw_ack_sent); | 475 | rds_iw_stats_inc(s_iw_ack_sent); |
475 | } | 476 | } |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 1379e9d66a78..52182ff7519e 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
617 | adv_credits += posted; | 617 | adv_credits += posted; |
618 | BUG_ON(adv_credits > 255); | 618 | BUG_ON(adv_credits > 255); |
619 | } else if (ic->i_rm != rm) | 619 | } |
620 | BUG(); | ||
621 | 620 | ||
622 | send = &ic->i_sends[pos]; | 621 | send = &ic->i_sends[pos]; |
623 | first = send; | 622 | first = send; |
diff --git a/net/rds/loop.c b/net/rds/loop.c index 4a61997f554d..dd9879379457 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
35 | 36 | ||
36 | #include "rds.h" | 37 | #include "rds.h" |
@@ -80,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn, | |||
80 | struct rds_cong_map *map, | 81 | struct rds_cong_map *map, |
81 | unsigned long offset) | 82 | unsigned long offset) |
82 | { | 83 | { |
83 | unsigned long i; | ||
84 | |||
85 | BUG_ON(offset); | 84 | BUG_ON(offset); |
86 | BUG_ON(map != conn->c_lcong); | 85 | BUG_ON(map != conn->c_lcong); |
87 | 86 | ||
88 | for (i = 0; i < RDS_CONG_MAP_PAGES; i++) { | ||
89 | memcpy((void *)conn->c_fcong->m_page_addrs[i], | ||
90 | (void *)map->m_page_addrs[i], PAGE_SIZE); | ||
91 | } | ||
92 | |||
93 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 87 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
94 | 88 | ||
95 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 89 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; |
diff --git a/net/rds/message.c b/net/rds/message.c index 73e600ffd87f..9a1d67e001ba 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | 35 | ||
35 | #include "rds.h" | 36 | #include "rds.h" |
36 | #include "rdma.h" | 37 | #include "rdma.h" |
diff --git a/net/rds/page.c b/net/rds/page.c index 36790122dfd4..595a952d4b17 100644 --- a/net/rds/page.c +++ b/net/rds/page.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/highmem.h> | 33 | #include <linux/highmem.h> |
34 | #include <linux/gfp.h> | ||
34 | 35 | ||
35 | #include "rds.h" | 36 | #include "rds.h" |
36 | 37 | ||
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 4c64daa1f5d5..75fd13bb631b 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/pagemap.h> | 33 | #include <linux/pagemap.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/rbtree.h> | 35 | #include <linux/rbtree.h> |
35 | #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ | 36 | #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */ |
36 | 37 | ||
@@ -438,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro) | |||
438 | /* Mark page dirty if it was possibly modified, which | 439 | /* Mark page dirty if it was possibly modified, which |
439 | * is the case for a RDMA_READ which copies from remote | 440 | * is the case for a RDMA_READ which copies from remote |
440 | * to local memory */ | 441 | * to local memory */ |
441 | if (!ro->r_write) | 442 | if (!ro->r_write) { |
443 | BUG_ON(in_interrupt()); | ||
442 | set_page_dirty(page); | 444 | set_page_dirty(page); |
445 | } | ||
443 | put_page(page); | 446 | put_page(page); |
444 | } | 447 | } |
445 | 448 | ||
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 9ece910ea394..e599ba2f950d 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
101 | break; | 101 | break; |
102 | 102 | ||
103 | case RDMA_CM_EVENT_DISCONNECTED: | 103 | case RDMA_CM_EVENT_DISCONNECTED: |
104 | printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection " | 104 | rdsdebug("DISCONNECT event - dropping connection " |
105 | "%pI4->%pI4\n", &conn->c_laddr, | 105 | "%pI4->%pI4\n", &conn->c_laddr, |
106 | &conn->c_faddr); | 106 | &conn->c_faddr); |
107 | rds_conn_drop(conn); | 107 | rds_conn_drop(conn); |
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
109 | 109 | ||
110 | default: | 110 | default: |
111 | /* things like device disconnect? */ | 111 | /* things like device disconnect? */ |
112 | printk(KERN_ERR "unknown event %u\n", event->event); | 112 | printk(KERN_ERR "RDS: unknown event %u!\n", event->event); |
113 | BUG(); | ||
114 | break; | 113 | break; |
115 | } | 114 | } |
116 | 115 | ||
@@ -134,7 +133,7 @@ static int __init rds_rdma_listen_init(void) | |||
134 | ret = PTR_ERR(cm_id); | 133 | ret = PTR_ERR(cm_id); |
135 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " | 134 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
136 | "rdma_create_id() returned %d\n", ret); | 135 | "rdma_create_id() returned %d\n", ret); |
137 | goto out; | 136 | return ret; |
138 | } | 137 | } |
139 | 138 | ||
140 | sin.sin_family = AF_INET, | 139 | sin.sin_family = AF_INET, |
diff --git a/net/rds/rds.h b/net/rds/rds.h index 85d6f897ecc7..c224b5bb3ba9 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
@@ -388,6 +388,8 @@ struct rds_sock { | |||
388 | 388 | ||
389 | /* flag indicating we were congested or not */ | 389 | /* flag indicating we were congested or not */ |
390 | int rs_congested; | 390 | int rs_congested; |
391 | /* seen congestion (ENOBUFS) when sending? */ | ||
392 | int rs_seen_congestion; | ||
391 | 393 | ||
392 | /* rs_lock protects all these adjacent members before the newline */ | 394 | /* rs_lock protects all these adjacent members before the newline */ |
393 | spinlock_t rs_lock; | 395 | spinlock_t rs_lock; |
@@ -490,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs); | |||
490 | void rds_wake_sk_sleep(struct rds_sock *rs); | 492 | void rds_wake_sk_sleep(struct rds_sock *rs); |
491 | static inline void __rds_wake_sk_sleep(struct sock *sk) | 493 | static inline void __rds_wake_sk_sleep(struct sock *sk) |
492 | { | 494 | { |
493 | wait_queue_head_t *waitq = sk->sk_sleep; | 495 | wait_queue_head_t *waitq = sk_sleep(sk); |
494 | 496 | ||
495 | if (!sock_flag(sk, SOCK_DEAD) && waitq) | 497 | if (!sock_flag(sk, SOCK_DEAD) && waitq) |
496 | wake_up(waitq); | 498 | wake_up(waitq); |
diff --git a/net/rds/recv.c b/net/rds/recv.c index b426d67f760c..795a00b7f2cb 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
35 | #include <linux/in.h> | 36 | #include <linux/in.h> |
36 | 37 | ||
@@ -431,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
431 | break; | 432 | break; |
432 | } | 433 | } |
433 | 434 | ||
434 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, | 435 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), |
435 | (!list_empty(&rs->rs_notify_queue) || | 436 | (!list_empty(&rs->rs_notify_queue) || |
436 | rs->rs_cong_notify || | 437 | rs->rs_cong_notify || |
437 | rds_next_incoming(rs, &inc)), timeo); | 438 | rds_next_incoming(rs, &inc)), timeo); |
diff --git a/net/rds/send.c b/net/rds/send.c index b2fccfc20769..9c1c6bcaa6c9 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/gfp.h> | ||
34 | #include <net/sock.h> | 35 | #include <net/sock.h> |
35 | #include <linux/in.h> | 36 | #include <linux/in.h> |
36 | #include <linux/list.h> | 37 | #include <linux/list.h> |
@@ -507,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message); | |||
507 | */ | 508 | */ |
508 | void rds_send_remove_from_sock(struct list_head *messages, int status) | 509 | void rds_send_remove_from_sock(struct list_head *messages, int status) |
509 | { | 510 | { |
510 | unsigned long flags = 0; /* silence gcc :P */ | 511 | unsigned long flags; |
511 | struct rds_sock *rs = NULL; | 512 | struct rds_sock *rs = NULL; |
512 | struct rds_message *rm; | 513 | struct rds_message *rm; |
513 | 514 | ||
514 | local_irq_save(flags); | ||
515 | while (!list_empty(messages)) { | 515 | while (!list_empty(messages)) { |
516 | int was_on_sock = 0; | ||
517 | |||
516 | rm = list_entry(messages->next, struct rds_message, | 518 | rm = list_entry(messages->next, struct rds_message, |
517 | m_conn_item); | 519 | m_conn_item); |
518 | list_del_init(&rm->m_conn_item); | 520 | list_del_init(&rm->m_conn_item); |
@@ -527,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
527 | * while we're messing with it. It does not prevent the | 529 | * while we're messing with it. It does not prevent the |
528 | * message from being removed from the socket, though. | 530 | * message from being removed from the socket, though. |
529 | */ | 531 | */ |
530 | spin_lock(&rm->m_rs_lock); | 532 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
531 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) | 533 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) |
532 | goto unlock_and_drop; | 534 | goto unlock_and_drop; |
533 | 535 | ||
534 | if (rs != rm->m_rs) { | 536 | if (rs != rm->m_rs) { |
535 | if (rs) { | 537 | if (rs) { |
536 | spin_unlock(&rs->rs_lock); | ||
537 | rds_wake_sk_sleep(rs); | 538 | rds_wake_sk_sleep(rs); |
538 | sock_put(rds_rs_to_sk(rs)); | 539 | sock_put(rds_rs_to_sk(rs)); |
539 | } | 540 | } |
540 | rs = rm->m_rs; | 541 | rs = rm->m_rs; |
541 | spin_lock(&rs->rs_lock); | ||
542 | sock_hold(rds_rs_to_sk(rs)); | 542 | sock_hold(rds_rs_to_sk(rs)); |
543 | } | 543 | } |
544 | spin_lock(&rs->rs_lock); | ||
544 | 545 | ||
545 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { | 546 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { |
546 | struct rds_rdma_op *ro = rm->m_rdma_op; | 547 | struct rds_rdma_op *ro = rm->m_rdma_op; |
@@ -557,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
557 | notifier->n_status = status; | 558 | notifier->n_status = status; |
558 | rm->m_rdma_op->r_notifier = NULL; | 559 | rm->m_rdma_op->r_notifier = NULL; |
559 | } | 560 | } |
560 | rds_message_put(rm); | 561 | was_on_sock = 1; |
561 | rm->m_rs = NULL; | 562 | rm->m_rs = NULL; |
562 | } | 563 | } |
564 | spin_unlock(&rs->rs_lock); | ||
563 | 565 | ||
564 | unlock_and_drop: | 566 | unlock_and_drop: |
565 | spin_unlock(&rm->m_rs_lock); | 567 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
566 | rds_message_put(rm); | 568 | rds_message_put(rm); |
569 | if (was_on_sock) | ||
570 | rds_message_put(rm); | ||
567 | } | 571 | } |
568 | 572 | ||
569 | if (rs) { | 573 | if (rs) { |
570 | spin_unlock(&rs->rs_lock); | ||
571 | rds_wake_sk_sleep(rs); | 574 | rds_wake_sk_sleep(rs); |
572 | sock_put(rds_rs_to_sk(rs)); | 575 | sock_put(rds_rs_to_sk(rs)); |
573 | } | 576 | } |
574 | local_irq_restore(flags); | ||
575 | } | 577 | } |
576 | 578 | ||
577 | /* | 579 | /* |
@@ -633,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
633 | list_move(&rm->m_sock_item, &list); | 635 | list_move(&rm->m_sock_item, &list); |
634 | rds_send_sndbuf_remove(rs, rm); | 636 | rds_send_sndbuf_remove(rs, rm); |
635 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | 637 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); |
636 | |||
637 | /* If this is a RDMA operation, notify the app. */ | ||
638 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
639 | } | 638 | } |
640 | 639 | ||
641 | /* order flag updates with the rs lock */ | 640 | /* order flag updates with the rs lock */ |
@@ -644,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
644 | 643 | ||
645 | spin_unlock_irqrestore(&rs->rs_lock, flags); | 644 | spin_unlock_irqrestore(&rs->rs_lock, flags); |
646 | 645 | ||
647 | if (wake) | ||
648 | rds_wake_sk_sleep(rs); | ||
649 | |||
650 | conn = NULL; | 646 | conn = NULL; |
651 | 647 | ||
652 | /* now remove the messages from the conn list as needed */ | 648 | /* now remove the messages from the conn list as needed */ |
@@ -654,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
654 | /* We do this here rather than in the loop above, so that | 650 | /* We do this here rather than in the loop above, so that |
655 | * we don't have to nest m_rs_lock under rs->rs_lock */ | 651 | * we don't have to nest m_rs_lock under rs->rs_lock */ |
656 | spin_lock_irqsave(&rm->m_rs_lock, flags2); | 652 | spin_lock_irqsave(&rm->m_rs_lock, flags2); |
653 | /* If this is a RDMA operation, notify the app. */ | ||
654 | spin_lock(&rs->rs_lock); | ||
655 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
656 | spin_unlock(&rs->rs_lock); | ||
657 | rm->m_rs = NULL; | 657 | rm->m_rs = NULL; |
658 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); | 658 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); |
659 | 659 | ||
@@ -682,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
682 | if (conn) | 682 | if (conn) |
683 | spin_unlock_irqrestore(&conn->c_lock, flags); | 683 | spin_unlock_irqrestore(&conn->c_lock, flags); |
684 | 684 | ||
685 | if (wake) | ||
686 | rds_wake_sk_sleep(rs); | ||
687 | |||
685 | while (!list_empty(&list)) { | 688 | while (!list_empty(&list)) { |
686 | rm = list_entry(list.next, struct rds_message, m_sock_item); | 689 | rm = list_entry(list.next, struct rds_message, m_sock_item); |
687 | list_del_init(&rm->m_sock_item); | 690 | list_del_init(&rm->m_sock_item); |
@@ -815,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
815 | int ret = 0; | 818 | int ret = 0; |
816 | int queued = 0, allocated_mr = 0; | 819 | int queued = 0, allocated_mr = 0; |
817 | int nonblock = msg->msg_flags & MSG_DONTWAIT; | 820 | int nonblock = msg->msg_flags & MSG_DONTWAIT; |
818 | long timeo = sock_rcvtimeo(sk, nonblock); | 821 | long timeo = sock_sndtimeo(sk, nonblock); |
819 | 822 | ||
820 | /* Mirror Linux UDP mirror of BSD error message compatibility */ | 823 | /* Mirror Linux UDP mirror of BSD error message compatibility */ |
821 | /* XXX: Perhaps MSG_MORE someday */ | 824 | /* XXX: Perhaps MSG_MORE someday */ |
@@ -894,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
894 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 897 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
895 | 898 | ||
896 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | 899 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); |
897 | if (ret) | 900 | if (ret) { |
901 | rs->rs_seen_congestion = 1; | ||
898 | goto out; | 902 | goto out; |
903 | } | ||
899 | 904 | ||
900 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, | 905 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, |
901 | dport, &queued)) { | 906 | dport, &queued)) { |
@@ -910,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
910 | goto out; | 915 | goto out; |
911 | } | 916 | } |
912 | 917 | ||
913 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, | 918 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), |
914 | rds_send_queue_rm(rs, conn, rm, | 919 | rds_send_queue_rm(rs, conn, rm, |
915 | rs->rs_bound_port, | 920 | rs->rs_bound_port, |
916 | dport, | 921 | dport, |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index b5198aee45d3..babf4577ff7d 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
35 | #include <net/tcp.h> | 36 | #include <net/tcp.h> |
36 | 37 | ||
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 53cb1b54165d..975183fe6950 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/gfp.h> | ||
34 | #include <linux/in.h> | 35 | #include <linux/in.h> |
35 | #include <net/tcp.h> | 36 | #include <net/tcp.h> |
36 | 37 | ||
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index c00dafffbb5a..1aba6878fa5d 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
@@ -31,6 +31,7 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <linux/slab.h> | ||
34 | #include <net/tcp.h> | 35 | #include <net/tcp.h> |
35 | 36 | ||
36 | #include "rds.h" | 37 | #include "rds.h" |
@@ -97,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, | |||
97 | goto out; | 98 | goto out; |
98 | } | 99 | } |
99 | 100 | ||
101 | rds_stats_add(s_copy_to_user, to_copy); | ||
100 | size -= to_copy; | 102 | size -= to_copy; |
101 | ret += to_copy; | 103 | ret += to_copy; |
102 | skb_off += to_copy; | 104 | skb_off += to_copy; |
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 34fdcc059e54..a28b895ff0d1 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c | |||
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk) | |||
240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); | 240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); |
241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); | 241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); |
242 | 242 | ||
243 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 243 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) |
244 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | ||
245 | |||
244 | out: | 246 | out: |
245 | read_unlock(&sk->sk_callback_lock); | 247 | read_unlock(&sk->sk_callback_lock); |
246 | 248 | ||
diff --git a/net/rds/threads.c b/net/rds/threads.c index 00fa10e59af8..786c20eaaf5e 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -259,7 +259,7 @@ void rds_threads_exit(void) | |||
259 | 259 | ||
260 | int __init rds_threads_init(void) | 260 | int __init rds_threads_init(void) |
261 | { | 261 | { |
262 | rds_wq = create_singlethread_workqueue("krdsd"); | 262 | rds_wq = create_workqueue("krdsd"); |
263 | if (rds_wq == NULL) | 263 | if (rds_wq == NULL) |
264 | return -ENOMEM; | 264 | return -ENOMEM; |
265 | 265 | ||
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 7ae58b5b5a08..51875a0c5d48 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/wait.h> | 33 | #include <linux/wait.h> |
34 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
35 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
36 | #include <linux/slab.h> | ||
36 | 37 | ||
37 | #include "rfkill.h" | 38 | #include "rfkill.h" |
38 | 39 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index e90b9b6c16ae..8e45e76a95f5 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/socket.h> | 19 | #include <linux/socket.h> |
20 | #include <linux/in.h> | 20 | #include <linux/in.h> |
21 | #include <linux/slab.h> | ||
21 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
22 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
23 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
@@ -844,7 +845,7 @@ rose_try_next_neigh: | |||
844 | DEFINE_WAIT(wait); | 845 | DEFINE_WAIT(wait); |
845 | 846 | ||
846 | for (;;) { | 847 | for (;;) { |
847 | prepare_to_wait(sk->sk_sleep, &wait, | 848 | prepare_to_wait(sk_sleep(sk), &wait, |
848 | TASK_INTERRUPTIBLE); | 849 | TASK_INTERRUPTIBLE); |
849 | if (sk->sk_state != TCP_SYN_SENT) | 850 | if (sk->sk_state != TCP_SYN_SENT) |
850 | break; | 851 | break; |
@@ -857,7 +858,7 @@ rose_try_next_neigh: | |||
857 | err = -ERESTARTSYS; | 858 | err = -ERESTARTSYS; |
858 | break; | 859 | break; |
859 | } | 860 | } |
860 | finish_wait(sk->sk_sleep, &wait); | 861 | finish_wait(sk_sleep(sk), &wait); |
861 | 862 | ||
862 | if (err) | 863 | if (err) |
863 | goto out_release; | 864 | goto out_release; |
@@ -910,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags) | |||
910 | * hooked into the SABM we saved | 911 | * hooked into the SABM we saved |
911 | */ | 912 | */ |
912 | for (;;) { | 913 | for (;;) { |
913 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 914 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
914 | 915 | ||
915 | skb = skb_dequeue(&sk->sk_receive_queue); | 916 | skb = skb_dequeue(&sk->sk_receive_queue); |
916 | if (skb) | 917 | if (skb) |
@@ -929,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags) | |||
929 | err = -ERESTARTSYS; | 930 | err = -ERESTARTSYS; |
930 | break; | 931 | break; |
931 | } | 932 | } |
932 | finish_wait(sk->sk_sleep, &wait); | 933 | finish_wait(sk_sleep(sk), &wait); |
933 | if (err) | 934 | if (err) |
934 | goto out_release; | 935 | goto out_release; |
935 | 936 | ||
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 424b893d1450..178ff4f73c85 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/fcntl.h> | 19 | #include <linux/fcntl.h> |
20 | #include <linux/in.h> | 20 | #include <linux/in.h> |
21 | #include <linux/if_ether.h> | 21 | #include <linux/if_ether.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <asm/system.h> | 24 | #include <asm/system.h> |
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index 5ef5f6988a2e..a750a28e0221 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/sockios.h> | 17 | #include <linux/sockios.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/ax25.h> | 20 | #include <net/ax25.h> |
20 | #include <linux/inet.h> | 21 | #include <linux/inet.h> |
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 968e8bac1b5d..ae4a9d99aec7 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) | 7 | * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) |
8 | */ | 8 | */ |
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
11 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
12 | #include <net/ax25.h> | 13 | #include <net/ax25.h> |
diff --git a/net/rose/rose_out.c b/net/rose/rose_out.c index 69820f93414b..4ebf33afbe47 100644 --- a/net/rose/rose_out.c +++ b/net/rose/rose_out.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/sockios.h> | 16 | #include <linux/sockios.h> |
17 | #include <linux/net.h> | 17 | #include <linux/net.h> |
18 | #include <linux/gfp.h> | ||
18 | #include <net/ax25.h> | 19 | #include <net/ax25.h> |
19 | #include <linux/inet.h> | 20 | #include <linux/inet.h> |
20 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 70a0b3b4b4d2..cbc244a128bd 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/sockios.h> | 17 | #include <linux/sockios.h> |
18 | #include <linux/net.h> | 18 | #include <linux/net.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/ax25.h> | 20 | #include <net/ax25.h> |
20 | #include <linux/inet.h> | 21 | #include <linux/inet.h> |
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index b05108f382da..1734abba26a2 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/sockios.h> | 16 | #include <linux/sockios.h> |
17 | #include <linux/net.h> | 17 | #include <linux/net.h> |
18 | #include <linux/slab.h> | ||
18 | #include <net/ax25.h> | 19 | #include <net/ax25.h> |
19 | #include <linux/inet.h> | 20 | #include <linux/inet.h> |
20 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 287b1415cee9..0b9bb2085ce4 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/net.h> | 13 | #include <linux/net.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
15 | #include <linux/poll.h> | 16 | #include <linux/poll.h> |
16 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
@@ -61,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk) | |||
61 | static void rxrpc_write_space(struct sock *sk) | 62 | static void rxrpc_write_space(struct sock *sk) |
62 | { | 63 | { |
63 | _enter("%p", sk); | 64 | _enter("%p", sk); |
64 | read_lock(&sk->sk_callback_lock); | 65 | rcu_read_lock(); |
65 | if (rxrpc_writable(sk)) { | 66 | if (rxrpc_writable(sk)) { |
66 | if (sk_has_sleeper(sk)) | 67 | struct socket_wq *wq = rcu_dereference(sk->sk_wq); |
67 | wake_up_interruptible(sk->sk_sleep); | 68 | |
69 | if (wq_has_sleeper(wq)) | ||
70 | wake_up_interruptible(&wq->wait); | ||
68 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 71 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
69 | } | 72 | } |
70 | read_unlock(&sk->sk_callback_lock); | 73 | rcu_read_unlock(); |
71 | } | 74 | } |
72 | 75 | ||
73 | /* | 76 | /* |
@@ -588,7 +591,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock, | |||
588 | unsigned int mask; | 591 | unsigned int mask; |
589 | struct sock *sk = sock->sk; | 592 | struct sock *sk = sock->sk; |
590 | 593 | ||
591 | sock_poll_wait(file, sk->sk_sleep, wait); | 594 | sock_poll_wait(file, sk_sleep(sk), wait); |
592 | mask = 0; | 595 | mask = 0; |
593 | 596 | ||
594 | /* the socket is readable if there are any messages waiting on the Rx | 597 | /* the socket is readable if there are any messages waiting on the Rx |
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c index 77228f28fa36..6d79310fcaae 100644 --- a/net/rxrpc/ar-accept.c +++ b/net/rxrpc/ar-accept.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/in.h> | 17 | #include <linux/in.h> |
18 | #include <linux/in6.h> | 18 | #include <linux/in6.h> |
19 | #include <linux/icmp.h> | 19 | #include <linux/icmp.h> |
20 | #include <linux/gfp.h> | ||
20 | #include <net/sock.h> | 21 | #include <net/sock.h> |
21 | #include <net/af_rxrpc.h> | 22 | #include <net/af_rxrpc.h> |
22 | #include <net/ip.h> | 23 | #include <net/ip.h> |
@@ -88,6 +89,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, | |||
88 | 89 | ||
89 | /* get a notification message to send to the server app */ | 90 | /* get a notification message to send to the server app */ |
90 | notification = alloc_skb(0, GFP_NOFS); | 91 | notification = alloc_skb(0, GFP_NOFS); |
92 | if (!notification) { | ||
93 | _debug("no memory"); | ||
94 | ret = -ENOMEM; | ||
95 | goto error_nofree; | ||
96 | } | ||
91 | rxrpc_new_skb(notification); | 97 | rxrpc_new_skb(notification); |
92 | notification->mark = RXRPC_SKB_MARK_NEW_CALL; | 98 | notification->mark = RXRPC_SKB_MARK_NEW_CALL; |
93 | 99 | ||
@@ -189,6 +195,7 @@ invalid_service: | |||
189 | ret = -ECONNREFUSED; | 195 | ret = -ECONNREFUSED; |
190 | error: | 196 | error: |
191 | rxrpc_free_skb(notification); | 197 | rxrpc_free_skb(notification); |
198 | error_nofree: | ||
192 | _leave(" = %d", ret); | 199 | _leave(" = %d", ret); |
193 | return ret; | 200 | return ret; |
194 | } | 201 | } |
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index b4a220977031..2714da167fb8 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/circ_buf.h> | 13 | #include <linux/circ_buf.h> |
14 | #include <linux/net.h> | 14 | #include <linux/net.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/udp.h> | 17 | #include <linux/udp.h> |
17 | #include <net/sock.h> | 18 | #include <net/sock.h> |
18 | #include <net/af_rxrpc.h> | 19 | #include <net/af_rxrpc.h> |
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c index bc0019f704fe..909d092de9f4 100644 --- a/net/rxrpc/ar-call.c +++ b/net/rxrpc/ar-call.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/circ_buf.h> | 14 | #include <linux/circ_buf.h> |
14 | #include <net/sock.h> | 15 | #include <net/sock.h> |
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c index 9f1ce841a0bb..4106ca95ec86 100644 --- a/net/rxrpc/ar-connection.c +++ b/net/rxrpc/ar-connection.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/net.h> | 14 | #include <linux/net.h> |
14 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
15 | #include <linux/crypto.h> | 16 | #include <linux/crypto.h> |
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index f98c8027e5c1..89315009bab1 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/in.h> | 17 | #include <linux/in.h> |
18 | #include <linux/in6.h> | 18 | #include <linux/in6.h> |
19 | #include <linux/icmp.h> | 19 | #include <linux/icmp.h> |
20 | #include <linux/gfp.h> | ||
20 | #include <net/sock.h> | 21 | #include <net/sock.h> |
21 | #include <net/af_rxrpc.h> | 22 | #include <net/af_rxrpc.h> |
22 | #include <net/ip.h> | 23 | #include <net/ip.h> |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 74697b200496..5ee16f0353fe 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/key-type.h> | 18 | #include <linux/key-type.h> |
19 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> |
20 | #include <linux/ctype.h> | 20 | #include <linux/ctype.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/sock.h> | 22 | #include <net/sock.h> |
22 | #include <net/af_rxrpc.h> | 23 | #include <net/af_rxrpc.h> |
23 | #include <keys/rxrpc-type.h> | 24 | #include <keys/rxrpc-type.h> |
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c index 807535ff29b5..87f7135d238b 100644 --- a/net/rxrpc/ar-local.c +++ b/net/rxrpc/ar-local.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/net.h> | 13 | #include <linux/net.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | ||
15 | #include <net/sock.h> | 16 | #include <net/sock.h> |
16 | #include <net/af_rxrpc.h> | 17 | #include <net/af_rxrpc.h> |
17 | #include "ar-internal.h" | 18 | #include "ar-internal.h" |
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index cc9102c5b588..5f22e263eda7 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/net.h> | 12 | #include <linux/net.h> |
13 | #include <linux/gfp.h> | ||
13 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
14 | #include <linux/circ_buf.h> | 15 | #include <linux/circ_buf.h> |
15 | #include <net/sock.h> | 16 | #include <net/sock.h> |
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c index edc026c1eb76..f0f85b0123f7 100644 --- a/net/rxrpc/ar-peer.c +++ b/net/rxrpc/ar-peer.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/in.h> | 16 | #include <linux/in.h> |
17 | #include <linux/in6.h> | 17 | #include <linux/in6.h> |
18 | #include <linux/icmp.h> | 18 | #include <linux/icmp.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/sock.h> | 20 | #include <net/sock.h> |
20 | #include <net/af_rxrpc.h> | 21 | #include <net/af_rxrpc.h> |
21 | #include <net/ip.h> | 22 | #include <net/ip.h> |
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 60c2b94e6b54..0c65013e3bfe 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -91,7 +91,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
91 | 91 | ||
92 | /* wait for a message to turn up */ | 92 | /* wait for a message to turn up */ |
93 | release_sock(&rx->sk); | 93 | release_sock(&rx->sk); |
94 | prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait, | 94 | prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait, |
95 | TASK_INTERRUPTIBLE); | 95 | TASK_INTERRUPTIBLE); |
96 | ret = sock_error(&rx->sk); | 96 | ret = sock_error(&rx->sk); |
97 | if (ret) | 97 | if (ret) |
@@ -102,7 +102,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
102 | goto wait_interrupted; | 102 | goto wait_interrupted; |
103 | timeo = schedule_timeout(timeo); | 103 | timeo = schedule_timeout(timeo); |
104 | } | 104 | } |
105 | finish_wait(rx->sk.sk_sleep, &wait); | 105 | finish_wait(sk_sleep(&rx->sk), &wait); |
106 | lock_sock(&rx->sk); | 106 | lock_sock(&rx->sk); |
107 | continue; | 107 | continue; |
108 | } | 108 | } |
@@ -356,7 +356,7 @@ csum_copy_error: | |||
356 | wait_interrupted: | 356 | wait_interrupted: |
357 | ret = sock_intr_errno(timeo); | 357 | ret = sock_intr_errno(timeo); |
358 | wait_error: | 358 | wait_error: |
359 | finish_wait(rx->sk.sk_sleep, &wait); | 359 | finish_wait(sk_sleep(&rx->sk), &wait); |
360 | if (continue_call) | 360 | if (continue_call) |
361 | rxrpc_put_call(continue_call); | 361 | rxrpc_put_call(continue_call); |
362 | if (copied) | 362 | if (copied) |
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c index 0936e1acc30e..5e0226fe587e 100644 --- a/net/rxrpc/ar-transport.c +++ b/net/rxrpc/ar-transport.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/net.h> | 13 | #include <linux/net.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
15 | #include <linux/slab.h> | ||
15 | #include <net/sock.h> | 16 | #include <net/sock.h> |
16 | #include <net/af_rxrpc.h> | 17 | #include <net/af_rxrpc.h> |
17 | #include "ar-internal.h" | 18 | #include "ar-internal.h" |
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 713ac593e2e9..7635107726ce 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/crypto.h> | 16 | #include <linux/crypto.h> |
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | #include <linux/ctype.h> | 18 | #include <linux/ctype.h> |
19 | #include <linux/slab.h> | ||
19 | #include <net/sock.h> | 20 | #include <net/sock.h> |
20 | #include <net/af_rxrpc.h> | 21 | #include <net/af_rxrpc.h> |
21 | #include <keys/rxrpc-type.h> | 22 | #include <keys/rxrpc-type.h> |
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 21f9c7678aa3..2f691fb180d1 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -328,13 +328,16 @@ config NET_CLS_FLOW | |||
328 | module will be called cls_flow. | 328 | module will be called cls_flow. |
329 | 329 | ||
330 | config NET_CLS_CGROUP | 330 | config NET_CLS_CGROUP |
331 | bool "Control Group Classifier" | 331 | tristate "Control Group Classifier" |
332 | select NET_CLS | 332 | select NET_CLS |
333 | depends on CGROUPS | 333 | depends on CGROUPS |
334 | ---help--- | 334 | ---help--- |
335 | Say Y here if you want to classify packets based on the control | 335 | Say Y here if you want to classify packets based on the control |
336 | cgroup of their process. | 336 | cgroup of their process. |
337 | 337 | ||
338 | To compile this code as a module, choose M here: the | ||
339 | module will be called cls_cgroup. | ||
340 | |||
338 | config NET_EMATCH | 341 | config NET_EMATCH |
339 | bool "Extended Matches" | 342 | bool "Extended Matches" |
340 | select NET_CLS | 343 | select NET_CLS |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 64f5e328cee9..019045174fc3 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/slab.h> | ||
18 | #include <linux/skbuff.h> | 19 | #include <linux/skbuff.h> |
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/kmod.h> | 21 | #include <linux/kmod.h> |
@@ -667,7 +668,8 @@ nlmsg_failure: | |||
667 | } | 668 | } |
668 | 669 | ||
669 | static int | 670 | static int |
670 | act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) | 671 | act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, |
672 | struct tc_action *a, int event) | ||
671 | { | 673 | { |
672 | struct sk_buff *skb; | 674 | struct sk_buff *skb; |
673 | 675 | ||
@@ -679,7 +681,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) | |||
679 | return -EINVAL; | 681 | return -EINVAL; |
680 | } | 682 | } |
681 | 683 | ||
682 | return rtnl_unicast(skb, &init_net, pid); | 684 | return rtnl_unicast(skb, net, pid); |
683 | } | 685 | } |
684 | 686 | ||
685 | static struct tc_action * | 687 | static struct tc_action * |
@@ -749,7 +751,8 @@ static struct tc_action *create_a(int i) | |||
749 | return act; | 751 | return act; |
750 | } | 752 | } |
751 | 753 | ||
752 | static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | 754 | static int tca_action_flush(struct net *net, struct nlattr *nla, |
755 | struct nlmsghdr *n, u32 pid) | ||
753 | { | 756 | { |
754 | struct sk_buff *skb; | 757 | struct sk_buff *skb; |
755 | unsigned char *b; | 758 | unsigned char *b; |
@@ -808,7 +811,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | |||
808 | nlh->nlmsg_flags |= NLM_F_ROOT; | 811 | nlh->nlmsg_flags |= NLM_F_ROOT; |
809 | module_put(a->ops->owner); | 812 | module_put(a->ops->owner); |
810 | kfree(a); | 813 | kfree(a); |
811 | err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 814 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
812 | if (err > 0) | 815 | if (err > 0) |
813 | return 0; | 816 | return 0; |
814 | 817 | ||
@@ -825,7 +828,8 @@ noflush_out: | |||
825 | } | 828 | } |
826 | 829 | ||
827 | static int | 830 | static int |
828 | tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | 831 | tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, |
832 | u32 pid, int event) | ||
829 | { | 833 | { |
830 | int i, ret; | 834 | int i, ret; |
831 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; | 835 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; |
@@ -837,7 +841,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
837 | 841 | ||
838 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { | 842 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { |
839 | if (tb[1] != NULL) | 843 | if (tb[1] != NULL) |
840 | return tca_action_flush(tb[1], n, pid); | 844 | return tca_action_flush(net, tb[1], n, pid); |
841 | else | 845 | else |
842 | return -EINVAL; | 846 | return -EINVAL; |
843 | } | 847 | } |
@@ -858,7 +862,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
858 | } | 862 | } |
859 | 863 | ||
860 | if (event == RTM_GETACTION) | 864 | if (event == RTM_GETACTION) |
861 | ret = act_get_notify(pid, n, head, event); | 865 | ret = act_get_notify(net, pid, n, head, event); |
862 | else { /* delete */ | 866 | else { /* delete */ |
863 | struct sk_buff *skb; | 867 | struct sk_buff *skb; |
864 | 868 | ||
@@ -877,7 +881,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
877 | 881 | ||
878 | /* now do the delete */ | 882 | /* now do the delete */ |
879 | tcf_action_destroy(head, 0); | 883 | tcf_action_destroy(head, 0); |
880 | ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, | 884 | ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
881 | n->nlmsg_flags&NLM_F_ECHO); | 885 | n->nlmsg_flags&NLM_F_ECHO); |
882 | if (ret > 0) | 886 | if (ret > 0) |
883 | return 0; | 887 | return 0; |
@@ -888,8 +892,8 @@ err: | |||
888 | return ret; | 892 | return ret; |
889 | } | 893 | } |
890 | 894 | ||
891 | static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | 895 | static int tcf_add_notify(struct net *net, struct tc_action *a, |
892 | u16 flags) | 896 | u32 pid, u32 seq, int event, u16 flags) |
893 | { | 897 | { |
894 | struct tcamsg *t; | 898 | struct tcamsg *t; |
895 | struct nlmsghdr *nlh; | 899 | struct nlmsghdr *nlh; |
@@ -922,7 +926,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | |||
922 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 926 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
923 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; | 927 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; |
924 | 928 | ||
925 | err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); | 929 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); |
926 | if (err > 0) | 930 | if (err > 0) |
927 | err = 0; | 931 | err = 0; |
928 | return err; | 932 | return err; |
@@ -935,7 +939,8 @@ nlmsg_failure: | |||
935 | 939 | ||
936 | 940 | ||
937 | static int | 941 | static int |
938 | tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) | 942 | tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, |
943 | u32 pid, int ovr) | ||
939 | { | 944 | { |
940 | int ret = 0; | 945 | int ret = 0; |
941 | struct tc_action *act; | 946 | struct tc_action *act; |
@@ -953,7 +958,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) | |||
953 | /* dump then free all the actions after update; inserted policy | 958 | /* dump then free all the actions after update; inserted policy |
954 | * stays intact | 959 | * stays intact |
955 | * */ | 960 | * */ |
956 | ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); | 961 | ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); |
957 | for (a = act; a; a = act) { | 962 | for (a = act; a; a = act) { |
958 | act = a->next; | 963 | act = a->next; |
959 | kfree(a); | 964 | kfree(a); |
@@ -969,9 +974,6 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
969 | u32 pid = skb ? NETLINK_CB(skb).pid : 0; | 974 | u32 pid = skb ? NETLINK_CB(skb).pid : 0; |
970 | int ret = 0, ovr = 0; | 975 | int ret = 0, ovr = 0; |
971 | 976 | ||
972 | if (!net_eq(net, &init_net)) | ||
973 | return -EINVAL; | ||
974 | |||
975 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); | 977 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); |
976 | if (ret < 0) | 978 | if (ret < 0) |
977 | return ret; | 979 | return ret; |
@@ -994,15 +996,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
994 | if (n->nlmsg_flags&NLM_F_REPLACE) | 996 | if (n->nlmsg_flags&NLM_F_REPLACE) |
995 | ovr = 1; | 997 | ovr = 1; |
996 | replay: | 998 | replay: |
997 | ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr); | 999 | ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); |
998 | if (ret == -EAGAIN) | 1000 | if (ret == -EAGAIN) |
999 | goto replay; | 1001 | goto replay; |
1000 | break; | 1002 | break; |
1001 | case RTM_DELACTION: | 1003 | case RTM_DELACTION: |
1002 | ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION); | 1004 | ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, |
1005 | pid, RTM_DELACTION); | ||
1003 | break; | 1006 | break; |
1004 | case RTM_GETACTION: | 1007 | case RTM_GETACTION: |
1005 | ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION); | 1008 | ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, |
1009 | pid, RTM_GETACTION); | ||
1006 | break; | 1010 | break; |
1007 | default: | 1011 | default: |
1008 | BUG(); | 1012 | BUG(); |
@@ -1042,7 +1046,6 @@ find_dump_kind(const struct nlmsghdr *n) | |||
1042 | static int | 1046 | static int |
1043 | tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | 1047 | tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) |
1044 | { | 1048 | { |
1045 | struct net *net = sock_net(skb->sk); | ||
1046 | struct nlmsghdr *nlh; | 1049 | struct nlmsghdr *nlh; |
1047 | unsigned char *b = skb_tail_pointer(skb); | 1050 | unsigned char *b = skb_tail_pointer(skb); |
1048 | struct nlattr *nest; | 1051 | struct nlattr *nest; |
@@ -1052,9 +1055,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1052 | struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); | 1055 | struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); |
1053 | struct nlattr *kind = find_dump_kind(cb->nlh); | 1056 | struct nlattr *kind = find_dump_kind(cb->nlh); |
1054 | 1057 | ||
1055 | if (!net_eq(net, &init_net)) | ||
1056 | return 0; | ||
1057 | |||
1058 | if (kind == NULL) { | 1058 | if (kind == NULL) { |
1059 | printk("tc_dump_action: action bad kind\n"); | 1059 | printk("tc_dump_action: action bad kind\n"); |
1060 | return 0; | 1060 | return 0; |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 082c520b0def..da27a170b6b7 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/netlink.h> | 23 | #include <net/netlink.h> |
23 | #include <net/pkt_sched.h> | 24 | #include <net/pkt_sched.h> |
24 | #include <linux/tc_act/tc_ipt.h> | 25 | #include <linux/tc_act/tc_ipt.h> |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index d329170243cb..c046682054eb 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/gfp.h> | ||
23 | #include <net/net_namespace.h> | 24 | #include <net/net_namespace.h> |
24 | #include <net/netlink.h> | 25 | #include <net/netlink.h> |
25 | #include <net/pkt_sched.h> | 26 | #include <net/pkt_sched.h> |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 6b0359a500e6..b7dcfedc802e 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/rtnetlink.h> | 17 | #include <linux/rtnetlink.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/slab.h> | ||
20 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
21 | #include <net/pkt_sched.h> | 22 | #include <net/pkt_sched.h> |
22 | #include <linux/tc_act/tc_pedit.h> | 23 | #include <linux/tc_act/tc_pedit.h> |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 723964c3ee4f..654f73dff7c1 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/slab.h> | ||
21 | #include <net/act_api.h> | 22 | #include <net/act_api.h> |
22 | #include <net/netlink.h> | 23 | #include <net/netlink.h> |
23 | 24 | ||
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 8daa1ebc7413..622ca809c15c 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 3725d8fa29db..5fd0c28ef79a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/kmod.h> | 24 | #include <linux/kmod.h> |
25 | #include <linux/netlink.h> | 25 | #include <linux/netlink.h> |
26 | #include <linux/err.h> | 26 | #include <linux/err.h> |
27 | #include <linux/slab.h> | ||
27 | #include <net/net_namespace.h> | 28 | #include <net/net_namespace.h> |
28 | #include <net/sock.h> | 29 | #include <net/sock.h> |
29 | #include <net/netlink.h> | 30 | #include <net/netlink.h> |
@@ -98,8 +99,9 @@ out: | |||
98 | } | 99 | } |
99 | EXPORT_SYMBOL(unregister_tcf_proto_ops); | 100 | EXPORT_SYMBOL(unregister_tcf_proto_ops); |
100 | 101 | ||
101 | static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 102 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
102 | struct tcf_proto *tp, unsigned long fh, int event); | 103 | struct nlmsghdr *n, struct tcf_proto *tp, |
104 | unsigned long fh, int event); | ||
103 | 105 | ||
104 | 106 | ||
105 | /* Select new prio value from the range, managed by kernel. */ | 107 | /* Select new prio value from the range, managed by kernel. */ |
@@ -137,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
137 | int err; | 139 | int err; |
138 | int tp_created = 0; | 140 | int tp_created = 0; |
139 | 141 | ||
140 | if (!net_eq(net, &init_net)) | ||
141 | return -EINVAL; | ||
142 | |||
143 | replay: | 142 | replay: |
144 | t = NLMSG_DATA(n); | 143 | t = NLMSG_DATA(n); |
145 | protocol = TC_H_MIN(t->tcm_info); | 144 | protocol = TC_H_MIN(t->tcm_info); |
@@ -158,7 +157,7 @@ replay: | |||
158 | /* Find head of filter chain. */ | 157 | /* Find head of filter chain. */ |
159 | 158 | ||
160 | /* Find link */ | 159 | /* Find link */ |
161 | dev = __dev_get_by_index(&init_net, t->tcm_ifindex); | 160 | dev = __dev_get_by_index(net, t->tcm_ifindex); |
162 | if (dev == NULL) | 161 | if (dev == NULL) |
163 | return -ENODEV; | 162 | return -ENODEV; |
164 | 163 | ||
@@ -282,7 +281,7 @@ replay: | |||
282 | *back = tp->next; | 281 | *back = tp->next; |
283 | spin_unlock_bh(root_lock); | 282 | spin_unlock_bh(root_lock); |
284 | 283 | ||
285 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 284 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
286 | tcf_destroy(tp); | 285 | tcf_destroy(tp); |
287 | err = 0; | 286 | err = 0; |
288 | goto errout; | 287 | goto errout; |
@@ -305,10 +304,10 @@ replay: | |||
305 | case RTM_DELTFILTER: | 304 | case RTM_DELTFILTER: |
306 | err = tp->ops->delete(tp, fh); | 305 | err = tp->ops->delete(tp, fh); |
307 | if (err == 0) | 306 | if (err == 0) |
308 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 307 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
309 | goto errout; | 308 | goto errout; |
310 | case RTM_GETTFILTER: | 309 | case RTM_GETTFILTER: |
311 | err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 310 | err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
312 | goto errout; | 311 | goto errout; |
313 | default: | 312 | default: |
314 | err = -EINVAL; | 313 | err = -EINVAL; |
@@ -324,7 +323,7 @@ replay: | |||
324 | *back = tp; | 323 | *back = tp; |
325 | spin_unlock_bh(root_lock); | 324 | spin_unlock_bh(root_lock); |
326 | } | 325 | } |
327 | tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 326 | tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
328 | } else { | 327 | } else { |
329 | if (tp_created) | 328 | if (tp_created) |
330 | tcf_destroy(tp); | 329 | tcf_destroy(tp); |
@@ -370,8 +369,9 @@ nla_put_failure: | |||
370 | return -1; | 369 | return -1; |
371 | } | 370 | } |
372 | 371 | ||
373 | static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 372 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
374 | struct tcf_proto *tp, unsigned long fh, int event) | 373 | struct nlmsghdr *n, struct tcf_proto *tp, |
374 | unsigned long fh, int event) | ||
375 | { | 375 | { |
376 | struct sk_buff *skb; | 376 | struct sk_buff *skb; |
377 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 377 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -385,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
385 | return -EINVAL; | 385 | return -EINVAL; |
386 | } | 386 | } |
387 | 387 | ||
388 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, | 388 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
389 | n->nlmsg_flags & NLM_F_ECHO); | 389 | n->nlmsg_flags & NLM_F_ECHO); |
390 | } | 390 | } |
391 | 391 | ||
@@ -418,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
418 | const struct Qdisc_class_ops *cops; | 418 | const struct Qdisc_class_ops *cops; |
419 | struct tcf_dump_args arg; | 419 | struct tcf_dump_args arg; |
420 | 420 | ||
421 | if (!net_eq(net, &init_net)) | ||
422 | return 0; | ||
423 | |||
424 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 421 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
425 | return skb->len; | 422 | return skb->len; |
426 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 423 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
427 | return skb->len; | 424 | return skb->len; |
428 | 425 | ||
429 | if (!tcm->tcm_parent) | 426 | if (!tcm->tcm_parent) |
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 4e2bda854119..efd4f95fd050 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/string.h> | 16 | #include <linux/string.h> |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index e4877ca6727c..221180384fd7 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
15 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
@@ -24,6 +25,25 @@ struct cgroup_cls_state | |||
24 | u32 classid; | 25 | u32 classid; |
25 | }; | 26 | }; |
26 | 27 | ||
28 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | ||
29 | struct cgroup *cgrp); | ||
30 | static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp); | ||
31 | static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); | ||
32 | |||
33 | struct cgroup_subsys net_cls_subsys = { | ||
34 | .name = "net_cls", | ||
35 | .create = cgrp_create, | ||
36 | .destroy = cgrp_destroy, | ||
37 | .populate = cgrp_populate, | ||
38 | #ifdef CONFIG_NET_CLS_CGROUP | ||
39 | .subsys_id = net_cls_subsys_id, | ||
40 | #else | ||
41 | #define net_cls_subsys_id net_cls_subsys.subsys_id | ||
42 | #endif | ||
43 | .module = THIS_MODULE, | ||
44 | }; | ||
45 | |||
46 | |||
27 | static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) | 47 | static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) |
28 | { | 48 | { |
29 | return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), | 49 | return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), |
@@ -79,14 +99,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
79 | return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); | 99 | return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); |
80 | } | 100 | } |
81 | 101 | ||
82 | struct cgroup_subsys net_cls_subsys = { | ||
83 | .name = "net_cls", | ||
84 | .create = cgrp_create, | ||
85 | .destroy = cgrp_destroy, | ||
86 | .populate = cgrp_populate, | ||
87 | .subsys_id = net_cls_subsys_id, | ||
88 | }; | ||
89 | |||
90 | struct cls_cgroup_head | 102 | struct cls_cgroup_head |
91 | { | 103 | { |
92 | u32 handle; | 104 | u32 handle; |
@@ -277,12 +289,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { | |||
277 | 289 | ||
278 | static int __init init_cgroup_cls(void) | 290 | static int __init init_cgroup_cls(void) |
279 | { | 291 | { |
280 | return register_tcf_proto_ops(&cls_cgroup_ops); | 292 | int ret = register_tcf_proto_ops(&cls_cgroup_ops); |
293 | if (ret) | ||
294 | return ret; | ||
295 | ret = cgroup_load_subsys(&net_cls_subsys); | ||
296 | if (ret) | ||
297 | unregister_tcf_proto_ops(&cls_cgroup_ops); | ||
298 | return ret; | ||
281 | } | 299 | } |
282 | 300 | ||
283 | static void __exit exit_cgroup_cls(void) | 301 | static void __exit exit_cgroup_cls(void) |
284 | { | 302 | { |
285 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 303 | unregister_tcf_proto_ops(&cls_cgroup_ops); |
304 | cgroup_unload_subsys(&net_cls_subsys); | ||
286 | } | 305 | } |
287 | 306 | ||
288 | module_init(init_cgroup_cls); | 307 | module_init(init_cgroup_cls); |
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index e054c62857e1..6ed61b10e002 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/ip.h> | 20 | #include <linux/ip.h> |
21 | #include <linux/ipv6.h> | 21 | #include <linux/ipv6.h> |
22 | #include <linux/if_vlan.h> | 22 | #include <linux/if_vlan.h> |
23 | #include <linux/slab.h> | ||
23 | 24 | ||
24 | #include <net/pkt_cls.h> | 25 | #include <net/pkt_cls.h> |
25 | #include <net/ip.h> | 26 | #include <net/ip.h> |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 6d6e87585fb1..93b0a7b6f9b4 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -19,6 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/slab.h> | ||
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
24 | #include <linux/string.h> | 25 | #include <linux/string.h> |
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index dd872d5383ef..694dcd85dec8 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/string.h> | 16 | #include <linux/string.h> |
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index e806f2314b5e..20ef330bb918 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/skbuff.h> | 10 | #include <linux/skbuff.h> |
11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
12 | #include <linux/slab.h> | ||
12 | #include <net/act_api.h> | 13 | #include <net/act_api.h> |
13 | #include <net/netlink.h> | 14 | #include <net/netlink.h> |
14 | #include <net/pkt_cls.h> | 15 | #include <net/pkt_cls.h> |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 07372f60bee3..593eac056e8d 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -31,6 +31,7 @@ | |||
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/slab.h> | ||
34 | #include <linux/types.h> | 35 | #include <linux/types.h> |
35 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
36 | #include <linux/string.h> | 37 | #include <linux/string.h> |
@@ -772,10 +773,10 @@ static int __init init_u32(void) | |||
772 | printk(" Performance counters on\n"); | 773 | printk(" Performance counters on\n"); |
773 | #endif | 774 | #endif |
774 | #ifdef CONFIG_NET_CLS_IND | 775 | #ifdef CONFIG_NET_CLS_IND |
775 | printk(" input device check on \n"); | 776 | printk(" input device check on\n"); |
776 | #endif | 777 | #endif |
777 | #ifdef CONFIG_NET_CLS_ACT | 778 | #ifdef CONFIG_NET_CLS_ACT |
778 | printk(" Actions configured \n"); | 779 | printk(" Actions configured\n"); |
779 | #endif | 780 | #endif |
780 | return register_tcf_proto_ops(&cls_u32_ops); | 781 | return register_tcf_proto_ops(&cls_u32_ops); |
781 | } | 782 | } |
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 24dce8b648a4..3bcac8aa333c 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
@@ -58,6 +58,7 @@ | |||
58 | * only available if that subsystem is enabled in the kernel. | 58 | * only available if that subsystem is enabled in the kernel. |
59 | */ | 59 | */ |
60 | 60 | ||
61 | #include <linux/slab.h> | ||
61 | #include <linux/module.h> | 62 | #include <linux/module.h> |
62 | #include <linux/types.h> | 63 | #include <linux/types.h> |
63 | #include <linux/kernel.h> | 64 | #include <linux/kernel.h> |
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c index 370a1b2ea317..1a4176aee6e5 100644 --- a/net/sched/em_nbyte.c +++ b/net/sched/em_nbyte.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * Authors: Thomas Graf <tgraf@suug.ch> | 9 | * Authors: Thomas Graf <tgraf@suug.ch> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/gfp.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
diff --git a/net/sched/em_text.c b/net/sched/em_text.c index 853c5ead87fd..763253257411 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * Authors: Thomas Graf <tgraf@suug.ch> | 9 | * Authors: Thomas Graf <tgraf@suug.ch> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index aab59409728b..e782bdeedc58 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -82,6 +82,7 @@ | |||
82 | */ | 82 | */ |
83 | 83 | ||
84 | #include <linux/module.h> | 84 | #include <linux/module.h> |
85 | #include <linux/slab.h> | ||
85 | #include <linux/types.h> | 86 | #include <linux/types.h> |
86 | #include <linux/kernel.h> | 87 | #include <linux/kernel.h> |
87 | #include <linux/errno.h> | 88 | #include <linux/errno.h> |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 6cd491013b50..9839b26674f4 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -28,16 +28,19 @@ | |||
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/hrtimer.h> | 29 | #include <linux/hrtimer.h> |
30 | #include <linux/lockdep.h> | 30 | #include <linux/lockdep.h> |
31 | #include <linux/slab.h> | ||
31 | 32 | ||
32 | #include <net/net_namespace.h> | 33 | #include <net/net_namespace.h> |
33 | #include <net/sock.h> | 34 | #include <net/sock.h> |
34 | #include <net/netlink.h> | 35 | #include <net/netlink.h> |
35 | #include <net/pkt_sched.h> | 36 | #include <net/pkt_sched.h> |
36 | 37 | ||
37 | static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, | 38 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
39 | struct nlmsghdr *n, u32 clid, | ||
38 | struct Qdisc *old, struct Qdisc *new); | 40 | struct Qdisc *old, struct Qdisc *new); |
39 | static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 41 | static int tclass_notify(struct net *net, struct sk_buff *oskb, |
40 | struct Qdisc *q, unsigned long cl, int event); | 42 | struct nlmsghdr *n, struct Qdisc *q, |
43 | unsigned long cl, int event); | ||
41 | 44 | ||
42 | /* | 45 | /* |
43 | 46 | ||
@@ -638,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
638 | } | 641 | } |
639 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | 642 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); |
640 | 643 | ||
641 | static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid, | 644 | static void notify_and_destroy(struct net *net, struct sk_buff *skb, |
645 | struct nlmsghdr *n, u32 clid, | ||
642 | struct Qdisc *old, struct Qdisc *new) | 646 | struct Qdisc *old, struct Qdisc *new) |
643 | { | 647 | { |
644 | if (new || old) | 648 | if (new || old) |
645 | qdisc_notify(skb, n, clid, old, new); | 649 | qdisc_notify(net, skb, n, clid, old, new); |
646 | 650 | ||
647 | if (old) | 651 | if (old) |
648 | qdisc_destroy(old); | 652 | qdisc_destroy(old); |
@@ -662,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
662 | struct Qdisc *new, struct Qdisc *old) | 666 | struct Qdisc *new, struct Qdisc *old) |
663 | { | 667 | { |
664 | struct Qdisc *q = old; | 668 | struct Qdisc *q = old; |
669 | struct net *net = dev_net(dev); | ||
665 | int err = 0; | 670 | int err = 0; |
666 | 671 | ||
667 | if (parent == NULL) { | 672 | if (parent == NULL) { |
@@ -698,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
698 | } | 703 | } |
699 | 704 | ||
700 | if (!ingress) { | 705 | if (!ingress) { |
701 | notify_and_destroy(skb, n, classid, dev->qdisc, new); | 706 | notify_and_destroy(net, skb, n, classid, |
707 | dev->qdisc, new); | ||
702 | if (new && !new->ops->attach) | 708 | if (new && !new->ops->attach) |
703 | atomic_inc(&new->refcnt); | 709 | atomic_inc(&new->refcnt); |
704 | dev->qdisc = new ? : &noop_qdisc; | 710 | dev->qdisc = new ? : &noop_qdisc; |
705 | } else { | 711 | } else { |
706 | notify_and_destroy(skb, n, classid, old, new); | 712 | notify_and_destroy(net, skb, n, classid, old, new); |
707 | } | 713 | } |
708 | 714 | ||
709 | if (dev->flags & IFF_UP) | 715 | if (dev->flags & IFF_UP) |
@@ -721,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
721 | err = -ENOENT; | 727 | err = -ENOENT; |
722 | } | 728 | } |
723 | if (!err) | 729 | if (!err) |
724 | notify_and_destroy(skb, n, classid, old, new); | 730 | notify_and_destroy(net, skb, n, classid, old, new); |
725 | } | 731 | } |
726 | return err; | 732 | return err; |
727 | } | 733 | } |
@@ -947,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
947 | struct Qdisc *p = NULL; | 953 | struct Qdisc *p = NULL; |
948 | int err; | 954 | int err; |
949 | 955 | ||
950 | if (!net_eq(net, &init_net)) | 956 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
951 | return -EINVAL; | ||
952 | |||
953 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | ||
954 | return -ENODEV; | 957 | return -ENODEV; |
955 | 958 | ||
956 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 959 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -990,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
990 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) | 993 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) |
991 | return err; | 994 | return err; |
992 | } else { | 995 | } else { |
993 | qdisc_notify(skb, n, clid, NULL, q); | 996 | qdisc_notify(net, skb, n, clid, NULL, q); |
994 | } | 997 | } |
995 | return 0; | 998 | return 0; |
996 | } | 999 | } |
@@ -1009,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1009 | struct Qdisc *q, *p; | 1012 | struct Qdisc *q, *p; |
1010 | int err; | 1013 | int err; |
1011 | 1014 | ||
1012 | if (!net_eq(net, &init_net)) | ||
1013 | return -EINVAL; | ||
1014 | |||
1015 | replay: | 1015 | replay: |
1016 | /* Reinit, just in case something touches this. */ | 1016 | /* Reinit, just in case something touches this. */ |
1017 | tcm = NLMSG_DATA(n); | 1017 | tcm = NLMSG_DATA(n); |
1018 | clid = tcm->tcm_parent; | 1018 | clid = tcm->tcm_parent; |
1019 | q = p = NULL; | 1019 | q = p = NULL; |
1020 | 1020 | ||
1021 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 1021 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1022 | return -ENODEV; | 1022 | return -ENODEV; |
1023 | 1023 | ||
1024 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1024 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1105,7 +1105,7 @@ replay: | |||
1105 | return -EINVAL; | 1105 | return -EINVAL; |
1106 | err = qdisc_change(q, tca); | 1106 | err = qdisc_change(q, tca); |
1107 | if (err == 0) | 1107 | if (err == 0) |
1108 | qdisc_notify(skb, n, clid, NULL, q); | 1108 | qdisc_notify(net, skb, n, clid, NULL, q); |
1109 | return err; | 1109 | return err; |
1110 | 1110 | ||
1111 | create_n_graft: | 1111 | create_n_graft: |
@@ -1195,8 +1195,9 @@ nla_put_failure: | |||
1195 | return -1; | 1195 | return -1; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 1198 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
1199 | u32 clid, struct Qdisc *old, struct Qdisc *new) | 1199 | struct nlmsghdr *n, u32 clid, |
1200 | struct Qdisc *old, struct Qdisc *new) | ||
1200 | { | 1201 | { |
1201 | struct sk_buff *skb; | 1202 | struct sk_buff *skb; |
1202 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 1203 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -1215,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
1215 | } | 1216 | } |
1216 | 1217 | ||
1217 | if (skb->len) | 1218 | if (skb->len) |
1218 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1219 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
1219 | 1220 | ||
1220 | err_out: | 1221 | err_out: |
1221 | kfree_skb(skb); | 1222 | kfree_skb(skb); |
@@ -1274,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
1274 | int s_idx, s_q_idx; | 1275 | int s_idx, s_q_idx; |
1275 | struct net_device *dev; | 1276 | struct net_device *dev; |
1276 | 1277 | ||
1277 | if (!net_eq(net, &init_net)) | ||
1278 | return 0; | ||
1279 | |||
1280 | s_idx = cb->args[0]; | 1278 | s_idx = cb->args[0]; |
1281 | s_q_idx = q_idx = cb->args[1]; | 1279 | s_q_idx = q_idx = cb->args[1]; |
1282 | 1280 | ||
1283 | rcu_read_lock(); | 1281 | rcu_read_lock(); |
1284 | idx = 0; | 1282 | idx = 0; |
1285 | for_each_netdev_rcu(&init_net, dev) { | 1283 | for_each_netdev_rcu(net, dev) { |
1286 | struct netdev_queue *dev_queue; | 1284 | struct netdev_queue *dev_queue; |
1287 | 1285 | ||
1288 | if (idx < s_idx) | 1286 | if (idx < s_idx) |
@@ -1334,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1334 | u32 qid = TC_H_MAJ(clid); | 1332 | u32 qid = TC_H_MAJ(clid); |
1335 | int err; | 1333 | int err; |
1336 | 1334 | ||
1337 | if (!net_eq(net, &init_net)) | 1335 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1338 | return -EINVAL; | ||
1339 | |||
1340 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | ||
1341 | return -ENODEV; | 1336 | return -ENODEV; |
1342 | 1337 | ||
1343 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1338 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1418,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1418 | if (cops->delete) | 1413 | if (cops->delete) |
1419 | err = cops->delete(q, cl); | 1414 | err = cops->delete(q, cl); |
1420 | if (err == 0) | 1415 | if (err == 0) |
1421 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); | 1416 | tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS); |
1422 | goto out; | 1417 | goto out; |
1423 | case RTM_GETTCLASS: | 1418 | case RTM_GETTCLASS: |
1424 | err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); | 1419 | err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); |
1425 | goto out; | 1420 | goto out; |
1426 | default: | 1421 | default: |
1427 | err = -EINVAL; | 1422 | err = -EINVAL; |
@@ -1434,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1434 | if (cops->change) | 1429 | if (cops->change) |
1435 | err = cops->change(q, clid, pid, tca, &new_cl); | 1430 | err = cops->change(q, clid, pid, tca, &new_cl); |
1436 | if (err == 0) | 1431 | if (err == 0) |
1437 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); | 1432 | tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); |
1438 | 1433 | ||
1439 | out: | 1434 | out: |
1440 | if (cl) | 1435 | if (cl) |
@@ -1486,8 +1481,9 @@ nla_put_failure: | |||
1486 | return -1; | 1481 | return -1; |
1487 | } | 1482 | } |
1488 | 1483 | ||
1489 | static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 1484 | static int tclass_notify(struct net *net, struct sk_buff *oskb, |
1490 | struct Qdisc *q, unsigned long cl, int event) | 1485 | struct nlmsghdr *n, struct Qdisc *q, |
1486 | unsigned long cl, int event) | ||
1491 | { | 1487 | { |
1492 | struct sk_buff *skb; | 1488 | struct sk_buff *skb; |
1493 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 1489 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -1501,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
1501 | return -EINVAL; | 1497 | return -EINVAL; |
1502 | } | 1498 | } |
1503 | 1499 | ||
1504 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1500 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
1505 | } | 1501 | } |
1506 | 1502 | ||
1507 | struct qdisc_dump_args | 1503 | struct qdisc_dump_args |
@@ -1576,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1576 | struct net_device *dev; | 1572 | struct net_device *dev; |
1577 | int t, s_t; | 1573 | int t, s_t; |
1578 | 1574 | ||
1579 | if (!net_eq(net, &init_net)) | ||
1580 | return 0; | ||
1581 | |||
1582 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 1575 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
1583 | return 0; | 1576 | return 0; |
1584 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 1577 | if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1585 | return 0; | 1578 | return 0; |
1586 | 1579 | ||
1587 | s_t = cb->args[0]; | 1580 | s_t = cb->args[0]; |
@@ -1691,7 +1684,7 @@ static int psched_show(struct seq_file *seq, void *v) | |||
1691 | 1684 | ||
1692 | static int psched_open(struct inode *inode, struct file *file) | 1685 | static int psched_open(struct inode *inode, struct file *file) |
1693 | { | 1686 | { |
1694 | return single_open(file, psched_show, PDE(inode)->data); | 1687 | return single_open(file, psched_show, NULL); |
1695 | } | 1688 | } |
1696 | 1689 | ||
1697 | static const struct file_operations psched_fops = { | 1690 | static const struct file_operations psched_fops = { |
@@ -1701,15 +1694,53 @@ static const struct file_operations psched_fops = { | |||
1701 | .llseek = seq_lseek, | 1694 | .llseek = seq_lseek, |
1702 | .release = single_release, | 1695 | .release = single_release, |
1703 | }; | 1696 | }; |
1697 | |||
1698 | static int __net_init psched_net_init(struct net *net) | ||
1699 | { | ||
1700 | struct proc_dir_entry *e; | ||
1701 | |||
1702 | e = proc_net_fops_create(net, "psched", 0, &psched_fops); | ||
1703 | if (e == NULL) | ||
1704 | return -ENOMEM; | ||
1705 | |||
1706 | return 0; | ||
1707 | } | ||
1708 | |||
1709 | static void __net_exit psched_net_exit(struct net *net) | ||
1710 | { | ||
1711 | proc_net_remove(net, "psched"); | ||
1712 | } | ||
1713 | #else | ||
1714 | static int __net_init psched_net_init(struct net *net) | ||
1715 | { | ||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | static void __net_exit psched_net_exit(struct net *net) | ||
1720 | { | ||
1721 | } | ||
1704 | #endif | 1722 | #endif |
1705 | 1723 | ||
1724 | static struct pernet_operations psched_net_ops = { | ||
1725 | .init = psched_net_init, | ||
1726 | .exit = psched_net_exit, | ||
1727 | }; | ||
1728 | |||
1706 | static int __init pktsched_init(void) | 1729 | static int __init pktsched_init(void) |
1707 | { | 1730 | { |
1731 | int err; | ||
1732 | |||
1733 | err = register_pernet_subsys(&psched_net_ops); | ||
1734 | if (err) { | ||
1735 | printk(KERN_ERR "pktsched_init: " | ||
1736 | "cannot initialize per netns operations\n"); | ||
1737 | return err; | ||
1738 | } | ||
1739 | |||
1708 | register_qdisc(&pfifo_qdisc_ops); | 1740 | register_qdisc(&pfifo_qdisc_ops); |
1709 | register_qdisc(&bfifo_qdisc_ops); | 1741 | register_qdisc(&bfifo_qdisc_ops); |
1710 | register_qdisc(&pfifo_head_drop_qdisc_ops); | 1742 | register_qdisc(&pfifo_head_drop_qdisc_ops); |
1711 | register_qdisc(&mq_qdisc_ops); | 1743 | register_qdisc(&mq_qdisc_ops); |
1712 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); | ||
1713 | 1744 | ||
1714 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); | 1745 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); |
1715 | rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); | 1746 | rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); |
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index ab82f145f689..fcbb86a486a2 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -3,6 +3,7 @@ | |||
3 | /* Written 1998-2000 by Werner Almesberger, EPFL ICA */ | 3 | /* Written 1998-2000 by Werner Almesberger, EPFL ICA */ |
4 | 4 | ||
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/slab.h> | ||
6 | #include <linux/init.h> | 7 | #include <linux/init.h> |
7 | #include <linux/string.h> | 8 | #include <linux/string.h> |
8 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 3846d65bc03e..28c01ef5abc8 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/string.h> | 17 | #include <linux/string.h> |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index a65604f8f2b8..b74046a95397 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
14 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index d303daa45d49..63d41f86679c 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/slab.h> | ||
8 | #include <linux/types.h> | 9 | #include <linux/types.h> |
9 | #include <linux/string.h> | 10 | #include <linux/string.h> |
10 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 4b0a6cc44c77..5948bafa8ce2 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 5173c1e1b19c..a969b111bd76 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/slab.h> | ||
27 | #include <net/pkt_sched.h> | 28 | #include <net/pkt_sched.h> |
28 | 29 | ||
29 | /* Main transmission queue. */ | 30 | /* Main transmission queue. */ |
@@ -93,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
93 | * Another cpu is holding lock, requeue & delay xmits for | 94 | * Another cpu is holding lock, requeue & delay xmits for |
94 | * some time. | 95 | * some time. |
95 | */ | 96 | */ |
96 | __get_cpu_var(netdev_rx_stat).cpu_collision++; | 97 | __get_cpu_var(softnet_data).cpu_collision++; |
97 | ret = dev_requeue_skb(skb, q); | 98 | ret = dev_requeue_skb(skb, q); |
98 | } | 99 | } |
99 | 100 | ||
@@ -528,7 +529,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | |||
528 | unsigned int size; | 529 | unsigned int size; |
529 | int err = -ENOBUFS; | 530 | int err = -ENOBUFS; |
530 | 531 | ||
531 | /* ensure that the Qdisc and the private data are 32-byte aligned */ | 532 | /* ensure that the Qdisc and the private data are 64-byte aligned */ |
532 | size = QDISC_ALIGN(sizeof(*sch)); | 533 | size = QDISC_ALIGN(sizeof(*sch)); |
533 | size += ops->priv_size + (QDISC_ALIGNTO - 1); | 534 | size += ops->priv_size + (QDISC_ALIGNTO - 1); |
534 | 535 | ||
@@ -590,6 +591,13 @@ void qdisc_reset(struct Qdisc *qdisc) | |||
590 | } | 591 | } |
591 | EXPORT_SYMBOL(qdisc_reset); | 592 | EXPORT_SYMBOL(qdisc_reset); |
592 | 593 | ||
594 | static void qdisc_rcu_free(struct rcu_head *head) | ||
595 | { | ||
596 | struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head); | ||
597 | |||
598 | kfree((char *) qdisc - qdisc->padded); | ||
599 | } | ||
600 | |||
593 | void qdisc_destroy(struct Qdisc *qdisc) | 601 | void qdisc_destroy(struct Qdisc *qdisc) |
594 | { | 602 | { |
595 | const struct Qdisc_ops *ops = qdisc->ops; | 603 | const struct Qdisc_ops *ops = qdisc->ops; |
@@ -613,7 +621,11 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
613 | dev_put(qdisc_dev(qdisc)); | 621 | dev_put(qdisc_dev(qdisc)); |
614 | 622 | ||
615 | kfree_skb(qdisc->gso_skb); | 623 | kfree_skb(qdisc->gso_skb); |
616 | kfree((char *) qdisc - qdisc->padded); | 624 | /* |
625 | * gen_estimator est_timer() might access qdisc->q.lock, | ||
626 | * wait a RCU grace period before freeing qdisc. | ||
627 | */ | ||
628 | call_rcu(&qdisc->rcu_head, qdisc_rcu_free); | ||
617 | } | 629 | } |
618 | EXPORT_SYMBOL(qdisc_destroy); | 630 | EXPORT_SYMBOL(qdisc_destroy); |
619 | 631 | ||
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 40408d595c08..51dcc2aa5c92 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c | |||
@@ -18,6 +18,7 @@ | |||
18 | * For all the glorious comments look at include/net/red.h | 18 | * For all the glorious comments look at include/net/red.h |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/slab.h> | ||
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 508cf5f3a6d5..0b52b8de562c 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/compiler.h> | 36 | #include <linux/compiler.h> |
37 | #include <linux/rbtree.h> | 37 | #include <linux/rbtree.h> |
38 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
39 | #include <linux/slab.h> | ||
39 | #include <net/netlink.h> | 40 | #include <net/netlink.h> |
40 | #include <net/pkt_sched.h> | 41 | #include <net/pkt_sched.h> |
41 | 42 | ||
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index d1dea3d5dc92..b2aba3f5e6fa 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/string.h> | 14 | #include <linux/string.h> |
14 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 7db2c88ce585..c50876cd8704 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/slab.h> | ||
21 | #include <linux/types.h> | 22 | #include <linux/types.h> |
22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
23 | #include <linux/string.h> | 24 | #include <linux/string.h> |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index d8b10e054627..4714ff162bbd 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/types.h> | 18 | #include <linux/types.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
19 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 93285cecb246..81672e0c1b25 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/string.h> | 18 | #include <linux/string.h> |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index cb21380c0605..c65762823f5e 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/ipv6.h> | 20 | #include <linux/ipv6.h> |
21 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
22 | #include <linux/jhash.h> | 22 | #include <linux/jhash.h> |
23 | #include <linux/slab.h> | ||
23 | #include <net/ip.h> | 24 | #include <net/ip.h> |
24 | #include <net/netlink.h> | 25 | #include <net/netlink.h> |
25 | #include <net/pkt_sched.h> | 26 | #include <net/pkt_sched.h> |
@@ -122,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
122 | case htons(ETH_P_IP): | 123 | case htons(ETH_P_IP): |
123 | { | 124 | { |
124 | const struct iphdr *iph = ip_hdr(skb); | 125 | const struct iphdr *iph = ip_hdr(skb); |
125 | h = iph->daddr; | 126 | h = (__force u32)iph->daddr; |
126 | h2 = iph->saddr ^ iph->protocol; | 127 | h2 = (__force u32)iph->saddr ^ iph->protocol; |
127 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && | 128 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && |
128 | (iph->protocol == IPPROTO_TCP || | 129 | (iph->protocol == IPPROTO_TCP || |
129 | iph->protocol == IPPROTO_UDP || | 130 | iph->protocol == IPPROTO_UDP || |
@@ -137,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
137 | case htons(ETH_P_IPV6): | 138 | case htons(ETH_P_IPV6): |
138 | { | 139 | { |
139 | struct ipv6hdr *iph = ipv6_hdr(skb); | 140 | struct ipv6hdr *iph = ipv6_hdr(skb); |
140 | h = iph->daddr.s6_addr32[3]; | 141 | h = (__force u32)iph->daddr.s6_addr32[3]; |
141 | h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr; | 142 | h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; |
142 | if (iph->nexthdr == IPPROTO_TCP || | 143 | if (iph->nexthdr == IPPROTO_TCP || |
143 | iph->nexthdr == IPPROTO_UDP || | 144 | iph->nexthdr == IPPROTO_UDP || |
144 | iph->nexthdr == IPPROTO_UDPLITE || | 145 | iph->nexthdr == IPPROTO_UDPLITE || |
@@ -149,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
149 | break; | 150 | break; |
150 | } | 151 | } |
151 | default: | 152 | default: |
152 | h = (unsigned long)skb_dst(skb) ^ skb->protocol; | 153 | h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; |
153 | h2 = (unsigned long)skb->sk; | 154 | h2 = (unsigned long)skb->sk; |
154 | } | 155 | } |
155 | 156 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index db69637069c4..3415b6ce1c0a 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
15 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
16 | #include <linux/if_arp.h> | 17 | #include <linux/if_arp.h> |
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 58b3e882a187..126b014eb79b 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig | |||
@@ -37,6 +37,18 @@ menuconfig IP_SCTP | |||
37 | 37 | ||
38 | if IP_SCTP | 38 | if IP_SCTP |
39 | 39 | ||
40 | config NET_SCTPPROBE | ||
41 | tristate "SCTP: Association probing" | ||
42 | depends on PROC_FS && KPROBES | ||
43 | ---help--- | ||
44 | This module allows for capturing the changes to SCTP association | ||
45 | state in response to incoming packets. It is used for debugging | ||
46 | SCTP congestion control algorithms. If you don't understand | ||
47 | what was just said, you don't need it: say N. | ||
48 | |||
49 | To compile this code as a module, choose M here: the | ||
50 | module will be called sctp_probe. | ||
51 | |||
40 | config SCTP_DBG_MSG | 52 | config SCTP_DBG_MSG |
41 | bool "SCTP: Debug messages" | 53 | bool "SCTP: Debug messages" |
42 | help | 54 | help |
diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 6b794734380a..5c30b7a873df 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile | |||
@@ -3,6 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_IP_SCTP) += sctp.o | 5 | obj-$(CONFIG_IP_SCTP) += sctp.o |
6 | obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o | ||
6 | 7 | ||
7 | sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ | 8 | sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ |
8 | protocol.o endpointola.o associola.o \ | 9 | protocol.o endpointola.o associola.o \ |
@@ -11,6 +12,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ | |||
11 | tsnmap.o bind_addr.o socket.o primitive.o \ | 12 | tsnmap.o bind_addr.o socket.o primitive.o \ |
12 | output.o input.o debug.o ssnmap.o auth.o | 13 | output.o input.o debug.o ssnmap.o auth.o |
13 | 14 | ||
15 | sctp_probe-y := probe.o | ||
16 | |||
14 | sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o | 17 | sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o |
15 | sctp-$(CONFIG_PROC_FS) += proc.o | 18 | sctp-$(CONFIG_PROC_FS) += proc.o |
16 | sctp-$(CONFIG_SYSCTL) += sysctl.o | 19 | sctp-$(CONFIG_SYSCTL) += sysctl.o |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index df5abbff63e2..3912420cedcc 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -87,9 +87,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
87 | /* Retrieve the SCTP per socket area. */ | 87 | /* Retrieve the SCTP per socket area. */ |
88 | sp = sctp_sk((struct sock *)sk); | 88 | sp = sctp_sk((struct sock *)sk); |
89 | 89 | ||
90 | /* Init all variables to a known value. */ | ||
91 | memset(asoc, 0, sizeof(struct sctp_association)); | ||
92 | |||
93 | /* Discarding const is appropriate here. */ | 90 | /* Discarding const is appropriate here. */ |
94 | asoc->ep = (struct sctp_endpoint *)ep; | 91 | asoc->ep = (struct sctp_endpoint *)ep; |
95 | sctp_endpoint_hold(asoc->ep); | 92 | sctp_endpoint_hold(asoc->ep); |
@@ -762,7 +759,8 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | |||
762 | asoc->peer.retran_path = peer; | 759 | asoc->peer.retran_path = peer; |
763 | } | 760 | } |
764 | 761 | ||
765 | if (asoc->peer.active_path == asoc->peer.retran_path) { | 762 | if (asoc->peer.active_path == asoc->peer.retran_path && |
763 | peer->state != SCTP_UNCONFIRMED) { | ||
766 | asoc->peer.retran_path = peer; | 764 | asoc->peer.retran_path = peer; |
767 | } | 765 | } |
768 | 766 | ||
@@ -1194,8 +1192,10 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1194 | /* Remove any peer addresses not present in the new association. */ | 1192 | /* Remove any peer addresses not present in the new association. */ |
1195 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | 1193 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { |
1196 | trans = list_entry(pos, struct sctp_transport, transports); | 1194 | trans = list_entry(pos, struct sctp_transport, transports); |
1197 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) | 1195 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { |
1198 | sctp_assoc_del_peer(asoc, &trans->ipaddr); | 1196 | sctp_assoc_rm_peer(asoc, trans); |
1197 | continue; | ||
1198 | } | ||
1199 | 1199 | ||
1200 | if (asoc->state >= SCTP_STATE_ESTABLISHED) | 1200 | if (asoc->state >= SCTP_STATE_ESTABLISHED) |
1201 | sctp_transport_reset(trans); | 1201 | sctp_transport_reset(trans); |
@@ -1318,12 +1318,13 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc) | |||
1318 | /* Keep track of the next transport in case | 1318 | /* Keep track of the next transport in case |
1319 | * we don't find any active transport. | 1319 | * we don't find any active transport. |
1320 | */ | 1320 | */ |
1321 | if (!next) | 1321 | if (t->state != SCTP_UNCONFIRMED && !next) |
1322 | next = t; | 1322 | next = t; |
1323 | } | 1323 | } |
1324 | } | 1324 | } |
1325 | 1325 | ||
1326 | asoc->peer.retran_path = t; | 1326 | if (t) |
1327 | asoc->peer.retran_path = t; | ||
1327 | 1328 | ||
1328 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" | 1329 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" |
1329 | " %p addr: ", | 1330 | " %p addr: ", |
@@ -1483,7 +1484,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) | |||
1483 | if (asoc->rwnd >= len) { | 1484 | if (asoc->rwnd >= len) { |
1484 | asoc->rwnd -= len; | 1485 | asoc->rwnd -= len; |
1485 | if (over) { | 1486 | if (over) { |
1486 | asoc->rwnd_press = asoc->rwnd; | 1487 | asoc->rwnd_press += asoc->rwnd; |
1487 | asoc->rwnd = 0; | 1488 | asoc->rwnd = 0; |
1488 | } | 1489 | } |
1489 | } else { | 1490 | } else { |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 56935bbc1496..86366390038a 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -34,6 +34,7 @@ | |||
34 | * be incorporated into the next SCTP release. | 34 | * be incorporated into the next SCTP release. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/slab.h> | ||
37 | #include <linux/types.h> | 38 | #include <linux/types.h> |
38 | #include <linux/crypto.h> | 39 | #include <linux/crypto.h> |
39 | #include <linux/scatterlist.h> | 40 | #include <linux/scatterlist.h> |
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index bef133731683..faf71d179e46 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c | |||
@@ -43,6 +43,7 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/types.h> | 45 | #include <linux/types.h> |
46 | #include <linux/slab.h> | ||
46 | #include <linux/in.h> | 47 | #include <linux/in.h> |
47 | #include <net/sock.h> | 48 | #include <net/sock.h> |
48 | #include <net/ipv6.h> | 49 | #include <net/ipv6.h> |
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 8e4320040f05..476caaf100ed 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/net.h> | 42 | #include <linux/net.h> |
43 | #include <linux/inet.h> | 43 | #include <linux/inet.h> |
44 | #include <linux/skbuff.h> | 44 | #include <linux/skbuff.h> |
45 | #include <linux/slab.h> | ||
45 | #include <net/sock.h> | 46 | #include <net/sock.h> |
46 | #include <net/sctp/sctp.h> | 47 | #include <net/sctp/sctp.h> |
47 | #include <net/sctp/sm.h> | 48 | #include <net/sctp/sm.h> |
@@ -57,9 +58,9 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg) | |||
57 | msg->send_failed = 0; | 58 | msg->send_failed = 0; |
58 | msg->send_error = 0; | 59 | msg->send_error = 0; |
59 | msg->can_abandon = 0; | 60 | msg->can_abandon = 0; |
61 | msg->can_delay = 1; | ||
60 | msg->expires_at = 0; | 62 | msg->expires_at = 0; |
61 | INIT_LIST_HEAD(&msg->chunks); | 63 | INIT_LIST_HEAD(&msg->chunks); |
62 | msg->msg_size = 0; | ||
63 | } | 64 | } |
64 | 65 | ||
65 | /* Allocate and initialize datamsg. */ | 66 | /* Allocate and initialize datamsg. */ |
@@ -156,7 +157,6 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu | |||
156 | { | 157 | { |
157 | sctp_datamsg_hold(msg); | 158 | sctp_datamsg_hold(msg); |
158 | chunk->msg = msg; | 159 | chunk->msg = msg; |
159 | msg->msg_size += chunk->skb->len; | ||
160 | } | 160 | } |
161 | 161 | ||
162 | 162 | ||
@@ -246,6 +246,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
246 | if (msg_len >= first_len) { | 246 | if (msg_len >= first_len) { |
247 | msg_len -= first_len; | 247 | msg_len -= first_len; |
248 | whole = 1; | 248 | whole = 1; |
249 | msg->can_delay = 0; | ||
249 | } | 250 | } |
250 | 251 | ||
251 | /* How many full sized? How many bytes leftover? */ | 252 | /* How many full sized? How many bytes leftover? */ |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 905fda582b92..e10acc01c75f 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -70,8 +70,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
70 | struct sctp_shared_key *null_key; | 70 | struct sctp_shared_key *null_key; |
71 | int err; | 71 | int err; |
72 | 72 | ||
73 | memset(ep, 0, sizeof(struct sctp_endpoint)); | ||
74 | |||
75 | ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); | 73 | ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); |
76 | if (!ep->digest) | 74 | if (!ep->digest) |
77 | return NULL; | 75 | return NULL; |
@@ -144,6 +142,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
144 | /* Use SCTP specific send buffer space queues. */ | 142 | /* Use SCTP specific send buffer space queues. */ |
145 | ep->sndbuf_policy = sctp_sndbuf_policy; | 143 | ep->sndbuf_policy = sctp_sndbuf_policy; |
146 | 144 | ||
145 | sk->sk_data_ready = sctp_data_ready; | ||
147 | sk->sk_write_space = sctp_write_space; | 146 | sk->sk_write_space = sctp_write_space; |
148 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 147 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
149 | 148 | ||
diff --git a/net/sctp/input.c b/net/sctp/input.c index 3d74b264ea22..2a570184e5a9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <linux/socket.h> | 53 | #include <linux/socket.h> |
54 | #include <linux/ip.h> | 54 | #include <linux/ip.h> |
55 | #include <linux/time.h> /* For struct timeval */ | 55 | #include <linux/time.h> /* For struct timeval */ |
56 | #include <linux/slab.h> | ||
56 | #include <net/ip.h> | 57 | #include <net/ip.h> |
57 | #include <net/icmp.h> | 58 | #include <net/icmp.h> |
58 | #include <net/snmp.h> | 59 | #include <net/snmp.h> |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index bbf5dd2a97c4..ccb6dc48d15b 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <net/sctp/sctp.h> | 46 | #include <net/sctp/sctp.h> |
47 | #include <net/sctp/sm.h> | 47 | #include <net/sctp/sm.h> |
48 | #include <linux/interrupt.h> | 48 | #include <linux/interrupt.h> |
49 | #include <linux/slab.h> | ||
49 | 50 | ||
50 | /* Initialize an SCTP inqueue. */ | 51 | /* Initialize an SCTP inqueue. */ |
51 | void sctp_inq_init(struct sctp_inq *queue) | 52 | void sctp_inq_init(struct sctp_inq *queue) |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 1d7ac70ba39f..732689140fb8 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #include <linux/netdevice.h> | 58 | #include <linux/netdevice.h> |
59 | #include <linux/init.h> | 59 | #include <linux/init.h> |
60 | #include <linux/ipsec.h> | 60 | #include <linux/ipsec.h> |
61 | #include <linux/slab.h> | ||
61 | 62 | ||
62 | #include <linux/ipv6.h> | 63 | #include <linux/ipv6.h> |
63 | #include <linux/icmpv6.h> | 64 | #include <linux/icmpv6.h> |
@@ -231,7 +232,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
231 | if (!(transport->param_flags & SPP_PMTUD_ENABLE)) | 232 | if (!(transport->param_flags & SPP_PMTUD_ENABLE)) |
232 | skb->local_df = 1; | 233 | skb->local_df = 1; |
233 | 234 | ||
234 | return ip6_xmit(sk, skb, &fl, np->opt, 0); | 235 | return ip6_xmit(sk, skb, &fl, np->opt); |
235 | } | 236 | } |
236 | 237 | ||
237 | /* Returns the dst cache entry for the given source and destination ip | 238 | /* Returns the dst cache entry for the given source and destination ip |
@@ -276,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, | |||
276 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, | 277 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, |
277 | union sctp_addr *s2) | 278 | union sctp_addr *s2) |
278 | { | 279 | { |
279 | struct in6_addr *a1 = &s1->v6.sin6_addr; | 280 | return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr); |
280 | struct in6_addr *a2 = &s2->v6.sin6_addr; | ||
281 | int i, j; | ||
282 | |||
283 | for (i = 0; i < 4 ; i++) { | ||
284 | __be32 a1xora2; | ||
285 | |||
286 | a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i]; | ||
287 | |||
288 | if ((j = fls(ntohl(a1xora2)))) | ||
289 | return (i * 32 + 32 - j); | ||
290 | } | ||
291 | |||
292 | return (i*32); | ||
293 | } | 281 | } |
294 | 282 | ||
295 | /* Fills in the source address(saddr) based on the destination address(daddr) | 283 | /* Fills in the source address(saddr) based on the destination address(daddr) |
@@ -371,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, | |||
371 | } | 359 | } |
372 | 360 | ||
373 | read_lock_bh(&in6_dev->lock); | 361 | read_lock_bh(&in6_dev->lock); |
374 | for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { | 362 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
375 | /* Add the address to the local list. */ | 363 | /* Add the address to the local list. */ |
376 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); | 364 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); |
377 | if (addr) { | 365 | if (addr) { |
378 | addr->a.v6.sin6_family = AF_INET6; | 366 | addr->a.v6.sin6_family = AF_INET6; |
379 | addr->a.v6.sin6_port = 0; | 367 | addr->a.v6.sin6_port = 0; |
380 | addr->a.v6.sin6_addr = ifp->addr; | 368 | ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr); |
381 | addr->a.v6.sin6_scope_id = dev->ifindex; | 369 | addr->a.v6.sin6_scope_id = dev->ifindex; |
382 | addr->valid = 1; | 370 | addr->valid = 1; |
383 | INIT_LIST_HEAD(&addr->list); | 371 | INIT_LIST_HEAD(&addr->list); |
@@ -418,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) | |||
418 | { | 406 | { |
419 | addr->v6.sin6_family = AF_INET6; | 407 | addr->v6.sin6_family = AF_INET6; |
420 | addr->v6.sin6_port = 0; | 408 | addr->v6.sin6_port = 0; |
421 | addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; | 409 | ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr); |
422 | } | 410 | } |
423 | 411 | ||
424 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | 412 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ |
@@ -431,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | |||
431 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = | 419 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = |
432 | addr->v4.sin_addr.s_addr; | 420 | addr->v4.sin_addr.s_addr; |
433 | } else { | 421 | } else { |
434 | inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; | 422 | ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr); |
435 | } | 423 | } |
436 | } | 424 | } |
437 | 425 | ||
@@ -444,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | |||
444 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); | 432 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); |
445 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; | 433 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; |
446 | } else { | 434 | } else { |
447 | inet6_sk(sk)->daddr = addr->v6.sin6_addr; | 435 | ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr); |
448 | } | 436 | } |
449 | } | 437 | } |
450 | 438 | ||
diff --git a/net/sctp/output.c b/net/sctp/output.c index 7c5589363433..a646681f5acd 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/ip.h> | 48 | #include <linux/ip.h> |
49 | #include <linux/ipv6.h> | 49 | #include <linux/ipv6.h> |
50 | #include <linux/init.h> | 50 | #include <linux/init.h> |
51 | #include <linux/slab.h> | ||
51 | #include <net/inet_ecn.h> | 52 | #include <net/inet_ecn.h> |
52 | #include <net/ip.h> | 53 | #include <net/ip.h> |
53 | #include <net/icmp.h> | 54 | #include <net/icmp.h> |
@@ -428,24 +429,17 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
428 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { | 429 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
429 | list_del_init(&chunk->list); | 430 | list_del_init(&chunk->list); |
430 | if (sctp_chunk_is_data(chunk)) { | 431 | if (sctp_chunk_is_data(chunk)) { |
432 | /* 6.3.1 C4) When data is in flight and when allowed | ||
433 | * by rule C5, a new RTT measurement MUST be made each | ||
434 | * round trip. Furthermore, new RTT measurements | ||
435 | * SHOULD be made no more than once per round-trip | ||
436 | * for a given destination transport address. | ||
437 | */ | ||
431 | 438 | ||
432 | if (!chunk->resent) { | 439 | if (!tp->rto_pending) { |
433 | 440 | chunk->rtt_in_progress = 1; | |
434 | /* 6.3.1 C4) When data is in flight and when allowed | 441 | tp->rto_pending = 1; |
435 | * by rule C5, a new RTT measurement MUST be made each | ||
436 | * round trip. Furthermore, new RTT measurements | ||
437 | * SHOULD be made no more than once per round-trip | ||
438 | * for a given destination transport address. | ||
439 | */ | ||
440 | |||
441 | if (!tp->rto_pending) { | ||
442 | chunk->rtt_in_progress = 1; | ||
443 | tp->rto_pending = 1; | ||
444 | } | ||
445 | } | 442 | } |
446 | |||
447 | chunk->resent = 1; | ||
448 | |||
449 | has_data = 1; | 443 | has_data = 1; |
450 | } | 444 | } |
451 | 445 | ||
@@ -680,7 +674,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, | |||
680 | * Don't delay large message writes that may have been | 674 | * Don't delay large message writes that may have been |
681 | * fragmeneted into small peices. | 675 | * fragmeneted into small peices. |
682 | */ | 676 | */ |
683 | if ((len < max) && (chunk->msg->msg_size < max)) { | 677 | if ((len < max) && chunk->msg->can_delay) { |
684 | retval = SCTP_XMIT_NAGLE_DELAY; | 678 | retval = SCTP_XMIT_NAGLE_DELAY; |
685 | goto finish; | 679 | goto finish; |
686 | } | 680 | } |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 229690f02a1d..5d057178ce0c 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/list.h> /* For struct list_head */ | 50 | #include <linux/list.h> /* For struct list_head */ |
51 | #include <linux/socket.h> | 51 | #include <linux/socket.h> |
52 | #include <linux/ip.h> | 52 | #include <linux/ip.h> |
53 | #include <linux/slab.h> | ||
53 | #include <net/sock.h> /* For skb_set_owner_w */ | 54 | #include <net/sock.h> /* For skb_set_owner_w */ |
54 | 55 | ||
55 | #include <net/sctp/sctp.h> | 56 | #include <net/sctp/sctp.h> |
@@ -61,7 +62,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
61 | struct list_head *transmitted_queue, | 62 | struct list_head *transmitted_queue, |
62 | struct sctp_transport *transport, | 63 | struct sctp_transport *transport, |
63 | struct sctp_sackhdr *sack, | 64 | struct sctp_sackhdr *sack, |
64 | __u32 highest_new_tsn); | 65 | __u32 *highest_new_tsn); |
65 | 66 | ||
66 | static void sctp_mark_missing(struct sctp_outq *q, | 67 | static void sctp_mark_missing(struct sctp_outq *q, |
67 | struct list_head *transmitted_queue, | 68 | struct list_head *transmitted_queue, |
@@ -307,7 +308,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
307 | /* If it is data, queue it up, otherwise, send it | 308 | /* If it is data, queue it up, otherwise, send it |
308 | * immediately. | 309 | * immediately. |
309 | */ | 310 | */ |
310 | if (SCTP_CID_DATA == chunk->chunk_hdr->type) { | 311 | if (sctp_chunk_is_data(chunk)) { |
311 | /* Is it OK to queue data chunks? */ | 312 | /* Is it OK to queue data chunks? */ |
312 | /* From 9. Termination of Association | 313 | /* From 9. Termination of Association |
313 | * | 314 | * |
@@ -597,11 +598,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, | |||
597 | if (fast_rtx && !chunk->fast_retransmit) | 598 | if (fast_rtx && !chunk->fast_retransmit) |
598 | continue; | 599 | continue; |
599 | 600 | ||
601 | redo: | ||
600 | /* Attempt to append this chunk to the packet. */ | 602 | /* Attempt to append this chunk to the packet. */ |
601 | status = sctp_packet_append_chunk(pkt, chunk); | 603 | status = sctp_packet_append_chunk(pkt, chunk); |
602 | 604 | ||
603 | switch (status) { | 605 | switch (status) { |
604 | case SCTP_XMIT_PMTU_FULL: | 606 | case SCTP_XMIT_PMTU_FULL: |
607 | if (!pkt->has_data && !pkt->has_cookie_echo) { | ||
608 | /* If this packet did not contain DATA then | ||
609 | * retransmission did not happen, so do it | ||
610 | * again. We'll ignore the error here since | ||
611 | * control chunks are already freed so there | ||
612 | * is nothing we can do. | ||
613 | */ | ||
614 | sctp_packet_transmit(pkt); | ||
615 | goto redo; | ||
616 | } | ||
617 | |||
605 | /* Send this packet. */ | 618 | /* Send this packet. */ |
606 | error = sctp_packet_transmit(pkt); | 619 | error = sctp_packet_transmit(pkt); |
607 | 620 | ||
@@ -646,14 +659,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, | |||
646 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) | 659 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) |
647 | chunk->fast_retransmit = SCTP_DONT_FRTX; | 660 | chunk->fast_retransmit = SCTP_DONT_FRTX; |
648 | 661 | ||
649 | /* Force start T3-rtx timer when fast retransmitting | ||
650 | * the earliest outstanding TSN | ||
651 | */ | ||
652 | if (!timer && fast_rtx && | ||
653 | ntohl(chunk->subh.data_hdr->tsn) == | ||
654 | asoc->ctsn_ack_point + 1) | ||
655 | timer = 2; | ||
656 | |||
657 | q->empty = 0; | 662 | q->empty = 0; |
658 | break; | 663 | break; |
659 | } | 664 | } |
@@ -853,6 +858,12 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
853 | if (status != SCTP_XMIT_OK) { | 858 | if (status != SCTP_XMIT_OK) { |
854 | /* put the chunk back */ | 859 | /* put the chunk back */ |
855 | list_add(&chunk->list, &q->control_chunk_list); | 860 | list_add(&chunk->list, &q->control_chunk_list); |
861 | } else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) { | ||
862 | /* PR-SCTP C5) If a FORWARD TSN is sent, the | ||
863 | * sender MUST assure that at least one T3-rtx | ||
864 | * timer is running. | ||
865 | */ | ||
866 | sctp_transport_reset_timers(transport); | ||
856 | } | 867 | } |
857 | break; | 868 | break; |
858 | 869 | ||
@@ -905,8 +916,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
905 | rtx_timeout, &start_timer); | 916 | rtx_timeout, &start_timer); |
906 | 917 | ||
907 | if (start_timer) | 918 | if (start_timer) |
908 | sctp_transport_reset_timers(transport, | 919 | sctp_transport_reset_timers(transport); |
909 | start_timer-1); | ||
910 | 920 | ||
911 | /* This can happen on COOKIE-ECHO resend. Only | 921 | /* This can happen on COOKIE-ECHO resend. Only |
912 | * one chunk can get bundled with a COOKIE-ECHO. | 922 | * one chunk can get bundled with a COOKIE-ECHO. |
@@ -1039,7 +1049,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
1039 | list_add_tail(&chunk->transmitted_list, | 1049 | list_add_tail(&chunk->transmitted_list, |
1040 | &transport->transmitted); | 1050 | &transport->transmitted); |
1041 | 1051 | ||
1042 | sctp_transport_reset_timers(transport, 0); | 1052 | sctp_transport_reset_timers(transport); |
1043 | 1053 | ||
1044 | q->empty = 0; | 1054 | q->empty = 0; |
1045 | 1055 | ||
@@ -1099,32 +1109,6 @@ static void sctp_sack_update_unack_data(struct sctp_association *assoc, | |||
1099 | assoc->unack_data = unack_data; | 1109 | assoc->unack_data = unack_data; |
1100 | } | 1110 | } |
1101 | 1111 | ||
1102 | /* Return the highest new tsn that is acknowledged by the given SACK chunk. */ | ||
1103 | static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack, | ||
1104 | struct sctp_association *asoc) | ||
1105 | { | ||
1106 | struct sctp_transport *transport; | ||
1107 | struct sctp_chunk *chunk; | ||
1108 | __u32 highest_new_tsn, tsn; | ||
1109 | struct list_head *transport_list = &asoc->peer.transport_addr_list; | ||
1110 | |||
1111 | highest_new_tsn = ntohl(sack->cum_tsn_ack); | ||
1112 | |||
1113 | list_for_each_entry(transport, transport_list, transports) { | ||
1114 | list_for_each_entry(chunk, &transport->transmitted, | ||
1115 | transmitted_list) { | ||
1116 | tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
1117 | |||
1118 | if (!chunk->tsn_gap_acked && | ||
1119 | TSN_lt(highest_new_tsn, tsn) && | ||
1120 | sctp_acked(sack, tsn)) | ||
1121 | highest_new_tsn = tsn; | ||
1122 | } | ||
1123 | } | ||
1124 | |||
1125 | return highest_new_tsn; | ||
1126 | } | ||
1127 | |||
1128 | /* This is where we REALLY process a SACK. | 1112 | /* This is where we REALLY process a SACK. |
1129 | * | 1113 | * |
1130 | * Process the SACK against the outqueue. Mostly, this just frees | 1114 | * Process the SACK against the outqueue. Mostly, this just frees |
@@ -1144,6 +1128,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1144 | struct sctp_transport *primary = asoc->peer.primary_path; | 1128 | struct sctp_transport *primary = asoc->peer.primary_path; |
1145 | int count_of_newacks = 0; | 1129 | int count_of_newacks = 0; |
1146 | int gap_ack_blocks; | 1130 | int gap_ack_blocks; |
1131 | u8 accum_moved = 0; | ||
1147 | 1132 | ||
1148 | /* Grab the association's destination address list. */ | 1133 | /* Grab the association's destination address list. */ |
1149 | transport_list = &asoc->peer.transport_addr_list; | 1134 | transport_list = &asoc->peer.transport_addr_list; |
@@ -1192,18 +1177,15 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1192 | if (gap_ack_blocks) | 1177 | if (gap_ack_blocks) |
1193 | highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); | 1178 | highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); |
1194 | 1179 | ||
1195 | if (TSN_lt(asoc->highest_sacked, highest_tsn)) { | 1180 | if (TSN_lt(asoc->highest_sacked, highest_tsn)) |
1196 | highest_new_tsn = highest_tsn; | ||
1197 | asoc->highest_sacked = highest_tsn; | 1181 | asoc->highest_sacked = highest_tsn; |
1198 | } else { | ||
1199 | highest_new_tsn = sctp_highest_new_tsn(sack, asoc); | ||
1200 | } | ||
1201 | 1182 | ||
1183 | highest_new_tsn = sack_ctsn; | ||
1202 | 1184 | ||
1203 | /* Run through the retransmit queue. Credit bytes received | 1185 | /* Run through the retransmit queue. Credit bytes received |
1204 | * and free those chunks that we can. | 1186 | * and free those chunks that we can. |
1205 | */ | 1187 | */ |
1206 | sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn); | 1188 | sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn); |
1207 | 1189 | ||
1208 | /* Run through the transmitted queue. | 1190 | /* Run through the transmitted queue. |
1209 | * Credit bytes received and free those chunks which we can. | 1191 | * Credit bytes received and free those chunks which we can. |
@@ -1212,7 +1194,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1212 | */ | 1194 | */ |
1213 | list_for_each_entry(transport, transport_list, transports) { | 1195 | list_for_each_entry(transport, transport_list, transports) { |
1214 | sctp_check_transmitted(q, &transport->transmitted, | 1196 | sctp_check_transmitted(q, &transport->transmitted, |
1215 | transport, sack, highest_new_tsn); | 1197 | transport, sack, &highest_new_tsn); |
1216 | /* | 1198 | /* |
1217 | * SFR-CACC algorithm: | 1199 | * SFR-CACC algorithm: |
1218 | * C) Let count_of_newacks be the number of | 1200 | * C) Let count_of_newacks be the number of |
@@ -1222,16 +1204,22 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1222 | count_of_newacks ++; | 1204 | count_of_newacks ++; |
1223 | } | 1205 | } |
1224 | 1206 | ||
1207 | /* Move the Cumulative TSN Ack Point if appropriate. */ | ||
1208 | if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) { | ||
1209 | asoc->ctsn_ack_point = sack_ctsn; | ||
1210 | accum_moved = 1; | ||
1211 | } | ||
1212 | |||
1225 | if (gap_ack_blocks) { | 1213 | if (gap_ack_blocks) { |
1214 | |||
1215 | if (asoc->fast_recovery && accum_moved) | ||
1216 | highest_new_tsn = highest_tsn; | ||
1217 | |||
1226 | list_for_each_entry(transport, transport_list, transports) | 1218 | list_for_each_entry(transport, transport_list, transports) |
1227 | sctp_mark_missing(q, &transport->transmitted, transport, | 1219 | sctp_mark_missing(q, &transport->transmitted, transport, |
1228 | highest_new_tsn, count_of_newacks); | 1220 | highest_new_tsn, count_of_newacks); |
1229 | } | 1221 | } |
1230 | 1222 | ||
1231 | /* Move the Cumulative TSN Ack Point if appropriate. */ | ||
1232 | if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) | ||
1233 | asoc->ctsn_ack_point = sack_ctsn; | ||
1234 | |||
1235 | /* Update unack_data field in the assoc. */ | 1223 | /* Update unack_data field in the assoc. */ |
1236 | sctp_sack_update_unack_data(asoc, sack); | 1224 | sctp_sack_update_unack_data(asoc, sack); |
1237 | 1225 | ||
@@ -1314,7 +1302,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1314 | struct list_head *transmitted_queue, | 1302 | struct list_head *transmitted_queue, |
1315 | struct sctp_transport *transport, | 1303 | struct sctp_transport *transport, |
1316 | struct sctp_sackhdr *sack, | 1304 | struct sctp_sackhdr *sack, |
1317 | __u32 highest_new_tsn_in_sack) | 1305 | __u32 *highest_new_tsn_in_sack) |
1318 | { | 1306 | { |
1319 | struct list_head *lchunk; | 1307 | struct list_head *lchunk; |
1320 | struct sctp_chunk *tchunk; | 1308 | struct sctp_chunk *tchunk; |
@@ -1386,7 +1374,6 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1386 | * instance). | 1374 | * instance). |
1387 | */ | 1375 | */ |
1388 | if (!tchunk->tsn_gap_acked && | 1376 | if (!tchunk->tsn_gap_acked && |
1389 | !tchunk->resent && | ||
1390 | tchunk->rtt_in_progress) { | 1377 | tchunk->rtt_in_progress) { |
1391 | tchunk->rtt_in_progress = 0; | 1378 | tchunk->rtt_in_progress = 0; |
1392 | rtt = jiffies - tchunk->sent_at; | 1379 | rtt = jiffies - tchunk->sent_at; |
@@ -1403,6 +1390,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1403 | */ | 1390 | */ |
1404 | if (!tchunk->tsn_gap_acked) { | 1391 | if (!tchunk->tsn_gap_acked) { |
1405 | tchunk->tsn_gap_acked = 1; | 1392 | tchunk->tsn_gap_acked = 1; |
1393 | *highest_new_tsn_in_sack = tsn; | ||
1406 | bytes_acked += sctp_data_size(tchunk); | 1394 | bytes_acked += sctp_data_size(tchunk); |
1407 | if (!tchunk->transport) | 1395 | if (!tchunk->transport) |
1408 | migrate_bytes += sctp_data_size(tchunk); | 1396 | migrate_bytes += sctp_data_size(tchunk); |
@@ -1676,7 +1664,8 @@ static void sctp_mark_missing(struct sctp_outq *q, | |||
1676 | struct sctp_chunk *chunk; | 1664 | struct sctp_chunk *chunk; |
1677 | __u32 tsn; | 1665 | __u32 tsn; |
1678 | char do_fast_retransmit = 0; | 1666 | char do_fast_retransmit = 0; |
1679 | struct sctp_transport *primary = q->asoc->peer.primary_path; | 1667 | struct sctp_association *asoc = q->asoc; |
1668 | struct sctp_transport *primary = asoc->peer.primary_path; | ||
1680 | 1669 | ||
1681 | list_for_each_entry(chunk, transmitted_queue, transmitted_list) { | 1670 | list_for_each_entry(chunk, transmitted_queue, transmitted_list) { |
1682 | 1671 | ||
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index 8cb4f060bce6..534c7eae9d15 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/socket.h> | 50 | #include <linux/socket.h> |
51 | #include <linux/ip.h> | 51 | #include <linux/ip.h> |
52 | #include <linux/time.h> /* For struct timeval */ | 52 | #include <linux/time.h> /* For struct timeval */ |
53 | #include <linux/gfp.h> | ||
53 | #include <net/sock.h> | 54 | #include <net/sock.h> |
54 | #include <net/sctp/sctp.h> | 55 | #include <net/sctp/sctp.h> |
55 | #include <net/sctp/sm.h> | 56 | #include <net/sctp/sm.h> |
diff --git a/net/sctp/probe.c b/net/sctp/probe.c new file mode 100644 index 000000000000..db3a42b8b349 --- /dev/null +++ b/net/sctp/probe.c | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | * sctp_probe - Observe the SCTP flow with kprobes. | ||
3 | * | ||
4 | * The idea for this came from Werner Almesberger's umlsim | ||
5 | * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> | ||
6 | * | ||
7 | * Modified for SCTP from Stephen Hemminger's code | ||
8 | * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | */ | ||
24 | |||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/kprobes.h> | ||
27 | #include <linux/socket.h> | ||
28 | #include <linux/sctp.h> | ||
29 | #include <linux/proc_fs.h> | ||
30 | #include <linux/vmalloc.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/kfifo.h> | ||
33 | #include <linux/time.h> | ||
34 | #include <net/net_namespace.h> | ||
35 | |||
36 | #include <net/sctp/sctp.h> | ||
37 | #include <net/sctp/sm.h> | ||
38 | |||
39 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); | ||
40 | MODULE_DESCRIPTION("SCTP snooper"); | ||
41 | MODULE_LICENSE("GPL"); | ||
42 | |||
43 | static int port __read_mostly = 0; | ||
44 | MODULE_PARM_DESC(port, "Port to match (0=all)"); | ||
45 | module_param(port, int, 0); | ||
46 | |||
47 | static int bufsize __read_mostly = 64 * 1024; | ||
48 | MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); | ||
49 | module_param(bufsize, int, 0); | ||
50 | |||
51 | static int full __read_mostly = 1; | ||
52 | MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); | ||
53 | module_param(full, int, 0); | ||
54 | |||
55 | static const char procname[] = "sctpprobe"; | ||
56 | |||
57 | static struct { | ||
58 | struct kfifo fifo; | ||
59 | spinlock_t lock; | ||
60 | wait_queue_head_t wait; | ||
61 | struct timespec tstart; | ||
62 | } sctpw; | ||
63 | |||
64 | static void printl(const char *fmt, ...) | ||
65 | { | ||
66 | va_list args; | ||
67 | int len; | ||
68 | char tbuf[256]; | ||
69 | |||
70 | va_start(args, fmt); | ||
71 | len = vscnprintf(tbuf, sizeof(tbuf), fmt, args); | ||
72 | va_end(args); | ||
73 | |||
74 | kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); | ||
75 | wake_up(&sctpw.wait); | ||
76 | } | ||
77 | |||
78 | static int sctpprobe_open(struct inode *inode, struct file *file) | ||
79 | { | ||
80 | kfifo_reset(&sctpw.fifo); | ||
81 | getnstimeofday(&sctpw.tstart); | ||
82 | |||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static ssize_t sctpprobe_read(struct file *file, char __user *buf, | ||
87 | size_t len, loff_t *ppos) | ||
88 | { | ||
89 | int error = 0, cnt = 0; | ||
90 | unsigned char *tbuf; | ||
91 | |||
92 | if (!buf) | ||
93 | return -EINVAL; | ||
94 | |||
95 | if (len == 0) | ||
96 | return 0; | ||
97 | |||
98 | tbuf = vmalloc(len); | ||
99 | if (!tbuf) | ||
100 | return -ENOMEM; | ||
101 | |||
102 | error = wait_event_interruptible(sctpw.wait, | ||
103 | kfifo_len(&sctpw.fifo) != 0); | ||
104 | if (error) | ||
105 | goto out_free; | ||
106 | |||
107 | cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); | ||
108 | error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; | ||
109 | |||
110 | out_free: | ||
111 | vfree(tbuf); | ||
112 | |||
113 | return error ? error : cnt; | ||
114 | } | ||
115 | |||
116 | static const struct file_operations sctpprobe_fops = { | ||
117 | .owner = THIS_MODULE, | ||
118 | .open = sctpprobe_open, | ||
119 | .read = sctpprobe_read, | ||
120 | }; | ||
121 | |||
122 | sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, | ||
123 | const struct sctp_association *asoc, | ||
124 | const sctp_subtype_t type, | ||
125 | void *arg, | ||
126 | sctp_cmd_seq_t *commands) | ||
127 | { | ||
128 | struct sctp_transport *sp; | ||
129 | static __u32 lcwnd = 0; | ||
130 | struct timespec now; | ||
131 | |||
132 | sp = asoc->peer.primary_path; | ||
133 | |||
134 | if ((full || sp->cwnd != lcwnd) && | ||
135 | (!port || asoc->peer.port == port || | ||
136 | ep->base.bind_addr.port == port)) { | ||
137 | lcwnd = sp->cwnd; | ||
138 | |||
139 | getnstimeofday(&now); | ||
140 | now = timespec_sub(now, sctpw.tstart); | ||
141 | |||
142 | printl("%lu.%06lu ", (unsigned long) now.tv_sec, | ||
143 | (unsigned long) now.tv_nsec / NSEC_PER_USEC); | ||
144 | |||
145 | printl("%p %5d %5d %5d %8d %5d ", asoc, | ||
146 | ep->base.bind_addr.port, asoc->peer.port, | ||
147 | asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data); | ||
148 | |||
149 | list_for_each_entry(sp, &asoc->peer.transport_addr_list, | ||
150 | transports) { | ||
151 | if (sp == asoc->peer.primary_path) | ||
152 | printl("*"); | ||
153 | |||
154 | if (sp->ipaddr.sa.sa_family == AF_INET) | ||
155 | printl("%pI4 ", &sp->ipaddr.v4.sin_addr); | ||
156 | else | ||
157 | printl("%pI6 ", &sp->ipaddr.v6.sin6_addr); | ||
158 | |||
159 | printl("%2u %8u %8u %8u %8u %8u ", | ||
160 | sp->state, sp->cwnd, sp->ssthresh, | ||
161 | sp->flight_size, sp->partial_bytes_acked, | ||
162 | sp->pathmtu); | ||
163 | } | ||
164 | printl("\n"); | ||
165 | } | ||
166 | |||
167 | jprobe_return(); | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static struct jprobe sctp_recv_probe = { | ||
172 | .kp = { | ||
173 | .symbol_name = "sctp_sf_eat_sack_6_2", | ||
174 | }, | ||
175 | .entry = jsctp_sf_eat_sack, | ||
176 | }; | ||
177 | |||
178 | static __init int sctpprobe_init(void) | ||
179 | { | ||
180 | int ret = -ENOMEM; | ||
181 | |||
182 | init_waitqueue_head(&sctpw.wait); | ||
183 | spin_lock_init(&sctpw.lock); | ||
184 | if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL)) | ||
185 | return ret; | ||
186 | |||
187 | if (!proc_net_fops_create(&init_net, procname, S_IRUSR, | ||
188 | &sctpprobe_fops)) | ||
189 | goto free_kfifo; | ||
190 | |||
191 | ret = register_jprobe(&sctp_recv_probe); | ||
192 | if (ret) | ||
193 | goto remove_proc; | ||
194 | |||
195 | pr_info("SCTP probe registered (port=%d)\n", port); | ||
196 | |||
197 | return 0; | ||
198 | |||
199 | remove_proc: | ||
200 | proc_net_remove(&init_net, procname); | ||
201 | free_kfifo: | ||
202 | kfifo_free(&sctpw.fifo); | ||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | static __exit void sctpprobe_exit(void) | ||
207 | { | ||
208 | kfifo_free(&sctpw.fifo); | ||
209 | proc_net_remove(&init_net, procname); | ||
210 | unregister_jprobe(&sctp_recv_probe); | ||
211 | } | ||
212 | |||
213 | module_init(sctpprobe_init); | ||
214 | module_exit(sctpprobe_exit); | ||
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e771690f6d5d..182749867c72 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/bootmem.h> | 54 | #include <linux/bootmem.h> |
55 | #include <linux/highmem.h> | 55 | #include <linux/highmem.h> |
56 | #include <linux/swap.h> | 56 | #include <linux/swap.h> |
57 | #include <linux/slab.h> | ||
57 | #include <net/net_namespace.h> | 58 | #include <net/net_namespace.h> |
58 | #include <net/protocol.h> | 59 | #include <net/protocol.h> |
59 | #include <net/ip.h> | 60 | #include <net/ip.h> |
@@ -473,13 +474,17 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | |||
473 | 474 | ||
474 | memset(&fl, 0x0, sizeof(struct flowi)); | 475 | memset(&fl, 0x0, sizeof(struct flowi)); |
475 | fl.fl4_dst = daddr->v4.sin_addr.s_addr; | 476 | fl.fl4_dst = daddr->v4.sin_addr.s_addr; |
477 | fl.fl_ip_dport = daddr->v4.sin_port; | ||
476 | fl.proto = IPPROTO_SCTP; | 478 | fl.proto = IPPROTO_SCTP; |
477 | if (asoc) { | 479 | if (asoc) { |
478 | fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk); | 480 | fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk); |
479 | fl.oif = asoc->base.sk->sk_bound_dev_if; | 481 | fl.oif = asoc->base.sk->sk_bound_dev_if; |
482 | fl.fl_ip_sport = htons(asoc->base.bind_addr.port); | ||
480 | } | 483 | } |
481 | if (saddr) | 484 | if (saddr) { |
482 | fl.fl4_src = saddr->v4.sin_addr.s_addr; | 485 | fl.fl4_src = saddr->v4.sin_addr.s_addr; |
486 | fl.fl_ip_sport = saddr->v4.sin_port; | ||
487 | } | ||
483 | 488 | ||
484 | SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", | 489 | SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", |
485 | __func__, &fl.fl4_dst, &fl.fl4_src); | 490 | __func__, &fl.fl4_dst, &fl.fl4_src); |
@@ -527,6 +532,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | |||
527 | if ((laddr->state == SCTP_ADDR_SRC) && | 532 | if ((laddr->state == SCTP_ADDR_SRC) && |
528 | (AF_INET == laddr->a.sa.sa_family)) { | 533 | (AF_INET == laddr->a.sa.sa_family)) { |
529 | fl.fl4_src = laddr->a.v4.sin_addr.s_addr; | 534 | fl.fl4_src = laddr->a.v4.sin_addr.s_addr; |
535 | fl.fl_ip_sport = laddr->a.v4.sin_port; | ||
530 | if (!ip_route_output_key(&init_net, &rt, &fl)) { | 536 | if (!ip_route_output_key(&init_net, &rt, &fl)) { |
531 | dst = &rt->u.dst; | 537 | dst = &rt->u.dst; |
532 | goto out_unlock; | 538 | goto out_unlock; |
@@ -853,7 +859,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
853 | IP_PMTUDISC_DO : IP_PMTUDISC_DONT; | 859 | IP_PMTUDISC_DO : IP_PMTUDISC_DONT; |
854 | 860 | ||
855 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | 861 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); |
856 | return ip_queue_xmit(skb, 0); | 862 | return ip_queue_xmit(skb); |
857 | } | 863 | } |
858 | 864 | ||
859 | static struct sctp_af sctp_af_inet; | 865 | static struct sctp_af sctp_af_inet; |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 9e732916b671..d8261f3d7715 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #include <linux/inet.h> | 58 | #include <linux/inet.h> |
59 | #include <linux/scatterlist.h> | 59 | #include <linux/scatterlist.h> |
60 | #include <linux/crypto.h> | 60 | #include <linux/crypto.h> |
61 | #include <linux/slab.h> | ||
61 | #include <net/sock.h> | 62 | #include <net/sock.h> |
62 | 63 | ||
63 | #include <linux/skbuff.h> | 64 | #include <linux/skbuff.h> |
@@ -107,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = { | |||
107 | cpu_to_be16(sizeof(struct sctp_paramhdr)), | 108 | cpu_to_be16(sizeof(struct sctp_paramhdr)), |
108 | }; | 109 | }; |
109 | 110 | ||
110 | /* A helper to initialize to initialize an op error inside a | 111 | /* A helper to initialize an op error inside a |
111 | * provided chunk, as most cause codes will be embedded inside an | 112 | * provided chunk, as most cause codes will be embedded inside an |
112 | * abort chunk. | 113 | * abort chunk. |
113 | */ | 114 | */ |
@@ -124,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, | |||
124 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); | 125 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); |
125 | } | 126 | } |
126 | 127 | ||
128 | /* A helper to initialize an op error inside a | ||
129 | * provided chunk, as most cause codes will be embedded inside an | ||
130 | * abort chunk. Differs from sctp_init_cause in that it won't oops | ||
131 | * if there isn't enough space in the op error chunk | ||
132 | */ | ||
133 | int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, | ||
134 | size_t paylen) | ||
135 | { | ||
136 | sctp_errhdr_t err; | ||
137 | __u16 len; | ||
138 | |||
139 | /* Cause code constants are now defined in network order. */ | ||
140 | err.cause = cause_code; | ||
141 | len = sizeof(sctp_errhdr_t) + paylen; | ||
142 | err.length = htons(len); | ||
143 | |||
144 | if (skb_tailroom(chunk->skb) > len) | ||
145 | return -ENOSPC; | ||
146 | chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, | ||
147 | sizeof(sctp_errhdr_t), | ||
148 | &err); | ||
149 | return 0; | ||
150 | } | ||
127 | /* 3.3.2 Initiation (INIT) (1) | 151 | /* 3.3.2 Initiation (INIT) (1) |
128 | * | 152 | * |
129 | * This chunk is used to initiate a SCTP association between two | 153 | * This chunk is used to initiate a SCTP association between two |
@@ -207,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
207 | sp = sctp_sk(asoc->base.sk); | 231 | sp = sctp_sk(asoc->base.sk); |
208 | num_types = sp->pf->supported_addrs(sp, types); | 232 | num_types = sp->pf->supported_addrs(sp, types); |
209 | 233 | ||
210 | chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); | 234 | chunksize = sizeof(init) + addrs_len; |
235 | chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); | ||
211 | chunksize += sizeof(ecap_param); | 236 | chunksize += sizeof(ecap_param); |
212 | 237 | ||
213 | if (sctp_prsctp_enable) | 238 | if (sctp_prsctp_enable) |
@@ -237,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
237 | /* Add HMACS parameter length if any were defined */ | 262 | /* Add HMACS parameter length if any were defined */ |
238 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; | 263 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; |
239 | if (auth_hmacs->length) | 264 | if (auth_hmacs->length) |
240 | chunksize += ntohs(auth_hmacs->length); | 265 | chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); |
241 | else | 266 | else |
242 | auth_hmacs = NULL; | 267 | auth_hmacs = NULL; |
243 | 268 | ||
244 | /* Add CHUNKS parameter length */ | 269 | /* Add CHUNKS parameter length */ |
245 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; | 270 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; |
246 | if (auth_chunks->length) | 271 | if (auth_chunks->length) |
247 | chunksize += ntohs(auth_chunks->length); | 272 | chunksize += WORD_ROUND(ntohs(auth_chunks->length)); |
248 | else | 273 | else |
249 | auth_chunks = NULL; | 274 | auth_chunks = NULL; |
250 | 275 | ||
@@ -254,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
254 | 279 | ||
255 | /* If we have any extensions to report, account for that */ | 280 | /* If we have any extensions to report, account for that */ |
256 | if (num_ext) | 281 | if (num_ext) |
257 | chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; | 282 | chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + |
283 | num_ext); | ||
258 | 284 | ||
259 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | 285 | /* RFC 2960 3.3.2 Initiation (INIT) (1) |
260 | * | 286 | * |
@@ -396,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
396 | 422 | ||
397 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; | 423 | auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; |
398 | if (auth_hmacs->length) | 424 | if (auth_hmacs->length) |
399 | chunksize += ntohs(auth_hmacs->length); | 425 | chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); |
400 | else | 426 | else |
401 | auth_hmacs = NULL; | 427 | auth_hmacs = NULL; |
402 | 428 | ||
403 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; | 429 | auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; |
404 | if (auth_chunks->length) | 430 | if (auth_chunks->length) |
405 | chunksize += ntohs(auth_chunks->length); | 431 | chunksize += WORD_ROUND(ntohs(auth_chunks->length)); |
406 | else | 432 | else |
407 | auth_chunks = NULL; | 433 | auth_chunks = NULL; |
408 | 434 | ||
@@ -411,17 +437,25 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
411 | } | 437 | } |
412 | 438 | ||
413 | if (num_ext) | 439 | if (num_ext) |
414 | chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; | 440 | chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + |
441 | num_ext); | ||
415 | 442 | ||
416 | /* Now allocate and fill out the chunk. */ | 443 | /* Now allocate and fill out the chunk. */ |
417 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); | 444 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); |
418 | if (!retval) | 445 | if (!retval) |
419 | goto nomem_chunk; | 446 | goto nomem_chunk; |
420 | 447 | ||
421 | /* Per the advice in RFC 2960 6.4, send this reply to | 448 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints |
422 | * the source of the INIT packet. | 449 | * |
450 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
451 | * HEARTBEAT ACK, * etc.) to the same destination transport | ||
452 | * address from which it received the DATA or control chunk | ||
453 | * to which it is replying. | ||
454 | * | ||
455 | * [INIT ACK back to where the INIT came from.] | ||
423 | */ | 456 | */ |
424 | retval->transport = chunk->transport; | 457 | retval->transport = chunk->transport; |
458 | |||
425 | retval->subh.init_hdr = | 459 | retval->subh.init_hdr = |
426 | sctp_addto_chunk(retval, sizeof(initack), &initack); | 460 | sctp_addto_chunk(retval, sizeof(initack), &initack); |
427 | retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); | 461 | retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); |
@@ -460,18 +494,6 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | |||
460 | /* We need to remove the const qualifier at this point. */ | 494 | /* We need to remove the const qualifier at this point. */ |
461 | retval->asoc = (struct sctp_association *) asoc; | 495 | retval->asoc = (struct sctp_association *) asoc; |
462 | 496 | ||
463 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
464 | * | ||
465 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
466 | * HEARTBEAT ACK, * etc.) to the same destination transport | ||
467 | * address from which it received the DATA or control chunk | ||
468 | * to which it is replying. | ||
469 | * | ||
470 | * [INIT ACK back to where the INIT came from.] | ||
471 | */ | ||
472 | if (chunk) | ||
473 | retval->transport = chunk->transport; | ||
474 | |||
475 | nomem_chunk: | 497 | nomem_chunk: |
476 | kfree(cookie); | 498 | kfree(cookie); |
477 | nomem_cookie: | 499 | nomem_cookie: |
@@ -1128,6 +1150,24 @@ nodata: | |||
1128 | return retval; | 1150 | return retval; |
1129 | } | 1151 | } |
1130 | 1152 | ||
1153 | /* Create an Operation Error chunk of a fixed size, | ||
1154 | * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) | ||
1155 | * This is a helper function to allocate an error chunk for | ||
1156 | * for those invalid parameter codes in which we may not want | ||
1157 | * to report all the errors, if the incomming chunk is large | ||
1158 | */ | ||
1159 | static inline struct sctp_chunk *sctp_make_op_error_fixed( | ||
1160 | const struct sctp_association *asoc, | ||
1161 | const struct sctp_chunk *chunk) | ||
1162 | { | ||
1163 | size_t size = asoc ? asoc->pathmtu : 0; | ||
1164 | |||
1165 | if (!size) | ||
1166 | size = SCTP_DEFAULT_MAXSEGMENT; | ||
1167 | |||
1168 | return sctp_make_op_error_space(asoc, chunk, size); | ||
1169 | } | ||
1170 | |||
1131 | /* Create an Operation Error chunk. */ | 1171 | /* Create an Operation Error chunk. */ |
1132 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, | 1172 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, |
1133 | const struct sctp_chunk *chunk, | 1173 | const struct sctp_chunk *chunk, |
@@ -1209,7 +1249,6 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, | |||
1209 | INIT_LIST_HEAD(&retval->list); | 1249 | INIT_LIST_HEAD(&retval->list); |
1210 | retval->skb = skb; | 1250 | retval->skb = skb; |
1211 | retval->asoc = (struct sctp_association *)asoc; | 1251 | retval->asoc = (struct sctp_association *)asoc; |
1212 | retval->resent = 0; | ||
1213 | retval->has_tsn = 0; | 1252 | retval->has_tsn = 0; |
1214 | retval->has_ssn = 0; | 1253 | retval->has_ssn = 0; |
1215 | retval->rtt_in_progress = 0; | 1254 | retval->rtt_in_progress = 0; |
@@ -1370,6 +1409,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) | |||
1370 | return target; | 1409 | return target; |
1371 | } | 1410 | } |
1372 | 1411 | ||
1412 | /* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient | ||
1413 | * space in the chunk | ||
1414 | */ | ||
1415 | void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, | ||
1416 | int len, const void *data) | ||
1417 | { | ||
1418 | if (skb_tailroom(chunk->skb) > len) | ||
1419 | return sctp_addto_chunk(chunk, len, data); | ||
1420 | else | ||
1421 | return NULL; | ||
1422 | } | ||
1423 | |||
1373 | /* Append bytes from user space to the end of a chunk. Will panic if | 1424 | /* Append bytes from user space to the end of a chunk. Will panic if |
1374 | * chunk is not big enough. | 1425 | * chunk is not big enough. |
1375 | * Returns a kernel err value. | 1426 | * Returns a kernel err value. |
@@ -1973,13 +2024,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, | |||
1973 | * returning multiple unknown parameters. | 2024 | * returning multiple unknown parameters. |
1974 | */ | 2025 | */ |
1975 | if (NULL == *errp) | 2026 | if (NULL == *errp) |
1976 | *errp = sctp_make_op_error_space(asoc, chunk, | 2027 | *errp = sctp_make_op_error_fixed(asoc, chunk); |
1977 | ntohs(chunk->chunk_hdr->length)); | ||
1978 | 2028 | ||
1979 | if (*errp) { | 2029 | if (*errp) { |
1980 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 2030 | sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
1981 | WORD_ROUND(ntohs(param.p->length))); | 2031 | WORD_ROUND(ntohs(param.p->length))); |
1982 | sctp_addto_chunk(*errp, | 2032 | sctp_addto_chunk_fixed(*errp, |
1983 | WORD_ROUND(ntohs(param.p->length)), | 2033 | WORD_ROUND(ntohs(param.p->length)), |
1984 | param.v); | 2034 | param.v); |
1985 | } else { | 2035 | } else { |
@@ -3314,21 +3364,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
3314 | sctp_chunk_free(asconf); | 3364 | sctp_chunk_free(asconf); |
3315 | asoc->addip_last_asconf = NULL; | 3365 | asoc->addip_last_asconf = NULL; |
3316 | 3366 | ||
3317 | /* Send the next asconf chunk from the addip chunk queue. */ | ||
3318 | if (!list_empty(&asoc->addip_chunk_list)) { | ||
3319 | struct list_head *entry = asoc->addip_chunk_list.next; | ||
3320 | asconf = list_entry(entry, struct sctp_chunk, list); | ||
3321 | |||
3322 | list_del_init(entry); | ||
3323 | |||
3324 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
3325 | sctp_chunk_hold(asconf); | ||
3326 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
3327 | sctp_chunk_free(asconf); | ||
3328 | else | ||
3329 | asoc->addip_last_asconf = asconf; | ||
3330 | } | ||
3331 | |||
3332 | return retval; | 3367 | return retval; |
3333 | } | 3368 | } |
3334 | 3369 | ||
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 4e4ca65cd320..3b7230ef77c2 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/types.h> | 51 | #include <linux/types.h> |
52 | #include <linux/socket.h> | 52 | #include <linux/socket.h> |
53 | #include <linux/ip.h> | 53 | #include <linux/ip.h> |
54 | #include <linux/gfp.h> | ||
54 | #include <net/sock.h> | 55 | #include <net/sock.h> |
55 | #include <net/sctp/sctp.h> | 56 | #include <net/sctp/sctp.h> |
56 | #include <net/sctp/sm.h> | 57 | #include <net/sctp/sm.h> |
@@ -475,7 +476,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | |||
475 | * used to provide an upper bound to this doubling operation. | 476 | * used to provide an upper bound to this doubling operation. |
476 | * | 477 | * |
477 | * Special Case: the first HB doesn't trigger exponential backoff. | 478 | * Special Case: the first HB doesn't trigger exponential backoff. |
478 | * The first unacknowleged HB triggers it. We do this with a flag | 479 | * The first unacknowledged HB triggers it. We do this with a flag |
479 | * that indicates that we have an outstanding HB. | 480 | * that indicates that we have an outstanding HB. |
480 | */ | 481 | */ |
481 | if (!is_hb || transport->hb_sent) { | 482 | if (!is_hb || transport->hb_sent) { |
@@ -696,11 +697,15 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, | |||
696 | { | 697 | { |
697 | struct sctp_transport *t; | 698 | struct sctp_transport *t; |
698 | 699 | ||
699 | t = sctp_assoc_choose_alter_transport(asoc, | 700 | if (chunk->transport) |
701 | t = chunk->transport; | ||
702 | else { | ||
703 | t = sctp_assoc_choose_alter_transport(asoc, | ||
700 | asoc->shutdown_last_sent_to); | 704 | asoc->shutdown_last_sent_to); |
705 | chunk->transport = t; | ||
706 | } | ||
701 | asoc->shutdown_last_sent_to = t; | 707 | asoc->shutdown_last_sent_to = t; |
702 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | 708 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; |
703 | chunk->transport = t; | ||
704 | } | 709 | } |
705 | 710 | ||
706 | /* Helper function to change the state of an association. */ | 711 | /* Helper function to change the state of an association. */ |
@@ -961,6 +966,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc, | |||
961 | } | 966 | } |
962 | 967 | ||
963 | 968 | ||
969 | /* Sent the next ASCONF packet currently stored in the association. | ||
970 | * This happens after the ASCONF_ACK was succeffully processed. | ||
971 | */ | ||
972 | static void sctp_cmd_send_asconf(struct sctp_association *asoc) | ||
973 | { | ||
974 | /* Send the next asconf chunk from the addip chunk | ||
975 | * queue. | ||
976 | */ | ||
977 | if (!list_empty(&asoc->addip_chunk_list)) { | ||
978 | struct list_head *entry = asoc->addip_chunk_list.next; | ||
979 | struct sctp_chunk *asconf = list_entry(entry, | ||
980 | struct sctp_chunk, list); | ||
981 | list_del_init(entry); | ||
982 | |||
983 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
984 | sctp_chunk_hold(asconf); | ||
985 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
986 | sctp_chunk_free(asconf); | ||
987 | else | ||
988 | asoc->addip_last_asconf = asconf; | ||
989 | } | ||
990 | } | ||
991 | |||
964 | 992 | ||
965 | /* These three macros allow us to pull the debugging code out of the | 993 | /* These three macros allow us to pull the debugging code out of the |
966 | * main flow of sctp_do_sm() to keep attention focused on the real | 994 | * main flow of sctp_do_sm() to keep attention focused on the real |
@@ -1616,6 +1644,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1616 | } | 1644 | } |
1617 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); | 1645 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); |
1618 | break; | 1646 | break; |
1647 | case SCTP_CMD_SEND_NEXT_ASCONF: | ||
1648 | sctp_cmd_send_asconf(asoc); | ||
1649 | break; | ||
1619 | default: | 1650 | default: |
1620 | printk(KERN_WARNING "Impossible command: %u, %p\n", | 1651 | printk(KERN_WARNING "Impossible command: %u, %p\n", |
1621 | cmd->verb, cmd->obj.ptr); | 1652 | cmd->verb, cmd->obj.ptr); |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 47bc20d3a85b..24b2cd555637 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/ipv6.h> | 56 | #include <linux/ipv6.h> |
57 | #include <linux/net.h> | 57 | #include <linux/net.h> |
58 | #include <linux/inet.h> | 58 | #include <linux/inet.h> |
59 | #include <linux/slab.h> | ||
59 | #include <net/sock.h> | 60 | #include <net/sock.h> |
60 | #include <net/inet_ecn.h> | 61 | #include <net/inet_ecn.h> |
61 | #include <linux/skbuff.h> | 62 | #include <linux/skbuff.h> |
@@ -3675,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3675 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | 3676 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
3676 | 3677 | ||
3677 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, | 3678 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, |
3678 | asconf_ack)) | 3679 | asconf_ack)) { |
3680 | /* Successfully processed ASCONF_ACK. We can | ||
3681 | * release the next asconf if we have one. | ||
3682 | */ | ||
3683 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, | ||
3684 | SCTP_NULL()); | ||
3679 | return SCTP_DISPOSITION_CONSUME; | 3685 | return SCTP_DISPOSITION_CONSUME; |
3686 | } | ||
3680 | 3687 | ||
3681 | abort = sctp_make_abort(asoc, asconf_ack, | 3688 | abort = sctp_make_abort(asoc, asconf_ack, |
3682 | sizeof(sctp_errhdr_t)); | 3689 | sizeof(sctp_errhdr_t)); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index dfc5c127efd4..ba1add0b13c3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -67,6 +67,7 @@ | |||
67 | #include <linux/poll.h> | 67 | #include <linux/poll.h> |
68 | #include <linux/init.h> | 68 | #include <linux/init.h> |
69 | #include <linux/crypto.h> | 69 | #include <linux/crypto.h> |
70 | #include <linux/slab.h> | ||
70 | 71 | ||
71 | #include <net/ip.h> | 72 | #include <net/ip.h> |
72 | #include <net/icmp.h> | 73 | #include <net/icmp.h> |
@@ -3718,12 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3718 | sp->hmac = NULL; | 3719 | sp->hmac = NULL; |
3719 | 3720 | ||
3720 | SCTP_DBG_OBJCNT_INC(sock); | 3721 | SCTP_DBG_OBJCNT_INC(sock); |
3721 | percpu_counter_inc(&sctp_sockets_allocated); | ||
3722 | |||
3723 | /* Set socket backlog limit. */ | ||
3724 | sk->sk_backlog.limit = sysctl_sctp_rmem[1]; | ||
3725 | 3722 | ||
3726 | local_bh_disable(); | 3723 | local_bh_disable(); |
3724 | percpu_counter_inc(&sctp_sockets_allocated); | ||
3727 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | 3725 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); |
3728 | local_bh_enable(); | 3726 | local_bh_enable(); |
3729 | 3727 | ||
@@ -3740,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) | |||
3740 | /* Release our hold on the endpoint. */ | 3738 | /* Release our hold on the endpoint. */ |
3741 | ep = sctp_sk(sk)->ep; | 3739 | ep = sctp_sk(sk)->ep; |
3742 | sctp_endpoint_free(ep); | 3740 | sctp_endpoint_free(ep); |
3743 | percpu_counter_dec(&sctp_sockets_allocated); | ||
3744 | local_bh_disable(); | 3741 | local_bh_disable(); |
3742 | percpu_counter_dec(&sctp_sockets_allocated); | ||
3745 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | 3743 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
3746 | local_bh_enable(); | 3744 | local_bh_enable(); |
3747 | } | 3745 | } |
@@ -4386,7 +4384,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, | |||
4386 | transports) { | 4384 | transports) { |
4387 | memcpy(&temp, &from->ipaddr, sizeof(temp)); | 4385 | memcpy(&temp, &from->ipaddr, sizeof(temp)); |
4388 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 4386 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
4389 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; | 4387 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
4390 | if (space_left < addrlen) | 4388 | if (space_left < addrlen) |
4391 | return -ENOMEM; | 4389 | return -ENOMEM; |
4392 | if (copy_to_user(to, &temp, addrlen)) | 4390 | if (copy_to_user(to, &temp, addrlen)) |
@@ -5481,7 +5479,6 @@ pp_found: | |||
5481 | */ | 5479 | */ |
5482 | int reuse = sk->sk_reuse; | 5480 | int reuse = sk->sk_reuse; |
5483 | struct sock *sk2; | 5481 | struct sock *sk2; |
5484 | struct hlist_node *node; | ||
5485 | 5482 | ||
5486 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); | 5483 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); |
5487 | if (pp->fastreuse && sk->sk_reuse && | 5484 | if (pp->fastreuse && sk->sk_reuse && |
@@ -5702,7 +5699,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
5702 | struct sctp_sock *sp = sctp_sk(sk); | 5699 | struct sctp_sock *sp = sctp_sk(sk); |
5703 | unsigned int mask; | 5700 | unsigned int mask; |
5704 | 5701 | ||
5705 | poll_wait(file, sk->sk_sleep, wait); | 5702 | poll_wait(file, sk_sleep(sk), wait); |
5706 | 5703 | ||
5707 | /* A TCP-style listening socket becomes readable when the accept queue | 5704 | /* A TCP-style listening socket becomes readable when the accept queue |
5708 | * is not empty. | 5705 | * is not empty. |
@@ -5943,7 +5940,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) | |||
5943 | int error; | 5940 | int error; |
5944 | DEFINE_WAIT(wait); | 5941 | DEFINE_WAIT(wait); |
5945 | 5942 | ||
5946 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 5943 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
5947 | 5944 | ||
5948 | /* Socket errors? */ | 5945 | /* Socket errors? */ |
5949 | error = sock_error(sk); | 5946 | error = sock_error(sk); |
@@ -5980,14 +5977,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) | |||
5980 | sctp_lock_sock(sk); | 5977 | sctp_lock_sock(sk); |
5981 | 5978 | ||
5982 | ready: | 5979 | ready: |
5983 | finish_wait(sk->sk_sleep, &wait); | 5980 | finish_wait(sk_sleep(sk), &wait); |
5984 | return 0; | 5981 | return 0; |
5985 | 5982 | ||
5986 | interrupted: | 5983 | interrupted: |
5987 | error = sock_intr_errno(*timeo_p); | 5984 | error = sock_intr_errno(*timeo_p); |
5988 | 5985 | ||
5989 | out: | 5986 | out: |
5990 | finish_wait(sk->sk_sleep, &wait); | 5987 | finish_wait(sk_sleep(sk), &wait); |
5991 | *err = error; | 5988 | *err = error; |
5992 | return error; | 5989 | return error; |
5993 | } | 5990 | } |
@@ -6061,14 +6058,14 @@ static void __sctp_write_space(struct sctp_association *asoc) | |||
6061 | wake_up_interruptible(&asoc->wait); | 6058 | wake_up_interruptible(&asoc->wait); |
6062 | 6059 | ||
6063 | if (sctp_writeable(sk)) { | 6060 | if (sctp_writeable(sk)) { |
6064 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 6061 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
6065 | wake_up_interruptible(sk->sk_sleep); | 6062 | wake_up_interruptible(sk_sleep(sk)); |
6066 | 6063 | ||
6067 | /* Note that we try to include the Async I/O support | 6064 | /* Note that we try to include the Async I/O support |
6068 | * here by modeling from the current TCP/UDP code. | 6065 | * here by modeling from the current TCP/UDP code. |
6069 | * We have not tested with it yet. | 6066 | * We have not tested with it yet. |
6070 | */ | 6067 | */ |
6071 | if (sock->fasync_list && | 6068 | if (sock->wq->fasync_list && |
6072 | !(sk->sk_shutdown & SEND_SHUTDOWN)) | 6069 | !(sk->sk_shutdown & SEND_SHUTDOWN)) |
6073 | sock_wake_async(sock, | 6070 | sock_wake_async(sock, |
6074 | SOCK_WAKE_SPACE, POLL_OUT); | 6071 | SOCK_WAKE_SPACE, POLL_OUT); |
@@ -6188,6 +6185,19 @@ do_nonblock: | |||
6188 | goto out; | 6185 | goto out; |
6189 | } | 6186 | } |
6190 | 6187 | ||
6188 | void sctp_data_ready(struct sock *sk, int len) | ||
6189 | { | ||
6190 | struct socket_wq *wq; | ||
6191 | |||
6192 | rcu_read_lock(); | ||
6193 | wq = rcu_dereference(sk->sk_wq); | ||
6194 | if (wq_has_sleeper(wq)) | ||
6195 | wake_up_interruptible_sync_poll(&wq->wait, POLLIN | | ||
6196 | POLLRDNORM | POLLRDBAND); | ||
6197 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | ||
6198 | rcu_read_unlock(); | ||
6199 | } | ||
6200 | |||
6191 | /* If socket sndbuf has changed, wake up all per association waiters. */ | 6201 | /* If socket sndbuf has changed, wake up all per association waiters. */ |
6192 | void sctp_write_space(struct sock *sk) | 6202 | void sctp_write_space(struct sock *sk) |
6193 | { | 6203 | { |
@@ -6296,7 +6306,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo) | |||
6296 | 6306 | ||
6297 | 6307 | ||
6298 | for (;;) { | 6308 | for (;;) { |
6299 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | 6309 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
6300 | TASK_INTERRUPTIBLE); | 6310 | TASK_INTERRUPTIBLE); |
6301 | 6311 | ||
6302 | if (list_empty(&ep->asocs)) { | 6312 | if (list_empty(&ep->asocs)) { |
@@ -6322,7 +6332,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo) | |||
6322 | break; | 6332 | break; |
6323 | } | 6333 | } |
6324 | 6334 | ||
6325 | finish_wait(sk->sk_sleep, &wait); | 6335 | finish_wait(sk_sleep(sk), &wait); |
6326 | 6336 | ||
6327 | return err; | 6337 | return err; |
6328 | } | 6338 | } |
@@ -6332,7 +6342,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout) | |||
6332 | DEFINE_WAIT(wait); | 6342 | DEFINE_WAIT(wait); |
6333 | 6343 | ||
6334 | do { | 6344 | do { |
6335 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 6345 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
6336 | if (list_empty(&sctp_sk(sk)->ep->asocs)) | 6346 | if (list_empty(&sctp_sk(sk)->ep->asocs)) |
6337 | break; | 6347 | break; |
6338 | sctp_release_sock(sk); | 6348 | sctp_release_sock(sk); |
@@ -6340,7 +6350,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout) | |||
6340 | sctp_lock_sock(sk); | 6350 | sctp_lock_sock(sk); |
6341 | } while (!signal_pending(current) && timeout); | 6351 | } while (!signal_pending(current) && timeout); |
6342 | 6352 | ||
6343 | finish_wait(sk->sk_sleep, &wait); | 6353 | finish_wait(sk_sleep(sk), &wait); |
6344 | } | 6354 | } |
6345 | 6355 | ||
6346 | static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) | 6356 | static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) |
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c index 737d330e5ffc..442ad4ed6315 100644 --- a/net/sctp/ssnmap.c +++ b/net/sctp/ssnmap.c | |||
@@ -37,6 +37,7 @@ | |||
37 | */ | 37 | */ |
38 | 38 | ||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <linux/slab.h> | ||
40 | #include <net/sctp/sctp.h> | 41 | #include <net/sctp/sctp.h> |
41 | #include <net/sctp/sm.h> | 42 | #include <net/sctp/sm.h> |
42 | 43 | ||
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index b827d21dbe54..fccf4947aff1 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -48,6 +48,7 @@ | |||
48 | * be incorporated into the next SCTP release. | 48 | * be incorporated into the next SCTP release. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | #include <linux/slab.h> | ||
51 | #include <linux/types.h> | 52 | #include <linux/types.h> |
52 | #include <linux/random.h> | 53 | #include <linux/random.h> |
53 | #include <net/sctp/sctp.h> | 54 | #include <net/sctp/sctp.h> |
@@ -63,9 +64,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | |||
63 | /* Copy in the address. */ | 64 | /* Copy in the address. */ |
64 | peer->ipaddr = *addr; | 65 | peer->ipaddr = *addr; |
65 | peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); | 66 | peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); |
66 | peer->asoc = NULL; | ||
67 | |||
68 | peer->dst = NULL; | ||
69 | memset(&peer->saddr, 0, sizeof(union sctp_addr)); | 67 | memset(&peer->saddr, 0, sizeof(union sctp_addr)); |
70 | 68 | ||
71 | /* From 6.3.1 RTO Calculation: | 69 | /* From 6.3.1 RTO Calculation: |
@@ -75,34 +73,21 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | |||
75 | * parameter 'RTO.Initial'. | 73 | * parameter 'RTO.Initial'. |
76 | */ | 74 | */ |
77 | peer->rto = msecs_to_jiffies(sctp_rto_initial); | 75 | peer->rto = msecs_to_jiffies(sctp_rto_initial); |
78 | peer->rtt = 0; | ||
79 | peer->rttvar = 0; | ||
80 | peer->srtt = 0; | ||
81 | peer->rto_pending = 0; | ||
82 | peer->hb_sent = 0; | ||
83 | peer->fast_recovery = 0; | ||
84 | 76 | ||
85 | peer->last_time_heard = jiffies; | 77 | peer->last_time_heard = jiffies; |
86 | peer->last_time_ecne_reduced = jiffies; | 78 | peer->last_time_ecne_reduced = jiffies; |
87 | 79 | ||
88 | peer->init_sent_count = 0; | ||
89 | |||
90 | peer->param_flags = SPP_HB_DISABLE | | 80 | peer->param_flags = SPP_HB_DISABLE | |
91 | SPP_PMTUD_ENABLE | | 81 | SPP_PMTUD_ENABLE | |
92 | SPP_SACKDELAY_ENABLE; | 82 | SPP_SACKDELAY_ENABLE; |
93 | peer->hbinterval = 0; | ||
94 | 83 | ||
95 | /* Initialize the default path max_retrans. */ | 84 | /* Initialize the default path max_retrans. */ |
96 | peer->pathmaxrxt = sctp_max_retrans_path; | 85 | peer->pathmaxrxt = sctp_max_retrans_path; |
97 | peer->error_count = 0; | ||
98 | 86 | ||
99 | INIT_LIST_HEAD(&peer->transmitted); | 87 | INIT_LIST_HEAD(&peer->transmitted); |
100 | INIT_LIST_HEAD(&peer->send_ready); | 88 | INIT_LIST_HEAD(&peer->send_ready); |
101 | INIT_LIST_HEAD(&peer->transports); | 89 | INIT_LIST_HEAD(&peer->transports); |
102 | 90 | ||
103 | peer->T3_rtx_timer.expires = 0; | ||
104 | peer->hb_timer.expires = 0; | ||
105 | |||
106 | setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, | 91 | setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, |
107 | (unsigned long)peer); | 92 | (unsigned long)peer); |
108 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, | 93 | setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, |
@@ -112,15 +97,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | |||
112 | get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); | 97 | get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); |
113 | 98 | ||
114 | atomic_set(&peer->refcnt, 1); | 99 | atomic_set(&peer->refcnt, 1); |
115 | peer->dead = 0; | ||
116 | |||
117 | peer->malloced = 0; | ||
118 | |||
119 | /* Initialize the state information for SFR-CACC */ | ||
120 | peer->cacc.changeover_active = 0; | ||
121 | peer->cacc.cycling_changeover = 0; | ||
122 | peer->cacc.next_tsn_at_change = 0; | ||
123 | peer->cacc.cacc_saw_newack = 0; | ||
124 | 100 | ||
125 | return peer; | 101 | return peer; |
126 | } | 102 | } |
@@ -194,7 +170,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport) | |||
194 | /* Start T3_rtx timer if it is not already running and update the heartbeat | 170 | /* Start T3_rtx timer if it is not already running and update the heartbeat |
195 | * timer. This routine is called every time a DATA chunk is sent. | 171 | * timer. This routine is called every time a DATA chunk is sent. |
196 | */ | 172 | */ |
197 | void sctp_transport_reset_timers(struct sctp_transport *transport, int force) | 173 | void sctp_transport_reset_timers(struct sctp_transport *transport) |
198 | { | 174 | { |
199 | /* RFC 2960 6.3.2 Retransmission Timer Rules | 175 | /* RFC 2960 6.3.2 Retransmission Timer Rules |
200 | * | 176 | * |
@@ -204,7 +180,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport, int force) | |||
204 | * address. | 180 | * address. |
205 | */ | 181 | */ |
206 | 182 | ||
207 | if (force || !timer_pending(&transport->T3_rtx_timer)) | 183 | if (!timer_pending(&transport->T3_rtx_timer)) |
208 | if (!mod_timer(&transport->T3_rtx_timer, | 184 | if (!mod_timer(&transport->T3_rtx_timer, |
209 | jiffies + transport->rto)) | 185 | jiffies + transport->rto)) |
210 | sctp_transport_hold(transport); | 186 | sctp_transport_hold(transport); |
@@ -402,15 +378,16 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) | |||
402 | void sctp_transport_raise_cwnd(struct sctp_transport *transport, | 378 | void sctp_transport_raise_cwnd(struct sctp_transport *transport, |
403 | __u32 sack_ctsn, __u32 bytes_acked) | 379 | __u32 sack_ctsn, __u32 bytes_acked) |
404 | { | 380 | { |
381 | struct sctp_association *asoc = transport->asoc; | ||
405 | __u32 cwnd, ssthresh, flight_size, pba, pmtu; | 382 | __u32 cwnd, ssthresh, flight_size, pba, pmtu; |
406 | 383 | ||
407 | cwnd = transport->cwnd; | 384 | cwnd = transport->cwnd; |
408 | flight_size = transport->flight_size; | 385 | flight_size = transport->flight_size; |
409 | 386 | ||
410 | /* See if we need to exit Fast Recovery first */ | 387 | /* See if we need to exit Fast Recovery first */ |
411 | if (transport->fast_recovery && | 388 | if (asoc->fast_recovery && |
412 | TSN_lte(transport->fast_recovery_exit, sack_ctsn)) | 389 | TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) |
413 | transport->fast_recovery = 0; | 390 | asoc->fast_recovery = 0; |
414 | 391 | ||
415 | /* The appropriate cwnd increase algorithm is performed if, and only | 392 | /* The appropriate cwnd increase algorithm is performed if, and only |
416 | * if the cumulative TSN whould advanced and the congestion window is | 393 | * if the cumulative TSN whould advanced and the congestion window is |
@@ -439,7 +416,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, | |||
439 | * 2) the destination's path MTU. This upper bound protects | 416 | * 2) the destination's path MTU. This upper bound protects |
440 | * against the ACK-Splitting attack outlined in [SAVAGE99]. | 417 | * against the ACK-Splitting attack outlined in [SAVAGE99]. |
441 | */ | 418 | */ |
442 | if (transport->fast_recovery) | 419 | if (asoc->fast_recovery) |
443 | return; | 420 | return; |
444 | 421 | ||
445 | if (bytes_acked > pmtu) | 422 | if (bytes_acked > pmtu) |
@@ -490,6 +467,8 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, | |||
490 | void sctp_transport_lower_cwnd(struct sctp_transport *transport, | 467 | void sctp_transport_lower_cwnd(struct sctp_transport *transport, |
491 | sctp_lower_cwnd_t reason) | 468 | sctp_lower_cwnd_t reason) |
492 | { | 469 | { |
470 | struct sctp_association *asoc = transport->asoc; | ||
471 | |||
493 | switch (reason) { | 472 | switch (reason) { |
494 | case SCTP_LOWER_CWND_T3_RTX: | 473 | case SCTP_LOWER_CWND_T3_RTX: |
495 | /* RFC 2960 Section 7.2.3, sctpimpguide | 474 | /* RFC 2960 Section 7.2.3, sctpimpguide |
@@ -500,11 +479,11 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
500 | * partial_bytes_acked = 0 | 479 | * partial_bytes_acked = 0 |
501 | */ | 480 | */ |
502 | transport->ssthresh = max(transport->cwnd/2, | 481 | transport->ssthresh = max(transport->cwnd/2, |
503 | 4*transport->asoc->pathmtu); | 482 | 4*asoc->pathmtu); |
504 | transport->cwnd = transport->asoc->pathmtu; | 483 | transport->cwnd = asoc->pathmtu; |
505 | 484 | ||
506 | /* T3-rtx also clears fast recovery on the transport */ | 485 | /* T3-rtx also clears fast recovery */ |
507 | transport->fast_recovery = 0; | 486 | asoc->fast_recovery = 0; |
508 | break; | 487 | break; |
509 | 488 | ||
510 | case SCTP_LOWER_CWND_FAST_RTX: | 489 | case SCTP_LOWER_CWND_FAST_RTX: |
@@ -520,15 +499,15 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
520 | * cwnd = ssthresh | 499 | * cwnd = ssthresh |
521 | * partial_bytes_acked = 0 | 500 | * partial_bytes_acked = 0 |
522 | */ | 501 | */ |
523 | if (transport->fast_recovery) | 502 | if (asoc->fast_recovery) |
524 | return; | 503 | return; |
525 | 504 | ||
526 | /* Mark Fast recovery */ | 505 | /* Mark Fast recovery */ |
527 | transport->fast_recovery = 1; | 506 | asoc->fast_recovery = 1; |
528 | transport->fast_recovery_exit = transport->asoc->next_tsn - 1; | 507 | asoc->fast_recovery_exit = asoc->next_tsn - 1; |
529 | 508 | ||
530 | transport->ssthresh = max(transport->cwnd/2, | 509 | transport->ssthresh = max(transport->cwnd/2, |
531 | 4*transport->asoc->pathmtu); | 510 | 4*asoc->pathmtu); |
532 | transport->cwnd = transport->ssthresh; | 511 | transport->cwnd = transport->ssthresh; |
533 | break; | 512 | break; |
534 | 513 | ||
@@ -548,7 +527,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
548 | if (time_after(jiffies, transport->last_time_ecne_reduced + | 527 | if (time_after(jiffies, transport->last_time_ecne_reduced + |
549 | transport->rtt)) { | 528 | transport->rtt)) { |
550 | transport->ssthresh = max(transport->cwnd/2, | 529 | transport->ssthresh = max(transport->cwnd/2, |
551 | 4*transport->asoc->pathmtu); | 530 | 4*asoc->pathmtu); |
552 | transport->cwnd = transport->ssthresh; | 531 | transport->cwnd = transport->ssthresh; |
553 | transport->last_time_ecne_reduced = jiffies; | 532 | transport->last_time_ecne_reduced = jiffies; |
554 | } | 533 | } |
@@ -564,7 +543,7 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
564 | * interval. | 543 | * interval. |
565 | */ | 544 | */ |
566 | transport->cwnd = max(transport->cwnd/2, | 545 | transport->cwnd = max(transport->cwnd/2, |
567 | 4*transport->asoc->pathmtu); | 546 | 4*asoc->pathmtu); |
568 | break; | 547 | break; |
569 | } | 548 | } |
570 | 549 | ||
@@ -649,7 +628,6 @@ void sctp_transport_reset(struct sctp_transport *t) | |||
649 | t->error_count = 0; | 628 | t->error_count = 0; |
650 | t->rto_pending = 0; | 629 | t->rto_pending = 0; |
651 | t->hb_sent = 0; | 630 | t->hb_sent = 0; |
652 | t->fast_recovery = 0; | ||
653 | 631 | ||
654 | /* Initialize the state information for SFR-CACC */ | 632 | /* Initialize the state information for SFR-CACC */ |
655 | t->cacc.changeover_active = 0; | 633 | t->cacc.changeover_active = 0; |
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 9bd64565021a..747d5412c463 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -42,6 +42,7 @@ | |||
42 | * be incorporated into the next SCTP release. | 42 | * be incorporated into the next SCTP release. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <linux/slab.h> | ||
45 | #include <linux/types.h> | 46 | #include <linux/types.h> |
46 | #include <linux/bitmap.h> | 47 | #include <linux/bitmap.h> |
47 | #include <net/sctp/sctp.h> | 48 | #include <net/sctp/sctp.h> |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 8b3560fd876d..aa72e89c3ee1 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -43,6 +43,7 @@ | |||
43 | * be incorporated into the next SCTP release. | 43 | * be incorporated into the next SCTP release. |
44 | */ | 44 | */ |
45 | 45 | ||
46 | #include <linux/slab.h> | ||
46 | #include <linux/types.h> | 47 | #include <linux/types.h> |
47 | #include <linux/skbuff.h> | 48 | #include <linux/skbuff.h> |
48 | #include <net/sctp/structs.h> | 49 | #include <net/sctp/structs.h> |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 7b23803343cc..3a448536f0b6 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -41,6 +41,7 @@ | |||
41 | * be incorporated into the next SCTP release. | 41 | * be incorporated into the next SCTP release. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/slab.h> | ||
44 | #include <linux/types.h> | 45 | #include <linux/types.h> |
45 | #include <linux/skbuff.h> | 46 | #include <linux/skbuff.h> |
46 | #include <net/sock.h> | 47 | #include <net/sock.h> |
diff --git a/net/socket.c b/net/socket.c index 769c386bd428..dae8c6b84a09 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -87,6 +87,7 @@ | |||
87 | #include <linux/wireless.h> | 87 | #include <linux/wireless.h> |
88 | #include <linux/nsproxy.h> | 88 | #include <linux/nsproxy.h> |
89 | #include <linux/magic.h> | 89 | #include <linux/magic.h> |
90 | #include <linux/slab.h> | ||
90 | 91 | ||
91 | #include <asm/uaccess.h> | 92 | #include <asm/uaccess.h> |
92 | #include <asm/unistd.h> | 93 | #include <asm/unistd.h> |
@@ -251,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb) | |||
251 | ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); | 252 | ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); |
252 | if (!ei) | 253 | if (!ei) |
253 | return NULL; | 254 | return NULL; |
254 | init_waitqueue_head(&ei->socket.wait); | 255 | ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL); |
256 | if (!ei->socket.wq) { | ||
257 | kmem_cache_free(sock_inode_cachep, ei); | ||
258 | return NULL; | ||
259 | } | ||
260 | init_waitqueue_head(&ei->socket.wq->wait); | ||
261 | ei->socket.wq->fasync_list = NULL; | ||
255 | 262 | ||
256 | ei->socket.fasync_list = NULL; | ||
257 | ei->socket.state = SS_UNCONNECTED; | 263 | ei->socket.state = SS_UNCONNECTED; |
258 | ei->socket.flags = 0; | 264 | ei->socket.flags = 0; |
259 | ei->socket.ops = NULL; | 265 | ei->socket.ops = NULL; |
@@ -263,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb) | |||
263 | return &ei->vfs_inode; | 269 | return &ei->vfs_inode; |
264 | } | 270 | } |
265 | 271 | ||
272 | |||
273 | static void wq_free_rcu(struct rcu_head *head) | ||
274 | { | ||
275 | struct socket_wq *wq = container_of(head, struct socket_wq, rcu); | ||
276 | |||
277 | kfree(wq); | ||
278 | } | ||
279 | |||
266 | static void sock_destroy_inode(struct inode *inode) | 280 | static void sock_destroy_inode(struct inode *inode) |
267 | { | 281 | { |
268 | kmem_cache_free(sock_inode_cachep, | 282 | struct socket_alloc *ei; |
269 | container_of(inode, struct socket_alloc, vfs_inode)); | 283 | |
284 | ei = container_of(inode, struct socket_alloc, vfs_inode); | ||
285 | call_rcu(&ei->socket.wq->rcu, wq_free_rcu); | ||
286 | kmem_cache_free(sock_inode_cachep, ei); | ||
270 | } | 287 | } |
271 | 288 | ||
272 | static void init_once(void *foo) | 289 | static void init_once(void *foo) |
@@ -512,7 +529,7 @@ void sock_release(struct socket *sock) | |||
512 | module_put(owner); | 529 | module_put(owner); |
513 | } | 530 | } |
514 | 531 | ||
515 | if (sock->fasync_list) | 532 | if (sock->wq->fasync_list) |
516 | printk(KERN_ERR "sock_release: fasync list not empty!\n"); | 533 | printk(KERN_ERR "sock_release: fasync list not empty!\n"); |
517 | 534 | ||
518 | percpu_sub(sockets_in_use, 1); | 535 | percpu_sub(sockets_in_use, 1); |
@@ -619,10 +636,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
619 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, | 636 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, |
620 | sizeof(tv), &tv); | 637 | sizeof(tv), &tv); |
621 | } else { | 638 | } else { |
622 | struct timespec ts; | 639 | skb_get_timestampns(skb, &ts[0]); |
623 | skb_get_timestampns(skb, &ts); | ||
624 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, | 640 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, |
625 | sizeof(ts), &ts); | 641 | sizeof(ts[0]), &ts[0]); |
626 | } | 642 | } |
627 | } | 643 | } |
628 | 644 | ||
@@ -655,13 +671,13 @@ inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff | |||
655 | sizeof(__u32), &skb->dropcount); | 671 | sizeof(__u32), &skb->dropcount); |
656 | } | 672 | } |
657 | 673 | ||
658 | void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | 674 | void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, |
659 | struct sk_buff *skb) | 675 | struct sk_buff *skb) |
660 | { | 676 | { |
661 | sock_recv_timestamp(msg, sk, skb); | 677 | sock_recv_timestamp(msg, sk, skb); |
662 | sock_recv_drops(msg, sk, skb); | 678 | sock_recv_drops(msg, sk, skb); |
663 | } | 679 | } |
664 | EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops); | 680 | EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); |
665 | 681 | ||
666 | static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, | 682 | static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, |
667 | struct msghdr *msg, size_t size, int flags) | 683 | struct msghdr *msg, size_t size, int flags) |
@@ -1067,87 +1083,44 @@ static int sock_close(struct inode *inode, struct file *filp) | |||
1067 | * 1. fasync_list is modified only under process context socket lock | 1083 | * 1. fasync_list is modified only under process context socket lock |
1068 | * i.e. under semaphore. | 1084 | * i.e. under semaphore. |
1069 | * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) | 1085 | * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) |
1070 | * or under socket lock. | 1086 | * or under socket lock |
1071 | * 3. fasync_list can be used from softirq context, so that | ||
1072 | * modification under socket lock have to be enhanced with | ||
1073 | * write_lock_bh(&sk->sk_callback_lock). | ||
1074 | * --ANK (990710) | ||
1075 | */ | 1087 | */ |
1076 | 1088 | ||
1077 | static int sock_fasync(int fd, struct file *filp, int on) | 1089 | static int sock_fasync(int fd, struct file *filp, int on) |
1078 | { | 1090 | { |
1079 | struct fasync_struct *fa, *fna = NULL, **prev; | 1091 | struct socket *sock = filp->private_data; |
1080 | struct socket *sock; | 1092 | struct sock *sk = sock->sk; |
1081 | struct sock *sk; | ||
1082 | |||
1083 | if (on) { | ||
1084 | fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL); | ||
1085 | if (fna == NULL) | ||
1086 | return -ENOMEM; | ||
1087 | } | ||
1088 | |||
1089 | sock = filp->private_data; | ||
1090 | 1093 | ||
1091 | sk = sock->sk; | 1094 | if (sk == NULL) |
1092 | if (sk == NULL) { | ||
1093 | kfree(fna); | ||
1094 | return -EINVAL; | 1095 | return -EINVAL; |
1095 | } | ||
1096 | 1096 | ||
1097 | lock_sock(sk); | 1097 | lock_sock(sk); |
1098 | 1098 | ||
1099 | spin_lock(&filp->f_lock); | 1099 | fasync_helper(fd, filp, on, &sock->wq->fasync_list); |
1100 | if (on) | ||
1101 | filp->f_flags |= FASYNC; | ||
1102 | else | ||
1103 | filp->f_flags &= ~FASYNC; | ||
1104 | spin_unlock(&filp->f_lock); | ||
1105 | 1100 | ||
1106 | prev = &(sock->fasync_list); | 1101 | if (!sock->wq->fasync_list) |
1107 | 1102 | sock_reset_flag(sk, SOCK_FASYNC); | |
1108 | for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev) | 1103 | else |
1109 | if (fa->fa_file == filp) | ||
1110 | break; | ||
1111 | |||
1112 | if (on) { | ||
1113 | if (fa != NULL) { | ||
1114 | write_lock_bh(&sk->sk_callback_lock); | ||
1115 | fa->fa_fd = fd; | ||
1116 | write_unlock_bh(&sk->sk_callback_lock); | ||
1117 | |||
1118 | kfree(fna); | ||
1119 | goto out; | ||
1120 | } | ||
1121 | fna->fa_file = filp; | ||
1122 | fna->fa_fd = fd; | ||
1123 | fna->magic = FASYNC_MAGIC; | ||
1124 | fna->fa_next = sock->fasync_list; | ||
1125 | write_lock_bh(&sk->sk_callback_lock); | ||
1126 | sock->fasync_list = fna; | ||
1127 | sock_set_flag(sk, SOCK_FASYNC); | 1104 | sock_set_flag(sk, SOCK_FASYNC); |
1128 | write_unlock_bh(&sk->sk_callback_lock); | ||
1129 | } else { | ||
1130 | if (fa != NULL) { | ||
1131 | write_lock_bh(&sk->sk_callback_lock); | ||
1132 | *prev = fa->fa_next; | ||
1133 | if (!sock->fasync_list) | ||
1134 | sock_reset_flag(sk, SOCK_FASYNC); | ||
1135 | write_unlock_bh(&sk->sk_callback_lock); | ||
1136 | kfree(fa); | ||
1137 | } | ||
1138 | } | ||
1139 | 1105 | ||
1140 | out: | 1106 | release_sock(sk); |
1141 | release_sock(sock->sk); | ||
1142 | return 0; | 1107 | return 0; |
1143 | } | 1108 | } |
1144 | 1109 | ||
1145 | /* This function may be called only under socket lock or callback_lock */ | 1110 | /* This function may be called only under socket lock or callback_lock or rcu_lock */ |
1146 | 1111 | ||
1147 | int sock_wake_async(struct socket *sock, int how, int band) | 1112 | int sock_wake_async(struct socket *sock, int how, int band) |
1148 | { | 1113 | { |
1149 | if (!sock || !sock->fasync_list) | 1114 | struct socket_wq *wq; |
1115 | |||
1116 | if (!sock) | ||
1150 | return -1; | 1117 | return -1; |
1118 | rcu_read_lock(); | ||
1119 | wq = rcu_dereference(sock->wq); | ||
1120 | if (!wq || !wq->fasync_list) { | ||
1121 | rcu_read_unlock(); | ||
1122 | return -1; | ||
1123 | } | ||
1151 | switch (how) { | 1124 | switch (how) { |
1152 | case SOCK_WAKE_WAITD: | 1125 | case SOCK_WAKE_WAITD: |
1153 | if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) | 1126 | if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) |
@@ -1159,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band) | |||
1159 | /* fall through */ | 1132 | /* fall through */ |
1160 | case SOCK_WAKE_IO: | 1133 | case SOCK_WAKE_IO: |
1161 | call_kill: | 1134 | call_kill: |
1162 | __kill_fasync(sock->fasync_list, SIGIO, band); | 1135 | kill_fasync(&wq->fasync_list, SIGIO, band); |
1163 | break; | 1136 | break; |
1164 | case SOCK_WAKE_URG: | 1137 | case SOCK_WAKE_URG: |
1165 | __kill_fasync(sock->fasync_list, SIGURG, band); | 1138 | kill_fasync(&wq->fasync_list, SIGURG, band); |
1166 | } | 1139 | } |
1140 | rcu_read_unlock(); | ||
1167 | return 0; | 1141 | return 0; |
1168 | } | 1142 | } |
1169 | 1143 | ||
@@ -2135,6 +2109,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | |||
2135 | break; | 2109 | break; |
2136 | ++datagrams; | 2110 | ++datagrams; |
2137 | 2111 | ||
2112 | /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ | ||
2113 | if (flags & MSG_WAITFORONE) | ||
2114 | flags |= MSG_DONTWAIT; | ||
2115 | |||
2138 | if (timeout) { | 2116 | if (timeout) { |
2139 | ktime_get_ts(timeout); | 2117 | ktime_get_ts(timeout); |
2140 | *timeout = timespec_sub(end_time, *timeout); | 2118 | *timeout = timespec_sub(end_time, *timeout); |
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index 6dcdd2517819..1419d0cdbbac 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <net/ipv6.h> | 19 | #include <net/ipv6.h> |
20 | #include <linux/sunrpc/clnt.h> | 20 | #include <linux/sunrpc/clnt.h> |
21 | #include <linux/slab.h> | ||
21 | 22 | ||
22 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 23 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
23 | 24 | ||
@@ -71,8 +72,9 @@ static size_t rpc_ntop6(const struct sockaddr *sap, | |||
71 | if (unlikely(len == 0)) | 72 | if (unlikely(len == 0)) |
72 | return len; | 73 | return len; |
73 | 74 | ||
74 | if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && | 75 | if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) |
75 | !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) | 76 | return len; |
77 | if (sin6->sin6_scope_id == 0) | ||
76 | return len; | 78 | return len; |
77 | 79 | ||
78 | rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", | 80 | rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", |
@@ -165,8 +167,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen, | |||
165 | if (*delim != IPV6_SCOPE_DELIMITER) | 167 | if (*delim != IPV6_SCOPE_DELIMITER) |
166 | return 0; | 168 | return 0; |
167 | 169 | ||
168 | if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && | 170 | if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) |
169 | !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) | ||
170 | return 0; | 171 | return 0; |
171 | 172 | ||
172 | len = (buf + buflen) - delim - 1; | 173 | len = (buf + buflen) - delim - 1; |
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index bf88bf8e9365..8f623b0f03dd 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c | |||
@@ -5,6 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/err.h> | 7 | #include <linux/err.h> |
8 | #include <linux/slab.h> | ||
8 | #include <linux/types.h> | 9 | #include <linux/types.h> |
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index f7a7f8380e38..c389ccf6437d 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -206,8 +206,14 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct | |||
206 | ctx->gc_win = window_size; | 206 | ctx->gc_win = window_size; |
207 | /* gssd signals an error by passing ctx->gc_win = 0: */ | 207 | /* gssd signals an error by passing ctx->gc_win = 0: */ |
208 | if (ctx->gc_win == 0) { | 208 | if (ctx->gc_win == 0) { |
209 | /* in which case, p points to an error code which we ignore */ | 209 | /* |
210 | p = ERR_PTR(-EACCES); | 210 | * in which case, p points to an error code. Anything other |
211 | * than -EKEYEXPIRED gets converted to -EACCES. | ||
212 | */ | ||
213 | p = simple_get_bytes(p, end, &ret, sizeof(ret)); | ||
214 | if (!IS_ERR(p)) | ||
215 | p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) : | ||
216 | ERR_PTR(-EACCES); | ||
211 | goto err; | 217 | goto err; |
212 | } | 218 | } |
213 | /* copy the opaque wire context */ | 219 | /* copy the opaque wire context */ |
@@ -646,6 +652,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
646 | err = PTR_ERR(p); | 652 | err = PTR_ERR(p); |
647 | switch (err) { | 653 | switch (err) { |
648 | case -EACCES: | 654 | case -EACCES: |
655 | case -EKEYEXPIRED: | ||
649 | gss_msg->msg.errno = err; | 656 | gss_msg->msg.errno = err; |
650 | err = mlen; | 657 | err = mlen; |
651 | break; | 658 | break; |
@@ -1273,9 +1280,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp) | |||
1273 | rqstp->rq_release_snd_buf = priv_release_snd_buf; | 1280 | rqstp->rq_release_snd_buf = priv_release_snd_buf; |
1274 | return 0; | 1281 | return 0; |
1275 | out_free: | 1282 | out_free: |
1276 | for (i--; i >= 0; i--) { | 1283 | rqstp->rq_enc_pages_num = i; |
1277 | __free_page(rqstp->rq_enc_pages[i]); | 1284 | priv_release_snd_buf(rqstp); |
1278 | } | ||
1279 | out: | 1285 | out: |
1280 | return -EAGAIN; | 1286 | return -EAGAIN; |
1281 | } | 1287 | } |
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c index c0ba39c4f5f2..310b78e99456 100644 --- a/net/sunrpc/auth_gss/gss_generic_token.c +++ b/net/sunrpc/auth_gss/gss_generic_token.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
35 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <linux/slab.h> | ||
37 | #include <linux/string.h> | 36 | #include <linux/string.h> |
38 | #include <linux/sunrpc/sched.h> | 37 | #include <linux/sunrpc/sched.h> |
39 | #include <linux/sunrpc/gss_asn1.h> | 38 | #include <linux/sunrpc/gss_asn1.h> |
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index c93fca204558..e9b636176687 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/err.h> | 37 | #include <linux/err.h> |
38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
39 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
40 | #include <linux/slab.h> | ||
41 | #include <linux/scatterlist.h> | 40 | #include <linux/scatterlist.h> |
42 | #include <linux/crypto.h> | 41 | #include <linux/crypto.h> |
43 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index b8f42ef7178e..88fe6e75ed7e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c | |||
@@ -59,7 +59,6 @@ | |||
59 | */ | 59 | */ |
60 | 60 | ||
61 | #include <linux/types.h> | 61 | #include <linux/types.h> |
62 | #include <linux/slab.h> | ||
63 | #include <linux/jiffies.h> | 62 | #include <linux/jiffies.h> |
64 | #include <linux/sunrpc/gss_krb5.h> | 63 | #include <linux/sunrpc/gss_krb5.h> |
65 | #include <linux/random.h> | 64 | #include <linux/random.h> |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 17562b4c35f6..6331cd6866ec 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
@@ -32,7 +32,6 @@ | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
35 | #include <linux/slab.h> | ||
36 | #include <linux/sunrpc/gss_krb5.h> | 35 | #include <linux/sunrpc/gss_krb5.h> |
37 | #include <linux/crypto.h> | 36 | #include <linux/crypto.h> |
38 | 37 | ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 066ec73c84d6..ce6c247edad0 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c | |||
@@ -58,7 +58,6 @@ | |||
58 | */ | 58 | */ |
59 | 59 | ||
60 | #include <linux/types.h> | 60 | #include <linux/types.h> |
61 | #include <linux/slab.h> | ||
62 | #include <linux/jiffies.h> | 61 | #include <linux/jiffies.h> |
63 | #include <linux/sunrpc/gss_krb5.h> | 62 | #include <linux/sunrpc/gss_krb5.h> |
64 | #include <linux/crypto.h> | 63 | #include <linux/crypto.h> |
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index ae8e69b59c4c..a6e905637e03 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/types.h> | 1 | #include <linux/types.h> |
2 | #include <linux/slab.h> | ||
3 | #include <linux/jiffies.h> | 2 | #include <linux/jiffies.h> |
4 | #include <linux/sunrpc/gss_krb5.h> | 3 | #include <linux/sunrpc/gss_krb5.h> |
5 | #include <linux/random.h> | 4 | #include <linux/random.h> |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index c832712f8d55..5a3a65a0e2b4 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c | |||
@@ -34,7 +34,6 @@ | |||
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/slab.h> | ||
38 | #include <linux/jiffies.h> | 37 | #include <linux/jiffies.h> |
39 | #include <linux/sunrpc/gss_spkm3.h> | 38 | #include <linux/sunrpc/gss_spkm3.h> |
40 | #include <linux/random.h> | 39 | #include <linux/random.h> |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c index 3308157436d2..a99825d7caa0 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_token.c +++ b/net/sunrpc/auth_gss/gss_spkm3_token.c | |||
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck | |||
223 | 223 | ||
224 | /* only support SPKM_MIC_TOK */ | 224 | /* only support SPKM_MIC_TOK */ |
225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { | 225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { |
226 | dprintk("RPC: ERROR unsupported SPKM3 token \n"); | 226 | dprintk("RPC: ERROR unsupported SPKM3 token\n"); |
227 | goto out; | 227 | goto out; |
228 | } | 228 | } |
229 | 229 | ||
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e34bc531fcb9..b81e790ef9f4 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -37,6 +37,7 @@ | |||
37 | * | 37 | * |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #include <linux/slab.h> | ||
40 | #include <linux/types.h> | 41 | #include <linux/types.h> |
41 | #include <linux/module.h> | 42 | #include <linux/module.h> |
42 | #include <linux/pagemap.h> | 43 | #include <linux/pagemap.h> |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 46b2647c5bd2..aac2f8b4ee21 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | 6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/slab.h> | ||
9 | #include <linux/types.h> | 10 | #include <linux/types.h> |
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 553621fb2c41..cf06af3b63c6 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
@@ -22,6 +22,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
22 | ******************************************************************************/ | 22 | ******************************************************************************/ |
23 | 23 | ||
24 | #include <linux/tcp.h> | 24 | #include <linux/tcp.h> |
25 | #include <linux/slab.h> | ||
25 | #include <linux/sunrpc/xprt.h> | 26 | #include <linux/sunrpc/xprt.h> |
26 | 27 | ||
27 | #ifdef RPC_DEBUG | 28 | #ifdef RPC_DEBUG |
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index 13f214f53120..7dcfe0cc3500 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c | |||
@@ -37,21 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
37 | 37 | ||
38 | #define RPCDBG_FACILITY RPCDBG_SVCDSP | 38 | #define RPCDBG_FACILITY RPCDBG_SVCDSP |
39 | 39 | ||
40 | void bc_release_request(struct rpc_task *task) | ||
41 | { | ||
42 | struct rpc_rqst *req = task->tk_rqstp; | ||
43 | |||
44 | dprintk("RPC: bc_release_request: task= %p\n", task); | ||
45 | |||
46 | /* | ||
47 | * Release this request only if it's a backchannel | ||
48 | * preallocated request | ||
49 | */ | ||
50 | if (!bc_prealloc(req)) | ||
51 | return; | ||
52 | xprt_free_bc_request(req); | ||
53 | } | ||
54 | |||
55 | /* Empty callback ops */ | 40 | /* Empty callback ops */ |
56 | static const struct rpc_call_ops nfs41_callback_ops = { | 41 | static const struct rpc_call_ops nfs41_callback_ops = { |
57 | }; | 42 | }; |
@@ -75,7 +60,7 @@ int bc_send(struct rpc_rqst *req) | |||
75 | rpc_put_task(task); | 60 | rpc_put_task(task); |
76 | } | 61 | } |
77 | return ret; | 62 | return ret; |
78 | dprintk("RPC: bc_send ret= %d \n", ret); | 63 | dprintk("RPC: bc_send ret= %d\n", ret); |
79 | } | 64 | } |
80 | 65 | ||
81 | #endif /* CONFIG_NFS_V4_1 */ | 66 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 154034b675bd..19c9983d5360 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -659,6 +659,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | |||
659 | task = rpc_new_task(&task_setup_data); | 659 | task = rpc_new_task(&task_setup_data); |
660 | if (!task) { | 660 | if (!task) { |
661 | xprt_free_bc_request(req); | 661 | xprt_free_bc_request(req); |
662 | task = ERR_PTR(-ENOMEM); | ||
662 | goto out; | 663 | goto out; |
663 | } | 664 | } |
664 | task->tk_rqstp = req; | 665 | task->tk_rqstp = req; |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9ea45383480e..20e30c6f8355 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent, | |||
587 | struct dentry *dentry; | 587 | struct dentry *dentry; |
588 | 588 | ||
589 | dentry = __rpc_lookup_create(parent, name); | 589 | dentry = __rpc_lookup_create(parent, name); |
590 | if (IS_ERR(dentry)) | ||
591 | return dentry; | ||
590 | if (dentry->d_inode == NULL) | 592 | if (dentry->d_inode == NULL) |
591 | return dentry; | 593 | return dentry; |
592 | dput(dentry); | 594 | dput(dentry); |
@@ -999,19 +1001,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) | |||
999 | inode = rpc_get_inode(sb, S_IFDIR | 0755); | 1001 | inode = rpc_get_inode(sb, S_IFDIR | 0755); |
1000 | if (!inode) | 1002 | if (!inode) |
1001 | return -ENOMEM; | 1003 | return -ENOMEM; |
1002 | root = d_alloc_root(inode); | 1004 | sb->s_root = root = d_alloc_root(inode); |
1003 | if (!root) { | 1005 | if (!root) { |
1004 | iput(inode); | 1006 | iput(inode); |
1005 | return -ENOMEM; | 1007 | return -ENOMEM; |
1006 | } | 1008 | } |
1007 | if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) | 1009 | if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) |
1008 | goto out; | 1010 | return -ENOMEM; |
1009 | sb->s_root = root; | ||
1010 | return 0; | 1011 | return 0; |
1011 | out: | ||
1012 | d_genocide(root); | ||
1013 | dput(root); | ||
1014 | return -ENOMEM; | ||
1015 | } | 1012 | } |
1016 | 1013 | ||
1017 | static int | 1014 | static int |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 3e3772d8eb92..121105355f60 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/slab.h> | ||
24 | #include <net/ipv6.h> | 25 | #include <net/ipv6.h> |
25 | 26 | ||
26 | #include <linux/sunrpc/clnt.h> | 27 | #include <linux/sunrpc/clnt.h> |
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index a661a3acb37e..10b4319ebbca 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
10 | #include <linux/netdevice.h> | 10 | #include <linux/netdevice.h> |
11 | #include <linux/gfp.h> | ||
11 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 1b4e6791ecf3..5785d2037f45 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/slab.h> | ||
16 | 17 | ||
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 538ca433a56c..d9017d64597e 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/kthread.h> | 21 | #include <linux/kthread.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <linux/sunrpc/types.h> | 24 | #include <linux/sunrpc/types.h> |
24 | #include <linux/sunrpc/xdr.h> | 25 | #include <linux/sunrpc/xdr.h> |
@@ -133,7 +134,7 @@ svc_pool_map_choose_mode(void) | |||
133 | return SVC_POOL_PERNODE; | 134 | return SVC_POOL_PERNODE; |
134 | } | 135 | } |
135 | 136 | ||
136 | node = any_online_node(node_online_map); | 137 | node = first_online_node; |
137 | if (nr_cpus_node(node) > 2) { | 138 | if (nr_cpus_node(node) > 2) { |
138 | /* | 139 | /* |
139 | * Non-trivial SMP, or CONFIG_NUMA on | 140 | * Non-trivial SMP, or CONFIG_NUMA on |
@@ -506,6 +507,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) | |||
506 | { | 507 | { |
507 | unsigned int pages, arghi; | 508 | unsigned int pages, arghi; |
508 | 509 | ||
510 | /* bc_xprt uses fore channel allocated buffers */ | ||
511 | if (svc_is_backchannel(rqstp)) | ||
512 | return 1; | ||
513 | |||
509 | pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. | 514 | pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. |
510 | * We assume one is at most one page | 515 | * We assume one is at most one page |
511 | */ | 516 | */ |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 7d1f9e928f69..061b2e0f9118 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/freezer.h> | 10 | #include <linux/freezer.h> |
11 | #include <linux/kthread.h> | 11 | #include <linux/kthread.h> |
12 | #include <linux/slab.h> | ||
12 | #include <net/sock.h> | 13 | #include <net/sock.h> |
13 | #include <linux/sunrpc/stats.h> | 14 | #include <linux/sunrpc/stats.h> |
14 | #include <linux/sunrpc/svc_xprt.h> | 15 | #include <linux/sunrpc/svc_xprt.h> |
@@ -173,11 +174,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, | |||
173 | .sin_addr.s_addr = htonl(INADDR_ANY), | 174 | .sin_addr.s_addr = htonl(INADDR_ANY), |
174 | .sin_port = htons(port), | 175 | .sin_port = htons(port), |
175 | }; | 176 | }; |
177 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
176 | struct sockaddr_in6 sin6 = { | 178 | struct sockaddr_in6 sin6 = { |
177 | .sin6_family = AF_INET6, | 179 | .sin6_family = AF_INET6, |
178 | .sin6_addr = IN6ADDR_ANY_INIT, | 180 | .sin6_addr = IN6ADDR_ANY_INIT, |
179 | .sin6_port = htons(port), | 181 | .sin6_port = htons(port), |
180 | }; | 182 | }; |
183 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | ||
181 | struct sockaddr *sap; | 184 | struct sockaddr *sap; |
182 | size_t len; | 185 | size_t len; |
183 | 186 | ||
@@ -186,10 +189,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, | |||
186 | sap = (struct sockaddr *)&sin; | 189 | sap = (struct sockaddr *)&sin; |
187 | len = sizeof(sin); | 190 | len = sizeof(sin); |
188 | break; | 191 | break; |
192 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
189 | case PF_INET6: | 193 | case PF_INET6: |
190 | sap = (struct sockaddr *)&sin6; | 194 | sap = (struct sockaddr *)&sin6; |
191 | len = sizeof(sin6); | 195 | len = sizeof(sin6); |
192 | break; | 196 | break; |
197 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | ||
193 | default: | 198 | default: |
194 | return ERR_PTR(-EAFNOSUPPORT); | 199 | return ERR_PTR(-EAFNOSUPPORT); |
195 | } | 200 | } |
@@ -231,7 +236,10 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, | |||
231 | err: | 236 | err: |
232 | spin_unlock(&svc_xprt_class_lock); | 237 | spin_unlock(&svc_xprt_class_lock); |
233 | dprintk("svc: transport %s not found\n", xprt_name); | 238 | dprintk("svc: transport %s not found\n", xprt_name); |
234 | return -ENOENT; | 239 | |
240 | /* This errno is exposed to user space. Provide a reasonable | ||
241 | * perror msg for a bad transport. */ | ||
242 | return -EPROTONOSUPPORT; | ||
235 | } | 243 | } |
236 | EXPORT_SYMBOL_GPL(svc_create_xprt); | 244 | EXPORT_SYMBOL_GPL(svc_create_xprt); |
237 | 245 | ||
@@ -699,8 +707,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
699 | spin_unlock_bh(&pool->sp_lock); | 707 | spin_unlock_bh(&pool->sp_lock); |
700 | 708 | ||
701 | len = 0; | 709 | len = 0; |
702 | if (test_bit(XPT_LISTENER, &xprt->xpt_flags) && | 710 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { |
703 | !test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 711 | dprintk("svc_recv: found XPT_CLOSE\n"); |
712 | svc_delete_xprt(xprt); | ||
713 | } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | ||
704 | struct svc_xprt *newxpt; | 714 | struct svc_xprt *newxpt; |
705 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | 715 | newxpt = xprt->xpt_ops->xpo_accept(xprt); |
706 | if (newxpt) { | 716 | if (newxpt) { |
@@ -726,7 +736,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
726 | svc_xprt_received(newxpt); | 736 | svc_xprt_received(newxpt); |
727 | } | 737 | } |
728 | svc_xprt_received(xprt); | 738 | svc_xprt_received(xprt); |
729 | } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | 739 | } else { |
730 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 740 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
731 | rqstp, pool->sp_id, xprt, | 741 | rqstp, pool->sp_id, xprt, |
732 | atomic_read(&xprt->xpt_ref.refcount)); | 742 | atomic_read(&xprt->xpt_ref.refcount)); |
@@ -739,11 +749,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
739 | dprintk("svc: got len=%d\n", len); | 749 | dprintk("svc: got len=%d\n", len); |
740 | } | 750 | } |
741 | 751 | ||
742 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | ||
743 | dprintk("svc_recv: found XPT_CLOSE\n"); | ||
744 | svc_delete_xprt(xprt); | ||
745 | } | ||
746 | |||
747 | /* No data, incomplete (TCP) read, or accept() */ | 752 | /* No data, incomplete (TCP) read, or accept() */ |
748 | if (len == 0 || len == -EAGAIN) { | 753 | if (len == 0 || len == -EAGAIN) { |
749 | rqstp->rq_res.len = 0; | 754 | rqstp->rq_res.len = 0; |
@@ -889,11 +894,8 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
889 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) | 894 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) |
890 | serv->sv_tmpcnt--; | 895 | serv->sv_tmpcnt--; |
891 | 896 | ||
892 | for (dr = svc_deferred_dequeue(xprt); dr; | 897 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) |
893 | dr = svc_deferred_dequeue(xprt)) { | ||
894 | svc_xprt_put(xprt); | ||
895 | kfree(dr); | 898 | kfree(dr); |
896 | } | ||
897 | 899 | ||
898 | svc_xprt_put(xprt); | 900 | svc_xprt_put(xprt); |
899 | spin_unlock_bh(&serv->sv_lock); | 901 | spin_unlock_bh(&serv->sv_lock); |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index d8c041114497..207311610988 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -10,11 +10,13 @@ | |||
10 | #include <linux/seq_file.h> | 10 | #include <linux/seq_file.h> |
11 | #include <linux/hash.h> | 11 | #include <linux/hash.h> |
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/slab.h> | ||
13 | #include <net/sock.h> | 14 | #include <net/sock.h> |
14 | #include <net/ipv6.h> | 15 | #include <net/ipv6.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #define RPCDBG_FACILITY RPCDBG_AUTH | 17 | #define RPCDBG_FACILITY RPCDBG_AUTH |
17 | 18 | ||
19 | #include <linux/sunrpc/clnt.h> | ||
18 | 20 | ||
19 | /* | 21 | /* |
20 | * AUTHUNIX and AUTHNULL credentials are both handled here. | 22 | * AUTHUNIX and AUTHNULL credentials are both handled here. |
@@ -187,10 +189,13 @@ static int ip_map_parse(struct cache_detail *cd, | |||
187 | * for scratch: */ | 189 | * for scratch: */ |
188 | char *buf = mesg; | 190 | char *buf = mesg; |
189 | int len; | 191 | int len; |
190 | int b1, b2, b3, b4, b5, b6, b7, b8; | ||
191 | char c; | ||
192 | char class[8]; | 192 | char class[8]; |
193 | struct in6_addr addr; | 193 | union { |
194 | struct sockaddr sa; | ||
195 | struct sockaddr_in s4; | ||
196 | struct sockaddr_in6 s6; | ||
197 | } address; | ||
198 | struct sockaddr_in6 sin6; | ||
194 | int err; | 199 | int err; |
195 | 200 | ||
196 | struct ip_map *ipmp; | 201 | struct ip_map *ipmp; |
@@ -209,24 +214,24 @@ static int ip_map_parse(struct cache_detail *cd, | |||
209 | len = qword_get(&mesg, buf, mlen); | 214 | len = qword_get(&mesg, buf, mlen); |
210 | if (len <= 0) return -EINVAL; | 215 | if (len <= 0) return -EINVAL; |
211 | 216 | ||
212 | if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) == 4) { | 217 | if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0) |
213 | addr.s6_addr32[0] = 0; | ||
214 | addr.s6_addr32[1] = 0; | ||
215 | addr.s6_addr32[2] = htonl(0xffff); | ||
216 | addr.s6_addr32[3] = | ||
217 | htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4); | ||
218 | } else if (sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x%c", | ||
219 | &b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) { | ||
220 | addr.s6_addr16[0] = htons(b1); | ||
221 | addr.s6_addr16[1] = htons(b2); | ||
222 | addr.s6_addr16[2] = htons(b3); | ||
223 | addr.s6_addr16[3] = htons(b4); | ||
224 | addr.s6_addr16[4] = htons(b5); | ||
225 | addr.s6_addr16[5] = htons(b6); | ||
226 | addr.s6_addr16[6] = htons(b7); | ||
227 | addr.s6_addr16[7] = htons(b8); | ||
228 | } else | ||
229 | return -EINVAL; | 218 | return -EINVAL; |
219 | switch (address.sa.sa_family) { | ||
220 | case AF_INET: | ||
221 | /* Form a mapped IPv4 address in sin6 */ | ||
222 | memset(&sin6, 0, sizeof(sin6)); | ||
223 | sin6.sin6_family = AF_INET6; | ||
224 | sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); | ||
225 | sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr; | ||
226 | break; | ||
227 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
228 | case AF_INET6: | ||
229 | memcpy(&sin6, &address.s6, sizeof(sin6)); | ||
230 | break; | ||
231 | #endif | ||
232 | default: | ||
233 | return -EINVAL; | ||
234 | } | ||
230 | 235 | ||
231 | expiry = get_expiry(&mesg); | 236 | expiry = get_expiry(&mesg); |
232 | if (expiry ==0) | 237 | if (expiry ==0) |
@@ -243,7 +248,8 @@ static int ip_map_parse(struct cache_detail *cd, | |||
243 | } else | 248 | } else |
244 | dom = NULL; | 249 | dom = NULL; |
245 | 250 | ||
246 | ipmp = ip_map_lookup(class, &addr); | 251 | /* IPv6 scope IDs are ignored for now */ |
252 | ipmp = ip_map_lookup(class, &sin6.sin6_addr); | ||
247 | if (ipmp) { | 253 | if (ipmp) { |
248 | err = ip_map_update(ipmp, | 254 | err = ip_map_update(ipmp, |
249 | container_of(dom, struct unix_domain, h), | 255 | container_of(dom, struct unix_domain, h), |
@@ -619,7 +625,7 @@ static int unix_gid_show(struct seq_file *m, | |||
619 | else | 625 | else |
620 | glen = 0; | 626 | glen = 0; |
621 | 627 | ||
622 | seq_printf(m, "%d %d:", ug->uid, glen); | 628 | seq_printf(m, "%u %d:", ug->uid, glen); |
623 | for (i = 0; i < glen; i++) | 629 | for (i = 0; i < glen; i++) |
624 | seq_printf(m, " %d", GROUP_AT(ug->gi, i)); | 630 | seq_printf(m, " %d", GROUP_AT(ug->gi, i)); |
625 | seq_printf(m, "\n"); | 631 | seq_printf(m, "\n"); |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 870929e08e5d..ce0d5b35c2ac 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -419,8 +419,8 @@ static void svc_udp_data_ready(struct sock *sk, int count) | |||
419 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 419 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
420 | svc_xprt_enqueue(&svsk->sk_xprt); | 420 | svc_xprt_enqueue(&svsk->sk_xprt); |
421 | } | 421 | } |
422 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 422 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
423 | wake_up_interruptible(sk->sk_sleep); | 423 | wake_up_interruptible(sk_sleep(sk)); |
424 | } | 424 | } |
425 | 425 | ||
426 | /* | 426 | /* |
@@ -436,10 +436,10 @@ static void svc_write_space(struct sock *sk) | |||
436 | svc_xprt_enqueue(&svsk->sk_xprt); | 436 | svc_xprt_enqueue(&svsk->sk_xprt); |
437 | } | 437 | } |
438 | 438 | ||
439 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { | 439 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) { |
440 | dprintk("RPC svc_write_space: someone sleeping on %p\n", | 440 | dprintk("RPC svc_write_space: someone sleeping on %p\n", |
441 | svsk); | 441 | svsk); |
442 | wake_up_interruptible(sk->sk_sleep); | 442 | wake_up_interruptible(sk_sleep(sk)); |
443 | } | 443 | } |
444 | } | 444 | } |
445 | 445 | ||
@@ -757,8 +757,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) | |||
757 | printk("svc: socket %p: no user data\n", sk); | 757 | printk("svc: socket %p: no user data\n", sk); |
758 | } | 758 | } |
759 | 759 | ||
760 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 760 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
761 | wake_up_interruptible_all(sk->sk_sleep); | 761 | wake_up_interruptible_all(sk_sleep(sk)); |
762 | } | 762 | } |
763 | 763 | ||
764 | /* | 764 | /* |
@@ -777,8 +777,8 @@ static void svc_tcp_state_change(struct sock *sk) | |||
777 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 777 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
778 | svc_xprt_enqueue(&svsk->sk_xprt); | 778 | svc_xprt_enqueue(&svsk->sk_xprt); |
779 | } | 779 | } |
780 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 780 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
781 | wake_up_interruptible_all(sk->sk_sleep); | 781 | wake_up_interruptible_all(sk_sleep(sk)); |
782 | } | 782 | } |
783 | 783 | ||
784 | static void svc_tcp_data_ready(struct sock *sk, int count) | 784 | static void svc_tcp_data_ready(struct sock *sk, int count) |
@@ -791,8 +791,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count) | |||
791 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 791 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
792 | svc_xprt_enqueue(&svsk->sk_xprt); | 792 | svc_xprt_enqueue(&svsk->sk_xprt); |
793 | } | 793 | } |
794 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 794 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
795 | wake_up_interruptible(sk->sk_sleep); | 795 | wake_up_interruptible(sk_sleep(sk)); |
796 | } | 796 | } |
797 | 797 | ||
798 | /* | 798 | /* |
@@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
968 | return len; | 968 | return len; |
969 | err_delete: | 969 | err_delete: |
970 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 970 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
971 | svc_xprt_received(&svsk->sk_xprt); | ||
971 | err_again: | 972 | err_again: |
972 | return -EAGAIN; | 973 | return -EAGAIN; |
973 | } | 974 | } |
@@ -1357,7 +1358,7 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, | |||
1357 | 1358 | ||
1358 | if (!so) | 1359 | if (!so) |
1359 | return err; | 1360 | return err; |
1360 | if (so->sk->sk_family != AF_INET) | 1361 | if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) |
1361 | err = -EAFNOSUPPORT; | 1362 | err = -EAFNOSUPPORT; |
1362 | else if (so->sk->sk_protocol != IPPROTO_TCP && | 1363 | else if (so->sk->sk_protocol != IPPROTO_TCP && |
1363 | so->sk->sk_protocol != IPPROTO_UDP) | 1364 | so->sk->sk_protocol != IPPROTO_UDP) |
@@ -1493,8 +1494,8 @@ static void svc_sock_detach(struct svc_xprt *xprt) | |||
1493 | sk->sk_data_ready = svsk->sk_odata; | 1494 | sk->sk_data_ready = svsk->sk_odata; |
1494 | sk->sk_write_space = svsk->sk_owspace; | 1495 | sk->sk_write_space = svsk->sk_owspace; |
1495 | 1496 | ||
1496 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 1497 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
1497 | wake_up_interruptible(sk->sk_sleep); | 1498 | wake_up_interruptible(sk_sleep(sk)); |
1498 | } | 1499 | } |
1499 | 1500 | ||
1500 | /* | 1501 | /* |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 8bd690c48b69..2763fde88499 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/types.h> | 11 | #include <linux/types.h> |
11 | #include <linux/string.h> | 12 | #include <linux/string.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 469de292c23c..699ade68aac1 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -46,6 +46,7 @@ | |||
46 | 46 | ||
47 | #include <linux/sunrpc/clnt.h> | 47 | #include <linux/sunrpc/clnt.h> |
48 | #include <linux/sunrpc/metrics.h> | 48 | #include <linux/sunrpc/metrics.h> |
49 | #include <linux/sunrpc/bc_xprt.h> | ||
49 | 50 | ||
50 | #include "sunrpc.h" | 51 | #include "sunrpc.h" |
51 | 52 | ||
@@ -973,7 +974,7 @@ void xprt_reserve(struct rpc_task *task) | |||
973 | 974 | ||
974 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 975 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
975 | { | 976 | { |
976 | return xprt->xid++; | 977 | return (__force __be32)xprt->xid++; |
977 | } | 978 | } |
978 | 979 | ||
979 | static inline void xprt_init_xid(struct rpc_xprt *xprt) | 980 | static inline void xprt_init_xid(struct rpc_xprt *xprt) |
@@ -1032,21 +1033,16 @@ void xprt_release(struct rpc_task *task) | |||
1032 | if (req->rq_release_snd_buf) | 1033 | if (req->rq_release_snd_buf) |
1033 | req->rq_release_snd_buf(req); | 1034 | req->rq_release_snd_buf(req); |
1034 | 1035 | ||
1035 | /* | ||
1036 | * Early exit if this is a backchannel preallocated request. | ||
1037 | * There is no need to have it added to the RPC slot list. | ||
1038 | */ | ||
1039 | if (is_bc_request) | ||
1040 | return; | ||
1041 | |||
1042 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1043 | |||
1044 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); | 1036 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |
1037 | if (likely(!is_bc_request)) { | ||
1038 | memset(req, 0, sizeof(*req)); /* mark unused */ | ||
1045 | 1039 | ||
1046 | spin_lock(&xprt->reserve_lock); | 1040 | spin_lock(&xprt->reserve_lock); |
1047 | list_add(&req->rq_list, &xprt->free); | 1041 | list_add(&req->rq_list, &xprt->free); |
1048 | rpc_wake_up_next(&xprt->backlog); | 1042 | rpc_wake_up_next(&xprt->backlog); |
1049 | spin_unlock(&xprt->reserve_lock); | 1043 | spin_unlock(&xprt->reserve_lock); |
1044 | } else | ||
1045 | xprt_free_bc_request(req); | ||
1050 | } | 1046 | } |
1051 | 1047 | ||
1052 | /** | 1048 | /** |
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index 5b8a8ff93a25..d718b8fa9525 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c | |||
@@ -40,6 +40,7 @@ | |||
40 | */ | 40 | */ |
41 | #include <linux/module.h> | 41 | #include <linux/module.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/slab.h> | ||
43 | #include <linux/fs.h> | 44 | #include <linux/fs.h> |
44 | #include <linux/sysctl.h> | 45 | #include <linux/sysctl.h> |
45 | #include <linux/sunrpc/clnt.h> | 46 | #include <linux/sunrpc/clnt.h> |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 3fa5751af0ec..edea15a54e51 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/sunrpc/debug.h> | 43 | #include <linux/sunrpc/debug.h> |
44 | #include <linux/sunrpc/rpc_rdma.h> | 44 | #include <linux/sunrpc/rpc_rdma.h> |
45 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
46 | #include <linux/slab.h> | ||
46 | #include <linux/spinlock.h> | 47 | #include <linux/spinlock.h> |
47 | #include <rdma/ib_verbs.h> | 48 | #include <rdma/ib_verbs.h> |
48 | #include <rdma/rdma_cm.h> | 49 | #include <rdma/rdma_cm.h> |
@@ -678,7 +679,10 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | |||
678 | int ret; | 679 | int ret; |
679 | 680 | ||
680 | dprintk("svcrdma: Creating RDMA socket\n"); | 681 | dprintk("svcrdma: Creating RDMA socket\n"); |
681 | 682 | if (sa->sa_family != AF_INET) { | |
683 | dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family); | ||
684 | return ERR_PTR(-EAFNOSUPPORT); | ||
685 | } | ||
682 | cma_xprt = rdma_create_xprt(serv, 1); | 686 | cma_xprt = rdma_create_xprt(serv, 1); |
683 | if (!cma_xprt) | 687 | if (!cma_xprt) |
684 | return ERR_PTR(-ENOMEM); | 688 | return ERR_PTR(-ENOMEM); |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index f96c2fe6137b..187257b1d880 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -49,6 +49,7 @@ | |||
49 | 49 | ||
50 | #include <linux/module.h> | 50 | #include <linux/module.h> |
51 | #include <linux/init.h> | 51 | #include <linux/init.h> |
52 | #include <linux/slab.h> | ||
52 | #include <linux/seq_file.h> | 53 | #include <linux/seq_file.h> |
53 | 54 | ||
54 | #include "xprt_rdma.h" | 55 | #include "xprt_rdma.h" |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 2209aa87d899..27015c6d8eb5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -48,6 +48,7 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | #include <linux/pci.h> /* for Tavor hack below */ | 50 | #include <linux/pci.h> /* for Tavor hack below */ |
51 | #include <linux/slab.h> | ||
51 | 52 | ||
52 | #include "xprt_rdma.h" | 53 | #include "xprt_rdma.h" |
53 | 54 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 4f55ab7ec1b1..9847c30b5001 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -548,8 +548,6 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
548 | /* Still some bytes left; set up for a retry later. */ | 548 | /* Still some bytes left; set up for a retry later. */ |
549 | status = -EAGAIN; | 549 | status = -EAGAIN; |
550 | } | 550 | } |
551 | if (!transport->sock) | ||
552 | goto out; | ||
553 | 551 | ||
554 | switch (status) { | 552 | switch (status) { |
555 | case -ENOTSOCK: | 553 | case -ENOTSOCK: |
@@ -569,7 +567,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
569 | * prompts ECONNREFUSED. */ | 567 | * prompts ECONNREFUSED. */ |
570 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 568 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); |
571 | } | 569 | } |
572 | out: | 570 | |
573 | return status; | 571 | return status; |
574 | } | 572 | } |
575 | 573 | ||
@@ -651,8 +649,6 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
651 | status = -EAGAIN; | 649 | status = -EAGAIN; |
652 | break; | 650 | break; |
653 | } | 651 | } |
654 | if (!transport->sock) | ||
655 | goto out; | ||
656 | 652 | ||
657 | switch (status) { | 653 | switch (status) { |
658 | case -ENOTSOCK: | 654 | case -ENOTSOCK: |
@@ -672,7 +668,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
672 | case -ENOTCONN: | 668 | case -ENOTCONN: |
673 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 669 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); |
674 | } | 670 | } |
675 | out: | 671 | |
676 | return status; | 672 | return status; |
677 | } | 673 | } |
678 | 674 | ||
@@ -1911,6 +1907,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
1911 | case -EALREADY: | 1907 | case -EALREADY: |
1912 | xprt_clear_connecting(xprt); | 1908 | xprt_clear_connecting(xprt); |
1913 | return; | 1909 | return; |
1910 | case -EINVAL: | ||
1911 | /* Happens, for instance, if the user specified a link | ||
1912 | * local IPv6 address without a scope-id. | ||
1913 | */ | ||
1914 | goto out; | ||
1914 | } | 1915 | } |
1915 | out_eagain: | 1916 | out_eagain: |
1916 | status = -EAGAIN; | 1917 | status = -EAGAIN; |
@@ -2099,7 +2100,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
2099 | * we allocate pages instead doing a kmalloc like rpc_malloc is because we want | 2100 | * we allocate pages instead doing a kmalloc like rpc_malloc is because we want |
2100 | * to use the server side send routines. | 2101 | * to use the server side send routines. |
2101 | */ | 2102 | */ |
2102 | void *bc_malloc(struct rpc_task *task, size_t size) | 2103 | static void *bc_malloc(struct rpc_task *task, size_t size) |
2103 | { | 2104 | { |
2104 | struct page *page; | 2105 | struct page *page; |
2105 | struct rpc_buffer *buf; | 2106 | struct rpc_buffer *buf; |
@@ -2119,7 +2120,7 @@ void *bc_malloc(struct rpc_task *task, size_t size) | |||
2119 | /* | 2120 | /* |
2120 | * Free the space allocated in the bc_alloc routine | 2121 | * Free the space allocated in the bc_alloc routine |
2121 | */ | 2122 | */ |
2122 | void bc_free(void *buffer) | 2123 | static void bc_free(void *buffer) |
2123 | { | 2124 | { |
2124 | struct rpc_buffer *buf; | 2125 | struct rpc_buffer *buf; |
2125 | 2126 | ||
@@ -2250,9 +2251,6 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
2250 | .buf_free = rpc_free, | 2251 | .buf_free = rpc_free, |
2251 | .send_request = xs_tcp_send_request, | 2252 | .send_request = xs_tcp_send_request, |
2252 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2253 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
2253 | #if defined(CONFIG_NFS_V4_1) | ||
2254 | .release_request = bc_release_request, | ||
2255 | #endif /* CONFIG_NFS_V4_1 */ | ||
2256 | .close = xs_tcp_close, | 2254 | .close = xs_tcp_close, |
2257 | .destroy = xs_destroy, | 2255 | .destroy = xs_destroy, |
2258 | .print_stats = xs_tcp_print_stats, | 2256 | .print_stats = xs_tcp_print_stats, |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index a3bfd4064912..90a051912c03 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
558 | struct tipc_bearer *unused1, | 558 | struct tipc_bearer *unused1, |
559 | struct tipc_media_addr *unused2) | 559 | struct tipc_media_addr *unused2) |
560 | { | 560 | { |
561 | static int send_count = 0; | ||
562 | |||
563 | int bp_index; | 561 | int bp_index; |
564 | int swap_time; | ||
565 | 562 | ||
566 | /* Prepare buffer for broadcasting (if first time trying to send it) */ | 563 | /* Prepare buffer for broadcasting (if first time trying to send it) */ |
567 | 564 | ||
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
575 | msg_set_mc_netid(msg, tipc_net_id); | 572 | msg_set_mc_netid(msg, tipc_net_id); |
576 | } | 573 | } |
577 | 574 | ||
578 | /* Determine if bearer pairs should be swapped following this attempt */ | ||
579 | |||
580 | if ((swap_time = (++send_count >= 10))) | ||
581 | send_count = 0; | ||
582 | |||
583 | /* Send buffer over bearers until all targets reached */ | 575 | /* Send buffer over bearers until all targets reached */ |
584 | 576 | ||
585 | bcbearer->remains = tipc_cltr_bcast_nodes; | 577 | bcbearer->remains = tipc_cltr_bcast_nodes; |
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
595 | if (bcbearer->remains_new.count == bcbearer->remains.count) | 587 | if (bcbearer->remains_new.count == bcbearer->remains.count) |
596 | continue; /* bearer pair doesn't add anything */ | 588 | continue; /* bearer pair doesn't add anything */ |
597 | 589 | ||
598 | if (!p->publ.blocked && | 590 | if (p->publ.blocked || |
599 | !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { | 591 | p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { |
600 | if (swap_time && s && !s->publ.blocked) | 592 | /* unable to send on primary bearer */ |
601 | goto swap; | 593 | if (!s || s->publ.blocked || |
602 | else | 594 | s->media->send_msg(buf, &s->publ, |
603 | goto update; | 595 | &s->media->bcast_addr)) { |
596 | /* unable to send on either bearer */ | ||
597 | continue; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | if (s) { | ||
602 | bcbearer->bpairs[bp_index].primary = s; | ||
603 | bcbearer->bpairs[bp_index].secondary = p; | ||
604 | } | 604 | } |
605 | 605 | ||
606 | if (!s || s->publ.blocked || | ||
607 | s->media->send_msg(buf, &s->publ, &s->media->bcast_addr)) | ||
608 | continue; /* unable to send using bearer pair */ | ||
609 | swap: | ||
610 | bcbearer->bpairs[bp_index].primary = s; | ||
611 | bcbearer->bpairs[bp_index].secondary = p; | ||
612 | update: | ||
613 | if (bcbearer->remains_new.count == 0) | 606 | if (bcbearer->remains_new.count == 0) |
614 | return 0; | 607 | return 0; |
615 | 608 | ||
diff --git a/net/tipc/core.c b/net/tipc/core.c index 52c571fedbe0..4e84c8431f32 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include "config.h" | 49 | #include "config.h" |
50 | 50 | ||
51 | 51 | ||
52 | #define TIPC_MOD_VER "1.6.4" | 52 | #define TIPC_MOD_VER "2.0.0" |
53 | 53 | ||
54 | #ifndef CONFIG_TIPC_ZONES | 54 | #ifndef CONFIG_TIPC_ZONES |
55 | #define CONFIG_TIPC_ZONES 3 | 55 | #define CONFIG_TIPC_ZONES 3 |
diff --git a/net/tipc/core.h b/net/tipc/core.h index a881f92a8537..c58a1d16563a 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/netdevice.h> | 56 | #include <linux/netdevice.h> |
57 | #include <linux/in.h> | 57 | #include <linux/in.h> |
58 | #include <linux/list.h> | 58 | #include <linux/list.h> |
59 | #include <linux/slab.h> | ||
59 | #include <linux/vmalloc.h> | 60 | #include <linux/vmalloc.h> |
60 | 61 | ||
61 | /* | 62 | /* |
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 524ba5696d4d..6230d16020c4 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/tipc/tipc_bearer.h> | 38 | #include <net/tipc/tipc_bearer.h> |
39 | #include <net/tipc/tipc_msg.h> | 39 | #include <net/tipc/tipc_msg.h> |
40 | #include <linux/netdevice.h> | 40 | #include <linux/netdevice.h> |
41 | #include <linux/slab.h> | ||
41 | #include <net/net_namespace.h> | 42 | #include <net/net_namespace.h> |
42 | 43 | ||
43 | #define MAX_ETH_BEARERS 2 | 44 | #define MAX_ETH_BEARERS 2 |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 1a7e4665af80..c76e82e5f982 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -877,7 +877,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
877 | case TIMEOUT_EVT: | 877 | case TIMEOUT_EVT: |
878 | dbg_link("TIM "); | 878 | dbg_link("TIM "); |
879 | if (l_ptr->next_in_no != l_ptr->checkpoint) { | 879 | if (l_ptr->next_in_no != l_ptr->checkpoint) { |
880 | dbg_link("-> WW \n"); | 880 | dbg_link("-> WW\n"); |
881 | l_ptr->state = WORKING_WORKING; | 881 | l_ptr->state = WORKING_WORKING; |
882 | l_ptr->fsm_msg_cnt = 0; | 882 | l_ptr->fsm_msg_cnt = 0; |
883 | l_ptr->checkpoint = l_ptr->next_in_no; | 883 | l_ptr->checkpoint = l_ptr->next_in_no; |
@@ -934,7 +934,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
934 | link_set_timer(l_ptr, cont_intv); | 934 | link_set_timer(l_ptr, cont_intv); |
935 | break; | 935 | break; |
936 | case RESET_MSG: | 936 | case RESET_MSG: |
937 | dbg_link("RES \n"); | 937 | dbg_link("RES\n"); |
938 | dbg_link(" -> RR\n"); | 938 | dbg_link(" -> RR\n"); |
939 | l_ptr->state = RESET_RESET; | 939 | l_ptr->state = RESET_RESET; |
940 | l_ptr->fsm_msg_cnt = 0; | 940 | l_ptr->fsm_msg_cnt = 0; |
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
947 | l_ptr->started = 1; | 947 | l_ptr->started = 1; |
948 | /* fall through */ | 948 | /* fall through */ |
949 | case TIMEOUT_EVT: | 949 | case TIMEOUT_EVT: |
950 | dbg_link("TIM \n"); | 950 | dbg_link("TIM\n"); |
951 | tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); | 951 | tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); |
952 | l_ptr->fsm_msg_cnt++; | 952 | l_ptr->fsm_msg_cnt++; |
953 | link_set_timer(l_ptr, cont_intv); | 953 | link_set_timer(l_ptr, cont_intv); |
@@ -1553,7 +1553,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1553 | 1553 | ||
1554 | /* Continue retransmission now, if there is anything: */ | 1554 | /* Continue retransmission now, if there is anything: */ |
1555 | 1555 | ||
1556 | if (r_q_size && buf && !skb_cloned(buf)) { | 1556 | if (r_q_size && buf) { |
1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); | 1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); |
1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); | 1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); |
1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { |
@@ -1722,15 +1722,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); | 1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); |
1723 | 1723 | ||
1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { | 1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { |
1725 | if (!skb_cloned(buf)) { | 1725 | if (l_ptr->retransm_queue_size == 0) { |
1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); | 1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); |
1727 | dbg_print_link(l_ptr, " "); | 1727 | dbg_print_link(l_ptr, " "); |
1728 | l_ptr->retransm_queue_head = msg_seqno(msg); | 1728 | l_ptr->retransm_queue_head = msg_seqno(msg); |
1729 | l_ptr->retransm_queue_size = retransmits; | 1729 | l_ptr->retransm_queue_size = retransmits; |
1730 | return; | ||
1731 | } else { | 1730 | } else { |
1732 | /* Don't retransmit if driver already has the buffer */ | 1731 | err("Unexpected retransmit on link %s (qsize=%d)\n", |
1732 | l_ptr->name, l_ptr->retransm_queue_size); | ||
1733 | } | 1733 | } |
1734 | return; | ||
1734 | } else { | 1735 | } else { |
1735 | /* Detect repeated retransmit failures on uncongested bearer */ | 1736 | /* Detect repeated retransmit failures on uncongested bearer */ |
1736 | 1737 | ||
@@ -1745,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1745 | } | 1746 | } |
1746 | } | 1747 | } |
1747 | 1748 | ||
1748 | while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { | 1749 | while (retransmits && (buf != l_ptr->next_out) && buf) { |
1749 | msg = buf_msg(buf); | 1750 | msg = buf_msg(buf); |
1750 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1751 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1751 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1752 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
@@ -3294,7 +3295,7 @@ static void link_dump_rec_queue(struct link *l_ptr) | |||
3294 | info("buffer %x invalid\n", crs); | 3295 | info("buffer %x invalid\n", crs); |
3295 | return; | 3296 | return; |
3296 | } | 3297 | } |
3297 | msg_dbg(buf_msg(crs), "In rec queue: \n"); | 3298 | msg_dbg(buf_msg(crs), "In rec queue:\n"); |
3298 | crs = crs->next; | 3299 | crs = crs->next; |
3299 | } | 3300 | } |
3300 | } | 3301 | } |
diff --git a/net/tipc/net.c b/net/tipc/net.c index f25b1cdb64eb..d7cd1e064a80 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -116,7 +116,7 @@ | |||
116 | */ | 116 | */ |
117 | 117 | ||
118 | DEFINE_RWLOCK(tipc_net_lock); | 118 | DEFINE_RWLOCK(tipc_net_lock); |
119 | struct _zone *tipc_zones[256] = { NULL, }; | 119 | static struct _zone *tipc_zones[256] = { NULL, }; |
120 | struct network tipc_net = { tipc_zones }; | 120 | struct network tipc_net = { tipc_zones }; |
121 | 121 | ||
122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) | 122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) |
@@ -291,6 +291,6 @@ void tipc_net_stop(void) | |||
291 | tipc_bclink_stop(); | 291 | tipc_bclink_stop(); |
292 | net_stop(); | 292 | net_stop(); |
293 | write_unlock_bh(&tipc_net_lock); | 293 | write_unlock_bh(&tipc_net_lock); |
294 | info("Left network mode \n"); | 294 | info("Left network mode\n"); |
295 | } | 295 | } |
296 | 296 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 2c24e7d6d950..17cc394f424f 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -278,7 +278,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) | |||
278 | n_ptr->link_cnt++; | 278 | n_ptr->link_cnt++; |
279 | return n_ptr; | 279 | return n_ptr; |
280 | } | 280 | } |
281 | err("Attempt to establish second link on <%s> to %s \n", | 281 | err("Attempt to establish second link on <%s> to %s\n", |
282 | l_ptr->b_ptr->publ.name, | 282 | l_ptr->b_ptr->publ.name, |
283 | addr_string_fill(addr_string, l_ptr->addr)); | 283 | addr_string_fill(addr_string, l_ptr->addr)); |
284 | } | 284 | } |
diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 414fc34b8bea..8dea66500cf5 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c | |||
@@ -153,11 +153,11 @@ void tipc_ref_table_stop(void) | |||
153 | 153 | ||
154 | u32 tipc_ref_acquire(void *object, spinlock_t **lock) | 154 | u32 tipc_ref_acquire(void *object, spinlock_t **lock) |
155 | { | 155 | { |
156 | struct reference *entry; | ||
157 | u32 index; | 156 | u32 index; |
158 | u32 index_mask; | 157 | u32 index_mask; |
159 | u32 next_plus_upper; | 158 | u32 next_plus_upper; |
160 | u32 ref; | 159 | u32 ref; |
160 | struct reference *entry = NULL; | ||
161 | 161 | ||
162 | if (!object) { | 162 | if (!object) { |
163 | err("Attempt to acquire reference to non-existent object\n"); | 163 | err("Attempt to acquire reference to non-existent object\n"); |
@@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) | |||
175 | index = tipc_ref_table.first_free; | 175 | index = tipc_ref_table.first_free; |
176 | entry = &(tipc_ref_table.entries[index]); | 176 | entry = &(tipc_ref_table.entries[index]); |
177 | index_mask = tipc_ref_table.index_mask; | 177 | index_mask = tipc_ref_table.index_mask; |
178 | /* take lock in case a previous user of entry still holds it */ | ||
179 | spin_lock_bh(&entry->lock); | ||
180 | next_plus_upper = entry->ref; | 178 | next_plus_upper = entry->ref; |
181 | tipc_ref_table.first_free = next_plus_upper & index_mask; | 179 | tipc_ref_table.first_free = next_plus_upper & index_mask; |
182 | ref = (next_plus_upper & ~index_mask) + index; | 180 | ref = (next_plus_upper & ~index_mask) + index; |
183 | entry->ref = ref; | ||
184 | entry->object = object; | ||
185 | *lock = &entry->lock; | ||
186 | } | 181 | } |
187 | else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { | 182 | else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { |
188 | index = tipc_ref_table.init_point++; | 183 | index = tipc_ref_table.init_point++; |
189 | entry = &(tipc_ref_table.entries[index]); | 184 | entry = &(tipc_ref_table.entries[index]); |
190 | spin_lock_init(&entry->lock); | 185 | spin_lock_init(&entry->lock); |
191 | spin_lock_bh(&entry->lock); | ||
192 | ref = tipc_ref_table.start_mask + index; | 186 | ref = tipc_ref_table.start_mask + index; |
193 | entry->ref = ref; | ||
194 | entry->object = object; | ||
195 | *lock = &entry->lock; | ||
196 | } | 187 | } |
197 | else { | 188 | else { |
198 | ref = 0; | 189 | ref = 0; |
199 | } | 190 | } |
200 | write_unlock_bh(&ref_table_lock); | 191 | write_unlock_bh(&ref_table_lock); |
201 | 192 | ||
193 | /* | ||
194 | * Grab the lock so no one else can modify this entry | ||
195 | * While we assign its ref value & object pointer | ||
196 | */ | ||
197 | if (entry) { | ||
198 | spin_lock_bh(&entry->lock); | ||
199 | entry->ref = ref; | ||
200 | entry->object = object; | ||
201 | *lock = &entry->lock; | ||
202 | /* | ||
203 | * keep it locked, the caller is responsible | ||
204 | * for unlocking this when they're done with it | ||
205 | */ | ||
206 | } | ||
207 | |||
202 | return ref; | 208 | return ref; |
203 | } | 209 | } |
204 | 210 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 4b235fc1c70f..66e889ba48fd 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -40,9 +40,9 @@ | |||
40 | #include <linux/socket.h> | 40 | #include <linux/socket.h> |
41 | #include <linux/errno.h> | 41 | #include <linux/errno.h> |
42 | #include <linux/mm.h> | 42 | #include <linux/mm.h> |
43 | #include <linux/slab.h> | ||
44 | #include <linux/poll.h> | 43 | #include <linux/poll.h> |
45 | #include <linux/fcntl.h> | 44 | #include <linux/fcntl.h> |
45 | #include <linux/gfp.h> | ||
46 | #include <asm/string.h> | 46 | #include <asm/string.h> |
47 | #include <asm/atomic.h> | 47 | #include <asm/atomic.h> |
48 | #include <net/sock.h> | 48 | #include <net/sock.h> |
@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock, | |||
446 | struct sock *sk = sock->sk; | 446 | struct sock *sk = sock->sk; |
447 | u32 mask; | 447 | u32 mask; |
448 | 448 | ||
449 | poll_wait(file, sk->sk_sleep, wait); | 449 | poll_wait(file, sk_sleep(sk), wait); |
450 | 450 | ||
451 | if (!skb_queue_empty(&sk->sk_receive_queue) || | 451 | if (!skb_queue_empty(&sk->sk_receive_queue) || |
452 | (sock->state == SS_UNCONNECTED) || | 452 | (sock->state == SS_UNCONNECTED) || |
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, | |||
591 | break; | 591 | break; |
592 | } | 592 | } |
593 | release_sock(sk); | 593 | release_sock(sk); |
594 | res = wait_event_interruptible(*sk->sk_sleep, | 594 | res = wait_event_interruptible(*sk_sleep(sk), |
595 | !tport->congested); | 595 | !tport->congested); |
596 | lock_sock(sk); | 596 | lock_sock(sk); |
597 | if (res) | 597 | if (res) |
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, | |||
650 | break; | 650 | break; |
651 | } | 651 | } |
652 | release_sock(sk); | 652 | release_sock(sk); |
653 | res = wait_event_interruptible(*sk->sk_sleep, | 653 | res = wait_event_interruptible(*sk_sleep(sk), |
654 | (!tport->congested || !tport->connected)); | 654 | (!tport->congested || !tport->connected)); |
655 | lock_sock(sk); | 655 | lock_sock(sk); |
656 | if (res) | 656 | if (res) |
@@ -931,7 +931,7 @@ restart: | |||
931 | goto exit; | 931 | goto exit; |
932 | } | 932 | } |
933 | release_sock(sk); | 933 | release_sock(sk); |
934 | res = wait_event_interruptible(*sk->sk_sleep, | 934 | res = wait_event_interruptible(*sk_sleep(sk), |
935 | (!skb_queue_empty(&sk->sk_receive_queue) || | 935 | (!skb_queue_empty(&sk->sk_receive_queue) || |
936 | (sock->state == SS_DISCONNECTING))); | 936 | (sock->state == SS_DISCONNECTING))); |
937 | lock_sock(sk); | 937 | lock_sock(sk); |
@@ -1064,7 +1064,7 @@ restart: | |||
1064 | goto exit; | 1064 | goto exit; |
1065 | } | 1065 | } |
1066 | release_sock(sk); | 1066 | release_sock(sk); |
1067 | res = wait_event_interruptible(*sk->sk_sleep, | 1067 | res = wait_event_interruptible(*sk_sleep(sk), |
1068 | (!skb_queue_empty(&sk->sk_receive_queue) || | 1068 | (!skb_queue_empty(&sk->sk_receive_queue) || |
1069 | (sock->state == SS_DISCONNECTING))); | 1069 | (sock->state == SS_DISCONNECTING))); |
1070 | lock_sock(sk); | 1070 | lock_sock(sk); |
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) | |||
1271 | tipc_disconnect_port(tipc_sk_port(sk)); | 1271 | tipc_disconnect_port(tipc_sk_port(sk)); |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | if (waitqueue_active(sk->sk_sleep)) | 1274 | if (waitqueue_active(sk_sleep(sk))) |
1275 | wake_up_interruptible(sk->sk_sleep); | 1275 | wake_up_interruptible(sk_sleep(sk)); |
1276 | return TIPC_OK; | 1276 | return TIPC_OK; |
1277 | } | 1277 | } |
1278 | 1278 | ||
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport) | |||
1343 | { | 1343 | { |
1344 | struct sock *sk = (struct sock *)tport->usr_handle; | 1344 | struct sock *sk = (struct sock *)tport->usr_handle; |
1345 | 1345 | ||
1346 | if (waitqueue_active(sk->sk_sleep)) | 1346 | if (waitqueue_active(sk_sleep(sk))) |
1347 | wake_up_interruptible(sk->sk_sleep); | 1347 | wake_up_interruptible(sk_sleep(sk)); |
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | /** | 1350 | /** |
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, | |||
1426 | /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ | 1426 | /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ |
1427 | 1427 | ||
1428 | release_sock(sk); | 1428 | release_sock(sk); |
1429 | res = wait_event_interruptible_timeout(*sk->sk_sleep, | 1429 | res = wait_event_interruptible_timeout(*sk_sleep(sk), |
1430 | (!skb_queue_empty(&sk->sk_receive_queue) || | 1430 | (!skb_queue_empty(&sk->sk_receive_queue) || |
1431 | (sock->state != SS_CONNECTING)), | 1431 | (sock->state != SS_CONNECTING)), |
1432 | sk->sk_rcvtimeo); | 1432 | sk->sk_rcvtimeo); |
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) | |||
1521 | goto exit; | 1521 | goto exit; |
1522 | } | 1522 | } |
1523 | release_sock(sk); | 1523 | release_sock(sk); |
1524 | res = wait_event_interruptible(*sk->sk_sleep, | 1524 | res = wait_event_interruptible(*sk_sleep(sk), |
1525 | (!skb_queue_empty(&sk->sk_receive_queue))); | 1525 | (!skb_queue_empty(&sk->sk_receive_queue))); |
1526 | lock_sock(sk); | 1526 | lock_sock(sk); |
1527 | if (res) | 1527 | if (res) |
@@ -1632,8 +1632,8 @@ restart: | |||
1632 | /* Discard any unreceived messages; wake up sleeping tasks */ | 1632 | /* Discard any unreceived messages; wake up sleeping tasks */ |
1633 | 1633 | ||
1634 | discard_rx_queue(sk); | 1634 | discard_rx_queue(sk); |
1635 | if (waitqueue_active(sk->sk_sleep)) | 1635 | if (waitqueue_active(sk_sleep(sk))) |
1636 | wake_up_interruptible(sk->sk_sleep); | 1636 | wake_up_interruptible(sk_sleep(sk)); |
1637 | res = 0; | 1637 | res = 0; |
1638 | break; | 1638 | break; |
1639 | 1639 | ||
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ff123e56114a..ab6eab4c45e2 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
274 | { | 274 | { |
275 | struct subscription *sub; | 275 | struct subscription *sub; |
276 | struct subscription *sub_temp; | 276 | struct subscription *sub_temp; |
277 | __u32 type, lower, upper; | 277 | __u32 type, lower, upper, timeout, filter; |
278 | int found = 0; | 278 | int found = 0; |
279 | 279 | ||
280 | /* Find first matching subscription, exit if not found */ | 280 | /* Find first matching subscription, exit if not found */ |
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
282 | type = ntohl(s->seq.type); | 282 | type = ntohl(s->seq.type); |
283 | lower = ntohl(s->seq.lower); | 283 | lower = ntohl(s->seq.lower); |
284 | upper = ntohl(s->seq.upper); | 284 | upper = ntohl(s->seq.upper); |
285 | timeout = ntohl(s->timeout); | ||
286 | filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL; | ||
285 | 287 | ||
286 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 288 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
287 | subscription_list) { | 289 | subscription_list) { |
288 | if ((type == sub->seq.type) && | 290 | if ((type == sub->seq.type) && |
289 | (lower == sub->seq.lower) && | 291 | (lower == sub->seq.lower) && |
290 | (upper == sub->seq.upper)) { | 292 | (upper == sub->seq.upper) && |
293 | (timeout == sub->timeout) && | ||
294 | (filter == sub->filter) && | ||
295 | !memcmp(s->usr_handle,sub->evt.s.usr_handle, | ||
296 | sizeof(s->usr_handle)) ){ | ||
291 | found = 1; | 297 | found = 1; |
292 | break; | 298 | break; |
293 | } | 299 | } |
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
304 | k_term_timer(&sub->timer); | 310 | k_term_timer(&sub->timer); |
305 | spin_lock_bh(subscriber->lock); | 311 | spin_lock_bh(subscriber->lock); |
306 | } | 312 | } |
307 | dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n", | 313 | dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n", |
308 | sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); | 314 | sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); |
309 | subscr_del(sub); | 315 | subscr_del(sub); |
310 | } | 316 | } |
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
352 | sub->seq.upper = ntohl(s->seq.upper); | 358 | sub->seq.upper = ntohl(s->seq.upper); |
353 | sub->timeout = ntohl(s->timeout); | 359 | sub->timeout = ntohl(s->timeout); |
354 | sub->filter = ntohl(s->filter); | 360 | sub->filter = ntohl(s->filter); |
355 | if ((!(sub->filter & TIPC_SUB_PORTS) == | 361 | if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) || |
356 | !(sub->filter & TIPC_SUB_SERVICE)) || | ||
357 | (sub->seq.lower > sub->seq.upper)) { | 362 | (sub->seq.lower > sub->seq.upper)) { |
358 | warn("Subscription rejected, illegal request\n"); | 363 | warn("Subscription rejected, illegal request\n"); |
359 | kfree(sub); | 364 | kfree(sub); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3d9122e78f41..fef2cc5e9d2b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk) | |||
313 | 313 | ||
314 | static void unix_write_space(struct sock *sk) | 314 | static void unix_write_space(struct sock *sk) |
315 | { | 315 | { |
316 | read_lock(&sk->sk_callback_lock); | 316 | struct socket_wq *wq; |
317 | |||
318 | rcu_read_lock(); | ||
317 | if (unix_writable(sk)) { | 319 | if (unix_writable(sk)) { |
318 | if (sk_has_sleeper(sk)) | 320 | wq = rcu_dereference(sk->sk_wq); |
319 | wake_up_interruptible_sync(sk->sk_sleep); | 321 | if (wq_has_sleeper(wq)) |
322 | wake_up_interruptible_sync(&wq->wait); | ||
320 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 323 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
321 | } | 324 | } |
322 | read_unlock(&sk->sk_callback_lock); | 325 | rcu_read_unlock(); |
323 | } | 326 | } |
324 | 327 | ||
325 | /* When dgram socket disconnects (or changes its peer), we clear its receive | 328 | /* When dgram socket disconnects (or changes its peer), we clear its receive |
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion) | |||
406 | skpair->sk_err = ECONNRESET; | 409 | skpair->sk_err = ECONNRESET; |
407 | unix_state_unlock(skpair); | 410 | unix_state_unlock(skpair); |
408 | skpair->sk_state_change(skpair); | 411 | skpair->sk_state_change(skpair); |
409 | read_lock(&skpair->sk_callback_lock); | ||
410 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); | 412 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); |
411 | read_unlock(&skpair->sk_callback_lock); | ||
412 | } | 413 | } |
413 | sock_put(skpair); /* It may now die */ | 414 | sock_put(skpair); /* It may now die */ |
414 | unix_peer(sk) = NULL; | 415 | unix_peer(sk) = NULL; |
@@ -1142,7 +1143,7 @@ restart: | |||
1142 | newsk->sk_peercred.pid = task_tgid_vnr(current); | 1143 | newsk->sk_peercred.pid = task_tgid_vnr(current); |
1143 | current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid); | 1144 | current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid); |
1144 | newu = unix_sk(newsk); | 1145 | newu = unix_sk(newsk); |
1145 | newsk->sk_sleep = &newu->peer_wait; | 1146 | newsk->sk_wq = &newu->peer_wq; |
1146 | otheru = unix_sk(other); | 1147 | otheru = unix_sk(other); |
1147 | 1148 | ||
1148 | /* copy address information from listening to new sock*/ | 1149 | /* copy address information from listening to new sock*/ |
@@ -1736,7 +1737,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo) | |||
1736 | unix_state_lock(sk); | 1737 | unix_state_lock(sk); |
1737 | 1738 | ||
1738 | for (;;) { | 1739 | for (;;) { |
1739 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1740 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1740 | 1741 | ||
1741 | if (!skb_queue_empty(&sk->sk_receive_queue) || | 1742 | if (!skb_queue_empty(&sk->sk_receive_queue) || |
1742 | sk->sk_err || | 1743 | sk->sk_err || |
@@ -1752,7 +1753,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo) | |||
1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1753 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1753 | } | 1754 | } |
1754 | 1755 | ||
1755 | finish_wait(sk->sk_sleep, &wait); | 1756 | finish_wait(sk_sleep(sk), &wait); |
1756 | unix_state_unlock(sk); | 1757 | unix_state_unlock(sk); |
1757 | return timeo; | 1758 | return timeo; |
1758 | } | 1759 | } |
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode) | |||
1931 | other->sk_shutdown |= peer_mode; | 1932 | other->sk_shutdown |= peer_mode; |
1932 | unix_state_unlock(other); | 1933 | unix_state_unlock(other); |
1933 | other->sk_state_change(other); | 1934 | other->sk_state_change(other); |
1934 | read_lock(&other->sk_callback_lock); | ||
1935 | if (peer_mode == SHUTDOWN_MASK) | 1935 | if (peer_mode == SHUTDOWN_MASK) |
1936 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); | 1936 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); |
1937 | else if (peer_mode & RCV_SHUTDOWN) | 1937 | else if (peer_mode & RCV_SHUTDOWN) |
1938 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); | 1938 | sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); |
1939 | read_unlock(&other->sk_callback_lock); | ||
1940 | } | 1939 | } |
1941 | if (other) | 1940 | if (other) |
1942 | sock_put(other); | 1941 | sock_put(other); |
@@ -1991,7 +1990,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table | |||
1991 | struct sock *sk = sock->sk; | 1990 | struct sock *sk = sock->sk; |
1992 | unsigned int mask; | 1991 | unsigned int mask; |
1993 | 1992 | ||
1994 | sock_poll_wait(file, sk->sk_sleep, wait); | 1993 | sock_poll_wait(file, sk_sleep(sk), wait); |
1995 | mask = 0; | 1994 | mask = 0; |
1996 | 1995 | ||
1997 | /* exceptional events? */ | 1996 | /* exceptional events? */ |
@@ -2028,7 +2027,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, | |||
2028 | struct sock *sk = sock->sk, *other; | 2027 | struct sock *sk = sock->sk, *other; |
2029 | unsigned int mask, writable; | 2028 | unsigned int mask, writable; |
2030 | 2029 | ||
2031 | sock_poll_wait(file, sk->sk_sleep, wait); | 2030 | sock_poll_wait(file, sk_sleep(sk), wait); |
2032 | mask = 0; | 2031 | mask = 0; |
2033 | 2032 | ||
2034 | /* exceptional events? */ | 2033 | /* exceptional events? */ |
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 19c17e4a0c8b..c8df6fda0b1f 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c | |||
@@ -74,7 +74,6 @@ | |||
74 | #include <linux/un.h> | 74 | #include <linux/un.h> |
75 | #include <linux/net.h> | 75 | #include <linux/net.h> |
76 | #include <linux/fs.h> | 76 | #include <linux/fs.h> |
77 | #include <linux/slab.h> | ||
78 | #include <linux/skbuff.h> | 77 | #include <linux/skbuff.h> |
79 | #include <linux/netdevice.h> | 78 | #include <linux/netdevice.h> |
80 | #include <linux/file.h> | 79 | #include <linux/file.h> |
@@ -154,15 +153,6 @@ void unix_notinflight(struct file *fp) | |||
154 | } | 153 | } |
155 | } | 154 | } |
156 | 155 | ||
157 | static inline struct sk_buff *sock_queue_head(struct sock *sk) | ||
158 | { | ||
159 | return (struct sk_buff *)&sk->sk_receive_queue; | ||
160 | } | ||
161 | |||
162 | #define receive_queue_for_each_skb(sk, next, skb) \ | ||
163 | for (skb = sock_queue_head(sk)->next, next = skb->next; \ | ||
164 | skb != sock_queue_head(sk); skb = next, next = skb->next) | ||
165 | |||
166 | static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), | 156 | static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), |
167 | struct sk_buff_head *hitlist) | 157 | struct sk_buff_head *hitlist) |
168 | { | 158 | { |
@@ -170,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), | |||
170 | struct sk_buff *next; | 160 | struct sk_buff *next; |
171 | 161 | ||
172 | spin_lock(&x->sk_receive_queue.lock); | 162 | spin_lock(&x->sk_receive_queue.lock); |
173 | receive_queue_for_each_skb(x, next, skb) { | 163 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { |
174 | /* | 164 | /* |
175 | * Do we have file descriptors ? | 165 | * Do we have file descriptors ? |
176 | */ | 166 | */ |
@@ -226,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *), | |||
226 | * and perform a scan on them as well. | 216 | * and perform a scan on them as well. |
227 | */ | 217 | */ |
228 | spin_lock(&x->sk_receive_queue.lock); | 218 | spin_lock(&x->sk_receive_queue.lock); |
229 | receive_queue_for_each_skb(x, next, skb) { | 219 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { |
230 | u = unix_sk(skb->sk); | 220 | u = unix_sk(skb->sk); |
231 | 221 | ||
232 | /* | 222 | /* |
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index d095c7be10d0..397cffebb3b6 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/sysctl.h> | 14 | #include <linux/sysctl.h> |
14 | 15 | ||
15 | #include <net/af_unix.h> | 16 | #include <net/af_unix.h> |
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 7718657e93dc..d5b7c3779c43 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c | |||
@@ -72,6 +72,7 @@ | |||
72 | * wimax_msg_send() | 72 | * wimax_msg_send() |
73 | */ | 73 | */ |
74 | #include <linux/device.h> | 74 | #include <linux/device.h> |
75 | #include <linux/slab.h> | ||
75 | #include <net/genetlink.h> | 76 | #include <net/genetlink.h> |
76 | #include <linux/netdevice.h> | 77 | #include <linux/netdevice.h> |
77 | #include <linux/wimax.h> | 78 | #include <linux/wimax.h> |
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c index 4dc82a54ba30..68bedf3e5443 100644 --- a/net/wimax/op-reset.c +++ b/net/wimax/op-reset.c | |||
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | |||
110 | { | 110 | { |
111 | int result, ifindex; | 111 | int result, ifindex; |
112 | struct wimax_dev *wimax_dev; | 112 | struct wimax_dev *wimax_dev; |
113 | struct device *dev; | ||
114 | 113 | ||
115 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | 114 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); |
116 | result = -ENODEV; | 115 | result = -ENODEV; |
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | |||
123 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | 122 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); |
124 | if (wimax_dev == NULL) | 123 | if (wimax_dev == NULL) |
125 | goto error_no_wimax_dev; | 124 | goto error_no_wimax_dev; |
126 | dev = wimax_dev_to_dev(wimax_dev); | ||
127 | /* Execute the operation and send the result back to user space */ | 125 | /* Execute the operation and send the result back to user space */ |
128 | result = wimax_reset(wimax_dev); | 126 | result = wimax_reset(wimax_dev); |
129 | dev_put(wimax_dev->net_dev); | 127 | dev_put(wimax_dev->net_dev); |
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c index 11ad3356eb56..aff8776e2d41 100644 --- a/net/wimax/op-state-get.c +++ b/net/wimax/op-state-get.c | |||
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | |||
53 | { | 53 | { |
54 | int result, ifindex; | 54 | int result, ifindex; |
55 | struct wimax_dev *wimax_dev; | 55 | struct wimax_dev *wimax_dev; |
56 | struct device *dev; | ||
57 | 56 | ||
58 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | 57 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); |
59 | result = -ENODEV; | 58 | result = -ENODEV; |
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | |||
66 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | 65 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); |
67 | if (wimax_dev == NULL) | 66 | if (wimax_dev == NULL) |
68 | goto error_no_wimax_dev; | 67 | goto error_no_wimax_dev; |
69 | dev = wimax_dev_to_dev(wimax_dev); | ||
70 | /* Execute the operation and send the result back to user space */ | 68 | /* Execute the operation and send the result back to user space */ |
71 | result = wimax_state_get(wimax_dev); | 69 | result = wimax_state_get(wimax_dev); |
72 | dev_put(wimax_dev->net_dev); | 70 | dev_put(wimax_dev->net_dev); |
diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 813e1eaea29b..1ed65dbdab03 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c | |||
@@ -51,6 +51,7 @@ | |||
51 | * wimax_rfkill_rm() | 51 | * wimax_rfkill_rm() |
52 | */ | 52 | */ |
53 | #include <linux/device.h> | 53 | #include <linux/device.h> |
54 | #include <linux/gfp.h> | ||
54 | #include <net/genetlink.h> | 55 | #include <net/genetlink.h> |
55 | #include <linux/netdevice.h> | 56 | #include <linux/netdevice.h> |
56 | #include <linux/wimax.h> | 57 | #include <linux/wimax.h> |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 40cbbbfbccbf..37d0e0ab4432 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/err.h> | 9 | #include <linux/err.h> |
10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
11 | #include <linux/slab.h> | ||
11 | #include <linux/nl80211.h> | 12 | #include <linux/nl80211.h> |
12 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
13 | #include <linux/notifier.h> | 14 | #include <linux/notifier.h> |
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 2e4895615037..a4991a3efec0 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | ||
12 | #include "core.h" | 13 | #include "core.h" |
13 | #include "debugfs.h" | 14 | #include "debugfs.h" |
14 | 15 | ||
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 6ef5a491fb4b..6a5acf750174 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
8 | #include <linux/if_arp.h> | 8 | #include <linux/if_arp.h> |
9 | #include <linux/slab.h> | ||
9 | #include <net/cfg80211.h> | 10 | #include <net/cfg80211.h> |
10 | #include "wext-compat.h" | 11 | #include "wext-compat.h" |
11 | #include "nl80211.h" | 12 | #include "nl80211.h" |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 387dd2a27d2f..48ead6f0426d 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/netdevice.h> | 9 | #include <linux/netdevice.h> |
10 | #include <linux/nl80211.h> | 10 | #include <linux/nl80211.h> |
11 | #include <linux/slab.h> | ||
11 | #include <linux/wireless.h> | 12 | #include <linux/wireless.h> |
12 | #include <net/cfg80211.h> | 13 | #include <net/cfg80211.h> |
13 | #include <net/iw_handler.h> | 14 | #include <net/iw_handler.h> |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c27bef8e0c11..01da83ddcff7 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/if.h> | 7 | #include <linux/if.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/err.h> | 9 | #include <linux/err.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/list.h> | 11 | #include <linux/list.h> |
11 | #include <linux/if_ether.h> | 12 | #include <linux/if_ether.h> |
12 | #include <linux/ieee80211.h> | 13 | #include <linux/ieee80211.h> |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 496348c48506..8f0d97dd3109 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -33,6 +33,7 @@ | |||
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/slab.h> | ||
36 | #include <linux/list.h> | 37 | #include <linux/list.h> |
37 | #include <linux/random.h> | 38 | #include <linux/random.h> |
38 | #include <linux/nl80211.h> | 39 | #include <linux/nl80211.h> |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 978cac3414b5..a026c6d56bd3 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright 2008 Johannes Berg <johannes@sipsolutions.net> | 4 | * Copyright 2008 Johannes Berg <johannes@sipsolutions.net> |
5 | */ | 5 | */ |
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/slab.h> | ||
7 | #include <linux/module.h> | 8 | #include <linux/module.h> |
8 | #include <linux/netdevice.h> | 9 | #include <linux/netdevice.h> |
9 | #include <linux/wireless.h> | 10 | #include <linux/wireless.h> |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index dcd7685242f7..8ddf5ae0dd03 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/etherdevice.h> | 8 | #include <linux/etherdevice.h> |
9 | #include <linux/if_arp.h> | 9 | #include <linux/if_arp.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
11 | #include <linux/wireless.h> | 12 | #include <linux/wireless.h> |
12 | #include <net/iw_handler.h> | 13 | #include <net/iw_handler.h> |
diff --git a/net/wireless/util.c b/net/wireless/util.c index 7acb81b9675d..3416373a9c0c 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -5,6 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | #include <linux/bitops.h> | 6 | #include <linux/bitops.h> |
7 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
8 | #include <linux/slab.h> | ||
8 | #include <net/cfg80211.h> | 9 | #include <net/cfg80211.h> |
9 | #include <net/ip.h> | 10 | #include <net/ip.h> |
10 | #include "core.h" | 11 | #include "core.h" |
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 9ab51838849e..a60a2773b497 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/nl80211.h> | 12 | #include <linux/nl80211.h> |
13 | #include <linux/if_arp.h> | 13 | #include <linux/if_arp.h> |
14 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
15 | #include <linux/slab.h> | ||
15 | #include <net/iw_handler.h> | 16 | #include <net/iw_handler.h> |
16 | #include <net/cfg80211.h> | 17 | #include <net/cfg80211.h> |
17 | #include "wext-compat.h" | 18 | #include "wext-compat.h" |
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index bfcbeee23f9c..0ef17bc42bac 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
12 | #include <linux/rtnetlink.h> | 12 | #include <linux/rtnetlink.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/wireless.h> | 14 | #include <linux/wireless.h> |
14 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
15 | #include <net/cfg80211.h> | 16 | #include <net/cfg80211.h> |
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c index a3c2277de9e5..3feb28e41c53 100644 --- a/net/wireless/wext-priv.c +++ b/net/wireless/wext-priv.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * | 7 | * |
8 | * (As all part of the Linux kernel, this file is GPL) | 8 | * (As all part of the Linux kernel, this file is GPL) |
9 | */ | 9 | */ |
10 | #include <linux/slab.h> | ||
10 | #include <linux/wireless.h> | 11 | #include <linux/wireless.h> |
11 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
12 | #include <net/iw_handler.h> | 13 | #include <net/iw_handler.h> |
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 5615a8802536..d5c6140f4cb8 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/etherdevice.h> | 8 | #include <linux/etherdevice.h> |
9 | #include <linux/if_arp.h> | 9 | #include <linux/if_arp.h> |
10 | #include <linux/slab.h> | ||
10 | #include <net/cfg80211.h> | 11 | #include <net/cfg80211.h> |
11 | #include "wext-compat.h" | 12 | #include "wext-compat.h" |
12 | #include "nl80211.h" | 13 | #include "nl80211.h" |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 9796f3ed1edb..296e65e01064 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/netdevice.h> | 47 | #include <linux/netdevice.h> |
48 | #include <linux/if_arp.h> | 48 | #include <linux/if_arp.h> |
49 | #include <linux/skbuff.h> | 49 | #include <linux/skbuff.h> |
50 | #include <linux/slab.h> | ||
50 | #include <net/sock.h> | 51 | #include <net/sock.h> |
51 | #include <net/tcp_states.h> | 52 | #include <net/tcp_states.h> |
52 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
@@ -82,6 +83,41 @@ struct compat_x25_subscrip_struct { | |||
82 | }; | 83 | }; |
83 | #endif | 84 | #endif |
84 | 85 | ||
86 | |||
87 | int x25_parse_address_block(struct sk_buff *skb, | ||
88 | struct x25_address *called_addr, | ||
89 | struct x25_address *calling_addr) | ||
90 | { | ||
91 | unsigned char len; | ||
92 | int needed; | ||
93 | int rc; | ||
94 | |||
95 | if (skb->len < 1) { | ||
96 | /* packet has no address block */ | ||
97 | rc = 0; | ||
98 | goto empty; | ||
99 | } | ||
100 | |||
101 | len = *skb->data; | ||
102 | needed = 1 + (len >> 4) + (len & 0x0f); | ||
103 | |||
104 | if (skb->len < needed) { | ||
105 | /* packet is too short to hold the addresses it claims | ||
106 | to hold */ | ||
107 | rc = -1; | ||
108 | goto empty; | ||
109 | } | ||
110 | |||
111 | return x25_addr_ntoa(skb->data, called_addr, calling_addr); | ||
112 | |||
113 | empty: | ||
114 | *called_addr->x25_addr = 0; | ||
115 | *calling_addr->x25_addr = 0; | ||
116 | |||
117 | return rc; | ||
118 | } | ||
119 | |||
120 | |||
85 | int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, | 121 | int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, |
86 | struct x25_address *calling_addr) | 122 | struct x25_address *calling_addr) |
87 | { | 123 | { |
@@ -366,6 +402,7 @@ static void __x25_destroy_socket(struct sock *sk) | |||
366 | /* | 402 | /* |
367 | * Queue the unaccepted socket for death | 403 | * Queue the unaccepted socket for death |
368 | */ | 404 | */ |
405 | skb->sk->sk_state = TCP_LISTEN; | ||
369 | sock_set_flag(skb->sk, SOCK_DEAD); | 406 | sock_set_flag(skb->sk, SOCK_DEAD); |
370 | x25_start_heartbeat(skb->sk); | 407 | x25_start_heartbeat(skb->sk); |
371 | x25_sk(skb->sk)->state = X25_STATE_0; | 408 | x25_sk(skb->sk)->state = X25_STATE_0; |
@@ -553,7 +590,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, | |||
553 | x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; | 590 | x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; |
554 | x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; | 591 | x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; |
555 | x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; | 592 | x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; |
556 | x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; | 593 | x25->facilities.throughput = 0; /* by default don't negotiate |
594 | throughput */ | ||
557 | x25->facilities.reverse = X25_DEFAULT_REVERSE; | 595 | x25->facilities.reverse = X25_DEFAULT_REVERSE; |
558 | x25->dte_facilities.calling_len = 0; | 596 | x25->dte_facilities.calling_len = 0; |
559 | x25->dte_facilities.called_len = 0; | 597 | x25->dte_facilities.called_len = 0; |
@@ -681,7 +719,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk) | |||
681 | DECLARE_WAITQUEUE(wait, current); | 719 | DECLARE_WAITQUEUE(wait, current); |
682 | int rc; | 720 | int rc; |
683 | 721 | ||
684 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 722 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
685 | for (;;) { | 723 | for (;;) { |
686 | __set_current_state(TASK_INTERRUPTIBLE); | 724 | __set_current_state(TASK_INTERRUPTIBLE); |
687 | rc = -ERESTARTSYS; | 725 | rc = -ERESTARTSYS; |
@@ -701,7 +739,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk) | |||
701 | break; | 739 | break; |
702 | } | 740 | } |
703 | __set_current_state(TASK_RUNNING); | 741 | __set_current_state(TASK_RUNNING); |
704 | remove_wait_queue(sk->sk_sleep, &wait); | 742 | remove_wait_queue(sk_sleep(sk), &wait); |
705 | return rc; | 743 | return rc; |
706 | } | 744 | } |
707 | 745 | ||
@@ -801,7 +839,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout) | |||
801 | DECLARE_WAITQUEUE(wait, current); | 839 | DECLARE_WAITQUEUE(wait, current); |
802 | int rc = 0; | 840 | int rc = 0; |
803 | 841 | ||
804 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 842 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
805 | for (;;) { | 843 | for (;;) { |
806 | __set_current_state(TASK_INTERRUPTIBLE); | 844 | __set_current_state(TASK_INTERRUPTIBLE); |
807 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 845 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
@@ -821,7 +859,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout) | |||
821 | break; | 859 | break; |
822 | } | 860 | } |
823 | __set_current_state(TASK_RUNNING); | 861 | __set_current_state(TASK_RUNNING); |
824 | remove_wait_queue(sk->sk_sleep, &wait); | 862 | remove_wait_queue(sk_sleep(sk), &wait); |
825 | return rc; | 863 | return rc; |
826 | } | 864 | } |
827 | 865 | ||
@@ -921,16 +959,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | |||
921 | /* | 959 | /* |
922 | * Extract the X.25 addresses and convert them to ASCII strings, | 960 | * Extract the X.25 addresses and convert them to ASCII strings, |
923 | * and remove them. | 961 | * and remove them. |
962 | * | ||
963 | * Address block is mandatory in call request packets | ||
924 | */ | 964 | */ |
925 | addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); | 965 | addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); |
966 | if (addr_len <= 0) | ||
967 | goto out_clear_request; | ||
926 | skb_pull(skb, addr_len); | 968 | skb_pull(skb, addr_len); |
927 | 969 | ||
928 | /* | 970 | /* |
929 | * Get the length of the facilities, skip past them for the moment | 971 | * Get the length of the facilities, skip past them for the moment |
930 | * get the call user data because this is needed to determine | 972 | * get the call user data because this is needed to determine |
931 | * the correct listener | 973 | * the correct listener |
974 | * | ||
975 | * Facilities length is mandatory in call request packets | ||
932 | */ | 976 | */ |
977 | if (skb->len < 1) | ||
978 | goto out_clear_request; | ||
933 | len = skb->data[0] + 1; | 979 | len = skb->data[0] + 1; |
980 | if (skb->len < len) | ||
981 | goto out_clear_request; | ||
934 | skb_pull(skb,len); | 982 | skb_pull(skb,len); |
935 | 983 | ||
936 | /* | 984 | /* |
@@ -1414,9 +1462,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1414 | if (facilities.winsize_in < 1 || | 1462 | if (facilities.winsize_in < 1 || |
1415 | facilities.winsize_in > 127) | 1463 | facilities.winsize_in > 127) |
1416 | break; | 1464 | break; |
1417 | if (facilities.throughput < 0x03 || | 1465 | if (facilities.throughput) { |
1418 | facilities.throughput > 0xDD) | 1466 | int out = facilities.throughput & 0xf0; |
1419 | break; | 1467 | int in = facilities.throughput & 0x0f; |
1468 | if (!out) | ||
1469 | facilities.throughput |= | ||
1470 | X25_DEFAULT_THROUGHPUT << 4; | ||
1471 | else if (out < 0x30 || out > 0xD0) | ||
1472 | break; | ||
1473 | if (!in) | ||
1474 | facilities.throughput |= | ||
1475 | X25_DEFAULT_THROUGHPUT; | ||
1476 | else if (in < 0x03 || in > 0x0D) | ||
1477 | break; | ||
1478 | } | ||
1420 | if (facilities.reverse && | 1479 | if (facilities.reverse && |
1421 | (facilities.reverse & 0x81) != 0x81) | 1480 | (facilities.reverse & 0x81) != 0x81) |
1422 | break; | 1481 | break; |
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 52e304212241..9005f6daeab5 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c | |||
@@ -20,9 +20,11 @@ | |||
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
22 | #include <linux/skbuff.h> | 22 | #include <linux/skbuff.h> |
23 | #include <linux/slab.h> | ||
23 | #include <net/sock.h> | 24 | #include <net/sock.h> |
24 | #include <linux/if_arp.h> | 25 | #include <linux/if_arp.h> |
25 | #include <net/x25.h> | 26 | #include <net/x25.h> |
27 | #include <net/x25device.h> | ||
26 | 28 | ||
27 | static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) | 29 | static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) |
28 | { | 30 | { |
@@ -114,19 +116,22 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, | |||
114 | } | 116 | } |
115 | 117 | ||
116 | switch (skb->data[0]) { | 118 | switch (skb->data[0]) { |
117 | case 0x00: | 119 | |
118 | skb_pull(skb, 1); | 120 | case X25_IFACE_DATA: |
119 | if (x25_receive_data(skb, nb)) { | 121 | skb_pull(skb, 1); |
120 | x25_neigh_put(nb); | 122 | if (x25_receive_data(skb, nb)) { |
121 | goto out; | 123 | x25_neigh_put(nb); |
122 | } | 124 | goto out; |
123 | break; | 125 | } |
124 | case 0x01: | 126 | break; |
125 | x25_link_established(nb); | 127 | |
126 | break; | 128 | case X25_IFACE_CONNECT: |
127 | case 0x02: | 129 | x25_link_established(nb); |
128 | x25_link_terminated(nb); | 130 | break; |
129 | break; | 131 | |
132 | case X25_IFACE_DISCONNECT: | ||
133 | x25_link_terminated(nb); | ||
134 | break; | ||
130 | } | 135 | } |
131 | x25_neigh_put(nb); | 136 | x25_neigh_put(nb); |
132 | drop: | 137 | drop: |
@@ -147,7 +152,7 @@ void x25_establish_link(struct x25_neigh *nb) | |||
147 | return; | 152 | return; |
148 | } | 153 | } |
149 | ptr = skb_put(skb, 1); | 154 | ptr = skb_put(skb, 1); |
150 | *ptr = 0x01; | 155 | *ptr = X25_IFACE_CONNECT; |
151 | break; | 156 | break; |
152 | 157 | ||
153 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) | 158 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) |
@@ -183,7 +188,7 @@ void x25_terminate_link(struct x25_neigh *nb) | |||
183 | } | 188 | } |
184 | 189 | ||
185 | ptr = skb_put(skb, 1); | 190 | ptr = skb_put(skb, 1); |
186 | *ptr = 0x02; | 191 | *ptr = X25_IFACE_DISCONNECT; |
187 | 192 | ||
188 | skb->protocol = htons(ETH_P_X25); | 193 | skb->protocol = htons(ETH_P_X25); |
189 | skb->dev = nb->dev; | 194 | skb->dev = nb->dev; |
@@ -199,7 +204,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) | |||
199 | switch (nb->dev->type) { | 204 | switch (nb->dev->type) { |
200 | case ARPHRD_X25: | 205 | case ARPHRD_X25: |
201 | dptr = skb_push(skb, 1); | 206 | dptr = skb_push(skb, 1); |
202 | *dptr = 0x00; | 207 | *dptr = X25_IFACE_DATA; |
203 | break; | 208 | break; |
204 | 209 | ||
205 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) | 210 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index a21f6646eb3a..771bab00754b 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) | 35 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) |
36 | { | 36 | { |
37 | unsigned char *p = skb->data; | 37 | unsigned char *p = skb->data; |
38 | unsigned int len = *p++; | 38 | unsigned int len; |
39 | 39 | ||
40 | *vc_fac_mask = 0; | 40 | *vc_fac_mask = 0; |
41 | 41 | ||
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
50 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); | 50 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); |
51 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); | 51 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); |
52 | 52 | ||
53 | if (skb->len < 1) | ||
54 | return 0; | ||
55 | |||
56 | len = *p++; | ||
57 | |||
58 | if (len >= skb->len) | ||
59 | return -1; | ||
60 | |||
53 | while (len > 0) { | 61 | while (len > 0) { |
54 | switch (*p & X25_FAC_CLASS_MASK) { | 62 | switch (*p & X25_FAC_CLASS_MASK) { |
55 | case X25_FAC_CLASS_A: | 63 | case X25_FAC_CLASS_A: |
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
247 | memcpy(new, ours, sizeof(*new)); | 255 | memcpy(new, ours, sizeof(*new)); |
248 | 256 | ||
249 | len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); | 257 | len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); |
258 | if (len < 0) | ||
259 | return len; | ||
250 | 260 | ||
251 | /* | 261 | /* |
252 | * They want reverse charging, we won't accept it. | 262 | * They want reverse charging, we won't accept it. |
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, | |||
259 | new->reverse = theirs.reverse; | 269 | new->reverse = theirs.reverse; |
260 | 270 | ||
261 | if (theirs.throughput) { | 271 | if (theirs.throughput) { |
262 | if (theirs.throughput < ours->throughput) { | 272 | int theirs_in = theirs.throughput & 0x0f; |
263 | SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); | 273 | int theirs_out = theirs.throughput & 0xf0; |
264 | new->throughput = theirs.throughput; | 274 | int ours_in = ours->throughput & 0x0f; |
275 | int ours_out = ours->throughput & 0xf0; | ||
276 | if (!ours_in || theirs_in < ours_in) { | ||
277 | SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n"); | ||
278 | new->throughput = (new->throughput & 0xf0) | theirs_in; | ||
279 | } | ||
280 | if (!ours_out || theirs_out < ours_out) { | ||
281 | SOCK_DEBUG(sk, | ||
282 | "X.25: outbound throughput negotiated\n"); | ||
283 | new->throughput = (new->throughput & 0x0f) | theirs_out; | ||
265 | } | 284 | } |
266 | } | 285 | } |
267 | 286 | ||
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c index 056a55f3a871..25a810793968 100644 --- a/net/x25/x25_forward.c +++ b/net/x25/x25_forward.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/if_arp.h> | 11 | #include <linux/if_arp.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/slab.h> | ||
13 | #include <net/x25.h> | 14 | #include <net/x25.h> |
14 | 15 | ||
15 | LIST_HEAD(x25_forward_list); | 16 | LIST_HEAD(x25_forward_list); |
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 96d922783547..372ac226e648 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * i-frames. | 23 | * i-frames. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/slab.h> | ||
26 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
28 | #include <linux/string.h> | 29 | #include <linux/string.h> |
@@ -89,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) | |||
89 | static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) | 90 | static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
90 | { | 91 | { |
91 | struct x25_address source_addr, dest_addr; | 92 | struct x25_address source_addr, dest_addr; |
93 | int len; | ||
92 | 94 | ||
93 | switch (frametype) { | 95 | switch (frametype) { |
94 | case X25_CALL_ACCEPTED: { | 96 | case X25_CALL_ACCEPTED: { |
@@ -106,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
106 | * Parse the data in the frame. | 108 | * Parse the data in the frame. |
107 | */ | 109 | */ |
108 | skb_pull(skb, X25_STD_MIN_LEN); | 110 | skb_pull(skb, X25_STD_MIN_LEN); |
109 | skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); | 111 | |
110 | skb_pull(skb, | 112 | len = x25_parse_address_block(skb, &source_addr, |
111 | x25_parse_facilities(skb, &x25->facilities, | 113 | &dest_addr); |
114 | if (len > 0) | ||
115 | skb_pull(skb, len); | ||
116 | |||
117 | len = x25_parse_facilities(skb, &x25->facilities, | ||
112 | &x25->dte_facilities, | 118 | &x25->dte_facilities, |
113 | &x25->vc_facil_mask)); | 119 | &x25->vc_facil_mask); |
120 | if (len > 0) | ||
121 | skb_pull(skb, len); | ||
114 | /* | 122 | /* |
115 | * Copy any Call User Data. | 123 | * Copy any Call User Data. |
116 | */ | 124 | */ |
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index e4e1b6e49538..73e7b954ad28 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/jiffies.h> | 25 | #include <linux/jiffies.h> |
26 | #include <linux/timer.h> | 26 | #include <linux/timer.h> |
27 | #include <linux/slab.h> | ||
27 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
28 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
diff --git a/net/x25/x25_out.c b/net/x25/x25_out.c index 2b96b52114d6..52351a26b6fc 100644 --- a/net/x25/x25_out.c +++ b/net/x25/x25_out.c | |||
@@ -22,6 +22,7 @@ | |||
22 | * needed cleaned seq-number fields. | 22 | * needed cleaned seq-number fields. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/slab.h> | ||
25 | #include <linux/socket.h> | 26 | #include <linux/socket.h> |
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
27 | #include <linux/string.h> | 28 | #include <linux/string.h> |
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c index b95fae9ab393..97d77c532d8c 100644 --- a/net/x25/x25_route.c +++ b/net/x25/x25_route.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/if_arp.h> | 20 | #include <linux/if_arp.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/slab.h> | ||
22 | #include <net/x25.h> | 23 | #include <net/x25.h> |
23 | 24 | ||
24 | LIST_HEAD(x25_route_list); | 25 | LIST_HEAD(x25_route_list); |
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 352b32d216fc..dc20cf12f39b 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * restriction on response. | 23 | * restriction on response. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/slab.h> | ||
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
27 | #include <linux/string.h> | 28 | #include <linux/string.h> |
28 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index e5195c99f71e..1396572d2ade 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h | |||
@@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr) | |||
16 | 16 | ||
17 | static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) | 17 | static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) |
18 | { | 18 | { |
19 | return ntohl(daddr->a4 + saddr->a4); | 19 | u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4; |
20 | return ntohl((__force __be32)sum); | ||
20 | } | 21 | } |
21 | 22 | ||
22 | static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) | 23 | static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) |
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 0fc5ff66d1fa..fc91ad7ee26e 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c | |||
@@ -17,11 +17,11 @@ | |||
17 | 17 | ||
18 | #include <linux/crypto.h> | 18 | #include <linux/crypto.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/gfp.h> | ||
21 | #include <linux/list.h> | 20 | #include <linux/list.h> |
22 | #include <linux/module.h> | 21 | #include <linux/module.h> |
23 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
24 | #include <linux/percpu.h> | 23 | #include <linux/percpu.h> |
24 | #include <linux/slab.h> | ||
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <net/ip.h> | 27 | #include <net/ip.h> |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index b9fe13138c07..6a329158bdfa 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/netfilter.h> | 15 | #include <linux/netfilter.h> |
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
18 | #include <net/dst.h> | 19 | #include <net/dst.h> |
19 | #include <net/xfrm.h> | 20 | #include <net/xfrm.h> |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 843e066649cb..31f4ba43b48f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -37,6 +37,8 @@ | |||
37 | DEFINE_MUTEX(xfrm_cfg_mutex); | 37 | DEFINE_MUTEX(xfrm_cfg_mutex); |
38 | EXPORT_SYMBOL(xfrm_cfg_mutex); | 38 | EXPORT_SYMBOL(xfrm_cfg_mutex); |
39 | 39 | ||
40 | static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); | ||
41 | static struct dst_entry *xfrm_policy_sk_bundles; | ||
40 | static DEFINE_RWLOCK(xfrm_policy_lock); | 42 | static DEFINE_RWLOCK(xfrm_policy_lock); |
41 | 43 | ||
42 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); | 44 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); |
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; | |||
44 | 46 | ||
45 | static struct kmem_cache *xfrm_dst_cache __read_mostly; | 47 | static struct kmem_cache *xfrm_dst_cache __read_mostly; |
46 | 48 | ||
47 | static HLIST_HEAD(xfrm_policy_gc_list); | ||
48 | static DEFINE_SPINLOCK(xfrm_policy_gc_lock); | ||
49 | |||
50 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); | 49 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); |
51 | static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); | 50 | static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); |
52 | static void xfrm_init_pmtu(struct dst_entry *dst); | 51 | static void xfrm_init_pmtu(struct dst_entry *dst); |
52 | static int stale_bundle(struct dst_entry *dst); | ||
53 | 53 | ||
54 | static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, | 54 | static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, |
55 | int dir); | 55 | int dir); |
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data) | |||
156 | 156 | ||
157 | read_lock(&xp->lock); | 157 | read_lock(&xp->lock); |
158 | 158 | ||
159 | if (xp->walk.dead) | 159 | if (unlikely(xp->walk.dead)) |
160 | goto out; | 160 | goto out; |
161 | 161 | ||
162 | dir = xfrm_policy_id2dir(xp->index); | 162 | dir = xfrm_policy_id2dir(xp->index); |
@@ -216,6 +216,35 @@ expired: | |||
216 | xfrm_pol_put(xp); | 216 | xfrm_pol_put(xp); |
217 | } | 217 | } |
218 | 218 | ||
219 | static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) | ||
220 | { | ||
221 | struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); | ||
222 | |||
223 | if (unlikely(pol->walk.dead)) | ||
224 | flo = NULL; | ||
225 | else | ||
226 | xfrm_pol_hold(pol); | ||
227 | |||
228 | return flo; | ||
229 | } | ||
230 | |||
231 | static int xfrm_policy_flo_check(struct flow_cache_object *flo) | ||
232 | { | ||
233 | struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); | ||
234 | |||
235 | return !pol->walk.dead; | ||
236 | } | ||
237 | |||
238 | static void xfrm_policy_flo_delete(struct flow_cache_object *flo) | ||
239 | { | ||
240 | xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); | ||
241 | } | ||
242 | |||
243 | static const struct flow_cache_ops xfrm_policy_fc_ops = { | ||
244 | .get = xfrm_policy_flo_get, | ||
245 | .check = xfrm_policy_flo_check, | ||
246 | .delete = xfrm_policy_flo_delete, | ||
247 | }; | ||
219 | 248 | ||
220 | /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 | 249 | /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 |
221 | * SPD calls. | 250 | * SPD calls. |
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) | |||
236 | atomic_set(&policy->refcnt, 1); | 265 | atomic_set(&policy->refcnt, 1); |
237 | setup_timer(&policy->timer, xfrm_policy_timer, | 266 | setup_timer(&policy->timer, xfrm_policy_timer, |
238 | (unsigned long)policy); | 267 | (unsigned long)policy); |
268 | policy->flo.ops = &xfrm_policy_fc_ops; | ||
239 | } | 269 | } |
240 | return policy; | 270 | return policy; |
241 | } | 271 | } |
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
247 | { | 277 | { |
248 | BUG_ON(!policy->walk.dead); | 278 | BUG_ON(!policy->walk.dead); |
249 | 279 | ||
250 | BUG_ON(policy->bundles); | ||
251 | |||
252 | if (del_timer(&policy->timer)) | 280 | if (del_timer(&policy->timer)) |
253 | BUG(); | 281 | BUG(); |
254 | 282 | ||
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
257 | } | 285 | } |
258 | EXPORT_SYMBOL(xfrm_policy_destroy); | 286 | EXPORT_SYMBOL(xfrm_policy_destroy); |
259 | 287 | ||
260 | static void xfrm_policy_gc_kill(struct xfrm_policy *policy) | ||
261 | { | ||
262 | struct dst_entry *dst; | ||
263 | |||
264 | while ((dst = policy->bundles) != NULL) { | ||
265 | policy->bundles = dst->next; | ||
266 | dst_free(dst); | ||
267 | } | ||
268 | |||
269 | if (del_timer(&policy->timer)) | ||
270 | atomic_dec(&policy->refcnt); | ||
271 | |||
272 | if (atomic_read(&policy->refcnt) > 1) | ||
273 | flow_cache_flush(); | ||
274 | |||
275 | xfrm_pol_put(policy); | ||
276 | } | ||
277 | |||
278 | static void xfrm_policy_gc_task(struct work_struct *work) | ||
279 | { | ||
280 | struct xfrm_policy *policy; | ||
281 | struct hlist_node *entry, *tmp; | ||
282 | struct hlist_head gc_list; | ||
283 | |||
284 | spin_lock_bh(&xfrm_policy_gc_lock); | ||
285 | gc_list.first = xfrm_policy_gc_list.first; | ||
286 | INIT_HLIST_HEAD(&xfrm_policy_gc_list); | ||
287 | spin_unlock_bh(&xfrm_policy_gc_lock); | ||
288 | |||
289 | hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst) | ||
290 | xfrm_policy_gc_kill(policy); | ||
291 | } | ||
292 | static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task); | ||
293 | |||
294 | /* Rule must be locked. Release descentant resources, announce | 288 | /* Rule must be locked. Release descentant resources, announce |
295 | * entry dead. The rule must be unlinked from lists to the moment. | 289 | * entry dead. The rule must be unlinked from lists to the moment. |
296 | */ | 290 | */ |
297 | 291 | ||
298 | static void xfrm_policy_kill(struct xfrm_policy *policy) | 292 | static void xfrm_policy_kill(struct xfrm_policy *policy) |
299 | { | 293 | { |
300 | int dead; | ||
301 | |||
302 | write_lock_bh(&policy->lock); | ||
303 | dead = policy->walk.dead; | ||
304 | policy->walk.dead = 1; | 294 | policy->walk.dead = 1; |
305 | write_unlock_bh(&policy->lock); | ||
306 | 295 | ||
307 | if (unlikely(dead)) { | 296 | atomic_inc(&policy->genid); |
308 | WARN_ON(1); | ||
309 | return; | ||
310 | } | ||
311 | 297 | ||
312 | spin_lock_bh(&xfrm_policy_gc_lock); | 298 | if (del_timer(&policy->timer)) |
313 | hlist_add_head(&policy->bydst, &xfrm_policy_gc_list); | 299 | xfrm_pol_put(policy); |
314 | spin_unlock_bh(&xfrm_policy_gc_lock); | ||
315 | 300 | ||
316 | schedule_work(&xfrm_policy_gc_work); | 301 | xfrm_pol_put(policy); |
317 | } | 302 | } |
318 | 303 | ||
319 | static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; | 304 | static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; |
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
555 | struct xfrm_policy *delpol; | 540 | struct xfrm_policy *delpol; |
556 | struct hlist_head *chain; | 541 | struct hlist_head *chain; |
557 | struct hlist_node *entry, *newpos; | 542 | struct hlist_node *entry, *newpos; |
558 | struct dst_entry *gc_list; | ||
559 | u32 mark = policy->mark.v & policy->mark.m; | 543 | u32 mark = policy->mark.v & policy->mark.m; |
560 | 544 | ||
561 | write_lock_bh(&xfrm_policy_lock); | 545 | write_lock_bh(&xfrm_policy_lock); |
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
605 | else if (xfrm_bydst_should_resize(net, dir, NULL)) | 589 | else if (xfrm_bydst_should_resize(net, dir, NULL)) |
606 | schedule_work(&net->xfrm.policy_hash_work); | 590 | schedule_work(&net->xfrm.policy_hash_work); |
607 | 591 | ||
608 | read_lock_bh(&xfrm_policy_lock); | ||
609 | gc_list = NULL; | ||
610 | entry = &policy->bydst; | ||
611 | hlist_for_each_entry_continue(policy, entry, bydst) { | ||
612 | struct dst_entry *dst; | ||
613 | |||
614 | write_lock(&policy->lock); | ||
615 | dst = policy->bundles; | ||
616 | if (dst) { | ||
617 | struct dst_entry *tail = dst; | ||
618 | while (tail->next) | ||
619 | tail = tail->next; | ||
620 | tail->next = gc_list; | ||
621 | gc_list = dst; | ||
622 | |||
623 | policy->bundles = NULL; | ||
624 | } | ||
625 | write_unlock(&policy->lock); | ||
626 | } | ||
627 | read_unlock_bh(&xfrm_policy_lock); | ||
628 | |||
629 | while (gc_list) { | ||
630 | struct dst_entry *dst = gc_list; | ||
631 | |||
632 | gc_list = dst->next; | ||
633 | dst_free(dst); | ||
634 | } | ||
635 | |||
636 | return 0; | 592 | return 0; |
637 | } | 593 | } |
638 | EXPORT_SYMBOL(xfrm_policy_insert); | 594 | EXPORT_SYMBOL(xfrm_policy_insert); |
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
671 | } | 627 | } |
672 | write_unlock_bh(&xfrm_policy_lock); | 628 | write_unlock_bh(&xfrm_policy_lock); |
673 | 629 | ||
674 | if (ret && delete) { | 630 | if (ret && delete) |
675 | atomic_inc(&flow_cache_genid); | ||
676 | xfrm_policy_kill(ret); | 631 | xfrm_policy_kill(ret); |
677 | } | ||
678 | return ret; | 632 | return ret; |
679 | } | 633 | } |
680 | EXPORT_SYMBOL(xfrm_policy_bysel_ctx); | 634 | EXPORT_SYMBOL(xfrm_policy_bysel_ctx); |
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
713 | } | 667 | } |
714 | write_unlock_bh(&xfrm_policy_lock); | 668 | write_unlock_bh(&xfrm_policy_lock); |
715 | 669 | ||
716 | if (ret && delete) { | 670 | if (ret && delete) |
717 | atomic_inc(&flow_cache_genid); | ||
718 | xfrm_policy_kill(ret); | 671 | xfrm_policy_kill(ret); |
719 | } | ||
720 | return ret; | 672 | return ret; |
721 | } | 673 | } |
722 | EXPORT_SYMBOL(xfrm_policy_byid); | 674 | EXPORT_SYMBOL(xfrm_policy_byid); |
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi | |||
776 | int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | 728 | int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) |
777 | { | 729 | { |
778 | int dir, err = 0, cnt = 0; | 730 | int dir, err = 0, cnt = 0; |
779 | struct xfrm_policy *dp; | ||
780 | 731 | ||
781 | write_lock_bh(&xfrm_policy_lock); | 732 | write_lock_bh(&xfrm_policy_lock); |
782 | 733 | ||
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
794 | &net->xfrm.policy_inexact[dir], bydst) { | 745 | &net->xfrm.policy_inexact[dir], bydst) { |
795 | if (pol->type != type) | 746 | if (pol->type != type) |
796 | continue; | 747 | continue; |
797 | dp = __xfrm_policy_unlink(pol, dir); | 748 | __xfrm_policy_unlink(pol, dir); |
798 | write_unlock_bh(&xfrm_policy_lock); | 749 | write_unlock_bh(&xfrm_policy_lock); |
799 | if (dp) | 750 | cnt++; |
800 | cnt++; | ||
801 | 751 | ||
802 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, | 752 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, |
803 | audit_info->sessionid, | 753 | audit_info->sessionid, |
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
816 | bydst) { | 766 | bydst) { |
817 | if (pol->type != type) | 767 | if (pol->type != type) |
818 | continue; | 768 | continue; |
819 | dp = __xfrm_policy_unlink(pol, dir); | 769 | __xfrm_policy_unlink(pol, dir); |
820 | write_unlock_bh(&xfrm_policy_lock); | 770 | write_unlock_bh(&xfrm_policy_lock); |
821 | if (dp) | 771 | cnt++; |
822 | cnt++; | ||
823 | 772 | ||
824 | xfrm_audit_policy_delete(pol, 1, | 773 | xfrm_audit_policy_delete(pol, 1, |
825 | audit_info->loginuid, | 774 | audit_info->loginuid, |
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
835 | } | 784 | } |
836 | if (!cnt) | 785 | if (!cnt) |
837 | err = -ESRCH; | 786 | err = -ESRCH; |
838 | atomic_inc(&flow_cache_genid); | ||
839 | out: | 787 | out: |
840 | write_unlock_bh(&xfrm_policy_lock); | 788 | write_unlock_bh(&xfrm_policy_lock); |
841 | return err; | 789 | return err; |
@@ -989,32 +937,37 @@ fail: | |||
989 | return ret; | 937 | return ret; |
990 | } | 938 | } |
991 | 939 | ||
992 | static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, | 940 | static struct xfrm_policy * |
993 | u8 dir, void **objp, atomic_t **obj_refp) | 941 | __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir) |
994 | { | 942 | { |
943 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
995 | struct xfrm_policy *pol; | 944 | struct xfrm_policy *pol; |
996 | int err = 0; | ||
997 | 945 | ||
998 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
999 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); | 946 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); |
1000 | if (IS_ERR(pol)) { | 947 | if (pol != NULL) |
1001 | err = PTR_ERR(pol); | 948 | return pol; |
1002 | pol = NULL; | ||
1003 | } | ||
1004 | if (pol || err) | ||
1005 | goto end; | ||
1006 | #endif | ||
1007 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); | ||
1008 | if (IS_ERR(pol)) { | ||
1009 | err = PTR_ERR(pol); | ||
1010 | pol = NULL; | ||
1011 | } | ||
1012 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1013 | end: | ||
1014 | #endif | 949 | #endif |
1015 | if ((*objp = (void *) pol) != NULL) | 950 | return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); |
1016 | *obj_refp = &pol->refcnt; | 951 | } |
1017 | return err; | 952 | |
953 | static struct flow_cache_object * | ||
954 | xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, | ||
955 | u8 dir, struct flow_cache_object *old_obj, void *ctx) | ||
956 | { | ||
957 | struct xfrm_policy *pol; | ||
958 | |||
959 | if (old_obj) | ||
960 | xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); | ||
961 | |||
962 | pol = __xfrm_policy_lookup(net, fl, family, dir); | ||
963 | if (IS_ERR_OR_NULL(pol)) | ||
964 | return ERR_CAST(pol); | ||
965 | |||
966 | /* Resolver returns two references: | ||
967 | * one for cache and one for caller of flow_cache_lookup() */ | ||
968 | xfrm_pol_hold(pol); | ||
969 | |||
970 | return &pol->flo; | ||
1018 | } | 971 | } |
1019 | 972 | ||
1020 | static inline int policy_to_flow_dir(int dir) | 973 | static inline int policy_to_flow_dir(int dir) |
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir) | |||
1104 | pol = __xfrm_policy_unlink(pol, dir); | 1057 | pol = __xfrm_policy_unlink(pol, dir); |
1105 | write_unlock_bh(&xfrm_policy_lock); | 1058 | write_unlock_bh(&xfrm_policy_lock); |
1106 | if (pol) { | 1059 | if (pol) { |
1107 | if (dir < XFRM_POLICY_MAX) | ||
1108 | atomic_inc(&flow_cache_genid); | ||
1109 | xfrm_policy_kill(pol); | 1060 | xfrm_policy_kill(pol); |
1110 | return 0; | 1061 | return 0; |
1111 | } | 1062 | } |
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1132 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); | 1083 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); |
1133 | } | 1084 | } |
1134 | if (old_pol) | 1085 | if (old_pol) |
1086 | /* Unlinking succeeds always. This is the only function | ||
1087 | * allowed to delete or replace socket policy. | ||
1088 | */ | ||
1135 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); | 1089 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); |
1136 | write_unlock_bh(&xfrm_policy_lock); | 1090 | write_unlock_bh(&xfrm_policy_lock); |
1137 | 1091 | ||
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl, | |||
1300 | * still valid. | 1254 | * still valid. |
1301 | */ | 1255 | */ |
1302 | 1256 | ||
1303 | static struct dst_entry * | ||
1304 | xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family) | ||
1305 | { | ||
1306 | struct dst_entry *x; | ||
1307 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | ||
1308 | if (unlikely(afinfo == NULL)) | ||
1309 | return ERR_PTR(-EINVAL); | ||
1310 | x = afinfo->find_bundle(fl, policy); | ||
1311 | xfrm_policy_put_afinfo(afinfo); | ||
1312 | return x; | ||
1313 | } | ||
1314 | |||
1315 | static inline int xfrm_get_tos(struct flowi *fl, int family) | 1257 | static inline int xfrm_get_tos(struct flowi *fl, int family) |
1316 | { | 1258 | { |
1317 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1259 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family) | |||
1327 | return tos; | 1269 | return tos; |
1328 | } | 1270 | } |
1329 | 1271 | ||
1272 | static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) | ||
1273 | { | ||
1274 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1275 | struct dst_entry *dst = &xdst->u.dst; | ||
1276 | |||
1277 | if (xdst->route == NULL) { | ||
1278 | /* Dummy bundle - if it has xfrms we were not | ||
1279 | * able to build bundle as template resolution failed. | ||
1280 | * It means we need to try again resolving. */ | ||
1281 | if (xdst->num_xfrms > 0) | ||
1282 | return NULL; | ||
1283 | } else { | ||
1284 | /* Real bundle */ | ||
1285 | if (stale_bundle(dst)) | ||
1286 | return NULL; | ||
1287 | } | ||
1288 | |||
1289 | dst_hold(dst); | ||
1290 | return flo; | ||
1291 | } | ||
1292 | |||
1293 | static int xfrm_bundle_flo_check(struct flow_cache_object *flo) | ||
1294 | { | ||
1295 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1296 | struct dst_entry *dst = &xdst->u.dst; | ||
1297 | |||
1298 | if (!xdst->route) | ||
1299 | return 0; | ||
1300 | if (stale_bundle(dst)) | ||
1301 | return 0; | ||
1302 | |||
1303 | return 1; | ||
1304 | } | ||
1305 | |||
1306 | static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) | ||
1307 | { | ||
1308 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1309 | struct dst_entry *dst = &xdst->u.dst; | ||
1310 | |||
1311 | dst_free(dst); | ||
1312 | } | ||
1313 | |||
1314 | static const struct flow_cache_ops xfrm_bundle_fc_ops = { | ||
1315 | .get = xfrm_bundle_flo_get, | ||
1316 | .check = xfrm_bundle_flo_check, | ||
1317 | .delete = xfrm_bundle_flo_delete, | ||
1318 | }; | ||
1319 | |||
1330 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | 1320 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) |
1331 | { | 1321 | { |
1332 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1322 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1349 | BUG(); | 1339 | BUG(); |
1350 | } | 1340 | } |
1351 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | 1341 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); |
1352 | |||
1353 | xfrm_policy_put_afinfo(afinfo); | 1342 | xfrm_policy_put_afinfo(afinfo); |
1354 | 1343 | ||
1344 | xdst->flo.ops = &xfrm_bundle_fc_ops; | ||
1345 | |||
1355 | return xdst; | 1346 | return xdst; |
1356 | } | 1347 | } |
1357 | 1348 | ||
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
1389 | return err; | 1380 | return err; |
1390 | } | 1381 | } |
1391 | 1382 | ||
1383 | |||
1392 | /* Allocate chain of dst_entry's, attach known xfrm's, calculate | 1384 | /* Allocate chain of dst_entry's, attach known xfrm's, calculate |
1393 | * all the metrics... Shortly, bundle a bundle. | 1385 | * all the metrics... Shortly, bundle a bundle. |
1394 | */ | 1386 | */ |
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1452 | dst_hold(dst); | 1444 | dst_hold(dst); |
1453 | 1445 | ||
1454 | dst1->xfrm = xfrm[i]; | 1446 | dst1->xfrm = xfrm[i]; |
1455 | xdst->genid = xfrm[i]->genid; | 1447 | xdst->xfrm_genid = xfrm[i]->genid; |
1456 | 1448 | ||
1457 | dst1->obsolete = -1; | 1449 | dst1->obsolete = -1; |
1458 | dst1->flags |= DST_HOST; | 1450 | dst1->flags |= DST_HOST; |
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl) | |||
1545 | #endif | 1537 | #endif |
1546 | } | 1538 | } |
1547 | 1539 | ||
1548 | static int stale_bundle(struct dst_entry *dst); | 1540 | static int xfrm_expand_policies(struct flowi *fl, u16 family, |
1541 | struct xfrm_policy **pols, | ||
1542 | int *num_pols, int *num_xfrms) | ||
1543 | { | ||
1544 | int i; | ||
1545 | |||
1546 | if (*num_pols == 0 || !pols[0]) { | ||
1547 | *num_pols = 0; | ||
1548 | *num_xfrms = 0; | ||
1549 | return 0; | ||
1550 | } | ||
1551 | if (IS_ERR(pols[0])) | ||
1552 | return PTR_ERR(pols[0]); | ||
1553 | |||
1554 | *num_xfrms = pols[0]->xfrm_nr; | ||
1555 | |||
1556 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1557 | if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && | ||
1558 | pols[0]->type != XFRM_POLICY_TYPE_MAIN) { | ||
1559 | pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), | ||
1560 | XFRM_POLICY_TYPE_MAIN, | ||
1561 | fl, family, | ||
1562 | XFRM_POLICY_OUT); | ||
1563 | if (pols[1]) { | ||
1564 | if (IS_ERR(pols[1])) { | ||
1565 | xfrm_pols_put(pols, *num_pols); | ||
1566 | return PTR_ERR(pols[1]); | ||
1567 | } | ||
1568 | (*num_pols) ++; | ||
1569 | (*num_xfrms) += pols[1]->xfrm_nr; | ||
1570 | } | ||
1571 | } | ||
1572 | #endif | ||
1573 | for (i = 0; i < *num_pols; i++) { | ||
1574 | if (pols[i]->action != XFRM_POLICY_ALLOW) { | ||
1575 | *num_xfrms = -1; | ||
1576 | break; | ||
1577 | } | ||
1578 | } | ||
1579 | |||
1580 | return 0; | ||
1581 | |||
1582 | } | ||
1583 | |||
1584 | static struct xfrm_dst * | ||
1585 | xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, | ||
1586 | struct flowi *fl, u16 family, | ||
1587 | struct dst_entry *dst_orig) | ||
1588 | { | ||
1589 | struct net *net = xp_net(pols[0]); | ||
1590 | struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; | ||
1591 | struct dst_entry *dst; | ||
1592 | struct xfrm_dst *xdst; | ||
1593 | int err; | ||
1594 | |||
1595 | /* Try to instantiate a bundle */ | ||
1596 | err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); | ||
1597 | if (err < 0) { | ||
1598 | if (err != -EAGAIN) | ||
1599 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1600 | return ERR_PTR(err); | ||
1601 | } | ||
1602 | |||
1603 | dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); | ||
1604 | if (IS_ERR(dst)) { | ||
1605 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); | ||
1606 | return ERR_CAST(dst); | ||
1607 | } | ||
1608 | |||
1609 | xdst = (struct xfrm_dst *)dst; | ||
1610 | xdst->num_xfrms = err; | ||
1611 | if (num_pols > 1) | ||
1612 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1613 | else | ||
1614 | err = xfrm_dst_update_origin(dst, fl); | ||
1615 | if (unlikely(err)) { | ||
1616 | dst_free(dst); | ||
1617 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1618 | return ERR_PTR(err); | ||
1619 | } | ||
1620 | |||
1621 | xdst->num_pols = num_pols; | ||
1622 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1623 | xdst->policy_genid = atomic_read(&pols[0]->genid); | ||
1624 | |||
1625 | return xdst; | ||
1626 | } | ||
1627 | |||
1628 | static struct flow_cache_object * | ||
1629 | xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir, | ||
1630 | struct flow_cache_object *oldflo, void *ctx) | ||
1631 | { | ||
1632 | struct dst_entry *dst_orig = (struct dst_entry *)ctx; | ||
1633 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | ||
1634 | struct xfrm_dst *xdst, *new_xdst; | ||
1635 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; | ||
1636 | |||
1637 | /* Check if the policies from old bundle are usable */ | ||
1638 | xdst = NULL; | ||
1639 | if (oldflo) { | ||
1640 | xdst = container_of(oldflo, struct xfrm_dst, flo); | ||
1641 | num_pols = xdst->num_pols; | ||
1642 | num_xfrms = xdst->num_xfrms; | ||
1643 | pol_dead = 0; | ||
1644 | for (i = 0; i < num_pols; i++) { | ||
1645 | pols[i] = xdst->pols[i]; | ||
1646 | pol_dead |= pols[i]->walk.dead; | ||
1647 | } | ||
1648 | if (pol_dead) { | ||
1649 | dst_free(&xdst->u.dst); | ||
1650 | xdst = NULL; | ||
1651 | num_pols = 0; | ||
1652 | num_xfrms = 0; | ||
1653 | oldflo = NULL; | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | /* Resolve policies to use if we couldn't get them from | ||
1658 | * previous cache entry */ | ||
1659 | if (xdst == NULL) { | ||
1660 | num_pols = 1; | ||
1661 | pols[0] = __xfrm_policy_lookup(net, fl, family, dir); | ||
1662 | err = xfrm_expand_policies(fl, family, pols, | ||
1663 | &num_pols, &num_xfrms); | ||
1664 | if (err < 0) | ||
1665 | goto inc_error; | ||
1666 | if (num_pols == 0) | ||
1667 | return NULL; | ||
1668 | if (num_xfrms <= 0) | ||
1669 | goto make_dummy_bundle; | ||
1670 | } | ||
1671 | |||
1672 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); | ||
1673 | if (IS_ERR(new_xdst)) { | ||
1674 | err = PTR_ERR(new_xdst); | ||
1675 | if (err != -EAGAIN) | ||
1676 | goto error; | ||
1677 | if (oldflo == NULL) | ||
1678 | goto make_dummy_bundle; | ||
1679 | dst_hold(&xdst->u.dst); | ||
1680 | return oldflo; | ||
1681 | } | ||
1682 | |||
1683 | /* Kill the previous bundle */ | ||
1684 | if (xdst) { | ||
1685 | /* The policies were stolen for newly generated bundle */ | ||
1686 | xdst->num_pols = 0; | ||
1687 | dst_free(&xdst->u.dst); | ||
1688 | } | ||
1689 | |||
1690 | /* Flow cache does not have reference, it dst_free()'s, | ||
1691 | * but we do need to return one reference for original caller */ | ||
1692 | dst_hold(&new_xdst->u.dst); | ||
1693 | return &new_xdst->flo; | ||
1694 | |||
1695 | make_dummy_bundle: | ||
1696 | /* We found policies, but there's no bundles to instantiate: | ||
1697 | * either because the policy blocks, has no transformations or | ||
1698 | * we could not build template (no xfrm_states).*/ | ||
1699 | xdst = xfrm_alloc_dst(net, family); | ||
1700 | if (IS_ERR(xdst)) { | ||
1701 | xfrm_pols_put(pols, num_pols); | ||
1702 | return ERR_CAST(xdst); | ||
1703 | } | ||
1704 | xdst->num_pols = num_pols; | ||
1705 | xdst->num_xfrms = num_xfrms; | ||
1706 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1707 | |||
1708 | dst_hold(&xdst->u.dst); | ||
1709 | return &xdst->flo; | ||
1710 | |||
1711 | inc_error: | ||
1712 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1713 | error: | ||
1714 | if (xdst != NULL) | ||
1715 | dst_free(&xdst->u.dst); | ||
1716 | else | ||
1717 | xfrm_pols_put(pols, num_pols); | ||
1718 | return ERR_PTR(err); | ||
1719 | } | ||
1549 | 1720 | ||
1550 | /* Main function: finds/creates a bundle for given flow. | 1721 | /* Main function: finds/creates a bundle for given flow. |
1551 | * | 1722 | * |
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst); | |||
1555 | int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, | 1726 | int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, |
1556 | struct sock *sk, int flags) | 1727 | struct sock *sk, int flags) |
1557 | { | 1728 | { |
1558 | struct xfrm_policy *policy; | ||
1559 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | 1729 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
1560 | int npols; | 1730 | struct flow_cache_object *flo; |
1561 | int pol_dead; | 1731 | struct xfrm_dst *xdst; |
1562 | int xfrm_nr; | 1732 | struct dst_entry *dst, *dst_orig = *dst_p, *route; |
1563 | int pi; | 1733 | u16 family = dst_orig->ops->family; |
1564 | struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; | ||
1565 | struct dst_entry *dst, *dst_orig = *dst_p; | ||
1566 | int nx = 0; | ||
1567 | int err; | ||
1568 | u32 genid; | ||
1569 | u16 family; | ||
1570 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); | 1734 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); |
1735 | int i, err, num_pols, num_xfrms = 0, drop_pols = 0; | ||
1571 | 1736 | ||
1572 | restart: | 1737 | restart: |
1573 | genid = atomic_read(&flow_cache_genid); | 1738 | dst = NULL; |
1574 | policy = NULL; | 1739 | xdst = NULL; |
1575 | for (pi = 0; pi < ARRAY_SIZE(pols); pi++) | 1740 | route = NULL; |
1576 | pols[pi] = NULL; | ||
1577 | npols = 0; | ||
1578 | pol_dead = 0; | ||
1579 | xfrm_nr = 0; | ||
1580 | 1741 | ||
1581 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { | 1742 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { |
1582 | policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); | 1743 | num_pols = 1; |
1583 | err = PTR_ERR(policy); | 1744 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); |
1584 | if (IS_ERR(policy)) { | 1745 | err = xfrm_expand_policies(fl, family, pols, |
1585 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1746 | &num_pols, &num_xfrms); |
1747 | if (err < 0) | ||
1586 | goto dropdst; | 1748 | goto dropdst; |
1749 | |||
1750 | if (num_pols) { | ||
1751 | if (num_xfrms <= 0) { | ||
1752 | drop_pols = num_pols; | ||
1753 | goto no_transform; | ||
1754 | } | ||
1755 | |||
1756 | xdst = xfrm_resolve_and_create_bundle( | ||
1757 | pols, num_pols, fl, | ||
1758 | family, dst_orig); | ||
1759 | if (IS_ERR(xdst)) { | ||
1760 | xfrm_pols_put(pols, num_pols); | ||
1761 | err = PTR_ERR(xdst); | ||
1762 | goto dropdst; | ||
1763 | } | ||
1764 | |||
1765 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); | ||
1766 | xdst->u.dst.next = xfrm_policy_sk_bundles; | ||
1767 | xfrm_policy_sk_bundles = &xdst->u.dst; | ||
1768 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | ||
1769 | |||
1770 | route = xdst->route; | ||
1587 | } | 1771 | } |
1588 | } | 1772 | } |
1589 | 1773 | ||
1590 | if (!policy) { | 1774 | if (xdst == NULL) { |
1591 | /* To accelerate a bit... */ | 1775 | /* To accelerate a bit... */ |
1592 | if ((dst_orig->flags & DST_NOXFRM) || | 1776 | if ((dst_orig->flags & DST_NOXFRM) || |
1593 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) | 1777 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) |
1594 | goto nopol; | 1778 | goto nopol; |
1595 | 1779 | ||
1596 | policy = flow_cache_lookup(net, fl, dst_orig->ops->family, | 1780 | flo = flow_cache_lookup(net, fl, family, dir, |
1597 | dir, xfrm_policy_lookup); | 1781 | xfrm_bundle_lookup, dst_orig); |
1598 | err = PTR_ERR(policy); | 1782 | if (flo == NULL) |
1599 | if (IS_ERR(policy)) { | 1783 | goto nopol; |
1600 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1784 | if (IS_ERR(flo)) { |
1785 | err = PTR_ERR(flo); | ||
1601 | goto dropdst; | 1786 | goto dropdst; |
1602 | } | 1787 | } |
1788 | xdst = container_of(flo, struct xfrm_dst, flo); | ||
1789 | |||
1790 | num_pols = xdst->num_pols; | ||
1791 | num_xfrms = xdst->num_xfrms; | ||
1792 | memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1793 | route = xdst->route; | ||
1794 | } | ||
1795 | |||
1796 | dst = &xdst->u.dst; | ||
1797 | if (route == NULL && num_xfrms > 0) { | ||
1798 | /* The only case when xfrm_bundle_lookup() returns a | ||
1799 | * bundle with null route, is when the template could | ||
1800 | * not be resolved. It means policies are there, but | ||
1801 | * bundle could not be created, since we don't yet | ||
1802 | * have the xfrm_state's. We need to wait for KM to | ||
1803 | * negotiate new SA's or bail out with error.*/ | ||
1804 | if (net->xfrm.sysctl_larval_drop) { | ||
1805 | /* EREMOTE tells the caller to generate | ||
1806 | * a one-shot blackhole route. */ | ||
1807 | dst_release(dst); | ||
1808 | xfrm_pols_put(pols, num_pols); | ||
1809 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1810 | return -EREMOTE; | ||
1811 | } | ||
1812 | if (flags & XFRM_LOOKUP_WAIT) { | ||
1813 | DECLARE_WAITQUEUE(wait, current); | ||
1814 | |||
1815 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1816 | set_current_state(TASK_INTERRUPTIBLE); | ||
1817 | schedule(); | ||
1818 | set_current_state(TASK_RUNNING); | ||
1819 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1820 | |||
1821 | if (!signal_pending(current)) { | ||
1822 | dst_release(dst); | ||
1823 | goto restart; | ||
1824 | } | ||
1825 | |||
1826 | err = -ERESTART; | ||
1827 | } else | ||
1828 | err = -EAGAIN; | ||
1829 | |||
1830 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1831 | goto error; | ||
1603 | } | 1832 | } |
1604 | 1833 | ||
1605 | if (!policy) | 1834 | no_transform: |
1835 | if (num_pols == 0) | ||
1606 | goto nopol; | 1836 | goto nopol; |
1607 | 1837 | ||
1608 | family = dst_orig->ops->family; | 1838 | if ((flags & XFRM_LOOKUP_ICMP) && |
1609 | pols[0] = policy; | 1839 | !(pols[0]->flags & XFRM_POLICY_ICMP)) { |
1610 | npols ++; | 1840 | err = -ENOENT; |
1611 | xfrm_nr += pols[0]->xfrm_nr; | ||
1612 | |||
1613 | err = -ENOENT; | ||
1614 | if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP)) | ||
1615 | goto error; | 1841 | goto error; |
1842 | } | ||
1616 | 1843 | ||
1617 | policy->curlft.use_time = get_seconds(); | 1844 | for (i = 0; i < num_pols; i++) |
1845 | pols[i]->curlft.use_time = get_seconds(); | ||
1618 | 1846 | ||
1619 | switch (policy->action) { | 1847 | if (num_xfrms < 0) { |
1620 | default: | ||
1621 | case XFRM_POLICY_BLOCK: | ||
1622 | /* Prohibit the flow */ | 1848 | /* Prohibit the flow */ |
1623 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); | 1849 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); |
1624 | err = -EPERM; | 1850 | err = -EPERM; |
1625 | goto error; | 1851 | goto error; |
1626 | 1852 | } else if (num_xfrms > 0) { | |
1627 | case XFRM_POLICY_ALLOW: | 1853 | /* Flow transformed */ |
1628 | #ifndef CONFIG_XFRM_SUB_POLICY | 1854 | *dst_p = dst; |
1629 | if (policy->xfrm_nr == 0) { | 1855 | dst_release(dst_orig); |
1630 | /* Flow passes not transformed. */ | 1856 | } else { |
1631 | xfrm_pol_put(policy); | 1857 | /* Flow passes untransformed */ |
1632 | return 0; | 1858 | dst_release(dst); |
1633 | } | ||
1634 | #endif | ||
1635 | |||
1636 | /* Try to find matching bundle. | ||
1637 | * | ||
1638 | * LATER: help from flow cache. It is optional, this | ||
1639 | * is required only for output policy. | ||
1640 | */ | ||
1641 | dst = xfrm_find_bundle(fl, policy, family); | ||
1642 | if (IS_ERR(dst)) { | ||
1643 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1644 | err = PTR_ERR(dst); | ||
1645 | goto error; | ||
1646 | } | ||
1647 | |||
1648 | if (dst) | ||
1649 | break; | ||
1650 | |||
1651 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1652 | if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { | ||
1653 | pols[1] = xfrm_policy_lookup_bytype(net, | ||
1654 | XFRM_POLICY_TYPE_MAIN, | ||
1655 | fl, family, | ||
1656 | XFRM_POLICY_OUT); | ||
1657 | if (pols[1]) { | ||
1658 | if (IS_ERR(pols[1])) { | ||
1659 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1660 | err = PTR_ERR(pols[1]); | ||
1661 | goto error; | ||
1662 | } | ||
1663 | if (pols[1]->action == XFRM_POLICY_BLOCK) { | ||
1664 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); | ||
1665 | err = -EPERM; | ||
1666 | goto error; | ||
1667 | } | ||
1668 | npols ++; | ||
1669 | xfrm_nr += pols[1]->xfrm_nr; | ||
1670 | } | ||
1671 | } | ||
1672 | |||
1673 | /* | ||
1674 | * Because neither flowi nor bundle information knows about | ||
1675 | * transformation template size. On more than one policy usage | ||
1676 | * we can realize whether all of them is bypass or not after | ||
1677 | * they are searched. See above not-transformed bypass | ||
1678 | * is surrounded by non-sub policy configuration, too. | ||
1679 | */ | ||
1680 | if (xfrm_nr == 0) { | ||
1681 | /* Flow passes not transformed. */ | ||
1682 | xfrm_pols_put(pols, npols); | ||
1683 | return 0; | ||
1684 | } | ||
1685 | |||
1686 | #endif | ||
1687 | nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family); | ||
1688 | |||
1689 | if (unlikely(nx<0)) { | ||
1690 | err = nx; | ||
1691 | if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) { | ||
1692 | /* EREMOTE tells the caller to generate | ||
1693 | * a one-shot blackhole route. | ||
1694 | */ | ||
1695 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1696 | xfrm_pol_put(policy); | ||
1697 | return -EREMOTE; | ||
1698 | } | ||
1699 | if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) { | ||
1700 | DECLARE_WAITQUEUE(wait, current); | ||
1701 | |||
1702 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1703 | set_current_state(TASK_INTERRUPTIBLE); | ||
1704 | schedule(); | ||
1705 | set_current_state(TASK_RUNNING); | ||
1706 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1707 | |||
1708 | nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family); | ||
1709 | |||
1710 | if (nx == -EAGAIN && signal_pending(current)) { | ||
1711 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1712 | err = -ERESTART; | ||
1713 | goto error; | ||
1714 | } | ||
1715 | if (nx == -EAGAIN || | ||
1716 | genid != atomic_read(&flow_cache_genid)) { | ||
1717 | xfrm_pols_put(pols, npols); | ||
1718 | goto restart; | ||
1719 | } | ||
1720 | err = nx; | ||
1721 | } | ||
1722 | if (err < 0) { | ||
1723 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1724 | goto error; | ||
1725 | } | ||
1726 | } | ||
1727 | if (nx == 0) { | ||
1728 | /* Flow passes not transformed. */ | ||
1729 | xfrm_pols_put(pols, npols); | ||
1730 | return 0; | ||
1731 | } | ||
1732 | |||
1733 | dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig); | ||
1734 | err = PTR_ERR(dst); | ||
1735 | if (IS_ERR(dst)) { | ||
1736 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); | ||
1737 | goto error; | ||
1738 | } | ||
1739 | |||
1740 | for (pi = 0; pi < npols; pi++) { | ||
1741 | read_lock_bh(&pols[pi]->lock); | ||
1742 | pol_dead |= pols[pi]->walk.dead; | ||
1743 | read_unlock_bh(&pols[pi]->lock); | ||
1744 | } | ||
1745 | |||
1746 | write_lock_bh(&policy->lock); | ||
1747 | if (unlikely(pol_dead || stale_bundle(dst))) { | ||
1748 | /* Wow! While we worked on resolving, this | ||
1749 | * policy has gone. Retry. It is not paranoia, | ||
1750 | * we just cannot enlist new bundle to dead object. | ||
1751 | * We can't enlist stable bundles either. | ||
1752 | */ | ||
1753 | write_unlock_bh(&policy->lock); | ||
1754 | dst_free(dst); | ||
1755 | |||
1756 | if (pol_dead) | ||
1757 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD); | ||
1758 | else | ||
1759 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1760 | err = -EHOSTUNREACH; | ||
1761 | goto error; | ||
1762 | } | ||
1763 | |||
1764 | if (npols > 1) | ||
1765 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1766 | else | ||
1767 | err = xfrm_dst_update_origin(dst, fl); | ||
1768 | if (unlikely(err)) { | ||
1769 | write_unlock_bh(&policy->lock); | ||
1770 | dst_free(dst); | ||
1771 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1772 | goto error; | ||
1773 | } | ||
1774 | |||
1775 | dst->next = policy->bundles; | ||
1776 | policy->bundles = dst; | ||
1777 | dst_hold(dst); | ||
1778 | write_unlock_bh(&policy->lock); | ||
1779 | } | 1859 | } |
1780 | *dst_p = dst; | 1860 | ok: |
1781 | dst_release(dst_orig); | 1861 | xfrm_pols_put(pols, drop_pols); |
1782 | xfrm_pols_put(pols, npols); | ||
1783 | return 0; | 1862 | return 0; |
1784 | 1863 | ||
1864 | nopol: | ||
1865 | if (!(flags & XFRM_LOOKUP_ICMP)) | ||
1866 | goto ok; | ||
1867 | err = -ENOENT; | ||
1785 | error: | 1868 | error: |
1786 | xfrm_pols_put(pols, npols); | 1869 | dst_release(dst); |
1787 | dropdst: | 1870 | dropdst: |
1788 | dst_release(dst_orig); | 1871 | dst_release(dst_orig); |
1789 | *dst_p = NULL; | 1872 | *dst_p = NULL; |
1873 | xfrm_pols_put(pols, drop_pols); | ||
1790 | return err; | 1874 | return err; |
1791 | |||
1792 | nopol: | ||
1793 | err = -ENOENT; | ||
1794 | if (flags & XFRM_LOOKUP_ICMP) | ||
1795 | goto dropdst; | ||
1796 | return 0; | ||
1797 | } | 1875 | } |
1798 | EXPORT_SYMBOL(__xfrm_lookup); | 1876 | EXPORT_SYMBOL(__xfrm_lookup); |
1799 | 1877 | ||
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
1952 | } | 2030 | } |
1953 | } | 2031 | } |
1954 | 2032 | ||
1955 | if (!pol) | 2033 | if (!pol) { |
1956 | pol = flow_cache_lookup(net, &fl, family, fl_dir, | 2034 | struct flow_cache_object *flo; |
1957 | xfrm_policy_lookup); | 2035 | |
2036 | flo = flow_cache_lookup(net, &fl, family, fl_dir, | ||
2037 | xfrm_policy_lookup, NULL); | ||
2038 | if (IS_ERR_OR_NULL(flo)) | ||
2039 | pol = ERR_CAST(flo); | ||
2040 | else | ||
2041 | pol = container_of(flo, struct xfrm_policy, flo); | ||
2042 | } | ||
1958 | 2043 | ||
1959 | if (IS_ERR(pol)) { | 2044 | if (IS_ERR(pol)) { |
1960 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); | 2045 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); |
@@ -2138,71 +2223,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) | |||
2138 | return dst; | 2223 | return dst; |
2139 | } | 2224 | } |
2140 | 2225 | ||
2141 | static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p) | 2226 | static void __xfrm_garbage_collect(struct net *net) |
2142 | { | ||
2143 | struct dst_entry *dst, **dstp; | ||
2144 | |||
2145 | write_lock(&pol->lock); | ||
2146 | dstp = &pol->bundles; | ||
2147 | while ((dst=*dstp) != NULL) { | ||
2148 | if (func(dst)) { | ||
2149 | *dstp = dst->next; | ||
2150 | dst->next = *gc_list_p; | ||
2151 | *gc_list_p = dst; | ||
2152 | } else { | ||
2153 | dstp = &dst->next; | ||
2154 | } | ||
2155 | } | ||
2156 | write_unlock(&pol->lock); | ||
2157 | } | ||
2158 | |||
2159 | static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *)) | ||
2160 | { | 2227 | { |
2161 | struct dst_entry *gc_list = NULL; | 2228 | struct dst_entry *head, *next; |
2162 | int dir; | ||
2163 | 2229 | ||
2164 | read_lock_bh(&xfrm_policy_lock); | 2230 | flow_cache_flush(); |
2165 | for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { | ||
2166 | struct xfrm_policy *pol; | ||
2167 | struct hlist_node *entry; | ||
2168 | struct hlist_head *table; | ||
2169 | int i; | ||
2170 | 2231 | ||
2171 | hlist_for_each_entry(pol, entry, | 2232 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); |
2172 | &net->xfrm.policy_inexact[dir], bydst) | 2233 | head = xfrm_policy_sk_bundles; |
2173 | prune_one_bundle(pol, func, &gc_list); | 2234 | xfrm_policy_sk_bundles = NULL; |
2235 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | ||
2174 | 2236 | ||
2175 | table = net->xfrm.policy_bydst[dir].table; | 2237 | while (head) { |
2176 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { | 2238 | next = head->next; |
2177 | hlist_for_each_entry(pol, entry, table + i, bydst) | 2239 | dst_free(head); |
2178 | prune_one_bundle(pol, func, &gc_list); | 2240 | head = next; |
2179 | } | ||
2180 | } | ||
2181 | read_unlock_bh(&xfrm_policy_lock); | ||
2182 | |||
2183 | while (gc_list) { | ||
2184 | struct dst_entry *dst = gc_list; | ||
2185 | gc_list = dst->next; | ||
2186 | dst_free(dst); | ||
2187 | } | 2241 | } |
2188 | } | 2242 | } |
2189 | 2243 | ||
2190 | static int unused_bundle(struct dst_entry *dst) | ||
2191 | { | ||
2192 | return !atomic_read(&dst->__refcnt); | ||
2193 | } | ||
2194 | |||
2195 | static void __xfrm_garbage_collect(struct net *net) | ||
2196 | { | ||
2197 | xfrm_prune_bundles(net, unused_bundle); | ||
2198 | } | ||
2199 | |||
2200 | static int xfrm_flush_bundles(struct net *net) | ||
2201 | { | ||
2202 | xfrm_prune_bundles(net, stale_bundle); | ||
2203 | return 0; | ||
2204 | } | ||
2205 | |||
2206 | static void xfrm_init_pmtu(struct dst_entry *dst) | 2244 | static void xfrm_init_pmtu(struct dst_entry *dst) |
2207 | { | 2245 | { |
2208 | do { | 2246 | do { |
@@ -2260,7 +2298,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
2260 | return 0; | 2298 | return 0; |
2261 | if (dst->xfrm->km.state != XFRM_STATE_VALID) | 2299 | if (dst->xfrm->km.state != XFRM_STATE_VALID) |
2262 | return 0; | 2300 | return 0; |
2263 | if (xdst->genid != dst->xfrm->genid) | 2301 | if (xdst->xfrm_genid != dst->xfrm->genid) |
2302 | return 0; | ||
2303 | if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | ||
2264 | return 0; | 2304 | return 0; |
2265 | 2305 | ||
2266 | if (strict && fl && | 2306 | if (strict && fl && |
@@ -2425,7 +2465,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void | |||
2425 | 2465 | ||
2426 | switch (event) { | 2466 | switch (event) { |
2427 | case NETDEV_DOWN: | 2467 | case NETDEV_DOWN: |
2428 | xfrm_flush_bundles(dev_net(dev)); | 2468 | __xfrm_garbage_collect(dev_net(dev)); |
2429 | } | 2469 | } |
2430 | return NOTIFY_DONE; | 2470 | return NOTIFY_DONE; |
2431 | } | 2471 | } |
@@ -2531,7 +2571,6 @@ static void xfrm_policy_fini(struct net *net) | |||
2531 | audit_info.sessionid = -1; | 2571 | audit_info.sessionid = -1; |
2532 | audit_info.secid = 0; | 2572 | audit_info.secid = 0; |
2533 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); | 2573 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); |
2534 | flush_work(&xfrm_policy_gc_work); | ||
2535 | 2574 | ||
2536 | WARN_ON(!list_empty(&net->xfrm.policy_all)); | 2575 | WARN_ON(!list_empty(&net->xfrm.policy_all)); |
2537 | 2576 | ||
@@ -2757,7 +2796,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, | |||
2757 | struct xfrm_migrate *m, int num_migrate) | 2796 | struct xfrm_migrate *m, int num_migrate) |
2758 | { | 2797 | { |
2759 | struct xfrm_migrate *mp; | 2798 | struct xfrm_migrate *mp; |
2760 | struct dst_entry *dst; | ||
2761 | int i, j, n = 0; | 2799 | int i, j, n = 0; |
2762 | 2800 | ||
2763 | write_lock_bh(&pol->lock); | 2801 | write_lock_bh(&pol->lock); |
@@ -2782,10 +2820,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, | |||
2782 | sizeof(pol->xfrm_vec[i].saddr)); | 2820 | sizeof(pol->xfrm_vec[i].saddr)); |
2783 | pol->xfrm_vec[i].encap_family = mp->new_family; | 2821 | pol->xfrm_vec[i].encap_family = mp->new_family; |
2784 | /* flush bundles */ | 2822 | /* flush bundles */ |
2785 | while ((dst = pol->bundles) != NULL) { | 2823 | atomic_inc(&pol->genid); |
2786 | pol->bundles = dst->next; | ||
2787 | dst_free(dst); | ||
2788 | } | ||
2789 | } | 2824 | } |
2790 | } | 2825 | } |
2791 | 2826 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 17d5b96f2fc8..5208b12fbfb4 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/audit.h> | 22 | #include <linux/audit.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <linux/ktime.h> | 24 | #include <linux/ktime.h> |
25 | #include <linux/slab.h> | ||
25 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
27 | 28 | ||
@@ -37,7 +38,6 @@ | |||
37 | static DEFINE_SPINLOCK(xfrm_state_lock); | 38 | static DEFINE_SPINLOCK(xfrm_state_lock); |
38 | 39 | ||
39 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
40 | static unsigned int xfrm_state_genid; | ||
41 | 41 | ||
42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | 42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); |
43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | 43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); |
@@ -923,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x) | |||
923 | struct net *net = xs_net(x); | 923 | struct net *net = xs_net(x); |
924 | unsigned int h; | 924 | unsigned int h; |
925 | 925 | ||
926 | x->genid = ++xfrm_state_genid; | ||
927 | |||
928 | list_add(&x->km.all, &net->xfrm.state_all); | 926 | list_add(&x->km.all, &net->xfrm.state_all); |
929 | 927 | ||
930 | h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, | 928 | h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, |
@@ -970,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) | |||
970 | (mark & x->mark.m) == x->mark.v && | 968 | (mark & x->mark.m) == x->mark.v && |
971 | !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && | 969 | !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && |
972 | !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) | 970 | !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) |
973 | x->genid = xfrm_state_genid; | 971 | x->genid++; |
974 | } | 972 | } |
975 | } | 973 | } |
976 | 974 | ||
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c index 2c4d6cdcba49..05640bc9594b 100644 --- a/net/xfrm/xfrm_sysctl.c +++ b/net/xfrm/xfrm_sysctl.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/sysctl.h> | 1 | #include <linux/sysctl.h> |
2 | #include <linux/slab.h> | ||
2 | #include <net/net_namespace.h> | 3 | #include <net/net_namespace.h> |
3 | #include <net/xfrm.h> | 4 | #include <net/xfrm.h> |
4 | 5 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6106b72826d3..a267fbdda525 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1741 | if (err) | 1741 | if (err) |
1742 | return err; | 1742 | return err; |
1743 | 1743 | ||
1744 | err = verify_policy_dir(p->dir); | ||
1745 | if (err) | ||
1746 | return err; | ||
1747 | |||
1744 | if (p->index) | 1748 | if (p->index) |
1745 | xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); | 1749 | xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); |
1746 | else { | 1750 | else { |
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1766 | if (xp == NULL) | 1770 | if (xp == NULL) |
1767 | return -ENOENT; | 1771 | return -ENOENT; |
1768 | 1772 | ||
1769 | read_lock(&xp->lock); | 1773 | if (unlikely(xp->walk.dead)) |
1770 | if (xp->walk.dead) { | ||
1771 | read_unlock(&xp->lock); | ||
1772 | goto out; | 1774 | goto out; |
1773 | } | ||
1774 | 1775 | ||
1775 | read_unlock(&xp->lock); | ||
1776 | err = 0; | 1776 | err = 0; |
1777 | if (up->hard) { | 1777 | if (up->hard) { |
1778 | uid_t loginuid = NETLINK_CB(skb).loginuid; | 1778 | uid_t loginuid = NETLINK_CB(skb).loginuid; |