Diffstat (limited to 'net')
143 files changed, 15283 insertions, 2475 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index 9ed7c0e7dc17..941f2a324d3a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -576,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
 	if (!app)
 		goto err2;
 
-	err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
+	err = dev_mc_add(dev, appl->proto.group_address);
 	if (err < 0)
 		goto err3;
 
@@ -616,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	garp_pdu_queue(app);
 	garp_queue_xmit(app);
 
-	dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
+	dev_mc_del(dev, appl->proto.group_address);
 	kfree(app);
 	garp_release_port(dev);
 }
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 97da977c2a23..3c1c8c14e929 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -357,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev,
 	 * the new address */
 	if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_delete(dev, vlandev->dev_addr);
+		dev_uc_del(dev, vlandev->dev_addr);
 
 	/* vlan address was equal to the old address and is different from
 	 * the new address */
 	if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_add(dev, vlandev->dev_addr);
+		dev_uc_add(dev, vlandev->dev_addr);
 
 	memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
 }
@@ -533,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		}
 		unregister_netdevice_many(&list);
 		break;
+
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid underlaying device to change its type. */
+		return NOTIFY_BAD;
 	}
 
 out:
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 29b6348c8d4d..b5249c5fd4d3 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -470,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev)
 		return -ENETDOWN;
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, dev->dev_addr);
+		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
 	}
@@ -499,7 +499,7 @@ clear_allmulti:
 	dev_set_allmulti(real_dev, -1);
 del_unicast:
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr);
+		dev_uc_del(real_dev, dev->dev_addr);
 out:
 	netif_carrier_off(dev);
 	return err;
@@ -514,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev)
 	vlan_gvrp_request_leave(dev);
 
 	dev_mc_unsync(real_dev, dev);
-	dev_unicast_unsync(real_dev, dev);
+	dev_uc_unsync(real_dev, dev);
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(real_dev, -1);
 	if (dev->flags & IFF_PROMISC)
 		dev_set_promiscuity(real_dev, -1);
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr);
+		dev_uc_del(real_dev, dev->dev_addr);
 
 	netif_carrier_off(dev);
 	return 0;
@@ -540,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 		goto out;
 
 	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, addr->sa_data);
+		err = dev_uc_add(real_dev, addr->sa_data);
 		if (err < 0)
 			return err;
 	}
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr);
+		dev_uc_del(real_dev, dev->dev_addr);
 
 out:
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -663,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
 	dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-	dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..0d68b40fc0e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig"
 source "net/rds/Kconfig"
 source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
+source "net/l2tp/Kconfig"
 source "net/802/Kconfig"
 source "net/bridge/Kconfig"
 source "net/dsa/Kconfig"
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 
+config RPS
+	boolean
+	depends on SMP && SYSFS
+	default y
+
 menu "Network testing"
 
 config NET_PKTGEN
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig"
 
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
+source "net/caif/Kconfig"
+
 
 endif # if NET
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7b..cb7bdc1210cb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/
 obj-$(CONFIG_SUNRPC) += sunrpc/
 obj-$(CONFIG_AF_RXRPC) += rxrpc/
 obj-$(CONFIG_ATM) += atm/
+obj-$(CONFIG_L2TP) += l2tp/
 obj-$(CONFIG_DECNET) += decnet/
 obj-$(CONFIG_ECONET) += econet/
 obj-$(CONFIG_PHONET) += phonet/
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
 obj-$(CONFIG_IUCV) += iucv/
 obj-$(CONFIG_RFKILL) += rfkill/
 obj-$(CONFIG_NET_9P) += 9p/
+obj-$(CONFIG_CAIF) += caif/
 ifneq ($(CONFIG_DCB),)
 obj-y += dcb/
 endif
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7b02967fbbe7..c410b93fda2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -782,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg)
 				atrtr_create(&rtdef, dev);
 			}
 		}
-		dev_mc_add(dev, aarp_mcast, 6, 1);
+		dev_mc_add_global(dev, aarp_mcast);
 		return 0;
 
 	case SIOCGIFADDR:
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 696e218436e5..6262aeae398e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -407,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root);
 
 int atm_proc_dev_register(struct atm_dev *dev)
 {
-	int digits, num;
 	int error;
 
 	/* No proc info */
@@ -415,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev)
 		return 0;
 
 	error = -ENOMEM;
-	digits = 0;
-	for (num = dev->number; num; num /= 10)
-		digits++;
-	if (!digits)
-		digits++;
-
-	dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
+	dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
 	if (!dev->proc_name)
 		goto err_out;
-	sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
 
 	dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
 					   &proc_atm_dev_ops, dev);
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 5643a2391e76..d48b33f4d4ba 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -88,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 		memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
 		r->len = htons(ETH_ALEN * 2);
 	} else {
-		struct dev_mc_list *dmi = dev->mc_list;
+		struct netdev_hw_addr *ha;
 		int i, len = skb->len;
 
 		if (dev->flags & IFF_BROADCAST) {
@@ -98,12 +98,12 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 
 		/* FIXME: We should group addresses here. */
 
-		for (i = 0;
-		     i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
-		     i++) {
-			memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
-			memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
-			dmi = dmi->next;
+		i = 0;
+		netdev_for_each_mc_addr(ha, dev) {
+			if (i == BNEP_MAX_MULTICAST_FILTERS)
+				break;
+			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
 		}
 		r->len = htons(skb->len - len);
 	}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90a9024e5c1e..5b8a6e73b02f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -26,11 +26,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	const unsigned char *dest = skb->data;
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
+	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
-	BR_INPUT_SKB_CB(skb)->brdev = dev;
+	brstats->tx_packets++;
+	brstats->tx_bytes += skb->len;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
@@ -81,6 +82,31 @@ static int br_dev_stop(struct net_device *dev)
 	return 0;
 }
 
+static struct net_device_stats *br_get_stats(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct br_cpu_netstats sum = { 0 };
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		const struct br_cpu_netstats *bstats
+			= per_cpu_ptr(br->stats, cpu);
+
+		sum.tx_bytes += bstats->tx_bytes;
+		sum.tx_packets += bstats->tx_packets;
+		sum.rx_bytes += bstats->rx_bytes;
+		sum.rx_packets += bstats->rx_packets;
+	}
+
+	stats->tx_bytes = sum.tx_bytes;
+	stats->tx_packets = sum.tx_packets;
+	stats->rx_bytes = sum.rx_bytes;
+	stats->rx_packets = sum.rx_packets;
+
+	return stats;
+}
+
 static int br_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -180,19 +206,28 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_open = br_dev_open,
 	.ndo_stop = br_dev_stop,
 	.ndo_start_xmit = br_dev_xmit,
+	.ndo_get_stats = br_get_stats,
 	.ndo_set_mac_address = br_set_mac_address,
 	.ndo_set_multicast_list = br_dev_set_multicast_list,
 	.ndo_change_mtu = br_change_mtu,
 	.ndo_do_ioctl = br_dev_ioctl,
 };
 
+static void br_dev_free(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	free_percpu(br->stats);
+	free_netdev(dev);
+}
+
 void br_dev_setup(struct net_device *dev)
 {
 	random_ether_addr(dev->dev_addr);
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->destructor = br_dev_free;
 	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
 	dev->tx_queue_len = 0;
 	dev->priv_flags = IFF_EBRIDGE;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0b6b1f2ff7ac..521439333316 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -186,6 +186,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
 	br = netdev_priv(dev);
 	br->dev = dev;
 
+	br->stats = alloc_percpu(struct br_cpu_netstats);
+	if (!br->stats) {
+		free_netdev(dev);
+		return NULL;
+	}
+
 	spin_lock_init(&br->lock);
 	INIT_LIST_HEAD(&br->port_list);
 	spin_lock_init(&br->hash_lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a82dde2d2ead..e7f4c1d02f57 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -24,9 +24,11 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 static int br_pass_frame_up(struct sk_buff *skb)
 {
 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
+	struct net_bridge *br = netdev_priv(brdev);
+	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
-	brdev->stats.rx_packets++;
-	brdev->stats.rx_bytes += skb->len;
+	brstats->rx_packets++;
+	brstats->rx_bytes += skb->len;
 
 	indev = skb->dev;
 	skb->dev = brdev;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f29ada827a6a..3fe86ffc069c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1003,8 +1003,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	if (!pskb_may_pull(skb2, sizeof(*ih)))
 		goto out;
 
-	iph = ip_hdr(skb2);
-
 	switch (skb2->ip_summed) {
 	case CHECKSUM_COMPLETE:
 		if (!csum_fold(skb2->csum))
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 763a3ec292e5..1413b72acc7f 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	case NETDEV_UNREGISTER:
 		br_del_if(br, dev);
 		break;
+
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid underlaying device to change its type. */
+		return NOTIFY_BAD;
 	}
 
 	/* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 846d7d1e2075..791d4ab0fd4d 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -135,6 +135,14 @@ struct net_bridge
 	spinlock_t lock;
 	struct list_head port_list;
 	struct net_device *dev;
+
+	struct br_cpu_netstats __percpu {
+		unsigned long rx_packets;
+		unsigned long rx_bytes;
+		unsigned long tx_packets;
+		unsigned long tx_bytes;
+	} *stats;
+
 	spinlock_t hash_lock;
 	struct hlist_head hash[BR_HASH_SIZE];
 	unsigned long feature_mask;
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 000000000000..cd1daf6008bd
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,48 @@
+#
+# CAIF net configurations
+#
+
+#menu "CAIF Support"
+comment "CAIF Support"
+menuconfig CAIF
+	tristate "Enable CAIF support"
+	select CRC_CCITT
+	default n
+	---help---
+	The "Communication CPU to Application CPU Interface" (CAIF) is a packet
+	based connection-oriented MUX protocol developed by ST-Ericsson for use
+	with its modems. It is accessed from user space as sockets (PF_CAIF).
+
+	Say Y (or M) here if you build for a phone product (e.g. Android or
+	MeeGo ) that uses CAIF as transport, if unsure say N.
+
+	If you select to build it as module then CAIF_NETDEV also needs to be
+	built as modules. You will also need to say yes to any CAIF physical
+	devices that your platform requires.
+
+	See Documentation/networking/caif for a further explanation on how to
+	use and configure CAIF.
+
+if CAIF
+
+config CAIF_DEBUG
+	bool "Enable Debug"
+	default n
+	--- help ---
+	Enable the inclusion of debug code in the CAIF stack.
+	Be aware that doing this will impact performance.
+	If unsure say N.
+
+
+config CAIF_NETDEV
+	tristate "CAIF GPRS Network device"
+	default CAIF
+	---help---
+	Say Y if you will be using a CAIF based GPRS network device.
+	This can be either built-in or a loadable module,
+	If you select to build it as a built-in then the main CAIF device must
+	also be a built-in.
+	If unsure say Y.
+
+endif
+#endmenu
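
As the help text above notes, CAIF is reached from user space through PF_CAIF sockets. A hypothetical minimal client against the uapi this series introduces might look as follows; the address layout, CAIFPROTO_AT and CAIF_ATTYPE_PLAIN are taken from linux/caif/caif_socket.h elsewhere in the patch set, and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

int main(void)
{
	struct sockaddr_caif addr;
	/* SOCK_SEQPACKET: CAIF is connection-oriented and message-based. */
	int fd = socket(PF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

	if (fd < 0) {
		perror("socket(PF_CAIF)");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.at.type = CAIF_ATTYPE_PLAIN;	/* plain AT channel */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}
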
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 000000000000..34852af2595e
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,26 @@
+ifeq ($(CONFIG_CAIF_DEBUG),1)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+
+caif-objs := caif_dev.o \
+	cfcnfg.o cfmuxl.o cfctrl.o \
+	cffrml.o cfveil.o cfdbgl.o\
+	cfserl.o cfdgml.o \
+	cfrfml.o cfvidl.o cfutill.o \
+	cfsrvl.o cfpkt_skbuff.o caif_config_util.o
+clean-dirs:= .tmp_versions
+
+clean-files:= \
+	Module.symvers \
+	modules.order \
+	*.cmd \
+	*.o \
+	*~
+
+obj-$(CONFIG_CAIF) += caif.o
+obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
+obj-$(CONFIG_CAIF) += caif_socket.o
+
+export-objs := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 000000000000..6f36580366f0
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:	Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_dev.h>
+
+int connect_req_to_link_param(struct cfcnfg *cnfg,
+				struct caif_connect_request *s,
+				struct cfctrl_link_param *l)
+{
+	struct dev_info *dev_info;
+	enum cfcnfg_phy_preference pref;
+	memset(l, 0, sizeof(*l));
+	l->priority = s->priority;
+
+	if (s->link_name[0] != '\0')
+		l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+	else {
+		switch (s->link_selector) {
+		case CAIF_LINK_HIGH_BANDW:
+			pref = CFPHYPREF_HIGH_BW;
+			break;
+		case CAIF_LINK_LOW_LATENCY:
+			pref = CFPHYPREF_LOW_LAT;
+			break;
+		default:
+			return -EINVAL;
+		}
+		dev_info = cfcnfg_get_phyid(cnfg, pref);
+		if (dev_info == NULL)
+			return -ENODEV;
+		l->phyid = dev_info->id;
+	}
+	switch (s->protocol) {
+	case CAIFPROTO_AT:
+		l->linktype = CFCTRL_SRV_VEI;
+		if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
+			l->chtype = 0x02;
+		else
+			l->chtype = s->sockaddr.u.at.type;
+		l->endpoint = 0x00;
+		break;
+	case CAIFPROTO_DATAGRAM:
+		l->linktype = CFCTRL_SRV_DATAGRAM;
+		l->chtype = 0x00;
+		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+		break;
+	case CAIFPROTO_DATAGRAM_LOOP:
+		l->linktype = CFCTRL_SRV_DATAGRAM;
+		l->chtype = 0x03;
+		l->endpoint = 0x00;
+		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+		break;
+	case CAIFPROTO_RFM:
+		l->linktype = CFCTRL_SRV_RFM;
+		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
+		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+			sizeof(l->u.rfm.volume)-1);
+		l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+		break;
+	case CAIFPROTO_UTIL:
+		l->linktype = CFCTRL_SRV_UTIL;
+		l->endpoint = 0x00;
+		l->chtype = 0x00;
+		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
+			sizeof(l->u.utility.name)-1);
+		l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+		caif_assert(sizeof(l->u.utility.name) > 10);
+		l->u.utility.paramlen = s->param.size;
+		if (l->u.utility.paramlen > sizeof(l->u.utility.params))
+			l->u.utility.paramlen = sizeof(l->u.utility.params);
+
+		memcpy(l->u.utility.params, s->param.data,
+			l->u.utility.paramlen);
+
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 000000000000..e84837e1bc86
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,413 @@
+/*
+ * CAIF Interface registration.
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Borrowed heavily from file: pn_dev.c. Thanks to
+ *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ *  and Sakari Ailus <sakari.ailus@nokia.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <net/netns/generic.h>
+#include <net/net_namespace.h>
+#include <net/pkt_sched.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+#define TIMEOUT (HZ*5)
+
+/* Used for local tracking of the CAIF net devices */
+struct caif_device_entry {
+	struct cflayer layer;
+	struct list_head list;
+	atomic_t in_use;
+	atomic_t state;
+	u16 phyid;
+	struct net_device *netdev;
+	wait_queue_head_t event;
+};
+
+struct caif_device_entry_list {
+	struct list_head list;
+	/* Protects simulanous deletes in list */
+	spinlock_t lock;
+};
+
+struct caif_net {
+	struct caif_device_entry_list caifdevs;
+};
+
+static int caif_net_id;
+static struct cfcnfg *cfg;
+
+static struct caif_device_entry_list *caif_device_list(struct net *net)
+{
+	struct caif_net *caifn;
+	BUG_ON(!net);
+	caifn = net_generic(net, caif_net_id);
+	BUG_ON(!caifn);
+	return &caifn->caifdevs;
+}
+
+/* Allocate new CAIF device. */
+static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+{
+	struct caif_device_entry_list *caifdevs;
+	struct caif_device_entry *caifd;
+	caifdevs = caif_device_list(dev_net(dev));
+	BUG_ON(!caifdevs);
+	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+	if (!caifd)
+		return NULL;
+	caifd->netdev = dev;
+	list_add(&caifd->list, &caifdevs->list);
+	init_waitqueue_head(&caifd->event);
+	return caifd;
+}
+
+static struct caif_device_entry *caif_get(struct net_device *dev)
+{
+	struct caif_device_entry_list *caifdevs =
+	    caif_device_list(dev_net(dev));
+	struct caif_device_entry *caifd;
+	BUG_ON(!caifdevs);
+	list_for_each_entry(caifd, &caifdevs->list, list) {
+		if (caifd->netdev == dev)
+			return caifd;
+	}
+	return NULL;
+}
+
+static void caif_device_destroy(struct net_device *dev)
+{
+	struct caif_device_entry_list *caifdevs =
+	    caif_device_list(dev_net(dev));
+	struct caif_device_entry *caifd;
+	ASSERT_RTNL();
+	if (dev->type != ARPHRD_CAIF)
+		return;
+
+	spin_lock_bh(&caifdevs->lock);
+	caifd = caif_get(dev);
+	if (caifd == NULL) {
+		spin_unlock_bh(&caifdevs->lock);
+		return;
+	}
+
+	list_del(&caifd->list);
+	spin_unlock_bh(&caifdevs->lock);
+
+	kfree(caifd);
+	return;
+}
+
+static int transmit(struct cflayer *layer, struct cfpkt *pkt)
+{
+	struct caif_device_entry *caifd =
+	    container_of(layer, struct caif_device_entry, layer);
+	struct sk_buff *skb, *skb2;
+	int ret = -EINVAL;
+	skb = cfpkt_tonative(pkt);
+	skb->dev = caifd->netdev;
+	/*
+	 * Don't allow SKB to be destroyed upon error, but signal resend
+	 * notification to clients. We can't rely on the return value as
+	 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
+	 */
+	if (netif_queue_stopped(caifd->netdev))
+		return -EAGAIN;
+	skb2 = skb_get(skb);
+
+	ret = dev_queue_xmit(skb2);
+
+	if (!ret)
+		kfree_skb(skb);
+	else
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+	struct caif_device_entry *caifd;
+	struct caif_dev_common *caifdev;
+	caifd = container_of(layr, struct caif_device_entry, layer);
+	caifdev = netdev_priv(caifd->netdev);
+	if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
+		atomic_set(&caifd->in_use, 1);
+		wake_up_interruptible(&caifd->event);
+
+	} else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
+		atomic_set(&caifd->in_use, 0);
+		wake_up_interruptible(&caifd->event);
+	}
+	return 0;
+}
+
+/*
+ * Stuff received packets to associated sockets.
+ * On error, returns non-zero and releases the skb.
+ */
+static int receive(struct sk_buff *skb, struct net_device *dev,
+		   struct packet_type *pkttype, struct net_device *orig_dev)
+{
+	struct net *net;
+	struct cfpkt *pkt;
+	struct caif_device_entry *caifd;
+	net = dev_net(dev);
+	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+	caifd = caif_get(dev);
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+		return NET_RX_DROP;
+
+	if (caifd->layer.up->receive(caifd->layer.up, pkt))
+		return NET_RX_DROP;
+
+	return 0;
+}
+
+static struct packet_type caif_packet_type __read_mostly = {
+	.type = cpu_to_be16(ETH_P_CAIF),
+	.func = receive,
+};
+
+static void dev_flowctrl(struct net_device *dev, int on)
+{
+	struct caif_device_entry *caifd = caif_get(dev);
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+		return;
+
+	caifd->layer.up->ctrlcmd(caifd->layer.up,
+				 on ?
+				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
+				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+				 caifd->layer.id);
+}
+
+/* notify Caif of device events */
+static int caif_device_notify(struct notifier_block *me, unsigned long what,
+			      void *arg)
+{
+	struct net_device *dev = arg;
+	struct caif_device_entry *caifd = NULL;
+	struct caif_dev_common *caifdev;
+	enum cfcnfg_phy_preference pref;
+	int res = -EINVAL;
+	enum cfcnfg_phy_type phy_type;
+
+	if (dev->type != ARPHRD_CAIF)
+		return 0;
+
+	switch (what) {
+	case NETDEV_REGISTER:
+		pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+		caifd = caif_device_alloc(dev);
+		if (caifd == NULL)
+			break;
+		caifdev = netdev_priv(dev);
+		caifdev->flowctrl = dev_flowctrl;
+		atomic_set(&caifd->state, what);
+		res = 0;
+		break;
+
+	case NETDEV_UP:
+		pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
+		caifdev = netdev_priv(dev);
+		if (atomic_read(&caifd->state) == NETDEV_UP) {
+			pr_info("CAIF: %s():%s already up\n",
+				__func__, dev->name);
+			break;
+		}
+		atomic_set(&caifd->state, what);
+		caifd->layer.transmit = transmit;
+		caifd->layer.modemcmd = modemcmd;
+
+		if (caifdev->use_frag)
+			phy_type = CFPHYTYPE_FRAG;
+		else
+			phy_type = CFPHYTYPE_CAIF;
+
+		switch (caifdev->link_select) {
+		case CAIF_LINK_HIGH_BANDW:
+			pref = CFPHYPREF_LOW_LAT;
+			break;
+		case CAIF_LINK_LOW_LATENCY:
+			pref = CFPHYPREF_HIGH_BW;
+			break;
+		default:
+			pref = CFPHYPREF_HIGH_BW;
+			break;
+		}
+
+		cfcnfg_add_phy_layer(get_caif_conf(),
+				     phy_type,
+				     dev,
+				     &caifd->layer,
+				     &caifd->phyid,
+				     pref,
+				     caifdev->use_fcs,
+				     caifdev->use_stx);
+		strncpy(caifd->layer.name, dev->name,
+			sizeof(caifd->layer.name) - 1);
+		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+		break;
+
+	case NETDEV_GOING_DOWN:
+		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
+		pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+
+		if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
+			atomic_read(&caifd->state) == NETDEV_DOWN)
+			break;
+
+		atomic_set(&caifd->state, what);
+		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+			return -EINVAL;
+		caifd->layer.up->ctrlcmd(caifd->layer.up,
+					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+					 caifd->layer.id);
+		res = wait_event_interruptible_timeout(caifd->event,
+					atomic_read(&caifd->in_use) == 0,
+					TIMEOUT);
+		break;
+
+	case NETDEV_DOWN:
+		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
+		pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+		if (atomic_read(&caifd->in_use))
+			pr_warning("CAIF: %s(): "
+				   "Unregistering an active CAIF device: %s\n",
+				   __func__, dev->name);
+		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+		atomic_set(&caifd->state, what);
+		break;
+
+	case NETDEV_UNREGISTER:
+		caifd = caif_get(dev);
+		pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+		atomic_set(&caifd->state, what);
+		caif_device_destroy(dev);
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+	.notifier_call = caif_device_notify,
+	.priority = 0,
+};
+
+
+struct cfcnfg *get_caif_conf(void)
+{
+	return cfg;
+}
+EXPORT_SYMBOL(get_caif_conf);
+
+int caif_connect_client(struct caif_connect_request *conn_req,
+			struct cflayer *client_layer)
+{
+	struct cfctrl_link_param param;
+	if (connect_req_to_link_param(get_caif_conf(), conn_req, &param) == 0)
+		/* Hook up the adaptation layer. */
+		return cfcnfg_add_adaptation_layer(get_caif_conf(),
+						   &param, client_layer);
+
+	return -EINVAL;
+
+	caif_assert(0);
+}
+EXPORT_SYMBOL(caif_connect_client);
+
+int caif_disconnect_client(struct cflayer *adap_layer)
+{
+	return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer);
+}
+EXPORT_SYMBOL(caif_disconnect_client);
+
+/* Per-namespace Caif devices handling */
+static int caif_init_net(struct net *net)
+{
+	struct caif_net *caifn = net_generic(net, caif_net_id);
+	INIT_LIST_HEAD(&caifn->caifdevs.list);
+	spin_lock_init(&caifn->caifdevs.lock);
+	return 0;
+}
+
+static void caif_exit_net(struct net *net)
+{
+	struct net_device *dev;
+	int res;
+	rtnl_lock();
+	for_each_netdev(net, dev) {
+		if (dev->type != ARPHRD_CAIF)
+			continue;
+		res = dev_close(dev);
+		caif_device_destroy(dev);
+	}
+	rtnl_unlock();
+}
+
+static struct pernet_operations caif_net_ops = {
+	.init = caif_init_net,
+	.exit = caif_exit_net,
+	.id = &caif_net_id,
+	.size = sizeof(struct caif_net),
+};
+
+/* Initialize Caif devices list */
+static int __init caif_device_init(void)
+{
+	int result;
+	cfg = cfcnfg_create();
+	if (!cfg) {
+		pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+		goto err_cfcnfg_create_failed;
+	}
+	result = register_pernet_device(&caif_net_ops);
+
+	if (result) {
+		kfree(cfg);
+		cfg = NULL;
+		return result;
+	}
+	dev_add_pack(&caif_packet_type);
+	register_netdevice_notifier(&caif_device_notifier);
+
+	return result;
+err_cfcnfg_create_failed:
+	return -ENODEV;
+}
+
+static void __exit caif_device_exit(void)
+{
+	dev_remove_pack(&caif_packet_type);
+	unregister_pernet_device(&caif_net_ops);
+	unregister_netdevice_notifier(&caif_device_notifier);
+	cfcnfg_remove(cfg);
+}
+
+module_init(caif_device_init);
+module_exit(caif_device_exit);
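
caif_device_notify() above only acts on devices with dev->type == ARPHRD_CAIF and reaches into netdev_priv() expecting a struct caif_dev_common at the start of the driver's private area. A CAIF link-layer driver's setup routine would therefore look roughly like the following sketch; the function name and flag choices are illustrative, and only the caif_dev_common fields it sets are taken from the file above:

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <net/caif/caif_device.h>

static void my_caif_link_setup(struct net_device *dev)
{
	struct caif_dev_common *common = netdev_priv(dev);

	dev->type = ARPHRD_CAIF;	/* makes caif_device_notify() adopt it */
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	common->use_frag = true;	/* selects CFPHYTYPE_FRAG at NETDEV_UP */
	common->use_fcs = true;		/* passed to cfcnfg_add_phy_layer() */
	common->use_stx = true;
	common->link_select = CAIF_LINK_LOW_LATENCY;
	/* common->flowctrl is filled in by caif_dev.c at NETDEV_REGISTER */
}
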
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c new file mode 100644 index 000000000000..cdf62b9fefac --- /dev/null +++ b/net/caif/caif_socket.c | |||
@@ -0,0 +1,1391 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland sjur.brandeland@stericsson.com | ||
4 | * Per Sigmond per.sigmond@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | */ | ||
7 | |||
8 | #include <linux/fs.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/tcp.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | #include <linux/caif/caif_socket.h> | ||
22 | #include <net/caif/caif_layer.h> | ||
23 | #include <net/caif/caif_dev.h> | ||
24 | #include <net/caif/cfpkt.h> | ||
25 | |||
26 | MODULE_LICENSE("GPL"); | ||
27 | |||
28 | #define CHNL_SKT_READ_QUEUE_HIGH 200 | ||
29 | #define CHNL_SKT_READ_QUEUE_LOW 100 | ||
30 | |||
31 | static int caif_sockbuf_size = 40000; | ||
32 | static atomic_t caif_nr_socks = ATOMIC_INIT(0); | ||
33 | |||
34 | #define CONN_STATE_OPEN_BIT 1 | ||
35 | #define CONN_STATE_PENDING_BIT 2 | ||
36 | #define CONN_STATE_PEND_DESTROY_BIT 3 | ||
37 | #define CONN_REMOTE_SHUTDOWN_BIT 4 | ||
38 | |||
39 | #define TX_FLOW_ON_BIT 1 | ||
40 | #define RX_FLOW_ON_BIT 2 | ||
41 | |||
42 | #define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\ | ||
43 | (void *) &(cf_sk)->conn_state) | ||
44 | #define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
45 | (void *) &(cf_sk)->conn_state) | ||
46 | #define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\ | ||
47 | (void *) &(cf_sk)->conn_state) | ||
48 | #define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\ | ||
49 | (void *) &(cf_sk)->conn_state) | ||
50 | |||
51 | #define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\ | ||
52 | (void *) &(cf_sk)->conn_state) | ||
53 | #define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\ | ||
54 | (void *) &(cf_sk)->conn_state) | ||
55 | #define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\ | ||
56 | (void *) &(cf_sk)->conn_state) | ||
57 | #define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\ | ||
58 | (void *) &(cf_sk)->conn_state) | ||
59 | #define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\ | ||
60 | (void *) &(cf_sk)->conn_state) | ||
61 | #define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
62 | (void *) &(cf_sk)->conn_state) | ||
63 | |||
64 | #define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
65 | (void *) &(dev)->conn_state) | ||
66 | #define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\ | ||
67 | (void *) &(cf_sk)->flow_state) | ||
68 | #define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\ | ||
69 | (void *) &(cf_sk)->flow_state) | ||
70 | |||
71 | #define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\ | ||
72 | (void *) &(cf_sk)->flow_state) | ||
73 | #define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\ | ||
74 | (void *) &(cf_sk)->flow_state) | ||
75 | #define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\ | ||
76 | (void *) &(cf_sk)->flow_state) | ||
77 | #define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\ | ||
78 | (void *) &(cf_sk)->flow_state) | ||
79 | |||
80 | #define SKT_READ_FLAG 0x01 | ||
81 | #define SKT_WRITE_FLAG 0x02 | ||
82 | static struct dentry *debugfsdir; | ||
83 | #include <linux/debugfs.h> | ||
84 | |||
85 | #ifdef CONFIG_DEBUG_FS | ||
86 | struct debug_fs_counter { | ||
87 | atomic_t num_open; | ||
88 | atomic_t num_close; | ||
89 | atomic_t num_init; | ||
90 | atomic_t num_init_resp; | ||
91 | atomic_t num_init_fail_resp; | ||
92 | atomic_t num_deinit; | ||
93 | atomic_t num_deinit_resp; | ||
94 | atomic_t num_remote_shutdown_ind; | ||
95 | atomic_t num_tx_flow_off_ind; | ||
96 | atomic_t num_tx_flow_on_ind; | ||
97 | atomic_t num_rx_flow_off; | ||
98 | atomic_t num_rx_flow_on; | ||
99 | atomic_t skb_in_use; | ||
100 | atomic_t skb_alloc; | ||
101 | atomic_t skb_free; | ||
102 | }; | ||
103 | static struct debug_fs_counter cnt; | ||
104 | #define dbfs_atomic_inc(v) atomic_inc(v) | ||
105 | #define dbfs_atomic_dec(v) atomic_dec(v) | ||
106 | #else | ||
107 | #define dbfs_atomic_inc(v) | ||
108 | #define dbfs_atomic_dec(v) | ||
109 | #endif | ||
110 | |||
111 | /* The AF_CAIF socket */ | ||
112 | struct caifsock { | ||
113 | /* NOTE: sk has to be the first member */ | ||
114 | struct sock sk; | ||
115 | struct cflayer layer; | ||
116 | char name[CAIF_LAYER_NAME_SZ]; | ||
117 | u32 conn_state; | ||
118 | u32 flow_state; | ||
119 | struct cfpktq *pktq; | ||
120 | int file_mode; | ||
121 | struct caif_connect_request conn_req; | ||
122 | int read_queue_len; | ||
123 | /* protect updates of read_queue_len */ | ||
124 | spinlock_t read_queue_len_lock; | ||
125 | struct dentry *debugfs_socket_dir; | ||
126 | }; | ||
127 | |||
128 | static void drain_queue(struct caifsock *cf_sk); | ||
129 | |||
130 | /* Packet Receive Callback function called from CAIF Stack */ | ||
131 | static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt) | ||
132 | { | ||
133 | struct caifsock *cf_sk; | ||
134 | int read_queue_high; | ||
135 | cf_sk = container_of(layr, struct caifsock, layer); | ||
136 | |||
137 | if (!STATE_IS_OPEN(cf_sk)) { | ||
138 | /*FIXME: This should be allowed finally!*/ | ||
139 | pr_debug("CAIF: %s(): called after close request\n", __func__); | ||
140 | cfpkt_destroy(pkt); | ||
141 | return 0; | ||
142 | } | ||
143 | /* NOTE: This function may be called in Tasklet context! */ | ||
144 | |||
145 | /* The queue has its own lock */ | ||
146 | cfpkt_queue(cf_sk->pktq, pkt, 0); | ||
147 | |||
148 | spin_lock(&cf_sk->read_queue_len_lock); | ||
149 | cf_sk->read_queue_len++; | ||
150 | |||
151 | read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH); | ||
152 | spin_unlock(&cf_sk->read_queue_len_lock); | ||
153 | |||
154 | if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) { | ||
155 | dbfs_atomic_inc(&cnt.num_rx_flow_off); | ||
156 | SET_RX_FLOW_OFF(cf_sk); | ||
157 | |||
158 | /* Send flow off (NOTE: must not sleep) */ | ||
159 | pr_debug("CAIF: %s():" | ||
160 | " sending flow OFF (queue len = %d)\n", | ||
161 | __func__, | ||
162 | cf_sk->read_queue_len); | ||
163 | caif_assert(cf_sk->layer.dn); | ||
164 | caif_assert(cf_sk->layer.dn->ctrlcmd); | ||
165 | |||
166 | (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
167 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
168 | } | ||
169 | |||
170 | /* Signal reader that data is available. */ | ||
171 | |||
172 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* Packet Flow Control Callback function called from CAIF */ | ||
178 | static void caif_sktflowctrl_cb(struct cflayer *layr, | ||
179 | enum caif_ctrlcmd flow, | ||
180 | int phyid) | ||
181 | { | ||
182 | struct caifsock *cf_sk; | ||
183 | |||
184 | /* NOTE: This function may be called in Tasklet context! */ | ||
185 | pr_debug("CAIF: %s(): flowctrl func called: %s.\n", | ||
186 | __func__, | ||
187 | flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : | ||
188 | flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : | ||
189 | flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" : | ||
190 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" : | ||
191 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" : | ||
192 | flow == | ||
193 | CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" : | ||
194 | "UKNOWN CTRL COMMAND"); | ||
195 | |||
196 | if (layr == NULL) | ||
197 | return; | ||
198 | |||
199 | cf_sk = container_of(layr, struct caifsock, layer); | ||
200 | |||
201 | switch (flow) { | ||
202 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
203 | dbfs_atomic_inc(&cnt.num_tx_flow_on_ind); | ||
204 | /* Signal reader that data is available. */ | ||
205 | SET_TX_FLOW_ON(cf_sk); | ||
206 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
207 | break; | ||
208 | |||
209 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
210 | dbfs_atomic_inc(&cnt.num_tx_flow_off_ind); | ||
211 | SET_TX_FLOW_OFF(cf_sk); | ||
212 | break; | ||
213 | |||
214 | case CAIF_CTRLCMD_INIT_RSP: | ||
215 | dbfs_atomic_inc(&cnt.num_init_resp); | ||
216 | /* Signal reader that data is available. */ | ||
217 | caif_assert(STATE_IS_OPEN(cf_sk)); | ||
218 | SET_PENDING_OFF(cf_sk); | ||
219 | SET_TX_FLOW_ON(cf_sk); | ||
220 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
221 | break; | ||
222 | |||
223 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
224 | dbfs_atomic_inc(&cnt.num_deinit_resp); | ||
225 | caif_assert(!STATE_IS_OPEN(cf_sk)); | ||
226 | SET_PENDING_OFF(cf_sk); | ||
227 | if (!STATE_IS_PENDING_DESTROY(cf_sk)) { | ||
228 | if (cf_sk->sk.sk_sleep != NULL) | ||
229 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
230 | } | ||
231 | dbfs_atomic_inc(&cnt.num_deinit); | ||
232 | sock_put(&cf_sk->sk); | ||
233 | break; | ||
234 | |||
235 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
236 | dbfs_atomic_inc(&cnt.num_init_fail_resp); | ||
237 | caif_assert(STATE_IS_OPEN(cf_sk)); | ||
238 | SET_STATE_CLOSED(cf_sk); | ||
239 | SET_PENDING_OFF(cf_sk); | ||
240 | SET_TX_FLOW_OFF(cf_sk); | ||
241 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
242 | break; | ||
243 | |||
244 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
245 | dbfs_atomic_inc(&cnt.num_remote_shutdown_ind); | ||
246 | SET_REMOTE_SHUTDOWN(cf_sk); | ||
247 | /* Use sk_shutdown to indicate remote shutdown indication */ | ||
248 | cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN; | ||
249 | cf_sk->file_mode = 0; | ||
250 | wake_up_interruptible(cf_sk->sk.sk_sleep); | ||
251 | break; | ||
252 | |||
253 | default: | ||
254 | pr_debug("CAIF: %s(): Unexpected flow command %d\n", | ||
255 | __func__, flow); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | static void skb_destructor(struct sk_buff *skb) | ||
260 | { | ||
261 | dbfs_atomic_inc(&cnt.skb_free); | ||
262 | dbfs_atomic_dec(&cnt.skb_in_use); | ||
263 | } | ||
264 | |||
265 | |||
266 | static int caif_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
267 | struct msghdr *m, size_t buf_len, int flags) | ||
268 | |||
269 | { | ||
270 | struct sock *sk = sock->sk; | ||
271 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
272 | struct cfpkt *pkt = NULL; | ||
273 | size_t len; | ||
274 | int result; | ||
275 | struct sk_buff *skb; | ||
276 | ssize_t ret = -EIO; | ||
277 | int read_queue_low; | ||
278 | |||
279 | if (cf_sk == NULL) { | ||
280 | pr_debug("CAIF: %s(): private_data not set!\n", | ||
281 | __func__); | ||
282 | ret = -EBADFD; | ||
283 | goto read_error; | ||
284 | } | ||
285 | |||
286 | /* Don't do multiple iovec entries yet */ | ||
287 | if (m->msg_iovlen != 1) | ||
288 | return -EOPNOTSUPP; | ||
289 | |||
290 | if (unlikely(!buf_len)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | lock_sock(&(cf_sk->sk)); | ||
294 | |||
295 | caif_assert(cf_sk->pktq); | ||
296 | |||
297 | if (!STATE_IS_OPEN(cf_sk)) { | ||
298 | /* Socket is closed or closing. */ | ||
299 | if (!STATE_IS_PENDING(cf_sk)) { | ||
300 | pr_debug("CAIF: %s(): socket is closed (by remote)\n", | ||
301 | __func__); | ||
302 | ret = -EPIPE; | ||
303 | } else { | ||
304 | pr_debug("CAIF: %s(): socket is closing..\n", __func__); | ||
305 | ret = -EBADF; | ||
306 | } | ||
307 | goto read_error; | ||
308 | } | ||
309 | /* Socket is open or opening. */ | ||
310 | if (STATE_IS_PENDING(cf_sk)) { | ||
311 | pr_debug("CAIF: %s(): socket is opening...\n", __func__); | ||
312 | |||
313 | if (flags & MSG_DONTWAIT) { | ||
314 | /* We can't block. */ | ||
315 | pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n", | ||
316 | __func__); | ||
317 | ret = -EAGAIN; | ||
318 | goto read_error; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Blocking mode; state is pending and we need to wait | ||
323 | * for its conclusion. | ||
324 | */ | ||
325 | release_sock(&cf_sk->sk); | ||
326 | |||
327 | result = | ||
328 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
329 | !STATE_IS_PENDING(cf_sk)); | ||
330 | |||
331 | lock_sock(&(cf_sk->sk)); | ||
332 | |||
333 | if (result == -ERESTARTSYS) { | ||
334 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
335 | " woken by a signal (1)", __func__); | ||
336 | ret = -ERESTARTSYS; | ||
337 | goto read_error; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || | ||
342 | !STATE_IS_OPEN(cf_sk) || | ||
343 | STATE_IS_PENDING(cf_sk)) { | ||
344 | |||
345 | pr_debug("CAIF: %s(): socket closed\n", | ||
346 | __func__); | ||
347 | ret = -ESHUTDOWN; | ||
348 | goto read_error; | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Block if we don't have any received buffers. | ||
353 | * The queue has its own lock. | ||
354 | */ | ||
355 | while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) { | ||
356 | |||
357 | if (flags & MSG_DONTWAIT) { | ||
358 | pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__); | ||
359 | ret = -EAGAIN; | ||
360 | goto read_error; | ||
361 | } | ||
362 | trace_printk("CAIF: %s() wait_event\n", __func__); | ||
363 | |||
364 | /* Let writers in. */ | ||
365 | release_sock(&cf_sk->sk); | ||
366 | |||
367 | /* Block reader until data arrives or socket is closed. */ | ||
368 | if (wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
369 | cfpkt_qpeek(cf_sk->pktq) | ||
370 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
371 | || !STATE_IS_OPEN(cf_sk)) == | ||
372 | -ERESTARTSYS) { | ||
373 | pr_debug("CAIF: %s():" | ||
374 | " wait_event_interruptible woken by " | ||
375 | "a signal, signal_pending(current) = %d\n", | ||
376 | __func__, | ||
377 | signal_pending(current)); | ||
378 | return -ERESTARTSYS; | ||
379 | } | ||
380 | |||
381 | trace_printk("CAIF: %s() awake\n", __func__); | ||
382 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
383 | pr_debug("CAIF: %s(): " | ||
384 | "received remote_shutdown indication\n", | ||
385 | __func__); | ||
386 | ret = -ESHUTDOWN; | ||
387 | goto read_error_no_unlock; | ||
388 | } | ||
389 | |||
390 | /* I want to be alone on cf_sk (except status and queue). */ | ||
391 | lock_sock(&(cf_sk->sk)); | ||
392 | |||
393 | if (!STATE_IS_OPEN(cf_sk)) { | ||
394 | /* Someone closed the link, report error. */ | ||
395 | pr_debug("CAIF: %s(): remote end shutdown!\n", | ||
396 | __func__); | ||
397 | ret = -EPIPE; | ||
398 | goto read_error; | ||
399 | } | ||
400 | } | ||
401 | |||
402 | /* The queue has its own lock. */ | ||
403 | len = cfpkt_getlen(pkt); | ||
404 | |||
405 | /* Check max length that can be copied. */ | ||
406 | if (len <= buf_len) | ||
407 | pkt = cfpkt_dequeue(cf_sk->pktq); | ||
408 | else { | ||
409 | pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n", | ||
410 | __func__, (long) len, (long) buf_len); | ||
411 | if (sock->type == SOCK_SEQPACKET) { | ||
412 | ret = -EMSGSIZE; | ||
413 | goto read_error; | ||
414 | } | ||
415 | len = buf_len; | ||
416 | } | ||
417 | |||
418 | |||
419 | spin_lock(&cf_sk->read_queue_len_lock); | ||
420 | cf_sk->read_queue_len--; | ||
421 | read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW); | ||
422 | spin_unlock(&cf_sk->read_queue_len_lock); | ||
423 | |||
424 | if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) { | ||
425 | dbfs_atomic_inc(&cnt.num_rx_flow_on); | ||
426 | SET_RX_FLOW_ON(cf_sk); | ||
427 | |||
428 | /* Send flow on. */ | ||
429 | pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n", | ||
430 | __func__, cf_sk->read_queue_len); | ||
431 | caif_assert(cf_sk->layer.dn); | ||
432 | caif_assert(cf_sk->layer.dn->ctrlcmd); | ||
433 | (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
434 | CAIF_MODEMCMD_FLOW_ON_REQ); | ||
435 | |||
436 | caif_assert(cf_sk->read_queue_len >= 0); | ||
437 | } | ||
438 | |||
439 | skb = cfpkt_tonative(pkt); | ||
440 | result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); | ||
441 | skb_pull(skb, len); | ||
442 | |||
443 | if (result) { | ||
444 | pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__); | ||
445 | cfpkt_destroy(pkt); | ||
446 | ret = -EFAULT; | ||
447 | goto read_error; | ||
448 | } | ||
449 | |||
450 | /* Free packet and remove from queue */ | ||
451 | if (skb->len == 0) | ||
452 | skb_free_datagram(sk, skb); | ||
453 | |||
454 | /* Let the others in. */ | ||
455 | release_sock(&cf_sk->sk); | ||
456 | return len; | ||
457 | |||
458 | read_error: | ||
459 | release_sock(&cf_sk->sk); | ||
460 | read_error_no_unlock: | ||
461 | return ret; | ||
462 | } | ||
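The read path above follows a classic unlock-wait-relock discipline: the socket lock is released before sleeping so the receive callback can keep queueing packets, and the open/shutdown state is re-checked after every wakeup because it may have changed while the lock was dropped. A minimal userspace analogue of the same pattern, using pthreads; every name here is illustrative and not part of this patch:

    /*
     * Userspace sketch of the unlock-wait-relock loop in caif_recvmsg().
     * Only the shape of the loop mirrors the kernel code.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct rx_queue {
        pthread_mutex_t lock;   /* plays the role of lock_sock() */
        pthread_cond_t wake;    /* plays the role of sk_sleep */
        void *head;             /* plays the role of cfpkt_qpeek() */
        bool closed;            /* plays the role of !STATE_IS_OPEN() */
    };

    /* Block until a packet is queued or the queue is closed. */
    static void *rx_wait(struct rx_queue *q)
    {
        void *pkt;

        pthread_mutex_lock(&q->lock);
        while ((pkt = q->head) == NULL && !q->closed) {
            /* cond_wait drops the lock while sleeping, like
             * release_sock() + wait_event_interruptible() above,
             * and re-acquires it before returning. */
            pthread_cond_wait(&q->wake, &q->lock);
        }
        /* Re-check state here: it may have changed while unlocked. */
        if (q->closed)
            pkt = NULL;
        else
            q->head = NULL;     /* dequeue */
        pthread_mutex_unlock(&q->lock);
        return pkt;
    }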
463 | |||
464 | /* Send a message as a consequence of sendmsg or sendto. */ | ||
465 | static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock, | ||
466 | struct msghdr *msg, size_t len) | ||
467 | { | ||
468 | |||
469 | struct sock *sk = sock->sk; | ||
470 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
471 | size_t payload_size = msg->msg_iov->iov_len; | ||
472 | struct cfpkt *pkt = NULL; | ||
473 | struct caif_payload_info info; | ||
474 | unsigned char *txbuf; | ||
475 | ssize_t ret = -EIO; | ||
476 | int result; | ||
477 | struct sk_buff *skb; | ||
478 | caif_assert(msg->msg_iovlen == 1); | ||
479 | |||
480 | if (cf_sk == NULL) { | ||
481 | pr_debug("CAIF: %s(): private_data not set!\n", | ||
482 | __func__); | ||
483 | ret = -EBADFD; | ||
484 | goto write_error_no_unlock; | ||
485 | } | ||
486 | |||
487 | if (unlikely(msg->msg_iov->iov_base == NULL)) { | ||
488 | pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__); | ||
489 | ret = -EINVAL; | ||
490 | goto write_error_no_unlock; | ||
491 | } | ||
492 | |||
493 | if (payload_size > CAIF_MAX_PAYLOAD_SIZE) { | ||
494 | pr_debug("CAIF: %s(): buffer too long\n", __func__); | ||
495 | if (sock->type == SOCK_SEQPACKET) { | ||
496 | ret = -EINVAL; | ||
497 | goto write_error_no_unlock; | ||
498 | } | ||
499 | payload_size = CAIF_MAX_PAYLOAD_SIZE; | ||
500 | } | ||
501 | |||
502 | /* I want to be alone on cf_sk (except status and queue) */ | ||
503 | lock_sock(&(cf_sk->sk)); | ||
504 | |||
505 | caif_assert(cf_sk->pktq); | ||
506 | |||
507 | if (!STATE_IS_OPEN(cf_sk)) { | ||
508 | /* Socket is closed or closing */ | ||
509 | if (!STATE_IS_PENDING(cf_sk)) { | ||
510 | pr_debug("CAIF: %s(): socket is closed (by remote)\n", | ||
511 | __func__); | ||
512 | ret = -EPIPE; | ||
513 | } else { | ||
514 | pr_debug("CAIF: %s(): socket is closing...\n", | ||
515 | __func__); | ||
516 | ret = -EBADF; | ||
517 | } | ||
518 | goto write_error; | ||
519 | } | ||
520 | |||
521 | /* Socket is open or opening */ | ||
522 | if (STATE_IS_PENDING(cf_sk)) { | ||
523 | pr_debug("CAIF: %s(): socket is opening...\n", __func__); | ||
524 | |||
525 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
526 | /* We can't block */ | ||
527 | trace_printk("CAIF: %s():state pending:" | ||
528 | "state=MSG_DONTWAIT\n", __func__); | ||
529 | ret = -EAGAIN; | ||
530 | goto write_error; | ||
531 | } | ||
532 | /* Let readers in */ | ||
533 | release_sock(&cf_sk->sk); | ||
534 | |||
535 | /* | ||
536 | * Blocking mode; state is pending and we need to wait | ||
537 | * for its conclusion. | ||
538 | */ | ||
539 | result = | ||
540 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
541 | !STATE_IS_PENDING(cf_sk)); | ||
542 | /* I want to be alone on cf_sk (except status and queue) */ | ||
543 | lock_sock(&(cf_sk->sk)); | ||
544 | |||
545 | if (result == -ERESTARTSYS) { | ||
546 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
547 | " woken by a signal (1)", __func__); | ||
548 | ret = -ERESTARTSYS; | ||
549 | goto write_error; | ||
550 | } | ||
551 | } | ||
552 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || | ||
553 | !STATE_IS_OPEN(cf_sk) || | ||
554 | STATE_IS_PENDING(cf_sk)) { | ||
555 | |||
556 | pr_debug("CAIF: %s(): socket closed\n", | ||
557 | __func__); | ||
558 | ret = -ESHUTDOWN; | ||
559 | goto write_error; | ||
560 | } | ||
561 | |||
562 | if (!TX_FLOW_IS_ON(cf_sk)) { | ||
563 | |||
564 | /* Flow is off. Check non-block flag */ | ||
565 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
566 | trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off", | ||
567 | __func__); | ||
568 | ret = -EAGAIN; | ||
569 | goto write_error; | ||
570 | } | ||
571 | |||
572 | /* release lock before waiting */ | ||
573 | release_sock(&cf_sk->sk); | ||
574 | |||
575 | /* Wait until flow is on or socket is closed */ | ||
576 | if (wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
577 | TX_FLOW_IS_ON(cf_sk) | ||
578 | || !STATE_IS_OPEN(cf_sk) | ||
579 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
580 | ) == -ERESTARTSYS) { | ||
581 | pr_debug("CAIF: %s():" | ||
582 | " wait_event_interruptible woken by a signal", | ||
583 | __func__); | ||
584 | ret = -ERESTARTSYS; | ||
585 | goto write_error_no_unlock; | ||
586 | } | ||
587 | |||
588 | /* I want to be alone on cf_sk (except status and queue) */ | ||
589 | lock_sock(&(cf_sk->sk)); | ||
590 | |||
591 | if (!STATE_IS_OPEN(cf_sk)) { | ||
592 | /* someone closed the link, report error */ | ||
593 | pr_debug("CAIF: %s(): remote end shutdown!\n", | ||
594 | __func__); | ||
595 | ret = -EPIPE; | ||
596 | goto write_error; | ||
597 | } | ||
598 | |||
599 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
600 | pr_debug("CAIF: %s(): " | ||
601 | "received remote_shutdown indication\n", | ||
602 | __func__); | ||
603 | ret = -ESHUTDOWN; | ||
604 | goto write_error; | ||
605 | } | ||
606 | } | ||
607 | |||
608 | pkt = cfpkt_create(payload_size); | ||
609 | skb = (struct sk_buff *)pkt; | ||
610 | skb->destructor = skb_destructor; | ||
611 | skb->sk = sk; | ||
612 | dbfs_atomic_inc(&cnt.skb_alloc); | ||
613 | dbfs_atomic_inc(&cnt.skb_in_use); | ||
614 | if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) { | ||
615 | pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__); | ||
616 | cfpkt_destroy(pkt); | ||
617 | ret = -EINVAL; | ||
618 | goto write_error; | ||
619 | } | ||
620 | |||
621 | /* Copy data into buffer. */ | ||
622 | if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) { | ||
623 | pr_debug("CAIF: %s(): copy_from_user returned non zero.\n", | ||
624 | __func__); | ||
625 | cfpkt_destroy(pkt); | ||
626 | ret = -EINVAL; | ||
627 | goto write_error; | ||
628 | } | ||
629 | memset(&info, 0, sizeof(info)); | ||
630 | |||
631 | /* Send the packet down the stack. */ | ||
632 | caif_assert(cf_sk->layer.dn); | ||
633 | caif_assert(cf_sk->layer.dn->transmit); | ||
634 | |||
635 | do { | ||
636 | ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); | ||
637 | |||
638 | if (likely(ret != -EAGAIN)) | ||
639 | break; | ||
640 | |||
641 | /* EAGAIN - retry */ | ||
642 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
643 | pr_debug("CAIF: %s(): NONBLOCK and transmit failed," | ||
644 | " error = %ld\n", __func__, (long) ret); | ||
645 | ret = -EAGAIN; | ||
646 | goto write_error; | ||
647 | } | ||
648 | |||
649 | /* Let readers in */ | ||
650 | release_sock(&cf_sk->sk); | ||
651 | |||
652 | /* Wait until flow is on or socket is closed */ | ||
653 | if (wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
654 | TX_FLOW_IS_ON(cf_sk) | ||
655 | || !STATE_IS_OPEN(cf_sk) | ||
656 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
657 | ) == -ERESTARTSYS) { | ||
658 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
659 | " woken by a signal", __func__); | ||
660 | ret = -ERESTARTSYS; | ||
661 | goto write_error_no_unlock; | ||
662 | } | ||
663 | |||
664 | /* I want to be alone on cf_sk (except status and queue) */ | ||
665 | lock_sock(&(cf_sk->sk)); | ||
666 | |||
667 | } while (ret == -EAGAIN); | ||
668 | |||
669 | if (ret < 0) { | ||
670 | cfpkt_destroy(pkt); | ||
671 | pr_debug("CAIF: %s(): transmit failed, error = %ld\n", | ||
672 | __func__, (long) ret); | ||
673 | |||
674 | goto write_error; | ||
675 | } | ||
676 | |||
677 | release_sock(&cf_sk->sk); | ||
678 | return payload_size; | ||
679 | |||
680 | write_error: | ||
681 | release_sock(&cf_sk->sk); | ||
682 | write_error_no_unlock: | ||
683 | return ret; | ||
684 | } | ||
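caif_sendmsg() accepts a single iovec, caps the payload at CAIF_MAX_PAYLOAD_SIZE, and when TX flow is off either fails with -EAGAIN (MSG_DONTWAIT) or sleeps until flow resumes. From userspace the non-blocking variant pairs naturally with poll(), since caif_poll() raises POLLOUT exactly when TX flow is on; a sketch, assuming fd is a connected AF_CAIF SOCK_SEQPACKET socket:

    #include <errno.h>
    #include <poll.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Non-blocking send that waits for POLLOUT whenever flow is off. */
    static ssize_t caif_send_nonblock(int fd, const void *buf, size_t len)
    {
        for (;;) {
            ssize_t n = send(fd, buf, len, MSG_DONTWAIT);
            if (n >= 0 || errno != EAGAIN)
                return n;

            /* TX flow is off; poll() returns once caif_poll()
             * reports POLLOUT again. */
            struct pollfd pfd = { .fd = fd, .events = POLLOUT };
            if (poll(&pfd, 1, -1) < 0)
                return -1;
        }
    }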
685 | |||
686 | static unsigned int caif_poll(struct file *file, struct socket *sock, | ||
687 | poll_table *wait) | ||
688 | { | ||
689 | struct sock *sk = sock->sk; | ||
690 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
691 | u32 mask = 0; | ||
692 | poll_wait(file, sk->sk_sleep, wait); | ||
693 | lock_sock(&(cf_sk->sk)); | ||
694 | if (!STATE_IS_OPEN(cf_sk)) { | ||
695 | if (!STATE_IS_PENDING(cf_sk)) | ||
696 | mask |= POLLHUP; | ||
697 | } else { | ||
698 | if (cfpkt_qpeek(cf_sk->pktq) != NULL) | ||
699 | mask |= (POLLIN | POLLRDNORM); | ||
700 | if (TX_FLOW_IS_ON(cf_sk)) | ||
701 | mask |= (POLLOUT | POLLWRNORM); | ||
702 | } | ||
703 | release_sock(&cf_sk->sk); | ||
704 | trace_printk("CAIF: %s(): poll mask=0x%04x\n", | ||
705 | __func__, mask); | ||
706 | return mask; | ||
707 | } | ||
708 | |||
709 | static void drain_queue(struct caifsock *cf_sk) | ||
710 | { | ||
711 | struct cfpkt *pkt = NULL; | ||
712 | |||
713 | /* Empty the queue */ | ||
714 | do { | ||
715 | /* The queue has its own lock */ | ||
716 | if (!cf_sk->pktq) | ||
717 | break; | ||
718 | |||
719 | pkt = cfpkt_dequeue(cf_sk->pktq); | ||
720 | if (!pkt) | ||
721 | break; | ||
722 | pr_debug("CAIF: %s(): freeing packet from read queue\n", | ||
723 | __func__); | ||
724 | cfpkt_destroy(pkt); | ||
725 | |||
726 | } while (1); | ||
727 | |||
728 | cf_sk->read_queue_len = 0; | ||
729 | } | ||
730 | |||
731 | static int setsockopt(struct socket *sock, | ||
732 | int lvl, int opt, char __user *ov, unsigned int ol) | ||
733 | { | ||
734 | struct sock *sk = sock->sk; | ||
735 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
736 | int prio, linksel; | ||
737 | struct ifreq ifreq; | ||
738 | |||
739 | if (STATE_IS_OPEN(cf_sk)) { | ||
740 | pr_debug("CAIF: %s(): setsockopt " | ||
741 | "cannot be done on a connected socket\n", | ||
742 | __func__); | ||
743 | return -ENOPROTOOPT; | ||
744 | } | ||
745 | switch (opt) { | ||
746 | case CAIFSO_LINK_SELECT: | ||
747 | if (ol < sizeof(int)) { | ||
748 | pr_debug("CAIF: %s(): setsockopt" | ||
749 | " CAIFSO_CHANNEL_CONFIG bad size\n", __func__); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | if (lvl != SOL_CAIF) | ||
753 | goto bad_sol; | ||
754 | if (copy_from_user(&linksel, ov, sizeof(int))) | ||
755 | return -EINVAL; | ||
756 | lock_sock(&(cf_sk->sk)); | ||
757 | cf_sk->conn_req.link_selector = linksel; | ||
758 | release_sock(&cf_sk->sk); | ||
759 | return 0; | ||
760 | |||
761 | case SO_PRIORITY: | ||
762 | if (lvl != SOL_SOCKET) | ||
763 | goto bad_sol; | ||
764 | if (ol < sizeof(int)) { | ||
765 | pr_debug("CAIF: %s(): setsockopt" | ||
766 | " SO_PRIORITY bad size\n", __func__); | ||
767 | return -EINVAL; | ||
768 | } | ||
769 | if (copy_from_user(&prio, ov, sizeof(int))) | ||
770 | return -EINVAL; | ||
771 | lock_sock(&(cf_sk->sk)); | ||
772 | cf_sk->conn_req.priority = prio; | ||
773 | pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__, | ||
774 | cf_sk->conn_req.priority); | ||
775 | release_sock(&cf_sk->sk); | ||
776 | return 0; | ||
777 | |||
778 | case SO_BINDTODEVICE: | ||
779 | if (lvl != SOL_SOCKET) | ||
780 | goto bad_sol; | ||
781 | if (ol < sizeof(struct ifreq)) { | ||
782 | pr_debug("CAIF: %s(): setsockopt" | ||
783 | " SO_PRIORITY bad size\n", __func__); | ||
784 | return -EINVAL; | ||
785 | } | ||
786 | if (copy_from_user(&ifreq, ov, sizeof(ifreq))) | ||
787 | return -EFAULT; | ||
788 | lock_sock(&(cf_sk->sk)); | ||
789 | strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name, | ||
790 | sizeof(cf_sk->conn_req.link_name)); | ||
791 | cf_sk->conn_req.link_name | ||
792 | [sizeof(cf_sk->conn_req.link_name)-1] = 0; | ||
793 | release_sock(&cf_sk->sk); | ||
794 | return 0; | ||
795 | |||
796 | case CAIFSO_REQ_PARAM: | ||
797 | if (lvl != SOL_CAIF) | ||
798 | goto bad_sol; | ||
799 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) | ||
800 | return -ENOPROTOOPT; | ||
801 | if (ol > sizeof(cf_sk->conn_req.param.data)) | ||
802 | goto req_param_bad_size; | ||
803 | |||
804 | lock_sock(&(cf_sk->sk)); | ||
805 | cf_sk->conn_req.param.size = ol; | ||
806 | if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { | ||
807 | release_sock(&cf_sk->sk); | ||
808 | req_param_bad_size: | ||
809 | pr_debug("CAIF: %s(): setsockopt" | ||
810 | " CAIFSO_CHANNEL_CONFIG bad size\n", __func__); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | |||
814 | release_sock(&cf_sk->sk); | ||
815 | return 0; | ||
816 | |||
817 | default: | ||
818 | pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt); | ||
819 | return -EINVAL; | ||
820 | } | ||
821 | |||
822 | return 0; | ||
823 | bad_sol: | ||
824 | pr_debug("CAIF: %s(): setsockopt bad level\n", __func__); | ||
825 | return -ENOPROTOOPT; | ||
826 | |||
827 | } | ||
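Every option above is rejected with -ENOPROTOOPT once the socket is open, so configuration must happen before connect(). A hedged userspace sketch; SOL_CAIF, CAIFSO_LINK_SELECT and CAIF_LINK_LOW_LATENCY are assumed to come from the CAIF uapi headers added elsewhere in this series (the in-kernel default is CAIF_LINK_HIGH_BANDW, per caif_create() below):

    #include <string.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/caif/caif_socket.h> /* SOL_CAIF, CAIFSO_*: assumed */

    static int caif_configure(int fd)
    {
        int linksel = CAIF_LINK_LOW_LATENCY; /* assumed uapi constant */
        struct ifreq ifr;

        /* Must precede connect(); see the STATE_IS_OPEN() check. */
        if (setsockopt(fd, SOL_CAIF, CAIFSO_LINK_SELECT,
                       &linksel, sizeof(linksel)) < 0)
            return -1;

        /* Pin the channel to one CAIF link layer by interface name. */
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "caif0", sizeof(ifr.ifr_name) - 1);
        return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                          &ifr, sizeof(ifr));
    }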
828 | |||
829 | static int caif_connect(struct socket *sock, struct sockaddr *uservaddr, | ||
830 | int sockaddr_len, int flags) | ||
831 | { | ||
832 | struct caifsock *cf_sk = NULL; | ||
833 | int result = -1; | ||
834 | int mode = 0; | ||
835 | int ret = -EIO; | ||
836 | struct sock *sk = sock->sk; | ||
837 | BUG_ON(sk == NULL); | ||
838 | |||
839 | cf_sk = container_of(sk, struct caifsock, sk); | ||
840 | |||
841 | trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n", | ||
842 | __func__, cf_sk, | ||
843 | STATE_IS_OPEN(cf_sk), | ||
844 | TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk)); | ||
845 | |||
846 | |||
847 | if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM) | ||
848 | sock->state = SS_CONNECTING; | ||
849 | else | ||
850 | goto out; | ||
851 | |||
852 | /* I want to be alone on cf_sk (except status and queue) */ | ||
853 | lock_sock(&(cf_sk->sk)); | ||
854 | |||
855 | if (sockaddr_len != sizeof(struct sockaddr_caif)) { | ||
856 | pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n", | ||
857 | __func__, (long) sockaddr_len, | ||
858 | (long unsigned) sizeof(struct sockaddr_caif)); | ||
859 | ret = -EINVAL; | ||
860 | goto open_error; | ||
861 | } | ||
862 | |||
863 | if (uservaddr->sa_family != AF_CAIF) { | ||
864 | pr_debug("CAIF: %s(): Bad address family (%d)\n", | ||
865 | __func__, uservaddr->sa_family); | ||
866 | ret = -EAFNOSUPPORT; | ||
867 | goto open_error; | ||
868 | } | ||
869 | |||
870 | memcpy(&cf_sk->conn_req.sockaddr, uservaddr, | ||
871 | sizeof(struct sockaddr_caif)); | ||
872 | |||
873 | dbfs_atomic_inc(&cnt.num_open); | ||
874 | mode = SKT_READ_FLAG | SKT_WRITE_FLAG; | ||
875 | |||
876 | /* If socket is not open, make sure socket is in fully closed state */ | ||
877 | if (!STATE_IS_OPEN(cf_sk)) { | ||
878 | /* Has link close response been received (if we ever sent it)?*/ | ||
879 | if (STATE_IS_PENDING(cf_sk)) { | ||
880 | /* | ||
881 | * Still waiting for close response from remote. | ||
882 | * If opened non-blocking, report "would block" | ||
883 | */ | ||
884 | if (flags & O_NONBLOCK) { | ||
885 | pr_debug("CAIF: %s(): O_NONBLOCK" | ||
886 | " && close pending\n", __func__); | ||
887 | ret = -EAGAIN; | ||
888 | goto open_error; | ||
889 | } | ||
890 | |||
891 | pr_debug("CAIF: %s(): Wait for close response" | ||
892 | " from remote...\n", __func__); | ||
893 | |||
894 | release_sock(&cf_sk->sk); | ||
895 | |||
896 | /* | ||
897 | * Blocking mode; close is pending and we need to wait | ||
898 | * for its conclusion. | ||
899 | */ | ||
900 | result = | ||
901 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
902 | !STATE_IS_PENDING(cf_sk)); | ||
903 | |||
904 | lock_sock(&(cf_sk->sk)); | ||
905 | if (result == -ERESTARTSYS) { | ||
906 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
907 | "woken by a signal (1)", __func__); | ||
908 | ret = -ERESTARTSYS; | ||
909 | goto open_error; | ||
910 | } | ||
911 | } | ||
912 | } | ||
913 | |||
914 | /* socket is now either closed, pending open or open */ | ||
915 | if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { | ||
916 | /* Open */ | ||
917 | pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)" | ||
918 | " check access f_flags = 0x%x file_mode = 0x%x\n", | ||
919 | __func__, cf_sk, mode, cf_sk->file_mode); | ||
920 | |||
921 | } else { | ||
922 | /* We are closed or pending open. | ||
923 | * If closed: send link setup | ||
924 | * If pending open: link setup already sent (we could have been | ||
925 | * interrupted by a signal last time) | ||
926 | */ | ||
927 | if (!STATE_IS_OPEN(cf_sk)) { | ||
928 | /* First opening of file; connect lower layers: */ | ||
929 | /* Drain queue (very unlikely) */ | ||
930 | drain_queue(cf_sk); | ||
931 | |||
932 | cf_sk->layer.receive = caif_sktrecv_cb; | ||
933 | SET_STATE_OPEN(cf_sk); | ||
934 | SET_PENDING_ON(cf_sk); | ||
935 | |||
936 | /* Register this channel. */ | ||
937 | result = | ||
938 | caif_connect_client(&cf_sk->conn_req, | ||
939 | &cf_sk->layer); | ||
940 | if (result < 0) { | ||
941 | pr_debug("CAIF: %s(): can't register channel\n", | ||
942 | __func__); | ||
943 | ret = -EIO; | ||
944 | SET_STATE_CLOSED(cf_sk); | ||
945 | SET_PENDING_OFF(cf_sk); | ||
946 | goto open_error; | ||
947 | } | ||
948 | dbfs_atomic_inc(&cnt.num_init); | ||
949 | } | ||
950 | |||
951 | /* If opened non-blocking, report "success". | ||
952 | */ | ||
953 | if (flags & O_NONBLOCK) { | ||
954 | pr_debug("CAIF: %s(): O_NONBLOCK success\n", | ||
955 | __func__); | ||
956 | ret = -EINPROGRESS; | ||
957 | cf_sk->sk.sk_err = EINPROGRESS; | ||
958 | goto open_error; | ||
959 | } | ||
960 | |||
961 | trace_printk("CAIF: %s(): Wait for connect response\n", | ||
962 | __func__); | ||
963 | |||
964 | /* release lock before waiting */ | ||
965 | release_sock(&cf_sk->sk); | ||
966 | |||
967 | result = | ||
968 | wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
969 | !STATE_IS_PENDING(cf_sk)); | ||
970 | |||
971 | lock_sock(&(cf_sk->sk)); | ||
972 | |||
973 | if (result == -ERESTARTSYS) { | ||
974 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
975 | "woken by a signal (2)", __func__); | ||
976 | ret = -ERESTARTSYS; | ||
977 | goto open_error; | ||
978 | } | ||
979 | |||
980 | if (!STATE_IS_OPEN(cf_sk)) { | ||
981 | /* Lower layers said "no" */ | ||
982 | pr_debug("CAIF: %s(): Closed received\n", __func__); | ||
983 | ret = -EPIPE; | ||
984 | goto open_error; | ||
985 | } | ||
986 | |||
987 | trace_printk("CAIF: %s(): Connect received\n", __func__); | ||
988 | } | ||
989 | /* Open is ok */ | ||
990 | cf_sk->file_mode |= mode; | ||
991 | |||
992 | trace_printk("CAIF: %s(): Connected - file mode = %x\n", | ||
993 | __func__, cf_sk->file_mode); | ||
994 | |||
995 | release_sock(&cf_sk->sk); | ||
996 | return 0; | ||
997 | open_error: | ||
998 | sock->state = SS_UNCONNECTED; | ||
999 | release_sock(&cf_sk->sk); | ||
1000 | out: | ||
1001 | return ret; | ||
1002 | } | ||
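connect() is where the channel is actually set up: the address must be a struct sockaddr_caif of exactly the right length with family AF_CAIF, and with O_NONBLOCK the call returns -EINPROGRESS while link setup is still pending. A sketch of opening an AT channel; the sockaddr_caif layout is assumed from the uapi header, which is not part of this file:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/caif/caif_socket.h> /* sockaddr_caif: assumed */

    static int caif_open_at_channel(void)
    {
        struct sockaddr_caif addr;
        int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

        if (fd < 0)
            return -1;

        memset(&addr, 0, sizeof(addr));
        addr.family = AF_CAIF;  /* anything else: -EAFNOSUPPORT */

        /* Blocking connect sleeps until the modem answers link setup;
         * with O_NONBLOCK, EINPROGRESS means setup is pending. */
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
            errno != EINPROGRESS) {
            perror("connect");
            close(fd);
            return -1;
        }
        return fd;
    }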
1003 | |||
1004 | static int caif_shutdown(struct socket *sock, int how) | ||
1005 | { | ||
1006 | struct caifsock *cf_sk = NULL; | ||
1007 | int result = 0; | ||
1008 | int tx_flow_state_was_on; | ||
1009 | struct sock *sk = sock->sk; | ||
1010 | |||
1011 | trace_printk("CAIF: %s(): enter\n", __func__); | ||
1012 | pr_debug("f_flags=%x\n", sock->file->f_flags); | ||
1013 | |||
1014 | if (how != SHUT_RDWR) | ||
1015 | return -EOPNOTSUPP; | ||
1016 | |||
1017 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1018 | if (cf_sk == NULL) { | ||
1019 | pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__); | ||
1020 | return -EBADF; | ||
1021 | } | ||
1022 | |||
1023 | /* I want to be alone on cf_sk (except status queue) */ | ||
1024 | lock_sock(&(cf_sk->sk)); | ||
1025 | sock_hold(&cf_sk->sk); | ||
1026 | |||
1027 | /* IS_CLOSED has a double meaning: | ||
1028 | * 1) Spontaneous remote shutdown request. | ||
1029 | * 2) Ack on a channel teardown (disconnect). | ||
1030 | * Must clear the bit in case we previously received | ||
1031 | * a remote shutdown request. | ||
1032 | */ | ||
1033 | if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { | ||
1034 | SET_STATE_CLOSED(cf_sk); | ||
1035 | SET_PENDING_ON(cf_sk); | ||
1036 | tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk); | ||
1037 | SET_TX_FLOW_OFF(cf_sk); | ||
1038 | |||
1039 | /* Hold the socket until DEINIT_RSP is received */ | ||
1040 | sock_hold(&cf_sk->sk); | ||
1041 | result = caif_disconnect_client(&cf_sk->layer); | ||
1042 | |||
1043 | if (result < 0) { | ||
1044 | pr_debug("CAIF: %s(): " | ||
1045 | "caif_disconnect_client() failed\n", | ||
1046 | __func__); | ||
1047 | SET_STATE_CLOSED(cf_sk); | ||
1048 | SET_PENDING_OFF(cf_sk); | ||
1049 | SET_TX_FLOW_OFF(cf_sk); | ||
1050 | release_sock(&cf_sk->sk); | ||
1051 | sock_put(&cf_sk->sk); | ||
1052 | return -EIO; | ||
1053 | } | ||
1054 | |||
1055 | } | ||
1056 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
1057 | SET_PENDING_OFF(cf_sk); | ||
1058 | SET_REMOTE_SHUTDOWN_OFF(cf_sk); | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Socket is no longer in state pending close, | ||
1063 | * and we can release the reference. | ||
1064 | */ | ||
1065 | |||
1066 | dbfs_atomic_inc(&cnt.num_close); | ||
1067 | drain_queue(cf_sk); | ||
1068 | SET_RX_FLOW_ON(cf_sk); | ||
1069 | cf_sk->file_mode = 0; | ||
1070 | sock_put(&cf_sk->sk); | ||
1071 | release_sock(&cf_sk->sk); | ||
1072 | if (!result && (sock->file->f_flags & O_NONBLOCK)) { | ||
1073 | pr_debug("nonblocking shutdown returing -EAGAIN\n"); | ||
1074 | return -EAGAIN; | ||
1075 | } else | ||
1076 | return result; | ||
1077 | } | ||
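Only a full SHUT_RDWR teardown is supported, and on a non-blocking socket a successfully queued disconnect still returns -EAGAIN because the DEINIT response has not yet arrived. A small wrapper reflecting that behaviour, as a sketch:

    #include <errno.h>
    #include <sys/socket.h>

    static int caif_teardown(int fd)
    {
        if (shutdown(fd, SHUT_RDWR) == 0)
            return 0;
        /* On O_NONBLOCK sockets the request was still sent; EAGAIN
         * only means the disconnect response is pending. */
        return errno == EAGAIN ? 0 : -1;
    }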
1078 | |||
1079 | static ssize_t caif_sock_no_sendpage(struct socket *sock, | ||
1080 | struct page *page, | ||
1081 | int offset, size_t size, int flags) | ||
1082 | { | ||
1083 | return -EOPNOTSUPP; | ||
1084 | } | ||
1085 | |||
1086 | /* This function is called as part of close. */ | ||
1087 | static int caif_release(struct socket *sock) | ||
1088 | { | ||
1089 | struct sock *sk = sock->sk; | ||
1090 | struct caifsock *cf_sk = NULL; | ||
1091 | int res; | ||
1092 | caif_assert(sk != NULL); | ||
1093 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1094 | |||
1095 | if (cf_sk->debugfs_socket_dir != NULL) | ||
1096 | debugfs_remove_recursive(cf_sk->debugfs_socket_dir); | ||
1097 | |||
1098 | res = caif_shutdown(sock, SHUT_RDWR); | ||
1099 | if (res && res != -EINPROGRESS) | ||
1100 | return res; | ||
1101 | |||
1102 | /* | ||
1103 | * FIXME: Shutdown should probably be possible to do async | ||
1104 | * without flushing queues, allowing reception of frames while | ||
1105 | * waiting for DEINIT_IND. | ||
1106 | * Release should always block, to allow secure decoupling of | ||
1107 | * CAIF stack. | ||
1108 | */ | ||
1109 | if (!(sock->file->f_flags & O_NONBLOCK)) { | ||
1110 | res = wait_event_interruptible(*cf_sk->sk.sk_sleep, | ||
1111 | !STATE_IS_PENDING(cf_sk)); | ||
1112 | |||
1113 | if (res == -ERESTARTSYS) { | ||
1114 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
1115 | "woken by a signal (1)", __func__); | ||
1116 | } | ||
1117 | } | ||
1118 | lock_sock(&(cf_sk->sk)); | ||
1119 | |||
1120 | sock->sk = NULL; | ||
1121 | |||
1122 | /* Detach the socket from its process context by making it an orphan. */ | ||
1123 | sock_orphan(sk); | ||
1124 | |||
1125 | /* | ||
1126 | * Setting SHUTDOWN_MASK means that both send and receive are shutdown | ||
1127 | * for the socket. | ||
1128 | */ | ||
1129 | sk->sk_shutdown = SHUTDOWN_MASK; | ||
1130 | |||
1131 | /* | ||
1132 | * Set the socket state to closed; the TCP_CLOSE macro is used when | ||
1133 | * closing any socket. | ||
1134 | */ | ||
1135 | |||
1136 | /* Flush out this sockets receive queue. */ | ||
1137 | drain_queue(cf_sk); | ||
1138 | |||
1139 | /* Finally release the socket. */ | ||
1140 | SET_STATE_PENDING_DESTROY(cf_sk); | ||
1141 | |||
1142 | release_sock(&cf_sk->sk); | ||
1143 | |||
1144 | sock_put(sk); | ||
1145 | |||
1146 | /* | ||
1147 | * The rest of the cleanup will be handled from the | ||
1148 | * caif_sock_destructor | ||
1149 | */ | ||
1150 | return res; | ||
1151 | } | ||
1152 | |||
1153 | static const struct proto_ops caif_ops = { | ||
1154 | .family = PF_CAIF, | ||
1155 | .owner = THIS_MODULE, | ||
1156 | .release = caif_release, | ||
1157 | .bind = sock_no_bind, | ||
1158 | .connect = caif_connect, | ||
1159 | .socketpair = sock_no_socketpair, | ||
1160 | .accept = sock_no_accept, | ||
1161 | .getname = sock_no_getname, | ||
1162 | .poll = caif_poll, | ||
1163 | .ioctl = sock_no_ioctl, | ||
1164 | .listen = sock_no_listen, | ||
1165 | .shutdown = caif_shutdown, | ||
1166 | .setsockopt = setsockopt, | ||
1167 | .getsockopt = sock_no_getsockopt, | ||
1168 | .sendmsg = caif_sendmsg, | ||
1169 | .recvmsg = caif_recvmsg, | ||
1170 | .mmap = sock_no_mmap, | ||
1171 | .sendpage = caif_sock_no_sendpage, | ||
1172 | }; | ||
1173 | |||
1174 | /* This function is called when a socket is finally destroyed. */ | ||
1175 | static void caif_sock_destructor(struct sock *sk) | ||
1176 | { | ||
1177 | struct caifsock *cf_sk = NULL; | ||
1178 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1179 | /* Error checks. */ | ||
1180 | caif_assert(!atomic_read(&sk->sk_wmem_alloc)); | ||
1181 | caif_assert(sk_unhashed(sk)); | ||
1182 | caif_assert(!sk->sk_socket); | ||
1183 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1184 | pr_debug("CAIF: %s(): 0x%p", __func__, sk); | ||
1185 | return; | ||
1186 | } | ||
1187 | |||
1188 | if (STATE_IS_OPEN(cf_sk)) { | ||
1189 | pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)" | ||
1190 | " file_mode = 0x%x\n", __func__, | ||
1191 | cf_sk, cf_sk->file_mode); | ||
1192 | return; | ||
1193 | } | ||
1194 | drain_queue(cf_sk); | ||
1195 | kfree(cf_sk->pktq); | ||
1196 | |||
1197 | trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n", | ||
1198 | __func__, cf_sk->name); | ||
1199 | atomic_dec(&caif_nr_socks); | ||
1200 | } | ||
1201 | |||
1202 | static int caif_create(struct net *net, struct socket *sock, int protocol, | ||
1203 | int kern) | ||
1204 | { | ||
1205 | struct sock *sk = NULL; | ||
1206 | struct caifsock *cf_sk = NULL; | ||
1207 | int result = 0; | ||
1208 | static struct proto prot = {.name = "PF_CAIF", | ||
1209 | .owner = THIS_MODULE, | ||
1210 | .obj_size = sizeof(struct caifsock), | ||
1211 | }; | ||
1212 | |||
1213 | /* | ||
1214 | * The sock->type specifies the socket type to use. | ||
1215 | * In SEQPACKET mode packet boundaries are enforced. | ||
1216 | */ | ||
1217 | if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) | ||
1218 | return -ESOCKTNOSUPPORT; | ||
1219 | |||
1220 | if (net != &init_net) | ||
1221 | return -EAFNOSUPPORT; | ||
1222 | |||
1223 | if (protocol < 0 || protocol >= CAIFPROTO_MAX) | ||
1224 | return -EPROTONOSUPPORT; | ||
1225 | /* | ||
1226 | * Set the socket state to unconnected. The socket state is not | ||
1227 | * actually used in net/core or socket.c, but the | ||
1228 | * initialization makes sure that sock->state is not uninitialized. | ||
1229 | */ | ||
1230 | sock->state = SS_UNCONNECTED; | ||
1231 | |||
1232 | sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); | ||
1233 | if (!sk) | ||
1234 | return -ENOMEM; | ||
1235 | |||
1236 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1237 | |||
1238 | /* Store the protocol */ | ||
1239 | sk->sk_protocol = (unsigned char) protocol; | ||
1240 | |||
1241 | spin_lock_init(&cf_sk->read_queue_len_lock); | ||
1242 | |||
1243 | /* Fill in some information concerning the misc socket. */ | ||
1244 | snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d", | ||
1245 | atomic_read(&caif_nr_socks)); | ||
1246 | |||
1247 | /* | ||
1248 | * Lock in order to try to stop someone from opening the socket | ||
1249 | * too early. | ||
1250 | */ | ||
1251 | lock_sock(&(cf_sk->sk)); | ||
1252 | |||
1253 | /* Initialize the nonzero default sock structure data. */ | ||
1254 | sock_init_data(sock, sk); | ||
1255 | sock->ops = &caif_ops; | ||
1256 | sk->sk_destruct = caif_sock_destructor; | ||
1257 | sk->sk_sndbuf = caif_sockbuf_size; | ||
1258 | sk->sk_rcvbuf = caif_sockbuf_size; | ||
1259 | |||
1260 | cf_sk->pktq = cfpktq_create(); | ||
1261 | |||
1262 | if (!cf_sk->pktq) { | ||
1263 | pr_err("CAIF: %s(): queue create failed.\n", __func__); | ||
1264 | result = -ENOMEM; | ||
1265 | release_sock(&cf_sk->sk); | ||
1266 | goto err_failed; | ||
1267 | } | ||
1268 | cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb; | ||
1269 | SET_STATE_CLOSED(cf_sk); | ||
1270 | SET_PENDING_OFF(cf_sk); | ||
1271 | SET_TX_FLOW_OFF(cf_sk); | ||
1272 | SET_RX_FLOW_ON(cf_sk); | ||
1273 | |||
1274 | /* Set default options on configuration */ | ||
1275 | cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; | ||
1276 | cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; | ||
1277 | cf_sk->conn_req.protocol = protocol; | ||
1278 | /* Increase the number of sockets created. */ | ||
1279 | atomic_inc(&caif_nr_socks); | ||
1280 | if (!IS_ERR(debugfsdir)) { | ||
1281 | cf_sk->debugfs_socket_dir = | ||
1282 | debugfs_create_dir(cf_sk->name, debugfsdir); | ||
1283 | debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR, | ||
1284 | cf_sk->debugfs_socket_dir, &cf_sk->conn_state); | ||
1285 | debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR, | ||
1286 | cf_sk->debugfs_socket_dir, &cf_sk->flow_state); | ||
1287 | debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR, | ||
1288 | cf_sk->debugfs_socket_dir, | ||
1289 | (u32 *) &cf_sk->read_queue_len); | ||
1290 | debugfs_create_u32("identity", S_IRUSR | S_IWUSR, | ||
1291 | cf_sk->debugfs_socket_dir, | ||
1292 | (u32 *) &cf_sk->layer.id); | ||
1293 | } | ||
1294 | release_sock(&cf_sk->sk); | ||
1295 | return 0; | ||
1296 | err_failed: | ||
1297 | sk_free(sk); | ||
1298 | return result; | ||
1299 | } | ||
1300 | |||
1301 | static struct net_proto_family caif_family_ops = { | ||
1302 | .family = PF_CAIF, | ||
1303 | .create = caif_create, | ||
1304 | .owner = THIS_MODULE, | ||
1305 | }; | ||
1306 | |||
1307 | static int af_caif_init(void) | ||
1308 | { | ||
1309 | int err; | ||
1310 | err = sock_register(&caif_family_ops); | ||
1311 | |||
1312 | if (err) | ||
1313 | return err; | ||
1314 | |||
1315 | return 0; | ||
1316 | } | ||
1317 | |||
1318 | static int __init caif_sktinit_module(void) | ||
1319 | { | ||
1320 | int stat; | ||
1321 | #ifdef CONFIG_DEBUG_FS | ||
1322 | debugfsdir = debugfs_create_dir("chnl_skt", NULL); | ||
1323 | if (!IS_ERR(debugfsdir)) { | ||
1324 | debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR, | ||
1325 | debugfsdir, | ||
1326 | (u32 *) &cnt.skb_in_use); | ||
1327 | debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR, | ||
1328 | debugfsdir, | ||
1329 | (u32 *) &cnt.skb_alloc); | ||
1330 | debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR, | ||
1331 | debugfsdir, | ||
1332 | (u32 *) &cnt.skb_free); | ||
1333 | debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, | ||
1334 | debugfsdir, | ||
1335 | (u32 *) &caif_nr_socks); | ||
1336 | debugfs_create_u32("num_open", S_IRUSR | S_IWUSR, | ||
1337 | debugfsdir, | ||
1338 | (u32 *) &cnt.num_open); | ||
1339 | debugfs_create_u32("num_close", S_IRUSR | S_IWUSR, | ||
1340 | debugfsdir, | ||
1341 | (u32 *) &cnt.num_close); | ||
1342 | debugfs_create_u32("num_init", S_IRUSR | S_IWUSR, | ||
1343 | debugfsdir, | ||
1344 | (u32 *) &cnt.num_init); | ||
1345 | debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR, | ||
1346 | debugfsdir, | ||
1347 | (u32 *) &cnt.num_init_resp); | ||
1348 | debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR, | ||
1349 | debugfsdir, | ||
1350 | (u32 *) &cnt.num_init_fail_resp); | ||
1351 | debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR, | ||
1352 | debugfsdir, | ||
1353 | (u32 *) &cnt.num_deinit); | ||
1354 | debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR, | ||
1355 | debugfsdir, | ||
1356 | (u32 *) &cnt.num_deinit_resp); | ||
1357 | debugfs_create_u32("num_remote_shutdown_ind", | ||
1358 | S_IRUSR | S_IWUSR, debugfsdir, | ||
1359 | (u32 *) &cnt.num_remote_shutdown_ind); | ||
1360 | debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR, | ||
1361 | debugfsdir, | ||
1362 | (u32 *) &cnt.num_tx_flow_off_ind); | ||
1363 | debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR, | ||
1364 | debugfsdir, | ||
1365 | (u32 *) &cnt.num_tx_flow_on_ind); | ||
1366 | debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR, | ||
1367 | debugfsdir, | ||
1368 | (u32 *) &cnt.num_rx_flow_off); | ||
1369 | debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR, | ||
1370 | debugfsdir, | ||
1371 | (u32 *) &cnt.num_rx_flow_on); | ||
1372 | } | ||
1373 | #endif | ||
1374 | stat = af_caif_init(); | ||
1375 | if (stat) { | ||
1376 | pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.", | ||
1377 | __func__); | ||
1378 | return stat; | ||
1379 | } | ||
1380 | return 0; | ||
1381 | } | ||
1382 | |||
1383 | static void __exit caif_sktexit_module(void) | ||
1384 | { | ||
1385 | sock_unregister(PF_CAIF); | ||
1386 | if (debugfsdir != NULL) | ||
1387 | debugfs_remove_recursive(debugfsdir); | ||
1388 | } | ||
1389 | |||
1390 | module_init(caif_sktinit_module); | ||
1391 | module_exit(caif_sktexit_module); | ||
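The counters registered above surface as u32 debugfs files under chnl_skt/, plus one directory per socket named cf_sk<N>. A hedged userspace reader for one counter; the conventional /sys/kernel/debug mount point is assumed:

    #include <stdio.h>

    /* Read one chnl_skt counter, e.g. read_caif_counter("num_open"). */
    static long read_caif_counter(const char *name)
    {
        char path[128];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/chnl_skt/%s", name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%ld", &val) != 1)
            val = -1;
        fclose(f);
        return val;
    }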
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c new file mode 100644 index 000000000000..c873e3d4387c --- /dev/null +++ b/net/caif/cfcnfg.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | #include <net/caif/cfcnfg.h> | ||
12 | #include <net/caif/cfctrl.h> | ||
13 | #include <net/caif/cfmuxl.h> | ||
14 | #include <net/caif/cffrml.h> | ||
15 | #include <net/caif/cfserl.h> | ||
16 | #include <net/caif/cfsrvl.h> | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | #define MAX_PHY_LAYERS 7 | ||
22 | #define PHY_NAME_LEN 20 | ||
23 | |||
24 | #define container_obj(layr) container_of(layr, struct cfcnfg, layer) | ||
25 | |||
26 | /* Information about CAIF physical interfaces held by Config Module in order | ||
27 | * to manage physical interfaces | ||
28 | */ | ||
29 | struct cfcnfg_phyinfo { | ||
30 | /* Pointer to the layer below the MUX (framing layer) */ | ||
31 | struct cflayer *frm_layer; | ||
32 | /* Pointer to the lowest actual physical layer */ | ||
33 | struct cflayer *phy_layer; | ||
34 | /* Unique identifier of the physical interface */ | ||
35 | unsigned int id; | ||
36 | /* Preference of the physical interface */ | ||
37 | enum cfcnfg_phy_preference pref; | ||
38 | |||
39 | /* Reference count, number of channels using the device */ | ||
40 | int phy_ref_count; | ||
41 | |||
42 | /* Information about the physical device */ | ||
43 | struct dev_info dev_info; | ||
44 | }; | ||
45 | |||
46 | struct cfcnfg { | ||
47 | struct cflayer layer; | ||
48 | struct cflayer *ctrl; | ||
49 | struct cflayer *mux; | ||
50 | u8 last_phyid; | ||
51 | struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS]; | ||
52 | }; | ||
53 | |||
54 | static void cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, | ||
55 | enum cfctrl_srv serv, u8 phyid, | ||
56 | struct cflayer *adapt_layer); | ||
57 | static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, | ||
58 | struct cflayer *client_layer); | ||
59 | static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, | ||
60 | struct cflayer *adapt_layer); | ||
61 | static void cfctrl_resp_func(void); | ||
62 | static void cfctrl_enum_resp(void); | ||
63 | |||
64 | struct cfcnfg *cfcnfg_create(void) | ||
65 | { | ||
66 | struct cfcnfg *this; | ||
67 | struct cfctrl_rsp *resp; | ||
68 | /* Initiate this layer */ | ||
69 | this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC); | ||
70 | if (!this) { | ||
71 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
72 | return NULL; | ||
73 | } | ||
74 | memset(this, 0, sizeof(struct cfcnfg)); | ||
75 | this->mux = cfmuxl_create(); | ||
76 | if (!this->mux) | ||
77 | goto out_of_mem; | ||
78 | this->ctrl = cfctrl_create(); | ||
79 | if (!this->ctrl) | ||
80 | goto out_of_mem; | ||
81 | /* Initiate response functions */ | ||
82 | resp = cfctrl_get_respfuncs(this->ctrl); | ||
83 | resp->enum_rsp = cfctrl_enum_resp; | ||
84 | resp->linkerror_ind = cfctrl_resp_func; | ||
85 | resp->linkdestroy_rsp = cncfg_linkdestroy_rsp; | ||
86 | resp->sleep_rsp = cfctrl_resp_func; | ||
87 | resp->wake_rsp = cfctrl_resp_func; | ||
88 | resp->restart_rsp = cfctrl_resp_func; | ||
89 | resp->radioset_rsp = cfctrl_resp_func; | ||
90 | resp->linksetup_rsp = cncfg_linkup_rsp; | ||
91 | resp->reject_rsp = cncfg_reject_rsp; | ||
92 | |||
93 | this->last_phyid = 1; | ||
94 | |||
95 | cfmuxl_set_uplayer(this->mux, this->ctrl, 0); | ||
96 | layer_set_dn(this->ctrl, this->mux); | ||
97 | layer_set_up(this->ctrl, this); | ||
98 | return this; | ||
99 | out_of_mem: | ||
100 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
101 | kfree(this->mux); | ||
102 | kfree(this->ctrl); | ||
103 | kfree(this); | ||
104 | return NULL; | ||
105 | } | ||
106 | EXPORT_SYMBOL(cfcnfg_create); | ||
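cfcnfg_create() stacks the control layer on top of the MUX with layer_set_up()/layer_set_dn(): every cflayer keeps up/dn pointers, frames travel down through ->transmit and back up through ->receive. A self-contained toy model of that stacking; the names and the pointer-assignment helpers are illustrative, not the kernel's structs:

    #include <stdio.h>

    struct layer {
        const char *name;
        struct layer *up, *dn;
    };

    static void link_layers(struct layer *upper, struct layer *lower)
    {
        upper->dn = lower;  /* what layer_set_dn() presumably does */
        lower->up = upper;  /* what layer_set_up() presumably does */
    }

    /* Walk a frame down the stack, as a transmit would. */
    static void transmit(struct layer *l)
    {
        for (; l; l = l->dn)
            printf("tx via %s\n", l->name);
    }

    int main(void)
    {
        struct layer ctrl = { .name = "ctrl" }, mux = { .name = "mux" },
                     frml = { .name = "frml" }, phy = { .name = "phy" };

        link_layers(&ctrl, &mux);   /* mirrors cfcnfg_create() */
        link_layers(&mux, &frml);   /* mirrors cfcnfg_add_phy_layer() */
        link_layers(&frml, &phy);
        transmit(&ctrl);            /* ctrl -> mux -> frml -> phy */
        return 0;
    }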
107 | |||
108 | void cfcnfg_remove(struct cfcnfg *cfg) | ||
109 | { | ||
110 | if (cfg) { | ||
111 | kfree(cfg->mux); | ||
112 | kfree(cfg->ctrl); | ||
113 | kfree(cfg); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | static void cfctrl_resp_func(void) | ||
118 | { | ||
119 | } | ||
120 | |||
121 | static void cfctrl_enum_resp(void) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, | ||
126 | enum cfcnfg_phy_preference phy_pref) | ||
127 | { | ||
128 | u16 i; | ||
129 | |||
130 | /* Try to match with specified preference */ | ||
131 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
132 | if (cnfg->phy_layers[i].id == i && | ||
133 | cnfg->phy_layers[i].pref == phy_pref && | ||
134 | cnfg->phy_layers[i].frm_layer != NULL) { | ||
135 | caif_assert(cnfg->phy_layers != NULL); | ||
136 | caif_assert(cnfg->phy_layers[i].id == i); | ||
137 | return &cnfg->phy_layers[i].dev_info; | ||
138 | } | ||
139 | } | ||
140 | /* Otherwise just return something */ | ||
141 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
142 | if (cnfg->phy_layers[i].id == i) { | ||
143 | caif_assert(cnfg->phy_layers != NULL); | ||
144 | caif_assert(cnfg->phy_layers[i].id == i); | ||
145 | return &cnfg->phy_layers[i].dev_info; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | return NULL; | ||
150 | } | ||
151 | |||
152 | static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, | ||
153 | u8 phyid) | ||
154 | { | ||
155 | int i; | ||
156 | /* Try to match with specified preference */ | ||
157 | for (i = 0; i < MAX_PHY_LAYERS; i++) | ||
158 | if (cnfg->phy_layers[i].frm_layer != NULL && | ||
159 | cnfg->phy_layers[i].id == phyid) | ||
160 | return &cnfg->phy_layers[i]; | ||
161 | return NULL; | ||
162 | } | ||
163 | |||
164 | int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) | ||
165 | { | ||
166 | int i; | ||
167 | |||
168 | /* Try to match with specified name */ | ||
169 | for (i = 0; i < MAX_PHY_LAYERS; i++) { | ||
170 | if (cnfg->phy_layers[i].frm_layer != NULL | ||
171 | && strcmp(cnfg->phy_layers[i].phy_layer->name, | ||
172 | name) == 0) | ||
173 | return cnfg->phy_layers[i].frm_layer->id; | ||
174 | } | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * NOTE: What happens on destroy failure: | ||
180 | * 1a) No response - Too early | ||
181 | * This will not happen because enumerate has already | ||
182 | * completed. | ||
183 | * 1b) No response - FATAL | ||
184 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
185 | * Modem error, response is really expected - this | ||
186 | * case is not really handled. | ||
187 | * 2) O/E-bit indicates an error | ||
188 | * Ignored - this link is destroyed anyway. | ||
189 | * 3) Not able to match on request | ||
190 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
191 | * 4) Link-Error - (no response) | ||
192 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
193 | */ | ||
194 | |||
195 | int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | ||
196 | { | ||
197 | u8 channel_id = 0; | ||
198 | int ret = 0; | ||
199 | struct cfcnfg_phyinfo *phyinfo = NULL; | ||
200 | u8 phyid = 0; | ||
201 | |||
202 | caif_assert(adap_layer != NULL); | ||
203 | channel_id = adap_layer->id; | ||
204 | if (channel_id == 0) { | ||
205 | pr_err("CAIF: %s():adap_layer->id is 0\n", __func__); | ||
206 | ret = -ENOTCONN; | ||
207 | goto end; | ||
208 | } | ||
209 | |||
210 | if (adap_layer->dn == NULL) { | ||
211 | pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__); | ||
212 | ret = -ENODEV; | ||
213 | goto end; | ||
214 | } | ||
215 | |||
216 | if (adap_layer->dn != NULL) | ||
217 | phyid = cfsrvl_getphyid(adap_layer->dn); | ||
218 | |||
219 | phyinfo = cfcnfg_get_phyinfo(cnfg, phyid); | ||
220 | if (phyinfo == NULL) { | ||
221 | pr_warning("CAIF: %s(): No interface to send disconnect to\n", | ||
222 | __func__); | ||
223 | ret = -ENODEV; | ||
224 | goto end; | ||
225 | } | ||
226 | |||
227 | if (phyinfo->id != phyid | ||
228 | || phyinfo->phy_layer->id != phyid | ||
229 | || phyinfo->frm_layer->id != phyid) { | ||
230 | |||
231 | pr_err("CAIF: %s(): Inconsistency in phy registration\n", | ||
232 | __func__); | ||
233 | ret = -EINVAL; | ||
234 | goto end; | ||
235 | } | ||
236 | |||
237 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
238 | |||
239 | end: | ||
240 | if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 && | ||
241 | phyinfo->phy_layer != NULL && | ||
242 | phyinfo->phy_layer->modemcmd != NULL) { | ||
243 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, | ||
244 | _CAIF_MODEMCMD_PHYIF_USELESS); | ||
245 | } | ||
246 | return ret; | ||
247 | |||
248 | } | ||
249 | EXPORT_SYMBOL(cfcnfg_del_adapt_layer); | ||
250 | |||
251 | static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, | ||
252 | struct cflayer *client_layer) | ||
253 | { | ||
254 | struct cfcnfg *cnfg = container_obj(layer); | ||
255 | struct cflayer *servl; | ||
256 | |||
257 | /* | ||
258 | * 1) Remove service from the MUX layer. The MUX must | ||
259 | * guarantee that no more payload is sent "upwards" (receive). | ||
260 | */ | ||
261 | servl = cfmuxl_remove_uplayer(cnfg->mux, linkid); | ||
262 | |||
263 | if (servl == NULL) { | ||
264 | pr_err("CAIF: %s(): PROTOCOL ERROR " | ||
265 | "- Error removing service_layer Linkid(%d)", | ||
266 | __func__, linkid); | ||
267 | return; | ||
268 | } | ||
269 | caif_assert(linkid == servl->id); | ||
270 | |||
271 | if (servl != client_layer && servl->up != client_layer) { | ||
272 | pr_err("CAIF: %s(): Error removing service_layer " | ||
273 | "Linkid(%d) %p %p", | ||
274 | __func__, linkid, (void *) servl, | ||
275 | (void *) client_layer); | ||
276 | return; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * 2) DEINIT_RSP must guarantee that no more packets are transmitted | ||
281 | * from client (adap_layer) when it returns. | ||
282 | */ | ||
283 | |||
284 | if (servl->ctrlcmd == NULL) { | ||
285 | pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__); | ||
286 | return; | ||
287 | } | ||
288 | |||
289 | servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0); | ||
290 | |||
291 | /* 3) It is now safe to destroy the service layer. */ | ||
292 | cfservl_destroy(servl); | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * NOTE: What happens on linksetup failure: | ||
297 | * 1a) No response - Too early | ||
298 | * This will not happen because enumeration is completed | ||
299 | * before the interface is used. | ||
300 | * 1b) No response - FATAL | ||
301 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
302 | * Modem error, response is really expected - this case is | ||
303 | * not really handled. | ||
304 | * 2) O/E-bit indicates an error | ||
305 | * Handled in cncfg_reject_rsp | ||
306 | * 3) Not able to match on request | ||
307 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
308 | * 4) Link-Error - (no response) | ||
309 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
310 | */ | ||
311 | |||
312 | int | ||
313 | cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, | ||
314 | struct cfctrl_link_param *param, | ||
315 | struct cflayer *adap_layer) | ||
316 | { | ||
317 | struct cflayer *frml; | ||
318 | if (adap_layer == NULL) { | ||
319 | pr_err("CAIF: %s(): adap_layer is zero", __func__); | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | if (adap_layer->receive == NULL) { | ||
323 | pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__); | ||
324 | return -EINVAL; | ||
325 | } | ||
326 | if (adap_layer->ctrlcmd == NULL) { | ||
327 | pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__); | ||
328 | return -EINVAL; | ||
329 | } | ||
330 | frml = cnfg->phy_layers[param->phyid].frm_layer; | ||
331 | if (frml == NULL) { | ||
332 | pr_err("CAIF: %s(): Specified PHY type does not exist!", | ||
333 | __func__); | ||
334 | return -ENODEV; | ||
335 | } | ||
336 | caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); | ||
337 | caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == | ||
338 | param->phyid); | ||
339 | caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == | ||
340 | param->phyid); | ||
341 | /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ | ||
342 | cfctrl_enum_req(cnfg->ctrl, param->phyid); | ||
343 | cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); | ||
344 | return 0; | ||
345 | } | ||
346 | EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); | ||
347 | |||
348 | static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, | ||
349 | struct cflayer *adapt_layer) | ||
350 | { | ||
351 | if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) | ||
352 | adapt_layer->ctrlcmd(adapt_layer, | ||
353 | CAIF_CTRLCMD_INIT_FAIL_RSP, 0); | ||
354 | } | ||
355 | |||
356 | static void | ||
357 | cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv, | ||
358 | u8 phyid, struct cflayer *adapt_layer) | ||
359 | { | ||
360 | struct cfcnfg *cnfg = container_obj(layer); | ||
361 | struct cflayer *servicel = NULL; | ||
362 | struct cfcnfg_phyinfo *phyinfo; | ||
363 | if (adapt_layer == NULL) { | ||
364 | pr_err("CAIF: %s(): PROTOCOL ERROR " | ||
365 | "- LinkUp Request/Response did not match\n", __func__); | ||
366 | return; | ||
367 | } | ||
368 | |||
369 | caif_assert(cnfg != NULL); | ||
370 | caif_assert(phyid != 0); | ||
371 | phyinfo = &cnfg->phy_layers[phyid]; | ||
372 | caif_assert(phyinfo != NULL); | ||
373 | caif_assert(phyinfo->id == phyid); | ||
374 | caif_assert(phyinfo->phy_layer != NULL); | ||
375 | caif_assert(phyinfo->phy_layer->id == phyid); | ||
376 | |||
377 | if (phyinfo != NULL && | ||
378 | phyinfo->phy_ref_count++ == 0 && | ||
379 | phyinfo->phy_layer != NULL && | ||
380 | phyinfo->phy_layer->modemcmd != NULL) { | ||
381 | caif_assert(phyinfo->phy_layer->id == phyid); | ||
382 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, | ||
383 | _CAIF_MODEMCMD_PHYIF_USEFULL); | ||
384 | |||
385 | } | ||
386 | adapt_layer->id = linkid; | ||
387 | |||
388 | switch (serv) { | ||
389 | case CFCTRL_SRV_VEI: | ||
390 | servicel = cfvei_create(linkid, &phyinfo->dev_info); | ||
391 | break; | ||
392 | case CFCTRL_SRV_DATAGRAM: | ||
393 | servicel = cfdgml_create(linkid, &phyinfo->dev_info); | ||
394 | break; | ||
395 | case CFCTRL_SRV_RFM: | ||
396 | servicel = cfrfml_create(linkid, &phyinfo->dev_info); | ||
397 | break; | ||
398 | case CFCTRL_SRV_UTIL: | ||
399 | servicel = cfutill_create(linkid, &phyinfo->dev_info); | ||
400 | break; | ||
401 | case CFCTRL_SRV_VIDEO: | ||
402 | servicel = cfvidl_create(linkid, &phyinfo->dev_info); | ||
403 | break; | ||
404 | case CFCTRL_SRV_DBG: | ||
405 | servicel = cfdbgl_create(linkid, &phyinfo->dev_info); | ||
406 | break; | ||
407 | default: | ||
408 | pr_err("CAIF: %s(): Protocol error. " | ||
409 | "Link setup response - unknown channel type\n", | ||
410 | __func__); | ||
411 | return; | ||
412 | } | ||
413 | if (!servicel) { | ||
414 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
415 | return; | ||
416 | } | ||
417 | layer_set_dn(servicel, cnfg->mux); | ||
418 | cfmuxl_set_uplayer(cnfg->mux, servicel, linkid); | ||
419 | layer_set_up(servicel, adapt_layer); | ||
420 | layer_set_dn(adapt_layer, servicel); | ||
421 | servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); | ||
422 | } | ||
423 | |||
424 | void | ||
425 | cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, | ||
426 | void *dev, struct cflayer *phy_layer, u16 *phyid, | ||
427 | enum cfcnfg_phy_preference pref, | ||
428 | bool fcs, bool stx) | ||
429 | { | ||
430 | struct cflayer *frml; | ||
431 | struct cflayer *phy_driver = NULL; | ||
432 | int i; | ||
433 | |||
434 | |||
435 | if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) { | ||
436 | *phyid = cnfg->last_phyid; | ||
437 | |||
438 | /* range: 1..(MAX_PHY_LAYERS-1) */ | ||
439 | cnfg->last_phyid = | ||
440 | (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; | ||
441 | } else { | ||
442 | *phyid = 0; | ||
443 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
444 | if (cnfg->phy_layers[i].frm_layer == NULL) { | ||
445 | *phyid = i; | ||
446 | break; | ||
447 | } | ||
448 | } | ||
449 | } | ||
450 | if (*phyid == 0) { | ||
451 | pr_err("CAIF: %s(): No Available PHY ID\n", __func__); | ||
452 | return; | ||
453 | } | ||
454 | |||
455 | switch (phy_type) { | ||
456 | case CFPHYTYPE_FRAG: | ||
457 | phy_driver = | ||
458 | cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); | ||
459 | if (!phy_driver) { | ||
460 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
461 | return; | ||
462 | } | ||
463 | |||
464 | break; | ||
465 | case CFPHYTYPE_CAIF: | ||
466 | phy_driver = NULL; | ||
467 | break; | ||
468 | default: | ||
469 | pr_err("CAIF: %s(): %d", __func__, phy_type); | ||
470 | return; | ||
471 | break; | ||
472 | } | ||
473 | |||
474 | phy_layer->id = *phyid; | ||
475 | cnfg->phy_layers[*phyid].pref = pref; | ||
476 | cnfg->phy_layers[*phyid].id = *phyid; | ||
477 | cnfg->phy_layers[*phyid].dev_info.id = *phyid; | ||
478 | cnfg->phy_layers[*phyid].dev_info.dev = dev; | ||
479 | cnfg->phy_layers[*phyid].phy_layer = phy_layer; | ||
480 | cnfg->phy_layers[*phyid].phy_ref_count = 0; | ||
481 | phy_layer->type = phy_type; | ||
482 | frml = cffrml_create(*phyid, fcs); | ||
483 | if (!frml) { | ||
484 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
485 | return; | ||
486 | } | ||
487 | cnfg->phy_layers[*phyid].frm_layer = frml; | ||
488 | cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid); | ||
489 | layer_set_up(frml, cnfg->mux); | ||
490 | |||
491 | if (phy_driver != NULL) { | ||
492 | phy_driver->id = *phyid; | ||
493 | layer_set_dn(frml, phy_driver); | ||
494 | layer_set_up(phy_driver, frml); | ||
495 | layer_set_dn(phy_driver, phy_layer); | ||
496 | layer_set_up(phy_layer, phy_driver); | ||
497 | } else { | ||
498 | layer_set_dn(frml, phy_layer); | ||
499 | layer_set_up(phy_layer, frml); | ||
500 | } | ||
501 | } | ||
502 | EXPORT_SYMBOL(cfcnfg_add_phy_layer); | ||
503 | |||
504 | int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) | ||
505 | { | ||
506 | struct cflayer *frml, *frml_dn; | ||
507 | u16 phyid; | ||
508 | phyid = phy_layer->id; | ||
509 | caif_assert(phyid == cnfg->phy_layers[phyid].id); | ||
510 | caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer); | ||
511 | caif_assert(phy_layer->id == phyid); | ||
512 | caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid); | ||
513 | |||
514 | memset(&cnfg->phy_layers[phy_layer->id], 0, | ||
515 | sizeof(struct cfcnfg_phyinfo)); | ||
516 | frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id); | ||
517 | frml_dn = frml->dn; | ||
518 | cffrml_set_uplayer(frml, NULL); | ||
519 | cffrml_set_dnlayer(frml, NULL); | ||
520 | kfree(frml); | ||
521 | |||
522 | if (phy_layer != frml_dn) { | ||
523 | layer_set_up(frml_dn, NULL); | ||
524 | layer_set_dn(frml_dn, NULL); | ||
525 | kfree(frml_dn); | ||
526 | } | ||
527 | layer_set_up(phy_layer, NULL); | ||
528 | return 0; | ||
529 | } | ||
530 | EXPORT_SYMBOL(cfcnfg_del_phy_layer); | ||
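Physical interface ids are allocated by first trying last_phyid, which round-robins over 1..(MAX_PHY_LAYERS-1), and falling back to a linear scan for a free slot; id 0 is never handed out. A standalone sketch of that allocator:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PHY_LAYERS 7

    static bool in_use[MAX_PHY_LAYERS];
    static unsigned int last_phyid = 1;

    /* Mirrors the id selection at the top of cfcnfg_add_phy_layer(). */
    static unsigned int alloc_phyid(void)
    {
        unsigned int id = 0, i;

        if (!in_use[last_phyid]) {
            id = last_phyid;
            /* round-robin over 1..(MAX_PHY_LAYERS-1) */
            last_phyid = (last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
        } else {
            for (i = 1; i < MAX_PHY_LAYERS; i++)
                if (!in_use[i]) {
                    id = i;
                    break;
                }
        }
        if (id)
            in_use[id] = true;
        return id;  /* 0 means no free slot */
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 8; i++)
            printf("allocated phyid %u\n", alloc_phyid());
        return 0;
    }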
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c new file mode 100644 index 000000000000..11f80140f3cb --- /dev/null +++ b/net/caif/cfctrl.c | |||
@@ -0,0 +1,664 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | #include <net/caif/cfctrl.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfctrl, serv.layer) | ||
15 | #define UTILITY_NAME_LENGTH 16 | ||
16 | #define CFPKT_CTRL_PKT_LEN 20 | ||
17 | |||
18 | |||
19 | #ifdef CAIF_NO_LOOP | ||
20 | static int handle_loop(struct cfctrl *ctrl, | ||
21 | int cmd, struct cfpkt *pkt){ | ||
22 | return CAIF_FAILURE; | ||
23 | } | ||
24 | #else | ||
25 | static int handle_loop(struct cfctrl *ctrl, | ||
26 | int cmd, struct cfpkt *pkt); | ||
27 | #endif | ||
28 | static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt); | ||
29 | static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
30 | int phyid); | ||
31 | |||
32 | |||
33 | struct cflayer *cfctrl_create(void) | ||
34 | { | ||
35 | struct cfctrl *this = | ||
36 | kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); | ||
37 | if (!this) { | ||
38 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
39 | return NULL; | ||
40 | } | ||
41 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); | ||
42 | memset(this, 0, sizeof(*this)); | ||
43 | spin_lock_init(&this->info_list_lock); | ||
44 | atomic_set(&this->req_seq_no, 1); | ||
45 | atomic_set(&this->rsp_seq_no, 1); | ||
46 | this->serv.dev_info.id = 0xff; | ||
47 | this->serv.layer.id = 0; | ||
48 | this->serv.layer.receive = cfctrl_recv; | ||
49 | sprintf(this->serv.layer.name, "ctrl"); | ||
50 | this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; | ||
51 | spin_lock_init(&this->loop_linkid_lock); | ||
52 | this->loop_linkid = 1; | ||
53 | return &this->serv.layer; | ||
54 | } | ||
55 | |||
56 | static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) | ||
57 | { | ||
58 | bool eq = | ||
59 | p1->linktype == p2->linktype && | ||
60 | p1->priority == p2->priority && | ||
61 | p1->phyid == p2->phyid && | ||
62 | p1->endpoint == p2->endpoint && p1->chtype == p2->chtype; | ||
63 | |||
64 | if (!eq) | ||
65 | return false; | ||
66 | |||
67 | switch (p1->linktype) { | ||
68 | case CFCTRL_SRV_VEI: | ||
69 | return true; | ||
70 | case CFCTRL_SRV_DATAGRAM: | ||
71 | return p1->u.datagram.connid == p2->u.datagram.connid; | ||
72 | case CFCTRL_SRV_RFM: | ||
73 | return | ||
74 | p1->u.rfm.connid == p2->u.rfm.connid && | ||
75 | strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0; | ||
76 | case CFCTRL_SRV_UTIL: | ||
77 | return | ||
78 | p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb | ||
79 | && p1->u.utility.fifosize_bufs == | ||
80 | p2->u.utility.fifosize_bufs | ||
81 | && strcmp(p1->u.utility.name, p2->u.utility.name) == 0 | ||
82 | && p1->u.utility.paramlen == p2->u.utility.paramlen | ||
83 | && memcmp(p1->u.utility.params, p2->u.utility.params, | ||
84 | p1->u.utility.paramlen) == 0; | ||
85 | |||
86 | case CFCTRL_SRV_VIDEO: | ||
87 | return p1->u.video.connid == p2->u.video.connid; | ||
88 | case CFCTRL_SRV_DBG: | ||
89 | return true; | ||
90 | case CFCTRL_SRV_DECM: | ||
91 | return false; | ||
92 | default: | ||
93 | return false; | ||
94 | } | ||
95 | return false; | ||
96 | } | ||
97 | |||
98 | bool cfctrl_req_eq(struct cfctrl_request_info *r1, | ||
99 | struct cfctrl_request_info *r2) | ||
100 | { | ||
101 | if (r1->cmd != r2->cmd) | ||
102 | return false; | ||
103 | if (r1->cmd == CFCTRL_CMD_LINK_SETUP) | ||
104 | return param_eq(&r1->param, &r2->param); | ||
105 | else | ||
106 | return r1->channel_id == r2->channel_id; | ||
107 | } | ||
108 | |||
109 | /* Insert request at the end */ | ||
110 | void cfctrl_insert_req(struct cfctrl *ctrl, | ||
111 | struct cfctrl_request_info *req) | ||
112 | { | ||
113 | struct cfctrl_request_info *p; | ||
114 | spin_lock(&ctrl->info_list_lock); | ||
115 | req->next = NULL; | ||
116 | atomic_inc(&ctrl->req_seq_no); | ||
117 | req->sequence_no = atomic_read(&ctrl->req_seq_no); | ||
118 | if (ctrl->first_req == NULL) { | ||
119 | ctrl->first_req = req; | ||
120 | spin_unlock(&ctrl->info_list_lock); | ||
121 | return; | ||
122 | } | ||
123 | p = ctrl->first_req; | ||
124 | while (p->next != NULL) | ||
125 | p = p->next; | ||
126 | p->next = req; | ||
127 | spin_unlock(&ctrl->info_list_lock); | ||
128 | } | ||
129 | |||
130 | static void cfctrl_insert_req2(struct cfctrl *ctrl, enum cfctrl_cmd cmd, | ||
131 | u8 linkid, struct cflayer *user_layer) | ||
132 | { | ||
133 | struct cfctrl_request_info *req = kmalloc(sizeof(*req), GFP_KERNEL); | ||
134 | if (!req) { | ||
135 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
136 | return; | ||
137 | } | ||
138 | req->client_layer = user_layer; | ||
139 | req->cmd = cmd; | ||
140 | req->channel_id = linkid; | ||
141 | cfctrl_insert_req(ctrl, req); | ||
142 | } | ||
143 | |||
144 | /* Compare and remove request */ | ||
145 | struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, | ||
146 | struct cfctrl_request_info *req) | ||
147 | { | ||
148 | struct cfctrl_request_info *p; | ||
149 | struct cfctrl_request_info *ret; | ||
150 | |||
151 | spin_lock(&ctrl->info_list_lock); | ||
152 | if (ctrl->first_req == NULL) { | ||
153 | spin_unlock(&ctrl->info_list_lock); | ||
154 | return NULL; | ||
155 | } | ||
156 | |||
157 | if (cfctrl_req_eq(req, ctrl->first_req)) { | ||
158 | ret = ctrl->first_req; | ||
159 | caif_assert(ctrl->first_req); | ||
160 | atomic_set(&ctrl->rsp_seq_no, | ||
161 | ctrl->first_req->sequence_no); | ||
162 | ctrl->first_req = ctrl->first_req->next; | ||
163 | spin_unlock(&ctrl->info_list_lock); | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | p = ctrl->first_req; | ||
168 | |||
169 | while (p->next != NULL) { | ||
170 | if (cfctrl_req_eq(req, p->next)) { | ||
171 | pr_warning("CAIF: %s(): Requests are not " | ||
172 | "received in order\n", | ||
173 | __func__); | ||
174 | ret = p->next; | ||
175 | atomic_set(&ctrl->rsp_seq_no, | ||
176 | p->next->sequence_no); | ||
177 | p->next = p->next->next; | ||
178 | spin_unlock(&ctrl->info_list_lock); | ||
179 | return ret; | ||
180 | } | ||
181 | p = p->next; | ||
182 | } | ||
183 | spin_unlock(&ctrl->info_list_lock); | ||
184 | |||
185 | pr_warning("CAIF: %s(): Request does not match\n", | ||
186 | __func__); | ||
187 | return NULL; | ||
188 | } | ||
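
cfctrl_insert_req() and cfctrl_remove_req() above implement the control layer's pending-request bookkeeping: each outgoing request is appended to a singly linked FIFO and stamped with a sequence number, and each incoming response unlinks the first matching entry (warning when the match is not at the head, i.e. responses arrived out of order). A minimal user-space sketch of the same pattern - the req_info/req_queue names are hypothetical stand-ins, not CAIF types:

    #include <stddef.h>

    struct req_info {
            int cmd;                  /* matched against the response */
            unsigned int sequence_no; /* stamped on insert */
            struct req_info *next;
    };

    struct req_queue {
            struct req_info *first;
            unsigned int seq;         /* mirrors ctrl->req_seq_no */
    };

    /* Append at the tail, stamping a fresh sequence number. */
    static void req_insert(struct req_queue *q, struct req_info *r)
    {
            struct req_info **pp = &q->first;

            r->next = NULL;
            r->sequence_no = ++q->seq;
            while (*pp != NULL)
                    pp = &(*pp)->next;
            *pp = r;
    }

    /* Unlink and return the first entry matching cmd, or NULL. */
    static struct req_info *req_remove(struct req_queue *q, int cmd)
    {
            struct req_info **pp, *r;

            for (pp = &q->first; (r = *pp) != NULL; pp = &r->next)
                    if (r->cmd == cmd) {
                            *pp = r->next;
                            return r;
                    }
            return NULL;
    }

The kernel version additionally holds info_list_lock around both operations and records the removed entry's sequence number in rsp_seq_no.
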
189 | |||
190 | struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) | ||
191 | { | ||
192 | struct cfctrl *this = container_obj(layer); | ||
193 | return &this->res; | ||
194 | } | ||
195 | |||
196 | void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn) | ||
197 | { | ||
198 | this->dn = dn; | ||
199 | } | ||
200 | |||
201 | void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up) | ||
202 | { | ||
203 | this->up = up; | ||
204 | } | ||
205 | |||
206 | static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) | ||
207 | { | ||
208 | info->hdr_len = 0; | ||
209 | info->channel_id = cfctrl->serv.layer.id; | ||
210 | info->dev_info = &cfctrl->serv.dev_info; | ||
211 | } | ||
212 | |||
213 | void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) | ||
214 | { | ||
215 | struct cfctrl *cfctrl = container_obj(layer); | ||
216 | int ret; | ||
217 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
218 | if (!pkt) { | ||
219 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
220 | return; | ||
221 | } | ||
222 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); | ||
223 | init_info(cfpkt_info(pkt), cfctrl); | ||
224 | cfpkt_info(pkt)->dev_info->id = physlinkid; | ||
225 | cfctrl->serv.dev_info.id = physlinkid; | ||
226 | cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); | ||
227 | cfpkt_addbdy(pkt, physlinkid); | ||
228 | ret = | ||
229 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
230 | if (ret < 0) { | ||
231 | pr_err("CAIF: %s(): Could not transmit enum message\n", | ||
232 | __func__); | ||
233 | cfpkt_destroy(pkt); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | void cfctrl_linkup_request(struct cflayer *layer, | ||
238 | struct cfctrl_link_param *param, | ||
239 | struct cflayer *user_layer) | ||
240 | { | ||
241 | struct cfctrl *cfctrl = container_obj(layer); | ||
242 | u32 tmp32; | ||
243 | u16 tmp16; | ||
244 | u8 tmp8; | ||
245 | struct cfctrl_request_info *req; | ||
246 | int ret; | ||
247 | char utility_name[16]; | ||
248 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
249 | if (!pkt) { | ||
250 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
251 | return; | ||
252 | } | ||
253 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); | ||
254 | cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); | ||
255 | cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid); | ||
256 | cfpkt_addbdy(pkt, param->endpoint & 0x03); | ||
257 | |||
258 | switch (param->linktype) { | ||
259 | case CFCTRL_SRV_VEI: | ||
260 | break; | ||
261 | case CFCTRL_SRV_VIDEO: | ||
262 | cfpkt_addbdy(pkt, (u8) param->u.video.connid); | ||
263 | break; | ||
264 | case CFCTRL_SRV_DBG: | ||
265 | break; | ||
266 | case CFCTRL_SRV_DATAGRAM: | ||
267 | tmp32 = cpu_to_le32(param->u.datagram.connid); | ||
268 | cfpkt_add_body(pkt, &tmp32, 4); | ||
269 | break; | ||
270 | case CFCTRL_SRV_RFM: | ||
271 | /* Convert the RFM connection ID to little-endian wire | ||
272 | * format and copy it out... | ||
273 | */ | ||
274 | tmp32 = cpu_to_le32(param->u.rfm.connid); | ||
275 | cfpkt_add_body(pkt, &tmp32, 4); | ||
276 | /* Add volume name, including zero termination... */ | ||
277 | cfpkt_add_body(pkt, param->u.rfm.volume, | ||
278 | strlen(param->u.rfm.volume) + 1); | ||
279 | break; | ||
280 | case CFCTRL_SRV_UTIL: | ||
281 | tmp16 = cpu_to_le16(param->u.utility.fifosize_kb); | ||
282 | cfpkt_add_body(pkt, &tmp16, 2); | ||
283 | tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); | ||
284 | cfpkt_add_body(pkt, &tmp16, 2); | ||
285 | memset(utility_name, 0, sizeof(utility_name)); | ||
286 | strncpy(utility_name, param->u.utility.name, | ||
287 | UTILITY_NAME_LENGTH - 1); | ||
288 | cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); | ||
289 | tmp8 = param->u.utility.paramlen; | ||
290 | cfpkt_add_body(pkt, &tmp8, 1); | ||
291 | cfpkt_add_body(pkt, param->u.utility.params, | ||
292 | param->u.utility.paramlen); | ||
293 | break; | ||
294 | default: | ||
295 | pr_warning("CAIF: %s():Request setup of bad link type = %d\n", | ||
296 | __func__, param->linktype); | ||
297 | } | ||
298 | req = kmalloc(sizeof(*req), GFP_KERNEL); | ||
299 | if (!req) { | ||
300 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
301 | return; | ||
302 | } | ||
303 | memset(req, 0, sizeof(*req)); | ||
304 | req->client_layer = user_layer; | ||
305 | req->cmd = CFCTRL_CMD_LINK_SETUP; | ||
306 | req->param = *param; | ||
307 | cfctrl_insert_req(cfctrl, req); | ||
308 | init_info(cfpkt_info(pkt), cfctrl); | ||
309 | cfpkt_info(pkt)->dev_info->id = param->phyid; | ||
310 | ret = | ||
311 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
312 | if (ret < 0) { | ||
313 | pr_err("CAIF: %s(): Could not transmit linksetup request\n", | ||
314 | __func__); | ||
315 | cfpkt_destroy(pkt); | ||
316 | } | ||
317 | } | ||
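
cfctrl_linkup_request() above packs several fields into single octets - (chtype << 4) + linktype and (priority << 3) + phyid - and serializes multi-byte parameters little-endian via cpu_to_le32()/cpu_to_le16() plus cfpkt_add_body(). A stand-alone sketch of that packing for the datagram case; build_setup_body() and its buffer contract are illustrative assumptions, only the bit layout comes from the code above:

    #include <stdint.h>
    #include <stddef.h>

    /* Serialize a 32-bit value little-endian, matching what
     * cpu_to_le32() produces on the wire. */
    static size_t put_le32(uint8_t *out, uint32_t v)
    {
            out[0] = (uint8_t)(v);
            out[1] = (uint8_t)(v >> 8);
            out[2] = (uint8_t)(v >> 16);
            out[3] = (uint8_t)(v >> 24);
            return 4;
    }

    /* Pack the fixed part of a datagram LINK_SETUP body.
     * 'out' must hold at least 7 bytes. */
    static size_t build_setup_body(uint8_t *out, uint8_t chtype,
                                   uint8_t linktype, uint8_t prio,
                                   uint8_t phyid, uint8_t endpoint,
                                   uint32_t connid)
    {
            size_t n = 0;

            out[n++] = (uint8_t)((chtype << 4) | (linktype & 0x0f));
            out[n++] = (uint8_t)((prio << 3) | (phyid & 0x07));
            out[n++] = endpoint & 0x03;
            n += put_le32(out + n, connid);
            return n;
    }

The original uses '+' rather than '|' for the packing; the two are equivalent as long as the packed fields never overlap.
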
318 | |||
319 | int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, | ||
320 | struct cflayer *client) | ||
321 | { | ||
322 | int ret; | ||
323 | struct cfctrl *cfctrl = container_obj(layer); | ||
324 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
325 | if (!pkt) { | ||
326 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
327 | return -ENOMEM; | ||
328 | } | ||
329 | cfctrl_insert_req2(cfctrl, CFCTRL_CMD_LINK_DESTROY, channelid, client); | ||
330 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); | ||
331 | cfpkt_addbdy(pkt, channelid); | ||
332 | init_info(cfpkt_info(pkt), cfctrl); | ||
333 | ret = | ||
334 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
335 | if (ret < 0) { | ||
336 | pr_err("CAIF: %s(): Could not transmit link-down request\n", | ||
337 | __func__); | ||
338 | cfpkt_destroy(pkt); | ||
339 | } | ||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | void cfctrl_sleep_req(struct cflayer *layer) | ||
344 | { | ||
345 | int ret; | ||
346 | struct cfctrl *cfctrl = container_obj(layer); | ||
347 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
348 | if (!pkt) { | ||
349 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
350 | return; | ||
351 | } | ||
352 | cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP); | ||
353 | init_info(cfpkt_info(pkt), cfctrl); | ||
354 | ret = | ||
355 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
356 | if (ret < 0) | ||
357 | cfpkt_destroy(pkt); | ||
358 | } | ||
359 | |||
360 | void cfctrl_wake_req(struct cflayer *layer) | ||
361 | { | ||
362 | int ret; | ||
363 | struct cfctrl *cfctrl = container_obj(layer); | ||
364 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
365 | if (!pkt) { | ||
366 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
367 | return; | ||
368 | } | ||
369 | cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE); | ||
370 | init_info(cfpkt_info(pkt), cfctrl); | ||
371 | ret = | ||
372 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
373 | if (ret < 0) | ||
374 | cfpkt_destroy(pkt); | ||
375 | } | ||
376 | |||
377 | void cfctrl_getstartreason_req(struct cflayer *layer) | ||
378 | { | ||
379 | int ret; | ||
380 | struct cfctrl *cfctrl = container_obj(layer); | ||
381 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
382 | if (!pkt) { | ||
383 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
384 | return; | ||
385 | } | ||
386 | cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON); | ||
387 | init_info(cfpkt_info(pkt), cfctrl); | ||
388 | ret = | ||
389 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
390 | if (ret < 0) | ||
391 | cfpkt_destroy(pkt); | ||
392 | } | ||
393 | |||
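
The three requests above (sleep, wake, start-reason), like the enum request before them, share one skeleton: allocate a control packet, append the single command byte, stamp the routing info, transmit downwards, and free the packet if the lower layer refuses it. A hypothetical helper capturing that shape - not part of this patch, which keeps the functions separate - would use only calls already present in this file:

    static void cfctrl_simple_req(struct cflayer *layer, u8 cmd)
    {
            struct cfctrl *cfctrl = container_obj(layer);
            struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
            int ret;

            if (!pkt) {
                    pr_warning("CAIF: %s(): Out of memory\n", __func__);
                    return;
            }
            cfpkt_addbdy(pkt, cmd);
            init_info(cfpkt_info(pkt), cfctrl);
            ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn,
                                                  pkt);
            if (ret < 0)
                    cfpkt_destroy(pkt);
    }

cfctrl_sleep_req() would then reduce to cfctrl_simple_req(layer, CFCTRL_CMD_SLEEP), and likewise for wake and start-reason.
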
394 | |||
395 | static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) | ||
396 | { | ||
397 | u8 cmdrsp; | ||
398 | u8 cmd; | ||
399 | int ret = -1; | ||
400 | u16 tmp16; | ||
401 | u8 len; | ||
402 | u8 param[255]; | ||
403 | u8 linkid; | ||
404 | struct cfctrl *cfctrl = container_obj(layer); | ||
405 | struct cfctrl_request_info rsp, *req; | ||
406 | |||
407 | |||
408 | cfpkt_extr_head(pkt, &cmdrsp, 1); | ||
409 | cmd = cmdrsp & CFCTRL_CMD_MASK; | ||
410 | if (cmd != CFCTRL_CMD_LINK_ERR | ||
411 | && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { | ||
412 | if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) { | ||
413 | pr_info("CAIF: %s() CAIF Protocol error: " | ||
414 | "Response bit not set\n", __func__); | ||
415 | goto error; | ||
416 | } | ||
417 | } | ||
418 | |||
419 | switch (cmd) { | ||
420 | case CFCTRL_CMD_LINK_SETUP: | ||
421 | { | ||
422 | enum cfctrl_srv serv; | ||
423 | enum cfctrl_srv servtype; | ||
424 | u8 endpoint; | ||
425 | u8 physlinkid; | ||
426 | u8 prio; | ||
427 | u8 tmp; | ||
428 | u32 tmp32; | ||
429 | u8 *cp; | ||
430 | int i; | ||
431 | struct cfctrl_link_param linkparam; | ||
432 | memset(&linkparam, 0, sizeof(linkparam)); | ||
433 | |||
434 | cfpkt_extr_head(pkt, &tmp, 1); | ||
435 | |||
436 | serv = tmp & CFCTRL_SRV_MASK; | ||
437 | linkparam.linktype = serv; | ||
438 | |||
439 | servtype = tmp >> 4; | ||
440 | linkparam.chtype = servtype; | ||
441 | |||
442 | cfpkt_extr_head(pkt, &tmp, 1); | ||
443 | physlinkid = tmp & 0x07; | ||
444 | prio = tmp >> 3; | ||
445 | |||
446 | linkparam.priority = prio; | ||
447 | linkparam.phyid = physlinkid; | ||
448 | cfpkt_extr_head(pkt, &endpoint, 1); | ||
449 | linkparam.endpoint = endpoint & 0x03; | ||
450 | |||
451 | switch (serv) { | ||
452 | case CFCTRL_SRV_VEI: | ||
453 | case CFCTRL_SRV_DBG: | ||
454 | /* Link ID */ | ||
455 | cfpkt_extr_head(pkt, &linkid, 1); | ||
456 | break; | ||
457 | case CFCTRL_SRV_VIDEO: | ||
458 | cfpkt_extr_head(pkt, &tmp, 1); | ||
459 | linkparam.u.video.connid = tmp; | ||
460 | /* Link ID */ | ||
461 | cfpkt_extr_head(pkt, &linkid, 1); | ||
462 | break; | ||
463 | |||
464 | case CFCTRL_SRV_DATAGRAM: | ||
465 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
466 | linkparam.u.datagram.connid = | ||
467 | le32_to_cpu(tmp32); | ||
468 | /* Link ID */ | ||
469 | cfpkt_extr_head(pkt, &linkid, 1); | ||
470 | break; | ||
471 | case CFCTRL_SRV_RFM: | ||
472 | /* Extract the little-endian RFM | ||
473 | * connection ID, then the zero- | ||
474 | * terminated volume name... | ||
475 | */ | ||
476 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
477 | linkparam.u.rfm.connid = | ||
478 | le32_to_cpu(tmp32); | ||
479 | cp = (u8 *) linkparam.u.rfm.volume; | ||
480 | for (cfpkt_extr_head(pkt, &tmp, 1); | ||
481 | cfpkt_more(pkt) && tmp != '\0'; | ||
482 | cfpkt_extr_head(pkt, &tmp, 1)) | ||
483 | *cp++ = tmp; | ||
484 | *cp = '\0'; | ||
485 | |||
486 | /* Link ID */ | ||
487 | cfpkt_extr_head(pkt, &linkid, 1); | ||
488 | |||
489 | break; | ||
490 | case CFCTRL_SRV_UTIL: | ||
491 | /* Extract the utility-link | ||
492 | * parameters: FIFO sizes, service | ||
493 | * name and the parameter block. | ||
494 | */ | ||
495 | /* Fifosize KB */ | ||
496 | cfpkt_extr_head(pkt, &tmp16, 2); | ||
497 | linkparam.u.utility.fifosize_kb = | ||
498 | le16_to_cpu(tmp16); | ||
499 | /* Fifosize bufs */ | ||
500 | cfpkt_extr_head(pkt, &tmp16, 2); | ||
501 | linkparam.u.utility.fifosize_bufs = | ||
502 | le16_to_cpu(tmp16); | ||
503 | /* name */ | ||
504 | cp = (u8 *) linkparam.u.utility.name; | ||
505 | caif_assert(sizeof(linkparam.u.utility.name) | ||
506 | >= UTILITY_NAME_LENGTH); | ||
507 | for (i = 0; | ||
508 | i < UTILITY_NAME_LENGTH | ||
509 | && cfpkt_more(pkt); i++) { | ||
510 | cfpkt_extr_head(pkt, &tmp, 1); | ||
511 | *cp++ = tmp; | ||
512 | } | ||
513 | /* Length */ | ||
514 | cfpkt_extr_head(pkt, &len, 1); | ||
515 | linkparam.u.utility.paramlen = len; | ||
516 | /* Param Data */ | ||
517 | cp = linkparam.u.utility.params; | ||
518 | while (cfpkt_more(pkt) && len--) { | ||
519 | cfpkt_extr_head(pkt, &tmp, 1); | ||
520 | *cp++ = tmp; | ||
521 | } | ||
522 | /* Link ID */ | ||
523 | cfpkt_extr_head(pkt, &linkid, 1); | ||
524 | /* Length */ | ||
525 | cfpkt_extr_head(pkt, &len, 1); | ||
526 | /* Param Data */ | ||
527 | cfpkt_extr_head(pkt, ¶m, len); | ||
528 | break; | ||
529 | default: | ||
530 | pr_warning("CAIF: %s(): Request setup " | ||
531 | "- invalid link type (%d)", | ||
532 | __func__, serv); | ||
533 | goto error; | ||
534 | } | ||
535 | |||
536 | rsp.cmd = cmd; | ||
537 | rsp.param = linkparam; | ||
538 | req = cfctrl_remove_req(cfctrl, &rsp); | ||
539 | |||
540 | if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || | ||
541 | cfpkt_erroneous(pkt)) { | ||
542 | pr_err("CAIF: %s(): Invalid O/E bit or parse " | ||
543 | "error on CAIF control channel", | ||
544 | __func__); | ||
545 | cfctrl->res.reject_rsp(cfctrl->serv.layer.up, | ||
546 | 0, | ||
547 | req ? req->client_layer | ||
548 | : NULL); | ||
549 | } else { | ||
550 | cfctrl->res.linksetup_rsp(cfctrl->serv. | ||
551 | layer.up, linkid, | ||
552 | serv, physlinkid, | ||
553 | req ? req-> | ||
554 | client_layer : NULL); | ||
555 | } | ||
556 | |||
557 | if (req != NULL) | ||
558 | kfree(req); | ||
559 | } | ||
560 | break; | ||
561 | case CFCTRL_CMD_LINK_DESTROY: | ||
562 | cfpkt_extr_head(pkt, &linkid, 1); | ||
563 | rsp.cmd = cmd; | ||
564 | rsp.channel_id = linkid; | ||
565 | req = cfctrl_remove_req(cfctrl, &rsp); | ||
566 | cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid, | ||
567 | req ? req->client_layer : NULL); | ||
568 | if (req != NULL) | ||
569 | kfree(req); | ||
570 | break; | ||
571 | case CFCTRL_CMD_LINK_ERR: | ||
572 | pr_err("CAIF: %s(): Frame Error Indication received\n", | ||
573 | __func__); | ||
574 | cfctrl->res.linkerror_ind(); | ||
575 | break; | ||
576 | case CFCTRL_CMD_ENUM: | ||
577 | cfctrl->res.enum_rsp(); | ||
578 | break; | ||
579 | case CFCTRL_CMD_SLEEP: | ||
580 | cfctrl->res.sleep_rsp(); | ||
581 | break; | ||
582 | case CFCTRL_CMD_WAKE: | ||
583 | cfctrl->res.wake_rsp(); | ||
584 | break; | ||
585 | case CFCTRL_CMD_LINK_RECONF: | ||
586 | cfctrl->res.restart_rsp(); | ||
587 | break; | ||
588 | case CFCTRL_CMD_RADIO_SET: | ||
589 | cfctrl->res.radioset_rsp(); | ||
590 | break; | ||
591 | default: | ||
592 | pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__); | ||
593 | goto error; | ||
594 | break; | ||
595 | } | ||
596 | ret = 0; | ||
597 | error: | ||
598 | cfpkt_destroy(pkt); | ||
599 | return ret; | ||
600 | } | ||
601 | |||
602 | static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
603 | int phyid) | ||
604 | { | ||
605 | struct cfctrl *this = container_obj(layr); | ||
606 | switch (ctrl) { | ||
607 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: | ||
608 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
609 | spin_lock(&this->info_list_lock); | ||
610 | if (this->first_req != NULL) { | ||
611 | pr_warning("CAIF: %s(): Received flow off in " | ||
612 | "control layer", __func__); | ||
613 | } | ||
614 | spin_unlock(&this->info_list_lock); | ||
615 | break; | ||
616 | default: | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | |||
621 | #ifndef CAIF_NO_LOOP | ||
622 | static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) | ||
623 | { | ||
624 | static int last_linkid; | ||
625 | u8 linkid, linktype, tmp; | ||
626 | switch (cmd) { | ||
627 | case CFCTRL_CMD_LINK_SETUP: | ||
628 | spin_lock(&ctrl->loop_linkid_lock); | ||
629 | for (linkid = last_linkid + 1; linkid < 255; linkid++) | ||
630 | if (!ctrl->loop_linkused[linkid]) | ||
631 | goto found; | ||
632 | for (linkid = last_linkid - 1; linkid > 0; linkid--) | ||
633 | if (!ctrl->loop_linkused[linkid]) | ||
634 | goto found; | ||
635 | spin_unlock(&ctrl->loop_linkid_lock); | ||
636 | return -EINVAL; | ||
637 | found: | ||
638 | if (!ctrl->loop_linkused[linkid]) | ||
639 | ctrl->loop_linkused[linkid] = 1; | ||
640 | |||
641 | last_linkid = linkid; | ||
642 | |||
643 | cfpkt_add_trail(pkt, &linkid, 1); | ||
644 | spin_unlock(&ctrl->loop_linkid_lock); | ||
645 | cfpkt_peek_head(pkt, &linktype, 1); | ||
646 | if (linktype == CFCTRL_SRV_UTIL) { | ||
647 | tmp = 0x01; | ||
648 | cfpkt_add_trail(pkt, &tmp, 1); | ||
649 | cfpkt_add_trail(pkt, &tmp, 1); | ||
650 | } | ||
651 | break; | ||
652 | |||
653 | case CFCTRL_CMD_LINK_DESTROY: | ||
654 | spin_lock(&ctrl->loop_linkid_lock); | ||
655 | cfpkt_peek_head(pkt, &linkid, 1); | ||
656 | ctrl->loop_linkused[linkid] = 0; | ||
657 | spin_unlock(&ctrl->loop_linkid_lock); | ||
658 | break; | ||
659 | default: | ||
660 | break; | ||
661 | } | ||
662 | return CAIF_SUCCESS; | ||
663 | } | ||
664 | #endif | ||
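
handle_loop() above hands out loopback link IDs with a round-robin free-slot search: scan upward from the ID given out last, then wrap and scan the lower range, leaving ID 0 (the control channel) and 255 unused. The allocator in isolation - a sketch with a plain array standing in for ctrl->loop_linkused, and no locking:

    #include <stdbool.h>

    static bool used[256];
    static int last_linkid;

    /* Return a free ID in 1..254, or -1 if all are taken. */
    static int alloc_loop_linkid(void)
    {
            int id;

            for (id = last_linkid + 1; id < 255; id++)
                    if (!used[id])
                            goto found;
            for (id = last_linkid - 1; id > 0; id--)
                    if (!used[id])
                            goto found;
            return -1;
    found:
            used[id] = true;
            last_linkid = id;
            return id;
    }

Starting the second scan at last_linkid - 1 and walking down mirrors the kernel loop: low IDs are revisited in reverse order rather than first-fit.
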
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c new file mode 100644 index 000000000000..ab6b6dc34cf8 --- /dev/null +++ b/net/caif/cfdbgl.c | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfsrvl.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | |||
13 | static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
14 | static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
15 | |||
16 | struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) | ||
17 | { | ||
18 | struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
19 | if (!dbg) { | ||
20 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
21 | return NULL; | ||
22 | } | ||
23 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
24 | memset(dbg, 0, sizeof(struct cfsrvl)); | ||
25 | cfsrvl_init(dbg, channel_id, dev_info); | ||
26 | dbg->layer.receive = cfdbgl_receive; | ||
27 | dbg->layer.transmit = cfdbgl_transmit; | ||
28 | snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); | ||
29 | return &dbg->layer; | ||
30 | } | ||
31 | |||
32 | static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
33 | { | ||
34 | return layr->up->receive(layr->up, pkt); | ||
35 | } | ||
36 | |||
37 | static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
38 | { | ||
39 | return layr->dn->transmit(layr->dn, pkt); | ||
40 | } | ||
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c new file mode 100644 index 000000000000..53194840ecb6 --- /dev/null +++ b/net/caif/cfdgml.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cfpkt.h> | ||
13 | |||
14 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
15 | |||
16 | #define DGM_CMD_BIT 0x80 | ||
17 | #define DGM_FLOW_OFF 0x81 | ||
18 | #define DGM_FLOW_ON 0x80 | ||
19 | #define DGM_CTRL_PKT_SIZE 1 | ||
20 | |||
21 | static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
22 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | |||
24 | struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info) | ||
25 | { | ||
26 | struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
27 | if (!dgm) { | ||
28 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
29 | return NULL; | ||
30 | } | ||
31 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
32 | memset(dgm, 0, sizeof(struct cfsrvl)); | ||
33 | cfsrvl_init(dgm, channel_id, dev_info); | ||
34 | dgm->layer.receive = cfdgml_receive; | ||
35 | dgm->layer.transmit = cfdgml_transmit; | ||
36 | snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id); | ||
37 | dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0'; | ||
38 | return &dgm->layer; | ||
39 | } | ||
40 | |||
41 | static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
42 | { | ||
43 | u8 cmd = -1; | ||
44 | u8 dgmhdr[3]; | ||
45 | int ret; | ||
46 | caif_assert(layr->up != NULL); | ||
47 | caif_assert(layr->receive != NULL); | ||
48 | caif_assert(layr->ctrlcmd != NULL); | ||
49 | |||
50 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
51 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
52 | cfpkt_destroy(pkt); | ||
53 | return -EPROTO; | ||
54 | } | ||
55 | |||
56 | if ((cmd & DGM_CMD_BIT) == 0) { | ||
57 | if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) { | ||
58 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
59 | cfpkt_destroy(pkt); | ||
60 | return -EPROTO; | ||
61 | } | ||
62 | ret = layr->up->receive(layr->up, pkt); | ||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | switch (cmd) { | ||
67 | case DGM_FLOW_OFF: /* FLOW OFF */ | ||
68 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
69 | cfpkt_destroy(pkt); | ||
70 | return 0; | ||
71 | case DGM_FLOW_ON: /* FLOW ON */ | ||
72 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
73 | cfpkt_destroy(pkt); | ||
74 | return 0; | ||
75 | default: | ||
76 | cfpkt_destroy(pkt); | ||
77 | pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n", | ||
78 | __func__, cmd, cmd); | ||
79 | return -EPROTO; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
84 | { | ||
85 | u32 zero = 0; | ||
86 | struct caif_payload_info *info; | ||
87 | struct cfsrvl *service = container_obj(layr); | ||
88 | int ret; | ||
89 | if (!cfsrvl_ready(service, &ret)) | ||
90 | return ret; | ||
91 | |||
92 | cfpkt_add_head(pkt, &zero, 4); | ||
93 | |||
94 | /* Add info for MUX-layer to route the packet out. */ | ||
95 | info = cfpkt_info(pkt); | ||
96 | info->channel_id = service->layer.id; | ||
97 | /* To optimize alignment, account for the size of the CAIF | ||
98 | * header placed before the payload. | ||
99 | */ | ||
100 | info->hdr_len = 4; | ||
101 | info->dev_info = &service->dev_info; | ||
102 | ret = layr->dn->transmit(layr->dn, pkt); | ||
103 | if (ret < 0) { | ||
104 | u32 tmp32; | ||
105 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
106 | } | ||
107 | return ret; | ||
108 | } | ||
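
cfdgml_receive() above dispatches on the top bit of the first byte: with DGM_CMD_BIT (0x80) clear the frame is payload and carries a further 3-byte datagram header, while 0x80 and 0x81 signal flow-on and flow-off. The classification in isolation (a sketch; the enum is illustrative):

    #include <stdint.h>

    enum dgm_event { DGM_PAYLOAD, DGM_ON, DGM_OFF, DGM_BAD };

    static enum dgm_event classify_dgm(uint8_t first)
    {
            if ((first & 0x80) == 0)        /* DGM_CMD_BIT clear */
                    return DGM_PAYLOAD;     /* 3-byte header follows */
            switch (first) {
            case 0x80: return DGM_ON;       /* DGM_FLOW_ON */
            case 0x81: return DGM_OFF;      /* DGM_FLOW_OFF */
            default:   return DGM_BAD;      /* -EPROTO above */
            }
    }
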
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c new file mode 100644 index 000000000000..e86a4ca3b217 --- /dev/null +++ b/net/caif/cffrml.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * CAIF Framing Layer. | ||
3 | * | ||
4 | * Copyright (C) ST-Ericsson AB 2010 | ||
5 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
6 | * License terms: GNU General Public License (GPL) version 2 | ||
7 | */ | ||
8 | |||
9 | #include <linux/stddef.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/crc-ccitt.h> | ||
13 | #include <net/caif/caif_layer.h> | ||
14 | #include <net/caif/cfpkt.h> | ||
15 | #include <net/caif/cffrml.h> | ||
16 | |||
17 | #define container_obj(layr) container_of(layr, struct cffrml, layer) | ||
18 | |||
19 | struct cffrml { | ||
20 | struct cflayer layer; | ||
21 | bool dofcs; /* FCS active */ | ||
22 | }; | ||
23 | |||
24 | static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
25 | static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
26 | static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
27 | int phyid); | ||
28 | |||
29 | static u32 cffrml_rcv_error; | ||
30 | static u32 cffrml_rcv_checksum_error; | ||
31 | struct cflayer *cffrml_create(u16 phyid, bool use_fcs) | ||
32 | { | ||
33 | struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); | ||
34 | if (!this) { | ||
35 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
36 | return NULL; | ||
37 | } | ||
38 | caif_assert(offsetof(struct cffrml, layer) == 0); | ||
39 | |||
40 | memset(this, 0, sizeof(struct cffrml)); | ||
41 | this->layer.receive = cffrml_receive; | ||
42 | this->layer.transmit = cffrml_transmit; | ||
43 | this->layer.ctrlcmd = cffrml_ctrlcmd; | ||
44 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid); | ||
45 | this->dofcs = use_fcs; | ||
46 | this->layer.id = phyid; | ||
47 | return (struct cflayer *) this; | ||
48 | } | ||
49 | |||
50 | void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) | ||
51 | { | ||
52 | this->up = up; | ||
53 | } | ||
54 | |||
55 | void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn) | ||
56 | { | ||
57 | this->dn = dn; | ||
58 | } | ||
59 | |||
60 | static u16 cffrml_checksum(u16 chks, void *buf, u16 len) | ||
61 | { | ||
62 | /* FIXME: FCS should be moved to glue in order to use OS-Specific | ||
63 | * solutions | ||
64 | */ | ||
65 | return crc_ccitt(chks, buf, len); | ||
66 | } | ||
67 | |||
68 | static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
69 | { | ||
70 | u16 tmp; | ||
71 | u16 len; | ||
72 | u16 hdrchks; | ||
73 | u16 pktchks; | ||
74 | struct cffrml *this; | ||
75 | this = container_obj(layr); | ||
76 | |||
77 | cfpkt_extr_head(pkt, &tmp, 2); | ||
78 | len = le16_to_cpu(tmp); | ||
79 | |||
80 | /* Subtract for FCS on length if FCS is not used. */ | ||
81 | if (!this->dofcs) | ||
82 | len -= 2; | ||
83 | |||
84 | if (cfpkt_setlen(pkt, len) < 0) { | ||
85 | ++cffrml_rcv_error; | ||
86 | pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len); | ||
87 | cfpkt_destroy(pkt); | ||
88 | return -EPROTO; | ||
89 | } | ||
90 | /* | ||
91 | * When FCS is disabled we trim with setlen rather than extracting | ||
92 | * the trailer - that way we avoid a cache-miss on the tail. | ||
93 | */ | ||
94 | if (this->dofcs) { | ||
95 | cfpkt_extr_trail(pkt, &tmp, 2); | ||
96 | hdrchks = le16_to_cpu(tmp); | ||
97 | pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); | ||
98 | if (pktchks != hdrchks) { | ||
99 | cfpkt_add_trail(pkt, &tmp, 2); | ||
100 | ++cffrml_rcv_error; | ||
101 | ++cffrml_rcv_checksum_error; | ||
102 | pr_info("CAIF: %s(): Frame checksum error " | ||
103 | "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks); | ||
104 | return -EILSEQ; | ||
105 | } | ||
106 | } | ||
107 | if (cfpkt_erroneous(pkt)) { | ||
108 | ++cffrml_rcv_error; | ||
109 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
110 | cfpkt_destroy(pkt); | ||
111 | return -EPROTO; | ||
112 | } | ||
113 | return layr->up->receive(layr->up, pkt); | ||
114 | } | ||
115 | |||
116 | static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
117 | { | ||
118 | u16 tmp; | ||
119 | u16 chks; | ||
120 | u16 len; | ||
121 | int ret; | ||
122 | struct cffrml *this = container_obj(layr); | ||
123 | if (this->dofcs) { | ||
124 | chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); | ||
125 | tmp = cpu_to_le16(chks); | ||
126 | cfpkt_add_trail(pkt, &tmp, 2); | ||
127 | } else { | ||
128 | cfpkt_pad_trail(pkt, 2); | ||
129 | } | ||
130 | len = cfpkt_getlen(pkt); | ||
131 | tmp = cpu_to_le16(len); | ||
132 | cfpkt_add_head(pkt, &tmp, 2); | ||
133 | cfpkt_info(pkt)->hdr_len += 2; | ||
134 | if (cfpkt_erroneous(pkt)) { | ||
135 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
136 | return -EPROTO; | ||
137 | } | ||
138 | ret = layr->dn->transmit(layr->dn, pkt); | ||
139 | if (ret < 0) { | ||
140 | /* Remove header on faulty packet. */ | ||
141 | cfpkt_extr_head(pkt, &tmp, 2); | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
147 | int phyid) | ||
148 | { | ||
149 | if (layr->up->ctrlcmd) | ||
150 | layr->up->ctrlcmd(layr->up, ctrl, layr->id); | ||
151 | } | ||
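
The framing layer above brackets every packet with a little-endian 16-bit length in front and a 2-byte trailer behind: either a CRC computed with crc_ccitt() (initial value 0xffff, over the payload only) or zero padding when FCS is off. The length field is taken after the trailer is appended, so it counts payload plus trailer. A self-contained sketch of the transmit side, using the bitwise form of the reflected CCITT polynomial (0x8408) that the kernel's table-driven crc_ccitt() implements; frame_tx() and its buffer contract ('out' needs len + 4 bytes) are assumptions of the sketch:

    #include <stdint.h>
    #include <stddef.h>

    static uint16_t crc_ccitt_byte(uint16_t crc, uint8_t c)
    {
            int i;

            crc ^= c;
            for (i = 0; i < 8; i++)
                    crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
            return crc;
    }

    static size_t frame_tx(uint8_t *out, const uint8_t *payload,
                           size_t len, int use_fcs)
    {
            uint16_t crc = 0xffff;
            size_t i, n = 0, body = len + 2; /* length counts the trailer */

            out[n++] = (uint8_t)(body);      /* LE16 length field */
            out[n++] = (uint8_t)(body >> 8);
            for (i = 0; i < len; i++) {
                    out[n++] = payload[i];
                    crc = crc_ccitt_byte(crc, payload[i]);
            }
            if (use_fcs) {
                    out[n++] = (uint8_t)(crc);      /* LE FCS trailer */
                    out[n++] = (uint8_t)(crc >> 8);
            } else {
                    out[n++] = 0;   /* cfpkt_pad_trail(pkt, 2) */
                    out[n++] = 0;
            }
            return n;
    }

The receive path is the mirror image: read the length, trim (subtracting 2 first when FCS is off), then pop and verify the trailer.
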
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c new file mode 100644 index 000000000000..6fb9f9e96cf8 --- /dev/null +++ b/net/caif/cfmuxl.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/stddef.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/cfpkt.h> | ||
10 | #include <net/caif/cfmuxl.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cffrml.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfmuxl, layer) | ||
15 | |||
16 | #define CAIF_CTRL_CHANNEL 0 | ||
17 | #define UP_CACHE_SIZE 8 | ||
18 | #define DN_CACHE_SIZE 8 | ||
19 | |||
20 | struct cfmuxl { | ||
21 | struct cflayer layer; | ||
22 | struct list_head srvl_list; | ||
23 | struct list_head frml_list; | ||
24 | struct cflayer *up_cache[UP_CACHE_SIZE]; | ||
25 | struct cflayer *dn_cache[DN_CACHE_SIZE]; | ||
26 | /* | ||
27 | * Held while inserting or removing downward layers. | ||
28 | */ | ||
29 | spinlock_t transmit_lock; | ||
30 | |||
31 | /* | ||
32 | * Set when inserting or removing upwards layers. | ||
33 | */ | ||
34 | spinlock_t receive_lock; | ||
35 | |||
36 | }; | ||
37 | |||
38 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
39 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
40 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
41 | int phyid); | ||
42 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); | ||
43 | |||
44 | struct cflayer *cfmuxl_create(void) | ||
45 | { | ||
46 | struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); | ||
47 | if (!this) | ||
48 | return NULL; | ||
49 | memset(this, 0, sizeof(*this)); | ||
50 | this->layer.receive = cfmuxl_receive; | ||
51 | this->layer.transmit = cfmuxl_transmit; | ||
52 | this->layer.ctrlcmd = cfmuxl_ctrlcmd; | ||
53 | INIT_LIST_HEAD(&this->srvl_list); | ||
54 | INIT_LIST_HEAD(&this->frml_list); | ||
55 | spin_lock_init(&this->transmit_lock); | ||
56 | spin_lock_init(&this->receive_lock); | ||
57 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); | ||
58 | return &this->layer; | ||
59 | } | ||
60 | |||
61 | int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) | ||
62 | { | ||
63 | struct cfmuxl *muxl = container_obj(layr); | ||
64 | spin_lock(&muxl->receive_lock); | ||
65 | list_add(&up->node, &muxl->srvl_list); | ||
66 | spin_unlock(&muxl->receive_lock); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid) | ||
71 | { | ||
72 | struct list_head *node; | ||
73 | struct cflayer *layer; | ||
74 | struct cfmuxl *muxl = container_obj(layr); | ||
75 | bool match = false; | ||
76 | spin_lock(&muxl->receive_lock); | ||
77 | |||
78 | list_for_each(node, &muxl->srvl_list) { | ||
79 | layer = list_entry(node, struct cflayer, node); | ||
80 | if (cfsrvl_phyid_match(layer, phyid)) { | ||
81 | match = true; | ||
82 | break; | ||
83 | } | ||
84 | |||
85 | } | ||
86 | spin_unlock(&muxl->receive_lock); | ||
87 | return match; | ||
88 | } | ||
89 | |||
90 | u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id) | ||
91 | { | ||
92 | struct cflayer *up; | ||
93 | int phyid; | ||
94 | struct cfmuxl *muxl = container_obj(layr); | ||
95 | spin_lock(&muxl->receive_lock); | ||
96 | up = get_up(muxl, channel_id); | ||
97 | if (up != NULL) | ||
98 | phyid = cfsrvl_getphyid(up); | ||
99 | else | ||
100 | phyid = 0; | ||
101 | spin_unlock(&muxl->receive_lock); | ||
102 | return phyid; | ||
103 | } | ||
104 | |||
105 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) | ||
106 | { | ||
107 | struct cfmuxl *muxl = (struct cfmuxl *) layr; | ||
108 | spin_lock(&muxl->transmit_lock); | ||
109 | list_add(&dn->node, &muxl->frml_list); | ||
110 | spin_unlock(&muxl->transmit_lock); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static struct cflayer *get_from_id(struct list_head *list, u16 id) | ||
115 | { | ||
116 | struct list_head *node; | ||
117 | struct cflayer *layer; | ||
118 | list_for_each(node, list) { | ||
119 | layer = list_entry(node, struct cflayer, node); | ||
120 | if (layer->id == id) | ||
121 | return layer; | ||
122 | } | ||
123 | return NULL; | ||
124 | } | ||
125 | |||
126 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) | ||
127 | { | ||
128 | struct cfmuxl *muxl = container_obj(layr); | ||
129 | struct cflayer *dn; | ||
130 | spin_lock(&muxl->transmit_lock); | ||
131 | memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache)); | ||
132 | dn = get_from_id(&muxl->frml_list, phyid); | ||
133 | if (dn == NULL) { | ||
134 | spin_unlock(&muxl->transmit_lock); | ||
135 | return NULL; | ||
136 | } | ||
137 | list_del(&dn->node); | ||
138 | caif_assert(dn != NULL); | ||
139 | spin_unlock(&muxl->transmit_lock); | ||
140 | return dn; | ||
141 | } | ||
142 | |||
143 | /* Invariant: lock is taken */ | ||
144 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) | ||
145 | { | ||
146 | struct cflayer *up; | ||
147 | int idx = id % UP_CACHE_SIZE; | ||
148 | up = muxl->up_cache[idx]; | ||
149 | if (up == NULL || up->id != id) { | ||
150 | up = get_from_id(&muxl->srvl_list, id); | ||
151 | muxl->up_cache[idx] = up; | ||
152 | } | ||
153 | return up; | ||
154 | } | ||
155 | |||
156 | /* Invariant: lock is taken */ | ||
157 | static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) | ||
158 | { | ||
159 | struct cflayer *dn; | ||
160 | int idx = dev_info->id % DN_CACHE_SIZE; | ||
161 | dn = muxl->dn_cache[idx]; | ||
162 | if (dn == NULL || dn->id != dev_info->id) { | ||
163 | dn = get_from_id(&muxl->frml_list, dev_info->id); | ||
164 | muxl->dn_cache[idx] = dn; | ||
165 | } | ||
166 | return dn; | ||
167 | } | ||
168 | |||
169 | struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | ||
170 | { | ||
171 | struct cflayer *up; | ||
172 | struct cfmuxl *muxl = container_obj(layr); | ||
173 | spin_lock(&muxl->receive_lock); | ||
174 | up = get_up(muxl, id); | ||
175 | memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); | ||
176 | if (up != NULL) list_del(&up->node); | ||
177 | spin_unlock(&muxl->receive_lock); | ||
178 | return up; | ||
179 | } | ||
180 | |||
181 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
182 | { | ||
183 | int ret; | ||
184 | struct cfmuxl *muxl = container_obj(layr); | ||
185 | u8 id; | ||
186 | struct cflayer *up; | ||
187 | if (cfpkt_extr_head(pkt, &id, 1) < 0) { | ||
188 | pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__); | ||
189 | cfpkt_destroy(pkt); | ||
190 | return -EPROTO; | ||
191 | } | ||
192 | |||
193 | spin_lock(&muxl->receive_lock); | ||
194 | up = get_up(muxl, id); | ||
195 | spin_unlock(&muxl->receive_lock); | ||
196 | if (up == NULL) { | ||
197 | pr_info("CAIF: %s():Received data on unknown link ID = %d " | ||
198 | "(0x%x) up == NULL", __func__, id, id); | ||
199 | cfpkt_destroy(pkt); | ||
200 | /* | ||
201 | * Don't return an error: the modem misbehaves and sends flow-on | ||
202 | * before the link-setup response, so this must be tolerated. | ||
203 | */ | ||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | ret = up->receive(up, pkt); | ||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
212 | { | ||
213 | int ret; | ||
214 | struct cfmuxl *muxl = container_obj(layr); | ||
215 | u8 linkid; | ||
216 | struct cflayer *dn; | ||
217 | struct caif_payload_info *info = cfpkt_info(pkt); | ||
218 | dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); | ||
219 | if (dn == NULL) { | ||
220 | pr_warning("CAIF: %s(): Send data on unknown phy " | ||
221 | "ID = %d (0x%x)\n", | ||
222 | __func__, info->dev_info->id, info->dev_info->id); | ||
223 | return -ENOTCONN; | ||
224 | } | ||
225 | info->hdr_len += 1; | ||
226 | linkid = info->channel_id; | ||
227 | cfpkt_add_head(pkt, &linkid, 1); | ||
228 | ret = dn->transmit(dn, pkt); | ||
229 | /* Remove MUX protocol header upon error. */ | ||
230 | if (ret < 0) | ||
231 | cfpkt_extr_head(pkt, &linkid, 1); | ||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
236 | int phyid) | ||
237 | { | ||
238 | struct cfmuxl *muxl = container_obj(layr); | ||
239 | struct list_head *node; | ||
240 | struct cflayer *layer; | ||
241 | list_for_each(node, &muxl->srvl_list) { | ||
242 | layer = list_entry(node, struct cflayer, node); | ||
243 | if (cfsrvl_phyid_match(layer, phyid)) | ||
244 | layer->ctrlcmd(layer, ctrl, phyid); | ||
245 | } | ||
246 | } | ||
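
get_up()/get_dn() above front the linear list search with a small direct-mapped cache indexed by id % CACHE_SIZE; removals simply memset the whole cache. The lookup pattern in isolation, with a hypothetical layer type:

    #include <stddef.h>

    #define CACHE_SIZE 8

    struct layer {
            unsigned int id;
            struct layer *next;
    };

    struct mux {
            struct layer *list;              /* all registered layers */
            struct layer *cache[CACHE_SIZE]; /* direct-mapped by id */
    };

    static struct layer *lookup(struct mux *m, unsigned int id)
    {
            unsigned int idx = id % CACHE_SIZE;
            struct layer *l = m->cache[idx];

            if (l == NULL || l->id != id) {  /* miss: walk the list */
                    for (l = m->list; l != NULL; l = l->next)
                            if (l->id == id)
                                    break;
                    m->cache[idx] = l;       /* may cache NULL */
            }
            return l;
    }

As in the kernel code, a failed lookup stores NULL in the slot; the NULL test on the next access forces a re-walk, so negative results are never served from the cache.
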
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c new file mode 100644 index 000000000000..83fff2ff6658 --- /dev/null +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/skbuff.h> | ||
9 | #include <linux/hardirq.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | |||
12 | #define PKT_PREFIX CAIF_NEEDED_HEADROOM | ||
13 | #define PKT_POSTFIX CAIF_NEEDED_TAILROOM | ||
14 | #define PKT_LEN_WHEN_EXTENDING 128 | ||
15 | #define PKT_ERROR(pkt, errmsg) do { \ | ||
16 | cfpkt_priv(pkt)->erronous = true; \ | ||
17 | skb_reset_tail_pointer(&pkt->skb); \ | ||
18 | pr_warning("CAIF: " errmsg);\ | ||
19 | } while (0) | ||
20 | |||
21 | struct cfpktq { | ||
22 | struct sk_buff_head head; | ||
23 | atomic_t count; | ||
24 | /* Lock protects count updates */ | ||
25 | spinlock_t lock; | ||
26 | }; | ||
27 | |||
28 | /* | ||
29 | * net/caif/ is generic and does not | ||
30 | * understand SKB, so we do this typecast | ||
31 | */ | ||
32 | struct cfpkt { | ||
33 | struct sk_buff skb; | ||
34 | }; | ||
35 | |||
36 | /* Private data inside SKB */ | ||
37 | struct cfpkt_priv_data { | ||
38 | struct dev_info dev_info; | ||
39 | bool erronous; | ||
40 | }; | ||
41 | |||
42 | inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) | ||
43 | { | ||
44 | return (struct cfpkt_priv_data *) pkt->skb.cb; | ||
45 | } | ||
46 | |||
47 | inline bool is_erronous(struct cfpkt *pkt) | ||
48 | { | ||
49 | return cfpkt_priv(pkt)->erronous; | ||
50 | } | ||
51 | |||
52 | inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) | ||
53 | { | ||
54 | return &pkt->skb; | ||
55 | } | ||
56 | |||
57 | inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) | ||
58 | { | ||
59 | return (struct cfpkt *) skb; | ||
60 | } | ||
61 | |||
62 | |||
63 | struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) | ||
64 | { | ||
65 | struct cfpkt *pkt = skb_to_pkt(nativepkt); | ||
66 | cfpkt_priv(pkt)->erronous = false; | ||
67 | return pkt; | ||
68 | } | ||
69 | EXPORT_SYMBOL(cfpkt_fromnative); | ||
70 | |||
71 | void *cfpkt_tonative(struct cfpkt *pkt) | ||
72 | { | ||
73 | return (void *) pkt; | ||
74 | } | ||
75 | EXPORT_SYMBOL(cfpkt_tonative); | ||
76 | |||
77 | static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) | ||
78 | { | ||
79 | struct sk_buff *skb; | ||
80 | |||
81 | if (likely(in_interrupt())) | ||
82 | skb = alloc_skb(len + pfx, GFP_ATOMIC); | ||
83 | else | ||
84 | skb = alloc_skb(len + pfx, GFP_KERNEL); | ||
85 | |||
86 | if (unlikely(skb == NULL)) | ||
87 | return NULL; | ||
88 | |||
89 | skb_reserve(skb, pfx); | ||
90 | return skb_to_pkt(skb); | ||
91 | } | ||
92 | |||
93 | inline struct cfpkt *cfpkt_create(u16 len) | ||
94 | { | ||
95 | return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | ||
96 | } | ||
97 | EXPORT_SYMBOL(cfpkt_create); | ||
98 | |||
99 | void cfpkt_destroy(struct cfpkt *pkt) | ||
100 | { | ||
101 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
102 | kfree_skb(skb); | ||
103 | } | ||
104 | EXPORT_SYMBOL(cfpkt_destroy); | ||
105 | |||
106 | inline bool cfpkt_more(struct cfpkt *pkt) | ||
107 | { | ||
108 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
109 | return skb->len > 0; | ||
110 | } | ||
111 | EXPORT_SYMBOL(cfpkt_more); | ||
112 | |||
113 | int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) | ||
114 | { | ||
115 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
116 | if (skb_headlen(skb) >= len) { | ||
117 | memcpy(data, skb->data, len); | ||
118 | return 0; | ||
119 | } | ||
120 | return !cfpkt_extr_head(pkt, data, len) && | ||
121 | !cfpkt_add_head(pkt, data, len); | ||
122 | } | ||
123 | EXPORT_SYMBOL(cfpkt_peek_head); | ||
124 | |||
125 | int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) | ||
126 | { | ||
127 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
128 | u8 *from; | ||
129 | if (unlikely(is_erronous(pkt))) | ||
130 | return -EPROTO; | ||
131 | |||
132 | if (unlikely(len > skb->len)) { | ||
133 | PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n"); | ||
134 | return -EPROTO; | ||
135 | } | ||
136 | |||
137 | if (unlikely(len > skb_headlen(skb))) { | ||
138 | if (unlikely(skb_linearize(skb) != 0)) { | ||
139 | PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n"); | ||
140 | return -EPROTO; | ||
141 | } | ||
142 | } | ||
143 | from = skb_pull(skb, len); | ||
144 | from -= len; | ||
145 | memcpy(data, from, len); | ||
146 | return 0; | ||
147 | } | ||
148 | EXPORT_SYMBOL(cfpkt_extr_head); | ||
149 | |||
150 | int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) | ||
151 | { | ||
152 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
153 | u8 *data = dta; | ||
154 | u8 *from; | ||
155 | if (unlikely(is_erronous(pkt))) | ||
156 | return -EPROTO; | ||
157 | |||
158 | if (unlikely(skb_linearize(skb) != 0)) { | ||
159 | PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n"); | ||
160 | return -EPROTO; | ||
161 | } | ||
162 | if (unlikely(skb->data + len > skb_tail_pointer(skb))) { | ||
163 | PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n"); | ||
164 | return -EPROTO; | ||
165 | } | ||
166 | from = skb_tail_pointer(skb) - len; | ||
167 | skb_trim(skb, skb->len - len); | ||
168 | memcpy(data, from, len); | ||
169 | return 0; | ||
170 | } | ||
171 | EXPORT_SYMBOL(cfpkt_extr_trail); | ||
172 | |||
173 | int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) | ||
174 | { | ||
175 | return cfpkt_add_body(pkt, NULL, len); | ||
176 | } | ||
177 | EXPORT_SYMBOL(cfpkt_pad_trail); | ||
178 | |||
179 | int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) | ||
180 | { | ||
181 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
182 | struct sk_buff *lastskb; | ||
183 | u8 *to; | ||
184 | u16 addlen = 0; | ||
185 | |||
186 | |||
187 | if (unlikely(is_erronous(pkt))) | ||
188 | return -EPROTO; | ||
189 | |||
190 | lastskb = skb; | ||
191 | |||
192 | /* Check whether we need to add space at the tail */ | ||
193 | if (unlikely(skb_tailroom(skb) < len)) { | ||
194 | if (likely(len < PKT_LEN_WHEN_EXTENDING)) | ||
195 | addlen = PKT_LEN_WHEN_EXTENDING; | ||
196 | else | ||
197 | addlen = len; | ||
198 | } | ||
199 | |||
200 | /* Check whether we need to change the SKB before writing to the tail */ | ||
201 | if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) { | ||
202 | |||
203 | /* Make sure data is writable */ | ||
204 | if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { | ||
205 | PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n"); | ||
206 | return -EPROTO; | ||
207 | } | ||
208 | /* | ||
209 | * Is the SKB non-linear after skb_cow_data()? If so, we are | ||
210 | * going to add data to the last SKB, so we need to adjust | ||
211 | * lengths of the top SKB. | ||
212 | */ | ||
213 | if (lastskb != skb) { | ||
214 | pr_warning("CAIF: %s(): Packet is non-linear\n", | ||
215 | __func__); | ||
216 | skb->len += len; | ||
217 | skb->data_len += len; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | /* All set to put the last SKB and optionally write data there. */ | ||
222 | to = skb_put(lastskb, len); | ||
223 | if (likely(data)) | ||
224 | memcpy(to, data, len); | ||
225 | return 0; | ||
226 | } | ||
227 | EXPORT_SYMBOL(cfpkt_add_body); | ||
228 | |||
229 | inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) | ||
230 | { | ||
231 | return cfpkt_add_body(pkt, &data, 1); | ||
232 | } | ||
233 | EXPORT_SYMBOL(cfpkt_addbdy); | ||
234 | |||
235 | int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) | ||
236 | { | ||
237 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
238 | struct sk_buff *lastskb; | ||
239 | u8 *to; | ||
240 | const u8 *data = data2; | ||
241 | if (unlikely(is_erronous(pkt))) | ||
242 | return -EPROTO; | ||
243 | if (unlikely(skb_headroom(skb) < len)) { | ||
244 | PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n"); | ||
245 | return -EPROTO; | ||
246 | } | ||
247 | |||
248 | /* Make sure data is writable */ | ||
249 | if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { | ||
250 | PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); | ||
251 | return -EPROTO; | ||
252 | } | ||
253 | |||
254 | to = skb_push(skb, len); | ||
255 | memcpy(to, data, len); | ||
256 | return 0; | ||
257 | } | ||
258 | EXPORT_SYMBOL(cfpkt_add_head); | ||
259 | |||
260 | inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) | ||
261 | { | ||
262 | return cfpkt_add_body(pkt, data, len); | ||
263 | } | ||
264 | EXPORT_SYMBOL(cfpkt_add_trail); | ||
265 | |||
266 | inline u16 cfpkt_getlen(struct cfpkt *pkt) | ||
267 | { | ||
268 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
269 | return skb->len; | ||
270 | } | ||
271 | EXPORT_SYMBOL(cfpkt_getlen); | ||
272 | |||
273 | inline u16 cfpkt_iterate(struct cfpkt *pkt, | ||
274 | u16 (*iter_func)(u16, void *, u16), | ||
275 | u16 data) | ||
276 | { | ||
277 | /* | ||
278 | * Don't care about the performance hit of linearizing; | ||
279 | * checksumming should not be used on high-speed interfaces anyway. | ||
280 | */ | ||
281 | if (unlikely(is_erronous(pkt))) | ||
282 | return -EPROTO; | ||
283 | if (unlikely(skb_linearize(&pkt->skb) != 0)) { | ||
284 | PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n"); | ||
285 | return -EPROTO; | ||
286 | } | ||
287 | return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); | ||
288 | } | ||
289 | EXPORT_SYMBOL(cfpkt_iterate); | ||
290 | |||
291 | int cfpkt_setlen(struct cfpkt *pkt, u16 len) | ||
292 | { | ||
293 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
294 | |||
295 | |||
296 | if (unlikely(is_erronous(pkt))) | ||
297 | return -EPROTO; | ||
298 | |||
299 | if (likely(len <= skb->len)) { | ||
300 | if (unlikely(skb->data_len)) | ||
301 | ___pskb_trim(skb, len); | ||
302 | else | ||
303 | skb_trim(skb, len); | ||
304 | |||
305 | return cfpkt_getlen(pkt); | ||
306 | } | ||
307 | |||
308 | /* Need to expand SKB */ | ||
309 | if (unlikely(cfpkt_pad_trail(pkt, len - skb->len) < 0)) | ||
310 | PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n"); | ||
311 | |||
312 | return cfpkt_getlen(pkt); | ||
313 | } | ||
314 | EXPORT_SYMBOL(cfpkt_setlen); | ||
315 | |||
316 | struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) | ||
317 | { | ||
318 | struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | ||
318 | if (pkt != NULL && data != NULL) | ||
320 | cfpkt_add_body(pkt, data, len); | ||
321 | return pkt; | ||
322 | } | ||
323 | EXPORT_SYMBOL(cfpkt_create_uplink); | ||
324 | |||
325 | struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, | ||
326 | struct cfpkt *addpkt, | ||
327 | u16 expectlen) | ||
328 | { | ||
329 | struct sk_buff *dst = pkt_to_skb(dstpkt); | ||
330 | struct sk_buff *add = pkt_to_skb(addpkt); | ||
331 | u16 addlen = skb_headlen(add); | ||
332 | u16 neededtailspace; | ||
333 | struct sk_buff *tmp; | ||
334 | u16 dstlen; | ||
335 | u16 createlen; | ||
336 | if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { | ||
337 | cfpkt_destroy(addpkt); | ||
338 | return dstpkt; | ||
339 | } | ||
340 | if (expectlen > addlen) | ||
341 | neededtailspace = expectlen; | ||
342 | else | ||
343 | neededtailspace = addlen; | ||
344 | |||
345 | if (dst->tail + neededtailspace > dst->end) { | ||
346 | /* Create a duplicate of 'dst' with more tail space */ | ||
347 | dstlen = skb_headlen(dst); | ||
348 | createlen = dstlen + neededtailspace; | ||
349 | tmp = pkt_to_skb( | ||
350 | cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); | ||
351 | if (!tmp) | ||
352 | return NULL; | ||
353 | skb_set_tail_pointer(tmp, dstlen); | ||
354 | tmp->len = dstlen; | ||
355 | memcpy(tmp->data, dst->data, dstlen); | ||
356 | cfpkt_destroy(dstpkt); | ||
357 | dst = tmp; | ||
358 | } | ||
359 | memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add)); | ||
360 | cfpkt_destroy(addpkt); | ||
361 | dst->tail += addlen; | ||
362 | dst->len += addlen; | ||
363 | return skb_to_pkt(dst); | ||
364 | } | ||
365 | EXPORT_SYMBOL(cfpkt_append); | ||
366 | |||
367 | struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) | ||
368 | { | ||
369 | struct sk_buff *skb2; | ||
370 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
371 | u8 *split = skb->data + pos; | ||
372 | u16 len2nd = skb_tail_pointer(skb) - split; | ||
373 | |||
374 | if (unlikely(is_erronous(pkt))) | ||
375 | return NULL; | ||
376 | |||
377 | if (skb->data + pos > skb_tail_pointer(skb)) { | ||
378 | PKT_ERROR(pkt, | ||
379 | "cfpkt_split: trying to split beyond end of packet"); | ||
380 | return NULL; | ||
381 | } | ||
382 | |||
383 | /* Create a new packet for the second part of the data */ | ||
384 | skb2 = pkt_to_skb( | ||
385 | cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, | ||
386 | PKT_PREFIX)); | ||
387 | |||
388 | if (skb2 == NULL) | ||
389 | return NULL; | ||
390 | |||
391 | /* Reduce the length of the original packet */ | ||
392 | skb_set_tail_pointer(skb, pos); | ||
393 | skb->len = pos; | ||
394 | |||
395 | memcpy(skb2->data, split, len2nd); | ||
396 | skb2->tail += len2nd; | ||
397 | skb2->len += len2nd; | ||
398 | return skb_to_pkt(skb2); | ||
399 | } | ||
400 | EXPORT_SYMBOL(cfpkt_split); | ||
401 | |||
402 | char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen) | ||
403 | { | ||
404 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
405 | char *p = buf; | ||
406 | int i; | ||
407 | |||
408 | /* | ||
409 | * Sanity check buffer length, it needs to be at least as large as | ||
410 | * the header info: ~=50+ bytes | ||
411 | */ | ||
412 | if (buflen < 50) | ||
413 | return NULL; | ||
414 | |||
415 | snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [", | ||
416 | is_erronous(pkt) ? "ERRONOUS-SKB" : | ||
417 | (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"), | ||
418 | skb, | ||
419 | (long) skb->len, | ||
420 | (long) (skb_tail_pointer(skb) - skb->data), | ||
421 | (long) skb->data_len, | ||
422 | (long) (skb->data - skb->head), | ||
423 | (long) (skb_tail_pointer(skb) - skb->head)); | ||
424 | p = buf + strlen(buf); | ||
425 | |||
426 | for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) { | ||
427 | if (p > buf + buflen - 10) { | ||
428 | sprintf(p, "..."); | ||
429 | p = buf + strlen(buf); | ||
430 | break; | ||
431 | } | ||
432 | sprintf(p, "%02x,", skb->data[i]); | ||
433 | p = buf + strlen(buf); | ||
434 | } | ||
435 | sprintf(p, "]\n"); | ||
436 | return buf; | ||
437 | } | ||
438 | EXPORT_SYMBOL(cfpkt_log_pkt); | ||
439 | |||
440 | int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen) | ||
441 | { | ||
442 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
443 | struct sk_buff *lastskb; | ||
444 | |||
445 | caif_assert(buf != NULL); | ||
446 | if (unlikely(is_erronous(pkt))) | ||
447 | return -EPROTO; | ||
448 | /* Make sure SKB is writable */ | ||
449 | if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { | ||
450 | PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n"); | ||
451 | return -EPROTO; | ||
452 | } | ||
453 | |||
454 | if (unlikely(skb_linearize(skb) != 0)) { | ||
455 | PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n"); | ||
456 | return -EPROTO; | ||
457 | } | ||
458 | |||
459 | if (unlikely(skb_tailroom(skb) < buflen)) { | ||
460 | PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n"); | ||
461 | return -EPROTO; | ||
462 | } | ||
463 | |||
464 | *buf = skb_put(skb, buflen); | ||
465 | return 1; | ||
466 | } | ||
467 | EXPORT_SYMBOL(cfpkt_raw_append); | ||
468 | |||
469 | int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen) | ||
470 | { | ||
471 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
472 | |||
473 | caif_assert(buf != NULL); | ||
474 | if (unlikely(is_erronous(pkt))) | ||
475 | return -EPROTO; | ||
476 | |||
477 | if (unlikely(buflen > skb->len)) { | ||
478 | PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large " | ||
479 | "- failed\n"); | ||
480 | return -EPROTO; | ||
481 | } | ||
482 | |||
483 | if (unlikely(buflen > skb_headlen(skb))) { | ||
484 | if (unlikely(skb_linearize(skb) != 0)) { | ||
485 | PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n"); | ||
486 | return -EPROTO; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | *buf = skb->data; | ||
491 | skb_pull(skb, buflen); | ||
492 | |||
493 | return 1; | ||
494 | } | ||
495 | EXPORT_SYMBOL(cfpkt_raw_extract); | ||
496 | |||
497 | inline bool cfpkt_erroneous(struct cfpkt *pkt) | ||
498 | { | ||
499 | return cfpkt_priv(pkt)->erronous; | ||
500 | } | ||
501 | EXPORT_SYMBOL(cfpkt_erroneous); | ||
502 | |||
503 | struct cfpktq *cfpktq_create(void) | ||
504 | { | ||
505 | struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC); | ||
506 | if (!q) | ||
507 | return NULL; | ||
508 | skb_queue_head_init(&q->head); | ||
509 | atomic_set(&q->count, 0); | ||
510 | spin_lock_init(&q->lock); | ||
511 | return q; | ||
512 | } | ||
513 | EXPORT_SYMBOL(cfpktq_create); | ||
514 | |||
515 | void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio) | ||
516 | { | ||
517 | atomic_inc(&pktq->count); | ||
518 | spin_lock(&pktq->lock); | ||
519 | skb_queue_tail(&pktq->head, pkt_to_skb(pkt)); | ||
520 | spin_unlock(&pktq->lock); | ||
521 | |||
522 | } | ||
523 | EXPORT_SYMBOL(cfpkt_queue); | ||
524 | |||
525 | struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq) | ||
526 | { | ||
527 | struct cfpkt *tmp; | ||
528 | spin_lock(&pktq->lock); | ||
529 | tmp = skb_to_pkt(skb_peek(&pktq->head)); | ||
530 | spin_unlock(&pktq->lock); | ||
531 | return tmp; | ||
532 | } | ||
533 | EXPORT_SYMBOL(cfpkt_qpeek); | ||
534 | |||
535 | struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq) | ||
536 | { | ||
537 | struct cfpkt *pkt; | ||
538 | spin_lock(&pktq->lock); | ||
539 | pkt = skb_to_pkt(skb_dequeue(&pktq->head)); | ||
540 | if (pkt) { | ||
541 | atomic_dec(&pktq->count); | ||
542 | caif_assert(atomic_read(&pktq->count) >= 0); | ||
543 | } | ||
544 | spin_unlock(&pktq->lock); | ||
545 | return pkt; | ||
546 | } | ||
547 | EXPORT_SYMBOL(cfpkt_dequeue); | ||
548 | |||
549 | int cfpkt_qcount(struct cfpktq *pktq) | ||
550 | { | ||
551 | return atomic_read(&pktq->count); | ||
552 | } | ||
553 | EXPORT_SYMBOL(cfpkt_qcount); | ||
554 | |||
555 | struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt) | ||
556 | { | ||
557 | struct cfpkt *clone; | ||
558 | clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC)); | ||
559 | /* Free original packet. */ | ||
560 | cfpkt_destroy(pkt); | ||
561 | if (!clone) | ||
562 | return NULL; | ||
563 | return clone; | ||
564 | } | ||
565 | EXPORT_SYMBOL(cfpkt_clone_release); | ||
566 | |||
567 | struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) | ||
568 | { | ||
569 | return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; | ||
570 | } | ||
571 | EXPORT_SYMBOL(cfpkt_info); | ||
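
cfpkt_create() above reserves CAIF_NEEDED_HEADROOM in front of the payload so each layer on the transmit path can prepend its header with a cheap pointer move (cfpkt_add_head()) instead of a copy - the same discipline as skb_reserve()/skb_push(). A minimal flat-buffer model of that discipline; struct buf and its functions are illustrative, not the SKB-backed implementation above:

    #include <stdint.h>
    #include <string.h>

    struct buf {
            uint8_t *head, *data, *tail, *end;
    };

    /* cfpkt_create_pfx(): reserve 'pfx' bytes of headroom. */
    static void buf_init(struct buf *b, uint8_t *mem, size_t size,
                         size_t pfx)
    {
            b->head = mem;
            b->data = b->tail = mem + pfx;
            b->end = mem + size;
    }

    /* cfpkt_add_head(): prepend by moving 'data' backwards. */
    static int buf_push(struct buf *b, const void *hdr, size_t len)
    {
            if ((size_t)(b->data - b->head) < len)
                    return -1;      /* no headroom: -EPROTO above */
            b->data -= len;
            memcpy(b->data, hdr, len);
            return 0;
    }

    /* cfpkt_extr_head(): consume from the front. */
    static int buf_pull(struct buf *b, void *out, size_t len)
    {
            if ((size_t)(b->tail - b->data) < len)
                    return -1;      /* read beyond end of packet */
            memcpy(out, b->data, len);
            b->data += len;
            return 0;
    }
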
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c new file mode 100644 index 000000000000..cd2830fec935 --- /dev/null +++ b/net/caif/cfrfml.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cfpkt.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
15 | |||
16 | #define RFM_SEGMENTATION_BIT 0x01 | ||
17 | #define RFM_PAYLOAD 0x00 | ||
18 | #define RFM_CMD_BIT 0x80 | ||
19 | #define RFM_FLOW_OFF 0x81 | ||
20 | #define RFM_FLOW_ON 0x80 | ||
21 | #define RFM_SET_PIN 0x82 | ||
22 | #define RFM_CTRL_PKT_SIZE 1 | ||
23 | |||
24 | static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
25 | static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
26 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl); | ||
27 | |||
28 | struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info) | ||
29 | { | ||
30 | struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
31 | if (!rfm) { | ||
32 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
33 | return NULL; | ||
34 | } | ||
35 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
36 | memset(rfm, 0, sizeof(struct cfsrvl)); | ||
37 | cfsrvl_init(rfm, channel_id, dev_info); | ||
38 | rfm->layer.modemcmd = cfservl_modemcmd; | ||
39 | rfm->layer.receive = cfrfml_receive; | ||
40 | rfm->layer.transmit = cfrfml_transmit; | ||
41 | snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id); | ||
42 | return &rfm->layer; | ||
43 | } | ||
44 | |||
45 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
46 | { | ||
47 | return -EPROTO; | ||
48 | } | ||
49 | |||
50 | static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
51 | { | ||
52 | u8 tmp; | ||
53 | bool segmented; | ||
54 | int ret; | ||
55 | caif_assert(layr->up != NULL); | ||
56 | caif_assert(layr->receive != NULL); | ||
57 | |||
58 | /* | ||
59 | * RFM is taking care of segmentation and stripping of | ||
60 | * segmentation bit. | ||
61 | */ | ||
62 | if (cfpkt_extr_head(pkt, &tmp, 1) < 0) { | ||
63 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return -EPROTO; | ||
66 | } | ||
67 | segmented = tmp & RFM_SEGMENTATION_BIT; | ||
68 | caif_assert(!segmented); | ||
69 | |||
70 | ret = layr->up->receive(layr->up, pkt); | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
75 | { | ||
76 | u8 tmp = 0; | ||
77 | int ret; | ||
78 | struct cfsrvl *service = container_obj(layr); | ||
79 | |||
80 | caif_assert(layr->dn != NULL); | ||
81 | caif_assert(layr->dn->transmit != NULL); | ||
82 | |||
83 | if (!cfsrvl_ready(service, &ret)) | ||
84 | return ret; | ||
85 | |||
86 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
87 | pr_err("CAIF: %s(): Packet too large - size=%d\n", | ||
88 | __func__, cfpkt_getlen(pkt)); | ||
89 | return -EOVERFLOW; | ||
90 | } | ||
91 | if (cfpkt_add_head(pkt, &tmp, 1) < 0) { | ||
92 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
93 | return -EPROTO; | ||
94 | } | ||
95 | |||
96 | /* Add info for MUX-layer to route the packet out. */ | ||
97 | cfpkt_info(pkt)->channel_id = service->layer.id; | ||
98 | /* | ||
99 | * To optimize alignment, we add up the size of CAIF header before | ||
100 | * payload. | ||
101 | */ | ||
102 | cfpkt_info(pkt)->hdr_len = 1; | ||
103 | cfpkt_info(pkt)->dev_info = &service->dev_info; | ||
104 | ret = layr->dn->transmit(layr->dn, pkt); | ||
105 | if (ret < 0) | ||
106 | cfpkt_extr_head(pkt, &tmp, 1); | ||
107 | return ret; | ||
108 | } | ||
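cfrfml_transmit() above follows the same prepend/rollback discipline used by the other CAIF service layers: push the 1-byte service header, hand the packet down, and pop the header again only if the lower layer refuses it, so the caller can retry the unmodified packet. A condensed sketch of the pattern (illustration only, not code from the patch):

	static int example_srvl_transmit(struct cflayer *dn, struct cfpkt *pkt)
	{
		u8 hdr = 0;
		int ret;

		if (cfpkt_add_head(pkt, &hdr, 1) < 0)
			return -EPROTO;		/* packet already marked erroneous */
		ret = dn->transmit(dn, pkt);
		if (ret < 0)
			cfpkt_extr_head(pkt, &hdr, 1);	/* undo the header on failure */
		return ret;
	}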
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c new file mode 100644 index 000000000000..06029ea2da2f --- /dev/null +++ b/net/caif/cfserl.c | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | #include <net/caif/cfserl.h> | ||
13 | |||
14 | #define container_obj(layr) ((struct cfserl *) layr) | ||
15 | |||
16 | #define CFSERL_STX 0x02 | ||
17 | #define CAIF_MINIMUM_PACKET_SIZE 4 | ||
18 | struct cfserl { | ||
19 | struct cflayer layer; | ||
20 | struct cfpkt *incomplete_frm; | ||
21 | /* Protects parallel processing of incoming packets */ | ||
22 | spinlock_t sync; | ||
23 | bool usestx; | ||
24 | }; | ||
25 | #define STXLEN(layr) (layr->usestx ? 1 : 0) | ||
26 | |||
27 | static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
28 | static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
29 | static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
30 | int phyid); | ||
31 | |||
32 | struct cflayer *cfserl_create(int type, int instance, bool use_stx) | ||
33 | { | ||
34 | struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); | ||
35 | if (!this) { | ||
36 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
37 | return NULL; | ||
38 | } | ||
39 | caif_assert(offsetof(struct cfserl, layer) == 0); | ||
40 | memset(this, 0, sizeof(struct cfserl)); | ||
41 | this->layer.receive = cfserl_receive; | ||
42 | this->layer.transmit = cfserl_transmit; | ||
43 | this->layer.ctrlcmd = cfserl_ctrlcmd; | ||
44 | this->layer.type = type; | ||
45 | this->usestx = use_stx; | ||
46 | spin_lock_init(&this->sync); | ||
47 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); | ||
48 | return &this->layer; | ||
49 | } | ||
50 | |||
51 | static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) | ||
52 | { | ||
53 | struct cfserl *layr = container_obj(l); | ||
54 | u16 pkt_len; | ||
55 | struct cfpkt *pkt = NULL; | ||
56 | struct cfpkt *tail_pkt = NULL; | ||
57 | u8 tmp8; | ||
58 | u16 tmp; | ||
59 | u8 stx = CFSERL_STX; | ||
60 | int ret; | ||
61 | u16 expectlen = 0; | ||
62 | caif_assert(newpkt != NULL); | ||
63 | spin_lock(&layr->sync); | ||
64 | |||
65 | if (layr->incomplete_frm != NULL) { | ||
66 | |||
67 | layr->incomplete_frm = | ||
68 | cfpkt_append(layr->incomplete_frm, newpkt, expectlen); | ||
69 | pkt = layr->incomplete_frm; | ||
70 | } else { | ||
71 | pkt = newpkt; | ||
72 | } | ||
73 | layr->incomplete_frm = NULL; | ||
74 | |||
75 | do { | ||
76 | /* Search for STX at start of pkt if STX is used */ | ||
77 | if (layr->usestx) { | ||
78 | cfpkt_extr_head(pkt, &tmp8, 1); | ||
79 | if (tmp8 != CFSERL_STX) { | ||
80 | while (cfpkt_more(pkt) | ||
81 | && tmp8 != CFSERL_STX) { | ||
82 | cfpkt_extr_head(pkt, &tmp8, 1); | ||
83 | } | ||
84 | if (!cfpkt_more(pkt)) { | ||
85 | cfpkt_destroy(pkt); | ||
86 | layr->incomplete_frm = NULL; | ||
87 | spin_unlock(&layr->sync); | ||
88 | return -EPROTO; | ||
89 | } | ||
90 | } | ||
91 | } | ||
92 | |||
93 | pkt_len = cfpkt_getlen(pkt); | ||
94 | |||
95 | /* | ||
96 | * pkt_len is the accumulated length of the packet data | ||
97 | * we have received so far. | ||
98 | * Exit if frame doesn't hold length. | ||
99 | */ | ||
100 | |||
101 | if (pkt_len < 2) { | ||
102 | if (layr->usestx) | ||
103 | cfpkt_add_head(pkt, &stx, 1); | ||
104 | layr->incomplete_frm = pkt; | ||
105 | spin_unlock(&layr->sync); | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Find length of frame. | ||
111 | * expectlen is the length we need for a full frame. | ||
112 | */ | ||
113 | cfpkt_peek_head(pkt, &tmp, 2); | ||
114 | expectlen = le16_to_cpu(tmp) + 2; | ||
115 | /* | ||
116 | * Frame error handling | ||
117 | */ | ||
118 | if (expectlen < CAIF_MINIMUM_PACKET_SIZE | ||
119 | || expectlen > CAIF_MAX_FRAMESIZE) { | ||
120 | if (!layr->usestx) { | ||
121 | if (pkt != NULL) | ||
122 | cfpkt_destroy(pkt); | ||
123 | layr->incomplete_frm = NULL; | ||
124 | expectlen = 0; | ||
125 | spin_unlock(&layr->sync); | ||
126 | return -EPROTO; | ||
127 | } | ||
128 | continue; | ||
129 | } | ||
130 | |||
131 | if (pkt_len < expectlen) { | ||
132 | /* Too little received data */ | ||
133 | if (layr->usestx) | ||
134 | cfpkt_add_head(pkt, &stx, 1); | ||
135 | layr->incomplete_frm = pkt; | ||
136 | spin_unlock(&layr->sync); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Enough data for at least one frame. | ||
142 | * Split the frame, if too long | ||
143 | */ | ||
144 | if (pkt_len > expectlen) | ||
145 | tail_pkt = cfpkt_split(pkt, expectlen); | ||
146 | else | ||
147 | tail_pkt = NULL; | ||
148 | |||
149 | /* Send the first part of packet upwards.*/ | ||
150 | spin_unlock(&layr->sync); | ||
151 | ret = layr->layer.up->receive(layr->layer.up, pkt); | ||
152 | spin_lock(&layr->sync); | ||
153 | if (ret == -EILSEQ) { | ||
154 | if (layr->usestx) { | ||
155 | if (tail_pkt != NULL) | ||
156 | pkt = cfpkt_append(pkt, tail_pkt, 0); | ||
157 | |||
158 | /* Start search for next STX if frame failed */ | ||
159 | continue; | ||
160 | } else { | ||
161 | cfpkt_destroy(pkt); | ||
162 | pkt = NULL; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | pkt = tail_pkt; | ||
167 | |||
168 | } while (pkt != NULL); | ||
169 | |||
170 | spin_unlock(&layr->sync); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) | ||
175 | { | ||
176 | struct cfserl *layr = container_obj(layer); | ||
177 | int ret; | ||
178 | u8 tmp8 = CFSERL_STX; | ||
179 | if (layr->usestx) | ||
180 | cfpkt_add_head(newpkt, &tmp8, 1); | ||
181 | ret = layer->dn->transmit(layer->dn, newpkt); | ||
182 | if (ret < 0) | ||
183 | cfpkt_extr_head(newpkt, &tmp8, 1); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
189 | int phyid) | ||
190 | { | ||
191 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
192 | } | ||
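For reference, a worked example of the frame layout cfserl_receive() reassembles, assuming usestx and a 6-byte payload. This is an illustration derived from the parser above (the "+ 2" implies the length field counts only the bytes that follow it), not normative protocol documentation:

	/*
	 *   0x02 | 0x06 0x00 | p0 p1 p2 p3 p4 p5
	 *   STX  | LE length |      payload
	 *
	 * After the STX is consumed, pkt_len starts at 8 and
	 * expectlen = le16_to_cpu(0x0006) + 2 = 8, so exactly one frame is
	 * passed upwards and no tail packet needs to be split off.
	 */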
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c new file mode 100644 index 000000000000..d470c51c6431 --- /dev/null +++ b/net/caif/cfsrvl.c | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define SRVL_CTRL_PKT_SIZE 1 | ||
16 | #define SRVL_FLOW_OFF 0x81 | ||
17 | #define SRVL_FLOW_ON 0x80 | ||
18 | #define SRVL_SET_PIN 0x82 | ||
20 | |||
21 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
22 | |||
23 | static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
24 | int phyid) | ||
25 | { | ||
26 | struct cfsrvl *service = container_obj(layr); | ||
27 | caif_assert(layr->up != NULL); | ||
28 | caif_assert(layr->up->ctrlcmd != NULL); | ||
29 | switch (ctrl) { | ||
30 | case CAIF_CTRLCMD_INIT_RSP: | ||
31 | service->open = true; | ||
32 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
33 | break; | ||
34 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
35 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
36 | service->open = false; | ||
37 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
38 | break; | ||
39 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: | ||
40 | if (phyid != service->dev_info.id) | ||
41 | break; | ||
42 | if (service->modem_flow_on) | ||
43 | layr->up->ctrlcmd(layr->up, | ||
44 | CAIF_CTRLCMD_FLOW_OFF_IND, phyid); | ||
45 | service->phy_flow_on = false; | ||
46 | break; | ||
47 | case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND: | ||
48 | if (phyid != service->dev_info.id) | ||
49 | return; | ||
50 | if (service->modem_flow_on) { | ||
51 | layr->up->ctrlcmd(layr->up, | ||
52 | CAIF_CTRLCMD_FLOW_ON_IND, | ||
53 | phyid); | ||
54 | } | ||
55 | service->phy_flow_on = true; | ||
56 | break; | ||
57 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
58 | if (service->phy_flow_on) { | ||
59 | layr->up->ctrlcmd(layr->up, | ||
60 | CAIF_CTRLCMD_FLOW_OFF_IND, phyid); | ||
61 | } | ||
62 | service->modem_flow_on = false; | ||
63 | break; | ||
64 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
65 | if (service->phy_flow_on) { | ||
66 | layr->up->ctrlcmd(layr->up, | ||
67 | CAIF_CTRLCMD_FLOW_ON_IND, phyid); | ||
68 | } | ||
69 | service->modem_flow_on = true; | ||
70 | break; | ||
71 | case _CAIF_CTRLCMD_PHYIF_DOWN_IND: | ||
72 | /* In case interface is down, let's fake a remote shutdown */ | ||
73 | layr->up->ctrlcmd(layr->up, | ||
74 | CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid); | ||
75 | break; | ||
76 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
77 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
78 | break; | ||
79 | default: | ||
80 | pr_warning("CAIF: %s(): " | ||
81 | "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl); | ||
82 | /* We have both modem and phy flow on, send flow on */ | ||
83 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
84 | service->phy_flow_on = true; | ||
85 | break; | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
90 | { | ||
91 | struct cfsrvl *service = container_obj(layr); | ||
92 | caif_assert(layr != NULL); | ||
93 | caif_assert(layr->dn != NULL); | ||
94 | caif_assert(layr->dn->transmit != NULL); | ||
95 | switch (ctrl) { | ||
96 | case CAIF_MODEMCMD_FLOW_ON_REQ: | ||
97 | { | ||
98 | struct cfpkt *pkt; | ||
99 | struct caif_payload_info *info; | ||
100 | u8 flow_on = SRVL_FLOW_ON; | ||
101 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | ||
102 | if (!pkt) { | ||
103 | pr_warning("CAIF: %s(): Out of memory\n", | ||
104 | __func__); | ||
105 | return -ENOMEM; | ||
106 | } | ||
107 | |||
108 | if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { | ||
109 | pr_err("CAIF: %s(): Packet is erroneous!\n", | ||
110 | __func__); | ||
111 | cfpkt_destroy(pkt); | ||
112 | return -EPROTO; | ||
113 | } | ||
114 | info = cfpkt_info(pkt); | ||
115 | info->channel_id = service->layer.id; | ||
116 | info->hdr_len = 1; | ||
117 | info->dev_info = &service->dev_info; | ||
118 | return layr->dn->transmit(layr->dn, pkt); | ||
119 | } | ||
120 | case CAIF_MODEMCMD_FLOW_OFF_REQ: | ||
121 | { | ||
122 | struct cfpkt *pkt; | ||
123 | struct caif_payload_info *info; | ||
124 | u8 flow_off = SRVL_FLOW_OFF; | ||
125 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | ||
| if (!pkt) { | ||
| pr_warning("CAIF: %s(): Out of memory\n", | ||
| __func__); | ||
| return -ENOMEM; | ||
| } | ||
126 | if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { | ||
127 | pr_err("CAIF: %s(): Packet is erroneous!\n", | ||
128 | __func__); | ||
129 | cfpkt_destroy(pkt); | ||
130 | return -EPROTO; | ||
131 | } | ||
132 | info = cfpkt_info(pkt); | ||
133 | info->channel_id = service->layer.id; | ||
134 | info->hdr_len = 1; | ||
135 | info->dev_info = &service->dev_info; | ||
136 | return layr->dn->transmit(layr->dn, pkt); | ||
137 | } | ||
138 | default: | ||
139 | break; | ||
140 | } | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | void cfservl_destroy(struct cflayer *layer) | ||
145 | { | ||
146 | kfree(layer); | ||
147 | } | ||
148 | |||
149 | void cfsrvl_init(struct cfsrvl *service, | ||
150 | u8 channel_id, | ||
151 | struct dev_info *dev_info) | ||
152 | { | ||
153 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
154 | service->open = false; | ||
155 | service->modem_flow_on = true; | ||
156 | service->phy_flow_on = true; | ||
157 | service->layer.id = channel_id; | ||
158 | service->layer.ctrlcmd = cfservl_ctrlcmd; | ||
159 | service->layer.modemcmd = cfservl_modemcmd; | ||
160 | service->dev_info = *dev_info; | ||
161 | } | ||
162 | |||
163 | bool cfsrvl_ready(struct cfsrvl *service, int *err) | ||
164 | { | ||
165 | if (service->open && service->modem_flow_on && service->phy_flow_on) | ||
166 | return true; | ||
167 | if (!service->open) { | ||
168 | *err = -ENOTCONN; | ||
169 | return false; | ||
170 | } | ||
171 | caif_assert(!(service->modem_flow_on && service->phy_flow_on)); | ||
172 | *err = -EAGAIN; | ||
173 | return false; | ||
174 | } | ||
| |||
175 | u8 cfsrvl_getphyid(struct cflayer *layer) | ||
176 | { | ||
177 | struct cfsrvl *servl = container_obj(layer); | ||
178 | return servl->dev_info.id; | ||
179 | } | ||
180 | |||
181 | bool cfsrvl_phyid_match(struct cflayer *layer, int phyid) | ||
182 | { | ||
183 | struct cfsrvl *servl = container_obj(layer); | ||
184 | return servl->dev_info.id == phyid; | ||
185 | } | ||
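cfsrvl_ready() deliberately maps its two failure modes to distinct errors, so transmit paths can tell "never connected" apart from "connected but flow-controlled". A minimal caller sketch (illustration only; example_tx_gate is a hypothetical helper):

	static int example_tx_gate(struct cfsrvl *service)
	{
		int err;

		if (!cfsrvl_ready(service, &err))
			return err;	/* -ENOTCONN (closed) or -EAGAIN (flow off) */
		return 0;		/* safe to build and transmit a packet */
	}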
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c new file mode 100644 index 000000000000..5fd2c9ea8b42 --- /dev/null +++ b/net/caif/cfutill.c | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
16 | #define UTIL_PAYLOAD 0x00 | ||
17 | #define UTIL_CMD_BIT 0x80 | ||
18 | #define UTIL_REMOTE_SHUTDOWN 0x82 | ||
19 | #define UTIL_FLOW_OFF 0x81 | ||
20 | #define UTIL_FLOW_ON 0x80 | ||
21 | #define UTIL_CTRL_PKT_SIZE 1 | ||
22 | static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
24 | |||
25 | struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info) | ||
26 | { | ||
27 | struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
28 | if (!util) { | ||
29 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
30 | return NULL; | ||
31 | } | ||
32 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
33 | memset(util, 0, sizeof(struct cfsrvl)); | ||
34 | cfsrvl_init(util, channel_id, dev_info); | ||
35 | util->layer.receive = cfutill_receive; | ||
36 | util->layer.transmit = cfutill_transmit; | ||
37 | snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1"); | ||
38 | return &util->layer; | ||
39 | } | ||
40 | |||
41 | static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
42 | { | ||
43 | u8 cmd = -1; | ||
44 | struct cfsrvl *service = container_obj(layr); | ||
45 | caif_assert(layr != NULL); | ||
46 | caif_assert(layr->up != NULL); | ||
47 | caif_assert(layr->up->receive != NULL); | ||
48 | caif_assert(layr->up->ctrlcmd != NULL); | ||
49 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
50 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
51 | cfpkt_destroy(pkt); | ||
52 | return -EPROTO; | ||
53 | } | ||
54 | |||
55 | switch (cmd) { | ||
56 | case UTIL_PAYLOAD: | ||
57 | return layr->up->receive(layr->up, pkt); | ||
58 | case UTIL_FLOW_OFF: | ||
59 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
60 | cfpkt_destroy(pkt); | ||
61 | return 0; | ||
62 | case UTIL_FLOW_ON: | ||
63 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return 0; | ||
66 | case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */ | ||
67 | pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n", | ||
68 | __func__); | ||
69 | layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0); | ||
70 | service->open = false; | ||
71 | cfpkt_destroy(pkt); | ||
72 | return 0; | ||
73 | default: | ||
74 | cfpkt_destroy(pkt); | ||
75 | pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n", | ||
76 | __func__, cmd, cmd); | ||
77 | return -EPROTO; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
82 | { | ||
83 | u8 zero = 0; | ||
84 | struct caif_payload_info *info; | ||
85 | int ret; | ||
86 | struct cfsrvl *service = container_obj(layr); | ||
87 | caif_assert(layr != NULL); | ||
88 | caif_assert(layr->dn != NULL); | ||
89 | caif_assert(layr->dn->transmit != NULL); | ||
90 | if (!cfsrvl_ready(service, &ret)) | ||
91 | return ret; | ||
92 | |||
93 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
94 | pr_err("CAIF: %s(): packet too large size=%d\n", | ||
95 | __func__, cfpkt_getlen(pkt)); | ||
96 | return -EOVERFLOW; | ||
97 | } | ||
98 | |||
99 | cfpkt_add_head(pkt, &zero, 1); | ||
100 | /* Add info for MUX-layer to route the packet out. */ | ||
101 | info = cfpkt_info(pkt); | ||
102 | info->channel_id = service->layer.id; | ||
103 | /* | ||
104 | * To optimize alignment, we add up the size of CAIF header before | ||
105 | * payload. | ||
106 | */ | ||
107 | info->hdr_len = 1; | ||
108 | info->dev_info = &service->dev_info; | ||
109 | ret = layr->dn->transmit(layr->dn, pkt); | ||
110 | if (ret < 0) { | ||
111 | u8 tmp8; | ||
112 | cfpkt_extr_head(pkt, &tmp8, 1); | ||
113 | } | ||
114 | return ret; | ||
115 | } | ||
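Like the VEI layer below, the utility link demultiplexes on a single leading control byte. A quick reference derived from the defines above (illustration only):

	/*
	 * First byte of a utility-link frame:
	 *   0x00  UTIL_PAYLOAD          forwarded to the layer above
	 *   0x80  UTIL_FLOW_ON          flow-control indication, frame consumed
	 *   0x81  UTIL_FLOW_OFF         flow-control indication, frame consumed
	 *   0x82  UTIL_REMOTE_SHUTDOWN  closes the service, frame consumed
	 */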
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c new file mode 100644 index 000000000000..0fd827f49491 --- /dev/null +++ b/net/caif/cfveil.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfsrvl.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | |||
13 | #define VEI_PAYLOAD 0x00 | ||
14 | #define VEI_CMD_BIT 0x80 | ||
15 | #define VEI_FLOW_OFF 0x81 | ||
16 | #define VEI_FLOW_ON 0x80 | ||
17 | #define VEI_SET_PIN 0x82 | ||
18 | #define VEI_CTRL_PKT_SIZE 1 | ||
19 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
20 | |||
21 | static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
22 | static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | |||
24 | struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) | ||
25 | { | ||
26 | struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
27 | if (!vei) { | ||
28 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
29 | return NULL; | ||
30 | } | ||
31 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
32 | memset(vei, 0, sizeof(struct cfsrvl)); | ||
33 | cfsrvl_init(vei, channel_id, dev_info); | ||
34 | vei->layer.receive = cfvei_receive; | ||
35 | vei->layer.transmit = cfvei_transmit; | ||
36 | snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id); | ||
37 | return &vei->layer; | ||
38 | } | ||
39 | |||
40 | static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
41 | { | ||
42 | u8 cmd; | ||
43 | int ret; | ||
44 | caif_assert(layr->up != NULL); | ||
45 | caif_assert(layr->receive != NULL); | ||
46 | caif_assert(layr->ctrlcmd != NULL); | ||
47 | |||
48 | |||
49 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
50 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
51 | cfpkt_destroy(pkt); | ||
52 | return -EPROTO; | ||
53 | } | ||
54 | switch (cmd) { | ||
55 | case VEI_PAYLOAD: | ||
56 | ret = layr->up->receive(layr->up, pkt); | ||
57 | return ret; | ||
58 | case VEI_FLOW_OFF: | ||
59 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
60 | cfpkt_destroy(pkt); | ||
61 | return 0; | ||
62 | case VEI_FLOW_ON: | ||
63 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return 0; | ||
66 | case VEI_SET_PIN: /* SET RS232 PIN */ | ||
67 | cfpkt_destroy(pkt); | ||
68 | return 0; | ||
69 | default: | ||
70 | pr_warning("CAIF: %s(): Unknown VEI control packet %d (0x%x)!\n", | ||
71 | __func__, cmd, cmd); | ||
72 | cfpkt_destroy(pkt); | ||
73 | return -EPROTO; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
78 | { | ||
79 | u8 tmp = 0; | ||
80 | struct caif_payload_info *info; | ||
81 | int ret; | ||
82 | struct cfsrvl *service = container_obj(layr); | ||
83 | if (!cfsrvl_ready(service, &ret)) | ||
84 | return ret; | ||
85 | caif_assert(layr->dn != NULL); | ||
86 | caif_assert(layr->dn->transmit != NULL); | ||
87 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", | ||
89 | __func__, cfpkt_getlen(pkt)); | ||
90 | return -EOVERFLOW; | ||
91 | } | ||
92 | |||
93 | if (cfpkt_add_head(pkt, &tmp, 1) < 0) { | ||
94 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
95 | return -EPROTO; | ||
96 | } | ||
97 | |||
98 | /* Add info for MUX-layer to route the packet out. */ | ||
99 | info = cfpkt_info(pkt); | ||
100 | info->channel_id = service->layer.id; | ||
101 | info->hdr_len = 1; | ||
102 | info->dev_info = &service->dev_info; | ||
103 | ret = layr->dn->transmit(layr->dn, pkt); | ||
104 | if (ret < 0) | ||
105 | cfpkt_extr_head(pkt, &tmp, 1); | ||
106 | return ret; | ||
107 | } | ||
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c new file mode 100644 index 000000000000..89ad4ea239f1 --- /dev/null +++ b/net/caif/cfvidl.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
16 | |||
17 | static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
18 | static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
19 | |||
20 | struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info) | ||
21 | { | ||
22 | struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
23 | if (!vid) { | ||
24 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
25 | return NULL; | ||
26 | } | ||
27 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
28 | |||
29 | memset(vid, 0, sizeof(struct cfsrvl)); | ||
30 | cfsrvl_init(vid, channel_id, dev_info); | ||
31 | vid->layer.receive = cfvidl_receive; | ||
32 | vid->layer.transmit = cfvidl_transmit; | ||
33 | snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1"); | ||
34 | return &vid->layer; | ||
35 | } | ||
36 | |||
37 | static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
38 | { | ||
39 | u32 videoheader; | ||
40 | if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) { | ||
41 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
42 | cfpkt_destroy(pkt); | ||
43 | return -EPROTO; | ||
44 | } | ||
45 | return layr->up->receive(layr->up, pkt); | ||
46 | } | ||
47 | |||
48 | static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
49 | { | ||
50 | struct cfsrvl *service = container_obj(layr); | ||
51 | struct caif_payload_info *info; | ||
52 | u32 videoheader = 0; | ||
53 | int ret; | ||
54 | if (!cfsrvl_ready(service, &ret)) | ||
55 | return ret; | ||
56 | cfpkt_add_head(pkt, &videoheader, 4); | ||
57 | /* Add info for MUX-layer to route the packet out */ | ||
58 | info = cfpkt_info(pkt); | ||
59 | info->channel_id = service->layer.id; | ||
60 | info->dev_info = &service->dev_info; | ||
61 | ret = layr->dn->transmit(layr->dn, pkt); | ||
62 | if (ret < 0) | ||
63 | cfpkt_extr_head(pkt, &videoheader, 4); | ||
64 | return ret; | ||
65 | } | ||
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c new file mode 100644 index 000000000000..f622ff1d39ba --- /dev/null +++ b/net/caif/chnl_net.c | |||
@@ -0,0 +1,451 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * Daniel Martensson / Daniel.Martensson@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | */ | ||
7 | |||
8 | #include <linux/version.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/if_ether.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/ip.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/sockios.h> | ||
18 | #include <linux/caif/if_caif.h> | ||
19 | #include <net/rtnetlink.h> | ||
20 | #include <net/caif/caif_layer.h> | ||
21 | #include <net/caif/cfcnfg.h> | ||
22 | #include <net/caif/cfpkt.h> | ||
23 | #include <net/caif/caif_dev.h> | ||
24 | |||
25 | #define CAIF_CONNECT_TIMEOUT 30 | ||
26 | #define SIZE_MTU 1500 | ||
27 | #define SIZE_MTU_MAX 4080 | ||
28 | #define SIZE_MTU_MIN 68 | ||
29 | #define CAIF_NET_DEFAULT_QUEUE_LEN 500 | ||
30 | |||
31 | #undef pr_debug | ||
32 | #define pr_debug pr_warning | ||
33 | |||
34 | /* This list is protected by the rtnl lock. */ | ||
35 | static LIST_HEAD(chnl_net_list); | ||
36 | |||
37 | MODULE_LICENSE("GPL"); | ||
38 | MODULE_ALIAS_RTNL_LINK("caif"); | ||
39 | |||
40 | struct chnl_net { | ||
41 | struct cflayer chnl; | ||
42 | struct net_device_stats stats; | ||
43 | struct caif_connect_request conn_req; | ||
44 | struct list_head list_field; | ||
45 | struct net_device *netdev; | ||
46 | char name[256]; | ||
47 | wait_queue_head_t netmgmt_wq; | ||
48 | /* Flow status to remember and control the transmission. */ | ||
49 | bool flowenabled; | ||
50 | bool pending_close; | ||
51 | }; | ||
52 | |||
53 | static void robust_list_del(struct list_head *delete_node) | ||
54 | { | ||
55 | struct list_head *list_node; | ||
56 | struct list_head *n; | ||
57 | ASSERT_RTNL(); | ||
58 | list_for_each_safe(list_node, n, &chnl_net_list) { | ||
59 | if (list_node == delete_node) { | ||
60 | list_del(list_node); | ||
61 | break; | ||
62 | } | ||
63 | } | ||
64 | } | ||
65 | |||
66 | static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) | ||
67 | { | ||
68 | struct sk_buff *skb; | ||
69 | struct chnl_net *priv = NULL; | ||
70 | int pktlen; | ||
71 | int err = 0; | ||
72 | |||
73 | priv = container_of(layr, struct chnl_net, chnl); | ||
74 | |||
75 | if (!priv) | ||
76 | return -EINVAL; | ||
77 | |||
78 | /* Get length of CAIF packet. */ | ||
79 | pktlen = cfpkt_getlen(pkt); | ||
80 | |||
81 | skb = (struct sk_buff *) cfpkt_tonative(pkt); | ||
82 | /* Pass some minimum information and | ||
83 | * send the packet to the net stack. | ||
84 | */ | ||
85 | skb->dev = priv->netdev; | ||
86 | skb->protocol = htons(ETH_P_IP); | ||
87 | |||
88 | /* If we change the header in loop mode, the checksum is corrupted. */ | ||
89 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | ||
90 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
91 | else | ||
92 | skb->ip_summed = CHECKSUM_NONE; | ||
93 | |||
94 | /* FIXME: Drivers should call this in tasklet context. */ | ||
95 | if (in_interrupt()) | ||
96 | netif_rx(skb); | ||
97 | else | ||
98 | netif_rx_ni(skb); | ||
99 | |||
100 | /* Update statistics. */ | ||
101 | priv->netdev->stats.rx_packets++; | ||
102 | priv->netdev->stats.rx_bytes += pktlen; | ||
103 | |||
104 | return err; | ||
105 | } | ||
106 | |||
107 | static int delete_device(struct chnl_net *dev) | ||
108 | { | ||
109 | ASSERT_RTNL(); | ||
110 | if (dev->netdev) | ||
111 | unregister_netdevice(dev->netdev); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static void close_work(struct work_struct *work) | ||
116 | { | ||
117 | struct chnl_net *dev = NULL; | ||
118 | struct list_head *list_node; | ||
119 | struct list_head *_tmp; | ||
120 | rtnl_lock(); | ||
121 | list_for_each_safe(list_node, _tmp, &chnl_net_list) { | ||
122 | dev = list_entry(list_node, struct chnl_net, list_field); | ||
123 | if (!dev->pending_close) | ||
124 | continue; | ||
125 | list_del(list_node); | ||
126 | delete_device(dev); | ||
127 | } | ||
128 | rtnl_unlock(); | ||
129 | } | ||
130 | static DECLARE_WORK(close_worker, close_work); | ||
131 | |||
132 | static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, | ||
133 | int phyid) | ||
134 | { | ||
135 | struct chnl_net *priv; | ||
136 | pr_debug("CAIF: %s(): NET flowctrl func called flow: %s.\n", | ||
137 | __func__, | ||
138 | flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : | ||
139 | flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : | ||
140 | flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : | ||
141 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : | ||
142 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : | ||
143 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? | ||
144 | "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); | ||
145 | |||
146 | priv = container_of(layr, struct chnl_net, chnl); | ||
147 | |||
148 | switch (flow) { | ||
149 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
150 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
151 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
152 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
153 | priv->flowenabled = false; | ||
154 | netif_tx_disable(priv->netdev); | ||
155 | pr_warning("CAIF: %s(): done\n", __func__); | ||
156 | priv->pending_close = 1; | ||
157 | schedule_work(&close_worker); | ||
158 | break; | ||
159 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
160 | case CAIF_CTRLCMD_INIT_RSP: | ||
161 | priv->flowenabled = true; | ||
162 | netif_wake_queue(priv->netdev); | ||
163 | wake_up_interruptible(&priv->netmgmt_wq); | ||
164 | break; | ||
165 | default: | ||
166 | break; | ||
167 | } | ||
168 | } | ||
169 | |||
170 | static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
171 | { | ||
172 | struct chnl_net *priv; | ||
173 | struct cfpkt *pkt = NULL; | ||
174 | int len; | ||
175 | int result = -1; | ||
176 | /* Get our private data. */ | ||
177 | priv = netdev_priv(dev); | ||
178 | |||
179 | if (skb->len > priv->netdev->mtu) { | ||
180 | pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__); | ||
181 | return -ENOSPC; | ||
182 | } | ||
183 | |||
184 | if (!priv->flowenabled) { | ||
185 | pr_debug("CAIF: %s(): dropping packets flow off\n", __func__); | ||
186 | return NETDEV_TX_BUSY; | ||
187 | } | ||
188 | |||
189 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | ||
190 | swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | ||
191 | |||
192 | /* Store original SKB length. */ | ||
193 | len = skb->len; | ||
194 | |||
195 | pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb); | ||
196 | |||
197 | pr_debug("CAIF: %s(): transmit inst %s %d,%p\n", | ||
198 | __func__, dev->name, priv->chnl.dn->id, &priv->chnl.dn); | ||
199 | |||
200 | /* Send the packet down the stack. */ | ||
201 | result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); | ||
202 | if (result) { | ||
203 | if (result == -EAGAIN) | ||
204 | result = NETDEV_TX_BUSY; | ||
205 | return result; | ||
206 | } | ||
207 | |||
208 | /* Update statistics. */ | ||
209 | dev->stats.tx_packets++; | ||
210 | dev->stats.tx_bytes += len; | ||
211 | |||
212 | return NETDEV_TX_OK; | ||
213 | } | ||
214 | |||
215 | static int chnl_net_open(struct net_device *dev) | ||
216 | { | ||
217 | struct chnl_net *priv = NULL; | ||
218 | int result = -1; | ||
219 | ASSERT_RTNL(); | ||
220 | |||
221 | priv = netdev_priv(dev); | ||
222 | if (!priv) { | ||
223 | pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__); | ||
224 | return -ENODEV; | ||
225 | } | ||
226 | pr_debug("CAIF: %s(): dev name: %s\n", __func__, priv->name); | ||
227 | |||
228 | result = caif_connect_client(&priv->conn_req, &priv->chnl); | ||
229 | if (result != 0) { | ||
230 | pr_debug("CAIF: %s(): err: " | ||
231 | "Unable to register and open device, Err:%d\n", | ||
232 | __func__, | ||
233 | result); | ||
234 | return -ENODEV; | ||
235 | } | ||
236 | result = wait_event_interruptible(priv->netmgmt_wq, priv->flowenabled); | ||
237 | |||
238 | if (result == -ERESTARTSYS) { | ||
239 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
240 | " woken by a signal\n", __func__); | ||
241 | return -ERESTARTSYS; | ||
242 | } else | ||
243 | pr_debug("CAIF: %s(): Flow on received\n", __func__); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int chnl_net_stop(struct net_device *dev) | ||
249 | { | ||
250 | struct chnl_net *priv; | ||
251 | int result = -1; | ||
252 | ASSERT_RTNL(); | ||
253 | priv = netdev_priv(dev); | ||
254 | |||
255 | result = caif_disconnect_client(&priv->chnl); | ||
256 | if (result != 0) { | ||
257 | pr_debug("CAIF: %s(): chnl_net_stop: err: " | ||
258 | "Unable to STOP device, Err:%d\n", | ||
259 | __func__, result); | ||
260 | return -EBUSY; | ||
261 | } | ||
262 | result = wait_event_interruptible(priv->netmgmt_wq, | ||
263 | !priv->flowenabled); | ||
264 | |||
265 | if (result == -ERESTARTSYS) { | ||
266 | pr_debug("CAIF: %s(): wait_event_interruptible woken by" | ||
267 | " signal, signal_pending(current) = %d\n", | ||
268 | __func__, | ||
269 | signal_pending(current)); | ||
270 | } else { | ||
271 | pr_debug("CAIF: %s(): disconnect received\n", __func__); | ||
272 | |||
273 | } | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static int chnl_net_init(struct net_device *dev) | ||
279 | { | ||
280 | struct chnl_net *priv; | ||
281 | ASSERT_RTNL(); | ||
282 | priv = netdev_priv(dev); | ||
283 | strncpy(priv->name, dev->name, sizeof(priv->name)); | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static void chnl_net_uninit(struct net_device *dev) | ||
288 | { | ||
289 | struct chnl_net *priv; | ||
290 | ASSERT_RTNL(); | ||
291 | priv = netdev_priv(dev); | ||
292 | robust_list_del(&priv->list_field); | ||
293 | } | ||
294 | |||
295 | static const struct net_device_ops netdev_ops = { | ||
296 | .ndo_open = chnl_net_open, | ||
297 | .ndo_stop = chnl_net_stop, | ||
298 | .ndo_init = chnl_net_init, | ||
299 | .ndo_uninit = chnl_net_uninit, | ||
300 | .ndo_start_xmit = chnl_net_start_xmit, | ||
301 | }; | ||
302 | |||
303 | static void ipcaif_net_setup(struct net_device *dev) | ||
304 | { | ||
305 | struct chnl_net *priv; | ||
306 | dev->netdev_ops = &netdev_ops; | ||
307 | dev->destructor = free_netdev; | ||
308 | dev->flags |= IFF_NOARP; | ||
309 | dev->flags |= IFF_POINTOPOINT; | ||
310 | dev->needed_headroom = CAIF_NEEDED_HEADROOM; | ||
311 | dev->needed_tailroom = CAIF_NEEDED_TAILROOM; | ||
312 | dev->mtu = SIZE_MTU; | ||
313 | dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN; | ||
314 | |||
315 | priv = netdev_priv(dev); | ||
316 | priv->chnl.receive = chnl_recv_cb; | ||
317 | priv->chnl.ctrlcmd = chnl_flowctrl_cb; | ||
318 | priv->netdev = dev; | ||
319 | priv->conn_req.protocol = CAIFPROTO_DATAGRAM; | ||
320 | priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; | ||
321 | priv->conn_req.priority = CAIF_PRIO_LOW; | ||
322 | /* Insert illegal value */ | ||
323 | priv->conn_req.sockaddr.u.dgm.connection_id = -1; | ||
324 | priv->flowenabled = false; | ||
325 | |||
326 | ASSERT_RTNL(); | ||
327 | init_waitqueue_head(&priv->netmgmt_wq); | ||
328 | list_add(&priv->list_field, &chnl_net_list); | ||
329 | } | ||
330 | |||
331 | |||
332 | static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
333 | { | ||
334 | struct chnl_net *priv; | ||
335 | u8 loop; | ||
336 | priv = netdev_priv(dev); | ||
337 | NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, | ||
338 | priv->conn_req.sockaddr.u.dgm.connection_id); | ||
339 | NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, | ||
340 | priv->conn_req.sockaddr.u.dgm.connection_id); | ||
341 | loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; | ||
342 | NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); | ||
343 | |||
344 | |||
345 | return 0; | ||
346 | nla_put_failure: | ||
347 | return -EMSGSIZE; | ||
348 | |||
349 | } | ||
350 | |||
351 | static void caif_netlink_parms(struct nlattr *data[], | ||
352 | struct caif_connect_request *conn_req) | ||
353 | { | ||
354 | if (!data) { | ||
355 | pr_warning("CAIF: %s: no params data found\n", __func__); | ||
356 | return; | ||
357 | } | ||
358 | if (data[IFLA_CAIF_IPV4_CONNID]) | ||
359 | conn_req->sockaddr.u.dgm.connection_id = | ||
360 | nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]); | ||
361 | if (data[IFLA_CAIF_IPV6_CONNID]) | ||
362 | conn_req->sockaddr.u.dgm.connection_id = | ||
363 | nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]); | ||
364 | if (data[IFLA_CAIF_LOOPBACK]) { | ||
365 | if (nla_get_u8(data[IFLA_CAIF_LOOPBACK])) | ||
366 | conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP; | ||
367 | else | ||
368 | conn_req->protocol = CAIFPROTO_DATAGRAM; | ||
369 | } | ||
370 | } | ||
371 | |||
372 | static int ipcaif_newlink(struct net *src_net, struct net_device *dev, | ||
373 | struct nlattr *tb[], struct nlattr *data[]) | ||
374 | { | ||
375 | int ret; | ||
376 | struct chnl_net *caifdev; | ||
377 | ASSERT_RTNL(); | ||
378 | caifdev = netdev_priv(dev); | ||
379 | caif_netlink_parms(data, &caifdev->conn_req); | ||
380 | ret = register_netdevice(dev); | ||
381 | if (ret) | ||
382 | pr_warning("CAIF: %s(): device rtnl registration failed\n", | ||
383 | __func__); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[], | ||
388 | struct nlattr *data[]) | ||
389 | { | ||
390 | struct chnl_net *caifdev; | ||
391 | ASSERT_RTNL(); | ||
392 | caifdev = netdev_priv(dev); | ||
393 | caif_netlink_parms(data, &caifdev->conn_req); | ||
394 | netdev_state_change(dev); | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static size_t ipcaif_get_size(const struct net_device *dev) | ||
399 | { | ||
400 | return | ||
401 | /* IFLA_CAIF_IPV4_CONNID */ | ||
402 | nla_total_size(4) + | ||
403 | /* IFLA_CAIF_IPV6_CONNID */ | ||
404 | nla_total_size(4) + | ||
405 | /* IFLA_CAIF_LOOPBACK */ | ||
406 | nla_total_size(2) + | ||
407 | 0; | ||
408 | } | ||
409 | |||
410 | static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = { | ||
411 | [IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 }, | ||
412 | [IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 }, | ||
413 | [IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 } | ||
414 | }; | ||
415 | |||
416 | |||
417 | static struct rtnl_link_ops ipcaif_link_ops __read_mostly = { | ||
418 | .kind = "caif", | ||
419 | .priv_size = sizeof(struct chnl_net), | ||
420 | .setup = ipcaif_net_setup, | ||
421 | .maxtype = IFLA_CAIF_MAX, | ||
422 | .policy = ipcaif_policy, | ||
423 | .newlink = ipcaif_newlink, | ||
424 | .changelink = ipcaif_changelink, | ||
425 | .get_size = ipcaif_get_size, | ||
426 | .fill_info = ipcaif_fill_info, | ||
427 | |||
428 | }; | ||
429 | |||
430 | static int __init chnl_init_module(void) | ||
431 | { | ||
432 | return rtnl_link_register(&ipcaif_link_ops); | ||
433 | } | ||
434 | |||
435 | static void __exit chnl_exit_module(void) | ||
436 | { | ||
437 | struct chnl_net *dev = NULL; | ||
438 | struct list_head *list_node; | ||
439 | struct list_head *_tmp; | ||
440 | rtnl_link_unregister(&ipcaif_link_ops); | ||
441 | rtnl_lock(); | ||
442 | list_for_each_safe(list_node, _tmp, &chnl_net_list) { | ||
443 | dev = list_entry(list_node, struct chnl_net, list_field); | ||
444 | list_del(list_node); | ||
445 | delete_device(dev); | ||
446 | } | ||
447 | rtnl_unlock(); | ||
448 | } | ||
449 | |||
450 | module_init(chnl_init_module); | ||
451 | module_exit(chnl_exit_module); | ||
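Since the device is created through rtnl_link_ops, a hypothetical userspace tool would instantiate it with a standard RTM_NEWLINK request. The attribute nesting below follows the generic rtnl_link conventions and is an illustration, not part of the patch:

	/*
	 * RTM_NEWLINK
	 *   IFLA_LINKINFO
	 *     IFLA_INFO_KIND = "caif"
	 *     IFLA_INFO_DATA
	 *       IFLA_CAIF_IPV4_CONNID (u32) -> conn_req.sockaddr.u.dgm.connection_id
	 *       IFLA_CAIF_LOOPBACK    (u8)  -> non-zero selects CAIFPROTO_DATAGRAM_LOOP
	 */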
diff --git a/net/core/Makefile b/net/core/Makefile index 08791ac3e05a..51c3eec850ef 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ | |||
7 | 7 | ||
8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o | 8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o |
9 | 9 | ||
10 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ | 10 | obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ |
11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o | 11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o |
12 | 12 | ||
13 | obj-$(CONFIG_XFRM) += flow.o | 13 | obj-$(CONFIG_XFRM) += flow.o |
diff --git a/net/core/dev.c b/net/core/dev.c index 1c8a0ce473a8..a10a21619ae3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -130,6 +130,7 @@ | |||
130 | #include <linux/jhash.h> | 130 | #include <linux/jhash.h> |
131 | #include <linux/random.h> | 131 | #include <linux/random.h> |
132 | #include <trace/events/napi.h> | 132 | #include <trace/events/napi.h> |
133 | #include <linux/pci.h> | ||
133 | 134 | ||
134 | #include "net-sysfs.h" | 135 | #include "net-sysfs.h" |
135 | 136 | ||
@@ -207,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) | |||
207 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; | 208 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
208 | } | 209 | } |
209 | 210 | ||
211 | static inline void rps_lock(struct softnet_data *queue) | ||
212 | { | ||
213 | #ifdef CONFIG_RPS | ||
214 | spin_lock(&queue->input_pkt_queue.lock); | ||
215 | #endif | ||
216 | } | ||
217 | |||
218 | static inline void rps_unlock(struct softnet_data *queue) | ||
219 | { | ||
220 | #ifdef CONFIG_RPS | ||
221 | spin_unlock(&queue->input_pkt_queue.lock); | ||
222 | #endif | ||
223 | } | ||
224 | |||
210 | /* Device list insertion */ | 225 | /* Device list insertion */ |
211 | static int list_netdevice(struct net_device *dev) | 226 | static int list_netdevice(struct net_device *dev) |
212 | { | 227 | { |
@@ -773,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype); | |||
773 | 788 | ||
774 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | 789 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
775 | { | 790 | { |
776 | struct net_device *dev; | 791 | struct net_device *dev, *ret = NULL; |
777 | 792 | ||
778 | rtnl_lock(); | 793 | rcu_read_lock(); |
779 | dev = __dev_getfirstbyhwtype(net, type); | 794 | for_each_netdev_rcu(net, dev) |
780 | if (dev) | 795 | if (dev->type == type) { |
781 | dev_hold(dev); | 796 | dev_hold(dev); |
782 | rtnl_unlock(); | 797 | ret = dev; |
783 | return dev; | 798 | break; |
799 | } | ||
800 | rcu_read_unlock(); | ||
801 | return ret; | ||
784 | } | 802 | } |
785 | EXPORT_SYMBOL(dev_getfirstbyhwtype); | 803 | EXPORT_SYMBOL(dev_getfirstbyhwtype); |
786 | 804 | ||
@@ -1085,9 +1103,9 @@ void netdev_state_change(struct net_device *dev) | |||
1085 | } | 1103 | } |
1086 | EXPORT_SYMBOL(netdev_state_change); | 1104 | EXPORT_SYMBOL(netdev_state_change); |
1087 | 1105 | ||
1088 | void netdev_bonding_change(struct net_device *dev, unsigned long event) | 1106 | int netdev_bonding_change(struct net_device *dev, unsigned long event) |
1089 | { | 1107 | { |
1090 | call_netdevice_notifiers(event, dev); | 1108 | return call_netdevice_notifiers(event, dev); |
1091 | } | 1109 | } |
1092 | EXPORT_SYMBOL(netdev_bonding_change); | 1110 | EXPORT_SYMBOL(netdev_bonding_change); |
1093 | 1111 | ||
@@ -1784,18 +1802,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); | |||
1784 | * 2. No high memory really exists on this machine. | 1802 | * 2. No high memory really exists on this machine. |
1785 | */ | 1803 | */ |
1786 | 1804 | ||
1787 | static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | 1805 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) |
1788 | { | 1806 | { |
1789 | #ifdef CONFIG_HIGHMEM | 1807 | #ifdef CONFIG_HIGHMEM |
1790 | int i; | 1808 | int i; |
1809 | if (!(dev->features & NETIF_F_HIGHDMA)) { | ||
1810 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1811 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | ||
1812 | return 1; | ||
1813 | } | ||
1791 | 1814 | ||
1792 | if (dev->features & NETIF_F_HIGHDMA) | 1815 | if (PCI_DMA_BUS_IS_PHYS) { |
1793 | return 0; | 1816 | struct device *pdev = dev->dev.parent; |
1794 | |||
1795 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1796 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | ||
1797 | return 1; | ||
1798 | 1817 | ||
1818 | if (!pdev) | ||
1819 | return 0; | ||
1820 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
1821 | dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); | ||
1822 | if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) | ||
1823 | return 1; | ||
1824 | } | ||
1825 | } | ||
1799 | #endif | 1826 | #endif |
1800 | return 0; | 1827 | return 0; |
1801 | } | 1828 | } |
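A worked example of the new DMA-mask test, with hypothetical numbers and assuming 4 KiB pages: with a 32-bit mask of 0xffffffff, a fragment whose page sits at physical address 0x100000000 fails, so the transmit path must avoid handing that fragment directly to the device:

	/*
	 * addr = 0x100000000 (hypothetical frag address), *pdev->dma_mask = 0xffffffff:
	 *   addr + PAGE_SIZE - 1 = 0x100000fff > 0xffffffff  ->  illegal_highdma() == 1
	 */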
@@ -1932,7 +1959,7 @@ out_kfree_skb: | |||
1932 | return rc; | 1959 | return rc; |
1933 | } | 1960 | } |
1934 | 1961 | ||
1935 | static u32 skb_tx_hashrnd; | 1962 | static u32 hashrnd __read_mostly; |
1936 | 1963 | ||
1937 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | 1964 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) |
1938 | { | 1965 | { |
@@ -1950,7 +1977,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1950 | else | 1977 | else |
1951 | hash = skb->protocol; | 1978 | hash = skb->protocol; |
1952 | 1979 | ||
1953 | hash = jhash_1word(hash, skb_tx_hashrnd); | 1980 | hash = jhash_1word(hash, hashrnd); |
1954 | 1981 | ||
1955 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); | 1982 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); |
1956 | } | 1983 | } |
@@ -1960,10 +1987,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | |||
1960 | { | 1987 | { |
1961 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | 1988 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { |
1962 | if (net_ratelimit()) { | 1989 | if (net_ratelimit()) { |
1963 | WARN(1, "%s selects TX queue %d, but " | 1990 | netdev_warn(dev, "selects TX queue %d, but " |
1964 | "real number of TX queues is %d\n", | 1991 | "real number of TX queues is %d\n", |
1965 | dev->name, queue_index, | 1992 | queue_index, dev->real_num_tx_queues); |
1966 | dev->real_num_tx_queues); | ||
1967 | } | 1993 | } |
1968 | return 0; | 1994 | return 0; |
1969 | } | 1995 | } |
@@ -2176,6 +2202,178 @@ int weight_p __read_mostly = 64; /* old backlog weight */ | |||
2176 | 2202 | ||
2177 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | 2203 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; |
2178 | 2204 | ||
2205 | #ifdef CONFIG_RPS | ||
2206 | /* | ||
2207 | * get_rps_cpu is called from netif_receive_skb and returns the target | ||
2208 | * CPU from the RPS map of the receiving queue for a given skb. | ||
2209 | */ | ||
2210 | static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) | ||
2211 | { | ||
2212 | struct ipv6hdr *ip6; | ||
2213 | struct iphdr *ip; | ||
2214 | struct netdev_rx_queue *rxqueue; | ||
2215 | struct rps_map *map; | ||
2216 | int cpu = -1; | ||
2217 | u8 ip_proto; | ||
2218 | u32 addr1, addr2, ports, ihl; | ||
2219 | |||
2220 | rcu_read_lock(); | ||
2221 | |||
2222 | if (skb_rx_queue_recorded(skb)) { | ||
2223 | u16 index = skb_get_rx_queue(skb); | ||
2224 | if (unlikely(index >= dev->num_rx_queues)) { | ||
2225 | if (net_ratelimit()) { | ||
2226 | netdev_warn(dev, "received packet on queue " | ||
2227 | "%u, but number of RX queues is %u\n", | ||
2228 | index, dev->num_rx_queues); | ||
2229 | } | ||
2230 | goto done; | ||
2231 | } | ||
2232 | rxqueue = dev->_rx + index; | ||
2233 | } else | ||
2234 | rxqueue = dev->_rx; | ||
2235 | |||
2236 | if (!rxqueue->rps_map) | ||
2237 | goto done; | ||
2238 | |||
2239 | if (skb->rxhash) | ||
2240 | goto got_hash; /* Skip hash computation on packet header */ | ||
2241 | |||
2242 | switch (skb->protocol) { | ||
2243 | case __constant_htons(ETH_P_IP): | ||
2244 | if (!pskb_may_pull(skb, sizeof(*ip))) | ||
2245 | goto done; | ||
2246 | |||
2247 | ip = (struct iphdr *) skb->data; | ||
2248 | ip_proto = ip->protocol; | ||
2249 | addr1 = ip->saddr; | ||
2250 | addr2 = ip->daddr; | ||
2251 | ihl = ip->ihl; | ||
2252 | break; | ||
2253 | case __constant_htons(ETH_P_IPV6): | ||
2254 | if (!pskb_may_pull(skb, sizeof(*ip6))) | ||
2255 | goto done; | ||
2256 | |||
2257 | ip6 = (struct ipv6hdr *) skb->data; | ||
2258 | ip_proto = ip6->nexthdr; | ||
2259 | addr1 = ip6->saddr.s6_addr32[3]; | ||
2260 | addr2 = ip6->daddr.s6_addr32[3]; | ||
2261 | ihl = (40 >> 2); | ||
2262 | break; | ||
2263 | default: | ||
2264 | goto done; | ||
2265 | } | ||
2266 | ports = 0; | ||
2267 | switch (ip_proto) { | ||
2268 | case IPPROTO_TCP: | ||
2269 | case IPPROTO_UDP: | ||
2270 | case IPPROTO_DCCP: | ||
2271 | case IPPROTO_ESP: | ||
2272 | case IPPROTO_AH: | ||
2273 | case IPPROTO_SCTP: | ||
2274 | case IPPROTO_UDPLITE: | ||
2275 | if (pskb_may_pull(skb, (ihl * 4) + 4)) | ||
2276 | ports = *((u32 *) (skb->data + (ihl * 4))); | ||
2277 | break; | ||
2278 | |||
2279 | default: | ||
2280 | break; | ||
2281 | } | ||
2282 | |||
2283 | skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd); | ||
2284 | if (!skb->rxhash) | ||
2285 | skb->rxhash = 1; | ||
2286 | |||
2287 | got_hash: | ||
2288 | map = rcu_dereference(rxqueue->rps_map); | ||
2289 | if (map) { | ||
2290 | u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; | ||
2291 | |||
2292 | if (cpu_online(tcpu)) { | ||
2293 | cpu = tcpu; | ||
2294 | goto done; | ||
2295 | } | ||
2296 | } | ||
2297 | |||
2298 | done: | ||
2299 | rcu_read_unlock(); | ||
2300 | return cpu; | ||
2301 | } | ||
2302 | |||
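	/*
	 * Illustration (not from the patch): get_rps_cpu() above and
	 * skb_tx_hash() both map a 32-bit hash onto a table with the same
	 * multiply-shift, avoiding a division on the fast path:
	 *
	 *   index = ((u64)hash * len) >> 32    always lands in [0, len)
	 *
	 * e.g. with len = 4 and hash = 0x80000000:
	 *   (0x80000000ULL * 4) >> 32 = 0x200000000 >> 32 = 2
	 */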
2303 | /* | ||
2304 | * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled | ||
2305 | * to be sent to kick remote softirq processing. There are two masks since | ||
2306 | * the sending of IPIs must be done with interrupts enabled. The select field | ||
2307 | * indicates the current mask that enqueue_to_backlog uses to schedule IPIs. | ||
2308 | * select is flipped before net_rps_action is called while still under lock, | ||
2309 | * net_rps_action then uses the non-selected mask to send the IPIs and clears | ||
2310 | * it without conflicting with the enqueue_to_backlog operation. | ||
2311 | */ | ||
2312 | struct rps_remote_softirq_cpus { | ||
2313 | cpumask_t mask[2]; | ||
2314 | int select; | ||
2315 | }; | ||
2316 | static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus); | ||
2317 | |||
2318 | /* Called from hardirq (IPI) context */ | ||
2319 | static void trigger_softirq(void *data) | ||
2320 | { | ||
2321 | struct softnet_data *queue = data; | ||
2322 | __napi_schedule(&queue->backlog); | ||
2323 | __get_cpu_var(netdev_rx_stat).received_rps++; | ||
2324 | } | ||
2325 | #endif /* CONFIG_RPS */ | ||
2326 | |||
2327 | /* | ||
2328 | * enqueue_to_backlog is called to queue an skb to a per CPU backlog | ||
2329 | * queue (may be a remote CPU queue). | ||
2330 | */ | ||
2331 | static int enqueue_to_backlog(struct sk_buff *skb, int cpu) | ||
2332 | { | ||
2333 | struct softnet_data *queue; | ||
2334 | unsigned long flags; | ||
2335 | |||
2336 | queue = &per_cpu(softnet_data, cpu); | ||
2337 | |||
2338 | local_irq_save(flags); | ||
2339 | __get_cpu_var(netdev_rx_stat).total++; | ||
2340 | |||
2341 | rps_lock(queue); | ||
2342 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2343 | if (queue->input_pkt_queue.qlen) { | ||
2344 | enqueue: | ||
2345 | __skb_queue_tail(&queue->input_pkt_queue, skb); | ||
2346 | rps_unlock(queue); | ||
2347 | local_irq_restore(flags); | ||
2348 | return NET_RX_SUCCESS; | ||
2349 | } | ||
2350 | |||
2351 | /* Schedule NAPI for backlog device */ | ||
2352 | if (napi_schedule_prep(&queue->backlog)) { | ||
2353 | #ifdef CONFIG_RPS | ||
2354 | if (cpu != smp_processor_id()) { | ||
2355 | struct rps_remote_softirq_cpus *rcpus = | ||
2356 | &__get_cpu_var(rps_remote_softirq_cpus); | ||
2357 | |||
2358 | cpu_set(cpu, rcpus->mask[rcpus->select]); | ||
2359 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
2360 | } else | ||
2361 | __napi_schedule(&queue->backlog); | ||
2362 | #else | ||
2363 | __napi_schedule(&queue->backlog); | ||
2364 | #endif | ||
2365 | } | ||
2366 | goto enqueue; | ||
2367 | } | ||
2368 | |||
2369 | rps_unlock(queue); | ||
2370 | |||
2371 | __get_cpu_var(netdev_rx_stat).dropped++; | ||
2372 | local_irq_restore(flags); | ||
2373 | |||
2374 | kfree_skb(skb); | ||
2375 | return NET_RX_DROP; | ||
2376 | } | ||
2179 | 2377 | ||
2180 | /** | 2378 | /** |
2181 | * netif_rx - post buffer to the network code | 2379 | * netif_rx - post buffer to the network code |
@@ -2194,8 +2392,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |||
2194 | 2392 | ||
2195 | int netif_rx(struct sk_buff *skb) | 2393 | int netif_rx(struct sk_buff *skb) |
2196 | { | 2394 | { |
2197 | struct softnet_data *queue; | 2395 | int cpu; |
2198 | unsigned long flags; | ||
2199 | 2396 | ||
2200 | /* if netpoll wants it, pretend we never saw it */ | 2397 | /* if netpoll wants it, pretend we never saw it */ |
2201 | if (netpoll_rx(skb)) | 2398 | if (netpoll_rx(skb)) |
@@ -2204,31 +2401,15 @@ int netif_rx(struct sk_buff *skb) | |||
2204 | if (!skb->tstamp.tv64) | 2401 | if (!skb->tstamp.tv64) |
2205 | net_timestamp(skb); | 2402 | net_timestamp(skb); |
2206 | 2403 | ||
2207 | /* | 2404 | #ifdef CONFIG_RPS |
2208 | * The code is rearranged so that the path is the most | 2405 | cpu = get_rps_cpu(skb->dev, skb); |
2209 | * short when CPU is congested, but is still operating. | 2406 | if (cpu < 0) |
2210 | */ | 2407 | cpu = smp_processor_id(); |
2211 | local_irq_save(flags); | 2408 | #else |
2212 | queue = &__get_cpu_var(softnet_data); | 2409 | cpu = smp_processor_id(); |
2213 | 2410 | #endif | |
2214 | __get_cpu_var(netdev_rx_stat).total++; | ||
2215 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2216 | if (queue->input_pkt_queue.qlen) { | ||
2217 | enqueue: | ||
2218 | __skb_queue_tail(&queue->input_pkt_queue, skb); | ||
2219 | local_irq_restore(flags); | ||
2220 | return NET_RX_SUCCESS; | ||
2221 | } | ||
2222 | |||
2223 | napi_schedule(&queue->backlog); | ||
2224 | goto enqueue; | ||
2225 | } | ||
2226 | |||
2227 | __get_cpu_var(netdev_rx_stat).dropped++; | ||
2228 | local_irq_restore(flags); | ||
2229 | 2411 | ||
2230 | kfree_skb(skb); | 2412 | return enqueue_to_backlog(skb, cpu); |
2231 | return NET_RX_DROP; | ||
2232 | } | 2413 | } |
2233 | EXPORT_SYMBOL(netif_rx); | 2414 | EXPORT_SYMBOL(netif_rx); |
2234 | 2415 | ||
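For context, netif_rx() is the entry point used by non-NAPI drivers, usually called straight from their interrupt handlers. A minimal sketch of such a caller follows; the foo_* driver and its two helpers are hypothetical:

    /* Hypothetical non-NAPI receive path feeding netif_rx() (sketch). */
    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static int foo_read_rx_len(struct net_device *dev);              /* hypothetical */
    static void foo_copy_rx_data(struct net_device *dev, void *buf); /* hypothetical */

    static irqreturn_t foo_interrupt(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct sk_buff *skb;
            int len = foo_read_rx_len(dev);

            skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
            if (!skb) {
                    dev->stats.rx_dropped++;
                    return IRQ_HANDLED;
            }
            skb_reserve(skb, NET_IP_ALIGN);
            foo_copy_rx_data(dev, skb_put(skb, len));
            skb->protocol = eth_type_trans(skb, dev);

            netif_rx(skb);  /* now lands in enqueue_to_backlog(), possibly remote */
            return IRQ_HANDLED;
    }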
@@ -2465,22 +2646,7 @@ void netif_nit_deliver(struct sk_buff *skb) | |||
2465 | rcu_read_unlock(); | 2646 | rcu_read_unlock(); |
2466 | } | 2647 | } |
2467 | 2648 | ||
2468 | /** | 2649 | static int __netif_receive_skb(struct sk_buff *skb) |
2469 | * netif_receive_skb - process receive buffer from network | ||
2470 | * @skb: buffer to process | ||
2471 | * | ||
2472 | * netif_receive_skb() is the main receive data processing function. | ||
2473 | * It always succeeds. The buffer may be dropped during processing | ||
2474 | * for congestion control or by the protocol layers. | ||
2475 | * | ||
2476 | * This function may only be called from softirq context and interrupts | ||
2477 | * should be enabled. | ||
2478 | * | ||
2479 | * Return values (usually ignored): | ||
2480 | * NET_RX_SUCCESS: no congestion | ||
2481 | * NET_RX_DROP: packet was dropped | ||
2482 | */ | ||
2483 | int netif_receive_skb(struct sk_buff *skb) | ||
2484 | { | 2650 | { |
2485 | struct packet_type *ptype, *pt_prev; | 2651 | struct packet_type *ptype, *pt_prev; |
2486 | struct net_device *orig_dev; | 2652 | struct net_device *orig_dev; |
@@ -2591,6 +2757,37 @@ out: | |||
2591 | rcu_read_unlock(); | 2757 | rcu_read_unlock(); |
2592 | return ret; | 2758 | return ret; |
2593 | } | 2759 | } |
2760 | |||
2761 | /** | ||
2762 | * netif_receive_skb - process receive buffer from network | ||
2763 | * @skb: buffer to process | ||
2764 | * | ||
2765 | * netif_receive_skb() is the main receive data processing function. | ||
2766 | * It always succeeds. The buffer may be dropped during processing | ||
2767 | * for congestion control or by the protocol layers. | ||
2768 | * | ||
2769 | * This function may only be called from softirq context and interrupts | ||
2770 | * should be enabled. | ||
2771 | * | ||
2772 | * Return values (usually ignored): | ||
2773 | * NET_RX_SUCCESS: no congestion | ||
2774 | * NET_RX_DROP: packet was dropped | ||
2775 | */ | ||
2776 | int netif_receive_skb(struct sk_buff *skb) | ||
2777 | { | ||
2778 | #ifdef CONFIG_RPS | ||
2779 | int cpu; | ||
2780 | |||
2781 | cpu = get_rps_cpu(skb->dev, skb); | ||
2782 | |||
2783 | if (cpu < 0) | ||
2784 | return __netif_receive_skb(skb); | ||
2785 | else | ||
2786 | return enqueue_to_backlog(skb, cpu); | ||
2787 | #else | ||
2788 | return __netif_receive_skb(skb); | ||
2789 | #endif | ||
2790 | } | ||
2594 | EXPORT_SYMBOL(netif_receive_skb); | 2791 | EXPORT_SYMBOL(netif_receive_skb); |
2595 | 2792 | ||
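NAPI drivers, by contrast, reach this code from their poll routines, and with this patch the skb may now be bounced to another CPU's backlog instead of being processed locally. A sketch of such a poll routine; the foo_* names are hypothetical:

    /* Hypothetical NAPI poll routine feeding netif_receive_skb() (sketch). */
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    struct foo_priv {
            struct net_device *dev;
            struct napi_struct napi;
    };

    static struct sk_buff *foo_next_rx_skb(struct foo_priv *priv); /* hypothetical */

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int work = 0;

            while (work < budget) {
                    struct sk_buff *skb = foo_next_rx_skb(priv);

                    if (!skb)
                            break;
                    skb->protocol = eth_type_trans(skb, priv->dev);
                    netif_receive_skb(skb); /* may enqueue to a remote CPU via RPS */
                    work++;
            }
            if (work < budget)
                    napi_complete(napi);
            return work;
    }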
2596 | /* Network device is going away, flush any packets still pending */ | 2793 | /* Network device is going away, flush any packets still pending */ |
@@ -2600,11 +2797,13 @@ static void flush_backlog(void *arg) | |||
2600 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 2797 | struct softnet_data *queue = &__get_cpu_var(softnet_data); |
2601 | struct sk_buff *skb, *tmp; | 2798 | struct sk_buff *skb, *tmp; |
2602 | 2799 | ||
2800 | rps_lock(queue); | ||
2603 | skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) | 2801 | skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) |
2604 | if (skb->dev == dev) { | 2802 | if (skb->dev == dev) { |
2605 | __skb_unlink(skb, &queue->input_pkt_queue); | 2803 | __skb_unlink(skb, &queue->input_pkt_queue); |
2606 | kfree_skb(skb); | 2804 | kfree_skb(skb); |
2607 | } | 2805 | } |
2806 | rps_unlock(queue); | ||
2608 | } | 2807 | } |
2609 | 2808 | ||
2610 | static int napi_gro_complete(struct sk_buff *skb) | 2809 | static int napi_gro_complete(struct sk_buff *skb) |
@@ -2918,15 +3117,18 @@ static int process_backlog(struct napi_struct *napi, int quota) | |||
2918 | struct sk_buff *skb; | 3117 | struct sk_buff *skb; |
2919 | 3118 | ||
2920 | local_irq_disable(); | 3119 | local_irq_disable(); |
3120 | rps_lock(queue); | ||
2921 | skb = __skb_dequeue(&queue->input_pkt_queue); | 3121 | skb = __skb_dequeue(&queue->input_pkt_queue); |
2922 | if (!skb) { | 3122 | if (!skb) { |
2923 | __napi_complete(napi); | 3123 | __napi_complete(napi); |
3124 | rps_unlock(queue); | ||
2924 | local_irq_enable(); | 3125 | local_irq_enable(); |
2925 | break; | 3126 | break; |
2926 | } | 3127 | } |
3128 | rps_unlock(queue); | ||
2927 | local_irq_enable(); | 3129 | local_irq_enable(); |
2928 | 3130 | ||
2929 | netif_receive_skb(skb); | 3131 | __netif_receive_skb(skb); |
2930 | } while (++work < quota && jiffies == start_time); | 3132 | } while (++work < quota && jiffies == start_time); |
2931 | 3133 | ||
2932 | return work; | 3134 | return work; |
@@ -3015,6 +3217,24 @@ void netif_napi_del(struct napi_struct *napi) | |||
3015 | } | 3217 | } |
3016 | EXPORT_SYMBOL(netif_napi_del); | 3218 | EXPORT_SYMBOL(netif_napi_del); |
3017 | 3219 | ||
3220 | #ifdef CONFIG_RPS | ||
3221 | /* | ||
3222 | * net_rps_action sends any pending IPIs for RPS. It is only called from | ||
3223 | * softirq context, and interrupts must be enabled. | ||
3224 | */ | ||
3225 | static void net_rps_action(cpumask_t *mask) | ||
3226 | { | ||
3227 | int cpu; | ||
3228 | |||
3229 | /* Send pending IPIs to kick RPS processing on remote CPUs. */ | ||
3230 | for_each_cpu_mask_nr(cpu, *mask) { | ||
3231 | struct softnet_data *queue = &per_cpu(softnet_data, cpu); | ||
3232 | if (cpu_online(cpu)) | ||
3233 | __smp_call_function_single(cpu, &queue->csd, 0); | ||
3234 | } | ||
3235 | cpus_clear(*mask); | ||
3236 | } | ||
3237 | #endif | ||
3018 | 3238 | ||
3019 | static void net_rx_action(struct softirq_action *h) | 3239 | static void net_rx_action(struct softirq_action *h) |
3020 | { | 3240 | { |
@@ -3022,6 +3242,10 @@ static void net_rx_action(struct softirq_action *h) | |||
3022 | unsigned long time_limit = jiffies + 2; | 3242 | unsigned long time_limit = jiffies + 2; |
3023 | int budget = netdev_budget; | 3243 | int budget = netdev_budget; |
3024 | void *have; | 3244 | void *have; |
3245 | #ifdef CONFIG_RPS | ||
3246 | int select; | ||
3247 | struct rps_remote_softirq_cpus *rcpus; | ||
3248 | #endif | ||
3025 | 3249 | ||
3026 | local_irq_disable(); | 3250 | local_irq_disable(); |
3027 | 3251 | ||
@@ -3084,7 +3308,17 @@ static void net_rx_action(struct softirq_action *h) | |||
3084 | netpoll_poll_unlock(have); | 3308 | netpoll_poll_unlock(have); |
3085 | } | 3309 | } |
3086 | out: | 3310 | out: |
3311 | #ifdef CONFIG_RPS | ||
3312 | rcpus = &__get_cpu_var(rps_remote_softirq_cpus); | ||
3313 | select = rcpus->select; | ||
3314 | rcpus->select ^= 1; | ||
3315 | |||
3316 | local_irq_enable(); | ||
3317 | |||
3318 | net_rps_action(&rcpus->mask[select]); | ||
3319 | #else | ||
3087 | local_irq_enable(); | 3320 | local_irq_enable(); |
3321 | #endif | ||
3088 | 3322 | ||
3089 | #ifdef CONFIG_NET_DMA | 3323 | #ifdef CONFIG_NET_DMA |
3090 | /* | 3324 | /* |
@@ -3330,10 +3564,10 @@ static int softnet_seq_show(struct seq_file *seq, void *v) | |||
3330 | { | 3564 | { |
3331 | struct netif_rx_stats *s = v; | 3565 | struct netif_rx_stats *s = v; |
3332 | 3566 | ||
3333 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 3567 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
3334 | s->total, s->dropped, s->time_squeeze, 0, | 3568 | s->total, s->dropped, s->time_squeeze, 0, |
3335 | 0, 0, 0, 0, /* was fastroute */ | 3569 | 0, 0, 0, 0, /* was fastroute */ |
3336 | s->cpu_collision); | 3570 | s->cpu_collision, s->received_rps); |
3337 | return 0; | 3571 | return 0; |
3338 | } | 3572 | } |
3339 | 3573 | ||
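Each softnet_stat row thus grows from nine to ten %08x fields, the last one being received_rps. As a rough illustration, a userspace reader for the widened format could look like this (field meanings per the seq_printf above; a sketch, not a shipped tool):

    /* Parse the ten-column /proc/net/softnet_stat format (sketch). */
    #include <stdio.h>

    int main(void)
    {
            unsigned int f[10];
            FILE *fp = fopen("/proc/net/softnet_stat", "r");

            if (!fp)
                    return 1;
            while (fscanf(fp, "%x %x %x %x %x %x %x %x %x %x",
                          &f[0], &f[1], &f[2], &f[3], &f[4],
                          &f[5], &f[6], &f[7], &f[8], &f[9]) == 10)
                    printf("total=%u dropped=%u squeezed=%u rps=%u\n",
                           f[0], f[1], f[2], f[9]);
            fclose(fp);
            return 0;
    }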
@@ -3556,11 +3790,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) | |||
3556 | 3790 | ||
3557 | slave->master = master; | 3791 | slave->master = master; |
3558 | 3792 | ||
3559 | synchronize_net(); | 3793 | if (old) { |
3560 | 3794 | synchronize_net(); | |
3561 | if (old) | ||
3562 | dev_put(old); | 3795 | dev_put(old); |
3563 | 3796 | } | |
3564 | if (master) | 3797 | if (master) |
3565 | slave->flags |= IFF_SLAVE; | 3798 | slave->flags |= IFF_SLAVE; |
3566 | else | 3799 | else |
@@ -3737,562 +3970,6 @@ void dev_set_rx_mode(struct net_device *dev) | |||
3737 | netif_addr_unlock_bh(dev); | 3970 | netif_addr_unlock_bh(dev); |
3738 | } | 3971 | } |
3739 | 3972 | ||
3740 | /* hw address list handling functions */ | ||
3741 | |||
3742 | static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
3743 | int addr_len, unsigned char addr_type) | ||
3744 | { | ||
3745 | struct netdev_hw_addr *ha; | ||
3746 | int alloc_size; | ||
3747 | |||
3748 | if (addr_len > MAX_ADDR_LEN) | ||
3749 | return -EINVAL; | ||
3750 | |||
3751 | list_for_each_entry(ha, &list->list, list) { | ||
3752 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3753 | ha->type == addr_type) { | ||
3754 | ha->refcount++; | ||
3755 | return 0; | ||
3756 | } | ||
3757 | } | ||
3758 | |||
3759 | |||
3760 | alloc_size = sizeof(*ha); | ||
3761 | if (alloc_size < L1_CACHE_BYTES) | ||
3762 | alloc_size = L1_CACHE_BYTES; | ||
3763 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
3764 | if (!ha) | ||
3765 | return -ENOMEM; | ||
3766 | memcpy(ha->addr, addr, addr_len); | ||
3767 | ha->type = addr_type; | ||
3768 | ha->refcount = 1; | ||
3769 | ha->synced = false; | ||
3770 | list_add_tail_rcu(&ha->list, &list->list); | ||
3771 | list->count++; | ||
3772 | return 0; | ||
3773 | } | ||
3774 | |||
3775 | static void ha_rcu_free(struct rcu_head *head) | ||
3776 | { | ||
3777 | struct netdev_hw_addr *ha; | ||
3778 | |||
3779 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
3780 | kfree(ha); | ||
3781 | } | ||
3782 | |||
3783 | static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
3784 | int addr_len, unsigned char addr_type) | ||
3785 | { | ||
3786 | struct netdev_hw_addr *ha; | ||
3787 | |||
3788 | list_for_each_entry(ha, &list->list, list) { | ||
3789 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3790 | (ha->type == addr_type || !addr_type)) { | ||
3791 | if (--ha->refcount) | ||
3792 | return 0; | ||
3793 | list_del_rcu(&ha->list); | ||
3794 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3795 | list->count--; | ||
3796 | return 0; | ||
3797 | } | ||
3798 | } | ||
3799 | return -ENOENT; | ||
3800 | } | ||
3801 | |||
3802 | static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | ||
3803 | struct netdev_hw_addr_list *from_list, | ||
3804 | int addr_len, | ||
3805 | unsigned char addr_type) | ||
3806 | { | ||
3807 | int err; | ||
3808 | struct netdev_hw_addr *ha, *ha2; | ||
3809 | unsigned char type; | ||
3810 | |||
3811 | list_for_each_entry(ha, &from_list->list, list) { | ||
3812 | type = addr_type ? addr_type : ha->type; | ||
3813 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
3814 | if (err) | ||
3815 | goto unroll; | ||
3816 | } | ||
3817 | return 0; | ||
3818 | |||
3819 | unroll: | ||
3820 | list_for_each_entry(ha2, &from_list->list, list) { | ||
3821 | if (ha2 == ha) | ||
3822 | break; | ||
3823 | type = addr_type ? addr_type : ha2->type; | ||
3824 | __hw_addr_del(to_list, ha2->addr, addr_len, type); | ||
3825 | } | ||
3826 | return err; | ||
3827 | } | ||
3828 | |||
3829 | static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | ||
3830 | struct netdev_hw_addr_list *from_list, | ||
3831 | int addr_len, | ||
3832 | unsigned char addr_type) | ||
3833 | { | ||
3834 | struct netdev_hw_addr *ha; | ||
3835 | unsigned char type; | ||
3836 | |||
3837 | list_for_each_entry(ha, &from_list->list, list) { | ||
3838 | type = addr_type ? addr_type : ha->type; | ||
3839 | __hw_addr_del(to_list, ha->addr, addr_len, type); | ||
3840 | } | ||
3841 | } | ||
3842 | |||
3843 | static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | ||
3844 | struct netdev_hw_addr_list *from_list, | ||
3845 | int addr_len) | ||
3846 | { | ||
3847 | int err = 0; | ||
3848 | struct netdev_hw_addr *ha, *tmp; | ||
3849 | |||
3850 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
3851 | if (!ha->synced) { | ||
3852 | err = __hw_addr_add(to_list, ha->addr, | ||
3853 | addr_len, ha->type); | ||
3854 | if (err) | ||
3855 | break; | ||
3856 | ha->synced = true; | ||
3857 | ha->refcount++; | ||
3858 | } else if (ha->refcount == 1) { | ||
3859 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | ||
3860 | __hw_addr_del(from_list, ha->addr, addr_len, ha->type); | ||
3861 | } | ||
3862 | } | ||
3863 | return err; | ||
3864 | } | ||
3865 | |||
3866 | static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | ||
3867 | struct netdev_hw_addr_list *from_list, | ||
3868 | int addr_len) | ||
3869 | { | ||
3870 | struct netdev_hw_addr *ha, *tmp; | ||
3871 | |||
3872 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
3873 | if (ha->synced) { | ||
3874 | __hw_addr_del(to_list, ha->addr, | ||
3875 | addr_len, ha->type); | ||
3876 | ha->synced = false; | ||
3877 | __hw_addr_del(from_list, ha->addr, | ||
3878 | addr_len, ha->type); | ||
3879 | } | ||
3880 | } | ||
3881 | } | ||
3882 | |||
3883 | static void __hw_addr_flush(struct netdev_hw_addr_list *list) | ||
3884 | { | ||
3885 | struct netdev_hw_addr *ha, *tmp; | ||
3886 | |||
3887 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | ||
3888 | list_del_rcu(&ha->list); | ||
3889 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3890 | } | ||
3891 | list->count = 0; | ||
3892 | } | ||
3893 | |||
3894 | static void __hw_addr_init(struct netdev_hw_addr_list *list) | ||
3895 | { | ||
3896 | INIT_LIST_HEAD(&list->list); | ||
3897 | list->count = 0; | ||
3898 | } | ||
3899 | |||
3900 | /* Device address handling functions */ | ||
3901 | |||
3902 | static void dev_addr_flush(struct net_device *dev) | ||
3903 | { | ||
3904 | /* rtnl_mutex must be held here */ | ||
3905 | |||
3906 | __hw_addr_flush(&dev->dev_addrs); | ||
3907 | dev->dev_addr = NULL; | ||
3908 | } | ||
3909 | |||
3910 | static int dev_addr_init(struct net_device *dev) | ||
3911 | { | ||
3912 | unsigned char addr[MAX_ADDR_LEN]; | ||
3913 | struct netdev_hw_addr *ha; | ||
3914 | int err; | ||
3915 | |||
3916 | /* rtnl_mutex must be held here */ | ||
3917 | |||
3918 | __hw_addr_init(&dev->dev_addrs); | ||
3919 | memset(addr, 0, sizeof(addr)); | ||
3920 | err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), | ||
3921 | NETDEV_HW_ADDR_T_LAN); | ||
3922 | if (!err) { | ||
3923 | /* | ||
3924 | * Get the first (previously created) address from the list | ||
3925 | * and set dev_addr pointer to this location. | ||
3926 | */ | ||
3927 | ha = list_first_entry(&dev->dev_addrs.list, | ||
3928 | struct netdev_hw_addr, list); | ||
3929 | dev->dev_addr = ha->addr; | ||
3930 | } | ||
3931 | return err; | ||
3932 | } | ||
3933 | |||
3934 | /** | ||
3935 | * dev_addr_add - Add a device address | ||
3936 | * @dev: device | ||
3937 | * @addr: address to add | ||
3938 | * @addr_type: address type | ||
3939 | * | ||
3940 | * Add a device address to the device or increase the reference count if | ||
3941 | * it already exists. | ||
3942 | * | ||
3943 | * The caller must hold the rtnl_mutex. | ||
3944 | */ | ||
3945 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
3946 | unsigned char addr_type) | ||
3947 | { | ||
3948 | int err; | ||
3949 | |||
3950 | ASSERT_RTNL(); | ||
3951 | |||
3952 | err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); | ||
3953 | if (!err) | ||
3954 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3955 | return err; | ||
3956 | } | ||
3957 | EXPORT_SYMBOL(dev_addr_add); | ||
3958 | |||
3959 | /** | ||
3960 | * dev_addr_del - Release a device address. | ||
3961 | * @dev: device | ||
3962 | * @addr: address to delete | ||
3963 | * @addr_type: address type | ||
3964 | * | ||
3965 | * Release reference to a device address and remove it from the device | ||
3966 | * if the reference count drops to zero. | ||
3967 | * | ||
3968 | * The caller must hold the rtnl_mutex. | ||
3969 | */ | ||
3970 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
3971 | unsigned char addr_type) | ||
3972 | { | ||
3973 | int err; | ||
3974 | struct netdev_hw_addr *ha; | ||
3975 | |||
3976 | ASSERT_RTNL(); | ||
3977 | |||
3978 | /* | ||
3979 | * We can not remove the first address from the list because | ||
3980 | * dev->dev_addr points to that. | ||
3981 | */ | ||
3982 | ha = list_first_entry(&dev->dev_addrs.list, | ||
3983 | struct netdev_hw_addr, list); | ||
3984 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
3985 | return -ENOENT; | ||
3986 | |||
3987 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | ||
3988 | addr_type); | ||
3989 | if (!err) | ||
3990 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3991 | return err; | ||
3992 | } | ||
3993 | EXPORT_SYMBOL(dev_addr_del); | ||
3994 | |||
3995 | /** | ||
3996 | * dev_addr_add_multiple - Add device addresses from another device | ||
3997 | * @to_dev: device to which addresses will be added | ||
3998 | * @from_dev: device from which addresses will be added | ||
3999 | * @addr_type: address type - 0 means type will be used from from_dev | ||
4000 | * | ||
4001 | * Add device addresses of one device to another. | ||
4002 | * | ||
4003 | * The caller must hold the rtnl_mutex. | ||
4004 | */ | ||
4005 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
4006 | struct net_device *from_dev, | ||
4007 | unsigned char addr_type) | ||
4008 | { | ||
4009 | int err; | ||
4010 | |||
4011 | ASSERT_RTNL(); | ||
4012 | |||
4013 | if (from_dev->addr_len != to_dev->addr_len) | ||
4014 | return -EINVAL; | ||
4015 | err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
4016 | to_dev->addr_len, addr_type); | ||
4017 | if (!err) | ||
4018 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
4019 | return err; | ||
4020 | } | ||
4021 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
4022 | |||
4023 | /** | ||
4024 | * dev_addr_del_multiple - Delete device addresses by another device | ||
4025 | * @to_dev: device where the addresses will be deleted | ||
4026 | * @from_dev: device whose addresses will be deleted from @to_dev | ||
4027 | * @addr_type: address type - 0 means the type will be taken from from_dev | ||
4028 | * | ||
4029 | * Deletes those addresses in @to_dev that are listed in @from_dev. | ||
4030 | * | ||
4031 | * The caller must hold the rtnl_mutex. | ||
4032 | */ | ||
4033 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
4034 | struct net_device *from_dev, | ||
4035 | unsigned char addr_type) | ||
4036 | { | ||
4037 | ASSERT_RTNL(); | ||
4038 | |||
4039 | if (from_dev->addr_len != to_dev->addr_len) | ||
4040 | return -EINVAL; | ||
4041 | __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
4042 | to_dev->addr_len, addr_type); | ||
4043 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
4044 | return 0; | ||
4045 | } | ||
4046 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
4047 | |||
4048 | /* multicast addresses handling functions */ | ||
4049 | |||
4050 | int __dev_addr_delete(struct dev_addr_list **list, int *count, | ||
4051 | void *addr, int alen, int glbl) | ||
4052 | { | ||
4053 | struct dev_addr_list *da; | ||
4054 | |||
4055 | for (; (da = *list) != NULL; list = &da->next) { | ||
4056 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | ||
4057 | alen == da->da_addrlen) { | ||
4058 | if (glbl) { | ||
4059 | int old_glbl = da->da_gusers; | ||
4060 | da->da_gusers = 0; | ||
4061 | if (old_glbl == 0) | ||
4062 | break; | ||
4063 | } | ||
4064 | if (--da->da_users) | ||
4065 | return 0; | ||
4066 | |||
4067 | *list = da->next; | ||
4068 | kfree(da); | ||
4069 | (*count)--; | ||
4070 | return 0; | ||
4071 | } | ||
4072 | } | ||
4073 | return -ENOENT; | ||
4074 | } | ||
4075 | |||
4076 | int __dev_addr_add(struct dev_addr_list **list, int *count, | ||
4077 | void *addr, int alen, int glbl) | ||
4078 | { | ||
4079 | struct dev_addr_list *da; | ||
4080 | |||
4081 | for (da = *list; da != NULL; da = da->next) { | ||
4082 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | ||
4083 | da->da_addrlen == alen) { | ||
4084 | if (glbl) { | ||
4085 | int old_glbl = da->da_gusers; | ||
4086 | da->da_gusers = 1; | ||
4087 | if (old_glbl) | ||
4088 | return 0; | ||
4089 | } | ||
4090 | da->da_users++; | ||
4091 | return 0; | ||
4092 | } | ||
4093 | } | ||
4094 | |||
4095 | da = kzalloc(sizeof(*da), GFP_ATOMIC); | ||
4096 | if (da == NULL) | ||
4097 | return -ENOMEM; | ||
4098 | memcpy(da->da_addr, addr, alen); | ||
4099 | da->da_addrlen = alen; | ||
4100 | da->da_users = 1; | ||
4101 | da->da_gusers = glbl ? 1 : 0; | ||
4102 | da->next = *list; | ||
4103 | *list = da; | ||
4104 | (*count)++; | ||
4105 | return 0; | ||
4106 | } | ||
4107 | |||
4108 | /** | ||
4109 | * dev_unicast_delete - Release secondary unicast address. | ||
4110 | * @dev: device | ||
4111 | * @addr: address to delete | ||
4112 | * | ||
4113 | * Release reference to a secondary unicast address and remove it | ||
4114 | * from the device if the reference count drops to zero. | ||
4115 | * | ||
4116 | * The caller must hold the rtnl_mutex. | ||
4117 | */ | ||
4118 | int dev_unicast_delete(struct net_device *dev, void *addr) | ||
4119 | { | ||
4120 | int err; | ||
4121 | |||
4122 | ASSERT_RTNL(); | ||
4123 | |||
4124 | netif_addr_lock_bh(dev); | ||
4125 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | ||
4126 | NETDEV_HW_ADDR_T_UNICAST); | ||
4127 | if (!err) | ||
4128 | __dev_set_rx_mode(dev); | ||
4129 | netif_addr_unlock_bh(dev); | ||
4130 | return err; | ||
4131 | } | ||
4132 | EXPORT_SYMBOL(dev_unicast_delete); | ||
4133 | |||
4134 | /** | ||
4135 | * dev_unicast_add - add a secondary unicast address | ||
4136 | * @dev: device | ||
4137 | * @addr: address to add | ||
4138 | * | ||
4139 | * Add a secondary unicast address to the device or increase | ||
4140 | * the reference count if it already exists. | ||
4141 | * | ||
4142 | * The caller must hold the rtnl_mutex. | ||
4143 | */ | ||
4144 | int dev_unicast_add(struct net_device *dev, void *addr) | ||
4145 | { | ||
4146 | int err; | ||
4147 | |||
4148 | ASSERT_RTNL(); | ||
4149 | |||
4150 | netif_addr_lock_bh(dev); | ||
4151 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | ||
4152 | NETDEV_HW_ADDR_T_UNICAST); | ||
4153 | if (!err) | ||
4154 | __dev_set_rx_mode(dev); | ||
4155 | netif_addr_unlock_bh(dev); | ||
4156 | return err; | ||
4157 | } | ||
4158 | EXPORT_SYMBOL(dev_unicast_add); | ||
4159 | |||
4160 | int __dev_addr_sync(struct dev_addr_list **to, int *to_count, | ||
4161 | struct dev_addr_list **from, int *from_count) | ||
4162 | { | ||
4163 | struct dev_addr_list *da, *next; | ||
4164 | int err = 0; | ||
4165 | |||
4166 | da = *from; | ||
4167 | while (da != NULL) { | ||
4168 | next = da->next; | ||
4169 | if (!da->da_synced) { | ||
4170 | err = __dev_addr_add(to, to_count, | ||
4171 | da->da_addr, da->da_addrlen, 0); | ||
4172 | if (err < 0) | ||
4173 | break; | ||
4174 | da->da_synced = 1; | ||
4175 | da->da_users++; | ||
4176 | } else if (da->da_users == 1) { | ||
4177 | __dev_addr_delete(to, to_count, | ||
4178 | da->da_addr, da->da_addrlen, 0); | ||
4179 | __dev_addr_delete(from, from_count, | ||
4180 | da->da_addr, da->da_addrlen, 0); | ||
4181 | } | ||
4182 | da = next; | ||
4183 | } | ||
4184 | return err; | ||
4185 | } | ||
4186 | EXPORT_SYMBOL_GPL(__dev_addr_sync); | ||
4187 | |||
4188 | void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | ||
4189 | struct dev_addr_list **from, int *from_count) | ||
4190 | { | ||
4191 | struct dev_addr_list *da, *next; | ||
4192 | |||
4193 | da = *from; | ||
4194 | while (da != NULL) { | ||
4195 | next = da->next; | ||
4196 | if (da->da_synced) { | ||
4197 | __dev_addr_delete(to, to_count, | ||
4198 | da->da_addr, da->da_addrlen, 0); | ||
4199 | da->da_synced = 0; | ||
4200 | __dev_addr_delete(from, from_count, | ||
4201 | da->da_addr, da->da_addrlen, 0); | ||
4202 | } | ||
4203 | da = next; | ||
4204 | } | ||
4205 | } | ||
4206 | EXPORT_SYMBOL_GPL(__dev_addr_unsync); | ||
4207 | |||
4208 | /** | ||
4209 | * dev_unicast_sync - Synchronize device's unicast list to another device | ||
4210 | * @to: destination device | ||
4211 | * @from: source device | ||
4212 | * | ||
4213 | * Add newly added addresses to the destination device and release | ||
4214 | * addresses that have no users left. The source device must be | ||
4215 | * locked by netif_tx_lock_bh. | ||
4216 | * | ||
4217 | * This function is intended to be called from the dev->set_rx_mode | ||
4218 | * function of layered software devices. | ||
4219 | */ | ||
4220 | int dev_unicast_sync(struct net_device *to, struct net_device *from) | ||
4221 | { | ||
4222 | int err = 0; | ||
4223 | |||
4224 | if (to->addr_len != from->addr_len) | ||
4225 | return -EINVAL; | ||
4226 | |||
4227 | netif_addr_lock_bh(to); | ||
4228 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | ||
4229 | if (!err) | ||
4230 | __dev_set_rx_mode(to); | ||
4231 | netif_addr_unlock_bh(to); | ||
4232 | return err; | ||
4233 | } | ||
4234 | EXPORT_SYMBOL(dev_unicast_sync); | ||
4235 | |||
4236 | /** | ||
4237 | * dev_unicast_unsync - Remove synchronized addresses from the destination device | ||
4238 | * @to: destination device | ||
4239 | * @from: source device | ||
4240 | * | ||
4241 | * Remove all addresses that were added to the destination device by | ||
4242 | * dev_unicast_sync(). This function is intended to be called from the | ||
4243 | * dev->stop function of layered software devices. | ||
4244 | */ | ||
4245 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | ||
4246 | { | ||
4247 | if (to->addr_len != from->addr_len) | ||
4248 | return; | ||
4249 | |||
4250 | netif_addr_lock_bh(from); | ||
4251 | netif_addr_lock(to); | ||
4252 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | ||
4253 | __dev_set_rx_mode(to); | ||
4254 | netif_addr_unlock(to); | ||
4255 | netif_addr_unlock_bh(from); | ||
4256 | } | ||
4257 | EXPORT_SYMBOL(dev_unicast_unsync); | ||
4258 | |||
4259 | static void dev_unicast_flush(struct net_device *dev) | ||
4260 | { | ||
4261 | netif_addr_lock_bh(dev); | ||
4262 | __hw_addr_flush(&dev->uc); | ||
4263 | netif_addr_unlock_bh(dev); | ||
4264 | } | ||
4265 | |||
4266 | static void dev_unicast_init(struct net_device *dev) | ||
4267 | { | ||
4268 | __hw_addr_init(&dev->uc); | ||
4269 | } | ||
4270 | |||
4271 | |||
4272 | static void __dev_addr_discard(struct dev_addr_list **list) | ||
4273 | { | ||
4274 | struct dev_addr_list *tmp; | ||
4275 | |||
4276 | while (*list != NULL) { | ||
4277 | tmp = *list; | ||
4278 | *list = tmp->next; | ||
4279 | if (tmp->da_users > tmp->da_gusers) | ||
4280 | printk("__dev_addr_discard: address leakage! " | ||
4281 | "da_users=%d\n", tmp->da_users); | ||
4282 | kfree(tmp); | ||
4283 | } | ||
4284 | } | ||
4285 | |||
4286 | static void dev_addr_discard(struct net_device *dev) | ||
4287 | { | ||
4288 | netif_addr_lock_bh(dev); | ||
4289 | |||
4290 | __dev_addr_discard(&dev->mc_list); | ||
4291 | netdev_mc_count(dev) = 0; | ||
4292 | |||
4293 | netif_addr_unlock_bh(dev); | ||
4294 | } | ||
4295 | |||
4296 | /** | 3973 | /** |
4297 | * dev_get_flags - get flags reported to userspace | 3974 | * dev_get_flags - get flags reported to userspace |
4298 | * @dev: device | 3975 | * @dev: device |
@@ -4603,8 +4280,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4603 | return -EINVAL; | 4280 | return -EINVAL; |
4604 | if (!netif_device_present(dev)) | 4281 | if (!netif_device_present(dev)) |
4605 | return -ENODEV; | 4282 | return -ENODEV; |
4606 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | 4283 | return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); |
4607 | dev->addr_len, 1); | ||
4608 | 4284 | ||
4609 | case SIOCDELMULTI: | 4285 | case SIOCDELMULTI: |
4610 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | 4286 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || |
@@ -4612,8 +4288,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4612 | return -EINVAL; | 4288 | return -EINVAL; |
4613 | if (!netif_device_present(dev)) | 4289 | if (!netif_device_present(dev)) |
4614 | return -ENODEV; | 4290 | return -ENODEV; |
4615 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | 4291 | return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); |
4616 | dev->addr_len, 1); | ||
4617 | 4292 | ||
4618 | case SIOCSIFTXQLEN: | 4293 | case SIOCSIFTXQLEN: |
4619 | if (ifr->ifr_qlen < 0) | 4294 | if (ifr->ifr_qlen < 0) |
@@ -4920,8 +4595,8 @@ static void rollback_registered_many(struct list_head *head) | |||
4920 | /* | 4595 | /* |
4921 | * Flush the unicast and multicast chains | 4596 | * Flush the unicast and multicast chains |
4922 | */ | 4597 | */ |
4923 | dev_unicast_flush(dev); | 4598 | dev_uc_flush(dev); |
4924 | dev_addr_discard(dev); | 4599 | dev_mc_flush(dev); |
4925 | 4600 | ||
4926 | if (dev->netdev_ops->ndo_uninit) | 4601 | if (dev->netdev_ops->ndo_uninit) |
4927 | dev->netdev_ops->ndo_uninit(dev); | 4602 | dev->netdev_ops->ndo_uninit(dev); |
@@ -5070,6 +4745,24 @@ int register_netdevice(struct net_device *dev) | |||
5070 | 4745 | ||
5071 | dev->iflink = -1; | 4746 | dev->iflink = -1; |
5072 | 4747 | ||
4748 | #ifdef CONFIG_RPS | ||
4749 | if (!dev->num_rx_queues) { | ||
4750 | /* | ||
4751 | * Allocate a single RX queue if driver never called | ||
4752 | * alloc_netdev_mq | ||
4753 | */ | ||
4754 | |||
4755 | dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
4756 | if (!dev->_rx) { | ||
4757 | ret = -ENOMEM; | ||
4758 | goto out; | ||
4759 | } | ||
4760 | |||
4761 | dev->_rx->first = dev->_rx; | ||
4762 | atomic_set(&dev->_rx->count, 1); | ||
4763 | dev->num_rx_queues = 1; | ||
4764 | } | ||
4765 | #endif | ||
5073 | /* Init, if this function is available */ | 4766 | /* Init, if this function is available */ |
5074 | if (dev->netdev_ops->ndo_init) { | 4767 | if (dev->netdev_ops->ndo_init) { |
5075 | ret = dev->netdev_ops->ndo_init(dev); | 4768 | ret = dev->netdev_ops->ndo_init(dev); |
@@ -5430,6 +5123,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5430 | struct net_device *dev; | 5123 | struct net_device *dev; |
5431 | size_t alloc_size; | 5124 | size_t alloc_size; |
5432 | struct net_device *p; | 5125 | struct net_device *p; |
5126 | #ifdef CONFIG_RPS | ||
5127 | struct netdev_rx_queue *rx; | ||
5128 | int i; | ||
5129 | #endif | ||
5433 | 5130 | ||
5434 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5131 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
5435 | 5132 | ||
@@ -5455,13 +5152,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5455 | goto free_p; | 5152 | goto free_p; |
5456 | } | 5153 | } |
5457 | 5154 | ||
5155 | #ifdef CONFIG_RPS | ||
5156 | rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
5157 | if (!rx) { | ||
5158 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | ||
5159 | "rx queues.\n"); | ||
5160 | goto free_tx; | ||
5161 | } | ||
5162 | |||
5163 | atomic_set(&rx->count, queue_count); | ||
5164 | |||
5165 | /* | ||
5166 | * Set a pointer to first element in the array which holds the | ||
5167 | * reference count. | ||
5168 | */ | ||
5169 | for (i = 0; i < queue_count; i++) | ||
5170 | rx[i].first = rx; | ||
5171 | #endif | ||
5172 | |||
5458 | dev = PTR_ALIGN(p, NETDEV_ALIGN); | 5173 | dev = PTR_ALIGN(p, NETDEV_ALIGN); |
5459 | dev->padded = (char *)dev - (char *)p; | 5174 | dev->padded = (char *)dev - (char *)p; |
5460 | 5175 | ||
5461 | if (dev_addr_init(dev)) | 5176 | if (dev_addr_init(dev)) |
5462 | goto free_tx; | 5177 | goto free_rx; |
5463 | 5178 | ||
5464 | dev_unicast_init(dev); | 5179 | dev_mc_init(dev); |
5180 | dev_uc_init(dev); | ||
5465 | 5181 | ||
5466 | dev_net_set(dev, &init_net); | 5182 | dev_net_set(dev, &init_net); |
5467 | 5183 | ||
@@ -5469,6 +5185,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5469 | dev->num_tx_queues = queue_count; | 5185 | dev->num_tx_queues = queue_count; |
5470 | dev->real_num_tx_queues = queue_count; | 5186 | dev->real_num_tx_queues = queue_count; |
5471 | 5187 | ||
5188 | #ifdef CONFIG_RPS | ||
5189 | dev->_rx = rx; | ||
5190 | dev->num_rx_queues = queue_count; | ||
5191 | #endif | ||
5192 | |||
5472 | dev->gso_max_size = GSO_MAX_SIZE; | 5193 | dev->gso_max_size = GSO_MAX_SIZE; |
5473 | 5194 | ||
5474 | netdev_init_queues(dev); | 5195 | netdev_init_queues(dev); |
@@ -5483,9 +5204,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5483 | strcpy(dev->name, name); | 5204 | strcpy(dev->name, name); |
5484 | return dev; | 5205 | return dev; |
5485 | 5206 | ||
5207 | free_rx: | ||
5208 | #ifdef CONFIG_RPS | ||
5209 | kfree(rx); | ||
5486 | free_tx: | 5210 | free_tx: |
5211 | #endif | ||
5487 | kfree(tx); | 5212 | kfree(tx); |
5488 | |||
5489 | free_p: | 5213 | free_p: |
5490 | kfree(p); | 5214 | kfree(p); |
5491 | return NULL; | 5215 | return NULL; |
@@ -5687,8 +5411,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5687 | /* | 5411 | /* |
5688 | * Flush the unicast and multicast chains | 5412 | * Flush the unicast and multicast chains |
5689 | */ | 5413 | */ |
5690 | dev_unicast_flush(dev); | 5414 | dev_uc_flush(dev); |
5691 | dev_addr_discard(dev); | 5415 | dev_mc_flush(dev); |
5692 | 5416 | ||
5693 | netdev_unregister_kobject(dev); | 5417 | netdev_unregister_kobject(dev); |
5694 | 5418 | ||
@@ -5988,6 +5712,12 @@ static int __init net_dev_init(void) | |||
5988 | queue->completion_queue = NULL; | 5712 | queue->completion_queue = NULL; |
5989 | INIT_LIST_HEAD(&queue->poll_list); | 5713 | INIT_LIST_HEAD(&queue->poll_list); |
5990 | 5714 | ||
5715 | #ifdef CONFIG_RPS | ||
5716 | queue->csd.func = trigger_softirq; | ||
5717 | queue->csd.info = queue; | ||
5718 | queue->csd.flags = 0; | ||
5719 | #endif | ||
5720 | |||
5991 | queue->backlog.poll = process_backlog; | 5721 | queue->backlog.poll = process_backlog; |
5992 | queue->backlog.weight = weight_p; | 5722 | queue->backlog.weight = weight_p; |
5993 | queue->backlog.gro_list = NULL; | 5723 | queue->backlog.gro_list = NULL; |
@@ -6026,7 +5756,7 @@ subsys_initcall(net_dev_init); | |||
6026 | 5756 | ||
6027 | static int __init initialize_hashrnd(void) | 5757 | static int __init initialize_hashrnd(void) |
6028 | { | 5758 | { |
6029 | get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd)); | 5759 | get_random_bytes(&hashrnd, sizeof(hashrnd)); |
6030 | return 0; | 5760 | return 0; |
6031 | } | 5761 | } |
6032 | 5762 | ||
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c new file mode 100644 index 000000000000..508f9c18992f --- /dev/null +++ b/net/core/dev_addr_lists.c | |||
@@ -0,0 +1,741 @@ | |||
1 | /* | ||
2 | * net/core/dev_addr_lists.c - Functions for handling net device lists | ||
3 | * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This file contains functions for working with unicast, multicast, and device | ||
6 | * address lists. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/netdevice.h> | ||
15 | #include <linux/rtnetlink.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | |||
19 | /* | ||
20 | * General list handling functions | ||
21 | */ | ||
22 | |||
23 | static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, | ||
24 | unsigned char *addr, int addr_len, | ||
25 | unsigned char addr_type, bool global) | ||
26 | { | ||
27 | struct netdev_hw_addr *ha; | ||
28 | int alloc_size; | ||
29 | |||
30 | if (addr_len > MAX_ADDR_LEN) | ||
31 | return -EINVAL; | ||
32 | |||
33 | list_for_each_entry(ha, &list->list, list) { | ||
34 | if (!memcmp(ha->addr, addr, addr_len) && | ||
35 | ha->type == addr_type) { | ||
36 | if (global) { | ||
37 | /* check if addr is already used as global */ | ||
38 | if (ha->global_use) | ||
39 | return 0; | ||
40 | else | ||
41 | ha->global_use = true; | ||
42 | } | ||
43 | ha->refcount++; | ||
44 | return 0; | ||
45 | } | ||
46 | } | ||
47 | |||
48 | |||
49 | alloc_size = sizeof(*ha); | ||
50 | if (alloc_size < L1_CACHE_BYTES) | ||
51 | alloc_size = L1_CACHE_BYTES; | ||
52 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
53 | if (!ha) | ||
54 | return -ENOMEM; | ||
55 | memcpy(ha->addr, addr, addr_len); | ||
56 | ha->type = addr_type; | ||
57 | ha->refcount = 1; | ||
58 | ha->global_use = global; | ||
59 | ha->synced = false; | ||
60 | list_add_tail_rcu(&ha->list, &list->list); | ||
61 | list->count++; | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
66 | int addr_len, unsigned char addr_type) | ||
67 | { | ||
68 | return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); | ||
69 | } | ||
70 | |||
71 | static void ha_rcu_free(struct rcu_head *head) | ||
72 | { | ||
73 | struct netdev_hw_addr *ha; | ||
74 | |||
75 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
76 | kfree(ha); | ||
77 | } | ||
78 | |||
79 | static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, | ||
80 | unsigned char *addr, int addr_len, | ||
81 | unsigned char addr_type, bool global) | ||
82 | { | ||
83 | struct netdev_hw_addr *ha; | ||
84 | |||
85 | list_for_each_entry(ha, &list->list, list) { | ||
86 | if (!memcmp(ha->addr, addr, addr_len) && | ||
87 | (ha->type == addr_type || !addr_type)) { | ||
88 | if (global) { | ||
89 | if (!ha->global_use) | ||
90 | break; | ||
91 | else | ||
92 | ha->global_use = false; | ||
93 | } | ||
94 | if (--ha->refcount) | ||
95 | return 0; | ||
96 | list_del_rcu(&ha->list); | ||
97 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
98 | list->count--; | ||
99 | return 0; | ||
100 | } | ||
101 | } | ||
102 | return -ENOENT; | ||
103 | } | ||
104 | |||
105 | static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
106 | int addr_len, unsigned char addr_type) | ||
107 | { | ||
108 | return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); | ||
109 | } | ||
110 | |||
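Entries in these lists are reference counted: re-adding an existing (address, type) pair only bumps ha->refcount, and only the deletion that drops the count to zero unlinks the entry and frees it via RCU. A small sketch of the observable behavior, using the public unicast wrappers defined later in this file (the address value is illustrative):

    /* Sketch: refcounting behavior of the hw address lists. */
    static unsigned char addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    static void example(struct net_device *dev)
    {
            dev_uc_add(dev, addr);  /* entry created, refcount = 1 */
            dev_uc_add(dev, addr);  /* same entry, refcount = 2 */
            dev_uc_del(dev, addr);  /* refcount = 1, still on the list */
            dev_uc_del(dev, addr);  /* refcount = 0, unlinked and RCU-freed */
    }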
111 | int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | ||
112 | struct netdev_hw_addr_list *from_list, | ||
113 | int addr_len, unsigned char addr_type) | ||
114 | { | ||
115 | int err; | ||
116 | struct netdev_hw_addr *ha, *ha2; | ||
117 | unsigned char type; | ||
118 | |||
119 | list_for_each_entry(ha, &from_list->list, list) { | ||
120 | type = addr_type ? addr_type : ha->type; | ||
121 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
122 | if (err) | ||
123 | goto unroll; | ||
124 | } | ||
125 | return 0; | ||
126 | |||
127 | unroll: | ||
128 | list_for_each_entry(ha2, &from_list->list, list) { | ||
129 | if (ha2 == ha) | ||
130 | break; | ||
131 | type = addr_type ? addr_type : ha2->type; | ||
132 | __hw_addr_del(to_list, ha2->addr, addr_len, type); | ||
133 | } | ||
134 | return err; | ||
135 | } | ||
136 | EXPORT_SYMBOL(__hw_addr_add_multiple); | ||
137 | |||
138 | void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | ||
139 | struct netdev_hw_addr_list *from_list, | ||
140 | int addr_len, unsigned char addr_type) | ||
141 | { | ||
142 | struct netdev_hw_addr *ha; | ||
143 | unsigned char type; | ||
144 | |||
145 | list_for_each_entry(ha, &from_list->list, list) { | ||
146 | type = addr_type ? addr_type : ha->type; | ||
147 | __hw_addr_del(to_list, ha->addr, addr_len, type); | ||
148 | } | ||
149 | } | ||
150 | EXPORT_SYMBOL(__hw_addr_del_multiple); | ||
151 | |||
152 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | ||
153 | struct netdev_hw_addr_list *from_list, | ||
154 | int addr_len) | ||
155 | { | ||
156 | int err = 0; | ||
157 | struct netdev_hw_addr *ha, *tmp; | ||
158 | |||
159 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
160 | if (!ha->synced) { | ||
161 | err = __hw_addr_add(to_list, ha->addr, | ||
162 | addr_len, ha->type); | ||
163 | if (err) | ||
164 | break; | ||
165 | ha->synced = true; | ||
166 | ha->refcount++; | ||
167 | } else if (ha->refcount == 1) { | ||
168 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | ||
169 | __hw_addr_del(from_list, ha->addr, addr_len, ha->type); | ||
170 | } | ||
171 | } | ||
172 | return err; | ||
173 | } | ||
174 | EXPORT_SYMBOL(__hw_addr_sync); | ||
175 | |||
176 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | ||
177 | struct netdev_hw_addr_list *from_list, | ||
178 | int addr_len) | ||
179 | { | ||
180 | struct netdev_hw_addr *ha, *tmp; | ||
181 | |||
182 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
183 | if (ha->synced) { | ||
184 | __hw_addr_del(to_list, ha->addr, | ||
185 | addr_len, ha->type); | ||
186 | ha->synced = false; | ||
187 | __hw_addr_del(from_list, ha->addr, | ||
188 | addr_len, ha->type); | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | EXPORT_SYMBOL(__hw_addr_unsync); | ||
193 | |||
194 | void __hw_addr_flush(struct netdev_hw_addr_list *list) | ||
195 | { | ||
196 | struct netdev_hw_addr *ha, *tmp; | ||
197 | |||
198 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | ||
199 | list_del_rcu(&ha->list); | ||
200 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
201 | } | ||
202 | list->count = 0; | ||
203 | } | ||
204 | EXPORT_SYMBOL(__hw_addr_flush); | ||
205 | |||
206 | void __hw_addr_init(struct netdev_hw_addr_list *list) | ||
207 | { | ||
208 | INIT_LIST_HEAD(&list->list); | ||
209 | list->count = 0; | ||
210 | } | ||
211 | EXPORT_SYMBOL(__hw_addr_init); | ||
212 | |||
213 | /* | ||
214 | * Device address handling functions | ||
215 | */ | ||
216 | |||
217 | /** | ||
218 | * dev_addr_flush - Flush device address list | ||
219 | * @dev: device | ||
220 | * | ||
221 | * Flush device address list and reset ->dev_addr. | ||
222 | * | ||
223 | * The caller must hold the rtnl_mutex. | ||
224 | */ | ||
225 | void dev_addr_flush(struct net_device *dev) | ||
226 | { | ||
227 | /* rtnl_mutex must be held here */ | ||
228 | |||
229 | __hw_addr_flush(&dev->dev_addrs); | ||
230 | dev->dev_addr = NULL; | ||
231 | } | ||
232 | EXPORT_SYMBOL(dev_addr_flush); | ||
233 | |||
234 | /** | ||
235 | * dev_addr_init - Init device address list | ||
236 | * @dev: device | ||
237 | * | ||
238 | * Init device address list and create the first element, | ||
239 | * used by ->dev_addr. | ||
240 | * | ||
241 | * The caller must hold the rtnl_mutex. | ||
242 | */ | ||
243 | int dev_addr_init(struct net_device *dev) | ||
244 | { | ||
245 | unsigned char addr[MAX_ADDR_LEN]; | ||
246 | struct netdev_hw_addr *ha; | ||
247 | int err; | ||
248 | |||
249 | /* rtnl_mutex must be held here */ | ||
250 | |||
251 | __hw_addr_init(&dev->dev_addrs); | ||
252 | memset(addr, 0, sizeof(addr)); | ||
253 | err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), | ||
254 | NETDEV_HW_ADDR_T_LAN); | ||
255 | if (!err) { | ||
256 | /* | ||
257 | * Get the first (previously created) address from the list | ||
258 | * and set dev_addr pointer to this location. | ||
259 | */ | ||
260 | ha = list_first_entry(&dev->dev_addrs.list, | ||
261 | struct netdev_hw_addr, list); | ||
262 | dev->dev_addr = ha->addr; | ||
263 | } | ||
264 | return err; | ||
265 | } | ||
266 | EXPORT_SYMBOL(dev_addr_init); | ||
267 | |||
268 | /** | ||
269 | * dev_addr_add - Add a device address | ||
270 | * @dev: device | ||
271 | * @addr: address to add | ||
272 | * @addr_type: address type | ||
273 | * | ||
274 | * Add a device address to the device or increase the reference count if | ||
275 | * it already exists. | ||
276 | * | ||
277 | * The caller must hold the rtnl_mutex. | ||
278 | */ | ||
279 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
280 | unsigned char addr_type) | ||
281 | { | ||
282 | int err; | ||
283 | |||
284 | ASSERT_RTNL(); | ||
285 | |||
286 | err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); | ||
287 | if (!err) | ||
288 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
289 | return err; | ||
290 | } | ||
291 | EXPORT_SYMBOL(dev_addr_add); | ||
292 | |||
293 | /** | ||
294 | * dev_addr_del - Release a device address. | ||
295 | * @dev: device | ||
296 | * @addr: address to delete | ||
297 | * @addr_type: address type | ||
298 | * | ||
299 | * Release reference to a device address and remove it from the device | ||
300 | * if the reference count drops to zero. | ||
301 | * | ||
302 | * The caller must hold the rtnl_mutex. | ||
303 | */ | ||
304 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
305 | unsigned char addr_type) | ||
306 | { | ||
307 | int err; | ||
308 | struct netdev_hw_addr *ha; | ||
309 | |||
310 | ASSERT_RTNL(); | ||
311 | |||
312 | /* | ||
313 | * We can not remove the first address from the list because | ||
314 | * dev->dev_addr points to that. | ||
315 | */ | ||
316 | ha = list_first_entry(&dev->dev_addrs.list, | ||
317 | struct netdev_hw_addr, list); | ||
318 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
319 | return -ENOENT; | ||
320 | |||
321 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | ||
322 | addr_type); | ||
323 | if (!err) | ||
324 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
325 | return err; | ||
326 | } | ||
327 | EXPORT_SYMBOL(dev_addr_del); | ||
328 | |||
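Both functions assert RTNL, so a typical in-kernel caller wraps them like this (hedged sketch; the foo_* wrapper is hypothetical):

    /* Sketch: adding a secondary hardware address under RTNL. */
    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static int foo_set_extra_addr(struct net_device *dev, unsigned char *addr)
    {
            int err;

            rtnl_lock();
            err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
            rtnl_unlock();
            return err;
    }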
329 | /** | ||
330 | * dev_addr_add_multiple - Add device addresses from another device | ||
331 | * @to_dev: device to which addresses will be added | ||
332 | * @from_dev: device from which addresses will be added | ||
333 | * @addr_type: address type - 0 means type will be used from from_dev | ||
334 | * | ||
335 | * Add device addresses of one device to another. | ||
336 | * | ||
337 | * The caller must hold the rtnl_mutex. | ||
338 | */ | ||
339 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
340 | struct net_device *from_dev, | ||
341 | unsigned char addr_type) | ||
342 | { | ||
343 | int err; | ||
344 | |||
345 | ASSERT_RTNL(); | ||
346 | |||
347 | if (from_dev->addr_len != to_dev->addr_len) | ||
348 | return -EINVAL; | ||
349 | err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
350 | to_dev->addr_len, addr_type); | ||
351 | if (!err) | ||
352 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
353 | return err; | ||
354 | } | ||
355 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
356 | |||
357 | /** | ||
358 | * dev_addr_del_multiple - Delete device addresses by another device | ||
359 | * @to_dev: device where the addresses will be deleted | ||
360 | * @from_dev: device whose addresses will be deleted from @to_dev | ||
361 | * @addr_type: address type - 0 means the type will be taken from from_dev | ||
362 | * | ||
363 | * Deletes those addresses in @to_dev that are listed in @from_dev. | ||
364 | * | ||
365 | * The caller must hold the rtnl_mutex. | ||
366 | */ | ||
367 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
368 | struct net_device *from_dev, | ||
369 | unsigned char addr_type) | ||
370 | { | ||
371 | ASSERT_RTNL(); | ||
372 | |||
373 | if (from_dev->addr_len != to_dev->addr_len) | ||
374 | return -EINVAL; | ||
375 | __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
376 | to_dev->addr_len, addr_type); | ||
377 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
378 | return 0; | ||
379 | } | ||
380 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
381 | |||
382 | /* | ||
383 | * Unicast list handling functions | ||
384 | */ | ||
385 | |||
386 | /** | ||
387 | * dev_uc_add - Add a secondary unicast address | ||
388 | * @dev: device | ||
389 | * @addr: address to add | ||
390 | * | ||
391 | * Add a secondary unicast address to the device or increase | ||
392 | * the reference count if it already exists. | ||
393 | */ | ||
394 | int dev_uc_add(struct net_device *dev, unsigned char *addr) | ||
395 | { | ||
396 | int err; | ||
397 | |||
398 | netif_addr_lock_bh(dev); | ||
399 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | ||
400 | NETDEV_HW_ADDR_T_UNICAST); | ||
401 | if (!err) | ||
402 | __dev_set_rx_mode(dev); | ||
403 | netif_addr_unlock_bh(dev); | ||
404 | return err; | ||
405 | } | ||
406 | EXPORT_SYMBOL(dev_uc_add); | ||
407 | |||
408 | /** | ||
409 | * dev_uc_del - Release secondary unicast address. | ||
410 | * @dev: device | ||
411 | * @addr: address to delete | ||
412 | * | ||
413 | * Release reference to a secondary unicast address and remove it | ||
414 | * from the device if the reference count drops to zero. | ||
415 | */ | ||
416 | int dev_uc_del(struct net_device *dev, unsigned char *addr) | ||
417 | { | ||
418 | int err; | ||
419 | |||
420 | netif_addr_lock_bh(dev); | ||
421 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | ||
422 | NETDEV_HW_ADDR_T_UNICAST); | ||
423 | if (!err) | ||
424 | __dev_set_rx_mode(dev); | ||
425 | netif_addr_unlock_bh(dev); | ||
426 | return err; | ||
427 | } | ||
428 | EXPORT_SYMBOL(dev_uc_del); | ||
429 | |||
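A common consumer of this pair is a stacked device that programs its own MAC address into the underlying device's unicast filter while it is up. The sketch below mirrors that pattern with a hypothetical foo driver; lowerdev stands for the underlying device:

    /* Sketch: stacked device using dev_uc_add()/dev_uc_del() (hypothetical). */
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    struct foo_priv {
            struct net_device *lowerdev;
    };

    static int foo_open(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);
            int err = 0;

            if (compare_ether_addr(dev->dev_addr, priv->lowerdev->dev_addr))
                    err = dev_uc_add(priv->lowerdev, dev->dev_addr);
            return err;
    }

    static int foo_stop(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (compare_ether_addr(dev->dev_addr, priv->lowerdev->dev_addr))
                    dev_uc_del(priv->lowerdev, dev->dev_addr);
            return 0;
    }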
430 | /** | ||
431 | * dev_uc_sync - Synchronize device's unicast list to another device | ||
432 | * @to: destination device | ||
433 | * @from: source device | ||
434 | * | ||
435 | * Add newly added addresses to the destination device and release | ||
436 | * addresses that have no users left. The source device must be | ||
437 | * locked by netif_tx_lock_bh. | ||
438 | * | ||
439 | * This function is intended to be called from the dev->set_rx_mode | ||
440 | * function of layered software devices. | ||
441 | */ | ||
442 | int dev_uc_sync(struct net_device *to, struct net_device *from) | ||
443 | { | ||
444 | int err = 0; | ||
445 | |||
446 | if (to->addr_len != from->addr_len) | ||
447 | return -EINVAL; | ||
448 | |||
449 | netif_addr_lock_bh(to); | ||
450 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | ||
451 | if (!err) | ||
452 | __dev_set_rx_mode(to); | ||
453 | netif_addr_unlock_bh(to); | ||
454 | return err; | ||
455 | } | ||
456 | EXPORT_SYMBOL(dev_uc_sync); | ||
457 | |||
458 | /** | ||
459 | * dev_uc_unsync - Remove synchronized addresses from the destination device | ||
460 | * @to: destination device | ||
461 | * @from: source device | ||
462 | * | ||
463 | * Remove all addresses that were added to the destination device by | ||
464 | * dev_uc_sync(). This function is intended to be called from the | ||
465 | * dev->stop function of layered software devices. | ||
466 | */ | ||
467 | void dev_uc_unsync(struct net_device *to, struct net_device *from) | ||
468 | { | ||
469 | if (to->addr_len != from->addr_len) | ||
470 | return; | ||
471 | |||
472 | netif_addr_lock_bh(from); | ||
473 | netif_addr_lock(to); | ||
474 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | ||
475 | __dev_set_rx_mode(to); | ||
476 | netif_addr_unlock(to); | ||
477 | netif_addr_unlock_bh(from); | ||
478 | } | ||
479 | EXPORT_SYMBOL(dev_uc_unsync); | ||
480 | |||
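Putting the pieces together, a layered device's ndo_set_rx_mode hook would propagate both address lists down roughly as follows (hypothetical foo driver reusing the foo_priv above; dev_mc_sync is defined further down in this file):

    /* Sketch: propagating address lists to the lower device. */
    static void foo_set_rx_mode(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            dev_uc_sync(priv->lowerdev, dev);
            dev_mc_sync(priv->lowerdev, dev);
    }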
481 | /** | ||
482 | * dev_uc_flush - Flush unicast addresses | ||
483 | * @dev: device | ||
484 | * | ||
485 | * Flush unicast addresses. | ||
486 | */ | ||
487 | void dev_uc_flush(struct net_device *dev) | ||
488 | { | ||
489 | netif_addr_lock_bh(dev); | ||
490 | __hw_addr_flush(&dev->uc); | ||
491 | netif_addr_unlock_bh(dev); | ||
492 | } | ||
493 | EXPORT_SYMBOL(dev_uc_flush); | ||
494 | |||
495 | /** | ||
496 | * dev_uc_init - Init unicast address list | ||
497 | * @dev: device | ||
498 | * | ||
499 | * Init unicast address list. | ||
500 | */ | ||
501 | void dev_uc_init(struct net_device *dev) | ||
502 | { | ||
503 | __hw_addr_init(&dev->uc); | ||
504 | } | ||
505 | EXPORT_SYMBOL(dev_uc_init); | ||
506 | |||
507 | /* | ||
508 | * Multicast list handling functions | ||
509 | */ | ||
510 | |||
511 | static int __dev_mc_add(struct net_device *dev, unsigned char *addr, | ||
512 | bool global) | ||
513 | { | ||
514 | int err; | ||
515 | |||
516 | netif_addr_lock_bh(dev); | ||
517 | err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len, | ||
518 | NETDEV_HW_ADDR_T_MULTICAST, global); | ||
519 | if (!err) | ||
520 | __dev_set_rx_mode(dev); | ||
521 | netif_addr_unlock_bh(dev); | ||
522 | return err; | ||
523 | } | ||
524 | /** | ||
525 | * dev_mc_add - Add a multicast address | ||
526 | * @dev: device | ||
527 | * @addr: address to add | ||
528 | * | ||
529 | * Add a multicast address to the device or increase | ||
530 | * the reference count if it already exists. | ||
531 | */ | ||
532 | int dev_mc_add(struct net_device *dev, unsigned char *addr) | ||
533 | { | ||
534 | return __dev_mc_add(dev, addr, false); | ||
535 | } | ||
536 | EXPORT_SYMBOL(dev_mc_add); | ||
537 | |||
538 | /** | ||
539 | * dev_mc_add_global - Add a global multicast address | ||
540 | * @dev: device | ||
541 | * @addr: address to add | ||
542 | * | ||
543 | * Add a global multicast address to the device. | ||
544 | */ | ||
545 | int dev_mc_add_global(struct net_device *dev, unsigned char *addr) | ||
546 | { | ||
547 | return __dev_mc_add(dev, addr, true); | ||
548 | } | ||
549 | EXPORT_SYMBOL(dev_mc_add_global); | ||
550 | |||
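The difference between the two flavors shows up in the refcounting: the global variant, used by the SIOCADDMULTI ioctl path in dev.c, takes at most one reference per address, so repeated userspace adds do not stack, while in-kernel callers of dev_mc_add() take a reference per call. A sketch of the resulting behavior:

    /* Sketch: refcount interaction of global and non-global multicast adds. */
    static void example(struct net_device *dev, unsigned char *mc_addr)
    {
            dev_mc_add(dev, mc_addr);         /* entry created, refcount = 1 */
            dev_mc_add_global(dev, mc_addr);  /* refcount = 2, global_use set */
            dev_mc_add_global(dev, mc_addr);  /* already global: no-op, still 2 */
    }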
551 | static int __dev_mc_del(struct net_device *dev, unsigned char *addr, | ||
552 | bool global) | ||
553 | { | ||
554 | int err; | ||
555 | |||
556 | netif_addr_lock_bh(dev); | ||
557 | err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len, | ||
558 | NETDEV_HW_ADDR_T_MULTICAST, global); | ||
559 | if (!err) | ||
560 | __dev_set_rx_mode(dev); | ||
561 | netif_addr_unlock_bh(dev); | ||
562 | return err; | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * dev_mc_del - Delete a multicast address. | ||
567 | * @dev: device | ||
568 | * @addr: address to delete | ||
569 | * | ||
570 | * Release reference to a multicast address and remove it | ||
571 | * from the device if the reference count drops to zero. | ||
572 | */ | ||
573 | int dev_mc_del(struct net_device *dev, unsigned char *addr) | ||
574 | { | ||
575 | return __dev_mc_del(dev, addr, false); | ||
576 | } | ||
577 | EXPORT_SYMBOL(dev_mc_del); | ||
578 | |||
579 | /** | ||
580 | * dev_mc_del_global - Delete a global multicast address. | ||
581 | * @dev: device | ||
582 | * @addr: address to delete | ||
583 | * | ||
584 | * Release reference to a multicast address and remove it | ||
585 | * from the device if the reference count drops to zero. | ||
586 | */ | ||
587 | int dev_mc_del_global(struct net_device *dev, unsigned char *addr) | ||
588 | { | ||
589 | return __dev_mc_del(dev, addr, true); | ||
590 | } | ||
591 | EXPORT_SYMBOL(dev_mc_del_global); | ||
592 | |||
593 | /** | ||
594 | * dev_mc_sync - Synchronize device's multicast list to another device | ||
595 | * @to: destination device | ||
596 | * @from: source device | ||
597 | * | ||
598 | * Add newly added addresses to the destination device and release | ||
599 | * addresses that have no users left. The source device must be | ||
600 | * locked by netif_tx_lock_bh. | ||
601 | * | ||
602 | * This function is intended to be called from the dev->set_multicast_list | ||
603 | * or dev->set_rx_mode function of layered software devices. | ||
604 | */ | ||
605 | int dev_mc_sync(struct net_device *to, struct net_device *from) | ||
606 | { | ||
607 | int err = 0; | ||
608 | |||
609 | if (to->addr_len != from->addr_len) | ||
610 | return -EINVAL; | ||
611 | |||
612 | netif_addr_lock_bh(to); | ||
613 | err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); | ||
614 | if (!err) | ||
615 | __dev_set_rx_mode(to); | ||
616 | netif_addr_unlock_bh(to); | ||
617 | return err; | ||
618 | } | ||
619 | EXPORT_SYMBOL(dev_mc_sync); | ||
620 | |||
621 | /** | ||
622 | * dev_mc_unsync - Remove synchronized addresses from the destination device | ||
623 | * @to: destination device | ||
624 | * @from: source device | ||
625 | * | ||
626 | * Remove all addresses that were added to the destination device by | ||
627 | * dev_mc_sync(). This function is intended to be called from the | ||
628 | * dev->stop function of layered software devices. | ||
629 | */ | ||
630 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | ||
631 | { | ||
632 | if (to->addr_len != from->addr_len) | ||
633 | return; | ||
634 | |||
635 | netif_addr_lock_bh(from); | ||
636 | netif_addr_lock(to); | ||
637 | __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); | ||
638 | __dev_set_rx_mode(to); | ||
639 | netif_addr_unlock(to); | ||
640 | netif_addr_unlock_bh(from); | ||
641 | } | ||
642 | EXPORT_SYMBOL(dev_mc_unsync); | ||
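And the matching teardown, continuing the hypothetical upper driver from the previous sketch:

    static int upper_stop(struct net_device *dev)
    {
            struct upper_priv *priv = netdev_priv(dev);

            /* strip everything upper_set_rx_mode() synced downwards */
            dev_mc_unsync(priv->lowerdev, dev);
            return 0;
    }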
643 | |||
644 | /** | ||
645 | * dev_mc_flush - Flush multicast addresses | ||
646 | * @dev: device | ||
647 | * | ||
648 | * Flush multicast addresses. | ||
649 | */ | ||
650 | void dev_mc_flush(struct net_device *dev) | ||
651 | { | ||
652 | netif_addr_lock_bh(dev); | ||
653 | __hw_addr_flush(&dev->mc); | ||
654 | netif_addr_unlock_bh(dev); | ||
655 | } | ||
656 | EXPORT_SYMBOL(dev_mc_flush); | ||
657 | |||
658 | /** | ||
659 | * dev_mc_init - Init multicast address list | ||
660 | * @dev: device | ||
661 | * | ||
662 | * Init multicast address list. | ||
663 | */ | ||
664 | void dev_mc_init(struct net_device *dev) | ||
665 | { | ||
666 | __hw_addr_init(&dev->mc); | ||
667 | } | ||
668 | EXPORT_SYMBOL(dev_mc_init); | ||
669 | |||
670 | #ifdef CONFIG_PROC_FS | ||
671 | #include <linux/seq_file.h> | ||
672 | |||
673 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | ||
674 | { | ||
675 | struct netdev_hw_addr *ha; | ||
676 | struct net_device *dev = v; | ||
677 | |||
678 | if (v == SEQ_START_TOKEN) | ||
679 | return 0; | ||
680 | |||
681 | netif_addr_lock_bh(dev); | ||
682 | netdev_for_each_mc_addr(ha, dev) { | ||
683 | int i; | ||
684 | |||
685 | seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, | ||
686 | dev->name, ha->refcount, ha->global_use); | ||
687 | |||
688 | for (i = 0; i < dev->addr_len; i++) | ||
689 | seq_printf(seq, "%02x", ha->addr[i]); | ||
690 | |||
691 | seq_putc(seq, '\n'); | ||
692 | } | ||
693 | netif_addr_unlock_bh(dev); | ||
694 | return 0; | ||
695 | } | ||
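For reference, each record the show handler emits is ifindex, name, refcount, global_use and the address in hex, so a device subscribed to 224.0.0.1 (made-up values) shows up in /proc/net/dev_mcast roughly as:

    2    eth0            1     0     01005e000001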
696 | |||
697 | static const struct seq_operations dev_mc_seq_ops = { | ||
698 | .start = dev_seq_start, | ||
699 | .next = dev_seq_next, | ||
700 | .stop = dev_seq_stop, | ||
701 | .show = dev_mc_seq_show, | ||
702 | }; | ||
703 | |||
704 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | ||
705 | { | ||
706 | return seq_open_net(inode, file, &dev_mc_seq_ops, | ||
707 | sizeof(struct seq_net_private)); | ||
708 | } | ||
709 | |||
710 | static const struct file_operations dev_mc_seq_fops = { | ||
711 | .owner = THIS_MODULE, | ||
712 | .open = dev_mc_seq_open, | ||
713 | .read = seq_read, | ||
714 | .llseek = seq_lseek, | ||
715 | .release = seq_release_net, | ||
716 | }; | ||
717 | |||
718 | #endif | ||
719 | |||
720 | static int __net_init dev_mc_net_init(struct net *net) | ||
721 | { | ||
722 | if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) | ||
723 | return -ENOMEM; | ||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | static void __net_exit dev_mc_net_exit(struct net *net) | ||
728 | { | ||
729 | proc_net_remove(net, "dev_mcast"); | ||
730 | } | ||
731 | |||
732 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | ||
733 | .init = dev_mc_net_init, | ||
734 | .exit = dev_mc_net_exit, | ||
735 | }; | ||
736 | |||
737 | void __init dev_mcast_init(void) | ||
738 | { | ||
739 | register_pernet_subsys(&dev_mc_net_ops); | ||
740 | } | ||
741 | |||
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c deleted file mode 100644 index 3dc295beb483..000000000000 --- a/net/core/dev_mcast.c +++ /dev/null | |||
@@ -1,232 +0,0 @@ | |||
1 | /* | ||
2 | * Linux NET3: Multicast List maintenance. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Tim Kordas <tjk@nostromo.eeap.cwru.edu> | ||
6 | * Richard Underwood <richard@wuzz.demon.co.uk> | ||
7 | * | ||
8 | * Stir fried together from the IP multicast and CAP patches above | ||
9 | * Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
10 | * | ||
11 | * Fixes: | ||
12 | * Alan Cox : Update the device on a real delete | ||
13 | * rather than any time but... | ||
14 | * Alan Cox : IFF_ALLMULTI support. | ||
15 | * Alan Cox : New format set_multicast_list() calls. | ||
16 | * Gleb Natapov : Remove dev_mc_lock. | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/system.h> | ||
27 | #include <linux/bitops.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/socket.h> | ||
33 | #include <linux/sockios.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <linux/errno.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/if_ether.h> | ||
38 | #include <linux/inet.h> | ||
39 | #include <linux/netdevice.h> | ||
40 | #include <linux/etherdevice.h> | ||
41 | #include <linux/proc_fs.h> | ||
42 | #include <linux/seq_file.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <net/net_namespace.h> | ||
45 | #include <net/ip.h> | ||
46 | #include <net/route.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <net/sock.h> | ||
49 | #include <net/arp.h> | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Device multicast list maintenance. | ||
54 | * | ||
55 | * This is used both by IP and by the user level maintenance functions. | ||
56 | * Unlike BSD we maintain a usage count on a given multicast address so | ||
57 | * that a casual user application can add/delete multicasts used by | ||
58 | * protocols without doing damage to the protocols when it deletes the | ||
59 | * entries. It also helps IP as it tracks overlapping maps. | ||
60 | * | ||
61 | * Device mc lists are changed by bh at least if IPv6 is enabled, | ||
62 | * so that it must be bh protected. | ||
63 | * | ||
64 | * We block accesses to device mc filters with netif_tx_lock. | ||
65 | */ | ||
66 | |||
67 | /* | ||
68 | * Delete a device level multicast | ||
69 | */ | ||
70 | |||
71 | int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) | ||
72 | { | ||
73 | int err; | ||
74 | |||
75 | netif_addr_lock_bh(dev); | ||
76 | err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, | ||
77 | addr, alen, glbl); | ||
78 | if (!err) { | ||
79 | /* | ||
80 | * We have altered the list, so the card | ||
81 | * loaded filter is now wrong. Fix it | ||
82 | */ | ||
83 | |||
84 | __dev_set_rx_mode(dev); | ||
85 | } | ||
86 | netif_addr_unlock_bh(dev); | ||
87 | return err; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Add a device level multicast | ||
92 | */ | ||
93 | |||
94 | int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) | ||
95 | { | ||
96 | int err; | ||
97 | |||
98 | netif_addr_lock_bh(dev); | ||
99 | if (alen != dev->addr_len) | ||
100 | err = -EINVAL; | ||
101 | else | ||
102 | err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); | ||
103 | if (!err) | ||
104 | __dev_set_rx_mode(dev); | ||
105 | netif_addr_unlock_bh(dev); | ||
106 | return err; | ||
107 | } | ||
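Comparing the two files, the caller-visible change is mechanical: the old entry points above took an explicit length and a global flag, while the replacements in dev_addr_lists.c infer the length from the device. A sketch of the typical call-site conversion performed elsewhere in this series:

    /* before: length and global flag passed by every caller */
    err = dev_mc_add(dev, addr, ETH_ALEN, 0);
    err = dev_mc_delete(dev, addr, ETH_ALEN, 0);

    /* after: addr_len comes from the device, refcounted by default */
    err = dev_mc_add(dev, addr);
    err = dev_mc_del(dev, addr);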
108 | |||
109 | /** | ||
110 | * dev_mc_sync - Synchronize device's multicast list to another device | ||
111 | * @to: destination device | ||
112 | * @from: source device | ||
113 | * | ||
114 | * Add newly added addresses to the destination device and release | ||
115 | * addresses that have no users left. The source device must be | ||
116 | * locked by netif_tx_lock_bh. | ||
117 | * | ||
118 | * This function is intended to be called from the dev->set_multicast_list | ||
119 | * or dev->set_rx_mode function of layered software devices. | ||
120 | */ | ||
121 | int dev_mc_sync(struct net_device *to, struct net_device *from) | ||
122 | { | ||
123 | int err = 0; | ||
124 | |||
125 | netif_addr_lock_bh(to); | ||
126 | err = __dev_addr_sync(&to->mc_list, &to->mc_count, | ||
127 | &from->mc_list, &from->mc_count); | ||
128 | if (!err) | ||
129 | __dev_set_rx_mode(to); | ||
130 | netif_addr_unlock_bh(to); | ||
131 | |||
132 | return err; | ||
133 | } | ||
134 | EXPORT_SYMBOL(dev_mc_sync); | ||
135 | |||
136 | |||
137 | /** | ||
138 | * dev_mc_unsync - Remove synchronized addresses from the destination | ||
139 | * device | ||
140 | * @to: destination device | ||
141 | * @from: source device | ||
142 | * | ||
143 | * Remove all addresses that were added to the destination device by | ||
144 | * dev_mc_sync(). This function is intended to be called from the | ||
145 | * dev->stop function of layered software devices. | ||
146 | */ | ||
147 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | ||
148 | { | ||
149 | netif_addr_lock_bh(from); | ||
150 | netif_addr_lock(to); | ||
151 | |||
152 | __dev_addr_unsync(&to->mc_list, &to->mc_count, | ||
153 | &from->mc_list, &from->mc_count); | ||
154 | __dev_set_rx_mode(to); | ||
155 | |||
156 | netif_addr_unlock(to); | ||
157 | netif_addr_unlock_bh(from); | ||
158 | } | ||
159 | EXPORT_SYMBOL(dev_mc_unsync); | ||
160 | |||
161 | #ifdef CONFIG_PROC_FS | ||
162 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | ||
163 | { | ||
164 | struct dev_addr_list *m; | ||
165 | struct net_device *dev = v; | ||
166 | |||
167 | if (v == SEQ_START_TOKEN) | ||
168 | return 0; | ||
169 | |||
170 | netif_addr_lock_bh(dev); | ||
171 | for (m = dev->mc_list; m; m = m->next) { | ||
172 | int i; | ||
173 | |||
174 | seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, | ||
175 | dev->name, m->dmi_users, m->dmi_gusers); | ||
176 | |||
177 | for (i = 0; i < m->dmi_addrlen; i++) | ||
178 | seq_printf(seq, "%02x", m->dmi_addr[i]); | ||
179 | |||
180 | seq_putc(seq, '\n'); | ||
181 | } | ||
182 | netif_addr_unlock_bh(dev); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static const struct seq_operations dev_mc_seq_ops = { | ||
187 | .start = dev_seq_start, | ||
188 | .next = dev_seq_next, | ||
189 | .stop = dev_seq_stop, | ||
190 | .show = dev_mc_seq_show, | ||
191 | }; | ||
192 | |||
193 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | ||
194 | { | ||
195 | return seq_open_net(inode, file, &dev_mc_seq_ops, | ||
196 | sizeof(struct seq_net_private)); | ||
197 | } | ||
198 | |||
199 | static const struct file_operations dev_mc_seq_fops = { | ||
200 | .owner = THIS_MODULE, | ||
201 | .open = dev_mc_seq_open, | ||
202 | .read = seq_read, | ||
203 | .llseek = seq_lseek, | ||
204 | .release = seq_release_net, | ||
205 | }; | ||
206 | |||
207 | #endif | ||
208 | |||
209 | static int __net_init dev_mc_net_init(struct net *net) | ||
210 | { | ||
211 | if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) | ||
212 | return -ENOMEM; | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static void __net_exit dev_mc_net_exit(struct net *net) | ||
217 | { | ||
218 | proc_net_remove(net, "dev_mcast"); | ||
219 | } | ||
220 | |||
221 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | ||
222 | .init = dev_mc_net_init, | ||
223 | .exit = dev_mc_net_exit, | ||
224 | }; | ||
225 | |||
226 | void __init dev_mcast_init(void) | ||
227 | { | ||
228 | register_pernet_subsys(&dev_mc_net_ops); | ||
229 | } | ||
230 | |||
231 | EXPORT_SYMBOL(dev_mc_add); | ||
232 | EXPORT_SYMBOL(dev_mc_delete); | ||
diff --git a/net/core/dst.c b/net/core/dst.c index f307bc18f6a0..b8c22f0f9373 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -44,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0); | |||
44 | */ | 44 | */ |
45 | static struct { | 45 | static struct { |
46 | spinlock_t lock; | 46 | spinlock_t lock; |
47 | struct dst_entry *list; | 47 | struct dst_entry *list; |
48 | unsigned long timer_inc; | 48 | unsigned long timer_inc; |
49 | unsigned long timer_expires; | 49 | unsigned long timer_expires; |
50 | } dst_garbage = { | 50 | } dst_garbage = { |
@@ -52,7 +52,7 @@ static struct { | |||
52 | .timer_inc = DST_GC_MAX, | 52 | .timer_inc = DST_GC_MAX, |
53 | }; | 53 | }; |
54 | static void dst_gc_task(struct work_struct *work); | 54 | static void dst_gc_task(struct work_struct *work); |
55 | static void ___dst_free(struct dst_entry * dst); | 55 | static void ___dst_free(struct dst_entry *dst); |
56 | 56 | ||
57 | static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); | 57 | static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); |
58 | 58 | ||
@@ -136,8 +136,8 @@ loop: | |||
136 | } | 136 | } |
137 | expires = dst_garbage.timer_expires; | 137 | expires = dst_garbage.timer_expires; |
138 | /* | 138 | /* |
139 | * if the next desired timer is more than 4 seconds in the future | 139 | * if the next desired timer is more than 4 seconds in the |
140 | * then round the timer to whole seconds | 140 | * future then round the timer to whole seconds |
141 | */ | 141 | */ |
142 | if (expires > 4*HZ) | 142 | if (expires > 4*HZ) |
143 | expires = round_jiffies_relative(expires); | 143 | expires = round_jiffies_relative(expires); |
@@ -152,7 +152,8 @@ loop: | |||
152 | " expires: %lu elapsed: %lu us\n", | 152 | " expires: %lu elapsed: %lu us\n", |
153 | atomic_read(&dst_total), delayed, work_performed, | 153 | atomic_read(&dst_total), delayed, work_performed, |
154 | expires, | 154 | expires, |
155 | elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC); | 155 | elapsed.tv_sec * USEC_PER_SEC + |
156 | elapsed.tv_nsec / NSEC_PER_USEC); | ||
156 | #endif | 157 | #endif |
157 | } | 158 | } |
158 | 159 | ||
@@ -163,9 +164,9 @@ int dst_discard(struct sk_buff *skb) | |||
163 | } | 164 | } |
164 | EXPORT_SYMBOL(dst_discard); | 165 | EXPORT_SYMBOL(dst_discard); |
165 | 166 | ||
166 | void * dst_alloc(struct dst_ops * ops) | 167 | void *dst_alloc(struct dst_ops *ops) |
167 | { | 168 | { |
168 | struct dst_entry * dst; | 169 | struct dst_entry *dst; |
169 | 170 | ||
170 | if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { | 171 | if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { |
171 | if (ops->gc(ops)) | 172 | if (ops->gc(ops)) |
@@ -185,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops) | |||
185 | atomic_inc(&ops->entries); | 186 | atomic_inc(&ops->entries); |
186 | return dst; | 187 | return dst; |
187 | } | 188 | } |
189 | EXPORT_SYMBOL(dst_alloc); | ||
188 | 190 | ||
189 | static void ___dst_free(struct dst_entry * dst) | 191 | static void ___dst_free(struct dst_entry *dst) |
190 | { | 192 | { |
191 | /* The first case (dev==NULL) is required, when | 193 | /* The first case (dev==NULL) is required, when |
192 | protocol module is unloaded. | 194 | protocol module is unloaded. |
193 | */ | 195 | */ |
194 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) { | 196 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) |
195 | dst->input = dst->output = dst_discard; | 197 | dst->input = dst->output = dst_discard; |
196 | } | ||
197 | dst->obsolete = 2; | 198 | dst->obsolete = 2; |
198 | } | 199 | } |
200 | EXPORT_SYMBOL(__dst_free); | ||
199 | 201 | ||
200 | void __dst_free(struct dst_entry * dst) | 202 | void __dst_free(struct dst_entry *dst) |
201 | { | 203 | { |
202 | spin_lock_bh(&dst_garbage.lock); | 204 | spin_lock_bh(&dst_garbage.lock); |
203 | ___dst_free(dst); | 205 | ___dst_free(dst); |
@@ -262,15 +264,16 @@ again: | |||
262 | } | 264 | } |
263 | return NULL; | 265 | return NULL; |
264 | } | 266 | } |
267 | EXPORT_SYMBOL(dst_destroy); | ||
265 | 268 | ||
266 | void dst_release(struct dst_entry *dst) | 269 | void dst_release(struct dst_entry *dst) |
267 | { | 270 | { |
268 | if (dst) { | 271 | if (dst) { |
269 | int newrefcnt; | 272 | int newrefcnt; |
270 | 273 | ||
271 | smp_mb__before_atomic_dec(); | 274 | smp_mb__before_atomic_dec(); |
272 | newrefcnt = atomic_dec_return(&dst->__refcnt); | 275 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
273 | WARN_ON(newrefcnt < 0); | 276 | WARN_ON(newrefcnt < 0); |
274 | } | 277 | } |
275 | } | 278 | } |
276 | EXPORT_SYMBOL(dst_release); | 279 | EXPORT_SYMBOL(dst_release); |
@@ -306,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
306 | } | 309 | } |
307 | } | 310 | } |
308 | 311 | ||
309 | static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) | 312 | static int dst_dev_event(struct notifier_block *this, unsigned long event, |
313 | void *ptr) | ||
310 | { | 314 | { |
311 | struct net_device *dev = ptr; | 315 | struct net_device *dev = ptr; |
312 | struct dst_entry *dst, *last = NULL; | 316 | struct dst_entry *dst, *last = NULL; |
@@ -329,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void | |||
329 | last->next = dst; | 333 | last->next = dst; |
330 | else | 334 | else |
331 | dst_busy_list = dst; | 335 | dst_busy_list = dst; |
332 | for (; dst; dst = dst->next) { | 336 | for (; dst; dst = dst->next) |
333 | dst_ifdown(dst, dev, event != NETDEV_DOWN); | 337 | dst_ifdown(dst, dev, event != NETDEV_DOWN); |
334 | } | ||
335 | mutex_unlock(&dst_gc_mutex); | 338 | mutex_unlock(&dst_gc_mutex); |
336 | break; | 339 | break; |
337 | } | 340 | } |
@@ -346,7 +349,3 @@ void __init dst_init(void) | |||
346 | { | 349 | { |
347 | register_netdevice_notifier(&dst_dev_notifier); | 350 | register_netdevice_notifier(&dst_dev_notifier); |
348 | } | 351 | } |
349 | |||
350 | EXPORT_SYMBOL(__dst_free); | ||
351 | EXPORT_SYMBOL(dst_alloc); | ||
352 | EXPORT_SYMBOL(dst_destroy); | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 9d55c57f318a..1a7db92037fa 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
19 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/uaccess.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <asm/uaccess.h> | ||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * Some useful ethtool_ops methods that're device independent. | 25 | * Some useful ethtool_ops methods that're device independent. |
@@ -31,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev) | |||
31 | { | 31 | { |
32 | return netif_carrier_ok(dev) ? 1 : 0; | 32 | return netif_carrier_ok(dev) ? 1 : 0; |
33 | } | 33 | } |
34 | EXPORT_SYMBOL(ethtool_op_get_link); | ||
34 | 35 | ||
35 | u32 ethtool_op_get_rx_csum(struct net_device *dev) | 36 | u32 ethtool_op_get_rx_csum(struct net_device *dev) |
36 | { | 37 | { |
@@ -63,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) | |||
63 | 64 | ||
64 | return 0; | 65 | return 0; |
65 | } | 66 | } |
67 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | ||
66 | 68 | ||
67 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | 69 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) |
68 | { | 70 | { |
@@ -73,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | |||
73 | 75 | ||
74 | return 0; | 76 | return 0; |
75 | } | 77 | } |
78 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
76 | 79 | ||
77 | u32 ethtool_op_get_sg(struct net_device *dev) | 80 | u32 ethtool_op_get_sg(struct net_device *dev) |
78 | { | 81 | { |
79 | return (dev->features & NETIF_F_SG) != 0; | 82 | return (dev->features & NETIF_F_SG) != 0; |
80 | } | 83 | } |
84 | EXPORT_SYMBOL(ethtool_op_get_sg); | ||
81 | 85 | ||
82 | int ethtool_op_set_sg(struct net_device *dev, u32 data) | 86 | int ethtool_op_set_sg(struct net_device *dev, u32 data) |
83 | { | 87 | { |
@@ -88,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data) | |||
88 | 92 | ||
89 | return 0; | 93 | return 0; |
90 | } | 94 | } |
95 | EXPORT_SYMBOL(ethtool_op_set_sg); | ||
91 | 96 | ||
92 | u32 ethtool_op_get_tso(struct net_device *dev) | 97 | u32 ethtool_op_get_tso(struct net_device *dev) |
93 | { | 98 | { |
94 | return (dev->features & NETIF_F_TSO) != 0; | 99 | return (dev->features & NETIF_F_TSO) != 0; |
95 | } | 100 | } |
101 | EXPORT_SYMBOL(ethtool_op_get_tso); | ||
96 | 102 | ||
97 | int ethtool_op_set_tso(struct net_device *dev, u32 data) | 103 | int ethtool_op_set_tso(struct net_device *dev, u32 data) |
98 | { | 104 | { |
@@ -103,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data) | |||
103 | 109 | ||
104 | return 0; | 110 | return 0; |
105 | } | 111 | } |
112 | EXPORT_SYMBOL(ethtool_op_set_tso); | ||
106 | 113 | ||
107 | u32 ethtool_op_get_ufo(struct net_device *dev) | 114 | u32 ethtool_op_get_ufo(struct net_device *dev) |
108 | { | 115 | { |
109 | return (dev->features & NETIF_F_UFO) != 0; | 116 | return (dev->features & NETIF_F_UFO) != 0; |
110 | } | 117 | } |
118 | EXPORT_SYMBOL(ethtool_op_get_ufo); | ||
111 | 119 | ||
112 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) | 120 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) |
113 | { | 121 | { |
@@ -117,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data) | |||
117 | dev->features &= ~NETIF_F_UFO; | 125 | dev->features &= ~NETIF_F_UFO; |
118 | return 0; | 126 | return 0; |
119 | } | 127 | } |
128 | EXPORT_SYMBOL(ethtool_op_set_ufo); | ||
120 | 129 | ||
121 | /* the following list of flags are the same as their associated | 130 | /* the following list of flags are the same as their associated |
122 | * NETIF_F_xxx values in include/linux/netdevice.h | 131 | * NETIF_F_xxx values in include/linux/netdevice.h |
123 | */ | 132 | */ |
124 | static const u32 flags_dup_features = | 133 | static const u32 flags_dup_features = |
125 | (ETH_FLAG_LRO | ETH_FLAG_NTUPLE); | 134 | (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); |
126 | 135 | ||
127 | u32 ethtool_op_get_flags(struct net_device *dev) | 136 | u32 ethtool_op_get_flags(struct net_device *dev) |
128 | { | 137 | { |
@@ -133,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev) | |||
133 | 142 | ||
134 | return dev->features & flags_dup_features; | 143 | return dev->features & flags_dup_features; |
135 | } | 144 | } |
145 | EXPORT_SYMBOL(ethtool_op_get_flags); | ||
136 | 146 | ||
137 | int ethtool_op_set_flags(struct net_device *dev, u32 data) | 147 | int ethtool_op_set_flags(struct net_device *dev, u32 data) |
138 | { | 148 | { |
@@ -153,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data) | |||
153 | features &= ~NETIF_F_NTUPLE; | 163 | features &= ~NETIF_F_NTUPLE; |
154 | } | 164 | } |
155 | 165 | ||
166 | if (data & ETH_FLAG_RXHASH) | ||
167 | features |= NETIF_F_RXHASH; | ||
168 | else | ||
169 | features &= ~NETIF_F_RXHASH; | ||
170 | |||
156 | dev->features = features; | 171 | dev->features = features; |
157 | return 0; | 172 | return 0; |
158 | } | 173 | } |
174 | EXPORT_SYMBOL(ethtool_op_set_flags); | ||
159 | 175 | ||
160 | void ethtool_ntuple_flush(struct net_device *dev) | 176 | void ethtool_ntuple_flush(struct net_device *dev) |
161 | { | 177 | { |
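ETH_FLAG_RXHASH maps one-to-one onto NETIF_F_RXHASH, so a driver that computes receive hashes only has to advertise the feature and can reuse the generic flag helpers. A hedged sketch (driver names hypothetical):

    static const struct ethtool_ops mydrv_ethtool_ops = {
            .get_flags = ethtool_op_get_flags,
            .set_flags = ethtool_op_set_flags,  /* now covers ETH_FLAG_RXHASH */
    };

    /* probe path: enable the feature and hook up the ops */
    dev->features |= NETIF_F_RXHASH;
    SET_ETHTOOL_OPS(dev, &mydrv_ethtool_ops);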
@@ -201,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) | |||
201 | return dev->ethtool_ops->set_settings(dev, &cmd); | 217 | return dev->ethtool_ops->set_settings(dev, &cmd); |
202 | } | 218 | } |
203 | 219 | ||
204 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | 220 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, |
221 | void __user *useraddr) | ||
205 | { | 222 | { |
206 | struct ethtool_drvinfo info; | 223 | struct ethtool_drvinfo info; |
207 | const struct ethtool_ops *ops = dev->ethtool_ops; | 224 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -241,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _ | |||
241 | } | 258 | } |
242 | 259 | ||
243 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, | 260 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, |
244 | void __user *useraddr) | 261 | void __user *useraddr) |
245 | { | 262 | { |
246 | struct ethtool_sset_info info; | 263 | struct ethtool_sset_info info; |
247 | const struct ethtool_ops *ops = dev->ethtool_ops; | 264 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -300,7 +317,8 @@ out: | |||
300 | return ret; | 317 | return ret; |
301 | } | 318 | } |
302 | 319 | ||
303 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) | 320 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, |
321 | void __user *useraddr) | ||
304 | { | 322 | { |
305 | struct ethtool_rxnfc cmd; | 323 | struct ethtool_rxnfc cmd; |
306 | 324 | ||
@@ -313,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u | |||
313 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); | 331 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); |
314 | } | 332 | } |
315 | 333 | ||
316 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) | 334 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, |
335 | void __user *useraddr) | ||
317 | { | 336 | { |
318 | struct ethtool_rxnfc info; | 337 | struct ethtool_rxnfc info; |
319 | const struct ethtool_ops *ops = dev->ethtool_ops; | 338 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -358,8 +377,8 @@ err_out: | |||
358 | } | 377 | } |
359 | 378 | ||
360 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | 379 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, |
361 | struct ethtool_rx_ntuple_flow_spec *spec, | 380 | struct ethtool_rx_ntuple_flow_spec *spec, |
362 | struct ethtool_rx_ntuple_flow_spec_container *fsc) | 381 | struct ethtool_rx_ntuple_flow_spec_container *fsc) |
363 | { | 382 | { |
364 | 383 | ||
365 | /* don't add filters forever */ | 384 | /* don't add filters forever */ |
@@ -385,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | |||
385 | list->count++; | 404 | list->count++; |
386 | } | 405 | } |
387 | 406 | ||
388 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr) | 407 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, |
408 | void __user *useraddr) | ||
389 | { | 409 | { |
390 | struct ethtool_rx_ntuple cmd; | 410 | struct ethtool_rx_ntuple cmd; |
391 | const struct ethtool_ops *ops = dev->ethtool_ops; | 411 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -510,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
510 | case UDP_V4_FLOW: | 530 | case UDP_V4_FLOW: |
511 | case SCTP_V4_FLOW: | 531 | case SCTP_V4_FLOW: |
512 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 532 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
513 | fsc->fs.h_u.tcp_ip4_spec.ip4src); | 533 | fsc->fs.h_u.tcp_ip4_spec.ip4src); |
514 | p += ETH_GSTRING_LEN; | 534 | p += ETH_GSTRING_LEN; |
515 | num_strings++; | 535 | num_strings++; |
516 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 536 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
517 | fsc->fs.m_u.tcp_ip4_spec.ip4src); | 537 | fsc->fs.m_u.tcp_ip4_spec.ip4src); |
518 | p += ETH_GSTRING_LEN; | 538 | p += ETH_GSTRING_LEN; |
519 | num_strings++; | 539 | num_strings++; |
520 | sprintf(p, "\tDest IP addr: 0x%x\n", | 540 | sprintf(p, "\tDest IP addr: 0x%x\n", |
521 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); | 541 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); |
522 | p += ETH_GSTRING_LEN; | 542 | p += ETH_GSTRING_LEN; |
523 | num_strings++; | 543 | num_strings++; |
524 | sprintf(p, "\tDest IP mask: 0x%x\n", | 544 | sprintf(p, "\tDest IP mask: 0x%x\n", |
525 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); | 545 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); |
526 | p += ETH_GSTRING_LEN; | 546 | p += ETH_GSTRING_LEN; |
527 | num_strings++; | 547 | num_strings++; |
528 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", | 548 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", |
529 | fsc->fs.h_u.tcp_ip4_spec.psrc, | 549 | fsc->fs.h_u.tcp_ip4_spec.psrc, |
530 | fsc->fs.m_u.tcp_ip4_spec.psrc); | 550 | fsc->fs.m_u.tcp_ip4_spec.psrc); |
531 | p += ETH_GSTRING_LEN; | 551 | p += ETH_GSTRING_LEN; |
532 | num_strings++; | 552 | num_strings++; |
533 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", | 553 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", |
534 | fsc->fs.h_u.tcp_ip4_spec.pdst, | 554 | fsc->fs.h_u.tcp_ip4_spec.pdst, |
535 | fsc->fs.m_u.tcp_ip4_spec.pdst); | 555 | fsc->fs.m_u.tcp_ip4_spec.pdst); |
536 | p += ETH_GSTRING_LEN; | 556 | p += ETH_GSTRING_LEN; |
537 | num_strings++; | 557 | num_strings++; |
538 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 558 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
539 | fsc->fs.h_u.tcp_ip4_spec.tos, | 559 | fsc->fs.h_u.tcp_ip4_spec.tos, |
540 | fsc->fs.m_u.tcp_ip4_spec.tos); | 560 | fsc->fs.m_u.tcp_ip4_spec.tos); |
541 | p += ETH_GSTRING_LEN; | 561 | p += ETH_GSTRING_LEN; |
542 | num_strings++; | 562 | num_strings++; |
543 | break; | 563 | break; |
544 | case AH_ESP_V4_FLOW: | 564 | case AH_ESP_V4_FLOW: |
545 | case ESP_V4_FLOW: | 565 | case ESP_V4_FLOW: |
546 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 566 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
547 | fsc->fs.h_u.ah_ip4_spec.ip4src); | 567 | fsc->fs.h_u.ah_ip4_spec.ip4src); |
548 | p += ETH_GSTRING_LEN; | 568 | p += ETH_GSTRING_LEN; |
549 | num_strings++; | 569 | num_strings++; |
550 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 570 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
551 | fsc->fs.m_u.ah_ip4_spec.ip4src); | 571 | fsc->fs.m_u.ah_ip4_spec.ip4src); |
552 | p += ETH_GSTRING_LEN; | 572 | p += ETH_GSTRING_LEN; |
553 | num_strings++; | 573 | num_strings++; |
554 | sprintf(p, "\tDest IP addr: 0x%x\n", | 574 | sprintf(p, "\tDest IP addr: 0x%x\n", |
555 | fsc->fs.h_u.ah_ip4_spec.ip4dst); | 575 | fsc->fs.h_u.ah_ip4_spec.ip4dst); |
556 | p += ETH_GSTRING_LEN; | 576 | p += ETH_GSTRING_LEN; |
557 | num_strings++; | 577 | num_strings++; |
558 | sprintf(p, "\tDest IP mask: 0x%x\n", | 578 | sprintf(p, "\tDest IP mask: 0x%x\n", |
559 | fsc->fs.m_u.ah_ip4_spec.ip4dst); | 579 | fsc->fs.m_u.ah_ip4_spec.ip4dst); |
560 | p += ETH_GSTRING_LEN; | 580 | p += ETH_GSTRING_LEN; |
561 | num_strings++; | 581 | num_strings++; |
562 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", | 582 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", |
563 | fsc->fs.h_u.ah_ip4_spec.spi, | 583 | fsc->fs.h_u.ah_ip4_spec.spi, |
564 | fsc->fs.m_u.ah_ip4_spec.spi); | 584 | fsc->fs.m_u.ah_ip4_spec.spi); |
565 | p += ETH_GSTRING_LEN; | 585 | p += ETH_GSTRING_LEN; |
566 | num_strings++; | 586 | num_strings++; |
567 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 587 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
568 | fsc->fs.h_u.ah_ip4_spec.tos, | 588 | fsc->fs.h_u.ah_ip4_spec.tos, |
569 | fsc->fs.m_u.ah_ip4_spec.tos); | 589 | fsc->fs.m_u.ah_ip4_spec.tos); |
570 | p += ETH_GSTRING_LEN; | 590 | p += ETH_GSTRING_LEN; |
571 | num_strings++; | 591 | num_strings++; |
572 | break; | 592 | break; |
573 | case IP_USER_FLOW: | 593 | case IP_USER_FLOW: |
574 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 594 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
575 | fsc->fs.h_u.raw_ip4_spec.ip4src); | 595 | fsc->fs.h_u.raw_ip4_spec.ip4src); |
576 | p += ETH_GSTRING_LEN; | 596 | p += ETH_GSTRING_LEN; |
577 | num_strings++; | 597 | num_strings++; |
578 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 598 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
579 | fsc->fs.m_u.raw_ip4_spec.ip4src); | 599 | fsc->fs.m_u.raw_ip4_spec.ip4src); |
580 | p += ETH_GSTRING_LEN; | 600 | p += ETH_GSTRING_LEN; |
581 | num_strings++; | 601 | num_strings++; |
582 | sprintf(p, "\tDest IP addr: 0x%x\n", | 602 | sprintf(p, "\tDest IP addr: 0x%x\n", |
583 | fsc->fs.h_u.raw_ip4_spec.ip4dst); | 603 | fsc->fs.h_u.raw_ip4_spec.ip4dst); |
584 | p += ETH_GSTRING_LEN; | 604 | p += ETH_GSTRING_LEN; |
585 | num_strings++; | 605 | num_strings++; |
586 | sprintf(p, "\tDest IP mask: 0x%x\n", | 606 | sprintf(p, "\tDest IP mask: 0x%x\n", |
587 | fsc->fs.m_u.raw_ip4_spec.ip4dst); | 607 | fsc->fs.m_u.raw_ip4_spec.ip4dst); |
588 | p += ETH_GSTRING_LEN; | 608 | p += ETH_GSTRING_LEN; |
589 | num_strings++; | 609 | num_strings++; |
590 | break; | 610 | break; |
591 | case IPV4_FLOW: | 611 | case IPV4_FLOW: |
592 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 612 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
593 | fsc->fs.h_u.usr_ip4_spec.ip4src); | 613 | fsc->fs.h_u.usr_ip4_spec.ip4src); |
594 | p += ETH_GSTRING_LEN; | 614 | p += ETH_GSTRING_LEN; |
595 | num_strings++; | 615 | num_strings++; |
596 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 616 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
597 | fsc->fs.m_u.usr_ip4_spec.ip4src); | 617 | fsc->fs.m_u.usr_ip4_spec.ip4src); |
598 | p += ETH_GSTRING_LEN; | 618 | p += ETH_GSTRING_LEN; |
599 | num_strings++; | 619 | num_strings++; |
600 | sprintf(p, "\tDest IP addr: 0x%x\n", | 620 | sprintf(p, "\tDest IP addr: 0x%x\n", |
601 | fsc->fs.h_u.usr_ip4_spec.ip4dst); | 621 | fsc->fs.h_u.usr_ip4_spec.ip4dst); |
602 | p += ETH_GSTRING_LEN; | 622 | p += ETH_GSTRING_LEN; |
603 | num_strings++; | 623 | num_strings++; |
604 | sprintf(p, "\tDest IP mask: 0x%x\n", | 624 | sprintf(p, "\tDest IP mask: 0x%x\n", |
605 | fsc->fs.m_u.usr_ip4_spec.ip4dst); | 625 | fsc->fs.m_u.usr_ip4_spec.ip4dst); |
606 | p += ETH_GSTRING_LEN; | 626 | p += ETH_GSTRING_LEN; |
607 | num_strings++; | 627 | num_strings++; |
608 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", | 628 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", |
609 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, | 629 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, |
610 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); | 630 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); |
611 | p += ETH_GSTRING_LEN; | 631 | p += ETH_GSTRING_LEN; |
612 | num_strings++; | 632 | num_strings++; |
613 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 633 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
614 | fsc->fs.h_u.usr_ip4_spec.tos, | 634 | fsc->fs.h_u.usr_ip4_spec.tos, |
615 | fsc->fs.m_u.usr_ip4_spec.tos); | 635 | fsc->fs.m_u.usr_ip4_spec.tos); |
616 | p += ETH_GSTRING_LEN; | 636 | p += ETH_GSTRING_LEN; |
617 | num_strings++; | 637 | num_strings++; |
618 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", | 638 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", |
619 | fsc->fs.h_u.usr_ip4_spec.ip_ver, | 639 | fsc->fs.h_u.usr_ip4_spec.ip_ver, |
620 | fsc->fs.m_u.usr_ip4_spec.ip_ver); | 640 | fsc->fs.m_u.usr_ip4_spec.ip_ver); |
621 | p += ETH_GSTRING_LEN; | 641 | p += ETH_GSTRING_LEN; |
622 | num_strings++; | 642 | num_strings++; |
623 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", | 643 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", |
624 | fsc->fs.h_u.usr_ip4_spec.proto, | 644 | fsc->fs.h_u.usr_ip4_spec.proto, |
625 | fsc->fs.m_u.usr_ip4_spec.proto); | 645 | fsc->fs.m_u.usr_ip4_spec.proto); |
626 | p += ETH_GSTRING_LEN; | 646 | p += ETH_GSTRING_LEN; |
627 | num_strings++; | 647 | num_strings++; |
628 | break; | 648 | break; |
629 | }; | 649 | }; |
630 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", | 650 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", |
631 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); | 651 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); |
632 | p += ETH_GSTRING_LEN; | 652 | p += ETH_GSTRING_LEN; |
633 | num_strings++; | 653 | num_strings++; |
634 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); | 654 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); |
@@ -641,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
641 | sprintf(p, "\tAction: Drop\n"); | 661 | sprintf(p, "\tAction: Drop\n"); |
642 | else | 662 | else |
643 | sprintf(p, "\tAction: Direct to queue %d\n", | 663 | sprintf(p, "\tAction: Direct to queue %d\n", |
644 | fsc->fs.action); | 664 | fsc->fs.action); |
645 | p += ETH_GSTRING_LEN; | 665 | p += ETH_GSTRING_LEN; |
646 | num_strings++; | 666 | num_strings++; |
647 | unknown_filter: | 667 | unknown_filter: |
@@ -853,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) | |||
853 | return ret; | 873 | return ret; |
854 | } | 874 | } |
855 | 875 | ||
856 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) | 876 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, |
877 | void __user *useraddr) | ||
857 | { | 878 | { |
858 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; | 879 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; |
859 | 880 | ||
@@ -867,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void | |||
867 | return 0; | 888 | return 0; |
868 | } | 889 | } |
869 | 890 | ||
870 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) | 891 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, |
892 | void __user *useraddr) | ||
871 | { | 893 | { |
872 | struct ethtool_coalesce coalesce; | 894 | struct ethtool_coalesce coalesce; |
873 | 895 | ||
@@ -971,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr) | |||
971 | 993 | ||
972 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); | 994 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); |
973 | } | 995 | } |
996 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | ||
974 | 997 | ||
975 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) | 998 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) |
976 | { | 999 | { |
@@ -1042,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) | |||
1042 | 1065 | ||
1043 | edata.data = dev->features & NETIF_F_GSO; | 1066 | edata.data = dev->features & NETIF_F_GSO; |
1044 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 1067 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
1045 | return -EFAULT; | 1068 | return -EFAULT; |
1046 | return 0; | 1069 | return 0; |
1047 | } | 1070 | } |
1048 | 1071 | ||
@@ -1065,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr) | |||
1065 | 1088 | ||
1066 | edata.data = dev->features & NETIF_F_GRO; | 1089 | edata.data = dev->features & NETIF_F_GRO; |
1067 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 1090 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
1068 | return -EFAULT; | 1091 | return -EFAULT; |
1069 | return 0; | 1092 | return 0; |
1070 | } | 1093 | } |
1071 | 1094 | ||
@@ -1277,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, | |||
1277 | return actor(dev, edata.data); | 1300 | return actor(dev, edata.data); |
1278 | } | 1301 | } |
1279 | 1302 | ||
1280 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) | 1303 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, |
1304 | char __user *useraddr) | ||
1281 | { | 1305 | { |
1282 | struct ethtool_flash efl; | 1306 | struct ethtool_flash efl; |
1283 | 1307 | ||
@@ -1306,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1306 | if (!dev->ethtool_ops) | 1330 | if (!dev->ethtool_ops) |
1307 | return -EOPNOTSUPP; | 1331 | return -EOPNOTSUPP; |
1308 | 1332 | ||
1309 | if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) | 1333 | if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) |
1310 | return -EFAULT; | 1334 | return -EFAULT; |
1311 | 1335 | ||
1312 | /* Allow some commands to be done by anyone */ | 1336 | /* Allow some commands to be done by anyone */ |
1313 | switch(ethcmd) { | 1337 | switch (ethcmd) { |
1314 | case ETHTOOL_GDRVINFO: | 1338 | case ETHTOOL_GDRVINFO: |
1315 | case ETHTOOL_GMSGLVL: | 1339 | case ETHTOOL_GMSGLVL: |
1316 | case ETHTOOL_GCOALESCE: | 1340 | case ETHTOOL_GCOALESCE: |
@@ -1338,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1338 | return -EPERM; | 1362 | return -EPERM; |
1339 | } | 1363 | } |
1340 | 1364 | ||
1341 | if (dev->ethtool_ops->begin) | 1365 | if (dev->ethtool_ops->begin) { |
1342 | if ((rc = dev->ethtool_ops->begin(dev)) < 0) | 1366 | rc = dev->ethtool_ops->begin(dev); |
1367 | if (rc < 0) | ||
1343 | return rc; | 1368 | return rc; |
1344 | 1369 | } | |
1345 | old_features = dev->features; | 1370 | old_features = dev->features; |
1346 | 1371 | ||
1347 | switch (ethcmd) { | 1372 | switch (ethcmd) { |
@@ -1531,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1531 | 1556 | ||
1532 | return rc; | 1557 | return rc; |
1533 | } | 1558 | } |
1534 | |||
1535 | EXPORT_SYMBOL(ethtool_op_get_link); | ||
1536 | EXPORT_SYMBOL(ethtool_op_get_sg); | ||
1537 | EXPORT_SYMBOL(ethtool_op_get_tso); | ||
1538 | EXPORT_SYMBOL(ethtool_op_set_sg); | ||
1539 | EXPORT_SYMBOL(ethtool_op_set_tso); | ||
1540 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | ||
1541 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | ||
1542 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
1543 | EXPORT_SYMBOL(ethtool_op_set_ufo); | ||
1544 | EXPORT_SYMBOL(ethtool_op_get_ufo); | ||
1545 | EXPORT_SYMBOL(ethtool_op_set_flags); | ||
1546 | EXPORT_SYMBOL(ethtool_op_get_flags); | ||
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index d2c3e7dc2e5f..05cce4ec84dd 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -109,7 +109,7 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
109 | struct fib_rules_ops *ops; | 109 | struct fib_rules_ops *ops; |
110 | int err; | 110 | int err; |
111 | 111 | ||
112 | ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL); | 112 | ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); |
113 | if (ops == NULL) | 113 | if (ops == NULL) |
114 | return ERR_PTR(-ENOMEM); | 114 | return ERR_PTR(-ENOMEM); |
115 | 115 | ||
@@ -124,7 +124,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
124 | 124 | ||
125 | return ops; | 125 | return ops; |
126 | } | 126 | } |
127 | |||
128 | EXPORT_SYMBOL_GPL(fib_rules_register); | 127 | EXPORT_SYMBOL_GPL(fib_rules_register); |
129 | 128 | ||
130 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) | 129 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) |
@@ -158,7 +157,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
158 | 157 | ||
159 | call_rcu(&ops->rcu, fib_rules_put_rcu); | 158 | call_rcu(&ops->rcu, fib_rules_put_rcu); |
160 | } | 159 | } |
161 | |||
162 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 160 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
163 | 161 | ||
164 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, | 162 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, |
@@ -221,7 +219,6 @@ out: | |||
221 | 219 | ||
222 | return err; | 220 | return err; |
223 | } | 221 | } |
224 | |||
225 | EXPORT_SYMBOL_GPL(fib_rules_lookup); | 222 | EXPORT_SYMBOL_GPL(fib_rules_lookup); |
226 | 223 | ||
227 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, | 224 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, |
@@ -614,7 +611,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) | |||
614 | break; | 611 | break; |
615 | 612 | ||
616 | cb->args[1] = 0; | 613 | cb->args[1] = 0; |
617 | skip: | 614 | skip: |
618 | idx++; | 615 | idx++; |
619 | } | 616 | } |
620 | rcu_read_unlock(); | 617 | rcu_read_unlock(); |
@@ -686,7 +683,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
686 | struct fib_rules_ops *ops; | 683 | struct fib_rules_ops *ops; |
687 | 684 | ||
688 | ASSERT_RTNL(); | 685 | ASSERT_RTNL(); |
689 | rcu_read_lock(); | ||
690 | 686 | ||
691 | switch (event) { | 687 | switch (event) { |
692 | case NETDEV_REGISTER: | 688 | case NETDEV_REGISTER: |
@@ -700,8 +696,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
700 | break; | 696 | break; |
701 | } | 697 | } |
702 | 698 | ||
703 | rcu_read_unlock(); | ||
704 | |||
705 | return NOTIFY_DONE; | 699 | return NOTIFY_DONE; |
706 | } | 700 | } |
707 | 701 | ||
diff --git a/net/core/flow.c b/net/core/flow.c index 96015871ecea..161900674009 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -26,113 +26,158 @@ | |||
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | 27 | ||
28 | struct flow_cache_entry { | 28 | struct flow_cache_entry { |
29 | struct flow_cache_entry *next; | 29 | union { |
30 | u16 family; | 30 | struct hlist_node hlist; |
31 | u8 dir; | 31 | struct list_head gc_list; |
32 | u32 genid; | 32 | } u; |
33 | struct flowi key; | 33 | u16 family; |
34 | void *object; | 34 | u8 dir; |
35 | atomic_t *object_ref; | 35 | u32 genid; |
36 | struct flowi key; | ||
37 | struct flow_cache_object *object; | ||
36 | }; | 38 | }; |
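Entries no longer pin their result with a bare atomic_t; they hold a struct flow_cache_object that the cache calls back into, get on a hit, check during validation, delete on eviction, as the lookup and GC paths below show. The ops table itself lives in the flow header; assuming it matches the usage in this file, it looks roughly like:

    struct flow_cache_object;

    struct flow_cache_ops {
            struct flow_cache_object *(*get)(struct flow_cache_object *flo);
            int (*check)(struct flow_cache_object *flo);
            void (*delete)(struct flow_cache_object *flo);
    };

    struct flow_cache_object {
            const struct flow_cache_ops *ops;
    };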
37 | 39 | ||
38 | atomic_t flow_cache_genid = ATOMIC_INIT(0); | 40 | struct flow_cache_percpu { |
39 | 41 | struct hlist_head *hash_table; | |
40 | static u32 flow_hash_shift; | 42 | int hash_count; |
41 | #define flow_hash_size (1 << flow_hash_shift) | 43 | u32 hash_rnd; |
42 | static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; | 44 | int hash_rnd_recalc; |
43 | 45 | struct tasklet_struct flush_tasklet; | |
44 | #define flow_table(cpu) (per_cpu(flow_tables, cpu)) | 46 | }; |
45 | |||
46 | static struct kmem_cache *flow_cachep __read_mostly; | ||
47 | 47 | ||
48 | static int flow_lwm, flow_hwm; | 48 | struct flow_flush_info { |
49 | struct flow_cache *cache; | ||
50 | atomic_t cpuleft; | ||
51 | struct completion completion; | ||
52 | }; | ||
49 | 53 | ||
50 | struct flow_percpu_info { | 54 | struct flow_cache { |
51 | int hash_rnd_recalc; | 55 | u32 hash_shift; |
52 | u32 hash_rnd; | 56 | unsigned long order; |
53 | int count; | 57 | struct flow_cache_percpu *percpu; |
58 | struct notifier_block hotcpu_notifier; | ||
59 | int low_watermark; | ||
60 | int high_watermark; | ||
61 | struct timer_list rnd_timer; | ||
54 | }; | 62 | }; |
55 | static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 }; | ||
56 | 63 | ||
57 | #define flow_hash_rnd_recalc(cpu) \ | 64 | atomic_t flow_cache_genid = ATOMIC_INIT(0); |
58 | (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) | 65 | static struct flow_cache flow_cache_global; |
59 | #define flow_hash_rnd(cpu) \ | 66 | static struct kmem_cache *flow_cachep; |
60 | (per_cpu(flow_hash_info, cpu).hash_rnd) | ||
61 | #define flow_count(cpu) \ | ||
62 | (per_cpu(flow_hash_info, cpu).count) | ||
63 | 67 | ||
64 | static struct timer_list flow_hash_rnd_timer; | 68 | static DEFINE_SPINLOCK(flow_cache_gc_lock); |
69 | static LIST_HEAD(flow_cache_gc_list); | ||
65 | 70 | ||
66 | #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) | 71 | #define flow_cache_hash_size(cache) (1 << (cache)->hash_shift) |
67 | 72 | #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) | |
68 | struct flow_flush_info { | ||
69 | atomic_t cpuleft; | ||
70 | struct completion completion; | ||
71 | }; | ||
72 | static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL }; | ||
73 | |||
74 | #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu)) | ||
75 | 73 | ||
76 | static void flow_cache_new_hashrnd(unsigned long arg) | 74 | static void flow_cache_new_hashrnd(unsigned long arg) |
77 | { | 75 | { |
76 | struct flow_cache *fc = (void *) arg; | ||
78 | int i; | 77 | int i; |
79 | 78 | ||
80 | for_each_possible_cpu(i) | 79 | for_each_possible_cpu(i) |
81 | flow_hash_rnd_recalc(i) = 1; | 80 | per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; |
82 | 81 | ||
83 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 82 | fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
84 | add_timer(&flow_hash_rnd_timer); | 83 | add_timer(&fc->rnd_timer); |
84 | } | ||
85 | |||
86 | static int flow_entry_valid(struct flow_cache_entry *fle) | ||
87 | { | ||
88 | if (atomic_read(&flow_cache_genid) != fle->genid) | ||
89 | return 0; | ||
90 | if (fle->object && !fle->object->ops->check(fle->object)) | ||
91 | return 0; | ||
92 | return 1; | ||
85 | } | 93 | } |
86 | 94 | ||
87 | static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) | 95 | static void flow_entry_kill(struct flow_cache_entry *fle) |
88 | { | 96 | { |
89 | if (fle->object) | 97 | if (fle->object) |
90 | atomic_dec(fle->object_ref); | 98 | fle->object->ops->delete(fle->object); |
91 | kmem_cache_free(flow_cachep, fle); | 99 | kmem_cache_free(flow_cachep, fle); |
92 | flow_count(cpu)--; | ||
93 | } | 100 | } |
94 | 101 | ||
95 | static void __flow_cache_shrink(int cpu, int shrink_to) | 102 | static void flow_cache_gc_task(struct work_struct *work) |
96 | { | 103 | { |
97 | struct flow_cache_entry *fle, **flp; | 104 | struct list_head gc_list; |
98 | int i; | 105 | struct flow_cache_entry *fce, *n; |
99 | 106 | ||
100 | for (i = 0; i < flow_hash_size; i++) { | 107 | INIT_LIST_HEAD(&gc_list); |
101 | int k = 0; | 108 | spin_lock_bh(&flow_cache_gc_lock); |
109 | list_splice_tail_init(&flow_cache_gc_list, &gc_list); | ||
110 | spin_unlock_bh(&flow_cache_gc_lock); | ||
102 | 111 | ||
103 | flp = &flow_table(cpu)[i]; | 112 | list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) |
104 | while ((fle = *flp) != NULL && k < shrink_to) { | 113 | flow_entry_kill(fce); |
105 | k++; | 114 | } |
106 | flp = &fle->next; | 115 | static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task); |
107 | } | 116 | |
108 | while ((fle = *flp) != NULL) { | 117 | static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, |
109 | *flp = fle->next; | 118 | int deleted, struct list_head *gc_list) |
110 | flow_entry_kill(cpu, fle); | 119 | { |
111 | } | 120 | if (deleted) { |
121 | fcp->hash_count -= deleted; | ||
122 | spin_lock_bh(&flow_cache_gc_lock); | ||
123 | list_splice_tail(gc_list, &flow_cache_gc_list); | ||
124 | spin_unlock_bh(&flow_cache_gc_lock); | ||
125 | schedule_work(&flow_cache_gc_work); | ||
112 | } | 126 | } |
113 | } | 127 | } |
114 | 128 | ||
115 | static void flow_cache_shrink(int cpu) | 129 | static void __flow_cache_shrink(struct flow_cache *fc, |
130 | struct flow_cache_percpu *fcp, | ||
131 | int shrink_to) | ||
116 | { | 132 | { |
117 | int shrink_to = flow_lwm / flow_hash_size; | 133 | struct flow_cache_entry *fle; |
134 | struct hlist_node *entry, *tmp; | ||
135 | LIST_HEAD(gc_list); | ||
136 | int i, deleted = 0; | ||
137 | |||
138 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | ||
139 | int saved = 0; | ||
140 | |||
141 | hlist_for_each_entry_safe(fle, entry, tmp, | ||
142 | &fcp->hash_table[i], u.hlist) { | ||
143 | if (saved < shrink_to && | ||
144 | flow_entry_valid(fle)) { | ||
145 | saved++; | ||
146 | } else { | ||
147 | deleted++; | ||
148 | hlist_del(&fle->u.hlist); | ||
149 | list_add_tail(&fle->u.gc_list, &gc_list); | ||
150 | } | ||
151 | } | ||
152 | } | ||
118 | 153 | ||
119 | __flow_cache_shrink(cpu, shrink_to); | 154 | flow_cache_queue_garbage(fcp, deleted, &gc_list); |
120 | } | 155 | } |
121 | 156 | ||
122 | static void flow_new_hash_rnd(int cpu) | 157 | static void flow_cache_shrink(struct flow_cache *fc, |
158 | struct flow_cache_percpu *fcp) | ||
123 | { | 159 | { |
124 | get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32)); | 160 | int shrink_to = fc->low_watermark / flow_cache_hash_size(fc); |
125 | flow_hash_rnd_recalc(cpu) = 0; | ||
126 | 161 | ||
127 | __flow_cache_shrink(cpu, 0); | 162 | __flow_cache_shrink(fc, fcp, shrink_to); |
128 | } | 163 | } |
129 | 164 | ||
130 | static u32 flow_hash_code(struct flowi *key, int cpu) | 165 | static void flow_new_hash_rnd(struct flow_cache *fc, |
166 | struct flow_cache_percpu *fcp) | ||
167 | { | ||
168 | get_random_bytes(&fcp->hash_rnd, sizeof(u32)); | ||
169 | fcp->hash_rnd_recalc = 0; | ||
170 | __flow_cache_shrink(fc, fcp, 0); | ||
171 | } | ||
172 | |||
173 | static u32 flow_hash_code(struct flow_cache *fc, | ||
174 | struct flow_cache_percpu *fcp, | ||
175 | struct flowi *key) | ||
131 | { | 176 | { |
132 | u32 *k = (u32 *) key; | 177 | u32 *k = (u32 *) key; |
133 | 178 | ||
134 | return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) & | 179 | return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) |
135 | (flow_hash_size - 1)); | 180 | & (flow_cache_hash_size(fc) - 1)); |
136 | } | 181 | } |
137 | 182 | ||
138 | #if (BITS_PER_LONG == 64) | 183 | #if (BITS_PER_LONG == 64) |
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2) | |||
165 | return 0; | 210 | return 0; |
166 | } | 211 | } |
167 | 212 | ||
168 | void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, | 213 | struct flow_cache_object * |
169 | flow_resolve_t resolver) | 214 | flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, |
215 | flow_resolve_t resolver, void *ctx) | ||
170 | { | 216 | { |
171 | struct flow_cache_entry *fle, **head; | 217 | struct flow_cache *fc = &flow_cache_global; |
218 | struct flow_cache_percpu *fcp; | ||
219 | struct flow_cache_entry *fle, *tfle; | ||
220 | struct hlist_node *entry; | ||
221 | struct flow_cache_object *flo; | ||
172 | unsigned int hash; | 222 | unsigned int hash; |
173 | int cpu; | ||
174 | 223 | ||
175 | local_bh_disable(); | 224 | local_bh_disable(); |
176 | cpu = smp_processor_id(); | 225 | fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); |
177 | 226 | ||
178 | fle = NULL; | 227 | fle = NULL; |
228 | flo = NULL; | ||
179 | /* Packet really early in init? Making flow_cache_init a | 229 | /* Packet really early in init? Making flow_cache_init a |
180 | * pre-smp initcall would solve this. --RR */ | 230 | * pre-smp initcall would solve this. --RR */ |
181 | if (!flow_table(cpu)) | 231 | if (!fcp->hash_table) |
182 | goto nocache; | 232 | goto nocache; |
183 | 233 | ||
184 | if (flow_hash_rnd_recalc(cpu)) | 234 | if (fcp->hash_rnd_recalc) |
185 | flow_new_hash_rnd(cpu); | 235 | flow_new_hash_rnd(fc, fcp); |
186 | hash = flow_hash_code(key, cpu); | ||
187 | 236 | ||
188 | head = &flow_table(cpu)[hash]; | 237 | hash = flow_hash_code(fc, fcp, key); |
189 | for (fle = *head; fle; fle = fle->next) { | 238 | hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { |
190 | if (fle->family == family && | 239 | if (tfle->family == family && |
191 | fle->dir == dir && | 240 | tfle->dir == dir && |
192 | flow_key_compare(key, &fle->key) == 0) { | 241 | flow_key_compare(key, &tfle->key) == 0) { |
193 | if (fle->genid == atomic_read(&flow_cache_genid)) { | 242 | fle = tfle; |
194 | void *ret = fle->object; | ||
195 | |||
196 | if (ret) | ||
197 | atomic_inc(fle->object_ref); | ||
198 | local_bh_enable(); | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | break; | 243 | break; |
203 | } | 244 | } |
204 | } | 245 | } |
205 | 246 | ||
206 | if (!fle) { | 247 | if (unlikely(!fle)) { |
207 | if (flow_count(cpu) > flow_hwm) | 248 | if (fcp->hash_count > fc->high_watermark) |
208 | flow_cache_shrink(cpu); | 249 | flow_cache_shrink(fc, fcp); |
209 | 250 | ||
210 | fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); | 251 | fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); |
211 | if (fle) { | 252 | if (fle) { |
212 | fle->next = *head; | ||
213 | *head = fle; | ||
214 | fle->family = family; | 253 | fle->family = family; |
215 | fle->dir = dir; | 254 | fle->dir = dir; |
216 | memcpy(&fle->key, key, sizeof(*key)); | 255 | memcpy(&fle->key, key, sizeof(*key)); |
217 | fle->object = NULL; | 256 | fle->object = NULL; |
218 | flow_count(cpu)++; | 257 | hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); |
258 | fcp->hash_count++; | ||
219 | } | 259 | } |
260 | } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) { | ||
261 | flo = fle->object; | ||
262 | if (!flo) | ||
263 | goto ret_object; | ||
264 | flo = flo->ops->get(flo); | ||
265 | if (flo) | ||
266 | goto ret_object; | ||
267 | } else if (fle->object) { | ||
268 | flo = fle->object; | ||
269 | flo->ops->delete(flo); | ||
270 | fle->object = NULL; | ||
220 | } | 271 | } |
221 | 272 | ||
222 | nocache: | 273 | nocache: |
223 | { | 274 | flo = NULL; |
224 | int err; | 275 | if (fle) { |
225 | void *obj; | 276 | flo = fle->object; |
226 | atomic_t *obj_ref; | 277 | fle->object = NULL; |
227 | |||
228 | err = resolver(net, key, family, dir, &obj, &obj_ref); | ||
229 | |||
230 | if (fle && !err) { | ||
231 | fle->genid = atomic_read(&flow_cache_genid); | ||
232 | |||
233 | if (fle->object) | ||
234 | atomic_dec(fle->object_ref); | ||
235 | |||
236 | fle->object = obj; | ||
237 | fle->object_ref = obj_ref; | ||
238 | if (obj) | ||
239 | atomic_inc(fle->object_ref); | ||
240 | } | ||
241 | local_bh_enable(); | ||
242 | |||
243 | if (err) | ||
244 | obj = ERR_PTR(err); | ||
245 | return obj; | ||
246 | } | 278 | } |
279 | flo = resolver(net, key, family, dir, flo, ctx); | ||
280 | if (fle) { | ||
281 | fle->genid = atomic_read(&flow_cache_genid); | ||
282 | if (!IS_ERR(flo)) | ||
283 | fle->object = flo; | ||
284 | else | ||
285 | fle->genid--; | ||
286 | } else { | ||
287 | if (flo && !IS_ERR(flo)) | ||
288 | flo->ops->delete(flo); | ||
289 | } | ||
290 | ret_object: | ||
291 | local_bh_enable(); | ||
292 | return flo; | ||
247 | } | 293 | } |
248 | 294 | ||
249 | static void flow_cache_flush_tasklet(unsigned long data) | 295 | static void flow_cache_flush_tasklet(unsigned long data) |
250 | { | 296 | { |
251 | struct flow_flush_info *info = (void *)data; | 297 | struct flow_flush_info *info = (void *)data; |
252 | int i; | 298 | struct flow_cache *fc = info->cache; |
253 | int cpu; | 299 | struct flow_cache_percpu *fcp; |
254 | 300 | struct flow_cache_entry *fle; | |
255 | cpu = smp_processor_id(); | 301 | struct hlist_node *entry, *tmp; |
256 | for (i = 0; i < flow_hash_size; i++) { | 302 | LIST_HEAD(gc_list); |
257 | struct flow_cache_entry *fle; | 303 | int i, deleted = 0; |
258 | 304 | ||
259 | fle = flow_table(cpu)[i]; | 305 | fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); |
260 | for (; fle; fle = fle->next) { | 306 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
261 | unsigned genid = atomic_read(&flow_cache_genid); | 307 | hlist_for_each_entry_safe(fle, entry, tmp, |
262 | 308 | &fcp->hash_table[i], u.hlist) { | |
263 | if (!fle->object || fle->genid == genid) | 309 | if (flow_entry_valid(fle)) |
264 | continue; | 310 | continue; |
265 | 311 | ||
266 | fle->object = NULL; | 312 | deleted++; |
267 | atomic_dec(fle->object_ref); | 313 | hlist_del(&fle->u.hlist); |
314 | list_add_tail(&fle->u.gc_list, &gc_list); | ||
268 | } | 315 | } |
269 | } | 316 | } |
270 | 317 | ||
318 | flow_cache_queue_garbage(fcp, deleted, &gc_list); | ||
319 | |||
271 | if (atomic_dec_and_test(&info->cpuleft)) | 320 | if (atomic_dec_and_test(&info->cpuleft)) |
272 | complete(&info->completion); | 321 | complete(&info->completion); |
273 | } | 322 | } |
274 | 323 | ||
275 | static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__)); | ||
276 | static void flow_cache_flush_per_cpu(void *data) | 324 | static void flow_cache_flush_per_cpu(void *data) |
277 | { | 325 | { |
278 | struct flow_flush_info *info = data; | 326 | struct flow_flush_info *info = data; |
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) | |||
280 | struct tasklet_struct *tasklet; | 328 | struct tasklet_struct *tasklet; |
281 | 329 | ||
282 | cpu = smp_processor_id(); | 330 | cpu = smp_processor_id(); |
283 | 331 | tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet; | |
284 | tasklet = flow_flush_tasklet(cpu); | ||
285 | tasklet->data = (unsigned long)info; | 332 | tasklet->data = (unsigned long)info; |
286 | tasklet_schedule(tasklet); | 333 | tasklet_schedule(tasklet); |
287 | } | 334 | } |
@@ -294,6 +341,7 @@ void flow_cache_flush(void) | |||
294 | /* Don't want cpus going down or up during this. */ | 341 | /* Don't want cpus going down or up during this. */ |
295 | get_online_cpus(); | 342 | get_online_cpus(); |
296 | mutex_lock(&flow_flush_sem); | 343 | mutex_lock(&flow_flush_sem); |
344 | info.cache = &flow_cache_global; | ||
297 | atomic_set(&info.cpuleft, num_online_cpus()); | 345 | atomic_set(&info.cpuleft, num_online_cpus()); |
298 | init_completion(&info.completion); | 346 | init_completion(&info.completion); |
299 | 347 | ||
@@ -307,62 +355,75 @@ void flow_cache_flush(void) | |||
307 | put_online_cpus(); | 355 | put_online_cpus(); |
308 | } | 356 | } |
309 | 357 | ||
310 | static void __init flow_cache_cpu_prepare(int cpu) | 358 | static void __init flow_cache_cpu_prepare(struct flow_cache *fc, |
359 | struct flow_cache_percpu *fcp) | ||
311 | { | 360 | { |
312 | struct tasklet_struct *tasklet; | 361 | fcp->hash_table = (struct hlist_head *) |
313 | unsigned long order; | 362 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order); |
314 | 363 | if (!fcp->hash_table) | |
315 | for (order = 0; | 364 | panic("NET: failed to allocate flow cache order %lu\n", fc->order); |
316 | (PAGE_SIZE << order) < | 365 | |
317 | (sizeof(struct flow_cache_entry *)*flow_hash_size); | 366 | fcp->hash_rnd_recalc = 1; |
318 | order++) | 367 | fcp->hash_count = 0; |
319 | /* NOTHING */; | 368 | tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0); |
320 | |||
321 | flow_table(cpu) = (struct flow_cache_entry **) | ||
322 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); | ||
323 | if (!flow_table(cpu)) | ||
324 | panic("NET: failed to allocate flow cache order %lu\n", order); | ||
325 | |||
326 | flow_hash_rnd_recalc(cpu) = 1; | ||
327 | flow_count(cpu) = 0; | ||
328 | |||
329 | tasklet = flow_flush_tasklet(cpu); | ||
330 | tasklet_init(tasklet, flow_cache_flush_tasklet, 0); | ||
331 | } | 369 | } |
332 | 370 | ||
333 | static int flow_cache_cpu(struct notifier_block *nfb, | 371 | static int flow_cache_cpu(struct notifier_block *nfb, |
334 | unsigned long action, | 372 | unsigned long action, |
335 | void *hcpu) | 373 | void *hcpu) |
336 | { | 374 | { |
375 | struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier); | ||
376 | int cpu = (unsigned long) hcpu; | ||
377 | struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); | ||
378 | |||
337 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) | 379 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) |
338 | __flow_cache_shrink((unsigned long)hcpu, 0); | 380 | __flow_cache_shrink(fc, fcp, 0); |
339 | return NOTIFY_OK; | 381 | return NOTIFY_OK; |
340 | } | 382 | } |
341 | 383 | ||
342 | static int __init flow_cache_init(void) | 384 | static int flow_cache_init(struct flow_cache *fc) |
343 | { | 385 | { |
386 | unsigned long order; | ||
344 | int i; | 387 | int i; |
345 | 388 | ||
346 | flow_cachep = kmem_cache_create("flow_cache", | 389 | fc->hash_shift = 10; |
347 | sizeof(struct flow_cache_entry), | 390 | fc->low_watermark = 2 * flow_cache_hash_size(fc); |
348 | 0, SLAB_PANIC, | 391 | fc->high_watermark = 4 * flow_cache_hash_size(fc); |
349 | NULL); | 392 | |
350 | flow_hash_shift = 10; | 393 | for (order = 0; |
351 | flow_lwm = 2 * flow_hash_size; | 394 | (PAGE_SIZE << order) < |
352 | flow_hwm = 4 * flow_hash_size; | 395 | (sizeof(struct hlist_head)*flow_cache_hash_size(fc)); |
396 | order++) | ||
397 | /* NOTHING */; | ||
398 | fc->order = order; | ||
399 | fc->percpu = alloc_percpu(struct flow_cache_percpu); | ||
353 | 400 | ||
354 | setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0); | 401 | setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, |
355 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 402 | (unsigned long) fc); |
356 | add_timer(&flow_hash_rnd_timer); | 403 | fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
404 | add_timer(&fc->rnd_timer); | ||
357 | 405 | ||
358 | for_each_possible_cpu(i) | 406 | for_each_possible_cpu(i) |
359 | flow_cache_cpu_prepare(i); | 407 | flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i)); |
408 | |||
409 | fc->hotcpu_notifier = (struct notifier_block){ | ||
410 | .notifier_call = flow_cache_cpu, | ||
411 | }; | ||
412 | register_hotcpu_notifier(&fc->hotcpu_notifier); | ||
360 | 413 | ||
361 | hotcpu_notifier(flow_cache_cpu, 0); | ||
362 | return 0; | 414 | return 0; |
363 | } | 415 | } |
364 | 416 | ||
365 | module_init(flow_cache_init); | 417 | static int __init flow_cache_init_global(void) |
418 | { | ||
419 | flow_cachep = kmem_cache_create("flow_cache", | ||
420 | sizeof(struct flow_cache_entry), | ||
421 | 0, SLAB_PANIC, NULL); | ||
422 | |||
423 | return flow_cache_init(&flow_cache_global); | ||
424 | } | ||
425 | |||
426 | module_init(flow_cache_init_global); | ||
366 | 427 | ||
367 | EXPORT_SYMBOL(flow_cache_genid); | 428 | EXPORT_SYMBOL(flow_cache_genid); |
368 | EXPORT_SYMBOL(flow_cache_lookup); | 429 | EXPORT_SYMBOL(flow_cache_lookup); |
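The flow.c rework above moves the per-cpu globals into a struct flow_cache with per-cpu hlist tables, and changes what the cache stores: an entry now holds a struct flow_cache_object whose ops (the ->get and ->delete calls visible in the lookup hunks) manage its lifetime, and the resolver callback returns such an object instead of filling in an object/refcount pair. A minimal sketch of a cacheable object and resolver, assuming the flow_cache_object, flow_cache_ops and resolver typedef this patch introduces in <net/flow.h>; all demo_* names are illustrative:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow.h>           /* flow_cache_object/flow_cache_ops, post-patch */

struct demo_flow_obj {
        struct flow_cache_object flo;   /* embedded; the cache stores &obj->flo */
        atomic_t refcnt;
};

static struct flow_cache_object *demo_get(struct flow_cache_object *flo)
{
        struct demo_flow_obj *obj = container_of(flo, struct demo_flow_obj, flo);

        atomic_inc(&obj->refcnt);
        return flo;     /* returning NULL instead hands the old object back to the resolver */
}

static int demo_check(struct flow_cache_object *flo)
{
        return 1;       /* object never goes stale in this sketch */
}

static void demo_delete(struct flow_cache_object *flo)
{
        struct demo_flow_obj *obj = container_of(flo, struct demo_flow_obj, flo);

        if (atomic_dec_and_test(&obj->refcnt))
                kfree(obj);
}

static const struct flow_cache_ops demo_ops = {
        .get    = demo_get,
        .check  = demo_check,
        .delete = demo_delete,
};

static struct flow_cache_object *demo_resolver(struct net *net, struct flowi *key,
                                               u16 family, u8 dir,
                                               struct flow_cache_object *oldflo,
                                               void *ctx)
{
        struct demo_flow_obj *obj;

        if (oldflo)     /* stale or released entry handed back for disposal */
                demo_delete(oldflo);

        obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
        if (!obj)
                return ERR_PTR(-ENOBUFS);
        obj->flo.ops = &demo_ops;
        atomic_set(&obj->refcnt, 1);
        return &obj->flo;
}

As the lookup above shows, a NULL return from ->get falls through to the resolver with the old object as oldflo, and a resolver error (fle->genid--) keeps the entry from being treated as current on the next hit.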
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 59cfc7d8fc45..96ed6905b823 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -467,6 +467,217 @@ static struct attribute_group wireless_group = { | |||
467 | }; | 467 | }; |
468 | #endif | 468 | #endif |
469 | 469 | ||
470 | #ifdef CONFIG_RPS | ||
471 | /* | ||
472 | * RX queue sysfs structures and functions. | ||
473 | */ | ||
474 | struct rx_queue_attribute { | ||
475 | struct attribute attr; | ||
476 | ssize_t (*show)(struct netdev_rx_queue *queue, | ||
477 | struct rx_queue_attribute *attr, char *buf); | ||
478 | ssize_t (*store)(struct netdev_rx_queue *queue, | ||
479 | struct rx_queue_attribute *attr, const char *buf, size_t len); | ||
480 | }; | ||
481 | #define to_rx_queue_attr(_attr) container_of(_attr, \ | ||
482 | struct rx_queue_attribute, attr) | ||
483 | |||
484 | #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) | ||
485 | |||
486 | static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, | ||
487 | char *buf) | ||
488 | { | ||
489 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
490 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
491 | |||
492 | if (!attribute->show) | ||
493 | return -EIO; | ||
494 | |||
495 | return attribute->show(queue, attribute, buf); | ||
496 | } | ||
497 | |||
498 | static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
499 | const char *buf, size_t count) | ||
500 | { | ||
501 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
502 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
503 | |||
504 | if (!attribute->store) | ||
505 | return -EIO; | ||
506 | |||
507 | return attribute->store(queue, attribute, buf, count); | ||
508 | } | ||
509 | |||
510 | static struct sysfs_ops rx_queue_sysfs_ops = { | ||
511 | .show = rx_queue_attr_show, | ||
512 | .store = rx_queue_attr_store, | ||
513 | }; | ||
514 | |||
515 | static ssize_t show_rps_map(struct netdev_rx_queue *queue, | ||
516 | struct rx_queue_attribute *attribute, char *buf) | ||
517 | { | ||
518 | struct rps_map *map; | ||
519 | cpumask_var_t mask; | ||
520 | size_t len = 0; | ||
521 | int i; | ||
522 | |||
523 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
524 | return -ENOMEM; | ||
525 | |||
526 | rcu_read_lock(); | ||
527 | map = rcu_dereference(queue->rps_map); | ||
528 | if (map) | ||
529 | for (i = 0; i < map->len; i++) | ||
530 | cpumask_set_cpu(map->cpus[i], mask); | ||
531 | |||
532 | len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask); | ||
533 | if (PAGE_SIZE - len < 3) { | ||
534 | rcu_read_unlock(); | ||
535 | free_cpumask_var(mask); | ||
536 | return -EINVAL; | ||
537 | } | ||
538 | rcu_read_unlock(); | ||
539 | |||
540 | free_cpumask_var(mask); | ||
541 | len += sprintf(buf + len, "\n"); | ||
542 | return len; | ||
543 | } | ||
544 | |||
545 | static void rps_map_release(struct rcu_head *rcu) | ||
546 | { | ||
547 | struct rps_map *map = container_of(rcu, struct rps_map, rcu); | ||
548 | |||
549 | kfree(map); | ||
550 | } | ||
551 | |||
552 | ssize_t store_rps_map(struct netdev_rx_queue *queue, | ||
553 | struct rx_queue_attribute *attribute, | ||
554 | const char *buf, size_t len) | ||
555 | { | ||
556 | struct rps_map *old_map, *map; | ||
557 | cpumask_var_t mask; | ||
558 | int err, cpu, i; | ||
559 | static DEFINE_SPINLOCK(rps_map_lock); | ||
560 | |||
561 | if (!capable(CAP_NET_ADMIN)) | ||
562 | return -EPERM; | ||
563 | |||
564 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | ||
565 | return -ENOMEM; | ||
566 | |||
567 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); | ||
568 | if (err) { | ||
569 | free_cpumask_var(mask); | ||
570 | return err; | ||
571 | } | ||
572 | |||
573 | map = kzalloc(max_t(unsigned, | ||
574 | RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), | ||
575 | GFP_KERNEL); | ||
576 | if (!map) { | ||
577 | free_cpumask_var(mask); | ||
578 | return -ENOMEM; | ||
579 | } | ||
580 | |||
581 | i = 0; | ||
582 | for_each_cpu_and(cpu, mask, cpu_online_mask) | ||
583 | map->cpus[i++] = cpu; | ||
584 | |||
585 | if (i) | ||
586 | map->len = i; | ||
587 | else { | ||
588 | kfree(map); | ||
589 | map = NULL; | ||
590 | } | ||
591 | |||
592 | spin_lock(&rps_map_lock); | ||
593 | old_map = queue->rps_map; | ||
594 | rcu_assign_pointer(queue->rps_map, map); | ||
595 | spin_unlock(&rps_map_lock); | ||
596 | |||
597 | if (old_map) | ||
598 | call_rcu(&old_map->rcu, rps_map_release); | ||
599 | |||
600 | free_cpumask_var(mask); | ||
601 | return len; | ||
602 | } | ||
603 | |||
604 | static struct rx_queue_attribute rps_cpus_attribute = | ||
605 | __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); | ||
606 | |||
607 | static struct attribute *rx_queue_default_attrs[] = { | ||
608 | &rps_cpus_attribute.attr, | ||
609 | NULL | ||
610 | }; | ||
611 | |||
612 | static void rx_queue_release(struct kobject *kobj) | ||
613 | { | ||
614 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
615 | struct rps_map *map = queue->rps_map; | ||
616 | struct netdev_rx_queue *first = queue->first; | ||
617 | |||
618 | if (map) | ||
619 | call_rcu(&map->rcu, rps_map_release); | ||
620 | |||
621 | if (atomic_dec_and_test(&first->count)) | ||
622 | kfree(first); | ||
623 | } | ||
624 | |||
625 | static struct kobj_type rx_queue_ktype = { | ||
626 | .sysfs_ops = &rx_queue_sysfs_ops, | ||
627 | .release = rx_queue_release, | ||
628 | .default_attrs = rx_queue_default_attrs, | ||
629 | }; | ||
630 | |||
631 | static int rx_queue_add_kobject(struct net_device *net, int index) | ||
632 | { | ||
633 | struct netdev_rx_queue *queue = net->_rx + index; | ||
634 | struct kobject *kobj = &queue->kobj; | ||
635 | int error = 0; | ||
636 | |||
637 | kobj->kset = net->queues_kset; | ||
638 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, | ||
639 | "rx-%u", index); | ||
640 | if (error) { | ||
641 | kobject_put(kobj); | ||
642 | return error; | ||
643 | } | ||
644 | |||
645 | kobject_uevent(kobj, KOBJ_ADD); | ||
646 | |||
647 | return error; | ||
648 | } | ||
649 | |||
650 | static int rx_queue_register_kobjects(struct net_device *net) | ||
651 | { | ||
652 | int i; | ||
653 | int error = 0; | ||
654 | |||
655 | net->queues_kset = kset_create_and_add("queues", | ||
656 | NULL, &net->dev.kobj); | ||
657 | if (!net->queues_kset) | ||
658 | return -ENOMEM; | ||
659 | for (i = 0; i < net->num_rx_queues; i++) { | ||
660 | error = rx_queue_add_kobject(net, i); | ||
661 | if (error) | ||
662 | break; | ||
663 | } | ||
664 | |||
665 | if (error) | ||
666 | while (--i >= 0) | ||
667 | kobject_put(&net->_rx[i].kobj); | ||
668 | |||
669 | return error; | ||
670 | } | ||
671 | |||
672 | static void rx_queue_remove_kobjects(struct net_device *net) | ||
673 | { | ||
674 | int i; | ||
675 | |||
676 | for (i = 0; i < net->num_rx_queues; i++) | ||
677 | kobject_put(&net->_rx[i].kobj); | ||
678 | kset_unregister(net->queues_kset); | ||
679 | } | ||
680 | #endif /* CONFIG_RPS */ | ||
470 | #endif /* CONFIG_SYSFS */ | 681 | #endif /* CONFIG_SYSFS */ |
471 | 682 | ||
472 | #ifdef CONFIG_HOTPLUG | 683 | #ifdef CONFIG_HOTPLUG |
@@ -530,6 +741,10 @@ void netdev_unregister_kobject(struct net_device * net) | |||
530 | if (!net_eq(dev_net(net), &init_net)) | 741 | if (!net_eq(dev_net(net), &init_net)) |
531 | return; | 742 | return; |
532 | 743 | ||
744 | #ifdef CONFIG_RPS | ||
745 | rx_queue_remove_kobjects(net); | ||
746 | #endif | ||
747 | |||
533 | device_del(dev); | 748 | device_del(dev); |
534 | } | 749 | } |
535 | 750 | ||
@@ -538,6 +753,7 @@ int netdev_register_kobject(struct net_device *net) | |||
538 | { | 753 | { |
539 | struct device *dev = &(net->dev); | 754 | struct device *dev = &(net->dev); |
540 | const struct attribute_group **groups = net->sysfs_groups; | 755 | const struct attribute_group **groups = net->sysfs_groups; |
756 | int error = 0; | ||
541 | 757 | ||
542 | dev->class = &net_class; | 758 | dev->class = &net_class; |
543 | dev->platform_data = net; | 759 | dev->platform_data = net; |
@@ -564,7 +780,19 @@ int netdev_register_kobject(struct net_device *net) | |||
564 | if (!net_eq(dev_net(net), &init_net)) | 780 | if (!net_eq(dev_net(net), &init_net)) |
565 | return 0; | 781 | return 0; |
566 | 782 | ||
567 | return device_add(dev); | 783 | error = device_add(dev); |
784 | if (error) | ||
785 | return error; | ||
786 | |||
787 | #ifdef CONFIG_RPS | ||
788 | error = rx_queue_register_kobjects(net); | ||
789 | if (error) { | ||
790 | device_del(dev); | ||
791 | return error; | ||
792 | } | ||
793 | #endif | ||
794 | |||
795 | return error; | ||
568 | } | 796 | } |
569 | 797 | ||
570 | int netdev_class_create_file(struct class_attribute *class_attr) | 798 | int netdev_class_create_file(struct class_attribute *class_attr) |
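The net-sysfs block above exports one directory per receive queue under /sys/class/net/<dev>/queues/rx-<n>/; writing a hex CPU mask to its rps_cpus file installs a new rps_map via RCU. A minimal userspace sketch, assuming an eth0 with at least one rx queue and CAP_NET_ADMIN (matching the capable() check in store_rps_map()):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* hex CPU mask, parsed by bitmap_parse(): 0xf = CPUs 0-3 */
        if (write(fd, "f", 1) < 0)
                perror("write");
        close(fd);
        return 0;
}

The store handler intersects the mask with the online CPUs and swaps the map under rps_map_lock, freeing the old one through call_rcu(), so readers on the packet path never see a torn update.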
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 43923811bd6a..2ad68da418df 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -169,7 +169,7 @@ | |||
169 | #include <asm/dma.h> | 169 | #include <asm/dma.h> |
170 | #include <asm/div64.h> /* do_div */ | 170 | #include <asm/div64.h> /* do_div */ |
171 | 171 | ||
172 | #define VERSION "2.72" | 172 | #define VERSION "2.73" |
173 | #define IP_NAME_SZ 32 | 173 | #define IP_NAME_SZ 32 |
174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ | 174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ |
175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) | 175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) |
@@ -190,6 +190,7 @@ | |||
190 | #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ | 190 | #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ |
191 | #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ | 191 | #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ |
192 | #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ | 192 | #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ |
193 | #define F_NODE (1<<15) /* Node memory alloc*/ | ||
193 | 194 | ||
194 | /* Thread control flag bits */ | 195 | /* Thread control flag bits */ |
195 | #define T_STOP (1<<0) /* Stop run */ | 196 | #define T_STOP (1<<0) /* Stop run */ |
@@ -372,6 +373,7 @@ struct pktgen_dev { | |||
372 | 373 | ||
373 | u16 queue_map_min; | 374 | u16 queue_map_min; |
374 | u16 queue_map_max; | 375 | u16 queue_map_max; |
376 | int node; /* Memory node */ | ||
375 | 377 | ||
376 | #ifdef CONFIG_XFRM | 378 | #ifdef CONFIG_XFRM |
377 | __u8 ipsmode; /* IPSEC mode (config) */ | 379 | __u8 ipsmode; /* IPSEC mode (config) */ |
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
607 | if (pkt_dev->traffic_class) | 609 | if (pkt_dev->traffic_class) |
608 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); | 610 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); |
609 | 611 | ||
612 | if (pkt_dev->node >= 0) | ||
613 | seq_printf(seq, " node: %d\n", pkt_dev->node); | ||
614 | |||
610 | seq_printf(seq, " Flags: "); | 615 | seq_printf(seq, " Flags: "); |
611 | 616 | ||
612 | if (pkt_dev->flags & F_IPV6) | 617 | if (pkt_dev->flags & F_IPV6) |
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
660 | if (pkt_dev->flags & F_SVID_RND) | 665 | if (pkt_dev->flags & F_SVID_RND) |
661 | seq_printf(seq, "SVID_RND "); | 666 | seq_printf(seq, "SVID_RND "); |
662 | 667 | ||
668 | if (pkt_dev->flags & F_NODE) | ||
669 | seq_printf(seq, "NODE_ALLOC "); | ||
670 | |||
663 | seq_puts(seq, "\n"); | 671 | seq_puts(seq, "\n"); |
664 | 672 | ||
665 | /* not really stopped, more like last-running-at */ | 673 | /* not really stopped, more like last-running-at */ |
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1074 | pkt_dev->dst_mac_count); | 1082 | pkt_dev->dst_mac_count); |
1075 | return count; | 1083 | return count; |
1076 | } | 1084 | } |
1085 | if (!strcmp(name, "node")) { | ||
1086 | len = num_arg(&user_buffer[i], 10, &value); | ||
1087 | if (len < 0) | ||
1088 | return len; | ||
1089 | |||
1090 | i += len; | ||
1091 | |||
1092 | if (node_possible(value)) { | ||
1093 | pkt_dev->node = value; | ||
1094 | sprintf(pg_result, "OK: node=%d", pkt_dev->node); | ||
1095 | } | ||
1096 | else | ||
1097 | sprintf(pg_result, "ERROR: node not possible"); | ||
1098 | return count; | ||
1099 | } | ||
1077 | if (!strcmp(name, "flag")) { | 1100 | if (!strcmp(name, "flag")) { |
1078 | char f[32]; | 1101 | char f[32]; |
1079 | memset(f, 0, 32); | 1102 | memset(f, 0, 32); |
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1166 | else if (strcmp(f, "!IPV6") == 0) | 1189 | else if (strcmp(f, "!IPV6") == 0) |
1167 | pkt_dev->flags &= ~F_IPV6; | 1190 | pkt_dev->flags &= ~F_IPV6; |
1168 | 1191 | ||
1192 | else if (strcmp(f, "NODE_ALLOC") == 0) | ||
1193 | pkt_dev->flags |= F_NODE; | ||
1194 | |||
1195 | else if (strcmp(f, "!NODE_ALLOC") == 0) | ||
1196 | pkt_dev->flags &= ~F_NODE; | ||
1197 | |||
1169 | else { | 1198 | else { |
1170 | sprintf(pg_result, | 1199 | sprintf(pg_result, |
1171 | "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", | 1200 | "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", |
1172 | f, | 1201 | f, |
1173 | "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " | 1202 | "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " |
1174 | "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n"); | 1203 | "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n"); |
1175 | return count; | 1204 | return count; |
1176 | } | 1205 | } |
1177 | sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); | 1206 | sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); |
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2572 | mod_cur_headers(pkt_dev); | 2601 | mod_cur_headers(pkt_dev); |
2573 | 2602 | ||
2574 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2603 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2575 | skb = __netdev_alloc_skb(odev, | 2604 | |
2576 | pkt_dev->cur_pkt_size + 64 | 2605 | if (pkt_dev->flags & F_NODE) { |
2577 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | 2606 | int node; |
2607 | |||
2608 | if (pkt_dev->node >= 0) | ||
2609 | node = pkt_dev->node; | ||
2610 | else | ||
2611 | node = numa_node_id(); | ||
2612 | |||
2613 | skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64 | ||
2614 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node); | ||
2615 | if (likely(skb)) { | ||
2616 | skb_reserve(skb, NET_SKB_PAD); | ||
2617 | skb->dev = odev; | ||
2618 | } | ||
2619 | } | ||
2620 | else | ||
2621 | skb = __netdev_alloc_skb(odev, | ||
2622 | pkt_dev->cur_pkt_size + 64 | ||
2623 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2624 | |||
2578 | if (!skb) { | 2625 | if (!skb) { |
2579 | sprintf(pkt_dev->result, "No memory"); | 2626 | sprintf(pkt_dev->result, "No memory"); |
2580 | return NULL; | 2627 | return NULL; |
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) | |||
3674 | pkt_dev->svlan_p = 0; | 3721 | pkt_dev->svlan_p = 0; |
3675 | pkt_dev->svlan_cfi = 0; | 3722 | pkt_dev->svlan_cfi = 0; |
3676 | pkt_dev->svlan_id = 0xffff; | 3723 | pkt_dev->svlan_id = 0xffff; |
3724 | pkt_dev->node = -1; | ||
3677 | 3725 | ||
3678 | err = pktgen_setup_dev(pkt_dev, ifname); | 3726 | err = pktgen_setup_dev(pkt_dev, ifname); |
3679 | if (err) | 3727 | if (err) |
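With the new NODE_ALLOC flag pktgen pins its test skbs to a memory node: echoing "node 1" and then "flag NODE_ALLOC" into /proc/net/pktgen/<if> selects it (both knobs appear in the hunks above). The allocation pattern generalizes to any caller; a sketch of the same idea, assuming __alloc_skb()'s (size, gfp, fclone, node) signature from this kernel:

#include <linux/skbuff.h>
#include <linux/topology.h>

static struct sk_buff *alloc_skb_on_node(struct net_device *dev,
                                         unsigned int len, int node)
{
        struct sk_buff *skb;

        if (node < 0)           /* mirror pkt_dev->node == -1: use the local node */
                node = numa_node_id();

        /* __alloc_skb() takes an explicit node but, unlike
         * __netdev_alloc_skb(), reserves no headroom and leaves
         * skb->dev unset, so do both by hand. */
        skb = __alloc_skb(NET_SKB_PAD + len, GFP_NOWAIT, 0, node);
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
        }
        return skb;
}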
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4568120d8533..bf919b6acea2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -600,7 +600,41 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
600 | 600 | ||
601 | a->rx_compressed = b->rx_compressed; | 601 | a->rx_compressed = b->rx_compressed; |
602 | a->tx_compressed = b->tx_compressed; | 602 | a->tx_compressed = b->tx_compressed; |
603 | }; | 603 | } |
604 | |||
605 | static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) | ||
606 | { | ||
607 | struct rtnl_link_stats64 a; | ||
608 | |||
609 | a.rx_packets = b->rx_packets; | ||
610 | a.tx_packets = b->tx_packets; | ||
611 | a.rx_bytes = b->rx_bytes; | ||
612 | a.tx_bytes = b->tx_bytes; | ||
613 | a.rx_errors = b->rx_errors; | ||
614 | a.tx_errors = b->tx_errors; | ||
615 | a.rx_dropped = b->rx_dropped; | ||
616 | a.tx_dropped = b->tx_dropped; | ||
617 | |||
618 | a.multicast = b->multicast; | ||
619 | a.collisions = b->collisions; | ||
620 | |||
621 | a.rx_length_errors = b->rx_length_errors; | ||
622 | a.rx_over_errors = b->rx_over_errors; | ||
623 | a.rx_crc_errors = b->rx_crc_errors; | ||
624 | a.rx_frame_errors = b->rx_frame_errors; | ||
625 | a.rx_fifo_errors = b->rx_fifo_errors; | ||
626 | a.rx_missed_errors = b->rx_missed_errors; | ||
627 | |||
628 | a.tx_aborted_errors = b->tx_aborted_errors; | ||
629 | a.tx_carrier_errors = b->tx_carrier_errors; | ||
630 | a.tx_fifo_errors = b->tx_fifo_errors; | ||
631 | a.tx_heartbeat_errors = b->tx_heartbeat_errors; | ||
632 | a.tx_window_errors = b->tx_window_errors; | ||
633 | |||
634 | a.rx_compressed = b->rx_compressed; | ||
635 | a.tx_compressed = b->tx_compressed; | ||
636 | memcpy(v, &a, sizeof(a)); | ||
637 | } | ||
604 | 638 | ||
605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | 639 | static inline int rtnl_vfinfo_size(const struct net_device *dev) |
606 | { | 640 | { |
@@ -619,6 +653,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
619 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ | 653 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ |
620 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) | 654 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) |
621 | + nla_total_size(sizeof(struct rtnl_link_stats)) | 655 | + nla_total_size(sizeof(struct rtnl_link_stats)) |
656 | + nla_total_size(sizeof(struct rtnl_link_stats64)) | ||
622 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | 657 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
623 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ | 658 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ |
624 | + nla_total_size(4) /* IFLA_TXQLEN */ | 659 | + nla_total_size(4) /* IFLA_TXQLEN */ |
@@ -698,6 +733,12 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
698 | stats = dev_get_stats(dev); | 733 | stats = dev_get_stats(dev); |
699 | copy_rtnl_link_stats(nla_data(attr), stats); | 734 | copy_rtnl_link_stats(nla_data(attr), stats); |
700 | 735 | ||
736 | attr = nla_reserve(skb, IFLA_STATS64, | ||
737 | sizeof(struct rtnl_link_stats64)); | ||
738 | if (attr == NULL) | ||
739 | goto nla_put_failure; | ||
740 | copy_rtnl_link_stats64(nla_data(attr), stats); | ||
741 | |||
701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | 742 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { |
702 | int i; | 743 | int i; |
703 | struct ifla_vf_info ivi; | 744 | struct ifla_vf_info ivi; |
@@ -1473,6 +1514,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
1473 | case NETDEV_POST_INIT: | 1514 | case NETDEV_POST_INIT: |
1474 | case NETDEV_REGISTER: | 1515 | case NETDEV_REGISTER: |
1475 | case NETDEV_CHANGE: | 1516 | case NETDEV_CHANGE: |
1517 | case NETDEV_PRE_TYPE_CHANGE: | ||
1476 | case NETDEV_GOING_DOWN: | 1518 | case NETDEV_GOING_DOWN: |
1477 | case NETDEV_UNREGISTER: | 1519 | case NETDEV_UNREGISTER: |
1478 | case NETDEV_UNREGISTER_BATCH: | 1520 | case NETDEV_UNREGISTER_BATCH: |
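IFLA_STATS64 gives link dumps a 64-bit copy of the same counters; the kernel fills the attribute with memcpy() because the payload is not guaranteed to be 8-byte aligned, and readers should copy it out the same way. A userspace sketch of picking the attribute out of an RTM_NEWLINK payload, assuming a linux/if_link.h that already carries the IFLA_STATS64 addition:

#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <stdio.h>
#include <string.h>

/* rta points at the attributes of an RTM_NEWLINK message, i.e. just
 * past struct ifinfomsg; len is the remaining payload length. */
static void show_stats64(struct rtattr *rta, int len)
{
        struct rtnl_link_stats64 s;

        for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
                if (rta->rta_type != IFLA_STATS64)
                        continue;

                /* unaligned payload: copy, don't cast-and-dereference */
                memcpy(&s, RTA_DATA(rta), sizeof(s));
                printf("rx %llu / tx %llu bytes\n",
                       (unsigned long long)s.rx_bytes,
                       (unsigned long long)s.tx_bytes);
        }
}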
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 93c4e060c91e..bdea0efdf8cb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -534,6 +534,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
534 | new->network_header = old->network_header; | 534 | new->network_header = old->network_header; |
535 | new->mac_header = old->mac_header; | 535 | new->mac_header = old->mac_header; |
536 | skb_dst_set(new, dst_clone(skb_dst(old))); | 536 | skb_dst_set(new, dst_clone(skb_dst(old))); |
537 | new->rxhash = old->rxhash; | ||
537 | #ifdef CONFIG_XFRM | 538 | #ifdef CONFIG_XFRM |
538 | new->sp = secpath_get(old->sp); | 539 | new->sp = secpath_get(old->sp); |
539 | #endif | 540 | #endif |
@@ -581,6 +582,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
581 | C(len); | 582 | C(len); |
582 | C(data_len); | 583 | C(data_len); |
583 | C(mac_len); | 584 | C(mac_len); |
585 | C(rxhash); | ||
584 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 586 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
585 | n->cloned = 1; | 587 | n->cloned = 1; |
586 | n->nohdr = 0; | 588 | n->nohdr = 0; |
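Copying rxhash in __copy_skb_header() and __skb_clone() keeps the receive hash computed for RPS attached to clones and copies, so the steering decision is neither lost nor recomputed downstream. A toy check of the invariant, e.g. from a test module (sketch):

#include <linux/skbuff.h>

static void rxhash_clone_check(struct sk_buff *skb)
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (clone) {
                WARN_ON(clone->rxhash != skb->rxhash);  /* holds after this change */
                kfree_skb(clone);
        }
}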
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index bcd7632299f5..d3235899c7e3 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
208 | goto restart_timer; | 208 | goto restart_timer; |
209 | } | 209 | } |
210 | 210 | ||
211 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, | 211 | ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk, |
212 | ccid3_tx_state_name(hc->tx_state)); | 212 | ccid3_tx_state_name(hc->tx_state)); |
213 | 213 | ||
214 | if (hc->tx_state == TFRC_SSTATE_FBACK) | 214 | if (hc->tx_state == TFRC_SSTATE_FBACK) |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 5ef32c2f0d6a..53f8e12d0c10 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -189,7 +189,7 @@ enum { | |||
189 | #define DCCP_MIB_MAX __DCCP_MIB_MAX | 189 | #define DCCP_MIB_MAX __DCCP_MIB_MAX |
190 | struct dccp_mib { | 190 | struct dccp_mib { |
191 | unsigned long mibs[DCCP_MIB_MAX]; | 191 | unsigned long mibs[DCCP_MIB_MAX]; |
192 | } __SNMP_MIB_ALIGN__; | 192 | }; |
193 | 193 | ||
194 | DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); | 194 | DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); |
195 | #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) | 195 | #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 9ec717426024..58f7bc156850 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -415,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, | |||
415 | if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, | 415 | if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, |
416 | dp->dccps_awl, dp->dccps_awh)) { | 416 | dp->dccps_awl, dp->dccps_awh)) { |
417 | dccp_pr_debug("invalid ackno: S.AWL=%llu, " | 417 | dccp_pr_debug("invalid ackno: S.AWL=%llu, " |
418 | "P.ackno=%llu, S.AWH=%llu \n", | 418 | "P.ackno=%llu, S.AWH=%llu\n", |
419 | (unsigned long long)dp->dccps_awl, | 419 | (unsigned long long)dp->dccps_awl, |
420 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, | 420 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, |
421 | (unsigned long long)dp->dccps_awh); | 421 | (unsigned long long)dp->dccps_awh); |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index cead68eb254c..615dbe3b43f9 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -350,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de | |||
350 | if (dn_db->dev->type == ARPHRD_ETHER) { | 350 | if (dn_db->dev->type == ARPHRD_ETHER) { |
351 | if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { | 351 | if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { |
352 | dn_dn2eth(mac_addr, ifa1->ifa_local); | 352 | dn_dn2eth(mac_addr, ifa1->ifa_local); |
353 | dev_mc_delete(dev, mac_addr, ETH_ALEN, 0); | 353 | dev_mc_del(dev, mac_addr); |
354 | } | 354 | } |
355 | } | 355 | } |
356 | 356 | ||
@@ -381,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) | |||
381 | if (dev->type == ARPHRD_ETHER) { | 381 | if (dev->type == ARPHRD_ETHER) { |
382 | if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { | 382 | if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { |
383 | dn_dn2eth(mac_addr, ifa->ifa_local); | 383 | dn_dn2eth(mac_addr, ifa->ifa_local); |
384 | dev_mc_add(dev, mac_addr, ETH_ALEN, 0); | 384 | dev_mc_add(dev, mac_addr); |
385 | } | 385 | } |
386 | } | 386 | } |
387 | 387 | ||
@@ -1001,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev) | |||
1001 | struct dn_dev *dn_db = dev->dn_ptr; | 1001 | struct dn_dev *dn_db = dev->dn_ptr; |
1002 | 1002 | ||
1003 | if (dn_db->parms.forwarding == 0) | 1003 | if (dn_db->parms.forwarding == 0) |
1004 | dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); | 1004 | dev_mc_add(dev, dn_rt_all_end_mcast); |
1005 | else | 1005 | else |
1006 | dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); | 1006 | dev_mc_add(dev, dn_rt_all_rt_mcast); |
1007 | 1007 | ||
1008 | dn_db->use_long = 1; | 1008 | dn_db->use_long = 1; |
1009 | 1009 | ||
@@ -1015,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev) | |||
1015 | struct dn_dev *dn_db = dev->dn_ptr; | 1015 | struct dn_dev *dn_db = dev->dn_ptr; |
1016 | 1016 | ||
1017 | if (dn_db->parms.forwarding == 0) | 1017 | if (dn_db->parms.forwarding == 0) |
1018 | dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); | 1018 | dev_mc_del(dev, dn_rt_all_end_mcast); |
1019 | else | 1019 | else |
1020 | dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); | 1020 | dev_mc_del(dev, dn_rt_all_rt_mcast); |
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | static void dn_dev_set_timer(struct net_device *dev); | 1023 | static void dn_dev_set_timer(struct net_device *dev); |
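These hunks are part of the tree-wide switch to the new address-list API: dev_mc_add() and dev_mc_del() drop the explicit length and "global" arguments and use dev->addr_len implicitly. A sketch of the new-style calls from a driver; my_mcast_addr and the my_* handlers are illustrative:

#include <linux/if_ether.h>
#include <linux/netdevice.h>

static u8 my_mcast_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

static int my_open(struct net_device *dev)
{
        /* was: dev_mc_add(dev, my_mcast_addr, ETH_ALEN, 0) */
        return dev_mc_add(dev, my_mcast_addr);
}

static int my_stop(struct net_device *dev)
{
        /* was: dev_mc_delete(dev, my_mcast_addr, ETH_ALEN, 0) */
        dev_mc_del(dev, my_mcast_addr);
        return 0;
}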
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 2175e6d5cc8d..8fdca56bb08f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev) | |||
67 | return -ENETDOWN; | 67 | return -ENETDOWN; |
68 | 68 | ||
69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { | 69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { |
70 | err = dev_unicast_add(master, dev->dev_addr); | 70 | err = dev_uc_add(master, dev->dev_addr); |
71 | if (err < 0) | 71 | if (err < 0) |
72 | goto out; | 72 | goto out; |
73 | } | 73 | } |
@@ -90,7 +90,7 @@ clear_allmulti: | |||
90 | dev_set_allmulti(master, -1); | 90 | dev_set_allmulti(master, -1); |
91 | del_unicast: | 91 | del_unicast: |
92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
93 | dev_unicast_delete(master, dev->dev_addr); | 93 | dev_uc_del(master, dev->dev_addr); |
94 | out: | 94 | out: |
95 | return err; | 95 | return err; |
96 | } | 96 | } |
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev) | |||
101 | struct net_device *master = p->parent->dst->master_netdev; | 101 | struct net_device *master = p->parent->dst->master_netdev; |
102 | 102 | ||
103 | dev_mc_unsync(master, dev); | 103 | dev_mc_unsync(master, dev); |
104 | dev_unicast_unsync(master, dev); | 104 | dev_uc_unsync(master, dev); |
105 | if (dev->flags & IFF_ALLMULTI) | 105 | if (dev->flags & IFF_ALLMULTI) |
106 | dev_set_allmulti(master, -1); | 106 | dev_set_allmulti(master, -1); |
107 | if (dev->flags & IFF_PROMISC) | 107 | if (dev->flags & IFF_PROMISC) |
108 | dev_set_promiscuity(master, -1); | 108 | dev_set_promiscuity(master, -1); |
109 | 109 | ||
110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
111 | dev_unicast_delete(master, dev->dev_addr); | 111 | dev_uc_del(master, dev->dev_addr); |
112 | 112 | ||
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev) | |||
130 | struct net_device *master = p->parent->dst->master_netdev; | 130 | struct net_device *master = p->parent->dst->master_netdev; |
131 | 131 | ||
132 | dev_mc_sync(master, dev); | 132 | dev_mc_sync(master, dev); |
133 | dev_unicast_sync(master, dev); | 133 | dev_uc_sync(master, dev); |
134 | } | 134 | } |
135 | 135 | ||
136 | static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | 136 | static int dsa_slave_set_mac_address(struct net_device *dev, void *a) |
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | |||
147 | goto out; | 147 | goto out; |
148 | 148 | ||
149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { | 149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { |
150 | err = dev_unicast_add(master, addr->sa_data); | 150 | err = dev_uc_add(master, addr->sa_data); |
151 | if (err < 0) | 151 | if (err < 0) |
152 | return err; | 152 | return err; |
153 | } | 153 | } |
154 | 154 | ||
155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
156 | dev_unicast_delete(master, dev->dev_addr); | 156 | dev_uc_del(master, dev->dev_addr); |
157 | 157 | ||
158 | out: | 158 | out: |
159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
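The unicast helpers get the same rename (dev_unicast_* becomes dev_uc_*). A sketch of the usual stacked-device pattern, mirroring the rx-mode handler above; struct my_priv and its lower pointer are illustrative stand-ins for wherever a driver keeps its underlying device:

#include <linux/netdevice.h>

struct my_priv {
        struct net_device *lower;       /* underlying device */
};

static void my_set_rx_mode(struct net_device *dev)
{
        struct net_device *lower = ((struct my_priv *)netdev_priv(dev))->lower;

        dev_mc_sync(lower, dev);
        dev_uc_sync(lower, dev);        /* was: dev_unicast_sync(lower, dev) */
}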
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 0c94a1ac2946..c9a1c68767ff 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -587,9 +587,15 @@ choice | |||
587 | config DEFAULT_HTCP | 587 | config DEFAULT_HTCP |
588 | bool "Htcp" if TCP_CONG_HTCP=y | 588 | bool "Htcp" if TCP_CONG_HTCP=y |
589 | 589 | ||
590 | config DEFAULT_HYBLA | ||
591 | bool "Hybla" if TCP_CONG_HYBLA=y | ||
592 | |||
590 | config DEFAULT_VEGAS | 593 | config DEFAULT_VEGAS |
591 | bool "Vegas" if TCP_CONG_VEGAS=y | 594 | bool "Vegas" if TCP_CONG_VEGAS=y |
592 | 595 | ||
596 | config DEFAULT_VENO | ||
597 | bool "Veno" if TCP_CONG_VENO=y | ||
598 | |||
593 | config DEFAULT_WESTWOOD | 599 | config DEFAULT_WESTWOOD |
594 | bool "Westwood" if TCP_CONG_WESTWOOD=y | 600 | bool "Westwood" if TCP_CONG_WESTWOOD=y |
595 | 601 | ||
@@ -610,8 +616,10 @@ config DEFAULT_TCP_CONG | |||
610 | default "bic" if DEFAULT_BIC | 616 | default "bic" if DEFAULT_BIC |
611 | default "cubic" if DEFAULT_CUBIC | 617 | default "cubic" if DEFAULT_CUBIC |
612 | default "htcp" if DEFAULT_HTCP | 618 | default "htcp" if DEFAULT_HTCP |
619 | default "hybla" if DEFAULT_HYBLA | ||
613 | default "vegas" if DEFAULT_VEGAS | 620 | default "vegas" if DEFAULT_VEGAS |
614 | default "westwood" if DEFAULT_WESTWOOD | 621 | default "westwood" if DEFAULT_WESTWOOD |
622 | default "veno" if DEFAULT_VENO | ||
615 | default "reno" if DEFAULT_RENO | 623 | default "reno" if DEFAULT_RENO |
616 | default "cubic" | 624 | default "cubic" |
617 | 625 | ||
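The two new Kconfig entries only widen the boot-time default choice; the modules still have to be built. At run time the system default can be changed via the net.ipv4.tcp_congestion_control sysctl, or per socket with TCP_CONGESTION (available from <netinet/tcp.h> on reasonably current libcs); a sketch:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

int use_hybla(int fd)
{
        static const char name[] = "hybla";     /* or "veno" */

        return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                          name, strlen(name));
}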
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f71357422380..a0beb32beaa3 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1407,10 +1407,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field); | |||
1407 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) | 1407 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) |
1408 | { | 1408 | { |
1409 | BUG_ON(ptr == NULL); | 1409 | BUG_ON(ptr == NULL); |
1410 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); | 1410 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long)); |
1411 | if (!ptr[0]) | 1411 | if (!ptr[0]) |
1412 | goto err0; | 1412 | goto err0; |
1413 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); | 1413 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long)); |
1414 | if (!ptr[1]) | 1414 | if (!ptr[1]) |
1415 | goto err1; | 1415 | goto err1; |
1416 | return 0; | 1416 | return 0; |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 90e3d6379a42..382bc768ed56 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1096,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1096 | case NETDEV_DOWN: | 1096 | case NETDEV_DOWN: |
1097 | ip_mc_down(in_dev); | 1097 | ip_mc_down(in_dev); |
1098 | break; | 1098 | break; |
1099 | case NETDEV_BONDING_OLDTYPE: | 1099 | case NETDEV_PRE_TYPE_CHANGE: |
1100 | ip_mc_unmap(in_dev); | 1100 | ip_mc_unmap(in_dev); |
1101 | break; | 1101 | break; |
1102 | case NETDEV_BONDING_NEWTYPE: | 1102 | case NETDEV_POST_TYPE_CHANGE: |
1103 | ip_mc_remap(in_dev); | 1103 | ip_mc_remap(in_dev); |
1104 | break; | 1104 | break; |
1105 | case NETDEV_CHANGEMTU: | 1105 | case NETDEV_CHANGEMTU: |
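The bonding-specific notifiers become generic PRE/POST type-change events, as the devinet hunk above shows with ip_mc_unmap()/ip_mc_remap(). A listener can also veto the change by returning NOTIFY_BAD from the PRE stage, provided the caller checks the notifier result; a sketch, with my_dev_is_busy() and my_dev_remap() as hypothetical driver hooks:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static bool my_dev_is_busy(struct net_device *dev)
{
        return false;   /* hypothetical per-device state check */
}

static void my_dev_remap(struct net_device *dev)
{
        /* hypothetical: rebuild type-dependent state, cf. ip_mc_remap() */
}

static int my_netdev_event(struct notifier_block *nb,
                           unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_PRE_TYPE_CHANGE:
                if (my_dev_is_busy(dev))
                        return NOTIFY_BAD;      /* veto before anything is torn down */
                break;
        case NETDEV_POST_TYPE_CHANGE:
                my_dev_remap(dev);
                break;
        }
        return NOTIFY_DONE;
}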
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ac4dec132735..f3d339f728b0 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -331,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, | 331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, |
332 | icmp_param->data_len+icmp_param->head_len, | 332 | icmp_param->data_len+icmp_param->head_len, |
333 | icmp_param->head_len, | 333 | icmp_param->head_len, |
334 | ipc, rt, MSG_DONTWAIT) < 0) | 334 | ipc, rt, MSG_DONTWAIT) < 0) { |
335 | ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); | ||
335 | ip_flush_pending_frames(sk); | 336 | ip_flush_pending_frames(sk); |
336 | else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { | 337 | } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { |
337 | struct icmphdr *icmph = icmp_hdr(skb); | 338 | struct icmphdr *icmph = icmp_hdr(skb); |
338 | __wsum csum = 0; | 339 | __wsum csum = 0; |
339 | struct sk_buff *skb1; | 340 | struct sk_buff *skb1; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 15d3eeda92f5..5fff865a4fa7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -998,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) | |||
998 | --ANK | 998 | --ANK |
999 | */ | 999 | */ |
1000 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1000 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1001 | dev_mc_add(dev, buf, dev->addr_len, 0); | 1001 | dev_mc_add(dev, buf); |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | /* | 1004 | /* |
@@ -1011,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr) | |||
1011 | struct net_device *dev = in_dev->dev; | 1011 | struct net_device *dev = in_dev->dev; |
1012 | 1012 | ||
1013 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1013 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1014 | dev_mc_delete(dev, buf, dev->addr_len, 0); | 1014 | dev_mc_del(dev, buf); |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | #ifdef CONFIG_IP_MULTICAST | 1017 | #ifdef CONFIG_IP_MULTICAST |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1e64dabbd232..b0aa0546a3b3 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -287,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
287 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | 287 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, |
288 | __be16 port, u32 info, u8 *payload) | 288 | __be16 port, u32 info, u8 *payload) |
289 | { | 289 | { |
290 | struct inet_sock *inet = inet_sk(sk); | ||
291 | struct sock_exterr_skb *serr; | 290 | struct sock_exterr_skb *serr; |
292 | 291 | ||
293 | if (!inet->recverr) | ||
294 | return; | ||
295 | |||
296 | skb = skb_clone(skb, GFP_ATOMIC); | 292 | skb = skb_clone(skb, GFP_ATOMIC); |
297 | if (!skb) | 293 | if (!skb) |
298 | return; | 294 | return; |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 067ce9e043dc..b9d84e800cf4 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -976,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
976 | /* Is it a reply for the device we are configuring? */ | 976 | /* Is it a reply for the device we are configuring? */ |
977 | if (b->xid != ic_dev_xid) { | 977 | if (b->xid != ic_dev_xid) { |
978 | if (net_ratelimit()) | 978 | if (net_ratelimit()) |
979 | printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n"); | 979 | printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n"); |
980 | goto drop_unlock; | 980 | goto drop_unlock; |
981 | } | 981 | } |
982 | 982 | ||
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index ab828400ed71..a992dc826f1c 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -88,7 +88,7 @@ clusterip_config_entry_put(struct clusterip_config *c) | |||
88 | list_del(&c->list); | 88 | list_del(&c->list); |
89 | write_unlock_bh(&clusterip_lock); | 89 | write_unlock_bh(&clusterip_lock); |
90 | 90 | ||
91 | dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); | 91 | dev_mc_del(c->dev, c->clustermac); |
92 | dev_put(c->dev); | 92 | dev_put(c->dev); |
93 | 93 | ||
94 | /* In case anyone still accesses the file, the open/close | 94 | /* In case anyone still accesses the file, the open/close |
@@ -397,7 +397,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) | |||
397 | dev_put(dev); | 397 | dev_put(dev); |
398 | return false; | 398 | return false; |
399 | } | 399 | } |
400 | dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); | 400 | dev_mc_add(config->dev, config->clustermac); |
401 | } | 401 | } |
402 | } | 402 | } |
403 | cipinfo->config = config; | 403 | cipinfo->config = config; |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4f1f337f4337..3dc9914c1dce 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
251 | SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), | 251 | SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), |
252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), | 252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), |
253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), | 253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), |
254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), | ||
254 | SNMP_MIB_SENTINEL | 255 | SNMP_MIB_SENTINEL |
255 | }; | 256 | }; |
256 | 257 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f240f57b2199..4000b10610b7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4319,7 +4319,7 @@ static void tcp_ofo_queue(struct sock *sk) | |||
4319 | } | 4319 | } |
4320 | 4320 | ||
4321 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { | 4321 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { |
4322 | SOCK_DEBUG(sk, "ofo packet was already received \n"); | 4322 | SOCK_DEBUG(sk, "ofo packet was already received\n"); |
4323 | __skb_unlink(skb, &tp->out_of_order_queue); | 4323 | __skb_unlink(skb, &tp->out_of_order_queue); |
4324 | __kfree_skb(skb); | 4324 | __kfree_skb(skb); |
4325 | continue; | 4325 | continue; |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 5fabff9ac6d6..794c2e122a41 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -672,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
672 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && | 672 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && |
673 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { | 673 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { |
674 | inet_rsk(req)->acked = 1; | 674 | inet_rsk(req)->acked = 1; |
675 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); | ||
675 | return NULL; | 676 | return NULL; |
676 | } | 677 | } |
677 | 678 | ||
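Together with the proc.c hunk above, deferred-accept behaviour becomes observable: the bare third-step ACKs that tcp_check_req() discards while TCP_DEFER_ACCEPT is holding a request back are now counted as TCPDeferAcceptDrop in /proc/net/netstat instead of vanishing silently. Enabling the feature on a listener (sketch):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int defer_accept(int listen_fd, int seconds)
{
        /* accept() won't return until data arrives, or the window expires */
        return setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                          &seconds, sizeof(seconds));
}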
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index e4a1483fba77..1705476670ef 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net, | |||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct dst_entry * | ||
63 | __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | ||
64 | { | ||
65 | struct dst_entry *dst; | ||
66 | |||
67 | read_lock_bh(&policy->lock); | ||
68 | for (dst = policy->bundles; dst; dst = dst->next) { | ||
69 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | ||
70 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ | ||
71 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && | ||
72 | xdst->u.rt.fl.fl4_src == fl->fl4_src && | ||
73 | xdst->u.rt.fl.fl4_tos == fl->fl4_tos && | ||
74 | xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { | ||
75 | dst_clone(dst); | ||
76 | break; | ||
77 | } | ||
78 | } | ||
79 | read_unlock_bh(&policy->lock); | ||
80 | return dst; | ||
81 | } | ||
82 | |||
83 | static int xfrm4_get_tos(struct flowi *fl) | 62 | static int xfrm4_get_tos(struct flowi *fl) |
84 | { | 63 | { |
85 | return fl->fl4_tos; | 64 | return fl->fl4_tos; |
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | |||
259 | .dst_ops = &xfrm4_dst_ops, | 238 | .dst_ops = &xfrm4_dst_ops, |
260 | .dst_lookup = xfrm4_dst_lookup, | 239 | .dst_lookup = xfrm4_dst_lookup, |
261 | .get_saddr = xfrm4_get_saddr, | 240 | .get_saddr = xfrm4_get_saddr, |
262 | .find_bundle = __xfrm4_find_bundle, | ||
263 | .decode_session = _decode_session4, | 241 | .decode_session = _decode_session4, |
264 | .get_tos = xfrm4_get_tos, | 242 | .get_tos = xfrm4_get_tos, |
265 | .init_path = xfrm4_init_path, | 243 | .init_path = xfrm4_init_path, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 413054f02aab..1b00bfef268e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -82,7 +82,7 @@ | |||
82 | #include <linux/random.h> | 82 | #include <linux/random.h> |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #include <asm/uaccess.h> | 85 | #include <linux/uaccess.h> |
86 | #include <asm/unaligned.h> | 86 | #include <asm/unaligned.h> |
87 | 87 | ||
88 | #include <linux/proc_fs.h> | 88 | #include <linux/proc_fs.h> |
@@ -98,7 +98,11 @@ | |||
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF | 100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF |
101 | #define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b))) | 101 | #define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) |
102 | |||
103 | #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) | ||
104 | #define ADDRCONF_TIMER_FUZZ (HZ / 4) | ||
105 | #define ADDRCONF_TIMER_FUZZ_MAX (HZ) | ||
102 | 106 | ||
103 | #ifdef CONFIG_SYSCTL | 107 | #ifdef CONFIG_SYSCTL |
104 | static void addrconf_sysctl_register(struct inet6_dev *idev); | 108 | static void addrconf_sysctl_register(struct inet6_dev *idev); |
@@ -127,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev); | |||
127 | /* | 131 | /* |
128 | * Configured unicast address hash table | 132 | * Configured unicast address hash table |
129 | */ | 133 | */ |
130 | static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE]; | 134 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; |
131 | static DEFINE_RWLOCK(addrconf_hash_lock); | 135 | static DEFINE_SPINLOCK(addrconf_hash_lock); |
132 | 136 | ||
133 | static void addrconf_verify(unsigned long); | 137 | static void addrconf_verify(unsigned long); |
134 | 138 | ||
@@ -138,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock); | |||
138 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); | 142 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); |
139 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); | 143 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); |
140 | 144 | ||
141 | static void addrconf_bonding_change(struct net_device *dev, | 145 | static void addrconf_type_change(struct net_device *dev, |
142 | unsigned long event); | 146 | unsigned long event); |
143 | static int addrconf_ifdown(struct net_device *dev, int how); | 147 | static int addrconf_ifdown(struct net_device *dev, int how); |
144 | 148 | ||
145 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); | 149 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); |
@@ -152,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | |||
152 | 156 | ||
153 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, | 157 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, |
154 | struct prefix_info *pinfo); | 158 | struct prefix_info *pinfo); |
155 | static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 159 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
156 | struct net_device *dev); | 160 | struct net_device *dev); |
157 | 161 | ||
158 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | 162 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); |
159 | 163 | ||
@@ -250,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp) | |||
250 | __in6_ifa_put(ifp); | 254 | __in6_ifa_put(ifp); |
251 | } | 255 | } |
252 | 256 | ||
253 | enum addrconf_timer_t | 257 | enum addrconf_timer_t { |
254 | { | ||
255 | AC_NONE, | 258 | AC_NONE, |
256 | AC_DAD, | 259 | AC_DAD, |
257 | AC_RS, | 260 | AC_RS, |
@@ -271,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp, | |||
271 | case AC_RS: | 274 | case AC_RS: |
272 | ifp->timer.function = addrconf_rs_timer; | 275 | ifp->timer.function = addrconf_rs_timer; |
273 | break; | 276 | break; |
274 | default:; | 277 | default: |
278 | break; | ||
275 | } | 279 | } |
276 | ifp->timer.expires = jiffies + when; | 280 | ifp->timer.expires = jiffies + when; |
277 | add_timer(&ifp->timer); | 281 | add_timer(&ifp->timer); |
@@ -318,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
318 | { | 322 | { |
319 | struct net_device *dev = idev->dev; | 323 | struct net_device *dev = idev->dev; |
320 | 324 | ||
321 | WARN_ON(idev->addr_list != NULL); | 325 | WARN_ON(!list_empty(&idev->addr_list)); |
322 | WARN_ON(idev->mc_list != NULL); | 326 | WARN_ON(idev->mc_list != NULL); |
323 | 327 | ||
324 | #ifdef NET_REFCNT_DEBUG | 328 | #ifdef NET_REFCNT_DEBUG |
@@ -326,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
326 | #endif | 330 | #endif |
327 | dev_put(dev); | 331 | dev_put(dev); |
328 | if (!idev->dead) { | 332 | if (!idev->dead) { |
329 | printk("Freeing alive inet6 device %p\n", idev); | 333 | pr_warning("Freeing alive inet6 device %p\n", idev); |
330 | return; | 334 | return; |
331 | } | 335 | } |
332 | snmp6_free_dev(idev); | 336 | snmp6_free_dev(idev); |
@@ -351,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
351 | 355 | ||
352 | rwlock_init(&ndev->lock); | 356 | rwlock_init(&ndev->lock); |
353 | ndev->dev = dev; | 357 | ndev->dev = dev; |
358 | INIT_LIST_HEAD(&ndev->addr_list); | ||
359 | |||
354 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); | 360 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); |
355 | ndev->cnf.mtu6 = dev->mtu; | 361 | ndev->cnf.mtu6 = dev->mtu; |
356 | ndev->cnf.sysctl = NULL; | 362 | ndev->cnf.sysctl = NULL; |
@@ -402,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
402 | #endif | 408 | #endif |
403 | 409 | ||
404 | #ifdef CONFIG_IPV6_PRIVACY | 410 | #ifdef CONFIG_IPV6_PRIVACY |
411 | INIT_LIST_HEAD(&ndev->tempaddr_list); | ||
405 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); | 412 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); |
406 | if ((dev->flags&IFF_LOOPBACK) || | 413 | if ((dev->flags&IFF_LOOPBACK) || |
407 | dev->type == ARPHRD_TUNNEL || | 414 | dev->type == ARPHRD_TUNNEL || |
@@ -439,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev) | |||
439 | 446 | ||
440 | ASSERT_RTNL(); | 447 | ASSERT_RTNL(); |
441 | 448 | ||
442 | if ((idev = __in6_dev_get(dev)) == NULL) { | 449 | idev = __in6_dev_get(dev); |
443 | if ((idev = ipv6_add_dev(dev)) == NULL) | 450 | if (!idev) { |
451 | idev = ipv6_add_dev(dev); | ||
452 | if (!idev) | ||
444 | return NULL; | 453 | return NULL; |
445 | } | 454 | } |
446 | 455 | ||
@@ -466,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev) | |||
466 | else | 475 | else |
467 | ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); | 476 | ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); |
468 | } | 477 | } |
469 | for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) { | 478 | |
479 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | ||
470 | if (ifa->flags&IFA_F_TENTATIVE) | 480 | if (ifa->flags&IFA_F_TENTATIVE) |
471 | continue; | 481 | continue; |
472 | if (idev->cnf.forwarding) | 482 | if (idev->cnf.forwarding) |
@@ -523,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
523 | } | 533 | } |
524 | #endif | 534 | #endif |
525 | 535 | ||
526 | /* Nobody refers to this ifaddr, destroy it */ | 536 | static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head) |
537 | { | ||
538 | struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu); | ||
539 | kfree(ifp); | ||
540 | } | ||
527 | 541 | ||
542 | /* Nobody refers to this ifaddr, destroy it */ | ||
528 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | 543 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) |
529 | { | 544 | { |
530 | WARN_ON(ifp->if_next != NULL); | 545 | WARN_ON(!hlist_unhashed(&ifp->addr_lst)); |
531 | WARN_ON(ifp->lst_next != NULL); | ||
532 | 546 | ||
533 | #ifdef NET_REFCNT_DEBUG | 547 | #ifdef NET_REFCNT_DEBUG |
534 | printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); | 548 | printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); |
@@ -537,54 +551,45 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
537 | in6_dev_put(ifp->idev); | 551 | in6_dev_put(ifp->idev); |
538 | 552 | ||
539 | if (del_timer(&ifp->timer)) | 553 | if (del_timer(&ifp->timer)) |
540 | printk("Timer is still running, when freeing ifa=%p\n", ifp); | 554 | pr_notice("Timer is still running when freeing ifa=%p\n", ifp); |
541 | 555 | ||
542 | if (!ifp->dead) { | 556 | if (!ifp->dead) { |
543 | printk("Freeing alive inet6 address %p\n", ifp); | 557 | pr_warning("Freeing alive inet6 address %p\n", ifp); |
544 | return; | 558 | return; |
545 | } | 559 | } |
546 | dst_release(&ifp->rt->u.dst); | 560 | dst_release(&ifp->rt->u.dst); |
547 | 561 | ||
548 | kfree(ifp); | 562 | call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); |
549 | } | 563 | } |
550 | 564 | ||
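
With lookups now running locklessly under RCU (see the reader-side hunks below), inet6_ifa_finish_destroy() can no longer kfree() the address directly: a reader may still be traversing the hash chain. The new inet6_ifa_finish_destroy_rcu() callback is queued with call_rcu() and recovers the enclosing object from the embedded rcu_head via container_of(). A self-contained user-space sketch of that recovery step, with a stand-in rcu_head and the callback invoked directly as if the grace period had already elapsed:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's struct rcu_head. */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ifaddr_demo {
	int addr;		/* stands in for the real address fields */
	struct rcu_head rcu;	/* embedded callback handle */
};

/* The callback receives a pointer to the embedded member and must
 * recover the enclosing object before freeing it. */
static void finish_destroy_rcu(struct rcu_head *head)
{
	struct ifaddr_demo *ifp = container_of(head, struct ifaddr_demo, rcu);

	printf("freeing ifp, addr=%d\n", ifp->addr);
	free(ifp);
}

int main(void)
{
	struct ifaddr_demo *ifp = malloc(sizeof(*ifp));

	ifp->addr = 42;
	/* In the kernel: call_rcu(&ifp->rcu, finish_destroy_rcu); here the
	 * callback runs directly, as if the grace period had elapsed. */
	finish_destroy_rcu(&ifp->rcu);
	return 0;
}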
551 | static void | 565 | static void |
552 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) | 566 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) |
553 | { | 567 | { |
554 | struct inet6_ifaddr *ifa, **ifap; | 568 | struct list_head *p; |
555 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); | 569 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); |
556 | 570 | ||
557 | /* | 571 | /* |
558 | * Each device address list is sorted in order of scope - | 572 | * Each device address list is sorted in order of scope - |
559 | * global before linklocal. | 573 | * global before linklocal. |
560 | */ | 574 | */ |
561 | for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; | 575 | list_for_each(p, &idev->addr_list) { |
562 | ifap = &ifa->if_next) { | 576 | struct inet6_ifaddr *ifa |
577 | = list_entry(p, struct inet6_ifaddr, if_list); | ||
563 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) | 578 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) |
564 | break; | 579 | break; |
565 | } | 580 | } |
566 | 581 | ||
567 | ifp->if_next = *ifap; | 582 | list_add_tail(&ifp->if_list, p); |
568 | *ifap = ifp; | ||
569 | } | 583 | } |
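
ipv6_link_dev_addr() keeps each device's address list sorted by source scope, global before link-local. In the list_head version the insertion point is expressed as list_add_tail(&ifp->if_list, p): "tail" is relative to position p, so the new entry is linked just before p, which also covers the empty-list and append cases because p then sits back at the head. A self-contained user-space sketch of the idiom, with a hand-rolled list_head standing in for <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Link 'entry' immediately before 'pos', which is what list_add_tail()
 * does when 'pos' is treated as the head of the tail segment. */
static void list_add_tail(struct list_head *entry, struct list_head *pos)
{
	entry->prev = pos->prev;
	entry->next = pos;
	pos->prev->next = entry;
	pos->prev = entry;
}

struct ifa { int scope; struct list_head if_list; };

/* Insert keeping the list sorted by descending scope, as
 * ipv6_link_dev_addr() does: stop at the first lower-scope entry and
 * link in front of it (or in front of the head if there is none). */
static void link_sorted(struct list_head *head, struct ifa *ifp)
{
	struct list_head *p;

	for (p = head->next; p != head; p = p->next) {
		struct ifa *ifa = list_entry(p, struct ifa, if_list);

		if (ifp->scope >= ifa->scope)
			break;
	}
	list_add_tail(&ifp->if_list, p);
}

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct ifa a = { 2 }, b = { 0 }, c = { 1 };
	struct list_head *p;

	link_sorted(&head, &a);
	link_sorted(&head, &b);
	link_sorted(&head, &c);
	for (p = head.next; p != &head; p = p->next)
		printf("%d ", list_entry(p, struct ifa, if_list)->scope);
	printf("\n");	/* prints: 2 1 0 */
	return 0;
}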
570 | 584 | ||
571 | /* | 585 | static u32 ipv6_addr_hash(const struct in6_addr *addr) |
572 | * Hash function taken from net_alias.c | ||
573 | */ | ||
574 | static u8 ipv6_addr_hash(const struct in6_addr *addr) | ||
575 | { | 586 | { |
576 | __u32 word; | ||
577 | |||
578 | /* | 587 | /* |
579 | * We perform the hash function over the last 64 bits of the address | 588 | * We perform the hash function over the last 64 bits of the address |
580 | * This will include the IEEE address token on links that support it. | 589 | * This will include the IEEE address token on links that support it. |
581 | */ | 590 | */ |
582 | 591 | return jhash_2words(addr->s6_addr32[2], addr->s6_addr32[3], 0) | |
583 | word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); | 592 | & (IN6_ADDR_HSIZE - 1); |
584 | word ^= (word >> 16); | ||
585 | word ^= (word >> 8); | ||
586 | |||
587 | return ((word ^ (word >> 4)) & 0x0f); | ||
588 | } | 593 | } |
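
The old net_alias.c-derived hash folded the low 64 bits down to 4 bits with shifts and XORs, which only ever addressed a 16-bucket table. jhash_2words() produces a full-strength 32-bit mix, and masking with IN6_ADDR_HSIZE - 1 then selects the bucket, which requires IN6_ADDR_HSIZE to be a power of two. A rough user-space analogue, with a generic 64-bit finalizer standing in for the kernel's Jenkins hash:

#include <stdint.h>
#include <stdio.h>

#define IN6_ADDR_HSIZE 256	/* illustrative; must be a power of two */

/* Stand-in mixer for jhash_2words(): any function with good avalanche
 * works for the demo; the kernel uses the Jenkins hash. */
static uint32_t mix2(uint32_t a, uint32_t b)
{
	uint64_t x = ((uint64_t)a << 32) | b;

	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
	x ^= x >> 33;
	return (uint32_t)x;
}

static unsigned int addr_hash(const uint32_t s6_addr32[4])
{
	/* Hash only the low 64 bits: on links with EUI-64 identifiers
	 * these carry the high-entropy interface token. */
	return mix2(s6_addr32[2], s6_addr32[3]) & (IN6_ADDR_HSIZE - 1);
}

int main(void)
{
	uint32_t addr[4] = { 0xfe800000, 0, 0x021b21ff, 0xfe9a1234 };

	printf("bucket = %u\n", addr_hash(addr));
	return 0;
}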
589 | 594 | ||
590 | /* On success it returns ifp with increased reference count */ | 595 | /* On success it returns ifp with increased reference count */ |
@@ -595,7 +600,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
595 | { | 600 | { |
596 | struct inet6_ifaddr *ifa = NULL; | 601 | struct inet6_ifaddr *ifa = NULL; |
597 | struct rt6_info *rt; | 602 | struct rt6_info *rt; |
598 | int hash; | 603 | unsigned int hash; |
599 | int err = 0; | 604 | int err = 0; |
600 | int addr_type = ipv6_addr_type(addr); | 605 | int addr_type = ipv6_addr_type(addr); |
601 | 606 | ||
@@ -616,7 +621,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
616 | goto out2; | 621 | goto out2; |
617 | } | 622 | } |
618 | 623 | ||
619 | write_lock(&addrconf_hash_lock); | 624 | spin_lock(&addrconf_hash_lock); |
620 | 625 | ||
621 | /* Ignore adding duplicate addresses on an interface */ | 626 | /* Ignore adding duplicate addresses on an interface */ |
622 | if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { | 627 | if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { |
@@ -643,6 +648,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
643 | 648 | ||
644 | spin_lock_init(&ifa->lock); | 649 | spin_lock_init(&ifa->lock); |
645 | init_timer(&ifa->timer); | 650 | init_timer(&ifa->timer); |
651 | INIT_HLIST_NODE(&ifa->addr_lst); | ||
646 | ifa->timer.data = (unsigned long) ifa; | 652 | ifa->timer.data = (unsigned long) ifa; |
647 | ifa->scope = scope; | 653 | ifa->scope = scope; |
648 | ifa->prefix_len = pfxlen; | 654 | ifa->prefix_len = pfxlen; |
@@ -669,10 +675,9 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
669 | /* Add to big hash table */ | 675 | /* Add to big hash table */ |
670 | hash = ipv6_addr_hash(addr); | 676 | hash = ipv6_addr_hash(addr); |
671 | 677 | ||
672 | ifa->lst_next = inet6_addr_lst[hash]; | 678 | hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]); |
673 | inet6_addr_lst[hash] = ifa; | ||
674 | in6_ifa_hold(ifa); | 679 | in6_ifa_hold(ifa); |
675 | write_unlock(&addrconf_hash_lock); | 680 | spin_unlock(&addrconf_hash_lock); |
676 | 681 | ||
677 | write_lock(&idev->lock); | 682 | write_lock(&idev->lock); |
678 | /* Add to inet6_dev unicast addr list. */ | 683 | /* Add to inet6_dev unicast addr list. */ |
@@ -680,8 +685,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
680 | 685 | ||
681 | #ifdef CONFIG_IPV6_PRIVACY | 686 | #ifdef CONFIG_IPV6_PRIVACY |
682 | if (ifa->flags&IFA_F_TEMPORARY) { | 687 | if (ifa->flags&IFA_F_TEMPORARY) { |
683 | ifa->tmp_next = idev->tempaddr_list; | 688 | list_add(&ifa->tmp_list, &idev->tempaddr_list); |
684 | idev->tempaddr_list = ifa; | ||
685 | in6_ifa_hold(ifa); | 689 | in6_ifa_hold(ifa); |
686 | } | 690 | } |
687 | #endif | 691 | #endif |
@@ -700,7 +704,7 @@ out2: | |||
700 | 704 | ||
701 | return ifa; | 705 | return ifa; |
702 | out: | 706 | out: |
703 | write_unlock(&addrconf_hash_lock); | 707 | spin_unlock(&addrconf_hash_lock); |
704 | goto out2; | 708 | goto out2; |
705 | } | 709 | } |
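
The hash-insert path in ipv6_add_addr() shows the writer half of the new locking scheme: addrconf_hash_lock becomes a plain spinlock that serializes writers only, while hlist_add_head_rcu() publishes the node with the memory ordering that lockless readers depend on, and the table takes its own reference under the lock. A kernel-style sketch of that shape, using the same names as the patch (the error value is illustrative, not the exact one returned here):

spin_lock(&addrconf_hash_lock);

/* Writers still check for duplicates while serialized. */
if (ipv6_chk_same_addr(net, addr, idev->dev)) {
	spin_unlock(&addrconf_hash_lock);
	return ERR_PTR(-EEXIST);		/* illustrative error path */
}

hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
in6_ifa_hold(ifa);	/* the table's reference, taken under the lock */

spin_unlock(&addrconf_hash_lock);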
706 | 710 | ||
@@ -708,7 +712,7 @@ out: | |||
708 | 712 | ||
709 | static void ipv6_del_addr(struct inet6_ifaddr *ifp) | 713 | static void ipv6_del_addr(struct inet6_ifaddr *ifp) |
710 | { | 714 | { |
711 | struct inet6_ifaddr *ifa, **ifap; | 715 | struct inet6_ifaddr *ifa, *ifn; |
712 | struct inet6_dev *idev = ifp->idev; | 716 | struct inet6_dev *idev = ifp->idev; |
713 | int hash; | 717 | int hash; |
714 | int deleted = 0, onlink = 0; | 718 | int deleted = 0, onlink = 0; |
@@ -718,42 +722,28 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
718 | 722 | ||
719 | ifp->dead = 1; | 723 | ifp->dead = 1; |
720 | 724 | ||
721 | write_lock_bh(&addrconf_hash_lock); | 725 | spin_lock_bh(&addrconf_hash_lock); |
722 | for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL; | 726 | hlist_del_init_rcu(&ifp->addr_lst); |
723 | ifap = &ifa->lst_next) { | 727 | __in6_ifa_put(ifp); |
724 | if (ifa == ifp) { | 728 | spin_unlock_bh(&addrconf_hash_lock); |
725 | *ifap = ifa->lst_next; | ||
726 | __in6_ifa_put(ifp); | ||
727 | ifa->lst_next = NULL; | ||
728 | break; | ||
729 | } | ||
730 | } | ||
731 | write_unlock_bh(&addrconf_hash_lock); | ||
732 | 729 | ||
733 | write_lock_bh(&idev->lock); | 730 | write_lock_bh(&idev->lock); |
734 | #ifdef CONFIG_IPV6_PRIVACY | 731 | #ifdef CONFIG_IPV6_PRIVACY |
735 | if (ifp->flags&IFA_F_TEMPORARY) { | 732 | if (ifp->flags&IFA_F_TEMPORARY) { |
736 | for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL; | 733 | list_del(&ifp->tmp_list); |
737 | ifap = &ifa->tmp_next) { | 734 | if (ifp->ifpub) { |
738 | if (ifa == ifp) { | 735 | in6_ifa_put(ifp->ifpub); |
739 | *ifap = ifa->tmp_next; | 736 | ifp->ifpub = NULL; |
740 | if (ifp->ifpub) { | ||
741 | in6_ifa_put(ifp->ifpub); | ||
742 | ifp->ifpub = NULL; | ||
743 | } | ||
744 | __in6_ifa_put(ifp); | ||
745 | ifa->tmp_next = NULL; | ||
746 | break; | ||
747 | } | ||
748 | } | 737 | } |
738 | __in6_ifa_put(ifp); | ||
749 | } | 739 | } |
750 | #endif | 740 | #endif |
751 | 741 | ||
752 | for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) { | 742 | list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) { |
753 | if (ifa == ifp) { | 743 | if (ifa == ifp) { |
754 | *ifap = ifa->if_next; | 744 | list_del_init(&ifp->if_list); |
755 | __in6_ifa_put(ifp); | 745 | __in6_ifa_put(ifp); |
756 | ifa->if_next = NULL; | 746 | |
757 | if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) | 747 | if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) |
758 | break; | 748 | break; |
759 | deleted = 1; | 749 | deleted = 1; |
@@ -786,7 +776,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
786 | } | 776 | } |
787 | } | 777 | } |
788 | } | 778 | } |
789 | ifap = &ifa->if_next; | ||
790 | } | 779 | } |
791 | write_unlock_bh(&idev->lock); | 780 | write_unlock_bh(&idev->lock); |
792 | 781 | ||
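
ipv6_del_addr() and the other unlink sites walk with list_for_each_entry_safe(), whose second cursor (ifn above) caches the successor before the loop body runs. That matters because list_del_init() re-points the deleted node at itself, and the plain iterator would then revisit it forever. A minimal sketch of the difference, assuming <linux/list.h>:

struct inet6_ifaddr *ifa, *ifn;

/* Unsafe: after list_del_init(&ifa->if_list), ifa->if_list.next points
 * back at ifa itself, so this iterator never reaches the head again. */
list_for_each_entry(ifa, &idev->addr_list, if_list)
	if (ifa == ifp)
		list_del_init(&ifa->if_list);	/* breaks this very walk */

/* Safe: ifn is loaded before the body, so deleting ifa is fine. */
list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list)
	if (ifa == ifp)
		list_del_init(&ifa->if_list);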
@@ -1165,7 +1154,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, | |||
1165 | continue; | 1154 | continue; |
1166 | 1155 | ||
1167 | read_lock_bh(&idev->lock); | 1156 | read_lock_bh(&idev->lock); |
1168 | for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) { | 1157 | list_for_each_entry(score->ifa, &idev->addr_list, if_list) { |
1169 | int i; | 1158 | int i; |
1170 | 1159 | ||
1171 | /* | 1160 | /* |
@@ -1243,7 +1232,6 @@ try_nextdev: | |||
1243 | in6_ifa_put(hiscore->ifa); | 1232 | in6_ifa_put(hiscore->ifa); |
1244 | return 0; | 1233 | return 0; |
1245 | } | 1234 | } |
1246 | |||
1247 | EXPORT_SYMBOL(ipv6_dev_get_saddr); | 1235 | EXPORT_SYMBOL(ipv6_dev_get_saddr); |
1248 | 1236 | ||
1249 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | 1237 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, |
@@ -1253,12 +1241,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | |||
1253 | int err = -EADDRNOTAVAIL; | 1241 | int err = -EADDRNOTAVAIL; |
1254 | 1242 | ||
1255 | rcu_read_lock(); | 1243 | rcu_read_lock(); |
1256 | if ((idev = __in6_dev_get(dev)) != NULL) { | 1244 | idev = __in6_dev_get(dev); |
1245 | if (idev) { | ||
1257 | struct inet6_ifaddr *ifp; | 1246 | struct inet6_ifaddr *ifp; |
1258 | 1247 | ||
1259 | read_lock_bh(&idev->lock); | 1248 | read_lock_bh(&idev->lock); |
1260 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { | 1249 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
1261 | if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { | 1250 | if (ifp->scope == IFA_LINK && |
1251 | !(ifp->flags & banned_flags)) { | ||
1262 | ipv6_addr_copy(addr, &ifp->addr); | 1252 | ipv6_addr_copy(addr, &ifp->addr); |
1263 | err = 0; | 1253 | err = 0; |
1264 | break; | 1254 | break; |
@@ -1276,7 +1266,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1276 | struct inet6_ifaddr *ifp; | 1266 | struct inet6_ifaddr *ifp; |
1277 | 1267 | ||
1278 | read_lock_bh(&idev->lock); | 1268 | read_lock_bh(&idev->lock); |
1279 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) | 1269 | list_for_each_entry(ifp, &idev->addr_list, if_list) |
1280 | cnt++; | 1270 | cnt++; |
1281 | read_unlock_bh(&idev->lock); | 1271 | read_unlock_bh(&idev->lock); |
1282 | return cnt; | 1272 | return cnt; |
@@ -1285,11 +1275,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1285 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | 1275 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, |
1286 | struct net_device *dev, int strict) | 1276 | struct net_device *dev, int strict) |
1287 | { | 1277 | { |
1288 | struct inet6_ifaddr * ifp; | 1278 | struct inet6_ifaddr *ifp = NULL; |
1289 | u8 hash = ipv6_addr_hash(addr); | 1279 | struct hlist_node *node; |
1280 | unsigned int hash = ipv6_addr_hash(addr); | ||
1290 | 1281 | ||
1291 | read_lock_bh(&addrconf_hash_lock); | 1282 | rcu_read_lock_bh(); |
1292 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1283 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1293 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1284 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1294 | continue; | 1285 | continue; |
1295 | if (ipv6_addr_equal(&ifp->addr, addr) && | 1286 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -1299,27 +1290,28 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | |||
1299 | break; | 1290 | break; |
1300 | } | 1291 | } |
1301 | } | 1292 | } |
1302 | read_unlock_bh(&addrconf_hash_lock); | 1293 | rcu_read_unlock_bh(); |
1294 | |||
1303 | return ifp != NULL; | 1295 | return ifp != NULL; |
1304 | } | 1296 | } |
1305 | EXPORT_SYMBOL(ipv6_chk_addr); | 1297 | EXPORT_SYMBOL(ipv6_chk_addr); |
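
One subtlety of the four-argument hlist iterators of this kernel generation: when hlist_for_each_entry_rcu() exhausts a chain, the node cursor is NULL but the entry cursor is left pointing at the last element it visited. A post-loop test of the entry cursor, as in the return ifp != NULL above, is therefore only reliable when the loop cannot terminate naturally after examining a non-matching entry; the explicit result variable used by ipv6_get_ifaddr() below sidesteps the question entirely. A kernel-style sketch of that safer shape:

struct inet6_ifaddr *ifp, *result = NULL;
struct hlist_node *node;
unsigned int hash = ipv6_addr_hash(addr);

rcu_read_lock_bh();
hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
	if (net_eq(dev_net(ifp->idev->dev), net) &&
	    ipv6_addr_equal(&ifp->addr, addr)) {
		result = ifp;	/* record the hit explicitly */
		break;
	}
}
rcu_read_unlock_bh();
/* result is NULL or the match; ifp may be a stale non-NULL cursor. */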
1306 | 1298 | ||
1307 | static | 1299 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
1308 | int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 1300 | struct net_device *dev) |
1309 | struct net_device *dev) | ||
1310 | { | 1301 | { |
1311 | struct inet6_ifaddr * ifp; | 1302 | unsigned int hash = ipv6_addr_hash(addr); |
1312 | u8 hash = ipv6_addr_hash(addr); | 1303 | struct inet6_ifaddr *ifp; |
1304 | struct hlist_node *node; | ||
1313 | 1305 | ||
1314 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1306 | hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1315 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1307 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1316 | continue; | 1308 | continue; |
1317 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1309 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
1318 | if (dev == NULL || ifp->idev->dev == dev) | 1310 | if (dev == NULL || ifp->idev->dev == dev) |
1319 | break; | 1311 | return true; |
1320 | } | 1312 | } |
1321 | } | 1313 | } |
1322 | return ifp != NULL; | 1314 | return false; |
1323 | } | 1315 | } |
1324 | 1316 | ||
1325 | int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) | 1317 | int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) |
@@ -1333,7 +1325,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) | |||
1333 | idev = __in6_dev_get(dev); | 1325 | idev = __in6_dev_get(dev); |
1334 | if (idev) { | 1326 | if (idev) { |
1335 | read_lock_bh(&idev->lock); | 1327 | read_lock_bh(&idev->lock); |
1336 | for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { | 1328 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
1337 | onlink = ipv6_prefix_equal(addr, &ifa->addr, | 1329 | onlink = ipv6_prefix_equal(addr, &ifa->addr, |
1338 | ifa->prefix_len); | 1330 | ifa->prefix_len); |
1339 | if (onlink) | 1331 | if (onlink) |
@@ -1350,24 +1342,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix); | |||
1350 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, | 1342 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, |
1351 | struct net_device *dev, int strict) | 1343 | struct net_device *dev, int strict) |
1352 | { | 1344 | { |
1353 | struct inet6_ifaddr * ifp; | 1345 | struct inet6_ifaddr *ifp, *result = NULL; |
1354 | u8 hash = ipv6_addr_hash(addr); | 1346 | unsigned int hash = ipv6_addr_hash(addr); |
1347 | struct hlist_node *node; | ||
1355 | 1348 | ||
1356 | read_lock_bh(&addrconf_hash_lock); | 1349 | rcu_read_lock_bh(); |
1357 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1350 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1358 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1351 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1359 | continue; | 1352 | continue; |
1360 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1353 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
1361 | if (dev == NULL || ifp->idev->dev == dev || | 1354 | if (dev == NULL || ifp->idev->dev == dev || |
1362 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { | 1355 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { |
1356 | result = ifp; | ||
1363 | in6_ifa_hold(ifp); | 1357 | in6_ifa_hold(ifp); |
1364 | break; | 1358 | break; |
1365 | } | 1359 | } |
1366 | } | 1360 | } |
1367 | } | 1361 | } |
1368 | read_unlock_bh(&addrconf_hash_lock); | 1362 | rcu_read_unlock_bh(); |
1369 | 1363 | ||
1370 | return ifp; | 1364 | return result; |
1371 | } | 1365 | } |
1372 | 1366 | ||
1373 | /* Gets referenced address, destroys ifaddr */ | 1367 | /* Gets referenced address, destroys ifaddr */ |
@@ -1570,7 +1564,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) | |||
1570 | struct inet6_ifaddr *ifp; | 1564 | struct inet6_ifaddr *ifp; |
1571 | 1565 | ||
1572 | read_lock_bh(&idev->lock); | 1566 | read_lock_bh(&idev->lock); |
1573 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { | 1567 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
1574 | if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { | 1568 | if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { |
1575 | memcpy(eui, ifp->addr.s6_addr+8, 8); | 1569 | memcpy(eui, ifp->addr.s6_addr+8, 8); |
1576 | err = 0; | 1570 | err = 0; |
@@ -1738,7 +1732,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | |||
1738 | 1732 | ||
1739 | ASSERT_RTNL(); | 1733 | ASSERT_RTNL(); |
1740 | 1734 | ||
1741 | if ((idev = ipv6_find_idev(dev)) == NULL) | 1735 | idev = ipv6_find_idev(dev); |
1736 | if (!idev) | ||
1742 | return NULL; | 1737 | return NULL; |
1743 | 1738 | ||
1744 | /* Add default multicast route */ | 1739 | /* Add default multicast route */ |
@@ -1971,7 +1966,7 @@ ok: | |||
1971 | #ifdef CONFIG_IPV6_PRIVACY | 1966 | #ifdef CONFIG_IPV6_PRIVACY |
1972 | read_lock_bh(&in6_dev->lock); | 1967 | read_lock_bh(&in6_dev->lock); |
1973 | /* update all temporary addresses in the list */ | 1968 | /* update all temporary addresses in the list */ |
1974 | for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) { | 1969 | list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { |
1975 | /* | 1970 | /* |
1976 | * When adjusting the lifetimes of an existing | 1971 | * When adjusting the lifetimes of an existing |
1977 | * temporary address, only lower the lifetimes. | 1972 | * temporary address, only lower the lifetimes. |
@@ -2174,7 +2169,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2174 | return -ENXIO; | 2169 | return -ENXIO; |
2175 | 2170 | ||
2176 | read_lock_bh(&idev->lock); | 2171 | read_lock_bh(&idev->lock); |
2177 | for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) { | 2172 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2178 | if (ifp->prefix_len == plen && | 2173 | if (ifp->prefix_len == plen && |
2179 | ipv6_addr_equal(pfx, &ifp->addr)) { | 2174 | ipv6_addr_equal(pfx, &ifp->addr)) { |
2180 | in6_ifa_hold(ifp); | 2175 | in6_ifa_hold(ifp); |
@@ -2185,7 +2180,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2185 | /* If the last address is deleted administratively, | 2180 | /* If the last address is deleted administratively, |
2186 | disable IPv6 on this interface. | 2181 | disable IPv6 on this interface. |
2187 | */ | 2182 | */ |
2188 | if (idev->addr_list == NULL) | 2183 | if (list_empty(&idev->addr_list)) |
2189 | addrconf_ifdown(idev->dev, 1); | 2184 | addrconf_ifdown(idev->dev, 1); |
2190 | return 0; | 2185 | return 0; |
2191 | } | 2186 | } |
@@ -2446,7 +2441,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) | |||
2446 | 2441 | ||
2447 | ASSERT_RTNL(); | 2442 | ASSERT_RTNL(); |
2448 | 2443 | ||
2449 | if ((idev = addrconf_add_dev(dev)) == NULL) { | 2444 | idev = addrconf_add_dev(dev); |
2445 | if (!idev) { | ||
2450 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); | 2446 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); |
2451 | return; | 2447 | return; |
2452 | } | 2448 | } |
@@ -2461,7 +2457,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2461 | int run_pending = 0; | 2457 | int run_pending = 0; |
2462 | int err; | 2458 | int err; |
2463 | 2459 | ||
2464 | switch(event) { | 2460 | switch (event) { |
2465 | case NETDEV_REGISTER: | 2461 | case NETDEV_REGISTER: |
2466 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { | 2462 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { |
2467 | idev = ipv6_add_dev(dev); | 2463 | idev = ipv6_add_dev(dev); |
@@ -2469,6 +2465,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2469 | return notifier_from_errno(-ENOMEM); | 2465 | return notifier_from_errno(-ENOMEM); |
2470 | } | 2466 | } |
2471 | break; | 2467 | break; |
2468 | |||
2472 | case NETDEV_UP: | 2469 | case NETDEV_UP: |
2473 | case NETDEV_CHANGE: | 2470 | case NETDEV_CHANGE: |
2474 | if (dev->flags & IFF_SLAVE) | 2471 | if (dev->flags & IFF_SLAVE) |
@@ -2498,10 +2495,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2498 | } | 2495 | } |
2499 | 2496 | ||
2500 | if (idev) { | 2497 | if (idev) { |
2501 | if (idev->if_flags & IF_READY) { | 2498 | if (idev->if_flags & IF_READY) |
2502 | /* device is already configured. */ | 2499 | /* device is already configured. */ |
2503 | break; | 2500 | break; |
2504 | } | ||
2505 | idev->if_flags |= IF_READY; | 2501 | idev->if_flags |= IF_READY; |
2506 | } | 2502 | } |
2507 | 2503 | ||
@@ -2513,7 +2509,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2513 | run_pending = 1; | 2509 | run_pending = 1; |
2514 | } | 2510 | } |
2515 | 2511 | ||
2516 | switch(dev->type) { | 2512 | switch (dev->type) { |
2517 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2513 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) |
2518 | case ARPHRD_SIT: | 2514 | case ARPHRD_SIT: |
2519 | addrconf_sit_config(dev); | 2515 | addrconf_sit_config(dev); |
@@ -2530,25 +2526,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2530 | addrconf_dev_config(dev); | 2526 | addrconf_dev_config(dev); |
2531 | break; | 2527 | break; |
2532 | } | 2528 | } |
2529 | |||
2533 | if (idev) { | 2530 | if (idev) { |
2534 | if (run_pending) | 2531 | if (run_pending) |
2535 | addrconf_dad_run(idev); | 2532 | addrconf_dad_run(idev); |
2536 | 2533 | ||
2537 | /* If the MTU changed during the interface down, when the | 2534 | /* |
2538 | interface up, the changed MTU must be reflected in the | 2535 | * If the MTU changed while the interface was down, |
2539 | idev as well as routers. | 2536 | * the new MTU must be reflected in the idev as |
2537 | * well as in routers once the interface is up. |
2540 | */ | 2538 | */ |
2541 | if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) { | 2539 | if (idev->cnf.mtu6 != dev->mtu && |
2540 | dev->mtu >= IPV6_MIN_MTU) { | ||
2542 | rt6_mtu_change(dev, dev->mtu); | 2541 | rt6_mtu_change(dev, dev->mtu); |
2543 | idev->cnf.mtu6 = dev->mtu; | 2542 | idev->cnf.mtu6 = dev->mtu; |
2544 | } | 2543 | } |
2545 | idev->tstamp = jiffies; | 2544 | idev->tstamp = jiffies; |
2546 | inet6_ifinfo_notify(RTM_NEWLINK, idev); | 2545 | inet6_ifinfo_notify(RTM_NEWLINK, idev); |
2547 | /* If the changed mtu during down is lower than IPV6_MIN_MTU | 2546 | |
2548 | stop IPv6 on this interface. | 2547 | /* |
2548 | * If the changed mtu during down is lower than | ||
2549 | * IPV6_MIN_MTU stop IPv6 on this interface. | ||
2549 | */ | 2550 | */ |
2550 | if (dev->mtu < IPV6_MIN_MTU) | 2551 | if (dev->mtu < IPV6_MIN_MTU) |
2551 | addrconf_ifdown(dev, event != NETDEV_DOWN); | 2552 | addrconf_ifdown(dev, 1); |
2552 | } | 2553 | } |
2553 | break; | 2554 | break; |
2554 | 2555 | ||
@@ -2565,7 +2566,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2565 | break; | 2566 | break; |
2566 | } | 2567 | } |
2567 | 2568 | ||
2568 | /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */ | 2569 | /* |
2570 | * MTU fell below IPV6_MIN_MTU. | ||
2571 | * Stop IPv6 on this interface. | ||
2572 | */ | ||
2569 | 2573 | ||
2570 | case NETDEV_DOWN: | 2574 | case NETDEV_DOWN: |
2571 | case NETDEV_UNREGISTER: | 2575 | case NETDEV_UNREGISTER: |
@@ -2585,9 +2589,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2585 | return notifier_from_errno(err); | 2589 | return notifier_from_errno(err); |
2586 | } | 2590 | } |
2587 | break; | 2591 | break; |
2588 | case NETDEV_BONDING_OLDTYPE: | 2592 | |
2589 | case NETDEV_BONDING_NEWTYPE: | 2593 | case NETDEV_PRE_TYPE_CHANGE: |
2590 | addrconf_bonding_change(dev, event); | 2594 | case NETDEV_POST_TYPE_CHANGE: |
2595 | addrconf_type_change(dev, event); | ||
2591 | break; | 2596 | break; |
2592 | } | 2597 | } |
2593 | 2598 | ||
@@ -2599,28 +2604,27 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2599 | */ | 2604 | */ |
2600 | static struct notifier_block ipv6_dev_notf = { | 2605 | static struct notifier_block ipv6_dev_notf = { |
2601 | .notifier_call = addrconf_notify, | 2606 | .notifier_call = addrconf_notify, |
2602 | .priority = 0 | ||
2603 | }; | 2607 | }; |
2604 | 2608 | ||
2605 | static void addrconf_bonding_change(struct net_device *dev, unsigned long event) | 2609 | static void addrconf_type_change(struct net_device *dev, unsigned long event) |
2606 | { | 2610 | { |
2607 | struct inet6_dev *idev; | 2611 | struct inet6_dev *idev; |
2608 | ASSERT_RTNL(); | 2612 | ASSERT_RTNL(); |
2609 | 2613 | ||
2610 | idev = __in6_dev_get(dev); | 2614 | idev = __in6_dev_get(dev); |
2611 | 2615 | ||
2612 | if (event == NETDEV_BONDING_NEWTYPE) | 2616 | if (event == NETDEV_POST_TYPE_CHANGE) |
2613 | ipv6_mc_remap(idev); | 2617 | ipv6_mc_remap(idev); |
2614 | else if (event == NETDEV_BONDING_OLDTYPE) | 2618 | else if (event == NETDEV_PRE_TYPE_CHANGE) |
2615 | ipv6_mc_unmap(idev); | 2619 | ipv6_mc_unmap(idev); |
2616 | } | 2620 | } |
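
addrconf_bonding_change() becomes addrconf_type_change() because the bonding-specific notifier events were generalized: NETDEV_PRE_TYPE_CHANGE fires while dev->type still holds the old ARPHRD_* value (and a subscriber may veto the change by returning NOTIFY_BAD), NETDEV_POST_TYPE_CHANGE fires once it holds the new one. A minimal sketch of a subscriber distinguishing the two, assuming the netdevice notifier API of this era, where the payload pointer is the net_device itself:

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-netdev_notifier_info era */

	switch (event) {
	case NETDEV_PRE_TYPE_CHANGE:
		/* dev->type still holds the old ARPHRD_* value here;
		 * tear down anything that depends on it. */
		break;
	case NETDEV_POST_TYPE_CHANGE:
		/* dev->type now holds the new value; rebuild state. */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};
/* registered with register_netdevice_notifier(&demo_notifier) */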
2617 | 2621 | ||
2618 | static int addrconf_ifdown(struct net_device *dev, int how) | 2622 | static int addrconf_ifdown(struct net_device *dev, int how) |
2619 | { | 2623 | { |
2620 | struct inet6_dev *idev; | ||
2621 | struct inet6_ifaddr *ifa, *keep_list, **bifa; | ||
2622 | struct net *net = dev_net(dev); | 2624 | struct net *net = dev_net(dev); |
2623 | int i; | 2625 | struct inet6_dev *idev; |
2626 | struct inet6_ifaddr *ifa; | ||
2627 | LIST_HEAD(keep_list); | ||
2624 | 2628 | ||
2625 | ASSERT_RTNL(); | 2629 | ASSERT_RTNL(); |
2626 | 2630 | ||
@@ -2631,8 +2635,9 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2631 | if (idev == NULL) | 2635 | if (idev == NULL) |
2632 | return -ENODEV; | 2636 | return -ENODEV; |
2633 | 2637 | ||
2634 | /* Step 1: remove reference to ipv6 device from parent device. | 2638 | /* |
2635 | Do not dev_put! | 2639 | * Step 1: remove reference to ipv6 device from parent device. |
2640 | * Do not dev_put! | ||
2636 | */ | 2641 | */ |
2637 | if (how) { | 2642 | if (how) { |
2638 | idev->dead = 1; | 2643 | idev->dead = 1; |
@@ -2645,40 +2650,21 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2645 | 2650 | ||
2646 | } | 2651 | } |
2647 | 2652 | ||
2648 | /* Step 2: clear hash table */ | ||
2649 | for (i=0; i<IN6_ADDR_HSIZE; i++) { | ||
2650 | bifa = &inet6_addr_lst[i]; | ||
2651 | |||
2652 | write_lock_bh(&addrconf_hash_lock); | ||
2653 | while ((ifa = *bifa) != NULL) { | ||
2654 | if (ifa->idev == idev && | ||
2655 | (how || !(ifa->flags&IFA_F_PERMANENT) || | ||
2656 | ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { | ||
2657 | *bifa = ifa->lst_next; | ||
2658 | ifa->lst_next = NULL; | ||
2659 | __in6_ifa_put(ifa); | ||
2660 | continue; | ||
2661 | } | ||
2662 | bifa = &ifa->lst_next; | ||
2663 | } | ||
2664 | write_unlock_bh(&addrconf_hash_lock); | ||
2665 | } | ||
2666 | |||
2667 | write_lock_bh(&idev->lock); | 2653 | write_lock_bh(&idev->lock); |
2668 | 2654 | ||
2669 | /* Step 3: clear flags for stateless addrconf */ | 2655 | /* Step 2: clear flags for stateless addrconf */ |
2670 | if (!how) | 2656 | if (!how) |
2671 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); | 2657 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); |
2672 | 2658 | ||
2673 | /* Step 4: clear address list */ | ||
2674 | #ifdef CONFIG_IPV6_PRIVACY | 2659 | #ifdef CONFIG_IPV6_PRIVACY |
2675 | if (how && del_timer(&idev->regen_timer)) | 2660 | if (how && del_timer(&idev->regen_timer)) |
2676 | in6_dev_put(idev); | 2661 | in6_dev_put(idev); |
2677 | 2662 | ||
2678 | /* clear tempaddr list */ | 2663 | /* Step 3: clear tempaddr list */ |
2679 | while ((ifa = idev->tempaddr_list) != NULL) { | 2664 | while (!list_empty(&idev->tempaddr_list)) { |
2680 | idev->tempaddr_list = ifa->tmp_next; | 2665 | ifa = list_first_entry(&idev->tempaddr_list, |
2681 | ifa->tmp_next = NULL; | 2666 | struct inet6_ifaddr, tmp_list); |
2667 | list_del(&ifa->tmp_list); | ||
2682 | ifa->dead = 1; | 2668 | ifa->dead = 1; |
2683 | write_unlock_bh(&idev->lock); | 2669 | write_unlock_bh(&idev->lock); |
2684 | spin_lock_bh(&ifa->lock); | 2670 | spin_lock_bh(&ifa->lock); |
@@ -2692,23 +2678,18 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2692 | write_lock_bh(&idev->lock); | 2678 | write_lock_bh(&idev->lock); |
2693 | } | 2679 | } |
2694 | #endif | 2680 | #endif |
2695 | keep_list = NULL; | ||
2696 | bifa = &keep_list; | ||
2697 | while ((ifa = idev->addr_list) != NULL) { | ||
2698 | idev->addr_list = ifa->if_next; | ||
2699 | ifa->if_next = NULL; | ||
2700 | 2681 | ||
2682 | while (!list_empty(&idev->addr_list)) { | ||
2683 | ifa = list_first_entry(&idev->addr_list, | ||
2684 | struct inet6_ifaddr, if_list); | ||
2701 | addrconf_del_timer(ifa); | 2685 | addrconf_del_timer(ifa); |
2702 | 2686 | ||
2703 | /* If just doing link down, and address is permanent | 2687 | /* If just doing link down, and address is permanent |
2704 | and not link-local, then retain it. */ | 2688 | and not link-local, then retain it. */ |
2705 | if (how == 0 && | 2689 | if (!how && |
2706 | (ifa->flags&IFA_F_PERMANENT) && | 2690 | (ifa->flags&IFA_F_PERMANENT) && |
2707 | !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { | 2691 | !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { |
2708 | 2692 | list_move_tail(&ifa->if_list, &keep_list); | |
2709 | /* Move to holding list */ | ||
2710 | *bifa = ifa; | ||
2711 | bifa = &ifa->if_next; | ||
2712 | 2693 | ||
2713 | /* If not doing DAD on this address, just keep it. */ | 2694 | /* If not doing DAD on this address, just keep it. */ |
2714 | if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || | 2695 | if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || |
@@ -2724,10 +2705,17 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2724 | ifa->flags |= IFA_F_TENTATIVE; | 2705 | ifa->flags |= IFA_F_TENTATIVE; |
2725 | in6_ifa_hold(ifa); | 2706 | in6_ifa_hold(ifa); |
2726 | } else { | 2707 | } else { |
2708 | list_del(&ifa->if_list); | ||
2727 | ifa->dead = 1; | 2709 | ifa->dead = 1; |
2728 | } | 2710 | } |
2729 | write_unlock_bh(&idev->lock); | 2711 | write_unlock_bh(&idev->lock); |
2730 | 2712 | ||
2713 | /* clear hash table */ | ||
2714 | spin_lock_bh(&addrconf_hash_lock); | ||
2715 | hlist_del_init_rcu(&ifa->addr_lst); | ||
2716 | __in6_ifa_put(ifa); | ||
2717 | spin_unlock_bh(&addrconf_hash_lock); | ||
2718 | |||
2731 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2719 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2732 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); | 2720 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); |
2733 | in6_ifa_put(ifa); | 2721 | in6_ifa_put(ifa); |
@@ -2735,12 +2723,11 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2735 | write_lock_bh(&idev->lock); | 2723 | write_lock_bh(&idev->lock); |
2736 | } | 2724 | } |
2737 | 2725 | ||
2738 | idev->addr_list = keep_list; | 2726 | list_splice(&keep_list, &idev->addr_list); |
2739 | 2727 | ||
2740 | write_unlock_bh(&idev->lock); | 2728 | write_unlock_bh(&idev->lock); |
2741 | 2729 | ||
2742 | /* Step 5: Discard multicast list */ | 2730 | /* Step 4: Discard multicast list */ |
2743 | |||
2744 | if (how) | 2731 | if (how) |
2745 | ipv6_mc_destroy_dev(idev); | 2732 | ipv6_mc_destroy_dev(idev); |
2746 | else | 2733 | else |
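
addrconf_ifdown() drains the temp and main address lists with while (!list_empty()) / list_first_entry() loops rather than an iterator, because the per-entry work (timers, notifier chains) runs with idev->lock dropped, and re-acquiring the lock invalidates any cached position; restarting from the head on each pass is the only safe cursor. Survivors of a plain link-down are parked on the on-stack keep_list with list_move_tail() and spliced back in one operation. A kernel-style sketch of the shape, with hypothetical keep() and notify_and_put() helpers standing in for the real per-entry logic:

LIST_HEAD(keep_list);
struct inet6_ifaddr *ifa;

write_lock_bh(&idev->lock);
while (!list_empty(&idev->addr_list)) {
	ifa = list_first_entry(&idev->addr_list,
			       struct inet6_ifaddr, if_list);

	if (keep(ifa)) {			/* hypothetical predicate */
		list_move_tail(&ifa->if_list, &keep_list);
		continue;	/* off addr_list, so the loop progresses */
	}

	list_del_init(&ifa->if_list);
	write_unlock_bh(&idev->lock);	/* per-entry work takes other locks */
	notify_and_put(ifa);		/* hypothetical teardown helper */
	write_lock_bh(&idev->lock);
}
list_splice(&keep_list, &idev->addr_list);	/* survivors back in one go */
write_unlock_bh(&idev->lock);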
@@ -2748,8 +2735,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2748 | 2735 | ||
2749 | idev->tstamp = jiffies; | 2736 | idev->tstamp = jiffies; |
2750 | 2737 | ||
2751 | /* Shot the device (if unregistered) */ | 2738 | /* Last: Shoot the device (if unregistered) */ |
2752 | |||
2753 | if (how) { | 2739 | if (how) { |
2754 | addrconf_sysctl_unregister(idev); | 2740 | addrconf_sysctl_unregister(idev); |
2755 | neigh_parms_release(&nd_tbl, idev->nd_parms); | 2741 | neigh_parms_release(&nd_tbl, idev->nd_parms); |
@@ -2860,7 +2846,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2860 | * Optimistic nodes can start receiving | 2846 | * Optimistic nodes can start receiving |
2861 | * Frames right away | 2847 | * Frames right away |
2862 | */ | 2848 | */ |
2863 | if(ifp->flags & IFA_F_OPTIMISTIC) | 2849 | if (ifp->flags & IFA_F_OPTIMISTIC) |
2864 | ip6_ins_rt(ifp->rt); | 2850 | ip6_ins_rt(ifp->rt); |
2865 | 2851 | ||
2866 | addrconf_dad_kick(ifp); | 2852 | addrconf_dad_kick(ifp); |
@@ -2910,7 +2896,7 @@ out: | |||
2910 | 2896 | ||
2911 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | 2897 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) |
2912 | { | 2898 | { |
2913 | struct net_device * dev = ifp->idev->dev; | 2899 | struct net_device *dev = ifp->idev->dev; |
2914 | 2900 | ||
2915 | /* | 2901 | /* |
2916 | * Configure the address for reception. Now it is valid. | 2902 | * Configure the address for reception. Now it is valid. |
@@ -2941,11 +2927,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
2941 | } | 2927 | } |
2942 | } | 2928 | } |
2943 | 2929 | ||
2944 | static void addrconf_dad_run(struct inet6_dev *idev) { | 2930 | static void addrconf_dad_run(struct inet6_dev *idev) |
2931 | { | ||
2945 | struct inet6_ifaddr *ifp; | 2932 | struct inet6_ifaddr *ifp; |
2946 | 2933 | ||
2947 | read_lock_bh(&idev->lock); | 2934 | read_lock_bh(&idev->lock); |
2948 | for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { | 2935 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2949 | spin_lock(&ifp->lock); | 2936 | spin_lock(&ifp->lock); |
2950 | if (!(ifp->flags & IFA_F_TENTATIVE)) { | 2937 | if (!(ifp->flags & IFA_F_TENTATIVE)) { |
2951 | spin_unlock(&ifp->lock); | 2938 | spin_unlock(&ifp->lock); |
@@ -2970,36 +2957,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq) | |||
2970 | struct net *net = seq_file_net(seq); | 2957 | struct net *net = seq_file_net(seq); |
2971 | 2958 | ||
2972 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { | 2959 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { |
2973 | ifa = inet6_addr_lst[state->bucket]; | 2960 | struct hlist_node *n; |
2974 | 2961 | hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket], | |
2975 | while (ifa && !net_eq(dev_net(ifa->idev->dev), net)) | 2962 | addr_lst) |
2976 | ifa = ifa->lst_next; | 2963 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2977 | if (ifa) | 2964 | return ifa; |
2978 | break; | ||
2979 | } | 2965 | } |
2980 | return ifa; | 2966 | return NULL; |
2981 | } | 2967 | } |
2982 | 2968 | ||
2983 | static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) | 2969 | static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, |
2970 | struct inet6_ifaddr *ifa) | ||
2984 | { | 2971 | { |
2985 | struct if6_iter_state *state = seq->private; | 2972 | struct if6_iter_state *state = seq->private; |
2986 | struct net *net = seq_file_net(seq); | 2973 | struct net *net = seq_file_net(seq); |
2974 | struct hlist_node *n = &ifa->addr_lst; | ||
2987 | 2975 | ||
2988 | ifa = ifa->lst_next; | 2976 | hlist_for_each_entry_continue_rcu(ifa, n, addr_lst) |
2989 | try_again: | 2977 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2990 | if (ifa) { | 2978 | return ifa; |
2991 | if (!net_eq(dev_net(ifa->idev->dev), net)) { | ||
2992 | ifa = ifa->lst_next; | ||
2993 | goto try_again; | ||
2994 | } | ||
2995 | } | ||
2996 | 2979 | ||
2997 | if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) { | 2980 | while (++state->bucket < IN6_ADDR_HSIZE) { |
2998 | ifa = inet6_addr_lst[state->bucket]; | 2981 | hlist_for_each_entry_rcu(ifa, n, |
2999 | goto try_again; | 2982 | &inet6_addr_lst[state->bucket], addr_lst) { |
2983 | if (net_eq(dev_net(ifa->idev->dev), net)) | ||
2984 | return ifa; | ||
2985 | } | ||
3000 | } | 2986 | } |
3001 | 2987 | ||
3002 | return ifa; | 2988 | return NULL; |
3003 | } | 2989 | } |
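
The /proc/net/if_inet6 iterator now runs entirely inside one RCU-bh read section: if6_seq_start() takes rcu_read_lock_bh() (annotated __acquires(rcu)), if6_seq_stop() releases it, and if6_get_next() resumes within the current bucket via hlist_for_each_entry_continue_rcu() before scanning later buckets with the _rcu iterator, since the bucket-advance loop runs under the same lockless read section. A condensed sketch of the two-level walk (netns filtering dropped for brevity; four-argument macros as in this kernel generation):

/* Resume after 'ifa' in its bucket, then move on to later buckets.
 * demo_get_next() is a hypothetical condensation of if6_get_next(). */
static struct inet6_ifaddr *demo_get_next(struct if6_iter_state *state,
					  struct inet6_ifaddr *ifa)
{
	struct hlist_node *n = &ifa->addr_lst;

	hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
		return ifa;		/* next entry in the same bucket */

	while (++state->bucket < IN6_ADDR_HSIZE) {
		hlist_for_each_entry_rcu(ifa, n,
				&inet6_addr_lst[state->bucket], addr_lst)
			return ifa;	/* first entry of a later bucket */
	}
	return NULL;
}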
3004 | 2990 | ||
3005 | static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | 2991 | static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) |
@@ -3007,15 +2993,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | |||
3007 | struct inet6_ifaddr *ifa = if6_get_first(seq); | 2993 | struct inet6_ifaddr *ifa = if6_get_first(seq); |
3008 | 2994 | ||
3009 | if (ifa) | 2995 | if (ifa) |
3010 | while(pos && (ifa = if6_get_next(seq, ifa)) != NULL) | 2996 | while (pos && (ifa = if6_get_next(seq, ifa)) != NULL) |
3011 | --pos; | 2997 | --pos; |
3012 | return pos ? NULL : ifa; | 2998 | return pos ? NULL : ifa; |
3013 | } | 2999 | } |
3014 | 3000 | ||
3015 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) | 3001 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) |
3016 | __acquires(addrconf_hash_lock) | 3002 | __acquires(rcu) |
3017 | { | 3003 | { |
3018 | read_lock_bh(&addrconf_hash_lock); | 3004 | rcu_read_lock_bh(); |
3019 | return if6_get_idx(seq, *pos); | 3005 | return if6_get_idx(seq, *pos); |
3020 | } | 3006 | } |
3021 | 3007 | ||
@@ -3029,9 +3015,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
3029 | } | 3015 | } |
3030 | 3016 | ||
3031 | static void if6_seq_stop(struct seq_file *seq, void *v) | 3017 | static void if6_seq_stop(struct seq_file *seq, void *v) |
3032 | __releases(addrconf_hash_lock) | 3018 | __releases(rcu) |
3033 | { | 3019 | { |
3034 | read_unlock_bh(&addrconf_hash_lock); | 3020 | rcu_read_unlock_bh(); |
3035 | } | 3021 | } |
3036 | 3022 | ||
3037 | static int if6_seq_show(struct seq_file *seq, void *v) | 3023 | static int if6_seq_show(struct seq_file *seq, void *v) |
@@ -3101,10 +3087,12 @@ void if6_proc_exit(void) | |||
3101 | int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | 3087 | int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) |
3102 | { | 3088 | { |
3103 | int ret = 0; | 3089 | int ret = 0; |
3104 | struct inet6_ifaddr * ifp; | 3090 | struct inet6_ifaddr *ifp = NULL; |
3105 | u8 hash = ipv6_addr_hash(addr); | 3091 | struct hlist_node *n; |
3106 | read_lock_bh(&addrconf_hash_lock); | 3092 | unsigned int hash = ipv6_addr_hash(addr); |
3107 | for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { | 3093 | |
3094 | rcu_read_lock_bh(); | ||
3095 | hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) { | ||
3108 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 3096 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
3109 | continue; | 3097 | continue; |
3110 | if (ipv6_addr_equal(&ifp->addr, addr) && | 3098 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -3113,7 +3101,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3113 | break; | 3101 | break; |
3114 | } | 3102 | } |
3115 | } | 3103 | } |
3116 | read_unlock_bh(&addrconf_hash_lock); | 3104 | rcu_read_unlock_bh(); |
3117 | return ret; | 3105 | return ret; |
3118 | } | 3106 | } |
3119 | #endif | 3107 | #endif |
@@ -3124,43 +3112,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3124 | 3112 | ||
3125 | static void addrconf_verify(unsigned long foo) | 3113 | static void addrconf_verify(unsigned long foo) |
3126 | { | 3114 | { |
3115 | unsigned long now, next, next_sec, next_sched; | ||
3127 | struct inet6_ifaddr *ifp; | 3116 | struct inet6_ifaddr *ifp; |
3128 | unsigned long now, next; | 3117 | struct hlist_node *node; |
3129 | int i; | 3118 | int i; |
3130 | 3119 | ||
3131 | spin_lock_bh(&addrconf_verify_lock); | 3120 | rcu_read_lock_bh(); |
3121 | spin_lock(&addrconf_verify_lock); | ||
3132 | now = jiffies; | 3122 | now = jiffies; |
3133 | next = now + ADDR_CHECK_FREQUENCY; | 3123 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
3134 | 3124 | ||
3135 | del_timer(&addr_chk_timer); | 3125 | del_timer(&addr_chk_timer); |
3136 | 3126 | ||
3137 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 3127 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
3138 | |||
3139 | restart: | 3128 | restart: |
3140 | read_lock(&addrconf_hash_lock); | 3129 | hlist_for_each_entry_rcu(ifp, node, |
3141 | for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) { | 3130 | &inet6_addr_lst[i], addr_lst) { |
3142 | unsigned long age; | 3131 | unsigned long age; |
3143 | #ifdef CONFIG_IPV6_PRIVACY | ||
3144 | unsigned long regen_advance; | ||
3145 | #endif | ||
3146 | 3132 | ||
3147 | if (ifp->flags & IFA_F_PERMANENT) | 3133 | if (ifp->flags & IFA_F_PERMANENT) |
3148 | continue; | 3134 | continue; |
3149 | 3135 | ||
3150 | spin_lock(&ifp->lock); | 3136 | spin_lock(&ifp->lock); |
3151 | age = (now - ifp->tstamp) / HZ; | 3137 | /* We try to batch several events at once. */ |
3152 | 3138 | age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; | |
3153 | #ifdef CONFIG_IPV6_PRIVACY | ||
3154 | regen_advance = ifp->idev->cnf.regen_max_retry * | ||
3155 | ifp->idev->cnf.dad_transmits * | ||
3156 | ifp->idev->nd_parms->retrans_time / HZ; | ||
3157 | #endif | ||
3158 | 3139 | ||
3159 | if (ifp->valid_lft != INFINITY_LIFE_TIME && | 3140 | if (ifp->valid_lft != INFINITY_LIFE_TIME && |
3160 | age >= ifp->valid_lft) { | 3141 | age >= ifp->valid_lft) { |
3161 | spin_unlock(&ifp->lock); | 3142 | spin_unlock(&ifp->lock); |
3162 | in6_ifa_hold(ifp); | 3143 | in6_ifa_hold(ifp); |
3163 | read_unlock(&addrconf_hash_lock); | ||
3164 | ipv6_del_addr(ifp); | 3144 | ipv6_del_addr(ifp); |
3165 | goto restart; | 3145 | goto restart; |
3166 | } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { | 3146 | } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { |
@@ -3182,7 +3162,6 @@ restart: | |||
3182 | 3162 | ||
3183 | if (deprecate) { | 3163 | if (deprecate) { |
3184 | in6_ifa_hold(ifp); | 3164 | in6_ifa_hold(ifp); |
3185 | read_unlock(&addrconf_hash_lock); | ||
3186 | 3165 | ||
3187 | ipv6_ifa_notify(0, ifp); | 3166 | ipv6_ifa_notify(0, ifp); |
3188 | in6_ifa_put(ifp); | 3167 | in6_ifa_put(ifp); |
@@ -3191,6 +3170,10 @@ restart: | |||
3191 | #ifdef CONFIG_IPV6_PRIVACY | 3170 | #ifdef CONFIG_IPV6_PRIVACY |
3192 | } else if ((ifp->flags&IFA_F_TEMPORARY) && | 3171 | } else if ((ifp->flags&IFA_F_TEMPORARY) && |
3193 | !(ifp->flags&IFA_F_TENTATIVE)) { | 3172 | !(ifp->flags&IFA_F_TENTATIVE)) { |
3173 | unsigned long regen_advance = ifp->idev->cnf.regen_max_retry * | ||
3174 | ifp->idev->cnf.dad_transmits * | ||
3175 | ifp->idev->nd_parms->retrans_time / HZ; | ||
3176 | |||
3194 | if (age >= ifp->prefered_lft - regen_advance) { | 3177 | if (age >= ifp->prefered_lft - regen_advance) { |
3195 | struct inet6_ifaddr *ifpub = ifp->ifpub; | 3178 | struct inet6_ifaddr *ifpub = ifp->ifpub; |
3196 | if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) | 3179 | if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) |
@@ -3200,7 +3183,7 @@ restart: | |||
3200 | in6_ifa_hold(ifp); | 3183 | in6_ifa_hold(ifp); |
3201 | in6_ifa_hold(ifpub); | 3184 | in6_ifa_hold(ifpub); |
3202 | spin_unlock(&ifp->lock); | 3185 | spin_unlock(&ifp->lock); |
3203 | read_unlock(&addrconf_hash_lock); | 3186 | |
3204 | spin_lock(&ifpub->lock); | 3187 | spin_lock(&ifpub->lock); |
3205 | ifpub->regen_count = 0; | 3188 | ifpub->regen_count = 0; |
3206 | spin_unlock(&ifpub->lock); | 3189 | spin_unlock(&ifpub->lock); |
@@ -3220,12 +3203,26 @@ restart: | |||
3220 | spin_unlock(&ifp->lock); | 3203 | spin_unlock(&ifp->lock); |
3221 | } | 3204 | } |
3222 | } | 3205 | } |
3223 | read_unlock(&addrconf_hash_lock); | ||
3224 | } | 3206 | } |
3225 | 3207 | ||
3226 | addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next; | 3208 | next_sec = round_jiffies_up(next); |
3209 | next_sched = next; | ||
3210 | |||
3211 | /* If rounded timeout is accurate enough, accept it. */ | ||
3212 | if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ)) | ||
3213 | next_sched = next_sec; | ||
3214 | |||
3215 | /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */ | ||
3216 | if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX)) | ||
3217 | next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX; | ||
3218 | |||
3219 | ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", | ||
3220 | now, next, next_sec, next_sched)); | ||
3221 | |||
3222 | addr_chk_timer.expires = next_sched; | ||
3227 | add_timer(&addr_chk_timer); | 3223 | add_timer(&addr_chk_timer); |
3228 | spin_unlock_bh(&addrconf_verify_lock); | 3224 | spin_unlock(&addrconf_verify_lock); |
3225 | rcu_read_unlock_bh(); | ||
3229 | } | 3226 | } |
3230 | 3227 | ||
3231 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) | 3228 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) |
@@ -3515,8 +3512,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, | |||
3515 | return nlmsg_end(skb, nlh); | 3512 | return nlmsg_end(skb, nlh); |
3516 | } | 3513 | } |
3517 | 3514 | ||
3518 | enum addr_type_t | 3515 | enum addr_type_t { |
3519 | { | ||
3520 | UNICAST_ADDR, | 3516 | UNICAST_ADDR, |
3521 | MULTICAST_ADDR, | 3517 | MULTICAST_ADDR, |
3522 | ANYCAST_ADDR, | 3518 | ANYCAST_ADDR, |
@@ -3527,7 +3523,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3527 | struct netlink_callback *cb, enum addr_type_t type, | 3523 | struct netlink_callback *cb, enum addr_type_t type, |
3528 | int s_ip_idx, int *p_ip_idx) | 3524 | int s_ip_idx, int *p_ip_idx) |
3529 | { | 3525 | { |
3530 | struct inet6_ifaddr *ifa; | ||
3531 | struct ifmcaddr6 *ifmca; | 3526 | struct ifmcaddr6 *ifmca; |
3532 | struct ifacaddr6 *ifaca; | 3527 | struct ifacaddr6 *ifaca; |
3533 | int err = 1; | 3528 | int err = 1; |
@@ -3535,11 +3530,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3535 | 3530 | ||
3536 | read_lock_bh(&idev->lock); | 3531 | read_lock_bh(&idev->lock); |
3537 | switch (type) { | 3532 | switch (type) { |
3538 | case UNICAST_ADDR: | 3533 | case UNICAST_ADDR: { |
3534 | struct inet6_ifaddr *ifa; | ||
3535 | |||
3539 | /* unicast address incl. temp addr */ | 3536 | /* unicast address incl. temp addr */ |
3540 | for (ifa = idev->addr_list; ifa; | 3537 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
3541 | ifa = ifa->if_next, ip_idx++) { | 3538 | if (++ip_idx < s_ip_idx) |
3542 | if (ip_idx < s_ip_idx) | ||
3543 | continue; | 3539 | continue; |
3544 | err = inet6_fill_ifaddr(skb, ifa, | 3540 | err = inet6_fill_ifaddr(skb, ifa, |
3545 | NETLINK_CB(cb->skb).pid, | 3541 | NETLINK_CB(cb->skb).pid, |
@@ -3550,6 +3546,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3550 | break; | 3546 | break; |
3551 | } | 3547 | } |
3552 | break; | 3548 | break; |
3549 | } | ||
3553 | case MULTICAST_ADDR: | 3550 | case MULTICAST_ADDR: |
3554 | /* multicast address */ | 3551 | /* multicast address */ |
3555 | for (ifmca = idev->mc_list; ifmca; | 3552 | for (ifmca = idev->mc_list; ifmca; |
@@ -3614,7 +3611,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
3614 | if (h > s_h || idx > s_idx) | 3611 | if (h > s_h || idx > s_idx) |
3615 | s_ip_idx = 0; | 3612 | s_ip_idx = 0; |
3616 | ip_idx = 0; | 3613 | ip_idx = 0; |
3617 | if ((idev = __in6_dev_get(dev)) == NULL) | 3614 | idev = __in6_dev_get(dev); |
3615 | if (!idev) | ||
3618 | goto cont; | 3616 | goto cont; |
3619 | 3617 | ||
3620 | if (in6_dump_addrs(idev, skb, cb, type, | 3618 | if (in6_dump_addrs(idev, skb, cb, type, |
@@ -3681,12 +3679,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3681 | if (ifm->ifa_index) | 3679 | if (ifm->ifa_index) |
3682 | dev = __dev_get_by_index(net, ifm->ifa_index); | 3680 | dev = __dev_get_by_index(net, ifm->ifa_index); |
3683 | 3681 | ||
3684 | if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { | 3682 | ifa = ipv6_get_ifaddr(net, addr, dev, 1); |
3683 | if (!ifa) { | ||
3685 | err = -EADDRNOTAVAIL; | 3684 | err = -EADDRNOTAVAIL; |
3686 | goto errout; | 3685 | goto errout; |
3687 | } | 3686 | } |
3688 | 3687 | ||
3689 | if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) { | 3688 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL); |
3689 | if (!skb) { | ||
3690 | err = -ENOBUFS; | 3690 | err = -ENOBUFS; |
3691 | goto errout_ifa; | 3691 | goto errout_ifa; |
3692 | } | 3692 | } |
@@ -3811,7 +3811,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, | |||
3811 | static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, | 3811 | static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, |
3812 | int bytes) | 3812 | int bytes) |
3813 | { | 3813 | { |
3814 | switch(attrtype) { | 3814 | switch (attrtype) { |
3815 | case IFLA_INET6_STATS: | 3815 | case IFLA_INET6_STATS: |
3816 | __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); | 3816 | __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); |
3817 | break; | 3817 | break; |
@@ -4163,211 +4163,211 @@ static struct addrconf_sysctl_table | |||
4163 | .sysctl_header = NULL, | 4163 | .sysctl_header = NULL, |
4164 | .addrconf_vars = { | 4164 | .addrconf_vars = { |
4165 | { | 4165 | { |
4166 | .procname = "forwarding", | 4166 | .procname = "forwarding", |
4167 | .data = &ipv6_devconf.forwarding, | 4167 | .data = &ipv6_devconf.forwarding, |
4168 | .maxlen = sizeof(int), | 4168 | .maxlen = sizeof(int), |
4169 | .mode = 0644, | 4169 | .mode = 0644, |
4170 | .proc_handler = addrconf_sysctl_forward, | 4170 | .proc_handler = addrconf_sysctl_forward, |
4171 | }, | 4171 | }, |
4172 | { | 4172 | { |
4173 | .procname = "hop_limit", | 4173 | .procname = "hop_limit", |
4174 | .data = &ipv6_devconf.hop_limit, | 4174 | .data = &ipv6_devconf.hop_limit, |
4175 | .maxlen = sizeof(int), | 4175 | .maxlen = sizeof(int), |
4176 | .mode = 0644, | 4176 | .mode = 0644, |
4177 | .proc_handler = proc_dointvec, | 4177 | .proc_handler = proc_dointvec, |
4178 | }, | 4178 | }, |
4179 | { | 4179 | { |
4180 | .procname = "mtu", | 4180 | .procname = "mtu", |
4181 | .data = &ipv6_devconf.mtu6, | 4181 | .data = &ipv6_devconf.mtu6, |
4182 | .maxlen = sizeof(int), | 4182 | .maxlen = sizeof(int), |
4183 | .mode = 0644, | 4183 | .mode = 0644, |
4184 | .proc_handler = proc_dointvec, | 4184 | .proc_handler = proc_dointvec, |
4185 | }, | 4185 | }, |
4186 | { | 4186 | { |
4187 | .procname = "accept_ra", | 4187 | .procname = "accept_ra", |
4188 | .data = &ipv6_devconf.accept_ra, | 4188 | .data = &ipv6_devconf.accept_ra, |
4189 | .maxlen = sizeof(int), | 4189 | .maxlen = sizeof(int), |
4190 | .mode = 0644, | 4190 | .mode = 0644, |
4191 | .proc_handler = proc_dointvec, | 4191 | .proc_handler = proc_dointvec, |
4192 | }, | 4192 | }, |
4193 | { | 4193 | { |
4194 | .procname = "accept_redirects", | 4194 | .procname = "accept_redirects", |
4195 | .data = &ipv6_devconf.accept_redirects, | 4195 | .data = &ipv6_devconf.accept_redirects, |
4196 | .maxlen = sizeof(int), | 4196 | .maxlen = sizeof(int), |
4197 | .mode = 0644, | 4197 | .mode = 0644, |
4198 | .proc_handler = proc_dointvec, | 4198 | .proc_handler = proc_dointvec, |
4199 | }, | 4199 | }, |
4200 | { | 4200 | { |
4201 | .procname = "autoconf", | 4201 | .procname = "autoconf", |
4202 | .data = &ipv6_devconf.autoconf, | 4202 | .data = &ipv6_devconf.autoconf, |
4203 | .maxlen = sizeof(int), | 4203 | .maxlen = sizeof(int), |
4204 | .mode = 0644, | 4204 | .mode = 0644, |
4205 | .proc_handler = proc_dointvec, | 4205 | .proc_handler = proc_dointvec, |
4206 | }, | 4206 | }, |
4207 | { | 4207 | { |
4208 | .procname = "dad_transmits", | 4208 | .procname = "dad_transmits", |
4209 | .data = &ipv6_devconf.dad_transmits, | 4209 | .data = &ipv6_devconf.dad_transmits, |
4210 | .maxlen = sizeof(int), | 4210 | .maxlen = sizeof(int), |
4211 | .mode = 0644, | 4211 | .mode = 0644, |
4212 | .proc_handler = proc_dointvec, | 4212 | .proc_handler = proc_dointvec, |
4213 | }, | 4213 | }, |
4214 | { | 4214 | { |
4215 | .procname = "router_solicitations", | 4215 | .procname = "router_solicitations", |
4216 | .data = &ipv6_devconf.rtr_solicits, | 4216 | .data = &ipv6_devconf.rtr_solicits, |
4217 | .maxlen = sizeof(int), | 4217 | .maxlen = sizeof(int), |
4218 | .mode = 0644, | 4218 | .mode = 0644, |
4219 | .proc_handler = proc_dointvec, | 4219 | .proc_handler = proc_dointvec, |
4220 | }, | 4220 | }, |
4221 | { | 4221 | { |
4222 | .procname = "router_solicitation_interval", | 4222 | .procname = "router_solicitation_interval", |
4223 | .data = &ipv6_devconf.rtr_solicit_interval, | 4223 | .data = &ipv6_devconf.rtr_solicit_interval, |
4224 | .maxlen = sizeof(int), | 4224 | .maxlen = sizeof(int), |
4225 | .mode = 0644, | 4225 | .mode = 0644, |
4226 | .proc_handler = proc_dointvec_jiffies, | 4226 | .proc_handler = proc_dointvec_jiffies, |
4227 | }, | 4227 | }, |
4228 | { | 4228 | { |
4229 | .procname = "router_solicitation_delay", | 4229 | .procname = "router_solicitation_delay", |
4230 | .data = &ipv6_devconf.rtr_solicit_delay, | 4230 | .data = &ipv6_devconf.rtr_solicit_delay, |
4231 | .maxlen = sizeof(int), | 4231 | .maxlen = sizeof(int), |
4232 | .mode = 0644, | 4232 | .mode = 0644, |
4233 | .proc_handler = proc_dointvec_jiffies, | 4233 | .proc_handler = proc_dointvec_jiffies, |
4234 | }, | 4234 | }, |
4235 | { | 4235 | { |
4236 | .procname = "force_mld_version", | 4236 | .procname = "force_mld_version", |
4237 | .data = &ipv6_devconf.force_mld_version, | 4237 | .data = &ipv6_devconf.force_mld_version, |
4238 | .maxlen = sizeof(int), | 4238 | .maxlen = sizeof(int), |
4239 | .mode = 0644, | 4239 | .mode = 0644, |
4240 | .proc_handler = proc_dointvec, | 4240 | .proc_handler = proc_dointvec, |
4241 | }, | 4241 | }, |
4242 | #ifdef CONFIG_IPV6_PRIVACY | 4242 | #ifdef CONFIG_IPV6_PRIVACY |
4243 | { | 4243 | { |
4244 | .procname = "use_tempaddr", | 4244 | .procname = "use_tempaddr", |
4245 | .data = &ipv6_devconf.use_tempaddr, | 4245 | .data = &ipv6_devconf.use_tempaddr, |
4246 | .maxlen = sizeof(int), | 4246 | .maxlen = sizeof(int), |
4247 | .mode = 0644, | 4247 | .mode = 0644, |
4248 | .proc_handler = proc_dointvec, | 4248 | .proc_handler = proc_dointvec, |
4249 | }, | 4249 | }, |
4250 | { | 4250 | { |
4251 | .procname = "temp_valid_lft", | 4251 | .procname = "temp_valid_lft", |
4252 | .data = &ipv6_devconf.temp_valid_lft, | 4252 | .data = &ipv6_devconf.temp_valid_lft, |
4253 | .maxlen = sizeof(int), | 4253 | .maxlen = sizeof(int), |
4254 | .mode = 0644, | 4254 | .mode = 0644, |
4255 | .proc_handler = proc_dointvec, | 4255 | .proc_handler = proc_dointvec, |
4256 | }, | 4256 | }, |
4257 | { | 4257 | { |
4258 | .procname = "temp_prefered_lft", | 4258 | .procname = "temp_prefered_lft", |
4259 | .data = &ipv6_devconf.temp_prefered_lft, | 4259 | .data = &ipv6_devconf.temp_prefered_lft, |
4260 | .maxlen = sizeof(int), | 4260 | .maxlen = sizeof(int), |
4261 | .mode = 0644, | 4261 | .mode = 0644, |
4262 | .proc_handler = proc_dointvec, | 4262 | .proc_handler = proc_dointvec, |
4263 | }, | 4263 | }, |
4264 | { | 4264 | { |
4265 | .procname = "regen_max_retry", | 4265 | .procname = "regen_max_retry", |
4266 | .data = &ipv6_devconf.regen_max_retry, | 4266 | .data = &ipv6_devconf.regen_max_retry, |
4267 | .maxlen = sizeof(int), | 4267 | .maxlen = sizeof(int), |
4268 | .mode = 0644, | 4268 | .mode = 0644, |
4269 | .proc_handler = proc_dointvec, | 4269 | .proc_handler = proc_dointvec, |
4270 | }, | 4270 | }, |
4271 | { | 4271 | { |
4272 | .procname = "max_desync_factor", | 4272 | .procname = "max_desync_factor", |
4273 | .data = &ipv6_devconf.max_desync_factor, | 4273 | .data = &ipv6_devconf.max_desync_factor, |
4274 | .maxlen = sizeof(int), | 4274 | .maxlen = sizeof(int), |
4275 | .mode = 0644, | 4275 | .mode = 0644, |
4276 | .proc_handler = proc_dointvec, | 4276 | .proc_handler = proc_dointvec, |
4277 | }, | 4277 | }, |
4278 | #endif | 4278 | #endif |
4279 | { | 4279 | { |
4280 | .procname = "max_addresses", | 4280 | .procname = "max_addresses", |
4281 | .data = &ipv6_devconf.max_addresses, | 4281 | .data = &ipv6_devconf.max_addresses, |
4282 | .maxlen = sizeof(int), | 4282 | .maxlen = sizeof(int), |
4283 | .mode = 0644, | 4283 | .mode = 0644, |
4284 | .proc_handler = proc_dointvec, | 4284 | .proc_handler = proc_dointvec, |
4285 | }, | 4285 | }, |
4286 | { | 4286 | { |
4287 | .procname = "accept_ra_defrtr", | 4287 | .procname = "accept_ra_defrtr", |
4288 | .data = &ipv6_devconf.accept_ra_defrtr, | 4288 | .data = &ipv6_devconf.accept_ra_defrtr, |
4289 | .maxlen = sizeof(int), | 4289 | .maxlen = sizeof(int), |
4290 | .mode = 0644, | 4290 | .mode = 0644, |
4291 | .proc_handler = proc_dointvec, | 4291 | .proc_handler = proc_dointvec, |
4292 | }, | 4292 | }, |
4293 | { | 4293 | { |
4294 | .procname = "accept_ra_pinfo", | 4294 | .procname = "accept_ra_pinfo", |
4295 | .data = &ipv6_devconf.accept_ra_pinfo, | 4295 | .data = &ipv6_devconf.accept_ra_pinfo, |
4296 | .maxlen = sizeof(int), | 4296 | .maxlen = sizeof(int), |
4297 | .mode = 0644, | 4297 | .mode = 0644, |
4298 | .proc_handler = proc_dointvec, | 4298 | .proc_handler = proc_dointvec, |
4299 | }, | 4299 | }, |
4300 | #ifdef CONFIG_IPV6_ROUTER_PREF | 4300 | #ifdef CONFIG_IPV6_ROUTER_PREF |
4301 | { | 4301 | { |
4302 | .procname = "accept_ra_rtr_pref", | 4302 | .procname = "accept_ra_rtr_pref", |
4303 | .data = &ipv6_devconf.accept_ra_rtr_pref, | 4303 | .data = &ipv6_devconf.accept_ra_rtr_pref, |
4304 | .maxlen = sizeof(int), | 4304 | .maxlen = sizeof(int), |
4305 | .mode = 0644, | 4305 | .mode = 0644, |
4306 | .proc_handler = proc_dointvec, | 4306 | .proc_handler = proc_dointvec, |
4307 | }, | 4307 | }, |
4308 | { | 4308 | { |
4309 | .procname = "router_probe_interval", | 4309 | .procname = "router_probe_interval", |
4310 | .data = &ipv6_devconf.rtr_probe_interval, | 4310 | .data = &ipv6_devconf.rtr_probe_interval, |
4311 | .maxlen = sizeof(int), | 4311 | .maxlen = sizeof(int), |
4312 | .mode = 0644, | 4312 | .mode = 0644, |
4313 | .proc_handler = proc_dointvec_jiffies, | 4313 | .proc_handler = proc_dointvec_jiffies, |
4314 | }, | 4314 | }, |
4315 | #ifdef CONFIG_IPV6_ROUTE_INFO | 4315 | #ifdef CONFIG_IPV6_ROUTE_INFO |
4316 | { | 4316 | { |
4317 | .procname = "accept_ra_rt_info_max_plen", | 4317 | .procname = "accept_ra_rt_info_max_plen", |
4318 | .data = &ipv6_devconf.accept_ra_rt_info_max_plen, | 4318 | .data = &ipv6_devconf.accept_ra_rt_info_max_plen, |
4319 | .maxlen = sizeof(int), | 4319 | .maxlen = sizeof(int), |
4320 | .mode = 0644, | 4320 | .mode = 0644, |
4321 | .proc_handler = proc_dointvec, | 4321 | .proc_handler = proc_dointvec, |
4322 | }, | 4322 | }, |
4323 | #endif | 4323 | #endif |
4324 | #endif | 4324 | #endif |
4325 | { | 4325 | { |
4326 | .procname = "proxy_ndp", | 4326 | .procname = "proxy_ndp", |
4327 | .data = &ipv6_devconf.proxy_ndp, | 4327 | .data = &ipv6_devconf.proxy_ndp, |
4328 | .maxlen = sizeof(int), | 4328 | .maxlen = sizeof(int), |
4329 | .mode = 0644, | 4329 | .mode = 0644, |
4330 | .proc_handler = proc_dointvec, | 4330 | .proc_handler = proc_dointvec, |
4331 | }, | 4331 | }, |
4332 | { | 4332 | { |
4333 | .procname = "accept_source_route", | 4333 | .procname = "accept_source_route", |
4334 | .data = &ipv6_devconf.accept_source_route, | 4334 | .data = &ipv6_devconf.accept_source_route, |
4335 | .maxlen = sizeof(int), | 4335 | .maxlen = sizeof(int), |
4336 | .mode = 0644, | 4336 | .mode = 0644, |
4337 | .proc_handler = proc_dointvec, | 4337 | .proc_handler = proc_dointvec, |
4338 | }, | 4338 | }, |
4339 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 4339 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
4340 | { | 4340 | { |
4341 | .procname = "optimistic_dad", | 4341 | .procname = "optimistic_dad", |
4342 | .data = &ipv6_devconf.optimistic_dad, | 4342 | .data = &ipv6_devconf.optimistic_dad, |
4343 | .maxlen = sizeof(int), | 4343 | .maxlen = sizeof(int), |
4344 | .mode = 0644, | 4344 | .mode = 0644, |
4345 | .proc_handler = proc_dointvec, | 4345 | .proc_handler = proc_dointvec, |
4346 | 4346 | ||
4347 | }, | 4347 | }, |
4348 | #endif | 4348 | #endif |
4349 | #ifdef CONFIG_IPV6_MROUTE | 4349 | #ifdef CONFIG_IPV6_MROUTE |
4350 | { | 4350 | { |
4351 | .procname = "mc_forwarding", | 4351 | .procname = "mc_forwarding", |
4352 | .data = &ipv6_devconf.mc_forwarding, | 4352 | .data = &ipv6_devconf.mc_forwarding, |
4353 | .maxlen = sizeof(int), | 4353 | .maxlen = sizeof(int), |
4354 | .mode = 0444, | 4354 | .mode = 0444, |
4355 | .proc_handler = proc_dointvec, | 4355 | .proc_handler = proc_dointvec, |
4356 | }, | 4356 | }, |
4357 | #endif | 4357 | #endif |
4358 | { | 4358 | { |
4359 | .procname = "disable_ipv6", | 4359 | .procname = "disable_ipv6", |
4360 | .data = &ipv6_devconf.disable_ipv6, | 4360 | .data = &ipv6_devconf.disable_ipv6, |
4361 | .maxlen = sizeof(int), | 4361 | .maxlen = sizeof(int), |
4362 | .mode = 0644, | 4362 | .mode = 0644, |
4363 | .proc_handler = addrconf_sysctl_disable, | 4363 | .proc_handler = addrconf_sysctl_disable, |
4364 | }, | 4364 | }, |
4365 | { | 4365 | { |
4366 | .procname = "accept_dad", | 4366 | .procname = "accept_dad", |
4367 | .data = &ipv6_devconf.accept_dad, | 4367 | .data = &ipv6_devconf.accept_dad, |
4368 | .maxlen = sizeof(int), | 4368 | .maxlen = sizeof(int), |
4369 | .mode = 0644, | 4369 | .mode = 0644, |
4370 | .proc_handler = proc_dointvec, | 4370 | .proc_handler = proc_dointvec, |
4371 | }, | 4371 | }, |
4372 | { | 4372 | { |
4373 | .procname = "force_tllao", | 4373 | .procname = "force_tllao", |
@@ -4403,8 +4403,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, | |||
4403 | if (t == NULL) | 4403 | if (t == NULL) |
4404 | goto out; | 4404 | goto out; |
4405 | 4405 | ||
4406 | for (i=0; t->addrconf_vars[i].data; i++) { | 4406 | for (i = 0; t->addrconf_vars[i].data; i++) { |
4407 | t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; | 4407 | t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf; |
4408 | t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ | 4408 | t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ |
4409 | t->addrconf_vars[i].extra2 = net; | 4409 | t->addrconf_vars[i].extra2 = net; |
4410 | } | 4410 | } |
@@ -4541,14 +4541,12 @@ int register_inet6addr_notifier(struct notifier_block *nb) | |||
4541 | { | 4541 | { |
4542 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | 4542 | return atomic_notifier_chain_register(&inet6addr_chain, nb); |
4543 | } | 4543 | } |
4544 | |||
4545 | EXPORT_SYMBOL(register_inet6addr_notifier); | 4544 | EXPORT_SYMBOL(register_inet6addr_notifier); |
4546 | 4545 | ||
4547 | int unregister_inet6addr_notifier(struct notifier_block *nb) | 4546 | int unregister_inet6addr_notifier(struct notifier_block *nb) |
4548 | { | 4547 | { |
4549 | return atomic_notifier_chain_unregister(&inet6addr_chain,nb); | 4548 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); |
4550 | } | 4549 | } |
4551 | |||
4552 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | 4550 | EXPORT_SYMBOL(unregister_inet6addr_notifier); |
4553 | 4551 | ||
4554 | /* | 4552 | /* |
@@ -4557,11 +4555,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier); | |||
4557 | 4555 | ||
4558 | int __init addrconf_init(void) | 4556 | int __init addrconf_init(void) |
4559 | { | 4557 | { |
4560 | int err; | 4558 | int i, err; |
4561 | 4559 | ||
4562 | if ((err = ipv6_addr_label_init()) < 0) { | 4560 | err = ipv6_addr_label_init(); |
4563 | printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n", | 4561 | if (err < 0) { |
4564 | err); | 4562 | printk(KERN_CRIT "IPv6 Addrconf:" |
4563 | " cannot initialize default policy table: %d.\n", err); | ||
4565 | return err; | 4564 | return err; |
4566 | } | 4565 | } |
4567 | 4566 | ||
@@ -4592,6 +4591,9 @@ int __init addrconf_init(void) | |||
4592 | if (err) | 4591 | if (err) |
4593 | goto errlo; | 4592 | goto errlo; |
4594 | 4593 | ||
4594 | for (i = 0; i < IN6_ADDR_HSIZE; i++) | ||
4595 | INIT_HLIST_HEAD(&inet6_addr_lst[i]); | ||
4596 | |||
4595 | register_netdevice_notifier(&ipv6_dev_notf); | 4597 | register_netdevice_notifier(&ipv6_dev_notf); |
4596 | 4598 | ||
4597 | addrconf_verify(0); | 4599 | addrconf_verify(0); |
@@ -4620,7 +4622,6 @@ errlo: | |||
4620 | 4622 | ||
4621 | void addrconf_cleanup(void) | 4623 | void addrconf_cleanup(void) |
4622 | { | 4624 | { |
4623 | struct inet6_ifaddr *ifa; | ||
4624 | struct net_device *dev; | 4625 | struct net_device *dev; |
4625 | int i; | 4626 | int i; |
4626 | 4627 | ||
@@ -4640,20 +4641,10 @@ void addrconf_cleanup(void) | |||
4640 | /* | 4641 | /* |
4641 | * Check hash table. | 4642 | * Check hash table. |
4642 | */ | 4643 | */ |
4643 | write_lock_bh(&addrconf_hash_lock); | 4644 | spin_lock_bh(&addrconf_hash_lock); |
4644 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 4645 | for (i = 0; i < IN6_ADDR_HSIZE; i++) |
4645 | for (ifa=inet6_addr_lst[i]; ifa; ) { | 4646 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); |
4646 | struct inet6_ifaddr *bifa; | 4647 | spin_unlock_bh(&addrconf_hash_lock); |
4647 | |||
4648 | bifa = ifa; | ||
4649 | ifa = ifa->lst_next; | ||
4650 | printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa); | ||
4651 | /* Do not free it; something is wrong. | ||
4652 | Now we can investigate it with debugger. | ||
4653 | */ | ||
4654 | } | ||
4655 | } | ||
4656 | write_unlock_bh(&addrconf_hash_lock); | ||
4657 | 4648 | ||
4658 | del_timer(&addr_chk_timer); | 4649 | del_timer(&addr_chk_timer); |
4659 | rtnl_unlock(); | 4650 | rtnl_unlock(); |
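The new hash bookkeeping above spans two hunks: addrconf_init() now initializes each bucket of inet6_addr_lst[], and addrconf_cleanup() merely asserts that every bucket is empty instead of walking a hand-rolled singly linked list and printk()ing leaked entries. A minimal sketch of that hlist pattern, assuming kernel context (the ex_* names are hypothetical):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/kernel.h>

	#define EX_HSIZE 16				/* stand-in for IN6_ADDR_HSIZE */

	static struct hlist_head ex_addr_lst[EX_HSIZE];
	static DEFINE_SPINLOCK(ex_hash_lock);		/* the patch also swaps the rwlock for a spinlock */

	static void ex_hash_init(void)
	{
		int i;

		for (i = 0; i < EX_HSIZE; i++)
			INIT_HLIST_HEAD(&ex_addr_lst[i]);	/* buckets must be initialized before first use */
	}

	static void ex_hash_cleanup_check(void)
	{
		int i;

		spin_lock_bh(&ex_hash_lock);
		for (i = 0; i < EX_HSIZE; i++)
			WARN_ON(!hlist_empty(&ex_addr_lst[i]));	/* any survivor is an address leak */
		spin_unlock_bh(&ex_hash_lock);
	}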
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 3330a4bd6157..12d2fa42657d 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -483,6 +483,7 @@ route_done: | |||
483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
484 | MSG_DONTWAIT); | 484 | MSG_DONTWAIT); |
485 | if (err) { | 485 | if (err) { |
486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); | ||
486 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
487 | goto out_put; | 488 | goto out_put; |
488 | } | 489 | } |
@@ -563,6 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
563 | (struct rt6_info*)dst, MSG_DONTWAIT); | 564 | (struct rt6_info*)dst, MSG_DONTWAIT); |
564 | 565 | ||
565 | if (err) { | 566 | if (err) { |
567 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); | ||
566 | ip6_flush_pending_frames(sk); | 568 | ip6_flush_pending_frames(sk); |
567 | goto out_put; | 569 | goto out_put; |
568 | } | 570 | } |
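Both icmp.c hunks pair with the Icmp6OutErrors counter exported from net/ipv6/proc.c later in this patch: when building the reply fails and the pending frames are flushed, the failure is now accounted as an ICMPv6 output error instead of vanishing from the statistics.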
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 6b82e02158c6..dc6e0b8f260d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -128,12 +128,23 @@ static __inline__ u32 fib6_new_sernum(void) | |||
128 | /* | 128 | /* |
129 | * test bit | 129 | * test bit |
130 | */ | 130 | */ |
131 | #if defined(__LITTLE_ENDIAN) | ||
132 | # define BITOP_BE32_SWIZZLE (0x1F & ~7) | ||
133 | #else | ||
134 | # define BITOP_BE32_SWIZZLE 0 | ||
135 | #endif | ||
131 | 136 | ||
132 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) | 137 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) |
133 | { | 138 | { |
134 | __be32 *addr = token; | 139 | __be32 *addr = token; |
135 | 140 | /* | |
136 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; | 141 | * Here, |
142 | * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) | ||
143 | * is optimized version of | ||
144 | * htonl(1 << ((~fn_bit)&0x1F)) | ||
145 | * See include/asm-generic/bitops/le.h. | ||
146 | */ | ||
147 | return (1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & addr[fn_bit >> 5]; | ||
137 | } | 148 | } |
138 | 149 | ||
139 | static __inline__ struct fib6_node * node_alloc(void) | 150 | static __inline__ struct fib6_node * node_alloc(void) |
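The equivalence the new comment claims is easy to check from userspace. A minimal sketch, assuming a little-endian host (where BITOP_BE32_SWIZZLE is 0x1F & ~7 == 24):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	#define BITOP_BE32_SWIZZLE (0x1F & ~7)	/* 24 on little-endian */

	int main(void)
	{
		int fn_bit;

		for (fn_bit = 0; fn_bit < 128; fn_bit++) {
			uint32_t swapped  = htonl(1u << ((~fn_bit) & 0x1F));
			uint32_t swizzled = 1u << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f);

			if (swapped != swizzled)
				printf("mismatch at fn_bit=%d\n", fn_bit);
		}
		printf("done\n");	/* no mismatch lines means the two forms agree */
		return 0;
	}

Under a 32-bit byte swap, bit b moves to bit b ^ 24 (the byte index flips while the bit-within-byte offset stays), so XOR-ing the shift count with 24 reproduces the htonl() form without any byte swapping at run time.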
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index c483ab9fd67b..62ed08213d91 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -715,7 +715,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) | |||
715 | if (!(mc->mca_flags&MAF_LOADED)) { | 715 | if (!(mc->mca_flags&MAF_LOADED)) { |
716 | mc->mca_flags |= MAF_LOADED; | 716 | mc->mca_flags |= MAF_LOADED; |
717 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) | 717 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) |
718 | dev_mc_add(dev, buf, dev->addr_len, 0); | 718 | dev_mc_add(dev, buf); |
719 | } | 719 | } |
720 | spin_unlock_bh(&mc->mca_lock); | 720 | spin_unlock_bh(&mc->mca_lock); |
721 | 721 | ||
@@ -741,7 +741,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc) | |||
741 | if (mc->mca_flags&MAF_LOADED) { | 741 | if (mc->mca_flags&MAF_LOADED) { |
742 | mc->mca_flags &= ~MAF_LOADED; | 742 | mc->mca_flags &= ~MAF_LOADED; |
743 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) | 743 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) |
744 | dev_mc_delete(dev, buf, dev->addr_len, 0); | 744 | dev_mc_del(dev, buf); |
745 | } | 745 | } |
746 | 746 | ||
747 | if (mc->mca_flags & MAF_NOREPORT) | 747 | if (mc->mca_flags & MAF_NOREPORT) |
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index cbe8dec9744b..e60677519e40 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c | |||
@@ -141,11 +141,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | /* Step to the next */ | 143 | /* Step to the next */ |
144 | pr_debug("len%04X \n", optlen); | 144 | pr_debug("len%04X\n", optlen); |
145 | 145 | ||
146 | if ((ptr > skb->len - optlen || hdrlen < optlen) && | 146 | if ((ptr > skb->len - optlen || hdrlen < optlen) && |
147 | temp < optinfo->optsnr - 1) { | 147 | temp < optinfo->optsnr - 1) { |
148 | pr_debug("new pointer is too large! \n"); | 148 | pr_debug("new pointer is too large!\n"); |
149 | break; | 149 | break; |
150 | } | 150 | } |
151 | ptr += optlen; | 151 | ptr += optlen; |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 58344c0fbd13..458eabfbe130 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = { | |||
97 | SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), | 97 | SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), |
98 | SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), | 98 | SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), |
99 | SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), | 99 | SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), |
100 | SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS), | ||
100 | SNMP_MIB_SENTINEL | 101 | SNMP_MIB_SENTINEL |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ae181651c75a..8c452fd5ceae 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net, | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct dst_entry * | ||
71 | __xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | ||
72 | { | ||
73 | struct dst_entry *dst; | ||
74 | |||
75 | /* Still not clear if we should set fl->fl6_{src,dst}... */ | ||
76 | read_lock_bh(&policy->lock); | ||
77 | for (dst = policy->bundles; dst; dst = dst->next) { | ||
78 | struct xfrm_dst *xdst = (struct xfrm_dst*)dst; | ||
79 | struct in6_addr fl_dst_prefix, fl_src_prefix; | ||
80 | |||
81 | ipv6_addr_prefix(&fl_dst_prefix, | ||
82 | &fl->fl6_dst, | ||
83 | xdst->u.rt6.rt6i_dst.plen); | ||
84 | ipv6_addr_prefix(&fl_src_prefix, | ||
85 | &fl->fl6_src, | ||
86 | xdst->u.rt6.rt6i_src.plen); | ||
87 | if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && | ||
88 | ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && | ||
89 | xfrm_bundle_ok(policy, xdst, fl, AF_INET6, | ||
90 | (xdst->u.rt6.rt6i_dst.plen != 128 || | ||
91 | xdst->u.rt6.rt6i_src.plen != 128))) { | ||
92 | dst_clone(dst); | ||
93 | break; | ||
94 | } | ||
95 | } | ||
96 | read_unlock_bh(&policy->lock); | ||
97 | return dst; | ||
98 | } | ||
99 | |||
100 | static int xfrm6_get_tos(struct flowi *fl) | 70 | static int xfrm6_get_tos(struct flowi *fl) |
101 | { | 71 | { |
102 | return 0; | 72 | return 0; |
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { | |||
291 | .dst_ops = &xfrm6_dst_ops, | 261 | .dst_ops = &xfrm6_dst_ops, |
292 | .dst_lookup = xfrm6_dst_lookup, | 262 | .dst_lookup = xfrm6_dst_lookup, |
293 | .get_saddr = xfrm6_get_saddr, | 263 | .get_saddr = xfrm6_get_saddr, |
294 | .find_bundle = __xfrm6_find_bundle, | ||
295 | .decode_session = _decode_session6, | 264 | .decode_session = _decode_session6, |
296 | .get_tos = xfrm6_get_tos, | 265 | .get_tos = xfrm6_get_tos, |
297 | .init_path = xfrm6_init_path, | 266 | .init_path = xfrm6_init_path, |
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index e2e893b474e9..8b915f3ac3b9 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c | |||
@@ -475,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) | |||
475 | /* Check if any of the settings have changed */ | 475 | /* Check if any of the settings have changed */ |
476 | if (dce & 0x0f) { | 476 | if (dce & 0x0f) { |
477 | if (dce & IRCOMM_DELTA_CTS) { | 477 | if (dce & IRCOMM_DELTA_CTS) { |
478 | IRDA_DEBUG(2, "%s(), CTS \n", __func__ ); | 478 | IRDA_DEBUG(2, "%s(), CTS\n", __func__ ); |
479 | } | 479 | } |
480 | } | 480 | } |
481 | 481 | ||
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig new file mode 100644 index 000000000000..4b1e71751e10 --- /dev/null +++ b/net/l2tp/Kconfig | |||
@@ -0,0 +1,107 @@ | |||
1 | # | ||
2 | # Layer Two Tunneling Protocol (L2TP) | ||
3 | # | ||
4 | |||
5 | menuconfig L2TP | ||
6 | tristate "Layer Two Tunneling Protocol (L2TP)" | ||
7 | depends on INET | ||
8 | ---help--- | ||
9 | Layer Two Tunneling Protocol | ||
10 | |||
11 | From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>. | ||
12 | |||
13 | L2TP facilitates the tunneling of packets across an | ||
14 | intervening network in a way that is as transparent as | ||
15 | possible to both end-users and applications. | ||
16 | |||
17 | L2TP is often used to tunnel PPP traffic over IP | ||
18 | tunnels. One IP tunnel may carry thousands of individual PPP | ||
19 | connections. L2TP is also used as a VPN protocol, popular | ||
20 | with home workers to connect to their offices. | ||
21 | |||
22 | L2TPv3 allows other protocols as well as PPP to be carried | ||
23 | over L2TP tunnels. L2TPv3 is defined in RFC 3931 | ||
24 | <http://www.ietf.org/rfc/rfc3931.txt>. | ||
25 | |||
26 | The kernel component handles only L2TP data packets: a | ||
27 | userland daemon handles the L2TP control protocol (tunnel | ||
28 | and session setup). One such daemon is OpenL2TP | ||
29 | (http://openl2tp.org/). | ||
30 | |||
31 | If you don't need L2TP, say N. To compile all L2TP code as | ||
32 | modules, choose M here. | ||
33 | |||
34 | config L2TP_DEBUGFS | ||
35 | tristate "L2TP debugfs support" | ||
36 | depends on L2TP && DEBUG_FS | ||
37 | help | ||
38 | Support for an l2tp directory in the debugfs filesystem. This | ||
39 | may be used to dump the internal state of the l2tp drivers for | ||
40 | problem analysis. | ||
41 | |||
42 | If unsure, say 'Y'. | ||
43 | |||
44 | To compile this driver as a module, choose M here. The module | ||
45 | will be called l2tp_debugfs. | ||
46 | |||
47 | config L2TP_V3 | ||
48 | bool "L2TPv3 support (EXPERIMENTAL)" | ||
49 | depends on EXPERIMENTAL && L2TP | ||
50 | help | ||
51 | Layer Two Tunneling Protocol Version 3 | ||
52 | |||
53 | From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>. | ||
54 | |||
55 | The Layer Two Tunneling Protocol (L2TP) provides a dynamic | ||
56 | mechanism for tunneling Layer 2 (L2) "circuits" across a | ||
57 | packet-oriented data network (e.g., over IP). L2TP, as | ||
58 | originally defined in RFC 2661, is a standard method for | ||
59 | tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions. | ||
60 | L2TP has since been adopted for tunneling a number of other | ||
61 | L2 protocols, including ATM, Frame Relay, HDLC and even raw | ||
62 | ethernet frames. | ||
63 | |||
64 | If you are connecting to L2TPv3 equipment, or you want to | ||
65 | tunnel raw ethernet frames using L2TP, say Y here. If | ||
66 | unsure, say N. | ||
67 | |||
68 | config L2TP_IP | ||
69 | tristate "L2TP IP encapsulation for L2TPv3" | ||
70 | depends on L2TP_V3 | ||
71 | help | ||
72 | Support for L2TP-over-IP socket family. | ||
73 | |||
74 | The L2TPv3 protocol defines two possible encapsulations for | ||
75 | L2TP frames, namely UDP and plain IP (without UDP). This | ||
76 | driver provides a new L2TPIP socket family with which | ||
77 | userspace L2TPv3 daemons may create L2TP/IP tunnel sockets | ||
78 | when UDP encapsulation is not required. When L2TP is carried | ||
79 | in IP packets, it uses IP protocol number 115, so this protocol | ||
80 | must be allowed through firewalls. | ||
81 | |||
82 | To compile this driver as a module, choose M here. The module | ||
83 | will be called l2tp_ip. | ||
84 | |||
85 | config L2TP_ETH | ||
86 | tristate "L2TP ethernet pseudowire support for L2TPv3" | ||
87 | depends on L2TP_V3 | ||
88 | help | ||
89 | Support for carrying raw ethernet frames over L2TPv3. | ||
90 | |||
91 | From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>. | ||
92 | |||
93 | The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be | ||
94 | used as a control protocol and for data encapsulation to set | ||
95 | up Pseudowires for transporting layer 2 Packet Data Units | ||
96 | across an IP network [RFC3931]. | ||
97 | |||
98 | This driver provides an ethernet virtual interface for each | ||
99 | L2TP ethernet pseudowire instance. Standard Linux tools may | ||
100 | be used to assign an IP address to the local virtual | ||
101 | interface, or add the interface to a bridge. | ||
102 | |||
103 | If you are using L2TPv3, you will almost certainly want to | ||
104 | enable this option. | ||
105 | |||
106 | To compile this driver as a module, choose M here. The module | ||
107 | will be called l2tp_eth. | ||
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile new file mode 100644 index 000000000000..110e7bc2de5e --- /dev/null +++ b/net/l2tp/Makefile | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # Makefile for the L2TP. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_L2TP) += l2tp_core.o | ||
6 | |||
7 | # Build l2tp as modules if L2TP is M | ||
8 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o | ||
9 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o | ||
10 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o | ||
11 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o | ||
12 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o | ||
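The $(subst ...) lines implement the "build l2tp as modules if L2TP is M" comment: $(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) rewrites a literal y into the value of CONFIG_L2TP. For example, with CONFIG_L2TP=m and CONFIG_L2TP_IP=y the expression expands to m, so l2tp_ip.o lands in obj-m (built as a module); with both set to y it stays in obj-y (built in); and an unset or =n sub-option contributes no object at all.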
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c new file mode 100644 index 000000000000..98dfcce1a5fc --- /dev/null +++ b/net/l2tp/l2tp_core.c | |||
@@ -0,0 +1,1692 @@ | |||
1 | /* | ||
2 | * L2TP core. | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This file contains some code of the original L2TPv2 pppol2tp | ||
7 | * driver, which has the following copyright: | ||
8 | * | ||
9 | * Authors: Martijn van Oosterhout <kleptog@svana.org> | ||
10 | * James Chapman (jchapman@katalix.com) | ||
11 | * Contributors: | ||
12 | * Michal Ostrowski <mostrows@speakeasy.net> | ||
13 | * Arnaldo Carvalho de Melo <acme@xconectiva.com.br> | ||
14 | * David S. Miller (davem@redhat.com) | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License version 2 as | ||
18 | * published by the Free Software Foundation. | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/rculist.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/kthread.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/errno.h> | ||
33 | #include <linux/jiffies.h> | ||
34 | |||
35 | #include <linux/netdevice.h> | ||
36 | #include <linux/net.h> | ||
37 | #include <linux/inetdevice.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | #include <linux/udp.h> | ||
43 | #include <linux/l2tp.h> | ||
44 | #include <linux/hash.h> | ||
45 | #include <linux/sort.h> | ||
46 | #include <linux/file.h> | ||
47 | #include <linux/nsproxy.h> | ||
48 | #include <net/net_namespace.h> | ||
49 | #include <net/netns/generic.h> | ||
50 | #include <net/dst.h> | ||
51 | #include <net/ip.h> | ||
52 | #include <net/udp.h> | ||
53 | #include <net/inet_common.h> | ||
54 | #include <net/xfrm.h> | ||
55 | #include <net/protocol.h> | ||
56 | |||
57 | #include <asm/byteorder.h> | ||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | #include "l2tp_core.h" | ||
61 | |||
62 | #define L2TP_DRV_VERSION "V2.0" | ||
63 | |||
64 | /* L2TP header constants */ | ||
65 | #define L2TP_HDRFLAG_T 0x8000 | ||
66 | #define L2TP_HDRFLAG_L 0x4000 | ||
67 | #define L2TP_HDRFLAG_S 0x0800 | ||
68 | #define L2TP_HDRFLAG_O 0x0200 | ||
69 | #define L2TP_HDRFLAG_P 0x0100 | ||
70 | |||
71 | #define L2TP_HDR_VER_MASK 0x000F | ||
72 | #define L2TP_HDR_VER_2 0x0002 | ||
73 | #define L2TP_HDR_VER_3 0x0003 | ||
74 | |||
75 | /* L2TPv3 default L2-specific sublayer */ | ||
76 | #define L2TP_SLFLAG_S 0x40000000 | ||
77 | #define L2TP_SL_SEQ_MASK 0x00ffffff | ||
78 | |||
79 | #define L2TP_HDR_SIZE_SEQ 10 | ||
80 | #define L2TP_HDR_SIZE_NOSEQ 6 | ||
81 | |||
82 | /* Default trace flags */ | ||
83 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 | ||
84 | |||
85 | #define PRINTK(_mask, _type, _lvl, _fmt, args...) \ | ||
86 | do { \ | ||
87 | if ((_mask) & (_type)) \ | ||
88 | printk(_lvl "L2TP: " _fmt, ##args); \ | ||
89 | } while (0) | ||
90 | |||
91 | /* Private data stored for received packets in the skb. | ||
92 | */ | ||
93 | struct l2tp_skb_cb { | ||
94 | u32 ns; | ||
95 | u16 has_seq; | ||
96 | u16 length; | ||
97 | unsigned long expires; | ||
98 | }; | ||
99 | |||
100 | #define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)]) | ||
101 | |||
102 | static atomic_t l2tp_tunnel_count; | ||
103 | static atomic_t l2tp_session_count; | ||
104 | |||
105 | /* per-net private data for this module */ | ||
106 | static unsigned int l2tp_net_id; | ||
107 | struct l2tp_net { | ||
108 | struct list_head l2tp_tunnel_list; | ||
109 | spinlock_t l2tp_tunnel_list_lock; | ||
110 | struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; | ||
111 | spinlock_t l2tp_session_hlist_lock; | ||
112 | }; | ||
113 | |||
114 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | ||
115 | { | ||
116 | BUG_ON(!net); | ||
117 | |||
118 | return net_generic(net, l2tp_net_id); | ||
119 | } | ||
120 | |||
121 | /* Session hash global list for L2TPv3. | ||
122 | * The session_id SHOULD be random according to RFC3931, but several | ||
123 | * L2TP implementations use incrementing session_ids. So we do a real | ||
124 | * hash on the session_id, rather than a simple bitmask. | ||
125 | */ | ||
126 | static inline struct hlist_head * | ||
127 | l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | ||
128 | { | ||
129 | return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)]; | ||
130 | |||
131 | } | ||
132 | |||
133 | /* Lookup a session by id in the global session list | ||
134 | */ | ||
135 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | ||
136 | { | ||
137 | struct l2tp_net *pn = l2tp_pernet(net); | ||
138 | struct hlist_head *session_list = | ||
139 | l2tp_session_id_hash_2(pn, session_id); | ||
140 | struct l2tp_session *session; | ||
141 | struct hlist_node *walk; | ||
142 | |||
143 | rcu_read_lock_bh(); | ||
144 | hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) { | ||
145 | if (session->session_id == session_id) { | ||
146 | rcu_read_unlock_bh(); | ||
147 | return session; | ||
148 | } | ||
149 | } | ||
150 | rcu_read_unlock_bh(); | ||
151 | |||
152 | return NULL; | ||
153 | } | ||
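/* Editorial aside, not part of the patch: hash_32() from <linux/hash.h>
 * is multiplicative hashing -- in this kernel generation essentially
 *
 *	hash_32(id, bits) == (u32)(id * GOLDEN_RATIO_PRIME_32) >> (32 - bits)
 *
 * i.e. it keeps the top bits of the product, so the bucket depends on
 * every bit of the session_id. That is the "real hash" the comment
 * above contrasts with a simple low-bits bitmask.
 */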
154 | |||
155 | /* Session hash list. | ||
156 | * The session_id SHOULD be random according to RFC2661, but several | ||
157 | * L2TP implementations (Cisco and Microsoft) use incrementing | ||
158 | * session_ids. So we do a real hash on the session_id, rather than a | ||
159 | * simple bitmask. | ||
160 | */ | ||
161 | static inline struct hlist_head * | ||
162 | l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) | ||
163 | { | ||
164 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; | ||
165 | } | ||
166 | |||
167 | /* Lookup a session by id | ||
168 | */ | ||
169 | struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id) | ||
170 | { | ||
171 | struct hlist_head *session_list; | ||
172 | struct l2tp_session *session; | ||
173 | struct hlist_node *walk; | ||
174 | |||
175 | /* In L2TPv3, session_ids are unique over all tunnels and we | ||
176 | * sometimes need to look them up before we know the | ||
177 | * tunnel. | ||
178 | */ | ||
179 | if (tunnel == NULL) | ||
180 | return l2tp_session_find_2(net, session_id); | ||
181 | |||
182 | session_list = l2tp_session_id_hash(tunnel, session_id); | ||
183 | read_lock_bh(&tunnel->hlist_lock); | ||
184 | hlist_for_each_entry(session, walk, session_list, hlist) { | ||
185 | if (session->session_id == session_id) { | ||
186 | read_unlock_bh(&tunnel->hlist_lock); | ||
187 | return session; | ||
188 | } | ||
189 | } | ||
190 | read_unlock_bh(&tunnel->hlist_lock); | ||
191 | |||
192 | return NULL; | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(l2tp_session_find); | ||
195 | |||
196 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) | ||
197 | { | ||
198 | int hash; | ||
199 | struct hlist_node *walk; | ||
200 | struct l2tp_session *session; | ||
201 | int count = 0; | ||
202 | |||
203 | read_lock_bh(&tunnel->hlist_lock); | ||
204 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
205 | hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) { | ||
206 | if (++count > nth) { | ||
207 | read_unlock_bh(&tunnel->hlist_lock); | ||
208 | return session; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
213 | read_unlock_bh(&tunnel->hlist_lock); | ||
214 | |||
215 | return NULL; | ||
216 | } | ||
217 | EXPORT_SYMBOL_GPL(l2tp_session_find_nth); | ||
218 | |||
219 | /* Lookup a session by interface name. | ||
220 | * This is very inefficient but is only used by management interfaces. | ||
221 | */ | ||
222 | struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) | ||
223 | { | ||
224 | struct l2tp_net *pn = l2tp_pernet(net); | ||
225 | int hash; | ||
226 | struct hlist_node *walk; | ||
227 | struct l2tp_session *session; | ||
228 | |||
229 | rcu_read_lock_bh(); | ||
230 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { | ||
231 | hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) { | ||
232 | if (!strcmp(session->ifname, ifname)) { | ||
233 | rcu_read_unlock_bh(); | ||
234 | return session; | ||
235 | } | ||
236 | } | ||
237 | } | ||
238 | |||
239 | rcu_read_unlock_bh(); | ||
240 | |||
241 | return NULL; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); | ||
244 | |||
245 | /* Lookup a tunnel by id | ||
246 | */ | ||
247 | struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) | ||
248 | { | ||
249 | struct l2tp_tunnel *tunnel; | ||
250 | struct l2tp_net *pn = l2tp_pernet(net); | ||
251 | |||
252 | rcu_read_lock_bh(); | ||
253 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
254 | if (tunnel->tunnel_id == tunnel_id) { | ||
255 | rcu_read_unlock_bh(); | ||
256 | return tunnel; | ||
257 | } | ||
258 | } | ||
259 | rcu_read_unlock_bh(); | ||
260 | |||
261 | return NULL; | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find); | ||
264 | |||
265 | struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth) | ||
266 | { | ||
267 | struct l2tp_net *pn = l2tp_pernet(net); | ||
268 | struct l2tp_tunnel *tunnel; | ||
269 | int count = 0; | ||
270 | |||
271 | rcu_read_lock_bh(); | ||
272 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
273 | if (++count > nth) { | ||
274 | rcu_read_unlock_bh(); | ||
275 | return tunnel; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | rcu_read_unlock_bh(); | ||
280 | |||
281 | return NULL; | ||
282 | } | ||
283 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); | ||
284 | |||
285 | /***************************************************************************** | ||
286 | * Receive data handling | ||
287 | *****************************************************************************/ | ||
288 | |||
289 | /* Queue a skb in order. We come here only if the skb has an L2TP sequence | ||
290 | * number. | ||
291 | */ | ||
292 | static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb) | ||
293 | { | ||
294 | struct sk_buff *skbp; | ||
295 | struct sk_buff *tmp; | ||
296 | u32 ns = L2TP_SKB_CB(skb)->ns; | ||
297 | |||
298 | spin_lock_bh(&session->reorder_q.lock); | ||
299 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { | ||
300 | if (L2TP_SKB_CB(skbp)->ns > ns) { | ||
301 | __skb_queue_before(&session->reorder_q, skbp, skb); | ||
302 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
303 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", | ||
304 | session->name, ns, L2TP_SKB_CB(skbp)->ns, | ||
305 | skb_queue_len(&session->reorder_q)); | ||
306 | session->stats.rx_oos_packets++; | ||
307 | goto out; | ||
308 | } | ||
309 | } | ||
310 | |||
311 | __skb_queue_tail(&session->reorder_q, skb); | ||
312 | |||
313 | out: | ||
314 | spin_unlock_bh(&session->reorder_q.lock); | ||
315 | } | ||
316 | |||
317 | /* Dequeue a single skb. | ||
318 | */ | ||
319 | static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb) | ||
320 | { | ||
321 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
322 | int length = L2TP_SKB_CB(skb)->length; | ||
323 | |||
324 | /* We're about to requeue the skb, so return resources | ||
325 | * to its current owner (a socket receive buffer). | ||
326 | */ | ||
327 | skb_orphan(skb); | ||
328 | |||
329 | tunnel->stats.rx_packets++; | ||
330 | tunnel->stats.rx_bytes += length; | ||
331 | session->stats.rx_packets++; | ||
332 | session->stats.rx_bytes += length; | ||
333 | |||
334 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
335 | /* Bump our Nr */ | ||
336 | session->nr++; | ||
337 | if (tunnel->version == L2TP_HDR_VER_2) | ||
338 | session->nr &= 0xffff; | ||
339 | else | ||
340 | session->nr &= 0xffffff; | ||
341 | |||
342 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
343 | "%s: updated nr to %hu\n", session->name, session->nr); | ||
344 | } | ||
345 | |||
346 | /* call private receive handler */ | ||
347 | if (session->recv_skb != NULL) | ||
348 | (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length); | ||
349 | else | ||
350 | kfree_skb(skb); | ||
351 | |||
352 | if (session->deref) | ||
353 | (*session->deref)(session); | ||
354 | } | ||
355 | |||
356 | /* Dequeue skbs from the session's reorder_q, subject to packet order. | ||
357 | * Skbs that have been in the queue for too long are simply discarded. | ||
358 | */ | ||
359 | static void l2tp_recv_dequeue(struct l2tp_session *session) | ||
360 | { | ||
361 | struct sk_buff *skb; | ||
362 | struct sk_buff *tmp; | ||
363 | |||
364 | /* If the pkt at the head of the queue has the nr that we | ||
365 | * expect to send up next, dequeue it and any other | ||
366 | * in-sequence packets behind it. | ||
367 | */ | ||
368 | spin_lock_bh(&session->reorder_q.lock); | ||
369 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { | ||
370 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { | ||
371 | session->stats.rx_seq_discards++; | ||
372 | session->stats.rx_errors++; | ||
373 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
374 | "%s: oos pkt %u len %d discarded (too old), " | ||
375 | "waiting for %u, reorder_q_len=%d\n", | ||
376 | session->name, L2TP_SKB_CB(skb)->ns, | ||
377 | L2TP_SKB_CB(skb)->length, session->nr, | ||
378 | skb_queue_len(&session->reorder_q)); | ||
379 | __skb_unlink(skb, &session->reorder_q); | ||
380 | kfree_skb(skb); | ||
381 | if (session->deref) | ||
382 | (*session->deref)(session); | ||
383 | continue; | ||
384 | } | ||
385 | |||
386 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
387 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | ||
388 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
389 | "%s: holding oos pkt %u len %d, " | ||
390 | "waiting for %u, reorder_q_len=%d\n", | ||
391 | session->name, L2TP_SKB_CB(skb)->ns, | ||
392 | L2TP_SKB_CB(skb)->length, session->nr, | ||
393 | skb_queue_len(&session->reorder_q)); | ||
394 | goto out; | ||
395 | } | ||
396 | } | ||
397 | __skb_unlink(skb, &session->reorder_q); | ||
398 | |||
399 | /* Process the skb. We release the queue lock while we | ||
400 | * do so to let other contexts process the queue. | ||
401 | */ | ||
402 | spin_unlock_bh(&session->reorder_q.lock); | ||
403 | l2tp_recv_dequeue_skb(session, skb); | ||
404 | spin_lock_bh(&session->reorder_q.lock); | ||
405 | } | ||
406 | |||
407 | out: | ||
408 | spin_unlock_bh(&session->reorder_q.lock); | ||
409 | } | ||
410 | |||
411 | static inline int l2tp_verify_udp_checksum(struct sock *sk, | ||
412 | struct sk_buff *skb) | ||
413 | { | ||
414 | struct udphdr *uh = udp_hdr(skb); | ||
415 | u16 ulen = ntohs(uh->len); | ||
416 | struct inet_sock *inet; | ||
417 | __wsum psum; | ||
418 | |||
419 | if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) | ||
420 | return 0; | ||
421 | |||
422 | inet = inet_sk(sk); | ||
423 | psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, | ||
424 | IPPROTO_UDP, 0); | ||
425 | |||
426 | if ((skb->ip_summed == CHECKSUM_COMPLETE) && | ||
427 | !csum_fold(csum_add(psum, skb->csum))) | ||
428 | return 0; | ||
429 | |||
430 | skb->csum = psum; | ||
431 | |||
432 | return __skb_checksum_complete(skb); | ||
433 | } | ||
434 | |||
435 | /* Do receive processing of L2TP data frames. We handle both L2TPv2 | ||
436 | * and L2TPv3 data frames here. | ||
437 | * | ||
438 | * L2TPv2 Data Message Header | ||
439 | * | ||
440 | * 0 1 2 3 | ||
441 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
442 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
443 | * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) | | ||
444 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
445 | * | Tunnel ID | Session ID | | ||
446 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
447 | * | Ns (opt) | Nr (opt) | | ||
448 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
449 | * | Offset Size (opt) | Offset pad... (opt) | ||
450 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
451 | * | ||
452 | * Data frames are marked by T=0. All other fields are the same as | ||
453 | * those in L2TP control frames. | ||
454 | * | ||
455 | * L2TPv3 Data Message Header | ||
456 | * | ||
457 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
458 | * | L2TP Session Header | | ||
459 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
460 | * | L2-Specific Sublayer | | ||
461 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
462 | * | Tunnel Payload ... | ||
463 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
464 | * | ||
465 | * L2TPv3 Session Header Over IP | ||
466 | * | ||
467 | * 0 1 2 3 | ||
468 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
469 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
470 | * | Session ID | | ||
471 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
472 | * | Cookie (optional, maximum 64 bits)... | ||
473 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
474 | * | | ||
475 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
476 | * | ||
477 | * L2TPv3 L2-Specific Sublayer Format | ||
478 | * | ||
479 | * 0 1 2 3 | ||
480 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
481 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
482 | * |x|S|x|x|x|x|x|x| Sequence Number | | ||
483 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
484 | * | ||
485 | * Cookie value, sublayer format and offset (pad) are negotiated with | ||
486 | * the peer when the session is set up. Unlike L2TPv2, we do not need | ||
487 | * to parse the packet header to determine if optional fields are | ||
488 | * present. | ||
489 | * | ||
490 | * Caller must already have parsed the frame and determined that it is | ||
491 | * a data (not control) frame before coming here. Fields up to the | ||
492 | * session-id have already been parsed and ptr points to the data | ||
493 | * after the session-id. | ||
494 | */ | ||
495 | void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | ||
496 | unsigned char *ptr, unsigned char *optr, u16 hdrflags, | ||
497 | int length, int (*payload_hook)(struct sk_buff *skb)) | ||
498 | { | ||
499 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
500 | int offset; | ||
501 | u32 ns, nr; | ||
502 | |||
503 | /* The ref count is increased since we now hold a pointer to | ||
504 | * the session. Take care to decrement the refcnt when exiting | ||
505 | * this function from now on... | ||
506 | */ | ||
507 | l2tp_session_inc_refcount(session); | ||
508 | if (session->ref) | ||
509 | (*session->ref)(session); | ||
510 | |||
511 | /* Parse and check optional cookie */ | ||
512 | if (session->peer_cookie_len > 0) { | ||
513 | if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { | ||
514 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
515 | "%s: cookie mismatch (%u/%u). Discarding.\n", | ||
516 | tunnel->name, tunnel->tunnel_id, session->session_id); | ||
517 | session->stats.rx_cookie_discards++; | ||
518 | goto discard; | ||
519 | } | ||
520 | ptr += session->peer_cookie_len; | ||
521 | } | ||
522 | |||
523 | /* Handle the optional sequence numbers. Sequence numbers are | ||
524 | * in different places for L2TPv2 and L2TPv3. | ||
525 | * | ||
526 | * If we are the LAC, enable/disable sequence numbers under | ||
527 | * the control of the LNS. If no sequence numbers present but | ||
528 | * we were expecting them, discard frame. | ||
529 | */ | ||
530 | ns = nr = 0; | ||
531 | L2TP_SKB_CB(skb)->has_seq = 0; | ||
532 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
533 | if (hdrflags & L2TP_HDRFLAG_S) { | ||
534 | ns = ntohs(*(__be16 *) ptr); | ||
535 | ptr += 2; | ||
536 | nr = ntohs(*(__be16 *) ptr); | ||
537 | ptr += 2; | ||
538 | |||
539 | /* Store L2TP info in the skb */ | ||
540 | L2TP_SKB_CB(skb)->ns = ns; | ||
541 | L2TP_SKB_CB(skb)->has_seq = 1; | ||
542 | |||
543 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
544 | "%s: recv data ns=%u, nr=%u, session nr=%u\n", | ||
545 | session->name, ns, nr, session->nr); | ||
546 | } | ||
547 | } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { | ||
548 | u32 l2h = ntohl(*(__be32 *) ptr); | ||
549 | |||
550 | if (l2h & 0x40000000) { | ||
551 | ns = l2h & 0x00ffffff; | ||
552 | |||
553 | /* Store L2TP info in the skb */ | ||
554 | L2TP_SKB_CB(skb)->ns = ns; | ||
555 | L2TP_SKB_CB(skb)->has_seq = 1; | ||
556 | |||
557 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
558 | "%s: recv data ns=%u, session nr=%u\n", | ||
559 | session->name, ns, session->nr); | ||
560 | } | ||
561 | } | ||
562 | |||
563 | /* Advance past L2-specific header, if present */ | ||
564 | ptr += session->l2specific_len; | ||
565 | |||
566 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
567 | /* Received a packet with sequence numbers. If we're the LNS, | ||
568 | * check if we are sending sequence numbers and if not, | ||
569 | * configure it so. | ||
570 | */ | ||
571 | if ((!session->lns_mode) && (!session->send_seq)) { | ||
572 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, | ||
573 | "%s: requested to enable seq numbers by LNS\n", | ||
574 | session->name); | ||
575 | session->send_seq = -1; | ||
576 | l2tp_session_set_header_len(session, tunnel->version); | ||
577 | } | ||
578 | } else { | ||
579 | /* No sequence numbers. | ||
580 | * If user has configured mandatory sequence numbers, discard. | ||
581 | */ | ||
582 | if (session->recv_seq) { | ||
583 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, | ||
584 | "%s: recv data has no seq numbers when required. " | ||
585 | "Discarding\n", session->name); | ||
586 | session->stats.rx_seq_discards++; | ||
587 | goto discard; | ||
588 | } | ||
589 | |||
590 | /* If we're the LAC and we're sending sequence numbers, the | ||
591 | * LNS has requested that we no longer send sequence numbers. | ||
592 | * If we're the LNS and we're sending sequence numbers, the | ||
593 | * LAC is broken. Discard the frame. | ||
594 | */ | ||
595 | if ((!session->lns_mode) && (session->send_seq)) { | ||
596 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, | ||
597 | "%s: requested to disable seq numbers by LNS\n", | ||
598 | session->name); | ||
599 | session->send_seq = 0; | ||
600 | l2tp_session_set_header_len(session, tunnel->version); | ||
601 | } else if (session->send_seq) { | ||
602 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, | ||
603 | "%s: recv data has no seq numbers when required. " | ||
604 | "Discarding\n", session->name); | ||
605 | session->stats.rx_seq_discards++; | ||
606 | goto discard; | ||
607 | } | ||
608 | } | ||
609 | |||
610 | /* Session data offset is handled differently for L2TPv2 and | ||
611 | * L2TPv3. For L2TPv2, there is an optional 16-bit value in | ||
612 | * the header. For L2TPv3, the offset is negotiated using AVPs | ||
613 | * in the session setup control protocol. | ||
614 | */ | ||
615 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
616 | /* If offset bit set, skip it. */ | ||
617 | if (hdrflags & L2TP_HDRFLAG_O) { | ||
618 | offset = ntohs(*(__be16 *)ptr); | ||
619 | ptr += 2 + offset; | ||
620 | } | ||
621 | } else | ||
622 | ptr += session->offset; | ||
623 | |||
624 | offset = ptr - optr; | ||
625 | if (!pskb_may_pull(skb, offset)) | ||
626 | goto discard; | ||
627 | |||
628 | __skb_pull(skb, offset); | ||
629 | |||
630 | /* If caller wants to process the payload before we queue the | ||
631 | * packet, do so now. | ||
632 | */ | ||
633 | if (payload_hook) | ||
634 | if ((*payload_hook)(skb)) | ||
635 | goto discard; | ||
636 | |||
637 | /* Prepare skb for adding to the session's reorder_q. Hold | ||
638 | * packets for max reorder_timeout or 1 second if not | ||
639 | * reordering. | ||
640 | */ | ||
641 | L2TP_SKB_CB(skb)->length = length; | ||
642 | L2TP_SKB_CB(skb)->expires = jiffies + | ||
643 | (session->reorder_timeout ? session->reorder_timeout : HZ); | ||
644 | |||
645 | /* Add packet to the session's receive queue. Reordering is done here, if | ||
646 | * enabled. Saved L2TP protocol info is stored in skb->cb[]. | ||
647 | */ | ||
648 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
649 | if (session->reorder_timeout != 0) { | ||
650 | /* Packet reordering enabled. Add skb to session's | ||
651 | * reorder queue, in order of ns. | ||
652 | */ | ||
653 | l2tp_recv_queue_skb(session, skb); | ||
654 | } else { | ||
655 | /* Packet reordering disabled. Discard out-of-sequence | ||
656 | * packets | ||
657 | */ | ||
658 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | ||
659 | session->stats.rx_seq_discards++; | ||
660 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
661 | "%s: oos pkt %u len %d discarded, " | ||
662 | "waiting for %u, reorder_q_len=%d\n", | ||
663 | session->name, L2TP_SKB_CB(skb)->ns, | ||
664 | L2TP_SKB_CB(skb)->length, session->nr, | ||
665 | skb_queue_len(&session->reorder_q)); | ||
666 | goto discard; | ||
667 | } | ||
668 | skb_queue_tail(&session->reorder_q, skb); | ||
669 | } | ||
670 | } else { | ||
671 | /* No sequence numbers. Add the skb to the tail of the | ||
672 | * reorder queue. This ensures that it will be | ||
673 | * delivered after all previous sequenced skbs. | ||
674 | */ | ||
675 | skb_queue_tail(&session->reorder_q, skb); | ||
676 | } | ||
677 | |||
678 | /* Try to dequeue as many skbs from reorder_q as we can. */ | ||
679 | l2tp_recv_dequeue(session); | ||
680 | |||
681 | l2tp_session_dec_refcount(session); | ||
682 | |||
683 | return; | ||
684 | |||
685 | discard: | ||
686 | session->stats.rx_errors++; | ||
687 | kfree_skb(skb); | ||
688 | |||
689 | if (session->deref) | ||
690 | (*session->deref)(session); | ||
691 | |||
692 | l2tp_session_dec_refcount(session); | ||
693 | } | ||
694 | EXPORT_SYMBOL(l2tp_recv_common); | ||
695 | |||
696 | /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame | ||
697 | * here. The skb is not on a list when we get here. | ||
698 | * Returns 0 if the packet was a data packet and was successfully passed on. | ||
699 | * Returns 1 if the packet was not a good data packet and could not be | ||
700 | * forwarded. All such packets are passed up to userspace to deal with. | ||
701 | */ | ||
702 | int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | ||
703 | int (*payload_hook)(struct sk_buff *skb)) | ||
704 | { | ||
705 | struct l2tp_session *session = NULL; | ||
706 | unsigned char *ptr, *optr; | ||
707 | u16 hdrflags; | ||
708 | u32 tunnel_id, session_id; | ||
709 | int offset; | ||
710 | u16 version; | ||
711 | int length; | ||
712 | |||
713 | if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) | ||
714 | goto discard_bad_csum; | ||
715 | |||
716 | /* UDP always verifies the packet length. */ | ||
717 | __skb_pull(skb, sizeof(struct udphdr)); | ||
718 | |||
719 | /* Short packet? */ | ||
720 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { | ||
721 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
722 | "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); | ||
723 | goto error; | ||
724 | } | ||
725 | |||
726 | /* Point to L2TP header */ | ||
727 | optr = ptr = skb->data; | ||
728 | |||
729 | /* Trace packet contents, if enabled */ | ||
730 | if (tunnel->debug & L2TP_MSG_DATA) { | ||
731 | length = min(32u, skb->len); | ||
732 | if (!pskb_may_pull(skb, length)) | ||
733 | goto error; | ||
734 | |||
735 | printk(KERN_DEBUG "%s: recv: ", tunnel->name); | ||
736 | |||
737 | offset = 0; | ||
738 | do { | ||
739 | printk(" %02X", ptr[offset]); | ||
740 | } while (++offset < length); | ||
741 | |||
742 | printk("\n"); | ||
743 | } | ||
744 | |||
745 | /* Get L2TP header flags */ | ||
746 | hdrflags = ntohs(*(__be16 *) ptr); | ||
747 | |||
748 | /* Check protocol version */ | ||
749 | version = hdrflags & L2TP_HDR_VER_MASK; | ||
750 | if (version != tunnel->version) { | ||
751 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
752 | "%s: recv protocol version mismatch: got %d expected %d\n", | ||
753 | tunnel->name, version, tunnel->version); | ||
754 | goto error; | ||
755 | } | ||
756 | |||
757 | /* Get length of L2TP packet */ | ||
758 | length = skb->len; | ||
759 | |||
760 | /* If type is control packet, it is handled by userspace. */ | ||
761 | if (hdrflags & L2TP_HDRFLAG_T) { | ||
762 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
763 | "%s: recv control packet, len=%d\n", tunnel->name, length); | ||
764 | goto error; | ||
765 | } | ||
766 | |||
767 | /* Skip flags */ | ||
768 | ptr += 2; | ||
769 | |||
770 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
771 | /* If length is present, skip it */ | ||
772 | if (hdrflags & L2TP_HDRFLAG_L) | ||
773 | ptr += 2; | ||
774 | |||
775 | /* Extract tunnel and session ID */ | ||
776 | tunnel_id = ntohs(*(__be16 *) ptr); | ||
777 | ptr += 2; | ||
778 | session_id = ntohs(*(__be16 *) ptr); | ||
779 | ptr += 2; | ||
780 | } else { | ||
781 | ptr += 2; /* skip reserved bits */ | ||
782 | tunnel_id = tunnel->tunnel_id; | ||
783 | session_id = ntohl(*(__be32 *) ptr); | ||
784 | ptr += 4; | ||
785 | } | ||
786 | |||
787 | /* Find the session context */ | ||
788 | session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); | ||
789 | if (!session || !session->recv_skb) { | ||
790 | /* Not found? Pass to userspace to deal with */ | ||
791 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
792 | "%s: no session found (%u/%u). Passing up.\n", | ||
793 | tunnel->name, tunnel_id, session_id); | ||
794 | goto error; | ||
795 | } | ||
796 | |||
797 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); | ||
798 | |||
799 | return 0; | ||
800 | |||
801 | discard_bad_csum: | ||
802 | LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); | ||
803 | UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); | ||
804 | tunnel->stats.rx_errors++; | ||
805 | kfree_skb(skb); | ||
806 | |||
807 | return 0; | ||
808 | |||
809 | error: | ||
810 | /* Put UDP header back */ | ||
811 | __skb_push(skb, sizeof(struct udphdr)); | ||
812 | |||
813 | return 1; | ||
814 | } | ||
815 | EXPORT_SYMBOL_GPL(l2tp_udp_recv_core); | ||
816 | |||
817 | /* UDP encapsulation receive handler. See net/ipv4/udp.c. | ||
818 | * Return codes: | ||
819 | * 0 : success. | ||
820 | * <0: error | ||
821 | * >0: skb should be passed up to userspace as UDP. | ||
822 | */ | ||
823 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | ||
824 | { | ||
825 | struct l2tp_tunnel *tunnel; | ||
826 | |||
827 | tunnel = l2tp_sock_to_tunnel(sk); | ||
828 | if (tunnel == NULL) | ||
829 | goto pass_up; | ||
830 | |||
831 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
832 | "%s: received %d bytes\n", tunnel->name, skb->len); | ||
833 | |||
834 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) | ||
835 | goto pass_up_put; | ||
836 | |||
837 | sock_put(sk); | ||
838 | return 0; | ||
839 | |||
840 | pass_up_put: | ||
841 | sock_put(sk); | ||
842 | pass_up: | ||
843 | return 1; | ||
844 | } | ||
845 | EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv); | ||
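/* Editorial aside, not part of the patch: this handler only runs once
 * the tunnel's UDP socket has been marked for L2TP encapsulation, which
 * this driver does at tunnel-creation time (later in this file), along
 * the lines of:
 *
 *	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 *	udp_sk(sk)->encap_rcv  = l2tp_udp_encap_recv;
 *
 * udp_rcv() then hands matching datagrams to encap_rcv; a return value
 * of 1 (see above) makes UDP fall back to normal delivery, so userspace
 * still receives L2TP control packets.
 */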
846 | |||
847 | /************************************************************************ | ||
848 | * Transmit handling | ||
849 | ***********************************************************************/ | ||
850 | |||
851 | /* Build an L2TP header for the session into the buffer provided. | ||
852 | */ | ||
853 | static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) | ||
854 | { | ||
855 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
856 | __be16 *bufp = buf; | ||
857 | __be16 *optr = buf; | ||
858 | u16 flags = L2TP_HDR_VER_2; | ||
859 | u32 tunnel_id = tunnel->peer_tunnel_id; | ||
860 | u32 session_id = session->peer_session_id; | ||
861 | |||
862 | if (session->send_seq) | ||
863 | flags |= L2TP_HDRFLAG_S; | ||
864 | |||
865 | /* Setup L2TP header. */ | ||
866 | *bufp++ = htons(flags); | ||
867 | *bufp++ = htons(tunnel_id); | ||
868 | *bufp++ = htons(session_id); | ||
869 | if (session->send_seq) { | ||
870 | *bufp++ = htons(session->ns); | ||
871 | *bufp++ = 0; | ||
872 | session->ns++; | ||
873 | session->ns &= 0xffff; | ||
874 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
875 | "%s: updated ns to %u\n", session->name, session->ns); | ||
876 | } | ||
877 | |||
878 | return bufp - optr; | ||
879 | } | ||
880 | |||
881 | static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) | ||
882 | { | ||
883 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
884 | char *bufp = buf; | ||
885 | char *optr = bufp; | ||
886 | |||
887 | /* Setup L2TP header. The header differs slightly for UDP and | ||
888 | * IP encapsulations. For UDP, there is 4 bytes of flags. | ||
889 | */ | ||
890 | if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { | ||
891 | u16 flags = L2TP_HDR_VER_3; | ||
892 | *((__be16 *) bufp) = htons(flags); | ||
893 | bufp += 2; | ||
894 | *((__be16 *) bufp) = 0; | ||
895 | bufp += 2; | ||
896 | } | ||
897 | |||
898 | *((__be32 *) bufp) = htonl(session->peer_session_id); | ||
899 | bufp += 4; | ||
900 | if (session->cookie_len) { | ||
901 | memcpy(bufp, &session->cookie[0], session->cookie_len); | ||
902 | bufp += session->cookie_len; | ||
903 | } | ||
904 | if (session->l2specific_len) { | ||
905 | if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { | ||
906 | u32 l2h = 0; | ||
907 | if (session->send_seq) { | ||
908 | l2h = 0x40000000 | session->ns; | ||
909 | session->ns++; | ||
910 | session->ns &= 0xffffff; | ||
911 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
912 | "%s: updated ns to %u\n", session->name, session->ns); | ||
913 | } | ||
914 | |||
915 | *((__be32 *) bufp) = htonl(l2h); | ||
916 | } | ||
917 | bufp += session->l2specific_len; | ||
918 | } | ||
919 | if (session->offset) | ||
920 | bufp += session->offset; | ||
921 | |||
922 | return bufp - optr; | ||
923 | } | ||
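
The resulting layout for the common case, a UDP-encapsulated session with a 4-byte cookie and the default 4-byte L2-specific sublayer (offset 0), is sketched below; it totals 16 bytes, matching l2tp_session_set_header_len() later in this file. For IP encapsulation the leading 4-byte flags word is omitted.

/* flags    reserved   peer session id   cookie      L2-specific
 * +------+----------+-----------------+-----------+--------------+
 * | 2 B  | 2 B      | 4 B             | 4 B       | 4 B          |
 * +------+----------+-----------------+-----------+--------------+
 *  UDP encap only                       optional    S bit 0x40000000,
 *                                                   Ns in low 24 bits
 */
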
924 | |||
925 | int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len) | ||
926 | { | ||
927 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
928 | unsigned int len = skb->len; | ||
929 | int error; | ||
930 | |||
931 | /* Debug */ | ||
932 | if (session->send_seq) | ||
933 | PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
934 | "%s: send %Zd bytes, ns=%u\n", session->name, | ||
935 | data_len, session->ns - 1); | ||
936 | else | ||
937 | PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
938 | "%s: send %Zd bytes\n", session->name, data_len); | ||
939 | |||
940 | if (session->debug & L2TP_MSG_DATA) { | ||
941 | int i; | ||
942 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
943 | unsigned char *datap = skb->data + uhlen; | ||
944 | |||
945 | printk(KERN_DEBUG "%s: xmit:", session->name); | ||
946 | for (i = 0; i < (len - uhlen); i++) { | ||
947 | printk(" %02X", *datap++); | ||
948 | if (i == 31) { | ||
949 | printk(" ..."); | ||
950 | break; | ||
951 | } | ||
952 | } | ||
953 | printk("\n"); | ||
954 | } | ||
955 | |||
956 | /* Queue the packet to IP for output */ | ||
957 | error = ip_queue_xmit(skb, 1); | ||
958 | |||
959 | /* Update stats */ | ||
960 | if (error >= 0) { | ||
961 | tunnel->stats.tx_packets++; | ||
962 | tunnel->stats.tx_bytes += len; | ||
963 | session->stats.tx_packets++; | ||
964 | session->stats.tx_bytes += len; | ||
965 | } else { | ||
966 | tunnel->stats.tx_errors++; | ||
967 | session->stats.tx_errors++; | ||
968 | } | ||
969 | |||
970 | return 0; | ||
971 | } | ||
972 | EXPORT_SYMBOL_GPL(l2tp_xmit_core); | ||
973 | |||
974 | /* Automatically called when the skb is freed. | ||
975 | */ | ||
976 | static void l2tp_sock_wfree(struct sk_buff *skb) | ||
977 | { | ||
978 | sock_put(skb->sk); | ||
979 | } | ||
980 | |||
981 | /* For data skbs that we transmit, we associate them with the tunnel | ||
982 | * socket but don't do accounting. | ||
983 | */ | ||
984 | static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
985 | { | ||
986 | sock_hold(sk); | ||
987 | skb->sk = sk; | ||
988 | skb->destructor = l2tp_sock_wfree; | ||
989 | } | ||
990 | |||
991 | /* If caller requires the skb to have a ppp header, the header must be | ||
992 | * inserted in the skb data before calling this function. | ||
993 | */ | ||
994 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len) | ||
995 | { | ||
996 | int data_len = skb->len; | ||
997 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
998 | struct sock *sk = tunnel->sock; | ||
999 | struct udphdr *uh; | ||
1000 | struct inet_sock *inet; | ||
1001 | __wsum csum; | ||
1002 | int old_headroom; | ||
1003 | int new_headroom; | ||
1004 | int headroom; | ||
1005 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
1006 | int udp_len; | ||
1007 | |||
1008 | /* Check that there's enough headroom in the skb to insert IP, | ||
1009 | * UDP and L2TP headers. If not enough, expand it to | ||
1010 | * make room. Adjust truesize. | ||
1011 | */ | ||
1012 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | ||
1013 | uhlen + hdr_len; | ||
1014 | old_headroom = skb_headroom(skb); | ||
1015 | if (skb_cow_head(skb, headroom)) | ||
1016 | goto abort; | ||
1017 | |||
1018 | new_headroom = skb_headroom(skb); | ||
1019 | skb_orphan(skb); | ||
1020 | skb->truesize += new_headroom - old_headroom; | ||
1021 | |||
1022 | /* Setup L2TP header */ | ||
1023 | session->build_header(session, __skb_push(skb, hdr_len)); | ||
1024 | |||
1025 | /* Reset skb netfilter state */ | ||
1026 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
1027 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | ||
1028 | IPSKB_REROUTED); | ||
1029 | nf_reset(skb); | ||
1030 | |||
1031 | /* Get routing info from the tunnel socket */ | ||
1032 | skb_dst_drop(skb); | ||
1033 | skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); | ||
1034 | |||
1035 | switch (tunnel->encap) { | ||
1036 | case L2TP_ENCAPTYPE_UDP: | ||
1037 | /* Setup UDP header */ | ||
1038 | inet = inet_sk(sk); | ||
1039 | __skb_push(skb, sizeof(*uh)); | ||
1040 | skb_reset_transport_header(skb); | ||
1041 | uh = udp_hdr(skb); | ||
1042 | uh->source = inet->inet_sport; | ||
1043 | uh->dest = inet->inet_dport; | ||
1044 | udp_len = uhlen + hdr_len + data_len; | ||
1045 | uh->len = htons(udp_len); | ||
1046 | uh->check = 0; | ||
1047 | |||
1048 | /* Calculate UDP checksum if configured to do so */ | ||
1049 | if (sk->sk_no_check == UDP_CSUM_NOXMIT) | ||
1050 | skb->ip_summed = CHECKSUM_NONE; | ||
1051 | else if ((skb_dst(skb) && skb_dst(skb)->dev) && | ||
1052 | (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) { | ||
1053 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
1054 | csum = skb_checksum(skb, 0, udp_len, 0); | ||
1055 | uh->check = csum_tcpudp_magic(inet->inet_saddr, | ||
1056 | inet->inet_daddr, | ||
1057 | udp_len, IPPROTO_UDP, csum); | ||
1058 | if (uh->check == 0) | ||
1059 | uh->check = CSUM_MANGLED_0; | ||
1060 | } else { | ||
1061 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
1062 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
1063 | skb->csum_offset = offsetof(struct udphdr, check); | ||
1064 | uh->check = ~csum_tcpudp_magic(inet->inet_saddr, | ||
1065 | inet->inet_daddr, | ||
1066 | udp_len, IPPROTO_UDP, 0); | ||
1067 | } | ||
1068 | break; | ||
1069 | |||
1070 | case L2TP_ENCAPTYPE_IP: | ||
1071 | break; | ||
1072 | } | ||
1073 | |||
1074 | l2tp_skb_set_owner_w(skb, sk); | ||
1075 | |||
1076 | l2tp_xmit_core(session, skb, data_len); | ||
1077 | |||
1078 | abort: | ||
1079 | return 0; | ||
1080 | } | ||
1081 | EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | ||
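
A pseudowire driver pushes its own framing first and then hands the skb here, as l2tp_eth_dev_xmit() in l2tp_eth.c does. A minimal sketch; pw_xmit is a placeholder name:

/* Sketch: transmit one frame on an established session. l2tp_xmit_skb()
 * prepends the L2TP (and, for UDP encap, UDP) headers, growing headroom
 * if necessary, and queues the result to IP via the tunnel socket.
 */
static int pw_xmit(struct l2tp_session *session, struct sk_buff *skb)
{
	/* hdr_len was precomputed by l2tp_session_set_header_len() */
	return l2tp_xmit_skb(session, skb, session->hdr_len);
}
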
1082 | |||
1083 | /***************************************************************************** | ||
1084 | * Tunnel and session create/destroy. | ||
1085 | *****************************************************************************/ | ||
1086 | |||
1087 | /* Tunnel socket destruct hook. | ||
1088 | * The tunnel context is deleted only when all session sockets have been | ||
1089 | * closed. | ||
1090 | */ | ||
1091 | void l2tp_tunnel_destruct(struct sock *sk) | ||
1092 | { | ||
1093 | struct l2tp_tunnel *tunnel; | ||
1094 | |||
1095 | tunnel = sk->sk_user_data; | ||
1096 | if (tunnel == NULL) | ||
1097 | goto end; | ||
1098 | |||
1099 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1100 | "%s: closing...\n", tunnel->name); | ||
1101 | |||
1102 | /* Close all sessions */ | ||
1103 | l2tp_tunnel_closeall(tunnel); | ||
1104 | |||
1105 | switch (tunnel->encap) { | ||
1106 | case L2TP_ENCAPTYPE_UDP: | ||
1107 | /* No longer an encapsulation socket. See net/ipv4/udp.c */ | ||
1108 | (udp_sk(sk))->encap_type = 0; | ||
1109 | (udp_sk(sk))->encap_rcv = NULL; | ||
1110 | break; | ||
1111 | case L2TP_ENCAPTYPE_IP: | ||
1112 | break; | ||
1113 | } | ||
1114 | |||
1115 | /* Remove hooks into tunnel socket */ | ||
1116 | tunnel->sock = NULL; | ||
1117 | sk->sk_destruct = tunnel->old_sk_destruct; | ||
1118 | sk->sk_user_data = NULL; | ||
1119 | |||
1120 | /* Call the original destructor */ | ||
1121 | if (sk->sk_destruct) | ||
1122 | (*sk->sk_destruct)(sk); | ||
1123 | |||
1124 | /* We're finished with the socket */ | ||
1125 | l2tp_tunnel_dec_refcount(tunnel); | ||
1126 | |||
1127 | end: | ||
1128 | return; | ||
1129 | } | ||
1130 | EXPORT_SYMBOL(l2tp_tunnel_destruct); | ||
1131 | |||
1132 | /* When the tunnel is closed, all the attached sessions need to go too. | ||
1133 | */ | ||
1134 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) | ||
1135 | { | ||
1136 | int hash; | ||
1137 | struct hlist_node *walk; | ||
1138 | struct hlist_node *tmp; | ||
1139 | struct l2tp_session *session; | ||
1140 | |||
1141 | BUG_ON(tunnel == NULL); | ||
1142 | |||
1143 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1144 | "%s: closing all sessions...\n", tunnel->name); | ||
1145 | |||
1146 | write_lock_bh(&tunnel->hlist_lock); | ||
1147 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
1148 | again: | ||
1149 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { | ||
1150 | session = hlist_entry(walk, struct l2tp_session, hlist); | ||
1151 | |||
1152 | PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1153 | "%s: closing session\n", session->name); | ||
1154 | |||
1155 | hlist_del_init(&session->hlist); | ||
1156 | |||
1157 | /* Since we should hold the sock lock while | ||
1158 | * doing any unbinding, we need to release the | ||
1159 | * lock we're holding before taking that lock. | ||
1160 | * Hold a reference to the sock so it doesn't | ||
1161 | * disappear as we're jumping between locks. | ||
1162 | */ | ||
1163 | if (session->ref != NULL) | ||
1164 | (*session->ref)(session); | ||
1165 | |||
1166 | write_unlock_bh(&tunnel->hlist_lock); | ||
1167 | |||
1168 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1169 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1170 | |||
1171 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1172 | hlist_del_init_rcu(&session->global_hlist); | ||
1173 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1174 | synchronize_rcu(); | ||
1175 | } | ||
1176 | |||
1177 | if (session->session_close != NULL) | ||
1178 | (*session->session_close)(session); | ||
1179 | |||
1180 | if (session->deref != NULL) | ||
1181 | (*session->deref)(session); | ||
1182 | |||
1183 | write_lock_bh(&tunnel->hlist_lock); | ||
1184 | |||
1185 | /* Now restart from the beginning of this hash | ||
1186 | * chain. We always remove a session from the | ||
1187 | * list so we are guaranteed to make forward | ||
1188 | * progress. | ||
1189 | */ | ||
1190 | goto again; | ||
1191 | } | ||
1192 | } | ||
1193 | write_unlock_bh(&tunnel->hlist_lock); | ||
1194 | } | ||
1195 | EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | ||
1196 | |||
1197 | /* Really kill the tunnel. | ||
1198 | * Come here only when all sessions have been cleared from the tunnel. | ||
1199 | */ | ||
1200 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | ||
1201 | { | ||
1202 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1203 | |||
1204 | BUG_ON(atomic_read(&tunnel->ref_count) != 0); | ||
1205 | BUG_ON(tunnel->sock != NULL); | ||
1206 | |||
1207 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1208 | "%s: free...\n", tunnel->name); | ||
1209 | |||
1210 | /* Remove from tunnel list */ | ||
1211 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1212 | list_del_rcu(&tunnel->list); | ||
1213 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1214 | synchronize_rcu(); | ||
1215 | |||
1216 | atomic_dec(&l2tp_tunnel_count); | ||
1217 | kfree(tunnel); | ||
1218 | } | ||
1219 | EXPORT_SYMBOL_GPL(l2tp_tunnel_free); | ||
1220 | |||
1221 | /* Create a socket for the tunnel, if one isn't set up by | ||
1222 | * userspace. This is used for static tunnels where there is no | ||
1223 | * managing L2TP daemon. | ||
1224 | */ | ||
1225 | static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp) | ||
1226 | { | ||
1227 | int err = -EINVAL; | ||
1228 | struct sockaddr_in udp_addr; | ||
1229 | struct sockaddr_l2tpip ip_addr; | ||
1230 | struct socket *sock = NULL; | ||
1231 | |||
1232 | switch (cfg->encap) { | ||
1233 | case L2TP_ENCAPTYPE_UDP: | ||
1234 | err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); | ||
1235 | if (err < 0) | ||
1236 | goto out; | ||
1237 | |||
1238 | sock = *sockp; | ||
1239 | |||
1240 | memset(&udp_addr, 0, sizeof(udp_addr)); | ||
1241 | udp_addr.sin_family = AF_INET; | ||
1242 | udp_addr.sin_addr = cfg->local_ip; | ||
1243 | udp_addr.sin_port = htons(cfg->local_udp_port); | ||
1244 | err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr)); | ||
1245 | if (err < 0) | ||
1246 | goto out; | ||
1247 | |||
1248 | udp_addr.sin_family = AF_INET; | ||
1249 | udp_addr.sin_addr = cfg->peer_ip; | ||
1250 | udp_addr.sin_port = htons(cfg->peer_udp_port); | ||
1251 | err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0); | ||
1252 | if (err < 0) | ||
1253 | goto out; | ||
1254 | |||
1255 | if (!cfg->use_udp_checksums) | ||
1256 | sock->sk->sk_no_check = UDP_CSUM_NOXMIT; | ||
1257 | |||
1258 | break; | ||
1259 | |||
1260 | case L2TP_ENCAPTYPE_IP: | ||
1261 | err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); | ||
1262 | if (err < 0) | ||
1263 | goto out; | ||
1264 | |||
1265 | sock = *sockp; | ||
1266 | |||
1267 | memset(&ip_addr, 0, sizeof(ip_addr)); | ||
1268 | ip_addr.l2tp_family = AF_INET; | ||
1269 | ip_addr.l2tp_addr = cfg->local_ip; | ||
1270 | ip_addr.l2tp_conn_id = tunnel_id; | ||
1271 | err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr)); | ||
1272 | if (err < 0) | ||
1273 | goto out; | ||
1274 | |||
1275 | ip_addr.l2tp_family = AF_INET; | ||
1276 | ip_addr.l2tp_addr = cfg->peer_ip; | ||
1277 | ip_addr.l2tp_conn_id = peer_tunnel_id; | ||
1278 | err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0); | ||
1279 | if (err < 0) | ||
1280 | goto out; | ||
1281 | |||
1282 | break; | ||
1283 | |||
1284 | default: | ||
1285 | goto out; | ||
1286 | } | ||
1287 | |||
1288 | out: | ||
1289 | if ((err < 0) && sock) { | ||
1290 | sock_release(sock); | ||
1291 | *sockp = NULL; | ||
1292 | } | ||
1293 | |||
1294 | return err; | ||
1295 | } | ||
1296 | |||
1297 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) | ||
1298 | { | ||
1299 | struct l2tp_tunnel *tunnel = NULL; | ||
1300 | int err; | ||
1301 | struct socket *sock = NULL; | ||
1302 | struct sock *sk = NULL; | ||
1303 | struct l2tp_net *pn; | ||
1304 | enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP; | ||
1305 | |||
1306 | /* Get the tunnel socket from the fd, which was opened by | ||
1307 | * the userspace L2TP daemon. If not specified, create a | ||
1308 | * kernel socket. | ||
1309 | */ | ||
1310 | if (fd < 0) { | ||
1311 | err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock); | ||
1312 | if (err < 0) | ||
1313 | goto err; | ||
1314 | } else { | ||
1315 | err = -EBADF; | ||
1316 | sock = sockfd_lookup(fd, &err); | ||
1317 | if (!sock) { | ||
1318 | printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", | ||
1319 | tunnel_id, fd, err); | ||
1320 | goto err; | ||
1321 | } | ||
1322 | } | ||
1323 | |||
1324 | sk = sock->sk; | ||
1325 | |||
1326 | if (cfg != NULL) | ||
1327 | encap = cfg->encap; | ||
1328 | |||
1329 | /* Quick sanity checks */ | ||
1330 | switch (encap) { | ||
1331 | case L2TP_ENCAPTYPE_UDP: | ||
1332 | err = -EPROTONOSUPPORT; | ||
1333 | if (sk->sk_protocol != IPPROTO_UDP) { | ||
1334 | printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | ||
1335 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); | ||
1336 | goto err; | ||
1337 | } | ||
1338 | break; | ||
1339 | case L2TP_ENCAPTYPE_IP: | ||
1340 | err = -EPROTONOSUPPORT; | ||
1341 | if (sk->sk_protocol != IPPROTO_L2TP) { | ||
1342 | printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | ||
1343 | tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); | ||
1344 | goto err; | ||
1345 | } | ||
1346 | break; | ||
1347 | } | ||
1348 | |||
1349 | /* Check if this socket has already been prepped */ | ||
1350 | tunnel = (struct l2tp_tunnel *)sk->sk_user_data; | ||
1351 | if (tunnel != NULL) { | ||
1352 | /* This socket has already been prepped */ | ||
1353 | err = -EBUSY; | ||
1354 | goto err; | ||
1355 | } | ||
1356 | |||
1357 | tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL); | ||
1358 | if (tunnel == NULL) { | ||
1359 | err = -ENOMEM; | ||
1360 | goto err; | ||
1361 | } | ||
1362 | |||
1363 | tunnel->version = version; | ||
1364 | tunnel->tunnel_id = tunnel_id; | ||
1365 | tunnel->peer_tunnel_id = peer_tunnel_id; | ||
1366 | tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS; | ||
1367 | |||
1368 | tunnel->magic = L2TP_TUNNEL_MAGIC; | ||
1369 | sprintf(&tunnel->name[0], "tunl %u", tunnel_id); | ||
1370 | rwlock_init(&tunnel->hlist_lock); | ||
1371 | |||
1372 | /* The net we belong to */ | ||
1373 | tunnel->l2tp_net = net; | ||
1374 | pn = l2tp_pernet(net); | ||
1375 | |||
1376 | if (cfg != NULL) | ||
1377 | tunnel->debug = cfg->debug; | ||
1378 | |||
1379 | /* Record the tunnel's encapsulation type */ | ||
1380 | tunnel->encap = encap; | ||
1381 | if (encap == L2TP_ENCAPTYPE_UDP) { | ||
1382 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | ||
1383 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; | ||
1384 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; | ||
1385 | } | ||
1386 | |||
1387 | sk->sk_user_data = tunnel; | ||
1388 | |||
1389 | /* Hook on the tunnel socket destructor so that we can cleanup | ||
1390 | * if the tunnel socket goes away. | ||
1391 | */ | ||
1392 | tunnel->old_sk_destruct = sk->sk_destruct; | ||
1393 | sk->sk_destruct = &l2tp_tunnel_destruct; | ||
1394 | tunnel->sock = sk; | ||
1395 | sk->sk_allocation = GFP_ATOMIC; | ||
1396 | |||
1397 | /* Add tunnel to our list */ | ||
1398 | INIT_LIST_HEAD(&tunnel->list); | ||
1399 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1400 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); | ||
1401 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1402 | synchronize_rcu(); | ||
1403 | atomic_inc(&l2tp_tunnel_count); | ||
1404 | |||
1405 | /* Bump the reference count. The tunnel context is deleted | ||
1406 | * only when this drops to zero. | ||
1407 | */ | ||
1408 | l2tp_tunnel_inc_refcount(tunnel); | ||
1409 | |||
1410 | err = 0; | ||
1411 | err: | ||
1412 | if (tunnelp) | ||
1413 | *tunnelp = tunnel; | ||
1414 | |||
1415 | /* If the tunnel's socket was created by the kernel, it doesn't | ||
1416 | * have a file. | ||
1417 | */ | ||
1418 | if (sock && sock->file) | ||
1419 | sockfd_put(sock); | ||
1420 | |||
1421 | return err; | ||
1422 | } | ||
1423 | EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | ||
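
For a static (unmanaged) tunnel the caller passes fd == -1 plus addressing in the cfg, and the kernel opens the socket itself. A hedged sketch; the function name, tunnel ids and addresses are illustrative only:

static int create_static_tunnel(struct net *net, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel_cfg cfg = {
		.encap			= L2TP_ENCAPTYPE_UDP,
		.local_udp_port		= 1701,
		.peer_udp_port		= 1701,
		.use_udp_checksums	= 1,
	};

	cfg.local_ip.s_addr = htonl(0xc0a80001);	/* 192.168.0.1 (example) */
	cfg.peer_ip.s_addr  = htonl(0xc0a80002);	/* 192.168.0.2 (example) */

	/* fd == -1: l2tp_tunnel_sock_create() opens a kernel socket.
	 * version 3, tunnel id 42, peer tunnel id 43 are arbitrary here.
	 */
	return l2tp_tunnel_create(net, -1, 3, 42, 43, &cfg, tunnelp);
}
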
1424 | |||
1425 | /* This function is used by the netlink TUNNEL_DELETE command. | ||
1426 | */ | ||
1427 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | ||
1428 | { | ||
1429 | int err = 0; | ||
1430 | struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; | ||
1431 | |||
1432 | /* Force the tunnel socket to close. This will eventually | ||
1433 | * cause the tunnel to be deleted via the normal socket close | ||
1434 | * mechanisms when userspace closes the tunnel socket. | ||
1435 | */ | ||
1436 | if (sock != NULL) { | ||
1437 | err = inet_shutdown(sock, 2); | ||
1438 | |||
1439 | /* If the tunnel's socket was created by the kernel, | ||
1440 | * close the socket here since the socket was not | ||
1441 | * created by userspace. | ||
1442 | */ | ||
1443 | if (sock->file == NULL) | ||
1444 | err = inet_release(sock); | ||
1445 | } | ||
1446 | |||
1447 | return err; | ||
1448 | } | ||
1449 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | ||
1450 | |||
1451 | /* Really kill the session. | ||
1452 | */ | ||
1453 | void l2tp_session_free(struct l2tp_session *session) | ||
1454 | { | ||
1455 | struct l2tp_tunnel *tunnel; | ||
1456 | |||
1457 | BUG_ON(atomic_read(&session->ref_count) != 0); | ||
1458 | |||
1459 | tunnel = session->tunnel; | ||
1460 | if (tunnel != NULL) { | ||
1461 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
1462 | |||
1463 | /* Delete the session from the hash */ | ||
1464 | write_lock_bh(&tunnel->hlist_lock); | ||
1465 | hlist_del_init(&session->hlist); | ||
1466 | write_unlock_bh(&tunnel->hlist_lock); | ||
1467 | |||
1468 | /* Unlink from the global hash if not L2TPv2 */ | ||
1469 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1470 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1471 | |||
1472 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1473 | hlist_del_init_rcu(&session->global_hlist); | ||
1474 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1475 | synchronize_rcu(); | ||
1476 | } | ||
1477 | |||
1478 | if (session->session_id != 0) | ||
1479 | atomic_dec(&l2tp_session_count); | ||
1480 | |||
1481 | sock_put(tunnel->sock); | ||
1482 | |||
1483 | /* This will delete the tunnel context if this | ||
1484 | * is the last session on the tunnel. | ||
1485 | */ | ||
1486 | session->tunnel = NULL; | ||
1487 | l2tp_tunnel_dec_refcount(tunnel); | ||
1488 | } | ||
1489 | |||
1490 | kfree(session); | ||
1491 | |||
1492 | return; | ||
1493 | } | ||
1494 | EXPORT_SYMBOL_GPL(l2tp_session_free); | ||
1495 | |||
1496 | /* This function is used by the netlink SESSION_DELETE command and by | ||
1497 | * pseudowire modules. | ||
1498 | */ | ||
1499 | int l2tp_session_delete(struct l2tp_session *session) | ||
1500 | { | ||
1501 | if (session->session_close != NULL) | ||
1502 | (*session->session_close)(session); | ||
1503 | |||
1504 | l2tp_session_dec_refcount(session); | ||
1505 | |||
1506 | return 0; | ||
1507 | } | ||
1508 | EXPORT_SYMBOL_GPL(l2tp_session_delete); | ||
1509 | |||
1510 | |||
1511 | /* We come here whenever a session's send_seq, cookie_len or | ||
1512 | * l2specific_len parameters are set. | ||
1513 | */ | ||
1514 | void l2tp_session_set_header_len(struct l2tp_session *session, int version) | ||
1515 | { | ||
1516 | if (version == L2TP_HDR_VER_2) { | ||
1517 | session->hdr_len = 6; | ||
1518 | if (session->send_seq) | ||
1519 | session->hdr_len += 4; | ||
1520 | } else { | ||
1521 | session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset; | ||
1522 | if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) | ||
1523 | session->hdr_len += 4; | ||
1524 | } | ||
1525 | |||
1526 | } | ||
1527 | EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); | ||
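
A worked example of the arithmetic above:

/* L2TPv3 over UDP, 4-byte cookie, default 4-byte L2-specific
 * sublayer, no payload offset:
 *
 *	hdr_len = 4 (session id) + 4 (cookie) + 4 (L2-specific)
 *		  + 0 (offset) + 4 (UDP flags word) = 16 bytes
 *
 * L2TPv2 with sequencing enabled: 6 + 4 = 10 bytes, matching
 * l2tp_build_l2tpv2_header() above.
 */
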
1528 | |||
1529 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
1530 | { | ||
1531 | struct l2tp_session *session; | ||
1532 | |||
1533 | session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); | ||
1534 | if (session != NULL) { | ||
1535 | session->magic = L2TP_SESSION_MAGIC; | ||
1536 | session->tunnel = tunnel; | ||
1537 | |||
1538 | session->session_id = session_id; | ||
1539 | session->peer_session_id = peer_session_id; | ||
1540 | session->nr = 1; | ||
1541 | |||
1542 | sprintf(&session->name[0], "sess %u/%u", | ||
1543 | tunnel->tunnel_id, session->session_id); | ||
1544 | |||
1545 | skb_queue_head_init(&session->reorder_q); | ||
1546 | |||
1547 | INIT_HLIST_NODE(&session->hlist); | ||
1548 | INIT_HLIST_NODE(&session->global_hlist); | ||
1549 | |||
1550 | /* Inherit debug options from tunnel */ | ||
1551 | session->debug = tunnel->debug; | ||
1552 | |||
1553 | if (cfg) { | ||
1554 | session->pwtype = cfg->pw_type; | ||
1555 | session->debug = cfg->debug; | ||
1556 | session->mtu = cfg->mtu; | ||
1557 | session->mru = cfg->mru; | ||
1558 | session->send_seq = cfg->send_seq; | ||
1559 | session->recv_seq = cfg->recv_seq; | ||
1560 | session->lns_mode = cfg->lns_mode; | ||
1561 | session->reorder_timeout = cfg->reorder_timeout; | ||
1562 | session->offset = cfg->offset; | ||
1563 | session->l2specific_type = cfg->l2specific_type; | ||
1564 | session->l2specific_len = cfg->l2specific_len; | ||
1565 | session->cookie_len = cfg->cookie_len; | ||
1566 | memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len); | ||
1567 | session->peer_cookie_len = cfg->peer_cookie_len; | ||
1568 | memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); | ||
1569 | } | ||
1570 | |||
1571 | if (tunnel->version == L2TP_HDR_VER_2) | ||
1572 | session->build_header = l2tp_build_l2tpv2_header; | ||
1573 | else | ||
1574 | session->build_header = l2tp_build_l2tpv3_header; | ||
1575 | |||
1576 | l2tp_session_set_header_len(session, tunnel->version); | ||
1577 | |||
1578 | /* Bump the reference count. The session context is deleted | ||
1579 | * only when this drops to zero. | ||
1580 | */ | ||
1581 | l2tp_session_inc_refcount(session); | ||
1582 | l2tp_tunnel_inc_refcount(tunnel); | ||
1583 | |||
1584 | /* Ensure tunnel socket isn't deleted */ | ||
1585 | sock_hold(tunnel->sock); | ||
1586 | |||
1587 | /* Add session to the tunnel's hash list */ | ||
1588 | write_lock_bh(&tunnel->hlist_lock); | ||
1589 | hlist_add_head(&session->hlist, | ||
1590 | l2tp_session_id_hash(tunnel, session_id)); | ||
1591 | write_unlock_bh(&tunnel->hlist_lock); | ||
1592 | |||
1593 | /* And to the global session list if L2TPv3 */ | ||
1594 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1595 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1596 | |||
1597 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1598 | hlist_add_head_rcu(&session->global_hlist, | ||
1599 | l2tp_session_id_hash_2(pn, session_id)); | ||
1600 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1601 | synchronize_rcu(); | ||
1602 | } | ||
1603 | |||
1604 | /* Ignore management session in session count value */ | ||
1605 | if (session->session_id != 0) | ||
1606 | atomic_inc(&l2tp_session_count); | ||
1607 | } | ||
1608 | |||
1609 | return session; | ||
1610 | } | ||
1611 | EXPORT_SYMBOL_GPL(l2tp_session_create); | ||
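
A pseudowire module typically follows the pattern below (compare l2tp_eth_create() in l2tp_eth.c): reserve private space via priv_size, then wire up its receive callback. The struct and function names here are placeholders:

struct my_pw {
	struct net_device *dev;
};

static void my_pw_recv(struct l2tp_session *session,
		       struct sk_buff *skb, int data_len);

static struct l2tp_session *my_pw_session_create(struct l2tp_tunnel *tunnel,
						 u32 sid, u32 peer_sid,
						 struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = l2tp_session_create(sizeof(struct my_pw), tunnel,
				      sid, peer_sid, cfg);
	if (session == NULL)
		return NULL;

	/* called for each successfully de-tunneled frame */
	session->recv_skb = my_pw_recv;
	return session;
}
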
1612 | |||
1613 | /***************************************************************************** | ||
1614 | * Init and cleanup | ||
1615 | *****************************************************************************/ | ||
1616 | |||
1617 | static __net_init int l2tp_init_net(struct net *net) | ||
1618 | { | ||
1619 | struct l2tp_net *pn; | ||
1620 | int err; | ||
1621 | int hash; | ||
1622 | |||
1623 | pn = kzalloc(sizeof(*pn), GFP_KERNEL); | ||
1624 | if (!pn) | ||
1625 | return -ENOMEM; | ||
1626 | |||
1627 | INIT_LIST_HEAD(&pn->l2tp_tunnel_list); | ||
1628 | spin_lock_init(&pn->l2tp_tunnel_list_lock); | ||
1629 | |||
1630 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) | ||
1631 | INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); | ||
1632 | |||
1633 | spin_lock_init(&pn->l2tp_session_hlist_lock); | ||
1634 | |||
1635 | err = net_assign_generic(net, l2tp_net_id, pn); | ||
1636 | if (err) | ||
1637 | goto out; | ||
1638 | |||
1639 | return 0; | ||
1640 | |||
1641 | out: | ||
1642 | kfree(pn); | ||
1643 | return err; | ||
1644 | } | ||
1645 | |||
1646 | static __net_exit void l2tp_exit_net(struct net *net) | ||
1647 | { | ||
1648 | struct l2tp_net *pn; | ||
1649 | |||
1650 | pn = net_generic(net, l2tp_net_id); | ||
1651 | /* | ||
1652 | * if someone has cached our net then | ||
1653 | * further net_generic calls will return NULL | ||
1654 | */ | ||
1655 | net_assign_generic(net, l2tp_net_id, NULL); | ||
1656 | kfree(pn); | ||
1657 | } | ||
1658 | |||
1659 | static struct pernet_operations l2tp_net_ops = { | ||
1660 | .init = l2tp_init_net, | ||
1661 | .exit = l2tp_exit_net, | ||
1662 | .id = &l2tp_net_id, | ||
1663 | .size = sizeof(struct l2tp_net), | ||
1664 | }; | ||
1665 | |||
1666 | static int __init l2tp_init(void) | ||
1667 | { | ||
1668 | int rc = 0; | ||
1669 | |||
1670 | rc = register_pernet_device(&l2tp_net_ops); | ||
1671 | if (rc) | ||
1672 | goto out; | ||
1673 | |||
1674 | printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION); | ||
1675 | |||
1676 | out: | ||
1677 | return rc; | ||
1678 | } | ||
1679 | |||
1680 | static void __exit l2tp_exit(void) | ||
1681 | { | ||
1682 | unregister_pernet_device(&l2tp_net_ops); | ||
1683 | } | ||
1684 | |||
1685 | module_init(l2tp_init); | ||
1686 | module_exit(l2tp_exit); | ||
1687 | |||
1688 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
1689 | MODULE_DESCRIPTION("L2TP core"); | ||
1690 | MODULE_LICENSE("GPL"); | ||
1691 | MODULE_VERSION(L2TP_DRV_VERSION); | ||
1692 | |||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h new file mode 100644 index 000000000000..f0f318edd3f1 --- /dev/null +++ b/net/l2tp/l2tp_core.h | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * L2TP internal definitions. | ||
3 | * | ||
4 | * Copyright (c) 2008,2009 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef _L2TP_CORE_H_ | ||
12 | #define _L2TP_CORE_H_ | ||
13 | |||
14 | /* Just some random numbers */ | ||
15 | #define L2TP_TUNNEL_MAGIC 0x42114DDA | ||
16 | #define L2TP_SESSION_MAGIC 0x0C04EB7D | ||
17 | |||
18 | /* Per tunnel, session hash table size */ | ||
19 | #define L2TP_HASH_BITS 4 | ||
20 | #define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS) | ||
21 | |||
22 | /* System-wide, session hash table size */ | ||
23 | #define L2TP_HASH_BITS_2 8 | ||
24 | #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2) | ||
25 | |||
26 | /* Debug message categories for the DEBUG socket option */ | ||
27 | enum { | ||
28 | L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if | ||
29 | * compiled in) */ | ||
30 | L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel | ||
31 | * interface */ | ||
32 | L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ | ||
33 | L2TP_MSG_DATA = (1 << 3), /* data packets */ | ||
34 | }; | ||
35 | |||
36 | struct sk_buff; | ||
37 | |||
38 | struct l2tp_stats { | ||
39 | u64 tx_packets; | ||
40 | u64 tx_bytes; | ||
41 | u64 tx_errors; | ||
42 | u64 rx_packets; | ||
43 | u64 rx_bytes; | ||
44 | u64 rx_seq_discards; | ||
45 | u64 rx_oos_packets; | ||
46 | u64 rx_errors; | ||
47 | u64 rx_cookie_discards; | ||
48 | }; | ||
49 | |||
50 | struct l2tp_tunnel; | ||
51 | |||
52 | /* Describes a session. Contains the information needed to identify | ||
53 | * incoming packets and to build outgoing ones. | ||
54 | */ | ||
55 | struct l2tp_session_cfg { | ||
56 | enum l2tp_pwtype pw_type; | ||
57 | unsigned data_seq:2; /* data sequencing level | ||
58 | * 0 => none, 1 => IP only, | ||
59 | * 2 => all | ||
60 | */ | ||
61 | unsigned recv_seq:1; /* expect receive packets with | ||
62 | * sequence numbers? */ | ||
63 | unsigned send_seq:1; /* send packets with sequence | ||
64 | * numbers? */ | ||
65 | unsigned lns_mode:1; /* behave as LNS? LAC enables | ||
66 | * sequence numbers under | ||
67 | * control of LNS. */ | ||
68 | int debug; /* bitmask of debug message | ||
69 | * categories */ | ||
70 | u16 vlan_id; /* VLAN pseudowire only */ | ||
71 | u16 offset; /* offset to payload */ | ||
72 | u16 l2specific_len; /* Layer 2 specific length */ | ||
73 | u16 l2specific_type; /* Layer 2 specific type */ | ||
74 | u8 cookie[8]; /* optional cookie */ | ||
75 | int cookie_len; /* 0, 4 or 8 bytes */ | ||
76 | u8 peer_cookie[8]; /* peer's cookie */ | ||
77 | int peer_cookie_len; /* 0, 4 or 8 bytes */ | ||
78 | int reorder_timeout; /* configured reorder timeout | ||
79 | * (in jiffies) */ | ||
80 | int mtu; | ||
81 | int mru; | ||
82 | char *ifname; | ||
83 | }; | ||
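
A hedged example of a filled-in config for an L2TPv3 ethernet pseudowire; unset fields default to zero and the values shown are illustrative:

struct l2tp_session_cfg cfg = {
	.pw_type		= L2TP_PWTYPE_ETH,
	.debug			= L2TP_MSG_CONTROL | L2TP_MSG_DATA,
	.mtu			= 1462,		/* 1500 less tunnel overhead */
	.cookie_len		= 4,
	.cookie			= { 0xde, 0xad, 0xbe, 0xef },
	.l2specific_type	= L2TP_L2SPECTYPE_DEFAULT,
	.l2specific_len		= 4,
};
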
84 | |||
85 | struct l2tp_session { | ||
86 | int magic; /* should be | ||
87 | * L2TP_SESSION_MAGIC */ | ||
88 | |||
89 | struct l2tp_tunnel *tunnel; /* back pointer to tunnel | ||
90 | * context */ | ||
91 | u32 session_id; | ||
92 | u32 peer_session_id; | ||
93 | u8 cookie[8]; | ||
94 | int cookie_len; | ||
95 | u8 peer_cookie[8]; | ||
96 | int peer_cookie_len; | ||
97 | u16 offset; /* offset from end of L2TP header | ||
98 | to beginning of data */ | ||
99 | u16 l2specific_len; | ||
100 | u16 l2specific_type; | ||
101 | u16 hdr_len; | ||
102 | u32 nr; /* session NR state (receive) */ | ||
103 | u32 ns; /* session NS state (send) */ | ||
104 | struct sk_buff_head reorder_q; /* receive reorder queue */ | ||
105 | struct hlist_node hlist; /* Hash list node */ | ||
106 | atomic_t ref_count; | ||
107 | |||
108 | char name[32]; /* for logging */ | ||
109 | char ifname[IFNAMSIZ]; | ||
110 | unsigned data_seq:2; /* data sequencing level | ||
111 | * 0 => none, 1 => IP only, | ||
112 | * 2 => all | ||
113 | */ | ||
114 | unsigned recv_seq:1; /* expect receive packets with | ||
115 | * sequence numbers? */ | ||
116 | unsigned send_seq:1; /* send packets with sequence | ||
117 | * numbers? */ | ||
118 | unsigned lns_mode:1; /* behave as LNS? LAC enables | ||
119 | * sequence numbers under | ||
120 | * control of LNS. */ | ||
121 | int debug; /* bitmask of debug message | ||
122 | * categories */ | ||
123 | int reorder_timeout; /* configured reorder timeout | ||
124 | * (in jiffies) */ | ||
125 | int mtu; | ||
126 | int mru; | ||
127 | enum l2tp_pwtype pwtype; | ||
128 | struct l2tp_stats stats; | ||
129 | struct hlist_node global_hlist; /* Global hash list node */ | ||
130 | |||
131 | int (*build_header)(struct l2tp_session *session, void *buf); | ||
132 | void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len); | ||
133 | void (*session_close)(struct l2tp_session *session); | ||
134 | void (*ref)(struct l2tp_session *session); | ||
135 | void (*deref)(struct l2tp_session *session); | ||
136 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
137 | void (*show)(struct seq_file *m, void *priv); | ||
138 | #endif | ||
139 | uint8_t priv[0]; /* private data */ | ||
140 | }; | ||
141 | |||
142 | /* Describes the tunnel. It contains info to track all the associated | ||
143 | * sessions so that incoming packets can be matched to the right one. | ||
144 | */ | ||
145 | struct l2tp_tunnel_cfg { | ||
146 | int debug; /* bitmask of debug message | ||
147 | * categories */ | ||
148 | enum l2tp_encap_type encap; | ||
149 | |||
150 | /* Used only for kernel-created sockets */ | ||
151 | struct in_addr local_ip; | ||
152 | struct in_addr peer_ip; | ||
153 | u16 local_udp_port; | ||
154 | u16 peer_udp_port; | ||
155 | unsigned int use_udp_checksums:1; | ||
156 | }; | ||
157 | |||
158 | struct l2tp_tunnel { | ||
159 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ | ||
160 | rwlock_t hlist_lock; /* protect session_hlist */ | ||
161 | struct hlist_head session_hlist[L2TP_HASH_SIZE]; | ||
162 | /* hashed list of sessions, | ||
163 | * hashed by id */ | ||
164 | u32 tunnel_id; | ||
165 | u32 peer_tunnel_id; | ||
166 | int version; /* 2=>L2TPv2, 3=>L2TPv3 */ | ||
167 | |||
168 | char name[20]; /* for logging */ | ||
169 | int debug; /* bitmask of debug message | ||
170 | * categories */ | ||
171 | enum l2tp_encap_type encap; | ||
172 | struct l2tp_stats stats; | ||
173 | |||
174 | struct list_head list; /* Keep a list of all tunnels */ | ||
175 | struct net *l2tp_net; /* the net we belong to */ | ||
176 | |||
177 | atomic_t ref_count; | ||
178 | #ifdef CONFIG_DEBUG_FS | ||
179 | void (*show)(struct seq_file *m, void *arg); | ||
180 | #endif | ||
181 | int (*recv_payload_hook)(struct sk_buff *skb); | ||
182 | void (*old_sk_destruct)(struct sock *); | ||
183 | struct sock *sock; /* Parent socket */ | ||
184 | int fd; | ||
185 | |||
186 | uint8_t priv[0]; /* private data */ | ||
187 | }; | ||
188 | |||
189 | struct l2tp_nl_cmd_ops { | ||
190 | int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | ||
191 | int (*session_delete)(struct l2tp_session *session); | ||
192 | }; | ||
193 | |||
194 | static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel) | ||
195 | { | ||
196 | return &tunnel->priv[0]; | ||
197 | } | ||
198 | |||
199 | static inline void *l2tp_session_priv(struct l2tp_session *session) | ||
200 | { | ||
201 | return &session->priv[0]; | ||
202 | } | ||
203 | |||
204 | static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) | ||
205 | { | ||
206 | struct l2tp_tunnel *tunnel; | ||
207 | |||
208 | if (sk == NULL) | ||
209 | return NULL; | ||
210 | |||
211 | sock_hold(sk); | ||
212 | tunnel = (struct l2tp_tunnel *)(sk->sk_user_data); | ||
213 | if (tunnel == NULL) { | ||
214 | sock_put(sk); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
219 | |||
220 | out: | ||
221 | return tunnel; | ||
222 | } | ||
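
Note that the helper takes a reference on the socket on success; every caller must drop it, as l2tp_udp_encap_recv() does on both of its exit paths. A usage sketch with a hypothetical caller:

/* Sketch: typical use of l2tp_sock_to_tunnel(). The sock_hold()
 * taken inside the helper must be balanced with sock_put().
 */
static void tunnel_dump_name(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

	if (tunnel == NULL)
		return;			/* not an L2TP socket; no ref taken */

	printk(KERN_DEBUG "%s: version %d\n", tunnel->name, tunnel->version);
	sock_put(sk);			/* balance the sock_hold() in the helper */
}
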
223 | |||
224 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); | ||
225 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); | ||
226 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); | ||
227 | extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); | ||
228 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); | ||
229 | |||
230 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); | ||
231 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | ||
232 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | ||
233 | extern int l2tp_session_delete(struct l2tp_session *session); | ||
234 | extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
235 | extern void l2tp_session_free(struct l2tp_session *session); | ||
236 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); | ||
237 | extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb)); | ||
238 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | ||
239 | |||
240 | extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len); | ||
241 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); | ||
242 | extern void l2tp_tunnel_destruct(struct sock *sk); | ||
243 | extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
244 | extern void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
245 | |||
246 | extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); | ||
247 | extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); | ||
248 | |||
249 | /* Tunnel reference counts. Incremented per session that is added to | ||
250 | * the tunnel. | ||
251 | */ | ||
252 | static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) | ||
253 | { | ||
254 | atomic_inc(&tunnel->ref_count); | ||
255 | } | ||
256 | |||
257 | static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) | ||
258 | { | ||
259 | if (atomic_dec_and_test(&tunnel->ref_count)) | ||
260 | l2tp_tunnel_free(tunnel); | ||
261 | } | ||
262 | #ifdef L2TP_REFCNT_DEBUG | ||
263 | #define l2tp_tunnel_inc_refcount(_t) do { \ | ||
264 | printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
265 | l2tp_tunnel_inc_refcount_1(_t); \ | ||
266 | } while (0) | ||
267 | #define l2tp_tunnel_dec_refcount(_t) do { \ | ||
268 | printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
269 | l2tp_tunnel_dec_refcount_1(_t); \ | ||
270 | } while (0) | ||
271 | #else | ||
272 | #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) | ||
273 | #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) | ||
274 | #endif | ||
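
A minimal sketch of the intended pairing; the caller name is hypothetical:

/* A caller that uses a tunnel outside the locks that found it bumps
 * the count first and drops it when done; the final put frees the
 * tunnel via l2tp_tunnel_free().
 */
static void use_tunnel(struct l2tp_tunnel *tunnel)
{
	l2tp_tunnel_inc_refcount(tunnel);
	/* ... safe to touch *tunnel here, even as sessions close ... */
	l2tp_tunnel_dec_refcount(tunnel);	/* may free the tunnel */
}
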
275 | |||
276 | /* Session reference counts. Incremented when code obtains a reference | ||
277 | * to a session. | ||
278 | */ | ||
279 | static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session) | ||
280 | { | ||
281 | atomic_inc(&session->ref_count); | ||
282 | } | ||
283 | |||
284 | static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session) | ||
285 | { | ||
286 | if (atomic_dec_and_test(&session->ref_count)) | ||
287 | l2tp_session_free(session); | ||
288 | } | ||
289 | |||
290 | #ifdef L2TP_REFCNT_DEBUG | ||
291 | #define l2tp_session_inc_refcount(_s) do { \ | ||
292 | printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ | ||
293 | l2tp_session_inc_refcount_1(_s); \ | ||
294 | } while (0) | ||
295 | #define l2tp_session_dec_refcount(_s) do { \ | ||
296 | printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ | ||
297 | l2tp_session_dec_refcount_1(_s); \ | ||
298 | } while (0) | ||
299 | #else | ||
300 | #define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s) | ||
301 | #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s) | ||
302 | #endif | ||
303 | |||
304 | #endif /* _L2TP_CORE_H_ */ | ||
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c new file mode 100644 index 000000000000..908f10f9720e --- /dev/null +++ b/net/l2tp/l2tp_debugfs.c | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * L2TP subsystem debugfs | ||
3 | * | ||
4 | * Copyright (c) 2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/socket.h> | ||
15 | #include <linux/hash.h> | ||
16 | #include <linux/l2tp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include <net/sock.h> | ||
22 | #include <net/ip.h> | ||
23 | #include <net/icmp.h> | ||
24 | #include <net/udp.h> | ||
25 | #include <net/inet_common.h> | ||
26 | #include <net/inet_hashtables.h> | ||
27 | #include <net/tcp_states.h> | ||
28 | #include <net/protocol.h> | ||
29 | #include <net/xfrm.h> | ||
30 | #include <net/net_namespace.h> | ||
31 | #include <net/netns/generic.h> | ||
32 | |||
33 | #include "l2tp_core.h" | ||
34 | |||
35 | static struct dentry *rootdir; | ||
36 | static struct dentry *tunnels; | ||
37 | |||
38 | struct l2tp_dfs_seq_data { | ||
39 | struct net *net; | ||
40 | int tunnel_idx; /* current tunnel */ | ||
41 | int session_idx; /* index of session within current tunnel */ | ||
42 | struct l2tp_tunnel *tunnel; | ||
43 | struct l2tp_session *session; /* NULL means get next tunnel */ | ||
44 | }; | ||
45 | |||
46 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) | ||
47 | { | ||
48 | pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); | ||
49 | pd->tunnel_idx++; | ||
50 | } | ||
51 | |||
52 | static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) | ||
53 | { | ||
54 | pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); | ||
55 | pd->session_idx++; | ||
56 | |||
57 | if (pd->session == NULL) { | ||
58 | pd->session_idx = 0; | ||
59 | l2tp_dfs_next_tunnel(pd); | ||
60 | } | ||
61 | |||
62 | } | ||
63 | |||
64 | static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs) | ||
65 | { | ||
66 | struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN; | ||
67 | loff_t pos = *offs; | ||
68 | |||
69 | if (!pos) | ||
70 | goto out; | ||
71 | |||
72 | BUG_ON(m->private == NULL); | ||
73 | pd = m->private; | ||
74 | |||
75 | if (pd->tunnel == NULL) | ||
76 | l2tp_dfs_next_tunnel(pd); | ||
77 | else | ||
78 | l2tp_dfs_next_session(pd); | ||
79 | |||
80 | /* NULL tunnel and session indicates end of list */ | ||
81 | if ((pd->tunnel == NULL) && (pd->session == NULL)) | ||
82 | pd = NULL; | ||
83 | |||
84 | out: | ||
85 | return pd; | ||
86 | } | ||
87 | |||
88 | |||
89 | static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
90 | { | ||
91 | (*pos)++; | ||
92 | return NULL; | ||
93 | } | ||
94 | |||
95 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) | ||
96 | { | ||
97 | /* nothing to do */ | ||
98 | } | ||
99 | |||
100 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | ||
101 | { | ||
102 | struct l2tp_tunnel *tunnel = v; | ||
103 | int session_count = 0; | ||
104 | int hash; | ||
105 | struct hlist_node *walk; | ||
106 | struct hlist_node *tmp; | ||
107 | |||
108 | read_lock_bh(&tunnel->hlist_lock); | ||
109 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
110 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { | ||
111 | struct l2tp_session *session; | ||
112 | |||
113 | session = hlist_entry(walk, struct l2tp_session, hlist); | ||
114 | if (session->session_id == 0) | ||
115 | continue; | ||
116 | |||
117 | session_count++; | ||
118 | } | ||
119 | } | ||
120 | read_unlock_bh(&tunnel->hlist_lock); | ||
121 | |||
122 | seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); | ||
123 | if (tunnel->sock) { | ||
124 | struct inet_sock *inet = inet_sk(tunnel->sock); | ||
125 | seq_printf(m, " from " NIPQUAD_FMT " to " NIPQUAD_FMT "\n", | ||
126 | NIPQUAD(inet->inet_saddr), NIPQUAD(inet->inet_daddr)); | ||
127 | if (tunnel->encap == L2TP_ENCAPTYPE_UDP) | ||
128 | seq_printf(m, " source port %hu, dest port %hu\n", | ||
129 | ntohs(inet->inet_sport), ntohs(inet->inet_dport)); | ||
130 | } | ||
131 | seq_printf(m, " L2TPv%d, %s\n", tunnel->version, | ||
132 | tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" : | ||
133 | tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" : | ||
134 | ""); | ||
135 | seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count, | ||
136 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, | ||
137 | atomic_read(&tunnel->ref_count)); | ||
138 | |||
139 | seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", | ||
140 | tunnel->debug, | ||
141 | (unsigned long long)tunnel->stats.tx_packets, | ||
142 | (unsigned long long)tunnel->stats.tx_bytes, | ||
143 | (unsigned long long)tunnel->stats.tx_errors, | ||
144 | (unsigned long long)tunnel->stats.rx_packets, | ||
145 | (unsigned long long)tunnel->stats.rx_bytes, | ||
146 | (unsigned long long)tunnel->stats.rx_errors); | ||
147 | |||
148 | if (tunnel->show != NULL) | ||
149 | tunnel->show(m, tunnel); | ||
150 | } | ||
151 | |||
152 | static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) | ||
153 | { | ||
154 | struct l2tp_session *session = v; | ||
155 | |||
156 | seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id, | ||
157 | session->peer_session_id, | ||
158 | session->pwtype == L2TP_PWTYPE_ETH ? "ETH" : | ||
159 | session->pwtype == L2TP_PWTYPE_PPP ? "PPP" : | ||
160 | ""); | ||
161 | if (session->send_seq || session->recv_seq) | ||
162 | seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns); | ||
163 | seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count)); | ||
164 | seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n", | ||
165 | session->mtu, session->mru, | ||
166 | session->recv_seq ? 'R' : '-', | ||
167 | session->send_seq ? 'S' : '-', | ||
168 | session->data_seq == 1 ? "IPSEQ" : | ||
169 | session->data_seq == 2 ? "DATASEQ" : "-", | ||
170 | session->lns_mode ? "LNS" : "LAC", | ||
171 | session->debug, | ||
172 | jiffies_to_msecs(session->reorder_timeout)); | ||
173 | seq_printf(m, " offset %hu l2specific %hu/%hu\n", | ||
174 | session->offset, session->l2specific_type, session->l2specific_len); | ||
175 | if (session->cookie_len) { | ||
176 | seq_printf(m, " cookie %02x%02x%02x%02x", | ||
177 | session->cookie[0], session->cookie[1], | ||
178 | session->cookie[2], session->cookie[3]); | ||
179 | if (session->cookie_len == 8) | ||
180 | seq_printf(m, "%02x%02x%02x%02x", | ||
181 | session->cookie[4], session->cookie[5], | ||
182 | session->cookie[6], session->cookie[7]); | ||
183 | seq_printf(m, "\n"); | ||
184 | } | ||
185 | if (session->peer_cookie_len) { | ||
186 | seq_printf(m, " peer cookie %02x%02x%02x%02x", | ||
187 | session->peer_cookie[0], session->peer_cookie[1], | ||
188 | session->peer_cookie[2], session->peer_cookie[3]); | ||
189 | if (session->peer_cookie_len == 8) | ||
190 | seq_printf(m, "%02x%02x%02x%02x", | ||
191 | session->peer_cookie[4], session->peer_cookie[5], | ||
192 | session->peer_cookie[6], session->peer_cookie[7]); | ||
193 | seq_printf(m, "\n"); | ||
194 | } | ||
195 | |||
196 | seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", | ||
197 | session->nr, session->ns, | ||
198 | (unsigned long long)session->stats.tx_packets, | ||
199 | (unsigned long long)session->stats.tx_bytes, | ||
200 | (unsigned long long)session->stats.tx_errors, | ||
201 | (unsigned long long)session->stats.rx_packets, | ||
202 | (unsigned long long)session->stats.rx_bytes, | ||
203 | (unsigned long long)session->stats.rx_errors); | ||
204 | |||
205 | if (session->show != NULL) | ||
206 | session->show(m, session); | ||
207 | } | ||
208 | |||
209 | static int l2tp_dfs_seq_show(struct seq_file *m, void *v) | ||
210 | { | ||
211 | struct l2tp_dfs_seq_data *pd = v; | ||
212 | |||
213 | /* display header on line 1 */ | ||
214 | if (v == SEQ_START_TOKEN) { | ||
215 | seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n"); | ||
216 | seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n"); | ||
217 | seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n"); | ||
218 | seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
219 | seq_puts(m, " SESSION ID, peer ID, PWTYPE\n"); | ||
220 | seq_puts(m, " refcnt cnt\n"); | ||
221 | seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n"); | ||
222 | seq_puts(m, " [ cookie ]\n"); | ||
223 | seq_puts(m, " [ peer cookie ]\n"); | ||
224 | seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n"); | ||
225 | seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
226 | goto out; | ||
227 | } | ||
228 | |||
229 | /* Show the tunnel or session context */ | ||
230 | if (pd->session == NULL) | ||
231 | l2tp_dfs_seq_tunnel_show(m, pd->tunnel); | ||
232 | else | ||
233 | l2tp_dfs_seq_session_show(m, pd->session); | ||
234 | |||
235 | out: | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static const struct seq_operations l2tp_dfs_seq_ops = { | ||
240 | .start = l2tp_dfs_seq_start, | ||
241 | .next = l2tp_dfs_seq_next, | ||
242 | .stop = l2tp_dfs_seq_stop, | ||
243 | .show = l2tp_dfs_seq_show, | ||
244 | }; | ||
245 | |||
246 | static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) | ||
247 | { | ||
248 | struct l2tp_dfs_seq_data *pd; | ||
249 | struct seq_file *seq; | ||
250 | int rc = -ENOMEM; | ||
251 | |||
252 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | ||
253 | if (pd == NULL) | ||
254 | goto out; | ||
255 | |||
256 | /* Derive the network namespace from the pid opening the | ||
257 | * file. | ||
258 | */ | ||
259 | pd->net = get_net_ns_by_pid(current->pid); | ||
260 | if (IS_ERR(pd->net)) { | ||
261 | rc = -PTR_ERR(pd->net); | ||
262 | goto err_free_pd; | ||
263 | } | ||
264 | |||
265 | rc = seq_open(file, &l2tp_dfs_seq_ops); | ||
266 | if (rc) | ||
267 | goto err_free_net; | ||
268 | |||
269 | seq = file->private_data; | ||
270 | seq->private = pd; | ||
271 | |||
272 | out: | ||
273 | return rc; | ||
274 | |||
275 | err_free_net: | ||
276 | put_net(pd->net); | ||
277 | err_free_pd: | ||
278 | kfree(pd); | ||
279 | goto out; | ||
280 | } | ||
281 | |||
282 | static int l2tp_dfs_seq_release(struct inode *inode, struct file *file) | ||
283 | { | ||
284 | struct l2tp_dfs_seq_data *pd; | ||
285 | struct seq_file *seq; | ||
286 | |||
287 | seq = file->private_data; | ||
288 | pd = seq->private; | ||
289 | if (pd->net) | ||
290 | put_net(pd->net); | ||
291 | kfree(pd); | ||
292 | seq_release(inode, file); | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | static const struct file_operations l2tp_dfs_fops = { | ||
298 | .owner = THIS_MODULE, | ||
299 | .open = l2tp_dfs_seq_open, | ||
300 | .read = seq_read, | ||
301 | .llseek = seq_lseek, | ||
302 | .release = l2tp_dfs_seq_release, | ||
303 | }; | ||
304 | |||
305 | static int __init l2tp_debugfs_init(void) | ||
306 | { | ||
307 | int rc = 0; | ||
308 | |||
309 | rootdir = debugfs_create_dir("l2tp", NULL); | ||
310 | if (IS_ERR(rootdir)) { | ||
311 | rc = PTR_ERR(rootdir); | ||
312 | rootdir = NULL; | ||
313 | goto out; | ||
314 | } | ||
315 | |||
316 | tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops); | ||
317 | if (tunnels == NULL) | ||
318 | rc = -EIO; | ||
319 | |||
320 | printk(KERN_INFO "L2TP debugfs support\n"); | ||
321 | |||
322 | out: | ||
323 | if (rc) | ||
324 | printk(KERN_WARNING "l2tp debugfs: unable to init\n"); | ||
325 | |||
326 | return rc; | ||
327 | } | ||
328 | |||
329 | static void __exit l2tp_debugfs_exit(void) | ||
330 | { | ||
331 | debugfs_remove(tunnels); | ||
332 | debugfs_remove(rootdir); | ||
333 | } | ||
334 | |||
335 | module_init(l2tp_debugfs_init); | ||
336 | module_exit(l2tp_debugfs_exit); | ||
337 | |||
338 | MODULE_LICENSE("GPL"); | ||
339 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
340 | MODULE_DESCRIPTION("L2TP debugfs driver"); | ||
341 | MODULE_VERSION("1.0"); | ||
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c new file mode 100644 index 000000000000..ca1164afeb74 --- /dev/null +++ b/net/l2tp/l2tp_eth.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * L2TPv3 ethernet pseudowire driver | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/socket.h> | ||
15 | #include <linux/hash.h> | ||
16 | #include <linux/l2tp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/ip.h> | ||
22 | #include <net/icmp.h> | ||
23 | #include <net/udp.h> | ||
24 | #include <net/inet_common.h> | ||
25 | #include <net/inet_hashtables.h> | ||
26 | #include <net/tcp_states.h> | ||
27 | #include <net/protocol.h> | ||
28 | #include <net/xfrm.h> | ||
29 | #include <net/net_namespace.h> | ||
30 | #include <net/netns/generic.h> | ||
31 | |||
32 | #include "l2tp_core.h" | ||
33 | |||
34 | /* Default device name. May be overridden by name specified by user */ | ||
35 | #define L2TP_ETH_DEV_NAME "l2tpeth%d" | ||
36 | |||
37 | /* via netdev_priv() */ | ||
38 | struct l2tp_eth { | ||
39 | struct net_device *dev; | ||
40 | struct sock *tunnel_sock; | ||
41 | struct l2tp_session *session; | ||
42 | struct list_head list; | ||
43 | }; | ||
44 | |||
45 | /* via l2tp_session_priv() */ | ||
46 | struct l2tp_eth_sess { | ||
47 | struct net_device *dev; | ||
48 | }; | ||
49 | |||
50 | /* per-net private data for this module */ | ||
51 | static unsigned int l2tp_eth_net_id; | ||
52 | struct l2tp_eth_net { | ||
53 | struct list_head l2tp_eth_dev_list; | ||
54 | spinlock_t l2tp_eth_lock; | ||
55 | }; | ||
56 | |||
57 | static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) | ||
58 | { | ||
59 | return net_generic(net, l2tp_eth_net_id); | ||
60 | } | ||
61 | |||
62 | static int l2tp_eth_dev_init(struct net_device *dev) | ||
63 | { | ||
64 | struct l2tp_eth *priv = netdev_priv(dev); | ||
65 | |||
66 | priv->dev = dev; | ||
67 | random_ether_addr(dev->dev_addr); | ||
68 | memset(&dev->broadcast[0], 0xff, 6); | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static void l2tp_eth_dev_uninit(struct net_device *dev) | ||
74 | { | ||
75 | struct l2tp_eth *priv = netdev_priv(dev); | ||
76 | struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); | ||
77 | |||
78 | spin_lock(&pn->l2tp_eth_lock); | ||
79 | list_del_init(&priv->list); | ||
80 | spin_unlock(&pn->l2tp_eth_lock); | ||
81 | dev_put(dev); | ||
82 | } | ||
83 | |||
84 | static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) | ||
85 | { | ||
86 | struct l2tp_eth *priv = netdev_priv(dev); | ||
87 | struct l2tp_session *session = priv->session; | ||
88 | |||
89 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
90 | |||
91 | dev->stats.tx_bytes += skb->len; | ||
92 | dev->stats.tx_packets++; | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static struct net_device_ops l2tp_eth_netdev_ops = { | ||
98 | .ndo_init = l2tp_eth_dev_init, | ||
99 | .ndo_uninit = l2tp_eth_dev_uninit, | ||
100 | .ndo_start_xmit = l2tp_eth_dev_xmit, | ||
101 | }; | ||
102 | |||
103 | static void l2tp_eth_dev_setup(struct net_device *dev) | ||
104 | { | ||
105 | ether_setup(dev); | ||
106 | |||
107 | dev->netdev_ops = &l2tp_eth_netdev_ops; | ||
108 | dev->destructor = free_netdev; | ||
109 | } | ||
110 | |||
111 | static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | ||
112 | { | ||
113 | struct l2tp_eth_sess *spriv = l2tp_session_priv(session); | ||
114 | struct net_device *dev = spriv->dev; | ||
115 | |||
116 | if (session->debug & L2TP_MSG_DATA) { | ||
117 | unsigned int length; | ||
118 | int offset; | ||
119 | u8 *ptr = skb->data; | ||
120 | |||
121 | length = min(32u, skb->len); | ||
122 | if (!pskb_may_pull(skb, length)) | ||
123 | goto error; | ||
124 | |||
125 | printk(KERN_DEBUG "%s: eth recv: ", session->name); | ||
126 | |||
127 | offset = 0; | ||
128 | do { | ||
129 | printk(" %02X", ptr[offset]); | ||
130 | } while (++offset < length); | ||
131 | |||
132 | printk("\n"); | ||
133 | } | ||
134 | |||
135 | if (data_len < ETH_HLEN) | ||
136 | goto error; | ||
137 | |||
138 | secpath_reset(skb); | ||
139 | |||
140 | /* checksums verified by L2TP */ | ||
141 | skb->ip_summed = CHECKSUM_NONE; | ||
142 | |||
143 | skb_dst_drop(skb); | ||
144 | nf_reset(skb); | ||
145 | |||
146 | if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { | ||
147 | dev->last_rx = jiffies; | ||
148 | dev->stats.rx_packets++; | ||
149 | dev->stats.rx_bytes += data_len; | ||
150 | } else | ||
151 | dev->stats.rx_errors++; | ||
152 | |||
153 | return; | ||
154 | |||
155 | error: | ||
156 | dev->stats.rx_errors++; | ||
157 | kfree_skb(skb); | ||
158 | } | ||
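Note on the receive path above: dev_forward_skb() hands the decapsulated Ethernet frame to the stack as though it had been received on the pseudowire netdev, and returns NET_RX_DROP — counted here as an rx error — when the device is down or the frame is too large for it.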
159 | |||
160 | static void l2tp_eth_delete(struct l2tp_session *session) | ||
161 | { | ||
162 | struct l2tp_eth_sess *spriv; | ||
163 | struct net_device *dev; | ||
164 | |||
165 | if (session) { | ||
166 | spriv = l2tp_session_priv(session); | ||
167 | dev = spriv->dev; | ||
168 | if (dev) { | ||
169 | unregister_netdev(dev); | ||
170 | spriv->dev = NULL; | ||
171 | } | ||
172 | } | ||
173 | } | ||
174 | |||
175 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
176 | static void l2tp_eth_show(struct seq_file *m, void *arg) | ||
177 | { | ||
178 | struct l2tp_session *session = arg; | ||
179 | struct l2tp_eth_sess *spriv = l2tp_session_priv(session); | ||
180 | struct net_device *dev = spriv->dev; | ||
181 | |||
182 | seq_printf(m, " interface %s\n", dev->name); | ||
183 | } | ||
184 | #endif | ||
185 | |||
186 | static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
187 | { | ||
188 | struct net_device *dev; | ||
189 | char name[IFNAMSIZ]; | ||
190 | struct l2tp_tunnel *tunnel; | ||
191 | struct l2tp_session *session; | ||
192 | struct l2tp_eth *priv; | ||
193 | struct l2tp_eth_sess *spriv; | ||
194 | int rc; | ||
195 | struct l2tp_eth_net *pn; | ||
196 | |||
197 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
198 | if (!tunnel) { | ||
199 | rc = -ENODEV; | ||
200 | goto out; | ||
201 | } | ||
202 | |||
203 | session = l2tp_session_find(net, tunnel, session_id); | ||
204 | if (session) { | ||
205 | rc = -EEXIST; | ||
206 | goto out; | ||
207 | } | ||
208 | |||
209 | if (cfg->ifname) { | ||
210 | dev = dev_get_by_name(net, cfg->ifname); | ||
211 | if (dev) { | ||
212 | dev_put(dev); | ||
213 | rc = -EEXIST; | ||
214 | goto out; | ||
215 | } | ||
216 | strlcpy(name, cfg->ifname, IFNAMSIZ); | ||
217 | } else | ||
218 | strcpy(name, L2TP_ETH_DEV_NAME); | ||
219 | |||
220 | session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, | ||
221 | peer_session_id, cfg); | ||
222 | if (!session) { | ||
223 | rc = -ENOMEM; | ||
224 | goto out; | ||
225 | } | ||
226 | |||
227 | dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup); | ||
228 | if (!dev) { | ||
229 | rc = -ENOMEM; | ||
230 | goto out_del_session; | ||
231 | } | ||
232 | |||
233 | dev_net_set(dev, net); | ||
234 | if (session->mtu == 0) | ||
235 | session->mtu = dev->mtu - session->hdr_len; | ||
236 | dev->mtu = session->mtu; | ||
237 | dev->needed_headroom += session->hdr_len; | ||
238 | |||
239 | priv = netdev_priv(dev); | ||
240 | priv->dev = dev; | ||
241 | priv->session = session; | ||
242 | INIT_LIST_HEAD(&priv->list); | ||
243 | |||
244 | priv->tunnel_sock = tunnel->sock; | ||
245 | session->recv_skb = l2tp_eth_dev_recv; | ||
246 | session->session_close = l2tp_eth_delete; | ||
247 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
248 | session->show = l2tp_eth_show; | ||
249 | #endif | ||
250 | |||
251 | spriv = l2tp_session_priv(session); | ||
252 | spriv->dev = dev; | ||
253 | |||
254 | rc = register_netdev(dev); | ||
255 | if (rc < 0) | ||
256 | goto out_del_dev; | ||
257 | |||
258 | /* Must be done after register_netdev() */ | ||
259 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | ||
260 | |||
261 | dev_hold(dev); | ||
262 | pn = l2tp_eth_pernet(dev_net(dev)); | ||
263 | spin_lock(&pn->l2tp_eth_lock); | ||
264 | list_add(&priv->list, &pn->l2tp_eth_dev_list); | ||
265 | spin_unlock(&pn->l2tp_eth_lock); | ||
266 | |||
267 | return 0; | ||
268 | |||
269 | out_del_dev: | ||
270 | free_netdev(dev); | ||
271 | out_del_session: | ||
272 | l2tp_session_delete(session); | ||
273 | out: | ||
274 | return rc; | ||
275 | } | ||
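A worked example of the MTU derivation in l2tp_eth_create() (numbers hypothetical): ether_setup() leaves dev->mtu at the Ethernet default of 1500, so if session->hdr_len came to 8 bytes of L2TPv3 encapsulation and no MTU was configured, session->mtu would become 1500 - 8 = 1492; the device MTU is then clamped to that value, and needed_headroom grows by the same 8 bytes so the encapsulation header can be pushed without reallocating the skb.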
276 | |||
277 | static __net_init int l2tp_eth_init_net(struct net *net) | ||
278 | { | ||
279 | struct l2tp_eth_net *pn; | ||
280 | int err; | ||
281 | |||
282 | pn = kzalloc(sizeof(*pn), GFP_KERNEL); | ||
283 | if (!pn) | ||
284 | return -ENOMEM; | ||
285 | |||
286 | INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); | ||
287 | spin_lock_init(&pn->l2tp_eth_lock); | ||
288 | |||
289 | err = net_assign_generic(net, l2tp_eth_net_id, pn); | ||
290 | if (err) | ||
291 | goto out; | ||
292 | |||
293 | return 0; | ||
294 | |||
295 | out: | ||
296 | kfree(pn); | ||
297 | return err; | ||
298 | } | ||
299 | |||
300 | static __net_exit void l2tp_eth_exit_net(struct net *net) | ||
301 | { | ||
302 | struct l2tp_eth_net *pn; | ||
303 | |||
304 | pn = net_generic(net, l2tp_eth_net_id); | ||
305 | /* | ||
306 | * If someone has cached our net, a further | ||
307 | * net_generic() call will return NULL. | ||
308 | */ | ||
309 | net_assign_generic(net, l2tp_eth_net_id, NULL); | ||
310 | kfree(pn); | ||
311 | } | ||
312 | |||
313 | static __net_initdata struct pernet_operations l2tp_eth_net_ops = { | ||
314 | .init = l2tp_eth_init_net, | ||
315 | .exit = l2tp_eth_exit_net, | ||
316 | .id = &l2tp_eth_net_id, | ||
317 | .size = sizeof(struct l2tp_eth_net), | ||
318 | }; | ||
319 | |||
320 | |||
321 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { | ||
322 | .session_create = l2tp_eth_create, | ||
323 | .session_delete = l2tp_session_delete, | ||
324 | }; | ||
325 | |||
326 | |||
327 | static int __init l2tp_eth_init(void) | ||
328 | { | ||
329 | int err = 0; | ||
330 | |||
331 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); | ||
332 | if (err) | ||
333 | goto out; | ||
334 | |||
335 | err = register_pernet_device(&l2tp_eth_net_ops); | ||
336 | if (err) | ||
337 | goto out_unreg; | ||
338 | |||
339 | printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); | ||
340 | |||
341 | return 0; | ||
342 | |||
343 | out_unreg: | ||
344 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
345 | out: | ||
346 | return err; | ||
347 | } | ||
348 | |||
349 | static void __exit l2tp_eth_exit(void) | ||
350 | { | ||
351 | unregister_pernet_device(&l2tp_eth_net_ops); | ||
352 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
353 | } | ||
354 | |||
355 | module_init(l2tp_eth_init); | ||
356 | module_exit(l2tp_eth_exit); | ||
357 | |||
358 | MODULE_LICENSE("GPL"); | ||
359 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
360 | MODULE_DESCRIPTION("L2TP ethernet pseudowire driver"); | ||
361 | MODULE_VERSION("1.0"); | ||
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c new file mode 100644 index 000000000000..75bf784ba18d --- /dev/null +++ b/net/l2tp/l2tp_ip.c | |||
@@ -0,0 +1,679 @@ | |||
1 | /* | ||
2 | * L2TPv3 IP encapsulation support | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/icmp.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/random.h> | ||
16 | #include <linux/socket.h> | ||
17 | #include <linux/l2tp.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <net/sock.h> | ||
20 | #include <net/ip.h> | ||
21 | #include <net/icmp.h> | ||
22 | #include <net/udp.h> | ||
23 | #include <net/inet_common.h> | ||
24 | #include <net/inet_hashtables.h> | ||
25 | #include <net/tcp_states.h> | ||
26 | #include <net/protocol.h> | ||
27 | #include <net/xfrm.h> | ||
28 | |||
29 | #include "l2tp_core.h" | ||
30 | |||
31 | struct l2tp_ip_sock { | ||
32 | /* inet_sock has to be the first member of l2tp_ip_sock */ | ||
33 | struct inet_sock inet; | ||
34 | |||
35 | __u32 conn_id; | ||
36 | __u32 peer_conn_id; | ||
37 | |||
38 | __u64 tx_packets; | ||
39 | __u64 tx_bytes; | ||
40 | __u64 tx_errors; | ||
41 | __u64 rx_packets; | ||
42 | __u64 rx_bytes; | ||
43 | __u64 rx_errors; | ||
44 | }; | ||
45 | |||
46 | static DEFINE_RWLOCK(l2tp_ip_lock); | ||
47 | static struct hlist_head l2tp_ip_table; | ||
48 | static struct hlist_head l2tp_ip_bind_table; | ||
49 | |||
50 | static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) | ||
51 | { | ||
52 | return (struct l2tp_ip_sock *)sk; | ||
53 | } | ||
54 | |||
55 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | ||
56 | { | ||
57 | struct hlist_node *node; | ||
58 | struct sock *sk; | ||
59 | |||
60 | sk_for_each_bound(sk, node, &l2tp_ip_bind_table) { | ||
61 | struct inet_sock *inet = inet_sk(sk); | ||
62 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); | ||
63 | |||
64 | if (l2tp == NULL) | ||
65 | continue; | ||
66 | |||
67 | if ((l2tp->conn_id == tunnel_id) && | ||
68 | /* net_eq()/sock_net() handle both CONFIG_NET_NS=y | ||
69 | * and =n, so no #ifdef is needed here. */ | ||
70 | net_eq(sock_net(sk), net) && | ||
71 | !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && | ||
72 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) | ||
73 | goto found; | ||
74 | } | ||
75 | |||
76 | sk = NULL; | ||
77 | found: | ||
78 | return sk; | ||
79 | } | ||
80 | |||
81 | static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | ||
82 | { | ||
83 | struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); | ||
84 | if (sk) | ||
85 | sock_hold(sk); | ||
86 | |||
87 | return sk; | ||
88 | } | ||
89 | |||
90 | /* When processing receive frames, there are two cases to | ||
91 | * consider. Data frames consist of a non-zero session-id and an | ||
92 | * optional cookie. Control frames consist of a regular L2TP header | ||
93 | * preceded by 32-bits of zeros. | ||
94 | * | ||
95 | * L2TPv3 Session Header Over IP | ||
96 | * | ||
97 | * 0 1 2 3 | ||
98 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
99 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
100 | * | Session ID | | ||
101 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
102 | * | Cookie (optional, maximum 64 bits)... | ||
103 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
104 | * | | ||
105 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
106 | * | ||
107 | * L2TPv3 Control Message Header Over IP | ||
108 | * | ||
109 | * 0 1 2 3 | ||
110 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
111 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
112 | * | (32 bits of zeros) | | ||
113 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
114 | * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | | ||
115 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
116 | * | Control Connection ID | | ||
117 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
118 | * | Ns | Nr | | ||
119 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
120 | * | ||
121 | * All control frames are passed to userspace. | ||
122 | */ | ||
123 | static int l2tp_ip_recv(struct sk_buff *skb) | ||
124 | { | ||
125 | struct sock *sk; | ||
126 | u32 session_id; | ||
127 | u32 tunnel_id; | ||
128 | unsigned char *ptr, *optr; | ||
129 | struct l2tp_session *session; | ||
130 | struct l2tp_tunnel *tunnel = NULL; | ||
131 | int length; | ||
132 | int offset; | ||
133 | |||
134 | /* Point to L2TP header */ | ||
135 | optr = ptr = skb->data; | ||
136 | |||
137 | if (!pskb_may_pull(skb, 4)) | ||
138 | goto discard; | ||
139 | |||
140 | session_id = ntohl(*((__be32 *) ptr)); | ||
141 | ptr += 4; | ||
142 | |||
143 | /* RFC3931: L2TP/IP packets have the first 4 bytes containing | ||
144 | * the session_id. If it is 0, the packet is an L2TP control | ||
145 | * frame and the session_id value can be discarded. | ||
146 | */ | ||
147 | if (session_id == 0) { | ||
148 | __skb_pull(skb, 4); | ||
149 | goto pass_up; | ||
150 | } | ||
151 | |||
152 | /* Ok, this is a data packet. Lookup the session. */ | ||
153 | session = l2tp_session_find(&init_net, NULL, session_id); | ||
154 | if (session == NULL) | ||
155 | goto discard; | ||
156 | |||
157 | tunnel = session->tunnel; | ||
158 | if (tunnel == NULL) | ||
159 | goto discard; | ||
160 | |||
161 | /* Trace packet contents, if enabled */ | ||
162 | if (tunnel->debug & L2TP_MSG_DATA) { | ||
163 | length = min(32u, skb->len); | ||
164 | if (!pskb_may_pull(skb, length)) | ||
165 | goto discard; | ||
166 | |||
167 | printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); | ||
168 | |||
169 | offset = 0; | ||
170 | do { | ||
171 | printk(" %02X", ptr[offset]); | ||
172 | } while (++offset < length); | ||
173 | |||
174 | printk("\n"); | ||
175 | } | ||
176 | |||
177 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); | ||
178 | |||
179 | return 0; | ||
180 | |||
181 | pass_up: | ||
182 | /* Get the tunnel_id from the L2TP header */ | ||
183 | if (!pskb_may_pull(skb, 12)) | ||
184 | goto discard; | ||
185 | |||
186 | if ((skb->data[0] & 0xc0) != 0xc0) | ||
187 | goto discard; | ||
188 | |||
189 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); | ||
190 | tunnel = l2tp_tunnel_find(&init_net, tunnel_id); | ||
191 | if (tunnel != NULL) | ||
192 | sk = tunnel->sock; | ||
193 | else { | ||
194 | struct iphdr *iph = (struct iphdr *) skb_network_header(skb); | ||
195 | |||
196 | read_lock_bh(&l2tp_ip_lock); | ||
197 | sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id); | ||
198 | read_unlock_bh(&l2tp_ip_lock); | ||
199 | } | ||
200 | |||
201 | if (sk == NULL) | ||
202 | goto discard; | ||
203 | |||
204 | sock_hold(sk); | ||
205 | |||
206 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | ||
207 | goto discard_put; | ||
208 | |||
209 | nf_reset(skb); | ||
210 | |||
211 | return sk_receive_skb(sk, skb, 1); | ||
212 | |||
213 | discard_put: | ||
214 | sock_put(sk); | ||
215 | |||
216 | discard: | ||
217 | kfree_skb(skb); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static int l2tp_ip_open(struct sock *sk) | ||
222 | { | ||
223 | /* Prevent autobind. We don't have ports. */ | ||
224 | inet_sk(sk)->inet_num = IPPROTO_L2TP; | ||
225 | |||
226 | write_lock_bh(&l2tp_ip_lock); | ||
227 | sk_add_node(sk, &l2tp_ip_table); | ||
228 | write_unlock_bh(&l2tp_ip_lock); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static void l2tp_ip_close(struct sock *sk, long timeout) | ||
234 | { | ||
235 | write_lock_bh(&l2tp_ip_lock); | ||
236 | hlist_del_init(&sk->sk_bind_node); | ||
237 | hlist_del_init(&sk->sk_node); | ||
238 | write_unlock_bh(&l2tp_ip_lock); | ||
239 | sk_common_release(sk); | ||
240 | } | ||
241 | |||
242 | static void l2tp_ip_destroy_sock(struct sock *sk) | ||
243 | { | ||
244 | struct sk_buff *skb; | ||
245 | |||
246 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | ||
247 | kfree_skb(skb); | ||
248 | |||
249 | sk_refcnt_debug_dec(sk); | ||
250 | } | ||
251 | |||
252 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
253 | { | ||
254 | struct inet_sock *inet = inet_sk(sk); | ||
255 | struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; | ||
256 | int ret = -EINVAL; | ||
257 | int chk_addr_ret; | ||
258 | |||
259 | ret = -EADDRINUSE; | ||
260 | read_lock_bh(&l2tp_ip_lock); | ||
261 | if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) | ||
262 | goto out_in_use; | ||
263 | |||
264 | read_unlock_bh(&l2tp_ip_lock); | ||
265 | |||
266 | lock_sock(sk); | ||
267 | if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) | ||
268 | goto out; | ||
269 | |||
270 | chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr); | ||
271 | ret = -EADDRNOTAVAIL; | ||
272 | if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && | ||
273 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) | ||
274 | goto out; | ||
275 | |||
276 | inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; | ||
277 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | ||
278 | inet->inet_saddr = 0; /* Use device */ | ||
279 | sk_dst_reset(sk); | ||
280 | |||
281 | l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; | ||
282 | |||
283 | write_lock_bh(&l2tp_ip_lock); | ||
284 | sk_add_bind_node(sk, &l2tp_ip_bind_table); | ||
285 | sk_del_node_init(sk); | ||
286 | write_unlock_bh(&l2tp_ip_lock); | ||
287 | ret = 0; | ||
288 | out: | ||
289 | release_sock(sk); | ||
290 | |||
291 | return ret; | ||
292 | |||
293 | out_in_use: | ||
294 | read_unlock_bh(&l2tp_ip_lock); | ||
295 | |||
296 | return ret; | ||
297 | } | ||
298 | |||
299 | static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
300 | { | ||
301 | int rc; | ||
302 | struct inet_sock *inet = inet_sk(sk); | ||
303 | struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; | ||
304 | struct rtable *rt; | ||
305 | __be32 saddr; | ||
306 | int oif; | ||
307 | |||
308 | rc = -EINVAL; | ||
309 | if (addr_len < sizeof(*lsa)) | ||
310 | goto out; | ||
311 | |||
312 | rc = -EAFNOSUPPORT; | ||
313 | if (lsa->l2tp_family != AF_INET) | ||
314 | goto out; | ||
315 | |||
316 | sk_dst_reset(sk); | ||
317 | |||
318 | oif = sk->sk_bound_dev_if; | ||
319 | saddr = inet->inet_saddr; | ||
320 | |||
321 | rc = -EINVAL; | ||
322 | if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) | ||
323 | goto out; | ||
324 | |||
325 | rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr, | ||
326 | RT_CONN_FLAGS(sk), oif, | ||
327 | IPPROTO_L2TP, | ||
328 | 0, 0, sk, 1); | ||
329 | if (rc) { | ||
330 | if (rc == -ENETUNREACH) | ||
331 | IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); | ||
332 | goto out; | ||
333 | } | ||
334 | |||
335 | rc = -ENETUNREACH; | ||
336 | if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { | ||
337 | ip_rt_put(rt); | ||
338 | goto out; | ||
339 | } | ||
340 | |||
341 | l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; | ||
342 | |||
343 | if (!inet->inet_saddr) | ||
344 | inet->inet_saddr = rt->rt_src; | ||
345 | if (!inet->inet_rcv_saddr) | ||
346 | inet->inet_rcv_saddr = rt->rt_src; | ||
347 | inet->inet_daddr = rt->rt_dst; | ||
348 | sk->sk_state = TCP_ESTABLISHED; | ||
349 | inet->inet_id = jiffies; | ||
350 | |||
351 | sk_dst_set(sk, &rt->u.dst); | ||
352 | |||
353 | write_lock_bh(&l2tp_ip_lock); | ||
354 | hlist_del_init(&sk->sk_bind_node); | ||
355 | sk_add_bind_node(sk, &l2tp_ip_bind_table); | ||
356 | write_unlock_bh(&l2tp_ip_lock); | ||
357 | |||
358 | rc = 0; | ||
359 | out: | ||
360 | return rc; | ||
361 | } | ||
362 | |||
363 | static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, | ||
364 | int *uaddr_len, int peer) | ||
365 | { | ||
366 | struct sock *sk = sock->sk; | ||
367 | struct inet_sock *inet = inet_sk(sk); | ||
368 | struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); | ||
369 | struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; | ||
370 | |||
371 | memset(lsa, 0, sizeof(*lsa)); | ||
372 | lsa->l2tp_family = AF_INET; | ||
373 | if (peer) { | ||
374 | if (!inet->inet_dport) | ||
375 | return -ENOTCONN; | ||
376 | lsa->l2tp_conn_id = lsk->peer_conn_id; | ||
377 | lsa->l2tp_addr.s_addr = inet->inet_daddr; | ||
378 | } else { | ||
379 | __be32 addr = inet->inet_rcv_saddr; | ||
380 | if (!addr) | ||
381 | addr = inet->inet_saddr; | ||
382 | lsa->l2tp_conn_id = lsk->conn_id; | ||
383 | lsa->l2tp_addr.s_addr = addr; | ||
384 | } | ||
385 | *uaddr_len = sizeof(*lsa); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
390 | { | ||
391 | int rc; | ||
392 | |||
393 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | ||
394 | goto drop; | ||
395 | |||
396 | nf_reset(skb); | ||
397 | |||
398 | /* Charge it to the socket, dropping if the queue is full. */ | ||
399 | rc = sock_queue_rcv_skb(sk, skb); | ||
400 | if (rc < 0) | ||
401 | goto drop; | ||
402 | |||
403 | return 0; | ||
404 | |||
405 | drop: | ||
406 | IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); | ||
407 | kfree_skb(skb); | ||
408 | return -1; | ||
409 | } | ||
410 | |||
411 | /* Userspace will call sendmsg() on the tunnel socket to send L2TP | ||
412 | * control frames. | ||
413 | */ | ||
414 | static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) | ||
415 | { | ||
416 | struct sk_buff *skb; | ||
417 | int rc; | ||
418 | struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); | ||
419 | struct inet_sock *inet = inet_sk(sk); | ||
420 | struct ip_options *opt = inet->opt; | ||
421 | struct rtable *rt = NULL; | ||
422 | int connected = 0; | ||
423 | __be32 daddr; | ||
424 | |||
425 | if (sock_flag(sk, SOCK_DEAD)) | ||
426 | return -ENOTCONN; | ||
427 | |||
428 | /* Get and verify the address. */ | ||
429 | if (msg->msg_name) { | ||
430 | struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name; | ||
431 | if (msg->msg_namelen < sizeof(*lip)) | ||
432 | return -EINVAL; | ||
433 | |||
434 | if (lip->l2tp_family != AF_INET) { | ||
435 | if (lip->l2tp_family != AF_UNSPEC) | ||
436 | return -EAFNOSUPPORT; | ||
437 | } | ||
438 | |||
439 | daddr = lip->l2tp_addr.s_addr; | ||
440 | } else { | ||
441 | if (sk->sk_state != TCP_ESTABLISHED) | ||
442 | return -EDESTADDRREQ; | ||
443 | |||
444 | daddr = inet->inet_daddr; | ||
445 | connected = 1; | ||
446 | } | ||
447 | |||
448 | /* Allocate a socket buffer */ | ||
449 | rc = -ENOMEM; | ||
450 | skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + | ||
451 | 4 + len, 0, GFP_KERNEL); | ||
452 | if (!skb) | ||
453 | goto error; | ||
454 | |||
455 | /* Reserve space for headers, putting IP header on 4-byte boundary. */ | ||
456 | skb_reserve(skb, 2 + NET_SKB_PAD); | ||
457 | skb_reset_network_header(skb); | ||
458 | skb_reserve(skb, sizeof(struct iphdr)); | ||
459 | skb_reset_transport_header(skb); | ||
460 | |||
461 | /* Insert 0 session_id */ | ||
462 | *((__be32 *) skb_put(skb, 4)) = 0; | ||
463 | |||
464 | /* Copy user data into skb */ | ||
465 | rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | ||
466 | if (rc < 0) { | ||
467 | kfree_skb(skb); | ||
468 | goto error; | ||
469 | } | ||
470 | |||
471 | if (connected) | ||
472 | rt = (struct rtable *) __sk_dst_check(sk, 0); | ||
473 | |||
474 | if (rt == NULL) { | ||
475 | /* Use correct destination address if we have options. */ | ||
476 | if (opt && opt->srr) | ||
477 | daddr = opt->faddr; | ||
478 | |||
479 | { | ||
480 | struct flowi fl = { .oif = sk->sk_bound_dev_if, | ||
481 | .nl_u = { .ip4_u = { | ||
482 | .daddr = daddr, | ||
483 | .saddr = inet->inet_saddr, | ||
484 | .tos = RT_CONN_FLAGS(sk) } }, | ||
485 | .proto = sk->sk_protocol, | ||
486 | .flags = inet_sk_flowi_flags(sk), | ||
487 | .uli_u = { .ports = { | ||
488 | .sport = inet->inet_sport, | ||
489 | .dport = inet->inet_dport } } }; | ||
490 | |||
491 | /* If this fails, the transport layer's retransmit | ||
492 | * mechanism will keep trying until the route appears | ||
493 | * or the connection times out. | ||
494 | */ | ||
495 | security_sk_classify_flow(sk, &fl); | ||
496 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) | ||
497 | goto no_route; | ||
498 | } | ||
499 | sk_setup_caps(sk, &rt->u.dst); | ||
500 | } | ||
501 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | ||
502 | |||
503 | /* Queue the packet to IP for output */ | ||
504 | rc = ip_queue_xmit(skb, 0); | ||
505 | |||
506 | error: | ||
507 | /* Update stats */ | ||
508 | if (rc >= 0) { | ||
509 | lsa->tx_packets++; | ||
510 | lsa->tx_bytes += len; | ||
511 | rc = len; | ||
512 | } else { | ||
513 | lsa->tx_errors++; | ||
514 | } | ||
515 | |||
516 | return rc; | ||
517 | |||
518 | no_route: | ||
519 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | ||
520 | kfree_skb(skb); | ||
521 | return -EHOSTUNREACH; | ||
522 | } | ||
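Usage note: because l2tp_ip_sendmsg() inserts the zero session ID itself (the skb_put(skb, 4) above), userspace writes only the bare L2TPv3 control header to this socket; the four zero bytes that mark the packet as a control frame on the wire are prepended by the kernel.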
523 | |||
524 | static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | ||
525 | size_t len, int noblock, int flags, int *addr_len) | ||
526 | { | ||
527 | struct inet_sock *inet = inet_sk(sk); | ||
528 | struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); | ||
529 | size_t copied = 0; | ||
530 | int err = -EOPNOTSUPP; | ||
531 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | ||
532 | struct sk_buff *skb; | ||
533 | |||
534 | if (flags & MSG_OOB) | ||
535 | goto out; | ||
536 | |||
537 | if (addr_len) | ||
538 | *addr_len = sizeof(*sin); | ||
539 | |||
540 | skb = skb_recv_datagram(sk, flags, noblock, &err); | ||
541 | if (!skb) | ||
542 | goto out; | ||
543 | |||
544 | copied = skb->len; | ||
545 | if (len < copied) { | ||
546 | msg->msg_flags |= MSG_TRUNC; | ||
547 | copied = len; | ||
548 | } | ||
549 | |||
550 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
551 | if (err) | ||
552 | goto done; | ||
553 | |||
554 | sock_recv_timestamp(msg, sk, skb); | ||
555 | |||
556 | /* Copy the address. */ | ||
557 | if (sin) { | ||
558 | sin->sin_family = AF_INET; | ||
559 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | ||
560 | sin->sin_port = 0; | ||
561 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
562 | } | ||
563 | if (inet->cmsg_flags) | ||
564 | ip_cmsg_recv(msg, skb); | ||
565 | if (flags & MSG_TRUNC) | ||
566 | copied = skb->len; | ||
567 | done: | ||
568 | skb_free_datagram(sk, skb); | ||
569 | out: | ||
570 | if (err) { | ||
571 | lsk->rx_errors++; | ||
572 | return err; | ||
573 | } | ||
574 | |||
575 | lsk->rx_packets++; | ||
576 | lsk->rx_bytes += copied; | ||
577 | |||
578 | return copied; | ||
579 | } | ||
580 | |||
581 | struct proto l2tp_ip_prot = { | ||
582 | .name = "L2TP/IP", | ||
583 | .owner = THIS_MODULE, | ||
584 | .init = l2tp_ip_open, | ||
585 | .close = l2tp_ip_close, | ||
586 | .bind = l2tp_ip_bind, | ||
587 | .connect = l2tp_ip_connect, | ||
588 | .disconnect = udp_disconnect, | ||
589 | .ioctl = udp_ioctl, | ||
590 | .destroy = l2tp_ip_destroy_sock, | ||
591 | .setsockopt = ip_setsockopt, | ||
592 | .getsockopt = ip_getsockopt, | ||
593 | .sendmsg = l2tp_ip_sendmsg, | ||
594 | .recvmsg = l2tp_ip_recvmsg, | ||
595 | .backlog_rcv = l2tp_ip_backlog_recv, | ||
596 | .hash = inet_hash, | ||
597 | .unhash = inet_unhash, | ||
598 | .obj_size = sizeof(struct l2tp_ip_sock), | ||
599 | #ifdef CONFIG_COMPAT | ||
600 | .compat_setsockopt = compat_ip_setsockopt, | ||
601 | .compat_getsockopt = compat_ip_getsockopt, | ||
602 | #endif | ||
603 | }; | ||
604 | |||
605 | static const struct proto_ops l2tp_ip_ops = { | ||
606 | .family = PF_INET, | ||
607 | .owner = THIS_MODULE, | ||
608 | .release = inet_release, | ||
609 | .bind = inet_bind, | ||
610 | .connect = inet_dgram_connect, | ||
611 | .socketpair = sock_no_socketpair, | ||
612 | .accept = sock_no_accept, | ||
613 | .getname = l2tp_ip_getname, | ||
614 | .poll = datagram_poll, | ||
615 | .ioctl = inet_ioctl, | ||
616 | .listen = sock_no_listen, | ||
617 | .shutdown = inet_shutdown, | ||
618 | .setsockopt = sock_common_setsockopt, | ||
619 | .getsockopt = sock_common_getsockopt, | ||
620 | .sendmsg = inet_sendmsg, | ||
621 | .recvmsg = sock_common_recvmsg, | ||
622 | .mmap = sock_no_mmap, | ||
623 | .sendpage = sock_no_sendpage, | ||
624 | #ifdef CONFIG_COMPAT | ||
625 | .compat_setsockopt = compat_sock_common_setsockopt, | ||
626 | .compat_getsockopt = compat_sock_common_getsockopt, | ||
627 | #endif | ||
628 | }; | ||
629 | |||
630 | static struct inet_protosw l2tp_ip_protosw = { | ||
631 | .type = SOCK_DGRAM, | ||
632 | .protocol = IPPROTO_L2TP, | ||
633 | .prot = &l2tp_ip_prot, | ||
634 | .ops = &l2tp_ip_ops, | ||
635 | .no_check = 0, | ||
636 | }; | ||
637 | |||
638 | static struct net_protocol l2tp_ip_protocol __read_mostly = { | ||
639 | .handler = l2tp_ip_recv, | ||
640 | }; | ||
641 | |||
642 | static int __init l2tp_ip_init(void) | ||
643 | { | ||
644 | int err; | ||
645 | |||
646 | printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n"); | ||
647 | |||
648 | err = proto_register(&l2tp_ip_prot, 1); | ||
649 | if (err != 0) | ||
650 | goto out; | ||
651 | |||
652 | err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); | ||
653 | if (err) | ||
654 | goto out1; | ||
655 | |||
656 | inet_register_protosw(&l2tp_ip_protosw); | ||
657 | return 0; | ||
658 | |||
659 | out1: | ||
660 | proto_unregister(&l2tp_ip_prot); | ||
661 | out: | ||
662 | return err; | ||
663 | } | ||
664 | |||
665 | static void __exit l2tp_ip_exit(void) | ||
666 | { | ||
667 | inet_unregister_protosw(&l2tp_ip_protosw); | ||
668 | inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); | ||
669 | proto_unregister(&l2tp_ip_prot); | ||
670 | } | ||
671 | |||
672 | module_init(l2tp_ip_init); | ||
673 | module_exit(l2tp_ip_exit); | ||
674 | |||
675 | MODULE_LICENSE("GPL"); | ||
676 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
677 | MODULE_DESCRIPTION("L2TP over IP"); | ||
678 | MODULE_VERSION("1.0"); | ||
679 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); | ||
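To illustrate how this protocol is reached from userspace, per the protosw and MODULE_ALIAS above: a hypothetical sketch that opens and binds an L2TP/IP tunnel socket. The field names mirror what l2tp_ip_bind() reads (l2tp_family, l2tp_addr, l2tp_conn_id); the full struct sockaddr_l2tpip definition and IPPROTO_L2TP (protocol 115) come from linux/l2tp.h.

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/l2tp.h>         /* IPPROTO_L2TP, struct sockaddr_l2tpip */

    int main(void)
    {
            struct sockaddr_l2tpip sa;
            int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);

            if (fd < 0) {
                    perror("socket");       /* e.g. l2tp_ip not loaded */
                    return 1;
            }

            memset(&sa, 0, sizeof(sa));
            sa.l2tp_family = AF_INET;
            sa.l2tp_addr.s_addr = htonl(INADDR_ANY);
            sa.l2tp_conn_id = 42;           /* hypothetical tunnel id */

            if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                    perror("bind");
                    return 1;
            }
            return 0;
    }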
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c new file mode 100644 index 000000000000..4c1e540732d7 --- /dev/null +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -0,0 +1,840 @@ | |||
1 | /* | ||
2 | * L2TP netlink layer, for management | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * Partly based on the IrDA netlink implementation | ||
7 | * (see net/irda/irnetlink.c) which is: | ||
8 | * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org> | ||
9 | * which is in turn partly based on the wireless netlink code: | ||
10 | * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <net/sock.h> | ||
18 | #include <net/genetlink.h> | ||
19 | #include <net/udp.h> | ||
20 | #include <linux/in.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/socket.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/list.h> | ||
25 | #include <net/net_namespace.h> | ||
26 | |||
27 | #include <linux/l2tp.h> | ||
28 | |||
29 | #include "l2tp_core.h" | ||
30 | |||
31 | |||
32 | static struct genl_family l2tp_nl_family = { | ||
33 | .id = GENL_ID_GENERATE, | ||
34 | .name = L2TP_GENL_NAME, | ||
35 | .version = L2TP_GENL_VERSION, | ||
36 | .hdrsize = 0, | ||
37 | .maxattr = L2TP_ATTR_MAX, | ||
38 | }; | ||
39 | |||
40 | /* Accessed under genl lock */ | ||
41 | static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; | ||
42 | |||
43 | static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) | ||
44 | { | ||
45 | u32 tunnel_id; | ||
46 | u32 session_id; | ||
47 | char *ifname; | ||
48 | struct l2tp_tunnel *tunnel; | ||
49 | struct l2tp_session *session = NULL; | ||
50 | struct net *net = genl_info_net(info); | ||
51 | |||
52 | if (info->attrs[L2TP_ATTR_IFNAME]) { | ||
53 | ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); | ||
54 | session = l2tp_session_find_by_ifname(net, ifname); | ||
55 | } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && | ||
56 | (info->attrs[L2TP_ATTR_CONN_ID])) { | ||
57 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
58 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | ||
59 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
60 | if (tunnel) | ||
61 | session = l2tp_session_find(net, tunnel, session_id); | ||
62 | } | ||
63 | |||
64 | return session; | ||
65 | } | ||
66 | |||
67 | static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) | ||
68 | { | ||
69 | struct sk_buff *msg; | ||
70 | void *hdr; | ||
71 | int ret = -ENOBUFS; | ||
72 | |||
73 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
74 | if (!msg) { | ||
75 | ret = -ENOMEM; | ||
76 | goto out; | ||
77 | } | ||
78 | |||
79 | hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, | ||
80 | &l2tp_nl_family, 0, L2TP_CMD_NOOP); | ||
81 | if (!hdr) { /* genlmsg_put() returns NULL on error */ | ||
82 | ret = -EMSGSIZE; | ||
83 | goto err_out; | ||
84 | } | ||
85 | |||
86 | genlmsg_end(msg, hdr); | ||
87 | |||
88 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
89 | |||
90 | err_out: | ||
91 | nlmsg_free(msg); | ||
92 | |||
93 | out: | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info) | ||
98 | { | ||
99 | u32 tunnel_id; | ||
100 | u32 peer_tunnel_id; | ||
101 | int proto_version; | ||
102 | int fd; | ||
103 | int ret = 0; | ||
104 | struct l2tp_tunnel_cfg cfg = { 0, }; | ||
105 | struct l2tp_tunnel *tunnel; | ||
106 | struct net *net = genl_info_net(info); | ||
107 | |||
108 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
109 | ret = -EINVAL; | ||
110 | goto out; | ||
111 | } | ||
112 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
113 | |||
114 | if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) { | ||
115 | ret = -EINVAL; | ||
116 | goto out; | ||
117 | } | ||
118 | peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]); | ||
119 | |||
120 | if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) { | ||
121 | ret = -EINVAL; | ||
122 | goto out; | ||
123 | } | ||
124 | proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]); | ||
125 | |||
126 | if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) { | ||
127 | ret = -EINVAL; | ||
128 | goto out; | ||
129 | } | ||
130 | cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]); | ||
131 | |||
132 | fd = -1; | ||
133 | if (info->attrs[L2TP_ATTR_FD]) { | ||
134 | fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); | ||
135 | } else { | ||
136 | if (info->attrs[L2TP_ATTR_IP_SADDR]) | ||
137 | cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); | ||
138 | if (info->attrs[L2TP_ATTR_IP_DADDR]) | ||
139 | cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); | ||
140 | if (info->attrs[L2TP_ATTR_UDP_SPORT]) | ||
141 | cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); | ||
142 | if (info->attrs[L2TP_ATTR_UDP_DPORT]) | ||
143 | cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); | ||
144 | if (info->attrs[L2TP_ATTR_UDP_CSUM]) | ||
145 | cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]); | ||
146 | } | ||
147 | |||
148 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
149 | cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
150 | |||
151 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
152 | if (tunnel != NULL) { | ||
153 | ret = -EEXIST; | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | ret = -EINVAL; | ||
158 | switch (cfg.encap) { | ||
159 | case L2TP_ENCAPTYPE_UDP: | ||
160 | case L2TP_ENCAPTYPE_IP: | ||
161 | ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id, | ||
162 | peer_tunnel_id, &cfg, &tunnel); | ||
163 | break; | ||
164 | } | ||
165 | |||
166 | out: | ||
167 | return ret; | ||
168 | } | ||
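For context, a hypothetical libnl-3 style sketch of the userspace side of the handler above: it resolves the generic-netlink family and sends L2TP_CMD_TUNNEL_CREATE with the attributes the handler requires (connection IDs, protocol version, encapsulation type, and a pre-opened tunnel socket fd). The L2TP_* constants are the ones this family registers via linux/l2tp.h; everything else, including the elided error handling, is illustrative.

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/l2tp.h>

    /* Ask the kernel to create an L2TPv3 tunnel over an
     * already-connected UDP socket (tunnel_fd).
     */
    static int create_tunnel(struct nl_sock *sk, int tunnel_fd)
    {
            int family = genl_ctrl_resolve(sk, L2TP_GENL_NAME); /* "l2tp" */
            struct nl_msg *msg = nlmsg_alloc();
            int err;

            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        L2TP_CMD_TUNNEL_CREATE, L2TP_GENL_VERSION);

            nla_put_u32(msg, L2TP_ATTR_CONN_ID, 1);      /* example ids */
            nla_put_u32(msg, L2TP_ATTR_PEER_CONN_ID, 1);
            nla_put_u8(msg, L2TP_ATTR_PROTO_VERSION, 3);
            nla_put_u16(msg, L2TP_ATTR_ENCAP_TYPE, L2TP_ENCAPTYPE_UDP);
            nla_put_u32(msg, L2TP_ATTR_FD, tunnel_fd);

            err = nl_send_auto(sk, msg);
            nlmsg_free(msg);
            return err;
    }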
169 | |||
170 | static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info) | ||
171 | { | ||
172 | struct l2tp_tunnel *tunnel; | ||
173 | u32 tunnel_id; | ||
174 | int ret = 0; | ||
175 | struct net *net = genl_info_net(info); | ||
176 | |||
177 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
178 | ret = -EINVAL; | ||
179 | goto out; | ||
180 | } | ||
181 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
182 | |||
183 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
184 | if (tunnel == NULL) { | ||
185 | ret = -ENODEV; | ||
186 | goto out; | ||
187 | } | ||
188 | |||
189 | (void) l2tp_tunnel_delete(tunnel); | ||
190 | |||
191 | out: | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info) | ||
196 | { | ||
197 | struct l2tp_tunnel *tunnel; | ||
198 | u32 tunnel_id; | ||
199 | int ret = 0; | ||
200 | struct net *net = genl_info_net(info); | ||
201 | |||
202 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
203 | ret = -EINVAL; | ||
204 | goto out; | ||
205 | } | ||
206 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
207 | |||
208 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
209 | if (tunnel == NULL) { | ||
210 | ret = -ENODEV; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
215 | tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
216 | |||
217 | out: | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | ||
222 | struct l2tp_tunnel *tunnel) | ||
223 | { | ||
224 | void *hdr; | ||
225 | struct nlattr *nest; | ||
226 | struct sock *sk = NULL; | ||
227 | struct inet_sock *inet; | ||
228 | |||
229 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, | ||
230 | L2TP_CMD_TUNNEL_GET); | ||
231 | if (!hdr) /* genlmsg_put() returns NULL on error */ | ||
232 | return -EMSGSIZE; | ||
233 | |||
234 | NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); | ||
235 | NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); | ||
236 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); | ||
237 | NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); | ||
238 | NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); | ||
239 | |||
240 | nest = nla_nest_start(skb, L2TP_ATTR_STATS); | ||
241 | if (nest == NULL) | ||
242 | goto nla_put_failure; | ||
243 | |||
244 | NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); | ||
245 | NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); | ||
246 | NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); | ||
247 | NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); | ||
248 | NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); | ||
249 | NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); | ||
250 | NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); | ||
251 | NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); | ||
252 | nla_nest_end(skb, nest); | ||
253 | |||
254 | sk = tunnel->sock; | ||
255 | if (!sk) | ||
256 | goto out; | ||
257 | |||
258 | inet = inet_sk(sk); | ||
259 | |||
260 | switch (tunnel->encap) { | ||
261 | case L2TP_ENCAPTYPE_UDP: | ||
262 | NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); | ||
263 | NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); | ||
264 | NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); | ||
265 | /* NOBREAK */ | ||
266 | case L2TP_ENCAPTYPE_IP: | ||
267 | NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); | ||
268 | NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | out: | ||
273 | return genlmsg_end(skb, hdr); | ||
274 | |||
275 | nla_put_failure: | ||
276 | genlmsg_cancel(skb, hdr); | ||
277 | return -1; | ||
278 | } | ||
279 | |||
280 | static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) | ||
281 | { | ||
282 | struct l2tp_tunnel *tunnel; | ||
283 | struct sk_buff *msg; | ||
284 | u32 tunnel_id; | ||
285 | int ret = -ENOBUFS; | ||
286 | struct net *net = genl_info_net(info); | ||
287 | |||
288 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
289 | ret = -EINVAL; | ||
290 | goto out; | ||
291 | } | ||
292 | |||
293 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
294 | |||
295 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
296 | if (tunnel == NULL) { | ||
297 | ret = -ENODEV; | ||
298 | goto out; | ||
299 | } | ||
300 | |||
301 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
302 | if (!msg) { | ||
303 | ret = -ENOMEM; | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq, | ||
308 | NLM_F_ACK, tunnel); | ||
309 | if (ret < 0) | ||
310 | goto err_out; | ||
311 | |||
312 | return genlmsg_unicast(net, msg, info->snd_pid); | ||
313 | |||
314 | err_out: | ||
315 | nlmsg_free(msg); | ||
316 | |||
317 | out: | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb) | ||
322 | { | ||
323 | int ti = cb->args[0]; | ||
324 | struct l2tp_tunnel *tunnel; | ||
325 | struct net *net = sock_net(skb->sk); | ||
326 | |||
327 | for (;;) { | ||
328 | tunnel = l2tp_tunnel_find_nth(net, ti); | ||
329 | if (tunnel == NULL) | ||
330 | goto out; | ||
331 | |||
332 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid, | ||
333 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
334 | tunnel) <= 0) | ||
335 | goto out; | ||
336 | |||
337 | ti++; | ||
338 | } | ||
339 | |||
340 | out: | ||
341 | cb->args[0] = ti; | ||
342 | |||
343 | return skb->len; | ||
344 | } | ||
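The dump callback above follows the usual generic-netlink pagination idiom: cb->args[] persists between invocations, so each call resumes at the index where the previous skb filled up, and the core keeps calling back as long as a non-empty skb is returned. A stripped-down sketch of the pattern, with find_nth() and fill_one() as hypothetical helpers:

    /* Skeleton of a netlink dumpit callback. find_nth() returns the
     * i-th object or NULL; fill_one() returns <= 0 once the skb has
     * no room left for another record.
     */
    static int sketch_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            int i = cb->args[0];            /* resume where we left off */
            void *obj;

            while ((obj = find_nth(i)) != NULL) {
                    if (fill_one(skb, obj) <= 0)
                            break;          /* skb full; resume next call */
                    i++;
            }

            cb->args[0] = i;                /* remember progress */
            return skb->len;                /* non-empty => call us again */
    }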
345 | |||
346 | static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info) | ||
347 | { | ||
348 | u32 tunnel_id = 0; | ||
349 | u32 session_id; | ||
350 | u32 peer_session_id; | ||
351 | int ret = 0; | ||
352 | struct l2tp_tunnel *tunnel; | ||
353 | struct l2tp_session *session; | ||
354 | struct l2tp_session_cfg cfg = { 0, }; | ||
355 | struct net *net = genl_info_net(info); | ||
356 | |||
357 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
358 | ret = -EINVAL; | ||
359 | goto out; | ||
360 | } | ||
361 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
362 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
363 | if (!tunnel) { | ||
364 | ret = -ENODEV; | ||
365 | goto out; | ||
366 | } | ||
367 | |||
368 | if (!info->attrs[L2TP_ATTR_SESSION_ID]) { | ||
369 | ret = -EINVAL; | ||
370 | goto out; | ||
371 | } | ||
372 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | ||
373 | session = l2tp_session_find(net, tunnel, session_id); | ||
374 | if (session) { | ||
375 | ret = -EEXIST; | ||
376 | goto out; | ||
377 | } | ||
378 | |||
379 | if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { | ||
380 | ret = -EINVAL; | ||
381 | goto out; | ||
382 | } | ||
383 | peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); | ||
384 | |||
385 | if (!info->attrs[L2TP_ATTR_PW_TYPE]) { | ||
386 | ret = -EINVAL; | ||
387 | goto out; | ||
388 | } | ||
389 | cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); | ||
390 | if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { | ||
391 | ret = -EINVAL; | ||
392 | goto out; | ||
393 | } | ||
394 | |||
395 | if (tunnel->version > 2) { | ||
396 | if (info->attrs[L2TP_ATTR_OFFSET]) | ||
397 | cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]); | ||
398 | |||
399 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | ||
400 | cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | ||
401 | |||
402 | cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT; | ||
403 | if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) | ||
404 | cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]); | ||
405 | |||
406 | cfg.l2specific_len = 4; | ||
407 | if (info->attrs[L2TP_ATTR_L2SPEC_LEN]) | ||
408 | cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]); | ||
409 | |||
410 | if (info->attrs[L2TP_ATTR_COOKIE]) { | ||
411 | u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); | ||
412 | if (len > 8) { | ||
413 | ret = -EINVAL; | ||
414 | goto out; | ||
415 | } | ||
416 | cfg.cookie_len = len; | ||
417 | memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); | ||
418 | } | ||
419 | if (info->attrs[L2TP_ATTR_PEER_COOKIE]) { | ||
420 | u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); | ||
421 | if (len > 8) { | ||
422 | ret = -EINVAL; | ||
423 | goto out; | ||
424 | } | ||
425 | cfg.peer_cookie_len = len; | ||
426 | memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); | ||
427 | } | ||
428 | if (info->attrs[L2TP_ATTR_IFNAME]) | ||
429 | cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); | ||
430 | |||
431 | if (info->attrs[L2TP_ATTR_VLAN_ID]) | ||
432 | cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]); | ||
433 | } | ||
434 | |||
435 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
436 | cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
437 | |||
438 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | ||
439 | cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | ||
440 | |||
441 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | ||
442 | cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | ||
443 | |||
444 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | ||
445 | cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | ||
446 | |||
447 | if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) | ||
448 | cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); | ||
449 | |||
450 | if (info->attrs[L2TP_ATTR_MTU]) | ||
451 | cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); | ||
452 | |||
453 | if (info->attrs[L2TP_ATTR_MRU]) | ||
454 | cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); | ||
455 | |||
456 | if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || | ||
457 | (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { | ||
458 | ret = -EPROTONOSUPPORT; | ||
459 | goto out; | ||
460 | } | ||
461 | |||
462 | /* Check that pseudowire-specific params are present */ | ||
463 | switch (cfg.pw_type) { | ||
464 | case L2TP_PWTYPE_NONE: | ||
465 | break; | ||
466 | case L2TP_PWTYPE_ETH_VLAN: | ||
467 | if (!info->attrs[L2TP_ATTR_VLAN_ID]) { | ||
468 | ret = -EINVAL; | ||
469 | goto out; | ||
470 | } | ||
471 | break; | ||
472 | case L2TP_PWTYPE_ETH: | ||
473 | break; | ||
474 | case L2TP_PWTYPE_PPP: | ||
475 | case L2TP_PWTYPE_PPP_AC: | ||
476 | break; | ||
477 | case L2TP_PWTYPE_IP: | ||
478 | default: | ||
479 | ret = -EPROTONOSUPPORT; | ||
480 | goto out; /* reject unsupported pseudowire types */ | ||
481 | } | ||
482 | |||
483 | ret = -EPROTONOSUPPORT; | ||
484 | if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) | ||
485 | ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, | ||
486 | session_id, peer_session_id, &cfg); | ||
487 | |||
488 | out: | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info) | ||
493 | { | ||
494 | int ret = 0; | ||
495 | struct l2tp_session *session; | ||
496 | u16 pw_type; | ||
497 | |||
498 | session = l2tp_nl_session_find(info); | ||
499 | if (session == NULL) { | ||
500 | ret = -ENODEV; | ||
501 | goto out; | ||
502 | } | ||
503 | |||
504 | pw_type = session->pwtype; | ||
505 | if (pw_type < __L2TP_PWTYPE_MAX) | ||
506 | if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) | ||
507 | ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); | ||
508 | |||
509 | out: | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info) | ||
514 | { | ||
515 | int ret = 0; | ||
516 | struct l2tp_session *session; | ||
517 | |||
518 | session = l2tp_nl_session_find(info); | ||
519 | if (session == NULL) { | ||
520 | ret = -ENODEV; | ||
521 | goto out; | ||
522 | } | ||
523 | |||
524 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
525 | session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
526 | |||
527 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | ||
528 | session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | ||
529 | |||
530 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | ||
531 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | ||
532 | |||
533 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | ||
534 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | ||
535 | |||
536 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | ||
537 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | ||
538 | |||
539 | if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) | ||
540 | session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); | ||
541 | |||
542 | if (info->attrs[L2TP_ATTR_MTU]) | ||
543 | session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); | ||
544 | |||
545 | if (info->attrs[L2TP_ATTR_MRU]) | ||
546 | session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); | ||
547 | |||
548 | out: | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | ||
553 | struct l2tp_session *session) | ||
554 | { | ||
555 | void *hdr; | ||
556 | struct nlattr *nest; | ||
557 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
558 | struct sock *sk = NULL; | ||
559 | |||
560 | sk = tunnel->sock; | ||
561 | |||
562 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); | ||
563 | if (!hdr) /* genlmsg_put() returns NULL on error */ | ||
564 | return -EMSGSIZE; | ||
565 | |||
566 | NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); | ||
567 | NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); | ||
568 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); | ||
569 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); | ||
570 | NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); | ||
571 | NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); | ||
572 | NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); | ||
573 | if (session->mru) | ||
574 | NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); | ||
575 | |||
576 | if (session->ifname && session->ifname[0]) | ||
577 | NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); | ||
578 | if (session->cookie_len) | ||
579 | NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); | ||
580 | if (session->peer_cookie_len) | ||
581 | NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); | ||
582 | NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); | ||
583 | NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); | ||
584 | NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); | ||
585 | #ifdef CONFIG_XFRM | ||
586 | if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) | ||
587 | NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); | ||
588 | #endif | ||
589 | if (session->reorder_timeout) | ||
590 | NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); | ||
591 | |||
592 | nest = nla_nest_start(skb, L2TP_ATTR_STATS); | ||
593 | if (nest == NULL) | ||
594 | goto nla_put_failure; | ||
595 | NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); | ||
596 | NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); | ||
597 | NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); | ||
598 | NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); | ||
599 | NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); | ||
600 | NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); | ||
601 | NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); | ||
602 | NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); | ||
603 | nla_nest_end(skb, nest); | ||
604 | |||
605 | return genlmsg_end(skb, hdr); | ||
606 | |||
607 | nla_put_failure: | ||
608 | genlmsg_cancel(skb, hdr); | ||
609 | return -1; | ||
610 | } | ||
611 | |||
612 | static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) | ||
613 | { | ||
614 | struct l2tp_session *session; | ||
615 | struct sk_buff *msg; | ||
616 | int ret; | ||
617 | |||
618 | session = l2tp_nl_session_find(info); | ||
619 | if (session == NULL) { | ||
620 | ret = -ENODEV; | ||
621 | goto out; | ||
622 | } | ||
623 | |||
624 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
625 | if (!msg) { | ||
626 | ret = -ENOMEM; | ||
627 | goto out; | ||
628 | } | ||
629 | |||
630 | ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq, | ||
631 | 0, session); | ||
632 | if (ret < 0) | ||
633 | goto err_out; | ||
634 | |||
635 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
636 | |||
637 | err_out: | ||
638 | nlmsg_free(msg); | ||
639 | |||
640 | out: | ||
641 | return ret; | ||
642 | } | ||
643 | |||
644 | static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb) | ||
645 | { | ||
646 | struct net *net = sock_net(skb->sk); | ||
647 | struct l2tp_session *session; | ||
648 | struct l2tp_tunnel *tunnel = NULL; | ||
649 | int ti = cb->args[0]; | ||
650 | int si = cb->args[1]; | ||
651 | |||
652 | for (;;) { | ||
653 | if (tunnel == NULL) { | ||
654 | tunnel = l2tp_tunnel_find_nth(net, ti); | ||
655 | if (tunnel == NULL) | ||
656 | goto out; | ||
657 | } | ||
658 | |||
659 | session = l2tp_session_find_nth(tunnel, si); | ||
660 | if (session == NULL) { | ||
661 | ti++; | ||
662 | tunnel = NULL; | ||
663 | si = 0; | ||
664 | continue; | ||
665 | } | ||
666 | |||
667 | if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid, | ||
668 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
669 | session) <= 0) | ||
670 | break; | ||
671 | |||
672 | si++; | ||
673 | } | ||
674 | |||
675 | out: | ||
676 | cb->args[0] = ti; | ||
677 | cb->args[1] = si; | ||
678 | |||
679 | return skb->len; | ||
680 | } | ||
681 | |||
682 | static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { | ||
683 | [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, }, | ||
684 | [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, }, | ||
685 | [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, }, | ||
686 | [L2TP_ATTR_OFFSET] = { .type = NLA_U16, }, | ||
687 | [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, }, | ||
688 | [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, }, | ||
689 | [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, }, | ||
690 | [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, }, | ||
691 | [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, }, | ||
692 | [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, }, | ||
693 | [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, }, | ||
694 | [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, }, | ||
695 | [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, }, | ||
696 | [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, }, | ||
697 | [L2TP_ATTR_DEBUG] = { .type = NLA_U32, }, | ||
698 | [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, }, | ||
699 | [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, }, | ||
700 | [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, }, | ||
701 | [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, }, | ||
702 | [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, }, | ||
703 | [L2TP_ATTR_FD] = { .type = NLA_U32, }, | ||
704 | [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, }, | ||
705 | [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, }, | ||
706 | [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, }, | ||
707 | [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, }, | ||
708 | [L2TP_ATTR_MTU] = { .type = NLA_U16, }, | ||
709 | [L2TP_ATTR_MRU] = { .type = NLA_U16, }, | ||
710 | [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, | ||
711 | [L2TP_ATTR_IFNAME] = { | ||
712 | .type = NLA_NUL_STRING, | ||
713 | .len = IFNAMSIZ - 1, | ||
714 | }, | ||
715 | [L2TP_ATTR_COOKIE] = { | ||
716 | .type = NLA_BINARY, | ||
717 | .len = 8, | ||
718 | }, | ||
719 | [L2TP_ATTR_PEER_COOKIE] = { | ||
720 | .type = NLA_BINARY, | ||
721 | .len = 8, | ||
722 | }, | ||
723 | }; | ||
724 | |||
725 | static struct genl_ops l2tp_nl_ops[] = { | ||
726 | { | ||
727 | .cmd = L2TP_CMD_NOOP, | ||
728 | .doit = l2tp_nl_cmd_noop, | ||
729 | .policy = l2tp_nl_policy, | ||
730 | /* can be retrieved by unprivileged users */ | ||
731 | }, | ||
732 | { | ||
733 | .cmd = L2TP_CMD_TUNNEL_CREATE, | ||
734 | .doit = l2tp_nl_cmd_tunnel_create, | ||
735 | .policy = l2tp_nl_policy, | ||
736 | .flags = GENL_ADMIN_PERM, | ||
737 | }, | ||
738 | { | ||
739 | .cmd = L2TP_CMD_TUNNEL_DELETE, | ||
740 | .doit = l2tp_nl_cmd_tunnel_delete, | ||
741 | .policy = l2tp_nl_policy, | ||
742 | .flags = GENL_ADMIN_PERM, | ||
743 | }, | ||
744 | { | ||
745 | .cmd = L2TP_CMD_TUNNEL_MODIFY, | ||
746 | .doit = l2tp_nl_cmd_tunnel_modify, | ||
747 | .policy = l2tp_nl_policy, | ||
748 | .flags = GENL_ADMIN_PERM, | ||
749 | }, | ||
750 | { | ||
751 | .cmd = L2TP_CMD_TUNNEL_GET, | ||
752 | .doit = l2tp_nl_cmd_tunnel_get, | ||
753 | .dumpit = l2tp_nl_cmd_tunnel_dump, | ||
754 | .policy = l2tp_nl_policy, | ||
755 | .flags = GENL_ADMIN_PERM, | ||
756 | }, | ||
757 | { | ||
758 | .cmd = L2TP_CMD_SESSION_CREATE, | ||
759 | .doit = l2tp_nl_cmd_session_create, | ||
760 | .policy = l2tp_nl_policy, | ||
761 | .flags = GENL_ADMIN_PERM, | ||
762 | }, | ||
763 | { | ||
764 | .cmd = L2TP_CMD_SESSION_DELETE, | ||
765 | .doit = l2tp_nl_cmd_session_delete, | ||
766 | .policy = l2tp_nl_policy, | ||
767 | .flags = GENL_ADMIN_PERM, | ||
768 | }, | ||
769 | { | ||
770 | .cmd = L2TP_CMD_SESSION_MODIFY, | ||
771 | .doit = l2tp_nl_cmd_session_modify, | ||
772 | .policy = l2tp_nl_policy, | ||
773 | .flags = GENL_ADMIN_PERM, | ||
774 | }, | ||
775 | { | ||
776 | .cmd = L2TP_CMD_SESSION_GET, | ||
777 | .doit = l2tp_nl_cmd_session_get, | ||
778 | .dumpit = l2tp_nl_cmd_session_dump, | ||
779 | .policy = l2tp_nl_policy, | ||
780 | .flags = GENL_ADMIN_PERM, | ||
781 | }, | ||
782 | }; | ||
783 | |||
784 | int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops) | ||
785 | { | ||
786 | int ret; | ||
787 | |||
788 | ret = -EINVAL; | ||
789 | if (pw_type >= __L2TP_PWTYPE_MAX) | ||
790 | goto err; | ||
791 | |||
792 | genl_lock(); | ||
793 | ret = -EBUSY; | ||
794 | if (l2tp_nl_cmd_ops[pw_type]) | ||
795 | goto out; | ||
796 | |||
797 | l2tp_nl_cmd_ops[pw_type] = ops; | ||
798 | ret = 0; | ||
799 | out: | ||
800 | genl_unlock(); | ||
801 | err: | ||
802 | return ret; | ||
803 | } | ||
804 | EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); | ||
805 | |||
806 | void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type) | ||
807 | { | ||
808 | if (pw_type < __L2TP_PWTYPE_MAX) { | ||
809 | genl_lock(); | ||
810 | l2tp_nl_cmd_ops[pw_type] = NULL; | ||
811 | genl_unlock(); | ||
812 | } | ||
813 | } | ||
814 | EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops); | ||
815 | |||
816 | static int l2tp_nl_init(void) | ||
817 | { | ||
818 | int err; | ||
819 | |||
820 | printk(KERN_INFO "L2TP netlink interface\n"); | ||
821 | err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, | ||
822 | ARRAY_SIZE(l2tp_nl_ops)); | ||
823 | |||
824 | return err; | ||
825 | } | ||
826 | |||
827 | static void l2tp_nl_cleanup(void) | ||
828 | { | ||
829 | genl_unregister_family(&l2tp_nl_family); | ||
830 | } | ||
831 | |||
832 | module_init(l2tp_nl_init); | ||
833 | module_exit(l2tp_nl_cleanup); | ||
834 | |||
835 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
836 | MODULE_DESCRIPTION("L2TP netlink"); | ||
837 | MODULE_LICENSE("GPL"); | ||
838 | MODULE_VERSION("1.0"); | ||
839 | MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \ | ||
840 | __stringify(NETLINK_GENERIC) "-type-" "l2tp"); | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c new file mode 100644 index 000000000000..90d82b3f2889 --- /dev/null +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -0,0 +1,1837 @@ | |||
1 | /***************************************************************************** | ||
2 | * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets | ||
3 | * | ||
4 | * PPPoX --- Generic PPP encapsulation socket family | ||
5 | * PPPoL2TP --- PPP over L2TP (RFC 2661) | ||
6 | * | ||
7 | * Version: 2.0.0 | ||
8 | * | ||
9 | * Authors: James Chapman (jchapman@katalix.com) | ||
10 | * | ||
11 | * Based on original work by Martijn van Oosterhout <kleptog@svana.org> | ||
12 | * | ||
13 | * License: | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | /* This driver handles only L2TP data frames; control frames are handled by a | ||
22 | * userspace application. | ||
23 | * | ||
24 | * To send data in an L2TP session, userspace opens a PPPoL2TP socket and | ||
25 | * attaches it to a bound UDP socket with local tunnel_id / session_id and | ||
26 | * peer tunnel_id / session_id set. Data can then be sent or received using | ||
27 | * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket | ||
28 | * can be read or modified using ioctl() or [gs]etsockopt() calls. | ||
29 | * | ||
30 | * When a PPPoL2TP socket is connected with local and peer session_id values | ||
31 | * zero, the socket is treated as a special tunnel management socket. | ||
32 | * | ||
33 | * Here's example userspace code to create a socket for sending/receiving data | ||
34 | * over an L2TP session:- | ||
35 | * | ||
36 | * struct sockaddr_pppol2tp sax; | ||
37 | * int fd; | ||
38 | * int err; | ||
39 | * | ||
40 | * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); | ||
41 | * | ||
42 | * sax.sa_family = AF_PPPOX; | ||
43 | * sax.sa_protocol = PX_PROTO_OL2TP; | ||
44 | * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket | ||
45 | * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; | ||
46 | * sax.pppol2tp.addr.sin_port = addr->sin_port; | ||
47 | * sax.pppol2tp.addr.sin_family = AF_INET; | ||
48 | * sax.pppol2tp.s_tunnel = tunnel_id; | ||
49 | * sax.pppol2tp.s_session = session_id; | ||
50 | * sax.pppol2tp.d_tunnel = peer_tunnel_id; | ||
51 | * sax.pppol2tp.d_session = peer_session_id; | ||
52 | * | ||
53 | * err = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); | ||
54 | * | ||
55 | * A pppd plugin that allows PPP traffic to be carried over L2TP using | ||
56 | * this driver is available from the OpenL2TP project at | ||
57 | * http://openl2tp.sourceforge.net. | ||
58 | */ | ||
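/* Once connected, fd carries PPP frames with ordinary socket calls; on
 * transmit this driver prepends the PPP address/control bytes (0xff,
 * 0x03) and the L2TP header. A sketch only (frame and frame_len are
 * placeholders, error handling omitted):
 *
 *	unsigned char frame[1500];
 *
 *	send(fd, frame, frame_len, 0);		// one PPP frame out
 *	recv(fd, frame, sizeof(frame), 0);	// next PPP frame in
 */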
59 | |||
60 | #include <linux/module.h> | ||
61 | #include <linux/string.h> | ||
62 | #include <linux/list.h> | ||
63 | #include <linux/uaccess.h> | ||
64 | |||
65 | #include <linux/kernel.h> | ||
66 | #include <linux/spinlock.h> | ||
67 | #include <linux/kthread.h> | ||
68 | #include <linux/sched.h> | ||
69 | #include <linux/slab.h> | ||
70 | #include <linux/errno.h> | ||
71 | #include <linux/jiffies.h> | ||
72 | |||
73 | #include <linux/netdevice.h> | ||
74 | #include <linux/net.h> | ||
75 | #include <linux/inetdevice.h> | ||
76 | #include <linux/skbuff.h> | ||
77 | #include <linux/init.h> | ||
78 | #include <linux/ip.h> | ||
79 | #include <linux/udp.h> | ||
80 | #include <linux/if_pppox.h> | ||
81 | #include <linux/if_pppol2tp.h> | ||
82 | #include <net/sock.h> | ||
83 | #include <linux/ppp_channel.h> | ||
84 | #include <linux/ppp_defs.h> | ||
85 | #include <linux/if_ppp.h> | ||
86 | #include <linux/file.h> | ||
87 | #include <linux/hash.h> | ||
88 | #include <linux/sort.h> | ||
89 | #include <linux/proc_fs.h> | ||
90 | #include <linux/l2tp.h> | ||
91 | #include <linux/nsproxy.h> | ||
92 | #include <net/net_namespace.h> | ||
93 | #include <net/netns/generic.h> | ||
94 | #include <net/dst.h> | ||
95 | #include <net/ip.h> | ||
96 | #include <net/udp.h> | ||
97 | #include <net/xfrm.h> | ||
98 | |||
99 | #include <asm/byteorder.h> | ||
100 | #include <asm/atomic.h> | ||
101 | |||
102 | #include "l2tp_core.h" | ||
103 | |||
104 | #define PPPOL2TP_DRV_VERSION "V2.0" | ||
105 | |||
106 | /* Space for UDP, L2TP and PPP headers */ | ||
107 | #define PPPOL2TP_HEADER_OVERHEAD 40 | ||
108 | |||
109 | #define PRINTK(_mask, _type, _lvl, _fmt, args...) \ | ||
110 | do { \ | ||
111 | if ((_mask) & (_type)) \ | ||
112 | printk(_lvl "PPPOL2TP: " _fmt, ##args); \ | ||
113 | } while (0) | ||
114 | |||
115 | /* Number of bytes needed to build transmit L2TP headers. | ||
116 | * Unfortunately the size is different depending on whether sequence numbers | ||
117 | * are enabled. | ||
118 | */ | ||
119 | #define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10 | ||
120 | #define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6 | ||
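/* For reference (RFC 2661), the transmit data header is
 *
 *	flags/ver (2) + tunnel_id (2) + session_id (2)	=  6 bytes
 *
 * plus, when sequence numbers are enabled,
 *
 *	Ns (2) + Nr (2)				= 10 bytes total
 *
 * hence the two sizes above. No length field is sent on data frames.
 */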
121 | |||
122 | /* Private data of each session. This data lives at the end of struct | ||
123 | * l2tp_session, referenced via session->priv[]. | ||
124 | */ | ||
125 | struct pppol2tp_session { | ||
126 | int owner; /* pid that opened the socket */ | ||
127 | |||
128 | struct sock *sock; /* Pointer to the session | ||
129 | * PPPoX socket */ | ||
130 | struct sock *tunnel_sock; /* Pointer to the tunnel UDP | ||
131 | * socket */ | ||
132 | int flags; /* accessed by PPPIOCGFLAGS. | ||
133 | * Unused. */ | ||
134 | }; | ||
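/* A sketch of the resulting layout: l2tp_session_create() allocates
 * sizeof(struct l2tp_session) plus this structure in one block, so
 * recovering the private data is just a pointer offset:
 *
 *	struct pppol2tp_session *ps = l2tp_session_priv(session);
 */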
135 | |||
136 | static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); | ||
137 | |||
138 | static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL }; | ||
139 | static const struct proto_ops pppol2tp_ops; | ||
140 | |||
141 | /* Helpers to obtain tunnel/session contexts from sockets. | ||
142 | */ | ||
143 | static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) | ||
144 | { | ||
145 | struct l2tp_session *session; | ||
146 | |||
147 | if (sk == NULL) | ||
148 | return NULL; | ||
149 | |||
150 | sock_hold(sk); | ||
151 | session = (struct l2tp_session *)(sk->sk_user_data); | ||
152 | if (session == NULL) { | ||
153 | sock_put(sk); | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
158 | |||
159 | out: | ||
160 | return session; | ||
161 | } | ||
162 | |||
163 | /***************************************************************************** | ||
164 | * Receive data handling | ||
165 | *****************************************************************************/ | ||
166 | |||
167 | static int pppol2tp_recv_payload_hook(struct sk_buff *skb) | ||
168 | { | ||
169 | /* Skip PPP header, if present. In testing, Microsoft L2TP clients | ||
170 | * don't send the PPP header (PPP header compression enabled), but | ||
171 | * other clients can include the header. So we cope with both cases | ||
172 | * here. The PPP header is always FF03 when using L2TP. | ||
173 | * | ||
174 | * Note that skb->data[] isn't dereferenced from a u16 ptr here since | ||
175 | * the field may be unaligned. | ||
176 | */ | ||
177 | if (!pskb_may_pull(skb, 2)) | ||
178 | return 1; | ||
179 | |||
180 | if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) | ||
181 | skb_pull(skb, 2); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | /* Receive message. This is the recvmsg for the PPPoL2TP socket. | ||
187 | */ | ||
188 | static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
189 | struct msghdr *msg, size_t len, | ||
190 | int flags) | ||
191 | { | ||
192 | int err; | ||
193 | struct sk_buff *skb; | ||
194 | struct sock *sk = sock->sk; | ||
195 | |||
196 | err = -EIO; | ||
197 | if (sk->sk_state & PPPOX_BOUND) | ||
198 | goto end; | ||
199 | |||
200 | msg->msg_namelen = 0; | ||
201 | |||
202 | err = 0; | ||
203 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | ||
204 | flags & MSG_DONTWAIT, &err); | ||
205 | if (!skb) | ||
206 | goto end; | ||
207 | |||
208 | if (len > skb->len) | ||
209 | len = skb->len; | ||
210 | else if (len < skb->len) | ||
211 | msg->msg_flags |= MSG_TRUNC; | ||
212 | |||
213 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); | ||
214 | if (likely(err == 0)) | ||
215 | err = len; | ||
216 | |||
217 | kfree_skb(skb); | ||
218 | end: | ||
219 | return err; | ||
220 | } | ||
221 | |||
222 | static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | ||
223 | { | ||
224 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
225 | struct sock *sk = NULL; | ||
226 | |||
227 | /* If the socket is bound, send it in to PPP's input queue. Otherwise | ||
228 | * queue it on the session socket. | ||
229 | */ | ||
230 | sk = ps->sock; | ||
231 | if (sk == NULL) | ||
232 | goto no_sock; | ||
233 | |||
234 | if (sk->sk_state & PPPOX_BOUND) { | ||
235 | struct pppox_sock *po; | ||
236 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, | ||
237 | "%s: recv %d byte data frame, passing to ppp\n", | ||
238 | session->name, data_len); | ||
239 | |||
240 | /* We need to forget all info related to the L2TP packet | ||
241 | * gathered in the skb as we are going to reuse the same | ||
242 | * skb for the inner packet. | ||
243 | * Namely we need to: | ||
244 | * - reset xfrm (IPSec) information as it applies to | ||
245 | * the outer L2TP packet and not to the inner one | ||
246 | * - release the dst to force a route lookup on the inner | ||
247 | * IP packet since skb->dst currently points to the dst | ||
248 | * of the UDP tunnel | ||
249 | * - reset netfilter information as it doesn't apply | ||
250 | * to the inner packet either | ||
251 | */ | ||
252 | secpath_reset(skb); | ||
253 | skb_dst_drop(skb); | ||
254 | nf_reset(skb); | ||
255 | |||
256 | po = pppox_sk(sk); | ||
257 | ppp_input(&po->chan, skb); | ||
258 | } else { | ||
259 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
260 | "%s: socket not bound\n", session->name); | ||
261 | |||
262 | /* Not bound. Nothing we can do, so discard. */ | ||
263 | session->stats.rx_errors++; | ||
264 | kfree_skb(skb); | ||
265 | } | ||
266 | |||
267 | return; | ||
268 | |||
269 | no_sock: | ||
270 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
271 | "%s: no socket\n", session->name); | ||
272 | kfree_skb(skb); | ||
273 | } | ||
274 | |||
275 | static void pppol2tp_session_sock_hold(struct l2tp_session *session) | ||
276 | { | ||
277 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
278 | |||
279 | if (ps->sock) | ||
280 | sock_hold(ps->sock); | ||
281 | } | ||
282 | |||
283 | static void pppol2tp_session_sock_put(struct l2tp_session *session) | ||
284 | { | ||
285 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
286 | |||
287 | if (ps->sock) | ||
288 | sock_put(ps->sock); | ||
289 | } | ||
290 | |||
291 | /************************************************************************ | ||
292 | * Transmit handling | ||
293 | ***********************************************************************/ | ||
294 | |||
295 | /* This is the sendmsg for the PPPoL2TP session socket. We come here | ||
296 | * when a user application does a sendmsg() on the session socket. L2TP and | ||
297 | * PPP headers must be inserted into the user's data. | ||
298 | */ | ||
299 | static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | ||
300 | size_t total_len) | ||
301 | { | ||
302 | static const unsigned char ppph[2] = { 0xff, 0x03 }; | ||
303 | struct sock *sk = sock->sk; | ||
304 | struct sk_buff *skb; | ||
305 | int error; | ||
306 | struct l2tp_session *session; | ||
307 | struct l2tp_tunnel *tunnel; | ||
308 | struct pppol2tp_session *ps; | ||
309 | int uhlen; | ||
310 | |||
311 | error = -ENOTCONN; | ||
312 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | ||
313 | goto error; | ||
314 | |||
315 | /* Get session and tunnel contexts */ | ||
316 | error = -EBADF; | ||
317 | session = pppol2tp_sock_to_session(sk); | ||
318 | if (session == NULL) | ||
319 | goto error; | ||
320 | |||
321 | ps = l2tp_session_priv(session); | ||
322 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
323 | if (tunnel == NULL) | ||
324 | goto error_put_sess; | ||
325 | |||
326 | uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
327 | |||
328 | /* Allocate a socket buffer */ | ||
329 | error = -ENOMEM; | ||
330 | skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + | ||
331 | uhlen + session->hdr_len + | ||
332 | sizeof(ppph) + total_len, | ||
333 | 0, GFP_KERNEL); | ||
334 | if (!skb) | ||
335 | goto error_put_sess_tun; | ||
336 | |||
337 | /* Reserve space for headers. */ | ||
338 | skb_reserve(skb, NET_SKB_PAD); | ||
339 | skb_reset_network_header(skb); | ||
340 | skb_reserve(skb, sizeof(struct iphdr)); | ||
341 | skb_reset_transport_header(skb); | ||
342 | skb_reserve(skb, uhlen); | ||
343 | |||
344 | /* Add PPP header */ | ||
345 | skb->data[0] = ppph[0]; | ||
346 | skb->data[1] = ppph[1]; | ||
347 | skb_put(skb, 2); | ||
348 | |||
349 | /* Copy user data into skb */ | ||
350 | error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); | ||
351 | if (error < 0) { | ||
352 | kfree_skb(skb); | ||
353 | goto error_put_sess_tun; | ||
354 | } | ||
355 | skb_put(skb, total_len); | ||
356 | |||
357 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
358 | |||
359 | sock_put(ps->tunnel_sock); | ||
360 | sock_put(sk); | ||
361 | return total_len; | ||
362 | |||
363 | error_put_sess_tun: | ||
364 | sock_put(ps->tunnel_sock); | ||
365 | error_put_sess: | ||
366 | sock_put(sk); | ||
367 | error: | ||
368 | return error; | ||
369 | } | ||
370 | |||
371 | /* Transmit function called by generic PPP driver. Sends PPP frame | ||
372 | * over PPPoL2TP socket. | ||
373 | * | ||
374 | * This is almost the same as pppol2tp_sendmsg(), but rather than | ||
375 | * being called with a msghdr from userspace, it is called with a skb | ||
376 | * from the kernel. | ||
377 | * | ||
378 | * The supplied skb from ppp doesn't have enough headroom for the | ||
379 | * insertion of L2TP, UDP and IP headers so we need to allocate more | ||
380 | * headroom in the skb. This will create a cloned skb. But we must be | ||
381 | * careful in the error case because the caller will expect to free | ||
382 | * the skb it supplied, not our cloned skb. So we take care to always | ||
383 | * leave the original skb unfreed if we return an error. | ||
384 | */ | ||
385 | static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | ||
386 | { | ||
387 | static const u8 ppph[2] = { 0xff, 0x03 }; | ||
388 | struct sock *sk = (struct sock *) chan->private; | ||
389 | struct sock *sk_tun; | ||
390 | struct l2tp_session *session; | ||
391 | struct l2tp_tunnel *tunnel; | ||
392 | struct pppol2tp_session *ps; | ||
393 | int old_headroom; | ||
394 | int new_headroom; | ||
395 | |||
396 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | ||
397 | goto abort; | ||
398 | |||
399 | /* Get session and tunnel contexts from the socket */ | ||
400 | session = pppol2tp_sock_to_session(sk); | ||
401 | if (session == NULL) | ||
402 | goto abort; | ||
403 | |||
404 | ps = l2tp_session_priv(session); | ||
405 | sk_tun = ps->tunnel_sock; | ||
406 | if (sk_tun == NULL) | ||
407 | goto abort_put_sess; | ||
408 | tunnel = l2tp_sock_to_tunnel(sk_tun); | ||
409 | if (tunnel == NULL) | ||
410 | goto abort_put_sess; | ||
411 | |||
412 | old_headroom = skb_headroom(skb); | ||
413 | if (skb_cow_head(skb, sizeof(ppph))) | ||
414 | goto abort_put_sess_tun; | ||
415 | |||
416 | new_headroom = skb_headroom(skb); | ||
417 | skb->truesize += new_headroom - old_headroom; | ||
418 | |||
419 | /* Setup PPP header */ | ||
420 | __skb_push(skb, sizeof(ppph)); | ||
421 | skb->data[0] = ppph[0]; | ||
422 | skb->data[1] = ppph[1]; | ||
423 | |||
424 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
425 | |||
426 | sock_put(sk_tun); | ||
427 | sock_put(sk); | ||
428 | return 1; | ||
429 | |||
430 | abort_put_sess_tun: | ||
431 | sock_put(sk_tun); | ||
432 | abort_put_sess: | ||
433 | sock_put(sk); | ||
434 | abort: | ||
435 | /* Free the original skb */ | ||
436 | kfree_skb(skb); | ||
437 | return 1; | ||
438 | } | ||
439 | |||
440 | /***************************************************************************** | ||
441 | * Session (and tunnel control) socket create/destroy. | ||
442 | *****************************************************************************/ | ||
443 | |||
444 | /* Called by l2tp_core when a session socket is being closed. | ||
445 | */ | ||
446 | static void pppol2tp_session_close(struct l2tp_session *session) | ||
447 | { | ||
448 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
449 | struct sock *sk = ps->sock; | ||
450 | struct sk_buff *skb; | ||
451 | |||
452 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
453 | |||
454 | if (session->session_id == 0) | ||
455 | goto out; | ||
456 | |||
457 | if (sk != NULL) { | ||
458 | lock_sock(sk); | ||
459 | |||
460 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
461 | pppox_unbind_sock(sk); | ||
462 | sk->sk_state = PPPOX_DEAD; | ||
463 | sk->sk_state_change(sk); | ||
464 | } | ||
465 | |||
466 | /* Purge any queued data */ | ||
467 | skb_queue_purge(&sk->sk_receive_queue); | ||
468 | skb_queue_purge(&sk->sk_write_queue); | ||
469 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
470 | kfree_skb(skb); | ||
471 | sock_put(sk); | ||
472 | } | ||
473 | |||
474 | release_sock(sk); | ||
475 | } | ||
476 | |||
477 | out: | ||
478 | return; | ||
479 | } | ||
480 | |||
481 | /* Really kill the session socket. (Called from sock_put() if | ||
482 | * refcnt == 0.) | ||
483 | */ | ||
484 | static void pppol2tp_session_destruct(struct sock *sk) | ||
485 | { | ||
486 | struct l2tp_session *session; | ||
487 | |||
488 | if (sk->sk_user_data != NULL) { | ||
489 | session = sk->sk_user_data; | ||
490 | if (session == NULL) | ||
491 | goto out; | ||
492 | |||
493 | sk->sk_user_data = NULL; | ||
494 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
495 | l2tp_session_dec_refcount(session); | ||
496 | } | ||
497 | |||
498 | out: | ||
499 | return; | ||
500 | } | ||
501 | |||
502 | /* Called when the PPPoX socket (session) is closed. | ||
503 | */ | ||
504 | static int pppol2tp_release(struct socket *sock) | ||
505 | { | ||
506 | struct sock *sk = sock->sk; | ||
507 | struct l2tp_session *session; | ||
508 | int error; | ||
509 | |||
510 | if (!sk) | ||
511 | return 0; | ||
512 | |||
513 | error = -EBADF; | ||
514 | lock_sock(sk); | ||
515 | if (sock_flag(sk, SOCK_DEAD) != 0) | ||
516 | goto error; | ||
517 | |||
518 | pppox_unbind_sock(sk); | ||
519 | |||
520 | /* Signal the death of the socket. */ | ||
521 | sk->sk_state = PPPOX_DEAD; | ||
522 | sock_orphan(sk); | ||
523 | sock->sk = NULL; | ||
524 | |||
525 | session = pppol2tp_sock_to_session(sk); | ||
526 | |||
527 | /* Purge any queued data */ | ||
528 | skb_queue_purge(&sk->sk_receive_queue); | ||
529 | skb_queue_purge(&sk->sk_write_queue); | ||
530 | if (session != NULL) { | ||
531 | struct sk_buff *skb; | ||
532 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
533 | kfree_skb(skb); | ||
534 | sock_put(sk); | ||
535 | } | ||
536 | sock_put(sk); | ||
537 | } | ||
538 | |||
539 | release_sock(sk); | ||
540 | |||
541 | /* This will delete the session context via | ||
542 | * pppol2tp_session_destruct() if the socket's refcnt drops to | ||
543 | * zero. | ||
544 | */ | ||
545 | sock_put(sk); | ||
546 | |||
547 | return 0; | ||
548 | |||
549 | error: | ||
550 | release_sock(sk); | ||
551 | return error; | ||
552 | } | ||
553 | |||
554 | static struct proto pppol2tp_sk_proto = { | ||
555 | .name = "PPPOL2TP", | ||
556 | .owner = THIS_MODULE, | ||
557 | .obj_size = sizeof(struct pppox_sock), | ||
558 | }; | ||
559 | |||
560 | static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
561 | { | ||
562 | int rc; | ||
563 | |||
564 | rc = l2tp_udp_encap_recv(sk, skb); | ||
565 | if (rc) | ||
566 | kfree_skb(skb); | ||
567 | |||
568 | return NET_RX_SUCCESS; | ||
569 | } | ||
570 | |||
571 | /* socket() handler. Initialize a new struct sock. | ||
572 | */ | ||
573 | static int pppol2tp_create(struct net *net, struct socket *sock) | ||
574 | { | ||
575 | int error = -ENOMEM; | ||
576 | struct sock *sk; | ||
577 | |||
578 | sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto); | ||
579 | if (!sk) | ||
580 | goto out; | ||
581 | |||
582 | sock_init_data(sock, sk); | ||
583 | |||
584 | sock->state = SS_UNCONNECTED; | ||
585 | sock->ops = &pppol2tp_ops; | ||
586 | |||
587 | sk->sk_backlog_rcv = pppol2tp_backlog_recv; | ||
588 | sk->sk_protocol = PX_PROTO_OL2TP; | ||
589 | sk->sk_family = PF_PPPOX; | ||
590 | sk->sk_state = PPPOX_NONE; | ||
591 | sk->sk_type = SOCK_STREAM; | ||
592 | sk->sk_destruct = pppol2tp_session_destruct; | ||
593 | |||
594 | error = 0; | ||
595 | |||
596 | out: | ||
597 | return error; | ||
598 | } | ||
599 | |||
600 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
601 | static void pppol2tp_show(struct seq_file *m, void *arg) | ||
602 | { | ||
603 | struct l2tp_session *session = arg; | ||
604 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
605 | |||
606 | if (ps) { | ||
607 | struct pppox_sock *po = pppox_sk(ps->sock); | ||
608 | if (po) | ||
609 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | ||
610 | } | ||
611 | } | ||
612 | #endif | ||
613 | |||
614 | /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket | ||
615 | */ | ||
616 | static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | ||
617 | int sockaddr_len, int flags) | ||
618 | { | ||
619 | struct sock *sk = sock->sk; | ||
620 | struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; | ||
621 | struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr; | ||
622 | struct pppox_sock *po = pppox_sk(sk); | ||
623 | struct l2tp_session *session = NULL; | ||
624 | struct l2tp_tunnel *tunnel; | ||
625 | struct pppol2tp_session *ps; | ||
626 | struct dst_entry *dst; | ||
627 | struct l2tp_session_cfg cfg = { 0, }; | ||
628 | int error = 0; | ||
629 | u32 tunnel_id, peer_tunnel_id; | ||
630 | u32 session_id, peer_session_id; | ||
631 | int ver = 2; | ||
632 | int fd; | ||
633 | |||
634 | lock_sock(sk); | ||
635 | |||
636 | error = -EINVAL; | ||
637 | if (sp->sa_protocol != PX_PROTO_OL2TP) | ||
638 | goto end; | ||
639 | |||
640 | /* Check for already bound sockets */ | ||
641 | error = -EBUSY; | ||
642 | if (sk->sk_state & PPPOX_CONNECTED) | ||
643 | goto end; | ||
644 | |||
645 | /* We don't support rebinding anyway */ | ||
646 | error = -EALREADY; | ||
647 | if (sk->sk_user_data) | ||
648 | goto end; /* socket is already attached */ | ||
649 | |||
650 | /* Get params from socket address. Handle L2TPv2 and L2TPv3 */ | ||
651 | if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { | ||
652 | fd = sp->pppol2tp.fd; | ||
653 | tunnel_id = sp->pppol2tp.s_tunnel; | ||
654 | peer_tunnel_id = sp->pppol2tp.d_tunnel; | ||
655 | session_id = sp->pppol2tp.s_session; | ||
656 | peer_session_id = sp->pppol2tp.d_session; | ||
657 | } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { | ||
658 | ver = 3; | ||
659 | fd = sp3->pppol2tp.fd; | ||
660 | tunnel_id = sp3->pppol2tp.s_tunnel; | ||
661 | peer_tunnel_id = sp3->pppol2tp.d_tunnel; | ||
662 | session_id = sp3->pppol2tp.s_session; | ||
663 | peer_session_id = sp3->pppol2tp.d_session; | ||
664 | } else { | ||
665 | error = -EINVAL; | ||
666 | goto end; /* bad socket address */ | ||
667 | } | ||
668 | |||
669 | /* Don't bind if tunnel_id is 0 */ | ||
670 | error = -EINVAL; | ||
671 | if (tunnel_id == 0) | ||
672 | goto end; | ||
673 | |||
674 | tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id); | ||
675 | |||
676 | /* Special case: create tunnel context if session_id and | ||
677 | * peer_session_id are both 0. Otherwise look up the tunnel | ||
678 | * using the supplied tunnel id. | ||
679 | */ | ||
680 | if ((session_id == 0) && (peer_session_id == 0)) { | ||
681 | if (tunnel == NULL) { | ||
682 | struct l2tp_tunnel_cfg tcfg = { | ||
683 | .encap = L2TP_ENCAPTYPE_UDP, | ||
684 | .debug = 0, | ||
685 | }; | ||
686 | error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); | ||
687 | if (error < 0) | ||
688 | goto end; | ||
689 | } | ||
690 | } else { | ||
691 | /* Error if we can't find the tunnel */ | ||
692 | error = -ENOENT; | ||
693 | if (tunnel == NULL) | ||
694 | goto end; | ||
695 | |||
696 | /* Error if socket is not prepped */ | ||
697 | if (tunnel->sock == NULL) | ||
698 | goto end; | ||
699 | } | ||
700 | |||
701 | if (tunnel->recv_payload_hook == NULL) | ||
702 | tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; | ||
703 | |||
704 | if (tunnel->peer_tunnel_id == 0) { | ||
705 | if (ver == 2) | ||
706 | tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel; | ||
707 | else | ||
708 | tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel; | ||
709 | } | ||
710 | |||
711 | /* Create session if it doesn't already exist. We handle the | ||
712 | * case where a session was previously created by the netlink | ||
713 | * interface by checking that the session doesn't already have | ||
714 | * a socket and that its tunnel socket is the one we expect. If | ||
715 | * either check fails, return EEXIST to the caller. | ||
716 | */ | ||
717 | session = l2tp_session_find(sock_net(sk), tunnel, session_id); | ||
718 | if (session == NULL) { | ||
719 | /* Default MTU must allow space for UDP/L2TP/PPP | ||
720 | * headers. | ||
721 | */ | ||
722 | cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; | ||
723 | |||
724 | /* Allocate and initialize a new session context. */ | ||
725 | session = l2tp_session_create(sizeof(struct pppol2tp_session), | ||
726 | tunnel, session_id, | ||
727 | peer_session_id, &cfg); | ||
728 | if (session == NULL) { | ||
729 | error = -ENOMEM; | ||
730 | goto end; | ||
731 | } | ||
732 | } else { | ||
733 | ps = l2tp_session_priv(session); | ||
734 | error = -EEXIST; | ||
735 | if (ps->sock != NULL) | ||
736 | goto end; | ||
737 | |||
738 | /* consistency checks */ | ||
739 | if (ps->tunnel_sock != tunnel->sock) | ||
740 | goto end; | ||
741 | } | ||
742 | |||
743 | /* Associate session with its PPPoL2TP socket */ | ||
744 | ps = l2tp_session_priv(session); | ||
745 | ps->owner = current->pid; | ||
746 | ps->sock = sk; | ||
747 | ps->tunnel_sock = tunnel->sock; | ||
748 | |||
749 | session->recv_skb = pppol2tp_recv; | ||
750 | session->session_close = pppol2tp_session_close; | ||
751 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
752 | session->show = pppol2tp_show; | ||
753 | #endif | ||
754 | |||
755 | /* We need to know each time a skb is dropped from the reorder | ||
756 | * queue. | ||
757 | */ | ||
758 | session->ref = pppol2tp_session_sock_hold; | ||
759 | session->deref = pppol2tp_session_sock_put; | ||
760 | |||
761 | /* If PMTU discovery was enabled, use the MTU that was discovered */ | ||
762 | dst = sk_dst_get(sk); | ||
763 | if (dst != NULL) { | ||
764 | u32 pmtu = dst_mtu(__sk_dst_get(sk)); | ||
765 | if (pmtu != 0) | ||
766 | session->mtu = session->mru = pmtu - | ||
767 | PPPOL2TP_HEADER_OVERHEAD; | ||
768 | dst_release(dst); | ||
769 | } | ||
770 | |||
771 | /* Special case: if source & dest session_id == 0x0000, this | ||
772 | * socket is being created to manage the tunnel. Just set up | ||
773 | * the internal context for use by ioctl() and sockopt() | ||
774 | * handlers. | ||
775 | */ | ||
776 | if ((session->session_id == 0) && | ||
777 | (session->peer_session_id == 0)) { | ||
778 | error = 0; | ||
779 | goto out_no_ppp; | ||
780 | } | ||
781 | |||
782 | /* The only header we need to worry about is the L2TP | ||
783 | * header. This size is different depending on whether | ||
784 | * sequence numbers are enabled for the data channel. | ||
785 | */ | ||
786 | po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; | ||
787 | |||
788 | po->chan.private = sk; | ||
789 | po->chan.ops = &pppol2tp_chan_ops; | ||
790 | po->chan.mtu = session->mtu; | ||
791 | |||
792 | error = ppp_register_net_channel(sock_net(sk), &po->chan); | ||
793 | if (error) | ||
794 | goto end; | ||
795 | |||
796 | out_no_ppp: | ||
797 | /* This is how we get the session context from the socket. */ | ||
798 | sk->sk_user_data = session; | ||
799 | sk->sk_state = PPPOX_CONNECTED; | ||
800 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
801 | "%s: created\n", session->name); | ||
802 | |||
803 | end: | ||
804 | release_sock(sk); | ||
805 | |||
806 | return error; | ||
807 | } | ||
808 | |||
809 | #ifdef CONFIG_L2TP_V3 | ||
810 | |||
811 | /* Called when creating sessions via the netlink interface. | ||
812 | */ | ||
813 | static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
814 | { | ||
815 | int error; | ||
816 | struct l2tp_tunnel *tunnel; | ||
817 | struct l2tp_session *session; | ||
818 | struct pppol2tp_session *ps; | ||
819 | |||
820 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
821 | |||
822 | /* Error if we can't find the tunnel */ | ||
823 | error = -ENOENT; | ||
824 | if (tunnel == NULL) | ||
825 | goto out; | ||
826 | |||
827 | /* Error if tunnel socket is not prepped */ | ||
828 | if (tunnel->sock == NULL) | ||
829 | goto out; | ||
830 | |||
831 | /* Check that this session doesn't already exist */ | ||
832 | error = -EEXIST; | ||
833 | session = l2tp_session_find(net, tunnel, session_id); | ||
834 | if (session != NULL) | ||
835 | goto out; | ||
836 | |||
837 | /* Default MTU values. */ | ||
838 | if (cfg->mtu == 0) | ||
839 | cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; | ||
840 | if (cfg->mru == 0) | ||
841 | cfg->mru = cfg->mtu; | ||
842 | |||
843 | /* Allocate and initialize a new session context. */ | ||
844 | error = -ENOMEM; | ||
845 | session = l2tp_session_create(sizeof(struct pppol2tp_session), | ||
846 | tunnel, session_id, | ||
847 | peer_session_id, cfg); | ||
848 | if (session == NULL) | ||
849 | goto out; | ||
850 | |||
851 | ps = l2tp_session_priv(session); | ||
852 | ps->tunnel_sock = tunnel->sock; | ||
853 | |||
854 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
855 | "%s: created\n", session->name); | ||
856 | |||
857 | error = 0; | ||
858 | |||
859 | out: | ||
860 | return error; | ||
861 | } | ||
862 | |||
863 | /* Called when deleting sessions via the netlink interface. | ||
864 | */ | ||
865 | static int pppol2tp_session_delete(struct l2tp_session *session) | ||
866 | { | ||
867 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
868 | |||
869 | if (ps->sock == NULL) | ||
870 | l2tp_session_dec_refcount(session); | ||
871 | |||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | #endif /* CONFIG_L2TP_V3 */ | ||
876 | |||
877 | /* getname() support. | ||
878 | */ | ||
879 | static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, | ||
880 | int *usockaddr_len, int peer) | ||
881 | { | ||
882 | int len = 0; | ||
883 | int error = 0; | ||
884 | struct l2tp_session *session; | ||
885 | struct l2tp_tunnel *tunnel; | ||
886 | struct sock *sk = sock->sk; | ||
887 | struct inet_sock *inet; | ||
888 | struct pppol2tp_session *pls; | ||
889 | |||
890 | error = -ENOTCONN; | ||
891 | if (sk == NULL) | ||
892 | goto end; | ||
893 | if (sk->sk_state != PPPOX_CONNECTED) | ||
894 | goto end; | ||
895 | |||
896 | error = -EBADF; | ||
897 | session = pppol2tp_sock_to_session(sk); | ||
898 | if (session == NULL) | ||
899 | goto end; | ||
900 | |||
901 | pls = l2tp_session_priv(session); | ||
902 | tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock); | ||
903 | if (tunnel == NULL) { | ||
904 | error = -EBADF; | ||
905 | goto end_put_sess; | ||
906 | } | ||
907 | |||
908 | inet = inet_sk(sk); | ||
909 | if (tunnel->version == 2) { | ||
910 | struct sockaddr_pppol2tp sp; | ||
911 | len = sizeof(sp); | ||
912 | memset(&sp, 0, len); | ||
913 | sp.sa_family = AF_PPPOX; | ||
914 | sp.sa_protocol = PX_PROTO_OL2TP; | ||
915 | sp.pppol2tp.fd = tunnel->fd; | ||
916 | sp.pppol2tp.pid = pls->owner; | ||
917 | sp.pppol2tp.s_tunnel = tunnel->tunnel_id; | ||
918 | sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; | ||
919 | sp.pppol2tp.s_session = session->session_id; | ||
920 | sp.pppol2tp.d_session = session->peer_session_id; | ||
921 | sp.pppol2tp.addr.sin_family = AF_INET; | ||
922 | sp.pppol2tp.addr.sin_port = inet->inet_dport; | ||
923 | sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; | ||
924 | memcpy(uaddr, &sp, len); | ||
925 | } else if (tunnel->version == 3) { | ||
926 | struct sockaddr_pppol2tpv3 sp; | ||
927 | len = sizeof(sp); | ||
928 | memset(&sp, 0, len); | ||
929 | sp.sa_family = AF_PPPOX; | ||
930 | sp.sa_protocol = PX_PROTO_OL2TP; | ||
931 | sp.pppol2tp.fd = tunnel->fd; | ||
932 | sp.pppol2tp.pid = pls->owner; | ||
933 | sp.pppol2tp.s_tunnel = tunnel->tunnel_id; | ||
934 | sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; | ||
935 | sp.pppol2tp.s_session = session->session_id; | ||
936 | sp.pppol2tp.d_session = session->peer_session_id; | ||
937 | sp.pppol2tp.addr.sin_family = AF_INET; | ||
938 | sp.pppol2tp.addr.sin_port = inet->inet_dport; | ||
939 | sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; | ||
940 | memcpy(uaddr, &sp, len); | ||
941 | } | ||
942 | |||
943 | *usockaddr_len = len; | ||
944 | error = 0; | ||
945 | sock_put(pls->tunnel_sock); | ||
946 | end_put_sess: | ||
947 | sock_put(sk); | ||
948 | |||
949 | |||
950 | end: | ||
951 | return error; | ||
952 | } | ||
953 | |||
954 | /**************************************************************************** | ||
955 | * ioctl() handlers. | ||
956 | * | ||
957 | * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP | ||
958 | * sockets. However, in order to control kernel tunnel features, we allow | ||
959 | * userspace to create a special "tunnel" PPPoX socket which is used for | ||
960 | * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow | ||
961 | * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() | ||
962 | * calls. | ||
963 | ****************************************************************************/ | ||
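/* A sketch of driving these handlers from userspace (mgmt_fd is a PPPoX
 * socket connected with s_session == d_session == 0, as described above;
 * error handling omitted):
 *
 *	struct pppol2tp_ioc_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));	// session_id 0 = tunnel stats
 *	if (ioctl(mgmt_fd, PPPIOCGL2TPSTATS, &stats) == 0)
 *		printf("tx %llu rx %llu\n",
 *		       (unsigned long long) stats.tx_packets,
 *		       (unsigned long long) stats.rx_packets);
 */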
964 | |||
965 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, | ||
966 | struct l2tp_stats *stats) | ||
967 | { | ||
968 | dest->tx_packets = stats->tx_packets; | ||
969 | dest->tx_bytes = stats->tx_bytes; | ||
970 | dest->tx_errors = stats->tx_errors; | ||
971 | dest->rx_packets = stats->rx_packets; | ||
972 | dest->rx_bytes = stats->rx_bytes; | ||
973 | dest->rx_seq_discards = stats->rx_seq_discards; | ||
974 | dest->rx_oos_packets = stats->rx_oos_packets; | ||
975 | dest->rx_errors = stats->rx_errors; | ||
976 | } | ||
977 | |||
978 | /* Session ioctl helper. | ||
979 | */ | ||
980 | static int pppol2tp_session_ioctl(struct l2tp_session *session, | ||
981 | unsigned int cmd, unsigned long arg) | ||
982 | { | ||
983 | struct ifreq ifr; | ||
984 | int err = 0; | ||
985 | struct sock *sk; | ||
986 | int val = (int) arg; | ||
987 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
988 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
989 | struct pppol2tp_ioc_stats stats; | ||
990 | |||
991 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, | ||
992 | "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", | ||
993 | session->name, cmd, arg); | ||
994 | |||
995 | sk = ps->sock; | ||
996 | sock_hold(sk); | ||
997 | |||
998 | switch (cmd) { | ||
999 | case SIOCGIFMTU: | ||
1000 | err = -ENXIO; | ||
1001 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1002 | break; | ||
1003 | |||
1004 | err = -EFAULT; | ||
1005 | if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) | ||
1006 | break; | ||
1007 | ifr.ifr_mtu = session->mtu; | ||
1008 | if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) | ||
1009 | break; | ||
1010 | |||
1011 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1012 | "%s: get mtu=%d\n", session->name, session->mtu); | ||
1013 | err = 0; | ||
1014 | break; | ||
1015 | |||
1016 | case SIOCSIFMTU: | ||
1017 | err = -ENXIO; | ||
1018 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1019 | break; | ||
1020 | |||
1021 | err = -EFAULT; | ||
1022 | if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) | ||
1023 | break; | ||
1024 | |||
1025 | session->mtu = ifr.ifr_mtu; | ||
1026 | |||
1027 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1028 | "%s: set mtu=%d\n", session->name, session->mtu); | ||
1029 | err = 0; | ||
1030 | break; | ||
1031 | |||
1032 | case PPPIOCGMRU: | ||
1033 | err = -ENXIO; | ||
1034 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1035 | break; | ||
1036 | |||
1037 | err = -EFAULT; | ||
1038 | if (put_user(session->mru, (int __user *) arg)) | ||
1039 | break; | ||
1040 | |||
1041 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1042 | "%s: get mru=%d\n", session->name, session->mru); | ||
1043 | err = 0; | ||
1044 | break; | ||
1045 | |||
1046 | case PPPIOCSMRU: | ||
1047 | err = -ENXIO; | ||
1048 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1049 | break; | ||
1050 | |||
1051 | err = -EFAULT; | ||
1052 | if (get_user(val, (int __user *) arg)) | ||
1053 | break; | ||
1054 | |||
1055 | session->mru = val; | ||
1056 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1057 | "%s: set mru=%d\n", session->name, session->mru); | ||
1058 | err = 0; | ||
1059 | break; | ||
1060 | |||
1061 | case PPPIOCGFLAGS: | ||
1062 | err = -EFAULT; | ||
1063 | if (put_user(ps->flags, (int __user *) arg)) | ||
1064 | break; | ||
1065 | |||
1066 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1067 | "%s: get flags=%d\n", session->name, ps->flags); | ||
1068 | err = 0; | ||
1069 | break; | ||
1070 | |||
1071 | case PPPIOCSFLAGS: | ||
1072 | err = -EFAULT; | ||
1073 | if (get_user(val, (int __user *) arg)) | ||
1074 | break; | ||
1075 | ps->flags = val; | ||
1076 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1077 | "%s: set flags=%d\n", session->name, ps->flags); | ||
1078 | err = 0; | ||
1079 | break; | ||
1080 | |||
1081 | case PPPIOCGL2TPSTATS: | ||
1082 | err = -ENXIO; | ||
1083 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1084 | break; | ||
1085 | |||
1086 | memset(&stats, 0, sizeof(stats)); | ||
1087 | stats.tunnel_id = tunnel->tunnel_id; | ||
1088 | stats.session_id = session->session_id; | ||
1089 | pppol2tp_copy_stats(&stats, &session->stats); | ||
1090 | if (copy_to_user((void __user *) arg, &stats, | ||
1091 | sizeof(stats))) | ||
1092 | break; | ||
1093 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1094 | "%s: get L2TP stats\n", session->name); | ||
1095 | err = 0; | ||
1096 | break; | ||
1097 | |||
1098 | default: | ||
1099 | err = -ENOSYS; | ||
1100 | break; | ||
1101 | } | ||
1102 | |||
1103 | sock_put(sk); | ||
1104 | |||
1105 | return err; | ||
1106 | } | ||
1107 | |||
1108 | /* Tunnel ioctl helper. | ||
1109 | * | ||
1110 | * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data | ||
1111 | * specifies a session_id, the session ioctl handler is called. This allows an | ||
1112 | * application to retrieve session stats via a tunnel socket. | ||
1113 | */ | ||
1114 | static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, | ||
1115 | unsigned int cmd, unsigned long arg) | ||
1116 | { | ||
1117 | int err = 0; | ||
1118 | struct sock *sk; | ||
1119 | struct pppol2tp_ioc_stats stats; | ||
1120 | |||
1121 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, | ||
1122 | "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", | ||
1123 | tunnel->name, cmd, arg); | ||
1124 | |||
1125 | sk = tunnel->sock; | ||
1126 | sock_hold(sk); | ||
1127 | |||
1128 | switch (cmd) { | ||
1129 | case PPPIOCGL2TPSTATS: | ||
1130 | err = -ENXIO; | ||
1131 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1132 | break; | ||
1133 | |||
1134 | if (copy_from_user(&stats, (void __user *) arg, | ||
1135 | sizeof(stats))) { | ||
1136 | err = -EFAULT; | ||
1137 | break; | ||
1138 | } | ||
1139 | if (stats.session_id != 0) { | ||
1140 | /* resend to session ioctl handler */ | ||
1141 | struct l2tp_session *session = | ||
1142 | l2tp_session_find(sock_net(sk), tunnel, stats.session_id); | ||
1143 | if (session != NULL) | ||
1144 | err = pppol2tp_session_ioctl(session, cmd, arg); | ||
1145 | else | ||
1146 | err = -EBADR; | ||
1147 | break; | ||
1148 | } | ||
1149 | #ifdef CONFIG_XFRM | ||
1150 | stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0; | ||
1151 | #endif | ||
1152 | pppol2tp_copy_stats(&stats, &tunnel->stats); | ||
1153 | if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) { | ||
1154 | err = -EFAULT; | ||
1155 | break; | ||
1156 | } | ||
1157 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1158 | "%s: get L2TP stats\n", tunnel->name); | ||
1159 | err = 0; | ||
1160 | break; | ||
1161 | |||
1162 | default: | ||
1163 | err = -ENOSYS; | ||
1164 | break; | ||
1165 | } | ||
1166 | |||
1167 | sock_put(sk); | ||
1168 | |||
1169 | return err; | ||
1170 | } | ||
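/* A sketch of the redirect described above: a non-zero session_id in the
 * ioctl data returns that session's counters through the same tunnel
 * management socket (mgmt_fd as in the earlier sketch):
 *
 *	struct pppol2tp_ioc_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	stats.session_id = session_id;	// non-zero: per-session stats
 *	ioctl(mgmt_fd, PPPIOCGL2TPSTATS, &stats);
 */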
1171 | |||
1172 | /* Main ioctl() handler. | ||
1173 | * Dispatch to tunnel or session helpers depending on the socket. | ||
1174 | */ | ||
1175 | static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, | ||
1176 | unsigned long arg) | ||
1177 | { | ||
1178 | struct sock *sk = sock->sk; | ||
1179 | struct l2tp_session *session; | ||
1180 | struct l2tp_tunnel *tunnel; | ||
1181 | struct pppol2tp_session *ps; | ||
1182 | int err; | ||
1183 | |||
1184 | if (!sk) | ||
1185 | return 0; | ||
1186 | |||
1187 | err = -EBADF; | ||
1188 | if (sock_flag(sk, SOCK_DEAD) != 0) | ||
1189 | goto end; | ||
1190 | |||
1191 | err = -ENOTCONN; | ||
1192 | if ((sk->sk_user_data == NULL) || | ||
1193 | (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)))) | ||
1194 | goto end; | ||
1195 | |||
1196 | /* Get session context from the socket */ | ||
1197 | err = -EBADF; | ||
1198 | session = pppol2tp_sock_to_session(sk); | ||
1199 | if (session == NULL) | ||
1200 | goto end; | ||
1201 | |||
1202 | /* Special case: if the session's session_id is zero, treat the | ||
1203 | * ioctl as a tunnel ioctl | ||
1204 | */ | ||
1205 | ps = l2tp_session_priv(session); | ||
1206 | if ((session->session_id == 0) && | ||
1207 | (session->peer_session_id == 0)) { | ||
1208 | err = -EBADF; | ||
1209 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1210 | if (tunnel == NULL) | ||
1211 | goto end_put_sess; | ||
1212 | |||
1213 | err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); | ||
1214 | sock_put(ps->tunnel_sock); | ||
1215 | goto end_put_sess; | ||
1216 | } | ||
1217 | |||
1218 | err = pppol2tp_session_ioctl(session, cmd, arg); | ||
1219 | |||
1220 | end_put_sess: | ||
1221 | sock_put(sk); | ||
1222 | end: | ||
1223 | return err; | ||
1224 | } | ||
1225 | |||
1226 | /***************************************************************************** | ||
1227 | * setsockopt() / getsockopt() support. | ||
1228 | * | ||
1229 | * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP | ||
1230 | * sockets. In order to control kernel tunnel features, we allow userspace to | ||
1231 | * create a special "tunnel" PPPoX socket which is used for control only. | ||
1232 | * Tunnel PPPoX sockets have session_id == 0 and simply allow the user | ||
1233 | * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls. | ||
1234 | *****************************************************************************/ | ||
1235 | |||
1236 | /* Tunnel setsockopt() helper. | ||
1237 | */ | ||
1238 | static int pppol2tp_tunnel_setsockopt(struct sock *sk, | ||
1239 | struct l2tp_tunnel *tunnel, | ||
1240 | int optname, int val) | ||
1241 | { | ||
1242 | int err = 0; | ||
1243 | |||
1244 | switch (optname) { | ||
1245 | case PPPOL2TP_SO_DEBUG: | ||
1246 | tunnel->debug = val; | ||
1247 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1248 | "%s: set debug=%x\n", tunnel->name, tunnel->debug); | ||
1249 | break; | ||
1250 | |||
1251 | default: | ||
1252 | err = -ENOPROTOOPT; | ||
1253 | break; | ||
1254 | } | ||
1255 | |||
1256 | return err; | ||
1257 | } | ||
1258 | |||
1259 | /* Session setsockopt helper. | ||
1260 | */ | ||
1261 | static int pppol2tp_session_setsockopt(struct sock *sk, | ||
1262 | struct l2tp_session *session, | ||
1263 | int optname, int val) | ||
1264 | { | ||
1265 | int err = 0; | ||
1266 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
1267 | |||
1268 | switch (optname) { | ||
1269 | case PPPOL2TP_SO_RECVSEQ: | ||
1270 | if ((val != 0) && (val != 1)) { | ||
1271 | err = -EINVAL; | ||
1272 | break; | ||
1273 | } | ||
1274 | session->recv_seq = val ? -1 : 0; | ||
1275 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1276 | "%s: set recv_seq=%d\n", session->name, session->recv_seq); | ||
1277 | break; | ||
1278 | |||
1279 | case PPPOL2TP_SO_SENDSEQ: | ||
1280 | if ((val != 0) && (val != 1)) { | ||
1281 | err = -EINVAL; | ||
1282 | break; | ||
1283 | } | ||
1284 | session->send_seq = val ? -1 : 0; | ||
1285 | { | ||
1286 | struct sock *ssk = ps->sock; | ||
1287 | struct pppox_sock *po = pppox_sk(ssk); | ||
1288 | po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : | ||
1289 | PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; | ||
1290 | } | ||
1291 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1292 | "%s: set send_seq=%d\n", session->name, session->send_seq); | ||
1293 | break; | ||
1294 | |||
1295 | case PPPOL2TP_SO_LNSMODE: | ||
1296 | if ((val != 0) && (val != 1)) { | ||
1297 | err = -EINVAL; | ||
1298 | break; | ||
1299 | } | ||
1300 | session->lns_mode = val ? -1 : 0; | ||
1301 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1302 | "%s: set lns_mode=%d\n", session->name, session->lns_mode); | ||
1303 | break; | ||
1304 | |||
1305 | case PPPOL2TP_SO_DEBUG: | ||
1306 | session->debug = val; | ||
1307 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1308 | "%s: set debug=%x\n", session->name, session->debug); | ||
1309 | break; | ||
1310 | |||
1311 | case PPPOL2TP_SO_REORDERTO: | ||
1312 | session->reorder_timeout = msecs_to_jiffies(val); | ||
1313 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1314 | "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); | ||
1315 | break; | ||
1316 | |||
1317 | default: | ||
1318 | err = -ENOPROTOOPT; | ||
1319 | break; | ||
1320 | } | ||
1321 | |||
1322 | return err; | ||
1323 | } | ||
1324 | |||
1325 | /* Main setsockopt() entry point. | ||
1326 | * Does API checks, then calls either the tunnel or session setsockopt | ||
1327 | * handler, according to whether the PPPoL2TP socket is for a regular | ||
1328 | * session or the special tunnel type. | ||
1329 | */ | ||
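/* e.g., from userspace, enabling transmit sequence numbers on a
 * connected session socket (a sketch):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ, &on, sizeof(on));
 */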
1330 | static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, | ||
1331 | char __user *optval, unsigned int optlen) | ||
1332 | { | ||
1333 | struct sock *sk = sock->sk; | ||
1334 | struct l2tp_session *session; | ||
1335 | struct l2tp_tunnel *tunnel; | ||
1336 | struct pppol2tp_session *ps; | ||
1337 | int val; | ||
1338 | int err; | ||
1339 | |||
1340 | if (level != SOL_PPPOL2TP) | ||
1341 | return udp_prot.setsockopt(sk, level, optname, optval, optlen); | ||
1342 | |||
1343 | if (optlen < sizeof(int)) | ||
1344 | return -EINVAL; | ||
1345 | |||
1346 | if (get_user(val, (int __user *)optval)) | ||
1347 | return -EFAULT; | ||
1348 | |||
1349 | err = -ENOTCONN; | ||
1350 | if (sk->sk_user_data == NULL) | ||
1351 | goto end; | ||
1352 | |||
1353 | /* Get session context from the socket */ | ||
1354 | err = -EBADF; | ||
1355 | session = pppol2tp_sock_to_session(sk); | ||
1356 | if (session == NULL) | ||
1357 | goto end; | ||
1358 | |||
1359 | /* Special case: if session_id == 0x0000, treat as operation on tunnel | ||
1360 | */ | ||
1361 | ps = l2tp_session_priv(session); | ||
1362 | if ((session->session_id == 0) && | ||
1363 | (session->peer_session_id == 0)) { | ||
1364 | err = -EBADF; | ||
1365 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1366 | if (tunnel == NULL) | ||
1367 | goto end_put_sess; | ||
1368 | |||
1369 | err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); | ||
1370 | sock_put(ps->tunnel_sock); | ||
1371 | } else | ||
1372 | err = pppol2tp_session_setsockopt(sk, session, optname, val); | ||
1373 | |||
1374 | |||
1375 | |||
1376 | end_put_sess: | ||
1377 | sock_put(sk); | ||
1378 | end: | ||
1379 | return err; | ||
1380 | } | ||
1381 | |||
1382 | /* Tunnel getsockopt helper. Called with sock locked. | ||
1383 | */ | ||
1384 | static int pppol2tp_tunnel_getsockopt(struct sock *sk, | ||
1385 | struct l2tp_tunnel *tunnel, | ||
1386 | int optname, int *val) | ||
1387 | { | ||
1388 | int err = 0; | ||
1389 | |||
1390 | switch (optname) { | ||
1391 | case PPPOL2TP_SO_DEBUG: | ||
1392 | *val = tunnel->debug; | ||
1393 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1394 | "%s: get debug=%x\n", tunnel->name, tunnel->debug); | ||
1395 | break; | ||
1396 | |||
1397 | default: | ||
1398 | err = -ENOPROTOOPT; | ||
1399 | break; | ||
1400 | } | ||
1401 | |||
1402 | return err; | ||
1403 | } | ||
1404 | |||
1405 | /* Session getsockopt helper. Called with sock locked. | ||
1406 | */ | ||
1407 | static int pppol2tp_session_getsockopt(struct sock *sk, | ||
1408 | struct l2tp_session *session, | ||
1409 | int optname, int *val) | ||
1410 | { | ||
1411 | int err = 0; | ||
1412 | |||
1413 | switch (optname) { | ||
1414 | case PPPOL2TP_SO_RECVSEQ: | ||
1415 | *val = session->recv_seq; | ||
1416 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1417 | "%s: get recv_seq=%d\n", session->name, *val); | ||
1418 | break; | ||
1419 | |||
1420 | case PPPOL2TP_SO_SENDSEQ: | ||
1421 | *val = session->send_seq; | ||
1422 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1423 | "%s: get send_seq=%d\n", session->name, *val); | ||
1424 | break; | ||
1425 | |||
1426 | case PPPOL2TP_SO_LNSMODE: | ||
1427 | *val = session->lns_mode; | ||
1428 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1429 | "%s: get lns_mode=%d\n", session->name, *val); | ||
1430 | break; | ||
1431 | |||
1432 | case PPPOL2TP_SO_DEBUG: | ||
1433 | *val = session->debug; | ||
1434 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1435 | "%s: get debug=%d\n", session->name, *val); | ||
1436 | break; | ||
1437 | |||
1438 | case PPPOL2TP_SO_REORDERTO: | ||
1439 | *val = (int) jiffies_to_msecs(session->reorder_timeout); | ||
1440 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1441 | "%s: get reorder_timeout=%d\n", session->name, *val); | ||
1442 | break; | ||
1443 | |||
1444 | default: | ||
1445 | err = -ENOPROTOOPT; | ||
1446 | } | ||
1447 | |||
1448 | return err; | ||
1449 | } | ||
1450 | |||
1451 | /* Main getsockopt() entry point. | ||
1452 | * Does API checks, then calls either the tunnel or session getsockopt | ||
1453 | * handler, according to whether the PPPoX socket is for a regular session | ||
1454 | * or the special tunnel type. | ||
1455 | */ | ||
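/* e.g., the matching read-back from userspace (a sketch):
 *
 *	int dbg;
 *	socklen_t len = sizeof(dbg);
 *
 *	getsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG, &dbg, &len);
 */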
1456 | static int pppol2tp_getsockopt(struct socket *sock, int level, | ||
1457 | int optname, char __user *optval, int __user *optlen) | ||
1458 | { | ||
1459 | struct sock *sk = sock->sk; | ||
1460 | struct l2tp_session *session; | ||
1461 | struct l2tp_tunnel *tunnel; | ||
1462 | int val, len; | ||
1463 | int err; | ||
1464 | struct pppol2tp_session *ps; | ||
1465 | |||
1466 | if (level != SOL_PPPOL2TP) | ||
1467 | return udp_prot.getsockopt(sk, level, optname, optval, optlen); | ||
1468 | |||
1469 | if (get_user(len, (int __user *) optlen)) | ||
1470 | return -EFAULT; | ||
1471 | |||
1472 | len = min_t(unsigned int, len, sizeof(int)); | ||
1473 | |||
1474 | if (len < 0) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | err = -ENOTCONN; | ||
1478 | if (sk->sk_user_data == NULL) | ||
1479 | goto end; | ||
1480 | |||
1481 | /* Get the session context */ | ||
1482 | err = -EBADF; | ||
1483 | session = pppol2tp_sock_to_session(sk); | ||
1484 | if (session == NULL) | ||
1485 | goto end; | ||
1486 | |||
1487 | /* Special case: if session_id == 0x0000, treat as operation on tunnel */ | ||
1488 | ps = l2tp_session_priv(session); | ||
1489 | if ((session->session_id == 0) && | ||
1490 | (session->peer_session_id == 0)) { | ||
1491 | err = -EBADF; | ||
1492 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1493 | if (tunnel == NULL) | ||
1494 | goto end_put_sess; | ||
1495 | |||
1496 | err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); | ||
1497 | sock_put(ps->tunnel_sock); | ||
1498 | } else | ||
1499 | err = pppol2tp_session_getsockopt(sk, session, optname, &val); | ||
1500 | if (err) goto end_put_sess; | ||
1501 | err = -EFAULT; | ||
1502 | if (put_user(len, (int __user *) optlen)) | ||
1503 | goto end_put_sess; | ||
1504 | |||
1505 | if (copy_to_user((void __user *) optval, &val, len)) | ||
1506 | goto end_put_sess; | ||
1507 | |||
1508 | err = 0; | ||
1509 | |||
1510 | end_put_sess: | ||
1511 | sock_put(sk); | ||
1512 | end: | ||
1513 | return err; | ||
1514 | } | ||
1515 | |||
1516 | /***************************************************************************** | ||
1517 | * /proc filesystem for debug | ||
1518 | * Since the original pppol2tp driver provided /proc/net/pppol2tp for | ||
1519 | * L2TPv2, we dump only L2TPv2 tunnels and sessions here. | ||
1520 | *****************************************************************************/ | ||
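/* The format is defined by the seq_printf() calls below; output is
 * shaped roughly like (values illustrative):
 *
 *	TUNNEL 'tunl 8', Y 3
 *	 00000000 10/1440/0 12/1680/0
 *	  SESSION 'sess 8/1' ...
 */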
1521 | |||
1522 | static unsigned int pppol2tp_net_id; | ||
1523 | |||
1524 | #ifdef CONFIG_PROC_FS | ||
1525 | |||
1526 | struct pppol2tp_seq_data { | ||
1527 | struct seq_net_private p; | ||
1528 | int tunnel_idx; /* current tunnel */ | ||
1529 | int session_idx; /* index of session within current tunnel */ | ||
1530 | struct l2tp_tunnel *tunnel; | ||
1531 | struct l2tp_session *session; /* NULL means get next tunnel */ | ||
1532 | }; | ||
1533 | |||
1534 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) | ||
1535 | { | ||
1536 | for (;;) { | ||
1537 | pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); | ||
1538 | pd->tunnel_idx++; | ||
1539 | |||
1540 | if (pd->tunnel == NULL) | ||
1541 | break; | ||
1542 | |||
1543 | /* Ignore L2TPv3 tunnels */ | ||
1544 | if (pd->tunnel->version < 3) | ||
1545 | break; | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) | ||
1550 | { | ||
1551 | pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); | ||
1552 | pd->session_idx++; | ||
1553 | |||
1554 | if (pd->session == NULL) { | ||
1555 | pd->session_idx = 0; | ||
1556 | pppol2tp_next_tunnel(net, pd); | ||
1557 | } | ||
1558 | } | ||
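
Worth noting: l2tp_tunnel_find_nth() and l2tp_session_find_nth() rescan their lists from the head on every call, so walking N entries through this cursor costs O(N^2) list traversals, which is fine for a debug /proc file. The shape of such a helper, as a sketch with hypothetical list and lock names:

    /* Sketch of a find-nth helper; 'tunnel_list' and its locking are
     * placeholders for the real per-net list and read lock. */
    static struct l2tp_tunnel *example_tunnel_find_nth(int nth)
    {
        struct l2tp_tunnel *tunnel;
        int count = 0;

        list_for_each_entry(tunnel, &tunnel_list, list)
            if (count++ == nth)
                return tunnel;
        return NULL;
    }
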
1559 | |||
1560 | static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) | ||
1561 | { | ||
1562 | struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; | ||
1563 | loff_t pos = *offs; | ||
1564 | struct net *net; | ||
1565 | |||
1566 | if (!pos) | ||
1567 | goto out; | ||
1568 | |||
1569 | BUG_ON(m->private == NULL); | ||
1570 | pd = m->private; | ||
1571 | net = seq_file_net(m); | ||
1572 | |||
1573 | if (pd->tunnel == NULL) | ||
1574 | pppol2tp_next_tunnel(net, pd); | ||
1575 | else | ||
1576 | pppol2tp_next_session(net, pd); | ||
1577 | |||
1578 | /* NULL tunnel and session indicate the end of the list */ | ||
1579 | if ((pd->tunnel == NULL) && (pd->session == NULL)) | ||
1580 | pd = NULL; | ||
1581 | |||
1582 | out: | ||
1583 | return pd; | ||
1584 | } | ||
1585 | |||
1586 | static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
1587 | { | ||
1588 | (*pos)++; | ||
1589 | return NULL; | ||
1590 | } | ||
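
This ->next is deliberately degenerate: it bumps *pos and returns NULL, so seq_read() falls back to calling ->stop and ->start for every record, and the real cursor lives in the pppol2tp_seq_data hanging off m->private. The conventional contract, for contrast, keeps the cursor in the returned iterator (sketch; ex_items and ex_count are assumed):

    static void *ex_start(struct seq_file *m, loff_t *pos)
    {
        return *pos < ex_count ? &ex_items[*pos] : NULL;
    }

    static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
    {
        ++*pos;
        return *pos < ex_count ? &ex_items[*pos] : NULL;
    }
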
1591 | |||
1592 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) | ||
1593 | { | ||
1594 | /* nothing to do */ | ||
1595 | } | ||
1596 | |||
1597 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | ||
1598 | { | ||
1599 | struct l2tp_tunnel *tunnel = v; | ||
1600 | |||
1601 | seq_printf(m, "\nTUNNEL '%s', %c %d\n", | ||
1602 | tunnel->name, | ||
1603 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', | ||
1604 | atomic_read(&tunnel->ref_count) - 1); | ||
1605 | seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", | ||
1606 | tunnel->debug, | ||
1607 | (unsigned long long)tunnel->stats.tx_packets, | ||
1608 | (unsigned long long)tunnel->stats.tx_bytes, | ||
1609 | (unsigned long long)tunnel->stats.tx_errors, | ||
1610 | (unsigned long long)tunnel->stats.rx_packets, | ||
1611 | (unsigned long long)tunnel->stats.rx_bytes, | ||
1612 | (unsigned long long)tunnel->stats.rx_errors); | ||
1613 | } | ||
1614 | |||
1615 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | ||
1616 | { | ||
1617 | struct l2tp_session *session = v; | ||
1618 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
1619 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
1620 | struct pppox_sock *po = pppox_sk(ps->sock); | ||
1621 | u32 ip = 0; | ||
1622 | u16 port = 0; | ||
1623 | |||
1624 | if (tunnel->sock) { | ||
1625 | struct inet_sock *inet = inet_sk(tunnel->sock); | ||
1626 | ip = ntohl(inet->inet_saddr); | ||
1627 | port = ntohs(inet->inet_sport); | ||
1628 | } | ||
1629 | |||
1630 | seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " | ||
1631 | "%04X/%04X %d %c\n", | ||
1632 | session->name, ip, port, | ||
1633 | tunnel->tunnel_id, | ||
1634 | session->session_id, | ||
1635 | tunnel->peer_tunnel_id, | ||
1636 | session->peer_session_id, | ||
1637 | ps->sock->sk_state, | ||
1638 | (session == ps->sock->sk_user_data) ? | ||
1639 | 'Y' : 'N'); | ||
1640 | seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", | ||
1641 | session->mtu, session->mru, | ||
1642 | session->recv_seq ? 'R' : '-', | ||
1643 | session->send_seq ? 'S' : '-', | ||
1644 | session->lns_mode ? "LNS" : "LAC", | ||
1645 | session->debug, | ||
1646 | jiffies_to_msecs(session->reorder_timeout)); | ||
1647 | seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", | ||
1648 | session->nr, session->ns, | ||
1649 | (unsigned long long)session->stats.tx_packets, | ||
1650 | (unsigned long long)session->stats.tx_bytes, | ||
1651 | (unsigned long long)session->stats.tx_errors, | ||
1652 | (unsigned long long)session->stats.rx_packets, | ||
1653 | (unsigned long long)session->stats.rx_bytes, | ||
1654 | (unsigned long long)session->stats.rx_errors); | ||
1655 | |||
1656 | if (po) | ||
1657 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | ||
1658 | } | ||
1659 | |||
1660 | static int pppol2tp_seq_show(struct seq_file *m, void *v) | ||
1661 | { | ||
1662 | struct pppol2tp_seq_data *pd = v; | ||
1663 | |||
1664 | /* display header on line 1 */ | ||
1665 | if (v == SEQ_START_TOKEN) { | ||
1666 | seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n"); | ||
1667 | seq_puts(m, "TUNNEL name, user-data-ok session-count\n"); | ||
1668 | seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
1669 | seq_puts(m, " SESSION name, addr/port src-tid/sid " | ||
1670 | "dest-tid/sid state user-data-ok\n"); | ||
1671 | seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n"); | ||
1672 | seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
1673 | goto out; | ||
1674 | } | ||
1675 | |||
1676 | /* Show the tunnel or session context. | ||
1677 | */ | ||
1678 | if (pd->session == NULL) | ||
1679 | pppol2tp_seq_tunnel_show(m, pd->tunnel); | ||
1680 | else | ||
1681 | pppol2tp_seq_session_show(m, pd->session); | ||
1682 | |||
1683 | out: | ||
1684 | return 0; | ||
1685 | } | ||
1686 | |||
1687 | static const struct seq_operations pppol2tp_seq_ops = { | ||
1688 | .start = pppol2tp_seq_start, | ||
1689 | .next = pppol2tp_seq_next, | ||
1690 | .stop = pppol2tp_seq_stop, | ||
1691 | .show = pppol2tp_seq_show, | ||
1692 | }; | ||
1693 | |||
1694 | /* Called when our /proc file is opened. We allocate data for use when | ||
1695 | * iterating our tunnel / session contexts and store it in the private | ||
1696 | * data of the seq_file. | ||
1697 | */ | ||
1698 | static int pppol2tp_proc_open(struct inode *inode, struct file *file) | ||
1699 | { | ||
1700 | return seq_open_net(inode, file, &pppol2tp_seq_ops, | ||
1701 | sizeof(struct pppol2tp_seq_data)); | ||
1702 | } | ||
1703 | |||
1704 | static const struct file_operations pppol2tp_proc_fops = { | ||
1705 | .owner = THIS_MODULE, | ||
1706 | .open = pppol2tp_proc_open, | ||
1707 | .read = seq_read, | ||
1708 | .llseek = seq_lseek, | ||
1709 | .release = seq_release_net, | ||
1710 | }; | ||
1711 | |||
1712 | #endif /* CONFIG_PROC_FS */ | ||
1713 | |||
1714 | /***************************************************************************** | ||
1715 | * Network namespace | ||
1716 | *****************************************************************************/ | ||
1717 | |||
1718 | static __net_init int pppol2tp_init_net(struct net *net) | ||
1719 | { | ||
1720 | struct proc_dir_entry *pde; | ||
1721 | int err = 0; | ||
1722 | |||
1723 | pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); | ||
1724 | if (!pde) { | ||
1725 | err = -ENOMEM; | ||
1726 | goto out; | ||
1727 | } | ||
1728 | |||
1729 | out: | ||
1730 | return err; | ||
1731 | } | ||
1732 | |||
1733 | static __net_exit void pppol2tp_exit_net(struct net *net) | ||
1734 | { | ||
1735 | proc_net_remove(net, "pppol2tp"); | ||
1736 | } | ||
1737 | |||
1738 | static struct pernet_operations pppol2tp_net_ops = { | ||
1739 | .init = pppol2tp_init_net, | ||
1740 | .exit = pppol2tp_exit_net, | ||
1741 | .id = &pppol2tp_net_id, | ||
1742 | }; | ||
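
Passing .id asks the pernet machinery to assign pppol2tp_net_id a slot in each namespace's net_generic array; since no .size is given here, nothing is allocated in that slot yet, and the id only becomes useful once a per-net struct is added. With a .size set, the usual accessor is a one-liner (sketch, struct name assumed):

    /* Sketch: per-net private data, assuming
     * .size = sizeof(struct pppol2tp_net) in the pernet_operations. */
    static struct pppol2tp_net *pppol2tp_pernet(struct net *net)
    {
        return net_generic(net, pppol2tp_net_id);
    }
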
1743 | |||
1744 | /***************************************************************************** | ||
1745 | * Init and cleanup | ||
1746 | *****************************************************************************/ | ||
1747 | |||
1748 | static const struct proto_ops pppol2tp_ops = { | ||
1749 | .family = AF_PPPOX, | ||
1750 | .owner = THIS_MODULE, | ||
1751 | .release = pppol2tp_release, | ||
1752 | .bind = sock_no_bind, | ||
1753 | .connect = pppol2tp_connect, | ||
1754 | .socketpair = sock_no_socketpair, | ||
1755 | .accept = sock_no_accept, | ||
1756 | .getname = pppol2tp_getname, | ||
1757 | .poll = datagram_poll, | ||
1758 | .listen = sock_no_listen, | ||
1759 | .shutdown = sock_no_shutdown, | ||
1760 | .setsockopt = pppol2tp_setsockopt, | ||
1761 | .getsockopt = pppol2tp_getsockopt, | ||
1762 | .sendmsg = pppol2tp_sendmsg, | ||
1763 | .recvmsg = pppol2tp_recvmsg, | ||
1764 | .mmap = sock_no_mmap, | ||
1765 | .ioctl = pppox_ioctl, | ||
1766 | }; | ||
1767 | |||
1768 | static struct pppox_proto pppol2tp_proto = { | ||
1769 | .create = pppol2tp_create, | ||
1770 | .ioctl = pppol2tp_ioctl | ||
1771 | }; | ||
1772 | |||
1773 | #ifdef CONFIG_L2TP_V3 | ||
1774 | |||
1775 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { | ||
1776 | .session_create = pppol2tp_session_create, | ||
1777 | .session_delete = pppol2tp_session_delete, | ||
1778 | }; | ||
1779 | |||
1780 | #endif /* CONFIG_L2TP_V3 */ | ||
1781 | |||
1782 | static int __init pppol2tp_init(void) | ||
1783 | { | ||
1784 | int err; | ||
1785 | |||
1786 | err = register_pernet_device(&pppol2tp_net_ops); | ||
1787 | if (err) | ||
1788 | goto out; | ||
1789 | |||
1790 | err = proto_register(&pppol2tp_sk_proto, 0); | ||
1791 | if (err) | ||
1792 | goto out_unregister_pppol2tp_pernet; | ||
1793 | |||
1794 | err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto); | ||
1795 | if (err) | ||
1796 | goto out_unregister_pppol2tp_proto; | ||
1797 | |||
1798 | #ifdef CONFIG_L2TP_V3 | ||
1799 | err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops); | ||
1800 | if (err) | ||
1801 | goto out_unregister_pppox; | ||
1802 | #endif | ||
1803 | |||
1804 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", | ||
1805 | PPPOL2TP_DRV_VERSION); | ||
1806 | |||
1807 | out: | ||
1808 | return err; | ||
1809 | |||
1810 | #ifdef CONFIG_L2TP_V3 | ||
1811 | out_unregister_pppox: | ||
1812 | unregister_pppox_proto(PX_PROTO_OL2TP); | ||
1813 | #endif | ||
1814 | out_unregister_pppol2tp_proto: | ||
1815 | proto_unregister(&pppol2tp_sk_proto); | ||
1816 | out_unregister_pppol2tp_pernet: | ||
1817 | unregister_pernet_device(&pppol2tp_net_ops); | ||
1818 | goto out; | ||
1819 | } | ||
1820 | |||
1821 | static void __exit pppol2tp_exit(void) | ||
1822 | { | ||
1823 | #ifdef CONFIG_L2TP_V3 | ||
1824 | l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP); | ||
1825 | #endif | ||
1826 | unregister_pppox_proto(PX_PROTO_OL2TP); | ||
1827 | proto_unregister(&pppol2tp_sk_proto); | ||
1828 | unregister_pernet_device(&pppol2tp_net_ops); | ||
1829 | } | ||
1830 | |||
1831 | module_init(pppol2tp_init); | ||
1832 | module_exit(pppol2tp_exit); | ||
1833 | |||
1834 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
1835 | MODULE_DESCRIPTION("PPP over L2TP over UDP"); | ||
1836 | MODULE_LICENSE("GPL"); | ||
1837 | MODULE_VERSION(PPPOL2TP_DRV_VERSION); | ||
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 78167e81dfeb..2bb0ddff8c0f 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c | |||
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = { | |||
144 | 144 | ||
145 | static int __init llc_init(void) | 145 | static int __init llc_init(void) |
146 | { | 146 | { |
147 | struct net_device *dev; | ||
148 | |||
149 | dev = first_net_device(&init_net); | ||
150 | if (dev != NULL) | ||
151 | dev = next_net_device(dev); | ||
152 | |||
153 | dev_add_pack(&llc_packet_type); | 147 | dev_add_pack(&llc_packet_type); |
154 | dev_add_pack(&llc_tr_packet_type); | 148 | dev_add_pack(&llc_tr_packet_type); |
155 | return 0; | 149 | return 0; |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index a952b7f8c648..334c359da5e8 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211" | |||
15 | 15 | ||
16 | if MAC80211 != n | 16 | if MAC80211 != n |
17 | 17 | ||
18 | config MAC80211_HAS_RC | ||
19 | def_bool n | ||
20 | |||
18 | config MAC80211_RC_PID | 21 | config MAC80211_RC_PID |
19 | bool "PID controller based rate control algorithm" if EMBEDDED | 22 | bool "PID controller based rate control algorithm" if EMBEDDED |
23 | select MAC80211_HAS_RC | ||
20 | ---help--- | 24 | ---help--- |
21 | This option enables a TX rate control algorithm for | 25 | This option enables a TX rate control algorithm for |
22 | mac80211 that uses a PID controller to select the TX | 26 | mac80211 that uses a PID controller to select the TX |
@@ -24,12 +28,14 @@ config MAC80211_RC_PID | |||
24 | 28 | ||
25 | config MAC80211_RC_MINSTREL | 29 | config MAC80211_RC_MINSTREL |
26 | bool "Minstrel" if EMBEDDED | 30 | bool "Minstrel" if EMBEDDED |
31 | select MAC80211_HAS_RC | ||
27 | default y | 32 | default y |
28 | ---help--- | 33 | ---help--- |
29 | This option enables the 'minstrel' TX rate control algorithm | 34 | This option enables the 'minstrel' TX rate control algorithm |
30 | 35 | ||
31 | choice | 36 | choice |
32 | prompt "Default rate control algorithm" | 37 | prompt "Default rate control algorithm" |
38 | depends on MAC80211_HAS_RC | ||
33 | default MAC80211_RC_DEFAULT_MINSTREL | 39 | default MAC80211_RC_DEFAULT_MINSTREL |
34 | ---help--- | 40 | ---help--- |
35 | This option selects the default rate control algorithm | 41 | This option selects the default rate control algorithm |
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT | |||
62 | 68 | ||
63 | endif | 69 | endif |
64 | 70 | ||
71 | comment "Some wireless drivers require a rate control algorithm" | ||
72 | depends on MAC80211_HAS_RC=n | ||
73 | |||
65 | config MAC80211_MESH | 74 | config MAC80211_MESH |
66 | bool "Enable mac80211 mesh networking (pre-802.11s) support" | 75 | bool "Enable mac80211 mesh networking (pre-802.11s) support" |
67 | depends on MAC80211 && EXPERIMENTAL | 76 | depends on MAC80211 && EXPERIMENTAL |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index edc872e22c9b..c41aaba839fa 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1403,6 +1403,32 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1403 | return 0; | 1403 | return 0; |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, | ||
1407 | struct net_device *dev, | ||
1408 | s32 rssi_thold, u32 rssi_hyst) | ||
1409 | { | ||
1410 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1411 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1412 | struct ieee80211_vif *vif = &sdata->vif; | ||
1413 | struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; | ||
1414 | |||
1415 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) | ||
1416 | return -EOPNOTSUPP; | ||
1417 | |||
1418 | if (rssi_thold == bss_conf->cqm_rssi_thold && | ||
1419 | rssi_hyst == bss_conf->cqm_rssi_hyst) | ||
1420 | return 0; | ||
1421 | |||
1422 | bss_conf->cqm_rssi_thold = rssi_thold; | ||
1423 | bss_conf->cqm_rssi_hyst = rssi_hyst; | ||
1424 | |||
1425 | /* the driver is told at association time; if already associated, notify it now */ | ||
1426 | if (sdata->u.mgd.associated) | ||
1427 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); | ||
1428 | |||
1429 | return 0; | ||
1430 | } | ||
1431 | |||
1406 | static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | 1432 | static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, |
1407 | struct net_device *dev, | 1433 | struct net_device *dev, |
1408 | const u8 *addr, | 1434 | const u8 *addr, |
@@ -1507,4 +1533,5 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1507 | .remain_on_channel = ieee80211_remain_on_channel, | 1533 | .remain_on_channel = ieee80211_remain_on_channel, |
1508 | .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, | 1534 | .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, |
1509 | .action = ieee80211_action, | 1535 | .action = ieee80211_action, |
1536 | .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, | ||
1510 | }; | 1537 | }; |
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index d92800bb2d2f..23e720034577 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU); | |||
57 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | 57 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); |
58 | STA_FILE(tx_retry_count, tx_retry_count, LU); | 58 | STA_FILE(tx_retry_count, tx_retry_count, LU); |
59 | STA_FILE(last_signal, last_signal, D); | 59 | STA_FILE(last_signal, last_signal, D); |
60 | STA_FILE(last_noise, last_noise, D); | ||
61 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); | 60 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); |
62 | 61 | ||
63 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 62 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
@@ -289,7 +288,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
289 | DEBUGFS_ADD(tx_retry_failed); | 288 | DEBUGFS_ADD(tx_retry_failed); |
290 | DEBUGFS_ADD(tx_retry_count); | 289 | DEBUGFS_ADD(tx_retry_count); |
291 | DEBUGFS_ADD(last_signal); | 290 | DEBUGFS_ADD(last_signal); |
292 | DEBUGFS_ADD(last_noise); | ||
293 | DEBUGFS_ADD(wep_weak_iv_count); | 291 | DEBUGFS_ADD(wep_weak_iv_count); |
294 | DEBUGFS_ADD(ht_capa); | 292 | DEBUGFS_ADD(ht_capa); |
295 | } | 293 | } |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c3d844093a2f..9179196da264 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | 86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, |
87 | int mc_count, | 87 | struct netdev_hw_addr_list *mc_list) |
88 | struct dev_addr_list *mc_list) | ||
89 | { | 88 | { |
90 | u64 ret = 0; | 89 | u64 ret = 0; |
91 | 90 | ||
92 | if (local->ops->prepare_multicast) | 91 | if (local->ops->prepare_multicast) |
93 | ret = local->ops->prepare_multicast(&local->hw, mc_count, | 92 | ret = local->ops->prepare_multicast(&local->hw, mc_list); |
94 | mc_list); | ||
95 | 93 | ||
96 | trace_drv_prepare_multicast(local, mc_count, ret); | 94 | trace_drv_prepare_multicast(local, mc_list->count, ret); |
97 | 95 | ||
98 | return ret; | 96 | return ret; |
99 | } | 97 | } |
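
This tracks the dev_addr_list to netdev_hw_addr_list conversion: drivers now receive the list itself and read its ->count rather than a separate mc_count parameter. A typical prepare_multicast() walks it with netdev_hw_addr_list_for_each(); a hedged sketch, with the hash function as a placeholder:

    static u64 example_prepare_multicast(struct ieee80211_hw *hw,
                                         struct netdev_hw_addr_list *mc_list)
    {
        struct netdev_hw_addr *ha;
        u64 filter = 0;

        netdev_hw_addr_list_for_each(ha, mc_list)
            filter |= 1ULL << (example_hash(ha->addr) & 63); /* placeholder */

        /* the returned value arrives as the 'multicast' argument of the
         * driver's configure_filter() callback */
        return filter;
    }
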
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index e2976da4e0d9..e6f3b0c7a71f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -265,17 +265,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
265 | sta->sta.supp_rates[band] = supp_rates | | 265 | sta->sta.supp_rates[band] = supp_rates | |
266 | ieee80211_mandatory_rates(local, band); | 266 | ieee80211_mandatory_rates(local, band); |
267 | 267 | ||
268 | if (sta->sta.supp_rates[band] != prev_rates) { | ||
268 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 269 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
269 | if (sta->sta.supp_rates[band] != prev_rates) | ||
270 | printk(KERN_DEBUG "%s: updated supp_rates set " | 270 | printk(KERN_DEBUG "%s: updated supp_rates set " |
271 | "for %pM based on beacon info (0x%llx | " | 271 | "for %pM based on beacon/probe_response " |
272 | "0x%llx -> 0x%llx)\n", | 272 | "(0x%x -> 0x%x)\n", |
273 | sdata->name, | 273 | sdata->name, sta->sta.addr, |
274 | sta->sta.addr, | 274 | prev_rates, sta->sta.supp_rates[band]); |
275 | (unsigned long long) prev_rates, | ||
276 | (unsigned long long) supp_rates, | ||
277 | (unsigned long long) sta->sta.supp_rates[band]); | ||
278 | #endif | 275 | #endif |
276 | rate_control_rate_init(sta); | ||
277 | } | ||
279 | rcu_read_unlock(); | 278 | rcu_read_unlock(); |
280 | } else { | 279 | } else { |
281 | rcu_read_unlock(); | 280 | rcu_read_unlock(); |
@@ -371,6 +370,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
371 | sdata->name, mgmt->bssid); | 370 | sdata->name, mgmt->bssid); |
372 | #endif | 371 | #endif |
373 | ieee80211_sta_join_ibss(sdata, bss); | 372 | ieee80211_sta_join_ibss(sdata, bss); |
373 | supp_rates = ieee80211_sta_get_rates(local, elems, band); | ||
374 | ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, | 374 | ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, |
375 | supp_rates, GFP_KERNEL); | 375 | supp_rates, GFP_KERNEL); |
376 | } | 376 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 241533e1bc03..7fdacf9408b1 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -327,7 +327,7 @@ struct ieee80211_if_managed { | |||
327 | struct work_struct work; | 327 | struct work_struct work; |
328 | struct work_struct monitor_work; | 328 | struct work_struct monitor_work; |
329 | struct work_struct chswitch_work; | 329 | struct work_struct chswitch_work; |
330 | struct work_struct beacon_loss_work; | 330 | struct work_struct beacon_connection_loss_work; |
331 | 331 | ||
332 | unsigned long probe_timeout; | 332 | unsigned long probe_timeout; |
333 | int probe_send_count; | 333 | int probe_send_count; |
@@ -646,8 +646,7 @@ struct ieee80211_local { | |||
646 | struct work_struct recalc_smps; | 646 | struct work_struct recalc_smps; |
647 | 647 | ||
648 | /* aggregated multicast list */ | 648 | /* aggregated multicast list */ |
649 | struct dev_addr_list *mc_list; | 649 | struct netdev_hw_addr_list mc_list; |
650 | int mc_count; | ||
651 | 650 | ||
652 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ | 651 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ |
653 | 652 | ||
@@ -745,6 +744,7 @@ struct ieee80211_local { | |||
745 | int scan_channel_idx; | 744 | int scan_channel_idx; |
746 | int scan_ies_len; | 745 | int scan_ies_len; |
747 | 746 | ||
747 | unsigned long leave_oper_channel_time; | ||
748 | enum mac80211_scan_state next_scan_state; | 748 | enum mac80211_scan_state next_scan_state; |
749 | struct delayed_work scan_work; | 749 | struct delayed_work scan_work; |
750 | struct ieee80211_sub_if_data *scan_sdata; | 750 | struct ieee80211_sub_if_data *scan_sdata; |
@@ -1155,7 +1155,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
1155 | int powersave); | 1155 | int powersave); |
1156 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | 1156 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, |
1157 | struct ieee80211_hdr *hdr); | 1157 | struct ieee80211_hdr *hdr); |
1158 | void ieee80211_beacon_loss_work(struct work_struct *work); | 1158 | void ieee80211_beacon_connection_loss_work(struct work_struct *work); |
1159 | 1159 | ||
1160 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | 1160 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, |
1161 | enum queue_stop_reason reason); | 1161 | enum queue_stop_reason reason); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index e08fa8eda1b3..50deb017fd6e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -413,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
413 | 413 | ||
414 | netif_addr_lock_bh(dev); | 414 | netif_addr_lock_bh(dev); |
415 | spin_lock_bh(&local->filter_lock); | 415 | spin_lock_bh(&local->filter_lock); |
416 | __dev_addr_unsync(&local->mc_list, &local->mc_count, | 416 | __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len); |
417 | &dev->mc_list, &dev->mc_count); | ||
418 | spin_unlock_bh(&local->filter_lock); | 417 | spin_unlock_bh(&local->filter_lock); |
419 | netif_addr_unlock_bh(dev); | 418 | netif_addr_unlock_bh(dev); |
420 | 419 | ||
@@ -487,7 +486,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
487 | cancel_work_sync(&sdata->u.mgd.work); | 486 | cancel_work_sync(&sdata->u.mgd.work); |
488 | cancel_work_sync(&sdata->u.mgd.chswitch_work); | 487 | cancel_work_sync(&sdata->u.mgd.chswitch_work); |
489 | cancel_work_sync(&sdata->u.mgd.monitor_work); | 488 | cancel_work_sync(&sdata->u.mgd.monitor_work); |
490 | cancel_work_sync(&sdata->u.mgd.beacon_loss_work); | 489 | cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); |
491 | 490 | ||
492 | /* | 491 | /* |
493 | * When we get here, the interface is marked down. | 492 | * When we get here, the interface is marked down. |
@@ -597,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev) | |||
597 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | 596 | sdata->flags ^= IEEE80211_SDATA_PROMISC; |
598 | } | 597 | } |
599 | spin_lock_bh(&local->filter_lock); | 598 | spin_lock_bh(&local->filter_lock); |
600 | __dev_addr_sync(&local->mc_list, &local->mc_count, | 599 | __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); |
601 | &dev->mc_list, &dev->mc_count); | ||
602 | spin_unlock_bh(&local->filter_lock); | 600 | spin_unlock_bh(&local->filter_lock); |
603 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); | 601 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); |
604 | } | 602 | } |
@@ -816,6 +814,118 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
816 | return 0; | 814 | return 0; |
817 | } | 815 | } |
818 | 816 | ||
817 | static void ieee80211_assign_perm_addr(struct ieee80211_local *local, | ||
818 | struct net_device *dev, | ||
819 | enum nl80211_iftype type) | ||
820 | { | ||
821 | struct ieee80211_sub_if_data *sdata; | ||
822 | u64 mask, start, addr, val, inc; | ||
823 | u8 *m; | ||
824 | u8 tmp_addr[ETH_ALEN]; | ||
825 | int i; | ||
826 | |||
827 | /* default ... something at least */ | ||
828 | memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | ||
829 | |||
830 | if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && | ||
831 | local->hw.wiphy->n_addresses <= 1) | ||
832 | return; | ||
833 | |||
834 | |||
835 | mutex_lock(&local->iflist_mtx); | ||
836 | |||
837 | switch (type) { | ||
838 | case NL80211_IFTYPE_MONITOR: | ||
839 | /* doesn't matter */ | ||
840 | break; | ||
841 | case NL80211_IFTYPE_WDS: | ||
842 | case NL80211_IFTYPE_AP_VLAN: | ||
843 | /* match up with an AP interface */ | ||
844 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
845 | if (sdata->vif.type != NL80211_IFTYPE_AP) | ||
846 | continue; | ||
847 | memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN); | ||
848 | break; | ||
849 | } | ||
850 | /* keep default if no AP interface present */ | ||
851 | break; | ||
852 | default: | ||
853 | /* assign a new address if possible -- try n_addresses first */ | ||
854 | for (i = 0; i < local->hw.wiphy->n_addresses; i++) { | ||
855 | bool used = false; | ||
856 | |||
857 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
858 | if (memcmp(local->hw.wiphy->addresses[i].addr, | ||
859 | sdata->vif.addr, ETH_ALEN) == 0) { | ||
860 | used = true; | ||
861 | break; | ||
862 | } | ||
863 | } | ||
864 | |||
865 | if (!used) { | ||
866 | memcpy(dev->perm_addr, | ||
867 | local->hw.wiphy->addresses[i].addr, | ||
868 | ETH_ALEN); | ||
869 | break; | ||
870 | } | ||
871 | } | ||
872 | |||
873 | /* try mask if available */ | ||
874 | if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) | ||
875 | break; | ||
876 | |||
877 | m = local->hw.wiphy->addr_mask; | ||
878 | mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | | ||
879 | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | | ||
880 | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); | ||
881 | |||
882 | if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { | ||
883 | /* not a contiguous mask ... not handled now! */ | ||
884 | printk(KERN_DEBUG "not contiguous\n"); | ||
885 | break; | ||
886 | } | ||
887 | |||
888 | m = local->hw.wiphy->perm_addr; | ||
889 | start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | | ||
890 | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | | ||
891 | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); | ||
892 | |||
893 | inc = 1ULL<<__ffs64(mask); | ||
894 | val = (start & mask); | ||
895 | addr = (start & ~mask) | (val & mask); | ||
896 | do { | ||
897 | bool used = false; | ||
898 | |||
899 | tmp_addr[5] = addr >> 0*8; | ||
900 | tmp_addr[4] = addr >> 1*8; | ||
901 | tmp_addr[3] = addr >> 2*8; | ||
902 | tmp_addr[2] = addr >> 3*8; | ||
903 | tmp_addr[1] = addr >> 4*8; | ||
904 | tmp_addr[0] = addr >> 5*8; | ||
905 | |||
906 | val += inc; | ||
907 | |||
908 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
909 | if (memcmp(tmp_addr, sdata->vif.addr, | ||
910 | ETH_ALEN) == 0) { | ||
911 | used = true; | ||
912 | break; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | if (!used) { | ||
917 | memcpy(dev->perm_addr, tmp_addr, ETH_ALEN); | ||
918 | break; | ||
919 | } | ||
920 | addr = (start & ~mask) | (val & mask); | ||
921 | } while (addr != start); | ||
922 | |||
923 | break; | ||
924 | } | ||
925 | |||
926 | mutex_unlock(&local->iflist_mtx); | ||
927 | } | ||
928 | |||
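
The u64 packing exists so the 6-byte address can be treated as one integer: __ffs64(mask) + hweight64(mask) == fls64(mask) holds exactly when the mask's set bits are contiguous, inc is the mask's lowest bit, and the do/while steps the masked field through every value until it wraps back to start. Worked through with assumed values, addr_mask 00:00:00:00:00:03 and perm_addr ending in 01:

    /* mask = 0x3 -> __ffs64 = 0, hweight64 = 2, fls64 = 2: contiguous.
     * inc = 1ULL << 0 = 1; candidates cycle the low two bits:
     *   ..:01 (in use), ..:02, ..:03, ..:00, then wrap to ..:01.
     * So up to four interfaces get distinct permanent addresses. */
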
819 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 929 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
820 | struct net_device **new_dev, enum nl80211_iftype type, | 930 | struct net_device **new_dev, enum nl80211_iftype type, |
821 | struct vif_params *params) | 931 | struct vif_params *params) |
@@ -845,8 +955,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
845 | if (ret < 0) | 955 | if (ret < 0) |
846 | goto fail; | 956 | goto fail; |
847 | 957 | ||
848 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | 958 | ieee80211_assign_perm_addr(local, ndev, type); |
849 | memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); | 959 | memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); |
850 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); | 960 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); |
851 | 961 | ||
852 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ | 962 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index b887e484ae04..50c1b1ada884 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local) | |||
71 | spin_lock_bh(&local->filter_lock); | 71 | spin_lock_bh(&local->filter_lock); |
72 | changed_flags = local->filter_flags ^ new_flags; | 72 | changed_flags = local->filter_flags ^ new_flags; |
73 | 73 | ||
74 | mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); | 74 | mc = drv_prepare_multicast(local, &local->mc_list); |
75 | spin_unlock_bh(&local->filter_lock); | 75 | spin_unlock_bh(&local->filter_lock); |
76 | 76 | ||
77 | /* be a bit nasty */ | 77 | /* be a bit nasty */ |
@@ -388,6 +388,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
388 | local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; | 388 | local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; |
389 | 389 | ||
390 | INIT_LIST_HEAD(&local->interfaces); | 390 | INIT_LIST_HEAD(&local->interfaces); |
391 | |||
392 | __hw_addr_init(&local->mc_list); | ||
393 | |||
391 | mutex_init(&local->iflist_mtx); | 394 | mutex_init(&local->iflist_mtx); |
392 | mutex_init(&local->scan_mtx); | 395 | mutex_init(&local->scan_mtx); |
393 | 396 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c8cd169fc10e..71ff42a0465b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -754,6 +754,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
754 | /* And the BSSID changed - we're associated now */ | 754 | /* And the BSSID changed - we're associated now */ |
755 | bss_info_changed |= BSS_CHANGED_BSSID; | 755 | bss_info_changed |= BSS_CHANGED_BSSID; |
756 | 756 | ||
757 | /* Tell the driver to monitor connection quality (if supported) */ | ||
758 | if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && | ||
759 | sdata->vif.bss_conf.cqm_rssi_thold) | ||
760 | bss_info_changed |= BSS_CHANGED_CQM; | ||
761 | |||
757 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); | 762 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); |
758 | 763 | ||
759 | mutex_lock(&local->iflist_mtx); | 764 | mutex_lock(&local->iflist_mtx); |
@@ -855,6 +860,9 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | |||
855 | if (is_multicast_ether_addr(hdr->addr1)) | 860 | if (is_multicast_ether_addr(hdr->addr1)) |
856 | return; | 861 | return; |
857 | 862 | ||
863 | if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) | ||
864 | return; | ||
865 | |||
858 | mod_timer(&sdata->u.mgd.conn_mon_timer, | 866 | mod_timer(&sdata->u.mgd.conn_mon_timer, |
859 | round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); | 867 | round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); |
860 | } | 868 | } |
@@ -932,23 +940,68 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, | |||
932 | mutex_unlock(&ifmgd->mtx); | 940 | mutex_unlock(&ifmgd->mtx); |
933 | } | 941 | } |
934 | 942 | ||
935 | void ieee80211_beacon_loss_work(struct work_struct *work) | 943 | static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) |
944 | { | ||
945 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
946 | struct ieee80211_local *local = sdata->local; | ||
947 | u8 bssid[ETH_ALEN]; | ||
948 | |||
949 | mutex_lock(&ifmgd->mtx); | ||
950 | if (!ifmgd->associated) { | ||
951 | mutex_unlock(&ifmgd->mtx); | ||
952 | return; | ||
953 | } | ||
954 | |||
955 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); | ||
956 | |||
957 | printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid); | ||
958 | |||
959 | ieee80211_set_disassoc(sdata); | ||
960 | ieee80211_recalc_idle(local); | ||
961 | mutex_unlock(&ifmgd->mtx); | ||
962 | /* | ||
963 | * must be outside lock due to cfg80211, | ||
964 | * but that's not a problem. | ||
965 | */ | ||
966 | ieee80211_send_deauth_disassoc(sdata, bssid, | ||
967 | IEEE80211_STYPE_DEAUTH, | ||
968 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | ||
969 | NULL); | ||
970 | } | ||
971 | |||
972 | void ieee80211_beacon_connection_loss_work(struct work_struct *work) | ||
936 | { | 973 | { |
937 | struct ieee80211_sub_if_data *sdata = | 974 | struct ieee80211_sub_if_data *sdata = |
938 | container_of(work, struct ieee80211_sub_if_data, | 975 | container_of(work, struct ieee80211_sub_if_data, |
939 | u.mgd.beacon_loss_work); | 976 | u.mgd.beacon_connection_loss_work); |
940 | 977 | ||
941 | ieee80211_mgd_probe_ap(sdata, true); | 978 | if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) |
979 | __ieee80211_connection_loss(sdata); | ||
980 | else | ||
981 | ieee80211_mgd_probe_ap(sdata, true); | ||
942 | } | 982 | } |
943 | 983 | ||
944 | void ieee80211_beacon_loss(struct ieee80211_vif *vif) | 984 | void ieee80211_beacon_loss(struct ieee80211_vif *vif) |
945 | { | 985 | { |
946 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 986 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
987 | struct ieee80211_hw *hw = &sdata->local->hw; | ||
947 | 988 | ||
948 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); | 989 | WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR); |
990 | ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); | ||
949 | } | 991 | } |
950 | EXPORT_SYMBOL(ieee80211_beacon_loss); | 992 | EXPORT_SYMBOL(ieee80211_beacon_loss); |
951 | 993 | ||
994 | void ieee80211_connection_loss(struct ieee80211_vif *vif) | ||
995 | { | ||
996 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | ||
997 | struct ieee80211_hw *hw = &sdata->local->hw; | ||
998 | |||
999 | WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR)); | ||
1000 | ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); | ||
1001 | } | ||
1002 | EXPORT_SYMBOL(ieee80211_connection_loss); | ||
1003 | |||
1004 | |||
952 | static enum rx_mgmt_action __must_check | 1005 | static enum rx_mgmt_action __must_check |
953 | ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, | 1006 | ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, |
954 | struct ieee80211_mgmt *mgmt, size_t len) | 1007 | struct ieee80211_mgmt *mgmt, size_t len) |
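
The rename above splits two cases behind one work item: hardware that merely misses beacons still goes through ieee80211_beacon_loss() and gets the probe-the-AP-first path, while hardware that does its own connection tracking sets IEEE80211_HW_CONNECTION_MONITOR and reports a definitive loss via the new ieee80211_connection_loss(), which disassociates immediately. Driver-side usage is a one-liner (sketch; the event handler and priv layout are hypothetical):

    /* Sketch: firmware-event path in a driver with
     * IEEE80211_HW_CONNECTION_MONITOR set. */
    static void example_fw_beacon_loss_event(struct example_priv *priv)
    {
        ieee80211_connection_loss(priv->vif);   /* queues the work item */
    }
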
@@ -1638,7 +1691,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data) | |||
1638 | if (local->quiescing) | 1691 | if (local->quiescing) |
1639 | return; | 1692 | return; |
1640 | 1693 | ||
1641 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); | 1694 | ieee80211_queue_work(&sdata->local->hw, |
1695 | &sdata->u.mgd.beacon_connection_loss_work); | ||
1642 | } | 1696 | } |
1643 | 1697 | ||
1644 | static void ieee80211_sta_conn_mon_timer(unsigned long data) | 1698 | static void ieee80211_sta_conn_mon_timer(unsigned long data) |
@@ -1690,7 +1744,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
1690 | */ | 1744 | */ |
1691 | 1745 | ||
1692 | cancel_work_sync(&ifmgd->work); | 1746 | cancel_work_sync(&ifmgd->work); |
1693 | cancel_work_sync(&ifmgd->beacon_loss_work); | 1747 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); |
1694 | if (del_timer_sync(&ifmgd->timer)) | 1748 | if (del_timer_sync(&ifmgd->timer)) |
1695 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); | 1749 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); |
1696 | 1750 | ||
@@ -1724,7 +1778,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
1724 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); | 1778 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); |
1725 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); | 1779 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); |
1726 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); | 1780 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); |
1727 | INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); | 1781 | INIT_WORK(&ifmgd->beacon_connection_loss_work, |
1782 | ieee80211_beacon_connection_loss_work); | ||
1728 | setup_timer(&ifmgd->timer, ieee80211_sta_timer, | 1783 | setup_timer(&ifmgd->timer, ieee80211_sta_timer, |
1729 | (unsigned long) sdata); | 1784 | (unsigned long) sdata); |
1730 | setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, | 1785 | setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, |
@@ -2136,3 +2191,13 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | |||
2136 | *cookie = (unsigned long) skb; | 2191 | *cookie = (unsigned long) skb; |
2137 | return 0; | 2192 | return 0; |
2138 | } | 2193 | } |
2194 | |||
2195 | void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, | ||
2196 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
2197 | gfp_t gfp) | ||
2198 | { | ||
2199 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | ||
2200 | |||
2201 | cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); | ||
2202 | } | ||
2203 | EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); | ||
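
This export is the driver-facing half of the CQM support added in cfg.c above: a driver advertising IEEE80211_HW_SUPPORTS_CQM_RSSI reads the threshold and hysteresis from bss_conf and calls back when its averaged RSSI crosses them; cfg80211 then forwards the event to user space. A hedged sketch of the reporting side (real drivers would also remember the last event to avoid re-reporting the same side):

    /* Sketch: called from a driver's statistics path with an averaged
     * RSSI sample; 'vif' and 'avg' are supplied by the driver. */
    static void example_cqm_check(struct ieee80211_vif *vif, s32 avg)
    {
        struct ieee80211_bss_conf *bc = &vif->bss_conf;

        if (!bc->cqm_rssi_thold)
            return;

        if (avg < bc->cqm_rssi_thold - (s32)bc->cqm_rssi_hyst)
            ieee80211_cqm_rssi_notify(vif,
                NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, GFP_ATOMIC);
        else if (avg > bc->cqm_rssi_thold + (s32)bc->cqm_rssi_hyst)
            ieee80211_cqm_rssi_notify(vif,
                NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, GFP_ATOMIC);
    }
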
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 818abfae9007..f65ce6dcc8e2 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -542,7 +542,7 @@ minstrel_free(void *priv) | |||
542 | kfree(priv); | 542 | kfree(priv); |
543 | } | 543 | } |
544 | 544 | ||
545 | static struct rate_control_ops mac80211_minstrel = { | 545 | struct rate_control_ops mac80211_minstrel = { |
546 | .name = "minstrel", | 546 | .name = "minstrel", |
547 | .tx_status = minstrel_tx_status, | 547 | .tx_status = minstrel_tx_status, |
548 | .get_rate = minstrel_get_rate, | 548 | .get_rate = minstrel_get_rate, |
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 38bf4168fc3a..0f5a83370aa6 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h | |||
@@ -80,7 +80,18 @@ struct minstrel_priv { | |||
80 | unsigned int lookaround_rate_mrr; | 80 | unsigned int lookaround_rate_mrr; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | struct minstrel_debugfs_info { | ||
84 | size_t len; | ||
85 | char buf[]; | ||
86 | }; | ||
87 | |||
88 | extern struct rate_control_ops mac80211_minstrel; | ||
83 | void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); | 89 | void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); |
84 | void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); | 90 | void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); |
85 | 91 | ||
92 | /* debugfs */ | ||
93 | int minstrel_stats_open(struct inode *inode, struct file *file); | ||
94 | ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); | ||
95 | int minstrel_stats_release(struct inode *inode, struct file *file); | ||
96 | |||
86 | #endif | 97 | #endif |
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 0e1f12b1b6dd..241e76f3fdf2 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c | |||
@@ -53,21 +53,15 @@ | |||
53 | #include <net/mac80211.h> | 53 | #include <net/mac80211.h> |
54 | #include "rc80211_minstrel.h" | 54 | #include "rc80211_minstrel.h" |
55 | 55 | ||
56 | struct minstrel_stats_info { | 56 | int |
57 | struct minstrel_sta_info *mi; | ||
58 | char buf[4096]; | ||
59 | size_t len; | ||
60 | }; | ||
61 | |||
62 | static int | ||
63 | minstrel_stats_open(struct inode *inode, struct file *file) | 57 | minstrel_stats_open(struct inode *inode, struct file *file) |
64 | { | 58 | { |
65 | struct minstrel_sta_info *mi = inode->i_private; | 59 | struct minstrel_sta_info *mi = inode->i_private; |
66 | struct minstrel_stats_info *ms; | 60 | struct minstrel_debugfs_info *ms; |
67 | unsigned int i, tp, prob, eprob; | 61 | unsigned int i, tp, prob, eprob; |
68 | char *p; | 62 | char *p; |
69 | 63 | ||
70 | ms = kmalloc(sizeof(*ms), GFP_KERNEL); | 64 | ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL); |
71 | if (!ms) | 65 | if (!ms) |
72 | return -ENOMEM; | 66 | return -ENOMEM; |
73 | 67 | ||
@@ -107,36 +101,19 @@ minstrel_stats_open(struct inode *inode, struct file *file) | |||
107 | return 0; | 101 | return 0; |
108 | } | 102 | } |
109 | 103 | ||
110 | static ssize_t | 104 | ssize_t |
111 | minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o) | 105 | minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) |
112 | { | 106 | { |
113 | struct minstrel_stats_info *ms; | 107 | struct minstrel_debugfs_info *ms; |
114 | char *src; | ||
115 | 108 | ||
116 | ms = file->private_data; | 109 | ms = file->private_data; |
117 | src = ms->buf; | 110 | return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len); |
118 | |||
119 | len = min(len, ms->len); | ||
120 | if (len <= *o) | ||
121 | return 0; | ||
122 | |||
123 | src += *o; | ||
124 | len -= *o; | ||
125 | *o += len; | ||
126 | |||
127 | if (copy_to_user(buf, src, len)) | ||
128 | return -EFAULT; | ||
129 | |||
130 | return len; | ||
131 | } | 111 | } |
132 | 112 | ||
133 | static int | 113 | int |
134 | minstrel_stats_release(struct inode *inode, struct file *file) | 114 | minstrel_stats_release(struct inode *inode, struct file *file) |
135 | { | 115 | { |
136 | struct minstrel_stats_info *ms = file->private_data; | 116 | kfree(file->private_data); |
137 | |||
138 | kfree(ms); | ||
139 | |||
140 | return 0; | 117 | return 0; |
141 | } | 118 | } |
142 | 119 | ||
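
Two idioms worth calling out from this conversion: the flexible-array minstrel_debugfs_info lets kmalloc(sizeof(*ms) + 4096, ...) grab the header and text buffer in one allocation, and simple_read_from_buffer() replaces the hand-rolled offset bookkeeping with the stock clamp-copy-advance helper. The open side of that pattern, as a generic sketch (example_fill() is a placeholder):

    static int example_open(struct inode *inode, struct file *file)
    {
        struct minstrel_debugfs_info *ms;

        ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
        if (!ms)
            return -ENOMEM;
        ms->len = example_fill(ms->buf, 4096);  /* format text, return length */
        file->private_data = ms;
        return 0;
    }
    /* read() then reduces to simple_read_from_buffer(buf, len, ppos,
     * ms->buf, ms->len), and release() to kfree(file->private_data). */
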
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 04ea07f0e78a..e0c944fb6fc9 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -179,14 +179,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
179 | pos++; | 179 | pos++; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* IEEE80211_RADIOTAP_DBM_ANTNOISE */ | ||
183 | if (local->hw.flags & IEEE80211_HW_NOISE_DBM) { | ||
184 | *pos = status->noise; | ||
185 | rthdr->it_present |= | ||
186 | cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE); | ||
187 | pos++; | ||
188 | } | ||
189 | |||
190 | /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ | 182 | /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ |
191 | 183 | ||
192 | /* IEEE80211_RADIOTAP_ANTENNA */ | 184 | /* IEEE80211_RADIOTAP_ANTENNA */ |
@@ -1078,7 +1070,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1078 | sta->rx_fragments++; | 1070 | sta->rx_fragments++; |
1079 | sta->rx_bytes += rx->skb->len; | 1071 | sta->rx_bytes += rx->skb->len; |
1080 | sta->last_signal = status->signal; | 1072 | sta->last_signal = status->signal; |
1081 | sta->last_noise = status->noise; | ||
1082 | 1073 | ||
1083 | /* | 1074 | /* |
1084 | * Change STA power saving mode only at the end of a frame | 1075 | * Change STA power saving mode only at the end of a frame |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 85507bd9e341..1ce4ce8af80f 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -14,6 +14,8 @@ | |||
14 | 14 | ||
15 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
16 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
17 | #include <linux/pm_qos_params.h> | ||
18 | #include <net/sch_generic.h> | ||
17 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
18 | #include <net/mac80211.h> | 20 | #include <net/mac80211.h> |
19 | 21 | ||
@@ -322,6 +324,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
322 | 324 | ||
323 | ieee80211_offchannel_stop_beaconing(local); | 325 | ieee80211_offchannel_stop_beaconing(local); |
324 | 326 | ||
327 | local->leave_oper_channel_time = 0; | ||
325 | local->next_scan_state = SCAN_DECISION; | 328 | local->next_scan_state = SCAN_DECISION; |
326 | local->scan_channel_idx = 0; | 329 | local->scan_channel_idx = 0; |
327 | 330 | ||
@@ -426,11 +429,28 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
426 | return rc; | 429 | return rc; |
427 | } | 430 | } |
428 | 431 | ||
432 | static unsigned long | ||
433 | ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) | ||
434 | { | ||
435 | /* | ||
436 | * TODO: channel switching also consumes quite some time, | ||
437 | * add that delay as well to get a better estimation | ||
438 | */ | ||
439 | if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
440 | return IEEE80211_PASSIVE_CHANNEL_TIME; | ||
441 | return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; | ||
442 | } | ||
443 | |||
429 | static int ieee80211_scan_state_decision(struct ieee80211_local *local, | 444 | static int ieee80211_scan_state_decision(struct ieee80211_local *local, |
430 | unsigned long *next_delay) | 445 | unsigned long *next_delay) |
431 | { | 446 | { |
432 | bool associated = false; | 447 | bool associated = false; |
448 | bool tx_empty = true; | ||
449 | bool bad_latency; | ||
450 | bool listen_int_exceeded; | ||
451 | unsigned long min_beacon_int = 0; | ||
433 | struct ieee80211_sub_if_data *sdata; | 452 | struct ieee80211_sub_if_data *sdata; |
453 | struct ieee80211_channel *next_chan; | ||
434 | 454 | ||
435 | /* if no more bands/channels left, complete scan and advance to the idle state */ | 455 | /* if no more bands/channels left, complete scan and advance to the idle state */ |
436 | if (local->scan_channel_idx >= local->scan_req->n_channels) { | 456 | if (local->scan_channel_idx >= local->scan_req->n_channels) { |
@@ -438,7 +458,11 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
438 | return 1; | 458 | return 1; |
439 | } | 459 | } |
440 | 460 | ||
441 | /* check if at least one STA interface is associated */ | 461 | /* |
462 | * check if at least one STA interface is associated, | ||
463 | * check if at least one STA interface has pending tx frames | ||
464 | * and grab the lowest used beacon interval | ||
465 | */ | ||
442 | mutex_lock(&local->iflist_mtx); | 466 | mutex_lock(&local->iflist_mtx); |
443 | list_for_each_entry(sdata, &local->interfaces, list) { | 467 | list_for_each_entry(sdata, &local->interfaces, list) { |
444 | if (!ieee80211_sdata_running(sdata)) | 468 | if (!ieee80211_sdata_running(sdata)) |
@@ -447,7 +471,16 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
447 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 471 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
448 | if (sdata->u.mgd.associated) { | 472 | if (sdata->u.mgd.associated) { |
449 | associated = true; | 473 | associated = true; |
450 | break; | 474 | |
475 | if (sdata->vif.bss_conf.beacon_int < | ||
476 | min_beacon_int || min_beacon_int == 0) | ||
477 | min_beacon_int = | ||
478 | sdata->vif.bss_conf.beacon_int; | ||
479 | |||
480 | if (!qdisc_all_tx_empty(sdata->dev)) { | ||
481 | tx_empty = false; | ||
482 | break; | ||
483 | } | ||
451 | } | 484 | } |
452 | } | 485 | } |
453 | } | 486 | } |
@@ -456,11 +489,34 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
456 | if (local->scan_channel) { | 489 | if (local->scan_channel) { |
457 | /* | 490 | /* |
458 | * we're currently scanning a different channel, let's | 491 | * we're currently scanning a different channel, let's |
459 | * switch back to the operating channel now if at least | 492 | * see if we can scan another channel without interfering |
460 | * one interface is associated. Otherwise just scan the | 493 | * with the current traffic situation. |
461 | * next channel | 494 | * |
495 | * Since we don't know if the AP has pending frames for us | ||
496 | * we can only check our tx queues and use the current | ||
497 | * pm_qos requirements for rx. Hence, if no tx traffic occurs | ||
498 | * at all we will scan as many channels in a row as the pm_qos | ||
499 | * latency allows us to. Additionally we also check for the | ||
500 | * currently negotiated listen interval to prevent losing | ||
501 | * frames unnecessarily. | ||
502 | * | ||
503 | * Otherwise switch back to the operating channel. | ||
462 | */ | 504 | */ |
463 | if (associated) | 505 | next_chan = local->scan_req->channels[local->scan_channel_idx]; |
506 | |||
507 | bad_latency = time_after(jiffies + | ||
508 | ieee80211_scan_get_channel_time(next_chan), | ||
509 | local->leave_oper_channel_time + | ||
510 | usecs_to_jiffies(pm_qos_requirement(PM_QOS_NETWORK_LATENCY))); | ||
511 | |||
512 | listen_int_exceeded = time_after(jiffies + | ||
513 | ieee80211_scan_get_channel_time(next_chan), | ||
514 | local->leave_oper_channel_time + | ||
515 | usecs_to_jiffies(min_beacon_int * 1024) * | ||
516 | local->hw.conf.listen_interval); | ||
517 | |||
518 | if (associated && ( !tx_empty || bad_latency || | ||
519 | listen_int_exceeded)) | ||
464 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; | 520 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; |
465 | else | 521 | else |
466 | local->next_scan_state = SCAN_SET_CHANNEL; | 522 | local->next_scan_state = SCAN_SET_CHANNEL; |
@@ -492,6 +548,9 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca | |||
492 | else | 548 | else |
493 | *next_delay = HZ / 10; | 549 | *next_delay = HZ / 10; |
494 | 550 | ||
551 | /* remember when we left the operating channel */ | ||
552 | local->leave_oper_channel_time = jiffies; | ||
553 | |||
495 | /* advance to the next channel to be scanned */ | 554 | /* advance to the next channel to be scanned */ |
496 | local->next_scan_state = SCAN_SET_CHANNEL; | 555 | local->next_scan_state = SCAN_SET_CHANNEL; |
497 | } | 556 | } |
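
The two deadlines are easiest to sanity-check with numbers. A beacon interval is counted in TU (1024 us), so with an assumed beacon_int of 100 TU and listen_interval of 3 the listen-interval budget is 100 * 1024 us * 3, roughly 307 ms off-channel; bad_latency applies the same test against the pm_qos network-latency bound instead:

    /* Worked example (assumed values): beacon_int = 100 TU, listen_interval = 3.
     * budget              = usecs_to_jiffies(100 * 1024) * 3     (~307 ms)
     * listen_int_exceeded = time_after(jiffies + next_channel_dwell,
     *                                  leave_oper_channel_time + budget);
     * If it fires (or tx queues are busy, or pm_qos would be violated),
     * the scan returns to the operating channel before continuing. */
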
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 822d84522937..2b635909de5c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -200,7 +200,6 @@ struct sta_ampdu_mlme { | |||
200 | * @rx_fragments: number of received MPDUs | 200 | * @rx_fragments: number of received MPDUs |
201 | * @rx_dropped: number of dropped MPDUs from this STA | 201 | * @rx_dropped: number of dropped MPDUs from this STA |
202 | * @last_signal: signal of last received frame from this STA | 202 | * @last_signal: signal of last received frame from this STA |
203 | * @last_noise: noise of last received frame from this STA | ||
204 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) | 203 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) |
205 | * @tx_filtered_count: number of frames the hardware filtered for this STA | 204 | * @tx_filtered_count: number of frames the hardware filtered for this STA |
206 | * @tx_retry_failed: number of frames that failed retry | 205 | * @tx_retry_failed: number of frames that failed retry |
@@ -267,7 +266,6 @@ struct sta_info { | |||
267 | unsigned long rx_fragments; | 266 | unsigned long rx_fragments; |
268 | unsigned long rx_dropped; | 267 | unsigned long rx_dropped; |
269 | int last_signal; | 268 | int last_signal; |
270 | int last_noise; | ||
271 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; | 269 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; |
272 | 270 | ||
273 | /* Updated from TX status path only, no locking requirements */ | 271 | /* Updated from TX status path only, no locking requirements */ |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 56d5b9a6ec5b..11805a3a626f 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -171,7 +171,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
171 | struct net_device *prev_dev = NULL; | 171 | struct net_device *prev_dev = NULL; |
172 | struct sta_info *sta, *tmp; | 172 | struct sta_info *sta, *tmp; |
173 | int retry_count = -1, i; | 173 | int retry_count = -1, i; |
174 | bool injected; | 174 | bool send_to_cooked; |
175 | 175 | ||
176 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { | 176 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { |
177 | /* the HW cannot have attempted that rate */ | 177 | /* the HW cannot have attempted that rate */ |
@@ -296,11 +296,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
296 | /* this was a transmitted frame, but now we want to reuse it */ | 296 | /* this was a transmitted frame, but now we want to reuse it */ |
297 | skb_orphan(skb); | 297 | skb_orphan(skb); |
298 | 298 | ||
299 | /* Need to make a copy before skb->cb gets cleared */ | ||
300 | send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) || | ||
301 | (type != IEEE80211_FTYPE_DATA); | ||
302 | |||
299 | /* | 303 | /* |
300 | * This is a bit racy but we can avoid a lot of work | 304 | * This is a bit racy but we can avoid a lot of work |
301 | * with this test... | 305 | * with this test... |
302 | */ | 306 | */ |
303 | if (!local->monitors && !local->cooked_mntrs) { | 307 | if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) { |
304 | dev_kfree_skb(skb); | 308 | dev_kfree_skb(skb); |
305 | return; | 309 | return; |
306 | } | 310 | } |
@@ -345,9 +349,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
345 | /* for now report the total retry_count */ | 349 | /* for now report the total retry_count */ |
346 | rthdr->data_retries = retry_count; | 350 | rthdr->data_retries = retry_count; |
347 | 351 | ||
348 | /* Need to make a copy before skb->cb gets cleared */ | ||
349 | injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED); | ||
350 | |||
351 | /* XXX: is this sufficient for BPF? */ | 352 | /* XXX: is this sufficient for BPF? */ |
352 | skb_set_mac_header(skb, 0); | 353 | skb_set_mac_header(skb, 0); |
353 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 354 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -362,8 +363,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
362 | continue; | 363 | continue; |
363 | 364 | ||
364 | if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && | 365 | if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && |
365 | !injected && | 366 | !send_to_cooked) |
366 | (type == IEEE80211_FTYPE_DATA)) | ||
367 | continue; | 367 | continue; |
368 | 368 | ||
369 | if (prev_dev) { | 369 | if (prev_dev) { |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cfc473e1b050..db25fa9ef135 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -2011,14 +2011,12 @@ void ieee80211_tx_pending(unsigned long data) | |||
2011 | while (!skb_queue_empty(&local->pending[i])) { | 2011 | while (!skb_queue_empty(&local->pending[i])) { |
2012 | struct sk_buff *skb = __skb_dequeue(&local->pending[i]); | 2012 | struct sk_buff *skb = __skb_dequeue(&local->pending[i]); |
2013 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2013 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2014 | struct ieee80211_sub_if_data *sdata; | ||
2015 | 2014 | ||
2016 | if (WARN_ON(!info->control.vif)) { | 2015 | if (WARN_ON(!info->control.vif)) { |
2017 | kfree_skb(skb); | 2016 | kfree_skb(skb); |
2018 | continue; | 2017 | continue; |
2019 | } | 2018 | } |
2020 | 2019 | ||
2021 | sdata = vif_to_sdata(info->control.vif); | ||
2022 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, | 2020 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, |
2023 | flags); | 2021 | flags); |
2024 | 2022 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 795424396aff..6464a1972a69 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock) | |||
545 | struct hlist_head *head; | 545 | struct hlist_head *head; |
546 | struct sock *osk; | 546 | struct sock *osk; |
547 | struct hlist_node *node; | 547 | struct hlist_node *node; |
548 | s32 pid = current->tgid; | 548 | s32 pid = task_tgid_vnr(current); |
549 | int err; | 549 | int err; |
550 | static s32 rover = -4097; | 550 | static s32 rover = -4097; |
551 | 551 | ||
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 06438fa2b1e5..aa4308afcc7f 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -21,15 +21,17 @@ | |||
21 | 21 | ||
22 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ | 22 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ |
23 | 23 | ||
24 | static inline void genl_lock(void) | 24 | void genl_lock(void) |
25 | { | 25 | { |
26 | mutex_lock(&genl_mutex); | 26 | mutex_lock(&genl_mutex); |
27 | } | 27 | } |
28 | EXPORT_SYMBOL(genl_lock); | ||
28 | 29 | ||
29 | static inline void genl_unlock(void) | 30 | void genl_unlock(void) |
30 | { | 31 | { |
31 | mutex_unlock(&genl_mutex); | 32 | mutex_unlock(&genl_mutex); |
32 | } | 33 | } |
34 | EXPORT_SYMBOL(genl_unlock); | ||
33 | 35 | ||
34 | #define GENL_FAM_TAB_SIZE 16 | 36 | #define GENL_FAM_TAB_SIZE 16 |
35 | #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) | 37 | #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) |
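
genl_lock()/genl_unlock() become real exported symbols rather than file-local inlines, so modular code can serialize against generic netlink message processing. A hedged usage sketch (the caller below is hypothetical):

#include <net/genetlink.h>

/* Hypothetical module-side critical section: taking genl_mutex via the
 * exported helpers excludes concurrent genl message processing. */
static void example_genl_critical(void)
{
	genl_lock();
	/* ... inspect or update state shared with genl operations ... */
	genl_unlock();
}
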
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index cc90363d7e7a..d7d0310dca9d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1692,9 +1692,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1692 | if (i->alen != dev->addr_len) | 1692 | if (i->alen != dev->addr_len) |
1693 | return -EINVAL; | 1693 | return -EINVAL; |
1694 | if (what > 0) | 1694 | if (what > 0) |
1695 | return dev_mc_add(dev, i->addr, i->alen, 0); | 1695 | return dev_mc_add(dev, i->addr); |
1696 | else | 1696 | else |
1697 | return dev_mc_delete(dev, i->addr, i->alen, 0); | 1697 | return dev_mc_del(dev, i->addr); |
1698 | break; | 1698 | break; |
1699 | case PACKET_MR_PROMISC: | 1699 | case PACKET_MR_PROMISC: |
1700 | return dev_set_promiscuity(dev, what); | 1700 | return dev_set_promiscuity(dev, what); |
@@ -1706,9 +1706,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1706 | if (i->alen != dev->addr_len) | 1706 | if (i->alen != dev->addr_len) |
1707 | return -EINVAL; | 1707 | return -EINVAL; |
1708 | if (what > 0) | 1708 | if (what > 0) |
1709 | return dev_unicast_add(dev, i->addr); | 1709 | return dev_uc_add(dev, i->addr); |
1710 | else | 1710 | else |
1711 | return dev_unicast_delete(dev, i->addr); | 1711 | return dev_uc_del(dev, i->addr); |
1712 | break; | 1712 | break; |
1713 | default: | 1713 | default: |
1714 | break; | 1714 | break; |
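
These hunks track the consolidated address-list API: dev_mc_add()/dev_mc_del() and dev_uc_add()/dev_uc_del() now take only the device and a dev->addr_len-sized address, the old length and "global" arguments being gone. A sketch under that assumption (the helper is hypothetical):

#include <linux/netdevice.h>

/* Hypothetical helper: join or leave a hardware multicast group with
 * the new two-argument API; addr must be dev->addr_len bytes long. */
static int example_mc_track(struct net_device *dev,
			    unsigned char *addr, bool join)
{
	return join ? dev_mc_add(dev, addr) : dev_mc_del(dev, addr);
}
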
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index f81862baf4d0..7919a9edb8e9 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -160,7 +160,8 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
160 | 160 | ||
161 | poll_wait(file, sk->sk_sleep, wait); | 161 | poll_wait(file, sk->sk_sleep, wait); |
162 | 162 | ||
163 | poll_wait(file, &rds_poll_waitq, wait); | 163 | if (rs->rs_seen_congestion) |
164 | poll_wait(file, &rds_poll_waitq, wait); | ||
164 | 165 | ||
165 | read_lock_irqsave(&rs->rs_recv_lock, flags); | 166 | read_lock_irqsave(&rs->rs_recv_lock, flags); |
166 | if (!rs->rs_cong_monitor) { | 167 | if (!rs->rs_cong_monitor) { |
@@ -182,6 +183,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
182 | mask |= (POLLOUT | POLLWRNORM); | 183 | mask |= (POLLOUT | POLLWRNORM); |
183 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); | 184 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); |
184 | 185 | ||
186 | /* clear state any time we wake a seen-congested socket */ | ||
187 | if (mask) | ||
188 | rs->rs_seen_congestion = 0; | ||
189 | |||
185 | return mask; | 190 | return mask; |
186 | } | 191 | } |
187 | 192 | ||
@@ -447,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, | |||
447 | struct rds_info_lengths *lens) | 452 | struct rds_info_lengths *lens) |
448 | { | 453 | { |
449 | struct rds_sock *rs; | 454 | struct rds_sock *rs; |
450 | struct sock *sk; | ||
451 | struct rds_incoming *inc; | 455 | struct rds_incoming *inc; |
452 | unsigned long flags; | 456 | unsigned long flags; |
453 | unsigned int total = 0; | 457 | unsigned int total = 0; |
@@ -457,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, | |||
457 | spin_lock_irqsave(&rds_sock_lock, flags); | 461 | spin_lock_irqsave(&rds_sock_lock, flags); |
458 | 462 | ||
459 | list_for_each_entry(rs, &rds_sock_list, rs_item) { | 463 | list_for_each_entry(rs, &rds_sock_list, rs_item) { |
460 | sk = rds_rs_to_sk(rs); | ||
461 | read_lock(&rs->rs_recv_lock); | 464 | read_lock(&rs->rs_recv_lock); |
462 | 465 | ||
463 | /* XXX too lazy to maintain counts.. */ | 466 | /* XXX too lazy to maintain counts.. */ |
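
Read together with the rds.h and send.c hunks further down, this makes waiting on the global rds_poll_waitq opt-in: only a socket that actually saw congestion while sending sleeps there, and any readiness it reports clears the flag again. A condensed sketch of that lifecycle (RDS-internal declarations from net/rds/rds.h, including rds_poll_waitq, are assumed visible):

#include <linux/poll.h>

/* Condensed rs_seen_congestion lifecycle: set by a congested send,
 * consulted in poll, cleared on any wakeup. */
static unsigned int example_rds_poll(struct rds_sock *rs, struct file *file,
				     poll_table *wait)
{
	unsigned int mask = 0;

	if (rs->rs_seen_congestion)	/* set by a congested sendmsg() */
		poll_wait(file, &rds_poll_waitq, wait);

	/* ... derive mask from receive queue and send buffer state ... */

	if (mask)			/* any readiness resets the state */
		rs->rs_seen_congestion = 0;
	return mask;
}
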
diff --git a/net/rds/cong.c b/net/rds/cong.c index f1da27ceb064..0871a29f0780 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -219,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map) | |||
219 | spin_lock_irqsave(&rds_cong_lock, flags); | 219 | spin_lock_irqsave(&rds_cong_lock, flags); |
220 | 220 | ||
221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { | 221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { |
222 | if (conn->c_loopback) | ||
223 | continue; | ||
224 | if (!test_and_set_bit(0, &conn->c_map_queued)) { | 222 | if (!test_and_set_bit(0, &conn->c_map_queued)) { |
225 | rds_stats_inc(s_cong_update_queued); | 223 | rds_stats_inc(s_cong_update_queued); |
226 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 224 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 88d0856cb797..10ed0d55f759 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -204,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) | |||
204 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); | 204 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); |
205 | break; | 205 | break; |
206 | default: | 206 | default: |
207 | rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u " | 207 | rdsdebug("Fatal QP Event %u " |
208 | "- connection %pI4->%pI4, reconnecting\n", | 208 | "- connection %pI4->%pI4, reconnecting\n", |
209 | event->event, &conn->c_laddr, &conn->c_faddr); | 209 | event->event, &conn->c_laddr, &conn->c_faddr); |
210 | rds_conn_drop(conn); | ||
210 | break; | 211 | break; |
211 | } | 212 | } |
212 | } | 213 | } |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 059989fdb7d7..a54cd63f9e35 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -235,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) | |||
235 | { | 235 | { |
236 | flush_workqueue(rds_wq); | 236 | flush_workqueue(rds_wq); |
237 | rds_ib_flush_mr_pool(pool, 1); | 237 | rds_ib_flush_mr_pool(pool, 1); |
238 | BUG_ON(atomic_read(&pool->item_count)); | 238 | WARN_ON(atomic_read(&pool->item_count)); |
239 | BUG_ON(atomic_read(&pool->free_pinned)); | 239 | WARN_ON(atomic_read(&pool->free_pinned)); |
240 | kfree(pool); | 240 | kfree(pool); |
241 | } | 241 | } |
242 | 242 | ||
@@ -441,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) | |||
441 | 441 | ||
442 | /* FIXME we need a way to tell a r/w MR | 442 | /* FIXME we need a way to tell a r/w MR |
443 | * from a r/o MR */ | 443 | * from a r/o MR */ |
444 | BUG_ON(in_interrupt()); | ||
444 | set_page_dirty(page); | 445 | set_page_dirty(page); |
445 | put_page(page); | 446 | put_page(page); |
446 | } | 447 | } |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index c7dd11b835f0..c74e9904a6b2 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -469,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi | |||
469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
470 | 470 | ||
471 | rds_ib_stats_inc(s_ib_ack_send_failure); | 471 | rds_ib_stats_inc(s_ib_ack_send_failure); |
472 | /* Need to finesse this later. */ | 472 | |
473 | BUG(); | 473 | rds_ib_conn_error(ic->conn, "sending ack failed\n"); |
474 | } else | 474 | } else |
475 | rds_ib_stats_inc(s_ib_ack_sent); | 475 | rds_ib_stats_inc(s_ib_ack_sent); |
476 | } | 476 | } |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index a10fab6886d1..17fa80803ab0 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
243 | struct rds_message *rm; | 243 | struct rds_message *rm; |
244 | 244 | ||
245 | rm = rds_send_get_message(conn, send->s_op); | 245 | rm = rds_send_get_message(conn, send->s_op); |
246 | if (rm) | 246 | if (rm) { |
247 | if (rm->m_rdma_op) | ||
248 | rds_ib_send_unmap_rdma(ic, rm->m_rdma_op); | ||
247 | rds_ib_send_rdma_complete(rm, wc.status); | 249 | rds_ib_send_rdma_complete(rm, wc.status); |
250 | rds_message_put(rm); | ||
251 | } | ||
248 | } | 252 | } |
249 | 253 | ||
250 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; | 254 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; |
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
482 | BUG_ON(off % RDS_FRAG_SIZE); | 486 | BUG_ON(off % RDS_FRAG_SIZE); |
483 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); | 487 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); |
484 | 488 | ||
489 | /* Do not send cong updates to IB loopback */ | ||
490 | if (conn->c_loopback | ||
491 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | ||
492 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | ||
493 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | ||
494 | } | ||
495 | |||
485 | /* FIXME we may overallocate here */ | 496 | /* FIXME we may overallocate here */ |
486 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) | 497 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) |
487 | i = 1; | 498 | i = 1; |
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
574 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 585 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
575 | adv_credits += posted; | 586 | adv_credits += posted; |
576 | BUG_ON(adv_credits > 255); | 587 | BUG_ON(adv_credits > 255); |
577 | } else if (ic->i_rm != rm) | 588 | } |
578 | BUG(); | ||
579 | 589 | ||
580 | send = &ic->i_sends[pos]; | 590 | send = &ic->i_sends[pos]; |
581 | first = send; | 591 | first = send; |
@@ -714,8 +724,8 @@ add_header: | |||
714 | ic->i_rm = prev->s_rm; | 724 | ic->i_rm = prev->s_rm; |
715 | prev->s_rm = NULL; | 725 | prev->s_rm = NULL; |
716 | } | 726 | } |
717 | /* Finesse this later */ | 727 | |
718 | BUG(); | 728 | rds_ib_conn_error(ic->conn, "ib_post_send failed\n"); |
719 | goto out; | 729 | goto out; |
720 | } | 730 | } |
721 | 731 | ||
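
The new early return in rds_ib_xmit() pairs with the loop.c hunk below: rather than shipping (or memcpying) the congestion bitmap across a loopback connection, the far-end map is simply marked fully updated and the nominal length reported as sent. An annotated restatement wrapped as a helper (RDS-internal names from net/rds/rds.h assumed):

/* Annotated restatement, assuming rds.h: a congestion-bitmap message on
 * a loopback connection needs no transfer at all. */
static int example_xmit_loopback_cong(struct rds_connection *conn,
				      struct rds_message *rm)
{
	if (conn->c_loopback &&
	    (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP)) {
		/* flag every bit of the peer map as changed ... */
		rds_cong_map_updated(conn->c_fcong, ~(u64)0);
		/* ... and report the nominal full length as sent */
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}
	return 0;	/* not handled; fall through to the real xmit path */
}
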
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 3e9460f935d8..a9d951b4fbae 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -157,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data) | |||
157 | case IB_EVENT_QP_REQ_ERR: | 157 | case IB_EVENT_QP_REQ_ERR: |
158 | case IB_EVENT_QP_FATAL: | 158 | case IB_EVENT_QP_FATAL: |
159 | default: | 159 | default: |
160 | rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n", | 160 | rdsdebug("Fatal QP Event %u " |
161 | "- connection %pI4->%pI4, reconnecting\n", | ||
161 | event->event, &conn->c_laddr, | 162 | event->event, &conn->c_laddr, |
162 | &conn->c_faddr); | 163 | &conn->c_faddr); |
164 | rds_conn_drop(conn); | ||
163 | break; | 165 | break; |
164 | } | 166 | } |
165 | } | 167 | } |
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index da43ee840ca3..3d479067d54d 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c | |||
@@ -469,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi | |||
469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
470 | 470 | ||
471 | rds_iw_stats_inc(s_iw_ack_send_failure); | 471 | rds_iw_stats_inc(s_iw_ack_send_failure); |
472 | /* Need to finesse this later. */ | 472 | |
473 | BUG(); | 473 | rds_iw_conn_error(ic->conn, "sending ack failed\n"); |
474 | } else | 474 | } else |
475 | rds_iw_stats_inc(s_iw_ack_sent); | 475 | rds_iw_stats_inc(s_iw_ack_sent); |
476 | } | 476 | } |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 1379e9d66a78..52182ff7519e 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
617 | adv_credits += posted; | 617 | adv_credits += posted; |
618 | BUG_ON(adv_credits > 255); | 618 | BUG_ON(adv_credits > 255); |
619 | } else if (ic->i_rm != rm) | 619 | } |
620 | BUG(); | ||
621 | 620 | ||
622 | send = &ic->i_sends[pos]; | 621 | send = &ic->i_sends[pos]; |
623 | first = send; | 622 | first = send; |
diff --git a/net/rds/loop.c b/net/rds/loop.c index 0d7a159158b8..dd9879379457 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -81,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn, | |||
81 | struct rds_cong_map *map, | 81 | struct rds_cong_map *map, |
82 | unsigned long offset) | 82 | unsigned long offset) |
83 | { | 83 | { |
84 | unsigned long i; | ||
85 | |||
86 | BUG_ON(offset); | 84 | BUG_ON(offset); |
87 | BUG_ON(map != conn->c_lcong); | 85 | BUG_ON(map != conn->c_lcong); |
88 | 86 | ||
89 | for (i = 0; i < RDS_CONG_MAP_PAGES; i++) { | ||
90 | memcpy((void *)conn->c_fcong->m_page_addrs[i], | ||
91 | (void *)map->m_page_addrs[i], PAGE_SIZE); | ||
92 | } | ||
93 | |||
94 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 87 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
95 | 88 | ||
96 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 89 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; |
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 5ce9437cad67..75fd13bb631b 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -439,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro) | |||
439 | /* Mark page dirty if it was possibly modified, which | 439 | /* Mark page dirty if it was possibly modified, which |
440 | * is the case for a RDMA_READ which copies from remote | 440 | * is the case for a RDMA_READ which copies from remote |
441 | * to local memory */ | 441 | * to local memory */ |
442 | if (!ro->r_write) | 442 | if (!ro->r_write) { |
443 | BUG_ON(in_interrupt()); | ||
443 | set_page_dirty(page); | 444 | set_page_dirty(page); |
445 | } | ||
444 | put_page(page); | 446 | put_page(page); |
445 | } | 447 | } |
446 | 448 | ||
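
This is the same invariant the ib_rdma.c hunk asserts: pages a remote RDMA_READ wrote into must be dirtied, but set_page_dirty() can call into address-space operations that are not interrupt-safe. A sketch of the rule (the helper is hypothetical):

#include <linux/mm.h>
#include <linux/hardirq.h>

/* Hypothetical helper: completion paths that dirty user pages must run
 * in process context; BUG_ON(in_interrupt()) documents and enforces it. */
static void example_release_rdma_page(struct page *page, bool remote_wrote)
{
	if (remote_wrote) {		/* RDMA_READ filled local memory */
		BUG_ON(in_interrupt());
		set_page_dirty(page);
	}
	put_page(page);
}
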
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 9ece910ea394..5ea82fc47c3e 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
101 | break; | 101 | break; |
102 | 102 | ||
103 | case RDMA_CM_EVENT_DISCONNECTED: | 103 | case RDMA_CM_EVENT_DISCONNECTED: |
104 | printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection " | 104 | rdsdebug("DISCONNECT event - dropping connection " |
105 | "%pI4->%pI4\n", &conn->c_laddr, | 105 | "%pI4->%pI4\n", &conn->c_laddr, |
106 | &conn->c_faddr); | 106 | &conn->c_faddr); |
107 | rds_conn_drop(conn); | 107 | rds_conn_drop(conn); |
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
109 | 109 | ||
110 | default: | 110 | default: |
111 | /* things like device disconnect? */ | 111 | /* things like device disconnect? */ |
112 | printk(KERN_ERR "unknown event %u\n", event->event); | 112 | printk(KERN_ERR "RDS: unknown event %u!\n", event->event); |
113 | BUG(); | ||
114 | break; | 113 | break; |
115 | } | 114 | } |
116 | 115 | ||
diff --git a/net/rds/rds.h b/net/rds/rds.h index 85d6f897ecc7..4bec6e2ed495 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
@@ -388,6 +388,8 @@ struct rds_sock { | |||
388 | 388 | ||
389 | /* flag indicating we were congested or not */ | 389 | /* flag indicating we were congested or not */ |
390 | int rs_congested; | 390 | int rs_congested; |
391 | /* seen congestion (ENOBUFS) when sending? */ | ||
392 | int rs_seen_congestion; | ||
391 | 393 | ||
392 | /* rs_lock protects all these adjacent members before the newline */ | 394 | /* rs_lock protects all these adjacent members before the newline */ |
393 | spinlock_t rs_lock; | 395 | spinlock_t rs_lock; |
diff --git a/net/rds/send.c b/net/rds/send.c index f04b929ded92..53d6795ac9d0 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -508,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message); | |||
508 | */ | 508 | */ |
509 | void rds_send_remove_from_sock(struct list_head *messages, int status) | 509 | void rds_send_remove_from_sock(struct list_head *messages, int status) |
510 | { | 510 | { |
511 | unsigned long flags = 0; /* silence gcc :P */ | 511 | unsigned long flags; |
512 | struct rds_sock *rs = NULL; | 512 | struct rds_sock *rs = NULL; |
513 | struct rds_message *rm; | 513 | struct rds_message *rm; |
514 | 514 | ||
515 | local_irq_save(flags); | ||
516 | while (!list_empty(messages)) { | 515 | while (!list_empty(messages)) { |
516 | int was_on_sock = 0; | ||
517 | |||
517 | rm = list_entry(messages->next, struct rds_message, | 518 | rm = list_entry(messages->next, struct rds_message, |
518 | m_conn_item); | 519 | m_conn_item); |
519 | list_del_init(&rm->m_conn_item); | 520 | list_del_init(&rm->m_conn_item); |
@@ -528,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
528 | * while we're messing with it. It does not prevent the | 529 | * while we're messing with it. It does not prevent the |
529 | * message from being removed from the socket, though. | 530 | * message from being removed from the socket, though. |
530 | */ | 531 | */ |
531 | spin_lock(&rm->m_rs_lock); | 532 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
532 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) | 533 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) |
533 | goto unlock_and_drop; | 534 | goto unlock_and_drop; |
534 | 535 | ||
535 | if (rs != rm->m_rs) { | 536 | if (rs != rm->m_rs) { |
536 | if (rs) { | 537 | if (rs) { |
537 | spin_unlock(&rs->rs_lock); | ||
538 | rds_wake_sk_sleep(rs); | 538 | rds_wake_sk_sleep(rs); |
539 | sock_put(rds_rs_to_sk(rs)); | 539 | sock_put(rds_rs_to_sk(rs)); |
540 | } | 540 | } |
541 | rs = rm->m_rs; | 541 | rs = rm->m_rs; |
542 | spin_lock(&rs->rs_lock); | ||
543 | sock_hold(rds_rs_to_sk(rs)); | 542 | sock_hold(rds_rs_to_sk(rs)); |
544 | } | 543 | } |
544 | spin_lock(&rs->rs_lock); | ||
545 | 545 | ||
546 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { | 546 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { |
547 | struct rds_rdma_op *ro = rm->m_rdma_op; | 547 | struct rds_rdma_op *ro = rm->m_rdma_op; |
@@ -558,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
558 | notifier->n_status = status; | 558 | notifier->n_status = status; |
559 | rm->m_rdma_op->r_notifier = NULL; | 559 | rm->m_rdma_op->r_notifier = NULL; |
560 | } | 560 | } |
561 | rds_message_put(rm); | 561 | was_on_sock = 1; |
562 | rm->m_rs = NULL; | 562 | rm->m_rs = NULL; |
563 | } | 563 | } |
564 | spin_unlock(&rs->rs_lock); | ||
564 | 565 | ||
565 | unlock_and_drop: | 566 | unlock_and_drop: |
566 | spin_unlock(&rm->m_rs_lock); | 567 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
567 | rds_message_put(rm); | 568 | rds_message_put(rm); |
569 | if (was_on_sock) | ||
570 | rds_message_put(rm); | ||
568 | } | 571 | } |
569 | 572 | ||
570 | if (rs) { | 573 | if (rs) { |
571 | spin_unlock(&rs->rs_lock); | ||
572 | rds_wake_sk_sleep(rs); | 574 | rds_wake_sk_sleep(rs); |
573 | sock_put(rds_rs_to_sk(rs)); | 575 | sock_put(rds_rs_to_sk(rs)); |
574 | } | 576 | } |
575 | local_irq_restore(flags); | ||
576 | } | 577 | } |
577 | 578 | ||
578 | /* | 579 | /* |
@@ -634,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
634 | list_move(&rm->m_sock_item, &list); | 635 | list_move(&rm->m_sock_item, &list); |
635 | rds_send_sndbuf_remove(rs, rm); | 636 | rds_send_sndbuf_remove(rs, rm); |
636 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | 637 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); |
637 | |||
638 | /* If this is a RDMA operation, notify the app. */ | ||
639 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
640 | } | 638 | } |
641 | 639 | ||
642 | /* order flag updates with the rs lock */ | 640 | /* order flag updates with the rs lock */ |
@@ -645,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
645 | 643 | ||
646 | spin_unlock_irqrestore(&rs->rs_lock, flags); | 644 | spin_unlock_irqrestore(&rs->rs_lock, flags); |
647 | 645 | ||
648 | if (wake) | ||
649 | rds_wake_sk_sleep(rs); | ||
650 | |||
651 | conn = NULL; | 646 | conn = NULL; |
652 | 647 | ||
653 | /* now remove the messages from the conn list as needed */ | 648 | /* now remove the messages from the conn list as needed */ |
@@ -655,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
655 | /* We do this here rather than in the loop above, so that | 650 | /* We do this here rather than in the loop above, so that |
656 | * we don't have to nest m_rs_lock under rs->rs_lock */ | 651 | * we don't have to nest m_rs_lock under rs->rs_lock */ |
657 | spin_lock_irqsave(&rm->m_rs_lock, flags2); | 652 | spin_lock_irqsave(&rm->m_rs_lock, flags2); |
653 | /* If this is a RDMA operation, notify the app. */ | ||
654 | spin_lock(&rs->rs_lock); | ||
655 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
656 | spin_unlock(&rs->rs_lock); | ||
658 | rm->m_rs = NULL; | 657 | rm->m_rs = NULL; |
659 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); | 658 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); |
660 | 659 | ||
@@ -683,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
683 | if (conn) | 682 | if (conn) |
684 | spin_unlock_irqrestore(&conn->c_lock, flags); | 683 | spin_unlock_irqrestore(&conn->c_lock, flags); |
685 | 684 | ||
685 | if (wake) | ||
686 | rds_wake_sk_sleep(rs); | ||
687 | |||
686 | while (!list_empty(&list)) { | 688 | while (!list_empty(&list)) { |
687 | rm = list_entry(list.next, struct rds_message, m_sock_item); | 689 | rm = list_entry(list.next, struct rds_message, m_sock_item); |
688 | list_del_init(&rm->m_sock_item); | 690 | list_del_init(&rm->m_sock_item); |
@@ -816,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
816 | int ret = 0; | 818 | int ret = 0; |
817 | int queued = 0, allocated_mr = 0; | 819 | int queued = 0, allocated_mr = 0; |
818 | int nonblock = msg->msg_flags & MSG_DONTWAIT; | 820 | int nonblock = msg->msg_flags & MSG_DONTWAIT; |
819 | long timeo = sock_rcvtimeo(sk, nonblock); | 821 | long timeo = sock_sndtimeo(sk, nonblock); |
820 | 822 | ||
821 | /* Mirror Linux UDP mirror of BSD error message compatibility */ | 823 | /* Mirror Linux UDP mirror of BSD error message compatibility */ |
822 | /* XXX: Perhaps MSG_MORE someday */ | 824 | /* XXX: Perhaps MSG_MORE someday */ |
@@ -895,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
895 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 897 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
896 | 898 | ||
897 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | 899 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); |
898 | if (ret) | 900 | if (ret) { |
901 | rs->rs_seen_congestion = 1; | ||
899 | goto out; | 902 | goto out; |
903 | } | ||
900 | 904 | ||
901 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, | 905 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, |
902 | dport, &queued)) { | 906 | dport, &queued)) { |
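
Among the locking rework here, the one-line timeout fix is easy to miss: rds_sendmsg() was blocking according to the receive timeout. A sketch of the corrected derivation (sock_sndtimeo() is the stock net/sock.h helper; the wrapper is hypothetical):

#include <net/sock.h>

/* Sketch: sendmsg() should honour SO_SNDTIMEO; with MSG_DONTWAIT the
 * helper returns 0 and the caller falls through to -EAGAIN handling. */
static long example_send_timeout(struct sock *sk, int msg_flags)
{
	int nonblock = msg_flags & MSG_DONTWAIT;

	return sock_sndtimeo(sk, nonblock);
}
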
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index e08ec912d8b0..1aba6878fa5d 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
@@ -98,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, | |||
98 | goto out; | 98 | goto out; |
99 | } | 99 | } |
100 | 100 | ||
101 | rds_stats_add(s_copy_to_user, to_copy); | ||
101 | size -= to_copy; | 102 | size -= to_copy; |
102 | ret += to_copy; | 103 | ret += to_copy; |
103 | skb_off += to_copy; | 104 | skb_off += to_copy; |
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 34fdcc059e54..a28b895ff0d1 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c | |||
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk) | |||
240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); | 240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); |
241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); | 241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); |
242 | 242 | ||
243 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 243 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) |
244 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | ||
245 | |||
244 | out: | 246 | out: |
245 | read_unlock(&sk->sk_callback_lock); | 247 | read_unlock(&sk->sk_callback_lock); |
246 | 248 | ||
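
The write_space hunk stops kicking the send worker on every ACK: work is queued only once at least half of sk_sndbuf is free, echoing the half-empty send-buffer notion of "writable" used elsewhere in the stack. A sketch of the predicate (the wrapper is hypothetical):

#include <net/sock.h>

/* Sketch: treat the socket as writable only when at most half the send
 * buffer is still in flight, so each wakeup can make real progress. */
static bool example_write_space_available(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf;
}
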
diff --git a/net/rds/threads.c b/net/rds/threads.c index 00fa10e59af8..786c20eaaf5e 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -259,7 +259,7 @@ void rds_threads_exit(void) | |||
259 | 259 | ||
260 | int __init rds_threads_init(void) | 260 | int __init rds_threads_init(void) |
261 | { | 261 | { |
262 | rds_wq = create_singlethread_workqueue("krdsd"); | 262 | rds_wq = create_workqueue("krdsd"); |
263 | if (rds_wq == NULL) | 263 | if (rds_wq == NULL) |
264 | return -ENOMEM; | 264 | return -ENOMEM; |
265 | 265 | ||
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index a9fa86f65983..51875a0c5d48 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -629,6 +629,49 @@ static ssize_t rfkill_persistent_show(struct device *dev, | |||
629 | return sprintf(buf, "%d\n", rfkill->persistent); | 629 | return sprintf(buf, "%d\n", rfkill->persistent); |
630 | } | 630 | } |
631 | 631 | ||
632 | static ssize_t rfkill_hard_show(struct device *dev, | ||
633 | struct device_attribute *attr, | ||
634 | char *buf) | ||
635 | { | ||
636 | struct rfkill *rfkill = to_rfkill(dev); | ||
637 | |||
638 | return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0); | ||
639 | } | ||
640 | |||
641 | static ssize_t rfkill_soft_show(struct device *dev, | ||
642 | struct device_attribute *attr, | ||
643 | char *buf) | ||
644 | { | ||
645 | struct rfkill *rfkill = to_rfkill(dev); | ||
646 | |||
647 | return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0); | ||
648 | } | ||
649 | |||
650 | static ssize_t rfkill_soft_store(struct device *dev, | ||
651 | struct device_attribute *attr, | ||
652 | const char *buf, size_t count) | ||
653 | { | ||
654 | struct rfkill *rfkill = to_rfkill(dev); | ||
655 | unsigned long state; | ||
656 | int err; | ||
657 | |||
658 | if (!capable(CAP_NET_ADMIN)) | ||
659 | return -EPERM; | ||
660 | |||
661 | err = strict_strtoul(buf, 0, &state); | ||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | if (state > 1) | ||
666 | return -EINVAL; | ||
667 | |||
668 | mutex_lock(&rfkill_global_mutex); | ||
669 | rfkill_set_block(rfkill, state); | ||
670 | mutex_unlock(&rfkill_global_mutex); | ||
671 | |||
672 | return err ?: count; | ||
673 | } | ||
674 | |||
632 | static u8 user_state_from_blocked(unsigned long state) | 675 | static u8 user_state_from_blocked(unsigned long state) |
633 | { | 676 | { |
634 | if (state & RFKILL_BLOCK_HW) | 677 | if (state & RFKILL_BLOCK_HW) |
@@ -644,14 +687,8 @@ static ssize_t rfkill_state_show(struct device *dev, | |||
644 | char *buf) | 687 | char *buf) |
645 | { | 688 | { |
646 | struct rfkill *rfkill = to_rfkill(dev); | 689 | struct rfkill *rfkill = to_rfkill(dev); |
647 | unsigned long flags; | ||
648 | u32 state; | ||
649 | |||
650 | spin_lock_irqsave(&rfkill->lock, flags); | ||
651 | state = rfkill->state; | ||
652 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
653 | 690 | ||
654 | return sprintf(buf, "%d\n", user_state_from_blocked(state)); | 691 | return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state)); |
655 | } | 692 | } |
656 | 693 | ||
657 | static ssize_t rfkill_state_store(struct device *dev, | 694 | static ssize_t rfkill_state_store(struct device *dev, |
@@ -701,6 +738,8 @@ static struct device_attribute rfkill_dev_attrs[] = { | |||
701 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), | 738 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), |
702 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | 739 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), |
703 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | 740 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), |
741 | __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store), | ||
742 | __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL), | ||
704 | __ATTR_NULL | 743 | __ATTR_NULL |
705 | }; | 744 | }; |
706 | 745 | ||
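
The new "soft" and "hard" attributes expose the two block bits directly instead of the legacy tri-state "state" file; "hard" is read-only (the hardware switch), while "soft" accepts 0 or 1 from a CAP_NET_ADMIN caller. A hypothetical userspace probe (the rfkill0 path is an assumption about the instance name):

#include <stdio.h>

/* Hypothetical userspace use of the new sysfs attributes. */
int main(void)
{
	FILE *f = fopen("/sys/class/rfkill/rfkill0/soft", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* request a soft block */
	fclose(f);
	return 0;
}
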
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index d8e0171d9a4b..019045174fc3 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -668,7 +668,8 @@ nlmsg_failure: | |||
668 | } | 668 | } |
669 | 669 | ||
670 | static int | 670 | static int |
671 | act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) | 671 | act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, |
672 | struct tc_action *a, int event) | ||
672 | { | 673 | { |
673 | struct sk_buff *skb; | 674 | struct sk_buff *skb; |
674 | 675 | ||
@@ -680,7 +681,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) | |||
680 | return -EINVAL; | 681 | return -EINVAL; |
681 | } | 682 | } |
682 | 683 | ||
683 | return rtnl_unicast(skb, &init_net, pid); | 684 | return rtnl_unicast(skb, net, pid); |
684 | } | 685 | } |
685 | 686 | ||
686 | static struct tc_action * | 687 | static struct tc_action * |
@@ -750,7 +751,8 @@ static struct tc_action *create_a(int i) | |||
750 | return act; | 751 | return act; |
751 | } | 752 | } |
752 | 753 | ||
753 | static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | 754 | static int tca_action_flush(struct net *net, struct nlattr *nla, |
755 | struct nlmsghdr *n, u32 pid) | ||
754 | { | 756 | { |
755 | struct sk_buff *skb; | 757 | struct sk_buff *skb; |
756 | unsigned char *b; | 758 | unsigned char *b; |
@@ -809,7 +811,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | |||
809 | nlh->nlmsg_flags |= NLM_F_ROOT; | 811 | nlh->nlmsg_flags |= NLM_F_ROOT; |
810 | module_put(a->ops->owner); | 812 | module_put(a->ops->owner); |
811 | kfree(a); | 813 | kfree(a); |
812 | err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 814 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
813 | if (err > 0) | 815 | if (err > 0) |
814 | return 0; | 816 | return 0; |
815 | 817 | ||
@@ -826,7 +828,8 @@ noflush_out: | |||
826 | } | 828 | } |
827 | 829 | ||
828 | static int | 830 | static int |
829 | tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | 831 | tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, |
832 | u32 pid, int event) | ||
830 | { | 833 | { |
831 | int i, ret; | 834 | int i, ret; |
832 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; | 835 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; |
@@ -838,7 +841,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
838 | 841 | ||
839 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { | 842 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { |
840 | if (tb[1] != NULL) | 843 | if (tb[1] != NULL) |
841 | return tca_action_flush(tb[1], n, pid); | 844 | return tca_action_flush(net, tb[1], n, pid); |
842 | else | 845 | else |
843 | return -EINVAL; | 846 | return -EINVAL; |
844 | } | 847 | } |
@@ -859,7 +862,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
859 | } | 862 | } |
860 | 863 | ||
861 | if (event == RTM_GETACTION) | 864 | if (event == RTM_GETACTION) |
862 | ret = act_get_notify(pid, n, head, event); | 865 | ret = act_get_notify(net, pid, n, head, event); |
863 | else { /* delete */ | 866 | else { /* delete */ |
864 | struct sk_buff *skb; | 867 | struct sk_buff *skb; |
865 | 868 | ||
@@ -878,7 +881,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
878 | 881 | ||
879 | /* now do the delete */ | 882 | /* now do the delete */ |
880 | tcf_action_destroy(head, 0); | 883 | tcf_action_destroy(head, 0); |
881 | ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, | 884 | ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
882 | n->nlmsg_flags&NLM_F_ECHO); | 885 | n->nlmsg_flags&NLM_F_ECHO); |
883 | if (ret > 0) | 886 | if (ret > 0) |
884 | return 0; | 887 | return 0; |
@@ -889,8 +892,8 @@ err: | |||
889 | return ret; | 892 | return ret; |
890 | } | 893 | } |
891 | 894 | ||
892 | static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | 895 | static int tcf_add_notify(struct net *net, struct tc_action *a, |
893 | u16 flags) | 896 | u32 pid, u32 seq, int event, u16 flags) |
894 | { | 897 | { |
895 | struct tcamsg *t; | 898 | struct tcamsg *t; |
896 | struct nlmsghdr *nlh; | 899 | struct nlmsghdr *nlh; |
@@ -923,7 +926,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | |||
923 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 926 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
924 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; | 927 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; |
925 | 928 | ||
926 | err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); | 929 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); |
927 | if (err > 0) | 930 | if (err > 0) |
928 | err = 0; | 931 | err = 0; |
929 | return err; | 932 | return err; |
@@ -936,7 +939,8 @@ nlmsg_failure: | |||
936 | 939 | ||
937 | 940 | ||
938 | static int | 941 | static int |
939 | tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) | 942 | tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, |
943 | u32 pid, int ovr) | ||
940 | { | 944 | { |
941 | int ret = 0; | 945 | int ret = 0; |
942 | struct tc_action *act; | 946 | struct tc_action *act; |
@@ -954,7 +958,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) | |||
954 | /* dump then free all the actions after update; inserted policy | 958 | /* dump then free all the actions after update; inserted policy |
955 | * stays intact | 959 | * stays intact |
956 | * */ | 960 | * */ |
957 | ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); | 961 | ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); |
958 | for (a = act; a; a = act) { | 962 | for (a = act; a; a = act) { |
959 | act = a->next; | 963 | act = a->next; |
960 | kfree(a); | 964 | kfree(a); |
@@ -970,9 +974,6 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
970 | u32 pid = skb ? NETLINK_CB(skb).pid : 0; | 974 | u32 pid = skb ? NETLINK_CB(skb).pid : 0; |
971 | int ret = 0, ovr = 0; | 975 | int ret = 0, ovr = 0; |
972 | 976 | ||
973 | if (!net_eq(net, &init_net)) | ||
974 | return -EINVAL; | ||
975 | |||
976 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); | 977 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); |
977 | if (ret < 0) | 978 | if (ret < 0) |
978 | return ret; | 979 | return ret; |
@@ -995,15 +996,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
995 | if (n->nlmsg_flags&NLM_F_REPLACE) | 996 | if (n->nlmsg_flags&NLM_F_REPLACE) |
996 | ovr = 1; | 997 | ovr = 1; |
997 | replay: | 998 | replay: |
998 | ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr); | 999 | ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); |
999 | if (ret == -EAGAIN) | 1000 | if (ret == -EAGAIN) |
1000 | goto replay; | 1001 | goto replay; |
1001 | break; | 1002 | break; |
1002 | case RTM_DELACTION: | 1003 | case RTM_DELACTION: |
1003 | ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION); | 1004 | ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, |
1005 | pid, RTM_DELACTION); | ||
1004 | break; | 1006 | break; |
1005 | case RTM_GETACTION: | 1007 | case RTM_GETACTION: |
1006 | ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION); | 1008 | ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, |
1009 | pid, RTM_GETACTION); | ||
1007 | break; | 1010 | break; |
1008 | default: | 1011 | default: |
1009 | BUG(); | 1012 | BUG(); |
@@ -1043,7 +1046,6 @@ find_dump_kind(const struct nlmsghdr *n) | |||
1043 | static int | 1046 | static int |
1044 | tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | 1047 | tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) |
1045 | { | 1048 | { |
1046 | struct net *net = sock_net(skb->sk); | ||
1047 | struct nlmsghdr *nlh; | 1049 | struct nlmsghdr *nlh; |
1048 | unsigned char *b = skb_tail_pointer(skb); | 1050 | unsigned char *b = skb_tail_pointer(skb); |
1049 | struct nlattr *nest; | 1051 | struct nlattr *nest; |
@@ -1053,9 +1055,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1053 | struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); | 1055 | struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); |
1054 | struct nlattr *kind = find_dump_kind(cb->nlh); | 1056 | struct nlattr *kind = find_dump_kind(cb->nlh); |
1055 | 1057 | ||
1056 | if (!net_eq(net, &init_net)) | ||
1057 | return 0; | ||
1058 | |||
1059 | if (kind == NULL) { | 1058 | if (kind == NULL) { |
1060 | printk("tc_dump_action: action bad kind\n"); | 1059 | printk("tc_dump_action: action bad kind\n"); |
1061 | return 0; | 1060 | return 0; |
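
The act_api hunks above — like the cls_api and sch_api ones that follow — are mechanical namespace plumbing: a struct net * derived from the request socket is threaded through every notify and flush helper, replacing hard-coded &init_net and letting the init_net-only guards go. A sketch of the extraction pattern at the entry points (the handler is hypothetical):

#include <net/net_namespace.h>
#include <net/sock.h>

/* Hypothetical entry point: the namespace comes from the requesting
 * socket and is passed down instead of assuming &init_net. */
static void example_rtnl_handler(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	/* ... hand net to every notify/lookup helper below ... */
	(void)net;
}
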
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f082b27ff46d..5fd0c28ef79a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -99,8 +99,9 @@ out: | |||
99 | } | 99 | } |
100 | EXPORT_SYMBOL(unregister_tcf_proto_ops); | 100 | EXPORT_SYMBOL(unregister_tcf_proto_ops); |
101 | 101 | ||
102 | static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 102 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
103 | struct tcf_proto *tp, unsigned long fh, int event); | 103 | struct nlmsghdr *n, struct tcf_proto *tp, |
104 | unsigned long fh, int event); | ||
104 | 105 | ||
105 | 106 | ||
106 | /* Select new prio value from the range, managed by kernel. */ | 107 | /* Select new prio value from the range, managed by kernel. */ |
@@ -138,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
138 | int err; | 139 | int err; |
139 | int tp_created = 0; | 140 | int tp_created = 0; |
140 | 141 | ||
141 | if (!net_eq(net, &init_net)) | ||
142 | return -EINVAL; | ||
143 | |||
144 | replay: | 142 | replay: |
145 | t = NLMSG_DATA(n); | 143 | t = NLMSG_DATA(n); |
146 | protocol = TC_H_MIN(t->tcm_info); | 144 | protocol = TC_H_MIN(t->tcm_info); |
@@ -159,7 +157,7 @@ replay: | |||
159 | /* Find head of filter chain. */ | 157 | /* Find head of filter chain. */ |
160 | 158 | ||
161 | /* Find link */ | 159 | /* Find link */ |
162 | dev = __dev_get_by_index(&init_net, t->tcm_ifindex); | 160 | dev = __dev_get_by_index(net, t->tcm_ifindex); |
163 | if (dev == NULL) | 161 | if (dev == NULL) |
164 | return -ENODEV; | 162 | return -ENODEV; |
165 | 163 | ||
@@ -283,7 +281,7 @@ replay: | |||
283 | *back = tp->next; | 281 | *back = tp->next; |
284 | spin_unlock_bh(root_lock); | 282 | spin_unlock_bh(root_lock); |
285 | 283 | ||
286 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 284 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
287 | tcf_destroy(tp); | 285 | tcf_destroy(tp); |
288 | err = 0; | 286 | err = 0; |
289 | goto errout; | 287 | goto errout; |
@@ -306,10 +304,10 @@ replay: | |||
306 | case RTM_DELTFILTER: | 304 | case RTM_DELTFILTER: |
307 | err = tp->ops->delete(tp, fh); | 305 | err = tp->ops->delete(tp, fh); |
308 | if (err == 0) | 306 | if (err == 0) |
309 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 307 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
310 | goto errout; | 308 | goto errout; |
311 | case RTM_GETTFILTER: | 309 | case RTM_GETTFILTER: |
312 | err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 310 | err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
313 | goto errout; | 311 | goto errout; |
314 | default: | 312 | default: |
315 | err = -EINVAL; | 313 | err = -EINVAL; |
@@ -325,7 +323,7 @@ replay: | |||
325 | *back = tp; | 323 | *back = tp; |
326 | spin_unlock_bh(root_lock); | 324 | spin_unlock_bh(root_lock); |
327 | } | 325 | } |
328 | tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 326 | tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
329 | } else { | 327 | } else { |
330 | if (tp_created) | 328 | if (tp_created) |
331 | tcf_destroy(tp); | 329 | tcf_destroy(tp); |
@@ -371,8 +369,9 @@ nla_put_failure: | |||
371 | return -1; | 369 | return -1; |
372 | } | 370 | } |
373 | 371 | ||
374 | static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 372 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
375 | struct tcf_proto *tp, unsigned long fh, int event) | 373 | struct nlmsghdr *n, struct tcf_proto *tp, |
374 | unsigned long fh, int event) | ||
376 | { | 375 | { |
377 | struct sk_buff *skb; | 376 | struct sk_buff *skb; |
378 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 377 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -386,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
386 | return -EINVAL; | 385 | return -EINVAL; |
387 | } | 386 | } |
388 | 387 | ||
389 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, | 388 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
390 | n->nlmsg_flags & NLM_F_ECHO); | 389 | n->nlmsg_flags & NLM_F_ECHO); |
391 | } | 390 | } |
392 | 391 | ||
@@ -419,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
419 | const struct Qdisc_class_ops *cops; | 418 | const struct Qdisc_class_ops *cops; |
420 | struct tcf_dump_args arg; | 419 | struct tcf_dump_args arg; |
421 | 420 | ||
422 | if (!net_eq(net, &init_net)) | ||
423 | return 0; | ||
424 | |||
425 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 421 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
426 | return skb->len; | 422 | return skb->len; |
427 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 423 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
428 | return skb->len; | 424 | return skb->len; |
429 | 425 | ||
430 | if (!tcm->tcm_parent) | 426 | if (!tcm->tcm_parent) |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 17c5dfc67320..593eac056e8d 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -773,10 +773,10 @@ static int __init init_u32(void) | |||
773 | printk(" Performance counters on\n"); | 773 | printk(" Performance counters on\n"); |
774 | #endif | 774 | #endif |
775 | #ifdef CONFIG_NET_CLS_IND | 775 | #ifdef CONFIG_NET_CLS_IND |
776 | printk(" input device check on \n"); | 776 | printk(" input device check on\n"); |
777 | #endif | 777 | #endif |
778 | #ifdef CONFIG_NET_CLS_ACT | 778 | #ifdef CONFIG_NET_CLS_ACT |
779 | printk(" Actions configured \n"); | 779 | printk(" Actions configured\n"); |
780 | #endif | 780 | #endif |
781 | return register_tcf_proto_ops(&cls_u32_ops); | 781 | return register_tcf_proto_ops(&cls_u32_ops); |
782 | } | 782 | } |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 145268ca57cf..9839b26674f4 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -35,10 +35,12 @@ | |||
35 | #include <net/netlink.h> | 35 | #include <net/netlink.h> |
36 | #include <net/pkt_sched.h> | 36 | #include <net/pkt_sched.h> |
37 | 37 | ||
38 | static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, | 38 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
39 | struct nlmsghdr *n, u32 clid, | ||
39 | struct Qdisc *old, struct Qdisc *new); | 40 | struct Qdisc *old, struct Qdisc *new); |
40 | static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 41 | static int tclass_notify(struct net *net, struct sk_buff *oskb, |
41 | struct Qdisc *q, unsigned long cl, int event); | 42 | struct nlmsghdr *n, struct Qdisc *q, |
43 | unsigned long cl, int event); | ||
42 | 44 | ||
43 | /* | 45 | /* |
44 | 46 | ||
@@ -639,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
639 | } | 641 | } |
640 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | 642 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); |
641 | 643 | ||
642 | static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid, | 644 | static void notify_and_destroy(struct net *net, struct sk_buff *skb, |
645 | struct nlmsghdr *n, u32 clid, | ||
643 | struct Qdisc *old, struct Qdisc *new) | 646 | struct Qdisc *old, struct Qdisc *new) |
644 | { | 647 | { |
645 | if (new || old) | 648 | if (new || old) |
646 | qdisc_notify(skb, n, clid, old, new); | 649 | qdisc_notify(net, skb, n, clid, old, new); |
647 | 650 | ||
648 | if (old) | 651 | if (old) |
649 | qdisc_destroy(old); | 652 | qdisc_destroy(old); |
@@ -663,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
663 | struct Qdisc *new, struct Qdisc *old) | 666 | struct Qdisc *new, struct Qdisc *old) |
664 | { | 667 | { |
665 | struct Qdisc *q = old; | 668 | struct Qdisc *q = old; |
669 | struct net *net = dev_net(dev); | ||
666 | int err = 0; | 670 | int err = 0; |
667 | 671 | ||
668 | if (parent == NULL) { | 672 | if (parent == NULL) { |
@@ -699,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
699 | } | 703 | } |
700 | 704 | ||
701 | if (!ingress) { | 705 | if (!ingress) { |
702 | notify_and_destroy(skb, n, classid, dev->qdisc, new); | 706 | notify_and_destroy(net, skb, n, classid, |
707 | dev->qdisc, new); | ||
703 | if (new && !new->ops->attach) | 708 | if (new && !new->ops->attach) |
704 | atomic_inc(&new->refcnt); | 709 | atomic_inc(&new->refcnt); |
705 | dev->qdisc = new ? : &noop_qdisc; | 710 | dev->qdisc = new ? : &noop_qdisc; |
706 | } else { | 711 | } else { |
707 | notify_and_destroy(skb, n, classid, old, new); | 712 | notify_and_destroy(net, skb, n, classid, old, new); |
708 | } | 713 | } |
709 | 714 | ||
710 | if (dev->flags & IFF_UP) | 715 | if (dev->flags & IFF_UP) |
@@ -722,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
722 | err = -ENOENT; | 727 | err = -ENOENT; |
723 | } | 728 | } |
724 | if (!err) | 729 | if (!err) |
725 | notify_and_destroy(skb, n, classid, old, new); | 730 | notify_and_destroy(net, skb, n, classid, old, new); |
726 | } | 731 | } |
727 | return err; | 732 | return err; |
728 | } | 733 | } |
@@ -948,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
948 | struct Qdisc *p = NULL; | 953 | struct Qdisc *p = NULL; |
949 | int err; | 954 | int err; |
950 | 955 | ||
951 | if (!net_eq(net, &init_net)) | 956 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
952 | return -EINVAL; | ||
953 | |||
954 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | ||
955 | return -ENODEV; | 957 | return -ENODEV; |
956 | 958 | ||
957 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 959 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -991,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
991 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) | 993 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) |
992 | return err; | 994 | return err; |
993 | } else { | 995 | } else { |
994 | qdisc_notify(skb, n, clid, NULL, q); | 996 | qdisc_notify(net, skb, n, clid, NULL, q); |
995 | } | 997 | } |
996 | return 0; | 998 | return 0; |
997 | } | 999 | } |
@@ -1010,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1010 | struct Qdisc *q, *p; | 1012 | struct Qdisc *q, *p; |
1011 | int err; | 1013 | int err; |
1012 | 1014 | ||
1013 | if (!net_eq(net, &init_net)) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | replay: | 1015 | replay: |
1017 | /* Reinit, just in case something touches this. */ | 1016 | /* Reinit, just in case something touches this. */ |
1018 | tcm = NLMSG_DATA(n); | 1017 | tcm = NLMSG_DATA(n); |
1019 | clid = tcm->tcm_parent; | 1018 | clid = tcm->tcm_parent; |
1020 | q = p = NULL; | 1019 | q = p = NULL; |
1021 | 1020 | ||
1022 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 1021 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1023 | return -ENODEV; | 1022 | return -ENODEV; |
1024 | 1023 | ||
1025 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1024 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1106,7 +1105,7 @@ replay: | |||
1106 | return -EINVAL; | 1105 | return -EINVAL; |
1107 | err = qdisc_change(q, tca); | 1106 | err = qdisc_change(q, tca); |
1108 | if (err == 0) | 1107 | if (err == 0) |
1109 | qdisc_notify(skb, n, clid, NULL, q); | 1108 | qdisc_notify(net, skb, n, clid, NULL, q); |
1110 | return err; | 1109 | return err; |
1111 | 1110 | ||
1112 | create_n_graft: | 1111 | create_n_graft: |
@@ -1196,8 +1195,9 @@ nla_put_failure: | |||
1196 | return -1; | 1195 | return -1; |
1197 | } | 1196 | } |
1198 | 1197 | ||
1199 | static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 1198 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
1200 | u32 clid, struct Qdisc *old, struct Qdisc *new) | 1199 | struct nlmsghdr *n, u32 clid, |
1200 | struct Qdisc *old, struct Qdisc *new) | ||
1201 | { | 1201 | { |
1202 | struct sk_buff *skb; | 1202 | struct sk_buff *skb; |
1203 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 1203 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -1216,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
1216 | } | 1216 | } |
1217 | 1217 | ||
1218 | if (skb->len) | 1218 | if (skb->len) |
1219 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1219 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
1220 | 1220 | ||
1221 | err_out: | 1221 | err_out: |
1222 | kfree_skb(skb); | 1222 | kfree_skb(skb); |
@@ -1275,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
1275 | int s_idx, s_q_idx; | 1275 | int s_idx, s_q_idx; |
1276 | struct net_device *dev; | 1276 | struct net_device *dev; |
1277 | 1277 | ||
1278 | if (!net_eq(net, &init_net)) | ||
1279 | return 0; | ||
1280 | |||
1281 | s_idx = cb->args[0]; | 1278 | s_idx = cb->args[0]; |
1282 | s_q_idx = q_idx = cb->args[1]; | 1279 | s_q_idx = q_idx = cb->args[1]; |
1283 | 1280 | ||
1284 | rcu_read_lock(); | 1281 | rcu_read_lock(); |
1285 | idx = 0; | 1282 | idx = 0; |
1286 | for_each_netdev_rcu(&init_net, dev) { | 1283 | for_each_netdev_rcu(net, dev) { |
1287 | struct netdev_queue *dev_queue; | 1284 | struct netdev_queue *dev_queue; |
1288 | 1285 | ||
1289 | if (idx < s_idx) | 1286 | if (idx < s_idx) |
@@ -1335,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1335 | u32 qid = TC_H_MAJ(clid); | 1332 | u32 qid = TC_H_MAJ(clid); |
1336 | int err; | 1333 | int err; |
1337 | 1334 | ||
1338 | if (!net_eq(net, &init_net)) | 1335 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1339 | return -EINVAL; | ||
1340 | |||
1341 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | ||
1342 | return -ENODEV; | 1336 | return -ENODEV; |
1343 | 1337 | ||
1344 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1338 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1419,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1419 | if (cops->delete) | 1413 | if (cops->delete) |
1420 | err = cops->delete(q, cl); | 1414 | err = cops->delete(q, cl); |
1421 | if (err == 0) | 1415 | if (err == 0) |
1422 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); | 1416 | tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS); |
1423 | goto out; | 1417 | goto out; |
1424 | case RTM_GETTCLASS: | 1418 | case RTM_GETTCLASS: |
1425 | err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); | 1419 | err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); |
1426 | goto out; | 1420 | goto out; |
1427 | default: | 1421 | default: |
1428 | err = -EINVAL; | 1422 | err = -EINVAL; |
@@ -1435,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1435 | if (cops->change) | 1429 | if (cops->change) |
1436 | err = cops->change(q, clid, pid, tca, &new_cl); | 1430 | err = cops->change(q, clid, pid, tca, &new_cl); |
1437 | if (err == 0) | 1431 | if (err == 0) |
1438 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); | 1432 | tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); |
1439 | 1433 | ||
1440 | out: | 1434 | out: |
1441 | if (cl) | 1435 | if (cl) |
@@ -1487,8 +1481,9 @@ nla_put_failure: | |||
1487 | return -1; | 1481 | return -1; |
1488 | } | 1482 | } |
1489 | 1483 | ||
1490 | static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 1484 | static int tclass_notify(struct net *net, struct sk_buff *oskb, |
1491 | struct Qdisc *q, unsigned long cl, int event) | 1485 | struct nlmsghdr *n, struct Qdisc *q, |
1486 | unsigned long cl, int event) | ||
1492 | { | 1487 | { |
1493 | struct sk_buff *skb; | 1488 | struct sk_buff *skb; |
1494 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 1489 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -1502,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
1502 | return -EINVAL; | 1497 | return -EINVAL; |
1503 | } | 1498 | } |
1504 | 1499 | ||
1505 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1500 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
1506 | } | 1501 | } |
1507 | 1502 | ||
1508 | struct qdisc_dump_args | 1503 | struct qdisc_dump_args |
@@ -1577,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1577 | struct net_device *dev; | 1572 | struct net_device *dev; |
1578 | int t, s_t; | 1573 | int t, s_t; |
1579 | 1574 | ||
1580 | if (!net_eq(net, &init_net)) | ||
1581 | return 0; | ||
1582 | |||
1583 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 1575 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
1584 | return 0; | 1576 | return 0; |
1585 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 1577 | if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1586 | return 0; | 1578 | return 0; |
1587 | 1579 | ||
1588 | s_t = cb->args[0]; | 1580 | s_t = cb->args[0]; |
@@ -1692,7 +1684,7 @@ static int psched_show(struct seq_file *seq, void *v) | |||
1692 | 1684 | ||
1693 | static int psched_open(struct inode *inode, struct file *file) | 1685 | static int psched_open(struct inode *inode, struct file *file) |
1694 | { | 1686 | { |
1695 | return single_open(file, psched_show, PDE(inode)->data); | 1687 | return single_open(file, psched_show, NULL); |
1696 | } | 1688 | } |
1697 | 1689 | ||
1698 | static const struct file_operations psched_fops = { | 1690 | static const struct file_operations psched_fops = { |
@@ -1702,15 +1694,53 @@ static const struct file_operations psched_fops = { | |||
1702 | .llseek = seq_lseek, | 1694 | .llseek = seq_lseek, |
1703 | .release = single_release, | 1695 | .release = single_release, |
1704 | }; | 1696 | }; |
1697 | |||
1698 | static int __net_init psched_net_init(struct net *net) | ||
1699 | { | ||
1700 | struct proc_dir_entry *e; | ||
1701 | |||
1702 | e = proc_net_fops_create(net, "psched", 0, &psched_fops); | ||
1703 | if (e == NULL) | ||
1704 | return -ENOMEM; | ||
1705 | |||
1706 | return 0; | ||
1707 | } | ||
1708 | |||
1709 | static void __net_exit psched_net_exit(struct net *net) | ||
1710 | { | ||
1711 | proc_net_remove(net, "psched"); | ||
1712 | } | ||
1713 | #else | ||
1714 | static int __net_init psched_net_init(struct net *net) | ||
1715 | { | ||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | static void __net_exit psched_net_exit(struct net *net) | ||
1720 | { | ||
1721 | } | ||
1705 | #endif | 1722 | #endif |
1706 | 1723 | ||
1724 | static struct pernet_operations psched_net_ops = { | ||
1725 | .init = psched_net_init, | ||
1726 | .exit = psched_net_exit, | ||
1727 | }; | ||
1728 | |||
1707 | static int __init pktsched_init(void) | 1729 | static int __init pktsched_init(void) |
1708 | { | 1730 | { |
1731 | int err; | ||
1732 | |||
1733 | err = register_pernet_subsys(&psched_net_ops); | ||
1734 | if (err) { | ||
1735 | printk(KERN_ERR "pktsched_init: " | ||
1736 | "cannot initialize per netns operations\n"); | ||
1737 | return err; | ||
1738 | } | ||
1739 | |||
1709 | register_qdisc(&pfifo_qdisc_ops); | 1740 | register_qdisc(&pfifo_qdisc_ops); |
1710 | register_qdisc(&bfifo_qdisc_ops); | 1741 | register_qdisc(&bfifo_qdisc_ops); |
1711 | register_qdisc(&pfifo_head_drop_qdisc_ops); | 1742 | register_qdisc(&pfifo_head_drop_qdisc_ops); |
1712 | register_qdisc(&mq_qdisc_ops); | 1743 | register_qdisc(&mq_qdisc_ops); |
1713 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); | ||
1714 | 1744 | ||
1715 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); | 1745 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); |
1716 | rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); | 1746 | rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); |
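
The sch_api.c hunks above do two related things: tclass_notify() gains a
struct net parameter so traffic-class notifications are delivered in the
namespace the request came from rather than hard-coded init_net, and the
/proc/net/psched file moves from a one-shot init_net registration in
pktsched_init() to pernet_operations, so each namespace gets its own entry.
A minimal sketch of that pernet pattern, with illustrative "example_*" names
(example_fops stands in for real seq_file operations):

    #include <linux/init.h>
    #include <linux/proc_fs.h>
    #include <net/net_namespace.h>

    static const struct file_operations example_fops; /* seq_file ops, elided */

    static int __net_init example_net_init(struct net *net)
    {
        /* Runs once per namespace, including ones created later. */
        if (proc_net_fops_create(net, "example", 0, &example_fops) == NULL)
            return -ENOMEM;
        return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
        proc_net_remove(net, "example");
    }

    static struct pernet_operations example_net_ops = {
        .init = example_net_init,
        .exit = example_net_exit,
    };

    static int __init example_init(void)
    {
        /* register_pernet_subsys() runs .init for every existing
         * namespace and arranges the same for future ones; .exit
         * runs as namespaces (or the subsystem) go away. */
        return register_pernet_subsys(&example_net_ops);
    }
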
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ff4dd53eeff0..aeddabfb8e4e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -529,7 +529,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | |||
529 | unsigned int size; | 529 | unsigned int size; |
530 | int err = -ENOBUFS; | 530 | int err = -ENOBUFS; |
531 | 531 | ||
532 | /* ensure that the Qdisc and the private data are 32-byte aligned */ | 532 | /* ensure that the Qdisc and the private data are 64-byte aligned */ |
533 | size = QDISC_ALIGN(sizeof(*sch)); | 533 | size = QDISC_ALIGN(sizeof(*sch)); |
534 | size += ops->priv_size + (QDISC_ALIGNTO - 1); | 534 | size += ops->priv_size + (QDISC_ALIGNTO - 1); |
535 | 535 | ||
@@ -591,6 +591,13 @@ void qdisc_reset(struct Qdisc *qdisc) | |||
591 | } | 591 | } |
592 | EXPORT_SYMBOL(qdisc_reset); | 592 | EXPORT_SYMBOL(qdisc_reset); |
593 | 593 | ||
594 | static void qdisc_rcu_free(struct rcu_head *head) | ||
595 | { | ||
596 | struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head); | ||
597 | |||
598 | kfree((char *) qdisc - qdisc->padded); | ||
599 | } | ||
600 | |||
594 | void qdisc_destroy(struct Qdisc *qdisc) | 601 | void qdisc_destroy(struct Qdisc *qdisc) |
595 | { | 602 | { |
596 | const struct Qdisc_ops *ops = qdisc->ops; | 603 | const struct Qdisc_ops *ops = qdisc->ops; |
@@ -614,7 +621,11 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
614 | dev_put(qdisc_dev(qdisc)); | 621 | dev_put(qdisc_dev(qdisc)); |
615 | 622 | ||
616 | kfree_skb(qdisc->gso_skb); | 623 | kfree_skb(qdisc->gso_skb); |
617 | kfree((char *) qdisc - qdisc->padded); | 624 | /* |
625 | * gen_estimator est_timer() might access qdisc->q.lock, | ||
626 | * wait an RCU grace period before freeing qdisc. | ||
627 | */ | ||
628 | call_rcu(&qdisc->rcu_head, qdisc_rcu_free); | ||
618 | } | 629 | } |
619 | EXPORT_SYMBOL(qdisc_destroy); | 630 | EXPORT_SYMBOL(qdisc_destroy); |
620 | 631 | ||
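
The qdisc_destroy() change defers the final kfree() by an RCU grace period:
a still-running gen_estimator est_timer() may dereference qdisc->q.lock, so
the memory can only be reclaimed once all such readers are guaranteed to
have finished. The pattern is the standard one — embed an rcu_head in the
object and recover the container in the callback — as in this generic
sketch (struct foo is illustrative):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
        int value;
        struct rcu_head rcu;    /* embedded, so no extra allocation */
    };

    static void foo_rcu_free(struct rcu_head *head)
    {
        /* Runs after a grace period, when no RCU reader can still
         * hold a reference to this object. */
        struct foo *f = container_of(head, struct foo, rcu);

        kfree(f);
    }

    static void foo_release(struct foo *f)
    {
        /* Unpublish f first (unlink it from any shared structure),
         * then defer the actual free past outstanding readers. */
        call_rcu(&f->rcu, foo_rcu_free);
    }
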
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 9fb5d37c37ad..14db5689fb89 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -277,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, | |||
277 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, | 277 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, |
278 | union sctp_addr *s2) | 278 | union sctp_addr *s2) |
279 | { | 279 | { |
280 | struct in6_addr *a1 = &s1->v6.sin6_addr; | 280 | return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr); |
281 | struct in6_addr *a2 = &s2->v6.sin6_addr; | ||
282 | int i, j; | ||
283 | |||
284 | for (i = 0; i < 4 ; i++) { | ||
285 | __be32 a1xora2; | ||
286 | |||
287 | a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i]; | ||
288 | |||
289 | if ((j = fls(ntohl(a1xora2)))) | ||
290 | return (i * 32 + 32 - j); | ||
291 | } | ||
292 | |||
293 | return (i*32); | ||
294 | } | 281 | } |
295 | 282 | ||
296 | /* Fills in the source address(saddr) based on the destination address(daddr) | 283 | /* Fills in the source address(saddr) based on the destination address(daddr) |
@@ -372,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, | |||
372 | } | 359 | } |
373 | 360 | ||
374 | read_lock_bh(&in6_dev->lock); | 361 | read_lock_bh(&in6_dev->lock); |
375 | for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { | 362 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
376 | /* Add the address to the local list. */ | 363 | /* Add the address to the local list. */ |
377 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); | 364 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); |
378 | if (addr) { | 365 | if (addr) { |
379 | addr->a.v6.sin6_family = AF_INET6; | 366 | addr->a.v6.sin6_family = AF_INET6; |
380 | addr->a.v6.sin6_port = 0; | 367 | addr->a.v6.sin6_port = 0; |
381 | addr->a.v6.sin6_addr = ifp->addr; | 368 | ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr); |
382 | addr->a.v6.sin6_scope_id = dev->ifindex; | 369 | addr->a.v6.sin6_scope_id = dev->ifindex; |
383 | addr->valid = 1; | 370 | addr->valid = 1; |
384 | INIT_LIST_HEAD(&addr->list); | 371 | INIT_LIST_HEAD(&addr->list); |
@@ -419,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) | |||
419 | { | 406 | { |
420 | addr->v6.sin6_family = AF_INET6; | 407 | addr->v6.sin6_family = AF_INET6; |
421 | addr->v6.sin6_port = 0; | 408 | addr->v6.sin6_port = 0; |
422 | addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; | 409 | ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr); |
423 | } | 410 | } |
424 | 411 | ||
425 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | 412 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ |
@@ -432,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | |||
432 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = | 419 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = |
433 | addr->v4.sin_addr.s_addr; | 420 | addr->v4.sin_addr.s_addr; |
434 | } else { | 421 | } else { |
435 | inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; | 422 | ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr); |
436 | } | 423 | } |
437 | } | 424 | } |
438 | 425 | ||
@@ -445,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | |||
445 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); | 432 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); |
446 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; | 433 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; |
447 | } else { | 434 | } else { |
448 | inet6_sk(sk)->daddr = addr->v6.sin6_addr; | 435 | ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr); |
449 | } | 436 | } |
450 | } | 437 | } |
451 | 438 | ||
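
The sctp/ipv6.c hunks swap open-coded IPv6 address handling for the generic
helpers: the removed prefix loop becomes ipv6_addr_diff(), and direct struct
assignment of in6_addr becomes ipv6_addr_copy(). For reference, an
illustrative reimplementation of what ipv6_addr_diff() returns — the bit
length of the longest common prefix, i.e. the index of the first differing
bit (this mirrors the deleted loop; it is not the helper's actual source):

    #include <linux/bitops.h>
    #include <linux/in6.h>
    #include <asm/byteorder.h>

    static int addr_common_prefix_len(const struct in6_addr *a1,
                                      const struct in6_addr *a2)
    {
        int i;

        for (i = 0; i < 4; i++) {
            __u32 xb = ntohl(a1->s6_addr32[i] ^ a2->s6_addr32[i]);

            if (xb)
                /* fls() is the highest set bit, 1-based, so
                 * 32 - fls(xb) leading bits still match. */
                return i * 32 + (32 - fls(xb));
        }
        return 128;    /* addresses are identical */
    }
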
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 007e8baba089..c1941276f6e3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -5482,7 +5482,6 @@ pp_found: | |||
5482 | */ | 5482 | */ |
5483 | int reuse = sk->sk_reuse; | 5483 | int reuse = sk->sk_reuse; |
5484 | struct sock *sk2; | 5484 | struct sock *sk2; |
5485 | struct hlist_node *node; | ||
5486 | 5485 | ||
5487 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); | 5486 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); |
5488 | if (pp->fastreuse && sk->sk_reuse && | 5487 | if (pp->fastreuse && sk->sk_reuse && |
diff --git a/net/socket.c b/net/socket.c index 5e8d0af3c0e7..35bc198bbf68 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -620,10 +620,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
620 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, | 620 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, |
621 | sizeof(tv), &tv); | 621 | sizeof(tv), &tv); |
622 | } else { | 622 | } else { |
623 | struct timespec ts; | 623 | skb_get_timestampns(skb, &ts[0]); |
624 | skb_get_timestampns(skb, &ts); | ||
625 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, | 624 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, |
626 | sizeof(ts), &ts); | 625 | sizeof(ts[0]), &ts[0]); |
627 | } | 626 | } |
628 | } | 627 | } |
629 | 628 | ||
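
In the socket.c hunk the function-local struct timespec disappears in favor
of ts[0]; the ts array is evidently declared earlier in
__sock_recv_timestamp() (outside this excerpt) as part of the SO_TIMESTAMPING
plumbing, which reserves slots for several timestamp kinds. What userspace
sees is unchanged: one struct timespec in an SCM_TIMESTAMPNS control
message. A sketch of reading it, assuming SO_TIMESTAMPNS has been enabled
on the socket:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <time.h>

    static void print_rx_timestamp(int fd)
    {
        char data[1500], ctrl[CMSG_SPACE(sizeof(struct timespec))];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, 0) < 0)
            return;
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
            if (cm->cmsg_level == SOL_SOCKET &&
                cm->cmsg_type == SCM_TIMESTAMPNS) {
                struct timespec ts;

                memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
                printf("rx at %lld.%09ld\n",
                       (long long)ts.tv_sec, ts.tv_nsec);
            }
        }
    }
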
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c index 3308157436d2..a99825d7caa0 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_token.c +++ b/net/sunrpc/auth_gss/gss_spkm3_token.c | |||
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck | |||
223 | 223 | ||
224 | /* only support SPKM_MIC_TOK */ | 224 | /* only support SPKM_MIC_TOK */ |
225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { | 225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { |
226 | dprintk("RPC: ERROR unsupported SPKM3 token \n"); | 226 | dprintk("RPC: ERROR unsupported SPKM3 token\n"); |
227 | goto out; | 227 | goto out; |
228 | } | 228 | } |
229 | 229 | ||
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index f0c05d3311c1..7dcfe0cc3500 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c | |||
@@ -60,7 +60,7 @@ int bc_send(struct rpc_rqst *req) | |||
60 | rpc_put_task(task); | 60 | rpc_put_task(task); |
61 | } | 61 | } |
62 | return ret; | 62 | return ret; |
63 | dprintk("RPC: bc_send ret= %d \n", ret); | 63 | dprintk("RPC: bc_send ret= %d\n", ret); |
64 | } | 64 | } |
65 | 65 | ||
66 | #endif /* CONFIG_NFS_V4_1 */ | 66 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index a3bfd4064912..90a051912c03 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
558 | struct tipc_bearer *unused1, | 558 | struct tipc_bearer *unused1, |
559 | struct tipc_media_addr *unused2) | 559 | struct tipc_media_addr *unused2) |
560 | { | 560 | { |
561 | static int send_count = 0; | ||
562 | |||
563 | int bp_index; | 561 | int bp_index; |
564 | int swap_time; | ||
565 | 562 | ||
566 | /* Prepare buffer for broadcasting (if first time trying to send it) */ | 563 | /* Prepare buffer for broadcasting (if first time trying to send it) */ |
567 | 564 | ||
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
575 | msg_set_mc_netid(msg, tipc_net_id); | 572 | msg_set_mc_netid(msg, tipc_net_id); |
576 | } | 573 | } |
577 | 574 | ||
578 | /* Determine if bearer pairs should be swapped following this attempt */ | ||
579 | |||
580 | if ((swap_time = (++send_count >= 10))) | ||
581 | send_count = 0; | ||
582 | |||
583 | /* Send buffer over bearers until all targets reached */ | 575 | /* Send buffer over bearers until all targets reached */ |
584 | 576 | ||
585 | bcbearer->remains = tipc_cltr_bcast_nodes; | 577 | bcbearer->remains = tipc_cltr_bcast_nodes; |
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
595 | if (bcbearer->remains_new.count == bcbearer->remains.count) | 587 | if (bcbearer->remains_new.count == bcbearer->remains.count) |
596 | continue; /* bearer pair doesn't add anything */ | 588 | continue; /* bearer pair doesn't add anything */ |
597 | 589 | ||
598 | if (!p->publ.blocked && | 590 | if (p->publ.blocked || |
599 | !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { | 591 | p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { |
600 | if (swap_time && s && !s->publ.blocked) | 592 | /* unable to send on primary bearer */ |
601 | goto swap; | 593 | if (!s || s->publ.blocked || |
602 | else | 594 | s->media->send_msg(buf, &s->publ, |
603 | goto update; | 595 | &s->media->bcast_addr)) { |
596 | /* unable to send on either bearer */ | ||
597 | continue; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | if (s) { | ||
602 | bcbearer->bpairs[bp_index].primary = s; | ||
603 | bcbearer->bpairs[bp_index].secondary = p; | ||
604 | } | 604 | } |
605 | 605 | ||
606 | if (!s || s->publ.blocked || | ||
607 | s->media->send_msg(buf, &s->publ, &s->media->bcast_addr)) | ||
608 | continue; /* unable to send using bearer pair */ | ||
609 | swap: | ||
610 | bcbearer->bpairs[bp_index].primary = s; | ||
611 | bcbearer->bpairs[bp_index].secondary = p; | ||
612 | update: | ||
613 | if (bcbearer->remains_new.count == 0) | 606 | if (bcbearer->remains_new.count == 0) |
614 | return 0; | 607 | return 0; |
615 | 608 | ||
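
The tipc_bcbearer_send() rewrite drops the counter-driven swap (the old
send_count/swap_time pair only swapped bearers every tenth send) for a
direct scheme: try the pair's primary bearer, fall back to the secondary if
the primary is blocked or the send fails, and after any successful send
swap the two so they alternate — which also means a bearer that just failed
is retried second next time. A condensed sketch with simplified stand-in
types (struct bearer and try_send() are not TIPC's real interfaces):

    struct bearer;
    int try_send(struct bearer *b, void *buf);    /* 0 on success */

    struct bearer_pair {
        struct bearer *primary;
        struct bearer *secondary;
    };

    static int pair_send(struct bearer_pair *bp, void *buf)
    {
        struct bearer *p = bp->primary, *s = bp->secondary;

        if (try_send(p, buf) != 0) {
            /* primary failed: fall back to the secondary, if any */
            if (!s || try_send(s, buf) != 0)
                return -1;    /* neither bearer could send */
        }

        if (s) {
            /* Swap after a successful send so the bearers alternate;
             * after a failover the one that worked goes first. */
            bp->primary = s;
            bp->secondary = p;
        }
        return 0;
    }
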
diff --git a/net/tipc/core.c b/net/tipc/core.c index 52c571fedbe0..4e84c8431f32 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include "config.h" | 49 | #include "config.h" |
50 | 50 | ||
51 | 51 | ||
52 | #define TIPC_MOD_VER "1.6.4" | 52 | #define TIPC_MOD_VER "2.0.0" |
53 | 53 | ||
54 | #ifndef CONFIG_TIPC_ZONES | 54 | #ifndef CONFIG_TIPC_ZONES |
55 | #define CONFIG_TIPC_ZONES 3 | 55 | #define CONFIG_TIPC_ZONES 3 |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 1a7e4665af80..c76e82e5f982 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -877,7 +877,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
877 | case TIMEOUT_EVT: | 877 | case TIMEOUT_EVT: |
878 | dbg_link("TIM "); | 878 | dbg_link("TIM "); |
879 | if (l_ptr->next_in_no != l_ptr->checkpoint) { | 879 | if (l_ptr->next_in_no != l_ptr->checkpoint) { |
880 | dbg_link("-> WW \n"); | 880 | dbg_link("-> WW\n"); |
881 | l_ptr->state = WORKING_WORKING; | 881 | l_ptr->state = WORKING_WORKING; |
882 | l_ptr->fsm_msg_cnt = 0; | 882 | l_ptr->fsm_msg_cnt = 0; |
883 | l_ptr->checkpoint = l_ptr->next_in_no; | 883 | l_ptr->checkpoint = l_ptr->next_in_no; |
@@ -934,7 +934,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
934 | link_set_timer(l_ptr, cont_intv); | 934 | link_set_timer(l_ptr, cont_intv); |
935 | break; | 935 | break; |
936 | case RESET_MSG: | 936 | case RESET_MSG: |
937 | dbg_link("RES \n"); | 937 | dbg_link("RES\n"); |
938 | dbg_link(" -> RR\n"); | 938 | dbg_link(" -> RR\n"); |
939 | l_ptr->state = RESET_RESET; | 939 | l_ptr->state = RESET_RESET; |
940 | l_ptr->fsm_msg_cnt = 0; | 940 | l_ptr->fsm_msg_cnt = 0; |
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
947 | l_ptr->started = 1; | 947 | l_ptr->started = 1; |
948 | /* fall through */ | 948 | /* fall through */ |
949 | case TIMEOUT_EVT: | 949 | case TIMEOUT_EVT: |
950 | dbg_link("TIM \n"); | 950 | dbg_link("TIM\n"); |
951 | tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); | 951 | tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); |
952 | l_ptr->fsm_msg_cnt++; | 952 | l_ptr->fsm_msg_cnt++; |
953 | link_set_timer(l_ptr, cont_intv); | 953 | link_set_timer(l_ptr, cont_intv); |
@@ -1553,7 +1553,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1553 | 1553 | ||
1554 | /* Continue retransmission now, if there is anything: */ | 1554 | /* Continue retransmission now, if there is anything: */ |
1555 | 1555 | ||
1556 | if (r_q_size && buf && !skb_cloned(buf)) { | 1556 | if (r_q_size && buf) { |
1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); | 1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); |
1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); | 1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); |
1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { |
@@ -1722,15 +1722,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); | 1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); |
1723 | 1723 | ||
1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { | 1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { |
1725 | if (!skb_cloned(buf)) { | 1725 | if (l_ptr->retransm_queue_size == 0) { |
1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); | 1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); |
1727 | dbg_print_link(l_ptr, " "); | 1727 | dbg_print_link(l_ptr, " "); |
1728 | l_ptr->retransm_queue_head = msg_seqno(msg); | 1728 | l_ptr->retransm_queue_head = msg_seqno(msg); |
1729 | l_ptr->retransm_queue_size = retransmits; | 1729 | l_ptr->retransm_queue_size = retransmits; |
1730 | return; | ||
1731 | } else { | 1730 | } else { |
1732 | /* Don't retransmit if driver already has the buffer */ | 1731 | err("Unexpected retransmit on link %s (qsize=%d)\n", |
1732 | l_ptr->name, l_ptr->retransm_queue_size); | ||
1733 | } | 1733 | } |
1734 | return; | ||
1734 | } else { | 1735 | } else { |
1735 | /* Detect repeated retransmit failures on uncongested bearer */ | 1736 | /* Detect repeated retransmit failures on uncongested bearer */ |
1736 | 1737 | ||
@@ -1745,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1745 | } | 1746 | } |
1746 | } | 1747 | } |
1747 | 1748 | ||
1748 | while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { | 1749 | while (retransmits && (buf != l_ptr->next_out) && buf) { |
1749 | msg = buf_msg(buf); | 1750 | msg = buf_msg(buf); |
1750 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1751 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1751 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1752 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
@@ -3294,7 +3295,7 @@ static void link_dump_rec_queue(struct link *l_ptr) | |||
3294 | info("buffer %x invalid\n", crs); | 3295 | info("buffer %x invalid\n", crs); |
3295 | return; | 3296 | return; |
3296 | } | 3297 | } |
3297 | msg_dbg(buf_msg(crs), "In rec queue: \n"); | 3298 | msg_dbg(buf_msg(crs), "In rec queue:\n"); |
3298 | crs = crs->next; | 3299 | crs = crs->next; |
3299 | } | 3300 | } |
3300 | } | 3301 | } |
diff --git a/net/tipc/net.c b/net/tipc/net.c index f25b1cdb64eb..d7cd1e064a80 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -116,7 +116,7 @@ | |||
116 | */ | 116 | */ |
117 | 117 | ||
118 | DEFINE_RWLOCK(tipc_net_lock); | 118 | DEFINE_RWLOCK(tipc_net_lock); |
119 | struct _zone *tipc_zones[256] = { NULL, }; | 119 | static struct _zone *tipc_zones[256] = { NULL, }; |
120 | struct network tipc_net = { tipc_zones }; | 120 | struct network tipc_net = { tipc_zones }; |
121 | 121 | ||
122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) | 122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) |
@@ -291,6 +291,6 @@ void tipc_net_stop(void) | |||
291 | tipc_bclink_stop(); | 291 | tipc_bclink_stop(); |
292 | net_stop(); | 292 | net_stop(); |
293 | write_unlock_bh(&tipc_net_lock); | 293 | write_unlock_bh(&tipc_net_lock); |
294 | info("Left network mode \n"); | 294 | info("Left network mode\n"); |
295 | } | 295 | } |
296 | 296 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 2c24e7d6d950..17cc394f424f 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -278,7 +278,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) | |||
278 | n_ptr->link_cnt++; | 278 | n_ptr->link_cnt++; |
279 | return n_ptr; | 279 | return n_ptr; |
280 | } | 280 | } |
281 | err("Attempt to establish second link on <%s> to %s \n", | 281 | err("Attempt to establish second link on <%s> to %s\n", |
282 | l_ptr->b_ptr->publ.name, | 282 | l_ptr->b_ptr->publ.name, |
283 | addr_string_fill(addr_string, l_ptr->addr)); | 283 | addr_string_fill(addr_string, l_ptr->addr)); |
284 | } | 284 | } |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ff123e56114a..ab6eab4c45e2 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
274 | { | 274 | { |
275 | struct subscription *sub; | 275 | struct subscription *sub; |
276 | struct subscription *sub_temp; | 276 | struct subscription *sub_temp; |
277 | __u32 type, lower, upper; | 277 | __u32 type, lower, upper, timeout, filter; |
278 | int found = 0; | 278 | int found = 0; |
279 | 279 | ||
280 | /* Find first matching subscription, exit if not found */ | 280 | /* Find first matching subscription, exit if not found */ |
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
282 | type = ntohl(s->seq.type); | 282 | type = ntohl(s->seq.type); |
283 | lower = ntohl(s->seq.lower); | 283 | lower = ntohl(s->seq.lower); |
284 | upper = ntohl(s->seq.upper); | 284 | upper = ntohl(s->seq.upper); |
285 | timeout = ntohl(s->timeout); | ||
286 | filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL; | ||
285 | 287 | ||
286 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 288 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
287 | subscription_list) { | 289 | subscription_list) { |
288 | if ((type == sub->seq.type) && | 290 | if ((type == sub->seq.type) && |
289 | (lower == sub->seq.lower) && | 291 | (lower == sub->seq.lower) && |
290 | (upper == sub->seq.upper)) { | 292 | (upper == sub->seq.upper) && |
293 | (timeout == sub->timeout) && | ||
294 | (filter == sub->filter) && | ||
295 | !memcmp(s->usr_handle, sub->evt.s.usr_handle, | ||
296 | sizeof(s->usr_handle))) { | ||
291 | found = 1; | 297 | found = 1; |
292 | break; | 298 | break; |
293 | } | 299 | } |
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
304 | k_term_timer(&sub->timer); | 310 | k_term_timer(&sub->timer); |
305 | spin_lock_bh(subscriber->lock); | 311 | spin_lock_bh(subscriber->lock); |
306 | } | 312 | } |
307 | dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n", | 313 | dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n", |
308 | sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); | 314 | sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); |
309 | subscr_del(sub); | 315 | subscr_del(sub); |
310 | } | 316 | } |
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
352 | sub->seq.upper = ntohl(s->seq.upper); | 358 | sub->seq.upper = ntohl(s->seq.upper); |
353 | sub->timeout = ntohl(s->timeout); | 359 | sub->timeout = ntohl(s->timeout); |
354 | sub->filter = ntohl(s->filter); | 360 | sub->filter = ntohl(s->filter); |
355 | if ((!(sub->filter & TIPC_SUB_PORTS) == | 361 | if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) || |
356 | !(sub->filter & TIPC_SUB_SERVICE)) || | ||
357 | (sub->seq.lower > sub->seq.upper)) { | 362 | (sub->seq.lower > sub->seq.upper)) { |
358 | warn("Subscription rejected, illegal request\n"); | 363 | warn("Subscription rejected, illegal request\n"); |
359 | kfree(sub); | 364 | kfree(sub); |
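
Two fixes in subscr.c: subscr_cancel() used to treat any subscription with
the same {type, lower, upper} triple as the one to cancel; it now also
compares the timeout, the filter (with TIPC_SUB_CANCEL masked off, since
the cancel request itself carries that bit) and the user handle, so a
cancel removes exactly the subscription it names. subscr_subscribe()
likewise replaces the convoluted bit test with a check that only the
defined filter values pass. The match test, pulled out as a predicate
(field names follow the hunk above):

    static int subscription_matches(const struct tipc_subscr *s,
                                    const struct subscription *sub)
    {
        __u32 filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL;

        return ntohl(s->seq.type) == sub->seq.type &&
               ntohl(s->seq.lower) == sub->seq.lower &&
               ntohl(s->seq.upper) == sub->seq.upper &&
               ntohl(s->timeout) == sub->timeout &&
               filter == sub->filter &&
               !memcmp(s->usr_handle, sub->evt.s.usr_handle,
                       sizeof(s->usr_handle));
    }
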
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c index 4dc82a54ba30..68bedf3e5443 100644 --- a/net/wimax/op-reset.c +++ b/net/wimax/op-reset.c | |||
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | |||
110 | { | 110 | { |
111 | int result, ifindex; | 111 | int result, ifindex; |
112 | struct wimax_dev *wimax_dev; | 112 | struct wimax_dev *wimax_dev; |
113 | struct device *dev; | ||
114 | 113 | ||
115 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | 114 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); |
116 | result = -ENODEV; | 115 | result = -ENODEV; |
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | |||
123 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | 122 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); |
124 | if (wimax_dev == NULL) | 123 | if (wimax_dev == NULL) |
125 | goto error_no_wimax_dev; | 124 | goto error_no_wimax_dev; |
126 | dev = wimax_dev_to_dev(wimax_dev); | ||
127 | /* Execute the operation and send the result back to user space */ | 125 | /* Execute the operation and send the result back to user space */ |
128 | result = wimax_reset(wimax_dev); | 126 | result = wimax_reset(wimax_dev); |
129 | dev_put(wimax_dev->net_dev); | 127 | dev_put(wimax_dev->net_dev); |
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c index 11ad3356eb56..aff8776e2d41 100644 --- a/net/wimax/op-state-get.c +++ b/net/wimax/op-state-get.c | |||
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | |||
53 | { | 53 | { |
54 | int result, ifindex; | 54 | int result, ifindex; |
55 | struct wimax_dev *wimax_dev; | 55 | struct wimax_dev *wimax_dev; |
56 | struct device *dev; | ||
57 | 56 | ||
58 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | 57 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); |
59 | result = -ENODEV; | 58 | result = -ENODEV; |
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | |||
66 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | 65 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); |
67 | if (wimax_dev == NULL) | 66 | if (wimax_dev == NULL) |
68 | goto error_no_wimax_dev; | 67 | goto error_no_wimax_dev; |
69 | dev = wimax_dev_to_dev(wimax_dev); | ||
70 | /* Execute the operation and send the result back to user space */ | 68 | /* Execute the operation and send the result back to user space */ |
71 | result = wimax_state_get(wimax_dev); | 69 | result = wimax_state_get(wimax_dev); |
72 | dev_put(wimax_dev->net_dev); | 70 | dev_put(wimax_dev->net_dev); |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 22139fa46115..4bb734a95f57 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -895,3 +895,16 @@ void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, | |||
895 | nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); | 895 | nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); |
896 | } | 896 | } |
897 | EXPORT_SYMBOL(cfg80211_action_tx_status); | 897 | EXPORT_SYMBOL(cfg80211_action_tx_status); |
898 | |||
899 | void cfg80211_cqm_rssi_notify(struct net_device *dev, | ||
900 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
901 | gfp_t gfp) | ||
902 | { | ||
903 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
904 | struct wiphy *wiphy = wdev->wiphy; | ||
905 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
906 | |||
907 | /* Indicate roaming trigger event to user space */ | ||
908 | nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); | ||
909 | } | ||
910 | EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); | ||
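
cfg80211_cqm_rssi_notify() is the new driver-facing entry point for
connection quality monitoring: a driver (or mac80211) calls it when the
configured RSSI threshold is crossed, and cfg80211 forwards the event to
userspace through nl80211. A hypothetical driver-side call site — the
event names come from the matching nl80211 enum, which is not part of this
excerpt:

    #include <net/cfg80211.h>

    static void example_fw_rssi_event(struct net_device *ndev, bool low)
    {
        enum nl80211_cqm_rssi_threshold_event ev = low ?
            NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
            NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;

        /* GFP_ATOMIC: firmware events often arrive in atomic context */
        cfg80211_cqm_rssi_notify(ndev, ev, GFP_ATOMIC);
    }
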
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 030cf153bea2..596bf189549a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -150,6 +150,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { | |||
150 | .len = IEEE80211_MAX_DATA_LEN }, | 150 | .len = IEEE80211_MAX_DATA_LEN }, |
151 | [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, | 151 | [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, |
152 | [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, | 152 | [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, |
153 | [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | /* policy for the attributes */ | 156 | /* policy for the attributes */ |
@@ -4779,6 +4780,84 @@ unlock_rtnl: | |||
4779 | return err; | 4780 | return err; |
4780 | } | 4781 | } |
4781 | 4782 | ||
4783 | static struct nla_policy | ||
4784 | nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = { | ||
4785 | [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 }, | ||
4786 | [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 }, | ||
4787 | [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 }, | ||
4788 | }; | ||
4789 | |||
4790 | static int nl80211_set_cqm_rssi(struct genl_info *info, | ||
4791 | s32 threshold, u32 hysteresis) | ||
4792 | { | ||
4793 | struct cfg80211_registered_device *rdev; | ||
4794 | struct wireless_dev *wdev; | ||
4795 | struct net_device *dev; | ||
4796 | int err; | ||
4797 | |||
4798 | if (threshold > 0) | ||
4799 | return -EINVAL; | ||
4800 | |||
4801 | rtnl_lock(); | ||
4802 | |||
4803 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); | ||
4804 | if (err) | ||
4805 | goto unlock_rdev; | ||
4806 | |||
4807 | wdev = dev->ieee80211_ptr; | ||
4808 | |||
4809 | if (!rdev->ops->set_cqm_rssi_config) { | ||
4810 | err = -EOPNOTSUPP; | ||
4811 | goto unlock_rdev; | ||
4812 | } | ||
4813 | |||
4814 | if (wdev->iftype != NL80211_IFTYPE_STATION) { | ||
4815 | err = -EOPNOTSUPP; | ||
4816 | goto unlock_rdev; | ||
4817 | } | ||
4818 | |||
4819 | err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev, | ||
4820 | threshold, hysteresis); | ||
4821 | |||
4822 | unlock_rdev: | ||
4823 | cfg80211_unlock_rdev(rdev); | ||
4824 | dev_put(dev); | ||
4825 | rtnl_unlock(); | ||
4826 | |||
4827 | return err; | ||
4828 | } | ||
4829 | |||
4830 | static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) | ||
4831 | { | ||
4832 | struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1]; | ||
4833 | struct nlattr *cqm; | ||
4834 | int err; | ||
4835 | |||
4836 | cqm = info->attrs[NL80211_ATTR_CQM]; | ||
4837 | if (!cqm) { | ||
4838 | err = -EINVAL; | ||
4839 | goto out; | ||
4840 | } | ||
4841 | |||
4842 | err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm, | ||
4843 | nl80211_attr_cqm_policy); | ||
4844 | if (err) | ||
4845 | goto out; | ||
4846 | |||
4847 | if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] && | ||
4848 | attrs[NL80211_ATTR_CQM_RSSI_HYST]) { | ||
4849 | s32 threshold; | ||
4850 | u32 hysteresis; | ||
4851 | threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); | ||
4852 | hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); | ||
4853 | err = nl80211_set_cqm_rssi(info, threshold, hysteresis); | ||
4854 | } else | ||
4855 | err = -EINVAL; | ||
4856 | |||
4857 | out: | ||
4858 | return err; | ||
4859 | } | ||
4860 | |||
4782 | static struct genl_ops nl80211_ops[] = { | 4861 | static struct genl_ops nl80211_ops[] = { |
4783 | { | 4862 | { |
4784 | .cmd = NL80211_CMD_GET_WIPHY, | 4863 | .cmd = NL80211_CMD_GET_WIPHY, |
@@ -5083,6 +5162,12 @@ static struct genl_ops nl80211_ops[] = { | |||
5083 | .policy = nl80211_policy, | 5162 | .policy = nl80211_policy, |
5084 | /* can be retrieved by unprivileged users */ | 5163 | /* can be retrieved by unprivileged users */ |
5085 | }, | 5164 | }, |
5165 | { | ||
5166 | .cmd = NL80211_CMD_SET_CQM, | ||
5167 | .doit = nl80211_set_cqm, | ||
5168 | .policy = nl80211_policy, | ||
5169 | .flags = GENL_ADMIN_PERM, | ||
5170 | }, | ||
5086 | }; | 5171 | }; |
5087 | 5172 | ||
5088 | static struct genl_multicast_group nl80211_mlme_mcgrp = { | 5173 | static struct genl_multicast_group nl80211_mlme_mcgrp = { |
@@ -5833,6 +5918,52 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, | |||
5833 | nlmsg_free(msg); | 5918 | nlmsg_free(msg); |
5834 | } | 5919 | } |
5835 | 5920 | ||
5921 | void | ||
5922 | nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, | ||
5923 | struct net_device *netdev, | ||
5924 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
5925 | gfp_t gfp) | ||
5926 | { | ||
5927 | struct sk_buff *msg; | ||
5928 | struct nlattr *pinfoattr; | ||
5929 | void *hdr; | ||
5930 | |||
5931 | msg = nlmsg_new(NLMSG_GOODSIZE, gfp); | ||
5932 | if (!msg) | ||
5933 | return; | ||
5934 | |||
5935 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); | ||
5936 | if (!hdr) { | ||
5937 | nlmsg_free(msg); | ||
5938 | return; | ||
5939 | } | ||
5940 | |||
5941 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | ||
5942 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | ||
5943 | |||
5944 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); | ||
5945 | if (!pinfoattr) | ||
5946 | goto nla_put_failure; | ||
5947 | |||
5948 | NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, | ||
5949 | rssi_event); | ||
5950 | |||
5951 | nla_nest_end(msg, pinfoattr); | ||
5952 | |||
5953 | if (genlmsg_end(msg, hdr) < 0) { | ||
5954 | nlmsg_free(msg); | ||
5955 | return; | ||
5956 | } | ||
5957 | |||
5958 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | ||
5959 | nl80211_mlme_mcgrp.id, gfp); | ||
5960 | return; | ||
5961 | |||
5962 | nla_put_failure: | ||
5963 | genlmsg_cancel(msg, hdr); | ||
5964 | nlmsg_free(msg); | ||
5965 | } | ||
5966 | |||
5836 | static int nl80211_netlink_notify(struct notifier_block * nb, | 5967 | static int nl80211_netlink_notify(struct notifier_block * nb, |
5837 | unsigned long state, | 5968 | unsigned long state, |
5838 | void *_notify) | 5969 | void *_notify) |
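
The nl80211 side carries all CQM parameters inside one nested attribute:
NL80211_CMD_SET_CQM parses NL80211_ATTR_CQM_RSSI_THOLD and ..._HYST out of
the NL80211_ATTR_CQM nest (rejecting positive thresholds, since RSSI is a
dBm value) and hands them to the driver's set_cqm_rssi_config op;
NL80211_CMD_NOTIFY_CQM multicasts the threshold event back with the same
nesting. A hypothetical libnl-style userspace fragment building the set
request's attributes (the genl header with NL80211_CMD_SET_CQM is assumed
to have been put on msg already):

    #include <errno.h>
    #include <netlink/attr.h>
    #include <netlink/msg.h>
    #include <linux/nl80211.h>

    static int put_cqm_attrs(struct nl_msg *msg, int ifindex,
                             int threshold_dbm, unsigned int hysteresis_db)
    {
        struct nlattr *cqm;

        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, ifindex);

        cqm = nla_nest_start(msg, NL80211_ATTR_CQM);
        if (!cqm)
            goto nla_put_failure;
        /* the threshold is a signed dBm value <= 0, carried as u32 */
        NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THOLD, threshold_dbm);
        NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_HYST, hysteresis_db);
        nla_nest_end(msg, cqm);
        return 0;

    nla_put_failure:
        return -ENOBUFS;
    }
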
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 4ca511102c6c..2ad7fbc7d9f1 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h | |||
@@ -82,4 +82,10 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, | |||
82 | const u8 *buf, size_t len, bool ack, | 82 | const u8 *buf, size_t len, bool ack, |
83 | gfp_t gfp); | 83 | gfp_t gfp); |
84 | 84 | ||
85 | void | ||
86 | nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, | ||
87 | struct net_device *netdev, | ||
88 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
89 | gfp_t gfp); | ||
90 | |||
85 | #endif /* __NET_WIRELESS_NL80211_H */ | 91 | #endif /* __NET_WIRELESS_NL80211_H */ |
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 4f5a47091fde..0ef17bc42bac 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
@@ -29,226 +29,226 @@ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | |||
29 | * know about. | 29 | * know about. |
30 | */ | 30 | */ |
31 | static const struct iw_ioctl_description standard_ioctl[] = { | 31 | static const struct iw_ioctl_description standard_ioctl[] = { |
32 | [SIOCSIWCOMMIT - SIOCIWFIRST] = { | 32 | [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = { |
33 | .header_type = IW_HEADER_TYPE_NULL, | 33 | .header_type = IW_HEADER_TYPE_NULL, |
34 | }, | 34 | }, |
35 | [SIOCGIWNAME - SIOCIWFIRST] = { | 35 | [IW_IOCTL_IDX(SIOCGIWNAME)] = { |
36 | .header_type = IW_HEADER_TYPE_CHAR, | 36 | .header_type = IW_HEADER_TYPE_CHAR, |
37 | .flags = IW_DESCR_FLAG_DUMP, | 37 | .flags = IW_DESCR_FLAG_DUMP, |
38 | }, | 38 | }, |
39 | [SIOCSIWNWID - SIOCIWFIRST] = { | 39 | [IW_IOCTL_IDX(SIOCSIWNWID)] = { |
40 | .header_type = IW_HEADER_TYPE_PARAM, | 40 | .header_type = IW_HEADER_TYPE_PARAM, |
41 | .flags = IW_DESCR_FLAG_EVENT, | 41 | .flags = IW_DESCR_FLAG_EVENT, |
42 | }, | 42 | }, |
43 | [SIOCGIWNWID - SIOCIWFIRST] = { | 43 | [IW_IOCTL_IDX(SIOCGIWNWID)] = { |
44 | .header_type = IW_HEADER_TYPE_PARAM, | 44 | .header_type = IW_HEADER_TYPE_PARAM, |
45 | .flags = IW_DESCR_FLAG_DUMP, | 45 | .flags = IW_DESCR_FLAG_DUMP, |
46 | }, | 46 | }, |
47 | [SIOCSIWFREQ - SIOCIWFIRST] = { | 47 | [IW_IOCTL_IDX(SIOCSIWFREQ)] = { |
48 | .header_type = IW_HEADER_TYPE_FREQ, | 48 | .header_type = IW_HEADER_TYPE_FREQ, |
49 | .flags = IW_DESCR_FLAG_EVENT, | 49 | .flags = IW_DESCR_FLAG_EVENT, |
50 | }, | 50 | }, |
51 | [SIOCGIWFREQ - SIOCIWFIRST] = { | 51 | [IW_IOCTL_IDX(SIOCGIWFREQ)] = { |
52 | .header_type = IW_HEADER_TYPE_FREQ, | 52 | .header_type = IW_HEADER_TYPE_FREQ, |
53 | .flags = IW_DESCR_FLAG_DUMP, | 53 | .flags = IW_DESCR_FLAG_DUMP, |
54 | }, | 54 | }, |
55 | [SIOCSIWMODE - SIOCIWFIRST] = { | 55 | [IW_IOCTL_IDX(SIOCSIWMODE)] = { |
56 | .header_type = IW_HEADER_TYPE_UINT, | 56 | .header_type = IW_HEADER_TYPE_UINT, |
57 | .flags = IW_DESCR_FLAG_EVENT, | 57 | .flags = IW_DESCR_FLAG_EVENT, |
58 | }, | 58 | }, |
59 | [SIOCGIWMODE - SIOCIWFIRST] = { | 59 | [IW_IOCTL_IDX(SIOCGIWMODE)] = { |
60 | .header_type = IW_HEADER_TYPE_UINT, | 60 | .header_type = IW_HEADER_TYPE_UINT, |
61 | .flags = IW_DESCR_FLAG_DUMP, | 61 | .flags = IW_DESCR_FLAG_DUMP, |
62 | }, | 62 | }, |
63 | [SIOCSIWSENS - SIOCIWFIRST] = { | 63 | [IW_IOCTL_IDX(SIOCSIWSENS)] = { |
64 | .header_type = IW_HEADER_TYPE_PARAM, | 64 | .header_type = IW_HEADER_TYPE_PARAM, |
65 | }, | 65 | }, |
66 | [SIOCGIWSENS - SIOCIWFIRST] = { | 66 | [IW_IOCTL_IDX(SIOCGIWSENS)] = { |
67 | .header_type = IW_HEADER_TYPE_PARAM, | 67 | .header_type = IW_HEADER_TYPE_PARAM, |
68 | }, | 68 | }, |
69 | [SIOCSIWRANGE - SIOCIWFIRST] = { | 69 | [IW_IOCTL_IDX(SIOCSIWRANGE)] = { |
70 | .header_type = IW_HEADER_TYPE_NULL, | 70 | .header_type = IW_HEADER_TYPE_NULL, |
71 | }, | 71 | }, |
72 | [SIOCGIWRANGE - SIOCIWFIRST] = { | 72 | [IW_IOCTL_IDX(SIOCGIWRANGE)] = { |
73 | .header_type = IW_HEADER_TYPE_POINT, | 73 | .header_type = IW_HEADER_TYPE_POINT, |
74 | .token_size = 1, | 74 | .token_size = 1, |
75 | .max_tokens = sizeof(struct iw_range), | 75 | .max_tokens = sizeof(struct iw_range), |
76 | .flags = IW_DESCR_FLAG_DUMP, | 76 | .flags = IW_DESCR_FLAG_DUMP, |
77 | }, | 77 | }, |
78 | [SIOCSIWPRIV - SIOCIWFIRST] = { | 78 | [IW_IOCTL_IDX(SIOCSIWPRIV)] = { |
79 | .header_type = IW_HEADER_TYPE_NULL, | 79 | .header_type = IW_HEADER_TYPE_NULL, |
80 | }, | 80 | }, |
81 | [SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */ | 81 | [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */ |
82 | .header_type = IW_HEADER_TYPE_POINT, | 82 | .header_type = IW_HEADER_TYPE_POINT, |
83 | .token_size = sizeof(struct iw_priv_args), | 83 | .token_size = sizeof(struct iw_priv_args), |
84 | .max_tokens = 16, | 84 | .max_tokens = 16, |
85 | .flags = IW_DESCR_FLAG_NOMAX, | 85 | .flags = IW_DESCR_FLAG_NOMAX, |
86 | }, | 86 | }, |
87 | [SIOCSIWSTATS - SIOCIWFIRST] = { | 87 | [IW_IOCTL_IDX(SIOCSIWSTATS)] = { |
88 | .header_type = IW_HEADER_TYPE_NULL, | 88 | .header_type = IW_HEADER_TYPE_NULL, |
89 | }, | 89 | }, |
90 | [SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */ | 90 | [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */ |
91 | .header_type = IW_HEADER_TYPE_POINT, | 91 | .header_type = IW_HEADER_TYPE_POINT, |
92 | .token_size = 1, | 92 | .token_size = 1, |
93 | .max_tokens = sizeof(struct iw_statistics), | 93 | .max_tokens = sizeof(struct iw_statistics), |
94 | .flags = IW_DESCR_FLAG_DUMP, | 94 | .flags = IW_DESCR_FLAG_DUMP, |
95 | }, | 95 | }, |
96 | [SIOCSIWSPY - SIOCIWFIRST] = { | 96 | [IW_IOCTL_IDX(SIOCSIWSPY)] = { |
97 | .header_type = IW_HEADER_TYPE_POINT, | 97 | .header_type = IW_HEADER_TYPE_POINT, |
98 | .token_size = sizeof(struct sockaddr), | 98 | .token_size = sizeof(struct sockaddr), |
99 | .max_tokens = IW_MAX_SPY, | 99 | .max_tokens = IW_MAX_SPY, |
100 | }, | 100 | }, |
101 | [SIOCGIWSPY - SIOCIWFIRST] = { | 101 | [IW_IOCTL_IDX(SIOCGIWSPY)] = { |
102 | .header_type = IW_HEADER_TYPE_POINT, | 102 | .header_type = IW_HEADER_TYPE_POINT, |
103 | .token_size = sizeof(struct sockaddr) + | 103 | .token_size = sizeof(struct sockaddr) + |
104 | sizeof(struct iw_quality), | 104 | sizeof(struct iw_quality), |
105 | .max_tokens = IW_MAX_SPY, | 105 | .max_tokens = IW_MAX_SPY, |
106 | }, | 106 | }, |
107 | [SIOCSIWTHRSPY - SIOCIWFIRST] = { | 107 | [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = { |
108 | .header_type = IW_HEADER_TYPE_POINT, | 108 | .header_type = IW_HEADER_TYPE_POINT, |
109 | .token_size = sizeof(struct iw_thrspy), | 109 | .token_size = sizeof(struct iw_thrspy), |
110 | .min_tokens = 1, | 110 | .min_tokens = 1, |
111 | .max_tokens = 1, | 111 | .max_tokens = 1, |
112 | }, | 112 | }, |
113 | [SIOCGIWTHRSPY - SIOCIWFIRST] = { | 113 | [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = { |
114 | .header_type = IW_HEADER_TYPE_POINT, | 114 | .header_type = IW_HEADER_TYPE_POINT, |
115 | .token_size = sizeof(struct iw_thrspy), | 115 | .token_size = sizeof(struct iw_thrspy), |
116 | .min_tokens = 1, | 116 | .min_tokens = 1, |
117 | .max_tokens = 1, | 117 | .max_tokens = 1, |
118 | }, | 118 | }, |
119 | [SIOCSIWAP - SIOCIWFIRST] = { | 119 | [IW_IOCTL_IDX(SIOCSIWAP)] = { |
120 | .header_type = IW_HEADER_TYPE_ADDR, | 120 | .header_type = IW_HEADER_TYPE_ADDR, |
121 | }, | 121 | }, |
122 | [SIOCGIWAP - SIOCIWFIRST] = { | 122 | [IW_IOCTL_IDX(SIOCGIWAP)] = { |
123 | .header_type = IW_HEADER_TYPE_ADDR, | 123 | .header_type = IW_HEADER_TYPE_ADDR, |
124 | .flags = IW_DESCR_FLAG_DUMP, | 124 | .flags = IW_DESCR_FLAG_DUMP, |
125 | }, | 125 | }, |
126 | [SIOCSIWMLME - SIOCIWFIRST] = { | 126 | [IW_IOCTL_IDX(SIOCSIWMLME)] = { |
127 | .header_type = IW_HEADER_TYPE_POINT, | 127 | .header_type = IW_HEADER_TYPE_POINT, |
128 | .token_size = 1, | 128 | .token_size = 1, |
129 | .min_tokens = sizeof(struct iw_mlme), | 129 | .min_tokens = sizeof(struct iw_mlme), |
130 | .max_tokens = sizeof(struct iw_mlme), | 130 | .max_tokens = sizeof(struct iw_mlme), |
131 | }, | 131 | }, |
132 | [SIOCGIWAPLIST - SIOCIWFIRST] = { | 132 | [IW_IOCTL_IDX(SIOCGIWAPLIST)] = { |
133 | .header_type = IW_HEADER_TYPE_POINT, | 133 | .header_type = IW_HEADER_TYPE_POINT, |
134 | .token_size = sizeof(struct sockaddr) + | 134 | .token_size = sizeof(struct sockaddr) + |
135 | sizeof(struct iw_quality), | 135 | sizeof(struct iw_quality), |
136 | .max_tokens = IW_MAX_AP, | 136 | .max_tokens = IW_MAX_AP, |
137 | .flags = IW_DESCR_FLAG_NOMAX, | 137 | .flags = IW_DESCR_FLAG_NOMAX, |
138 | }, | 138 | }, |
139 | [SIOCSIWSCAN - SIOCIWFIRST] = { | 139 | [IW_IOCTL_IDX(SIOCSIWSCAN)] = { |
140 | .header_type = IW_HEADER_TYPE_POINT, | 140 | .header_type = IW_HEADER_TYPE_POINT, |
141 | .token_size = 1, | 141 | .token_size = 1, |
142 | .min_tokens = 0, | 142 | .min_tokens = 0, |
143 | .max_tokens = sizeof(struct iw_scan_req), | 143 | .max_tokens = sizeof(struct iw_scan_req), |
144 | }, | 144 | }, |
145 | [SIOCGIWSCAN - SIOCIWFIRST] = { | 145 | [IW_IOCTL_IDX(SIOCGIWSCAN)] = { |
146 | .header_type = IW_HEADER_TYPE_POINT, | 146 | .header_type = IW_HEADER_TYPE_POINT, |
147 | .token_size = 1, | 147 | .token_size = 1, |
148 | .max_tokens = IW_SCAN_MAX_DATA, | 148 | .max_tokens = IW_SCAN_MAX_DATA, |
149 | .flags = IW_DESCR_FLAG_NOMAX, | 149 | .flags = IW_DESCR_FLAG_NOMAX, |
150 | }, | 150 | }, |
151 | [SIOCSIWESSID - SIOCIWFIRST] = { | 151 | [IW_IOCTL_IDX(SIOCSIWESSID)] = { |
152 | .header_type = IW_HEADER_TYPE_POINT, | 152 | .header_type = IW_HEADER_TYPE_POINT, |
153 | .token_size = 1, | 153 | .token_size = 1, |
154 | .max_tokens = IW_ESSID_MAX_SIZE, | 154 | .max_tokens = IW_ESSID_MAX_SIZE, |
155 | .flags = IW_DESCR_FLAG_EVENT, | 155 | .flags = IW_DESCR_FLAG_EVENT, |
156 | }, | 156 | }, |
157 | [SIOCGIWESSID - SIOCIWFIRST] = { | 157 | [IW_IOCTL_IDX(SIOCGIWESSID)] = { |
158 | .header_type = IW_HEADER_TYPE_POINT, | 158 | .header_type = IW_HEADER_TYPE_POINT, |
159 | .token_size = 1, | 159 | .token_size = 1, |
160 | .max_tokens = IW_ESSID_MAX_SIZE, | 160 | .max_tokens = IW_ESSID_MAX_SIZE, |
161 | .flags = IW_DESCR_FLAG_DUMP, | 161 | .flags = IW_DESCR_FLAG_DUMP, |
162 | }, | 162 | }, |
163 | [SIOCSIWNICKN - SIOCIWFIRST] = { | 163 | [IW_IOCTL_IDX(SIOCSIWNICKN)] = { |
164 | .header_type = IW_HEADER_TYPE_POINT, | 164 | .header_type = IW_HEADER_TYPE_POINT, |
165 | .token_size = 1, | 165 | .token_size = 1, |
166 | .max_tokens = IW_ESSID_MAX_SIZE, | 166 | .max_tokens = IW_ESSID_MAX_SIZE, |
167 | }, | 167 | }, |
168 | [SIOCGIWNICKN - SIOCIWFIRST] = { | 168 | [IW_IOCTL_IDX(SIOCGIWNICKN)] = { |
169 | .header_type = IW_HEADER_TYPE_POINT, | 169 | .header_type = IW_HEADER_TYPE_POINT, |
170 | .token_size = 1, | 170 | .token_size = 1, |
171 | .max_tokens = IW_ESSID_MAX_SIZE, | 171 | .max_tokens = IW_ESSID_MAX_SIZE, |
172 | }, | 172 | }, |
173 | [SIOCSIWRATE - SIOCIWFIRST] = { | 173 | [IW_IOCTL_IDX(SIOCSIWRATE)] = { |
174 | .header_type = IW_HEADER_TYPE_PARAM, | 174 | .header_type = IW_HEADER_TYPE_PARAM, |
175 | }, | 175 | }, |
176 | [SIOCGIWRATE - SIOCIWFIRST] = { | 176 | [IW_IOCTL_IDX(SIOCGIWRATE)] = { |
177 | .header_type = IW_HEADER_TYPE_PARAM, | 177 | .header_type = IW_HEADER_TYPE_PARAM, |
178 | }, | 178 | }, |
179 | [SIOCSIWRTS - SIOCIWFIRST] = { | 179 | [IW_IOCTL_IDX(SIOCSIWRTS)] = { |
180 | .header_type = IW_HEADER_TYPE_PARAM, | 180 | .header_type = IW_HEADER_TYPE_PARAM, |
181 | }, | 181 | }, |
182 | [SIOCGIWRTS - SIOCIWFIRST] = { | 182 | [IW_IOCTL_IDX(SIOCGIWRTS)] = { |
183 | .header_type = IW_HEADER_TYPE_PARAM, | 183 | .header_type = IW_HEADER_TYPE_PARAM, |
184 | }, | 184 | }, |
185 | [SIOCSIWFRAG - SIOCIWFIRST] = { | 185 | [IW_IOCTL_IDX(SIOCSIWFRAG)] = { |
186 | .header_type = IW_HEADER_TYPE_PARAM, | 186 | .header_type = IW_HEADER_TYPE_PARAM, |
187 | }, | 187 | }, |
188 | [SIOCGIWFRAG - SIOCIWFIRST] = { | 188 | [IW_IOCTL_IDX(SIOCGIWFRAG)] = { |
189 | .header_type = IW_HEADER_TYPE_PARAM, | 189 | .header_type = IW_HEADER_TYPE_PARAM, |
190 | }, | 190 | }, |
191 | [SIOCSIWTXPOW - SIOCIWFIRST] = { | 191 | [IW_IOCTL_IDX(SIOCSIWTXPOW)] = { |
192 | .header_type = IW_HEADER_TYPE_PARAM, | 192 | .header_type = IW_HEADER_TYPE_PARAM, |
193 | }, | 193 | }, |
194 | [SIOCGIWTXPOW - SIOCIWFIRST] = { | 194 | [IW_IOCTL_IDX(SIOCGIWTXPOW)] = { |
195 | .header_type = IW_HEADER_TYPE_PARAM, | 195 | .header_type = IW_HEADER_TYPE_PARAM, |
196 | }, | 196 | }, |
197 | [SIOCSIWRETRY - SIOCIWFIRST] = { | 197 | [IW_IOCTL_IDX(SIOCSIWRETRY)] = { |
198 | .header_type = IW_HEADER_TYPE_PARAM, | 198 | .header_type = IW_HEADER_TYPE_PARAM, |
199 | }, | 199 | }, |
200 | [SIOCGIWRETRY - SIOCIWFIRST] = { | 200 | [IW_IOCTL_IDX(SIOCGIWRETRY)] = { |
201 | .header_type = IW_HEADER_TYPE_PARAM, | 201 | .header_type = IW_HEADER_TYPE_PARAM, |
202 | }, | 202 | }, |
203 | [SIOCSIWENCODE - SIOCIWFIRST] = { | 203 | [IW_IOCTL_IDX(SIOCSIWENCODE)] = { |
204 | .header_type = IW_HEADER_TYPE_POINT, | 204 | .header_type = IW_HEADER_TYPE_POINT, |
205 | .token_size = 1, | 205 | .token_size = 1, |
206 | .max_tokens = IW_ENCODING_TOKEN_MAX, | 206 | .max_tokens = IW_ENCODING_TOKEN_MAX, |
207 | .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, | 207 | .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, |
208 | }, | 208 | }, |
209 | [SIOCGIWENCODE - SIOCIWFIRST] = { | 209 | [IW_IOCTL_IDX(SIOCGIWENCODE)] = { |
210 | .header_type = IW_HEADER_TYPE_POINT, | 210 | .header_type = IW_HEADER_TYPE_POINT, |
211 | .token_size = 1, | 211 | .token_size = 1, |
212 | .max_tokens = IW_ENCODING_TOKEN_MAX, | 212 | .max_tokens = IW_ENCODING_TOKEN_MAX, |
213 | .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, | 213 | .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, |
214 | }, | 214 | }, |
215 | [SIOCSIWPOWER - SIOCIWFIRST] = { | 215 | [IW_IOCTL_IDX(SIOCSIWPOWER)] = { |
216 | .header_type = IW_HEADER_TYPE_PARAM, | 216 | .header_type = IW_HEADER_TYPE_PARAM, |
217 | }, | 217 | }, |
218 | [SIOCGIWPOWER - SIOCIWFIRST] = { | 218 | [IW_IOCTL_IDX(SIOCGIWPOWER)] = { |
219 | .header_type = IW_HEADER_TYPE_PARAM, | 219 | .header_type = IW_HEADER_TYPE_PARAM, |
220 | }, | 220 | }, |
221 | [SIOCSIWGENIE - SIOCIWFIRST] = { | 221 | [IW_IOCTL_IDX(SIOCSIWGENIE)] = { |
222 | .header_type = IW_HEADER_TYPE_POINT, | 222 | .header_type = IW_HEADER_TYPE_POINT, |
223 | .token_size = 1, | 223 | .token_size = 1, |
224 | .max_tokens = IW_GENERIC_IE_MAX, | 224 | .max_tokens = IW_GENERIC_IE_MAX, |
225 | }, | 225 | }, |
226 | [SIOCGIWGENIE - SIOCIWFIRST] = { | 226 | [IW_IOCTL_IDX(SIOCGIWGENIE)] = { |
227 | .header_type = IW_HEADER_TYPE_POINT, | 227 | .header_type = IW_HEADER_TYPE_POINT, |
228 | .token_size = 1, | 228 | .token_size = 1, |
229 | .max_tokens = IW_GENERIC_IE_MAX, | 229 | .max_tokens = IW_GENERIC_IE_MAX, |
230 | }, | 230 | }, |
231 | [SIOCSIWAUTH - SIOCIWFIRST] = { | 231 | [IW_IOCTL_IDX(SIOCSIWAUTH)] = { |
232 | .header_type = IW_HEADER_TYPE_PARAM, | 232 | .header_type = IW_HEADER_TYPE_PARAM, |
233 | }, | 233 | }, |
234 | [SIOCGIWAUTH - SIOCIWFIRST] = { | 234 | [IW_IOCTL_IDX(SIOCGIWAUTH)] = { |
235 | .header_type = IW_HEADER_TYPE_PARAM, | 235 | .header_type = IW_HEADER_TYPE_PARAM, |
236 | }, | 236 | }, |
237 | [SIOCSIWENCODEEXT - SIOCIWFIRST] = { | 237 | [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = { |
238 | .header_type = IW_HEADER_TYPE_POINT, | 238 | .header_type = IW_HEADER_TYPE_POINT, |
239 | .token_size = 1, | 239 | .token_size = 1, |
240 | .min_tokens = sizeof(struct iw_encode_ext), | 240 | .min_tokens = sizeof(struct iw_encode_ext), |
241 | .max_tokens = sizeof(struct iw_encode_ext) + | 241 | .max_tokens = sizeof(struct iw_encode_ext) + |
242 | IW_ENCODING_TOKEN_MAX, | 242 | IW_ENCODING_TOKEN_MAX, |
243 | }, | 243 | }, |
244 | [SIOCGIWENCODEEXT - SIOCIWFIRST] = { | 244 | [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = { |
245 | .header_type = IW_HEADER_TYPE_POINT, | 245 | .header_type = IW_HEADER_TYPE_POINT, |
246 | .token_size = 1, | 246 | .token_size = 1, |
247 | .min_tokens = sizeof(struct iw_encode_ext), | 247 | .min_tokens = sizeof(struct iw_encode_ext), |
248 | .max_tokens = sizeof(struct iw_encode_ext) + | 248 | .max_tokens = sizeof(struct iw_encode_ext) + |
249 | IW_ENCODING_TOKEN_MAX, | 249 | IW_ENCODING_TOKEN_MAX, |
250 | }, | 250 | }, |
251 | [SIOCSIWPMKSA - SIOCIWFIRST] = { | 251 | [IW_IOCTL_IDX(SIOCSIWPMKSA)] = { |
252 | .header_type = IW_HEADER_TYPE_POINT, | 252 | .header_type = IW_HEADER_TYPE_POINT, |
253 | .token_size = 1, | 253 | .token_size = 1, |
254 | .min_tokens = sizeof(struct iw_pmksa), | 254 | .min_tokens = sizeof(struct iw_pmksa), |
@@ -262,44 +262,44 @@ static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); | |||
262 | * we know about. | 262 | * we know about. |
263 | */ | 263 | */ |
264 | static const struct iw_ioctl_description standard_event[] = { | 264 | static const struct iw_ioctl_description standard_event[] = { |
265 | [IWEVTXDROP - IWEVFIRST] = { | 265 | [IW_EVENT_IDX(IWEVTXDROP)] = { |
266 | .header_type = IW_HEADER_TYPE_ADDR, | 266 | .header_type = IW_HEADER_TYPE_ADDR, |
267 | }, | 267 | }, |
268 | [IWEVQUAL - IWEVFIRST] = { | 268 | [IW_EVENT_IDX(IWEVQUAL)] = { |
269 | .header_type = IW_HEADER_TYPE_QUAL, | 269 | .header_type = IW_HEADER_TYPE_QUAL, |
270 | }, | 270 | }, |
271 | [IWEVCUSTOM - IWEVFIRST] = { | 271 | [IW_EVENT_IDX(IWEVCUSTOM)] = { |
272 | .header_type = IW_HEADER_TYPE_POINT, | 272 | .header_type = IW_HEADER_TYPE_POINT, |
273 | .token_size = 1, | 273 | .token_size = 1, |
274 | .max_tokens = IW_CUSTOM_MAX, | 274 | .max_tokens = IW_CUSTOM_MAX, |
275 | }, | 275 | }, |
276 | [IWEVREGISTERED - IWEVFIRST] = { | 276 | [IW_EVENT_IDX(IWEVREGISTERED)] = { |
277 | .header_type = IW_HEADER_TYPE_ADDR, | 277 | .header_type = IW_HEADER_TYPE_ADDR, |
278 | }, | 278 | }, |
279 | [IWEVEXPIRED - IWEVFIRST] = { | 279 | [IW_EVENT_IDX(IWEVEXPIRED)] = { |
280 | .header_type = IW_HEADER_TYPE_ADDR, | 280 | .header_type = IW_HEADER_TYPE_ADDR, |
281 | }, | 281 | }, |
282 | [IWEVGENIE - IWEVFIRST] = { | 282 | [IW_EVENT_IDX(IWEVGENIE)] = { |
283 | .header_type = IW_HEADER_TYPE_POINT, | 283 | .header_type = IW_HEADER_TYPE_POINT, |
284 | .token_size = 1, | 284 | .token_size = 1, |
285 | .max_tokens = IW_GENERIC_IE_MAX, | 285 | .max_tokens = IW_GENERIC_IE_MAX, |
286 | }, | 286 | }, |
287 | [IWEVMICHAELMICFAILURE - IWEVFIRST] = { | 287 | [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = { |
288 | .header_type = IW_HEADER_TYPE_POINT, | 288 | .header_type = IW_HEADER_TYPE_POINT, |
289 | .token_size = 1, | 289 | .token_size = 1, |
290 | .max_tokens = sizeof(struct iw_michaelmicfailure), | 290 | .max_tokens = sizeof(struct iw_michaelmicfailure), |
291 | }, | 291 | }, |
292 | [IWEVASSOCREQIE - IWEVFIRST] = { | 292 | [IW_EVENT_IDX(IWEVASSOCREQIE)] = { |
293 | .header_type = IW_HEADER_TYPE_POINT, | 293 | .header_type = IW_HEADER_TYPE_POINT, |
294 | .token_size = 1, | 294 | .token_size = 1, |
295 | .max_tokens = IW_GENERIC_IE_MAX, | 295 | .max_tokens = IW_GENERIC_IE_MAX, |
296 | }, | 296 | }, |
297 | [IWEVASSOCRESPIE - IWEVFIRST] = { | 297 | [IW_EVENT_IDX(IWEVASSOCRESPIE)] = { |
298 | .header_type = IW_HEADER_TYPE_POINT, | 298 | .header_type = IW_HEADER_TYPE_POINT, |
299 | .token_size = 1, | 299 | .token_size = 1, |
300 | .max_tokens = IW_GENERIC_IE_MAX, | 300 | .max_tokens = IW_GENERIC_IE_MAX, |
301 | }, | 301 | }, |
302 | [IWEVPMKIDCAND - IWEVFIRST] = { | 302 | [IW_EVENT_IDX(IWEVPMKIDCAND)] = { |
303 | .header_type = IW_HEADER_TYPE_POINT, | 303 | .header_type = IW_HEADER_TYPE_POINT, |
304 | .token_size = 1, | 304 | .token_size = 1, |
305 | .max_tokens = sizeof(struct iw_pmkid_cand), | 305 | .max_tokens = sizeof(struct iw_pmkid_cand), |
@@ -450,11 +450,11 @@ void wireless_send_event(struct net_device * dev, | |||
450 | 450 | ||
451 | /* Get the description of the Event */ | 451 | /* Get the description of the Event */ |
452 | if (cmd <= SIOCIWLAST) { | 452 | if (cmd <= SIOCIWLAST) { |
453 | cmd_index = cmd - SIOCIWFIRST; | 453 | cmd_index = IW_IOCTL_IDX(cmd); |
454 | if (cmd_index < standard_ioctl_num) | 454 | if (cmd_index < standard_ioctl_num) |
455 | descr = &(standard_ioctl[cmd_index]); | 455 | descr = &(standard_ioctl[cmd_index]); |
456 | } else { | 456 | } else { |
457 | cmd_index = cmd - IWEVFIRST; | 457 | cmd_index = IW_EVENT_IDX(cmd); |
458 | if (cmd_index < standard_event_num) | 458 | if (cmd_index < standard_event_num) |
459 | descr = &(standard_event[cmd_index]); | 459 | descr = &(standard_event[cmd_index]); |
460 | } | 460 | } |
@@ -663,7 +663,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd) | |||
663 | return NULL; | 663 | return NULL; |
664 | 664 | ||
665 | /* Try as a standard command */ | 665 | /* Try as a standard command */ |
666 | index = cmd - SIOCIWFIRST; | 666 | index = IW_IOCTL_IDX(cmd); |
667 | if (index < handlers->num_standard) | 667 | if (index < handlers->num_standard) |
668 | return handlers->standard[index]; | 668 | return handlers->standard[index]; |
669 | 669 | ||
@@ -955,9 +955,9 @@ static int ioctl_standard_call(struct net_device * dev, | |||
955 | int ret = -EINVAL; | 955 | int ret = -EINVAL; |
956 | 956 | ||
957 | /* Get the description of the IOCTL */ | 957 | /* Get the description of the IOCTL */ |
958 | if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) | 958 | if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num) |
959 | return -EOPNOTSUPP; | 959 | return -EOPNOTSUPP; |
960 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | 960 | descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]); |
961 | 961 | ||
962 | /* Check if we have a pointer to user space data or not */ | 962 | /* Check if we have a pointer to user space data or not */ |
963 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | 963 | if (descr->header_type != IW_HEADER_TYPE_POINT) { |
@@ -1013,7 +1013,7 @@ static int compat_standard_call(struct net_device *dev, | |||
1013 | struct iw_point iwp; | 1013 | struct iw_point iwp; |
1014 | int err; | 1014 | int err; |
1015 | 1015 | ||
1016 | descr = standard_ioctl + (cmd - SIOCIWFIRST); | 1016 | descr = standard_ioctl + IW_IOCTL_IDX(cmd); |
1017 | 1017 | ||
1018 | if (descr->header_type != IW_HEADER_TYPE_POINT) | 1018 | if (descr->header_type != IW_HEADER_TYPE_POINT) |
1019 | return ioctl_standard_call(dev, iwr, cmd, info, handler); | 1019 | return ioctl_standard_call(dev, iwr, cmd, info, handler); |
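
The wext-core.c change is mechanical: every open-coded "cmd - SIOCIWFIRST"
/ "cmd - IWEVFIRST" array index is funneled through
IW_IOCTL_IDX()/IW_EVENT_IDX() so the offset arithmetic lives in one place.
The header hunk is not in this excerpt, so treat these as the assumed
definitions — presumably just the same subtractions, named:

    #define IW_IOCTL_IDX(cmd)    ((cmd) - SIOCIWFIRST)
    #define IW_EVENT_IDX(cmd)    ((cmd) - IWEVFIRST)
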
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 843e066649cb..7430ac26ec49 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -37,6 +37,8 @@ | |||
37 | DEFINE_MUTEX(xfrm_cfg_mutex); | 37 | DEFINE_MUTEX(xfrm_cfg_mutex); |
38 | EXPORT_SYMBOL(xfrm_cfg_mutex); | 38 | EXPORT_SYMBOL(xfrm_cfg_mutex); |
39 | 39 | ||
40 | static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); | ||
41 | static struct dst_entry *xfrm_policy_sk_bundles; | ||
40 | static DEFINE_RWLOCK(xfrm_policy_lock); | 42 | static DEFINE_RWLOCK(xfrm_policy_lock); |
41 | 43 | ||
42 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); | 44 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); |
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; | |||
44 | 46 | ||
45 | static struct kmem_cache *xfrm_dst_cache __read_mostly; | 47 | static struct kmem_cache *xfrm_dst_cache __read_mostly; |
46 | 48 | ||
47 | static HLIST_HEAD(xfrm_policy_gc_list); | ||
48 | static DEFINE_SPINLOCK(xfrm_policy_gc_lock); | ||
49 | |||
50 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); | 49 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); |
51 | static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); | 50 | static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); |
52 | static void xfrm_init_pmtu(struct dst_entry *dst); | 51 | static void xfrm_init_pmtu(struct dst_entry *dst); |
52 | static int stale_bundle(struct dst_entry *dst); | ||
53 | 53 | ||
54 | static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, | 54 | static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, |
55 | int dir); | 55 | int dir); |
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data) | |||
156 | 156 | ||
157 | read_lock(&xp->lock); | 157 | read_lock(&xp->lock); |
158 | 158 | ||
159 | if (xp->walk.dead) | 159 | if (unlikely(xp->walk.dead)) |
160 | goto out; | 160 | goto out; |
161 | 161 | ||
162 | dir = xfrm_policy_id2dir(xp->index); | 162 | dir = xfrm_policy_id2dir(xp->index); |
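The unlikely() annotation added here is the usual branch-prediction hint; a sketch of its conventional definition (from include/linux/compiler.h, shown as an assumption):

    /* Sketch: hint to the compiler that the condition is rarely true. */
    #define unlikely(x)	__builtin_expect(!!(x), 0)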
@@ -216,6 +216,35 @@ expired: | |||
216 | xfrm_pol_put(xp); | 216 | xfrm_pol_put(xp); |
217 | } | 217 | } |
218 | 218 | ||
219 | static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) | ||
220 | { | ||
221 | struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); | ||
222 | |||
223 | if (unlikely(pol->walk.dead)) | ||
224 | flo = NULL; | ||
225 | else | ||
226 | xfrm_pol_hold(pol); | ||
227 | |||
228 | return flo; | ||
229 | } | ||
230 | |||
231 | static int xfrm_policy_flo_check(struct flow_cache_object *flo) | ||
232 | { | ||
233 | struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); | ||
234 | |||
235 | return !pol->walk.dead; | ||
236 | } | ||
237 | |||
238 | static void xfrm_policy_flo_delete(struct flow_cache_object *flo) | ||
239 | { | ||
240 | xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); | ||
241 | } | ||
242 | |||
243 | static const struct flow_cache_ops xfrm_policy_fc_ops = { | ||
244 | .get = xfrm_policy_flo_get, | ||
245 | .check = xfrm_policy_flo_check, | ||
246 | .delete = xfrm_policy_flo_delete, | ||
247 | }; | ||
219 | 248 | ||
220 | /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 | 249 | /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 |
221 | * SPD calls. | 250 | * SPD calls. |
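The ops vector above is what lets the generic flow cache hold xfrm policies (and, later in this patch, bundles) without knowing their concrete type. A minimal sketch of the assumed interface, with the object embedded in each cached entry and recovered via container_of():

    /* Sketch (assumed shape of the flow-cache interface): */
    struct flow_cache_object;

    struct flow_cache_ops {
            struct flow_cache_object *(*get)(struct flow_cache_object *flo);
            int (*check)(struct flow_cache_object *flo);
            void (*delete)(struct flow_cache_object *flo);
    };

    struct flow_cache_object {
            const struct flow_cache_ops *ops;
    };

->get() takes a reference (or returns NULL to force re-resolution), ->check() validates a cached entry, and ->delete() drops the cache's reference.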
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) | |||
236 | atomic_set(&policy->refcnt, 1); | 265 | atomic_set(&policy->refcnt, 1); |
237 | setup_timer(&policy->timer, xfrm_policy_timer, | 266 | setup_timer(&policy->timer, xfrm_policy_timer, |
238 | (unsigned long)policy); | 267 | (unsigned long)policy); |
268 | policy->flo.ops = &xfrm_policy_fc_ops; | ||
239 | } | 269 | } |
240 | return policy; | 270 | return policy; |
241 | } | 271 | } |
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
247 | { | 277 | { |
248 | BUG_ON(!policy->walk.dead); | 278 | BUG_ON(!policy->walk.dead); |
249 | 279 | ||
250 | BUG_ON(policy->bundles); | ||
251 | |||
252 | if (del_timer(&policy->timer)) | 280 | if (del_timer(&policy->timer)) |
253 | BUG(); | 281 | BUG(); |
254 | 282 | ||
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
257 | } | 285 | } |
258 | EXPORT_SYMBOL(xfrm_policy_destroy); | 286 | EXPORT_SYMBOL(xfrm_policy_destroy); |
259 | 287 | ||
260 | static void xfrm_policy_gc_kill(struct xfrm_policy *policy) | ||
261 | { | ||
262 | struct dst_entry *dst; | ||
263 | |||
264 | while ((dst = policy->bundles) != NULL) { | ||
265 | policy->bundles = dst->next; | ||
266 | dst_free(dst); | ||
267 | } | ||
268 | |||
269 | if (del_timer(&policy->timer)) | ||
270 | atomic_dec(&policy->refcnt); | ||
271 | |||
272 | if (atomic_read(&policy->refcnt) > 1) | ||
273 | flow_cache_flush(); | ||
274 | |||
275 | xfrm_pol_put(policy); | ||
276 | } | ||
277 | |||
278 | static void xfrm_policy_gc_task(struct work_struct *work) | ||
279 | { | ||
280 | struct xfrm_policy *policy; | ||
281 | struct hlist_node *entry, *tmp; | ||
282 | struct hlist_head gc_list; | ||
283 | |||
284 | spin_lock_bh(&xfrm_policy_gc_lock); | ||
285 | gc_list.first = xfrm_policy_gc_list.first; | ||
286 | INIT_HLIST_HEAD(&xfrm_policy_gc_list); | ||
287 | spin_unlock_bh(&xfrm_policy_gc_lock); | ||
288 | |||
289 | hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst) | ||
290 | xfrm_policy_gc_kill(policy); | ||
291 | } | ||
292 | static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task); | ||
293 | |||
294 | /* Rule must be locked. Release descendant resources, announce | 288 | /* Rule must be locked. Release descendant resources, announce |
295 | * entry dead. The rule must already be unlinked from the lists. | 289 | * entry dead. The rule must already be unlinked from the lists. |
296 | */ | 290 | */ |
297 | 291 | ||
298 | static void xfrm_policy_kill(struct xfrm_policy *policy) | 292 | static void xfrm_policy_kill(struct xfrm_policy *policy) |
299 | { | 293 | { |
300 | int dead; | ||
301 | |||
302 | write_lock_bh(&policy->lock); | ||
303 | dead = policy->walk.dead; | ||
304 | policy->walk.dead = 1; | 294 | policy->walk.dead = 1; |
305 | write_unlock_bh(&policy->lock); | ||
306 | 295 | ||
307 | if (unlikely(dead)) { | 296 | atomic_inc(&policy->genid); |
308 | WARN_ON(1); | ||
309 | return; | ||
310 | } | ||
311 | 297 | ||
312 | spin_lock_bh(&xfrm_policy_gc_lock); | 298 | if (del_timer(&policy->timer)) |
313 | hlist_add_head(&policy->bydst, &xfrm_policy_gc_list); | 299 | xfrm_pol_put(policy); |
314 | spin_unlock_bh(&xfrm_policy_gc_lock); | ||
315 | 300 | ||
316 | schedule_work(&xfrm_policy_gc_work); | 301 | xfrm_pol_put(policy); |
317 | } | 302 | } |
318 | 303 | ||
319 | static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; | 304 | static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; |
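With the GC worklist gone, xfrm_policy_kill() now releases its references synchronously. The accounting it relies on, restated as a commented sketch (not authoritative; names match the code above):

    /* Sketch: one reference is owned by the pending timer, one by the
     * lists the caller has already unlinked the policy from. */
    static void example_kill(struct xfrm_policy *policy)
    {
            policy->walk.dead = 1;          /* lookups skip it now      */
            atomic_inc(&policy->genid);     /* invalidate its bundles   */

            if (del_timer(&policy->timer))  /* timer was still pending, */
                    xfrm_pol_put(policy);   /* so drop its reference    */

            xfrm_pol_put(policy);           /* drop the list reference  */
    }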
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
555 | struct xfrm_policy *delpol; | 540 | struct xfrm_policy *delpol; |
556 | struct hlist_head *chain; | 541 | struct hlist_head *chain; |
557 | struct hlist_node *entry, *newpos; | 542 | struct hlist_node *entry, *newpos; |
558 | struct dst_entry *gc_list; | ||
559 | u32 mark = policy->mark.v & policy->mark.m; | 543 | u32 mark = policy->mark.v & policy->mark.m; |
560 | 544 | ||
561 | write_lock_bh(&xfrm_policy_lock); | 545 | write_lock_bh(&xfrm_policy_lock); |
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
605 | else if (xfrm_bydst_should_resize(net, dir, NULL)) | 589 | else if (xfrm_bydst_should_resize(net, dir, NULL)) |
606 | schedule_work(&net->xfrm.policy_hash_work); | 590 | schedule_work(&net->xfrm.policy_hash_work); |
607 | 591 | ||
608 | read_lock_bh(&xfrm_policy_lock); | ||
609 | gc_list = NULL; | ||
610 | entry = &policy->bydst; | ||
611 | hlist_for_each_entry_continue(policy, entry, bydst) { | ||
612 | struct dst_entry *dst; | ||
613 | |||
614 | write_lock(&policy->lock); | ||
615 | dst = policy->bundles; | ||
616 | if (dst) { | ||
617 | struct dst_entry *tail = dst; | ||
618 | while (tail->next) | ||
619 | tail = tail->next; | ||
620 | tail->next = gc_list; | ||
621 | gc_list = dst; | ||
622 | |||
623 | policy->bundles = NULL; | ||
624 | } | ||
625 | write_unlock(&policy->lock); | ||
626 | } | ||
627 | read_unlock_bh(&xfrm_policy_lock); | ||
628 | |||
629 | while (gc_list) { | ||
630 | struct dst_entry *dst = gc_list; | ||
631 | |||
632 | gc_list = dst->next; | ||
633 | dst_free(dst); | ||
634 | } | ||
635 | |||
636 | return 0; | 592 | return 0; |
637 | } | 593 | } |
638 | EXPORT_SYMBOL(xfrm_policy_insert); | 594 | EXPORT_SYMBOL(xfrm_policy_insert); |
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
671 | } | 627 | } |
672 | write_unlock_bh(&xfrm_policy_lock); | 628 | write_unlock_bh(&xfrm_policy_lock); |
673 | 629 | ||
674 | if (ret && delete) { | 630 | if (ret && delete) |
675 | atomic_inc(&flow_cache_genid); | ||
676 | xfrm_policy_kill(ret); | 631 | xfrm_policy_kill(ret); |
677 | } | ||
678 | return ret; | 632 | return ret; |
679 | } | 633 | } |
680 | EXPORT_SYMBOL(xfrm_policy_bysel_ctx); | 634 | EXPORT_SYMBOL(xfrm_policy_bysel_ctx); |
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
713 | } | 667 | } |
714 | write_unlock_bh(&xfrm_policy_lock); | 668 | write_unlock_bh(&xfrm_policy_lock); |
715 | 669 | ||
716 | if (ret && delete) { | 670 | if (ret && delete) |
717 | atomic_inc(&flow_cache_genid); | ||
718 | xfrm_policy_kill(ret); | 671 | xfrm_policy_kill(ret); |
719 | } | ||
720 | return ret; | 672 | return ret; |
721 | } | 673 | } |
722 | EXPORT_SYMBOL(xfrm_policy_byid); | 674 | EXPORT_SYMBOL(xfrm_policy_byid); |
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi | |||
776 | int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | 728 | int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) |
777 | { | 729 | { |
778 | int dir, err = 0, cnt = 0; | 730 | int dir, err = 0, cnt = 0; |
779 | struct xfrm_policy *dp; | ||
780 | 731 | ||
781 | write_lock_bh(&xfrm_policy_lock); | 732 | write_lock_bh(&xfrm_policy_lock); |
782 | 733 | ||
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
794 | &net->xfrm.policy_inexact[dir], bydst) { | 745 | &net->xfrm.policy_inexact[dir], bydst) { |
795 | if (pol->type != type) | 746 | if (pol->type != type) |
796 | continue; | 747 | continue; |
797 | dp = __xfrm_policy_unlink(pol, dir); | 748 | __xfrm_policy_unlink(pol, dir); |
798 | write_unlock_bh(&xfrm_policy_lock); | 749 | write_unlock_bh(&xfrm_policy_lock); |
799 | if (dp) | 750 | cnt++; |
800 | cnt++; | ||
801 | 751 | ||
802 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, | 752 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, |
803 | audit_info->sessionid, | 753 | audit_info->sessionid, |
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
816 | bydst) { | 766 | bydst) { |
817 | if (pol->type != type) | 767 | if (pol->type != type) |
818 | continue; | 768 | continue; |
819 | dp = __xfrm_policy_unlink(pol, dir); | 769 | __xfrm_policy_unlink(pol, dir); |
820 | write_unlock_bh(&xfrm_policy_lock); | 770 | write_unlock_bh(&xfrm_policy_lock); |
821 | if (dp) | 771 | cnt++; |
822 | cnt++; | ||
823 | 772 | ||
824 | xfrm_audit_policy_delete(pol, 1, | 773 | xfrm_audit_policy_delete(pol, 1, |
825 | audit_info->loginuid, | 774 | audit_info->loginuid, |
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
835 | } | 784 | } |
836 | if (!cnt) | 785 | if (!cnt) |
837 | err = -ESRCH; | 786 | err = -ESRCH; |
838 | atomic_inc(&flow_cache_genid); | ||
839 | out: | 787 | out: |
840 | write_unlock_bh(&xfrm_policy_lock); | 788 | write_unlock_bh(&xfrm_policy_lock); |
841 | return err; | 789 | return err; |
@@ -989,32 +937,37 @@ fail: | |||
989 | return ret; | 937 | return ret; |
990 | } | 938 | } |
991 | 939 | ||
992 | static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, | 940 | static struct xfrm_policy * |
993 | u8 dir, void **objp, atomic_t **obj_refp) | 941 | __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir) |
994 | { | 942 | { |
943 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
995 | struct xfrm_policy *pol; | 944 | struct xfrm_policy *pol; |
996 | int err = 0; | ||
997 | 945 | ||
998 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
999 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); | 946 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); |
1000 | if (IS_ERR(pol)) { | 947 | if (pol != NULL) |
1001 | err = PTR_ERR(pol); | 948 | return pol; |
1002 | pol = NULL; | ||
1003 | } | ||
1004 | if (pol || err) | ||
1005 | goto end; | ||
1006 | #endif | ||
1007 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); | ||
1008 | if (IS_ERR(pol)) { | ||
1009 | err = PTR_ERR(pol); | ||
1010 | pol = NULL; | ||
1011 | } | ||
1012 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1013 | end: | ||
1014 | #endif | 949 | #endif |
1015 | if ((*objp = (void *) pol) != NULL) | 950 | return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); |
1016 | *obj_refp = &pol->refcnt; | 951 | } |
1017 | return err; | 952 | |
953 | static struct flow_cache_object * | ||
954 | xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, | ||
955 | u8 dir, struct flow_cache_object *old_obj, void *ctx) | ||
956 | { | ||
957 | struct xfrm_policy *pol; | ||
958 | |||
959 | if (old_obj) | ||
960 | xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); | ||
961 | |||
962 | pol = __xfrm_policy_lookup(net, fl, family, dir); | ||
963 | if (IS_ERR_OR_NULL(pol)) | ||
964 | return ERR_CAST(pol); | ||
965 | |||
966 | /* Resolver returns two references: | ||
967 | * one for cache and one for caller of flow_cache_lookup() */ | ||
968 | xfrm_pol_hold(pol); | ||
969 | |||
970 | return &pol->flo; | ||
1018 | } | 971 | } |
1019 | 972 | ||
1020 | static inline int policy_to_flow_dir(int dir) | 973 | static inline int policy_to_flow_dir(int dir) |
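xfrm_policy_lookup() now conforms to the resolver contract of the reworked flow cache. A hedged sketch of that contract (hypothetical typedef, modelled on the call sites in this patch):

    /* Sketch: resolver invoked by flow_cache_lookup() on a miss or a
     * stale hit. old_obj, if non-NULL, is owned by the resolver and
     * must be put; on success two references are returned - one kept
     * by the cache, one for the caller of flow_cache_lookup(). */
    typedef struct flow_cache_object *(*flow_resolve_t)(
                    struct net *net, struct flowi *fl, u16 family, u8 dir,
                    struct flow_cache_object *old_obj, void *ctx);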
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir) | |||
1104 | pol = __xfrm_policy_unlink(pol, dir); | 1057 | pol = __xfrm_policy_unlink(pol, dir); |
1105 | write_unlock_bh(&xfrm_policy_lock); | 1058 | write_unlock_bh(&xfrm_policy_lock); |
1106 | if (pol) { | 1059 | if (pol) { |
1107 | if (dir < XFRM_POLICY_MAX) | ||
1108 | atomic_inc(&flow_cache_genid); | ||
1109 | xfrm_policy_kill(pol); | 1060 | xfrm_policy_kill(pol); |
1110 | return 0; | 1061 | return 0; |
1111 | } | 1062 | } |
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1132 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); | 1083 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); |
1133 | } | 1084 | } |
1134 | if (old_pol) | 1085 | if (old_pol) |
1086 | /* Unlinking succeeds always. This is the only function | ||
1087 | * allowed to delete or replace socket policy. | ||
1088 | */ | ||
1135 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); | 1089 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); |
1136 | write_unlock_bh(&xfrm_policy_lock); | 1090 | write_unlock_bh(&xfrm_policy_lock); |
1137 | 1091 | ||
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl, | |||
1300 | * still valid. | 1254 | * still valid. |
1301 | */ | 1255 | */ |
1302 | 1256 | ||
1303 | static struct dst_entry * | ||
1304 | xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family) | ||
1305 | { | ||
1306 | struct dst_entry *x; | ||
1307 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | ||
1308 | if (unlikely(afinfo == NULL)) | ||
1309 | return ERR_PTR(-EINVAL); | ||
1310 | x = afinfo->find_bundle(fl, policy); | ||
1311 | xfrm_policy_put_afinfo(afinfo); | ||
1312 | return x; | ||
1313 | } | ||
1314 | |||
1315 | static inline int xfrm_get_tos(struct flowi *fl, int family) | 1257 | static inline int xfrm_get_tos(struct flowi *fl, int family) |
1316 | { | 1258 | { |
1317 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1259 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family) | |||
1327 | return tos; | 1269 | return tos; |
1328 | } | 1270 | } |
1329 | 1271 | ||
1272 | static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) | ||
1273 | { | ||
1274 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1275 | struct dst_entry *dst = &xdst->u.dst; | ||
1276 | |||
1277 | if (xdst->route == NULL) { | ||
1278 | /* Dummy bundle - if it has xfrms, we were not | ||
1279 | * able to build a bundle because template resolution | ||
1280 | * failed, so we need to retry resolving. */ | ||
1281 | if (xdst->num_xfrms > 0) | ||
1282 | return NULL; | ||
1283 | } else { | ||
1284 | /* Real bundle */ | ||
1285 | if (stale_bundle(dst)) | ||
1286 | return NULL; | ||
1287 | } | ||
1288 | |||
1289 | dst_hold(dst); | ||
1290 | return flo; | ||
1291 | } | ||
1292 | |||
1293 | static int xfrm_bundle_flo_check(struct flow_cache_object *flo) | ||
1294 | { | ||
1295 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1296 | struct dst_entry *dst = &xdst->u.dst; | ||
1297 | |||
1298 | if (!xdst->route) | ||
1299 | return 0; | ||
1300 | if (stale_bundle(dst)) | ||
1301 | return 0; | ||
1302 | |||
1303 | return 1; | ||
1304 | } | ||
1305 | |||
1306 | static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) | ||
1307 | { | ||
1308 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1309 | struct dst_entry *dst = &xdst->u.dst; | ||
1310 | |||
1311 | dst_free(dst); | ||
1312 | } | ||
1313 | |||
1314 | static const struct flow_cache_ops xfrm_bundle_fc_ops = { | ||
1315 | .get = xfrm_bundle_flo_get, | ||
1316 | .check = xfrm_bundle_flo_check, | ||
1317 | .delete = xfrm_bundle_flo_delete, | ||
1318 | }; | ||
1319 | |||
1330 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | 1320 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) |
1331 | { | 1321 | { |
1332 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1322 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1349 | BUG(); | 1339 | BUG(); |
1350 | } | 1340 | } |
1351 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | 1341 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); |
1352 | |||
1353 | xfrm_policy_put_afinfo(afinfo); | 1342 | xfrm_policy_put_afinfo(afinfo); |
1354 | 1343 | ||
1344 | xdst->flo.ops = &xfrm_bundle_fc_ops; | ||
1345 | |||
1355 | return xdst; | 1346 | return xdst; |
1356 | } | 1347 | } |
1357 | 1348 | ||
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
1389 | return err; | 1380 | return err; |
1390 | } | 1381 | } |
1391 | 1382 | ||
1383 | |||
1392 | /* Allocate chain of dst_entry's, attach known xfrm's, calculate | 1384 | /* Allocate chain of dst_entry's, attach known xfrm's, calculate |
1393 | * all the metrics... Shortly, bundle a bundle. | 1385 | * all the metrics... Shortly, bundle a bundle. |
1394 | */ | 1386 | */ |
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1452 | dst_hold(dst); | 1444 | dst_hold(dst); |
1453 | 1445 | ||
1454 | dst1->xfrm = xfrm[i]; | 1446 | dst1->xfrm = xfrm[i]; |
1455 | xdst->genid = xfrm[i]->genid; | 1447 | xdst->xfrm_genid = xfrm[i]->genid; |
1456 | 1448 | ||
1457 | dst1->obsolete = -1; | 1449 | dst1->obsolete = -1; |
1458 | dst1->flags |= DST_HOST; | 1450 | dst1->flags |= DST_HOST; |
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl) | |||
1545 | #endif | 1537 | #endif |
1546 | } | 1538 | } |
1547 | 1539 | ||
1548 | static int stale_bundle(struct dst_entry *dst); | 1540 | static int xfrm_expand_policies(struct flowi *fl, u16 family, |
1541 | struct xfrm_policy **pols, | ||
1542 | int *num_pols, int *num_xfrms) | ||
1543 | { | ||
1544 | int i; | ||
1545 | |||
1546 | if (*num_pols == 0 || !pols[0]) { | ||
1547 | *num_pols = 0; | ||
1548 | *num_xfrms = 0; | ||
1549 | return 0; | ||
1550 | } | ||
1551 | if (IS_ERR(pols[0])) | ||
1552 | return PTR_ERR(pols[0]); | ||
1553 | |||
1554 | *num_xfrms = pols[0]->xfrm_nr; | ||
1555 | |||
1556 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1557 | if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && | ||
1558 | pols[0]->type != XFRM_POLICY_TYPE_MAIN) { | ||
1559 | pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), | ||
1560 | XFRM_POLICY_TYPE_MAIN, | ||
1561 | fl, family, | ||
1562 | XFRM_POLICY_OUT); | ||
1563 | if (pols[1]) { | ||
1564 | if (IS_ERR(pols[1])) { | ||
1565 | xfrm_pols_put(pols, *num_pols); | ||
1566 | return PTR_ERR(pols[1]); | ||
1567 | } | ||
1568 | (*num_pols)++; | ||
1569 | (*num_xfrms) += pols[1]->xfrm_nr; | ||
1570 | } | ||
1571 | } | ||
1572 | #endif | ||
1573 | for (i = 0; i < *num_pols; i++) { | ||
1574 | if (pols[i]->action != XFRM_POLICY_ALLOW) { | ||
1575 | *num_xfrms = -1; | ||
1576 | break; | ||
1577 | } | ||
1578 | } | ||
1579 | |||
1580 | return 0; | ||
1581 | |||
1582 | } | ||
1583 | |||
1584 | static struct xfrm_dst * | ||
1585 | xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, | ||
1586 | struct flowi *fl, u16 family, | ||
1587 | struct dst_entry *dst_orig) | ||
1588 | { | ||
1589 | struct net *net = xp_net(pols[0]); | ||
1590 | struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; | ||
1591 | struct dst_entry *dst; | ||
1592 | struct xfrm_dst *xdst; | ||
1593 | int err; | ||
1594 | |||
1595 | /* Try to instantiate a bundle */ | ||
1596 | err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); | ||
1597 | if (err < 0) { | ||
1598 | if (err != -EAGAIN) | ||
1599 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1600 | return ERR_PTR(err); | ||
1601 | } | ||
1602 | |||
1603 | dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); | ||
1604 | if (IS_ERR(dst)) { | ||
1605 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); | ||
1606 | return ERR_CAST(dst); | ||
1607 | } | ||
1608 | |||
1609 | xdst = (struct xfrm_dst *)dst; | ||
1610 | xdst->num_xfrms = err; | ||
1611 | if (num_pols > 1) | ||
1612 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1613 | else | ||
1614 | err = xfrm_dst_update_origin(dst, fl); | ||
1615 | if (unlikely(err)) { | ||
1616 | dst_free(dst); | ||
1617 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1618 | return ERR_PTR(err); | ||
1619 | } | ||
1620 | |||
1621 | xdst->num_pols = num_pols; | ||
1622 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1623 | xdst->policy_genid = atomic_read(&pols[0]->genid); | ||
1624 | |||
1625 | return xdst; | ||
1626 | } | ||
1627 | |||
1628 | static struct flow_cache_object * | ||
1629 | xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir, | ||
1630 | struct flow_cache_object *oldflo, void *ctx) | ||
1631 | { | ||
1632 | struct dst_entry *dst_orig = (struct dst_entry *)ctx; | ||
1633 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | ||
1634 | struct xfrm_dst *xdst, *new_xdst; | ||
1635 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; | ||
1636 | |||
1637 | /* Check if the policies from the old bundle are usable */ | ||
1638 | xdst = NULL; | ||
1639 | if (oldflo) { | ||
1640 | xdst = container_of(oldflo, struct xfrm_dst, flo); | ||
1641 | num_pols = xdst->num_pols; | ||
1642 | num_xfrms = xdst->num_xfrms; | ||
1643 | pol_dead = 0; | ||
1644 | for (i = 0; i < num_pols; i++) { | ||
1645 | pols[i] = xdst->pols[i]; | ||
1646 | pol_dead |= pols[i]->walk.dead; | ||
1647 | } | ||
1648 | if (pol_dead) { | ||
1649 | dst_free(&xdst->u.dst); | ||
1650 | xdst = NULL; | ||
1651 | num_pols = 0; | ||
1652 | num_xfrms = 0; | ||
1653 | oldflo = NULL; | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | /* Resolve policies to use if we couldn't get them from | ||
1658 | * the previous cache entry */ | ||
1659 | if (xdst == NULL) { | ||
1660 | num_pols = 1; | ||
1661 | pols[0] = __xfrm_policy_lookup(net, fl, family, dir); | ||
1662 | err = xfrm_expand_policies(fl, family, pols, | ||
1663 | &num_pols, &num_xfrms); | ||
1664 | if (err < 0) | ||
1665 | goto inc_error; | ||
1666 | if (num_pols == 0) | ||
1667 | return NULL; | ||
1668 | if (num_xfrms <= 0) | ||
1669 | goto make_dummy_bundle; | ||
1670 | } | ||
1671 | |||
1672 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); | ||
1673 | if (IS_ERR(new_xdst)) { | ||
1674 | err = PTR_ERR(new_xdst); | ||
1675 | if (err != -EAGAIN) | ||
1676 | goto error; | ||
1677 | if (oldflo == NULL) | ||
1678 | goto make_dummy_bundle; | ||
1679 | dst_hold(&xdst->u.dst); | ||
1680 | return oldflo; | ||
1681 | } | ||
1682 | |||
1683 | /* Kill the previous bundle */ | ||
1684 | if (xdst) { | ||
1685 | /* The policies were stolen for newly generated bundle */ | ||
1686 | xdst->num_pols = 0; | ||
1687 | dst_free(&xdst->u.dst); | ||
1688 | } | ||
1689 | |||
1690 | /* The flow cache holds no reference of its own (it dst_free()'s), | ||
1691 | * but we do need to return one reference for the original caller */ | ||
1692 | dst_hold(&new_xdst->u.dst); | ||
1693 | return &new_xdst->flo; | ||
1694 | |||
1695 | make_dummy_bundle: | ||
1696 | /* We found policies, but there are no bundles to instantiate: | ||
1697 | * either the policy blocks, it has no transformations, or | ||
1698 | * we could not build a template (no xfrm_states). */ | ||
1699 | xdst = xfrm_alloc_dst(net, family); | ||
1700 | if (IS_ERR(xdst)) { | ||
1701 | xfrm_pols_put(pols, num_pols); | ||
1702 | return ERR_CAST(xdst); | ||
1703 | } | ||
1704 | xdst->num_pols = num_pols; | ||
1705 | xdst->num_xfrms = num_xfrms; | ||
1706 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1707 | |||
1708 | dst_hold(&xdst->u.dst); | ||
1709 | return &xdst->flo; | ||
1710 | |||
1711 | inc_error: | ||
1712 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1713 | error: | ||
1714 | if (xdst != NULL) | ||
1715 | dst_free(&xdst->u.dst); | ||
1716 | else | ||
1717 | xfrm_pols_put(pols, num_pols); | ||
1718 | return ERR_PTR(err); | ||
1719 | } | ||
1549 | 1720 | ||
1550 | /* Main function: finds/creates a bundle for given flow. | 1721 | /* Main function: finds/creates a bundle for given flow. |
1551 | * | 1722 | * |
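xfrm_bundle_lookup() above can hand three different things back through the cache; a hypothetical helper condensing the outcomes (names assumed from the code above):

    /* Sketch: classify what the bundle resolver returned. */
    static int classify_flo(struct flow_cache_object *flo)
    {
            struct xfrm_dst *xdst;

            if (flo == NULL)
                    return 0;               /* no policy: pass in clear  */
            if (IS_ERR(flo))
                    return PTR_ERR(flo);    /* hard error                */

            xdst = container_of(flo, struct xfrm_dst, flo);
            if (xdst->route == NULL && xdst->num_xfrms > 0)
                    return -EAGAIN;         /* dummy bundle: wait for KM */

            return 1;                       /* usable bundle             */
    }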
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst); | |||
1555 | int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, | 1726 | int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, |
1556 | struct sock *sk, int flags) | 1727 | struct sock *sk, int flags) |
1557 | { | 1728 | { |
1558 | struct xfrm_policy *policy; | ||
1559 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | 1729 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
1560 | int npols; | 1730 | struct flow_cache_object *flo; |
1561 | int pol_dead; | 1731 | struct xfrm_dst *xdst; |
1562 | int xfrm_nr; | 1732 | struct dst_entry *dst, *dst_orig = *dst_p, *route; |
1563 | int pi; | 1733 | u16 family = dst_orig->ops->family; |
1564 | struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; | ||
1565 | struct dst_entry *dst, *dst_orig = *dst_p; | ||
1566 | int nx = 0; | ||
1567 | int err; | ||
1568 | u32 genid; | ||
1569 | u16 family; | ||
1570 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); | 1734 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); |
1735 | int i, err, num_pols, num_xfrms, drop_pols = 0; | ||
1571 | 1736 | ||
1572 | restart: | 1737 | restart: |
1573 | genid = atomic_read(&flow_cache_genid); | 1738 | dst = NULL; |
1574 | policy = NULL; | 1739 | xdst = NULL; |
1575 | for (pi = 0; pi < ARRAY_SIZE(pols); pi++) | 1740 | route = NULL; |
1576 | pols[pi] = NULL; | ||
1577 | npols = 0; | ||
1578 | pol_dead = 0; | ||
1579 | xfrm_nr = 0; | ||
1580 | 1741 | ||
1581 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { | 1742 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { |
1582 | policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); | 1743 | num_pols = 1; |
1583 | err = PTR_ERR(policy); | 1744 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); |
1584 | if (IS_ERR(policy)) { | 1745 | err = xfrm_expand_policies(fl, family, pols, |
1585 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1746 | &num_pols, &num_xfrms); |
1747 | if (err < 0) | ||
1586 | goto dropdst; | 1748 | goto dropdst; |
1749 | |||
1750 | if (num_pols) { | ||
1751 | if (num_xfrms <= 0) { | ||
1752 | drop_pols = num_pols; | ||
1753 | goto no_transform; | ||
1754 | } | ||
1755 | |||
1756 | xdst = xfrm_resolve_and_create_bundle( | ||
1757 | pols, num_pols, fl, | ||
1758 | family, dst_orig); | ||
1759 | if (IS_ERR(xdst)) { | ||
1760 | xfrm_pols_put(pols, num_pols); | ||
1761 | err = PTR_ERR(xdst); | ||
1762 | goto dropdst; | ||
1763 | } | ||
1764 | |||
1765 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); | ||
1766 | xdst->u.dst.next = xfrm_policy_sk_bundles; | ||
1767 | xfrm_policy_sk_bundles = &xdst->u.dst; | ||
1768 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | ||
1769 | |||
1770 | route = xdst->route; | ||
1587 | } | 1771 | } |
1588 | } | 1772 | } |
1589 | 1773 | ||
1590 | if (!policy) { | 1774 | if (xdst == NULL) { |
1591 | /* To accelerate a bit... */ | 1775 | /* To accelerate a bit... */ |
1592 | if ((dst_orig->flags & DST_NOXFRM) || | 1776 | if ((dst_orig->flags & DST_NOXFRM) || |
1593 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) | 1777 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) |
1594 | goto nopol; | 1778 | goto nopol; |
1595 | 1779 | ||
1596 | policy = flow_cache_lookup(net, fl, dst_orig->ops->family, | 1780 | flo = flow_cache_lookup(net, fl, family, dir, |
1597 | dir, xfrm_policy_lookup); | 1781 | xfrm_bundle_lookup, dst_orig); |
1598 | err = PTR_ERR(policy); | 1782 | if (flo == NULL) |
1599 | if (IS_ERR(policy)) { | 1783 | goto nopol; |
1600 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1784 | if (IS_ERR(flo)) { |
1785 | err = PTR_ERR(flo); | ||
1601 | goto dropdst; | 1786 | goto dropdst; |
1602 | } | 1787 | } |
1788 | xdst = container_of(flo, struct xfrm_dst, flo); | ||
1789 | |||
1790 | num_pols = xdst->num_pols; | ||
1791 | num_xfrms = xdst->num_xfrms; | ||
1792 | memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1793 | route = xdst->route; | ||
1794 | } | ||
1795 | |||
1796 | dst = &xdst->u.dst; | ||
1797 | if (route == NULL && num_xfrms > 0) { | ||
1798 | /* The only case when xfrm_bundle_lookup() returns a | ||
1799 | * bundle with a null route is when the template could | ||
1800 | * not be resolved. It means the policies are there, but | ||
1801 | * the bundle could not be created, since we don't yet | ||
1802 | * have the xfrm_states. We need to wait for the KM to | ||
1803 | * negotiate new SAs or bail out with an error. */ | ||
1804 | if (net->xfrm.sysctl_larval_drop) { | ||
1805 | /* EREMOTE tells the caller to generate | ||
1806 | * a one-shot blackhole route. */ | ||
1807 | dst_release(dst); | ||
1808 | xfrm_pols_put(pols, num_pols); | ||
1809 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1810 | return -EREMOTE; | ||
1811 | } | ||
1812 | if (flags & XFRM_LOOKUP_WAIT) { | ||
1813 | DECLARE_WAITQUEUE(wait, current); | ||
1814 | |||
1815 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1816 | set_current_state(TASK_INTERRUPTIBLE); | ||
1817 | schedule(); | ||
1818 | set_current_state(TASK_RUNNING); | ||
1819 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1820 | |||
1821 | if (!signal_pending(current)) { | ||
1822 | dst_release(dst); | ||
1823 | goto restart; | ||
1824 | } | ||
1825 | |||
1826 | err = -ERESTART; | ||
1827 | } else | ||
1828 | err = -EAGAIN; | ||
1829 | |||
1830 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1831 | goto error; | ||
1603 | } | 1832 | } |
1604 | 1833 | ||
1605 | if (!policy) | 1834 | no_transform: |
1835 | if (num_pols == 0) | ||
1606 | goto nopol; | 1836 | goto nopol; |
1607 | 1837 | ||
1608 | family = dst_orig->ops->family; | 1838 | if ((flags & XFRM_LOOKUP_ICMP) && |
1609 | pols[0] = policy; | 1839 | !(pols[0]->flags & XFRM_POLICY_ICMP)) { |
1610 | npols ++; | 1840 | err = -ENOENT; |
1611 | xfrm_nr += pols[0]->xfrm_nr; | ||
1612 | |||
1613 | err = -ENOENT; | ||
1614 | if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP)) | ||
1615 | goto error; | 1841 | goto error; |
1842 | } | ||
1616 | 1843 | ||
1617 | policy->curlft.use_time = get_seconds(); | 1844 | for (i = 0; i < num_pols; i++) |
1845 | pols[i]->curlft.use_time = get_seconds(); | ||
1618 | 1846 | ||
1619 | switch (policy->action) { | 1847 | if (num_xfrms < 0) { |
1620 | default: | ||
1621 | case XFRM_POLICY_BLOCK: | ||
1622 | /* Prohibit the flow */ | 1848 | /* Prohibit the flow */ |
1623 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); | 1849 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); |
1624 | err = -EPERM; | 1850 | err = -EPERM; |
1625 | goto error; | 1851 | goto error; |
1626 | 1852 | } else if (num_xfrms > 0) { | |
1627 | case XFRM_POLICY_ALLOW: | 1853 | /* Flow transformed */ |
1628 | #ifndef CONFIG_XFRM_SUB_POLICY | 1854 | *dst_p = dst; |
1629 | if (policy->xfrm_nr == 0) { | 1855 | dst_release(dst_orig); |
1630 | /* Flow passes untransformed. */ | ||
1631 | xfrm_pol_put(policy); | 1857 | /* Flow passes untransformed */ |
1632 | return 0; | 1858 | dst_release(dst); |
1633 | } | ||
1634 | #endif | ||
1635 | |||
1636 | /* Try to find matching bundle. | ||
1637 | * | ||
1638 | * LATER: help from flow cache. It is optional, this | ||
1639 | * is required only for output policy. | ||
1640 | */ | ||
1641 | dst = xfrm_find_bundle(fl, policy, family); | ||
1642 | if (IS_ERR(dst)) { | ||
1643 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1644 | err = PTR_ERR(dst); | ||
1645 | goto error; | ||
1646 | } | ||
1647 | |||
1648 | if (dst) | ||
1649 | break; | ||
1650 | |||
1651 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1652 | if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { | ||
1653 | pols[1] = xfrm_policy_lookup_bytype(net, | ||
1654 | XFRM_POLICY_TYPE_MAIN, | ||
1655 | fl, family, | ||
1656 | XFRM_POLICY_OUT); | ||
1657 | if (pols[1]) { | ||
1658 | if (IS_ERR(pols[1])) { | ||
1659 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1660 | err = PTR_ERR(pols[1]); | ||
1661 | goto error; | ||
1662 | } | ||
1663 | if (pols[1]->action == XFRM_POLICY_BLOCK) { | ||
1664 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); | ||
1665 | err = -EPERM; | ||
1666 | goto error; | ||
1667 | } | ||
1668 | npols ++; | ||
1669 | xfrm_nr += pols[1]->xfrm_nr; | ||
1670 | } | ||
1671 | } | ||
1672 | |||
1673 | /* | ||
1674 | * Because neither flowi nor bundle information knows the | ||
1675 | * transformation template size, with more than one policy in | ||
1676 | * use we can only tell whether all of them are bypass after | ||
1677 | * they have been searched. Note that the not-transformed | ||
1678 | * bypass above is likewise wrapped in non-sub-policy config. | ||
1679 | */ | ||
1680 | if (xfrm_nr == 0) { | ||
1681 | /* Flow passes untransformed. */ | ||
1682 | xfrm_pols_put(pols, npols); | ||
1683 | return 0; | ||
1684 | } | ||
1685 | |||
1686 | #endif | ||
1687 | nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family); | ||
1688 | |||
1689 | if (unlikely(nx<0)) { | ||
1690 | err = nx; | ||
1691 | if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) { | ||
1692 | /* EREMOTE tells the caller to generate | ||
1693 | * a one-shot blackhole route. | ||
1694 | */ | ||
1695 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1696 | xfrm_pol_put(policy); | ||
1697 | return -EREMOTE; | ||
1698 | } | ||
1699 | if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) { | ||
1700 | DECLARE_WAITQUEUE(wait, current); | ||
1701 | |||
1702 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1703 | set_current_state(TASK_INTERRUPTIBLE); | ||
1704 | schedule(); | ||
1705 | set_current_state(TASK_RUNNING); | ||
1706 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1707 | |||
1708 | nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family); | ||
1709 | |||
1710 | if (nx == -EAGAIN && signal_pending(current)) { | ||
1711 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1712 | err = -ERESTART; | ||
1713 | goto error; | ||
1714 | } | ||
1715 | if (nx == -EAGAIN || | ||
1716 | genid != atomic_read(&flow_cache_genid)) { | ||
1717 | xfrm_pols_put(pols, npols); | ||
1718 | goto restart; | ||
1719 | } | ||
1720 | err = nx; | ||
1721 | } | ||
1722 | if (err < 0) { | ||
1723 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1724 | goto error; | ||
1725 | } | ||
1726 | } | ||
1727 | if (nx == 0) { | ||
1728 | /* Flow passes untransformed. */ | ||
1729 | xfrm_pols_put(pols, npols); | ||
1730 | return 0; | ||
1731 | } | ||
1732 | |||
1733 | dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig); | ||
1734 | err = PTR_ERR(dst); | ||
1735 | if (IS_ERR(dst)) { | ||
1736 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); | ||
1737 | goto error; | ||
1738 | } | ||
1739 | |||
1740 | for (pi = 0; pi < npols; pi++) { | ||
1741 | read_lock_bh(&pols[pi]->lock); | ||
1742 | pol_dead |= pols[pi]->walk.dead; | ||
1743 | read_unlock_bh(&pols[pi]->lock); | ||
1744 | } | ||
1745 | |||
1746 | write_lock_bh(&policy->lock); | ||
1747 | if (unlikely(pol_dead || stale_bundle(dst))) { | ||
1748 | /* Wow! While we worked on resolving, this | ||
1749 | * policy has gone away. Retry. It is not paranoia, | ||
1750 | * we just cannot enlist a new bundle to a dead object. | ||
1751 | * We can't enlist stale bundles either. | ||
1752 | */ | ||
1753 | write_unlock_bh(&policy->lock); | ||
1754 | dst_free(dst); | ||
1755 | |||
1756 | if (pol_dead) | ||
1757 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD); | ||
1758 | else | ||
1759 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1760 | err = -EHOSTUNREACH; | ||
1761 | goto error; | ||
1762 | } | ||
1763 | |||
1764 | if (npols > 1) | ||
1765 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1766 | else | ||
1767 | err = xfrm_dst_update_origin(dst, fl); | ||
1768 | if (unlikely(err)) { | ||
1769 | write_unlock_bh(&policy->lock); | ||
1770 | dst_free(dst); | ||
1771 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1772 | goto error; | ||
1773 | } | ||
1774 | |||
1775 | dst->next = policy->bundles; | ||
1776 | policy->bundles = dst; | ||
1777 | dst_hold(dst); | ||
1778 | write_unlock_bh(&policy->lock); | ||
1779 | } | 1859 | } |
1780 | *dst_p = dst; | 1860 | ok: |
1781 | dst_release(dst_orig); | 1861 | xfrm_pols_put(pols, drop_pols); |
1782 | xfrm_pols_put(pols, npols); | ||
1783 | return 0; | 1862 | return 0; |
1784 | 1863 | ||
1864 | nopol: | ||
1865 | if (!(flags & XFRM_LOOKUP_ICMP)) | ||
1866 | goto ok; | ||
1867 | err = -ENOENT; | ||
1785 | error: | 1868 | error: |
1786 | xfrm_pols_put(pols, npols); | 1869 | dst_release(dst); |
1787 | dropdst: | 1870 | dropdst: |
1788 | dst_release(dst_orig); | 1871 | dst_release(dst_orig); |
1789 | *dst_p = NULL; | 1872 | *dst_p = NULL; |
1873 | xfrm_pols_put(pols, drop_pols); | ||
1790 | return err; | 1874 | return err; |
1791 | |||
1792 | nopol: | ||
1793 | err = -ENOENT; | ||
1794 | if (flags & XFRM_LOOKUP_ICMP) | ||
1795 | goto dropdst; | ||
1796 | return 0; | ||
1797 | } | 1875 | } |
1798 | EXPORT_SYMBOL(__xfrm_lookup); | 1876 | EXPORT_SYMBOL(__xfrm_lookup); |
1799 | 1877 | ||
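The larval-state handling keeps its old semantics behind the new structure: with sysctl_larval_drop the caller gets -EREMOTE and is expected to install a one-shot blackhole route, with XFRM_LOOKUP_WAIT it sleeps on km_waitq, and otherwise it sees -EAGAIN. A small hypothetical wrapper showing the flag choice:

    /* Sketch: pick blocking vs. non-blocking resolution (assumed use). */
    static int example_lookup(struct net *net, struct dst_entry **dstp,
                              struct flowi *fl, struct sock *sk, int can_sleep)
    {
            return __xfrm_lookup(net, dstp, fl, sk,
                                 can_sleep ? XFRM_LOOKUP_WAIT : 0);
    }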
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
1952 | } | 2030 | } |
1953 | } | 2031 | } |
1954 | 2032 | ||
1955 | if (!pol) | 2033 | if (!pol) { |
1956 | pol = flow_cache_lookup(net, &fl, family, fl_dir, | 2034 | struct flow_cache_object *flo; |
1957 | xfrm_policy_lookup); | 2035 | |
2036 | flo = flow_cache_lookup(net, &fl, family, fl_dir, | ||
2037 | xfrm_policy_lookup, NULL); | ||
2038 | if (IS_ERR_OR_NULL(flo)) | ||
2039 | pol = ERR_CAST(flo); | ||
2040 | else | ||
2041 | pol = container_of(flo, struct xfrm_policy, flo); | ||
2042 | } | ||
1958 | 2043 | ||
1959 | if (IS_ERR(pol)) { | 2044 | if (IS_ERR(pol)) { |
1960 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); | 2045 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); |
@@ -2138,71 +2223,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) | |||
2138 | return dst; | 2223 | return dst; |
2139 | } | 2224 | } |
2140 | 2225 | ||
2141 | static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p) | 2226 | static void __xfrm_garbage_collect(struct net *net) |
2142 | { | ||
2143 | struct dst_entry *dst, **dstp; | ||
2144 | |||
2145 | write_lock(&pol->lock); | ||
2146 | dstp = &pol->bundles; | ||
2147 | while ((dst=*dstp) != NULL) { | ||
2148 | if (func(dst)) { | ||
2149 | *dstp = dst->next; | ||
2150 | dst->next = *gc_list_p; | ||
2151 | *gc_list_p = dst; | ||
2152 | } else { | ||
2153 | dstp = &dst->next; | ||
2154 | } | ||
2155 | } | ||
2156 | write_unlock(&pol->lock); | ||
2157 | } | ||
2158 | |||
2159 | static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *)) | ||
2160 | { | 2227 | { |
2161 | struct dst_entry *gc_list = NULL; | 2228 | struct dst_entry *head, *next; |
2162 | int dir; | ||
2163 | 2229 | ||
2164 | read_lock_bh(&xfrm_policy_lock); | 2230 | flow_cache_flush(); |
2165 | for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { | ||
2166 | struct xfrm_policy *pol; | ||
2167 | struct hlist_node *entry; | ||
2168 | struct hlist_head *table; | ||
2169 | int i; | ||
2170 | 2231 | ||
2171 | hlist_for_each_entry(pol, entry, | 2232 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); |
2172 | &net->xfrm.policy_inexact[dir], bydst) | 2233 | head = xfrm_policy_sk_bundles; |
2173 | prune_one_bundle(pol, func, &gc_list); | 2234 | xfrm_policy_sk_bundles = NULL; |
2235 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | ||
2174 | 2236 | ||
2175 | table = net->xfrm.policy_bydst[dir].table; | 2237 | while (head) { |
2176 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { | 2238 | next = head->next; |
2177 | hlist_for_each_entry(pol, entry, table + i, bydst) | 2239 | dst_free(head); |
2178 | prune_one_bundle(pol, func, &gc_list); | 2240 | head = next; |
2179 | } | ||
2180 | } | ||
2181 | read_unlock_bh(&xfrm_policy_lock); | ||
2182 | |||
2183 | while (gc_list) { | ||
2184 | struct dst_entry *dst = gc_list; | ||
2185 | gc_list = dst->next; | ||
2186 | dst_free(dst); | ||
2187 | } | 2241 | } |
2188 | } | 2242 | } |
2189 | 2243 | ||
2190 | static int unused_bundle(struct dst_entry *dst) | ||
2191 | { | ||
2192 | return !atomic_read(&dst->__refcnt); | ||
2193 | } | ||
2194 | |||
2195 | static void __xfrm_garbage_collect(struct net *net) | ||
2196 | { | ||
2197 | xfrm_prune_bundles(net, unused_bundle); | ||
2198 | } | ||
2199 | |||
2200 | static int xfrm_flush_bundles(struct net *net) | ||
2201 | { | ||
2202 | xfrm_prune_bundles(net, stale_bundle); | ||
2203 | return 0; | ||
2204 | } | ||
2205 | |||
2206 | static void xfrm_init_pmtu(struct dst_entry *dst) | 2244 | static void xfrm_init_pmtu(struct dst_entry *dst) |
2207 | { | 2245 | { |
2208 | do { | 2246 | do { |
@@ -2260,7 +2298,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
2260 | return 0; | 2298 | return 0; |
2261 | if (dst->xfrm->km.state != XFRM_STATE_VALID) | 2299 | if (dst->xfrm->km.state != XFRM_STATE_VALID) |
2262 | return 0; | 2300 | return 0; |
2263 | if (xdst->genid != dst->xfrm->genid) | 2301 | if (xdst->xfrm_genid != dst->xfrm->genid) |
2302 | return 0; | ||
2303 | if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | ||
2264 | return 0; | 2304 | return 0; |
2265 | 2305 | ||
2266 | if (strict && fl && | 2306 | if (strict && fl && |
@@ -2425,7 +2465,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void | |||
2425 | 2465 | ||
2426 | switch (event) { | 2466 | switch (event) { |
2427 | case NETDEV_DOWN: | 2467 | case NETDEV_DOWN: |
2428 | xfrm_flush_bundles(dev_net(dev)); | 2468 | __xfrm_garbage_collect(dev_net(dev)); |
2429 | } | 2469 | } |
2430 | return NOTIFY_DONE; | 2470 | return NOTIFY_DONE; |
2431 | } | 2471 | } |
@@ -2531,7 +2571,6 @@ static void xfrm_policy_fini(struct net *net) | |||
2531 | audit_info.sessionid = -1; | 2571 | audit_info.sessionid = -1; |
2532 | audit_info.secid = 0; | 2572 | audit_info.secid = 0; |
2533 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); | 2573 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); |
2534 | flush_work(&xfrm_policy_gc_work); | ||
2535 | 2574 | ||
2536 | WARN_ON(!list_empty(&net->xfrm.policy_all)); | 2575 | WARN_ON(!list_empty(&net->xfrm.policy_all)); |
2537 | 2576 | ||
@@ -2757,7 +2796,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, | |||
2757 | struct xfrm_migrate *m, int num_migrate) | 2796 | struct xfrm_migrate *m, int num_migrate) |
2758 | { | 2797 | { |
2759 | struct xfrm_migrate *mp; | 2798 | struct xfrm_migrate *mp; |
2760 | struct dst_entry *dst; | ||
2761 | int i, j, n = 0; | 2799 | int i, j, n = 0; |
2762 | 2800 | ||
2763 | write_lock_bh(&pol->lock); | 2801 | write_lock_bh(&pol->lock); |
@@ -2782,10 +2820,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, | |||
2782 | sizeof(pol->xfrm_vec[i].saddr)); | 2820 | sizeof(pol->xfrm_vec[i].saddr)); |
2783 | pol->xfrm_vec[i].encap_family = mp->new_family; | 2821 | pol->xfrm_vec[i].encap_family = mp->new_family; |
2784 | /* flush bundles */ | 2822 | /* flush bundles */ |
2785 | while ((dst = pol->bundles) != NULL) { | 2823 | atomic_inc(&pol->genid); |
2786 | pol->bundles = dst->next; | ||
2787 | dst_free(dst); | ||
2788 | } | ||
2789 | } | 2824 | } |
2790 | } | 2825 | } |
2791 | 2826 | ||
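Instead of walking and freeing per-policy bundle lists, migration now bumps pol->genid and lets bundles die lazily: xfrm_bundle_ok() (see its hunk above) compares the generation snapshotted at bundle creation. A one-line sketch of that test, with field names assumed from this patch:

    /* Sketch: a bundle is stale once its policy generation moved on. */
    static int bundle_matches_policy(struct xfrm_dst *xdst)
    {
            return xdst->policy_genid == atomic_read(&xdst->pols[0]->genid);
    }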
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index add77ecb8ac4..5208b12fbfb4 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -38,7 +38,6 @@ | |||
38 | static DEFINE_SPINLOCK(xfrm_state_lock); | 38 | static DEFINE_SPINLOCK(xfrm_state_lock); |
39 | 39 | ||
40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
41 | static unsigned int xfrm_state_genid; | ||
42 | 41 | ||
43 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | 42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); |
44 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | 43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); |
@@ -924,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x) | |||
924 | struct net *net = xs_net(x); | 923 | struct net *net = xs_net(x); |
925 | unsigned int h; | 924 | unsigned int h; |
926 | 925 | ||
927 | x->genid = ++xfrm_state_genid; | ||
928 | |||
929 | list_add(&x->km.all, &net->xfrm.state_all); | 926 | list_add(&x->km.all, &net->xfrm.state_all); |
930 | 927 | ||
931 | h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, | 928 | h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, |
@@ -971,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) | |||
971 | (mark & x->mark.m) == x->mark.v && | 968 | (mark & x->mark.m) == x->mark.v && |
972 | !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && | 969 | !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && |
973 | !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) | 970 | !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) |
974 | x->genid = xfrm_state_genid; | 971 | x->genid++; |
975 | } | 972 | } |
976 | } | 973 | } |
977 | 974 | ||
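The global xfrm_state_genid counter is dropped in favour of a per-state genid: __xfrm_state_bump_genids() increments x->genid on a colliding insert, and any bundle that snapshotted the old value (xdst->xfrm_genid in xfrm_policy.c above) stops validating. A sketch of the comparison, with field names assumed from this patch:

    /* Sketch: freshness test paired with the x->genid++ bump above. */
    static int xfrm_state_genid_ok(const struct xfrm_dst *xdst,
                                   const struct xfrm_state *x)
    {
            return xdst->xfrm_genid == x->genid;
    }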
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6106b72826d3..a267fbdda525 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1741 | if (err) | 1741 | if (err) |
1742 | return err; | 1742 | return err; |
1743 | 1743 | ||
1744 | err = verify_policy_dir(p->dir); | ||
1745 | if (err) | ||
1746 | return err; | ||
1747 | |||
1744 | if (p->index) | 1748 | if (p->index) |
1745 | xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); | 1749 | xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); |
1746 | else { | 1750 | else { |
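verify_policy_dir() rejects out-of-range directions before the expire handler looks anything up. A sketch of what that validator is assumed to look like elsewhere in this file:

    /* Sketch (assumed): only the three real directions are accepted. */
    static int verify_policy_dir(u8 dir)
    {
            switch (dir) {
            case XFRM_POLICY_IN:
            case XFRM_POLICY_OUT:
            case XFRM_POLICY_FWD:
                    break;
            default:
                    return -EINVAL;
            }
            return 0;
    }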
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1766 | if (xp == NULL) | 1770 | if (xp == NULL) |
1767 | return -ENOENT; | 1771 | return -ENOENT; |
1768 | 1772 | ||
1769 | read_lock(&xp->lock); | 1773 | if (unlikely(xp->walk.dead)) |
1770 | if (xp->walk.dead) { | ||
1771 | read_unlock(&xp->lock); | ||
1772 | goto out; | 1774 | goto out; |
1773 | } | ||
1774 | 1775 | ||
1775 | read_unlock(&xp->lock); | ||
1776 | err = 0; | 1776 | err = 0; |
1777 | if (up->hard) { | 1777 | if (up->hard) { |
1778 | uid_t loginuid = NETLINK_CB(skb).loginuid; | 1778 | uid_t loginuid = NETLINK_CB(skb).loginuid; |