Diffstat (limited to 'net')
228 files changed, 18223 insertions(+), 3664 deletions(-)
diff --git a/net/802/garp.c b/net/802/garp.c
index 9ed7c0e7dc17..941f2a324d3a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -576,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
 	if (!app)
 		goto err2;
 
-	err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0);
+	err = dev_mc_add(dev, appl->proto.group_address);
 	if (err < 0)
 		goto err3;
 
@@ -616,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	garp_pdu_queue(app);
 	garp_queue_xmit(app);
 
-	dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);
+	dev_mc_del(dev, appl->proto.group_address);
 	kfree(app);
 	garp_release_port(dev);
 }
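The two hunks above come from the conversion of the kernel's device multicast list to the generic netdev_hw_addr list: dev_mc_add()/dev_mc_del() now take only the device and the address, with the length (ETH_ALEN) and reference counting handled internally. A minimal sketch of a protocol joining and leaving a link-layer group under the new API (the group address value and function names here are illustrative, not from the patch):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static const u8 my_group_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x20 };

static int my_proto_attach(struct net_device *dev)
{
	/* adds a reference-counted entry to dev's multicast list */
	return dev_mc_add(dev, my_group_addr);
}

static void my_proto_detach(struct net_device *dev)
{
	/* drops one reference; the entry goes away at refcount zero */
	dev_mc_del(dev, my_group_addr);
}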
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 97da977c2a23..3c1c8c14e929 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -357,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev,
 	 * the new address */
 	if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_delete(dev, vlandev->dev_addr);
+		dev_uc_del(dev, vlandev->dev_addr);
 
 	/* vlan address was equal to the old address and is different from
 	 * the new address */
 	if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_add(dev, vlandev->dev_addr);
+		dev_uc_add(dev, vlandev->dev_addr);
 
 	memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
 }
@@ -533,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		}
 		unregister_netdevice_many(&list);
 		break;
+
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid underlying device to change its type. */
+		return NOTIFY_BAD;
 	}
 
 out:
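NETDEV_PRE_TYPE_CHANGE is delivered to netdevice notifiers before a device changes its hardware type, and returning NOTIFY_BAD vetoes the change; the VLAN code uses it to pin the type of any device it is stacked on. The same veto pattern for an arbitrary subsystem, as a sketch (handler and notifier names are illustrative):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_device_event(struct notifier_block *unused,
			   unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRE_TYPE_CHANGE:
		/* veto: our state assumes the device's current type */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_notifier = {
	.notifier_call = my_device_event,
};

/* elsewhere: register_netdevice_notifier(&my_notifier); */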
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 29b6348c8d4d..b5249c5fd4d3 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -470,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev)
 		return -ENETDOWN;
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, dev->dev_addr);
+		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
 	}
@@ -499,7 +499,7 @@ clear_allmulti:
 	dev_set_allmulti(real_dev, -1);
 del_unicast:
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr);
+		dev_uc_del(real_dev, dev->dev_addr);
 out:
 	netif_carrier_off(dev);
 	return err;
@@ -514,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev)
 	vlan_gvrp_request_leave(dev);
 
 	dev_mc_unsync(real_dev, dev);
-	dev_unicast_unsync(real_dev, dev);
+	dev_uc_unsync(real_dev, dev);
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(real_dev, -1);
 	if (dev->flags & IFF_PROMISC)
 		dev_set_promiscuity(real_dev, -1);
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr);
+		dev_uc_del(real_dev, dev->dev_addr);
 
 	netif_carrier_off(dev);
 	return 0;
@@ -540,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 		goto out;
 
 	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, addr->sa_data);
+		err = dev_uc_add(real_dev, addr->sa_data);
 		if (err < 0)
 			return err;
 	}
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr);
+		dev_uc_del(real_dev, dev->dev_addr);
 
 out:
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -663,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
 	dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-	dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
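dev_unicast_add/delete/sync/unsync become dev_uc_add/dev_uc_del/dev_uc_sync/dev_uc_unsync, matching the dev_mc_* names now that secondary unicast and multicast addresses share the netdev_hw_addr lists. The stacked-device pattern the VLAN driver follows, in outline (get_lower_dev() is a made-up stand-in for vlan_dev_info(dev)->real_dev):

static void upper_set_rx_mode(struct net_device *upper)
{
	struct net_device *lower = get_lower_dev(upper);

	dev_mc_sync(lower, upper);	/* push multicast filters down */
	dev_uc_sync(lower, upper);	/* push secondary unicasts down */
}

static int upper_stop(struct net_device *upper)
{
	struct net_device *lower = get_lower_dev(upper);

	dev_mc_unsync(lower, upper);	/* undo what was synced */
	dev_uc_unsync(lower, upper);
	return 0;
}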
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..0d68b40fc0e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig"
 source "net/rds/Kconfig"
 source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
+source "net/l2tp/Kconfig"
 source "net/802/Kconfig"
 source "net/bridge/Kconfig"
 source "net/dsa/Kconfig"
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 
+config RPS
+	boolean
+	depends on SMP && SYSFS
+	default y
+
 menu "Network testing"
 
 config NET_PKTGEN
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig"
 
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
+source "net/caif/Kconfig"
+
 
 endif # if NET
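RPS has no prompt string, so it is an internal symbol: forced to y on any SMP && SYSFS build and never shown in menuconfig ("boolean" is just the long spelling of "bool"). Code conditional on it follows the usual Kconfig-gating pattern; a trivial sketch with illustrative names:

#include <linux/skbuff.h>

#ifdef CONFIG_RPS
static void rx_steer(struct sk_buff *skb)
{
	/* receive-steering work, compiled only when RPS is built in */
}
#else
static inline void rx_steer(struct sk_buff *skb) { }
#endif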
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7b..cb7bdc1210cb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/
 obj-$(CONFIG_SUNRPC)		+= sunrpc/
 obj-$(CONFIG_AF_RXRPC)		+= rxrpc/
 obj-$(CONFIG_ATM)		+= atm/
+obj-$(CONFIG_L2TP)		+= l2tp/
 obj-$(CONFIG_DECNET)		+= decnet/
 obj-$(CONFIG_ECONET)		+= econet/
 obj-$(CONFIG_PHONET)		+= phonet/
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
 obj-$(CONFIG_IUCV)		+= iucv/
 obj-$(CONFIG_RFKILL)		+= rfkill/
 obj-$(CONFIG_NET_9P)		+= 9p/
+obj-$(CONFIG_CAIF)		+= caif/
 ifneq ($(CONFIG_DCB),)
 obj-y += dcb/
 endif
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7b02967fbbe7..c410b93fda2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -782,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg)
 				atrtr_create(&rtdef, dev);
 			}
 		}
-		dev_mc_add(dev, aarp_mcast, 6, 1);
+		dev_mc_add_global(dev, aarp_mcast);
 		return 0;
 
 	case SIOCGIFADDR:
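In the old call dev_mc_add(dev, aarp_mcast, 6, 1) the trailing 1 marked a "global" entry; the reworked API spells that intent out in the function name. A sketch of the intended pairing (dev_mc_del_global() is assumed here as the counterpart introduced by the same API rework):

static int aarp_join(struct net_device *dev, const u8 *aarp_mcast)
{
	/* old: dev_mc_add(dev, aarp_mcast, 6, 1) - magic glbl flag */
	return dev_mc_add_global(dev, aarp_mcast);
}

static void aarp_leave(struct net_device *dev, const u8 *aarp_mcast)
{
	dev_mc_del_global(dev, aarp_mcast);
}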
diff --git a/net/atm/common.c b/net/atm/common.c
index 97ed94aa0cbc..e3e10e6f8628 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -92,7 +92,7 @@ static void vcc_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
 	if (sk_has_sleeper(sk))
-		wake_up(sk->sk_sleep);
+		wake_up(sk_sleep(sk));
 	read_unlock(&sk->sk_callback_lock);
 }
 
@@ -110,7 +110,7 @@ static void vcc_write_space(struct sock *sk)
 
 	if (vcc_writable(sk)) {
 		if (sk_has_sleeper(sk))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible(sk_sleep(sk));
 
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
@@ -549,7 +549,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 	}
 
 	eff = (size+3) & ~3; /* align to word boundary */
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	error = 0;
 	while (!(skb = alloc_tx(vcc, eff))) {
 		if (m->msg_flags & MSG_DONTWAIT) {
@@ -568,9 +568,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
 			send_sig(SIGPIPE, current, 0);
 			break;
 		}
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (error)
 		goto out;
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
@@ -595,7 +595,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct atm_vcc *vcc;
 	unsigned int mask;
 
-	sock_poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk_sleep(sk), wait);
 	mask = 0;
 
 	vcc = ATM_SD(sock);
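Every open-coded sk->sk_sleep dereference in this series becomes sk_sleep(sk); the accessor hides where the wait-queue head actually lives so it can later be moved out of struct sock without touching every protocol again. The sleep/wake idiom the ATM code uses, condensed against the accessor (data_ready() is a placeholder predicate, not a real API):

#include <linux/wait.h>
#include <net/sock.h>

static int wait_for_data(struct sock *sk)
{
	DEFINE_WAIT(wait);
	int err = 0;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	while (!data_ready(sk)) {
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		schedule();
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}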
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 696e218436e5..6262aeae398e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -407,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root);
 
 int atm_proc_dev_register(struct atm_dev *dev)
 {
-	int digits, num;
 	int error;
 
 	/* No proc info */
@@ -415,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev)
 		return 0;
 
 	error = -ENOMEM;
-	digits = 0;
-	for (num = dev->number; num; num /= 10)
-		digits++;
-	if (!digits)
-		digits++;
-
-	dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
+	dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
 	if (!dev->proc_name)
 		goto err_out;
-	sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
 
 	dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
 					   &proc_atm_dev_ops, dev);
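Seven lines of digit counting, kmalloc() and sprintf() collapse into one kasprintf() call, which computes the formatted length itself, allocates exactly that much, and returns NULL on allocation failure; the result is still released with kfree(). The same pattern in isolation:

#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_proc_name(const char *type, int number)
{
	/* one call replaces size computation + kmalloc + sprintf */
	return kasprintf(GFP_KERNEL, "%s:%d", type, number);
}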
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6ba6e466ee54..509c8ac02b63 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -131,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
 	}
 	sk->sk_ack_backlog++;
 	skb_queue_tail(&sk->sk_receive_queue, skb);
-	pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
+	pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
 	sk->sk_state_change(sk);
 as_indicate_complete:
 	release_sock(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3ba9a45a51ac..754ee4791d96 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc)
 
 	pr_debug("%p\n", vcc);
 	if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 		sigd_enq(vcc, as_close, NULL, NULL, NULL);
 		while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
 			schedule();
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_UNINTERRUPTIBLE);
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 	}
 	/* beware - socket is still in use by atmsigd until the last
 	   as_indicate has been answered */
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
 	}
 	vcc->local = *addr;
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
 	if (!sigd) {
 		error = -EUNATCH;
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 	}
 	vcc->remote = *addr;
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
 	if (flags & O_NONBLOCK) {
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		sock->state = SS_CONNECTING;
 		error = -EINPROGRESS;
 		goto out;
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
 		if (!signal_pending(current)) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			continue;
 		}
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 	 */
 	sigd_enq(vcc, as_close, NULL, NULL, NULL);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
-		prepare_to_wait(sk->sk_sleep, &wait,
+		prepare_to_wait(sk_sleep(sk), &wait,
 				TASK_INTERRUPTIBLE);
 		schedule();
 	}
 	if (!sk->sk_err)
 		while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
 		       sigd) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			schedule();
 		}
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
 			error = -EINTR;
 			break;
 		}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (error)
 		goto out;
 	if (!sigd) {
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog)
 		goto out;
 	}
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (!sigd) {
 		error = -EUNATCH;
 		goto out;
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 	while (1) {
 		DEFINE_WAIT(wait);
 
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
 		       sigd) {
 			if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 				error = -ERESTARTSYS;
 				break;
 			}
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		if (error)
 			goto out;
 		if (!skb) {
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
 		}
 		/* wait should be short, so we ignore the non-blocking flag */
 		set_bit(ATM_VF_WAITING, &new_vcc->flags);
-		prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+		prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
 				TASK_UNINTERRUPTIBLE);
 		sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
 		while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
 			release_sock(sk);
 			schedule();
 			lock_sock(sk);
-			prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
 					TASK_UNINTERRUPTIBLE);
 		}
-		finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
 		if (!sigd) {
 			error = -EUNATCH;
 			goto out;
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
 	DEFINE_WAIT(wait);
 
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
 	       !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (!sigd)
 		return -EUNATCH;
 	return -sk->sk_err;
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
 
 	lock_sock(sk);
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	sigd_enq(vcc, as_addparty, NULL, NULL,
 		 (struct sockaddr_atmsvc *) sockaddr);
 	if (flags & O_NONBLOCK) {
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 		error = -EINPROGRESS;
 		goto out;
 	}
 	pr_debug("added wait queue\n");
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	error = xchg(&sk->sk_err_soft, 0);
 out:
 	release_sock(sk);
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
 
 	lock_sock(sk);
 	set_bit(ATM_VF_WAITING, &vcc->flags);
-	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
 	while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
 		schedule();
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 	}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 	if (!sigd) {
 		error = -EUNATCH;
 		goto out;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 65c5801261f9..cfdfd7e2a172 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1281,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock,
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait(sk->sk_sleep, &wait,
+			prepare_to_wait(sk_sleep(sk), &wait,
 					TASK_INTERRUPTIBLE);
 			if (sk->sk_state != TCP_SYN_SENT)
 				break;
@@ -1294,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock,
 			err = -ERESTARTSYS;
 			break;
 		}
-		finish_wait(sk->sk_sleep, &wait);
+		finish_wait(sk_sleep(sk), &wait);
 
 		if (err)
 			goto out_release;
@@ -1346,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
 	 *	hooked into the SABM we saved
 	 */
 	for (;;) {
-		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		if (skb)
 			break;
@@ -1364,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
 			err = -ERESTARTSYS;
 			break;
 		}
-	finish_wait(sk->sk_sleep, &wait);
+	finish_wait(sk_sleep(sk), &wait);
 
 	if (err)
 		goto out;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 404a8500fd03..421c45bd1b95 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -288,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
 	if (sk->sk_state == BT_LISTEN)
 		return bt_accept_poll(sk);
@@ -378,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 
 	BT_DBG("sk %p", sk);
 
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	while (sk->sk_state != state) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -401,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 		break;
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return err;
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 8062dad6d10d..f10b41fb05a0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -474,7 +474,7 @@ static int bnep_session(void *arg)
 	set_user_nice(current, -15);
 
 	init_waitqueue_entry(&wait, current);
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	while (!atomic_read(&s->killed)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -496,7 +496,7 @@ static int bnep_session(void *arg)
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	/* Cleanup session */
 	down_write(&bnep_session_sem);
@@ -507,7 +507,7 @@ static int bnep_session(void *arg)
 	/* Wakeup user-space polling for socket errors */
 	s->sock->sk->sk_err = EUNATCH;
 
-	wake_up_interruptible(s->sock->sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(s->sock->sk));
 
 	/* Release the socket */
 	fput(s->sock->file);
@@ -638,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 
 		/* Kill session thread */
 		atomic_inc(&s->killed);
-		wake_up_interruptible(s->sock->sk->sk_sleep);
+		wake_up_interruptible(sk_sleep(s->sock->sk));
 	} else
 		err = -ENOENT;
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 5643a2391e76..0faad5ce6dc4 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -88,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 		memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
 		r->len = htons(ETH_ALEN * 2);
 	} else {
-		struct dev_mc_list *dmi = dev->mc_list;
+		struct netdev_hw_addr *ha;
 		int i, len = skb->len;
 
 		if (dev->flags & IFF_BROADCAST) {
@@ -98,18 +98,18 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 
 		/* FIXME: We should group addresses here. */
 
-		for (i = 0;
-		     i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
-		     i++) {
-			memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
-			memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
-			dmi = dmi->next;
+		i = 0;
+		netdev_for_each_mc_addr(ha, dev) {
+			if (i == BNEP_MAX_MULTICAST_FILTERS)
+				break;
+			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
 		}
 		r->len = htons(skb->len - len);
 	}
 
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	wake_up_interruptible(sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(sk));
 #endif
 }
 
@@ -193,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
 	/*
 	 * We cannot send L2CAP packets from here as we are potentially in a bh.
 	 * So we have to queue them and wake up session thread which is sleeping
-	 * on the sk->sk_sleep.
+	 * on the sk_sleep(sk).
 	 */
 	dev->trans_start = jiffies;
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	wake_up_interruptible(sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(sk));
 
 	if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
 		BT_DBG("tx queue is full");
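struct dev_mc_list and the hand-rolled dmi = dmi->next walk are replaced by the generic netdev_hw_addr list and its netdev_for_each_mc_addr() iterator. One quirk worth flagging in the converted loop above: i is initialized and compared against BNEP_MAX_MULTICAST_FILTERS but never incremented, so the cap is not actually enforced. A bounded walk would look like this sketch (MAX_FILTERS and use_addr() are illustrative):

	struct netdev_hw_addr *ha;
	int i = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (i++ == MAX_FILTERS)
			break;
		use_addr(ha->addr);	/* consume one filter slot */
	}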
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index e4663aa14d26..785e79e953c5 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
 {
 	struct sock *sk = session->sock->sk;
 
-	wake_up_interruptible(sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(sk));
 }
 
 /* CMTP init defines */
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0073ec8495da..d4c6af082d48 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
 	set_user_nice(current, -15);
 
 	init_waitqueue_entry(&wait, current);
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	while (!atomic_read(&session->terminate)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	down_write(&cmtp_session_sem);
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 280529ad9274..bfe641b7dfaf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -561,8 +561,8 @@ static int hidp_session(void *arg)
 
 	init_waitqueue_entry(&ctrl_wait, current);
 	init_waitqueue_entry(&intr_wait, current);
-	add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
-	add_wait_queue(intr_sk->sk_sleep, &intr_wait);
+	add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
+	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	while (!atomic_read(&session->terminate)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -584,8 +584,8 @@ static int hidp_session(void *arg)
 		schedule();
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(intr_sk->sk_sleep, &intr_wait);
-	remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait);
+	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
+	remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
 
 	down_write(&hidp_session_sem);
 
@@ -609,7 +609,7 @@ static int hidp_session(void *arg)
 
 	fput(session->intr_sock->file);
 
-	wait_event_timeout(*(ctrl_sk->sk_sleep),
+	wait_event_timeout(*(sk_sleep(ctrl_sk)),
 			(ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
 
 	fput(session->ctrl_sock->file);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index a4e215d50c10..8d934a19da0a 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
 	struct sock *ctrl_sk = session->ctrl_sock->sk;
 	struct sock *intr_sk = session->intr_sock->sk;
 
-	wake_up_interruptible(ctrl_sk->sk_sleep);
-	wake_up_interruptible(intr_sk->sk_sleep);
+	wake_up_interruptible(sk_sleep(ctrl_sk));
+	wake_up_interruptible(sk_sleep(intr_sk));
 }
 
 /* HIDP init defines */
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 99d68c34e4f1..c1e60eed5a97 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1147,7 +1147,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -1170,7 +1170,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 		}
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8ed3c37684fa..43fbf6b4b4bf 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -503,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -526,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
 		}
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;
@@ -621,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
-	add_wait_queue(sk->sk_sleep, &wait);
+	add_wait_queue(sk_sleep(sk), &wait);
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -640,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 	}
 
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return timeo;
 }
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ca6b2ad1c3fc..b406d3eff53a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -567,7 +567,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
 	BT_DBG("sk %p timeo %ld", sk, timeo);
 
 	/* Wait for an incoming connection. (wake-one). */
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	while (!(ch = bt_accept_dequeue(sk, newsock))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!timeo) {
@@ -590,7 +590,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
 		}
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 
 	if (err)
 		goto done;
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index d115d5cea5b6..9190ae462cb4 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -33,14 +33,14 @@ config BRIDGE
 	  If unsure, say N.
 
 config BRIDGE_IGMP_SNOOPING
-	bool "IGMP snooping"
+	bool "IGMP/MLD snooping"
 	depends on BRIDGE
 	depends on INET
 	default y
 	---help---
 	  If you say Y here, then the Ethernet bridge will be able to selectively
-	  forward multicast traffic based on IGMP traffic received from each
-	  port.
+	  forward multicast traffic based on IGMP/MLD traffic received from
+	  each port.
 
 	  Say N to exclude this support and reduce the binary size.
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90a9024e5c1e..5b8a6e73b02f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -26,11 +26,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	const unsigned char *dest = skb->data;
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
+	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
-	BR_INPUT_SKB_CB(skb)->brdev = dev;
+	brstats->tx_packets++;
+	brstats->tx_bytes += skb->len;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
@@ -81,6 +82,31 @@ static int br_dev_stop(struct net_device *dev)
 	return 0;
 }
 
+static struct net_device_stats *br_get_stats(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct br_cpu_netstats sum = { 0 };
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		const struct br_cpu_netstats *bstats
+			= per_cpu_ptr(br->stats, cpu);
+
+		sum.tx_bytes   += bstats->tx_bytes;
+		sum.tx_packets += bstats->tx_packets;
+		sum.rx_bytes   += bstats->rx_bytes;
+		sum.rx_packets += bstats->rx_packets;
+	}
+
+	stats->tx_bytes   = sum.tx_bytes;
+	stats->tx_packets = sum.tx_packets;
+	stats->rx_bytes   = sum.rx_bytes;
+	stats->rx_packets = sum.rx_packets;
+
+	return stats;
+}
+
 static int br_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -180,19 +206,28 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_open		 = br_dev_open,
 	.ndo_stop		 = br_dev_stop,
 	.ndo_start_xmit		 = br_dev_xmit,
+	.ndo_get_stats		 = br_get_stats,
 	.ndo_set_mac_address	 = br_set_mac_address,
 	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
 	.ndo_change_mtu		 = br_change_mtu,
 	.ndo_do_ioctl		 = br_dev_ioctl,
 };
 
+static void br_dev_free(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	free_percpu(br->stats);
+	free_netdev(dev);
+}
+
 void br_dev_setup(struct net_device *dev)
 {
 	random_ether_addr(dev->dev_addr);
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->destructor = br_dev_free;
 	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
 	dev->tx_queue_len = 0;
 	dev->priv_flags = IFF_EBRIDGE;
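The bridge device now counts traffic in per-CPU buffers: the hot paths bump this_cpu_ptr(br->stats) with no locking and no shared cachelines, and the counters are folded together only when ->ndo_get_stats runs. The whole lifecycle in miniature (a sketch; all names are illustrative):

#include <linux/percpu.h>

struct cpu_counters {
	unsigned long packets;
	unsigned long bytes;
};

static struct cpu_counters __percpu *counters;	/* alloc_percpu(struct cpu_counters) */

static void count_packet(unsigned int len)
{
	struct cpu_counters *c = this_cpu_ptr(counters);	/* this CPU's slot */

	c->packets++;
	c->bytes += len;
}

static unsigned long fold_packets(void)
{
	unsigned long sum = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu)	/* walk every CPU's slot */
		sum += per_cpu_ptr(counters, cpu)->packets;
	return sum;
}

/* teardown: free_percpu(counters), as br_dev_free() does for br->stats */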
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0b6b1f2ff7ac..521439333316 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -186,6 +186,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
 	br = netdev_priv(dev);
 	br->dev = dev;
 
+	br->stats = alloc_percpu(struct br_cpu_netstats);
+	if (!br->stats) {
+		free_netdev(dev);
+		return NULL;
+	}
+
 	spin_lock_init(&br->lock);
 	INIT_LIST_HEAD(&br->port_list);
 	spin_lock_init(&br->hash_lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a82dde2d2ead..e7f4c1d02f57 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -24,9 +24,11 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 static int br_pass_frame_up(struct sk_buff *skb)
 {
 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
+	struct net_bridge *br = netdev_priv(brdev);
+	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
-	brdev->stats.rx_packets++;
-	brdev->stats.rx_bytes += skb->len;
+	brstats->rx_packets++;
+	brstats->rx_bytes += skb->len;
 
 	indev = skb->dev;
 	skb->dev = brdev;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eaa0e1bae49b..2048ef0f9be5 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,51 +24,139 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+#endif
 
 #include "br_private.h"
 
-static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
 {
-	return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
+	if (ipv6_addr_is_multicast(addr) &&
+	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+		return 1;
+	return 0;
+}
+#endif
+
+static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
+{
+	if (a->proto != b->proto)
+		return 0;
+	switch (a->proto) {
+	case htons(ETH_P_IP):
+		return a->u.ip4 == b->u.ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
+#endif
+	}
+	return 0;
+}
+
+static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+{
+	return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
+				const struct in6_addr *ip)
+{
+	return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1);
+}
+#endif
+
+static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
+			     struct br_ip *ip)
+{
+	switch (ip->proto) {
+	case htons(ETH_P_IP):
+		return __br_ip4_hash(mdb, ip->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return __br_ip6_hash(mdb, &ip->u.ip6);
+#endif
+	}
+	return 0;
 }
 
 static struct net_bridge_mdb_entry *__br_mdb_ip_get(
-	struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct hlist_node *p;
 
 	hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
-		if (dst == mp->addr)
+		if (br_ip_equal(&mp->addr, dst))
 			return mp;
 	}
 
 	return NULL;
 }
 
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
+static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	struct net_bridge_mdb_htable *mdb, __be32 dst)
 {
-	if (!mdb)
-		return NULL;
+	struct br_ip br_dst;
 
+	br_dst.u.ip4 = dst;
+	br_dst.proto = htons(ETH_P_IP);
+
+	return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct net_bridge_mdb_entry *br_mdb_ip6_get(
+	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
+{
+	struct br_ip br_dst;
+
+	ipv6_addr_copy(&br_dst.u.ip6, dst);
+	br_dst.proto = htons(ETH_P_IPV6);
+
+	return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+}
+#endif
+
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
 	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
 }
 
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
-	if (br->multicast_disabled)
+	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct br_ip ip;
+
+	if (!mdb || br->multicast_disabled)
+		return NULL;
+
+	if (BR_INPUT_SKB_CB(skb)->igmp)
 		return NULL;
 
+	ip.proto = skb->protocol;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		if (BR_INPUT_SKB_CB(skb)->igmp)
-			break;
-		return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
+		ip.u.ip4 = ip_hdr(skb)->daddr;
+		break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+		break;
+#endif
+	default:
+		return NULL;
 	}
 
-	return NULL;
+	return br_mdb_ip_get(mdb, &ip);
 }
 
 static void br_mdb_free(struct rcu_head *head)
@@ -95,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, | |||
95 | for (i = 0; i < old->max; i++) | 183 | for (i = 0; i < old->max; i++) |
96 | hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) | 184 | hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) |
97 | hlist_add_head(&mp->hlist[new->ver], | 185 | hlist_add_head(&mp->hlist[new->ver], |
98 | &new->mhash[br_ip_hash(new, mp->addr)]); | 186 | &new->mhash[br_ip_hash(new, &mp->addr)]); |
99 | 187 | ||
100 | if (!elasticity) | 188 | if (!elasticity) |
101 | return 0; | 189 | return 0; |
@@ -163,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
163 | struct net_bridge_port_group *p; | 251 | struct net_bridge_port_group *p; |
164 | struct net_bridge_port_group **pp; | 252 | struct net_bridge_port_group **pp; |
165 | 253 | ||
166 | mp = br_mdb_ip_get(mdb, pg->addr); | 254 | mp = br_mdb_ip_get(mdb, &pg->addr); |
167 | if (WARN_ON(!mp)) | 255 | if (WARN_ON(!mp)) |
168 | return; | 256 | return; |
169 | 257 | ||
@@ -249,8 +337,8 @@ out: | |||
249 | return 0; | 337 | return 0; |
250 | } | 338 | } |
251 | 339 | ||
252 | static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, | 340 | static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, |
253 | __be32 group) | 341 | __be32 group) |
254 | { | 342 | { |
255 | struct sk_buff *skb; | 343 | struct sk_buff *skb; |
256 | struct igmphdr *ih; | 344 | struct igmphdr *ih; |
@@ -314,12 +402,104 @@ out: | |||
314 | return skb; | 402 | return skb; |
315 | } | 403 | } |
316 | 404 | ||
405 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
406 | static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | ||
407 | struct in6_addr *group) | ||
408 | { | ||
409 | struct sk_buff *skb; | ||
410 | struct ipv6hdr *ip6h; | ||
411 | struct mld_msg *mldq; | ||
412 | struct ethhdr *eth; | ||
413 | u8 *hopopt; | ||
414 | unsigned long interval; | ||
415 | |||
416 | skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + | ||
417 | 8 + sizeof(*mldq)); | ||
418 | if (!skb) | ||
419 | goto out; | ||
420 | |||
421 | skb->protocol = htons(ETH_P_IPV6); | ||
422 | |||
423 | /* Ethernet header */ | ||
424 | skb_reset_mac_header(skb); | ||
425 | eth = eth_hdr(skb); | ||
426 | |||
427 | memcpy(eth->h_source, br->dev->dev_addr, 6); | ||
428 | ipv6_eth_mc_map(group, eth->h_dest); | ||
429 | eth->h_proto = htons(ETH_P_IPV6); | ||
430 | skb_put(skb, sizeof(*eth)); | ||
431 | |||
432 | /* IPv6 header + HbH option */ | ||
433 | skb_set_network_header(skb, skb->len); | ||
434 | ip6h = ipv6_hdr(skb); | ||
435 | |||
436 | *(__force __be32 *)ip6h = htonl(0x60000000); | ||
437 | ip6h->payload_len = htons(8 + sizeof(*mldq)); | ||
438 | ip6h->nexthdr = IPPROTO_HOPOPTS; | ||
439 | ip6h->hop_limit = 1; | ||
440 | ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); | ||
441 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); | ||
442 | |||
443 | hopopt = (u8 *)(ip6h + 1); | ||
444 | hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ | ||
445 | hopopt[1] = 0; /* length of HbH */ | ||
446 | hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ | ||
447 | hopopt[3] = 2; /* Length of RA Option */ | ||
448 | hopopt[4] = 0; /* Type = 0x0000 (MLD) */ | ||
449 | hopopt[5] = 0; | ||
450 | hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ | ||
451 | hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ | ||
452 | |||
453 | skb_put(skb, sizeof(*ip6h) + 8); | ||
454 | |||
455 | /* ICMPv6 */ | ||
456 | skb_set_transport_header(skb, skb->len); | ||
457 | mldq = (struct mld_msg *) icmp6_hdr(skb); | ||
458 | |||
459 | interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : | ||
460 | br->multicast_query_response_interval; | ||
461 | |||
462 | mldq->mld_type = ICMPV6_MGM_QUERY; | ||
463 | mldq->mld_code = 0; | ||
464 | mldq->mld_cksum = 0; | ||
465 | mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); | ||
466 | mldq->mld_reserved = 0; | ||
467 | ipv6_addr_copy(&mldq->mld_mca, group); | ||
468 | |||
469 | /* checksum */ | ||
470 | mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, | ||
471 | sizeof(*mldq), IPPROTO_ICMPV6, | ||
472 | csum_partial(mldq, | ||
473 | sizeof(*mldq), 0)); | ||
474 | skb_put(skb, sizeof(*mldq)); | ||
475 | |||
476 | __skb_pull(skb, sizeof(*eth)); | ||
477 | |||
478 | out: | ||
479 | return skb; | ||
480 | } | ||
481 | #endif | ||
482 | |||
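The allocation in br_ip6_multicast_alloc_query() sizes the query frame exactly; a worked breakdown of the layout it builds, assuming struct mld_msg is the 24-byte MLDv1-format message:

	/*
	 * 14 bytes  Ethernet header          (sizeof(*eth))
	 * 40 bytes  IPv6 header              (sizeof(*ip6h))
	 *  8 bytes  Hop-by-Hop ext. header   carrying the Router Alert option
	 * 24 bytes  MLD query                (sizeof(*mldq))
	 *
	 * payload_len therefore covers 8 + 24 = 32 bytes, and must be stored
	 * in network byte order (hence the htons() above).
	 */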
483 | static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, | ||
484 | struct br_ip *addr) | ||
485 | { | ||
486 | switch (addr->proto) { | ||
487 | case htons(ETH_P_IP): | ||
488 | return br_ip4_multicast_alloc_query(br, addr->u.ip4); | ||
489 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
490 | case htons(ETH_P_IPV6): | ||
491 | return br_ip6_multicast_alloc_query(br, &addr->u.ip6); | ||
492 | #endif | ||
493 | } | ||
494 | return NULL; | ||
495 | } | ||
496 | |||
317 | static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) | 497 | static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) |
318 | { | 498 | { |
319 | struct net_bridge *br = mp->br; | 499 | struct net_bridge *br = mp->br; |
320 | struct sk_buff *skb; | 500 | struct sk_buff *skb; |
321 | 501 | ||
322 | skb = br_multicast_alloc_query(br, mp->addr); | 502 | skb = br_multicast_alloc_query(br, &mp->addr); |
323 | if (!skb) | 503 | if (!skb) |
324 | goto timer; | 504 | goto timer; |
325 | 505 | ||
@@ -353,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg) | |||
353 | struct net_bridge *br = port->br; | 533 | struct net_bridge *br = port->br; |
354 | struct sk_buff *skb; | 534 | struct sk_buff *skb; |
355 | 535 | ||
356 | skb = br_multicast_alloc_query(br, pg->addr); | 536 | skb = br_multicast_alloc_query(br, &pg->addr); |
357 | if (!skb) | 537 | if (!skb) |
358 | goto timer; | 538 | goto timer; |
359 | 539 | ||
@@ -383,8 +563,8 @@ out: | |||
383 | } | 563 | } |
384 | 564 | ||
385 | static struct net_bridge_mdb_entry *br_multicast_get_group( | 565 | static struct net_bridge_mdb_entry *br_multicast_get_group( |
386 | struct net_bridge *br, struct net_bridge_port *port, __be32 group, | 566 | struct net_bridge *br, struct net_bridge_port *port, |
387 | int hash) | 567 | struct br_ip *group, int hash) |
388 | { | 568 | { |
389 | struct net_bridge_mdb_htable *mdb = br->mdb; | 569 | struct net_bridge_mdb_htable *mdb = br->mdb; |
390 | struct net_bridge_mdb_entry *mp; | 570 | struct net_bridge_mdb_entry *mp; |
@@ -396,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( | |||
396 | 576 | ||
397 | hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { | 577 | hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { |
398 | count++; | 578 | count++; |
399 | if (unlikely(group == mp->addr)) { | 579 | if (unlikely(br_ip_equal(group, &mp->addr))) |
400 | return mp; | 580 | return mp; |
401 | } | ||
402 | } | 581 | } |
403 | 582 | ||
404 | elasticity = 0; | 583 | elasticity = 0; |
@@ -463,7 +642,8 @@ err: | |||
463 | } | 642 | } |
464 | 643 | ||
465 | static struct net_bridge_mdb_entry *br_multicast_new_group( | 644 | static struct net_bridge_mdb_entry *br_multicast_new_group( |
466 | struct net_bridge *br, struct net_bridge_port *port, __be32 group) | 645 | struct net_bridge *br, struct net_bridge_port *port, |
646 | struct br_ip *group) | ||
467 | { | 647 | { |
468 | struct net_bridge_mdb_htable *mdb = br->mdb; | 648 | struct net_bridge_mdb_htable *mdb = br->mdb; |
469 | struct net_bridge_mdb_entry *mp; | 649 | struct net_bridge_mdb_entry *mp; |
@@ -496,7 +676,7 @@ rehash: | |||
496 | goto out; | 676 | goto out; |
497 | 677 | ||
498 | mp->br = br; | 678 | mp->br = br; |
499 | mp->addr = group; | 679 | mp->addr = *group; |
500 | setup_timer(&mp->timer, br_multicast_group_expired, | 680 | setup_timer(&mp->timer, br_multicast_group_expired, |
501 | (unsigned long)mp); | 681 | (unsigned long)mp); |
502 | setup_timer(&mp->query_timer, br_multicast_group_query_expired, | 682 | setup_timer(&mp->query_timer, br_multicast_group_query_expired, |
@@ -510,7 +690,8 @@ out: | |||
510 | } | 690 | } |
511 | 691 | ||
512 | static int br_multicast_add_group(struct net_bridge *br, | 692 | static int br_multicast_add_group(struct net_bridge *br, |
513 | struct net_bridge_port *port, __be32 group) | 693 | struct net_bridge_port *port, |
694 | struct br_ip *group) | ||
514 | { | 695 | { |
515 | struct net_bridge_mdb_entry *mp; | 696 | struct net_bridge_mdb_entry *mp; |
516 | struct net_bridge_port_group *p; | 697 | struct net_bridge_port_group *p; |
@@ -518,9 +699,6 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
518 | unsigned long now = jiffies; | 699 | unsigned long now = jiffies; |
519 | int err; | 700 | int err; |
520 | 701 | ||
521 | if (ipv4_is_local_multicast(group)) | ||
522 | return 0; | ||
523 | |||
524 | spin_lock(&br->multicast_lock); | 702 | spin_lock(&br->multicast_lock); |
525 | if (!netif_running(br->dev) || | 703 | if (!netif_running(br->dev) || |
526 | (port && port->state == BR_STATE_DISABLED)) | 704 | (port && port->state == BR_STATE_DISABLED)) |
@@ -549,7 +727,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
549 | if (unlikely(!p)) | 727 | if (unlikely(!p)) |
550 | goto err; | 728 | goto err; |
551 | 729 | ||
552 | p->addr = group; | 730 | p->addr = *group; |
553 | p->port = port; | 731 | p->port = port; |
554 | p->next = *pp; | 732 | p->next = *pp; |
555 | hlist_add_head(&p->mglist, &port->mglist); | 733 | hlist_add_head(&p->mglist, &port->mglist); |
@@ -570,6 +748,38 @@ err: | |||
570 | return err; | 748 | return err; |
571 | } | 749 | } |
572 | 750 | ||
751 | static int br_ip4_multicast_add_group(struct net_bridge *br, | ||
752 | struct net_bridge_port *port, | ||
753 | __be32 group) | ||
754 | { | ||
755 | struct br_ip br_group; | ||
756 | |||
757 | if (ipv4_is_local_multicast(group)) | ||
758 | return 0; | ||
759 | |||
760 | br_group.u.ip4 = group; | ||
761 | br_group.proto = htons(ETH_P_IP); | ||
762 | |||
763 | return br_multicast_add_group(br, port, &br_group); | ||
764 | } | ||
765 | |||
766 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
767 | static int br_ip6_multicast_add_group(struct net_bridge *br, | ||
768 | struct net_bridge_port *port, | ||
769 | const struct in6_addr *group) | ||
770 | { | ||
771 | struct br_ip br_group; | ||
772 | |||
773 | if (ipv6_is_local_multicast(group)) | ||
774 | return 0; | ||
775 | |||
776 | ipv6_addr_copy(&br_group.u.ip6, group); | ||
777 | br_group.proto = htons(ETH_P_IPV6); | ||
778 | |||
779 | return br_multicast_add_group(br, port, &br_group); | ||
780 | } | ||
781 | #endif | ||
782 | |||
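ipv6_is_local_multicast() is not visible in this excerpt. A hypothetical implementation with the intended semantics, the IPv6 counterpart of ipv4_is_local_multicast(): groups of link-local scope or narrower are never snooped, only flooded. The scope helpers are assumed to come from <net/ipv6.h> and <net/addrconf.h>:

	static inline bool ipv6_is_local_multicast(const struct in6_addr *addr)
	{
		/* ff02::/16 and narrower scopes stay flooded, like 224.0.0.0/24. */
		return ipv6_addr_is_multicast(addr) &&
		       IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL;
	}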
573 | static void br_multicast_router_expired(unsigned long data) | 783 | static void br_multicast_router_expired(unsigned long data) |
574 | { | 784 | { |
575 | struct net_bridge_port *port = (void *)data; | 785 | struct net_bridge_port *port = (void *)data; |
@@ -591,19 +801,15 @@ static void br_multicast_local_router_expired(unsigned long data) | |||
591 | { | 801 | { |
592 | } | 802 | } |
593 | 803 | ||
594 | static void br_multicast_send_query(struct net_bridge *br, | 804 | static void __br_multicast_send_query(struct net_bridge *br, |
595 | struct net_bridge_port *port, u32 sent) | 805 | struct net_bridge_port *port, |
806 | struct br_ip *ip) | ||
596 | { | 807 | { |
597 | unsigned long time; | ||
598 | struct sk_buff *skb; | 808 | struct sk_buff *skb; |
599 | 809 | ||
600 | if (!netif_running(br->dev) || br->multicast_disabled || | 810 | skb = br_multicast_alloc_query(br, ip); |
601 | timer_pending(&br->multicast_querier_timer)) | ||
602 | return; | ||
603 | |||
604 | skb = br_multicast_alloc_query(br, 0); | ||
605 | if (!skb) | 811 | if (!skb) |
606 | goto timer; | 812 | return; |
607 | 813 | ||
608 | if (port) { | 814 | if (port) { |
609 | __skb_push(skb, sizeof(struct ethhdr)); | 815 | __skb_push(skb, sizeof(struct ethhdr)); |
@@ -612,8 +818,28 @@ static void br_multicast_send_query(struct net_bridge *br, | |||
612 | dev_queue_xmit); | 818 | dev_queue_xmit); |
613 | } else | 819 | } else |
614 | netif_rx(skb); | 820 | netif_rx(skb); |
821 | } | ||
822 | |||
823 | static void br_multicast_send_query(struct net_bridge *br, | ||
824 | struct net_bridge_port *port, u32 sent) | ||
825 | { | ||
826 | unsigned long time; | ||
827 | struct br_ip br_group; | ||
828 | |||
829 | if (!netif_running(br->dev) || br->multicast_disabled || | ||
830 | timer_pending(&br->multicast_querier_timer)) | ||
831 | return; | ||
832 | |||
833 | memset(&br_group.u, 0, sizeof(br_group.u)); | ||
834 | |||
835 | br_group.proto = htons(ETH_P_IP); | ||
836 | __br_multicast_send_query(br, port, &br_group); | ||
837 | |||
838 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
839 | br_group.proto = htons(ETH_P_IPV6); | ||
840 | __br_multicast_send_query(br, port, &br_group); | ||
841 | #endif | ||
615 | 842 | ||
616 | timer: | ||
617 | time = jiffies; | 843 | time = jiffies; |
618 | time += sent < br->multicast_startup_query_count ? | 844 | time += sent < br->multicast_startup_query_count ? |
619 | br->multicast_startup_query_interval : | 845 | br->multicast_startup_query_interval : |
@@ -698,9 +924,9 @@ void br_multicast_disable_port(struct net_bridge_port *port) | |||
698 | spin_unlock(&br->multicast_lock); | 924 | spin_unlock(&br->multicast_lock); |
699 | } | 925 | } |
700 | 926 | ||
701 | static int br_multicast_igmp3_report(struct net_bridge *br, | 927 | static int br_ip4_multicast_igmp3_report(struct net_bridge *br, |
702 | struct net_bridge_port *port, | 928 | struct net_bridge_port *port, |
703 | struct sk_buff *skb) | 929 | struct sk_buff *skb) |
704 | { | 930 | { |
705 | struct igmpv3_report *ih; | 931 | struct igmpv3_report *ih; |
706 | struct igmpv3_grec *grec; | 932 | struct igmpv3_grec *grec; |
@@ -745,7 +971,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
745 | continue; | 971 | continue; |
746 | } | 972 | } |
747 | 973 | ||
748 | err = br_multicast_add_group(br, port, group); | 974 | err = br_ip4_multicast_add_group(br, port, group); |
749 | if (err) | 975 | if (err) |
750 | break; | 976 | break; |
751 | } | 977 | } |
@@ -753,6 +979,66 @@ static int br_multicast_igmp3_report(struct net_bridge *br, | |||
753 | return err; | 979 | return err; |
754 | } | 980 | } |
755 | 981 | ||
982 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
983 | static int br_ip6_multicast_mld2_report(struct net_bridge *br, | ||
984 | struct net_bridge_port *port, | ||
985 | struct sk_buff *skb) | ||
986 | { | ||
987 | struct icmp6hdr *icmp6h; | ||
988 | struct mld2_grec *grec; | ||
989 | int i; | ||
990 | int len; | ||
991 | int num; | ||
992 | int err = 0; | ||
993 | |||
994 | if (!pskb_may_pull(skb, sizeof(*icmp6h))) | ||
995 | return -EINVAL; | ||
996 | |||
997 | icmp6h = icmp6_hdr(skb); | ||
998 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); | ||
999 | len = sizeof(*icmp6h); | ||
1000 | |||
1001 | for (i = 0; i < num; i++) { | ||
1002 | __be16 *nsrcs, _nsrcs; | ||
1003 | |||
1004 | nsrcs = skb_header_pointer(skb, | ||
1005 | len + offsetof(struct mld2_grec, | ||
1006 | grec_nsrcs), | ||
1007 | sizeof(_nsrcs), &_nsrcs); | ||
1008 | if (!nsrcs) | ||
1009 | return -EINVAL; | ||
1010 | |||
1011 | if (!pskb_may_pull(skb, | ||
1012 | len + sizeof(*grec) + | ||
1013 | sizeof(struct in6_addr) * ntohs(*nsrcs))) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | grec = (struct mld2_grec *)(skb->data + len); | ||
1017 | len += sizeof(*grec) + sizeof(struct in6_addr) * ntohs(*nsrcs); | ||
1018 | |||
1019 | /* We treat these as MLDv1 reports for now. */ | ||
1020 | switch (grec->grec_type) { | ||
1021 | case MLD2_MODE_IS_INCLUDE: | ||
1022 | case MLD2_MODE_IS_EXCLUDE: | ||
1023 | case MLD2_CHANGE_TO_INCLUDE: | ||
1024 | case MLD2_CHANGE_TO_EXCLUDE: | ||
1025 | case MLD2_ALLOW_NEW_SOURCES: | ||
1026 | case MLD2_BLOCK_OLD_SOURCES: | ||
1027 | break; | ||
1028 | |||
1029 | default: | ||
1030 | continue; | ||
1031 | } | ||
1032 | |||
1033 | err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); | ||
1034 | if (err) | ||
1035 | break; | ||
1036 | } | ||
1037 | |||
1038 | return err; | ||
1039 | } | ||
1040 | #endif | ||
1041 | |||
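The offset arithmetic above walks MLDv2 group records; for reference, the wire layout those offsets index into (this mirrors the record format of RFC 3810, shown here for convenience):

	struct mld2_grec {
		__u8		grec_type;	/* record type (MODE_IS_*, CHANGE_TO_*, ...) */
		__u8		grec_auxwords;	/* auxiliary data, in 32-bit words */
		__be16		grec_nsrcs;	/* number of source addresses */
		struct in6_addr	grec_mca;	/* the multicast group address */
		struct in6_addr	grec_src[0];	/* grec_nsrcs source addresses */
	};

grec_nsrcs sits at offset 2, which is why the skb_header_pointer() call above must use offsetof(struct mld2_grec, grec_nsrcs), and why the count needs ntohs() before it scales the record length.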
756 | static void br_multicast_add_router(struct net_bridge *br, | 1042 | static void br_multicast_add_router(struct net_bridge *br, |
757 | struct net_bridge_port *port) | 1043 | struct net_bridge_port *port) |
758 | { | 1044 | { |
@@ -800,7 +1086,7 @@ timer: | |||
800 | 1086 | ||
801 | static void br_multicast_query_received(struct net_bridge *br, | 1087 | static void br_multicast_query_received(struct net_bridge *br, |
802 | struct net_bridge_port *port, | 1088 | struct net_bridge_port *port, |
803 | __be32 saddr) | 1089 | int saddr) |
804 | { | 1090 | { |
805 | if (saddr) | 1091 | if (saddr) |
806 | mod_timer(&br->multicast_querier_timer, | 1092 | mod_timer(&br->multicast_querier_timer, |
@@ -811,9 +1097,9 @@ static void br_multicast_query_received(struct net_bridge *br, | |||
811 | br_multicast_mark_router(br, port); | 1097 | br_multicast_mark_router(br, port); |
812 | } | 1098 | } |
813 | 1099 | ||
814 | static int br_multicast_query(struct net_bridge *br, | 1100 | static int br_ip4_multicast_query(struct net_bridge *br, |
815 | struct net_bridge_port *port, | 1101 | struct net_bridge_port *port, |
816 | struct sk_buff *skb) | 1102 | struct sk_buff *skb) |
817 | { | 1103 | { |
818 | struct iphdr *iph = ip_hdr(skb); | 1104 | struct iphdr *iph = ip_hdr(skb); |
819 | struct igmphdr *ih = igmp_hdr(skb); | 1105 | struct igmphdr *ih = igmp_hdr(skb); |
@@ -831,7 +1117,7 @@ static int br_multicast_query(struct net_bridge *br, | |||
831 | (port && port->state == BR_STATE_DISABLED)) | 1117 | (port && port->state == BR_STATE_DISABLED)) |
832 | goto out; | 1118 | goto out; |
833 | 1119 | ||
834 | br_multicast_query_received(br, port, iph->saddr); | 1120 | br_multicast_query_received(br, port, !!iph->saddr); |
835 | 1121 | ||
836 | group = ih->group; | 1122 | group = ih->group; |
837 | 1123 | ||
@@ -859,7 +1145,7 @@ static int br_multicast_query(struct net_bridge *br, | |||
859 | if (!group) | 1145 | if (!group) |
860 | goto out; | 1146 | goto out; |
861 | 1147 | ||
862 | mp = br_mdb_ip_get(br->mdb, group); | 1148 | mp = br_mdb_ip4_get(br->mdb, group); |
863 | if (!mp) | 1149 | if (!mp) |
864 | goto out; | 1150 | goto out; |
865 | 1151 | ||
@@ -883,9 +1169,78 @@ out: | |||
883 | return err; | 1169 | return err; |
884 | } | 1170 | } |
885 | 1171 | ||
1172 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1173 | static int br_ip6_multicast_query(struct net_bridge *br, | ||
1174 | struct net_bridge_port *port, | ||
1175 | struct sk_buff *skb) | ||
1176 | { | ||
1177 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | ||
1178 | struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); | ||
1179 | struct net_bridge_mdb_entry *mp; | ||
1180 | struct mld2_query *mld2q; | ||
1181 | struct net_bridge_port_group *p, **pp; | ||
1182 | unsigned long max_delay; | ||
1183 | unsigned long now = jiffies; | ||
1184 | struct in6_addr *group = NULL; | ||
1185 | int err = 0; | ||
1186 | |||
1187 | spin_lock(&br->multicast_lock); | ||
1188 | if (!netif_running(br->dev) || | ||
1189 | (port && port->state == BR_STATE_DISABLED)) | ||
1190 | goto out; | ||
1191 | |||
1192 | br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); | ||
1193 | |||
1194 | if (skb->len == sizeof(*mld)) { | ||
1195 | if (!pskb_may_pull(skb, sizeof(*mld))) { | ||
1196 | err = -EINVAL; | ||
1197 | goto out; | ||
1198 | } | ||
1199 | mld = (struct mld_msg *) icmp6_hdr(skb); | ||
1200 | max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); | ||
1201 | if (max_delay) | ||
1202 | group = &mld->mld_mca; | ||
1203 | } else if (skb->len >= sizeof(*mld2q)) { | ||
1204 | if (!pskb_may_pull(skb, sizeof(*mld2q))) { | ||
1205 | err = -EINVAL; | ||
1206 | goto out; | ||
1207 | } | ||
1208 | mld2q = (struct mld2_query *)icmp6_hdr(skb); | ||
1209 | if (!mld2q->mld2q_nsrcs) | ||
1210 | group = &mld2q->mld2q_mca; | ||
1211 | max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1; | ||
1212 | } | ||
1213 | |||
1214 | if (!group) | ||
1215 | goto out; | ||
1216 | |||
1217 | mp = br_mdb_ip6_get(br->mdb, group); | ||
1218 | if (!mp) | ||
1219 | goto out; | ||
1220 | |||
1221 | max_delay *= br->multicast_last_member_count; | ||
1222 | if (!hlist_unhashed(&mp->mglist) && | ||
1223 | (timer_pending(&mp->timer) ? | ||
1224 | time_after(mp->timer.expires, now + max_delay) : | ||
1225 | try_to_del_timer_sync(&mp->timer) >= 0)) | ||
1226 | mod_timer(&mp->timer, now + max_delay); | ||
1227 | |||
1228 | for (pp = &mp->ports; (p = *pp); pp = &p->next) { | ||
1229 | if (timer_pending(&p->timer) ? | ||
1230 | time_after(p->timer.expires, now + max_delay) : | ||
1231 | try_to_del_timer_sync(&p->timer) >= 0) | ||
1232 | mod_timer(&p->timer, now + max_delay); | ||
1233 | } | ||
1234 | |||
1235 | out: | ||
1236 | spin_unlock(&br->multicast_lock); | ||
1237 | return err; | ||
1238 | } | ||
1239 | #endif | ||
1240 | |||
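MLDV2_MRC() above decodes the Maximum Response Code field of an MLDv2 query. A hypothetical stand-alone decoder with the RFC 3810 section 5.1.3 semantics (mldv2_mrc_to_msecs is an illustrative name, not the kernel macro):

	static inline unsigned long mldv2_mrc_to_msecs(u16 mrc)
	{
		/* Values below 32768 are plain milliseconds. */
		if (mrc < 32768)
			return mrc;
		/* Otherwise 1|exp(3)|mant(12): (mant | 0x1000) << (exp + 3). */
		return ((mrc & 0x0fff) | 0x1000) << (((mrc >> 12) & 0x7) + 3);
	}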
886 | static void br_multicast_leave_group(struct net_bridge *br, | 1241 | static void br_multicast_leave_group(struct net_bridge *br, |
887 | struct net_bridge_port *port, | 1242 | struct net_bridge_port *port, |
888 | __be32 group) | 1243 | struct br_ip *group) |
889 | { | 1244 | { |
890 | struct net_bridge_mdb_htable *mdb; | 1245 | struct net_bridge_mdb_htable *mdb; |
891 | struct net_bridge_mdb_entry *mp; | 1246 | struct net_bridge_mdb_entry *mp; |
@@ -893,9 +1248,6 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
893 | unsigned long now; | 1248 | unsigned long now; |
894 | unsigned long time; | 1249 | unsigned long time; |
895 | 1250 | ||
896 | if (ipv4_is_local_multicast(group)) | ||
897 | return; | ||
898 | |||
899 | spin_lock(&br->multicast_lock); | 1251 | spin_lock(&br->multicast_lock); |
900 | if (!netif_running(br->dev) || | 1252 | if (!netif_running(br->dev) || |
901 | (port && port->state == BR_STATE_DISABLED) || | 1253 | (port && port->state == BR_STATE_DISABLED) || |
@@ -946,6 +1298,38 @@ out: | |||
946 | spin_unlock(&br->multicast_lock); | 1298 | spin_unlock(&br->multicast_lock); |
947 | } | 1299 | } |
948 | 1300 | ||
1301 | static void br_ip4_multicast_leave_group(struct net_bridge *br, | ||
1302 | struct net_bridge_port *port, | ||
1303 | __be32 group) | ||
1304 | { | ||
1305 | struct br_ip br_group; | ||
1306 | |||
1307 | if (ipv4_is_local_multicast(group)) | ||
1308 | return; | ||
1309 | |||
1310 | br_group.u.ip4 = group; | ||
1311 | br_group.proto = htons(ETH_P_IP); | ||
1312 | |||
1313 | br_multicast_leave_group(br, port, &br_group); | ||
1314 | } | ||
1315 | |||
1316 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1317 | static void br_ip6_multicast_leave_group(struct net_bridge *br, | ||
1318 | struct net_bridge_port *port, | ||
1319 | const struct in6_addr *group) | ||
1320 | { | ||
1321 | struct br_ip br_group; | ||
1322 | |||
1323 | if (ipv6_is_local_multicast(group)) | ||
1324 | return; | ||
1325 | |||
1326 | ipv6_addr_copy(&br_group.u.ip6, group); | ||
1327 | br_group.proto = htons(ETH_P_IPV6); | ||
1328 | |||
1329 | br_multicast_leave_group(br, port, &br_group); | ||
1330 | } | ||
1331 | #endif | ||
1332 | |||
949 | static int br_multicast_ipv4_rcv(struct net_bridge *br, | 1333 | static int br_multicast_ipv4_rcv(struct net_bridge *br, |
950 | struct net_bridge_port *port, | 1334 | struct net_bridge_port *port, |
951 | struct sk_buff *skb) | 1335 | struct sk_buff *skb) |
@@ -1000,8 +1384,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
1000 | if (!pskb_may_pull(skb2, sizeof(*ih))) | 1384 | if (!pskb_may_pull(skb2, sizeof(*ih))) |
1001 | goto out; | 1385 | goto out; |
1002 | 1386 | ||
1003 | iph = ip_hdr(skb2); | ||
1004 | |||
1005 | switch (skb2->ip_summed) { | 1387 | switch (skb2->ip_summed) { |
1006 | case CHECKSUM_COMPLETE: | 1388 | case CHECKSUM_COMPLETE: |
1007 | if (!csum_fold(skb2->csum)) | 1389 | if (!csum_fold(skb2->csum)) |
@@ -1022,16 +1404,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
1022 | case IGMP_HOST_MEMBERSHIP_REPORT: | 1404 | case IGMP_HOST_MEMBERSHIP_REPORT: |
1023 | case IGMPV2_HOST_MEMBERSHIP_REPORT: | 1405 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
1024 | BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; | 1406 | BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; |
1025 | err = br_multicast_add_group(br, port, ih->group); | 1407 | err = br_ip4_multicast_add_group(br, port, ih->group); |
1026 | break; | 1408 | break; |
1027 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 1409 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
1028 | err = br_multicast_igmp3_report(br, port, skb2); | 1410 | err = br_ip4_multicast_igmp3_report(br, port, skb2); |
1029 | break; | 1411 | break; |
1030 | case IGMP_HOST_MEMBERSHIP_QUERY: | 1412 | case IGMP_HOST_MEMBERSHIP_QUERY: |
1031 | err = br_multicast_query(br, port, skb2); | 1413 | err = br_ip4_multicast_query(br, port, skb2); |
1032 | break; | 1414 | break; |
1033 | case IGMP_HOST_LEAVE_MESSAGE: | 1415 | case IGMP_HOST_LEAVE_MESSAGE: |
1034 | br_multicast_leave_group(br, port, ih->group); | 1416 | br_ip4_multicast_leave_group(br, port, ih->group); |
1035 | break; | 1417 | break; |
1036 | } | 1418 | } |
1037 | 1419 | ||
@@ -1043,6 +1425,123 @@ err_out: | |||
1043 | return err; | 1425 | return err; |
1044 | } | 1426 | } |
1045 | 1427 | ||
1428 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1429 | static int br_multicast_ipv6_rcv(struct net_bridge *br, | ||
1430 | struct net_bridge_port *port, | ||
1431 | struct sk_buff *skb) | ||
1432 | { | ||
1433 | struct sk_buff *skb2 = skb; | ||
1434 | struct ipv6hdr *ip6h; | ||
1435 | struct icmp6hdr *icmp6h; | ||
1436 | u8 nexthdr; | ||
1437 | unsigned len; | ||
1438 | unsigned offset; | ||
1439 | int err; | ||
1440 | |||
1441 | if (!pskb_may_pull(skb, sizeof(*ip6h))) | ||
1442 | return -EINVAL; | ||
1443 | |||
1444 | ip6h = ipv6_hdr(skb); | ||
1445 | |||
1446 | /* | ||
1447 | * We're interested in MLD messages only. | ||
1448 | * - Version is 6 | ||
1449 | * - MLD always carries the Router Alert hop-by-hop option | ||
1450 | * - But we do not support jumbograms. | ||
1451 | */ | ||
1452 | if (ip6h->version != 6 || | ||
1453 | ip6h->nexthdr != IPPROTO_HOPOPTS || | ||
1454 | ip6h->payload_len == 0) | ||
1455 | return 0; | ||
1456 | |||
1457 | len = ntohs(ip6h->payload_len); | ||
1458 | if (skb->len < len) | ||
1459 | return -EINVAL; | ||
1460 | |||
1461 | nexthdr = ip6h->nexthdr; | ||
1462 | offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); | ||
1463 | |||
1464 | if (offset < 0 || nexthdr != IPPROTO_ICMPV6) | ||
1465 | return 0; | ||
1466 | |||
1467 | /* Okay, we found an ICMPv6 header */ | ||
1468 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
1469 | if (!skb2) | ||
1470 | return -ENOMEM; | ||
1471 | |||
1472 | len -= offset - skb_network_offset(skb2); | ||
1473 | |||
1474 | __skb_pull(skb2, offset); | ||
1475 | skb_reset_transport_header(skb2); | ||
1476 | |||
1477 | err = -EINVAL; | ||
1478 | if (!pskb_may_pull(skb2, sizeof(*icmp6h))) | ||
1479 | goto out; | ||
1480 | |||
1481 | icmp6h = icmp6_hdr(skb2); | ||
1482 | |||
1483 | switch (icmp6h->icmp6_type) { | ||
1484 | case ICMPV6_MGM_QUERY: | ||
1485 | case ICMPV6_MGM_REPORT: | ||
1486 | case ICMPV6_MGM_REDUCTION: | ||
1487 | case ICMPV6_MLD2_REPORT: | ||
1488 | break; | ||
1489 | default: | ||
1490 | err = 0; | ||
1491 | goto out; | ||
1492 | } | ||
1493 | |||
1494 | /* Okay, we found an MLD message. Check further. */ | ||
1495 | if (skb2->len > len) { | ||
1496 | err = pskb_trim_rcsum(skb2, len); | ||
1497 | if (err) | ||
1498 | goto out; | ||
1499 | } | ||
1500 | |||
1501 | switch (skb2->ip_summed) { | ||
1502 | case CHECKSUM_COMPLETE: | ||
1503 | if (!csum_fold(skb2->csum)) | ||
1504 | break; | ||
1505 | /*FALLTHROUGH*/ | ||
1506 | case CHECKSUM_NONE: | ||
1507 | skb2->csum = 0; | ||
1508 | if (skb_checksum_complete(skb2)) | ||
1509 | goto out; | ||
1510 | } | ||
1511 | |||
1512 | err = 0; | ||
1513 | |||
1514 | BR_INPUT_SKB_CB(skb)->igmp = 1; | ||
1515 | |||
1516 | switch (icmp6h->icmp6_type) { | ||
1517 | case ICMPV6_MGM_REPORT: | ||
1518 | { | ||
1519 | struct mld_msg *mld = (struct mld_msg *)icmp6h; | ||
1520 | BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; | ||
1521 | err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); | ||
1522 | break; | ||
1523 | } | ||
1524 | case ICMPV6_MLD2_REPORT: | ||
1525 | err = br_ip6_multicast_mld2_report(br, port, skb2); | ||
1526 | break; | ||
1527 | case ICMPV6_MGM_QUERY: | ||
1528 | err = br_ip6_multicast_query(br, port, skb2); | ||
1529 | break; | ||
1530 | case ICMPV6_MGM_REDUCTION: | ||
1531 | { | ||
1532 | struct mld_msg *mld = (struct mld_msg *)icmp6h; | ||
1533 | br_ip6_multicast_leave_group(br, port, &mld->mld_mca); | ||
1534 | } | ||
1535 | } | ||
1536 | |||
1537 | out: | ||
1538 | __skb_push(skb2, offset); | ||
1539 | if (skb2 != skb) | ||
1540 | kfree_skb(skb2); | ||
1541 | return err; | ||
1542 | } | ||
1543 | #endif | ||
1544 | |||
1046 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | 1545 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, |
1047 | struct sk_buff *skb) | 1546 | struct sk_buff *skb) |
1048 | { | 1547 | { |
@@ -1055,6 +1554,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | |||
1055 | switch (skb->protocol) { | 1554 | switch (skb->protocol) { |
1056 | case htons(ETH_P_IP): | 1555 | case htons(ETH_P_IP): |
1057 | return br_multicast_ipv4_rcv(br, port, skb); | 1556 | return br_multicast_ipv4_rcv(br, port, skb); |
1557 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
1558 | case htons(ETH_P_IPV6): | ||
1559 | return br_multicast_ipv6_rcv(br, port, skb); | ||
1560 | #endif | ||
1058 | } | 1561 | } |
1059 | 1562 | ||
1060 | return 0; | 1563 | return 0; |
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 763a3ec292e5..1413b72acc7f 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
82 | case NETDEV_UNREGISTER: | 82 | case NETDEV_UNREGISTER: |
83 | br_del_if(br, dev); | 83 | br_del_if(br, dev); |
84 | break; | 84 | break; |
85 | |||
86 | case NETDEV_PRE_TYPE_CHANGE: | ||
87 | /* Forbid the underlying device from changing its type. */ | ||
88 | return NOTIFY_BAD; | ||
85 | } | 89 | } |
86 | 90 | ||
87 | /* Events that may cause spanning tree to refresh */ | 91 | /* Events that may cause spanning tree to refresh */ |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 846d7d1e2075..018499ebe19d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -45,6 +45,17 @@ struct mac_addr | |||
45 | unsigned char addr[6]; | 45 | unsigned char addr[6]; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct br_ip | ||
49 | { | ||
50 | union { | ||
51 | __be32 ip4; | ||
52 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
53 | struct in6_addr ip6; | ||
54 | #endif | ||
55 | } u; | ||
56 | __be16 proto; | ||
57 | }; | ||
58 | |||
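Callers build this tagged union on the stack and pass it by pointer, as the br_ip4_/br_ip6_ wrappers in br_multicast.c above do. For example, for an IPv4 group taken from a received packet:

	struct br_ip dst;

	dst.u.ip4 = ip_hdr(skb)->daddr;	/* group address from the packet */
	dst.proto = htons(ETH_P_IP);	/* the tag selects the union member */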
48 | struct net_bridge_fdb_entry | 59 | struct net_bridge_fdb_entry |
49 | { | 60 | { |
50 | struct hlist_node hlist; | 61 | struct hlist_node hlist; |
@@ -64,7 +75,7 @@ struct net_bridge_port_group { | |||
64 | struct rcu_head rcu; | 75 | struct rcu_head rcu; |
65 | struct timer_list timer; | 76 | struct timer_list timer; |
66 | struct timer_list query_timer; | 77 | struct timer_list query_timer; |
67 | __be32 addr; | 78 | struct br_ip addr; |
68 | u32 queries_sent; | 79 | u32 queries_sent; |
69 | }; | 80 | }; |
70 | 81 | ||
@@ -77,7 +88,7 @@ struct net_bridge_mdb_entry | |||
77 | struct rcu_head rcu; | 88 | struct rcu_head rcu; |
78 | struct timer_list timer; | 89 | struct timer_list timer; |
79 | struct timer_list query_timer; | 90 | struct timer_list query_timer; |
80 | __be32 addr; | 91 | struct br_ip addr; |
81 | u32 queries_sent; | 92 | u32 queries_sent; |
82 | }; | 93 | }; |
83 | 94 | ||
@@ -130,11 +141,20 @@ struct net_bridge_port | |||
130 | #endif | 141 | #endif |
131 | }; | 142 | }; |
132 | 143 | ||
144 | struct br_cpu_netstats { | ||
145 | unsigned long rx_packets; | ||
146 | unsigned long rx_bytes; | ||
147 | unsigned long tx_packets; | ||
148 | unsigned long tx_bytes; | ||
149 | }; | ||
150 | |||
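These counters pair with the __percpu stats pointer added to struct net_bridge just below; each CPU updates its own copy without locking, and readers sum over all CPUs. A minimal sketch of an update, assuming this_cpu_ptr() (the real users live in br_device.c/br_input.c, outside this hunk):

	struct br_cpu_netstats *stats = this_cpu_ptr(br->stats);

	stats->rx_packets++;		/* no lock: this CPU owns these counters */
	stats->rx_bytes += skb->len;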
133 | struct net_bridge | 151 | struct net_bridge |
134 | { | 152 | { |
135 | spinlock_t lock; | 153 | spinlock_t lock; |
136 | struct list_head port_list; | 154 | struct list_head port_list; |
137 | struct net_device *dev; | 155 | struct net_device *dev; |
156 | |||
157 | struct br_cpu_netstats __percpu *stats; | ||
138 | spinlock_t hash_lock; | 158 | spinlock_t hash_lock; |
139 | struct hlist_head hash[BR_HASH_SIZE]; | 159 | struct hlist_head hash[BR_HASH_SIZE]; |
140 | unsigned long feature_mask; | 160 | unsigned long feature_mask; |
diff --git a/net/caif/Kconfig b/net/caif/Kconfig new file mode 100644 index 000000000000..cd1daf6008bd --- /dev/null +++ b/net/caif/Kconfig | |||
@@ -0,0 +1,48 @@ | |||
1 | # | ||
2 | # CAIF net configurations | ||
3 | # | ||
4 | |||
5 | #menu "CAIF Support" | ||
6 | comment "CAIF Support" | ||
7 | menuconfig CAIF | ||
8 | tristate "Enable CAIF support" | ||
9 | select CRC_CCITT | ||
10 | default n | ||
11 | ---help--- | ||
12 | The "Communication CPU to Application CPU Interface" (CAIF) is a packet | ||
13 | based connection-oriented MUX protocol developed by ST-Ericsson for use | ||
14 | with its modems. It is accessed from user space as sockets (PF_CAIF). | ||
15 | |||
16 | Say Y (or M) here if you are building for a phone product (e.g. Android | ||
17 | or MeeGo) that uses CAIF as transport; if unsure say N. | ||
18 | |||
19 | If you select to build it as a module then CAIF_NETDEV also needs to be | ||
20 | built as a module. You will also need to say yes to any CAIF physical | ||
21 | devices that your platform requires. | ||
22 | |||
23 | See Documentation/networking/caif for further explanation of how to | ||
24 | use and configure CAIF. | ||
25 | |||
26 | if CAIF | ||
27 | |||
28 | config CAIF_DEBUG | ||
29 | bool "Enable Debug" | ||
30 | default n | ||
31 | ---help--- | ||
32 | Enable the inclusion of debug code in the CAIF stack. | ||
33 | Be aware that doing this will impact performance. | ||
34 | If unsure say N. | ||
35 | |||
36 | |||
37 | config CAIF_NETDEV | ||
38 | tristate "CAIF GPRS Network device" | ||
39 | default CAIF | ||
40 | ---help--- | ||
41 | Say Y if you will be using a CAIF based GPRS network device. | ||
42 | This can be either built-in or a loadable module. | ||
43 | If you select to build it as a built-in then the main CAIF device must | ||
44 | also be a built-in. | ||
45 | If unsure say Y. | ||
46 | |||
47 | endif | ||
48 | #endmenu | ||
diff --git a/net/caif/Makefile b/net/caif/Makefile new file mode 100644 index 000000000000..34852af2595e --- /dev/null +++ b/net/caif/Makefile | |||
@@ -0,0 +1,26 @@ | |||
1 | ifeq ($(CONFIG_CAIF_DEBUG),y) | ||
2 | CAIF_DBG_FLAGS := -DDEBUG | ||
3 | endif | ||
4 | |||
5 | ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS) | ||
6 | |||
7 | caif-objs := caif_dev.o \ | ||
8 | cfcnfg.o cfmuxl.o cfctrl.o \ | ||
9 | cffrml.o cfveil.o cfdbgl.o\ | ||
10 | cfserl.o cfdgml.o \ | ||
11 | cfrfml.o cfvidl.o cfutill.o \ | ||
12 | cfsrvl.o cfpkt_skbuff.o caif_config_util.o | ||
13 | clean-dirs:= .tmp_versions | ||
14 | |||
15 | clean-files:= \ | ||
16 | Module.symvers \ | ||
17 | modules.order \ | ||
18 | *.cmd \ | ||
19 | *.o \ | ||
20 | *~ | ||
21 | |||
22 | obj-$(CONFIG_CAIF) += caif.o | ||
23 | obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o | ||
24 | obj-$(CONFIG_CAIF) += caif_socket.o | ||
25 | |||
26 | export-objs := caif.o | ||
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c new file mode 100644 index 000000000000..6f36580366f0 --- /dev/null +++ b/net/caif/caif_config_util.c | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <net/caif/cfctrl.h> | ||
10 | #include <net/caif/cfcnfg.h> | ||
11 | #include <net/caif/caif_dev.h> | ||
12 | |||
13 | int connect_req_to_link_param(struct cfcnfg *cnfg, | ||
14 | struct caif_connect_request *s, | ||
15 | struct cfctrl_link_param *l) | ||
16 | { | ||
17 | struct dev_info *dev_info; | ||
18 | enum cfcnfg_phy_preference pref; | ||
19 | memset(l, 0, sizeof(*l)); | ||
20 | l->priority = s->priority; | ||
21 | |||
22 | if (s->link_name[0] != '\0') | ||
23 | l->phyid = cfcnfg_get_named(cnfg, s->link_name); | ||
24 | else { | ||
25 | switch (s->link_selector) { | ||
26 | case CAIF_LINK_HIGH_BANDW: | ||
27 | pref = CFPHYPREF_HIGH_BW; | ||
28 | break; | ||
29 | case CAIF_LINK_LOW_LATENCY: | ||
30 | pref = CFPHYPREF_LOW_LAT; | ||
31 | break; | ||
32 | default: | ||
33 | return -EINVAL; | ||
34 | } | ||
35 | dev_info = cfcnfg_get_phyid(cnfg, pref); | ||
36 | if (dev_info == NULL) | ||
37 | return -ENODEV; | ||
38 | l->phyid = dev_info->id; | ||
39 | } | ||
40 | switch (s->protocol) { | ||
41 | case CAIFPROTO_AT: | ||
42 | l->linktype = CFCTRL_SRV_VEI; | ||
43 | if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN) | ||
44 | l->chtype = 0x02; | ||
45 | else | ||
46 | l->chtype = s->sockaddr.u.at.type; | ||
47 | l->endpoint = 0x00; | ||
48 | break; | ||
49 | case CAIFPROTO_DATAGRAM: | ||
50 | l->linktype = CFCTRL_SRV_DATAGRAM; | ||
51 | l->chtype = 0x00; | ||
52 | l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; | ||
53 | break; | ||
54 | case CAIFPROTO_DATAGRAM_LOOP: | ||
55 | l->linktype = CFCTRL_SRV_DATAGRAM; | ||
56 | l->chtype = 0x03; | ||
57 | l->endpoint = 0x00; | ||
58 | l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; | ||
59 | break; | ||
60 | case CAIFPROTO_RFM: | ||
61 | l->linktype = CFCTRL_SRV_RFM; | ||
62 | l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; | ||
63 | strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, | ||
64 | sizeof(l->u.rfm.volume)-1); | ||
65 | l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0; | ||
66 | break; | ||
67 | case CAIFPROTO_UTIL: | ||
68 | l->linktype = CFCTRL_SRV_UTIL; | ||
69 | l->endpoint = 0x00; | ||
70 | l->chtype = 0x00; | ||
71 | strncpy(l->u.utility.name, s->sockaddr.u.util.service, | ||
72 | sizeof(l->u.utility.name)-1); | ||
73 | l->u.utility.name[sizeof(l->u.utility.name)-1] = 0; | ||
74 | caif_assert(sizeof(l->u.utility.name) > 10); | ||
75 | l->u.utility.paramlen = s->param.size; | ||
76 | if (l->u.utility.paramlen > sizeof(l->u.utility.params)) | ||
77 | l->u.utility.paramlen = sizeof(l->u.utility.params); | ||
78 | |||
79 | memcpy(l->u.utility.params, s->param.data, | ||
80 | l->u.utility.paramlen); | ||
81 | |||
82 | break; | ||
83 | default: | ||
84 | return -EINVAL; | ||
85 | } | ||
86 | return 0; | ||
87 | } | ||
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c new file mode 100644 index 000000000000..e84837e1bc86 --- /dev/null +++ b/net/caif/caif_dev.c | |||
@@ -0,0 +1,413 @@ | |||
1 | /* | ||
2 | * CAIF Interface registration. | ||
3 | * Copyright (C) ST-Ericsson AB 2010 | ||
4 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | * | ||
7 | * Borrowed heavily from file: pn_dev.c. Thanks to | ||
8 | * Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * and Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/version.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/if_arp.h> | ||
16 | #include <linux/net.h> | ||
17 | #include <linux/netdevice.h> | ||
18 | #include <linux/skbuff.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/wait.h> | ||
21 | #include <net/netns/generic.h> | ||
22 | #include <net/net_namespace.h> | ||
23 | #include <net/pkt_sched.h> | ||
24 | #include <net/caif/caif_device.h> | ||
25 | #include <net/caif/caif_dev.h> | ||
26 | #include <net/caif/caif_layer.h> | ||
27 | #include <net/caif/cfpkt.h> | ||
28 | #include <net/caif/cfcnfg.h> | ||
29 | |||
30 | MODULE_LICENSE("GPL"); | ||
31 | #define TIMEOUT (HZ*5) | ||
32 | |||
33 | /* Used for local tracking of the CAIF net devices */ | ||
34 | struct caif_device_entry { | ||
35 | struct cflayer layer; | ||
36 | struct list_head list; | ||
37 | atomic_t in_use; | ||
38 | atomic_t state; | ||
39 | u16 phyid; | ||
40 | struct net_device *netdev; | ||
41 | wait_queue_head_t event; | ||
42 | }; | ||
43 | |||
44 | struct caif_device_entry_list { | ||
45 | struct list_head list; | ||
46 | /* Protects simultaneous deletes in the list */ | ||
47 | spinlock_t lock; | ||
48 | }; | ||
49 | |||
50 | struct caif_net { | ||
51 | struct caif_device_entry_list caifdevs; | ||
52 | }; | ||
53 | |||
54 | static int caif_net_id; | ||
55 | static struct cfcnfg *cfg; | ||
56 | |||
57 | static struct caif_device_entry_list *caif_device_list(struct net *net) | ||
58 | { | ||
59 | struct caif_net *caifn; | ||
60 | BUG_ON(!net); | ||
61 | caifn = net_generic(net, caif_net_id); | ||
62 | BUG_ON(!caifn); | ||
63 | return &caifn->caifdevs; | ||
64 | } | ||
65 | |||
66 | /* Allocate new CAIF device. */ | ||
67 | static struct caif_device_entry *caif_device_alloc(struct net_device *dev) | ||
68 | { | ||
69 | struct caif_device_entry_list *caifdevs; | ||
70 | struct caif_device_entry *caifd; | ||
71 | caifdevs = caif_device_list(dev_net(dev)); | ||
72 | BUG_ON(!caifdevs); | ||
73 | caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); | ||
74 | if (!caifd) | ||
75 | return NULL; | ||
76 | caifd->netdev = dev; | ||
77 | list_add(&caifd->list, &caifdevs->list); | ||
78 | init_waitqueue_head(&caifd->event); | ||
79 | return caifd; | ||
80 | } | ||
81 | |||
82 | static struct caif_device_entry *caif_get(struct net_device *dev) | ||
83 | { | ||
84 | struct caif_device_entry_list *caifdevs = | ||
85 | caif_device_list(dev_net(dev)); | ||
86 | struct caif_device_entry *caifd; | ||
87 | BUG_ON(!caifdevs); | ||
88 | list_for_each_entry(caifd, &caifdevs->list, list) { | ||
89 | if (caifd->netdev == dev) | ||
90 | return caifd; | ||
91 | } | ||
92 | return NULL; | ||
93 | } | ||
94 | |||
95 | static void caif_device_destroy(struct net_device *dev) | ||
96 | { | ||
97 | struct caif_device_entry_list *caifdevs = | ||
98 | caif_device_list(dev_net(dev)); | ||
99 | struct caif_device_entry *caifd; | ||
100 | ASSERT_RTNL(); | ||
101 | if (dev->type != ARPHRD_CAIF) | ||
102 | return; | ||
103 | |||
104 | spin_lock_bh(&caifdevs->lock); | ||
105 | caifd = caif_get(dev); | ||
106 | if (caifd == NULL) { | ||
107 | spin_unlock_bh(&caifdevs->lock); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | list_del(&caifd->list); | ||
112 | spin_unlock_bh(&caifdevs->lock); | ||
113 | |||
114 | kfree(caifd); | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | static int transmit(struct cflayer *layer, struct cfpkt *pkt) | ||
119 | { | ||
120 | struct caif_device_entry *caifd = | ||
121 | container_of(layer, struct caif_device_entry, layer); | ||
122 | struct sk_buff *skb, *skb2; | ||
123 | int ret = -EINVAL; | ||
124 | skb = cfpkt_tonative(pkt); | ||
125 | skb->dev = caifd->netdev; | ||
126 | /* | ||
127 | * Don't allow SKB to be destroyed upon error, but signal resend | ||
128 | * notification to clients. We can't rely on the return value as | ||
129 | * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes doesn't. | ||
130 | */ | ||
131 | if (netif_queue_stopped(caifd->netdev)) | ||
132 | return -EAGAIN; | ||
133 | skb2 = skb_get(skb); | ||
134 | |||
135 | ret = dev_queue_xmit(skb2); | ||
136 | |||
137 | if (!ret) | ||
138 | kfree_skb(skb); | ||
139 | else | ||
140 | return -EAGAIN; | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
146 | { | ||
147 | struct caif_device_entry *caifd; | ||
148 | struct caif_dev_common *caifdev; | ||
149 | caifd = container_of(layr, struct caif_device_entry, layer); | ||
150 | caifdev = netdev_priv(caifd->netdev); | ||
151 | if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) { | ||
152 | atomic_set(&caifd->in_use, 1); | ||
153 | wake_up_interruptible(&caifd->event); | ||
154 | |||
155 | } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) { | ||
156 | atomic_set(&caifd->in_use, 0); | ||
157 | wake_up_interruptible(&caifd->event); | ||
158 | } | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Stuff received packets to associated sockets. | ||
164 | * On error, returns non-zero and releases the skb. | ||
165 | */ | ||
166 | static int receive(struct sk_buff *skb, struct net_device *dev, | ||
167 | struct packet_type *pkttype, struct net_device *orig_dev) | ||
168 | { | ||
169 | struct net *net; | ||
170 | struct cfpkt *pkt; | ||
171 | struct caif_device_entry *caifd; | ||
172 | net = dev_net(dev); | ||
173 | pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); | ||
174 | caifd = caif_get(dev); | ||
175 | if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) | ||
176 | return NET_RX_DROP; | ||
177 | |||
178 | if (caifd->layer.up->receive(caifd->layer.up, pkt)) | ||
179 | return NET_RX_DROP; | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static struct packet_type caif_packet_type __read_mostly = { | ||
185 | .type = cpu_to_be16(ETH_P_CAIF), | ||
186 | .func = receive, | ||
187 | }; | ||
188 | |||
189 | static void dev_flowctrl(struct net_device *dev, int on) | ||
190 | { | ||
191 | struct caif_device_entry *caifd = caif_get(dev); | ||
192 | if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) | ||
193 | return; | ||
194 | |||
195 | caifd->layer.up->ctrlcmd(caifd->layer.up, | ||
196 | on ? | ||
197 | _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND : | ||
198 | _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, | ||
199 | caifd->layer.id); | ||
200 | } | ||
201 | |||
202 | /* notify Caif of device events */ | ||
203 | static int caif_device_notify(struct notifier_block *me, unsigned long what, | ||
204 | void *arg) | ||
205 | { | ||
206 | struct net_device *dev = arg; | ||
207 | struct caif_device_entry *caifd = NULL; | ||
208 | struct caif_dev_common *caifdev; | ||
209 | enum cfcnfg_phy_preference pref; | ||
210 | int res = -EINVAL; | ||
211 | enum cfcnfg_phy_type phy_type; | ||
212 | |||
213 | if (dev->type != ARPHRD_CAIF) | ||
214 | return 0; | ||
215 | |||
216 | switch (what) { | ||
217 | case NETDEV_REGISTER: | ||
218 | pr_info("CAIF: %s():register %s\n", __func__, dev->name); | ||
219 | caifd = caif_device_alloc(dev); | ||
220 | if (caifd == NULL) | ||
221 | break; | ||
222 | caifdev = netdev_priv(dev); | ||
223 | caifdev->flowctrl = dev_flowctrl; | ||
224 | atomic_set(&caifd->state, what); | ||
225 | res = 0; | ||
226 | break; | ||
227 | |||
228 | case NETDEV_UP: | ||
229 | pr_info("CAIF: %s(): up %s\n", __func__, dev->name); | ||
230 | caifd = caif_get(dev); | ||
231 | if (caifd == NULL) | ||
232 | break; | ||
233 | caifdev = netdev_priv(dev); | ||
234 | if (atomic_read(&caifd->state) == NETDEV_UP) { | ||
235 | pr_info("CAIF: %s():%s already up\n", | ||
236 | __func__, dev->name); | ||
237 | break; | ||
238 | } | ||
239 | atomic_set(&caifd->state, what); | ||
240 | caifd->layer.transmit = transmit; | ||
241 | caifd->layer.modemcmd = modemcmd; | ||
242 | |||
243 | if (caifdev->use_frag) | ||
244 | phy_type = CFPHYTYPE_FRAG; | ||
245 | else | ||
246 | phy_type = CFPHYTYPE_CAIF; | ||
247 | |||
248 | switch (caifdev->link_select) { | ||
249 | case CAIF_LINK_HIGH_BANDW: | ||
250 | pref = CFPHYPREF_HIGH_BW; | ||
251 | break; | ||
252 | case CAIF_LINK_LOW_LATENCY: | ||
253 | pref = CFPHYPREF_LOW_LAT; | ||
254 | break; | ||
255 | default: | ||
256 | pref = CFPHYPREF_HIGH_BW; | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | cfcnfg_add_phy_layer(get_caif_conf(), | ||
261 | phy_type, | ||
262 | dev, | ||
263 | &caifd->layer, | ||
264 | &caifd->phyid, | ||
265 | pref, | ||
266 | caifdev->use_fcs, | ||
267 | caifdev->use_stx); | ||
268 | strncpy(caifd->layer.name, dev->name, | ||
269 | sizeof(caifd->layer.name) - 1); | ||
270 | caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; | ||
271 | break; | ||
272 | |||
273 | case NETDEV_GOING_DOWN: | ||
274 | caifd = caif_get(dev); | ||
275 | if (caifd == NULL) | ||
276 | break; | ||
277 | pr_info("CAIF: %s():going down %s\n", __func__, dev->name); | ||
278 | |||
279 | if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN || | ||
280 | atomic_read(&caifd->state) == NETDEV_DOWN) | ||
281 | break; | ||
282 | |||
283 | atomic_set(&caifd->state, what); | ||
284 | if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) | ||
285 | return -EINVAL; | ||
286 | caifd->layer.up->ctrlcmd(caifd->layer.up, | ||
287 | _CAIF_CTRLCMD_PHYIF_DOWN_IND, | ||
288 | caifd->layer.id); | ||
289 | res = wait_event_interruptible_timeout(caifd->event, | ||
290 | atomic_read(&caifd->in_use) == 0, | ||
291 | TIMEOUT); | ||
292 | break; | ||
293 | |||
294 | case NETDEV_DOWN: | ||
295 | caifd = caif_get(dev); | ||
296 | if (caifd == NULL) | ||
297 | break; | ||
298 | pr_info("CAIF: %s(): down %s\n", __func__, dev->name); | ||
299 | if (atomic_read(&caifd->in_use)) | ||
300 | pr_warning("CAIF: %s(): " | ||
301 | "Unregistering an active CAIF device: %s\n", | ||
302 | __func__, dev->name); | ||
303 | cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); | ||
304 | atomic_set(&caifd->state, what); | ||
305 | break; | ||
306 | |||
307 | case NETDEV_UNREGISTER: | ||
308 | caifd = caif_get(dev); | ||
309 | pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name); | ||
310 | atomic_set(&caifd->state, what); | ||
311 | caif_device_destroy(dev); | ||
312 | break; | ||
313 | } | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static struct notifier_block caif_device_notifier = { | ||
318 | .notifier_call = caif_device_notify, | ||
319 | .priority = 0, | ||
320 | }; | ||
321 | |||
322 | |||
323 | struct cfcnfg *get_caif_conf(void) | ||
324 | { | ||
325 | return cfg; | ||
326 | } | ||
327 | EXPORT_SYMBOL(get_caif_conf); | ||
328 | |||
329 | int caif_connect_client(struct caif_connect_request *conn_req, | ||
330 | struct cflayer *client_layer) | ||
331 | { | ||
332 | struct cfctrl_link_param param; | ||
333 | if (connect_req_to_link_param(get_caif_conf(), conn_req, ¶m) == 0) | ||
334 | /* Hook up the adaptation layer. */ | ||
335 | return cfcnfg_add_adaptation_layer(get_caif_conf(), | ||
336 | ¶m, client_layer); | ||
337 | |||
338 | return -EINVAL; | ||
341 | } | ||
342 | EXPORT_SYMBOL(caif_connect_client); | ||
343 | |||
344 | int caif_disconnect_client(struct cflayer *adap_layer) | ||
345 | { | ||
346 | return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer); | ||
347 | } | ||
348 | EXPORT_SYMBOL(caif_disconnect_client); | ||
349 | |||
350 | /* Per-namespace Caif devices handling */ | ||
351 | static int caif_init_net(struct net *net) | ||
352 | { | ||
353 | struct caif_net *caifn = net_generic(net, caif_net_id); | ||
354 | INIT_LIST_HEAD(&caifn->caifdevs.list); | ||
355 | spin_lock_init(&caifn->caifdevs.lock); | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | static void caif_exit_net(struct net *net) | ||
360 | { | ||
361 | struct net_device *dev; | ||
362 | int res; | ||
363 | rtnl_lock(); | ||
364 | for_each_netdev(net, dev) { | ||
365 | if (dev->type != ARPHRD_CAIF) | ||
366 | continue; | ||
367 | res = dev_close(dev); | ||
368 | caif_device_destroy(dev); | ||
369 | } | ||
370 | rtnl_unlock(); | ||
371 | } | ||
372 | |||
373 | static struct pernet_operations caif_net_ops = { | ||
374 | .init = caif_init_net, | ||
375 | .exit = caif_exit_net, | ||
376 | .id = &caif_net_id, | ||
377 | .size = sizeof(struct caif_net), | ||
378 | }; | ||
379 | |||
380 | /* Initialize Caif devices list */ | ||
381 | static int __init caif_device_init(void) | ||
382 | { | ||
383 | int result; | ||
384 | cfg = cfcnfg_create(); | ||
385 | if (!cfg) { | ||
386 | pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__); | ||
387 | goto err_cfcnfg_create_failed; | ||
388 | } | ||
389 | result = register_pernet_device(&caif_net_ops); | ||
390 | |||
391 | if (result) { | ||
392 | kfree(cfg); | ||
393 | cfg = NULL; | ||
394 | return result; | ||
395 | } | ||
396 | dev_add_pack(&caif_packet_type); | ||
397 | register_netdevice_notifier(&caif_device_notifier); | ||
398 | |||
399 | return result; | ||
400 | err_cfcnfg_create_failed: | ||
401 | return -ENODEV; | ||
402 | } | ||
403 | |||
404 | static void __exit caif_device_exit(void) | ||
405 | { | ||
406 | dev_remove_pack(&caif_packet_type); | ||
407 | unregister_pernet_device(&caif_net_ops); | ||
408 | unregister_netdevice_notifier(&caif_device_notifier); | ||
409 | cfcnfg_remove(cfg); | ||
410 | } | ||
411 | |||
412 | module_init(caif_device_init); | ||
413 | module_exit(caif_device_exit); | ||
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c new file mode 100644 index 000000000000..d455375789fb --- /dev/null +++ b/net/caif/caif_socket.c | |||
@@ -0,0 +1,1391 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland sjur.brandeland@stericsson.com | ||
4 | * Per Sigmond per.sigmond@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | */ | ||
7 | |||
8 | #include <linux/fs.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/mutex.h> | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/tcp.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | #include <linux/caif/caif_socket.h> | ||
22 | #include <net/caif/caif_layer.h> | ||
23 | #include <net/caif/caif_dev.h> | ||
24 | #include <net/caif/cfpkt.h> | ||
25 | |||
26 | MODULE_LICENSE("GPL"); | ||
27 | |||
28 | #define CHNL_SKT_READ_QUEUE_HIGH 200 | ||
29 | #define CHNL_SKT_READ_QUEUE_LOW 100 | ||
30 | |||
31 | static int caif_sockbuf_size = 40000; | ||
32 | static atomic_t caif_nr_socks = ATOMIC_INIT(0); | ||
33 | |||
34 | #define CONN_STATE_OPEN_BIT 1 | ||
35 | #define CONN_STATE_PENDING_BIT 2 | ||
36 | #define CONN_STATE_PEND_DESTROY_BIT 3 | ||
37 | #define CONN_REMOTE_SHUTDOWN_BIT 4 | ||
38 | |||
39 | #define TX_FLOW_ON_BIT 1 | ||
40 | #define RX_FLOW_ON_BIT 2 | ||
41 | |||
42 | #define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\ | ||
43 | (void *) &(cf_sk)->conn_state) | ||
44 | #define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
45 | (void *) &(cf_sk)->conn_state) | ||
46 | #define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\ | ||
47 | (void *) &(cf_sk)->conn_state) | ||
48 | #define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\ | ||
49 | (void *) &(cf_sk)->conn_state) | ||
50 | |||
51 | #define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\ | ||
52 | (void *) &(cf_sk)->conn_state) | ||
53 | #define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\ | ||
54 | (void *) &(cf_sk)->conn_state) | ||
55 | #define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\ | ||
56 | (void *) &(cf_sk)->conn_state) | ||
57 | #define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\ | ||
58 | (void *) &(cf_sk)->conn_state) | ||
59 | #define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\ | ||
60 | (void *) &(cf_sk)->conn_state) | ||
61 | #define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
62 | (void *) &(cf_sk)->conn_state) | ||
63 | |||
64 | #define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\ | ||
65 | (void *) &(dev)->conn_state) | ||
66 | #define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\ | ||
67 | (void *) &(cf_sk)->flow_state) | ||
68 | #define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\ | ||
69 | (void *) &(cf_sk)->flow_state) | ||
70 | |||
71 | #define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\ | ||
72 | (void *) &(cf_sk)->flow_state) | ||
73 | #define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\ | ||
74 | (void *) &(cf_sk)->flow_state) | ||
75 | #define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\ | ||
76 | (void *) &(cf_sk)->flow_state) | ||
77 | #define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\ | ||
78 | (void *) &(cf_sk)->flow_state) | ||
79 | |||
80 | #define SKT_READ_FLAG 0x01 | ||
81 | #define SKT_WRITE_FLAG 0x02 | ||
82 | #include <linux/debugfs.h> | ||
83 | static struct dentry *debugfsdir; | ||
84 | |||
85 | #ifdef CONFIG_DEBUG_FS | ||
86 | struct debug_fs_counter { | ||
87 | atomic_t num_open; | ||
88 | atomic_t num_close; | ||
89 | atomic_t num_init; | ||
90 | atomic_t num_init_resp; | ||
91 | atomic_t num_init_fail_resp; | ||
92 | atomic_t num_deinit; | ||
93 | atomic_t num_deinit_resp; | ||
94 | atomic_t num_remote_shutdown_ind; | ||
95 | atomic_t num_tx_flow_off_ind; | ||
96 | atomic_t num_tx_flow_on_ind; | ||
97 | atomic_t num_rx_flow_off; | ||
98 | atomic_t num_rx_flow_on; | ||
99 | atomic_t skb_in_use; | ||
100 | atomic_t skb_alloc; | ||
101 | atomic_t skb_free; | ||
102 | }; | ||
103 | static struct debug_fs_counter cnt; | ||
104 | #define dbfs_atomic_inc(v) atomic_inc(v) | ||
105 | #define dbfs_atomic_dec(v) atomic_dec(v) | ||
106 | #else | ||
107 | #define dbfs_atomic_inc(v) | ||
108 | #define dbfs_atomic_dec(v) | ||
109 | #endif | ||
110 | |||
111 | /* The AF_CAIF socket */ | ||
112 | struct caifsock { | ||
113 | /* NOTE: sk has to be the first member */ | ||
114 | struct sock sk; | ||
115 | struct cflayer layer; | ||
116 | char name[CAIF_LAYER_NAME_SZ]; | ||
117 | u32 conn_state; | ||
118 | u32 flow_state; | ||
119 | struct cfpktq *pktq; | ||
120 | int file_mode; | ||
121 | struct caif_connect_request conn_req; | ||
122 | int read_queue_len; | ||
123 | /* protect updates of read_queue_len */ | ||
124 | spinlock_t read_queue_len_lock; | ||
125 | struct dentry *debugfs_socket_dir; | ||
126 | }; | ||
127 | |||
128 | static void drain_queue(struct caifsock *cf_sk); | ||
129 | |||
130 | /* Packet Receive Callback function called from CAIF Stack */ | ||
131 | static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt) | ||
132 | { | ||
133 | struct caifsock *cf_sk; | ||
134 | int read_queue_high; | ||
135 | cf_sk = container_of(layr, struct caifsock, layer); | ||
136 | |||
137 | if (!STATE_IS_OPEN(cf_sk)) { | ||
138 | /*FIXME: This should be allowed finally!*/ | ||
139 | pr_debug("CAIF: %s(): called after close request\n", __func__); | ||
140 | cfpkt_destroy(pkt); | ||
141 | return 0; | ||
142 | } | ||
143 | /* NOTE: This function may be called in Tasklet context! */ | ||
144 | |||
145 | /* The queue has its own lock */ | ||
146 | cfpkt_queue(cf_sk->pktq, pkt, 0); | ||
147 | |||
148 | spin_lock(&cf_sk->read_queue_len_lock); | ||
149 | cf_sk->read_queue_len++; | ||
150 | |||
151 | read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH); | ||
152 | spin_unlock(&cf_sk->read_queue_len_lock); | ||
153 | |||
154 | if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) { | ||
155 | dbfs_atomic_inc(&cnt.num_rx_flow_off); | ||
156 | SET_RX_FLOW_OFF(cf_sk); | ||
157 | |||
158 | /* Send flow off (NOTE: must not sleep) */ | ||
159 | pr_debug("CAIF: %s():" | ||
160 | " sending flow OFF (queue len = %d)\n", | ||
161 | __func__, | ||
162 | cf_sk->read_queue_len); | ||
163 | caif_assert(cf_sk->layer.dn); | ||
164 | caif_assert(cf_sk->layer.dn->modemcmd); | ||
165 | |||
166 | (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
167 | CAIF_MODEMCMD_FLOW_OFF_REQ); | ||
168 | } | ||
169 | |||
170 | /* Signal reader that data is available. */ | ||
171 | |||
172 | wake_up_interruptible(sk_sleep(&cf_sk->sk)); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* Packet Flow Control Callback function called from CAIF */ | ||
178 | static void caif_sktflowctrl_cb(struct cflayer *layr, | ||
179 | enum caif_ctrlcmd flow, | ||
180 | int phyid) | ||
181 | { | ||
182 | struct caifsock *cf_sk; | ||
183 | |||
184 | /* NOTE: This function may be called in Tasklet context! */ | ||
185 | pr_debug("CAIF: %s(): flowctrl func called: %s.\n", | ||
186 | __func__, | ||
187 | flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : | ||
188 | flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : | ||
189 | flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" : | ||
190 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" : | ||
191 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" : | ||
192 | flow == | ||
193 | CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? "REMOTE_SHUTDOWN" : | ||
194 | "UKNOWN CTRL COMMAND"); | ||
195 | |||
196 | if (layr == NULL) | ||
197 | return; | ||
198 | |||
199 | cf_sk = container_of(layr, struct caifsock, layer); | ||
200 | |||
201 | switch (flow) { | ||
202 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
203 | dbfs_atomic_inc(&cnt.num_tx_flow_on_ind); | ||
204 | /* Signal a blocked sender that TX flow is on again. */ | ||
205 | SET_TX_FLOW_ON(cf_sk); | ||
206 | wake_up_interruptible(sk_sleep(&cf_sk->sk)); | ||
207 | break; | ||
208 | |||
209 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
210 | dbfs_atomic_inc(&cnt.num_tx_flow_off_ind); | ||
211 | SET_TX_FLOW_OFF(cf_sk); | ||
212 | break; | ||
213 | |||
214 | case CAIF_CTRLCMD_INIT_RSP: | ||
215 | dbfs_atomic_inc(&cnt.num_init_resp); | ||
216 | /* Wake up the process waiting for the connect response. */ | ||
217 | caif_assert(STATE_IS_OPEN(cf_sk)); | ||
218 | SET_PENDING_OFF(cf_sk); | ||
219 | SET_TX_FLOW_ON(cf_sk); | ||
220 | wake_up_interruptible(sk_sleep(&cf_sk->sk)); | ||
221 | break; | ||
222 | |||
223 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
224 | dbfs_atomic_inc(&cnt.num_deinit_resp); | ||
225 | caif_assert(!STATE_IS_OPEN(cf_sk)); | ||
226 | SET_PENDING_OFF(cf_sk); | ||
227 | if (!STATE_IS_PENDING_DESTROY(cf_sk)) { | ||
228 | if (sk_sleep(&cf_sk->sk) != NULL) | ||
229 | wake_up_interruptible(sk_sleep(&cf_sk->sk)); | ||
230 | } | ||
231 | dbfs_atomic_inc(&cnt.num_deinit); | ||
232 | sock_put(&cf_sk->sk); | ||
233 | break; | ||
234 | |||
235 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
236 | dbfs_atomic_inc(&cnt.num_init_fail_resp); | ||
237 | caif_assert(STATE_IS_OPEN(cf_sk)); | ||
238 | SET_STATE_CLOSED(cf_sk); | ||
239 | SET_PENDING_OFF(cf_sk); | ||
240 | SET_TX_FLOW_OFF(cf_sk); | ||
241 | wake_up_interruptible(sk_sleep(&cf_sk->sk)); | ||
242 | break; | ||
243 | |||
244 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
245 | dbfs_atomic_inc(&cnt.num_remote_shutdown_ind); | ||
246 | SET_REMOTE_SHUTDOWN(cf_sk); | ||
247 | /* Use sk_shutdown to indicate remote shutdown indication */ | ||
248 | cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN; | ||
249 | cf_sk->file_mode = 0; | ||
250 | wake_up_interruptible(sk_sleep(&cf_sk->sk)); | ||
251 | break; | ||
252 | |||
253 | default: | ||
254 | pr_debug("CAIF: %s(): Unexpected flow command %d\n", | ||
255 | __func__, flow); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | static void skb_destructor(struct sk_buff *skb) | ||
260 | { | ||
261 | dbfs_atomic_inc(&cnt.skb_free); | ||
262 | dbfs_atomic_dec(&cnt.skb_in_use); | ||
263 | } | ||
264 | |||
265 | |||
266 | static int caif_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
267 | struct msghdr *m, size_t buf_len, int flags) | ||
268 | |||
269 | { | ||
270 | struct sock *sk = sock->sk; | ||
271 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
272 | struct cfpkt *pkt = NULL; | ||
273 | size_t len; | ||
274 | int result; | ||
275 | struct sk_buff *skb; | ||
276 | ssize_t ret = -EIO; | ||
277 | int read_queue_low; | ||
278 | |||
279 | if (cf_sk == NULL) { | ||
280 | pr_debug("CAIF: %s(): private_data not set!\n", | ||
281 | __func__); | ||
282 | ret = -EBADFD; | ||
283 | goto read_error; | ||
284 | } | ||
285 | |||
286 | /* Don't do multiple iovec entries yet */ | ||
287 | if (m->msg_iovlen != 1) | ||
288 | return -EOPNOTSUPP; | ||
289 | |||
290 | if (unlikely(!buf_len)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | lock_sock(&(cf_sk->sk)); | ||
294 | |||
295 | caif_assert(cf_sk->pktq); | ||
296 | |||
297 | if (!STATE_IS_OPEN(cf_sk)) { | ||
298 | /* Socket is closed or closing. */ | ||
299 | if (!STATE_IS_PENDING(cf_sk)) { | ||
300 | pr_debug("CAIF: %s(): socket is closed (by remote)\n", | ||
301 | __func__); | ||
302 | ret = -EPIPE; | ||
303 | } else { | ||
304 | pr_debug("CAIF: %s(): socket is closing..\n", __func__); | ||
305 | ret = -EBADF; | ||
306 | } | ||
307 | goto read_error; | ||
308 | } | ||
309 | /* Socket is open or opening. */ | ||
310 | if (STATE_IS_PENDING(cf_sk)) { | ||
311 | pr_debug("CAIF: %s(): socket is opening...\n", __func__); | ||
312 | |||
313 | if (flags & MSG_DONTWAIT) { | ||
314 | /* We can't block. */ | ||
315 | pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n", | ||
316 | __func__); | ||
317 | ret = -EAGAIN; | ||
318 | goto read_error; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Blocking mode; state is pending and we need to wait | ||
323 | * for its conclusion. | ||
324 | */ | ||
325 | release_sock(&cf_sk->sk); | ||
326 | |||
327 | result = | ||
328 | wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
329 | !STATE_IS_PENDING(cf_sk)); | ||
330 | |||
331 | lock_sock(&(cf_sk->sk)); | ||
332 | |||
333 | if (result == -ERESTARTSYS) { | ||
334 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
335 | " woken by a signal (1)", __func__); | ||
336 | ret = -ERESTARTSYS; | ||
337 | goto read_error; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || | ||
342 | !STATE_IS_OPEN(cf_sk) || | ||
343 | STATE_IS_PENDING(cf_sk)) { | ||
344 | |||
345 | pr_debug("CAIF: %s(): socket closed\n", | ||
346 | __func__); | ||
347 | ret = -ESHUTDOWN; | ||
348 | goto read_error; | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Block if we don't have any received buffers. | ||
353 | * The queue has its own lock. | ||
354 | */ | ||
355 | while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) { | ||
356 | |||
357 | if (flags & MSG_DONTWAIT) { | ||
358 | pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__); | ||
359 | ret = -EAGAIN; | ||
360 | goto read_error; | ||
361 | } | ||
362 | trace_printk("CAIF: %s() wait_event\n", __func__); | ||
363 | |||
364 | /* Let writers in. */ | ||
365 | release_sock(&cf_sk->sk); | ||
366 | |||
367 | /* Block reader until data arrives or socket is closed. */ | ||
368 | if (wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
369 | cfpkt_qpeek(cf_sk->pktq) | ||
370 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
371 | || !STATE_IS_OPEN(cf_sk)) == | ||
372 | -ERESTARTSYS) { | ||
373 | pr_debug("CAIF: %s():" | ||
374 | " wait_event_interruptible woken by " | ||
375 | "a signal, signal_pending(current) = %d\n", | ||
376 | __func__, | ||
377 | signal_pending(current)); | ||
378 | return -ERESTARTSYS; | ||
379 | } | ||
380 | |||
381 | trace_printk("CAIF: %s() awake\n", __func__); | ||
382 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
383 | pr_debug("CAIF: %s(): " | ||
384 | "received remote_shutdown indication\n", | ||
385 | __func__); | ||
386 | ret = -ESHUTDOWN; | ||
387 | goto read_error_no_unlock; | ||
388 | } | ||
389 | |||
390 | /* I want to be alone on cf_sk (except status and queue). */ | ||
391 | lock_sock(&(cf_sk->sk)); | ||
392 | |||
393 | if (!STATE_IS_OPEN(cf_sk)) { | ||
394 | /* Someone closed the link, report error. */ | ||
395 | pr_debug("CAIF: %s(): remote end shutdown!\n", | ||
396 | __func__); | ||
397 | ret = -EPIPE; | ||
398 | goto read_error; | ||
399 | } | ||
400 | } | ||
401 | |||
402 | /* The queue has its own lock. */ | ||
403 | len = cfpkt_getlen(pkt); | ||
404 | |||
405 | /* Check max length that can be copied. */ | ||
406 | if (len <= buf_len) | ||
407 | pkt = cfpkt_dequeue(cf_sk->pktq); | ||
408 | else { | ||
409 | pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n", | ||
410 | __func__, (long) len, (long) buf_len); | ||
411 | if (sock->type == SOCK_SEQPACKET) { | ||
412 | ret = -EMSGSIZE; | ||
413 | goto read_error; | ||
414 | } | ||
415 | len = buf_len; | ||
416 | } | ||
417 | |||
418 | |||
419 | spin_lock(&cf_sk->read_queue_len_lock); | ||
420 | cf_sk->read_queue_len--; | ||
421 | read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW); | ||
422 | spin_unlock(&cf_sk->read_queue_len_lock); | ||
423 | |||
424 | if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) { | ||
425 | dbfs_atomic_inc(&cnt.num_rx_flow_on); | ||
426 | SET_RX_FLOW_ON(cf_sk); | ||
427 | |||
428 | /* Send flow on. */ | ||
429 | pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n", | ||
430 | __func__, cf_sk->read_queue_len); | ||
431 | caif_assert(cf_sk->layer.dn); | ||
432 | caif_assert(cf_sk->layer.dn->modemcmd); | ||
433 | (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, | ||
434 | CAIF_MODEMCMD_FLOW_ON_REQ); | ||
435 | |||
436 | caif_assert(cf_sk->read_queue_len >= 0); | ||
437 | } | ||
438 | |||
439 | skb = cfpkt_tonative(pkt); | ||
440 | result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); | ||
441 | skb_pull(skb, len); | ||
442 | |||
443 | if (result) { | ||
444 | pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__); | ||
445 | cfpkt_destroy(pkt); | ||
446 | ret = -EFAULT; | ||
447 | goto read_error; | ||
448 | } | ||
449 | |||
450 | /* Free packet and remove from queue */ | ||
451 | if (skb->len == 0) | ||
452 | skb_free_datagram(sk, skb); | ||
453 | |||
454 | /* Let the others in. */ | ||
455 | release_sock(&cf_sk->sk); | ||
456 | return len; | ||
457 | |||
458 | read_error: | ||
459 | release_sock(&cf_sk->sk); | ||
460 | read_error_no_unlock: | ||
461 | return ret; | ||
462 | } | ||
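/*
 * Short-read semantics: for SOCK_SEQPACKET a receive buffer smaller
 * than the packet fails with -EMSGSIZE so the record boundary is
 * never silently broken; for SOCK_STREAM the packet stays queued,
 * the copy is truncated to buf_len, and skb_pull() consumes the
 * copied bytes so the remainder is returned by the next read.
 */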
463 | |||
464 | /* Send a signal as a consequence of sendmsg, sendto or caif_sendmsg. */ | ||
465 | static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock, | ||
466 | struct msghdr *msg, size_t len) | ||
467 | { | ||
468 | |||
469 | struct sock *sk = sock->sk; | ||
470 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
471 | size_t payload_size = msg->msg_iov->iov_len; | ||
472 | struct cfpkt *pkt = NULL; | ||
473 | struct caif_payload_info info; | ||
474 | unsigned char *txbuf; | ||
475 | ssize_t ret = -EIO; | ||
476 | int result; | ||
477 | struct sk_buff *skb; | ||
478 | caif_assert(msg->msg_iovlen == 1); | ||
479 | |||
480 | if (cf_sk == NULL) { | ||
481 | pr_debug("CAIF: %s(): private_data not set!\n", | ||
482 | __func__); | ||
483 | ret = -EBADFD; | ||
484 | goto write_error_no_unlock; | ||
485 | } | ||
486 | |||
487 | if (unlikely(msg->msg_iov->iov_base == NULL)) { | ||
488 | pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__); | ||
489 | ret = -EINVAL; | ||
490 | goto write_error_no_unlock; | ||
491 | } | ||
492 | |||
493 | if (payload_size > CAIF_MAX_PAYLOAD_SIZE) { | ||
494 | pr_debug("CAIF: %s(): buffer too long\n", __func__); | ||
495 | if (sock->type == SOCK_SEQPACKET) { | ||
496 | ret = -EINVAL; | ||
497 | goto write_error_no_unlock; | ||
498 | } | ||
499 | payload_size = CAIF_MAX_PAYLOAD_SIZE; | ||
500 | } | ||
501 | |||
502 | /* I want to be alone on cf_sk (except status and queue) */ | ||
503 | lock_sock(&(cf_sk->sk)); | ||
504 | |||
505 | caif_assert(cf_sk->pktq); | ||
506 | |||
507 | if (!STATE_IS_OPEN(cf_sk)) { | ||
508 | /* Socket is closed or closing */ | ||
509 | if (!STATE_IS_PENDING(cf_sk)) { | ||
510 | pr_debug("CAIF: %s(): socket is closed (by remote)\n", | ||
511 | __func__); | ||
512 | ret = -EPIPE; | ||
513 | } else { | ||
514 | pr_debug("CAIF: %s(): socket is closing...\n", | ||
515 | __func__); | ||
516 | ret = -EBADF; | ||
517 | } | ||
518 | goto write_error; | ||
519 | } | ||
520 | |||
521 | /* Socket is open or opening */ | ||
522 | if (STATE_IS_PENDING(cf_sk)) { | ||
523 | pr_debug("CAIF: %s(): socket is opening...\n", __func__); | ||
524 | |||
525 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
526 | /* We can't block */ | ||
527 | trace_printk("CAIF: %s():state pending:" | ||
528 | "state=MSG_DONTWAIT\n", __func__); | ||
529 | ret = -EAGAIN; | ||
530 | goto write_error; | ||
531 | } | ||
532 | /* Let readers in */ | ||
533 | release_sock(&cf_sk->sk); | ||
534 | |||
535 | /* | ||
536 | * Blocking mode; state is pending and we need to wait | ||
537 | * for its conclusion. | ||
538 | */ | ||
539 | result = | ||
540 | wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
541 | !STATE_IS_PENDING(cf_sk)); | ||
542 | /* I want to be alone on cf_sk (except status and queue) */ | ||
543 | lock_sock(&(cf_sk->sk)); | ||
544 | |||
545 | if (result == -ERESTARTSYS) { | ||
546 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
547 | " woken by a signal (1)", __func__); | ||
548 | ret = -ERESTARTSYS; | ||
549 | goto write_error; | ||
550 | } | ||
551 | } | ||
552 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) || | ||
553 | !STATE_IS_OPEN(cf_sk) || | ||
554 | STATE_IS_PENDING(cf_sk)) { | ||
555 | |||
556 | pr_debug("CAIF: %s(): socket closed\n", | ||
557 | __func__); | ||
558 | ret = -ESHUTDOWN; | ||
559 | goto write_error; | ||
560 | } | ||
561 | |||
562 | if (!TX_FLOW_IS_ON(cf_sk)) { | ||
563 | |||
564 | /* Flow is off. Check non-block flag */ | ||
565 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
566 | trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off\n", | ||
567 | __func__); | ||
568 | ret = -EAGAIN; | ||
569 | goto write_error; | ||
570 | } | ||
571 | |||
572 | /* release lock before waiting */ | ||
573 | release_sock(&cf_sk->sk); | ||
574 | |||
575 | /* Wait until flow is on or socket is closed */ | ||
576 | if (wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
577 | TX_FLOW_IS_ON(cf_sk) | ||
578 | || !STATE_IS_OPEN(cf_sk) | ||
579 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
580 | ) == -ERESTARTSYS) { | ||
581 | pr_debug("CAIF: %s():" | ||
582 | " wait_event_interruptible woken by a signal", | ||
583 | __func__); | ||
584 | ret = -ERESTARTSYS; | ||
585 | goto write_error_no_unlock; | ||
586 | } | ||
587 | |||
588 | /* I want to be alone on cf_sk (except status and queue) */ | ||
589 | lock_sock(&(cf_sk->sk)); | ||
590 | |||
591 | if (!STATE_IS_OPEN(cf_sk)) { | ||
592 | /* someone closed the link, report error */ | ||
593 | pr_debug("CAIF: %s(): remote end shutdown!\n", | ||
594 | __func__); | ||
595 | ret = -EPIPE; | ||
596 | goto write_error; | ||
597 | } | ||
598 | |||
599 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
600 | pr_debug("CAIF: %s(): " | ||
601 | "received remote_shutdown indication\n", | ||
602 | __func__); | ||
603 | ret = -ESHUTDOWN; | ||
604 | goto write_error; | ||
605 | } | ||
606 | } | ||
607 | |||
608 | pkt = cfpkt_create(payload_size); | ||
	/* Guard against allocation failure in cfpkt_create(). */
	if (!pkt) {
		ret = -ENOMEM;
		goto write_error;
	}
609 | skb = (struct sk_buff *)pkt; | ||
610 | skb->destructor = skb_destructor; | ||
611 | skb->sk = sk; | ||
612 | dbfs_atomic_inc(&cnt.skb_alloc); | ||
613 | dbfs_atomic_inc(&cnt.skb_in_use); | ||
614 | if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) { | ||
615 | pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__); | ||
616 | cfpkt_destroy(pkt); | ||
617 | ret = -EINVAL; | ||
618 | goto write_error; | ||
619 | } | ||
620 | |||
621 | /* Copy data into buffer. */ | ||
622 | if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) { | ||
623 | pr_debug("CAIF: %s(): copy_from_user returned non zero.\n", | ||
624 | __func__); | ||
625 | cfpkt_destroy(pkt); | ||
626 | ret = -EINVAL; | ||
627 | goto write_error; | ||
628 | } | ||
629 | memset(&info, 0, sizeof(info)); | ||
630 | |||
631 | /* Send the packet down the stack. */ | ||
632 | caif_assert(cf_sk->layer.dn); | ||
633 | caif_assert(cf_sk->layer.dn->transmit); | ||
634 | |||
635 | do { | ||
636 | ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); | ||
637 | |||
638 | if (likely(ret != -EAGAIN)) | ||
639 | break; | ||
640 | |||
641 | /* EAGAIN - retry */ | ||
642 | if (msg->msg_flags & MSG_DONTWAIT) { | ||
643 | pr_debug("CAIF: %s(): NONBLOCK and transmit failed," | ||
644 | " error = %ld\n", __func__, (long) ret); | ||
645 | ret = -EAGAIN; | ||
646 | goto write_error; | ||
647 | } | ||
648 | |||
649 | /* Let readers in */ | ||
650 | release_sock(&cf_sk->sk); | ||
651 | |||
652 | /* Wait until flow is on or socket is closed */ | ||
653 | if (wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
654 | TX_FLOW_IS_ON(cf_sk) | ||
655 | || !STATE_IS_OPEN(cf_sk) | ||
656 | || STATE_IS_REMOTE_SHUTDOWN(cf_sk) | ||
657 | ) == -ERESTARTSYS) { | ||
658 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
659 | " woken by a signal", __func__); | ||
660 | ret = -ERESTARTSYS; | ||
661 | goto write_error_no_unlock; | ||
662 | } | ||
663 | |||
664 | /* I want to be alone on cf_sk (except status and queue) */ | ||
665 | lock_sock(&(cf_sk->sk)); | ||
666 | |||
667 | } while (ret == -EAGAIN); | ||
668 | |||
669 | if (ret < 0) { | ||
670 | cfpkt_destroy(pkt); | ||
671 | pr_debug("CAIF: %s(): transmit failed, error = %ld\n", | ||
672 | __func__, (long) ret); | ||
673 | |||
674 | goto write_error; | ||
675 | } | ||
676 | |||
677 | release_sock(&cf_sk->sk); | ||
678 | return payload_size; | ||
679 | |||
680 | write_error: | ||
681 | release_sock(&cf_sk->sk); | ||
682 | write_error_no_unlock: | ||
683 | return ret; | ||
684 | } | ||
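/*
 * Transmit summary: layer.dn->transmit() returns -EAGAIN while TX
 * flow is off. Non-blocking senders see -EAGAIN directly; blocking
 * senders drop the socket lock, sleep until flow resumes or the
 * channel closes, and then retry the same packet.
 */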
685 | |||
686 | static unsigned int caif_poll(struct file *file, struct socket *sock, | ||
687 | poll_table *wait) | ||
688 | { | ||
689 | struct sock *sk = sock->sk; | ||
690 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
691 | u32 mask = 0; | ||
692 | poll_wait(file, sk_sleep(sk), wait); | ||
693 | lock_sock(&(cf_sk->sk)); | ||
694 | if (!STATE_IS_OPEN(cf_sk)) { | ||
695 | if (!STATE_IS_PENDING(cf_sk)) | ||
696 | mask |= POLLHUP; | ||
697 | } else { | ||
698 | if (cfpkt_qpeek(cf_sk->pktq) != NULL) | ||
699 | mask |= (POLLIN | POLLRDNORM); | ||
700 | if (TX_FLOW_IS_ON(cf_sk)) | ||
701 | mask |= (POLLOUT | POLLWRNORM); | ||
702 | } | ||
703 | release_sock(&cf_sk->sk); | ||
704 | trace_printk("CAIF: %s(): poll mask=0x%04x\n", | ||
705 | __func__, mask); | ||
706 | return mask; | ||
707 | } | ||
708 | |||
709 | static void drain_queue(struct caifsock *cf_sk) | ||
710 | { | ||
711 | struct cfpkt *pkt = NULL; | ||
712 | |||
713 | /* Empty the queue */ | ||
714 | do { | ||
715 | /* The queue has its own lock */ | ||
716 | if (!cf_sk->pktq) | ||
717 | break; | ||
718 | |||
719 | pkt = cfpkt_dequeue(cf_sk->pktq); | ||
720 | if (!pkt) | ||
721 | break; | ||
722 | pr_debug("CAIF: %s(): freeing packet from read queue\n", | ||
723 | __func__); | ||
724 | cfpkt_destroy(pkt); | ||
725 | |||
726 | } while (1); | ||
727 | |||
728 | cf_sk->read_queue_len = 0; | ||
729 | } | ||
730 | |||
731 | static int setsockopt(struct socket *sock, | ||
732 | int lvl, int opt, char __user *ov, unsigned int ol) | ||
733 | { | ||
734 | struct sock *sk = sock->sk; | ||
735 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | ||
736 | int prio, linksel; | ||
737 | struct ifreq ifreq; | ||
738 | |||
739 | if (STATE_IS_OPEN(cf_sk)) { | ||
740 | pr_debug("CAIF: %s(): setsockopt " | ||
741 | "cannot be done on a connected socket\n", | ||
742 | __func__); | ||
743 | return -ENOPROTOOPT; | ||
744 | } | ||
745 | switch (opt) { | ||
746 | case CAIFSO_LINK_SELECT: | ||
747 | if (ol < sizeof(int)) { | ||
748 | pr_debug("CAIF: %s(): setsockopt" | ||
749 | " CAIFSO_CHANNEL_CONFIG bad size\n", __func__); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | if (lvl != SOL_CAIF) | ||
753 | goto bad_sol; | ||
754 | if (copy_from_user(&linksel, ov, sizeof(int))) | ||
755 | return -EFAULT; | ||
756 | lock_sock(&(cf_sk->sk)); | ||
757 | cf_sk->conn_req.link_selector = linksel; | ||
758 | release_sock(&cf_sk->sk); | ||
759 | return 0; | ||
760 | |||
761 | case SO_PRIORITY: | ||
762 | if (lvl != SOL_SOCKET) | ||
763 | goto bad_sol; | ||
764 | if (ol < sizeof(int)) { | ||
765 | pr_debug("CAIF: %s(): setsockopt" | ||
766 | " SO_PRIORITY bad size\n", __func__); | ||
767 | return -EINVAL; | ||
768 | } | ||
769 | if (copy_from_user(&prio, ov, sizeof(int))) | ||
770 | return -EFAULT; | ||
771 | lock_sock(&(cf_sk->sk)); | ||
772 | cf_sk->conn_req.priority = prio; | ||
773 | pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__, | ||
774 | cf_sk->conn_req.priority); | ||
775 | release_sock(&cf_sk->sk); | ||
776 | return 0; | ||
777 | |||
778 | case SO_BINDTODEVICE: | ||
779 | if (lvl != SOL_SOCKET) | ||
780 | goto bad_sol; | ||
781 | if (ol < sizeof(struct ifreq)) { | ||
782 | pr_debug("CAIF: %s(): setsockopt" | ||
783 | " SO_PRIORITY bad size\n", __func__); | ||
784 | return -EINVAL; | ||
785 | } | ||
786 | if (copy_from_user(&ifreq, ov, sizeof(ifreq))) | ||
787 | return -EFAULT; | ||
788 | lock_sock(&(cf_sk->sk)); | ||
789 | strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name, | ||
790 | sizeof(cf_sk->conn_req.link_name)); | ||
791 | cf_sk->conn_req.link_name | ||
792 | [sizeof(cf_sk->conn_req.link_name)-1] = 0; | ||
793 | release_sock(&cf_sk->sk); | ||
794 | return 0; | ||
795 | |||
796 | case CAIFSO_REQ_PARAM: | ||
797 | if (lvl != SOL_CAIF) | ||
798 | goto bad_sol; | ||
799 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) | ||
800 | return -ENOPROTOOPT; | ||
801 | if (ol > sizeof(cf_sk->conn_req.param.data)) | ||
802 | goto req_param_bad_size; | ||
803 | |||
804 | lock_sock(&(cf_sk->sk)); | ||
805 | cf_sk->conn_req.param.size = ol; | ||
806 | if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { | ||
807 | release_sock(&cf_sk->sk); | ||
808 | req_param_bad_size: | ||
809 | pr_debug("CAIF: %s(): setsockopt" | ||
810 | " CAIFSO_CHANNEL_CONFIG bad size\n", __func__); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | |||
814 | release_sock(&cf_sk->sk); | ||
815 | return 0; | ||
816 | |||
817 | default: | ||
818 | pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt); | ||
819 | return -EINVAL; | ||
820 | } | ||
821 | |||
822 | return 0; | ||
823 | bad_sol: | ||
824 | pr_debug("CAIF: %s(): setsockopt bad level\n", __func__); | ||
825 | return -ENOPROTOOPT; | ||
826 | |||
827 | } | ||
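/*
 * Hedged usage sketch, not part of the patch: selecting the
 * low-latency link from userspace before connect(), matching the
 * CAIFSO_LINK_SELECT case above. It assumes SOL_CAIF,
 * CAIFSO_LINK_SELECT and CAIF_LINK_LOW_LATENCY are provided by
 * linux/caif/caif_socket.h; includes and error handling are elided.
 */
static int caif_select_low_latency(int fd)
{
	int linksel = CAIF_LINK_LOW_LATENCY;

	return setsockopt(fd, SOL_CAIF, CAIFSO_LINK_SELECT,
			  &linksel, sizeof(linksel));
}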
828 | |||
829 | static int caif_connect(struct socket *sock, struct sockaddr *uservaddr, | ||
830 | int sockaddr_len, int flags) | ||
831 | { | ||
832 | struct caifsock *cf_sk = NULL; | ||
833 | int result = -1; | ||
834 | int mode = 0; | ||
835 | int ret = -EIO; | ||
836 | struct sock *sk = sock->sk; | ||
837 | BUG_ON(sk == NULL); | ||
838 | |||
839 | cf_sk = container_of(sk, struct caifsock, sk); | ||
840 | |||
841 | trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n", | ||
842 | __func__, cf_sk, | ||
843 | STATE_IS_OPEN(cf_sk), | ||
844 | TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk)); | ||
845 | |||
846 | |||
847 | if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM) | ||
848 | sock->state = SS_CONNECTING; | ||
849 | else | ||
850 | goto out; | ||
851 | |||
852 | /* I want to be alone on cf_sk (except status and queue) */ | ||
853 | lock_sock(&(cf_sk->sk)); | ||
854 | |||
855 | if (sockaddr_len != sizeof(struct sockaddr_caif)) { | ||
856 | pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n", | ||
857 | __func__, (long) sockaddr_len, | ||
858 | (long unsigned) sizeof(struct sockaddr_caif)); | ||
859 | ret = -EINVAL; | ||
860 | goto open_error; | ||
861 | } | ||
862 | |||
863 | if (uservaddr->sa_family != AF_CAIF) { | ||
864 | pr_debug("CAIF: %s(): Bad address family (%d)\n", | ||
865 | __func__, uservaddr->sa_family); | ||
866 | ret = -EAFNOSUPPORT; | ||
867 | goto open_error; | ||
868 | } | ||
869 | |||
870 | memcpy(&cf_sk->conn_req.sockaddr, uservaddr, | ||
871 | sizeof(struct sockaddr_caif)); | ||
872 | |||
873 | dbfs_atomic_inc(&cnt.num_open); | ||
874 | mode = SKT_READ_FLAG | SKT_WRITE_FLAG; | ||
875 | |||
876 | /* If socket is not open, make sure socket is in fully closed state */ | ||
877 | if (!STATE_IS_OPEN(cf_sk)) { | ||
878 | /* Has link close response been received (if we ever sent it)?*/ | ||
879 | if (STATE_IS_PENDING(cf_sk)) { | ||
880 | /* | ||
881 | * Still waiting for close response from remote. | ||
882 | * If opened non-blocking, report "would block" | ||
883 | */ | ||
884 | if (flags & O_NONBLOCK) { | ||
885 | pr_debug("CAIF: %s(): O_NONBLOCK" | ||
886 | " && close pending\n", __func__); | ||
887 | ret = -EAGAIN; | ||
888 | goto open_error; | ||
889 | } | ||
890 | |||
891 | pr_debug("CAIF: %s(): Wait for close response" | ||
892 | " from remote...\n", __func__); | ||
893 | |||
894 | release_sock(&cf_sk->sk); | ||
895 | |||
896 | /* | ||
897 | * Blocking mode; close is pending and we need to wait | ||
898 | * for its conclusion. | ||
899 | */ | ||
900 | result = | ||
901 | wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
902 | !STATE_IS_PENDING(cf_sk)); | ||
903 | |||
904 | lock_sock(&(cf_sk->sk)); | ||
905 | if (result == -ERESTARTSYS) { | ||
906 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
907 | "woken by a signal (1)", __func__); | ||
908 | ret = -ERESTARTSYS; | ||
909 | goto open_error; | ||
910 | } | ||
911 | } | ||
912 | } | ||
913 | |||
914 | /* socket is now either closed, pending open or open */ | ||
915 | if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { | ||
916 | /* Open */ | ||
917 | pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)" | ||
918 | " check access f_flags = 0x%x file_mode = 0x%x\n", | ||
919 | __func__, cf_sk, mode, cf_sk->file_mode); | ||
920 | |||
921 | } else { | ||
922 | /* We are closed or pending open. | ||
923 | * If closed: send link setup | ||
924 | * If pending open: link setup already sent (we could have been | ||
925 | * interrupted by a signal last time) | ||
926 | */ | ||
927 | if (!STATE_IS_OPEN(cf_sk)) { | ||
928 | /* First opening of file; connect lower layers: */ | ||
929 | /* Drain queue (very unlikely) */ | ||
930 | drain_queue(cf_sk); | ||
931 | |||
932 | cf_sk->layer.receive = caif_sktrecv_cb; | ||
933 | SET_STATE_OPEN(cf_sk); | ||
934 | SET_PENDING_ON(cf_sk); | ||
935 | |||
936 | /* Register this channel. */ | ||
937 | result = | ||
938 | caif_connect_client(&cf_sk->conn_req, | ||
939 | &cf_sk->layer); | ||
940 | if (result < 0) { | ||
941 | pr_debug("CAIF: %s(): can't register channel\n", | ||
942 | __func__); | ||
943 | ret = -EIO; | ||
944 | SET_STATE_CLOSED(cf_sk); | ||
945 | SET_PENDING_OFF(cf_sk); | ||
946 | goto open_error; | ||
947 | } | ||
948 | dbfs_atomic_inc(&cnt.num_init); | ||
949 | } | ||
950 | |||
951 | /* If opened non-blocking, report "success". | ||
952 | */ | ||
953 | if (flags & O_NONBLOCK) { | ||
954 | pr_debug("CAIF: %s(): O_NONBLOCK success\n", | ||
955 | __func__); | ||
956 | ret = -EINPROGRESS; | ||
957 | cf_sk->sk.sk_err = EINPROGRESS; /* sk_err holds a positive errno */ | ||
958 | goto open_error; | ||
959 | } | ||
960 | |||
961 | trace_printk("CAIF: %s(): Wait for connect response\n", | ||
962 | __func__); | ||
963 | |||
964 | /* release lock before waiting */ | ||
965 | release_sock(&cf_sk->sk); | ||
966 | |||
967 | result = | ||
968 | wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
969 | !STATE_IS_PENDING(cf_sk)); | ||
970 | |||
971 | lock_sock(&(cf_sk->sk)); | ||
972 | |||
973 | if (result == -ERESTARTSYS) { | ||
974 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
975 | "woken by a signal (2)", __func__); | ||
976 | ret = -ERESTARTSYS; | ||
977 | goto open_error; | ||
978 | } | ||
979 | |||
980 | if (!STATE_IS_OPEN(cf_sk)) { | ||
981 | /* Lower layers said "no" */ | ||
982 | pr_debug("CAIF: %s(): Closed received\n", __func__); | ||
983 | ret = -EPIPE; | ||
984 | goto open_error; | ||
985 | } | ||
986 | |||
987 | trace_printk("CAIF: %s(): Connect received\n", __func__); | ||
988 | } | ||
989 | /* Open is ok */ | ||
990 | cf_sk->file_mode |= mode; | ||
991 | |||
992 | trace_printk("CAIF: %s(): Connected - file mode = %x\n", | ||
993 | __func__, cf_sk->file_mode); | ||
994 | |||
995 | release_sock(&cf_sk->sk); | ||
996 | return 0; | ||
997 | open_error: | ||
998 | sock->state = SS_UNCONNECTED; | ||
999 | release_sock(&cf_sk->sk); | ||
1000 | out: | ||
1001 | return ret; | ||
1002 | } | ||
1003 | |||
1004 | static int caif_shutdown(struct socket *sock, int how) | ||
1005 | { | ||
1006 | struct caifsock *cf_sk = NULL; | ||
1007 | int result = 0; | ||
1008 | int tx_flow_state_was_on; | ||
1009 | struct sock *sk = sock->sk; | ||
1010 | |||
1011 | trace_printk("CAIF: %s(): enter\n", __func__); | ||
1012 | pr_debug("f_flags=%x\n", sock->file->f_flags); | ||
1013 | |||
1014 | if (how != SHUT_RDWR) | ||
1015 | return -EOPNOTSUPP; | ||
1016 | |||
1017 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1018 | if (cf_sk == NULL) { | ||
1019 | pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__); | ||
1020 | return -EBADF; | ||
1021 | } | ||
1022 | |||
1023 | /* I want to be alone on cf_sk (except status queue) */ | ||
1024 | lock_sock(&(cf_sk->sk)); | ||
1025 | sock_hold(&cf_sk->sk); | ||
1026 | |||
1027 | /* IS_CLOSED has a double meaning: | ||
1028 | * 1) Spontaneous remote shutdown request. | ||
1029 | * 2) Ack on a channel teardown (disconnect). | ||
1030 | * Must clear the bit in case we previously received | ||
1031 | * a remote shutdown request. | ||
1032 | */ | ||
1033 | if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) { | ||
1034 | SET_STATE_CLOSED(cf_sk); | ||
1035 | SET_PENDING_ON(cf_sk); | ||
1036 | tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk); | ||
1037 | SET_TX_FLOW_OFF(cf_sk); | ||
1038 | |||
1039 | /* Hold the socket until DEINIT_RSP is received */ | ||
1040 | sock_hold(&cf_sk->sk); | ||
1041 | result = caif_disconnect_client(&cf_sk->layer); | ||
1042 | |||
1043 | if (result < 0) { | ||
1044 | pr_debug("CAIF: %s(): " | ||
1045 | "caif_disconnect_client() failed\n", | ||
1046 | __func__); | ||
1047 | SET_STATE_CLOSED(cf_sk); | ||
1048 | SET_PENDING_OFF(cf_sk); | ||
1049 | SET_TX_FLOW_OFF(cf_sk); | ||
1050 | release_sock(&cf_sk->sk); | ||
1051 | sock_put(&cf_sk->sk); | ||
1052 | return -EIO; | ||
1053 | } | ||
1054 | |||
1055 | } | ||
1056 | if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) { | ||
1057 | SET_PENDING_OFF(cf_sk); | ||
1058 | SET_REMOTE_SHUTDOWN_OFF(cf_sk); | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Socket is no longer in state pending close, | ||
1063 | * and we can release the reference. | ||
1064 | */ | ||
1065 | |||
1066 | dbfs_atomic_inc(&cnt.num_close); | ||
1067 | drain_queue(cf_sk); | ||
1068 | SET_RX_FLOW_ON(cf_sk); | ||
1069 | cf_sk->file_mode = 0; | ||
1070 | sock_put(&cf_sk->sk); | ||
1071 | release_sock(&cf_sk->sk); | ||
1072 | if (!result && (sock->file->f_flags & O_NONBLOCK)) { | ||
1073 | pr_debug("nonblocking shutdown returing -EAGAIN\n"); | ||
1074 | return -EAGAIN; | ||
1075 | } else | ||
1076 | return result; | ||
1077 | } | ||
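/*
 * Reference counting in caif_shutdown(): the first sock_hold() pairs
 * with the sock_put() just before release_sock(); the second one,
 * taken before caif_disconnect_client(), keeps the socket alive
 * until the CAIF_CTRLCMD_DEINIT_RSP callback drops it in
 * caif_sktflowctrl_cb().
 */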
1078 | |||
1079 | static ssize_t caif_sock_no_sendpage(struct socket *sock, | ||
1080 | struct page *page, | ||
1081 | int offset, size_t size, int flags) | ||
1082 | { | ||
1083 | return -EOPNOTSUPP; | ||
1084 | } | ||
1085 | |||
1086 | /* This function is called as part of close. */ | ||
1087 | static int caif_release(struct socket *sock) | ||
1088 | { | ||
1089 | struct sock *sk = sock->sk; | ||
1090 | struct caifsock *cf_sk = NULL; | ||
1091 | int res; | ||
1092 | caif_assert(sk != NULL); | ||
1093 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1094 | |||
1095 | if (cf_sk->debugfs_socket_dir != NULL) | ||
1096 | debugfs_remove_recursive(cf_sk->debugfs_socket_dir); | ||
1097 | |||
1098 | res = caif_shutdown(sock, SHUT_RDWR); | ||
1099 | if (res && res != -EINPROGRESS) | ||
1100 | return res; | ||
1101 | |||
1102 | /* | ||
1103 | * FIXME: Shutdown should probably be possible to do async | ||
1104 | * without flushing queues, allowing reception of frames while | ||
1105 | * waiting for DEINIT_IND. | ||
1106 | * Release should always block, to allow secure decoupling of | ||
1107 | * CAIF stack. | ||
1108 | */ | ||
1109 | if (!(sock->file->f_flags & O_NONBLOCK)) { | ||
1110 | res = wait_event_interruptible(*sk_sleep(&cf_sk->sk), | ||
1111 | !STATE_IS_PENDING(cf_sk)); | ||
1112 | |||
1113 | if (res == -ERESTARTSYS) { | ||
1114 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
1115 | "woken by a signal (1)", __func__); | ||
1116 | } | ||
1117 | } | ||
1118 | lock_sock(&(cf_sk->sk)); | ||
1119 | |||
1120 | sock->sk = NULL; | ||
1121 | |||
1122 | /* Detach the socket from its process context by making it orphan. */ | ||
1123 | sock_orphan(sk); | ||
1124 | |||
1125 | /* | ||
1126 | * Setting SHUTDOWN_MASK means that both send and receive are shutdown | ||
1127 | * for the socket. | ||
1128 | */ | ||
1129 | sk->sk_shutdown = SHUTDOWN_MASK; | ||
1130 | |||
1131 | /* | ||
1132 | * Set the socket state to closed, the TCP_CLOSE macro is used when | ||
1133 | * closing any socket. | ||
1134 | */ | ||
1135 | |||
1136 | /* Flush out this socket's receive queue. */ | ||
1137 | drain_queue(cf_sk); | ||
1138 | |||
1139 | /* Finally release the socket. */ | ||
1140 | SET_STATE_PENDING_DESTROY(cf_sk); | ||
1141 | |||
1142 | release_sock(&cf_sk->sk); | ||
1143 | |||
1144 | sock_put(sk); | ||
1145 | |||
1146 | /* | ||
1147 | * The rest of the cleanup will be handled from the | ||
1148 | * caif_sock_destructor | ||
1149 | */ | ||
1150 | return res; | ||
1151 | } | ||
1152 | |||
1153 | static const struct proto_ops caif_ops = { | ||
1154 | .family = PF_CAIF, | ||
1155 | .owner = THIS_MODULE, | ||
1156 | .release = caif_release, | ||
1157 | .bind = sock_no_bind, | ||
1158 | .connect = caif_connect, | ||
1159 | .socketpair = sock_no_socketpair, | ||
1160 | .accept = sock_no_accept, | ||
1161 | .getname = sock_no_getname, | ||
1162 | .poll = caif_poll, | ||
1163 | .ioctl = sock_no_ioctl, | ||
1164 | .listen = sock_no_listen, | ||
1165 | .shutdown = caif_shutdown, | ||
1166 | .setsockopt = setsockopt, | ||
1167 | .getsockopt = sock_no_getsockopt, | ||
1168 | .sendmsg = caif_sendmsg, | ||
1169 | .recvmsg = caif_recvmsg, | ||
1170 | .mmap = sock_no_mmap, | ||
1171 | .sendpage = caif_sock_no_sendpage, | ||
1172 | }; | ||
1173 | |||
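/*
 * Hedged userspace sketch, not part of the patch: opening a CAIF
 * channel through the proto_ops above. AF_CAIF, CAIFPROTO_DATAGRAM
 * and struct sockaddr_caif (with its "family" field) are assumed to
 * come from linux/caif/caif_socket.h; the service-specific address
 * fields and the usual includes are omitted. With O_NONBLOCK,
 * connect() fails with errno EINPROGRESS, as caif_connect() returns
 * -EINPROGRESS for non-blocking opens.
 */
static int caif_open_channel(void)
{
	struct sockaddr_caif addr;
	int fd;

	fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DATAGRAM);
	if (fd < 0)
		return fd;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	/* Fill in the service-specific part of the address here. */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}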
1174 | /* This function is called when a socket is finally destroyed. */ | ||
1175 | static void caif_sock_destructor(struct sock *sk) | ||
1176 | { | ||
1177 | struct caifsock *cf_sk = NULL; | ||
1178 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1179 | /* Error checks. */ | ||
1180 | caif_assert(!atomic_read(&sk->sk_wmem_alloc)); | ||
1181 | caif_assert(sk_unhashed(sk)); | ||
1182 | caif_assert(!sk->sk_socket); | ||
1183 | if (!sock_flag(sk, SOCK_DEAD)) { | ||
1184 | pr_debug("CAIF: %s(): 0x%p", __func__, sk); | ||
1185 | return; | ||
1186 | } | ||
1187 | |||
1188 | if (STATE_IS_OPEN(cf_sk)) { | ||
1189 | pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)" | ||
1190 | " file_mode = 0x%x\n", __func__, | ||
1191 | cf_sk, cf_sk->file_mode); | ||
1192 | return; | ||
1193 | } | ||
1194 | drain_queue(cf_sk); | ||
1195 | kfree(cf_sk->pktq); | ||
1196 | |||
1197 | trace_printk("CAIF: %s(): Removing socket %s\n", | ||
1198 | __func__, cf_sk->name); | ||
1199 | atomic_dec(&caif_nr_socks); | ||
1200 | } | ||
1201 | |||
1202 | static int caif_create(struct net *net, struct socket *sock, int protocol, | ||
1203 | int kern) | ||
1204 | { | ||
1205 | struct sock *sk = NULL; | ||
1206 | struct caifsock *cf_sk = NULL; | ||
1207 | int result = 0; | ||
1208 | static struct proto prot = {.name = "PF_CAIF", | ||
1209 | .owner = THIS_MODULE, | ||
1210 | .obj_size = sizeof(struct caifsock), | ||
1211 | }; | ||
1212 | |||
1213 | /* | ||
1214 | * The sock->type specifies the socket type to use. | ||
1215 | * In SEQPACKET mode packet boundaries are enforced. | ||
1216 | */ | ||
1217 | if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) | ||
1218 | return -ESOCKTNOSUPPORT; | ||
1219 | |||
1220 | if (net != &init_net) | ||
1221 | return -EAFNOSUPPORT; | ||
1222 | |||
1223 | if (protocol < 0 || protocol >= CAIFPROTO_MAX) | ||
1224 | return -EPROTONOSUPPORT; | ||
1225 | /* | ||
1226 | * Set the socket state to unconnected. The socket state is not | ||
1227 | * really used by net/core or socket.c, but the | ||
1228 | * initialization makes sure that sock->state is not uninitialized. | ||
1229 | */ | ||
1230 | sock->state = SS_UNCONNECTED; | ||
1231 | |||
1232 | sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); | ||
1233 | if (!sk) | ||
1234 | return -ENOMEM; | ||
1235 | |||
1236 | cf_sk = container_of(sk, struct caifsock, sk); | ||
1237 | |||
1238 | /* Store the protocol */ | ||
1239 | sk->sk_protocol = (unsigned char) protocol; | ||
1240 | |||
1241 | spin_lock_init(&cf_sk->read_queue_len_lock); | ||
1242 | |||
1243 | /* Fill in some information concerning the misc socket. */ | ||
1244 | snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d", | ||
1245 | atomic_read(&caif_nr_socks)); | ||
1246 | |||
1247 | /* | ||
1248 | * Lock in order to try to stop someone from opening the socket | ||
1249 | * too early. | ||
1250 | */ | ||
1251 | lock_sock(&(cf_sk->sk)); | ||
1252 | |||
1253 | /* Initialize the nonzero default sock structure data. */ | ||
1254 | sock_init_data(sock, sk); | ||
1255 | sock->ops = &caif_ops; | ||
1256 | sk->sk_destruct = caif_sock_destructor; | ||
1257 | sk->sk_sndbuf = caif_sockbuf_size; | ||
1258 | sk->sk_rcvbuf = caif_sockbuf_size; | ||
1259 | |||
1260 | cf_sk->pktq = cfpktq_create(); | ||
1261 | |||
1262 | if (!cf_sk->pktq) { | ||
1263 | pr_err("CAIF: %s(): queue create failed.\n", __func__); | ||
1264 | result = -ENOMEM; | ||
1265 | release_sock(&cf_sk->sk); | ||
1266 | goto err_failed; | ||
1267 | } | ||
1268 | cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb; | ||
1269 | SET_STATE_CLOSED(cf_sk); | ||
1270 | SET_PENDING_OFF(cf_sk); | ||
1271 | SET_TX_FLOW_OFF(cf_sk); | ||
1272 | SET_RX_FLOW_ON(cf_sk); | ||
1273 | |||
1274 | /* Set default options on configuration */ | ||
1275 | cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; | ||
1276 | cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; | ||
1277 | cf_sk->conn_req.protocol = protocol; | ||
1278 | /* Increase the number of sockets created. */ | ||
1279 | atomic_inc(&caif_nr_socks); | ||
1280 | if (!IS_ERR(debugfsdir)) { | ||
1281 | cf_sk->debugfs_socket_dir = | ||
1282 | debugfs_create_dir(cf_sk->name, debugfsdir); | ||
1283 | debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR, | ||
1284 | cf_sk->debugfs_socket_dir, &cf_sk->conn_state); | ||
1285 | debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR, | ||
1286 | cf_sk->debugfs_socket_dir, &cf_sk->flow_state); | ||
1287 | debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR, | ||
1288 | cf_sk->debugfs_socket_dir, | ||
1289 | (u32 *) &cf_sk->read_queue_len); | ||
1290 | debugfs_create_u32("identity", S_IRUSR | S_IWUSR, | ||
1291 | cf_sk->debugfs_socket_dir, | ||
1292 | (u32 *) &cf_sk->layer.id); | ||
1293 | } | ||
1294 | release_sock(&cf_sk->sk); | ||
1295 | return 0; | ||
1296 | err_failed: | ||
1297 | sk_free(sk); | ||
1298 | return result; | ||
1299 | } | ||
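/*
 * Each socket gets its own debugfs directory (named cf_sk<N> from
 * the caif_nr_socks counter) under the chnl_skt/ directory created
 * at module init, exposing conn_state, flow_state, read_queue_len
 * and the CAIF layer id.
 */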
1300 | |||
1301 | static struct net_proto_family caif_family_ops = { | ||
1302 | .family = PF_CAIF, | ||
1303 | .create = caif_create, | ||
1304 | .owner = THIS_MODULE, | ||
1305 | }; | ||
1306 | |||
1307 | static int af_caif_init(void) | ||
1308 | { | ||
1309 | int err; | ||
1310 | err = sock_register(&caif_family_ops); | ||
1311 | |||
1312 | if (err) | ||
1313 | return err; | ||
1314 | |||
1315 | return 0; | ||
1316 | } | ||
1317 | |||
1318 | static int __init caif_sktinit_module(void) | ||
1319 | { | ||
1320 | int stat; | ||
1321 | #ifdef CONFIG_DEBUG_FS | ||
1322 | debugfsdir = debugfs_create_dir("chnl_skt", NULL); | ||
1323 | if (!IS_ERR(debugfsdir)) { | ||
1324 | debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR, | ||
1325 | debugfsdir, | ||
1326 | (u32 *) &cnt.skb_in_use); | ||
1327 | debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR, | ||
1328 | debugfsdir, | ||
1329 | (u32 *) &cnt.skb_alloc); | ||
1330 | debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR, | ||
1331 | debugfsdir, | ||
1332 | (u32 *) &cnt.skb_free); | ||
1333 | debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, | ||
1334 | debugfsdir, | ||
1335 | (u32 *) &caif_nr_socks); | ||
1336 | debugfs_create_u32("num_open", S_IRUSR | S_IWUSR, | ||
1337 | debugfsdir, | ||
1338 | (u32 *) &cnt.num_open); | ||
1339 | debugfs_create_u32("num_close", S_IRUSR | S_IWUSR, | ||
1340 | debugfsdir, | ||
1341 | (u32 *) &cnt.num_close); | ||
1342 | debugfs_create_u32("num_init", S_IRUSR | S_IWUSR, | ||
1343 | debugfsdir, | ||
1344 | (u32 *) &cnt.num_init); | ||
1345 | debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR, | ||
1346 | debugfsdir, | ||
1347 | (u32 *) &cnt.num_init_resp); | ||
1348 | debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR, | ||
1349 | debugfsdir, | ||
1350 | (u32 *) &cnt.num_init_fail_resp); | ||
1351 | debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR, | ||
1352 | debugfsdir, | ||
1353 | (u32 *) &cnt.num_deinit); | ||
1354 | debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR, | ||
1355 | debugfsdir, | ||
1356 | (u32 *) &cnt.num_deinit_resp); | ||
1357 | debugfs_create_u32("num_remote_shutdown_ind", | ||
1358 | S_IRUSR | S_IWUSR, debugfsdir, | ||
1359 | (u32 *) &cnt.num_remote_shutdown_ind); | ||
1360 | debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR, | ||
1361 | debugfsdir, | ||
1362 | (u32 *) &cnt.num_tx_flow_off_ind); | ||
1363 | debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR, | ||
1364 | debugfsdir, | ||
1365 | (u32 *) &cnt.num_tx_flow_on_ind); | ||
1366 | debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR, | ||
1367 | debugfsdir, | ||
1368 | (u32 *) &cnt.num_rx_flow_off); | ||
1369 | debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR, | ||
1370 | debugfsdir, | ||
1371 | (u32 *) &cnt.num_rx_flow_on); | ||
1372 | } | ||
1373 | #endif | ||
1374 | stat = af_caif_init(); | ||
1375 | if (stat) { | ||
1376 | pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.", | ||
1377 | __func__); | ||
1378 | return stat; | ||
1379 | } | ||
1380 | return 0; | ||
1381 | } | ||
1382 | |||
1383 | static void __exit caif_sktexit_module(void) | ||
1384 | { | ||
1385 | sock_unregister(PF_CAIF); | ||
1386 | if (debugfsdir != NULL) | ||
1387 | debugfs_remove_recursive(debugfsdir); | ||
1388 | } | ||
1389 | |||
1390 | module_init(caif_sktinit_module); | ||
1391 | module_exit(caif_sktexit_module); | ||
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c new file mode 100644 index 000000000000..c873e3d4387c --- /dev/null +++ b/net/caif/cfcnfg.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | #include <net/caif/cfcnfg.h> | ||
12 | #include <net/caif/cfctrl.h> | ||
13 | #include <net/caif/cfmuxl.h> | ||
14 | #include <net/caif/cffrml.h> | ||
15 | #include <net/caif/cfserl.h> | ||
16 | #include <net/caif/cfsrvl.h> | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | #define MAX_PHY_LAYERS 7 | ||
22 | #define PHY_NAME_LEN 20 | ||
23 | |||
24 | #define container_obj(layr) container_of(layr, struct cfcnfg, layer) | ||
25 | |||
26 | /* Information about CAIF physical interfaces held by Config Module in order | ||
27 | * to manage physical interfaces | ||
28 | */ | ||
29 | struct cfcnfg_phyinfo { | ||
30 | /* Pointer to the layer below the MUX (framing layer) */ | ||
31 | struct cflayer *frm_layer; | ||
32 | /* Pointer to the lowest actual physical layer */ | ||
33 | struct cflayer *phy_layer; | ||
34 | /* Unique identifier of the physical interface */ | ||
35 | unsigned int id; | ||
36 | /* Preference of the physical interface */ | ||
37 | enum cfcnfg_phy_preference pref; | ||
38 | |||
39 | /* Reference count, number of channels using the device */ | ||
40 | int phy_ref_count; | ||
41 | |||
42 | /* Information about the physical device */ | ||
43 | struct dev_info dev_info; | ||
44 | }; | ||
45 | |||
46 | struct cfcnfg { | ||
47 | struct cflayer layer; | ||
48 | struct cflayer *ctrl; | ||
49 | struct cflayer *mux; | ||
50 | u8 last_phyid; | ||
51 | struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS]; | ||
52 | }; | ||
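/*
 * phy_layers is a fixed table indexed by physical-interface id; a
 * slot is in use when its frm_layer pointer is non-NULL. The
 * preference lookups in cfcnfg_get_phyid() start at index 1, and
 * cncfg_linkup_rsp() asserts phyid != 0: channel id 0 is reserved
 * for the control layer via cfmuxl_set_uplayer(mux, ctrl, 0).
 */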
53 | |||
54 | static void cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, | ||
55 | enum cfctrl_srv serv, u8 phyid, | ||
56 | struct cflayer *adapt_layer); | ||
57 | static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, | ||
58 | struct cflayer *client_layer); | ||
59 | static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, | ||
60 | struct cflayer *adapt_layer); | ||
61 | static void cfctrl_resp_func(void); | ||
62 | static void cfctrl_enum_resp(void); | ||
63 | |||
64 | struct cfcnfg *cfcnfg_create(void) | ||
65 | { | ||
66 | struct cfcnfg *this; | ||
67 | struct cfctrl_rsp *resp; | ||
68 | /* Initiate this layer */ | ||
69 | this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); | ||
70 | if (!this) { | ||
71 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
72 | return NULL; | ||
73 | } | ||
75 | this->mux = cfmuxl_create(); | ||
76 | if (!this->mux) | ||
77 | goto out_of_mem; | ||
78 | this->ctrl = cfctrl_create(); | ||
79 | if (!this->ctrl) | ||
80 | goto out_of_mem; | ||
81 | /* Initiate response functions */ | ||
82 | resp = cfctrl_get_respfuncs(this->ctrl); | ||
83 | resp->enum_rsp = cfctrl_enum_resp; | ||
84 | resp->linkerror_ind = cfctrl_resp_func; | ||
85 | resp->linkdestroy_rsp = cncfg_linkdestroy_rsp; | ||
86 | resp->sleep_rsp = cfctrl_resp_func; | ||
87 | resp->wake_rsp = cfctrl_resp_func; | ||
88 | resp->restart_rsp = cfctrl_resp_func; | ||
89 | resp->radioset_rsp = cfctrl_resp_func; | ||
90 | resp->linksetup_rsp = cncfg_linkup_rsp; | ||
91 | resp->reject_rsp = cncfg_reject_rsp; | ||
92 | |||
93 | this->last_phyid = 1; | ||
94 | |||
95 | cfmuxl_set_uplayer(this->mux, this->ctrl, 0); | ||
96 | layer_set_dn(this->ctrl, this->mux); | ||
97 | layer_set_up(this->ctrl, this); | ||
98 | return this; | ||
99 | out_of_mem: | ||
100 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
101 | kfree(this->mux); | ||
102 | kfree(this->ctrl); | ||
103 | kfree(this); | ||
104 | return NULL; | ||
105 | } | ||
106 | EXPORT_SYMBOL(cfcnfg_create); | ||
107 | |||
108 | void cfcnfg_remove(struct cfcnfg *cfg) | ||
109 | { | ||
110 | if (cfg) { | ||
111 | kfree(cfg->mux); | ||
112 | kfree(cfg->ctrl); | ||
113 | kfree(cfg); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | static void cfctrl_resp_func(void) | ||
118 | { | ||
119 | } | ||
120 | |||
121 | static void cfctrl_enum_resp(void) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, | ||
126 | enum cfcnfg_phy_preference phy_pref) | ||
127 | { | ||
128 | u16 i; | ||
129 | |||
130 | /* Try to match with specified preference */ | ||
131 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
132 | if (cnfg->phy_layers[i].id == i && | ||
133 | cnfg->phy_layers[i].pref == phy_pref && | ||
134 | cnfg->phy_layers[i].frm_layer != NULL) { | ||
135 | caif_assert(cnfg->phy_layers != NULL); | ||
136 | caif_assert(cnfg->phy_layers[i].id == i); | ||
137 | return &cnfg->phy_layers[i].dev_info; | ||
138 | } | ||
139 | } | ||
140 | /* Otherwise just return something */ | ||
141 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
142 | if (cnfg->phy_layers[i].id == i) { | ||
143 | caif_assert(cnfg->phy_layers != NULL); | ||
144 | caif_assert(cnfg->phy_layers[i].id == i); | ||
145 | return &cnfg->phy_layers[i].dev_info; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | return NULL; | ||
150 | } | ||
151 | |||
152 | static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, | ||
153 | u8 phyid) | ||
154 | { | ||
155 | int i; | ||
156 | /* Try to match with specified preference */ | ||
157 | for (i = 0; i < MAX_PHY_LAYERS; i++) | ||
158 | if (cnfg->phy_layers[i].frm_layer != NULL && | ||
159 | cnfg->phy_layers[i].id == phyid) | ||
160 | return &cnfg->phy_layers[i]; | ||
161 | return NULL; | ||
162 | } | ||
163 | |||
164 | int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) | ||
165 | { | ||
166 | int i; | ||
167 | |||
168 | /* Try to match with specified name */ | ||
169 | for (i = 0; i < MAX_PHY_LAYERS; i++) { | ||
170 | if (cnfg->phy_layers[i].frm_layer != NULL | ||
171 | && strcmp(cnfg->phy_layers[i].phy_layer->name, | ||
172 | name) == 0) | ||
173 | return cnfg->phy_layers[i].frm_layer->id; | ||
174 | } | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * NOTE: What happens on destroy failure: | ||
180 | * 1a) No response - Too early | ||
181 | * This will not happen because enumerate has already | ||
182 | * completed. | ||
183 | * 1b) No response - FATAL | ||
184 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
185 | * Modem error, response is really expected - this | ||
186 | * case is not really handled. | ||
187 | * 2) O/E-bit indicate error | ||
188 | * Ignored - this link is destroyed anyway. | ||
189 | * 3) Not able to match on request | ||
190 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
191 | * 4) Link-Error - (no response) | ||
192 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
193 | */ | ||
194 | |||
195 | int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | ||
196 | { | ||
197 | u8 channel_id = 0; | ||
198 | int ret = 0; | ||
199 | struct cfcnfg_phyinfo *phyinfo = NULL; | ||
200 | u8 phyid = 0; | ||
201 | |||
202 | caif_assert(adap_layer != NULL); | ||
203 | channel_id = adap_layer->id; | ||
204 | if (channel_id == 0) { | ||
205 | pr_err("CAIF: %s():adap_layer->id is 0\n", __func__); | ||
206 | ret = -ENOTCONN; | ||
207 | goto end; | ||
208 | } | ||
209 | |||
210 | if (adap_layer->dn == NULL) { | ||
211 | pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__); | ||
212 | ret = -ENODEV; | ||
213 | goto end; | ||
214 | } | ||
215 | |||
216 | phyid = cfsrvl_getphyid(adap_layer->dn); | ||
218 | |||
219 | phyinfo = cfcnfg_get_phyinfo(cnfg, phyid); | ||
220 | if (phyinfo == NULL) { | ||
221 | pr_warning("CAIF: %s(): No interface to send disconnect to\n", | ||
222 | __func__); | ||
223 | ret = -ENODEV; | ||
224 | goto end; | ||
225 | } | ||
226 | |||
227 | if (phyinfo->id != phyid | ||
228 | || phyinfo->phy_layer->id != phyid | ||
229 | || phyinfo->frm_layer->id != phyid) { | ||
230 | |||
231 | pr_err("CAIF: %s(): Inconsistency in phy registration\n", | ||
232 | __func__); | ||
233 | ret = -EINVAL; | ||
234 | goto end; | ||
235 | } | ||
236 | |||
237 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
238 | |||
239 | end: | ||
240 | if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 && | ||
241 | phyinfo->phy_layer != NULL && | ||
242 | phyinfo->phy_layer->modemcmd != NULL) { | ||
243 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, | ||
244 | _CAIF_MODEMCMD_PHYIF_USELESS); | ||
245 | } | ||
246 | return ret; | ||
247 | |||
248 | } | ||
249 | EXPORT_SYMBOL(cfcnfg_del_adapt_layer); | ||
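/*
 * phy_ref_count tracks how many channels use a physical interface:
 * cncfg_linkup_rsp() signals _CAIF_MODEMCMD_PHYIF_USEFULL on the
 * 0 -> 1 transition, and the teardown path above signals
 * _CAIF_MODEMCMD_PHYIF_USELESS when the last user disconnects.
 */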
250 | |||
251 | static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, | ||
252 | struct cflayer *client_layer) | ||
253 | { | ||
254 | struct cfcnfg *cnfg = container_obj(layer); | ||
255 | struct cflayer *servl; | ||
256 | |||
257 | /* | ||
258 | * 1) Remove service from the MUX layer. The MUX must | ||
259 | * guarantee that no more payload is sent "upwards" (received). | ||
260 | */ | ||
261 | servl = cfmuxl_remove_uplayer(cnfg->mux, linkid); | ||
262 | |||
263 | if (servl == NULL) { | ||
264 | pr_err("CAIF: %s(): PROTOCOL ERROR " | ||
265 | "- Error removing service_layer Linkid(%d)", | ||
266 | __func__, linkid); | ||
267 | return; | ||
268 | } | ||
269 | caif_assert(linkid == servl->id); | ||
270 | |||
271 | if (servl != client_layer && servl->up != client_layer) { | ||
272 | pr_err("CAIF: %s(): Error removing service_layer " | ||
273 | "Linkid(%d) %p %p", | ||
274 | __func__, linkid, (void *) servl, | ||
275 | (void *) client_layer); | ||
276 | return; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * 2) DEINIT_RSP must guarantee that no more packets are transmitted | ||
281 | * from client (adap_layer) when it returns. | ||
282 | */ | ||
283 | |||
284 | if (servl->ctrlcmd == NULL) { | ||
285 | pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__); | ||
286 | return; | ||
287 | } | ||
288 | |||
289 | servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0); | ||
290 | |||
291 | /* 3) It is now safe to destroy the service layer. */ | ||
292 | cfservl_destroy(servl); | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * NOTE: What happens on linksetup failure: | ||
297 | * 1a) No response - Too early | ||
298 | * This will not happen because enumerate is secured | ||
299 | * before using interface. | ||
300 | * 1b) No response - FATAL | ||
301 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
302 | * Modem error, response is really expected - this case is | ||
303 | * not really handled. | ||
304 | * 2) O/E-bit indicate error | ||
305 | * Handled in cnfg_reject_rsp | ||
306 | * 3) Not able to match on request | ||
307 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
308 | * 4) Link-Error - (no response) | ||
309 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
310 | */ | ||
311 | |||
312 | int | ||
313 | cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, | ||
314 | struct cfctrl_link_param *param, | ||
315 | struct cflayer *adap_layer) | ||
316 | { | ||
317 | struct cflayer *frml; | ||
318 | if (adap_layer == NULL) { | ||
319 | pr_err("CAIF: %s(): adap_layer is zero", __func__); | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | if (adap_layer->receive == NULL) { | ||
323 | pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__); | ||
324 | return -EINVAL; | ||
325 | } | ||
326 | if (adap_layer->ctrlcmd == NULL) { | ||
327 | pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__); | ||
328 | return -EINVAL; | ||
329 | } | ||
330 | frml = cnfg->phy_layers[param->phyid].frm_layer; | ||
331 | if (frml == NULL) { | ||
332 | pr_err("CAIF: %s(): Specified PHY type does not exist!", | ||
333 | __func__); | ||
334 | return -ENODEV; | ||
335 | } | ||
336 | caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); | ||
337 | caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == | ||
338 | param->phyid); | ||
339 | caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == | ||
340 | param->phyid); | ||
341 | /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ | ||
342 | cfctrl_enum_req(cnfg->ctrl, param->phyid); | ||
343 | cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); | ||
344 | return 0; | ||
345 | } | ||
346 | EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); | ||
347 | |||
348 | static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, | ||
349 | struct cflayer *adapt_layer) | ||
350 | { | ||
351 | if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) | ||
352 | adapt_layer->ctrlcmd(adapt_layer, | ||
353 | CAIF_CTRLCMD_INIT_FAIL_RSP, 0); | ||
354 | } | ||
355 | |||
356 | static void | ||
357 | cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv, | ||
358 | u8 phyid, struct cflayer *adapt_layer) | ||
359 | { | ||
360 | struct cfcnfg *cnfg = container_obj(layer); | ||
361 | struct cflayer *servicel = NULL; | ||
362 | struct cfcnfg_phyinfo *phyinfo; | ||
363 | if (adapt_layer == NULL) { | ||
364 | pr_err("CAIF: %s(): PROTOCOL ERROR " | ||
365 | "- LinkUp Request/Response did not match\n", __func__); | ||
366 | return; | ||
367 | } | ||
368 | |||
369 | caif_assert(cnfg != NULL); | ||
370 | caif_assert(phyid != 0); | ||
371 | phyinfo = &cnfg->phy_layers[phyid]; | ||
372 | caif_assert(phyinfo != NULL); | ||
373 | caif_assert(phyinfo->id == phyid); | ||
374 | caif_assert(phyinfo->phy_layer != NULL); | ||
375 | caif_assert(phyinfo->phy_layer->id == phyid); | ||
376 | |||
377 | if (phyinfo != NULL && | ||
378 | phyinfo->phy_ref_count++ == 0 && | ||
379 | phyinfo->phy_layer != NULL && | ||
380 | phyinfo->phy_layer->modemcmd != NULL) { | ||
381 | caif_assert(phyinfo->phy_layer->id == phyid); | ||
382 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, | ||
383 | _CAIF_MODEMCMD_PHYIF_USEFULL); | ||
384 | |||
385 | } | ||
386 | adapt_layer->id = linkid; | ||
387 | |||
388 | switch (serv) { | ||
389 | case CFCTRL_SRV_VEI: | ||
390 | servicel = cfvei_create(linkid, &phyinfo->dev_info); | ||
391 | break; | ||
392 | case CFCTRL_SRV_DATAGRAM: | ||
393 | servicel = cfdgml_create(linkid, &phyinfo->dev_info); | ||
394 | break; | ||
395 | case CFCTRL_SRV_RFM: | ||
396 | servicel = cfrfml_create(linkid, &phyinfo->dev_info); | ||
397 | break; | ||
398 | case CFCTRL_SRV_UTIL: | ||
399 | servicel = cfutill_create(linkid, &phyinfo->dev_info); | ||
400 | break; | ||
401 | case CFCTRL_SRV_VIDEO: | ||
402 | servicel = cfvidl_create(linkid, &phyinfo->dev_info); | ||
403 | break; | ||
404 | case CFCTRL_SRV_DBG: | ||
405 | servicel = cfdbgl_create(linkid, &phyinfo->dev_info); | ||
406 | break; | ||
407 | default: | ||
408 | pr_err("CAIF: %s(): Protocol error. " | ||
409 | "Link setup response - unknown channel type\n", | ||
410 | __func__); | ||
411 | return; | ||
412 | } | ||
413 | if (!servicel) { | ||
414 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
415 | return; | ||
416 | } | ||
417 | layer_set_dn(servicel, cnfg->mux); | ||
418 | cfmuxl_set_uplayer(cnfg->mux, servicel, linkid); | ||
419 | layer_set_up(servicel, adapt_layer); | ||
420 | layer_set_dn(adapt_layer, servicel); | ||
421 | servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); | ||
422 | } | ||
423 | |||
424 | void | ||
425 | cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, | ||
426 | void *dev, struct cflayer *phy_layer, u16 *phyid, | ||
427 | enum cfcnfg_phy_preference pref, | ||
428 | bool fcs, bool stx) | ||
429 | { | ||
430 | struct cflayer *frml; | ||
431 | struct cflayer *phy_driver = NULL; | ||
432 | int i; | ||
433 | |||
434 | |||
435 | if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) { | ||
436 | *phyid = cnfg->last_phyid; | ||
437 | |||
438 | /* range: 1..(MAX_PHY_LAYERS-1) */ | ||
439 | cnfg->last_phyid = | ||
440 | (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; | ||
441 | } else { | ||
442 | *phyid = 0; | ||
443 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
444 | if (cnfg->phy_layers[i].frm_layer == NULL) { | ||
445 | *phyid = i; | ||
446 | break; | ||
447 | } | ||
448 | } | ||
449 | } | ||
450 | if (*phyid == 0) { | ||
451 | pr_err("CAIF: %s(): No Available PHY ID\n", __func__); | ||
452 | return; | ||
453 | } | ||
454 | |||
455 | switch (phy_type) { | ||
456 | case CFPHYTYPE_FRAG: | ||
457 | phy_driver = | ||
458 | cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); | ||
459 | if (!phy_driver) { | ||
460 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
461 | return; | ||
462 | } | ||
463 | |||
464 | break; | ||
465 | case CFPHYTYPE_CAIF: | ||
466 | phy_driver = NULL; | ||
467 | break; | ||
468 | default: | ||
469 | pr_err("CAIF: %s(): %d", __func__, phy_type); | ||
470 | return; | ||
471 | break; | ||
472 | } | ||
473 | |||
474 | phy_layer->id = *phyid; | ||
475 | cnfg->phy_layers[*phyid].pref = pref; | ||
476 | cnfg->phy_layers[*phyid].id = *phyid; | ||
477 | cnfg->phy_layers[*phyid].dev_info.id = *phyid; | ||
478 | cnfg->phy_layers[*phyid].dev_info.dev = dev; | ||
479 | cnfg->phy_layers[*phyid].phy_layer = phy_layer; | ||
480 | cnfg->phy_layers[*phyid].phy_ref_count = 0; | ||
481 | phy_layer->type = phy_type; | ||
482 | frml = cffrml_create(*phyid, fcs); | ||
483 | if (!frml) { | ||
484 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
485 | return; | ||
486 | } | ||
487 | cnfg->phy_layers[*phyid].frm_layer = frml; | ||
488 | cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid); | ||
489 | layer_set_up(frml, cnfg->mux); | ||
490 | |||
491 | if (phy_driver != NULL) { | ||
492 | phy_driver->id = *phyid; | ||
493 | layer_set_dn(frml, phy_driver); | ||
494 | layer_set_up(phy_driver, frml); | ||
495 | layer_set_dn(phy_driver, phy_layer); | ||
496 | layer_set_up(phy_layer, phy_driver); | ||
497 | } else { | ||
498 | layer_set_dn(frml, phy_layer); | ||
499 | layer_set_up(phy_layer, frml); | ||
500 | } | ||
501 | } | ||
502 | EXPORT_SYMBOL(cfcnfg_add_phy_layer); | ||
503 | |||
504 | int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) | ||
505 | { | ||
506 | struct cflayer *frml, *frml_dn; | ||
507 | u16 phyid; | ||
508 | phyid = phy_layer->id; | ||
509 | caif_assert(phyid == cnfg->phy_layers[phyid].id); | ||
510 | caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer); | ||
511 | caif_assert(phy_layer->id == phyid); | ||
512 | caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid); | ||
513 | |||
514 | memset(&cnfg->phy_layers[phy_layer->id], 0, | ||
515 | sizeof(struct cfcnfg_phyinfo)); | ||
516 | frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id); | ||
517 | frml_dn = frml->dn; | ||
518 | cffrml_set_uplayer(frml, NULL); | ||
519 | cffrml_set_dnlayer(frml, NULL); | ||
520 | kfree(frml); | ||
521 | |||
522 | if (phy_layer != frml_dn) { | ||
523 | layer_set_up(frml_dn, NULL); | ||
524 | layer_set_dn(frml_dn, NULL); | ||
525 | kfree(frml_dn); | ||
526 | } | ||
527 | layer_set_up(phy_layer, NULL); | ||
528 | return 0; | ||
529 | } | ||
530 | EXPORT_SYMBOL(cfcnfg_del_phy_layer); | ||
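cfcnfg_add_phy_layer() above wires a new PHY into the stack purely through up/dn pointer assignments: the MUX on top, then the framing layer, then an optional serial fragmentation driver, then the PHY itself. Below is a minimal userspace sketch of that wiring, not the kernel code: struct layer stands in for struct cflayer, and the MUX's per-phyid downlayer list is reduced to a single dn pointer for illustration.

#include <stdio.h>

/* Simplified stand-in for struct cflayer: only the linkage matters here. */
struct layer {
	const char *name;
	struct layer *up;
	struct layer *dn;
};

static void layer_set_up(struct layer *l, struct layer *up) { l->up = up; }
static void layer_set_dn(struct layer *l, struct layer *dn) { l->dn = dn; }

int main(void)
{
	struct layer mux = { "mux" }, frml = { "frm1" }, phy = { "phy1" };

	/* Same wiring as the CFPHYTYPE_CAIF branch (no serial driver);
	 * in the kernel the mux keeps frml in a list keyed by phyid
	 * rather than in a single dn pointer. */
	layer_set_dn(&mux, &frml);
	layer_set_up(&frml, &mux);
	layer_set_dn(&frml, &phy);
	layer_set_up(&phy, &frml);

	for (struct layer *l = &mux; l != NULL; l = l->dn)
		printf("%s\n", l->name);	/* mux, frm1, phy1 */
	return 0;
}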
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c new file mode 100644 index 000000000000..11f80140f3cb --- /dev/null +++ b/net/caif/cfctrl.c | |||
@@ -0,0 +1,664 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | #include <net/caif/cfctrl.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfctrl, serv.layer) | ||
15 | #define UTILITY_NAME_LENGTH 16 | ||
16 | #define CFPKT_CTRL_PKT_LEN 20 | ||
17 | |||
18 | |||
19 | #ifdef CAIF_NO_LOOP | ||
20 | static int handle_loop(struct cfctrl *ctrl, | ||
21 | int cmd, struct cfpkt *pkt){ | ||
22 | return CAIF_FAILURE; | ||
23 | } | ||
24 | #else | ||
25 | static int handle_loop(struct cfctrl *ctrl, | ||
26 | int cmd, struct cfpkt *pkt); | ||
27 | #endif | ||
28 | static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt); | ||
29 | static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
30 | int phyid); | ||
31 | |||
32 | |||
33 | struct cflayer *cfctrl_create(void) | ||
34 | { | ||
35 | struct cfctrl *this = | ||
36 | kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); | ||
37 | if (!this) { | ||
38 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
39 | return NULL; | ||
40 | } | ||
41 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); | ||
42 | memset(this, 0, sizeof(*this)); | ||
43 | spin_lock_init(&this->info_list_lock); | ||
44 | atomic_set(&this->req_seq_no, 1); | ||
45 | atomic_set(&this->rsp_seq_no, 1); | ||
46 | this->serv.dev_info.id = 0xff; | ||
47 | this->serv.layer.id = 0; | ||
48 | this->serv.layer.receive = cfctrl_recv; | ||
49 | sprintf(this->serv.layer.name, "ctrl"); | ||
50 | this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; | ||
51 | spin_lock_init(&this->loop_linkid_lock); | ||
52 | this->loop_linkid = 1; | ||
53 | return &this->serv.layer; | ||
54 | } | ||
55 | |||
56 | static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) | ||
57 | { | ||
58 | bool eq = | ||
59 | p1->linktype == p2->linktype && | ||
60 | p1->priority == p2->priority && | ||
61 | p1->phyid == p2->phyid && | ||
62 | p1->endpoint == p2->endpoint && p1->chtype == p2->chtype; | ||
63 | |||
64 | if (!eq) | ||
65 | return false; | ||
66 | |||
67 | switch (p1->linktype) { | ||
68 | case CFCTRL_SRV_VEI: | ||
69 | return true; | ||
70 | case CFCTRL_SRV_DATAGRAM: | ||
71 | return p1->u.datagram.connid == p2->u.datagram.connid; | ||
72 | case CFCTRL_SRV_RFM: | ||
73 | return | ||
74 | p1->u.rfm.connid == p2->u.rfm.connid && | ||
75 | strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0; | ||
76 | case CFCTRL_SRV_UTIL: | ||
77 | return | ||
78 | p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb | ||
79 | && p1->u.utility.fifosize_bufs == | ||
80 | p2->u.utility.fifosize_bufs | ||
81 | && strcmp(p1->u.utility.name, p2->u.utility.name) == 0 | ||
82 | && p1->u.utility.paramlen == p2->u.utility.paramlen | ||
83 | && memcmp(p1->u.utility.params, p2->u.utility.params, | ||
84 | p1->u.utility.paramlen) == 0; | ||
85 | |||
86 | case CFCTRL_SRV_VIDEO: | ||
87 | return p1->u.video.connid == p2->u.video.connid; | ||
88 | case CFCTRL_SRV_DBG: | ||
89 | return true; | ||
90 | case CFCTRL_SRV_DECM: | ||
91 | return false; | ||
92 | default: | ||
93 | return false; | ||
94 | } | ||
95 | return false; | ||
96 | } | ||
97 | |||
98 | bool cfctrl_req_eq(struct cfctrl_request_info *r1, | ||
99 | struct cfctrl_request_info *r2) | ||
100 | { | ||
101 | if (r1->cmd != r2->cmd) | ||
102 | return false; | ||
103 | if (r1->cmd == CFCTRL_CMD_LINK_SETUP) | ||
104 | return param_eq(&r1->param, &r2->param); | ||
105 | else | ||
106 | return r1->channel_id == r2->channel_id; | ||
107 | } | ||
108 | |||
109 | /* Insert request at the end */ | ||
110 | void cfctrl_insert_req(struct cfctrl *ctrl, | ||
111 | struct cfctrl_request_info *req) | ||
112 | { | ||
113 | struct cfctrl_request_info *p; | ||
114 | spin_lock(&ctrl->info_list_lock); | ||
115 | req->next = NULL; | ||
116 | atomic_inc(&ctrl->req_seq_no); | ||
117 | req->sequence_no = atomic_read(&ctrl->req_seq_no); | ||
118 | if (ctrl->first_req == NULL) { | ||
119 | ctrl->first_req = req; | ||
120 | spin_unlock(&ctrl->info_list_lock); | ||
121 | return; | ||
122 | } | ||
123 | p = ctrl->first_req; | ||
124 | while (p->next != NULL) | ||
125 | p = p->next; | ||
126 | p->next = req; | ||
127 | spin_unlock(&ctrl->info_list_lock); | ||
128 | } | ||
129 | |||
130 | static void cfctrl_insert_req2(struct cfctrl *ctrl, enum cfctrl_cmd cmd, | ||
131 | u8 linkid, struct cflayer *user_layer) | ||
132 | { | ||
133 | struct cfctrl_request_info *req = kmalloc(sizeof(*req), GFP_KERNEL); | ||
134 | if (!req) { | ||
135 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
136 | return; | ||
137 | } | ||
138 | req->client_layer = user_layer; | ||
139 | req->cmd = cmd; | ||
140 | req->channel_id = linkid; | ||
141 | cfctrl_insert_req(ctrl, req); | ||
142 | } | ||
143 | |||
144 | /* Compare and remove request */ | ||
145 | struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, | ||
146 | struct cfctrl_request_info *req) | ||
147 | { | ||
148 | struct cfctrl_request_info *p; | ||
149 | struct cfctrl_request_info *ret; | ||
150 | |||
151 | spin_lock(&ctrl->info_list_lock); | ||
152 | if (ctrl->first_req == NULL) { | ||
153 | spin_unlock(&ctrl->info_list_lock); | ||
154 | return NULL; | ||
155 | } | ||
156 | |||
157 | if (cfctrl_req_eq(req, ctrl->first_req)) { | ||
158 | ret = ctrl->first_req; | ||
159 | caif_assert(ctrl->first_req); | ||
160 | atomic_set(&ctrl->rsp_seq_no, | ||
161 | ctrl->first_req->sequence_no); | ||
162 | ctrl->first_req = ctrl->first_req->next; | ||
163 | spin_unlock(&ctrl->info_list_lock); | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | p = ctrl->first_req; | ||
168 | |||
169 | while (p->next != NULL) { | ||
170 | if (cfctrl_req_eq(req, p->next)) { | ||
171 | pr_warning("CAIF: %s(): Requests are not " | ||
172 | "received in order\n", | ||
173 | __func__); | ||
174 | ret = p->next; | ||
175 | atomic_set(&ctrl->rsp_seq_no, | ||
176 | p->next->sequence_no); | ||
177 | p->next = p->next->next; | ||
178 | spin_unlock(&ctrl->info_list_lock); | ||
179 | return ret; | ||
180 | } | ||
181 | p = p->next; | ||
182 | } | ||
183 | spin_unlock(&ctrl->info_list_lock); | ||
184 | |||
185 | pr_warning("CAIF: %s(): Request does not match\n", | ||
186 | __func__); | ||
187 | return NULL; | ||
188 | } | ||
189 | |||
190 | struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) | ||
191 | { | ||
192 | struct cfctrl *this = container_obj(layer); | ||
193 | return &this->res; | ||
194 | } | ||
195 | |||
196 | void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn) | ||
197 | { | ||
198 | this->dn = dn; | ||
199 | } | ||
200 | |||
201 | void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up) | ||
202 | { | ||
203 | this->up = up; | ||
204 | } | ||
205 | |||
206 | static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) | ||
207 | { | ||
208 | info->hdr_len = 0; | ||
209 | info->channel_id = cfctrl->serv.layer.id; | ||
210 | info->dev_info = &cfctrl->serv.dev_info; | ||
211 | } | ||
212 | |||
213 | void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) | ||
214 | { | ||
215 | struct cfctrl *cfctrl = container_obj(layer); | ||
216 | int ret; | ||
217 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
218 | if (!pkt) { | ||
219 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
220 | return; | ||
221 | } | ||
222 | caif_assert(offsetof(struct cfctrl, serv.layer) == 0); | ||
223 | init_info(cfpkt_info(pkt), cfctrl); | ||
224 | cfpkt_info(pkt)->dev_info->id = physlinkid; | ||
225 | cfctrl->serv.dev_info.id = physlinkid; | ||
226 | cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); | ||
227 | cfpkt_addbdy(pkt, physlinkid); | ||
228 | ret = | ||
229 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
230 | if (ret < 0) { | ||
231 | pr_err("CAIF: %s(): Could not transmit enum message\n", | ||
232 | __func__); | ||
233 | cfpkt_destroy(pkt); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | void cfctrl_linkup_request(struct cflayer *layer, | ||
238 | struct cfctrl_link_param *param, | ||
239 | struct cflayer *user_layer) | ||
240 | { | ||
241 | struct cfctrl *cfctrl = container_obj(layer); | ||
242 | u32 tmp32; | ||
243 | u16 tmp16; | ||
244 | u8 tmp8; | ||
245 | struct cfctrl_request_info *req; | ||
246 | int ret; | ||
247 | char utility_name[UTILITY_NAME_LENGTH]; | ||
248 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
249 | if (!pkt) { | ||
250 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
251 | return; | ||
252 | } | ||
253 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); | ||
254 | cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); | ||
255 | cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid); | ||
256 | cfpkt_addbdy(pkt, param->endpoint & 0x03); | ||
257 | |||
258 | switch (param->linktype) { | ||
259 | case CFCTRL_SRV_VEI: | ||
260 | break; | ||
261 | case CFCTRL_SRV_VIDEO: | ||
262 | cfpkt_addbdy(pkt, (u8) param->u.video.connid); | ||
263 | break; | ||
264 | case CFCTRL_SRV_DBG: | ||
265 | break; | ||
266 | case CFCTRL_SRV_DATAGRAM: | ||
267 | tmp32 = cpu_to_le32(param->u.datagram.connid); | ||
268 | cfpkt_add_body(pkt, &tmp32, 4); | ||
269 | break; | ||
270 | case CFCTRL_SRV_RFM: | ||
271 | /* Construct a frame, convert DatagramConnectionID to | ||
272 | * little-endian wire format and copy it out... | ||
273 | */ | ||
274 | tmp32 = cpu_to_le32(param->u.rfm.connid); | ||
275 | cfpkt_add_body(pkt, &tmp32, 4); | ||
276 | /* Add volume name, including zero termination... */ | ||
277 | cfpkt_add_body(pkt, param->u.rfm.volume, | ||
278 | strlen(param->u.rfm.volume) + 1); | ||
279 | break; | ||
280 | case CFCTRL_SRV_UTIL: | ||
281 | tmp16 = cpu_to_le16(param->u.utility.fifosize_kb); | ||
282 | cfpkt_add_body(pkt, &tmp16, 2); | ||
283 | tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); | ||
284 | cfpkt_add_body(pkt, &tmp16, 2); | ||
285 | memset(utility_name, 0, sizeof(utility_name)); | ||
286 | strncpy(utility_name, param->u.utility.name, | ||
287 | UTILITY_NAME_LENGTH - 1); | ||
288 | cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); | ||
289 | tmp8 = param->u.utility.paramlen; | ||
290 | cfpkt_add_body(pkt, &tmp8, 1); | ||
291 | cfpkt_add_body(pkt, param->u.utility.params, | ||
292 | param->u.utility.paramlen); | ||
293 | break; | ||
294 | default: | ||
295 | pr_warning("CAIF: %s():Request setup of bad link type = %d\n", | ||
296 | __func__, param->linktype); | ||
297 | } | ||
298 | req = kmalloc(sizeof(*req), GFP_KERNEL); | ||
299 | if (!req) { | ||
300 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
301 | return; | ||
302 | } | ||
303 | memset(req, 0, sizeof(*req)); | ||
304 | req->client_layer = user_layer; | ||
305 | req->cmd = CFCTRL_CMD_LINK_SETUP; | ||
306 | req->param = *param; | ||
307 | cfctrl_insert_req(cfctrl, req); | ||
308 | init_info(cfpkt_info(pkt), cfctrl); | ||
309 | cfpkt_info(pkt)->dev_info->id = param->phyid; | ||
310 | ret = | ||
311 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
312 | if (ret < 0) { | ||
313 | pr_err("CAIF: %s(): Could not transmit linksetup request\n", | ||
314 | __func__); | ||
315 | cfpkt_destroy(pkt); | ||
316 | } | ||
317 | } | ||
318 | |||
319 | int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, | ||
320 | struct cflayer *client) | ||
321 | { | ||
322 | int ret; | ||
323 | struct cfctrl *cfctrl = container_obj(layer); | ||
324 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
325 | if (!pkt) { | ||
326 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
327 | return -ENOMEM; | ||
328 | } | ||
329 | cfctrl_insert_req2(cfctrl, CFCTRL_CMD_LINK_DESTROY, channelid, client); | ||
330 | cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); | ||
331 | cfpkt_addbdy(pkt, channelid); | ||
332 | init_info(cfpkt_info(pkt), cfctrl); | ||
333 | ret = | ||
334 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
335 | if (ret < 0) { | ||
336 | pr_err("CAIF: %s(): Could not transmit link-down request\n", | ||
337 | __func__); | ||
338 | cfpkt_destroy(pkt); | ||
339 | } | ||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | void cfctrl_sleep_req(struct cflayer *layer) | ||
344 | { | ||
345 | int ret; | ||
346 | struct cfctrl *cfctrl = container_obj(layer); | ||
347 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
348 | if (!pkt) { | ||
349 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
350 | return; | ||
351 | } | ||
352 | cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP); | ||
353 | init_info(cfpkt_info(pkt), cfctrl); | ||
354 | ret = | ||
355 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
356 | if (ret < 0) | ||
357 | cfpkt_destroy(pkt); | ||
358 | } | ||
359 | |||
360 | void cfctrl_wake_req(struct cflayer *layer) | ||
361 | { | ||
362 | int ret; | ||
363 | struct cfctrl *cfctrl = container_obj(layer); | ||
364 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
365 | if (!pkt) { | ||
366 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
367 | return; | ||
368 | } | ||
369 | cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE); | ||
370 | init_info(cfpkt_info(pkt), cfctrl); | ||
371 | ret = | ||
372 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
373 | if (ret < 0) | ||
374 | cfpkt_destroy(pkt); | ||
375 | } | ||
376 | |||
377 | void cfctrl_getstartreason_req(struct cflayer *layer) | ||
378 | { | ||
379 | int ret; | ||
380 | struct cfctrl *cfctrl = container_obj(layer); | ||
381 | struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); | ||
382 | if (!pkt) { | ||
383 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
384 | return; | ||
385 | } | ||
386 | cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON); | ||
387 | init_info(cfpkt_info(pkt), cfctrl); | ||
388 | ret = | ||
389 | cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); | ||
390 | if (ret < 0) | ||
391 | cfpkt_destroy(pkt); | ||
392 | } | ||
393 | |||
394 | |||
395 | static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) | ||
396 | { | ||
397 | u8 cmdrsp; | ||
398 | u8 cmd; | ||
399 | int ret = -1; | ||
400 | u16 tmp16; | ||
401 | u8 len; | ||
402 | u8 param[255]; | ||
403 | u8 linkid; | ||
404 | struct cfctrl *cfctrl = container_obj(layer); | ||
405 | struct cfctrl_request_info rsp, *req; | ||
406 | |||
407 | |||
408 | cfpkt_extr_head(pkt, &cmdrsp, 1); | ||
409 | cmd = cmdrsp & CFCTRL_CMD_MASK; | ||
410 | if (cmd != CFCTRL_CMD_LINK_ERR | ||
411 | && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { | ||
412 | if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) { | ||
413 | pr_info("CAIF: %s() CAIF Protocol error:" | ||
414 | "Response bit not set\n", __func__); | ||
415 | goto error; | ||
416 | } | ||
417 | } | ||
418 | |||
419 | switch (cmd) { | ||
420 | case CFCTRL_CMD_LINK_SETUP: | ||
421 | { | ||
422 | enum cfctrl_srv serv; | ||
423 | enum cfctrl_srv servtype; | ||
424 | u8 endpoint; | ||
425 | u8 physlinkid; | ||
426 | u8 prio; | ||
427 | u8 tmp; | ||
428 | u32 tmp32; | ||
429 | u8 *cp; | ||
430 | int i; | ||
431 | struct cfctrl_link_param linkparam; | ||
432 | memset(&linkparam, 0, sizeof(linkparam)); | ||
433 | |||
434 | cfpkt_extr_head(pkt, &tmp, 1); | ||
435 | |||
436 | serv = tmp & CFCTRL_SRV_MASK; | ||
437 | linkparam.linktype = serv; | ||
438 | |||
439 | servtype = tmp >> 4; | ||
440 | linkparam.chtype = servtype; | ||
441 | |||
442 | cfpkt_extr_head(pkt, &tmp, 1); | ||
443 | physlinkid = tmp & 0x07; | ||
444 | prio = tmp >> 3; | ||
445 | |||
446 | linkparam.priority = prio; | ||
447 | linkparam.phyid = physlinkid; | ||
448 | cfpkt_extr_head(pkt, &endpoint, 1); | ||
449 | linkparam.endpoint = endpoint & 0x03; | ||
450 | |||
451 | switch (serv) { | ||
452 | case CFCTRL_SRV_VEI: | ||
453 | case CFCTRL_SRV_DBG: | ||
454 | /* Link ID */ | ||
455 | cfpkt_extr_head(pkt, &linkid, 1); | ||
456 | break; | ||
457 | case CFCTRL_SRV_VIDEO: | ||
458 | cfpkt_extr_head(pkt, &tmp, 1); | ||
459 | linkparam.u.video.connid = tmp; | ||
460 | /* Link ID */ | ||
461 | cfpkt_extr_head(pkt, &linkid, 1); | ||
462 | break; | ||
463 | |||
464 | case CFCTRL_SRV_DATAGRAM: | ||
465 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
466 | linkparam.u.datagram.connid = | ||
467 | le32_to_cpu(tmp32); | ||
468 | /* Link ID */ | ||
469 | cfpkt_extr_head(pkt, &linkid, 1); | ||
470 | break; | ||
471 | case CFCTRL_SRV_RFM: | ||
472 | /* Extract the | ||
473 | * DatagramConnectionID | ||
474 | * (little-endian long), then the volume name... | ||
475 | */ | ||
476 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
477 | linkparam.u.rfm.connid = | ||
478 | le32_to_cpu(tmp32); | ||
479 | cp = (u8 *) linkparam.u.rfm.volume; | ||
480 | for (cfpkt_extr_head(pkt, &tmp, 1); | ||
481 | cfpkt_more(pkt) && tmp != '\0'; | ||
482 | cfpkt_extr_head(pkt, &tmp, 1)) | ||
483 | *cp++ = tmp; | ||
484 | *cp = '\0'; | ||
485 | |||
486 | /* Link ID */ | ||
487 | cfpkt_extr_head(pkt, &linkid, 1); | ||
488 | |||
489 | break; | ||
490 | case CFCTRL_SRV_UTIL: | ||
491 | /* Parse the utility-link parameters: | ||
492 | * fifo sizes, service name, | ||
493 | * and the opaque parameter block... | ||
494 | */ | ||
495 | /* Fifosize KB */ | ||
496 | cfpkt_extr_head(pkt, &tmp16, 2); | ||
497 | linkparam.u.utility.fifosize_kb = | ||
498 | le16_to_cpu(tmp16); | ||
499 | /* Fifosize bufs */ | ||
500 | cfpkt_extr_head(pkt, &tmp16, 2); | ||
501 | linkparam.u.utility.fifosize_bufs = | ||
502 | le16_to_cpu(tmp16); | ||
503 | /* name */ | ||
504 | cp = (u8 *) linkparam.u.utility.name; | ||
505 | caif_assert(sizeof(linkparam.u.utility.name) | ||
506 | >= UTILITY_NAME_LENGTH); | ||
507 | for (i = 0; | ||
508 | i < UTILITY_NAME_LENGTH | ||
509 | && cfpkt_more(pkt); i++) { | ||
510 | cfpkt_extr_head(pkt, &tmp, 1); | ||
511 | *cp++ = tmp; | ||
512 | } | ||
513 | /* Length */ | ||
514 | cfpkt_extr_head(pkt, &len, 1); | ||
515 | linkparam.u.utility.paramlen = len; | ||
516 | /* Param Data */ | ||
517 | cp = linkparam.u.utility.params; | ||
518 | while (cfpkt_more(pkt) && len--) { | ||
519 | cfpkt_extr_head(pkt, &tmp, 1); | ||
520 | *cp++ = tmp; | ||
521 | } | ||
522 | /* Link ID */ | ||
523 | cfpkt_extr_head(pkt, &linkid, 1); | ||
524 | /* Length */ | ||
525 | cfpkt_extr_head(pkt, &len, 1); | ||
526 | /* Param Data */ | ||
527 | cfpkt_extr_head(pkt, ¶m, len); | ||
528 | break; | ||
529 | default: | ||
530 | pr_warning("CAIF: %s(): Request setup " | ||
531 | "- invalid link type (%d)", | ||
532 | __func__, serv); | ||
533 | goto error; | ||
534 | } | ||
535 | |||
536 | rsp.cmd = cmd; | ||
537 | rsp.param = linkparam; | ||
538 | req = cfctrl_remove_req(cfctrl, &rsp); | ||
539 | |||
540 | if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || | ||
541 | cfpkt_erroneous(pkt)) { | ||
542 | pr_err("CAIF: %s(): Invalid O/E bit or parse " | ||
543 | "error on CAIF control channel", | ||
544 | __func__); | ||
545 | cfctrl->res.reject_rsp(cfctrl->serv.layer.up, | ||
546 | 0, | ||
547 | req ? req->client_layer | ||
548 | : NULL); | ||
549 | } else { | ||
550 | cfctrl->res.linksetup_rsp(cfctrl->serv. | ||
551 | layer.up, linkid, | ||
552 | serv, physlinkid, | ||
553 | req ? req-> | ||
554 | client_layer : NULL); | ||
555 | } | ||
556 | |||
557 | if (req != NULL) | ||
558 | kfree(req); | ||
559 | } | ||
560 | break; | ||
561 | case CFCTRL_CMD_LINK_DESTROY: | ||
562 | cfpkt_extr_head(pkt, &linkid, 1); | ||
563 | rsp.cmd = cmd; | ||
564 | rsp.channel_id = linkid; | ||
565 | req = cfctrl_remove_req(cfctrl, &rsp); | ||
566 | cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid, | ||
567 | req ? req->client_layer : NULL); | ||
568 | if (req != NULL) | ||
569 | kfree(req); | ||
570 | break; | ||
571 | case CFCTRL_CMD_LINK_ERR: | ||
572 | pr_err("CAIF: %s(): Frame Error Indication received\n", | ||
573 | __func__); | ||
574 | cfctrl->res.linkerror_ind(); | ||
575 | break; | ||
576 | case CFCTRL_CMD_ENUM: | ||
577 | cfctrl->res.enum_rsp(); | ||
578 | break; | ||
579 | case CFCTRL_CMD_SLEEP: | ||
580 | cfctrl->res.sleep_rsp(); | ||
581 | break; | ||
582 | case CFCTRL_CMD_WAKE: | ||
583 | cfctrl->res.wake_rsp(); | ||
584 | break; | ||
585 | case CFCTRL_CMD_LINK_RECONF: | ||
586 | cfctrl->res.restart_rsp(); | ||
587 | break; | ||
588 | case CFCTRL_CMD_RADIO_SET: | ||
589 | cfctrl->res.radioset_rsp(); | ||
590 | break; | ||
591 | default: | ||
592 | pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__); | ||
593 | goto error; | ||
594 | break; | ||
595 | } | ||
596 | ret = 0; | ||
597 | error: | ||
598 | cfpkt_destroy(pkt); | ||
599 | return ret; | ||
600 | } | ||
601 | |||
602 | static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
603 | int phyid) | ||
604 | { | ||
605 | struct cfctrl *this = container_obj(layr); | ||
606 | switch (ctrl) { | ||
607 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: | ||
608 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
609 | spin_lock(&this->info_list_lock); | ||
610 | if (this->first_req != NULL) { | ||
611 | pr_warning("CAIF: %s(): Received flow off in " | ||
612 | "control layer", __func__); | ||
613 | } | ||
614 | spin_unlock(&this->info_list_lock); | ||
615 | break; | ||
616 | default: | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | |||
621 | #ifndef CAIF_NO_LOOP | ||
622 | static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) | ||
623 | { | ||
624 | static int last_linkid; | ||
625 | u8 linkid, linktype, tmp; | ||
626 | switch (cmd) { | ||
627 | case CFCTRL_CMD_LINK_SETUP: | ||
628 | spin_lock(&ctrl->loop_linkid_lock); | ||
629 | for (linkid = last_linkid + 1; linkid < 255; linkid++) | ||
630 | if (!ctrl->loop_linkused[linkid]) | ||
631 | goto found; | ||
632 | for (linkid = last_linkid - 1; linkid > 0; linkid--) | ||
633 | if (!ctrl->loop_linkused[linkid]) | ||
634 | goto found; | ||
635 | spin_unlock(&ctrl->loop_linkid_lock); | ||
636 | return -EINVAL; | ||
637 | found: | ||
638 | if (!ctrl->loop_linkused[linkid]) | ||
639 | ctrl->loop_linkused[linkid] = 1; | ||
640 | |||
641 | last_linkid = linkid; | ||
642 | |||
643 | cfpkt_add_trail(pkt, &linkid, 1); | ||
644 | spin_unlock(&ctrl->loop_linkid_lock); | ||
645 | cfpkt_peek_head(pkt, &linktype, 1); | ||
646 | if (linktype == CFCTRL_SRV_UTIL) { | ||
647 | tmp = 0x01; | ||
648 | cfpkt_add_trail(pkt, &tmp, 1); | ||
649 | cfpkt_add_trail(pkt, &tmp, 1); | ||
650 | } | ||
651 | break; | ||
652 | |||
653 | case CFCTRL_CMD_LINK_DESTROY: | ||
654 | spin_lock(&ctrl->loop_linkid_lock); | ||
655 | cfpkt_peek_head(pkt, &linkid, 1); | ||
656 | ctrl->loop_linkused[linkid] = 0; | ||
657 | spin_unlock(&ctrl->loop_linkid_lock); | ||
658 | break; | ||
659 | default: | ||
660 | break; | ||
661 | } | ||
662 | return CAIF_SUCCESS; | ||
663 | } | ||
664 | #endif | ||
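Two details of cfctrl.c above are worth spelling out: cfctrl_linkup_request() packs channel type and link type into one header byte ((chtype << 4) + linktype) and priority plus PHY id into the next ((priority << 3) + phyid); cfctrl_recv() then undoes this with the masks and shifts seen in its CFCTRL_CMD_LINK_SETUP branch. A small round-trip sketch follows; the field values are hypothetical, and CFCTRL_SRV_MASK is assumed to be 0xf, consistent with the 4-bit shift used for the channel type.

#include <stdio.h>
#include <stdint.h>

#define CFCTRL_SRV_MASK 0xf	/* assumed value of the kernel constant */

int main(void)
{
	/* Hypothetical field values, each within its bit-field width. */
	uint8_t chtype = 1, linktype = 5;	/* 4 bits each */
	uint8_t priority = 2, phyid = 3;	/* 5 bits and 3 bits */

	/* Encoding, as in cfctrl_linkup_request(). */
	uint8_t b0 = (uint8_t)((chtype << 4) + linktype);
	uint8_t b1 = (uint8_t)((priority << 3) + phyid);

	/* Decoding, as in cfctrl_recv()'s link-setup branch. */
	printf("linktype=%u chtype=%u\n", b0 & CFCTRL_SRV_MASK, b0 >> 4);
	printf("phyid=%u priority=%u\n", b1 & 0x07, b1 >> 3);
	return 0;
}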
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c new file mode 100644 index 000000000000..ab6b6dc34cf8 --- /dev/null +++ b/net/caif/cfdbgl.c | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfsrvl.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | |||
13 | static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
14 | static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
15 | |||
16 | struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info) | ||
17 | { | ||
18 | struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
19 | if (!dbg) { | ||
20 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
21 | return NULL; | ||
22 | } | ||
23 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
24 | memset(dbg, 0, sizeof(struct cfsrvl)); | ||
25 | cfsrvl_init(dbg, channel_id, dev_info); | ||
26 | dbg->layer.receive = cfdbgl_receive; | ||
27 | dbg->layer.transmit = cfdbgl_transmit; | ||
28 | snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); | ||
29 | return &dbg->layer; | ||
30 | } | ||
31 | |||
32 | static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
33 | { | ||
34 | return layr->up->receive(layr->up, pkt); | ||
35 | } | ||
36 | |||
37 | static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
38 | { | ||
39 | return layr->dn->transmit(layr->dn, pkt); | ||
40 | } | ||
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c new file mode 100644 index 000000000000..53194840ecb6 --- /dev/null +++ b/net/caif/cfdgml.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cfpkt.h> | ||
13 | |||
14 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
15 | |||
16 | #define DGM_CMD_BIT 0x80 | ||
17 | #define DGM_FLOW_OFF 0x81 | ||
18 | #define DGM_FLOW_ON 0x80 | ||
19 | #define DGM_CTRL_PKT_SIZE 1 | ||
20 | |||
21 | static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
22 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | |||
24 | struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info) | ||
25 | { | ||
26 | struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
27 | if (!dgm) { | ||
28 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
29 | return NULL; | ||
30 | } | ||
31 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
32 | memset(dgm, 0, sizeof(struct cfsrvl)); | ||
33 | cfsrvl_init(dgm, channel_id, dev_info); | ||
34 | dgm->layer.receive = cfdgml_receive; | ||
35 | dgm->layer.transmit = cfdgml_transmit; | ||
36 | snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id); | ||
37 | dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0'; | ||
38 | return &dgm->layer; | ||
39 | } | ||
40 | |||
41 | static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
42 | { | ||
43 | u8 cmd = -1; | ||
44 | u8 dgmhdr[3]; | ||
45 | int ret; | ||
46 | caif_assert(layr->up != NULL); | ||
47 | caif_assert(layr->receive != NULL); | ||
48 | caif_assert(layr->ctrlcmd != NULL); | ||
49 | |||
50 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
51 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
52 | cfpkt_destroy(pkt); | ||
53 | return -EPROTO; | ||
54 | } | ||
55 | |||
56 | if ((cmd & DGM_CMD_BIT) == 0) { | ||
57 | if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) { | ||
58 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
59 | cfpkt_destroy(pkt); | ||
60 | return -EPROTO; | ||
61 | } | ||
62 | ret = layr->up->receive(layr->up, pkt); | ||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | switch (cmd) { | ||
67 | case DGM_FLOW_OFF: /* FLOW OFF */ | ||
68 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
69 | cfpkt_destroy(pkt); | ||
70 | return 0; | ||
71 | case DGM_FLOW_ON: /* FLOW ON */ | ||
72 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
73 | cfpkt_destroy(pkt); | ||
74 | return 0; | ||
75 | default: | ||
76 | cfpkt_destroy(pkt); | ||
77 | pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n", | ||
78 | __func__, cmd, cmd); | ||
79 | return -EPROTO; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
84 | { | ||
85 | u32 zero = 0; | ||
86 | struct caif_payload_info *info; | ||
87 | struct cfsrvl *service = container_obj(layr); | ||
88 | int ret; | ||
89 | if (!cfsrvl_ready(service, &ret)) | ||
90 | return ret; | ||
91 | |||
92 | cfpkt_add_head(pkt, &zero, 4); | ||
93 | |||
94 | /* Add info for MUX-layer to route the packet out. */ | ||
95 | info = cfpkt_info(pkt); | ||
96 | info->channel_id = service->layer.id; | ||
97 | /* To optimize alignment, we add up the size of CAIF header | ||
98 | * before payload. | ||
99 | */ | ||
100 | info->hdr_len = 4; | ||
101 | info->dev_info = &service->dev_info; | ||
102 | ret = layr->dn->transmit(layr->dn, pkt); | ||
103 | if (ret < 0) { | ||
104 | u32 tmp32; | ||
105 | cfpkt_extr_head(pkt, &tmp32, 4); | ||
106 | } | ||
107 | return ret; | ||
108 | } | ||
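The datagram layer above multiplexes control and data on one channel: the top bit of the first byte (DGM_CMD_BIT) selects control frames, and the two defined control values map to flow-off/flow-on indications. A short userspace sketch of that dispatch, with the constants copied from the diff and the handlers reduced to strings:

#include <stdio.h>
#include <stdint.h>

#define DGM_CMD_BIT  0x80
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON  0x80

static const char *classify(uint8_t first_byte)
{
	if ((first_byte & DGM_CMD_BIT) == 0)
		return "data (3 more header bytes, then payload)";
	switch (first_byte) {
	case DGM_FLOW_OFF: return "flow-off indication";
	case DGM_FLOW_ON:  return "flow-on indication";
	default:           return "unknown control (-EPROTO)";
	}
}

int main(void)
{
	uint8_t samples[] = { 0x00, 0x80, 0x81, 0x90 };
	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}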
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c new file mode 100644 index 000000000000..e86a4ca3b217 --- /dev/null +++ b/net/caif/cffrml.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * CAIF Framing Layer. | ||
3 | * | ||
4 | * Copyright (C) ST-Ericsson AB 2010 | ||
5 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
6 | * License terms: GNU General Public License (GPL) version 2 | ||
7 | */ | ||
8 | |||
9 | #include <linux/stddef.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/crc-ccitt.h> | ||
13 | #include <net/caif/caif_layer.h> | ||
14 | #include <net/caif/cfpkt.h> | ||
15 | #include <net/caif/cffrml.h> | ||
16 | |||
17 | #define container_obj(layr) container_of(layr, struct cffrml, layer) | ||
18 | |||
19 | struct cffrml { | ||
20 | struct cflayer layer; | ||
21 | bool dofcs; /* !< FCS active */ | ||
22 | }; | ||
23 | |||
24 | static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
25 | static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
26 | static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
27 | int phyid); | ||
28 | |||
29 | static u32 cffrml_rcv_error; | ||
30 | static u32 cffrml_rcv_checksum_error; | ||
31 | struct cflayer *cffrml_create(u16 phyid, bool use_fcs) | ||
32 | { | ||
33 | struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); | ||
34 | if (!this) { | ||
35 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
36 | return NULL; | ||
37 | } | ||
38 | caif_assert(offsetof(struct cffrml, layer) == 0); | ||
39 | |||
40 | memset(this, 0, sizeof(struct cffrml)); | ||
41 | this->layer.receive = cffrml_receive; | ||
42 | this->layer.transmit = cffrml_transmit; | ||
43 | this->layer.ctrlcmd = cffrml_ctrlcmd; | ||
44 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid); | ||
45 | this->dofcs = use_fcs; | ||
46 | this->layer.id = phyid; | ||
47 | return (struct cflayer *) this; | ||
48 | } | ||
49 | |||
50 | void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) | ||
51 | { | ||
52 | this->up = up; | ||
53 | } | ||
54 | |||
55 | void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn) | ||
56 | { | ||
57 | this->dn = dn; | ||
58 | } | ||
59 | |||
60 | static u16 cffrml_checksum(u16 chks, void *buf, u16 len) | ||
61 | { | ||
62 | /* FIXME: FCS should be moved to glue in order to use OS-Specific | ||
63 | * solutions | ||
64 | */ | ||
65 | return crc_ccitt(chks, buf, len); | ||
66 | } | ||
67 | |||
68 | static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
69 | { | ||
70 | u16 tmp; | ||
71 | u16 len; | ||
72 | u16 hdrchks; | ||
73 | u16 pktchks; | ||
74 | struct cffrml *this; | ||
75 | this = container_obj(layr); | ||
76 | |||
77 | cfpkt_extr_head(pkt, &tmp, 2); | ||
78 | len = le16_to_cpu(tmp); | ||
79 | |||
80 | /* Length includes the 2-byte trailer; drop it here if FCS is unused. */ | ||
81 | if (!this->dofcs) | ||
82 | len -= 2; | ||
83 | |||
84 | if (cfpkt_setlen(pkt, len) < 0) { | ||
85 | ++cffrml_rcv_error; | ||
86 | pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len); | ||
87 | cfpkt_destroy(pkt); | ||
88 | return -EPROTO; | ||
89 | } | ||
90 | /* | ||
91 | * Don't extract the trailer when FCS is disabled; the setlen above | ||
92 | * already dropped it, so we avoid a cache miss on the packet tail. | ||
93 | */ | ||
94 | if (this->dofcs) { | ||
95 | cfpkt_extr_trail(pkt, &tmp, 2); | ||
96 | hdrchks = le16_to_cpu(tmp); | ||
97 | pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); | ||
98 | if (pktchks != hdrchks) { | ||
99 | cfpkt_add_trail(pkt, &tmp, 2); | ||
100 | ++cffrml_rcv_error; | ||
101 | ++cffrml_rcv_checksum_error; | ||
102 | pr_info("CAIF: %s(): Frame checksum error " | ||
103 | "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks); | ||
104 | return -EILSEQ; | ||
105 | } | ||
106 | } | ||
107 | if (cfpkt_erroneous(pkt)) { | ||
108 | ++cffrml_rcv_error; | ||
109 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
110 | cfpkt_destroy(pkt); | ||
111 | return -EPROTO; | ||
112 | } | ||
113 | return layr->up->receive(layr->up, pkt); | ||
114 | } | ||
115 | |||
116 | static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
117 | { | ||
118 | u16 tmp; /* holds little-endian header/trailer values */ | ||
119 | u16 chks; | ||
120 | u16 len; | ||
121 | int ret; | ||
122 | struct cffrml *this = container_obj(layr); | ||
123 | if (this->dofcs) { | ||
124 | chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); | ||
125 | tmp = cpu_to_le16(chks); | ||
126 | cfpkt_add_trail(pkt, &tmp, 2); | ||
127 | } else { | ||
128 | cfpkt_pad_trail(pkt, 2); | ||
129 | } | ||
130 | len = cfpkt_getlen(pkt); | ||
131 | tmp = cpu_to_le16(len); | ||
132 | cfpkt_add_head(pkt, &tmp, 2); | ||
133 | cfpkt_info(pkt)->hdr_len += 2; | ||
134 | if (cfpkt_erroneous(pkt)) { | ||
135 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
136 | return -EPROTO; | ||
137 | } | ||
138 | ret = layr->dn->transmit(layr->dn, pkt); | ||
139 | if (ret < 0) { | ||
140 | /* Remove header on faulty packet. */ | ||
141 | cfpkt_extr_head(pkt, &tmp, 2); | ||
142 | } | ||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
147 | int phyid) | ||
148 | { | ||
149 | if (layr->up->ctrlcmd) | ||
150 | layr->up->ctrlcmd(layr->up, ctrl, layr->id); | ||
151 | } | ||
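To make the framing concrete: cffrml_transmit() appends either a CRC-CCITT checksum (seeded with 0xffff, as above) or two pad bytes, then prepends a little-endian 16-bit length covering payload plus trailer. The userspace sketch below builds such a frame; the bitwise CRC is assumed to be equivalent to the kernel's crc_ccitt() (reflected polynomial 0x8408), so treat it as illustrative rather than authoritative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t crc_ccitt_byte(uint16_t crc, uint8_t c)
{
	crc ^= c;
	for (int i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}

static size_t frame(uint8_t *out, const uint8_t *payload, size_t len, int fcs)
{
	uint16_t body_len = (uint16_t)(len + 2);	/* payload + trailer */
	uint16_t chks = 0xffff;
	size_t n = 0;

	out[n++] = body_len & 0xff;	/* le16 length header */
	out[n++] = body_len >> 8;
	memcpy(out + n, payload, len);
	n += len;

	if (fcs) {
		for (size_t i = 0; i < len; i++)
			chks = crc_ccitt_byte(chks, payload[i]);
		out[n++] = chks & 0xff;	/* le16 FCS trailer */
		out[n++] = chks >> 8;
	} else {
		out[n++] = 0;		/* cfpkt_pad_trail(pkt, 2) */
		out[n++] = 0;
	}
	return n;
}

int main(void)
{
	uint8_t buf[64];
	size_t n = frame(buf, (const uint8_t *)"\x01\x02\x03", 3, 1);
	for (size_t i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}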
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c new file mode 100644 index 000000000000..6fb9f9e96cf8 --- /dev/null +++ b/net/caif/cfmuxl.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/stddef.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/cfpkt.h> | ||
10 | #include <net/caif/cfmuxl.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cffrml.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfmuxl, layer) | ||
15 | |||
16 | #define CAIF_CTRL_CHANNEL 0 | ||
17 | #define UP_CACHE_SIZE 8 | ||
18 | #define DN_CACHE_SIZE 8 | ||
19 | |||
20 | struct cfmuxl { | ||
21 | struct cflayer layer; | ||
22 | struct list_head srvl_list; | ||
23 | struct list_head frml_list; | ||
24 | struct cflayer *up_cache[UP_CACHE_SIZE]; | ||
25 | struct cflayer *dn_cache[DN_CACHE_SIZE]; | ||
26 | /* | ||
27 | * Held when inserting or removing downwards layers. | ||
28 | */ | ||
29 | spinlock_t transmit_lock; | ||
30 | |||
31 | /* | ||
32 | * Held when inserting or removing upwards layers. | ||
33 | */ | ||
34 | spinlock_t receive_lock; | ||
35 | |||
36 | }; | ||
37 | |||
38 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
39 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
40 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
41 | int phyid); | ||
42 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); | ||
43 | |||
44 | struct cflayer *cfmuxl_create(void) | ||
45 | { | ||
46 | struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); | ||
47 | if (!this) | ||
48 | return NULL; | ||
49 | memset(this, 0, sizeof(*this)); | ||
50 | this->layer.receive = cfmuxl_receive; | ||
51 | this->layer.transmit = cfmuxl_transmit; | ||
52 | this->layer.ctrlcmd = cfmuxl_ctrlcmd; | ||
53 | INIT_LIST_HEAD(&this->srvl_list); | ||
54 | INIT_LIST_HEAD(&this->frml_list); | ||
55 | spin_lock_init(&this->transmit_lock); | ||
56 | spin_lock_init(&this->receive_lock); | ||
57 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); | ||
58 | return &this->layer; | ||
59 | } | ||
60 | |||
61 | int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) | ||
62 | { | ||
63 | struct cfmuxl *muxl = container_obj(layr); | ||
64 | spin_lock(&muxl->receive_lock); | ||
65 | list_add(&up->node, &muxl->srvl_list); | ||
66 | spin_unlock(&muxl->receive_lock); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid) | ||
71 | { | ||
72 | struct list_head *node; | ||
73 | struct cflayer *layer; | ||
74 | struct cfmuxl *muxl = container_obj(layr); | ||
75 | bool match = false; | ||
76 | spin_lock(&muxl->receive_lock); | ||
77 | |||
78 | list_for_each(node, &muxl->srvl_list) { | ||
79 | layer = list_entry(node, struct cflayer, node); | ||
80 | if (cfsrvl_phyid_match(layer, phyid)) { | ||
81 | match = true; | ||
82 | break; | ||
83 | } | ||
84 | |||
85 | } | ||
86 | spin_unlock(&muxl->receive_lock); | ||
87 | return match; | ||
88 | } | ||
89 | |||
90 | u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id) | ||
91 | { | ||
92 | struct cflayer *up; | ||
93 | int phyid; | ||
94 | struct cfmuxl *muxl = container_obj(layr); | ||
95 | spin_lock(&muxl->receive_lock); | ||
96 | up = get_up(muxl, channel_id); | ||
97 | if (up != NULL) | ||
98 | phyid = cfsrvl_getphyid(up); | ||
99 | else | ||
100 | phyid = 0; | ||
101 | spin_unlock(&muxl->receive_lock); | ||
102 | return phyid; | ||
103 | } | ||
104 | |||
105 | int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) | ||
106 | { | ||
107 | struct cfmuxl *muxl = (struct cfmuxl *) layr; | ||
108 | spin_lock(&muxl->transmit_lock); | ||
109 | list_add(&dn->node, &muxl->frml_list); | ||
110 | spin_unlock(&muxl->transmit_lock); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static struct cflayer *get_from_id(struct list_head *list, u16 id) | ||
115 | { | ||
116 | struct list_head *node; | ||
117 | struct cflayer *layer; | ||
118 | list_for_each(node, list) { | ||
119 | layer = list_entry(node, struct cflayer, node); | ||
120 | if (layer->id == id) | ||
121 | return layer; | ||
122 | } | ||
123 | return NULL; | ||
124 | } | ||
125 | |||
126 | struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) | ||
127 | { | ||
128 | struct cfmuxl *muxl = container_obj(layr); | ||
129 | struct cflayer *dn; | ||
130 | spin_lock(&muxl->transmit_lock); | ||
131 | memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache)); | ||
132 | dn = get_from_id(&muxl->frml_list, phyid); | ||
133 | if (dn == NULL) { | ||
134 | spin_unlock(&muxl->transmit_lock); | ||
135 | return NULL; | ||
136 | } | ||
137 | list_del(&dn->node); | ||
138 | caif_assert(dn != NULL); | ||
139 | spin_unlock(&muxl->transmit_lock); | ||
140 | return dn; | ||
141 | } | ||
142 | |||
143 | /* Invariant: lock is taken */ | ||
144 | static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) | ||
145 | { | ||
146 | struct cflayer *up; | ||
147 | int idx = id % UP_CACHE_SIZE; | ||
148 | up = muxl->up_cache[idx]; | ||
149 | if (up == NULL || up->id != id) { | ||
150 | up = get_from_id(&muxl->srvl_list, id); | ||
151 | muxl->up_cache[idx] = up; | ||
152 | } | ||
153 | return up; | ||
154 | } | ||
155 | |||
156 | /* Invariant: lock is taken */ | ||
157 | static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) | ||
158 | { | ||
159 | struct cflayer *dn; | ||
160 | int idx = dev_info->id % DN_CACHE_SIZE; | ||
161 | dn = muxl->dn_cache[idx]; | ||
162 | if (dn == NULL || dn->id != dev_info->id) { | ||
163 | dn = get_from_id(&muxl->frml_list, dev_info->id); | ||
164 | muxl->dn_cache[idx] = dn; | ||
165 | } | ||
166 | return dn; | ||
167 | } | ||
168 | |||
169 | struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) | ||
170 | { | ||
171 | struct cflayer *up; | ||
172 | struct cfmuxl *muxl = container_obj(layr); | ||
173 | spin_lock(&muxl->receive_lock); | ||
174 | up = get_up(muxl, id); | ||
175 | memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); | ||
176 | list_del(&up->node); | ||
177 | spin_unlock(&muxl->receive_lock); | ||
178 | return up; | ||
179 | } | ||
180 | |||
181 | static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
182 | { | ||
183 | int ret; | ||
184 | struct cfmuxl *muxl = container_obj(layr); | ||
185 | u8 id; | ||
186 | struct cflayer *up; | ||
187 | if (cfpkt_extr_head(pkt, &id, 1) < 0) { | ||
188 | pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__); | ||
189 | cfpkt_destroy(pkt); | ||
190 | return -EPROTO; | ||
191 | } | ||
192 | |||
193 | spin_lock(&muxl->receive_lock); | ||
194 | up = get_up(muxl, id); | ||
195 | spin_unlock(&muxl->receive_lock); | ||
196 | if (up == NULL) { | ||
197 | pr_info("CAIF: %s():Received data on unknown link ID = %d " | ||
198 | "(0x%x) up == NULL", __func__, id, id); | ||
199 | cfpkt_destroy(pkt); | ||
200 | /* | ||
201 | * Don't return ERROR, since modem misbehaves and sends out | ||
202 | * flow on before linksetup response. | ||
203 | */ | ||
204 | return /* CFGLU_EPROT; */ 0; | ||
205 | } | ||
206 | |||
207 | ret = up->receive(up, pkt); | ||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
212 | { | ||
213 | int ret; | ||
214 | struct cfmuxl *muxl = container_obj(layr); | ||
215 | u8 linkid; | ||
216 | struct cflayer *dn; | ||
217 | struct caif_payload_info *info = cfpkt_info(pkt); | ||
218 | dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); | ||
219 | if (dn == NULL) { | ||
220 | pr_warning("CAIF: %s(): Send data on unknown phy " | ||
221 | "ID = %d (0x%x)\n", | ||
222 | __func__, info->dev_info->id, info->dev_info->id); | ||
223 | return -ENOTCONN; | ||
224 | } | ||
225 | info->hdr_len += 1; | ||
226 | linkid = info->channel_id; | ||
227 | cfpkt_add_head(pkt, &linkid, 1); | ||
228 | ret = dn->transmit(dn, pkt); | ||
229 | /* Remove MUX protocol header upon error. */ | ||
230 | if (ret < 0) | ||
231 | cfpkt_extr_head(pkt, &linkid, 1); | ||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
236 | int phyid) | ||
237 | { | ||
238 | struct cfmuxl *muxl = container_obj(layr); | ||
239 | struct list_head *node; | ||
240 | struct cflayer *layer; | ||
241 | list_for_each(node, &muxl->srvl_list) { | ||
242 | layer = list_entry(node, struct cflayer, node); | ||
243 | if (cfsrvl_phyid_match(layer, phyid)) | ||
244 | layer->ctrlcmd(layer, ctrl, phyid); | ||
245 | } | ||
246 | } | ||
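The get_up()/get_dn() helpers above put a small direct-mapped cache in front of the linked-list walk: a channel or device id hashes to id % CACHE_SIZE, and on a miss the list is searched and the slot refreshed. A self-contained sketch of that pattern, with the kernel list replaced by an array "registry" for brevity:

#include <stdio.h>

#define CACHE_SIZE 8
#define MAX_ID 64

struct layer { int id; };

static struct layer *registry[MAX_ID];	/* stand-in for srvl_list */
static struct layer *cache[CACHE_SIZE];
static int misses;

static struct layer *get_up(int id)
{
	int idx = id % CACHE_SIZE;
	struct layer *up = cache[idx];
	if (up == NULL || up->id != id) {
		misses++;
		up = registry[id];	/* the list walk in the kernel */
		cache[idx] = up;
	}
	return up;
}

int main(void)
{
	struct layer a = { 3 }, b = { 11 };	/* 3 % 8 == 11 % 8: collide */
	registry[3] = &a;
	registry[11] = &b;

	get_up(3);
	get_up(3);	/* hit */
	get_up(11);	/* collision evicts id 3 */
	get_up(3);	/* miss again */
	printf("misses: %d\n", misses);	/* prints 3 */
	return 0;
}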
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c new file mode 100644 index 000000000000..83fff2ff6658 --- /dev/null +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/skbuff.h> | ||
9 | #include <linux/hardirq.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | |||
12 | #define PKT_PREFIX CAIF_NEEDED_HEADROOM | ||
13 | #define PKT_POSTFIX CAIF_NEEDED_TAILROOM | ||
14 | #define PKT_LEN_WHEN_EXTENDING 128 | ||
15 | #define PKT_ERROR(pkt, errmsg) do { \ | ||
16 | cfpkt_priv(pkt)->erronous = true; \ | ||
17 | skb_reset_tail_pointer(&pkt->skb); \ | ||
18 | pr_warning("CAIF: " errmsg);\ | ||
19 | } while (0) | ||
20 | |||
21 | struct cfpktq { | ||
22 | struct sk_buff_head head; | ||
23 | atomic_t count; | ||
24 | /* Lock protects count updates */ | ||
25 | spinlock_t lock; | ||
26 | }; | ||
27 | |||
28 | /* | ||
29 | * net/caif/ is generic and does not | ||
30 | * understand SKB, so we do this typecast | ||
31 | */ | ||
32 | struct cfpkt { | ||
33 | struct sk_buff skb; | ||
34 | }; | ||
35 | |||
36 | /* Private data inside SKB */ | ||
37 | struct cfpkt_priv_data { | ||
38 | struct dev_info dev_info; | ||
39 | bool erronous; | ||
40 | }; | ||
41 | |||
42 | inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) | ||
43 | { | ||
44 | return (struct cfpkt_priv_data *) pkt->skb.cb; | ||
45 | } | ||
46 | |||
47 | inline bool is_erronous(struct cfpkt *pkt) | ||
48 | { | ||
49 | return cfpkt_priv(pkt)->erronous; | ||
50 | } | ||
51 | |||
52 | inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) | ||
53 | { | ||
54 | return &pkt->skb; | ||
55 | } | ||
56 | |||
57 | inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) | ||
58 | { | ||
59 | return (struct cfpkt *) skb; | ||
60 | } | ||
61 | |||
62 | |||
63 | struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) | ||
64 | { | ||
65 | struct cfpkt *pkt = skb_to_pkt(nativepkt); | ||
66 | cfpkt_priv(pkt)->erronous = false; | ||
67 | return pkt; | ||
68 | } | ||
69 | EXPORT_SYMBOL(cfpkt_fromnative); | ||
70 | |||
71 | void *cfpkt_tonative(struct cfpkt *pkt) | ||
72 | { | ||
73 | return (void *) pkt; | ||
74 | } | ||
75 | EXPORT_SYMBOL(cfpkt_tonative); | ||
76 | |||
77 | static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) | ||
78 | { | ||
79 | struct sk_buff *skb; | ||
80 | |||
81 | if (likely(in_interrupt())) | ||
82 | skb = alloc_skb(len + pfx, GFP_ATOMIC); | ||
83 | else | ||
84 | skb = alloc_skb(len + pfx, GFP_KERNEL); | ||
85 | |||
86 | if (unlikely(skb == NULL)) | ||
87 | return NULL; | ||
88 | |||
89 | skb_reserve(skb, pfx); | ||
90 | return skb_to_pkt(skb); | ||
91 | } | ||
92 | |||
93 | inline struct cfpkt *cfpkt_create(u16 len) | ||
94 | { | ||
95 | return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | ||
96 | } | ||
97 | EXPORT_SYMBOL(cfpkt_create); | ||
98 | |||
99 | void cfpkt_destroy(struct cfpkt *pkt) | ||
100 | { | ||
101 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
102 | kfree_skb(skb); | ||
103 | } | ||
104 | EXPORT_SYMBOL(cfpkt_destroy); | ||
105 | |||
106 | inline bool cfpkt_more(struct cfpkt *pkt) | ||
107 | { | ||
108 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
109 | return skb->len > 0; | ||
110 | } | ||
111 | EXPORT_SYMBOL(cfpkt_more); | ||
112 | |||
113 | int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) | ||
114 | { | ||
115 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
116 | if (skb_headlen(skb) >= len) { | ||
117 | memcpy(data, skb->data, len); | ||
118 | return 0; | ||
119 | } | ||
120 | return !cfpkt_extr_head(pkt, data, len) && | ||
121 | !cfpkt_add_head(pkt, data, len); | ||
122 | } | ||
123 | EXPORT_SYMBOL(cfpkt_peek_head); | ||
124 | |||
125 | int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) | ||
126 | { | ||
127 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
128 | u8 *from; | ||
129 | if (unlikely(is_erronous(pkt))) | ||
130 | return -EPROTO; | ||
131 | |||
132 | if (unlikely(len > skb->len)) { | ||
133 | PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n"); | ||
134 | return -EPROTO; | ||
135 | } | ||
136 | |||
137 | if (unlikely(len > skb_headlen(skb))) { | ||
138 | if (unlikely(skb_linearize(skb) != 0)) { | ||
139 | PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n"); | ||
140 | return -EPROTO; | ||
141 | } | ||
142 | } | ||
143 | from = skb_pull(skb, len); | ||
144 | from -= len; | ||
145 | memcpy(data, from, len); | ||
146 | return 0; | ||
147 | } | ||
148 | EXPORT_SYMBOL(cfpkt_extr_head); | ||
149 | |||
150 | int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) | ||
151 | { | ||
152 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
153 | u8 *data = dta; | ||
154 | u8 *from; | ||
155 | if (unlikely(is_erronous(pkt))) | ||
156 | return -EPROTO; | ||
157 | |||
158 | if (unlikely(skb_linearize(skb) != 0)) { | ||
159 | PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n"); | ||
160 | return -EPROTO; | ||
161 | } | ||
162 | if (unlikely(skb->data + len > skb_tail_pointer(skb))) { | ||
163 | PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n"); | ||
164 | return -EPROTO; | ||
165 | } | ||
166 | from = skb_tail_pointer(skb) - len; | ||
167 | skb_trim(skb, skb->len - len); | ||
168 | memcpy(data, from, len); | ||
169 | return 0; | ||
170 | } | ||
171 | EXPORT_SYMBOL(cfpkt_extr_trail); | ||
172 | |||
173 | int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) | ||
174 | { | ||
175 | return cfpkt_add_body(pkt, NULL, len); | ||
176 | } | ||
177 | EXPORT_SYMBOL(cfpkt_pad_trail); | ||
178 | |||
179 | int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) | ||
180 | { | ||
181 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
182 | struct sk_buff *lastskb; | ||
183 | u8 *to; | ||
184 | u16 addlen = 0; | ||
185 | |||
186 | |||
187 | if (unlikely(is_erronous(pkt))) | ||
188 | return -EPROTO; | ||
189 | |||
190 | lastskb = skb; | ||
191 | |||
192 | /* Check whether we need to add space at the tail */ | ||
193 | if (unlikely(skb_tailroom(skb) < len)) { | ||
194 | if (likely(len < PKT_LEN_WHEN_EXTENDING)) | ||
195 | addlen = PKT_LEN_WHEN_EXTENDING; | ||
196 | else | ||
197 | addlen = len; | ||
198 | } | ||
199 | |||
200 | /* Check whether we need to change the SKB before writing to the tail */ | ||
201 | if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) { | ||
202 | |||
203 | /* Make sure data is writable */ | ||
204 | if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { | ||
205 | PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n"); | ||
206 | return -EPROTO; | ||
207 | } | ||
208 | /* | ||
209 | * Is the SKB non-linear after skb_cow_data()? If so, we are | ||
210 | * going to add data to the last SKB, so we need to adjust | ||
211 | * lengths of the top SKB. | ||
212 | */ | ||
213 | if (lastskb != skb) { | ||
214 | pr_warning("CAIF: %s(): Packet is non-linear\n", | ||
215 | __func__); | ||
216 | skb->len += len; | ||
217 | skb->data_len += len; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | /* All set to put the last SKB and optionally write data there. */ | ||
222 | to = skb_put(lastskb, len); | ||
223 | if (likely(data)) | ||
224 | memcpy(to, data, len); | ||
225 | return 0; | ||
226 | } | ||
227 | EXPORT_SYMBOL(cfpkt_add_body); | ||
228 | |||
229 | inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) | ||
230 | { | ||
231 | return cfpkt_add_body(pkt, &data, 1); | ||
232 | } | ||
233 | EXPORT_SYMBOL(cfpkt_addbdy); | ||
234 | |||
235 | int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) | ||
236 | { | ||
237 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
238 | struct sk_buff *lastskb; | ||
239 | u8 *to; | ||
240 | const u8 *data = data2; | ||
241 | if (unlikely(is_erronous(pkt))) | ||
242 | return -EPROTO; | ||
243 | if (unlikely(skb_headroom(skb) < len)) { | ||
244 | PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n"); | ||
245 | return -EPROTO; | ||
246 | } | ||
247 | |||
248 | /* Make sure data is writable */ | ||
249 | if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { | ||
250 | PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); | ||
251 | return -EPROTO; | ||
252 | } | ||
253 | |||
254 | to = skb_push(skb, len); | ||
255 | memcpy(to, data, len); | ||
256 | return 0; | ||
257 | } | ||
258 | EXPORT_SYMBOL(cfpkt_add_head); | ||
259 | |||
260 | inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) | ||
261 | { | ||
262 | return cfpkt_add_body(pkt, data, len); | ||
263 | } | ||
264 | EXPORT_SYMBOL(cfpkt_add_trail); | ||
265 | |||
266 | inline u16 cfpkt_getlen(struct cfpkt *pkt) | ||
267 | { | ||
268 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
269 | return skb->len; | ||
270 | } | ||
271 | EXPORT_SYMBOL(cfpkt_getlen); | ||
272 | |||
273 | inline u16 cfpkt_iterate(struct cfpkt *pkt, | ||
274 | u16 (*iter_func)(u16, void *, u16), | ||
275 | u16 data) | ||
276 | { | ||
277 | /* | ||
278 | * Don't care about the performance hit of linearizing; | ||
279 | * checksums should not be used on high-speed interfaces anyway. | ||
280 | */ | ||
281 | if (unlikely(is_erronous(pkt))) | ||
282 | return -EPROTO; | ||
283 | if (unlikely(skb_linearize(&pkt->skb) != 0)) { | ||
284 | PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n"); | ||
285 | return -EPROTO; | ||
286 | } | ||
287 | return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); | ||
288 | } | ||
289 | EXPORT_SYMBOL(cfpkt_iterate); | ||
290 | |||
291 | int cfpkt_setlen(struct cfpkt *pkt, u16 len) | ||
292 | { | ||
293 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
294 | |||
295 | |||
296 | if (unlikely(is_erronous(pkt))) | ||
297 | return -EPROTO; | ||
298 | |||
299 | if (likely(len <= skb->len)) { | ||
300 | if (unlikely(skb->data_len)) | ||
301 | ___pskb_trim(skb, len); | ||
302 | else | ||
303 | skb_trim(skb, len); | ||
304 | |||
305 | return cfpkt_getlen(pkt); | ||
306 | } | ||
307 | |||
308 | /* Need to expand SKB */ | ||
309 | if (unlikely(cfpkt_pad_trail(pkt, len - skb->len) < 0)) | ||
310 | PKT_ERROR(pkt, "cfpkt_setlen: cfpkt_pad_trail failed\n"); | ||
311 | |||
312 | return cfpkt_getlen(pkt); | ||
313 | } | ||
314 | EXPORT_SYMBOL(cfpkt_setlen); | ||
315 | |||
316 | struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) | ||
317 | { | ||
318 | struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | ||
319 | if (unlikely(data != NULL)) | ||
320 | cfpkt_add_body(pkt, data, len); | ||
321 | return pkt; | ||
322 | } | ||
323 | EXPORT_SYMBOL(cfpkt_create_uplink); | ||
324 | |||
325 | struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, | ||
326 | struct cfpkt *addpkt, | ||
327 | u16 expectlen) | ||
328 | { | ||
329 | struct sk_buff *dst = pkt_to_skb(dstpkt); | ||
330 | struct sk_buff *add = pkt_to_skb(addpkt); | ||
331 | u16 addlen = skb_headlen(add); | ||
332 | u16 neededtailspace; | ||
333 | struct sk_buff *tmp; | ||
334 | u16 dstlen; | ||
335 | u16 createlen; | ||
336 | if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { | ||
337 | cfpkt_destroy(addpkt); | ||
338 | return dstpkt; | ||
339 | } | ||
340 | if (expectlen > addlen) | ||
341 | neededtailspace = expectlen; | ||
342 | else | ||
343 | neededtailspace = addlen; | ||
344 | |||
345 | if (dst->tail + neededtailspace > dst->end) { | ||
346 | /* Create a duplicate of 'dst' with more tail space */ | ||
347 | dstlen = skb_headlen(dst); | ||
348 | createlen = dstlen + neededtailspace; | ||
349 | tmp = pkt_to_skb( | ||
350 | cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); | ||
351 | if (!tmp) | ||
352 | return NULL; | ||
353 | skb_set_tail_pointer(tmp, dstlen); | ||
354 | tmp->len = dstlen; | ||
355 | memcpy(tmp->data, dst->data, dstlen); | ||
356 | cfpkt_destroy(dstpkt); | ||
357 | dst = tmp; | ||
358 | } | ||
359 | memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add)); | ||
360 | cfpkt_destroy(addpkt); | ||
361 | dst->tail += addlen; | ||
362 | dst->len += addlen; | ||
363 | return skb_to_pkt(dst); | ||
364 | } | ||
365 | EXPORT_SYMBOL(cfpkt_append); | ||
366 | |||
367 | struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) | ||
368 | { | ||
369 | struct sk_buff *skb2; | ||
370 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
371 | u8 *split = skb->data + pos; | ||
372 | u16 len2nd = skb_tail_pointer(skb) - split; | ||
373 | |||
374 | if (unlikely(is_erronous(pkt))) | ||
375 | return NULL; | ||
376 | |||
377 | if (skb->data + pos > skb_tail_pointer(skb)) { | ||
378 | PKT_ERROR(pkt, | ||
379 | "cfpkt_split: trying to split beyond end of packet"); | ||
380 | return NULL; | ||
381 | } | ||
382 | |||
383 | /* Create a new packet for the second part of the data */ | ||
384 | skb2 = pkt_to_skb( | ||
385 | cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, | ||
386 | PKT_PREFIX)); | ||
387 | |||
388 | if (skb2 == NULL) | ||
389 | return NULL; | ||
390 | |||
391 | /* Reduce the length of the original packet */ | ||
392 | skb_set_tail_pointer(skb, pos); | ||
393 | skb->len = pos; | ||
394 | |||
395 | memcpy(skb2->data, split, len2nd); | ||
396 | skb2->tail += len2nd; | ||
397 | skb2->len += len2nd; | ||
398 | return skb_to_pkt(skb2); | ||
399 | } | ||
400 | EXPORT_SYMBOL(cfpkt_split); | ||
401 | |||
402 | char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen) | ||
403 | { | ||
404 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
405 | char *p = buf; | ||
406 | int i; | ||
407 | |||
408 | /* | ||
409 | * Sanity-check the buffer length; it needs to be at least as large | ||
410 | * as the header info (roughly 50 bytes). | ||
411 | */ | ||
412 | if (buflen < 50) | ||
413 | return NULL; | ||
414 | |||
415 | snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [", | ||
416 | is_erronous(pkt) ? "ERRONOUS-SKB" : | ||
417 | (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"), | ||
418 | skb, | ||
419 | (long) skb->len, | ||
420 | (long) (skb_tail_pointer(skb) - skb->data), | ||
421 | (long) skb->data_len, | ||
422 | (long) (skb->data - skb->head), | ||
423 | (long) (skb_tail_pointer(skb) - skb->head)); | ||
424 | p = buf + strlen(buf); | ||
425 | |||
426 | for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) { | ||
427 | if (p > buf + buflen - 10) { | ||
428 | sprintf(p, "..."); | ||
429 | p = buf + strlen(buf); | ||
430 | break; | ||
431 | } | ||
432 | sprintf(p, "%02x,", skb->data[i]); | ||
433 | p = buf + strlen(buf); | ||
434 | } | ||
435 | sprintf(p, "]\n"); | ||
436 | return buf; | ||
437 | } | ||
438 | EXPORT_SYMBOL(cfpkt_log_pkt); | ||
439 | |||
440 | int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen) | ||
441 | { | ||
442 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
443 | struct sk_buff *lastskb; | ||
444 | |||
445 | caif_assert(buf != NULL); | ||
446 | if (unlikely(is_erronous(pkt))) | ||
447 | return -EPROTO; | ||
448 | /* Make sure SKB is writable */ | ||
449 | if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { | ||
450 | PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n"); | ||
451 | return -EPROTO; | ||
452 | } | ||
453 | |||
454 | if (unlikely(skb_linearize(skb) != 0)) { | ||
455 | PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n"); | ||
456 | return -EPROTO; | ||
457 | } | ||
458 | |||
459 | if (unlikely(skb_tailroom(skb) < buflen)) { | ||
460 | PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n"); | ||
461 | return -EPROTO; | ||
462 | } | ||
463 | |||
464 | *buf = skb_put(skb, buflen); | ||
465 | return 1; | ||
466 | } | ||
467 | EXPORT_SYMBOL(cfpkt_raw_append); | ||
468 | |||
469 | int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen) | ||
470 | { | ||
471 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
472 | |||
473 | caif_assert(buf != NULL); | ||
474 | if (unlikely(is_erronous(pkt))) | ||
475 | return -EPROTO; | ||
476 | |||
477 | if (unlikely(buflen > skb->len)) { | ||
478 | PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large " | ||
479 | "- failed\n"); | ||
480 | return -EPROTO; | ||
481 | } | ||
482 | |||
483 | if (unlikely(buflen > skb_headlen(skb))) { | ||
484 | if (unlikely(skb_linearize(skb) != 0)) { | ||
485 | PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n"); | ||
486 | return -EPROTO; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | *buf = skb->data; | ||
491 | skb_pull(skb, buflen); | ||
492 | |||
493 | return 1; | ||
494 | } | ||
495 | EXPORT_SYMBOL(cfpkt_raw_extract); | ||
496 | |||
497 | inline bool cfpkt_erroneous(struct cfpkt *pkt) | ||
498 | { | ||
499 | return cfpkt_priv(pkt)->erronous; | ||
500 | } | ||
501 | EXPORT_SYMBOL(cfpkt_erroneous); | ||
502 | |||
503 | struct cfpktq *cfpktq_create(void) | ||
504 | { | ||
505 | struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC); | ||
506 | if (!q) | ||
507 | return NULL; | ||
508 | skb_queue_head_init(&q->head); | ||
509 | atomic_set(&q->count, 0); | ||
510 | spin_lock_init(&q->lock); | ||
511 | return q; | ||
512 | } | ||
513 | EXPORT_SYMBOL(cfpktq_create); | ||
514 | |||
515 | void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio) | ||
516 | { | ||
517 | atomic_inc(&pktq->count); | ||
518 | spin_lock(&pktq->lock); | ||
519 | skb_queue_tail(&pktq->head, pkt_to_skb(pkt)); | ||
520 | spin_unlock(&pktq->lock); | ||
521 | |||
522 | } | ||
523 | EXPORT_SYMBOL(cfpkt_queue); | ||
524 | |||
525 | struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq) | ||
526 | { | ||
527 | struct cfpkt *tmp; | ||
528 | spin_lock(&pktq->lock); | ||
529 | tmp = skb_to_pkt(skb_peek(&pktq->head)); | ||
530 | spin_unlock(&pktq->lock); | ||
531 | return tmp; | ||
532 | } | ||
533 | EXPORT_SYMBOL(cfpkt_qpeek); | ||
534 | |||
535 | struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq) | ||
536 | { | ||
537 | struct cfpkt *pkt; | ||
538 | spin_lock(&pktq->lock); | ||
539 | pkt = skb_to_pkt(skb_dequeue(&pktq->head)); | ||
540 | if (pkt) { | ||
541 | atomic_dec(&pktq->count); | ||
542 | caif_assert(atomic_read(&pktq->count) >= 0); | ||
543 | } | ||
544 | spin_unlock(&pktq->lock); | ||
545 | return pkt; | ||
546 | } | ||
547 | EXPORT_SYMBOL(cfpkt_dequeue); | ||
548 | |||
549 | int cfpkt_qcount(struct cfpktq *pktq) | ||
550 | { | ||
551 | return atomic_read(&pktq->count); | ||
552 | } | ||
553 | EXPORT_SYMBOL(cfpkt_qcount); | ||
554 | |||
555 | struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt) | ||
556 | { | ||
557 | struct cfpkt *clone; | ||
558 | clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC)); | ||
559 | /* Free original packet. */ | ||
560 | cfpkt_destroy(pkt); | ||
561 | if (!clone) | ||
562 | return NULL; | ||
563 | return clone; | ||
564 | } | ||
565 | EXPORT_SYMBOL(cfpkt_clone_release); | ||
566 | |||
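567 | /* The CAIF payload routing info is carried in the skb control buffer (cb). */ | ||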
567 | struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) | ||
568 | { | ||
569 | return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; | ||
570 | } | ||
571 | EXPORT_SYMBOL(cfpkt_info); | ||
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c new file mode 100644 index 000000000000..cd2830fec935 --- /dev/null +++ b/net/caif/cfrfml.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfsrvl.h> | ||
12 | #include <net/caif/cfpkt.h> | ||
13 | |||
14 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
15 | |||
16 | #define RFM_SEGMENTATION_BIT 0x01 | ||
17 | #define RFM_PAYLOAD 0x00 | ||
18 | #define RFM_CMD_BIT 0x80 | ||
19 | #define RFM_FLOW_OFF 0x81 | ||
20 | #define RFM_FLOW_ON 0x80 | ||
21 | #define RFM_SET_PIN 0x82 | ||
22 | #define RFM_CTRL_PKT_SIZE 1 | ||
23 | |||
24 | static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
25 | static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
26 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl); | ||
27 | |||
28 | struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info) | ||
29 | { | ||
30 | struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
31 | if (!rfm) { | ||
32 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
33 | return NULL; | ||
34 | } | ||
35 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
36 | memset(rfm, 0, sizeof(struct cfsrvl)); | ||
37 | cfsrvl_init(rfm, channel_id, dev_info); | ||
38 | rfm->layer.modemcmd = cfservl_modemcmd; | ||
39 | rfm->layer.receive = cfrfml_receive; | ||
40 | rfm->layer.transmit = cfrfml_transmit; | ||
41 | snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id); | ||
42 | return &rfm->layer; | ||
43 | } | ||
44 | |||
45 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
46 | { | ||
47 | return -EPROTO; | ||
48 | } | ||
49 | |||
50 | static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
51 | { | ||
52 | u8 tmp; | ||
53 | bool segmented; | ||
54 | int ret; | ||
55 | caif_assert(layr->up != NULL); | ||
56 | caif_assert(layr->receive != NULL); | ||
57 | |||
58 | /* | ||
59 | * RFM is taking care of segmentation and stripping of | ||
60 | * segmentation bit. | ||
61 | */ | ||
62 | if (cfpkt_extr_head(pkt, &tmp, 1) < 0) { | ||
63 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return -EPROTO; | ||
66 | } | ||
67 | segmented = tmp & RFM_SEGMENTATION_BIT; | ||
68 | caif_assert(!segmented); | ||
69 | |||
70 | ret = layr->up->receive(layr->up, pkt); | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
75 | { | ||
76 | u8 tmp = 0; | ||
77 | int ret; | ||
78 | struct cfsrvl *service = container_obj(layr); | ||
79 | |||
80 | caif_assert(layr->dn != NULL); | ||
81 | caif_assert(layr->dn->transmit != NULL); | ||
82 | |||
83 | if (!cfsrvl_ready(service, &ret)) | ||
84 | return ret; | ||
85 | |||
86 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
87 | pr_err("CAIF: %s(): Packet too large - size=%d\n", | ||
88 | __func__, cfpkt_getlen(pkt)); | ||
89 | return -EOVERFLOW; | ||
90 | } | ||
91 | if (cfpkt_add_head(pkt, &tmp, 1) < 0) { | ||
92 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
93 | return -EPROTO; | ||
94 | } | ||
95 | |||
96 | /* Add info for MUX-layer to route the packet out. */ | ||
97 | cfpkt_info(pkt)->channel_id = service->layer.id; | ||
98 | /* | ||
99 | * To optimize alignment, we add up the size of CAIF header before | ||
100 | * payload. | ||
101 | */ | ||
102 | cfpkt_info(pkt)->hdr_len = 1; | ||
103 | cfpkt_info(pkt)->dev_info = &service->dev_info; | ||
104 | ret = layr->dn->transmit(layr->dn, pkt); | ||
105 | if (ret < 0) | ||
106 | cfpkt_extr_head(pkt, &tmp, 1); | ||
107 | return ret; | ||
108 | } | ||
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c new file mode 100644 index 000000000000..06029ea2da2f --- /dev/null +++ b/net/caif/cfserl.c | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <net/caif/caif_layer.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | #include <net/caif/cfserl.h> | ||
13 | |||
14 | #define container_obj(layr) ((struct cfserl *) layr) | ||
15 | |||
16 | #define CFSERL_STX 0x02 | ||
17 | #define CAIF_MINIMUM_PACKET_SIZE 4 | ||
18 | struct cfserl { | ||
19 | struct cflayer layer; | ||
20 | struct cfpkt *incomplete_frm; | ||
21 | /* Protects parallel processing of incoming packets */ | ||
22 | spinlock_t sync; | ||
23 | bool usestx; | ||
24 | }; | ||
25 | #define STXLEN(layr) (layr->usestx ? 1 : 0) | ||
26 | |||
27 | static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
28 | static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
29 | static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
30 | int phyid); | ||
31 | |||
32 | struct cflayer *cfserl_create(int type, int instance, bool use_stx) | ||
33 | { | ||
34 | struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); | ||
35 | if (!this) { | ||
36 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
37 | return NULL; | ||
38 | } | ||
39 | caif_assert(offsetof(struct cfserl, layer) == 0); | ||
40 | memset(this, 0, sizeof(struct cfserl)); | ||
41 | this->layer.receive = cfserl_receive; | ||
42 | this->layer.transmit = cfserl_transmit; | ||
43 | this->layer.ctrlcmd = cfserl_ctrlcmd; | ||
44 | this->layer.type = type; | ||
45 | this->usestx = use_stx; | ||
46 | spin_lock_init(&this->sync); | ||
47 | snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); | ||
48 | return &this->layer; | ||
49 | } | ||
50 | |||
51 | static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) | ||
52 | { | ||
53 | struct cfserl *layr = container_obj(l); | ||
54 | u16 pkt_len; | ||
55 | struct cfpkt *pkt = NULL; | ||
56 | struct cfpkt *tail_pkt = NULL; | ||
57 | u8 tmp8; | ||
58 | u16 tmp; | ||
59 | u8 stx = CFSERL_STX; | ||
60 | int ret; | ||
61 | u16 expectlen = 0; | ||
62 | caif_assert(newpkt != NULL); | ||
63 | spin_lock(&layr->sync); | ||
64 | |||
65 | if (layr->incomplete_frm != NULL) { | ||
66 | |||
67 | layr->incomplete_frm = | ||
68 | cfpkt_append(layr->incomplete_frm, newpkt, expectlen); | ||
69 | pkt = layr->incomplete_frm; | ||
70 | } else { | ||
71 | pkt = newpkt; | ||
72 | } | ||
73 | layr->incomplete_frm = NULL; | ||
74 | |||
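75 | /* Reassembly loop: hunt for STX when framing is used, read the 2-byte length field, pass each complete frame upwards, and park any remainder in incomplete_frm. */ | ||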
75 | do { | ||
76 | /* Search for STX at start of pkt if STX is used */ | ||
77 | if (layr->usestx) { | ||
78 | cfpkt_extr_head(pkt, &tmp8, 1); | ||
79 | if (tmp8 != CFSERL_STX) { | ||
80 | while (cfpkt_more(pkt) | ||
81 | && tmp8 != CFSERL_STX) { | ||
82 | cfpkt_extr_head(pkt, &tmp8, 1); | ||
83 | } | ||
84 | if (!cfpkt_more(pkt)) { | ||
85 | cfpkt_destroy(pkt); | ||
86 | layr->incomplete_frm = NULL; | ||
87 | spin_unlock(&layr->sync); | ||
88 | return -EPROTO; | ||
89 | } | ||
90 | } | ||
91 | } | ||
92 | |||
93 | pkt_len = cfpkt_getlen(pkt); | ||
94 | |||
95 | /* | ||
96 | * pkt_len is the accumulated length of the packet data | ||
97 | * we have received so far. | ||
98 | * Exit if frame doesn't hold length. | ||
99 | */ | ||
100 | |||
101 | if (pkt_len < 2) { | ||
102 | if (layr->usestx) | ||
103 | cfpkt_add_head(pkt, &stx, 1); | ||
104 | layr->incomplete_frm = pkt; | ||
105 | spin_unlock(&layr->sync); | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Find length of frame. | ||
111 | * expectlen is the length we need for a full frame. | ||
112 | */ | ||
113 | cfpkt_peek_head(pkt, &tmp, 2); | ||
114 | expectlen = le16_to_cpu(tmp) + 2; | ||
115 | /* | ||
116 | * Frame error handling | ||
117 | */ | ||
118 | if (expectlen < CAIF_MINIMUM_PACKET_SIZE | ||
119 | || expectlen > CAIF_MAX_FRAMESIZE) { | ||
120 | if (!layr->usestx) { | ||
121 | if (pkt != NULL) | ||
122 | cfpkt_destroy(pkt); | ||
123 | layr->incomplete_frm = NULL; | ||
124 | expectlen = 0; | ||
125 | spin_unlock(&layr->sync); | ||
126 | return -EPROTO; | ||
127 | } | ||
128 | continue; | ||
129 | } | ||
130 | |||
131 | if (pkt_len < expectlen) { | ||
132 | /* Too little received data */ | ||
133 | if (layr->usestx) | ||
134 | cfpkt_add_head(pkt, &stx, 1); | ||
135 | layr->incomplete_frm = pkt; | ||
136 | spin_unlock(&layr->sync); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Enough data for at least one frame. | ||
142 | * Split the frame, if too long | ||
143 | */ | ||
144 | if (pkt_len > expectlen) | ||
145 | tail_pkt = cfpkt_split(pkt, expectlen); | ||
146 | else | ||
147 | tail_pkt = NULL; | ||
148 | |||
149 | /* Send the first part of packet upwards.*/ | ||
150 | spin_unlock(&layr->sync); | ||
151 | ret = layr->layer.up->receive(layr->layer.up, pkt); | ||
152 | spin_lock(&layr->sync); | ||
153 | if (ret == -EILSEQ) { | ||
154 | if (layr->usestx) { | ||
155 | if (tail_pkt != NULL) | ||
156 | pkt = cfpkt_append(pkt, tail_pkt, 0); | ||
157 | |||
158 | /* Start search for next STX if frame failed */ | ||
159 | continue; | ||
160 | } else { | ||
161 | cfpkt_destroy(pkt); | ||
162 | pkt = NULL; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | pkt = tail_pkt; | ||
167 | |||
168 | } while (pkt != NULL); | ||
169 | |||
170 | spin_unlock(&layr->sync); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) | ||
175 | { | ||
176 | struct cfserl *layr = container_obj(layer); | ||
177 | int ret; | ||
178 | u8 tmp8 = CFSERL_STX; | ||
179 | if (layr->usestx) | ||
180 | cfpkt_add_head(newpkt, &tmp8, 1); | ||
181 | ret = layer->dn->transmit(layer->dn, newpkt); | ||
182 | if (ret < 0) | ||
183 | cfpkt_extr_head(newpkt, &tmp8, 1); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
189 | int phyid) | ||
190 | { | ||
191 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
192 | } | ||
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c new file mode 100644 index 000000000000..d470c51c6431 --- /dev/null +++ b/net/caif/cfsrvl.c | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define SRVL_CTRL_PKT_SIZE 1 | ||
16 | #define SRVL_FLOW_OFF 0x81 | ||
17 | #define SRVL_FLOW_ON 0x80 | ||
18 | #define SRVL_SET_PIN 0x82 | ||
19 | |||
20 | |||
21 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
22 | |||
23 | static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | ||
24 | int phyid) | ||
25 | { | ||
26 | struct cfsrvl *service = container_obj(layr); | ||
27 | caif_assert(layr->up != NULL); | ||
28 | caif_assert(layr->up->ctrlcmd != NULL); | ||
29 | switch (ctrl) { | ||
30 | case CAIF_CTRLCMD_INIT_RSP: | ||
31 | service->open = true; | ||
32 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
33 | break; | ||
34 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
35 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
36 | service->open = false; | ||
37 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
38 | break; | ||
39 | case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: | ||
40 | if (phyid != service->dev_info.id) | ||
41 | break; | ||
42 | if (service->modem_flow_on) | ||
43 | layr->up->ctrlcmd(layr->up, | ||
44 | CAIF_CTRLCMD_FLOW_OFF_IND, phyid); | ||
45 | service->phy_flow_on = false; | ||
46 | break; | ||
47 | case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND: | ||
48 | if (phyid != service->dev_info.id) | ||
49 | return; | ||
50 | if (service->modem_flow_on) { | ||
51 | layr->up->ctrlcmd(layr->up, | ||
52 | CAIF_CTRLCMD_FLOW_ON_IND, | ||
53 | phyid); | ||
54 | } | ||
55 | service->phy_flow_on = true; | ||
56 | break; | ||
57 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
58 | if (service->phy_flow_on) { | ||
59 | layr->up->ctrlcmd(layr->up, | ||
60 | CAIF_CTRLCMD_FLOW_OFF_IND, phyid); | ||
61 | } | ||
62 | service->modem_flow_on = false; | ||
63 | break; | ||
64 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
65 | if (service->phy_flow_on) { | ||
66 | layr->up->ctrlcmd(layr->up, | ||
67 | CAIF_CTRLCMD_FLOW_ON_IND, phyid); | ||
68 | } | ||
69 | service->modem_flow_on = true; | ||
70 | break; | ||
71 | case _CAIF_CTRLCMD_PHYIF_DOWN_IND: | ||
72 | /* In case interface is down, let's fake a remote shutdown */ | ||
73 | layr->up->ctrlcmd(layr->up, | ||
74 | CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid); | ||
75 | break; | ||
76 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
77 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
78 | break; | ||
79 | default: | ||
80 | pr_warning("CAIF: %s(): " | ||
81 | "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl); | ||
82 | /* Unexpected command: forward it upwards and assume PHY flow is on. */ | ||
83 | layr->up->ctrlcmd(layr->up, ctrl, phyid); | ||
84 | service->phy_flow_on = true; | ||
85 | break; | ||
86 | } | ||
87 | } | ||
88 | |||
89 | static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) | ||
90 | { | ||
91 | struct cfsrvl *service = container_obj(layr); | ||
92 | caif_assert(layr != NULL); | ||
93 | caif_assert(layr->dn != NULL); | ||
94 | caif_assert(layr->dn->transmit != NULL); | ||
95 | switch (ctrl) { | ||
96 | case CAIF_MODEMCMD_FLOW_ON_REQ: | ||
97 | { | ||
98 | struct cfpkt *pkt; | ||
99 | struct caif_payload_info *info; | ||
100 | u8 flow_on = SRVL_FLOW_ON; | ||
101 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | ||
102 | if (!pkt) { | ||
103 | pr_warning("CAIF: %s(): Out of memory\n", | ||
104 | __func__); | ||
105 | return -ENOMEM; | ||
106 | } | ||
107 | |||
108 | if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { | ||
109 | pr_err("CAIF: %s(): Packet is erroneous!\n", | ||
110 | __func__); | ||
111 | cfpkt_destroy(pkt); | ||
112 | return -EPROTO; | ||
113 | } | ||
114 | info = cfpkt_info(pkt); | ||
115 | info->channel_id = service->layer.id; | ||
116 | info->hdr_len = 1; | ||
117 | info->dev_info = &service->dev_info; | ||
118 | return layr->dn->transmit(layr->dn, pkt); | ||
119 | } | ||
120 | case CAIF_MODEMCMD_FLOW_OFF_REQ: | ||
121 | { | ||
122 | struct cfpkt *pkt; | ||
123 | struct caif_payload_info *info; | ||
124 | u8 flow_off = SRVL_FLOW_OFF; | ||
125 | pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); | ||
126 | if (!pkt || cfpkt_add_head(pkt, &flow_off, 1) < 0) { | ||
127 | pr_err("CAIF: %s(): Out of memory or erroneous packet\n", | ||
128 | __func__); | ||
129 | cfpkt_destroy(pkt); | ||
130 | return -EPROTO; | ||
131 | } | ||
132 | info = cfpkt_info(pkt); | ||
133 | info->channel_id = service->layer.id; | ||
134 | info->hdr_len = 1; | ||
135 | info->dev_info = &service->dev_info; | ||
136 | return layr->dn->transmit(layr->dn, pkt); | ||
137 | } | ||
138 | default: | ||
139 | break; | ||
140 | } | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | void cfservl_destroy(struct cflayer *layer) | ||
145 | { | ||
146 | kfree(layer); | ||
147 | } | ||
148 | |||
149 | void cfsrvl_init(struct cfsrvl *service, | ||
150 | u8 channel_id, | ||
151 | struct dev_info *dev_info) | ||
152 | { | ||
153 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
154 | service->open = false; | ||
155 | service->modem_flow_on = true; | ||
156 | service->phy_flow_on = true; | ||
157 | service->layer.id = channel_id; | ||
158 | service->layer.ctrlcmd = cfservl_ctrlcmd; | ||
159 | service->layer.modemcmd = cfservl_modemcmd; | ||
160 | service->dev_info = *dev_info; | ||
161 | } | ||
162 | |||
163 | bool cfsrvl_ready(struct cfsrvl *service, int *err) | ||
164 | { | ||
165 | if (service->open && service->modem_flow_on && service->phy_flow_on) | ||
166 | return true; | ||
167 | if (!service->open) { | ||
168 | *err = -ENOTCONN; | ||
169 | return false; | ||
170 | } | ||
171 | caif_assert(!(service->modem_flow_on && service->phy_flow_on)); | ||
172 | *err = -EAGAIN; | ||
173 | return false; | ||
174 | } | ||
175 | u8 cfsrvl_getphyid(struct cflayer *layer) | ||
176 | { | ||
177 | struct cfsrvl *servl = container_obj(layer); | ||
178 | return servl->dev_info.id; | ||
179 | } | ||
180 | |||
181 | bool cfsrvl_phyid_match(struct cflayer *layer, int phyid) | ||
182 | { | ||
183 | struct cfsrvl *servl = container_obj(layer); | ||
184 | return servl->dev_info.id == phyid; | ||
185 | } | ||
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c new file mode 100644 index 000000000000..5fd2c9ea8b42 --- /dev/null +++ b/net/caif/cfutill.c | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
16 | #define UTIL_PAYLOAD 0x00 | ||
17 | #define UTIL_CMD_BIT 0x80 | ||
18 | #define UTIL_REMOTE_SHUTDOWN 0x82 | ||
19 | #define UTIL_FLOW_OFF 0x81 | ||
20 | #define UTIL_FLOW_ON 0x80 | ||
21 | #define UTIL_CTRL_PKT_SIZE 1 | ||
22 | static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
24 | |||
25 | struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info) | ||
26 | { | ||
27 | struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
28 | if (!util) { | ||
29 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
30 | return NULL; | ||
31 | } | ||
32 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
33 | memset(util, 0, sizeof(struct cfsrvl)); | ||
34 | cfsrvl_init(util, channel_id, dev_info); | ||
35 | util->layer.receive = cfutill_receive; | ||
36 | util->layer.transmit = cfutill_transmit; | ||
37 | snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1"); | ||
38 | return &util->layer; | ||
39 | } | ||
40 | |||
41 | static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
42 | { | ||
43 | u8 cmd = -1; | ||
44 | struct cfsrvl *service = container_obj(layr); | ||
45 | caif_assert(layr != NULL); | ||
46 | caif_assert(layr->up != NULL); | ||
47 | caif_assert(layr->up->receive != NULL); | ||
48 | caif_assert(layr->up->ctrlcmd != NULL); | ||
49 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
50 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
51 | cfpkt_destroy(pkt); | ||
52 | return -EPROTO; | ||
53 | } | ||
54 | |||
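55 | /* The first byte is the utility-service control header: payload goes up the stack, flow-control and shutdown commands are handled here. */ | ||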
55 | switch (cmd) { | ||
56 | case UTIL_PAYLOAD: | ||
57 | return layr->up->receive(layr->up, pkt); | ||
58 | case UTIL_FLOW_OFF: | ||
59 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
60 | cfpkt_destroy(pkt); | ||
61 | return 0; | ||
62 | case UTIL_FLOW_ON: | ||
63 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return 0; | ||
66 | case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */ | ||
67 | pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n", | ||
68 | __func__); | ||
69 | layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0); | ||
70 | service->open = false; | ||
71 | cfpkt_destroy(pkt); | ||
72 | return 0; | ||
73 | default: | ||
74 | cfpkt_destroy(pkt); | ||
75 | pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n", | ||
76 | __func__, cmd, cmd); | ||
77 | return -EPROTO; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
82 | { | ||
83 | u8 zero = 0; | ||
84 | struct caif_payload_info *info; | ||
85 | int ret; | ||
86 | struct cfsrvl *service = container_obj(layr); | ||
87 | caif_assert(layr != NULL); | ||
88 | caif_assert(layr->dn != NULL); | ||
89 | caif_assert(layr->dn->transmit != NULL); | ||
90 | if (!cfsrvl_ready(service, &ret)) | ||
91 | return ret; | ||
92 | |||
93 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
94 | pr_err("CAIF: %s(): packet too large size=%d\n", | ||
95 | __func__, cfpkt_getlen(pkt)); | ||
96 | return -EOVERFLOW; | ||
97 | } | ||
98 | |||
99 | cfpkt_add_head(pkt, &zero, 1); | ||
100 | /* Add info for MUX-layer to route the packet out. */ | ||
101 | info = cfpkt_info(pkt); | ||
102 | info->channel_id = service->layer.id; | ||
103 | /* | ||
104 | * To optimize alignment, we add up the size of CAIF header before | ||
105 | * payload. | ||
106 | */ | ||
107 | info->hdr_len = 1; | ||
108 | info->dev_info = &service->dev_info; | ||
109 | ret = layr->dn->transmit(layr->dn, pkt); | ||
110 | if (ret < 0) { | ||
111 | /* Roll back the single header byte added above. */ | ||
112 | cfpkt_extr_head(pkt, &zero, 1); | ||
113 | } | ||
114 | return ret; | ||
115 | } | ||
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c new file mode 100644 index 000000000000..0fd827f49491 --- /dev/null +++ b/net/caif/cfveil.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/stddef.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <net/caif/caif_layer.h> | ||
10 | #include <net/caif/cfsrvl.h> | ||
11 | #include <net/caif/cfpkt.h> | ||
12 | |||
13 | #define VEI_PAYLOAD 0x00 | ||
14 | #define VEI_CMD_BIT 0x80 | ||
15 | #define VEI_FLOW_OFF 0x81 | ||
16 | #define VEI_FLOW_ON 0x80 | ||
17 | #define VEI_SET_PIN 0x82 | ||
18 | #define VEI_CTRL_PKT_SIZE 1 | ||
19 | #define container_obj(layr) container_of(layr, struct cfsrvl, layer) | ||
20 | |||
21 | static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
22 | static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
23 | |||
24 | struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info) | ||
25 | { | ||
26 | struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
27 | if (!vei) { | ||
28 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
29 | return NULL; | ||
30 | } | ||
31 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
32 | memset(vei, 0, sizeof(struct cfsrvl)); | ||
33 | cfsrvl_init(vei, channel_id, dev_info); | ||
34 | vei->layer.receive = cfvei_receive; | ||
35 | vei->layer.transmit = cfvei_transmit; | ||
36 | snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id); | ||
37 | return &vei->layer; | ||
38 | } | ||
39 | |||
40 | static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
41 | { | ||
42 | u8 cmd; | ||
43 | int ret; | ||
44 | caif_assert(layr->up != NULL); | ||
45 | caif_assert(layr->receive != NULL); | ||
46 | caif_assert(layr->ctrlcmd != NULL); | ||
47 | |||
48 | |||
49 | if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { | ||
50 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
51 | cfpkt_destroy(pkt); | ||
52 | return -EPROTO; | ||
53 | } | ||
54 | switch (cmd) { | ||
55 | case VEI_PAYLOAD: | ||
56 | ret = layr->up->receive(layr->up, pkt); | ||
57 | return ret; | ||
58 | case VEI_FLOW_OFF: | ||
59 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0); | ||
60 | cfpkt_destroy(pkt); | ||
61 | return 0; | ||
62 | case VEI_FLOW_ON: | ||
63 | layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0); | ||
64 | cfpkt_destroy(pkt); | ||
65 | return 0; | ||
66 | case VEI_SET_PIN: /* SET RS232 PIN */ | ||
67 | cfpkt_destroy(pkt); | ||
68 | return 0; | ||
69 | default: | ||
70 | pr_warning("CAIF: %s(): Unknown VEI control packet %d (0x%x)!\n", | ||
71 | __func__, cmd, cmd); | ||
72 | cfpkt_destroy(pkt); | ||
73 | return -EPROTO; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
78 | { | ||
79 | u8 tmp = 0; | ||
80 | struct caif_payload_info *info; | ||
81 | int ret; | ||
82 | struct cfsrvl *service = container_obj(layr); | ||
83 | if (!cfsrvl_ready(service, &ret)) | ||
84 | return ret; | ||
85 | caif_assert(layr->dn != NULL); | ||
86 | caif_assert(layr->dn->transmit != NULL); | ||
87 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | ||
88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", | ||
89 | __func__, cfpkt_getlen(pkt)); | ||
90 | return -EOVERFLOW; | ||
91 | } | ||
92 | |||
93 | if (cfpkt_add_head(pkt, &tmp, 1) < 0) { | ||
94 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
95 | return -EPROTO; | ||
96 | } | ||
97 | |||
98 | /* Add info-> for MUX-layer to route the packet out. */ | ||
99 | info = cfpkt_info(pkt); | ||
100 | info->channel_id = service->layer.id; | ||
101 | info->hdr_len = 1; | ||
102 | info->dev_info = &service->dev_info; | ||
103 | ret = layr->dn->transmit(layr->dn, pkt); | ||
104 | if (ret < 0) | ||
105 | cfpkt_extr_head(pkt, &tmp, 1); | ||
106 | return ret; | ||
107 | } | ||
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c new file mode 100644 index 000000000000..89ad4ea239f1 --- /dev/null +++ b/net/caif/cfvidl.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <net/caif/caif_layer.h> | ||
12 | #include <net/caif/cfsrvl.h> | ||
13 | #include <net/caif/cfpkt.h> | ||
14 | |||
15 | #define container_obj(layr) ((struct cfsrvl *) layr) | ||
16 | |||
17 | static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt); | ||
18 | static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt); | ||
19 | |||
20 | struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info) | ||
21 | { | ||
22 | struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); | ||
23 | if (!vid) { | ||
24 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
25 | return NULL; | ||
26 | } | ||
27 | caif_assert(offsetof(struct cfsrvl, layer) == 0); | ||
28 | |||
29 | memset(vid, 0, sizeof(struct cfsrvl)); | ||
30 | cfsrvl_init(vid, channel_id, dev_info); | ||
31 | vid->layer.receive = cfvidl_receive; | ||
32 | vid->layer.transmit = cfvidl_transmit; | ||
33 | snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1"); | ||
34 | return &vid->layer; | ||
35 | } | ||
36 | |||
37 | static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt) | ||
38 | { | ||
39 | u32 videoheader; | ||
40 | if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) { | ||
41 | pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); | ||
42 | cfpkt_destroy(pkt); | ||
43 | return -EPROTO; | ||
44 | } | ||
45 | return layr->up->receive(layr->up, pkt); | ||
46 | } | ||
47 | |||
48 | static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt) | ||
49 | { | ||
50 | struct cfsrvl *service = container_obj(layr); | ||
51 | struct caif_payload_info *info; | ||
52 | u32 videoheader = 0; | ||
53 | int ret; | ||
54 | if (!cfsrvl_ready(service, &ret)) | ||
55 | return ret; | ||
56 | cfpkt_add_head(pkt, &videoheader, 4); | ||
57 | /* Add info for MUX-layer to route the packet out */ | ||
58 | info = cfpkt_info(pkt); | ||
59 | info->channel_id = service->layer.id; | ||
60 | info->dev_info = &service->dev_info; | ||
61 | ret = layr->dn->transmit(layr->dn, pkt); | ||
62 | if (ret < 0) | ||
63 | cfpkt_extr_head(pkt, &videoheader, 4); | ||
64 | return ret; | ||
65 | } | ||
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c new file mode 100644 index 000000000000..f622ff1d39ba --- /dev/null +++ b/net/caif/chnl_net.c | |||
@@ -0,0 +1,451 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * Daniel Martensson / Daniel.Martensson@stericsson.com | ||
5 | * License terms: GNU General Public License (GPL) version 2 | ||
6 | */ | ||
7 | |||
8 | #include <linux/version.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/if_ether.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/ip.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/sockios.h> | ||
18 | #include <linux/caif/if_caif.h> | ||
19 | #include <net/rtnetlink.h> | ||
20 | #include <net/caif/caif_layer.h> | ||
21 | #include <net/caif/cfcnfg.h> | ||
22 | #include <net/caif/cfpkt.h> | ||
23 | #include <net/caif/caif_dev.h> | ||
24 | |||
25 | #define CAIF_CONNECT_TIMEOUT 30 | ||
26 | #define SIZE_MTU 1500 | ||
27 | #define SIZE_MTU_MAX 4080 | ||
28 | #define SIZE_MTU_MIN 68 | ||
29 | #define CAIF_NET_DEFAULT_QUEUE_LEN 500 | ||
30 | |||
31 | #undef pr_debug | ||
32 | #define pr_debug pr_warning | ||
33 | |||
34 | /* This list is protected by the RTNL lock. */ | ||
35 | static LIST_HEAD(chnl_net_list); | ||
36 | |||
37 | MODULE_LICENSE("GPL"); | ||
38 | MODULE_ALIAS_RTNL_LINK("caif"); | ||
39 | |||
40 | struct chnl_net { | ||
41 | struct cflayer chnl; | ||
42 | struct net_device_stats stats; | ||
43 | struct caif_connect_request conn_req; | ||
44 | struct list_head list_field; | ||
45 | struct net_device *netdev; | ||
46 | char name[256]; | ||
47 | wait_queue_head_t netmgmt_wq; | ||
48 | /* Flow status to remember and control the transmission. */ | ||
49 | bool flowenabled; | ||
50 | bool pending_close; | ||
51 | }; | ||
52 | |||
53 | static void robust_list_del(struct list_head *delete_node) | ||
54 | { | ||
55 | struct list_head *list_node; | ||
56 | struct list_head *n; | ||
57 | ASSERT_RTNL(); | ||
58 | list_for_each_safe(list_node, n, &chnl_net_list) { | ||
59 | if (list_node == delete_node) { | ||
60 | list_del(list_node); | ||
61 | break; | ||
62 | } | ||
63 | } | ||
64 | } | ||
65 | |||
66 | static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) | ||
67 | { | ||
68 | struct sk_buff *skb; | ||
69 | struct chnl_net *priv = NULL; | ||
70 | int pktlen; | ||
71 | int err = 0; | ||
72 | |||
73 | priv = container_of(layr, struct chnl_net, chnl); | ||
74 | |||
75 | if (!priv) | ||
76 | return -EINVAL; | ||
77 | |||
78 | /* Get length of CAIF packet. */ | ||
79 | pktlen = cfpkt_getlen(pkt); | ||
80 | |||
81 | skb = (struct sk_buff *) cfpkt_tonative(pkt); | ||
82 | /* Pass some minimum information and | ||
83 | * send the packet to the net stack. | ||
84 | */ | ||
85 | skb->dev = priv->netdev; | ||
86 | skb->protocol = htons(ETH_P_IP); | ||
87 | |||
88 | /* If we change the header in loop mode, the checksum is corrupted. */ | ||
89 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | ||
90 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
91 | else | ||
92 | skb->ip_summed = CHECKSUM_NONE; | ||
93 | |||
94 | /* FIXME: Drivers should call this in tasklet context. */ | ||
95 | if (in_interrupt()) | ||
96 | netif_rx(skb); | ||
97 | else | ||
98 | netif_rx_ni(skb); | ||
99 | |||
100 | /* Update statistics. */ | ||
101 | priv->netdev->stats.rx_packets++; | ||
102 | priv->netdev->stats.rx_bytes += pktlen; | ||
103 | |||
104 | return err; | ||
105 | } | ||
106 | |||
107 | static int delete_device(struct chnl_net *dev) | ||
108 | { | ||
109 | ASSERT_RTNL(); | ||
110 | if (dev->netdev) | ||
111 | unregister_netdevice(dev->netdev); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static void close_work(struct work_struct *work) | ||
116 | { | ||
117 | struct chnl_net *dev = NULL; | ||
118 | struct list_head *list_node; | ||
119 | struct list_head *_tmp; | ||
120 | rtnl_lock(); | ||
121 | list_for_each_safe(list_node, _tmp, &chnl_net_list) { | ||
122 | dev = list_entry(list_node, struct chnl_net, list_field); | ||
123 | if (!dev->pending_close) | ||
124 | continue; | ||
125 | list_del(list_node); | ||
126 | delete_device(dev); | ||
127 | } | ||
128 | rtnl_unlock(); | ||
129 | } | ||
130 | static DECLARE_WORK(close_worker, close_work); | ||
131 | |||
132 | static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, | ||
133 | int phyid) | ||
134 | { | ||
135 | struct chnl_net *priv; | ||
136 | pr_debug("CAIF: %s(): NET flowctrl func called flow: %s.\n", | ||
137 | __func__, | ||
138 | flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : | ||
139 | flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : | ||
140 | flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : | ||
141 | flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : | ||
142 | flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : | ||
143 | flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? | ||
144 | "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); | ||
145 | |||
146 | priv = container_of(layr, struct chnl_net, chnl); | ||
147 | |||
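148 | /* Map CAIF control events onto the net device: flow-off and teardown disable the TX queue, flow-on and a successful open wake it up. */ | ||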
148 | switch (flow) { | ||
149 | case CAIF_CTRLCMD_FLOW_OFF_IND: | ||
150 | case CAIF_CTRLCMD_DEINIT_RSP: | ||
151 | case CAIF_CTRLCMD_INIT_FAIL_RSP: | ||
152 | case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: | ||
153 | priv->flowenabled = false; | ||
154 | netif_tx_disable(priv->netdev); | ||
155 | pr_warning("CAIF: %s(): done\n", __func__); | ||
156 | priv->pending_close = 1; | ||
157 | schedule_work(&close_worker); | ||
158 | break; | ||
159 | case CAIF_CTRLCMD_FLOW_ON_IND: | ||
160 | case CAIF_CTRLCMD_INIT_RSP: | ||
161 | priv->flowenabled = true; | ||
162 | netif_wake_queue(priv->netdev); | ||
163 | wake_up_interruptible(&priv->netmgmt_wq); | ||
164 | break; | ||
165 | default: | ||
166 | break; | ||
167 | } | ||
168 | } | ||
169 | |||
170 | static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
171 | { | ||
172 | struct chnl_net *priv; | ||
173 | struct cfpkt *pkt = NULL; | ||
174 | int len; | ||
175 | int result = -1; | ||
176 | /* Get our private data. */ | ||
177 | priv = netdev_priv(dev); | ||
178 | |||
179 | if (skb->len > priv->netdev->mtu) { | ||
180 | pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__); | ||
181 | return -ENOSPC; | ||
182 | } | ||
183 | |||
184 | if (!priv->flowenabled) { | ||
185 | pr_debug("CAIF: %s(): dropping packets flow off\n", __func__); | ||
186 | return NETDEV_TX_BUSY; | ||
187 | } | ||
188 | |||
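189 | /* In loopback test mode the frame is echoed back, so swap the IP addresses up front (see the checksum note in chnl_recv_cb). */ | ||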
189 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | ||
190 | swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | ||
191 | |||
192 | /* Store original SKB length. */ | ||
193 | len = skb->len; | ||
194 | |||
195 | pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb); | ||
196 | |||
197 | pr_debug("CAIF: %s(): transmit inst %s %d,%p\n", | ||
198 | __func__, dev->name, priv->chnl.dn->id, &priv->chnl.dn); | ||
199 | |||
200 | /* Send the packet down the stack. */ | ||
201 | result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); | ||
202 | if (result) { | ||
203 | if (result == -EAGAIN) | ||
204 | result = NETDEV_TX_BUSY; | ||
205 | return result; | ||
206 | } | ||
207 | |||
208 | /* Update statistics. */ | ||
209 | dev->stats.tx_packets++; | ||
210 | dev->stats.tx_bytes += len; | ||
211 | |||
212 | return NETDEV_TX_OK; | ||
213 | } | ||
214 | |||
215 | static int chnl_net_open(struct net_device *dev) | ||
216 | { | ||
217 | struct chnl_net *priv = NULL; | ||
218 | int result = -1; | ||
219 | ASSERT_RTNL(); | ||
220 | |||
221 | priv = netdev_priv(dev); | ||
222 | if (!priv) { | ||
223 | pr_debug("CAIF: %s(): no priv\n", __func__); | ||
224 | return -ENODEV; | ||
225 | } | ||
226 | |||
227 | pr_debug("CAIF: %s(): dev name: %s\n", __func__, priv->name); | ||
228 | result = caif_connect_client(&priv->conn_req, &priv->chnl); | ||
229 | if (result != 0) { | ||
230 | pr_debug("CAIF: %s(): err: " | ||
231 | "Unable to register and open device, Err:%d\n", | ||
232 | __func__, | ||
233 | result); | ||
234 | return -ENODEV; | ||
235 | } | ||
236 | result = wait_event_interruptible(priv->netmgmt_wq, priv->flowenabled); | ||
237 | |||
238 | if (result == -ERESTARTSYS) { | ||
239 | pr_debug("CAIF: %s(): wait_event_interruptible" | ||
240 | " woken by a signal\n", __func__); | ||
241 | return -ERESTARTSYS; | ||
242 | } else | ||
243 | pr_debug("CAIF: %s(): Flow on recieved\n", __func__); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | static int chnl_net_stop(struct net_device *dev) | ||
249 | { | ||
250 | struct chnl_net *priv; | ||
251 | int result = -1; | ||
252 | ASSERT_RTNL(); | ||
253 | priv = netdev_priv(dev); | ||
254 | |||
255 | result = caif_disconnect_client(&priv->chnl); | ||
256 | if (result != 0) { | ||
257 | pr_debug("CAIF: %s(): chnl_net_stop: err: " | ||
258 | "Unable to STOP device, Err:%d\n", | ||
259 | __func__, result); | ||
260 | return -EBUSY; | ||
261 | } | ||
262 | result = wait_event_interruptible(priv->netmgmt_wq, | ||
263 | !priv->flowenabled); | ||
264 | |||
265 | if (result == -ERESTARTSYS) { | ||
266 | pr_debug("CAIF: %s(): wait_event_interruptible woken by" | ||
267 | " signal, signal_pending(current) = %d\n", | ||
268 | __func__, | ||
269 | signal_pending(current)); | ||
270 | } else { | ||
271 | pr_debug("CAIF: %s(): disconnect received\n", __func__); | ||
272 | |||
273 | } | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static int chnl_net_init(struct net_device *dev) | ||
279 | { | ||
280 | struct chnl_net *priv; | ||
281 | ASSERT_RTNL(); | ||
282 | priv = netdev_priv(dev); | ||
283 | strncpy(priv->name, dev->name, sizeof(priv->name)); | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static void chnl_net_uninit(struct net_device *dev) | ||
288 | { | ||
289 | struct chnl_net *priv; | ||
290 | ASSERT_RTNL(); | ||
291 | priv = netdev_priv(dev); | ||
292 | robust_list_del(&priv->list_field); | ||
293 | } | ||
294 | |||
295 | static const struct net_device_ops netdev_ops = { | ||
296 | .ndo_open = chnl_net_open, | ||
297 | .ndo_stop = chnl_net_stop, | ||
298 | .ndo_init = chnl_net_init, | ||
299 | .ndo_uninit = chnl_net_uninit, | ||
300 | .ndo_start_xmit = chnl_net_start_xmit, | ||
301 | }; | ||
302 | |||
303 | static void ipcaif_net_setup(struct net_device *dev) | ||
304 | { | ||
305 | struct chnl_net *priv; | ||
306 | dev->netdev_ops = &netdev_ops; | ||
307 | dev->destructor = free_netdev; | ||
308 | dev->flags |= IFF_NOARP; | ||
309 | dev->flags |= IFF_POINTOPOINT; | ||
310 | dev->needed_headroom = CAIF_NEEDED_HEADROOM; | ||
311 | dev->needed_tailroom = CAIF_NEEDED_TAILROOM; | ||
312 | dev->mtu = SIZE_MTU; | ||
313 | dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN; | ||
314 | |||
315 | priv = netdev_priv(dev); | ||
316 | priv->chnl.receive = chnl_recv_cb; | ||
317 | priv->chnl.ctrlcmd = chnl_flowctrl_cb; | ||
318 | priv->netdev = dev; | ||
319 | priv->conn_req.protocol = CAIFPROTO_DATAGRAM; | ||
320 | priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; | ||
321 | priv->conn_req.priority = CAIF_PRIO_LOW; | ||
322 | /* Insert illegal value */ | ||
323 | priv->conn_req.sockaddr.u.dgm.connection_id = -1; | ||
324 | priv->flowenabled = false; | ||
325 | |||
326 | ASSERT_RTNL(); | ||
327 | init_waitqueue_head(&priv->netmgmt_wq); | ||
328 | list_add(&priv->list_field, &chnl_net_list); | ||
329 | } | ||
330 | |||
331 | |||
332 | static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
333 | { | ||
334 | struct chnl_net *priv; | ||
335 | u8 loop; | ||
336 | priv = netdev_priv(dev); | ||
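337 | /* The NLA_PUT_* macros jump to nla_put_failure when the skb runs out of room. */ | ||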
337 | NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, | ||
338 | priv->conn_req.sockaddr.u.dgm.connection_id); | ||
339 | NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, | ||
340 | priv->conn_req.sockaddr.u.dgm.connection_id); | ||
341 | loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; | ||
342 | NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); | ||
343 | |||
344 | |||
345 | return 0; | ||
346 | nla_put_failure: | ||
347 | return -EMSGSIZE; | ||
348 | |||
349 | } | ||
350 | |||
351 | static void caif_netlink_parms(struct nlattr *data[], | ||
352 | struct caif_connect_request *conn_req) | ||
353 | { | ||
354 | if (!data) { | ||
355 | pr_warning("CAIF: %s: no params data found\n", __func__); | ||
356 | return; | ||
357 | } | ||
358 | if (data[IFLA_CAIF_IPV4_CONNID]) | ||
359 | conn_req->sockaddr.u.dgm.connection_id = | ||
360 | nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]); | ||
361 | if (data[IFLA_CAIF_IPV6_CONNID]) | ||
362 | conn_req->sockaddr.u.dgm.connection_id = | ||
363 | nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]); | ||
364 | if (data[IFLA_CAIF_LOOPBACK]) { | ||
365 | if (nla_get_u8(data[IFLA_CAIF_LOOPBACK])) | ||
366 | conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP; | ||
367 | else | ||
368 | conn_req->protocol = CAIFPROTO_DATAGRAM; | ||
369 | } | ||
370 | } | ||
371 | |||
372 | static int ipcaif_newlink(struct net *src_net, struct net_device *dev, | ||
373 | struct nlattr *tb[], struct nlattr *data[]) | ||
374 | { | ||
375 | int ret; | ||
376 | struct chnl_net *caifdev; | ||
377 | ASSERT_RTNL(); | ||
378 | caifdev = netdev_priv(dev); | ||
379 | caif_netlink_parms(data, &caifdev->conn_req); | ||
380 | ret = register_netdevice(dev); | ||
381 | if (ret) | ||
382 | pr_warning("CAIF: %s(): device rtml registration failed\n", | ||
383 | __func__); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[], | ||
388 | struct nlattr *data[]) | ||
389 | { | ||
390 | struct chnl_net *caifdev; | ||
391 | ASSERT_RTNL(); | ||
392 | caifdev = netdev_priv(dev); | ||
393 | caif_netlink_parms(data, &caifdev->conn_req); | ||
394 | netdev_state_change(dev); | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static size_t ipcaif_get_size(const struct net_device *dev) | ||
399 | { | ||
400 | return | ||
401 | /* IFLA_CAIF_IPV4_CONNID */ | ||
402 | nla_total_size(4) + | ||
403 | /* IFLA_CAIF_IPV6_CONNID */ | ||
404 | nla_total_size(4) + | ||
405 | /* IFLA_CAIF_LOOPBACK */ | ||
406 | nla_total_size(2) + | ||
407 | 0; | ||
408 | } | ||
409 | |||
410 | static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = { | ||
411 | [IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 }, | ||
412 | [IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 }, | ||
413 | [IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 } | ||
414 | }; | ||
415 | |||
416 | |||
417 | static struct rtnl_link_ops ipcaif_link_ops __read_mostly = { | ||
418 | .kind = "caif", | ||
419 | .priv_size = sizeof(struct chnl_net), | ||
420 | .setup = ipcaif_net_setup, | ||
421 | .maxtype = IFLA_CAIF_MAX, | ||
422 | .policy = ipcaif_policy, | ||
423 | .newlink = ipcaif_newlink, | ||
424 | .changelink = ipcaif_changelink, | ||
425 | .get_size = ipcaif_get_size, | ||
426 | .fill_info = ipcaif_fill_info, | ||
427 | |||
428 | }; | ||
429 | |||
430 | static int __init chnl_init_module(void) | ||
431 | { | ||
432 | return rtnl_link_register(&ipcaif_link_ops); | ||
433 | } | ||
434 | |||
435 | static void __exit chnl_exit_module(void) | ||
436 | { | ||
437 | struct chnl_net *dev = NULL; | ||
438 | struct list_head *list_node; | ||
439 | struct list_head *_tmp; | ||
440 | rtnl_link_unregister(&ipcaif_link_ops); | ||
441 | rtnl_lock(); | ||
442 | list_for_each_safe(list_node, _tmp, &chnl_net_list) { | ||
443 | dev = list_entry(list_node, struct chnl_net, list_field); | ||
444 | list_del(list_node); | ||
445 | delete_device(dev); | ||
446 | } | ||
447 | rtnl_unlock(); | ||
448 | } | ||
449 | |||
450 | module_init(chnl_init_module); | ||
451 | module_exit(chnl_exit_module); | ||
diff --git a/net/core/Makefile b/net/core/Makefile index 08791ac3e05a..51c3eec850ef 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ | |||
7 | 7 | ||
8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o | 8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o |
9 | 9 | ||
10 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ | 10 | obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ |
11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o | 11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o |
12 | 12 | ||
13 | obj-$(CONFIG_XFRM) += flow.o | 13 | obj-$(CONFIG_XFRM) += flow.o |
diff --git a/net/core/datagram.c b/net/core/datagram.c index 2dccd4ee591b..5574a5ddf908 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |||
86 | int error; | 86 | int error; |
87 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); | 87 | DEFINE_WAIT_FUNC(wait, receiver_wake_function); |
88 | 88 | ||
89 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 89 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
90 | 90 | ||
91 | /* Socket errors? */ | 91 | /* Socket errors? */ |
92 | error = sock_error(sk); | 92 | error = sock_error(sk); |
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p) | |||
115 | error = 0; | 115 | error = 0; |
116 | *timeo_p = schedule_timeout(*timeo_p); | 116 | *timeo_p = schedule_timeout(*timeo_p); |
117 | out: | 117 | out: |
118 | finish_wait(sk->sk_sleep, &wait); | 118 | finish_wait(sk_sleep(sk), &wait); |
119 | return error; | 119 | return error; |
120 | interrupted: | 120 | interrupted: |
121 | error = sock_intr_errno(*timeo_p); | 121 | error = sock_intr_errno(*timeo_p); |
@@ -726,7 +726,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, | |||
726 | struct sock *sk = sock->sk; | 726 | struct sock *sk = sock->sk; |
727 | unsigned int mask; | 727 | unsigned int mask; |
728 | 728 | ||
729 | sock_poll_wait(file, sk->sk_sleep, wait); | 729 | sock_poll_wait(file, sk_sleep(sk), wait); |
730 | mask = 0; | 730 | mask = 0; |
731 | 731 | ||
732 | /* exceptional events? */ | 732 | /* exceptional events? */ |
diff --git a/net/core/dev.c b/net/core/dev.c index f769098774b7..4d43f1a80f74 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -130,6 +130,7 @@ | |||
130 | #include <linux/jhash.h> | 130 | #include <linux/jhash.h> |
131 | #include <linux/random.h> | 131 | #include <linux/random.h> |
132 | #include <trace/events/napi.h> | 132 | #include <trace/events/napi.h> |
133 | #include <linux/pci.h> | ||
133 | 134 | ||
134 | #include "net-sysfs.h" | 135 | #include "net-sysfs.h" |
135 | 136 | ||
@@ -207,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) | |||
207 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; | 208 | return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
208 | } | 209 | } |
209 | 210 | ||
211 | static inline void rps_lock(struct softnet_data *sd) | ||
212 | { | ||
213 | #ifdef CONFIG_RPS | ||
214 | spin_lock(&sd->input_pkt_queue.lock); | ||
215 | #endif | ||
216 | } | ||
217 | |||
218 | static inline void rps_unlock(struct softnet_data *sd) | ||
219 | { | ||
220 | #ifdef CONFIG_RPS | ||
221 | spin_unlock(&sd->input_pkt_queue.lock); | ||
222 | #endif | ||
223 | } | ||
224 | |||
210 | /* Device list insertion */ | 225 | /* Device list insertion */ |
211 | static int list_netdevice(struct net_device *dev) | 226 | static int list_netdevice(struct net_device *dev) |
212 | { | 227 | { |
@@ -249,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
249 | * queue in the local softnet handler. | 264 | * queue in the local softnet handler. |
250 | */ | 265 | */ |
251 | 266 | ||
252 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | 267 | DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
253 | EXPORT_PER_CPU_SYMBOL(softnet_data); | 268 | EXPORT_PER_CPU_SYMBOL(softnet_data); |
254 | 269 | ||
255 | #ifdef CONFIG_LOCKDEP | 270 | #ifdef CONFIG_LOCKDEP |
@@ -773,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype); | |||
773 | 788 | ||
774 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | 789 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
775 | { | 790 | { |
776 | struct net_device *dev; | 791 | struct net_device *dev, *ret = NULL; |
777 | 792 | ||
778 | rtnl_lock(); | 793 | rcu_read_lock(); |
779 | dev = __dev_getfirstbyhwtype(net, type); | 794 | for_each_netdev_rcu(net, dev) |
780 | if (dev) | 795 | if (dev->type == type) { |
781 | dev_hold(dev); | 796 | dev_hold(dev); |
782 | rtnl_unlock(); | 797 | ret = dev; |
783 | return dev; | 798 | break; |
799 | } | ||
800 | rcu_read_unlock(); | ||
801 | return ret; | ||
784 | } | 802 | } |
785 | EXPORT_SYMBOL(dev_getfirstbyhwtype); | 803 | EXPORT_SYMBOL(dev_getfirstbyhwtype); |
786 | 804 | ||
@@ -1085,9 +1103,9 @@ void netdev_state_change(struct net_device *dev) | |||
1085 | } | 1103 | } |
1086 | EXPORT_SYMBOL(netdev_state_change); | 1104 | EXPORT_SYMBOL(netdev_state_change); |
1087 | 1105 | ||
1088 | void netdev_bonding_change(struct net_device *dev, unsigned long event) | 1106 | int netdev_bonding_change(struct net_device *dev, unsigned long event) |
1089 | { | 1107 | { |
1090 | call_netdevice_notifiers(event, dev); | 1108 | return call_netdevice_notifiers(event, dev); |
1091 | } | 1109 | } |
1092 | EXPORT_SYMBOL(netdev_bonding_change); | 1110 | EXPORT_SYMBOL(netdev_bonding_change); |
1093 | 1111 | ||
@@ -1417,6 +1435,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier); | |||
1417 | 1435 | ||
1418 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev) | 1436 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev) |
1419 | { | 1437 | { |
1438 | ASSERT_RTNL(); | ||
1420 | return raw_notifier_call_chain(&netdev_chain, val, dev); | 1439 | return raw_notifier_call_chain(&netdev_chain, val, dev); |
1421 | } | 1440 | } |
1422 | 1441 | ||
@@ -1784,18 +1803,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); | |||
1784 | * 2. No high memory really exists on this machine. | 1803 | * 2. No high memory really exists on this machine. |
1785 | */ | 1804 | */ |
1786 | 1805 | ||
1787 | static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | 1806 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) |
1788 | { | 1807 | { |
1789 | #ifdef CONFIG_HIGHMEM | 1808 | #ifdef CONFIG_HIGHMEM |
1790 | int i; | 1809 | int i; |
1810 | if (!(dev->features & NETIF_F_HIGHDMA)) { | ||
1811 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1812 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | ||
1813 | return 1; | ||
1814 | } | ||
1791 | 1815 | ||
1792 | if (dev->features & NETIF_F_HIGHDMA) | 1816 | if (PCI_DMA_BUS_IS_PHYS) { |
1793 | return 0; | 1817 | struct device *pdev = dev->dev.parent; |
1794 | |||
1795 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | ||
1796 | if (PageHighMem(skb_shinfo(skb)->frags[i].page)) | ||
1797 | return 1; | ||
1798 | 1818 | ||
1819 | if (!pdev) | ||
1820 | return 0; | ||
1821 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
1822 | dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); | ||
1823 | if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) | ||
1824 | return 1; | ||
1825 | } | ||
1826 | } | ||
1799 | #endif | 1827 | #endif |
1800 | return 0; | 1828 | return 0; |
1801 | } | 1829 | } |
@@ -1853,6 +1881,17 @@ static int dev_gso_segment(struct sk_buff *skb) | |||
1853 | return 0; | 1881 | return 0; |
1854 | } | 1882 | } |
1855 | 1883 | ||
1884 | /* | ||
1885 | * Try to orphan skb early, right before transmission by the device. | ||
1886 | * We cannot orphan skb if tx timestamp is requested, since | ||
1887 | * drivers need to call skb_tstamp_tx() to send the timestamp. | ||
1888 | */ | ||
1889 | static inline void skb_orphan_try(struct sk_buff *skb) | ||
1890 | { | ||
1891 | if (!skb_tx(skb)->flags) | ||
1892 | skb_orphan(skb); | ||
1893 | } | ||
1894 | |||
1856 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 1895 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
1857 | struct netdev_queue *txq) | 1896 | struct netdev_queue *txq) |
1858 | { | 1897 | { |
@@ -1863,13 +1902,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1863 | if (!list_empty(&ptype_all)) | 1902 | if (!list_empty(&ptype_all)) |
1864 | dev_queue_xmit_nit(skb, dev); | 1903 | dev_queue_xmit_nit(skb, dev); |
1865 | 1904 | ||
1866 | if (netif_needs_gso(dev, skb)) { | ||
1867 | if (unlikely(dev_gso_segment(skb))) | ||
1868 | goto out_kfree_skb; | ||
1869 | if (skb->next) | ||
1870 | goto gso; | ||
1871 | } | ||
1872 | |||
1873 | /* | 1905 | /* |
1874 | * If the device doesn't need skb->dst, release it right now while | 1906 | * If the device doesn't need skb->dst, release it right now while
1875 | * it's hot in this CPU cache | 1907 | * it's hot in this CPU cache
@@ -1877,23 +1909,18 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1877 | if (dev->priv_flags & IFF_XMIT_DST_RELEASE) | 1909 | if (dev->priv_flags & IFF_XMIT_DST_RELEASE) |
1878 | skb_dst_drop(skb); | 1910 | skb_dst_drop(skb); |
1879 | 1911 | ||
1912 | skb_orphan_try(skb); | ||
1913 | |||
1914 | if (netif_needs_gso(dev, skb)) { | ||
1915 | if (unlikely(dev_gso_segment(skb))) | ||
1916 | goto out_kfree_skb; | ||
1917 | if (skb->next) | ||
1918 | goto gso; | ||
1919 | } | ||
1920 | |||
1880 | rc = ops->ndo_start_xmit(skb, dev); | 1921 | rc = ops->ndo_start_xmit(skb, dev); |
1881 | if (rc == NETDEV_TX_OK) | 1922 | if (rc == NETDEV_TX_OK) |
1882 | txq_trans_update(txq); | 1923 | txq_trans_update(txq); |
1883 | /* | ||
1884 | * TODO: if skb_orphan() was called by | ||
1885 | * dev->hard_start_xmit() (for example, the unmodified | ||
1886 | * igb driver does that; bnx2 doesn't), then | ||
1887 | * skb_tx_software_timestamp() will be unable to send | ||
1888 | * back the time stamp. | ||
1889 | * | ||
1890 | * How can this be prevented? Always create another | ||
1891 | * reference to the socket before calling | ||
1892 | * dev->hard_start_xmit()? Prevent that skb_orphan() | ||
1893 | * does anything in dev->hard_start_xmit() by clearing | ||
1894 | * the skb destructor before the call and restoring it | ||
1895 | * afterwards, then doing the skb_orphan() ourselves? | ||
1896 | */ | ||
1897 | return rc; | 1924 | return rc; |
1898 | } | 1925 | } |
1899 | 1926 | ||
@@ -1932,7 +1959,7 @@ out_kfree_skb: | |||
1932 | return rc; | 1959 | return rc; |
1933 | } | 1960 | } |
1934 | 1961 | ||
1935 | static u32 skb_tx_hashrnd; | 1962 | static u32 hashrnd __read_mostly; |
1936 | 1963 | ||
1937 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | 1964 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) |
1938 | { | 1965 | { |
@@ -1948,9 +1975,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1948 | if (skb->sk && skb->sk->sk_hash) | 1975 | if (skb->sk && skb->sk->sk_hash) |
1949 | hash = skb->sk->sk_hash; | 1976 | hash = skb->sk->sk_hash; |
1950 | else | 1977 | else |
1951 | hash = skb->protocol; | 1978 | hash = (__force u16) skb->protocol; |
1952 | 1979 | ||
1953 | hash = jhash_1word(hash, skb_tx_hashrnd); | 1980 | hash = jhash_1word(hash, hashrnd); |
1954 | 1981 | ||
1955 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); | 1982 | return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); |
1956 | } | 1983 | } |
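[Editor's note] The return statement above maps a 32-bit jhash value onto [0, real_num_tx_queues) with a multiply and a shift instead of a modulo: since hash < 2^32, ((u64)hash * n) >> 32 is always less than n, and it avoids a division. A minimal userspace sketch (pick_queue() is an illustrative name, not kernel API):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t pick_queue(uint32_t hash, uint16_t n)
	{
		/* hash/2^32 is a fraction in [0,1); scale it by n */
		return (uint16_t)(((uint64_t)hash * n) >> 32);
	}

	int main(void)
	{
		printf("%u\n", pick_queue(0x9e3779b9u, 8));	/* prints 4 */
		return 0;
	}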
@@ -1960,10 +1987,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | |||
1960 | { | 1987 | { |
1961 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | 1988 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { |
1962 | if (net_ratelimit()) { | 1989 | if (net_ratelimit()) { |
1963 | WARN(1, "%s selects TX queue %d, but " | 1990 | pr_warning("%s selects TX queue %d, but " |
1964 | "real number of TX queues is %d\n", | 1991 | "real number of TX queues is %d\n", |
1965 | dev->name, queue_index, | 1992 | dev->name, queue_index, dev->real_num_tx_queues); |
1966 | dev->real_num_tx_queues); | ||
1967 | } | 1993 | } |
1968 | return 0; | 1994 | return 0; |
1969 | } | 1995 | } |
@@ -1990,7 +2016,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
1990 | queue_index = skb_tx_hash(dev, skb); | 2016 | queue_index = skb_tx_hash(dev, skb); |
1991 | 2017 | ||
1992 | if (sk) { | 2018 | if (sk) { |
1993 | struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache); | 2019 | struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1); |
1994 | 2020 | ||
1995 | if (dst && skb_dst(skb) == dst) | 2021 | if (dst && skb_dst(skb) == dst) |
1996 | sk_tx_queue_set(sk, queue_index); | 2022 | sk_tx_queue_set(sk, queue_index); |
@@ -2180,6 +2206,235 @@ int weight_p __read_mostly = 64; /* old backlog weight */ | |||
2180 | 2206 | ||
2181 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | 2207 | DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; |
2182 | 2208 | ||
2209 | #ifdef CONFIG_RPS | ||
2210 | |||
2211 | /* One global table that all flow-based protocols share. */ | ||
2212 | struct rps_sock_flow_table *rps_sock_flow_table __read_mostly; | ||
2213 | EXPORT_SYMBOL(rps_sock_flow_table); | ||
2214 | |||
2215 | /* | ||
2216 | * get_rps_cpu is called from netif_receive_skb and returns the target | ||
2217 | * CPU from the RPS map of the receiving queue for a given skb. | ||
2218 | * rcu_read_lock must be held on entry. | ||
2219 | */ | ||
2220 | static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | ||
2221 | struct rps_dev_flow **rflowp) | ||
2222 | { | ||
2223 | struct ipv6hdr *ip6; | ||
2224 | struct iphdr *ip; | ||
2225 | struct netdev_rx_queue *rxqueue; | ||
2226 | struct rps_map *map; | ||
2227 | struct rps_dev_flow_table *flow_table; | ||
2228 | struct rps_sock_flow_table *sock_flow_table; | ||
2229 | int cpu = -1; | ||
2230 | u8 ip_proto; | ||
2231 | u16 tcpu; | ||
2232 | u32 addr1, addr2, ihl; | ||
2233 | union { | ||
2234 | u32 v32; | ||
2235 | u16 v16[2]; | ||
2236 | } ports; | ||
2237 | |||
2238 | if (skb_rx_queue_recorded(skb)) { | ||
2239 | u16 index = skb_get_rx_queue(skb); | ||
2240 | if (unlikely(index >= dev->num_rx_queues)) { | ||
2241 | if (net_ratelimit()) { | ||
2242 | pr_warning("%s received packet on queue " | ||
2243 | "%u, but number of RX queues is %u\n", | ||
2244 | dev->name, index, dev->num_rx_queues); | ||
2245 | } | ||
2246 | goto done; | ||
2247 | } | ||
2248 | rxqueue = dev->_rx + index; | ||
2249 | } else | ||
2250 | rxqueue = dev->_rx; | ||
2251 | |||
2252 | if (!rxqueue->rps_map && !rxqueue->rps_flow_table) | ||
2253 | goto done; | ||
2254 | |||
2255 | if (skb->rxhash) | ||
2256 | goto got_hash; /* Skip hash computation on packet header */ | ||
2257 | |||
2258 | switch (skb->protocol) { | ||
2259 | case __constant_htons(ETH_P_IP): | ||
2260 | if (!pskb_may_pull(skb, sizeof(*ip))) | ||
2261 | goto done; | ||
2262 | |||
2263 | ip = (struct iphdr *) skb->data; | ||
2264 | ip_proto = ip->protocol; | ||
2265 | addr1 = (__force u32) ip->saddr; | ||
2266 | addr2 = (__force u32) ip->daddr; | ||
2267 | ihl = ip->ihl; | ||
2268 | break; | ||
2269 | case __constant_htons(ETH_P_IPV6): | ||
2270 | if (!pskb_may_pull(skb, sizeof(*ip6))) | ||
2271 | goto done; | ||
2272 | |||
2273 | ip6 = (struct ipv6hdr *) skb->data; | ||
2274 | ip_proto = ip6->nexthdr; | ||
2275 | addr1 = (__force u32) ip6->saddr.s6_addr32[3]; | ||
2276 | addr2 = (__force u32) ip6->daddr.s6_addr32[3]; | ||
2277 | ihl = (40 >> 2); | ||
2278 | break; | ||
2279 | default: | ||
2280 | goto done; | ||
2281 | } | ||
2282 | switch (ip_proto) { | ||
2283 | case IPPROTO_TCP: | ||
2284 | case IPPROTO_UDP: | ||
2285 | case IPPROTO_DCCP: | ||
2286 | case IPPROTO_ESP: | ||
2287 | case IPPROTO_AH: | ||
2288 | case IPPROTO_SCTP: | ||
2289 | case IPPROTO_UDPLITE: | ||
2290 | if (pskb_may_pull(skb, (ihl * 4) + 4)) { | ||
2291 | ports.v32 = * (__force u32 *) (skb->data + (ihl * 4)); | ||
2292 | if (ports.v16[1] < ports.v16[0]) | ||
2293 | swap(ports.v16[0], ports.v16[1]); | ||
2294 | break; | ||
2295 | } | ||
2296 | default: | ||
2297 | ports.v32 = 0; | ||
2298 | break; | ||
2299 | } | ||
2300 | |||
2301 | /* get a consistent hash (same value on both flow directions) */ | ||
2302 | if (addr2 < addr1) | ||
2303 | swap(addr1, addr2); | ||
2304 | skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd); | ||
2305 | if (!skb->rxhash) | ||
2306 | skb->rxhash = 1; | ||
2307 | |||
2308 | got_hash: | ||
2309 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | ||
2310 | sock_flow_table = rcu_dereference(rps_sock_flow_table); | ||
2311 | if (flow_table && sock_flow_table) { | ||
2312 | u16 next_cpu; | ||
2313 | struct rps_dev_flow *rflow; | ||
2314 | |||
2315 | rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; | ||
2316 | tcpu = rflow->cpu; | ||
2317 | |||
2318 | next_cpu = sock_flow_table->ents[skb->rxhash & | ||
2319 | sock_flow_table->mask]; | ||
2320 | |||
2321 | /* | ||
2322 | * If the desired CPU (where last recvmsg was done) is | ||
2323 | * different from the current CPU (the one in the rx-queue flow | ||
2324 | * table entry), switch if one of the following holds: | ||
2325 | * - Current CPU is unset (equal to RPS_NO_CPU). | ||
2326 | * - Current CPU is offline. | ||
2327 | * - The current CPU's queue tail has advanced beyond the | ||
2328 | * last packet that was enqueued using this table entry. | ||
2329 | * This guarantees that all previous packets for the flow | ||
2330 | * have been dequeued, thus preserving in-order delivery. | ||
2331 | */ | ||
2332 | if (unlikely(tcpu != next_cpu) && | ||
2333 | (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || | ||
2334 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - | ||
2335 | rflow->last_qtail)) >= 0)) { | ||
2336 | tcpu = rflow->cpu = next_cpu; | ||
2337 | if (tcpu != RPS_NO_CPU) | ||
2338 | rflow->last_qtail = per_cpu(softnet_data, | ||
2339 | tcpu).input_queue_head; | ||
2340 | } | ||
2341 | if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { | ||
2342 | *rflowp = rflow; | ||
2343 | cpu = tcpu; | ||
2344 | goto done; | ||
2345 | } | ||
2346 | } | ||
2347 | |||
2348 | map = rcu_dereference(rxqueue->rps_map); | ||
2349 | if (map) { | ||
2350 | tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; | ||
2351 | |||
2352 | if (cpu_online(tcpu)) { | ||
2353 | cpu = tcpu; | ||
2354 | goto done; | ||
2355 | } | ||
2356 | } | ||
2357 | |||
2358 | done: | ||
2359 | return cpu; | ||
2360 | } | ||
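[Editor's note] The hash computed above is deliberately symmetric: the address pair and the port pair are each sorted before hashing, so both directions of one connection produce the same rxhash and are steered to the same CPU. A hedged userspace sketch of that canonicalization alone, where mix32() is a stand-in for the kernel's jhash_3words() and every name is illustrative:

	#include <stdint.h>

	static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
	{
		uint32_t h = (seed ^ a) * 0x9e3779b1u;	/* placeholder mixer, not jhash */

		h = (h ^ b) * 0x85ebca6bu;
		h ^= c;
		return h ? h : 1;	/* 0 is reserved to mean "no hash computed" */
	}

	static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
				  uint16_t sport, uint16_t dport, uint32_t seed)
	{
		uint32_t a1 = saddr, a2 = daddr;
		uint16_t p1 = sport, p2 = dport;

		if (a2 < a1) { uint32_t t = a1; a1 = a2; a2 = t; }
		if (p2 < p1) { uint16_t t = p1; p1 = p2; p2 = t; }
		/* now flow_hash(A,B,pa,pb,s) == flow_hash(B,A,pb,pa,s) */
		return mix32(a1, a2, ((uint32_t)p2 << 16) | p1, seed);
	}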
2361 | |||
2362 | /* Called from hardirq (IPI) context */ | ||
2363 | static void rps_trigger_softirq(void *data) | ||
2364 | { | ||
2365 | struct softnet_data *sd = data; | ||
2366 | |||
2367 | __napi_schedule(&sd->backlog); | ||
2368 | __get_cpu_var(netdev_rx_stat).received_rps++; | ||
2369 | } | ||
2370 | |||
2371 | #endif /* CONFIG_RPS */ | ||
2372 | |||
2373 | /* | ||
2374 | * Check if this softnet_data structure belongs to another CPU. | ||
2375 | * If so, queue it on our IPI list and return 1; | ||
2376 | * if not, return 0. | ||
2377 | */ | ||
2378 | static int rps_ipi_queued(struct softnet_data *sd) | ||
2379 | { | ||
2380 | #ifdef CONFIG_RPS | ||
2381 | struct softnet_data *mysd = &__get_cpu_var(softnet_data); | ||
2382 | |||
2383 | if (sd != mysd) { | ||
2384 | sd->rps_ipi_next = mysd->rps_ipi_list; | ||
2385 | mysd->rps_ipi_list = sd; | ||
2386 | |||
2387 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | ||
2388 | return 1; | ||
2389 | } | ||
2390 | #endif /* CONFIG_RPS */ | ||
2391 | return 0; | ||
2392 | } | ||
2393 | |||
2394 | /* | ||
2395 | * enqueue_to_backlog is called to queue an skb on a per-CPU backlog | ||
2396 | * queue (which may be a remote CPU's queue). | ||
2397 | */ | ||
2398 | static int enqueue_to_backlog(struct sk_buff *skb, int cpu, | ||
2399 | unsigned int *qtail) | ||
2400 | { | ||
2401 | struct softnet_data *sd; | ||
2402 | unsigned long flags; | ||
2403 | |||
2404 | sd = &per_cpu(softnet_data, cpu); | ||
2405 | |||
2406 | local_irq_save(flags); | ||
2407 | __get_cpu_var(netdev_rx_stat).total++; | ||
2408 | |||
2409 | rps_lock(sd); | ||
2410 | if (sd->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2411 | if (sd->input_pkt_queue.qlen) { | ||
2412 | enqueue: | ||
2413 | __skb_queue_tail(&sd->input_pkt_queue, skb); | ||
2414 | #ifdef CONFIG_RPS | ||
2415 | *qtail = sd->input_queue_head + sd->input_pkt_queue.qlen; | ||
2416 | #endif | ||
2417 | rps_unlock(sd); | ||
2418 | local_irq_restore(flags); | ||
2419 | return NET_RX_SUCCESS; | ||
2420 | } | ||
2421 | |||
2422 | /* Schedule NAPI for backlog device */ | ||
2423 | if (napi_schedule_prep(&sd->backlog)) { | ||
2424 | if (!rps_ipi_queued(sd)) | ||
2425 | __napi_schedule(&sd->backlog); | ||
2426 | } | ||
2427 | goto enqueue; | ||
2428 | } | ||
2429 | |||
2430 | rps_unlock(sd); | ||
2431 | |||
2432 | __get_cpu_var(netdev_rx_stat).dropped++; | ||
2433 | local_irq_restore(flags); | ||
2434 | |||
2435 | kfree_skb(skb); | ||
2436 | return NET_RX_DROP; | ||
2437 | } | ||
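[Editor's note] Note the scheduling discipline above: NAPI is only scheduled when the enqueue finds the queue empty; while packets are already queued, the backlog poller is necessarily pending and the skb is simply appended. A simplified sketch of that empty-to-non-empty wakeup rule (pthread-based and purely illustrative; the kernel variant additionally guards double-scheduling via napi_schedule_prep()):

	#include <pthread.h>
	#include <stdbool.h>

	struct backlog {
		pthread_mutex_t lock;
		unsigned int qlen;
	};

	/* Returns true only for the enqueue that takes the queue from
	 * empty to non-empty; only that caller wakes the consumer, so a
	 * burst of packets triggers a single wakeup. */
	static bool backlog_enqueue(struct backlog *b)
	{
		bool need_wakeup;

		pthread_mutex_lock(&b->lock);
		need_wakeup = (b->qlen++ == 0);
		pthread_mutex_unlock(&b->lock);
		return need_wakeup;
	}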
2183 | 2438 | ||
2184 | /** | 2439 | /** |
2185 | * netif_rx - post buffer to the network code | 2440 | * netif_rx - post buffer to the network code |
@@ -2198,8 +2453,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; | |||
2198 | 2453 | ||
2199 | int netif_rx(struct sk_buff *skb) | 2454 | int netif_rx(struct sk_buff *skb) |
2200 | { | 2455 | { |
2201 | struct softnet_data *queue; | 2456 | int ret; |
2202 | unsigned long flags; | ||
2203 | 2457 | ||
2204 | /* if netpoll wants it, pretend we never saw it */ | 2458 | /* if netpoll wants it, pretend we never saw it */ |
2205 | if (netpoll_rx(skb)) | 2459 | if (netpoll_rx(skb)) |
@@ -2208,31 +2462,29 @@ int netif_rx(struct sk_buff *skb) | |||
2208 | if (!skb->tstamp.tv64) | 2462 | if (!skb->tstamp.tv64) |
2209 | net_timestamp(skb); | 2463 | net_timestamp(skb); |
2210 | 2464 | ||
2211 | /* | 2465 | #ifdef CONFIG_RPS |
2212 | * The code is rearranged so that the path is the most | 2466 | { |
2213 | * short when CPU is congested, but is still operating. | 2467 | struct rps_dev_flow voidflow, *rflow = &voidflow; |
2214 | */ | 2468 | int cpu; |
2215 | local_irq_save(flags); | ||
2216 | queue = &__get_cpu_var(softnet_data); | ||
2217 | 2469 | ||
2218 | __get_cpu_var(netdev_rx_stat).total++; | 2470 | rcu_read_lock(); |
2219 | if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { | ||
2220 | if (queue->input_pkt_queue.qlen) { | ||
2221 | enqueue: | ||
2222 | __skb_queue_tail(&queue->input_pkt_queue, skb); | ||
2223 | local_irq_restore(flags); | ||
2224 | return NET_RX_SUCCESS; | ||
2225 | } | ||
2226 | 2471 | ||
2227 | napi_schedule(&queue->backlog); | 2472 | cpu = get_rps_cpu(skb->dev, skb, &rflow); |
2228 | goto enqueue; | 2473 | if (cpu < 0) |
2229 | } | 2474 | cpu = smp_processor_id(); |
2230 | 2475 | ||
2231 | __get_cpu_var(netdev_rx_stat).dropped++; | 2476 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
2232 | local_irq_restore(flags); | ||
2233 | 2477 | ||
2234 | kfree_skb(skb); | 2478 | rcu_read_unlock(); |
2235 | return NET_RX_DROP; | 2479 | } |
2480 | #else | ||
2481 | { | ||
2482 | unsigned int qtail; | ||
2483 | ret = enqueue_to_backlog(skb, get_cpu(), &qtail); | ||
2484 | put_cpu(); | ||
2485 | } | ||
2486 | #endif | ||
2487 | return ret; | ||
2236 | } | 2488 | } |
2237 | EXPORT_SYMBOL(netif_rx); | 2489 | EXPORT_SYMBOL(netif_rx); |
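[Editor's note] The caller contract is unchanged by this rewrite: a driver still hands the skb to netif_rx() from its receive path and may check the congestion verdict. A caller-side sketch; eth_type_trans(), netif_rx() and NET_RX_DROP are real kernel API (linux/netdevice.h, linux/etherdevice.h), while the mydrv_* names are made up for illustration:

	static void mydrv_rx_one(struct sk_buff *skb, struct net_device *dev)
	{
		skb->protocol = eth_type_trans(skb, dev);	/* set protocol, pull MAC header */
		if (netif_rx(skb) == NET_RX_DROP)
			mydrv_stats_drop(dev);	/* hypothetical stats hook */
	}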
2238 | 2490 | ||
@@ -2469,22 +2721,56 @@ void netif_nit_deliver(struct sk_buff *skb) | |||
2469 | rcu_read_unlock(); | 2721 | rcu_read_unlock(); |
2470 | } | 2722 | } |
2471 | 2723 | ||
2472 | /** | 2724 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, |
2473 | * netif_receive_skb - process receive buffer from network | 2725 | struct net_device *master) |
2474 | * @skb: buffer to process | 2726 | { |
2475 | * | 2727 | if (skb->pkt_type == PACKET_HOST) { |
2476 | * netif_receive_skb() is the main receive data processing function. | 2728 | u16 *dest = (u16 *) eth_hdr(skb)->h_dest; |
2477 | * It always succeeds. The buffer may be dropped during processing | 2729 | |
2478 | * for congestion control or by the protocol layers. | 2730 | memcpy(dest, master->dev_addr, ETH_ALEN); |
2479 | * | 2731 | } |
2480 | * This function may only be called from softirq context and interrupts | 2732 | } |
2481 | * should be enabled. | 2733 | |
2482 | * | 2734 | /* On bonding slaves other than the currently active slave, suppress |
2483 | * Return values (usually ignored): | 2735 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
2484 | * NET_RX_SUCCESS: no congestion | 2736 | * ARP on active-backup slaves with arp_validate enabled. |
2485 | * NET_RX_DROP: packet was dropped | ||
2486 | */ | 2737 | */ |
2487 | int netif_receive_skb(struct sk_buff *skb) | 2738 | int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master) |
2739 | { | ||
2740 | struct net_device *dev = skb->dev; | ||
2741 | |||
2742 | if (master->priv_flags & IFF_MASTER_ARPMON) | ||
2743 | dev->last_rx = jiffies; | ||
2744 | |||
2745 | if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { | ||
2746 | /* Do address unmangle. The local destination address | ||
2747 | * will always be the one the master has. Provides the right | ||
2748 | * functionality in a bridge. | ||
2749 | */ | ||
2750 | skb_bond_set_mac_by_master(skb, master); | ||
2751 | } | ||
2752 | |||
2753 | if (dev->priv_flags & IFF_SLAVE_INACTIVE) { | ||
2754 | if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && | ||
2755 | skb->protocol == __cpu_to_be16(ETH_P_ARP)) | ||
2756 | return 0; | ||
2757 | |||
2758 | if (master->priv_flags & IFF_MASTER_ALB) { | ||
2759 | if (skb->pkt_type != PACKET_BROADCAST && | ||
2760 | skb->pkt_type != PACKET_MULTICAST) | ||
2761 | return 0; | ||
2762 | } | ||
2763 | if (master->priv_flags & IFF_MASTER_8023AD && | ||
2764 | skb->protocol == __cpu_to_be16(ETH_P_SLOW)) | ||
2765 | return 0; | ||
2766 | |||
2767 | return 1; | ||
2768 | } | ||
2769 | return 0; | ||
2770 | } | ||
2771 | EXPORT_SYMBOL(__skb_bond_should_drop); | ||
2772 | |||
2773 | static int __netif_receive_skb(struct sk_buff *skb) | ||
2488 | { | 2774 | { |
2489 | struct packet_type *ptype, *pt_prev; | 2775 | struct packet_type *ptype, *pt_prev; |
2490 | struct net_device *orig_dev; | 2776 | struct net_device *orig_dev; |
@@ -2595,20 +2881,64 @@ out: | |||
2595 | rcu_read_unlock(); | 2881 | rcu_read_unlock(); |
2596 | return ret; | 2882 | return ret; |
2597 | } | 2883 | } |
2884 | |||
2885 | /** | ||
2886 | * netif_receive_skb - process receive buffer from network | ||
2887 | * @skb: buffer to process | ||
2888 | * | ||
2889 | * netif_receive_skb() is the main receive data processing function. | ||
2890 | * It always succeeds. The buffer may be dropped during processing | ||
2891 | * for congestion control or by the protocol layers. | ||
2892 | * | ||
2893 | * This function may only be called from softirq context and interrupts | ||
2894 | * should be enabled. | ||
2895 | * | ||
2896 | * Return values (usually ignored): | ||
2897 | * NET_RX_SUCCESS: no congestion | ||
2898 | * NET_RX_DROP: packet was dropped | ||
2899 | */ | ||
2900 | int netif_receive_skb(struct sk_buff *skb) | ||
2901 | { | ||
2902 | #ifdef CONFIG_RPS | ||
2903 | struct rps_dev_flow voidflow, *rflow = &voidflow; | ||
2904 | int cpu, ret; | ||
2905 | |||
2906 | rcu_read_lock(); | ||
2907 | |||
2908 | cpu = get_rps_cpu(skb->dev, skb, &rflow); | ||
2909 | |||
2910 | if (cpu >= 0) { | ||
2911 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); | ||
2912 | rcu_read_unlock(); | ||
2913 | } else { | ||
2914 | rcu_read_unlock(); | ||
2915 | ret = __netif_receive_skb(skb); | ||
2916 | } | ||
2917 | |||
2918 | return ret; | ||
2919 | #else | ||
2920 | return __netif_receive_skb(skb); | ||
2921 | #endif | ||
2922 | } | ||
2598 | EXPORT_SYMBOL(netif_receive_skb); | 2923 | EXPORT_SYMBOL(netif_receive_skb); |
2599 | 2924 | ||
2600 | /* Network device is going away, flush any packets still pending */ | 2925 | /* Network device is going away, flush any packets still pending |
2926 | * Called with irqs disabled. | ||
2927 | */ | ||
2601 | static void flush_backlog(void *arg) | 2928 | static void flush_backlog(void *arg) |
2602 | { | 2929 | { |
2603 | struct net_device *dev = arg; | 2930 | struct net_device *dev = arg; |
2604 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 2931 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
2605 | struct sk_buff *skb, *tmp; | 2932 | struct sk_buff *skb, *tmp; |
2606 | 2933 | ||
2607 | skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) | 2934 | rps_lock(sd); |
2935 | skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) | ||
2608 | if (skb->dev == dev) { | 2936 | if (skb->dev == dev) { |
2609 | __skb_unlink(skb, &queue->input_pkt_queue); | 2937 | __skb_unlink(skb, &sd->input_pkt_queue); |
2610 | kfree_skb(skb); | 2938 | kfree_skb(skb); |
2939 | input_queue_head_incr(sd); | ||
2611 | } | 2940 | } |
2941 | rps_unlock(sd); | ||
2612 | } | 2942 | } |
2613 | 2943 | ||
2614 | static int napi_gro_complete(struct sk_buff *skb) | 2944 | static int napi_gro_complete(struct sk_buff *skb) |
@@ -2911,27 +3241,67 @@ gro_result_t napi_gro_frags(struct napi_struct *napi) | |||
2911 | } | 3241 | } |
2912 | EXPORT_SYMBOL(napi_gro_frags); | 3242 | EXPORT_SYMBOL(napi_gro_frags); |
2913 | 3243 | ||
3244 | /* | ||
3245 | * net_rps_action_and_irq_enable sends any pending IPIs for RPS. | ||
3246 | * Note: called with local irq disabled, but exits with local irq enabled. | ||
3247 | */ | ||
3248 | static void net_rps_action_and_irq_enable(struct softnet_data *sd) | ||
3249 | { | ||
3250 | #ifdef CONFIG_RPS | ||
3251 | struct softnet_data *remsd = sd->rps_ipi_list; | ||
3252 | |||
3253 | if (remsd) { | ||
3254 | sd->rps_ipi_list = NULL; | ||
3255 | |||
3256 | local_irq_enable(); | ||
3257 | |||
3258 | /* Send pending IPI's to kick RPS processing on remote cpus. */ | ||
3259 | while (remsd) { | ||
3260 | struct softnet_data *next = remsd->rps_ipi_next; | ||
3261 | |||
3262 | if (cpu_online(remsd->cpu)) | ||
3263 | __smp_call_function_single(remsd->cpu, | ||
3264 | &remsd->csd, 0); | ||
3265 | remsd = next; | ||
3266 | } | ||
3267 | } else | ||
3268 | #endif | ||
3269 | local_irq_enable(); | ||
3270 | } | ||
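[Editor's note] The function above uses a classic irq-safe handoff: detach the whole pending list with a single pointer move while interrupts are still disabled, re-enable interrupts, then walk the now-private copy at leisure. A generic sketch of the detach-then-walk pattern (all names illustrative):

	#include <stddef.h>

	struct node {
		struct node *next;
	};

	/* *head is only ever touched with interrupts off, so the detach
	 * is one pointer move; the returned list is private and can be
	 * walked with interrupts enabled, as rps_ipi_list is above. */
	static struct node *detach_all(struct node **head)
	{
		struct node *list = *head;

		*head = NULL;
		return list;
	}

	static void process_all(struct node *list, void (*fn)(struct node *))
	{
		while (list) {
			struct node *next = list->next;	/* fn() may reuse the node */

			fn(list);
			list = next;
		}
	}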
3271 | |||
2914 | static int process_backlog(struct napi_struct *napi, int quota) | 3272 | static int process_backlog(struct napi_struct *napi, int quota) |
2915 | { | 3273 | { |
2916 | int work = 0; | 3274 | int work = 0; |
2917 | struct softnet_data *queue = &__get_cpu_var(softnet_data); | 3275 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
2918 | unsigned long start_time = jiffies; | ||
2919 | 3276 | ||
3277 | #ifdef CONFIG_RPS | ||
3278 | /* Check if we have pending IPIs; it's better to send them now | ||
3279 | * than to wait for net_rx_action() to end. | ||
3280 | */ | ||
3281 | if (sd->rps_ipi_list) { | ||
3282 | local_irq_disable(); | ||
3283 | net_rps_action_and_irq_enable(sd); | ||
3284 | } | ||
3285 | #endif | ||
2920 | napi->weight = weight_p; | 3286 | napi->weight = weight_p; |
2921 | do { | 3287 | do { |
2922 | struct sk_buff *skb; | 3288 | struct sk_buff *skb; |
2923 | 3289 | ||
2924 | local_irq_disable(); | 3290 | local_irq_disable(); |
2925 | skb = __skb_dequeue(&queue->input_pkt_queue); | 3291 | rps_lock(sd); |
3292 | skb = __skb_dequeue(&sd->input_pkt_queue); | ||
2926 | if (!skb) { | 3293 | if (!skb) { |
2927 | __napi_complete(napi); | 3294 | __napi_complete(napi); |
3295 | rps_unlock(sd); | ||
2928 | local_irq_enable(); | 3296 | local_irq_enable(); |
2929 | break; | 3297 | break; |
2930 | } | 3298 | } |
3299 | input_queue_head_incr(sd); | ||
3300 | rps_unlock(sd); | ||
2931 | local_irq_enable(); | 3301 | local_irq_enable(); |
2932 | 3302 | ||
2933 | netif_receive_skb(skb); | 3303 | __netif_receive_skb(skb); |
2934 | } while (++work < quota && jiffies == start_time); | 3304 | } while (++work < quota); |
2935 | 3305 | ||
2936 | return work; | 3306 | return work; |
2937 | } | 3307 | } |
@@ -3019,17 +3389,16 @@ void netif_napi_del(struct napi_struct *napi) | |||
3019 | } | 3389 | } |
3020 | EXPORT_SYMBOL(netif_napi_del); | 3390 | EXPORT_SYMBOL(netif_napi_del); |
3021 | 3391 | ||
3022 | |||
3023 | static void net_rx_action(struct softirq_action *h) | 3392 | static void net_rx_action(struct softirq_action *h) |
3024 | { | 3393 | { |
3025 | struct list_head *list = &__get_cpu_var(softnet_data).poll_list; | 3394 | struct softnet_data *sd = &__get_cpu_var(softnet_data); |
3026 | unsigned long time_limit = jiffies + 2; | 3395 | unsigned long time_limit = jiffies + 2; |
3027 | int budget = netdev_budget; | 3396 | int budget = netdev_budget; |
3028 | void *have; | 3397 | void *have; |
3029 | 3398 | ||
3030 | local_irq_disable(); | 3399 | local_irq_disable(); |
3031 | 3400 | ||
3032 | while (!list_empty(list)) { | 3401 | while (!list_empty(&sd->poll_list)) { |
3033 | struct napi_struct *n; | 3402 | struct napi_struct *n; |
3034 | int work, weight; | 3403 | int work, weight; |
3035 | 3404 | ||
@@ -3047,7 +3416,7 @@ static void net_rx_action(struct softirq_action *h) | |||
3047 | * entries to the tail of this list, and only ->poll() | 3416 | * entries to the tail of this list, and only ->poll() |
3048 | * calls can remove this head entry from the list. | 3417 | * calls can remove this head entry from the list. |
3049 | */ | 3418 | */ |
3050 | n = list_first_entry(list, struct napi_struct, poll_list); | 3419 | n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); |
3051 | 3420 | ||
3052 | have = netpoll_poll_lock(n); | 3421 | have = netpoll_poll_lock(n); |
3053 | 3422 | ||
@@ -3082,13 +3451,13 @@ static void net_rx_action(struct softirq_action *h) | |||
3082 | napi_complete(n); | 3451 | napi_complete(n); |
3083 | local_irq_disable(); | 3452 | local_irq_disable(); |
3084 | } else | 3453 | } else |
3085 | list_move_tail(&n->poll_list, list); | 3454 | list_move_tail(&n->poll_list, &sd->poll_list); |
3086 | } | 3455 | } |
3087 | 3456 | ||
3088 | netpoll_poll_unlock(have); | 3457 | netpoll_poll_unlock(have); |
3089 | } | 3458 | } |
3090 | out: | 3459 | out: |
3091 | local_irq_enable(); | 3460 | net_rps_action_and_irq_enable(sd); |
3092 | 3461 | ||
3093 | #ifdef CONFIG_NET_DMA | 3462 | #ifdef CONFIG_NET_DMA |
3094 | /* | 3463 | /* |
@@ -3334,10 +3703,10 @@ static int softnet_seq_show(struct seq_file *seq, void *v) | |||
3334 | { | 3703 | { |
3335 | struct netif_rx_stats *s = v; | 3704 | struct netif_rx_stats *s = v; |
3336 | 3705 | ||
3337 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 3706 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
3338 | s->total, s->dropped, s->time_squeeze, 0, | 3707 | s->total, s->dropped, s->time_squeeze, 0, |
3339 | 0, 0, 0, 0, /* was fastroute */ | 3708 | 0, 0, 0, 0, /* was fastroute */ |
3340 | s->cpu_collision); | 3709 | s->cpu_collision, s->received_rps); |
3341 | return 0; | 3710 | return 0; |
3342 | } | 3711 | } |
3343 | 3712 | ||
@@ -3560,11 +3929,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) | |||
3560 | 3929 | ||
3561 | slave->master = master; | 3930 | slave->master = master; |
3562 | 3931 | ||
3563 | synchronize_net(); | 3932 | if (old) { |
3564 | 3933 | synchronize_net(); | |
3565 | if (old) | ||
3566 | dev_put(old); | 3934 | dev_put(old); |
3567 | 3935 | } | |
3568 | if (master) | 3936 | if (master) |
3569 | slave->flags |= IFF_SLAVE; | 3937 | slave->flags |= IFF_SLAVE; |
3570 | else | 3938 | else |
@@ -3741,562 +4109,6 @@ void dev_set_rx_mode(struct net_device *dev) | |||
3741 | netif_addr_unlock_bh(dev); | 4109 | netif_addr_unlock_bh(dev); |
3742 | } | 4110 | } |
3743 | 4111 | ||
3744 | /* hw addresses list handling functions */ | ||
3745 | |||
3746 | static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
3747 | int addr_len, unsigned char addr_type) | ||
3748 | { | ||
3749 | struct netdev_hw_addr *ha; | ||
3750 | int alloc_size; | ||
3751 | |||
3752 | if (addr_len > MAX_ADDR_LEN) | ||
3753 | return -EINVAL; | ||
3754 | |||
3755 | list_for_each_entry(ha, &list->list, list) { | ||
3756 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3757 | ha->type == addr_type) { | ||
3758 | ha->refcount++; | ||
3759 | return 0; | ||
3760 | } | ||
3761 | } | ||
3762 | |||
3763 | |||
3764 | alloc_size = sizeof(*ha); | ||
3765 | if (alloc_size < L1_CACHE_BYTES) | ||
3766 | alloc_size = L1_CACHE_BYTES; | ||
3767 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
3768 | if (!ha) | ||
3769 | return -ENOMEM; | ||
3770 | memcpy(ha->addr, addr, addr_len); | ||
3771 | ha->type = addr_type; | ||
3772 | ha->refcount = 1; | ||
3773 | ha->synced = false; | ||
3774 | list_add_tail_rcu(&ha->list, &list->list); | ||
3775 | list->count++; | ||
3776 | return 0; | ||
3777 | } | ||
3778 | |||
3779 | static void ha_rcu_free(struct rcu_head *head) | ||
3780 | { | ||
3781 | struct netdev_hw_addr *ha; | ||
3782 | |||
3783 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
3784 | kfree(ha); | ||
3785 | } | ||
3786 | |||
3787 | static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
3788 | int addr_len, unsigned char addr_type) | ||
3789 | { | ||
3790 | struct netdev_hw_addr *ha; | ||
3791 | |||
3792 | list_for_each_entry(ha, &list->list, list) { | ||
3793 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3794 | (ha->type == addr_type || !addr_type)) { | ||
3795 | if (--ha->refcount) | ||
3796 | return 0; | ||
3797 | list_del_rcu(&ha->list); | ||
3798 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3799 | list->count--; | ||
3800 | return 0; | ||
3801 | } | ||
3802 | } | ||
3803 | return -ENOENT; | ||
3804 | } | ||
3805 | |||
3806 | static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | ||
3807 | struct netdev_hw_addr_list *from_list, | ||
3808 | int addr_len, | ||
3809 | unsigned char addr_type) | ||
3810 | { | ||
3811 | int err; | ||
3812 | struct netdev_hw_addr *ha, *ha2; | ||
3813 | unsigned char type; | ||
3814 | |||
3815 | list_for_each_entry(ha, &from_list->list, list) { | ||
3816 | type = addr_type ? addr_type : ha->type; | ||
3817 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
3818 | if (err) | ||
3819 | goto unroll; | ||
3820 | } | ||
3821 | return 0; | ||
3822 | |||
3823 | unroll: | ||
3824 | list_for_each_entry(ha2, &from_list->list, list) { | ||
3825 | if (ha2 == ha) | ||
3826 | break; | ||
3827 | type = addr_type ? addr_type : ha2->type; | ||
3828 | __hw_addr_del(to_list, ha2->addr, addr_len, type); | ||
3829 | } | ||
3830 | return err; | ||
3831 | } | ||
3832 | |||
3833 | static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | ||
3834 | struct netdev_hw_addr_list *from_list, | ||
3835 | int addr_len, | ||
3836 | unsigned char addr_type) | ||
3837 | { | ||
3838 | struct netdev_hw_addr *ha; | ||
3839 | unsigned char type; | ||
3840 | |||
3841 | list_for_each_entry(ha, &from_list->list, list) { | ||
3842 | type = addr_type ? addr_type : ha->type; | ||
3843 | __hw_addr_del(to_list, ha->addr, addr_len, addr_type); | ||
3844 | } | ||
3845 | } | ||
3846 | |||
3847 | static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | ||
3848 | struct netdev_hw_addr_list *from_list, | ||
3849 | int addr_len) | ||
3850 | { | ||
3851 | int err = 0; | ||
3852 | struct netdev_hw_addr *ha, *tmp; | ||
3853 | |||
3854 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
3855 | if (!ha->synced) { | ||
3856 | err = __hw_addr_add(to_list, ha->addr, | ||
3857 | addr_len, ha->type); | ||
3858 | if (err) | ||
3859 | break; | ||
3860 | ha->synced = true; | ||
3861 | ha->refcount++; | ||
3862 | } else if (ha->refcount == 1) { | ||
3863 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | ||
3864 | __hw_addr_del(from_list, ha->addr, addr_len, ha->type); | ||
3865 | } | ||
3866 | } | ||
3867 | return err; | ||
3868 | } | ||
3869 | |||
3870 | static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | ||
3871 | struct netdev_hw_addr_list *from_list, | ||
3872 | int addr_len) | ||
3873 | { | ||
3874 | struct netdev_hw_addr *ha, *tmp; | ||
3875 | |||
3876 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
3877 | if (ha->synced) { | ||
3878 | __hw_addr_del(to_list, ha->addr, | ||
3879 | addr_len, ha->type); | ||
3880 | ha->synced = false; | ||
3881 | __hw_addr_del(from_list, ha->addr, | ||
3882 | addr_len, ha->type); | ||
3883 | } | ||
3884 | } | ||
3885 | } | ||
3886 | |||
3887 | static void __hw_addr_flush(struct netdev_hw_addr_list *list) | ||
3888 | { | ||
3889 | struct netdev_hw_addr *ha, *tmp; | ||
3890 | |||
3891 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | ||
3892 | list_del_rcu(&ha->list); | ||
3893 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3894 | } | ||
3895 | list->count = 0; | ||
3896 | } | ||
3897 | |||
3898 | static void __hw_addr_init(struct netdev_hw_addr_list *list) | ||
3899 | { | ||
3900 | INIT_LIST_HEAD(&list->list); | ||
3901 | list->count = 0; | ||
3902 | } | ||
3903 | |||
3904 | /* Device addresses handling functions */ | ||
3905 | |||
3906 | static void dev_addr_flush(struct net_device *dev) | ||
3907 | { | ||
3908 | /* rtnl_mutex must be held here */ | ||
3909 | |||
3910 | __hw_addr_flush(&dev->dev_addrs); | ||
3911 | dev->dev_addr = NULL; | ||
3912 | } | ||
3913 | |||
3914 | static int dev_addr_init(struct net_device *dev) | ||
3915 | { | ||
3916 | unsigned char addr[MAX_ADDR_LEN]; | ||
3917 | struct netdev_hw_addr *ha; | ||
3918 | int err; | ||
3919 | |||
3920 | /* rtnl_mutex must be held here */ | ||
3921 | |||
3922 | __hw_addr_init(&dev->dev_addrs); | ||
3923 | memset(addr, 0, sizeof(addr)); | ||
3924 | err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), | ||
3925 | NETDEV_HW_ADDR_T_LAN); | ||
3926 | if (!err) { | ||
3927 | /* | ||
3928 | * Get the first (previously created) address from the list | ||
3929 | * and set dev_addr pointer to this location. | ||
3930 | */ | ||
3931 | ha = list_first_entry(&dev->dev_addrs.list, | ||
3932 | struct netdev_hw_addr, list); | ||
3933 | dev->dev_addr = ha->addr; | ||
3934 | } | ||
3935 | return err; | ||
3936 | } | ||
3937 | |||
3938 | /** | ||
3939 | * dev_addr_add - Add a device address | ||
3940 | * @dev: device | ||
3941 | * @addr: address to add | ||
3942 | * @addr_type: address type | ||
3943 | * | ||
3944 | * Add a device address to the device or increase the reference count if | ||
3945 | * it already exists. | ||
3946 | * | ||
3947 | * The caller must hold the rtnl_mutex. | ||
3948 | */ | ||
3949 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
3950 | unsigned char addr_type) | ||
3951 | { | ||
3952 | int err; | ||
3953 | |||
3954 | ASSERT_RTNL(); | ||
3955 | |||
3956 | err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); | ||
3957 | if (!err) | ||
3958 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3959 | return err; | ||
3960 | } | ||
3961 | EXPORT_SYMBOL(dev_addr_add); | ||
3962 | |||
3963 | /** | ||
3964 | * dev_addr_del - Release a device address. | ||
3965 | * @dev: device | ||
3966 | * @addr: address to delete | ||
3967 | * @addr_type: address type | ||
3968 | * | ||
3969 | * Release reference to a device address and remove it from the device | ||
3970 | * if the reference count drops to zero. | ||
3971 | * | ||
3972 | * The caller must hold the rtnl_mutex. | ||
3973 | */ | ||
3974 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
3975 | unsigned char addr_type) | ||
3976 | { | ||
3977 | int err; | ||
3978 | struct netdev_hw_addr *ha; | ||
3979 | |||
3980 | ASSERT_RTNL(); | ||
3981 | |||
3982 | /* | ||
3983 | * We can not remove the first address from the list because | ||
3984 | * dev->dev_addr points to that. | ||
3985 | */ | ||
3986 | ha = list_first_entry(&dev->dev_addrs.list, | ||
3987 | struct netdev_hw_addr, list); | ||
3988 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
3989 | return -ENOENT; | ||
3990 | |||
3991 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | ||
3992 | addr_type); | ||
3993 | if (!err) | ||
3994 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3995 | return err; | ||
3996 | } | ||
3997 | EXPORT_SYMBOL(dev_addr_del); | ||
3998 | |||
3999 | /** | ||
4000 | * dev_addr_add_multiple - Add device addresses from another device | ||
4001 | * @to_dev: device to which addresses will be added | ||
4002 | * @from_dev: device from which addresses will be added | ||
4003 | * @addr_type: address type - 0 means type will be used from from_dev | ||
4004 | * | ||
4005 | * Add device addresses of the one device to another. | ||
4006 | ** | ||
4007 | * The caller must hold the rtnl_mutex. | ||
4008 | */ | ||
4009 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
4010 | struct net_device *from_dev, | ||
4011 | unsigned char addr_type) | ||
4012 | { | ||
4013 | int err; | ||
4014 | |||
4015 | ASSERT_RTNL(); | ||
4016 | |||
4017 | if (from_dev->addr_len != to_dev->addr_len) | ||
4018 | return -EINVAL; | ||
4019 | err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
4020 | to_dev->addr_len, addr_type); | ||
4021 | if (!err) | ||
4022 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
4023 | return err; | ||
4024 | } | ||
4025 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
4026 | |||
4027 | /** | ||
4028 | * dev_addr_del_multiple - Delete device addresses by another device | ||
4029 | * @to_dev: device where the addresses will be deleted | ||
4030 | * @from_dev: device by which addresses the addresses will be deleted | ||
4031 | * @addr_type: address type - 0 means type will used from from_dev | ||
4032 | * | ||
4033 | * Deletes addresses in to device by the list of addresses in from device. | ||
4034 | * | ||
4035 | * The caller must hold the rtnl_mutex. | ||
4036 | */ | ||
4037 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
4038 | struct net_device *from_dev, | ||
4039 | unsigned char addr_type) | ||
4040 | { | ||
4041 | ASSERT_RTNL(); | ||
4042 | |||
4043 | if (from_dev->addr_len != to_dev->addr_len) | ||
4044 | return -EINVAL; | ||
4045 | __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
4046 | to_dev->addr_len, addr_type); | ||
4047 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
4048 | return 0; | ||
4049 | } | ||
4050 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
4051 | |||
4052 | /* multicast addresses handling functions */ | ||
4053 | |||
4054 | int __dev_addr_delete(struct dev_addr_list **list, int *count, | ||
4055 | void *addr, int alen, int glbl) | ||
4056 | { | ||
4057 | struct dev_addr_list *da; | ||
4058 | |||
4059 | for (; (da = *list) != NULL; list = &da->next) { | ||
4060 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | ||
4061 | alen == da->da_addrlen) { | ||
4062 | if (glbl) { | ||
4063 | int old_glbl = da->da_gusers; | ||
4064 | da->da_gusers = 0; | ||
4065 | if (old_glbl == 0) | ||
4066 | break; | ||
4067 | } | ||
4068 | if (--da->da_users) | ||
4069 | return 0; | ||
4070 | |||
4071 | *list = da->next; | ||
4072 | kfree(da); | ||
4073 | (*count)--; | ||
4074 | return 0; | ||
4075 | } | ||
4076 | } | ||
4077 | return -ENOENT; | ||
4078 | } | ||
4079 | |||
4080 | int __dev_addr_add(struct dev_addr_list **list, int *count, | ||
4081 | void *addr, int alen, int glbl) | ||
4082 | { | ||
4083 | struct dev_addr_list *da; | ||
4084 | |||
4085 | for (da = *list; da != NULL; da = da->next) { | ||
4086 | if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && | ||
4087 | da->da_addrlen == alen) { | ||
4088 | if (glbl) { | ||
4089 | int old_glbl = da->da_gusers; | ||
4090 | da->da_gusers = 1; | ||
4091 | if (old_glbl) | ||
4092 | return 0; | ||
4093 | } | ||
4094 | da->da_users++; | ||
4095 | return 0; | ||
4096 | } | ||
4097 | } | ||
4098 | |||
4099 | da = kzalloc(sizeof(*da), GFP_ATOMIC); | ||
4100 | if (da == NULL) | ||
4101 | return -ENOMEM; | ||
4102 | memcpy(da->da_addr, addr, alen); | ||
4103 | da->da_addrlen = alen; | ||
4104 | da->da_users = 1; | ||
4105 | da->da_gusers = glbl ? 1 : 0; | ||
4106 | da->next = *list; | ||
4107 | *list = da; | ||
4108 | (*count)++; | ||
4109 | return 0; | ||
4110 | } | ||
4111 | |||
4112 | /** | ||
4113 | * dev_unicast_delete - Release secondary unicast address. | ||
4114 | * @dev: device | ||
4115 | * @addr: address to delete | ||
4116 | * | ||
4117 | * Release reference to a secondary unicast address and remove it | ||
4118 | * from the device if the reference count drops to zero. | ||
4119 | * | ||
4120 | * The caller must hold the rtnl_mutex. | ||
4121 | */ | ||
4122 | int dev_unicast_delete(struct net_device *dev, void *addr) | ||
4123 | { | ||
4124 | int err; | ||
4125 | |||
4126 | ASSERT_RTNL(); | ||
4127 | |||
4128 | netif_addr_lock_bh(dev); | ||
4129 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | ||
4130 | NETDEV_HW_ADDR_T_UNICAST); | ||
4131 | if (!err) | ||
4132 | __dev_set_rx_mode(dev); | ||
4133 | netif_addr_unlock_bh(dev); | ||
4134 | return err; | ||
4135 | } | ||
4136 | EXPORT_SYMBOL(dev_unicast_delete); | ||
4137 | |||
4138 | /** | ||
4139 | * dev_unicast_add - add a secondary unicast address | ||
4140 | * @dev: device | ||
4141 | * @addr: address to add | ||
4142 | * | ||
4143 | * Add a secondary unicast address to the device or increase | ||
4144 | * the reference count if it already exists. | ||
4145 | * | ||
4146 | * The caller must hold the rtnl_mutex. | ||
4147 | */ | ||
4148 | int dev_unicast_add(struct net_device *dev, void *addr) | ||
4149 | { | ||
4150 | int err; | ||
4151 | |||
4152 | ASSERT_RTNL(); | ||
4153 | |||
4154 | netif_addr_lock_bh(dev); | ||
4155 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | ||
4156 | NETDEV_HW_ADDR_T_UNICAST); | ||
4157 | if (!err) | ||
4158 | __dev_set_rx_mode(dev); | ||
4159 | netif_addr_unlock_bh(dev); | ||
4160 | return err; | ||
4161 | } | ||
4162 | EXPORT_SYMBOL(dev_unicast_add); | ||
4163 | |||
4164 | int __dev_addr_sync(struct dev_addr_list **to, int *to_count, | ||
4165 | struct dev_addr_list **from, int *from_count) | ||
4166 | { | ||
4167 | struct dev_addr_list *da, *next; | ||
4168 | int err = 0; | ||
4169 | |||
4170 | da = *from; | ||
4171 | while (da != NULL) { | ||
4172 | next = da->next; | ||
4173 | if (!da->da_synced) { | ||
4174 | err = __dev_addr_add(to, to_count, | ||
4175 | da->da_addr, da->da_addrlen, 0); | ||
4176 | if (err < 0) | ||
4177 | break; | ||
4178 | da->da_synced = 1; | ||
4179 | da->da_users++; | ||
4180 | } else if (da->da_users == 1) { | ||
4181 | __dev_addr_delete(to, to_count, | ||
4182 | da->da_addr, da->da_addrlen, 0); | ||
4183 | __dev_addr_delete(from, from_count, | ||
4184 | da->da_addr, da->da_addrlen, 0); | ||
4185 | } | ||
4186 | da = next; | ||
4187 | } | ||
4188 | return err; | ||
4189 | } | ||
4190 | EXPORT_SYMBOL_GPL(__dev_addr_sync); | ||
4191 | |||
4192 | void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | ||
4193 | struct dev_addr_list **from, int *from_count) | ||
4194 | { | ||
4195 | struct dev_addr_list *da, *next; | ||
4196 | |||
4197 | da = *from; | ||
4198 | while (da != NULL) { | ||
4199 | next = da->next; | ||
4200 | if (da->da_synced) { | ||
4201 | __dev_addr_delete(to, to_count, | ||
4202 | da->da_addr, da->da_addrlen, 0); | ||
4203 | da->da_synced = 0; | ||
4204 | __dev_addr_delete(from, from_count, | ||
4205 | da->da_addr, da->da_addrlen, 0); | ||
4206 | } | ||
4207 | da = next; | ||
4208 | } | ||
4209 | } | ||
4210 | EXPORT_SYMBOL_GPL(__dev_addr_unsync); | ||
4211 | |||
4212 | /** | ||
4213 | * dev_unicast_sync - Synchronize device's unicast list to another device | ||
4214 | * @to: destination device | ||
4215 | * @from: source device | ||
4216 | * | ||
4217 | * Add newly added addresses to the destination device and release | ||
4218 | * addresses that have no users left. The source device must be | ||
4219 | * locked by netif_tx_lock_bh. | ||
4220 | * | ||
4221 | * This function is intended to be called from the dev->set_rx_mode | ||
4222 | * function of layered software devices. | ||
4223 | */ | ||
4224 | int dev_unicast_sync(struct net_device *to, struct net_device *from) | ||
4225 | { | ||
4226 | int err = 0; | ||
4227 | |||
4228 | if (to->addr_len != from->addr_len) | ||
4229 | return -EINVAL; | ||
4230 | |||
4231 | netif_addr_lock_bh(to); | ||
4232 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | ||
4233 | if (!err) | ||
4234 | __dev_set_rx_mode(to); | ||
4235 | netif_addr_unlock_bh(to); | ||
4236 | return err; | ||
4237 | } | ||
4238 | EXPORT_SYMBOL(dev_unicast_sync); | ||
4239 | |||
4240 | /** | ||
4241 | * dev_unicast_unsync - Remove synchronized addresses from the destination device | ||
4242 | * @to: destination device | ||
4243 | * @from: source device | ||
4244 | * | ||
4245 | * Remove all addresses that were added to the destination device by | ||
4246 | * dev_unicast_sync(). This function is intended to be called from the | ||
4247 | * dev->stop function of layered software devices. | ||
4248 | */ | ||
4249 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | ||
4250 | { | ||
4251 | if (to->addr_len != from->addr_len) | ||
4252 | return; | ||
4253 | |||
4254 | netif_addr_lock_bh(from); | ||
4255 | netif_addr_lock(to); | ||
4256 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | ||
4257 | __dev_set_rx_mode(to); | ||
4258 | netif_addr_unlock(to); | ||
4259 | netif_addr_unlock_bh(from); | ||
4260 | } | ||
4261 | EXPORT_SYMBOL(dev_unicast_unsync); | ||
4262 | |||
4263 | static void dev_unicast_flush(struct net_device *dev) | ||
4264 | { | ||
4265 | netif_addr_lock_bh(dev); | ||
4266 | __hw_addr_flush(&dev->uc); | ||
4267 | netif_addr_unlock_bh(dev); | ||
4268 | } | ||
4269 | |||
4270 | static void dev_unicast_init(struct net_device *dev) | ||
4271 | { | ||
4272 | __hw_addr_init(&dev->uc); | ||
4273 | } | ||
4274 | |||
4275 | |||
4276 | static void __dev_addr_discard(struct dev_addr_list **list) | ||
4277 | { | ||
4278 | struct dev_addr_list *tmp; | ||
4279 | |||
4280 | while (*list != NULL) { | ||
4281 | tmp = *list; | ||
4282 | *list = tmp->next; | ||
4283 | if (tmp->da_users > tmp->da_gusers) | ||
4284 | printk("__dev_addr_discard: address leakage! " | ||
4285 | "da_users=%d\n", tmp->da_users); | ||
4286 | kfree(tmp); | ||
4287 | } | ||
4288 | } | ||
4289 | |||
4290 | static void dev_addr_discard(struct net_device *dev) | ||
4291 | { | ||
4292 | netif_addr_lock_bh(dev); | ||
4293 | |||
4294 | __dev_addr_discard(&dev->mc_list); | ||
4295 | netdev_mc_count(dev) = 0; | ||
4296 | |||
4297 | netif_addr_unlock_bh(dev); | ||
4298 | } | ||
4299 | |||
4300 | /** | 4112 | /** |
4301 | * dev_get_flags - get flags reported to userspace | 4113 | * dev_get_flags - get flags reported to userspace |
4302 | * @dev: device | 4114 | * @dev: device |
@@ -4607,8 +4419,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4607 | return -EINVAL; | 4419 | return -EINVAL; |
4608 | if (!netif_device_present(dev)) | 4420 | if (!netif_device_present(dev)) |
4609 | return -ENODEV; | 4421 | return -ENODEV; |
4610 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | 4422 | return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); |
4611 | dev->addr_len, 1); | ||
4612 | 4423 | ||
4613 | case SIOCDELMULTI: | 4424 | case SIOCDELMULTI: |
4614 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | 4425 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || |
@@ -4616,8 +4427,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4616 | return -EINVAL; | 4427 | return -EINVAL; |
4617 | if (!netif_device_present(dev)) | 4428 | if (!netif_device_present(dev)) |
4618 | return -ENODEV; | 4429 | return -ENODEV; |
4619 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | 4430 | return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); |
4620 | dev->addr_len, 1); | ||
4621 | 4431 | ||
4622 | case SIOCSIFTXQLEN: | 4432 | case SIOCSIFTXQLEN: |
4623 | if (ifr->ifr_qlen < 0) | 4433 | if (ifr->ifr_qlen < 0) |
@@ -4924,8 +4734,8 @@ static void rollback_registered_many(struct list_head *head) | |||
4924 | /* | 4734 | /* |
4925 | * Flush the unicast and multicast chains | 4735 | * Flush the unicast and multicast chains |
4926 | */ | 4736 | */ |
4927 | dev_unicast_flush(dev); | 4737 | dev_uc_flush(dev); |
4928 | dev_addr_discard(dev); | 4738 | dev_mc_flush(dev); |
4929 | 4739 | ||
4930 | if (dev->netdev_ops->ndo_uninit) | 4740 | if (dev->netdev_ops->ndo_uninit) |
4931 | dev->netdev_ops->ndo_uninit(dev); | 4741 | dev->netdev_ops->ndo_uninit(dev); |
@@ -5074,6 +4884,24 @@ int register_netdevice(struct net_device *dev) | |||
5074 | 4884 | ||
5075 | dev->iflink = -1; | 4885 | dev->iflink = -1; |
5076 | 4886 | ||
4887 | #ifdef CONFIG_RPS | ||
4888 | if (!dev->num_rx_queues) { | ||
4889 | /* | ||
4890 | * Allocate a single RX queue if driver never called | ||
4891 | * alloc_netdev_mq | ||
4892 | */ | ||
4893 | |||
4894 | dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
4895 | if (!dev->_rx) { | ||
4896 | ret = -ENOMEM; | ||
4897 | goto out; | ||
4898 | } | ||
4899 | |||
4900 | dev->_rx->first = dev->_rx; | ||
4901 | atomic_set(&dev->_rx->count, 1); | ||
4902 | dev->num_rx_queues = 1; | ||
4903 | } | ||
4904 | #endif | ||
5077 | /* Init, if this function is available */ | 4905 | /* Init, if this function is available */ |
5078 | if (dev->netdev_ops->ndo_init) { | 4906 | if (dev->netdev_ops->ndo_init) { |
5079 | ret = dev->netdev_ops->ndo_init(dev); | 4907 | ret = dev->netdev_ops->ndo_init(dev); |
@@ -5434,6 +5262,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5434 | struct net_device *dev; | 5262 | struct net_device *dev; |
5435 | size_t alloc_size; | 5263 | size_t alloc_size; |
5436 | struct net_device *p; | 5264 | struct net_device *p; |
5265 | #ifdef CONFIG_RPS | ||
5266 | struct netdev_rx_queue *rx; | ||
5267 | int i; | ||
5268 | #endif | ||
5437 | 5269 | ||
5438 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5270 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
5439 | 5271 | ||
@@ -5459,13 +5291,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5459 | goto free_p; | 5291 | goto free_p; |
5460 | } | 5292 | } |
5461 | 5293 | ||
5294 | #ifdef CONFIG_RPS | ||
5295 | rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL); | ||
5296 | if (!rx) { | ||
5297 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | ||
5298 | "rx queues.\n"); | ||
5299 | goto free_tx; | ||
5300 | } | ||
5301 | |||
5302 | atomic_set(&rx->count, queue_count); | ||
5303 | |||
5304 | /* | ||
5305 | * Set a pointer to first element in the array which holds the | ||
5306 | * reference count. | ||
5307 | */ | ||
5308 | for (i = 0; i < queue_count; i++) | ||
5309 | rx[i].first = rx; | ||
5310 | #endif | ||
5311 | |||
5462 | dev = PTR_ALIGN(p, NETDEV_ALIGN); | 5312 | dev = PTR_ALIGN(p, NETDEV_ALIGN); |
5463 | dev->padded = (char *)dev - (char *)p; | 5313 | dev->padded = (char *)dev - (char *)p; |
5464 | 5314 | ||
5465 | if (dev_addr_init(dev)) | 5315 | if (dev_addr_init(dev)) |
5466 | goto free_tx; | 5316 | goto free_rx; |
5467 | 5317 | ||
5468 | dev_unicast_init(dev); | 5318 | dev_mc_init(dev); |
5319 | dev_uc_init(dev); | ||
5469 | 5320 | ||
5470 | dev_net_set(dev, &init_net); | 5321 | dev_net_set(dev, &init_net); |
5471 | 5322 | ||
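
The rx-queue array allocated above keeps its reference count in the first element, and every element carries a pointer back to that first element, so the whole kcalloc'ed block can be released once the last queue reference is dropped, no matter which element the reference was taken through. A userspace model of that layout (plain free() stands in for the kernel's release path; names are illustrative, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct rx_queue {
        struct rx_queue *first;   /* points at rx[0], owner of count */
        int count;                /* only meaningful in rx[0] */
    };

    static struct rx_queue *rx_alloc(int n)
    {
        struct rx_queue *rx = calloc(n, sizeof(*rx));
        int i;

        if (!rx)
            return NULL;
        rx->count = n;            /* shared count lives in rx[0] */
        for (i = 0; i < n; i++)
            rx[i].first = rx;
        return rx;
    }

    static void rx_put(struct rx_queue *q)
    {
        /* drop one reference via any element; free on the last one */
        if (--q->first->count == 0)
            free(q->first);
    }

    int main(void)
    {
        struct rx_queue *rx = rx_alloc(4);
        int i;

        if (!rx)
            return 1;
        for (i = 0; i < 4; i++)
            rx_put(&rx[i]);
        printf("all queue references dropped\n");
        return 0;
    }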
@@ -5473,6 +5324,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5473 | dev->num_tx_queues = queue_count; | 5324 | dev->num_tx_queues = queue_count; |
5474 | dev->real_num_tx_queues = queue_count; | 5325 | dev->real_num_tx_queues = queue_count; |
5475 | 5326 | ||
5327 | #ifdef CONFIG_RPS | ||
5328 | dev->_rx = rx; | ||
5329 | dev->num_rx_queues = queue_count; | ||
5330 | #endif | ||
5331 | |||
5476 | dev->gso_max_size = GSO_MAX_SIZE; | 5332 | dev->gso_max_size = GSO_MAX_SIZE; |
5477 | 5333 | ||
5478 | netdev_init_queues(dev); | 5334 | netdev_init_queues(dev); |
@@ -5487,9 +5343,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5487 | strcpy(dev->name, name); | 5343 | strcpy(dev->name, name); |
5488 | return dev; | 5344 | return dev; |
5489 | 5345 | ||
5346 | free_rx: | ||
5347 | #ifdef CONFIG_RPS | ||
5348 | kfree(rx); | ||
5490 | free_tx: | 5349 | free_tx: |
5350 | #endif | ||
5491 | kfree(tx); | 5351 | kfree(tx); |
5492 | |||
5493 | free_p: | 5352 | free_p: |
5494 | kfree(p); | 5353 | kfree(p); |
5495 | return NULL; | 5354 | return NULL; |
@@ -5691,8 +5550,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5691 | /* | 5550 | /* |
5692 | * Flush the unicast and multicast chains | 5551 | * Flush the unicast and multicast chains |
5693 | */ | 5552 | */ |
5694 | dev_unicast_flush(dev); | 5553 | dev_uc_flush(dev); |
5695 | dev_addr_discard(dev); | 5554 | dev_mc_flush(dev); |
5696 | 5555 | ||
5697 | netdev_unregister_kobject(dev); | 5556 | netdev_unregister_kobject(dev); |
5698 | 5557 | ||
@@ -5768,8 +5627,10 @@ static int dev_cpu_callback(struct notifier_block *nfb, | |||
5768 | local_irq_enable(); | 5627 | local_irq_enable(); |
5769 | 5628 | ||
5770 | /* Process offline CPU's input_pkt_queue */ | 5629 | /* Process offline CPU's input_pkt_queue */ |
5771 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) | 5630 | while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { |
5772 | netif_rx(skb); | 5631 | netif_rx(skb); |
5632 | input_queue_head_incr(oldsd); | ||
5633 | } | ||
5773 | 5634 | ||
5774 | return NOTIFY_OK; | 5635 | return NOTIFY_OK; |
5775 | } | 5636 | } |
@@ -5985,17 +5846,23 @@ static int __init net_dev_init(void) | |||
5985 | */ | 5846 | */ |
5986 | 5847 | ||
5987 | for_each_possible_cpu(i) { | 5848 | for_each_possible_cpu(i) { |
5988 | struct softnet_data *queue; | 5849 | struct softnet_data *sd = &per_cpu(softnet_data, i); |
5850 | |||
5851 | skb_queue_head_init(&sd->input_pkt_queue); | ||
5852 | sd->completion_queue = NULL; | ||
5853 | INIT_LIST_HEAD(&sd->poll_list); | ||
5989 | 5854 | ||
5990 | queue = &per_cpu(softnet_data, i); | 5855 | #ifdef CONFIG_RPS |
5991 | skb_queue_head_init(&queue->input_pkt_queue); | 5856 | sd->csd.func = rps_trigger_softirq; |
5992 | queue->completion_queue = NULL; | 5857 | sd->csd.info = sd; |
5993 | INIT_LIST_HEAD(&queue->poll_list); | 5858 | sd->csd.flags = 0; |
5859 | sd->cpu = i; | ||
5860 | #endif | ||
5994 | 5861 | ||
5995 | queue->backlog.poll = process_backlog; | 5862 | sd->backlog.poll = process_backlog; |
5996 | queue->backlog.weight = weight_p; | 5863 | sd->backlog.weight = weight_p; |
5997 | queue->backlog.gro_list = NULL; | 5864 | sd->backlog.gro_list = NULL; |
5998 | queue->backlog.gro_count = 0; | 5865 | sd->backlog.gro_count = 0; |
5999 | } | 5866 | } |
6000 | 5867 | ||
6001 | dev_boot_phase = 0; | 5868 | dev_boot_phase = 0; |
@@ -6030,7 +5897,7 @@ subsys_initcall(net_dev_init); | |||
6030 | 5897 | ||
6031 | static int __init initialize_hashrnd(void) | 5898 | static int __init initialize_hashrnd(void) |
6032 | { | 5899 | { |
6033 | get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd)); | 5900 | get_random_bytes(&hashrnd, sizeof(hashrnd)); |
6034 | return 0; | 5901 | return 0; |
6035 | } | 5902 | } |
6036 | 5903 | ||
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c new file mode 100644 index 000000000000..508f9c18992f --- /dev/null +++ b/net/core/dev_addr_lists.c | |||
@@ -0,0 +1,741 @@ | |||
1 | /* | ||
2 | * net/core/dev_addr_lists.c - Functions for handling net device lists | ||
3 | * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com> | ||
4 | * | ||
5 | * This file contains functions for working with unicast, multicast and device | ||
6 | * addresses lists. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/netdevice.h> | ||
15 | #include <linux/rtnetlink.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | |||
19 | /* | ||
20 | * General list handling functions | ||
21 | */ | ||
22 | |||
23 | static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, | ||
24 | unsigned char *addr, int addr_len, | ||
25 | unsigned char addr_type, bool global) | ||
26 | { | ||
27 | struct netdev_hw_addr *ha; | ||
28 | int alloc_size; | ||
29 | |||
30 | if (addr_len > MAX_ADDR_LEN) | ||
31 | return -EINVAL; | ||
32 | |||
33 | list_for_each_entry(ha, &list->list, list) { | ||
34 | if (!memcmp(ha->addr, addr, addr_len) && | ||
35 | ha->type == addr_type) { | ||
36 | if (global) { | ||
37 | /* check if addr is already used as global */ | ||
38 | if (ha->global_use) | ||
39 | return 0; | ||
40 | else | ||
41 | ha->global_use = true; | ||
42 | } | ||
43 | ha->refcount++; | ||
44 | return 0; | ||
45 | } | ||
46 | } | ||
47 | |||
48 | |||
49 | alloc_size = sizeof(*ha); | ||
50 | if (alloc_size < L1_CACHE_BYTES) | ||
51 | alloc_size = L1_CACHE_BYTES; | ||
52 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
53 | if (!ha) | ||
54 | return -ENOMEM; | ||
55 | memcpy(ha->addr, addr, addr_len); | ||
56 | ha->type = addr_type; | ||
57 | ha->refcount = 1; | ||
58 | ha->global_use = global; | ||
59 | ha->synced = false; | ||
60 | list_add_tail_rcu(&ha->list, &list->list); | ||
61 | list->count++; | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
66 | int addr_len, unsigned char addr_type) | ||
67 | { | ||
68 | return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); | ||
69 | } | ||
70 | |||
71 | static void ha_rcu_free(struct rcu_head *head) | ||
72 | { | ||
73 | struct netdev_hw_addr *ha; | ||
74 | |||
75 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
76 | kfree(ha); | ||
77 | } | ||
78 | |||
79 | static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, | ||
80 | unsigned char *addr, int addr_len, | ||
81 | unsigned char addr_type, bool global) | ||
82 | { | ||
83 | struct netdev_hw_addr *ha; | ||
84 | |||
85 | list_for_each_entry(ha, &list->list, list) { | ||
86 | if (!memcmp(ha->addr, addr, addr_len) && | ||
87 | (ha->type == addr_type || !addr_type)) { | ||
88 | if (global) { | ||
89 | if (!ha->global_use) | ||
90 | break; | ||
91 | else | ||
92 | ha->global_use = false; | ||
93 | } | ||
94 | if (--ha->refcount) | ||
95 | return 0; | ||
96 | list_del_rcu(&ha->list); | ||
97 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
98 | list->count--; | ||
99 | return 0; | ||
100 | } | ||
101 | } | ||
102 | return -ENOENT; | ||
103 | } | ||
104 | |||
105 | static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, | ||
106 | int addr_len, unsigned char addr_type) | ||
107 | { | ||
108 | return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); | ||
109 | } | ||
110 | |||
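
The core rule of the new lists is visible in the pair above: __hw_addr_add_ex() turns a duplicate add into a refcount bump, and __hw_addr_del_ex() only unlinks an entry once its count reaches zero. The same semantics in a self-contained userspace model (plain malloc and a bare singly linked list stand in for kmalloc and the RCU list; this is an illustration, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ALEN 6

    struct hw_addr {
        struct hw_addr *next;
        unsigned char addr[ALEN];
        int refcount;
    };

    static struct hw_addr *list;

    static int addr_add(const unsigned char *addr)
    {
        struct hw_addr *ha;

        for (ha = list; ha; ha = ha->next)
            if (!memcmp(ha->addr, addr, ALEN)) {
                ha->refcount++;           /* duplicate: just take a ref */
                return 0;
            }
        ha = malloc(sizeof(*ha));
        if (!ha)
            return -1;
        memcpy(ha->addr, addr, ALEN);
        ha->refcount = 1;
        ha->next = list;
        list = ha;
        return 0;
    }

    static int addr_del(const unsigned char *addr)
    {
        struct hw_addr **p, *ha;

        for (p = &list; (ha = *p); p = &ha->next) {
            if (memcmp(ha->addr, addr, ALEN))
                continue;
            if (--ha->refcount)
                return 0;                 /* still referenced elsewhere */
            *p = ha->next;                /* last ref: unlink and free */
            free(ha);
            return 0;
        }
        return -1;                        /* -ENOENT in the kernel */
    }

    int main(void)
    {
        unsigned char mac[ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        addr_add(mac);
        addr_add(mac);                    /* refcount == 2, one entry */
        addr_del(mac);                    /* entry survives */
        printf("deleted: %s\n", addr_del(mac) ? "no" : "yes");
        return 0;
    }

This is what lets several users register the same address independently without one delete breaking the others.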
111 | int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, | ||
112 | struct netdev_hw_addr_list *from_list, | ||
113 | int addr_len, unsigned char addr_type) | ||
114 | { | ||
115 | int err; | ||
116 | struct netdev_hw_addr *ha, *ha2; | ||
117 | unsigned char type; | ||
118 | |||
119 | list_for_each_entry(ha, &from_list->list, list) { | ||
120 | type = addr_type ? addr_type : ha->type; | ||
121 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
122 | if (err) | ||
123 | goto unroll; | ||
124 | } | ||
125 | return 0; | ||
126 | |||
127 | unroll: | ||
128 | list_for_each_entry(ha2, &from_list->list, list) { | ||
129 | if (ha2 == ha) | ||
130 | break; | ||
131 | type = addr_type ? addr_type : ha2->type; | ||
132 | __hw_addr_del(to_list, ha2->addr, addr_len, type); | ||
133 | } | ||
134 | return err; | ||
135 | } | ||
136 | EXPORT_SYMBOL(__hw_addr_add_multiple); | ||
137 | |||
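
The unroll: label above implements a common all-or-nothing pattern: on a mid-list failure, walk the source list a second time and undo exactly the entries added before the failing one. A self-contained userspace model of the same pattern (a fixed-size set stands in for the address list; illustrative only):

    #include <stdio.h>

    #define CAP 8
    static int set[CAP], used;

    static int add_one(int v)
    {
        if (used == CAP)
            return -1;                /* stands in for -ENOMEM */
        set[used++] = v;
        return 0;
    }

    static void del_one(int v)
    {
        int i;

        for (i = 0; i < used; i++)
            if (set[i] == v) {
                set[i] = set[--used];
                return;
            }
    }

    static int add_all(const int *src, int n)
    {
        int i, j, err = 0;

        for (i = 0; i < n; i++) {
            err = add_one(src[i]);
            if (err)
                goto unroll;
        }
        return 0;
    unroll:
        for (j = 0; j < i; j++)       /* undo only what succeeded */
            del_one(src[j]);
        return err;
    }

    int main(void)
    {
        int src[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };

        printf("err=%d used=%d\n", add_all(src, 10), used);  /* err=-1 used=0 */
        return 0;
    }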
138 | void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | ||
139 | struct netdev_hw_addr_list *from_list, | ||
140 | int addr_len, unsigned char addr_type) | ||
141 | { | ||
142 | struct netdev_hw_addr *ha; | ||
143 | unsigned char type; | ||
144 | |||
145 | list_for_each_entry(ha, &from_list->list, list) { | ||
146 | type = addr_type ? addr_type : ha->type; | ||
147 | __hw_addr_del(to_list, ha->addr, addr_len, type); | ||
148 | } | ||
149 | } | ||
150 | EXPORT_SYMBOL(__hw_addr_del_multiple); | ||
151 | |||
152 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | ||
153 | struct netdev_hw_addr_list *from_list, | ||
154 | int addr_len) | ||
155 | { | ||
156 | int err = 0; | ||
157 | struct netdev_hw_addr *ha, *tmp; | ||
158 | |||
159 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
160 | if (!ha->synced) { | ||
161 | err = __hw_addr_add(to_list, ha->addr, | ||
162 | addr_len, ha->type); | ||
163 | if (err) | ||
164 | break; | ||
165 | ha->synced = true; | ||
166 | ha->refcount++; | ||
167 | } else if (ha->refcount == 1) { | ||
168 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | ||
169 | __hw_addr_del(from_list, ha->addr, addr_len, ha->type); | ||
170 | } | ||
171 | } | ||
172 | return err; | ||
173 | } | ||
174 | EXPORT_SYMBOL(__hw_addr_sync); | ||
175 | |||
176 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | ||
177 | struct netdev_hw_addr_list *from_list, | ||
178 | int addr_len) | ||
179 | { | ||
180 | struct netdev_hw_addr *ha, *tmp; | ||
181 | |||
182 | list_for_each_entry_safe(ha, tmp, &from_list->list, list) { | ||
183 | if (ha->synced) { | ||
184 | __hw_addr_del(to_list, ha->addr, | ||
185 | addr_len, ha->type); | ||
186 | ha->synced = false; | ||
187 | __hw_addr_del(from_list, ha->addr, | ||
188 | addr_len, ha->type); | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | EXPORT_SYMBOL(__hw_addr_unsync); | ||
193 | |||
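
The sync/unsync pair above has a small lifecycle worth spelling out: the first sync pushes an address to the destination and pins the source entry with an extra reference; once every other user is gone (refcount == 1, only the pin left), the next sync pass retires the address from both lists. A single-entry userspace model of that lifecycle (an assumption, heavily simplified, with booleans standing in for list membership):

    #include <stdio.h>
    #include <stdbool.h>

    struct ent {
        int refcount;
        bool synced;
        bool present;           /* stands in for from-list membership */
    };

    static struct ent from = { .refcount = 1, .present = true };
    static bool in_to;          /* stands in for to-list membership */

    static void sync_one(void)
    {
        if (!from.present)
            return;
        if (!from.synced) {
            in_to = true;               /* __hw_addr_add(to, ...) */
            from.synced = true;
            from.refcount++;            /* pin while synced */
        } else if (from.refcount == 1) {
            in_to = false;              /* no users left anywhere */
            from.present = false;
        }
    }

    int main(void)
    {
        sync_one();                     /* pushed down, refcount 2 */
        from.refcount--;                /* last local user goes away */
        sync_one();                     /* retired from both lists */
        printf("in_to=%d present=%d\n", in_to, from.present);
        return 0;
    }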
194 | void __hw_addr_flush(struct netdev_hw_addr_list *list) | ||
195 | { | ||
196 | struct netdev_hw_addr *ha, *tmp; | ||
197 | |||
198 | list_for_each_entry_safe(ha, tmp, &list->list, list) { | ||
199 | list_del_rcu(&ha->list); | ||
200 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
201 | } | ||
202 | list->count = 0; | ||
203 | } | ||
204 | EXPORT_SYMBOL(__hw_addr_flush); | ||
205 | |||
206 | void __hw_addr_init(struct netdev_hw_addr_list *list) | ||
207 | { | ||
208 | INIT_LIST_HEAD(&list->list); | ||
209 | list->count = 0; | ||
210 | } | ||
211 | EXPORT_SYMBOL(__hw_addr_init); | ||
212 | |||
213 | /* | ||
214 | * Device addresses handling functions | ||
215 | */ | ||
216 | |||
217 | /** | ||
218 | * dev_addr_flush - Flush device address list | ||
219 | * @dev: device | ||
220 | * | ||
221 | * Flush device address list and reset ->dev_addr. | ||
222 | * | ||
223 | * The caller must hold the rtnl_mutex. | ||
224 | */ | ||
225 | void dev_addr_flush(struct net_device *dev) | ||
226 | { | ||
227 | /* rtnl_mutex must be held here */ | ||
228 | |||
229 | __hw_addr_flush(&dev->dev_addrs); | ||
230 | dev->dev_addr = NULL; | ||
231 | } | ||
232 | EXPORT_SYMBOL(dev_addr_flush); | ||
233 | |||
234 | /** | ||
235 | * dev_addr_init - Init device address list | ||
236 | * @dev: device | ||
237 | * | ||
238 | * Init device address list and create the first element, | ||
239 | * used by ->dev_addr. | ||
240 | * | ||
241 | * The caller must hold the rtnl_mutex. | ||
242 | */ | ||
243 | int dev_addr_init(struct net_device *dev) | ||
244 | { | ||
245 | unsigned char addr[MAX_ADDR_LEN]; | ||
246 | struct netdev_hw_addr *ha; | ||
247 | int err; | ||
248 | |||
249 | /* rtnl_mutex must be held here */ | ||
250 | |||
251 | __hw_addr_init(&dev->dev_addrs); | ||
252 | memset(addr, 0, sizeof(addr)); | ||
253 | err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), | ||
254 | NETDEV_HW_ADDR_T_LAN); | ||
255 | if (!err) { | ||
256 | /* | ||
257 | * Get the first (previously created) address from the list | ||
258 | * and set dev_addr pointer to this location. | ||
259 | */ | ||
260 | ha = list_first_entry(&dev->dev_addrs.list, | ||
261 | struct netdev_hw_addr, list); | ||
262 | dev->dev_addr = ha->addr; | ||
263 | } | ||
264 | return err; | ||
265 | } | ||
266 | EXPORT_SYMBOL(dev_addr_init); | ||
267 | |||
268 | /** | ||
269 | * dev_addr_add - Add a device address | ||
270 | * @dev: device | ||
271 | * @addr: address to add | ||
272 | * @addr_type: address type | ||
273 | * | ||
274 | * Add a device address to the device or increase the reference count if | ||
275 | * it already exists. | ||
276 | * | ||
277 | * The caller must hold the rtnl_mutex. | ||
278 | */ | ||
279 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
280 | unsigned char addr_type) | ||
281 | { | ||
282 | int err; | ||
283 | |||
284 | ASSERT_RTNL(); | ||
285 | |||
286 | err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); | ||
287 | if (!err) | ||
288 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
289 | return err; | ||
290 | } | ||
291 | EXPORT_SYMBOL(dev_addr_add); | ||
292 | |||
293 | /** | ||
294 | * dev_addr_del - Release a device address. | ||
295 | * @dev: device | ||
296 | * @addr: address to delete | ||
297 | * @addr_type: address type | ||
298 | * | ||
299 | * Release reference to a device address and remove it from the device | ||
300 | * if the reference count drops to zero. | ||
301 | * | ||
302 | * The caller must hold the rtnl_mutex. | ||
303 | */ | ||
304 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
305 | unsigned char addr_type) | ||
306 | { | ||
307 | int err; | ||
308 | struct netdev_hw_addr *ha; | ||
309 | |||
310 | ASSERT_RTNL(); | ||
311 | |||
312 | /* | ||
313 | * We can not remove the first address from the list because | ||
314 | * dev->dev_addr points to that. | ||
315 | */ | ||
316 | ha = list_first_entry(&dev->dev_addrs.list, | ||
317 | struct netdev_hw_addr, list); | ||
318 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
319 | return -ENOENT; | ||
320 | |||
321 | err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, | ||
322 | addr_type); | ||
323 | if (!err) | ||
324 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
325 | return err; | ||
326 | } | ||
327 | EXPORT_SYMBOL(dev_addr_del); | ||
328 | |||
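
Note that dev->dev_addr is not separate storage: dev_addr_init() points it into the first list entry, which is why dev_addr_del() above refuses to drop the last reference on that entry. A toy userspace illustration of the aliasing (malloc in place of the kernel allocator; not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct hw_addr {
        unsigned char addr[6];
        int refcount;
    };

    int main(void)
    {
        struct hw_addr *first = calloc(1, sizeof(*first));
        unsigned char *dev_addr;

        if (!first)
            return 1;
        first->refcount = 1;
        dev_addr = first->addr;   /* dev->dev_addr aliases the entry */

        /* freeing the entry now would leave dev_addr dangling, which
         * is exactly the case dev_addr_del() rejects with -ENOENT */
        if (dev_addr == first->addr && first->refcount == 1)
            printf("refusing to delete: last ref backs dev_addr\n");

        free(first);
        return 0;
    }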
329 | /** | ||
330 | * dev_addr_add_multiple - Add device addresses from another device | ||
331 | * @to_dev: device to which addresses will be added | ||
332 | * @from_dev: device from which addresses will be added | ||
333 | * @addr_type: address type - 0 means type will be used from from_dev | ||
334 | * | ||
335 | * Add device addresses of one device to another. | ||
336 | * | ||
337 | * The caller must hold the rtnl_mutex. | ||
338 | */ | ||
339 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
340 | struct net_device *from_dev, | ||
341 | unsigned char addr_type) | ||
342 | { | ||
343 | int err; | ||
344 | |||
345 | ASSERT_RTNL(); | ||
346 | |||
347 | if (from_dev->addr_len != to_dev->addr_len) | ||
348 | return -EINVAL; | ||
349 | err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
350 | to_dev->addr_len, addr_type); | ||
351 | if (!err) | ||
352 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
353 | return err; | ||
354 | } | ||
355 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
356 | |||
357 | /** | ||
358 | * dev_addr_del_multiple - Delete device addresses by another device | ||
359 | * @to_dev: device where the addresses will be deleted | ||
360 | * @from_dev: device whose addresses will be deleted | ||
361 | * @addr_type: address type - 0 means type will be used from from_dev | ||
362 | * | ||
363 | * Deletes every address in to_dev that appears in from_dev's list. | ||
364 | * | ||
365 | * The caller must hold the rtnl_mutex. | ||
366 | */ | ||
367 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
368 | struct net_device *from_dev, | ||
369 | unsigned char addr_type) | ||
370 | { | ||
371 | ASSERT_RTNL(); | ||
372 | |||
373 | if (from_dev->addr_len != to_dev->addr_len) | ||
374 | return -EINVAL; | ||
375 | __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, | ||
376 | to_dev->addr_len, addr_type); | ||
377 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
378 | return 0; | ||
379 | } | ||
380 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
381 | |||
382 | /* | ||
383 | * Unicast list handling functions | ||
384 | */ | ||
385 | |||
386 | /** | ||
387 | * dev_uc_add - Add a secondary unicast address | ||
388 | * @dev: device | ||
389 | * @addr: address to add | ||
390 | * | ||
391 | * Add a secondary unicast address to the device or increase | ||
392 | * the reference count if it already exists. | ||
393 | */ | ||
394 | int dev_uc_add(struct net_device *dev, unsigned char *addr) | ||
395 | { | ||
396 | int err; | ||
397 | |||
398 | netif_addr_lock_bh(dev); | ||
399 | err = __hw_addr_add(&dev->uc, addr, dev->addr_len, | ||
400 | NETDEV_HW_ADDR_T_UNICAST); | ||
401 | if (!err) | ||
402 | __dev_set_rx_mode(dev); | ||
403 | netif_addr_unlock_bh(dev); | ||
404 | return err; | ||
405 | } | ||
406 | EXPORT_SYMBOL(dev_uc_add); | ||
407 | |||
408 | /** | ||
409 | * dev_uc_del - Release secondary unicast address. | ||
410 | * @dev: device | ||
411 | * @addr: address to delete | ||
412 | * | ||
413 | * Release reference to a secondary unicast address and remove it | ||
414 | * from the device if the reference count drops to zero. | ||
415 | */ | ||
416 | int dev_uc_del(struct net_device *dev, unsigned char *addr) | ||
417 | { | ||
418 | int err; | ||
419 | |||
420 | netif_addr_lock_bh(dev); | ||
421 | err = __hw_addr_del(&dev->uc, addr, dev->addr_len, | ||
422 | NETDEV_HW_ADDR_T_UNICAST); | ||
423 | if (!err) | ||
424 | __dev_set_rx_mode(dev); | ||
425 | netif_addr_unlock_bh(dev); | ||
426 | return err; | ||
427 | } | ||
428 | EXPORT_SYMBOL(dev_uc_del); | ||
429 | |||
430 | /** | ||
431 | * dev_uc_sync - Synchronize device's unicast list to another device | ||
432 | * @to: destination device | ||
433 | * @from: source device | ||
434 | * | ||
435 | * Add newly added addresses to the destination device and release | ||
436 | * addresses that have no users left. The source device must be | ||
437 | * locked by netif_addr_lock_bh. | ||
438 | * | ||
439 | * This function is intended to be called from the dev->set_rx_mode | ||
440 | * function of layered software devices. | ||
441 | */ | ||
442 | int dev_uc_sync(struct net_device *to, struct net_device *from) | ||
443 | { | ||
444 | int err = 0; | ||
445 | |||
446 | if (to->addr_len != from->addr_len) | ||
447 | return -EINVAL; | ||
448 | |||
449 | netif_addr_lock_bh(to); | ||
450 | err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); | ||
451 | if (!err) | ||
452 | __dev_set_rx_mode(to); | ||
453 | netif_addr_unlock_bh(to); | ||
454 | return err; | ||
455 | } | ||
456 | EXPORT_SYMBOL(dev_uc_sync); | ||
457 | |||
458 | /** | ||
459 | * dev_uc_unsync - Remove synchronized addresses from the destination device | ||
460 | * @to: destination device | ||
461 | * @from: source device | ||
462 | * | ||
463 | * Remove all addresses that were added to the destination device by | ||
464 | * dev_uc_sync(). This function is intended to be called from the | ||
465 | * dev->stop function of layered software devices. | ||
466 | */ | ||
467 | void dev_uc_unsync(struct net_device *to, struct net_device *from) | ||
468 | { | ||
469 | if (to->addr_len != from->addr_len) | ||
470 | return; | ||
471 | |||
472 | netif_addr_lock_bh(from); | ||
473 | netif_addr_lock(to); | ||
474 | __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); | ||
475 | __dev_set_rx_mode(to); | ||
476 | netif_addr_unlock(to); | ||
477 | netif_addr_unlock_bh(from); | ||
478 | } | ||
479 | EXPORT_SYMBOL(dev_uc_unsync); | ||
480 | |||
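
Both sync/unsync pairs are meant to be driven from a stacked device, as the kerneldoc above says. A kernel-context sketch of such a caller (not standalone-buildable; "struct stacked_priv" and its "lowerdev" member are hypothetical names for whatever the driver keeps):

    #include <linux/netdevice.h>

    struct stacked_priv {
        struct net_device *lowerdev;    /* hypothetical lower device */
    };

    /* .ndo_set_rx_mode: push our address lists down one layer */
    static void stacked_set_rx_mode(struct net_device *dev)
    {
        struct stacked_priv *priv = netdev_priv(dev);

        /* dev's address lists are locked by the caller here;
         * dev_uc_sync()/dev_mc_sync() lock the lower device themselves */
        dev_uc_sync(priv->lowerdev, dev);
        dev_mc_sync(priv->lowerdev, dev);
    }

    /* .ndo_stop: drop everything we pushed down */
    static int stacked_stop(struct net_device *dev)
    {
        struct stacked_priv *priv = netdev_priv(dev);

        dev_uc_unsync(priv->lowerdev, dev);
        dev_mc_unsync(priv->lowerdev, dev);
        return 0;
    }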
481 | /** | ||
482 | * dev_uc_flush - Flush unicast addresses | ||
483 | * @dev: device | ||
484 | * | ||
485 | * Flush unicast addresses. | ||
486 | */ | ||
487 | void dev_uc_flush(struct net_device *dev) | ||
488 | { | ||
489 | netif_addr_lock_bh(dev); | ||
490 | __hw_addr_flush(&dev->uc); | ||
491 | netif_addr_unlock_bh(dev); | ||
492 | } | ||
493 | EXPORT_SYMBOL(dev_uc_flush); | ||
494 | |||
495 | /** | ||
496 | * dev_uc_init - Init unicast address list | ||
497 | * @dev: device | ||
498 | * | ||
499 | * Init unicast address list. | ||
500 | */ | ||
501 | void dev_uc_init(struct net_device *dev) | ||
502 | { | ||
503 | __hw_addr_init(&dev->uc); | ||
504 | } | ||
505 | EXPORT_SYMBOL(dev_uc_init); | ||
506 | |||
507 | /* | ||
508 | * Multicast list handling functions | ||
509 | */ | ||
510 | |||
511 | static int __dev_mc_add(struct net_device *dev, unsigned char *addr, | ||
512 | bool global) | ||
513 | { | ||
514 | int err; | ||
515 | |||
516 | netif_addr_lock_bh(dev); | ||
517 | err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len, | ||
518 | NETDEV_HW_ADDR_T_MULTICAST, global); | ||
519 | if (!err) | ||
520 | __dev_set_rx_mode(dev); | ||
521 | netif_addr_unlock_bh(dev); | ||
522 | return err; | ||
523 | } | ||
524 | /** | ||
525 | * dev_mc_add - Add a multicast address | ||
526 | * @dev: device | ||
527 | * @addr: address to add | ||
528 | * | ||
529 | * Add a multicast address to the device or increase | ||
530 | * the reference count if it already exists. | ||
531 | */ | ||
532 | int dev_mc_add(struct net_device *dev, unsigned char *addr) | ||
533 | { | ||
534 | return __dev_mc_add(dev, addr, false); | ||
535 | } | ||
536 | EXPORT_SYMBOL(dev_mc_add); | ||
537 | |||
538 | /** | ||
539 | * dev_mc_add_global - Add a global multicast address | ||
540 | * @dev: device | ||
541 | * @addr: address to add | ||
542 | * | ||
543 | * Add a global multicast address to the device. | ||
544 | */ | ||
545 | int dev_mc_add_global(struct net_device *dev, unsigned char *addr) | ||
546 | { | ||
547 | return __dev_mc_add(dev, addr, true); | ||
548 | } | ||
549 | EXPORT_SYMBOL(dev_mc_add_global); | ||
550 | |||
551 | static int __dev_mc_del(struct net_device *dev, unsigned char *addr, | ||
552 | bool global) | ||
553 | { | ||
554 | int err; | ||
555 | |||
556 | netif_addr_lock_bh(dev); | ||
557 | err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len, | ||
558 | NETDEV_HW_ADDR_T_MULTICAST, global); | ||
559 | if (!err) | ||
560 | __dev_set_rx_mode(dev); | ||
561 | netif_addr_unlock_bh(dev); | ||
562 | return err; | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * dev_mc_del - Delete a multicast address. | ||
567 | * @dev: device | ||
568 | * @addr: address to delete | ||
569 | * | ||
570 | * Release reference to a multicast address and remove it | ||
571 | * from the device if the reference count drops to zero. | ||
572 | */ | ||
573 | int dev_mc_del(struct net_device *dev, unsigned char *addr) | ||
574 | { | ||
575 | return __dev_mc_del(dev, addr, false); | ||
576 | } | ||
577 | EXPORT_SYMBOL(dev_mc_del); | ||
578 | |||
579 | /** | ||
580 | * dev_mc_del_global - Delete a global multicast address. | ||
581 | * @dev: device | ||
582 | * @addr: address to delete | ||
583 | * | ||
584 | * Release reference to a multicast address and remove it | ||
585 | * from the device if the reference count drops to zero. | ||
586 | */ | ||
587 | int dev_mc_del_global(struct net_device *dev, unsigned char *addr) | ||
588 | { | ||
589 | return __dev_mc_del(dev, addr, true); | ||
590 | } | ||
591 | EXPORT_SYMBOL(dev_mc_del_global); | ||
592 | |||
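
The global variants above differ from the plain ones in one rule: the global flag holds at most one reference on an entry, so repeated global adds of the same address are idempotent instead of stacking references, while each non-global user still takes its own. A compact userspace model of that rule (an assumption, not kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    struct ent { int refcount; bool global_use; };

    static int add(struct ent *e, bool global)
    {
        if (global) {
            if (e->global_use)
                return 0;       /* already globally referenced */
            e->global_use = true;
        }
        e->refcount++;
        return 0;
    }

    int main(void)
    {
        struct ent e = { 0, false };

        add(&e, true);
        add(&e, true);          /* no extra reference taken */
        add(&e, false);         /* another user: takes its own ref */
        printf("refcount=%d global=%d\n", e.refcount, e.global_use);
        return 0;
    }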
593 | /** | ||
594 | * dev_mc_sync - Synchronize device's multicast list to another device | ||
595 | * @to: destination device | ||
596 | * @from: source device | ||
597 | * | ||
598 | * Add newly added addresses to the destination device and release | ||
599 | * addresses that have no users left. The source device must be | ||
600 | * locked by netif_addr_lock_bh. | ||
601 | * | ||
602 | * This function is intended to be called from the dev->set_multicast_list | ||
603 | * or dev->set_rx_mode function of layered software devices. | ||
604 | */ | ||
605 | int dev_mc_sync(struct net_device *to, struct net_device *from) | ||
606 | { | ||
607 | int err = 0; | ||
608 | |||
609 | if (to->addr_len != from->addr_len) | ||
610 | return -EINVAL; | ||
611 | |||
612 | netif_addr_lock_bh(to); | ||
613 | err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); | ||
614 | if (!err) | ||
615 | __dev_set_rx_mode(to); | ||
616 | netif_addr_unlock_bh(to); | ||
617 | return err; | ||
618 | } | ||
619 | EXPORT_SYMBOL(dev_mc_sync); | ||
620 | |||
621 | /** | ||
622 | * dev_mc_unsync - Remove synchronized addresses from the destination device | ||
623 | * @to: destination device | ||
624 | * @from: source device | ||
625 | * | ||
626 | * Remove all addresses that were added to the destination device by | ||
627 | * dev_mc_sync(). This function is intended to be called from the | ||
628 | * dev->stop function of layered software devices. | ||
629 | */ | ||
630 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | ||
631 | { | ||
632 | if (to->addr_len != from->addr_len) | ||
633 | return; | ||
634 | |||
635 | netif_addr_lock_bh(from); | ||
636 | netif_addr_lock(to); | ||
637 | __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); | ||
638 | __dev_set_rx_mode(to); | ||
639 | netif_addr_unlock(to); | ||
640 | netif_addr_unlock_bh(from); | ||
641 | } | ||
642 | EXPORT_SYMBOL(dev_mc_unsync); | ||
643 | |||
644 | /** | ||
645 | * dev_mc_flush - Flush multicast addresses | ||
646 | * @dev: device | ||
647 | * | ||
648 | * Flush multicast addresses. | ||
649 | */ | ||
650 | void dev_mc_flush(struct net_device *dev) | ||
651 | { | ||
652 | netif_addr_lock_bh(dev); | ||
653 | __hw_addr_flush(&dev->mc); | ||
654 | netif_addr_unlock_bh(dev); | ||
655 | } | ||
656 | EXPORT_SYMBOL(dev_mc_flush); | ||
657 | |||
658 | /** | ||
659 | * dev_mc_init - Init multicast address list | ||
660 | * @dev: device | ||
661 | * | ||
662 | * Init multicast address list. | ||
663 | */ | ||
664 | void dev_mc_init(struct net_device *dev) | ||
665 | { | ||
666 | __hw_addr_init(&dev->mc); | ||
667 | } | ||
668 | EXPORT_SYMBOL(dev_mc_init); | ||
669 | |||
670 | #ifdef CONFIG_PROC_FS | ||
671 | #include <linux/seq_file.h> | ||
672 | |||
673 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | ||
674 | { | ||
675 | struct netdev_hw_addr *ha; | ||
676 | struct net_device *dev = v; | ||
677 | |||
678 | if (v == SEQ_START_TOKEN) | ||
679 | return 0; | ||
680 | |||
681 | netif_addr_lock_bh(dev); | ||
682 | netdev_for_each_mc_addr(ha, dev) { | ||
683 | int i; | ||
684 | |||
685 | seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, | ||
686 | dev->name, ha->refcount, ha->global_use); | ||
687 | |||
688 | for (i = 0; i < dev->addr_len; i++) | ||
689 | seq_printf(seq, "%02x", ha->addr[i]); | ||
690 | |||
691 | seq_putc(seq, '\n'); | ||
692 | } | ||
693 | netif_addr_unlock_bh(dev); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
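
Each line dev_mc_seq_show() emits is ifindex, device name, refcount and global_use in fixed-width columns, followed by the address as bare hex, matching the format of the old dev_mcast.c below. An illustrative (not captured) /proc/net/dev_mcast line for the IPv4 all-hosts group 01:00:5e:00:00:01 on interface index 2:

    2    eth0            1     0     01005e000001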
697 | static const struct seq_operations dev_mc_seq_ops = { | ||
698 | .start = dev_seq_start, | ||
699 | .next = dev_seq_next, | ||
700 | .stop = dev_seq_stop, | ||
701 | .show = dev_mc_seq_show, | ||
702 | }; | ||
703 | |||
704 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | ||
705 | { | ||
706 | return seq_open_net(inode, file, &dev_mc_seq_ops, | ||
707 | sizeof(struct seq_net_private)); | ||
708 | } | ||
709 | |||
710 | static const struct file_operations dev_mc_seq_fops = { | ||
711 | .owner = THIS_MODULE, | ||
712 | .open = dev_mc_seq_open, | ||
713 | .read = seq_read, | ||
714 | .llseek = seq_lseek, | ||
715 | .release = seq_release_net, | ||
716 | }; | ||
717 | |||
718 | #endif | ||
719 | |||
720 | static int __net_init dev_mc_net_init(struct net *net) | ||
721 | { | ||
722 | if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) | ||
723 | return -ENOMEM; | ||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | static void __net_exit dev_mc_net_exit(struct net *net) | ||
728 | { | ||
729 | proc_net_remove(net, "dev_mcast"); | ||
730 | } | ||
731 | |||
732 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | ||
733 | .init = dev_mc_net_init, | ||
734 | .exit = dev_mc_net_exit, | ||
735 | }; | ||
736 | |||
737 | void __init dev_mcast_init(void) | ||
738 | { | ||
739 | register_pernet_subsys(&dev_mc_net_ops); | ||
740 | } | ||
741 | |||
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c deleted file mode 100644 index 3dc295beb483..000000000000 --- a/net/core/dev_mcast.c +++ /dev/null | |||
@@ -1,232 +0,0 @@ | |||
1 | /* | ||
2 | * Linux NET3: Multicast List maintenance. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Tim Kordas <tjk@nostromo.eeap.cwru.edu> | ||
6 | * Richard Underwood <richard@wuzz.demon.co.uk> | ||
7 | * | ||
8 | * Stir fried together from the IP multicast and CAP patches above | ||
9 | * Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
10 | * | ||
11 | * Fixes: | ||
12 | * Alan Cox : Update the device on a real delete | ||
13 | * rather than any time but... | ||
14 | * Alan Cox : IFF_ALLMULTI support. | ||
15 | * Alan Cox : New format set_multicast_list() calls. | ||
16 | * Gleb Natapov : Remove dev_mc_lock. | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/system.h> | ||
27 | #include <linux/bitops.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/socket.h> | ||
33 | #include <linux/sockios.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <linux/errno.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/if_ether.h> | ||
38 | #include <linux/inet.h> | ||
39 | #include <linux/netdevice.h> | ||
40 | #include <linux/etherdevice.h> | ||
41 | #include <linux/proc_fs.h> | ||
42 | #include <linux/seq_file.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <net/net_namespace.h> | ||
45 | #include <net/ip.h> | ||
46 | #include <net/route.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <net/sock.h> | ||
49 | #include <net/arp.h> | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Device multicast list maintenance. | ||
54 | * | ||
55 | * This is used both by IP and by the user level maintenance functions. | ||
56 | * Unlike BSD we maintain a usage count on a given multicast address so | ||
57 | * that a casual user application can add/delete multicasts used by | ||
58 | * protocols without doing damage to the protocols when it deletes the | ||
59 | * entries. It also helps IP as it tracks overlapping maps. | ||
60 | * | ||
61 | * Device mc lists are changed by bh at least if IPv6 is enabled, | ||
62 | * so that it must be bh protected. | ||
63 | * | ||
64 | * We block accesses to device mc filters with netif_tx_lock. | ||
65 | */ | ||
66 | |||
67 | /* | ||
68 | * Delete a device level multicast | ||
69 | */ | ||
70 | |||
71 | int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) | ||
72 | { | ||
73 | int err; | ||
74 | |||
75 | netif_addr_lock_bh(dev); | ||
76 | err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, | ||
77 | addr, alen, glbl); | ||
78 | if (!err) { | ||
79 | /* | ||
80 | * We have altered the list, so the card | ||
81 | * loaded filter is now wrong. Fix it | ||
82 | */ | ||
83 | |||
84 | __dev_set_rx_mode(dev); | ||
85 | } | ||
86 | netif_addr_unlock_bh(dev); | ||
87 | return err; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Add a device level multicast | ||
92 | */ | ||
93 | |||
94 | int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) | ||
95 | { | ||
96 | int err; | ||
97 | |||
98 | netif_addr_lock_bh(dev); | ||
99 | if (alen != dev->addr_len) | ||
100 | err = -EINVAL; | ||
101 | else | ||
102 | err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); | ||
103 | if (!err) | ||
104 | __dev_set_rx_mode(dev); | ||
105 | netif_addr_unlock_bh(dev); | ||
106 | return err; | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * dev_mc_sync - Synchronize device's multicast list to another device | ||
111 | * @to: destination device | ||
112 | * @from: source device | ||
113 | * | ||
114 | * Add newly added addresses to the destination device and release | ||
115 | * addresses that have no users left. The source device must be | ||
116 | * locked by netif_tx_lock_bh. | ||
117 | * | ||
118 | * This function is intended to be called from the dev->set_multicast_list | ||
119 | * or dev->set_rx_mode function of layered software devices. | ||
120 | */ | ||
121 | int dev_mc_sync(struct net_device *to, struct net_device *from) | ||
122 | { | ||
123 | int err = 0; | ||
124 | |||
125 | netif_addr_lock_bh(to); | ||
126 | err = __dev_addr_sync(&to->mc_list, &to->mc_count, | ||
127 | &from->mc_list, &from->mc_count); | ||
128 | if (!err) | ||
129 | __dev_set_rx_mode(to); | ||
130 | netif_addr_unlock_bh(to); | ||
131 | |||
132 | return err; | ||
133 | } | ||
134 | EXPORT_SYMBOL(dev_mc_sync); | ||
135 | |||
136 | |||
137 | /** | ||
138 | * dev_mc_unsync - Remove synchronized addresses from the destination | ||
139 | * device | ||
140 | * @to: destination device | ||
141 | * @from: source device | ||
142 | * | ||
143 | * Remove all addresses that were added to the destination device by | ||
144 | * dev_mc_sync(). This function is intended to be called from the | ||
145 | * dev->stop function of layered software devices. | ||
146 | */ | ||
147 | void dev_mc_unsync(struct net_device *to, struct net_device *from) | ||
148 | { | ||
149 | netif_addr_lock_bh(from); | ||
150 | netif_addr_lock(to); | ||
151 | |||
152 | __dev_addr_unsync(&to->mc_list, &to->mc_count, | ||
153 | &from->mc_list, &from->mc_count); | ||
154 | __dev_set_rx_mode(to); | ||
155 | |||
156 | netif_addr_unlock(to); | ||
157 | netif_addr_unlock_bh(from); | ||
158 | } | ||
159 | EXPORT_SYMBOL(dev_mc_unsync); | ||
160 | |||
161 | #ifdef CONFIG_PROC_FS | ||
162 | static int dev_mc_seq_show(struct seq_file *seq, void *v) | ||
163 | { | ||
164 | struct dev_addr_list *m; | ||
165 | struct net_device *dev = v; | ||
166 | |||
167 | if (v == SEQ_START_TOKEN) | ||
168 | return 0; | ||
169 | |||
170 | netif_addr_lock_bh(dev); | ||
171 | for (m = dev->mc_list; m; m = m->next) { | ||
172 | int i; | ||
173 | |||
174 | seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, | ||
175 | dev->name, m->dmi_users, m->dmi_gusers); | ||
176 | |||
177 | for (i = 0; i < m->dmi_addrlen; i++) | ||
178 | seq_printf(seq, "%02x", m->dmi_addr[i]); | ||
179 | |||
180 | seq_putc(seq, '\n'); | ||
181 | } | ||
182 | netif_addr_unlock_bh(dev); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static const struct seq_operations dev_mc_seq_ops = { | ||
187 | .start = dev_seq_start, | ||
188 | .next = dev_seq_next, | ||
189 | .stop = dev_seq_stop, | ||
190 | .show = dev_mc_seq_show, | ||
191 | }; | ||
192 | |||
193 | static int dev_mc_seq_open(struct inode *inode, struct file *file) | ||
194 | { | ||
195 | return seq_open_net(inode, file, &dev_mc_seq_ops, | ||
196 | sizeof(struct seq_net_private)); | ||
197 | } | ||
198 | |||
199 | static const struct file_operations dev_mc_seq_fops = { | ||
200 | .owner = THIS_MODULE, | ||
201 | .open = dev_mc_seq_open, | ||
202 | .read = seq_read, | ||
203 | .llseek = seq_lseek, | ||
204 | .release = seq_release_net, | ||
205 | }; | ||
206 | |||
207 | #endif | ||
208 | |||
209 | static int __net_init dev_mc_net_init(struct net *net) | ||
210 | { | ||
211 | if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) | ||
212 | return -ENOMEM; | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static void __net_exit dev_mc_net_exit(struct net *net) | ||
217 | { | ||
218 | proc_net_remove(net, "dev_mcast"); | ||
219 | } | ||
220 | |||
221 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | ||
222 | .init = dev_mc_net_init, | ||
223 | .exit = dev_mc_net_exit, | ||
224 | }; | ||
225 | |||
226 | void __init dev_mcast_init(void) | ||
227 | { | ||
228 | register_pernet_subsys(&dev_mc_net_ops); | ||
229 | } | ||
230 | |||
231 | EXPORT_SYMBOL(dev_mc_add); | ||
232 | EXPORT_SYMBOL(dev_mc_delete); | ||
diff --git a/net/core/dst.c b/net/core/dst.c index f307bc18f6a0..9920722cc82b 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -44,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0); | |||
44 | */ | 44 | */ |
45 | static struct { | 45 | static struct { |
46 | spinlock_t lock; | 46 | spinlock_t lock; |
47 | struct dst_entry *list; | 47 | struct dst_entry *list; |
48 | unsigned long timer_inc; | 48 | unsigned long timer_inc; |
49 | unsigned long timer_expires; | 49 | unsigned long timer_expires; |
50 | } dst_garbage = { | 50 | } dst_garbage = { |
@@ -52,7 +52,7 @@ static struct { | |||
52 | .timer_inc = DST_GC_MAX, | 52 | .timer_inc = DST_GC_MAX, |
53 | }; | 53 | }; |
54 | static void dst_gc_task(struct work_struct *work); | 54 | static void dst_gc_task(struct work_struct *work); |
55 | static void ___dst_free(struct dst_entry * dst); | 55 | static void ___dst_free(struct dst_entry *dst); |
56 | 56 | ||
57 | static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); | 57 | static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); |
58 | 58 | ||
@@ -136,8 +136,8 @@ loop: | |||
136 | } | 136 | } |
137 | expires = dst_garbage.timer_expires; | 137 | expires = dst_garbage.timer_expires; |
138 | /* | 138 | /* |
139 | * if the next desired timer is more than 4 seconds in the future | 139 | * if the next desired timer is more than 4 seconds in the |
140 | * then round the timer to whole seconds | 140 | * future then round the timer to whole seconds |
141 | */ | 141 | */ |
142 | if (expires > 4*HZ) | 142 | if (expires > 4*HZ) |
143 | expires = round_jiffies_relative(expires); | 143 | expires = round_jiffies_relative(expires); |
@@ -152,7 +152,8 @@ loop: | |||
152 | " expires: %lu elapsed: %lu us\n", | 152 | " expires: %lu elapsed: %lu us\n", |
153 | atomic_read(&dst_total), delayed, work_performed, | 153 | atomic_read(&dst_total), delayed, work_performed, |
154 | expires, | 154 | expires, |
155 | elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC); | 155 | elapsed.tv_sec * USEC_PER_SEC + |
156 | elapsed.tv_nsec / NSEC_PER_USEC); | ||
156 | #endif | 157 | #endif |
157 | } | 158 | } |
158 | 159 | ||
@@ -163,9 +164,9 @@ int dst_discard(struct sk_buff *skb) | |||
163 | } | 164 | } |
164 | EXPORT_SYMBOL(dst_discard); | 165 | EXPORT_SYMBOL(dst_discard); |
165 | 166 | ||
166 | void * dst_alloc(struct dst_ops * ops) | 167 | void *dst_alloc(struct dst_ops *ops) |
167 | { | 168 | { |
168 | struct dst_entry * dst; | 169 | struct dst_entry *dst; |
169 | 170 | ||
170 | if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { | 171 | if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) { |
171 | if (ops->gc(ops)) | 172 | if (ops->gc(ops)) |
@@ -185,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops) | |||
185 | atomic_inc(&ops->entries); | 186 | atomic_inc(&ops->entries); |
186 | return dst; | 187 | return dst; |
187 | } | 188 | } |
189 | EXPORT_SYMBOL(dst_alloc); | ||
188 | 190 | ||
189 | static void ___dst_free(struct dst_entry * dst) | 191 | static void ___dst_free(struct dst_entry *dst) |
190 | { | 192 | { |
191 | /* The first case (dev==NULL) is required, when | 193 | /* The first case (dev==NULL) is required, when |
192 | protocol module is unloaded. | 194 | protocol module is unloaded. |
193 | */ | 195 | */ |
194 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) { | 196 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) |
195 | dst->input = dst->output = dst_discard; | 197 | dst->input = dst->output = dst_discard; |
196 | } | ||
197 | dst->obsolete = 2; | 198 | dst->obsolete = 2; |
198 | } | 199 | } |
200 | EXPORT_SYMBOL(__dst_free); | ||
199 | 201 | ||
200 | void __dst_free(struct dst_entry * dst) | 202 | void __dst_free(struct dst_entry *dst) |
201 | { | 203 | { |
202 | spin_lock_bh(&dst_garbage.lock); | 204 | spin_lock_bh(&dst_garbage.lock); |
203 | ___dst_free(dst); | 205 | ___dst_free(dst); |
@@ -262,15 +264,16 @@ again: | |||
262 | } | 264 | } |
263 | return NULL; | 265 | return NULL; |
264 | } | 266 | } |
267 | EXPORT_SYMBOL(dst_destroy); | ||
265 | 268 | ||
266 | void dst_release(struct dst_entry *dst) | 269 | void dst_release(struct dst_entry *dst) |
267 | { | 270 | { |
268 | if (dst) { | 271 | if (dst) { |
269 | int newrefcnt; | 272 | int newrefcnt; |
270 | 273 | ||
271 | smp_mb__before_atomic_dec(); | 274 | smp_mb__before_atomic_dec(); |
272 | newrefcnt = atomic_dec_return(&dst->__refcnt); | 275 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
273 | WARN_ON(newrefcnt < 0); | 276 | WARN_ON(newrefcnt < 0); |
274 | } | 277 | } |
275 | } | 278 | } |
276 | EXPORT_SYMBOL(dst_release); | 279 | EXPORT_SYMBOL(dst_release); |
@@ -283,8 +286,8 @@ EXPORT_SYMBOL(dst_release); | |||
283 | * | 286 | * |
284 | * Commented and originally written by Alexey. | 287 | * Commented and originally written by Alexey. |
285 | */ | 288 | */ |
286 | static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, | 289 | static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, |
287 | int unregister) | 290 | int unregister) |
288 | { | 291 | { |
289 | if (dst->ops->ifdown) | 292 | if (dst->ops->ifdown) |
290 | dst->ops->ifdown(dst, dev, unregister); | 293 | dst->ops->ifdown(dst, dev, unregister); |
@@ -306,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
306 | } | 309 | } |
307 | } | 310 | } |
308 | 311 | ||
309 | static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) | 312 | static int dst_dev_event(struct notifier_block *this, unsigned long event, |
313 | void *ptr) | ||
310 | { | 314 | { |
311 | struct net_device *dev = ptr; | 315 | struct net_device *dev = ptr; |
312 | struct dst_entry *dst, *last = NULL; | 316 | struct dst_entry *dst, *last = NULL; |
@@ -329,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void | |||
329 | last->next = dst; | 333 | last->next = dst; |
330 | else | 334 | else |
331 | dst_busy_list = dst; | 335 | dst_busy_list = dst; |
332 | for (; dst; dst = dst->next) { | 336 | for (; dst; dst = dst->next) |
333 | dst_ifdown(dst, dev, event != NETDEV_DOWN); | 337 | dst_ifdown(dst, dev, event != NETDEV_DOWN); |
334 | } | ||
335 | mutex_unlock(&dst_gc_mutex); | 338 | mutex_unlock(&dst_gc_mutex); |
336 | break; | 339 | break; |
337 | } | 340 | } |
@@ -346,7 +349,3 @@ void __init dst_init(void) | |||
346 | { | 349 | { |
347 | register_netdevice_notifier(&dst_dev_notifier); | 350 | register_netdevice_notifier(&dst_dev_notifier); |
348 | } | 351 | } |
349 | |||
350 | EXPORT_SYMBOL(__dst_free); | ||
351 | EXPORT_SYMBOL(dst_alloc); | ||
352 | EXPORT_SYMBOL(dst_destroy); | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 9d55c57f318a..1a7db92037fa 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <linux/ethtool.h> | 18 | #include <linux/ethtool.h> |
19 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/uaccess.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <asm/uaccess.h> | ||
23 | 23 | ||
24 | /* | 24 | /* |
25 | * Some useful ethtool_ops methods that're device independent. | 25 | * Some useful ethtool_ops methods that're device independent. |
@@ -31,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev) | |||
31 | { | 31 | { |
32 | return netif_carrier_ok(dev) ? 1 : 0; | 32 | return netif_carrier_ok(dev) ? 1 : 0; |
33 | } | 33 | } |
34 | EXPORT_SYMBOL(ethtool_op_get_link); | ||
34 | 35 | ||
35 | u32 ethtool_op_get_rx_csum(struct net_device *dev) | 36 | u32 ethtool_op_get_rx_csum(struct net_device *dev) |
36 | { | 37 | { |
@@ -63,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) | |||
63 | 64 | ||
64 | return 0; | 65 | return 0; |
65 | } | 66 | } |
67 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | ||
66 | 68 | ||
67 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | 69 | int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) |
68 | { | 70 | { |
@@ -73,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) | |||
73 | 75 | ||
74 | return 0; | 76 | return 0; |
75 | } | 77 | } |
78 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
76 | 79 | ||
77 | u32 ethtool_op_get_sg(struct net_device *dev) | 80 | u32 ethtool_op_get_sg(struct net_device *dev) |
78 | { | 81 | { |
79 | return (dev->features & NETIF_F_SG) != 0; | 82 | return (dev->features & NETIF_F_SG) != 0; |
80 | } | 83 | } |
84 | EXPORT_SYMBOL(ethtool_op_get_sg); | ||
81 | 85 | ||
82 | int ethtool_op_set_sg(struct net_device *dev, u32 data) | 86 | int ethtool_op_set_sg(struct net_device *dev, u32 data) |
83 | { | 87 | { |
@@ -88,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data) | |||
88 | 92 | ||
89 | return 0; | 93 | return 0; |
90 | } | 94 | } |
95 | EXPORT_SYMBOL(ethtool_op_set_sg); | ||
91 | 96 | ||
92 | u32 ethtool_op_get_tso(struct net_device *dev) | 97 | u32 ethtool_op_get_tso(struct net_device *dev) |
93 | { | 98 | { |
94 | return (dev->features & NETIF_F_TSO) != 0; | 99 | return (dev->features & NETIF_F_TSO) != 0; |
95 | } | 100 | } |
101 | EXPORT_SYMBOL(ethtool_op_get_tso); | ||
96 | 102 | ||
97 | int ethtool_op_set_tso(struct net_device *dev, u32 data) | 103 | int ethtool_op_set_tso(struct net_device *dev, u32 data) |
98 | { | 104 | { |
@@ -103,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data) | |||
103 | 109 | ||
104 | return 0; | 110 | return 0; |
105 | } | 111 | } |
112 | EXPORT_SYMBOL(ethtool_op_set_tso); | ||
106 | 113 | ||
107 | u32 ethtool_op_get_ufo(struct net_device *dev) | 114 | u32 ethtool_op_get_ufo(struct net_device *dev) |
108 | { | 115 | { |
109 | return (dev->features & NETIF_F_UFO) != 0; | 116 | return (dev->features & NETIF_F_UFO) != 0; |
110 | } | 117 | } |
118 | EXPORT_SYMBOL(ethtool_op_get_ufo); | ||
111 | 119 | ||
112 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) | 120 | int ethtool_op_set_ufo(struct net_device *dev, u32 data) |
113 | { | 121 | { |
@@ -117,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data) | |||
117 | dev->features &= ~NETIF_F_UFO; | 125 | dev->features &= ~NETIF_F_UFO; |
118 | return 0; | 126 | return 0; |
119 | } | 127 | } |
128 | EXPORT_SYMBOL(ethtool_op_set_ufo); | ||
120 | 129 | ||
121 | /* the following list of flags are the same as their associated | 130 | /* the following list of flags are the same as their associated |
122 | * NETIF_F_xxx values in include/linux/netdevice.h | 131 | * NETIF_F_xxx values in include/linux/netdevice.h |
123 | */ | 132 | */ |
124 | static const u32 flags_dup_features = | 133 | static const u32 flags_dup_features = |
125 | (ETH_FLAG_LRO | ETH_FLAG_NTUPLE); | 134 | (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); |
126 | 135 | ||
127 | u32 ethtool_op_get_flags(struct net_device *dev) | 136 | u32 ethtool_op_get_flags(struct net_device *dev) |
128 | { | 137 | { |
@@ -133,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev) | |||
133 | 142 | ||
134 | return dev->features & flags_dup_features; | 143 | return dev->features & flags_dup_features; |
135 | } | 144 | } |
145 | EXPORT_SYMBOL(ethtool_op_get_flags); | ||
136 | 146 | ||
137 | int ethtool_op_set_flags(struct net_device *dev, u32 data) | 147 | int ethtool_op_set_flags(struct net_device *dev, u32 data) |
138 | { | 148 | { |
@@ -153,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data) | |||
153 | features &= ~NETIF_F_NTUPLE; | 163 | features &= ~NETIF_F_NTUPLE; |
154 | } | 164 | } |
155 | 165 | ||
166 | if (data & ETH_FLAG_RXHASH) | ||
167 | features |= NETIF_F_RXHASH; | ||
168 | else | ||
169 | features &= ~NETIF_F_RXHASH; | ||
170 | |||
156 | dev->features = features; | 171 | dev->features = features; |
157 | return 0; | 172 | return 0; |
158 | } | 173 | } |
174 | EXPORT_SYMBOL(ethtool_op_set_flags); | ||
159 | 175 | ||
160 | void ethtool_ntuple_flush(struct net_device *dev) | 176 | void ethtool_ntuple_flush(struct net_device *dev) |
161 | { | 177 | { |
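
With ETH_FLAG_RXHASH wired into ethtool_op_get_flags()/ethtool_op_set_flags() above, userspace can toggle receive hashing through the generic flags ioctl. A hedged userspace sketch of that path (placeholder device name; read-modify-write of the flags word, error handling trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_value ev = { .cmd = ETHTOOL_GFLAGS };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* placeholder */
        ifr.ifr_data = (char *)&ev;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {       /* read flags */
            ev.cmd = ETHTOOL_SFLAGS;
            ev.data |= ETH_FLAG_RXHASH;                /* request rxhash */
            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SFLAGS");
        }
        close(fd);
        return 0;
    }

As the hunk above shows, the flag maps one-to-one onto NETIF_F_RXHASH in dev->features.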
@@ -201,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) | |||
201 | return dev->ethtool_ops->set_settings(dev, &cmd); | 217 | return dev->ethtool_ops->set_settings(dev, &cmd); |
202 | } | 218 | } |
203 | 219 | ||
204 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) | 220 | static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, |
221 | void __user *useraddr) | ||
205 | { | 222 | { |
206 | struct ethtool_drvinfo info; | 223 | struct ethtool_drvinfo info; |
207 | const struct ethtool_ops *ops = dev->ethtool_ops; | 224 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -241,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _ | |||
241 | } | 258 | } |
242 | 259 | ||
243 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, | 260 | static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev, |
244 | void __user *useraddr) | 261 | void __user *useraddr) |
245 | { | 262 | { |
246 | struct ethtool_sset_info info; | 263 | struct ethtool_sset_info info; |
247 | const struct ethtool_ops *ops = dev->ethtool_ops; | 264 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -300,7 +317,8 @@ out: | |||
300 | return ret; | 317 | return ret; |
301 | } | 318 | } |
302 | 319 | ||
303 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) | 320 | static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, |
321 | void __user *useraddr) | ||
304 | { | 322 | { |
305 | struct ethtool_rxnfc cmd; | 323 | struct ethtool_rxnfc cmd; |
306 | 324 | ||
@@ -313,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u | |||
313 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); | 331 | return dev->ethtool_ops->set_rxnfc(dev, &cmd); |
314 | } | 332 | } |
315 | 333 | ||
316 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) | 334 | static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, |
335 | void __user *useraddr) | ||
317 | { | 336 | { |
318 | struct ethtool_rxnfc info; | 337 | struct ethtool_rxnfc info; |
319 | const struct ethtool_ops *ops = dev->ethtool_ops; | 338 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -358,8 +377,8 @@ err_out: | |||
358 | } | 377 | } |
359 | 378 | ||
360 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | 379 | static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, |
361 | struct ethtool_rx_ntuple_flow_spec *spec, | 380 | struct ethtool_rx_ntuple_flow_spec *spec, |
362 | struct ethtool_rx_ntuple_flow_spec_container *fsc) | 381 | struct ethtool_rx_ntuple_flow_spec_container *fsc) |
363 | { | 382 | { |
364 | 383 | ||
365 | /* don't add filters forever */ | 384 | /* don't add filters forever */ |
@@ -385,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, | |||
385 | list->count++; | 404 | list->count++; |
386 | } | 405 | } |
387 | 406 | ||
388 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr) | 407 | static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, |
408 | void __user *useraddr) | ||
389 | { | 409 | { |
390 | struct ethtool_rx_ntuple cmd; | 410 | struct ethtool_rx_ntuple cmd; |
391 | const struct ethtool_ops *ops = dev->ethtool_ops; | 411 | const struct ethtool_ops *ops = dev->ethtool_ops; |
@@ -510,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
510 | case UDP_V4_FLOW: | 530 | case UDP_V4_FLOW: |
511 | case SCTP_V4_FLOW: | 531 | case SCTP_V4_FLOW: |
512 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 532 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
513 | fsc->fs.h_u.tcp_ip4_spec.ip4src); | 533 | fsc->fs.h_u.tcp_ip4_spec.ip4src); |
514 | p += ETH_GSTRING_LEN; | 534 | p += ETH_GSTRING_LEN; |
515 | num_strings++; | 535 | num_strings++; |
516 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 536 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
517 | fsc->fs.m_u.tcp_ip4_spec.ip4src); | 537 | fsc->fs.m_u.tcp_ip4_spec.ip4src); |
518 | p += ETH_GSTRING_LEN; | 538 | p += ETH_GSTRING_LEN; |
519 | num_strings++; | 539 | num_strings++; |
520 | sprintf(p, "\tDest IP addr: 0x%x\n", | 540 | sprintf(p, "\tDest IP addr: 0x%x\n", |
521 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); | 541 | fsc->fs.h_u.tcp_ip4_spec.ip4dst); |
522 | p += ETH_GSTRING_LEN; | 542 | p += ETH_GSTRING_LEN; |
523 | num_strings++; | 543 | num_strings++; |
524 | sprintf(p, "\tDest IP mask: 0x%x\n", | 544 | sprintf(p, "\tDest IP mask: 0x%x\n", |
525 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); | 545 | fsc->fs.m_u.tcp_ip4_spec.ip4dst); |
526 | p += ETH_GSTRING_LEN; | 546 | p += ETH_GSTRING_LEN; |
527 | num_strings++; | 547 | num_strings++; |
528 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", | 548 | sprintf(p, "\tSrc Port: %d, mask: 0x%x\n", |
529 | fsc->fs.h_u.tcp_ip4_spec.psrc, | 549 | fsc->fs.h_u.tcp_ip4_spec.psrc, |
530 | fsc->fs.m_u.tcp_ip4_spec.psrc); | 550 | fsc->fs.m_u.tcp_ip4_spec.psrc); |
531 | p += ETH_GSTRING_LEN; | 551 | p += ETH_GSTRING_LEN; |
532 | num_strings++; | 552 | num_strings++; |
533 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", | 553 | sprintf(p, "\tDest Port: %d, mask: 0x%x\n", |
534 | fsc->fs.h_u.tcp_ip4_spec.pdst, | 554 | fsc->fs.h_u.tcp_ip4_spec.pdst, |
535 | fsc->fs.m_u.tcp_ip4_spec.pdst); | 555 | fsc->fs.m_u.tcp_ip4_spec.pdst); |
536 | p += ETH_GSTRING_LEN; | 556 | p += ETH_GSTRING_LEN; |
537 | num_strings++; | 557 | num_strings++; |
538 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 558 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
539 | fsc->fs.h_u.tcp_ip4_spec.tos, | 559 | fsc->fs.h_u.tcp_ip4_spec.tos, |
540 | fsc->fs.m_u.tcp_ip4_spec.tos); | 560 | fsc->fs.m_u.tcp_ip4_spec.tos); |
541 | p += ETH_GSTRING_LEN; | 561 | p += ETH_GSTRING_LEN; |
542 | num_strings++; | 562 | num_strings++; |
543 | break; | 563 | break; |
544 | case AH_ESP_V4_FLOW: | 564 | case AH_ESP_V4_FLOW: |
545 | case ESP_V4_FLOW: | 565 | case ESP_V4_FLOW: |
546 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 566 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
547 | fsc->fs.h_u.ah_ip4_spec.ip4src); | 567 | fsc->fs.h_u.ah_ip4_spec.ip4src); |
548 | p += ETH_GSTRING_LEN; | 568 | p += ETH_GSTRING_LEN; |
549 | num_strings++; | 569 | num_strings++; |
550 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 570 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
551 | fsc->fs.m_u.ah_ip4_spec.ip4src); | 571 | fsc->fs.m_u.ah_ip4_spec.ip4src); |
552 | p += ETH_GSTRING_LEN; | 572 | p += ETH_GSTRING_LEN; |
553 | num_strings++; | 573 | num_strings++; |
554 | sprintf(p, "\tDest IP addr: 0x%x\n", | 574 | sprintf(p, "\tDest IP addr: 0x%x\n", |
555 | fsc->fs.h_u.ah_ip4_spec.ip4dst); | 575 | fsc->fs.h_u.ah_ip4_spec.ip4dst); |
556 | p += ETH_GSTRING_LEN; | 576 | p += ETH_GSTRING_LEN; |
557 | num_strings++; | 577 | num_strings++; |
558 | sprintf(p, "\tDest IP mask: 0x%x\n", | 578 | sprintf(p, "\tDest IP mask: 0x%x\n", |
559 | fsc->fs.m_u.ah_ip4_spec.ip4dst); | 579 | fsc->fs.m_u.ah_ip4_spec.ip4dst); |
560 | p += ETH_GSTRING_LEN; | 580 | p += ETH_GSTRING_LEN; |
561 | num_strings++; | 581 | num_strings++; |
562 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", | 582 | sprintf(p, "\tSPI: %d, mask: 0x%x\n", |
563 | fsc->fs.h_u.ah_ip4_spec.spi, | 583 | fsc->fs.h_u.ah_ip4_spec.spi, |
564 | fsc->fs.m_u.ah_ip4_spec.spi); | 584 | fsc->fs.m_u.ah_ip4_spec.spi); |
565 | p += ETH_GSTRING_LEN; | 585 | p += ETH_GSTRING_LEN; |
566 | num_strings++; | 586 | num_strings++; |
567 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 587 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
568 | fsc->fs.h_u.ah_ip4_spec.tos, | 588 | fsc->fs.h_u.ah_ip4_spec.tos, |
569 | fsc->fs.m_u.ah_ip4_spec.tos); | 589 | fsc->fs.m_u.ah_ip4_spec.tos); |
570 | p += ETH_GSTRING_LEN; | 590 | p += ETH_GSTRING_LEN; |
571 | num_strings++; | 591 | num_strings++; |
572 | break; | 592 | break; |
573 | case IP_USER_FLOW: | 593 | case IP_USER_FLOW: |
574 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 594 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
575 | fsc->fs.h_u.raw_ip4_spec.ip4src); | 595 | fsc->fs.h_u.raw_ip4_spec.ip4src); |
576 | p += ETH_GSTRING_LEN; | 596 | p += ETH_GSTRING_LEN; |
577 | num_strings++; | 597 | num_strings++; |
578 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 598 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
579 | fsc->fs.m_u.raw_ip4_spec.ip4src); | 599 | fsc->fs.m_u.raw_ip4_spec.ip4src); |
580 | p += ETH_GSTRING_LEN; | 600 | p += ETH_GSTRING_LEN; |
581 | num_strings++; | 601 | num_strings++; |
582 | sprintf(p, "\tDest IP addr: 0x%x\n", | 602 | sprintf(p, "\tDest IP addr: 0x%x\n", |
583 | fsc->fs.h_u.raw_ip4_spec.ip4dst); | 603 | fsc->fs.h_u.raw_ip4_spec.ip4dst); |
584 | p += ETH_GSTRING_LEN; | 604 | p += ETH_GSTRING_LEN; |
585 | num_strings++; | 605 | num_strings++; |
586 | sprintf(p, "\tDest IP mask: 0x%x\n", | 606 | sprintf(p, "\tDest IP mask: 0x%x\n", |
587 | fsc->fs.m_u.raw_ip4_spec.ip4dst); | 607 | fsc->fs.m_u.raw_ip4_spec.ip4dst); |
588 | p += ETH_GSTRING_LEN; | 608 | p += ETH_GSTRING_LEN; |
589 | num_strings++; | 609 | num_strings++; |
590 | break; | 610 | break; |
591 | case IPV4_FLOW: | 611 | case IPV4_FLOW: |
592 | sprintf(p, "\tSrc IP addr: 0x%x\n", | 612 | sprintf(p, "\tSrc IP addr: 0x%x\n", |
593 | fsc->fs.h_u.usr_ip4_spec.ip4src); | 613 | fsc->fs.h_u.usr_ip4_spec.ip4src); |
594 | p += ETH_GSTRING_LEN; | 614 | p += ETH_GSTRING_LEN; |
595 | num_strings++; | 615 | num_strings++; |
596 | sprintf(p, "\tSrc IP mask: 0x%x\n", | 616 | sprintf(p, "\tSrc IP mask: 0x%x\n", |
597 | fsc->fs.m_u.usr_ip4_spec.ip4src); | 617 | fsc->fs.m_u.usr_ip4_spec.ip4src); |
598 | p += ETH_GSTRING_LEN; | 618 | p += ETH_GSTRING_LEN; |
599 | num_strings++; | 619 | num_strings++; |
600 | sprintf(p, "\tDest IP addr: 0x%x\n", | 620 | sprintf(p, "\tDest IP addr: 0x%x\n", |
601 | fsc->fs.h_u.usr_ip4_spec.ip4dst); | 621 | fsc->fs.h_u.usr_ip4_spec.ip4dst); |
602 | p += ETH_GSTRING_LEN; | 622 | p += ETH_GSTRING_LEN; |
603 | num_strings++; | 623 | num_strings++; |
604 | sprintf(p, "\tDest IP mask: 0x%x\n", | 624 | sprintf(p, "\tDest IP mask: 0x%x\n", |
605 | fsc->fs.m_u.usr_ip4_spec.ip4dst); | 625 | fsc->fs.m_u.usr_ip4_spec.ip4dst); |
606 | p += ETH_GSTRING_LEN; | 626 | p += ETH_GSTRING_LEN; |
607 | num_strings++; | 627 | num_strings++; |
608 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", | 628 | sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", |
609 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, | 629 | fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, |
610 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); | 630 | fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); |
611 | p += ETH_GSTRING_LEN; | 631 | p += ETH_GSTRING_LEN; |
612 | num_strings++; | 632 | num_strings++; |
613 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", | 633 | sprintf(p, "\tTOS: %d, mask: 0x%x\n", |
614 | fsc->fs.h_u.usr_ip4_spec.tos, | 634 | fsc->fs.h_u.usr_ip4_spec.tos, |
615 | fsc->fs.m_u.usr_ip4_spec.tos); | 635 | fsc->fs.m_u.usr_ip4_spec.tos); |
616 | p += ETH_GSTRING_LEN; | 636 | p += ETH_GSTRING_LEN; |
617 | num_strings++; | 637 | num_strings++; |
618 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", | 638 | sprintf(p, "\tIP Version: %d, mask: 0x%x\n", |
619 | fsc->fs.h_u.usr_ip4_spec.ip_ver, | 639 | fsc->fs.h_u.usr_ip4_spec.ip_ver, |
620 | fsc->fs.m_u.usr_ip4_spec.ip_ver); | 640 | fsc->fs.m_u.usr_ip4_spec.ip_ver); |
621 | p += ETH_GSTRING_LEN; | 641 | p += ETH_GSTRING_LEN; |
622 | num_strings++; | 642 | num_strings++; |
623 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", | 643 | sprintf(p, "\tProtocol: %d, mask: 0x%x\n", |
624 | fsc->fs.h_u.usr_ip4_spec.proto, | 644 | fsc->fs.h_u.usr_ip4_spec.proto, |
625 | fsc->fs.m_u.usr_ip4_spec.proto); | 645 | fsc->fs.m_u.usr_ip4_spec.proto); |
626 | p += ETH_GSTRING_LEN; | 646 | p += ETH_GSTRING_LEN; |
627 | num_strings++; | 647 | num_strings++; |
628 | break; | 648 | break; |
629 | }; | 649 | }; |
630 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", | 650 | sprintf(p, "\tVLAN: %d, mask: 0x%x\n", |
631 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); | 651 | fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); |
632 | p += ETH_GSTRING_LEN; | 652 | p += ETH_GSTRING_LEN; |
633 | num_strings++; | 653 | num_strings++; |
634 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); | 654 | sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); |
@@ -641,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) | |||
641 | sprintf(p, "\tAction: Drop\n"); | 661 | sprintf(p, "\tAction: Drop\n"); |
642 | else | 662 | else |
643 | sprintf(p, "\tAction: Direct to queue %d\n", | 663 | sprintf(p, "\tAction: Direct to queue %d\n", |
644 | fsc->fs.action); | 664 | fsc->fs.action); |
645 | p += ETH_GSTRING_LEN; | 665 | p += ETH_GSTRING_LEN; |
646 | num_strings++; | 666 | num_strings++; |
647 | unknown_filter: | 667 | unknown_filter: |
@@ -853,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) | |||
853 | return ret; | 873 | return ret; |
854 | } | 874 | } |
855 | 875 | ||
856 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) | 876 | static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, |
877 | void __user *useraddr) | ||
857 | { | 878 | { |
858 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; | 879 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; |
859 | 880 | ||
@@ -867,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void | |||
867 | return 0; | 888 | return 0; |
868 | } | 889 | } |
869 | 890 | ||
870 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) | 891 | static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, |
892 | void __user *useraddr) | ||
871 | { | 893 | { |
872 | struct ethtool_coalesce coalesce; | 894 | struct ethtool_coalesce coalesce; |
873 | 895 | ||
@@ -971,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr) | |||
971 | 993 | ||
972 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); | 994 | return dev->ethtool_ops->set_tx_csum(dev, edata.data); |
973 | } | 995 | } |
996 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | ||
974 | 997 | ||
975 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) | 998 | static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) |
976 | { | 999 | { |
@@ -1042,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr) | |||
1042 | 1065 | ||
1043 | edata.data = dev->features & NETIF_F_GSO; | 1066 | edata.data = dev->features & NETIF_F_GSO; |
1044 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 1067 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
1045 | return -EFAULT; | 1068 | return -EFAULT; |
1046 | return 0; | 1069 | return 0; |
1047 | } | 1070 | } |
1048 | 1071 | ||
@@ -1065,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr) | |||
1065 | 1088 | ||
1066 | edata.data = dev->features & NETIF_F_GRO; | 1089 | edata.data = dev->features & NETIF_F_GRO; |
1067 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | 1090 | if (copy_to_user(useraddr, &edata, sizeof(edata))) |
1068 | return -EFAULT; | 1091 | return -EFAULT; |
1069 | return 0; | 1092 | return 0; |
1070 | } | 1093 | } |
1071 | 1094 | ||
@@ -1277,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, | |||
1277 | return actor(dev, edata.data); | 1300 | return actor(dev, edata.data); |
1278 | } | 1301 | } |
1279 | 1302 | ||
1280 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) | 1303 | static noinline_for_stack int ethtool_flash_device(struct net_device *dev, |
1304 | char __user *useraddr) | ||
1281 | { | 1305 | { |
1282 | struct ethtool_flash efl; | 1306 | struct ethtool_flash efl; |
1283 | 1307 | ||
@@ -1306,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1306 | if (!dev->ethtool_ops) | 1330 | if (!dev->ethtool_ops) |
1307 | return -EOPNOTSUPP; | 1331 | return -EOPNOTSUPP; |
1308 | 1332 | ||
1309 | if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) | 1333 | if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) |
1310 | return -EFAULT; | 1334 | return -EFAULT; |
1311 | 1335 | ||
1312 | /* Allow some commands to be done by anyone */ | 1336 | /* Allow some commands to be done by anyone */ |
1313 | switch(ethcmd) { | 1337 | switch (ethcmd) { |
1314 | case ETHTOOL_GDRVINFO: | 1338 | case ETHTOOL_GDRVINFO: |
1315 | case ETHTOOL_GMSGLVL: | 1339 | case ETHTOOL_GMSGLVL: |
1316 | case ETHTOOL_GCOALESCE: | 1340 | case ETHTOOL_GCOALESCE: |
@@ -1338,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1338 | return -EPERM; | 1362 | return -EPERM; |
1339 | } | 1363 | } |
1340 | 1364 | ||
1341 | if (dev->ethtool_ops->begin) | 1365 | if (dev->ethtool_ops->begin) { |
1342 | if ((rc = dev->ethtool_ops->begin(dev)) < 0) | 1366 | rc = dev->ethtool_ops->begin(dev); |
1367 | if (rc < 0) | ||
1343 | return rc; | 1368 | return rc; |
1344 | 1369 | } | |
1345 | old_features = dev->features; | 1370 | old_features = dev->features; |
1346 | 1371 | ||
1347 | switch (ethcmd) { | 1372 | switch (ethcmd) { |
@@ -1531,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1531 | 1556 | ||
1532 | return rc; | 1557 | return rc; |
1533 | } | 1558 | } |
1534 | |||
1535 | EXPORT_SYMBOL(ethtool_op_get_link); | ||
1536 | EXPORT_SYMBOL(ethtool_op_get_sg); | ||
1537 | EXPORT_SYMBOL(ethtool_op_get_tso); | ||
1538 | EXPORT_SYMBOL(ethtool_op_set_sg); | ||
1539 | EXPORT_SYMBOL(ethtool_op_set_tso); | ||
1540 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | ||
1541 | EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); | ||
1542 | EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); | ||
1543 | EXPORT_SYMBOL(ethtool_op_set_ufo); | ||
1544 | EXPORT_SYMBOL(ethtool_op_get_ufo); | ||
1545 | EXPORT_SYMBOL(ethtool_op_set_flags); | ||
1546 | EXPORT_SYMBOL(ethtool_op_get_flags); | ||
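Note: the ethtool hunks above are mostly style cleanups with one readability win: each EXPORT_SYMBOL() now sits directly under the function it exports, and the begin() call no longer buries an assignment inside the if-condition. A minimal sketch of the preferred pattern (illustrative only; struct my_dev/my_ops are hypothetical and not part of the diff):

	#include <linux/module.h>

	struct my_dev;

	struct my_ops {
		int (*begin)(struct my_dev *dev);
	};

	struct my_dev {
		const struct my_ops *ops;
	};

	int my_begin(struct my_dev *dev)
	{
		int rc;

		if (dev->ops->begin) {
			rc = dev->ops->begin(dev);	/* assign first ... */
			if (rc < 0)			/* ... then test */
				return rc;
		}
		return 0;
	}
	EXPORT_SYMBOL(my_begin);	/* export adjacent to the definition */
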
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index d2c3e7dc2e5f..1bc66592453c 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -39,6 +39,24 @@ int fib_default_rule_add(struct fib_rules_ops *ops, | |||
39 | } | 39 | } |
40 | EXPORT_SYMBOL(fib_default_rule_add); | 40 | EXPORT_SYMBOL(fib_default_rule_add); |
41 | 41 | ||
42 | u32 fib_default_rule_pref(struct fib_rules_ops *ops) | ||
43 | { | ||
44 | struct list_head *pos; | ||
45 | struct fib_rule *rule; | ||
46 | |||
47 | if (!list_empty(&ops->rules_list)) { | ||
48 | pos = ops->rules_list.next; | ||
49 | if (pos->next != &ops->rules_list) { | ||
50 | rule = list_entry(pos->next, struct fib_rule, list); | ||
51 | if (rule->pref) | ||
52 | return rule->pref - 1; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | return 0; | ||
57 | } | ||
58 | EXPORT_SYMBOL(fib_default_rule_pref); | ||
59 | |||
42 | static void notify_rule_change(int event, struct fib_rule *rule, | 60 | static void notify_rule_change(int event, struct fib_rule *rule, |
43 | struct fib_rules_ops *ops, struct nlmsghdr *nlh, | 61 | struct fib_rules_ops *ops, struct nlmsghdr *nlh, |
44 | u32 pid); | 62 | u32 pid); |
@@ -109,7 +127,7 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
109 | struct fib_rules_ops *ops; | 127 | struct fib_rules_ops *ops; |
110 | int err; | 128 | int err; |
111 | 129 | ||
112 | ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL); | 130 | ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); |
113 | if (ops == NULL) | 131 | if (ops == NULL) |
114 | return ERR_PTR(-ENOMEM); | 132 | return ERR_PTR(-ENOMEM); |
115 | 133 | ||
@@ -124,7 +142,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net) | |||
124 | 142 | ||
125 | return ops; | 143 | return ops; |
126 | } | 144 | } |
127 | |||
128 | EXPORT_SYMBOL_GPL(fib_rules_register); | 145 | EXPORT_SYMBOL_GPL(fib_rules_register); |
129 | 146 | ||
130 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) | 147 | void fib_rules_cleanup_ops(struct fib_rules_ops *ops) |
@@ -158,7 +175,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
158 | 175 | ||
159 | call_rcu(&ops->rcu, fib_rules_put_rcu); | 176 | call_rcu(&ops->rcu, fib_rules_put_rcu); |
160 | } | 177 | } |
161 | |||
162 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 178 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
163 | 179 | ||
164 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, | 180 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, |
@@ -221,7 +237,6 @@ out: | |||
221 | 237 | ||
222 | return err; | 238 | return err; |
223 | } | 239 | } |
224 | |||
225 | EXPORT_SYMBOL_GPL(fib_rules_lookup); | 240 | EXPORT_SYMBOL_GPL(fib_rules_lookup); |
226 | 241 | ||
227 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, | 242 | static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, |
@@ -520,6 +535,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, | |||
520 | return -EMSGSIZE; | 535 | return -EMSGSIZE; |
521 | 536 | ||
522 | frh = nlmsg_data(nlh); | 537 | frh = nlmsg_data(nlh); |
538 | frh->family = ops->family; | ||
523 | frh->table = rule->table; | 539 | frh->table = rule->table; |
524 | NLA_PUT_U32(skb, FRA_TABLE, rule->table); | 540 | NLA_PUT_U32(skb, FRA_TABLE, rule->table); |
525 | frh->res1 = 0; | 541 | frh->res1 = 0; |
@@ -614,7 +630,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) | |||
614 | break; | 630 | break; |
615 | 631 | ||
616 | cb->args[1] = 0; | 632 | cb->args[1] = 0; |
617 | skip: | 633 | skip: |
618 | idx++; | 634 | idx++; |
619 | } | 635 | } |
620 | rcu_read_unlock(); | 636 | rcu_read_unlock(); |
@@ -686,7 +702,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
686 | struct fib_rules_ops *ops; | 702 | struct fib_rules_ops *ops; |
687 | 703 | ||
688 | ASSERT_RTNL(); | 704 | ASSERT_RTNL(); |
689 | rcu_read_lock(); | ||
690 | 705 | ||
691 | switch (event) { | 706 | switch (event) { |
692 | case NETDEV_REGISTER: | 707 | case NETDEV_REGISTER: |
@@ -700,8 +715,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, | |||
700 | break; | 715 | break; |
701 | } | 716 | } |
702 | 717 | ||
703 | rcu_read_unlock(); | ||
704 | |||
705 | return NOTIFY_DONE; | 718 | return NOTIFY_DONE; |
706 | } | 719 | } |
707 | 720 | ||
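Note: besides the sizeof() style fixes and dropping the redundant rcu_read_lock() from the RTNL-protected notifier, this hunk adds fib_default_rule_pref(), which returns one less than the preference of the second rule in the list, and stamps frh->family from ops->family so dumped rules report the right family. With the usual IPv4 setup, local(0) -> main(32766) -> default(32767), the helper yields 32765, so an auto-priority rule slots in just ahead of main. A sketch of how a family would wire it up (the actual ipv4/ipv6/decnet conversions live outside net/core and are not shown here; the field values below are illustrative):

	#include <linux/rtnetlink.h>
	#include <net/fib_rules.h>

	/* fragment of a family's ops template; other callbacks omitted */
	static struct fib_rules_ops my_rules_ops_template = {
		.family		= AF_INET,
		.rule_size	= sizeof(struct fib_rule),
		.default_pref	= fib_default_rule_pref, /* new shared helper */
		.nlgroup	= RTNLGRP_IPV4_RULE,
	};
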
diff --git a/net/core/filter.c b/net/core/filter.c index ff943bed21af..da69fb728d32 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -302,6 +302,8 @@ load_b: | |||
302 | A = skb->pkt_type; | 302 | A = skb->pkt_type; |
303 | continue; | 303 | continue; |
304 | case SKF_AD_IFINDEX: | 304 | case SKF_AD_IFINDEX: |
305 | if (!skb->dev) | ||
306 | return 0; | ||
305 | A = skb->dev->ifindex; | 307 | A = skb->dev->ifindex; |
306 | continue; | 308 | continue; |
307 | case SKF_AD_MARK: | 309 | case SKF_AD_MARK: |
@@ -310,6 +312,11 @@ load_b: | |||
310 | case SKF_AD_QUEUE: | 312 | case SKF_AD_QUEUE: |
311 | A = skb->queue_mapping; | 313 | A = skb->queue_mapping; |
312 | continue; | 314 | continue; |
315 | case SKF_AD_HATYPE: | ||
316 | if (!skb->dev) | ||
317 | return 0; | ||
318 | A = skb->dev->type; | ||
319 | continue; | ||
313 | case SKF_AD_NLATTR: { | 320 | case SKF_AD_NLATTR: { |
314 | struct nlattr *nla; | 321 | struct nlattr *nla; |
315 | 322 | ||
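Note: two changes here. Ancillary loads that dereference skb->dev now return 0 (drop) when no device is attached, and a new SKF_AD_HATYPE load exposes dev->type to classic BPF. A userspace filter sketch; the SKF_AD_HATYPE define comes from the matching linux/filter.h change, which this net/-only diff does not show:

	#include <linux/filter.h>
	#include <linux/if_arp.h>

	/* Accept only frames from Ethernet-type devices, drop the rest. */
	static struct sock_filter hatype_prog[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 SKF_AD_OFF + SKF_AD_HATYPE),	/* A = dev->type */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ARPHRD_ETHER, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};

	static const struct sock_fprog hatype_fprog = {
		.len	= sizeof(hatype_prog) / sizeof(hatype_prog[0]),
		.filter	= hatype_prog,
	};

The program would be attached with setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &hatype_fprog, sizeof(hatype_fprog)).
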
diff --git a/net/core/flow.c b/net/core/flow.c index 96015871ecea..161900674009 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -26,113 +26,158 @@ | |||
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | 27 | ||
28 | struct flow_cache_entry { | 28 | struct flow_cache_entry { |
29 | struct flow_cache_entry *next; | 29 | union { |
30 | u16 family; | 30 | struct hlist_node hlist; |
31 | u8 dir; | 31 | struct list_head gc_list; |
32 | u32 genid; | 32 | } u; |
33 | struct flowi key; | 33 | u16 family; |
34 | void *object; | 34 | u8 dir; |
35 | atomic_t *object_ref; | 35 | u32 genid; |
36 | struct flowi key; | ||
37 | struct flow_cache_object *object; | ||
36 | }; | 38 | }; |
37 | 39 | ||
38 | atomic_t flow_cache_genid = ATOMIC_INIT(0); | 40 | struct flow_cache_percpu { |
39 | 41 | struct hlist_head *hash_table; | |
40 | static u32 flow_hash_shift; | 42 | int hash_count; |
41 | #define flow_hash_size (1 << flow_hash_shift) | 43 | u32 hash_rnd; |
42 | static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; | 44 | int hash_rnd_recalc; |
43 | 45 | struct tasklet_struct flush_tasklet; | |
44 | #define flow_table(cpu) (per_cpu(flow_tables, cpu)) | 46 | }; |
45 | |||
46 | static struct kmem_cache *flow_cachep __read_mostly; | ||
47 | 47 | ||
48 | static int flow_lwm, flow_hwm; | 48 | struct flow_flush_info { |
49 | struct flow_cache *cache; | ||
50 | atomic_t cpuleft; | ||
51 | struct completion completion; | ||
52 | }; | ||
49 | 53 | ||
50 | struct flow_percpu_info { | 54 | struct flow_cache { |
51 | int hash_rnd_recalc; | 55 | u32 hash_shift; |
52 | u32 hash_rnd; | 56 | unsigned long order; |
53 | int count; | 57 | struct flow_cache_percpu *percpu; |
58 | struct notifier_block hotcpu_notifier; | ||
59 | int low_watermark; | ||
60 | int high_watermark; | ||
61 | struct timer_list rnd_timer; | ||
54 | }; | 62 | }; |
55 | static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 }; | ||
56 | 63 | ||
57 | #define flow_hash_rnd_recalc(cpu) \ | 64 | atomic_t flow_cache_genid = ATOMIC_INIT(0); |
58 | (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) | 65 | static struct flow_cache flow_cache_global; |
59 | #define flow_hash_rnd(cpu) \ | 66 | static struct kmem_cache *flow_cachep; |
60 | (per_cpu(flow_hash_info, cpu).hash_rnd) | ||
61 | #define flow_count(cpu) \ | ||
62 | (per_cpu(flow_hash_info, cpu).count) | ||
63 | 67 | ||
64 | static struct timer_list flow_hash_rnd_timer; | 68 | static DEFINE_SPINLOCK(flow_cache_gc_lock); |
69 | static LIST_HEAD(flow_cache_gc_list); | ||
65 | 70 | ||
66 | #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) | 71 | #define flow_cache_hash_size(cache) (1 << (cache)->hash_shift) |
67 | 72 | #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) | |
68 | struct flow_flush_info { | ||
69 | atomic_t cpuleft; | ||
70 | struct completion completion; | ||
71 | }; | ||
72 | static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL }; | ||
73 | |||
74 | #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu)) | ||
75 | 73 | ||
76 | static void flow_cache_new_hashrnd(unsigned long arg) | 74 | static void flow_cache_new_hashrnd(unsigned long arg) |
77 | { | 75 | { |
76 | struct flow_cache *fc = (void *) arg; | ||
78 | int i; | 77 | int i; |
79 | 78 | ||
80 | for_each_possible_cpu(i) | 79 | for_each_possible_cpu(i) |
81 | flow_hash_rnd_recalc(i) = 1; | 80 | per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; |
82 | 81 | ||
83 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 82 | fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
84 | add_timer(&flow_hash_rnd_timer); | 83 | add_timer(&fc->rnd_timer); |
84 | } | ||
85 | |||
86 | static int flow_entry_valid(struct flow_cache_entry *fle) | ||
87 | { | ||
88 | if (atomic_read(&flow_cache_genid) != fle->genid) | ||
89 | return 0; | ||
90 | if (fle->object && !fle->object->ops->check(fle->object)) | ||
91 | return 0; | ||
92 | return 1; | ||
85 | } | 93 | } |
86 | 94 | ||
87 | static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) | 95 | static void flow_entry_kill(struct flow_cache_entry *fle) |
88 | { | 96 | { |
89 | if (fle->object) | 97 | if (fle->object) |
90 | atomic_dec(fle->object_ref); | 98 | fle->object->ops->delete(fle->object); |
91 | kmem_cache_free(flow_cachep, fle); | 99 | kmem_cache_free(flow_cachep, fle); |
92 | flow_count(cpu)--; | ||
93 | } | 100 | } |
94 | 101 | ||
95 | static void __flow_cache_shrink(int cpu, int shrink_to) | 102 | static void flow_cache_gc_task(struct work_struct *work) |
96 | { | 103 | { |
97 | struct flow_cache_entry *fle, **flp; | 104 | struct list_head gc_list; |
98 | int i; | 105 | struct flow_cache_entry *fce, *n; |
99 | 106 | ||
100 | for (i = 0; i < flow_hash_size; i++) { | 107 | INIT_LIST_HEAD(&gc_list); |
101 | int k = 0; | 108 | spin_lock_bh(&flow_cache_gc_lock); |
109 | list_splice_tail_init(&flow_cache_gc_list, &gc_list); | ||
110 | spin_unlock_bh(&flow_cache_gc_lock); | ||
102 | 111 | ||
103 | flp = &flow_table(cpu)[i]; | 112 | list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) |
104 | while ((fle = *flp) != NULL && k < shrink_to) { | 113 | flow_entry_kill(fce); |
105 | k++; | 114 | } |
106 | flp = &fle->next; | 115 | static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task); |
107 | } | 116 | |
108 | while ((fle = *flp) != NULL) { | 117 | static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, |
109 | *flp = fle->next; | 118 | int deleted, struct list_head *gc_list) |
110 | flow_entry_kill(cpu, fle); | 119 | { |
111 | } | 120 | if (deleted) { |
121 | fcp->hash_count -= deleted; | ||
122 | spin_lock_bh(&flow_cache_gc_lock); | ||
123 | list_splice_tail(gc_list, &flow_cache_gc_list); | ||
124 | spin_unlock_bh(&flow_cache_gc_lock); | ||
125 | schedule_work(&flow_cache_gc_work); | ||
112 | } | 126 | } |
113 | } | 127 | } |
114 | 128 | ||
115 | static void flow_cache_shrink(int cpu) | 129 | static void __flow_cache_shrink(struct flow_cache *fc, |
130 | struct flow_cache_percpu *fcp, | ||
131 | int shrink_to) | ||
116 | { | 132 | { |
117 | int shrink_to = flow_lwm / flow_hash_size; | 133 | struct flow_cache_entry *fle; |
134 | struct hlist_node *entry, *tmp; | ||
135 | LIST_HEAD(gc_list); | ||
136 | int i, deleted = 0; | ||
137 | |||
138 | for (i = 0; i < flow_cache_hash_size(fc); i++) { | ||
139 | int saved = 0; | ||
140 | |||
141 | hlist_for_each_entry_safe(fle, entry, tmp, | ||
142 | &fcp->hash_table[i], u.hlist) { | ||
143 | if (saved < shrink_to && | ||
144 | flow_entry_valid(fle)) { | ||
145 | saved++; | ||
146 | } else { | ||
147 | deleted++; | ||
148 | hlist_del(&fle->u.hlist); | ||
149 | list_add_tail(&fle->u.gc_list, &gc_list); | ||
150 | } | ||
151 | } | ||
152 | } | ||
118 | 153 | ||
119 | __flow_cache_shrink(cpu, shrink_to); | 154 | flow_cache_queue_garbage(fcp, deleted, &gc_list); |
120 | } | 155 | } |
121 | 156 | ||
122 | static void flow_new_hash_rnd(int cpu) | 157 | static void flow_cache_shrink(struct flow_cache *fc, |
158 | struct flow_cache_percpu *fcp) | ||
123 | { | 159 | { |
124 | get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32)); | 160 | int shrink_to = fc->low_watermark / flow_cache_hash_size(fc); |
125 | flow_hash_rnd_recalc(cpu) = 0; | ||
126 | 161 | ||
127 | __flow_cache_shrink(cpu, 0); | 162 | __flow_cache_shrink(fc, fcp, shrink_to); |
128 | } | 163 | } |
129 | 164 | ||
130 | static u32 flow_hash_code(struct flowi *key, int cpu) | 165 | static void flow_new_hash_rnd(struct flow_cache *fc, |
166 | struct flow_cache_percpu *fcp) | ||
167 | { | ||
168 | get_random_bytes(&fcp->hash_rnd, sizeof(u32)); | ||
169 | fcp->hash_rnd_recalc = 0; | ||
170 | __flow_cache_shrink(fc, fcp, 0); | ||
171 | } | ||
172 | |||
173 | static u32 flow_hash_code(struct flow_cache *fc, | ||
174 | struct flow_cache_percpu *fcp, | ||
175 | struct flowi *key) | ||
131 | { | 176 | { |
132 | u32 *k = (u32 *) key; | 177 | u32 *k = (u32 *) key; |
133 | 178 | ||
134 | return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) & | 179 | return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) |
135 | (flow_hash_size - 1)); | 180 | & (flow_cache_hash_size(fc) - 1)); |
136 | } | 181 | } |
137 | 182 | ||
138 | #if (BITS_PER_LONG == 64) | 183 | #if (BITS_PER_LONG == 64) |
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2) | |||
165 | return 0; | 210 | return 0; |
166 | } | 211 | } |
167 | 212 | ||
168 | void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, | 213 | struct flow_cache_object * |
169 | flow_resolve_t resolver) | 214 | flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, |
215 | flow_resolve_t resolver, void *ctx) | ||
170 | { | 216 | { |
171 | struct flow_cache_entry *fle, **head; | 217 | struct flow_cache *fc = &flow_cache_global; |
218 | struct flow_cache_percpu *fcp; | ||
219 | struct flow_cache_entry *fle, *tfle; | ||
220 | struct hlist_node *entry; | ||
221 | struct flow_cache_object *flo; | ||
172 | unsigned int hash; | 222 | unsigned int hash; |
173 | int cpu; | ||
174 | 223 | ||
175 | local_bh_disable(); | 224 | local_bh_disable(); |
176 | cpu = smp_processor_id(); | 225 | fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); |
177 | 226 | ||
178 | fle = NULL; | 227 | fle = NULL; |
228 | flo = NULL; | ||
179 | /* Packet really early in init? Making flow_cache_init a | 229 | /* Packet really early in init? Making flow_cache_init a |
180 | * pre-smp initcall would solve this. --RR */ | 230 | * pre-smp initcall would solve this. --RR */ |
181 | if (!flow_table(cpu)) | 231 | if (!fcp->hash_table) |
182 | goto nocache; | 232 | goto nocache; |
183 | 233 | ||
184 | if (flow_hash_rnd_recalc(cpu)) | 234 | if (fcp->hash_rnd_recalc) |
185 | flow_new_hash_rnd(cpu); | 235 | flow_new_hash_rnd(fc, fcp); |
186 | hash = flow_hash_code(key, cpu); | ||
187 | 236 | ||
188 | head = &flow_table(cpu)[hash]; | 237 | hash = flow_hash_code(fc, fcp, key); |
189 | for (fle = *head; fle; fle = fle->next) { | 238 | hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { |
190 | if (fle->family == family && | 239 | if (tfle->family == family && |
191 | fle->dir == dir && | 240 | tfle->dir == dir && |
192 | flow_key_compare(key, &fle->key) == 0) { | 241 | flow_key_compare(key, &tfle->key) == 0) { |
193 | if (fle->genid == atomic_read(&flow_cache_genid)) { | 242 | fle = tfle; |
194 | void *ret = fle->object; | ||
195 | |||
196 | if (ret) | ||
197 | atomic_inc(fle->object_ref); | ||
198 | local_bh_enable(); | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | break; | 243 | break; |
203 | } | 244 | } |
204 | } | 245 | } |
205 | 246 | ||
206 | if (!fle) { | 247 | if (unlikely(!fle)) { |
207 | if (flow_count(cpu) > flow_hwm) | 248 | if (fcp->hash_count > fc->high_watermark) |
208 | flow_cache_shrink(cpu); | 249 | flow_cache_shrink(fc, fcp); |
209 | 250 | ||
210 | fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); | 251 | fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); |
211 | if (fle) { | 252 | if (fle) { |
212 | fle->next = *head; | ||
213 | *head = fle; | ||
214 | fle->family = family; | 253 | fle->family = family; |
215 | fle->dir = dir; | 254 | fle->dir = dir; |
216 | memcpy(&fle->key, key, sizeof(*key)); | 255 | memcpy(&fle->key, key, sizeof(*key)); |
217 | fle->object = NULL; | 256 | fle->object = NULL; |
218 | flow_count(cpu)++; | 257 | hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); |
258 | fcp->hash_count++; | ||
219 | } | 259 | } |
260 | } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) { | ||
261 | flo = fle->object; | ||
262 | if (!flo) | ||
263 | goto ret_object; | ||
264 | flo = flo->ops->get(flo); | ||
265 | if (flo) | ||
266 | goto ret_object; | ||
267 | } else if (fle->object) { | ||
268 | flo = fle->object; | ||
269 | flo->ops->delete(flo); | ||
270 | fle->object = NULL; | ||
220 | } | 271 | } |
221 | 272 | ||
222 | nocache: | 273 | nocache: |
223 | { | 274 | flo = NULL; |
224 | int err; | 275 | if (fle) { |
225 | void *obj; | 276 | flo = fle->object; |
226 | atomic_t *obj_ref; | 277 | fle->object = NULL; |
227 | |||
228 | err = resolver(net, key, family, dir, &obj, &obj_ref); | ||
229 | |||
230 | if (fle && !err) { | ||
231 | fle->genid = atomic_read(&flow_cache_genid); | ||
232 | |||
233 | if (fle->object) | ||
234 | atomic_dec(fle->object_ref); | ||
235 | |||
236 | fle->object = obj; | ||
237 | fle->object_ref = obj_ref; | ||
238 | if (obj) | ||
239 | atomic_inc(fle->object_ref); | ||
240 | } | ||
241 | local_bh_enable(); | ||
242 | |||
243 | if (err) | ||
244 | obj = ERR_PTR(err); | ||
245 | return obj; | ||
246 | } | 278 | } |
279 | flo = resolver(net, key, family, dir, flo, ctx); | ||
280 | if (fle) { | ||
281 | fle->genid = atomic_read(&flow_cache_genid); | ||
282 | if (!IS_ERR(flo)) | ||
283 | fle->object = flo; | ||
284 | else | ||
285 | fle->genid--; | ||
286 | } else { | ||
287 | if (flo && !IS_ERR(flo)) | ||
288 | flo->ops->delete(flo); | ||
289 | } | ||
290 | ret_object: | ||
291 | local_bh_enable(); | ||
292 | return flo; | ||
247 | } | 293 | } |
248 | 294 | ||
249 | static void flow_cache_flush_tasklet(unsigned long data) | 295 | static void flow_cache_flush_tasklet(unsigned long data) |
250 | { | 296 | { |
251 | struct flow_flush_info *info = (void *)data; | 297 | struct flow_flush_info *info = (void *)data; |
252 | int i; | 298 | struct flow_cache *fc = info->cache; |
253 | int cpu; | 299 | struct flow_cache_percpu *fcp; |
254 | 300 | struct flow_cache_entry *fle; | |
255 | cpu = smp_processor_id(); | 301 | struct hlist_node *entry, *tmp; |
256 | for (i = 0; i < flow_hash_size; i++) { | 302 | LIST_HEAD(gc_list); |
257 | struct flow_cache_entry *fle; | 303 | int i, deleted = 0; |
258 | 304 | ||
259 | fle = flow_table(cpu)[i]; | 305 | fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); |
260 | for (; fle; fle = fle->next) { | 306 | for (i = 0; i < flow_cache_hash_size(fc); i++) { |
261 | unsigned genid = atomic_read(&flow_cache_genid); | 307 | hlist_for_each_entry_safe(fle, entry, tmp, |
262 | 308 | &fcp->hash_table[i], u.hlist) { | |
263 | if (!fle->object || fle->genid == genid) | 309 | if (flow_entry_valid(fle)) |
264 | continue; | 310 | continue; |
265 | 311 | ||
266 | fle->object = NULL; | 312 | deleted++; |
267 | atomic_dec(fle->object_ref); | 313 | hlist_del(&fle->u.hlist); |
314 | list_add_tail(&fle->u.gc_list, &gc_list); | ||
268 | } | 315 | } |
269 | } | 316 | } |
270 | 317 | ||
318 | flow_cache_queue_garbage(fcp, deleted, &gc_list); | ||
319 | |||
271 | if (atomic_dec_and_test(&info->cpuleft)) | 320 | if (atomic_dec_and_test(&info->cpuleft)) |
272 | complete(&info->completion); | 321 | complete(&info->completion); |
273 | } | 322 | } |
274 | 323 | ||
275 | static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__)); | ||
276 | static void flow_cache_flush_per_cpu(void *data) | 324 | static void flow_cache_flush_per_cpu(void *data) |
277 | { | 325 | { |
278 | struct flow_flush_info *info = data; | 326 | struct flow_flush_info *info = data; |
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) | |||
280 | struct tasklet_struct *tasklet; | 328 | struct tasklet_struct *tasklet; |
281 | 329 | ||
282 | cpu = smp_processor_id(); | 330 | cpu = smp_processor_id(); |
283 | 331 | tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet; | |
284 | tasklet = flow_flush_tasklet(cpu); | ||
285 | tasklet->data = (unsigned long)info; | 332 | tasklet->data = (unsigned long)info; |
286 | tasklet_schedule(tasklet); | 333 | tasklet_schedule(tasklet); |
287 | } | 334 | } |
@@ -294,6 +341,7 @@ void flow_cache_flush(void) | |||
294 | /* Don't want cpus going down or up during this. */ | 341 | /* Don't want cpus going down or up during this. */ |
295 | get_online_cpus(); | 342 | get_online_cpus(); |
296 | mutex_lock(&flow_flush_sem); | 343 | mutex_lock(&flow_flush_sem); |
344 | info.cache = &flow_cache_global; | ||
297 | atomic_set(&info.cpuleft, num_online_cpus()); | 345 | atomic_set(&info.cpuleft, num_online_cpus()); |
298 | init_completion(&info.completion); | 346 | init_completion(&info.completion); |
299 | 347 | ||
@@ -307,62 +355,75 @@ void flow_cache_flush(void) | |||
307 | put_online_cpus(); | 355 | put_online_cpus(); |
308 | } | 356 | } |
309 | 357 | ||
310 | static void __init flow_cache_cpu_prepare(int cpu) | 358 | static void __init flow_cache_cpu_prepare(struct flow_cache *fc, |
359 | struct flow_cache_percpu *fcp) | ||
311 | { | 360 | { |
312 | struct tasklet_struct *tasklet; | 361 | fcp->hash_table = (struct hlist_head *) |
313 | unsigned long order; | 362 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order); |
314 | 363 | if (!fcp->hash_table) | |
315 | for (order = 0; | 364 | panic("NET: failed to allocate flow cache order %lu\n", fc->order); |
316 | (PAGE_SIZE << order) < | 365 | |
317 | (sizeof(struct flow_cache_entry *)*flow_hash_size); | 366 | fcp->hash_rnd_recalc = 1; |
318 | order++) | 367 | fcp->hash_count = 0; |
319 | /* NOTHING */; | 368 | tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0); |
320 | |||
321 | flow_table(cpu) = (struct flow_cache_entry **) | ||
322 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); | ||
323 | if (!flow_table(cpu)) | ||
324 | panic("NET: failed to allocate flow cache order %lu\n", order); | ||
325 | |||
326 | flow_hash_rnd_recalc(cpu) = 1; | ||
327 | flow_count(cpu) = 0; | ||
328 | |||
329 | tasklet = flow_flush_tasklet(cpu); | ||
330 | tasklet_init(tasklet, flow_cache_flush_tasklet, 0); | ||
331 | } | 369 | } |
332 | 370 | ||
333 | static int flow_cache_cpu(struct notifier_block *nfb, | 371 | static int flow_cache_cpu(struct notifier_block *nfb, |
334 | unsigned long action, | 372 | unsigned long action, |
335 | void *hcpu) | 373 | void *hcpu) |
336 | { | 374 | { |
375 | struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier); | ||
376 | int cpu = (unsigned long) hcpu; | ||
377 | struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); | ||
378 | |||
337 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) | 379 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) |
338 | __flow_cache_shrink((unsigned long)hcpu, 0); | 380 | __flow_cache_shrink(fc, fcp, 0); |
339 | return NOTIFY_OK; | 381 | return NOTIFY_OK; |
340 | } | 382 | } |
341 | 383 | ||
342 | static int __init flow_cache_init(void) | 384 | static int flow_cache_init(struct flow_cache *fc) |
343 | { | 385 | { |
386 | unsigned long order; | ||
344 | int i; | 387 | int i; |
345 | 388 | ||
346 | flow_cachep = kmem_cache_create("flow_cache", | 389 | fc->hash_shift = 10; |
347 | sizeof(struct flow_cache_entry), | 390 | fc->low_watermark = 2 * flow_cache_hash_size(fc); |
348 | 0, SLAB_PANIC, | 391 | fc->high_watermark = 4 * flow_cache_hash_size(fc); |
349 | NULL); | 392 | |
350 | flow_hash_shift = 10; | 393 | for (order = 0; |
351 | flow_lwm = 2 * flow_hash_size; | 394 | (PAGE_SIZE << order) < |
352 | flow_hwm = 4 * flow_hash_size; | 395 | (sizeof(struct hlist_head)*flow_cache_hash_size(fc)); |
396 | order++) | ||
397 | /* NOTHING */; | ||
398 | fc->order = order; | ||
399 | fc->percpu = alloc_percpu(struct flow_cache_percpu); | ||
353 | 400 | ||
354 | setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0); | 401 | setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, |
355 | flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; | 402 | (unsigned long) fc); |
356 | add_timer(&flow_hash_rnd_timer); | 403 | fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; |
404 | add_timer(&fc->rnd_timer); | ||
357 | 405 | ||
358 | for_each_possible_cpu(i) | 406 | for_each_possible_cpu(i) |
359 | flow_cache_cpu_prepare(i); | 407 | flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i)); |
408 | |||
409 | fc->hotcpu_notifier = (struct notifier_block){ | ||
410 | .notifier_call = flow_cache_cpu, | ||
411 | }; | ||
412 | register_hotcpu_notifier(&fc->hotcpu_notifier); | ||
360 | 413 | ||
361 | hotcpu_notifier(flow_cache_cpu, 0); | ||
362 | return 0; | 414 | return 0; |
363 | } | 415 | } |
364 | 416 | ||
365 | module_init(flow_cache_init); | 417 | static int __init flow_cache_init_global(void) |
418 | { | ||
419 | flow_cachep = kmem_cache_create("flow_cache", | ||
420 | sizeof(struct flow_cache_entry), | ||
421 | 0, SLAB_PANIC, NULL); | ||
422 | |||
423 | return flow_cache_init(&flow_cache_global); | ||
424 | } | ||
425 | |||
426 | module_init(flow_cache_init_global); | ||
366 | 427 | ||
367 | EXPORT_SYMBOL(flow_cache_genid); | 428 | EXPORT_SYMBOL(flow_cache_genid); |
368 | EXPORT_SYMBOL(flow_cache_lookup); | 429 | EXPORT_SYMBOL(flow_cache_lookup); |
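Note: the flow cache rewrite above replaces the open-coded singly linked chains and (object, external refcount) pairs with hlist buckets and a virtualized struct flow_cache_object whose ops (get/check/delete) let the owner manage its own lifetime; stale entries are unlinked in softirq context and freed from a workqueue. A provider sketch, assuming the flow_cache_object/flow_cache_ops declarations from the matching include/net/flow.h change (not shown in this net/-limited diff); my_flow_obj is hypothetical:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <net/flow.h>

	struct my_flow_obj {
		struct flow_cache_object flo;	/* embedded, ops-bearing */
		atomic_t refcnt;
		int dead;
	};

	static struct flow_cache_object *my_flow_get(struct flow_cache_object *flo)
	{
		struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

		if (obj->dead)
			return NULL;		/* forces a re-resolve */
		atomic_inc(&obj->refcnt);
		return flo;
	}

	static int my_flow_check(struct flow_cache_object *flo)
	{
		return !container_of(flo, struct my_flow_obj, flo)->dead;
	}

	static void my_flow_delete(struct flow_cache_object *flo)
	{
		struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

		if (atomic_dec_and_test(&obj->refcnt))
			kfree(obj);
	}

	static const struct flow_cache_ops my_flow_ops = {
		.get	= my_flow_get,
		.check	= my_flow_check,
		.delete	= my_flow_delete,
	};
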
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 59cfc7d8fc45..c57c4b228bb5 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <net/sock.h> | 17 | #include <net/sock.h> |
18 | #include <linux/rtnetlink.h> | 18 | #include <linux/rtnetlink.h> |
19 | #include <linux/wireless.h> | 19 | #include <linux/wireless.h> |
20 | #include <linux/vmalloc.h> | ||
20 | #include <net/wext.h> | 21 | #include <net/wext.h> |
21 | 22 | ||
22 | #include "net-sysfs.h" | 23 | #include "net-sysfs.h" |
@@ -467,6 +468,304 @@ static struct attribute_group wireless_group = { | |||
467 | }; | 468 | }; |
468 | #endif | 469 | #endif |
469 | 470 | ||
471 | #ifdef CONFIG_RPS | ||
472 | /* | ||
473 | * RX queue sysfs structures and functions. | ||
474 | */ | ||
475 | struct rx_queue_attribute { | ||
476 | struct attribute attr; | ||
477 | ssize_t (*show)(struct netdev_rx_queue *queue, | ||
478 | struct rx_queue_attribute *attr, char *buf); | ||
479 | ssize_t (*store)(struct netdev_rx_queue *queue, | ||
480 | struct rx_queue_attribute *attr, const char *buf, size_t len); | ||
481 | }; | ||
482 | #define to_rx_queue_attr(_attr) container_of(_attr, \ | ||
483 | struct rx_queue_attribute, attr) | ||
484 | |||
485 | #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) | ||
486 | |||
487 | static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, | ||
488 | char *buf) | ||
489 | { | ||
490 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
491 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
492 | |||
493 | if (!attribute->show) | ||
494 | return -EIO; | ||
495 | |||
496 | return attribute->show(queue, attribute, buf); | ||
497 | } | ||
498 | |||
499 | static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, | ||
500 | const char *buf, size_t count) | ||
501 | { | ||
502 | struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); | ||
503 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
504 | |||
505 | if (!attribute->store) | ||
506 | return -EIO; | ||
507 | |||
508 | return attribute->store(queue, attribute, buf, count); | ||
509 | } | ||
510 | |||
511 | static struct sysfs_ops rx_queue_sysfs_ops = { | ||
512 | .show = rx_queue_attr_show, | ||
513 | .store = rx_queue_attr_store, | ||
514 | }; | ||
515 | |||
516 | static ssize_t show_rps_map(struct netdev_rx_queue *queue, | ||
517 | struct rx_queue_attribute *attribute, char *buf) | ||
518 | { | ||
519 | struct rps_map *map; | ||
520 | cpumask_var_t mask; | ||
521 | size_t len = 0; | ||
522 | int i; | ||
523 | |||
524 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) | ||
525 | return -ENOMEM; | ||
526 | |||
527 | rcu_read_lock(); | ||
528 | map = rcu_dereference(queue->rps_map); | ||
529 | if (map) | ||
530 | for (i = 0; i < map->len; i++) | ||
531 | cpumask_set_cpu(map->cpus[i], mask); | ||
532 | |||
533 | len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask); | ||
534 | if (PAGE_SIZE - len < 3) { | ||
535 | rcu_read_unlock(); | ||
536 | free_cpumask_var(mask); | ||
537 | return -EINVAL; | ||
538 | } | ||
539 | rcu_read_unlock(); | ||
540 | |||
541 | free_cpumask_var(mask); | ||
542 | len += sprintf(buf + len, "\n"); | ||
543 | return len; | ||
544 | } | ||
545 | |||
546 | static void rps_map_release(struct rcu_head *rcu) | ||
547 | { | ||
548 | struct rps_map *map = container_of(rcu, struct rps_map, rcu); | ||
549 | |||
550 | kfree(map); | ||
551 | } | ||
552 | |||
553 | static ssize_t store_rps_map(struct netdev_rx_queue *queue, | ||
554 | struct rx_queue_attribute *attribute, | ||
555 | const char *buf, size_t len) | ||
556 | { | ||
557 | struct rps_map *old_map, *map; | ||
558 | cpumask_var_t mask; | ||
559 | int err, cpu, i; | ||
560 | static DEFINE_SPINLOCK(rps_map_lock); | ||
561 | |||
562 | if (!capable(CAP_NET_ADMIN)) | ||
563 | return -EPERM; | ||
564 | |||
565 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); | ||
569 | if (err) { | ||
570 | free_cpumask_var(mask); | ||
571 | return err; | ||
572 | } | ||
573 | |||
574 | map = kzalloc(max_t(unsigned, | ||
575 | RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), | ||
576 | GFP_KERNEL); | ||
577 | if (!map) { | ||
578 | free_cpumask_var(mask); | ||
579 | return -ENOMEM; | ||
580 | } | ||
581 | |||
582 | i = 0; | ||
583 | for_each_cpu_and(cpu, mask, cpu_online_mask) | ||
584 | map->cpus[i++] = cpu; | ||
585 | |||
586 | if (i) | ||
587 | map->len = i; | ||
588 | else { | ||
589 | kfree(map); | ||
590 | map = NULL; | ||
591 | } | ||
592 | |||
593 | spin_lock(&rps_map_lock); | ||
594 | old_map = queue->rps_map; | ||
595 | rcu_assign_pointer(queue->rps_map, map); | ||
596 | spin_unlock(&rps_map_lock); | ||
597 | |||
598 | if (old_map) | ||
599 | call_rcu(&old_map->rcu, rps_map_release); | ||
600 | |||
601 | free_cpumask_var(mask); | ||
602 | return len; | ||
603 | } | ||
604 | |||
605 | static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | ||
606 | struct rx_queue_attribute *attr, | ||
607 | char *buf) | ||
608 | { | ||
609 | struct rps_dev_flow_table *flow_table; | ||
610 | unsigned int val = 0; | ||
611 | |||
612 | rcu_read_lock(); | ||
613 | flow_table = rcu_dereference(queue->rps_flow_table); | ||
614 | if (flow_table) | ||
615 | val = flow_table->mask + 1; | ||
616 | rcu_read_unlock(); | ||
617 | |||
618 | return sprintf(buf, "%u\n", val); | ||
619 | } | ||
620 | |||
621 | static void rps_dev_flow_table_release_work(struct work_struct *work) | ||
622 | { | ||
623 | struct rps_dev_flow_table *table = container_of(work, | ||
624 | struct rps_dev_flow_table, free_work); | ||
625 | |||
626 | vfree(table); | ||
627 | } | ||
628 | |||
629 | static void rps_dev_flow_table_release(struct rcu_head *rcu) | ||
630 | { | ||
631 | struct rps_dev_flow_table *table = container_of(rcu, | ||
632 | struct rps_dev_flow_table, rcu); | ||
633 | |||
634 | INIT_WORK(&table->free_work, rps_dev_flow_table_release_work); | ||
635 | schedule_work(&table->free_work); | ||
636 | } | ||
637 | |||
638 | static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, | ||
639 | struct rx_queue_attribute *attr, | ||
640 | const char *buf, size_t len) | ||
641 | { | ||
642 | unsigned int count; | ||
643 | char *endp; | ||
644 | struct rps_dev_flow_table *table, *old_table; | ||
645 | static DEFINE_SPINLOCK(rps_dev_flow_lock); | ||
646 | |||
647 | if (!capable(CAP_NET_ADMIN)) | ||
648 | return -EPERM; | ||
649 | |||
650 | count = simple_strtoul(buf, &endp, 0); | ||
651 | if (endp == buf) | ||
652 | return -EINVAL; | ||
653 | |||
654 | if (count) { | ||
655 | int i; | ||
656 | |||
657 | if (count > 1<<30) { | ||
658 | /* Enforce a limit to prevent overflow */ | ||
659 | return -EINVAL; | ||
660 | } | ||
661 | count = roundup_pow_of_two(count); | ||
662 | table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count)); | ||
663 | if (!table) | ||
664 | return -ENOMEM; | ||
665 | |||
666 | table->mask = count - 1; | ||
667 | for (i = 0; i < count; i++) | ||
668 | table->flows[i].cpu = RPS_NO_CPU; | ||
669 | } else | ||
670 | table = NULL; | ||
671 | |||
672 | spin_lock(&rps_dev_flow_lock); | ||
673 | old_table = queue->rps_flow_table; | ||
674 | rcu_assign_pointer(queue->rps_flow_table, table); | ||
675 | spin_unlock(&rps_dev_flow_lock); | ||
676 | |||
677 | if (old_table) | ||
678 | call_rcu(&old_table->rcu, rps_dev_flow_table_release); | ||
679 | |||
680 | return len; | ||
681 | } | ||
682 | |||
683 | static struct rx_queue_attribute rps_cpus_attribute = | ||
684 | __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); | ||
685 | |||
686 | |||
687 | static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute = | ||
688 | __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR, | ||
689 | show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); | ||
690 | |||
691 | static struct attribute *rx_queue_default_attrs[] = { | ||
692 | &rps_cpus_attribute.attr, | ||
693 | &rps_dev_flow_table_cnt_attribute.attr, | ||
694 | NULL | ||
695 | }; | ||
696 | |||
697 | static void rx_queue_release(struct kobject *kobj) | ||
698 | { | ||
699 | struct netdev_rx_queue *queue = to_rx_queue(kobj); | ||
700 | struct netdev_rx_queue *first = queue->first; | ||
701 | |||
702 | if (queue->rps_map) | ||
703 | call_rcu(&queue->rps_map->rcu, rps_map_release); | ||
704 | |||
705 | if (queue->rps_flow_table) | ||
706 | call_rcu(&queue->rps_flow_table->rcu, | ||
707 | rps_dev_flow_table_release); | ||
708 | |||
709 | if (atomic_dec_and_test(&first->count)) | ||
710 | kfree(first); | ||
711 | } | ||
712 | |||
713 | static struct kobj_type rx_queue_ktype = { | ||
714 | .sysfs_ops = &rx_queue_sysfs_ops, | ||
715 | .release = rx_queue_release, | ||
716 | .default_attrs = rx_queue_default_attrs, | ||
717 | }; | ||
718 | |||
719 | static int rx_queue_add_kobject(struct net_device *net, int index) | ||
720 | { | ||
721 | struct netdev_rx_queue *queue = net->_rx + index; | ||
722 | struct kobject *kobj = &queue->kobj; | ||
723 | int error = 0; | ||
724 | |||
725 | kobj->kset = net->queues_kset; | ||
726 | error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, | ||
727 | "rx-%u", index); | ||
728 | if (error) { | ||
729 | kobject_put(kobj); | ||
730 | return error; | ||
731 | } | ||
732 | |||
733 | kobject_uevent(kobj, KOBJ_ADD); | ||
734 | |||
735 | return error; | ||
736 | } | ||
737 | |||
738 | static int rx_queue_register_kobjects(struct net_device *net) | ||
739 | { | ||
740 | int i; | ||
741 | int error = 0; | ||
742 | |||
743 | net->queues_kset = kset_create_and_add("queues", | ||
744 | NULL, &net->dev.kobj); | ||
745 | if (!net->queues_kset) | ||
746 | return -ENOMEM; | ||
747 | for (i = 0; i < net->num_rx_queues; i++) { | ||
748 | error = rx_queue_add_kobject(net, i); | ||
749 | if (error) | ||
750 | break; | ||
751 | } | ||
752 | |||
753 | if (error) | ||
754 | while (--i >= 0) | ||
755 | kobject_put(&net->_rx[i].kobj); | ||
756 | |||
757 | return error; | ||
758 | } | ||
759 | |||
760 | static void rx_queue_remove_kobjects(struct net_device *net) | ||
761 | { | ||
762 | int i; | ||
763 | |||
764 | for (i = 0; i < net->num_rx_queues; i++) | ||
765 | kobject_put(&net->_rx[i].kobj); | ||
766 | kset_unregister(net->queues_kset); | ||
767 | } | ||
768 | #endif /* CONFIG_RPS */ | ||
470 | #endif /* CONFIG_SYSFS */ | 769 | #endif /* CONFIG_SYSFS */ |
471 | 770 | ||
472 | #ifdef CONFIG_HOTPLUG | 771 | #ifdef CONFIG_HOTPLUG |
@@ -530,6 +829,10 @@ void netdev_unregister_kobject(struct net_device * net) | |||
530 | if (!net_eq(dev_net(net), &init_net)) | 829 | if (!net_eq(dev_net(net), &init_net)) |
531 | return; | 830 | return; |
532 | 831 | ||
832 | #ifdef CONFIG_RPS | ||
833 | rx_queue_remove_kobjects(net); | ||
834 | #endif | ||
835 | |||
533 | device_del(dev); | 836 | device_del(dev); |
534 | } | 837 | } |
535 | 838 | ||
@@ -538,6 +841,7 @@ int netdev_register_kobject(struct net_device *net) | |||
538 | { | 841 | { |
539 | struct device *dev = &(net->dev); | 842 | struct device *dev = &(net->dev); |
540 | const struct attribute_group **groups = net->sysfs_groups; | 843 | const struct attribute_group **groups = net->sysfs_groups; |
844 | int error = 0; | ||
541 | 845 | ||
542 | dev->class = &net_class; | 846 | dev->class = &net_class; |
543 | dev->platform_data = net; | 847 | dev->platform_data = net; |
@@ -564,7 +868,19 @@ int netdev_register_kobject(struct net_device *net) | |||
564 | if (!net_eq(dev_net(net), &init_net)) | 868 | if (!net_eq(dev_net(net), &init_net)) |
565 | return 0; | 869 | return 0; |
566 | 870 | ||
567 | return device_add(dev); | 871 | error = device_add(dev); |
872 | if (error) | ||
873 | return error; | ||
874 | |||
875 | #ifdef CONFIG_RPS | ||
876 | error = rx_queue_register_kobjects(net); | ||
877 | if (error) { | ||
878 | device_del(dev); | ||
879 | return error; | ||
880 | } | ||
881 | #endif | ||
882 | |||
883 | return error; | ||
568 | } | 884 | } |
569 | 885 | ||
570 | int netdev_class_create_file(struct class_attribute *class_attr) | 886 | int netdev_class_create_file(struct class_attribute *class_attr) |
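Note: the new per-RX-queue kobjects expose rps_cpus (a hex CPU bitmap fed through bitmap_parse()) and rps_flow_cnt (rounded up to a power of two, capped at 1<<30). A userspace sketch of driving them; the interface name eth0 is an assumption, and the queues/rx-N directories only exist on kernels built with CONFIG_RPS:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
			perror(path);
			if (fd >= 0)
				close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* "f" = CPUs 0-3, the format cpumask_scnprintf() reports back */
		write_attr("/sys/class/net/eth0/queues/rx-0/rps_cpus", "f");
		/* store_rps_dev_flow_table_cnt() rounds this up to 4096 */
		write_attr("/sys/class/net/eth0/queues/rx-0/rps_flow_cnt", "4000");
		return 0;
	}
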
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index bd8c4712ea24..69a20bfc527c 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -469,10 +469,10 @@ EXPORT_SYMBOL_GPL(register_pernet_subsys); | |||
469 | * addition run the exit method for all existing network | 469 | * addition run the exit method for all existing network |
470 | * namespaces. | 470 | * namespaces. |
471 | */ | 471 | */ |
472 | void unregister_pernet_subsys(struct pernet_operations *module) | 472 | void unregister_pernet_subsys(struct pernet_operations *ops) |
473 | { | 473 | { |
474 | mutex_lock(&net_mutex); | 474 | mutex_lock(&net_mutex); |
475 | unregister_pernet_operations(module); | 475 | unregister_pernet_operations(ops); |
476 | mutex_unlock(&net_mutex); | 476 | mutex_unlock(&net_mutex); |
477 | } | 477 | } |
478 | EXPORT_SYMBOL_GPL(unregister_pernet_subsys); | 478 | EXPORT_SYMBOL_GPL(unregister_pernet_subsys); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 43923811bd6a..2ad68da418df 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -169,7 +169,7 @@ | |||
169 | #include <asm/dma.h> | 169 | #include <asm/dma.h> |
170 | #include <asm/div64.h> /* do_div */ | 170 | #include <asm/div64.h> /* do_div */ |
171 | 171 | ||
172 | #define VERSION "2.72" | 172 | #define VERSION "2.73" |
173 | #define IP_NAME_SZ 32 | 173 | #define IP_NAME_SZ 32 |
174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ | 174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ |
175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) | 175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) |
@@ -190,6 +190,7 @@ | |||
190 | #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ | 190 | #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ |
191 | #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ | 191 | #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ |
192 | #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ | 192 | #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ |
193 | #define F_NODE (1<<15) /* Node memory alloc*/ | ||
193 | 194 | ||
194 | /* Thread control flag bits */ | 195 | /* Thread control flag bits */ |
195 | #define T_STOP (1<<0) /* Stop run */ | 196 | #define T_STOP (1<<0) /* Stop run */ |
@@ -372,6 +373,7 @@ struct pktgen_dev { | |||
372 | 373 | ||
373 | u16 queue_map_min; | 374 | u16 queue_map_min; |
374 | u16 queue_map_max; | 375 | u16 queue_map_max; |
376 | int node; /* Memory node */ | ||
375 | 377 | ||
376 | #ifdef CONFIG_XFRM | 378 | #ifdef CONFIG_XFRM |
377 | __u8 ipsmode; /* IPSEC mode (config) */ | 379 | __u8 ipsmode; /* IPSEC mode (config) */ |
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
607 | if (pkt_dev->traffic_class) | 609 | if (pkt_dev->traffic_class) |
608 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); | 610 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); |
609 | 611 | ||
612 | if (pkt_dev->node >= 0) | ||
613 | seq_printf(seq, " node: %d\n", pkt_dev->node); | ||
614 | |||
610 | seq_printf(seq, " Flags: "); | 615 | seq_printf(seq, " Flags: "); |
611 | 616 | ||
612 | if (pkt_dev->flags & F_IPV6) | 617 | if (pkt_dev->flags & F_IPV6) |
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
660 | if (pkt_dev->flags & F_SVID_RND) | 665 | if (pkt_dev->flags & F_SVID_RND) |
661 | seq_printf(seq, "SVID_RND "); | 666 | seq_printf(seq, "SVID_RND "); |
662 | 667 | ||
668 | if (pkt_dev->flags & F_NODE) | ||
669 | seq_printf(seq, "NODE_ALLOC "); | ||
670 | |||
663 | seq_puts(seq, "\n"); | 671 | seq_puts(seq, "\n"); |
664 | 672 | ||
665 | /* not really stopped, more like last-running-at */ | 673 | /* not really stopped, more like last-running-at */ |
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1074 | pkt_dev->dst_mac_count); | 1082 | pkt_dev->dst_mac_count); |
1075 | return count; | 1083 | return count; |
1076 | } | 1084 | } |
1085 | if (!strcmp(name, "node")) { | ||
1086 | len = num_arg(&user_buffer[i], 10, &value); | ||
1087 | if (len < 0) | ||
1088 | return len; | ||
1089 | |||
1090 | i += len; | ||
1091 | |||
1092 | if (node_possible(value)) { | ||
1093 | pkt_dev->node = value; | ||
1094 | sprintf(pg_result, "OK: node=%d", pkt_dev->node); | ||
1095 | } | ||
1096 | else | ||
1097 | sprintf(pg_result, "ERROR: node not possible"); | ||
1098 | return count; | ||
1099 | } | ||
1077 | if (!strcmp(name, "flag")) { | 1100 | if (!strcmp(name, "flag")) { |
1078 | char f[32]; | 1101 | char f[32]; |
1079 | memset(f, 0, 32); | 1102 | memset(f, 0, 32); |
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1166 | else if (strcmp(f, "!IPV6") == 0) | 1189 | else if (strcmp(f, "!IPV6") == 0) |
1167 | pkt_dev->flags &= ~F_IPV6; | 1190 | pkt_dev->flags &= ~F_IPV6; |
1168 | 1191 | ||
1192 | else if (strcmp(f, "NODE_ALLOC") == 0) | ||
1193 | pkt_dev->flags |= F_NODE; | ||
1194 | |||
1195 | else if (strcmp(f, "!NODE_ALLOC") == 0) | ||
1196 | pkt_dev->flags &= ~F_NODE; | ||
1197 | |||
1169 | else { | 1198 | else { |
1170 | sprintf(pg_result, | 1199 | sprintf(pg_result, |
1171 | "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", | 1200 | "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", |
1172 | f, | 1201 | f, |
1173 | "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " | 1202 | "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " |
1174 | "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n"); | 1203 | "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n"); |
1175 | return count; | 1204 | return count; |
1176 | } | 1205 | } |
1177 | sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); | 1206 | sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); |
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2572 | mod_cur_headers(pkt_dev); | 2601 | mod_cur_headers(pkt_dev); |
2573 | 2602 | ||
2574 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2603 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2575 | skb = __netdev_alloc_skb(odev, | 2604 | |
2576 | pkt_dev->cur_pkt_size + 64 | 2605 | if (pkt_dev->flags & F_NODE) { |
2577 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | 2606 | int node; |
2607 | |||
2608 | if (pkt_dev->node >= 0) | ||
2609 | node = pkt_dev->node; | ||
2610 | else | ||
2611 | node = numa_node_id(); | ||
2612 | |||
2613 | skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64 | ||
2614 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node); | ||
2615 | if (likely(skb)) { | ||
2616 | skb_reserve(skb, NET_SKB_PAD); | ||
2617 | skb->dev = odev; | ||
2618 | } | ||
2619 | } | ||
2620 | else | ||
2621 | skb = __netdev_alloc_skb(odev, | ||
2622 | pkt_dev->cur_pkt_size + 64 | ||
2623 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2624 | |||
2578 | if (!skb) { | 2625 | if (!skb) { |
2579 | sprintf(pkt_dev->result, "No memory"); | 2626 | sprintf(pkt_dev->result, "No memory"); |
2580 | return NULL; | 2627 | return NULL; |
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) | |||
3674 | pkt_dev->svlan_p = 0; | 3721 | pkt_dev->svlan_p = 0; |
3675 | pkt_dev->svlan_cfi = 0; | 3722 | pkt_dev->svlan_cfi = 0; |
3676 | pkt_dev->svlan_id = 0xffff; | 3723 | pkt_dev->svlan_id = 0xffff; |
3724 | pkt_dev->node = -1; | ||
3677 | 3725 | ||
3678 | err = pktgen_setup_dev(pkt_dev, ifname); | 3726 | err = pktgen_setup_dev(pkt_dev, ifname); |
3679 | if (err) | 3727 | if (err) |
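Note: taken together, the pktgen hunks above add NUMA-aware skb placement. A new per-device "node" parameter (default -1, i.e. unbound, as initialised in pktgen_add_device above) and a NODE_ALLOC flag select the allocation path in fill_packet_ipv4(): with the flag set, the skb comes from __alloc_skb() on the requested node, falling back to the local node when none was given, and the NET_SKB_PAD headroom plus skb->dev assignment that __netdev_alloc_skb() normally performs are done by hand. The node is driven through the usual proc interface, e.g. writing "node 0" followed by "flag NODE_ALLOC" to the per-device file. A condensed sketch of the node choice (the helper name is illustrative, not from the patch):

	/* Sketch: pick the allocation node for a test skb.
	 * pkt_dev->node == -1 means "no explicit binding"; values are
	 * validated with node_possible() when "node" is written. */
	static int pktgen_pick_node(const struct pktgen_dev *pkt_dev)
	{
		if (pkt_dev->node >= 0)
			return pkt_dev->node;
		return numa_node_id();	/* node of the CPU running this pktgen thread */
	}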
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fe776c9ddeca..455e35aefbc2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -118,7 +118,11 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) | |||
118 | { | 118 | { |
119 | struct rtnl_link *tab; | 119 | struct rtnl_link *tab; |
120 | 120 | ||
121 | tab = rtnl_msg_handlers[protocol]; | 121 | if (protocol < NPROTO) |
122 | tab = rtnl_msg_handlers[protocol]; | ||
123 | else | ||
124 | tab = NULL; | ||
125 | |||
122 | if (tab == NULL || tab[msgindex].doit == NULL) | 126 | if (tab == NULL || tab[msgindex].doit == NULL) |
123 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 127 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
124 | 128 | ||
@@ -129,7 +133,11 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) | |||
129 | { | 133 | { |
130 | struct rtnl_link *tab; | 134 | struct rtnl_link *tab; |
131 | 135 | ||
132 | tab = rtnl_msg_handlers[protocol]; | 136 | if (protocol < NPROTO) |
137 | tab = rtnl_msg_handlers[protocol]; | ||
138 | else | ||
139 | tab = NULL; | ||
140 | |||
133 | if (tab == NULL || tab[msgindex].dumpit == NULL) | 141 | if (tab == NULL || tab[msgindex].dumpit == NULL) |
134 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 142 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
135 | 143 | ||
@@ -600,7 +608,41 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
600 | 608 | ||
601 | a->rx_compressed = b->rx_compressed; | 609 | a->rx_compressed = b->rx_compressed; |
602 | a->tx_compressed = b->tx_compressed; | 610 | a->tx_compressed = b->tx_compressed; |
603 | }; | 611 | } |
612 | |||
613 | static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) | ||
614 | { | ||
615 | struct rtnl_link_stats64 a; | ||
616 | |||
617 | a.rx_packets = b->rx_packets; | ||
618 | a.tx_packets = b->tx_packets; | ||
619 | a.rx_bytes = b->rx_bytes; | ||
620 | a.tx_bytes = b->tx_bytes; | ||
621 | a.rx_errors = b->rx_errors; | ||
622 | a.tx_errors = b->tx_errors; | ||
623 | a.rx_dropped = b->rx_dropped; | ||
624 | a.tx_dropped = b->tx_dropped; | ||
625 | |||
626 | a.multicast = b->multicast; | ||
627 | a.collisions = b->collisions; | ||
628 | |||
629 | a.rx_length_errors = b->rx_length_errors; | ||
630 | a.rx_over_errors = b->rx_over_errors; | ||
631 | a.rx_crc_errors = b->rx_crc_errors; | ||
632 | a.rx_frame_errors = b->rx_frame_errors; | ||
633 | a.rx_fifo_errors = b->rx_fifo_errors; | ||
634 | a.rx_missed_errors = b->rx_missed_errors; | ||
635 | |||
636 | a.tx_aborted_errors = b->tx_aborted_errors; | ||
637 | a.tx_carrier_errors = b->tx_carrier_errors; | ||
638 | a.tx_fifo_errors = b->tx_fifo_errors; | ||
639 | a.tx_heartbeat_errors = b->tx_heartbeat_errors; | ||
640 | a.tx_window_errors = b->tx_window_errors; | ||
641 | |||
642 | a.rx_compressed = b->rx_compressed; | ||
643 | a.tx_compressed = b->tx_compressed; | ||
644 | memcpy(v, &a, sizeof(a)); | ||
645 | } | ||
604 | 646 | ||
605 | static inline int rtnl_vfinfo_size(const struct net_device *dev) | 647 | static inline int rtnl_vfinfo_size(const struct net_device *dev) |
606 | { | 648 | { |
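Note: copy_rtnl_link_stats64() deliberately fills an aligned local and memcpy()s it into the attribute instead of casting nla_data() to struct rtnl_link_stats64 *. Netlink attribute payloads are only guaranteed 4-byte alignment (NLA_ALIGNTO), while this struct holds 64-bit counters, so direct stores could fault on strict-alignment architectures. In miniature:

	/* nla_data(attr) may be only 4-byte aligned; an aligned local
	 * plus memcpy() is safe on every architecture. */
	struct rtnl_link_stats64 a = { .rx_packets = b->rx_packets /* ... */ };
	memcpy(nla_data(attr), &a, sizeof(a));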
@@ -619,6 +661,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
619 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ | 661 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ |
620 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) | 662 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) |
621 | + nla_total_size(sizeof(struct rtnl_link_stats)) | 663 | + nla_total_size(sizeof(struct rtnl_link_stats)) |
664 | + nla_total_size(sizeof(struct rtnl_link_stats64)) | ||
622 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | 665 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
623 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ | 666 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ |
624 | + nla_total_size(4) /* IFLA_TXQLEN */ | 667 | + nla_total_size(4) /* IFLA_TXQLEN */ |
@@ -698,6 +741,12 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
698 | stats = dev_get_stats(dev); | 741 | stats = dev_get_stats(dev); |
699 | copy_rtnl_link_stats(nla_data(attr), stats); | 742 | copy_rtnl_link_stats(nla_data(attr), stats); |
700 | 743 | ||
744 | attr = nla_reserve(skb, IFLA_STATS64, | ||
745 | sizeof(struct rtnl_link_stats64)); | ||
746 | if (attr == NULL) | ||
747 | goto nla_put_failure; | ||
748 | copy_rtnl_link_stats64(nla_data(attr), stats); | ||
749 | |||
701 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { | 750 | if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { |
702 | int i; | 751 | int i; |
703 | struct ifla_vf_info ivi; | 752 | struct ifla_vf_info ivi; |
@@ -1404,9 +1453,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1404 | return 0; | 1453 | return 0; |
1405 | 1454 | ||
1406 | family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; | 1455 | family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; |
1407 | if (family >= NPROTO) | ||
1408 | return -EAFNOSUPPORT; | ||
1409 | |||
1410 | sz_idx = type>>2; | 1456 | sz_idx = type>>2; |
1411 | kind = type&3; | 1457 | kind = type&3; |
1412 | 1458 | ||
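Note: the -EAFNOSUPPORT bail-out removed here is compensated by the bounds checks added to rtnl_get_doit() and rtnl_get_dumpit() earlier in this diff: an out-of-range rtgen_family now yields a NULL table and falls back to the PF_UNSPEC handlers instead of failing the whole request up front.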
@@ -1474,6 +1520,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
1474 | case NETDEV_POST_INIT: | 1520 | case NETDEV_POST_INIT: |
1475 | case NETDEV_REGISTER: | 1521 | case NETDEV_REGISTER: |
1476 | case NETDEV_CHANGE: | 1522 | case NETDEV_CHANGE: |
1523 | case NETDEV_PRE_TYPE_CHANGE: | ||
1477 | case NETDEV_GOING_DOWN: | 1524 | case NETDEV_GOING_DOWN: |
1478 | case NETDEV_UNREGISTER: | 1525 | case NETDEV_UNREGISTER: |
1479 | case NETDEV_UNREGISTER_BATCH: | 1526 | case NETDEV_UNREGISTER_BATCH: |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 93c4e060c91e..4218ff49bf13 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -117,7 +117,7 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = { | |||
117 | * | 117 | * |
118 | * Out of line support code for skb_put(). Not user callable. | 118 | * Out of line support code for skb_put(). Not user callable. |
119 | */ | 119 | */ |
120 | void skb_over_panic(struct sk_buff *skb, int sz, void *here) | 120 | static void skb_over_panic(struct sk_buff *skb, int sz, void *here) |
121 | { | 121 | { |
122 | printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " | 122 | printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " |
123 | "data:%p tail:%#lx end:%#lx dev:%s\n", | 123 | "data:%p tail:%#lx end:%#lx dev:%s\n", |
@@ -126,7 +126,6 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here) | |||
126 | skb->dev ? skb->dev->name : "<NULL>"); | 126 | skb->dev ? skb->dev->name : "<NULL>"); |
127 | BUG(); | 127 | BUG(); |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(skb_over_panic); | ||
130 | 129 | ||
131 | /** | 130 | /** |
132 | * skb_under_panic - private function | 131 | * skb_under_panic - private function |
@@ -137,7 +136,7 @@ EXPORT_SYMBOL(skb_over_panic); | |||
137 | * Out of line support code for skb_push(). Not user callable. | 136 | * Out of line support code for skb_push(). Not user callable. |
138 | */ | 137 | */ |
139 | 138 | ||
140 | void skb_under_panic(struct sk_buff *skb, int sz, void *here) | 139 | static void skb_under_panic(struct sk_buff *skb, int sz, void *here) |
141 | { | 140 | { |
142 | printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " | 141 | printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " |
143 | "data:%p tail:%#lx end:%#lx dev:%s\n", | 142 | "data:%p tail:%#lx end:%#lx dev:%s\n", |
@@ -146,7 +145,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) | |||
146 | skb->dev ? skb->dev->name : "<NULL>"); | 145 | skb->dev ? skb->dev->name : "<NULL>"); |
147 | BUG(); | 146 | BUG(); |
148 | } | 147 | } |
149 | EXPORT_SYMBOL(skb_under_panic); | ||
150 | 148 | ||
151 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few | 149 | /* Allocate a new skbuff. We do this ourselves so we can fill in a few |
152 | * 'private' fields and also do memory statistics to find all the | 150 | * 'private' fields and also do memory statistics to find all the |
@@ -534,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
534 | new->network_header = old->network_header; | 532 | new->network_header = old->network_header; |
535 | new->mac_header = old->mac_header; | 533 | new->mac_header = old->mac_header; |
536 | skb_dst_set(new, dst_clone(skb_dst(old))); | 534 | skb_dst_set(new, dst_clone(skb_dst(old))); |
535 | new->rxhash = old->rxhash; | ||
537 | #ifdef CONFIG_XFRM | 536 | #ifdef CONFIG_XFRM |
538 | new->sp = secpath_get(old->sp); | 537 | new->sp = secpath_get(old->sp); |
539 | #endif | 538 | #endif |
@@ -581,6 +580,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
581 | C(len); | 580 | C(len); |
582 | C(data_len); | 581 | C(data_len); |
583 | C(mac_len); | 582 | C(mac_len); |
583 | C(rxhash); | ||
584 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 584 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
585 | n->cloned = 1; | 585 | n->cloned = 1; |
586 | n->nohdr = 0; | 586 | n->nohdr = 0; |
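Note: propagating rxhash through both __copy_skb_header() and __skb_clone() keeps the receive hash computed for the original packet valid on copies and clones, so RPS/RFS steering does not have to rehash the flow (or silently lose it) for every clone.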
diff --git a/net/core/sock.c b/net/core/sock.c index c5812bbc2cc9..58ebd146ce5a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -364,11 +364,11 @@ EXPORT_SYMBOL(sk_reset_txq); | |||
364 | 364 | ||
365 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) | 365 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) |
366 | { | 366 | { |
367 | struct dst_entry *dst = sk->sk_dst_cache; | 367 | struct dst_entry *dst = __sk_dst_get(sk); |
368 | 368 | ||
369 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | 369 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { |
370 | sk_tx_queue_clear(sk); | 370 | sk_tx_queue_clear(sk); |
371 | sk->sk_dst_cache = NULL; | 371 | rcu_assign_pointer(sk->sk_dst_cache, NULL); |
372 | dst_release(dst); | 372 | dst_release(dst); |
373 | return NULL; | 373 | return NULL; |
374 | } | 374 | } |
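Note: this is part of converting sk_dst_cache from rwlock protection to RCU, with a spinlock kept only for writers (hence the rwlock_init -> spin_lock_init changes below). Publishing NULL through rcu_assign_pointer() keeps lockless readers coherent; the reader side presumably becomes something like:

	/* Assumed shape of the lockless accessor after the conversion;
	 * callers must hold rcu_read_lock() or own the socket. */
	static inline struct dst_entry *__sk_dst_get(struct sock *sk)
	{
		return rcu_dereference_check(sk->sk_dst_cache,
					     rcu_read_lock_held() ||
					     sock_owned_by_user(sk));
	}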
@@ -1157,7 +1157,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1157 | skb_queue_head_init(&newsk->sk_async_wait_queue); | 1157 | skb_queue_head_init(&newsk->sk_async_wait_queue); |
1158 | #endif | 1158 | #endif |
1159 | 1159 | ||
1160 | rwlock_init(&newsk->sk_dst_lock); | 1160 | spin_lock_init(&newsk->sk_dst_lock); |
1161 | rwlock_init(&newsk->sk_callback_lock); | 1161 | rwlock_init(&newsk->sk_callback_lock); |
1162 | lockdep_set_class_and_name(&newsk->sk_callback_lock, | 1162 | lockdep_set_class_and_name(&newsk->sk_callback_lock, |
1163 | af_callback_keys + newsk->sk_family, | 1163 | af_callback_keys + newsk->sk_family, |
@@ -1395,7 +1395,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1395 | if (signal_pending(current)) | 1395 | if (signal_pending(current)) |
1396 | break; | 1396 | break; |
1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1397 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1398 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1398 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) | 1399 | if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) |
1400 | break; | 1400 | break; |
1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) | 1401 | if (sk->sk_shutdown & SEND_SHUTDOWN) |
@@ -1404,7 +1404,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1404 | break; | 1404 | break; |
1405 | timeo = schedule_timeout(timeo); | 1405 | timeo = schedule_timeout(timeo); |
1406 | } | 1406 | } |
1407 | finish_wait(sk->sk_sleep, &wait); | 1407 | finish_wait(sk_sleep(sk), &wait); |
1408 | return timeo; | 1408 | return timeo; |
1409 | } | 1409 | } |
1410 | 1410 | ||
@@ -1570,11 +1570,11 @@ int sk_wait_data(struct sock *sk, long *timeo) | |||
1570 | int rc; | 1570 | int rc; |
1571 | DEFINE_WAIT(wait); | 1571 | DEFINE_WAIT(wait); |
1572 | 1572 | ||
1573 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1573 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1574 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); | 1575 | rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); |
1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1576 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1577 | finish_wait(sk->sk_sleep, &wait); | 1577 | finish_wait(sk_sleep(sk), &wait); |
1578 | return rc; | 1578 | return rc; |
1579 | } | 1579 | } |
1580 | EXPORT_SYMBOL(sk_wait_data); | 1580 | EXPORT_SYMBOL(sk_wait_data); |
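Note: the sk->sk_sleep -> sk_sleep(sk) substitution repeated throughout this diff introduces an accessor in place of direct field access; at this stage it is presumably just a trivial wrapper, roughly:

	/* Assumed definition: a pure accessor today, but the indirection
	 * lets the wait queue later move out of struct sock without
	 * touching the many call sites converted here. */
	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}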
@@ -1798,7 +1798,7 @@ static void sock_def_wakeup(struct sock *sk) | |||
1798 | { | 1798 | { |
1799 | read_lock(&sk->sk_callback_lock); | 1799 | read_lock(&sk->sk_callback_lock); |
1800 | if (sk_has_sleeper(sk)) | 1800 | if (sk_has_sleeper(sk)) |
1801 | wake_up_interruptible_all(sk->sk_sleep); | 1801 | wake_up_interruptible_all(sk_sleep(sk)); |
1802 | read_unlock(&sk->sk_callback_lock); | 1802 | read_unlock(&sk->sk_callback_lock); |
1803 | } | 1803 | } |
1804 | 1804 | ||
@@ -1806,7 +1806,7 @@ static void sock_def_error_report(struct sock *sk) | |||
1806 | { | 1806 | { |
1807 | read_lock(&sk->sk_callback_lock); | 1807 | read_lock(&sk->sk_callback_lock); |
1808 | if (sk_has_sleeper(sk)) | 1808 | if (sk_has_sleeper(sk)) |
1809 | wake_up_interruptible_poll(sk->sk_sleep, POLLERR); | 1809 | wake_up_interruptible_poll(sk_sleep(sk), POLLERR); |
1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); | 1810 | sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); |
1811 | read_unlock(&sk->sk_callback_lock); | 1811 | read_unlock(&sk->sk_callback_lock); |
1812 | } | 1812 | } |
@@ -1815,7 +1815,7 @@ static void sock_def_readable(struct sock *sk, int len) | |||
1815 | { | 1815 | { |
1816 | read_lock(&sk->sk_callback_lock); | 1816 | read_lock(&sk->sk_callback_lock); |
1817 | if (sk_has_sleeper(sk)) | 1817 | if (sk_has_sleeper(sk)) |
1818 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | | 1818 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN | |
1819 | POLLRDNORM | POLLRDBAND); | 1819 | POLLRDNORM | POLLRDBAND); |
1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | 1820 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); |
1821 | read_unlock(&sk->sk_callback_lock); | 1821 | read_unlock(&sk->sk_callback_lock); |
@@ -1830,7 +1830,7 @@ static void sock_def_write_space(struct sock *sk) | |||
1830 | */ | 1830 | */ |
1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { | 1831 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { |
1832 | if (sk_has_sleeper(sk)) | 1832 | if (sk_has_sleeper(sk)) |
1833 | wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | | 1833 | wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT | |
1834 | POLLWRNORM | POLLWRBAND); | 1834 | POLLWRNORM | POLLWRBAND); |
1835 | 1835 | ||
1836 | /* Should agree with poll, otherwise some programs break */ | 1836 | /* Should agree with poll, otherwise some programs break */ |
@@ -1898,7 +1898,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1898 | } else | 1898 | } else |
1899 | sk->sk_sleep = NULL; | 1899 | sk->sk_sleep = NULL; |
1900 | 1900 | ||
1901 | rwlock_init(&sk->sk_dst_lock); | 1901 | spin_lock_init(&sk->sk_dst_lock); |
1902 | rwlock_init(&sk->sk_callback_lock); | 1902 | rwlock_init(&sk->sk_callback_lock); |
1903 | lockdep_set_class_and_name(&sk->sk_callback_lock, | 1903 | lockdep_set_class_and_name(&sk->sk_callback_lock, |
1904 | af_callback_keys + sk->sk_family, | 1904 | af_callback_keys + sk->sk_family, |
diff --git a/net/core/stream.c b/net/core/stream.c index a37debfeb1b2..7b3c3f30b107 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
@@ -32,8 +32,8 @@ void sk_stream_write_space(struct sock *sk) | |||
32 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { | 32 | if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { |
33 | clear_bit(SOCK_NOSPACE, &sock->flags); | 33 | clear_bit(SOCK_NOSPACE, &sock->flags); |
34 | 34 | ||
35 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 35 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
36 | wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | | 36 | wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | |
37 | POLLWRNORM | POLLWRBAND); | 37 | POLLWRNORM | POLLWRBAND); |
38 | if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) | 38 | if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) |
39 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); | 39 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); |
@@ -66,13 +66,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) | |||
66 | if (signal_pending(tsk)) | 66 | if (signal_pending(tsk)) |
67 | return sock_intr_errno(*timeo_p); | 67 | return sock_intr_errno(*timeo_p); |
68 | 68 | ||
69 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 69 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
70 | sk->sk_write_pending++; | 70 | sk->sk_write_pending++; |
71 | done = sk_wait_event(sk, timeo_p, | 71 | done = sk_wait_event(sk, timeo_p, |
72 | !sk->sk_err && | 72 | !sk->sk_err && |
73 | !((1 << sk->sk_state) & | 73 | !((1 << sk->sk_state) & |
74 | ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); | 74 | ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); |
75 | finish_wait(sk->sk_sleep, &wait); | 75 | finish_wait(sk_sleep(sk), &wait); |
76 | sk->sk_write_pending--; | 76 | sk->sk_write_pending--; |
77 | } while (!done); | 77 | } while (!done); |
78 | return 0; | 78 | return 0; |
@@ -96,13 +96,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout) | |||
96 | DEFINE_WAIT(wait); | 96 | DEFINE_WAIT(wait); |
97 | 97 | ||
98 | do { | 98 | do { |
99 | prepare_to_wait(sk->sk_sleep, &wait, | 99 | prepare_to_wait(sk_sleep(sk), &wait, |
100 | TASK_INTERRUPTIBLE); | 100 | TASK_INTERRUPTIBLE); |
101 | if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) | 101 | if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) |
102 | break; | 102 | break; |
103 | } while (!signal_pending(current) && timeout); | 103 | } while (!signal_pending(current) && timeout); |
104 | 104 | ||
105 | finish_wait(sk->sk_sleep, &wait); | 105 | finish_wait(sk_sleep(sk), &wait); |
106 | } | 106 | } |
107 | } | 107 | } |
108 | 108 | ||
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
126 | while (1) { | 126 | while (1) { |
127 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 127 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); |
128 | 128 | ||
129 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 129 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
130 | 130 | ||
131 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 131 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
132 | goto do_error; | 132 | goto do_error; |
@@ -157,7 +157,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
157 | *timeo_p = current_timeo; | 157 | *timeo_p = current_timeo; |
158 | } | 158 | } |
159 | out: | 159 | out: |
160 | finish_wait(sk->sk_sleep, &wait); | 160 | finish_wait(sk_sleep(sk), &wait); |
161 | return err; | 161 | return err; |
162 | 162 | ||
163 | do_error: | 163 | do_error: |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index b7b6b8208f75..dcc7d25996ab 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -11,12 +11,72 @@ | |||
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/ratelimit.h> | 13 | #include <linux/ratelimit.h> |
14 | #include <linux/vmalloc.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
16 | 17 | ||
17 | #include <net/ip.h> | 18 | #include <net/ip.h> |
18 | #include <net/sock.h> | 19 | #include <net/sock.h> |
19 | 20 | ||
21 | #ifdef CONFIG_RPS | ||
22 | static int rps_sock_flow_sysctl(ctl_table *table, int write, | ||
23 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
24 | { | ||
25 | unsigned int orig_size, size; | ||
26 | int ret, i; | ||
27 | ctl_table tmp = { | ||
28 | .data = &size, | ||
29 | .maxlen = sizeof(size), | ||
30 | .mode = table->mode | ||
31 | }; | ||
32 | struct rps_sock_flow_table *orig_sock_table, *sock_table; | ||
33 | static DEFINE_MUTEX(sock_flow_mutex); | ||
34 | |||
35 | mutex_lock(&sock_flow_mutex); | ||
36 | |||
37 | orig_sock_table = rps_sock_flow_table; | ||
38 | size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; | ||
39 | |||
40 | ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); | ||
41 | |||
42 | if (write) { | ||
43 | if (size) { | ||
44 | if (size > 1<<30) { | ||
45 | /* Enforce limit to prevent overflow */ | ||
46 | mutex_unlock(&sock_flow_mutex); | ||
47 | return -EINVAL; | ||
48 | } | ||
49 | size = roundup_pow_of_two(size); | ||
50 | if (size != orig_size) { | ||
51 | sock_table = | ||
52 | vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size)); | ||
53 | if (!sock_table) { | ||
54 | mutex_unlock(&sock_flow_mutex); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | |||
58 | sock_table->mask = size - 1; | ||
59 | } else | ||
60 | sock_table = orig_sock_table; | ||
61 | |||
62 | for (i = 0; i < size; i++) | ||
63 | sock_table->ents[i] = RPS_NO_CPU; | ||
64 | } else | ||
65 | sock_table = NULL; | ||
66 | |||
67 | if (sock_table != orig_sock_table) { | ||
68 | rcu_assign_pointer(rps_sock_flow_table, sock_table); | ||
69 | synchronize_rcu(); | ||
70 | vfree(orig_sock_table); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | mutex_unlock(&sock_flow_mutex); | ||
75 | |||
76 | return ret; | ||
77 | } | ||
78 | #endif /* CONFIG_RPS */ | ||
79 | |||
20 | static struct ctl_table net_core_table[] = { | 80 | static struct ctl_table net_core_table[] = { |
21 | #ifdef CONFIG_NET | 81 | #ifdef CONFIG_NET |
22 | { | 82 | { |
@@ -82,6 +142,14 @@ static struct ctl_table net_core_table[] = { | |||
82 | .mode = 0644, | 142 | .mode = 0644, |
83 | .proc_handler = proc_dointvec | 143 | .proc_handler = proc_dointvec |
84 | }, | 144 | }, |
145 | #ifdef CONFIG_RPS | ||
146 | { | ||
147 | .procname = "rps_sock_flow_entries", | ||
148 | .maxlen = sizeof(int), | ||
149 | .mode = 0644, | ||
150 | .proc_handler = rps_sock_flow_sysctl | ||
151 | }, | ||
152 | #endif | ||
85 | #endif /* CONFIG_NET */ | 153 | #endif /* CONFIG_NET */ |
86 | { | 154 | { |
87 | .procname = "netdev_budget", | 155 | .procname = "netdev_budget", |
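Note: rps_sock_flow_sysctl() sizes the global RFS flow table. The requested entry count is rounded up to a power of two and capped at 2^30 to keep RPS_SOCK_FLOW_TABLE_SIZE() from overflowing; the new table is published with rcu_assign_pointer() and the old one freed only after synchronize_rcu(), so lockless readers never see a vanished table. Administrators drive it through the new net.core.rps_sock_flow_entries sysctl. The power-of-two sizing turns modulo into a mask (variable names illustrative):

	unsigned int idx = flow_hash & sock_table->mask;	/* mask == size - 1 */
	sock_table->ents[idx] = cpu;				/* or RPS_NO_CPU when idle */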
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index bcd7632299f5..d3235899c7e3 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
208 | goto restart_timer; | 208 | goto restart_timer; |
209 | } | 209 | } |
210 | 210 | ||
211 | ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, | 211 | ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk, |
212 | ccid3_tx_state_name(hc->tx_state)); | 212 | ccid3_tx_state_name(hc->tx_state)); |
213 | 213 | ||
214 | if (hc->tx_state == TFRC_SSTATE_FBACK) | 214 | if (hc->tx_state == TFRC_SSTATE_FBACK) |
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 5ef32c2f0d6a..a10a61a1ded2 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -189,7 +189,7 @@ enum { | |||
189 | #define DCCP_MIB_MAX __DCCP_MIB_MAX | 189 | #define DCCP_MIB_MAX __DCCP_MIB_MAX |
190 | struct dccp_mib { | 190 | struct dccp_mib { |
191 | unsigned long mibs[DCCP_MIB_MAX]; | 191 | unsigned long mibs[DCCP_MIB_MAX]; |
192 | } __SNMP_MIB_ALIGN__; | 192 | }; |
193 | 193 | ||
194 | DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); | 194 | DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); |
195 | #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) | 195 | #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) |
@@ -223,7 +223,7 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb) | |||
223 | skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); | 223 | skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); |
224 | } | 224 | } |
225 | 225 | ||
226 | extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | 226 | extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb); |
227 | 227 | ||
228 | extern int dccp_retransmit_skb(struct sock *sk); | 228 | extern int dccp_retransmit_skb(struct sock *sk); |
229 | 229 | ||
diff --git a/net/dccp/input.c b/net/dccp/input.c index 9ec717426024..58f7bc156850 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -415,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, | |||
415 | if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, | 415 | if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, |
416 | dp->dccps_awl, dp->dccps_awh)) { | 416 | dp->dccps_awl, dp->dccps_awh)) { |
417 | dccp_pr_debug("invalid ackno: S.AWL=%llu, " | 417 | dccp_pr_debug("invalid ackno: S.AWL=%llu, " |
418 | "P.ackno=%llu, S.AWH=%llu \n", | 418 | "P.ackno=%llu, S.AWH=%llu\n", |
419 | (unsigned long long)dp->dccps_awl, | 419 | (unsigned long long)dp->dccps_awl, |
420 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, | 420 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, |
421 | (unsigned long long)dp->dccps_awh); | 421 | (unsigned long long)dp->dccps_awh); |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 52ffa1cde15a..d9b11ef8694c 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -349,7 +349,7 @@ static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb, | |||
349 | return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); | 349 | return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); |
350 | } | 350 | } |
351 | 351 | ||
352 | void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) | 352 | void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) |
353 | { | 353 | { |
354 | const struct inet_sock *inet = inet_sk(sk); | 354 | const struct inet_sock *inet = inet_sk(sk); |
355 | struct dccp_hdr *dh = dccp_hdr(skb); | 355 | struct dccp_hdr *dh = dccp_hdr(skb); |
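Note: the second parameter of the send_check callback was never used by DCCP, so it is dropped here and in the IPv6 variant below; the caller in dccp_transmit_skb() (net/dccp/output.c further down) is updated to match, with the queue_xmit callback likewise losing its unused second argument.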
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3b11e41a2929..091698899594 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -60,8 +60,7 @@ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, | |||
60 | return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); | 60 | return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); |
61 | } | 61 | } |
62 | 62 | ||
63 | static inline void dccp_v6_send_check(struct sock *sk, int unused_value, | 63 | static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) |
64 | struct sk_buff *skb) | ||
65 | { | 64 | { |
66 | struct ipv6_pinfo *np = inet6_sk(sk); | 65 | struct ipv6_pinfo *np = inet6_sk(sk); |
67 | struct dccp_hdr *dh = dccp_hdr(skb); | 66 | struct dccp_hdr *dh = dccp_hdr(skb); |
@@ -293,7 +292,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
293 | &ireq6->loc_addr, | 292 | &ireq6->loc_addr, |
294 | &ireq6->rmt_addr); | 293 | &ireq6->rmt_addr); |
295 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 294 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
296 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 295 | err = ip6_xmit(sk, skb, &fl, opt); |
297 | err = net_xmit_eval(err); | 296 | err = net_xmit_eval(err); |
298 | } | 297 | } |
299 | 298 | ||
@@ -348,7 +347,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
348 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { | 347 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { |
349 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { | 348 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { |
350 | skb_dst_set(skb, dst); | 349 | skb_dst_set(skb, dst); |
351 | ip6_xmit(ctl_sk, skb, &fl, NULL, 0); | 350 | ip6_xmit(ctl_sk, skb, &fl, NULL); |
352 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | 351 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); |
353 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | 352 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); |
354 | return; | 353 | return; |
diff --git a/net/dccp/output.c b/net/dccp/output.c index fc3f436440b4..2d3dcb39851f 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -129,14 +129,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
129 | break; | 129 | break; |
130 | } | 130 | } |
131 | 131 | ||
132 | icsk->icsk_af_ops->send_check(sk, 0, skb); | 132 | icsk->icsk_af_ops->send_check(sk, skb); |
133 | 133 | ||
134 | if (set_ack) | 134 | if (set_ack) |
135 | dccp_event_ack_sent(sk); | 135 | dccp_event_ack_sent(sk); |
136 | 136 | ||
137 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 137 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
138 | 138 | ||
139 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 139 | err = icsk->icsk_af_ops->queue_xmit(skb); |
140 | return net_xmit_eval(err); | 140 | return net_xmit_eval(err); |
141 | } | 141 | } |
142 | return -ENOBUFS; | 142 | return -ENOBUFS; |
@@ -198,7 +198,7 @@ void dccp_write_space(struct sock *sk) | |||
198 | read_lock(&sk->sk_callback_lock); | 198 | read_lock(&sk->sk_callback_lock); |
199 | 199 | ||
200 | if (sk_has_sleeper(sk)) | 200 | if (sk_has_sleeper(sk)) |
201 | wake_up_interruptible(sk->sk_sleep); | 201 | wake_up_interruptible(sk_sleep(sk)); |
202 | /* Should agree with poll, otherwise some programs break */ | 202 | /* Should agree with poll, otherwise some programs break */ |
203 | if (sock_writeable(sk)) | 203 | if (sock_writeable(sk)) |
204 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 204 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
@@ -225,7 +225,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) | |||
225 | dccp_pr_debug("delayed send by %d msec\n", delay); | 225 | dccp_pr_debug("delayed send by %d msec\n", delay); |
226 | jiffdelay = msecs_to_jiffies(delay); | 226 | jiffdelay = msecs_to_jiffies(delay); |
227 | 227 | ||
228 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 228 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
229 | 229 | ||
230 | sk->sk_write_pending++; | 230 | sk->sk_write_pending++; |
231 | release_sock(sk); | 231 | release_sock(sk); |
@@ -241,7 +241,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) | |||
241 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); | 241 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
242 | } while ((delay = rc) > 0); | 242 | } while ((delay = rc) > 0); |
243 | out: | 243 | out: |
244 | finish_wait(sk->sk_sleep, &wait); | 244 | finish_wait(sk_sleep(sk), &wait); |
245 | return rc; | 245 | return rc; |
246 | 246 | ||
247 | do_error: | 247 | do_error: |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a0e38d8018f5..b03ecf6b2bb0 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -312,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, | |||
312 | unsigned int mask; | 312 | unsigned int mask; |
313 | struct sock *sk = sock->sk; | 313 | struct sock *sk = sock->sk; |
314 | 314 | ||
315 | sock_poll_wait(file, sk->sk_sleep, wait); | 315 | sock_poll_wait(file, sk_sleep(sk), wait); |
316 | if (sk->sk_state == DCCP_LISTEN) | 316 | if (sk->sk_state == DCCP_LISTEN) |
317 | return inet_csk_listen_poll(sk); | 317 | return inet_csk_listen_poll(sk); |
318 | 318 | ||
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index bbfeb5eae46a..1a9aa05d4dc4 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
38 | 38 | ||
39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { | 39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
40 | if (icsk->icsk_retransmits != 0) | 40 | if (icsk->icsk_retransmits != 0) |
41 | dst_negative_advice(&sk->sk_dst_cache, sk); | 41 | dst_negative_advice(sk); |
42 | retry_until = icsk->icsk_syn_retries ? | 42 | retry_until = icsk->icsk_syn_retries ? |
43 | : sysctl_dccp_request_retries; | 43 | : sysctl_dccp_request_retries; |
44 | } else { | 44 | } else { |
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
63 | Golden words :-). | 63 | Golden words :-). |
64 | */ | 64 | */ |
65 | 65 | ||
66 | dst_negative_advice(&sk->sk_dst_cache, sk); | 66 | dst_negative_advice(sk); |
67 | } | 67 | } |
68 | 68 | ||
69 | retry_until = sysctl_dccp_retries2; | 69 | retry_until = sysctl_dccp_retries2; |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 2b494fac9468..d6b93d19790f 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -446,7 +446,7 @@ static void dn_destruct(struct sock *sk) | |||
446 | skb_queue_purge(&scp->other_xmit_queue); | 446 | skb_queue_purge(&scp->other_xmit_queue); |
447 | skb_queue_purge(&scp->other_receive_queue); | 447 | skb_queue_purge(&scp->other_receive_queue); |
448 | 448 | ||
449 | dst_release(xchg(&sk->sk_dst_cache, NULL)); | 449 | dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); |
450 | } | 450 | } |
451 | 451 | ||
452 | static int dn_memory_pressure; | 452 | static int dn_memory_pressure; |
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) | |||
832 | scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); | 832 | scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); |
833 | dn_send_conn_conf(sk, allocation); | 833 | dn_send_conn_conf(sk, allocation); |
834 | 834 | ||
835 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 835 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
836 | for(;;) { | 836 | for(;;) { |
837 | release_sock(sk); | 837 | release_sock(sk); |
838 | if (scp->state == DN_CC) | 838 | if (scp->state == DN_CC) |
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) | |||
850 | err = -EAGAIN; | 850 | err = -EAGAIN; |
851 | if (!*timeo) | 851 | if (!*timeo) |
852 | break; | 852 | break; |
853 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 853 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
854 | } | 854 | } |
855 | finish_wait(sk->sk_sleep, &wait); | 855 | finish_wait(sk_sleep(sk), &wait); |
856 | if (err == 0) { | 856 | if (err == 0) { |
857 | sk->sk_socket->state = SS_CONNECTED; | 857 | sk->sk_socket->state = SS_CONNECTED; |
858 | } else if (scp->state != DN_CC) { | 858 | } else if (scp->state != DN_CC) { |
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo) | |||
873 | if (!*timeo) | 873 | if (!*timeo) |
874 | return -EALREADY; | 874 | return -EALREADY; |
875 | 875 | ||
876 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 876 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
877 | for(;;) { | 877 | for(;;) { |
878 | release_sock(sk); | 878 | release_sock(sk); |
879 | if (scp->state == DN_CI || scp->state == DN_CC) | 879 | if (scp->state == DN_CI || scp->state == DN_CC) |
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo) | |||
891 | err = -ETIMEDOUT; | 891 | err = -ETIMEDOUT; |
892 | if (!*timeo) | 892 | if (!*timeo) |
893 | break; | 893 | break; |
894 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 894 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
895 | } | 895 | } |
896 | finish_wait(sk->sk_sleep, &wait); | 896 | finish_wait(sk_sleep(sk), &wait); |
897 | out: | 897 | out: |
898 | if (err == 0) { | 898 | if (err == 0) { |
899 | sk->sk_socket->state = SS_CONNECTED; | 899 | sk->sk_socket->state = SS_CONNECTED; |
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) | |||
1040 | struct sk_buff *skb = NULL; | 1040 | struct sk_buff *skb = NULL; |
1041 | int err = 0; | 1041 | int err = 0; |
1042 | 1042 | ||
1043 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1043 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1044 | for(;;) { | 1044 | for(;;) { |
1045 | release_sock(sk); | 1045 | release_sock(sk); |
1046 | skb = skb_dequeue(&sk->sk_receive_queue); | 1046 | skb = skb_dequeue(&sk->sk_receive_queue); |
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) | |||
1060 | err = -EAGAIN; | 1060 | err = -EAGAIN; |
1061 | if (!*timeo) | 1061 | if (!*timeo) |
1062 | break; | 1062 | break; |
1063 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1063 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1064 | } | 1064 | } |
1065 | finish_wait(sk->sk_sleep, &wait); | 1065 | finish_wait(sk_sleep(sk), &wait); |
1066 | 1066 | ||
1067 | return skb == NULL ? ERR_PTR(err) : skb; | 1067 | return skb == NULL ? ERR_PTR(err) : skb; |
1068 | } | 1068 | } |
@@ -1105,7 +1105,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1105 | release_sock(sk); | 1105 | release_sock(sk); |
1106 | 1106 | ||
1107 | dst = skb_dst(skb); | 1107 | dst = skb_dst(skb); |
1108 | dst_release(xchg(&newsk->sk_dst_cache, dst)); | 1108 | sk_dst_set(newsk, dst); |
1109 | skb_dst_set(skb, NULL); | 1109 | skb_dst_set(skb, NULL); |
1110 | 1110 | ||
1111 | DN_SK(newsk)->state = DN_CR; | 1111 | DN_SK(newsk)->state = DN_CR; |
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1746 | goto out; | 1746 | goto out; |
1747 | } | 1747 | } |
1748 | 1748 | ||
1749 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1749 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1750 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1750 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); | 1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); |
1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1753 | finish_wait(sk->sk_sleep, &wait); | 1753 | finish_wait(sk_sleep(sk), &wait); |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | skb_queue_walk_safe(queue, skb, n) { | 1756 | skb_queue_walk_safe(queue, skb, n) { |
@@ -1956,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1956 | } | 1956 | } |
1957 | 1957 | ||
1958 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) | 1958 | if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) |
1959 | dst_negative_advice(&sk->sk_dst_cache, sk); | 1959 | dst_negative_advice(sk); |
1960 | 1960 | ||
1961 | mss = scp->segsize_rem; | 1961 | mss = scp->segsize_rem; |
1962 | fctype = scp->services_rem & NSP_FC_MASK; | 1962 | fctype = scp->services_rem & NSP_FC_MASK; |
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
2003 | goto out; | 2003 | goto out; |
2004 | } | 2004 | } |
2005 | 2005 | ||
2006 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 2006 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2007 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2007 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
2008 | sk_wait_event(sk, &timeo, | 2008 | sk_wait_event(sk, &timeo, |
2009 | !dn_queue_too_long(scp, queue, flags)); | 2009 | !dn_queue_too_long(scp, queue, flags)); |
2010 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2010 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
2011 | finish_wait(sk->sk_sleep, &wait); | 2011 | finish_wait(sk_sleep(sk), &wait); |
2012 | continue; | 2012 | continue; |
2013 | } | 2013 | } |
2014 | 2014 | ||
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index cead68eb254c..615dbe3b43f9 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -350,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de | |||
350 | if (dn_db->dev->type == ARPHRD_ETHER) { | 350 | if (dn_db->dev->type == ARPHRD_ETHER) { |
351 | if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { | 351 | if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { |
352 | dn_dn2eth(mac_addr, ifa1->ifa_local); | 352 | dn_dn2eth(mac_addr, ifa1->ifa_local); |
353 | dev_mc_delete(dev, mac_addr, ETH_ALEN, 0); | 353 | dev_mc_del(dev, mac_addr); |
354 | } | 354 | } |
355 | } | 355 | } |
356 | 356 | ||
@@ -381,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) | |||
381 | if (dev->type == ARPHRD_ETHER) { | 381 | if (dev->type == ARPHRD_ETHER) { |
382 | if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { | 382 | if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { |
383 | dn_dn2eth(mac_addr, ifa->ifa_local); | 383 | dn_dn2eth(mac_addr, ifa->ifa_local); |
384 | dev_mc_add(dev, mac_addr, ETH_ALEN, 0); | 384 | dev_mc_add(dev, mac_addr); |
385 | } | 385 | } |
386 | } | 386 | } |
387 | 387 | ||
@@ -1001,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev) | |||
1001 | struct dn_dev *dn_db = dev->dn_ptr; | 1001 | struct dn_dev *dn_db = dev->dn_ptr; |
1002 | 1002 | ||
1003 | if (dn_db->parms.forwarding == 0) | 1003 | if (dn_db->parms.forwarding == 0) |
1004 | dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); | 1004 | dev_mc_add(dev, dn_rt_all_end_mcast); |
1005 | else | 1005 | else |
1006 | dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); | 1006 | dev_mc_add(dev, dn_rt_all_rt_mcast); |
1007 | 1007 | ||
1008 | dn_db->use_long = 1; | 1008 | dn_db->use_long = 1; |
1009 | 1009 | ||
@@ -1015,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev) | |||
1015 | struct dn_dev *dn_db = dev->dn_ptr; | 1015 | struct dn_dev *dn_db = dev->dn_ptr; |
1016 | 1016 | ||
1017 | if (dn_db->parms.forwarding == 0) | 1017 | if (dn_db->parms.forwarding == 0) |
1018 | dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); | 1018 | dev_mc_del(dev, dn_rt_all_end_mcast); |
1019 | else | 1019 | else |
1020 | dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); | 1020 | dev_mc_del(dev, dn_rt_all_rt_mcast); |
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | static void dn_dev_set_timer(struct net_device *dev); | 1023 | static void dn_dev_set_timer(struct net_device *dev); |
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 7466c546f286..af28dcc21844 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
@@ -196,7 +196,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
196 | { | 196 | { |
197 | struct dn_fib_rule *r = (struct dn_fib_rule *)rule; | 197 | struct dn_fib_rule *r = (struct dn_fib_rule *)rule; |
198 | 198 | ||
199 | frh->family = AF_DECnet; | ||
200 | frh->dst_len = r->dst_len; | 199 | frh->dst_len = r->dst_len; |
201 | frh->src_len = r->src_len; | 200 | frh->src_len = r->src_len; |
202 | frh->tos = 0; | 201 | frh->tos = 0; |
@@ -212,30 +211,13 @@ nla_put_failure: | |||
212 | return -ENOBUFS; | 211 | return -ENOBUFS; |
213 | } | 212 | } |
214 | 213 | ||
215 | static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops) | ||
216 | { | ||
217 | struct list_head *pos; | ||
218 | struct fib_rule *rule; | ||
219 | |||
220 | if (!list_empty(&dn_fib_rules_ops->rules_list)) { | ||
221 | pos = dn_fib_rules_ops->rules_list.next; | ||
222 | if (pos->next != &dn_fib_rules_ops->rules_list) { | ||
223 | rule = list_entry(pos->next, struct fib_rule, list); | ||
224 | if (rule->pref) | ||
225 | return rule->pref - 1; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) | 214 | static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) |
233 | { | 215 | { |
234 | dn_rt_cache_flush(-1); | 216 | dn_rt_cache_flush(-1); |
235 | } | 217 | } |
236 | 218 | ||
237 | static struct fib_rules_ops dn_fib_rules_ops_template = { | 219 | static struct fib_rules_ops dn_fib_rules_ops_template = { |
238 | .family = AF_DECnet, | 220 | .family = FIB_RULES_DECNET, |
239 | .rule_size = sizeof(struct dn_fib_rule), | 221 | .rule_size = sizeof(struct dn_fib_rule), |
240 | .addr_size = sizeof(u16), | 222 | .addr_size = sizeof(u16), |
241 | .action = dn_fib_rule_action, | 223 | .action = dn_fib_rule_action, |
@@ -243,7 +225,7 @@ static struct fib_rules_ops dn_fib_rules_ops_template = { | |||
243 | .configure = dn_fib_rule_configure, | 225 | .configure = dn_fib_rule_configure, |
244 | .compare = dn_fib_rule_compare, | 226 | .compare = dn_fib_rule_compare, |
245 | .fill = dn_fib_rule_fill, | 227 | .fill = dn_fib_rule_fill, |
246 | .default_pref = dn_fib_rule_default_pref, | 228 | .default_pref = fib_default_rule_pref, |
247 | .flush_cache = dn_fib_rule_flush_cache, | 229 | .flush_cache = dn_fib_rule_flush_cache, |
248 | .nlgroup = RTNLGRP_DECnet_RULE, | 230 | .nlgroup = RTNLGRP_DECnet_RULE, |
249 | .policy = dn_fib_rule_policy, | 231 | .policy = dn_fib_rule_policy, |
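Note: the hand-rolled dn_fib_rule_default_pref() is dropped in favour of the shared fib_default_rule_pref() helper, and the family tag becomes the new FIB_RULES_DECNET constant instead of the raw address family. Judging from the removed code, the shared helper implements the same policy, roughly:

	/* Assumed logic of the shared helper, mirroring the removed
	 * DECnet version: one less than the preference of the first
	 * user-installed rule, or 0 when none qualifies. */
	u32 fib_default_rule_pref(struct fib_rules_ops *ops)
	{
		struct list_head *pos;
		struct fib_rule *rule;

		if (!list_empty(&ops->rules_list)) {
			pos = ops->rules_list.next;
			if (pos->next != &ops->rules_list) {
				rule = list_entry(pos->next, struct fib_rule, list);
				if (rule->pref)
					return rule->pref - 1;
			}
		}
		return 0;
	}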
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 2175e6d5cc8d..8fdca56bb08f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev) | |||
67 | return -ENETDOWN; | 67 | return -ENETDOWN; |
68 | 68 | ||
69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { | 69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { |
70 | err = dev_unicast_add(master, dev->dev_addr); | 70 | err = dev_uc_add(master, dev->dev_addr); |
71 | if (err < 0) | 71 | if (err < 0) |
72 | goto out; | 72 | goto out; |
73 | } | 73 | } |
@@ -90,7 +90,7 @@ clear_allmulti: | |||
90 | dev_set_allmulti(master, -1); | 90 | dev_set_allmulti(master, -1); |
91 | del_unicast: | 91 | del_unicast: |
92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
93 | dev_unicast_delete(master, dev->dev_addr); | 93 | dev_uc_del(master, dev->dev_addr); |
94 | out: | 94 | out: |
95 | return err; | 95 | return err; |
96 | } | 96 | } |
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev) | |||
101 | struct net_device *master = p->parent->dst->master_netdev; | 101 | struct net_device *master = p->parent->dst->master_netdev; |
102 | 102 | ||
103 | dev_mc_unsync(master, dev); | 103 | dev_mc_unsync(master, dev); |
104 | dev_unicast_unsync(master, dev); | 104 | dev_uc_unsync(master, dev); |
105 | if (dev->flags & IFF_ALLMULTI) | 105 | if (dev->flags & IFF_ALLMULTI) |
106 | dev_set_allmulti(master, -1); | 106 | dev_set_allmulti(master, -1); |
107 | if (dev->flags & IFF_PROMISC) | 107 | if (dev->flags & IFF_PROMISC) |
108 | dev_set_promiscuity(master, -1); | 108 | dev_set_promiscuity(master, -1); |
109 | 109 | ||
110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
111 | dev_unicast_delete(master, dev->dev_addr); | 111 | dev_uc_del(master, dev->dev_addr); |
112 | 112 | ||
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev) | |||
130 | struct net_device *master = p->parent->dst->master_netdev; | 130 | struct net_device *master = p->parent->dst->master_netdev; |
131 | 131 | ||
132 | dev_mc_sync(master, dev); | 132 | dev_mc_sync(master, dev); |
133 | dev_unicast_sync(master, dev); | 133 | dev_uc_sync(master, dev); |
134 | } | 134 | } |
135 | 135 | ||
136 | static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | 136 | static int dsa_slave_set_mac_address(struct net_device *dev, void *a) |
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | |||
147 | goto out; | 147 | goto out; |
148 | 148 | ||
149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { | 149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { |
150 | err = dev_unicast_add(master, addr->sa_data); | 150 | err = dev_uc_add(master, addr->sa_data); |
151 | if (err < 0) | 151 | if (err < 0) |
152 | return err; | 152 | return err; |
153 | } | 153 | } |
154 | 154 | ||
155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
156 | dev_unicast_delete(master, dev->dev_addr); | 156 | dev_uc_del(master, dev->dev_addr); |
157 | 157 | ||
158 | out: | 158 | out: |
159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 205a1c12f3c0..0c0d272a9888 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb) | |||
136 | default: | 136 | default: |
137 | printk(KERN_DEBUG | 137 | printk(KERN_DEBUG |
138 | "%s: unable to resolve type %X addresses.\n", | 138 | "%s: unable to resolve type %X addresses.\n", |
139 | dev->name, (int)eth->h_proto); | 139 | dev->name, ntohs(eth->h_proto)); |
140 | 140 | ||
141 | memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); | 141 | memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); |
142 | break; | 142 | break; |
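Note: eth->h_proto is a big-endian __be16, so printing it through a plain int cast shows byte-swapped values on little-endian hosts; with ntohs() the log shows the conventional EtherType. For example:

	__be16 proto = htons(ETH_P_IP);			/* 0x0800 on the wire */
	printk(KERN_DEBUG "type %X\n", ntohs(proto));	/* "type 800" on every arch */
	/* the old (int)eth->h_proto would have printed "8" on x86 */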
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 0c94a1ac2946..8e3a1fd938ab 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -250,6 +250,20 @@ config IP_MROUTE | |||
250 | <file:Documentation/networking/multicast.txt>. If you haven't heard | 250 | <file:Documentation/networking/multicast.txt>. If you haven't heard |
251 | about it, you don't need it. | 251 | about it, you don't need it. |
252 | 252 | ||
253 | config IP_MROUTE_MULTIPLE_TABLES | ||
254 | bool "IP: multicast policy routing" | ||
255 | depends on IP_MROUTE && IP_ADVANCED_ROUTER | ||
256 | select FIB_RULES | ||
257 | help | ||
258 | Normally, a multicast router runs a userspace daemon and decides | ||
259 | what to do with a multicast packet based on the source and | ||
260 | destination addresses. If you say Y here, the multicast router | ||
261 | will also be able to take interfaces and packet marks into | ||
262 | account and run multiple instances of userspace daemons | ||
263 | simultaneously, each one handling a single table. | ||
264 | |||
265 | If unsure, say N. | ||
266 | |||
253 | config IP_PIMSM_V1 | 267 | config IP_PIMSM_V1 |
254 | bool "IP: PIM-SM version 1 support" | 268 | bool "IP: PIM-SM version 1 support" |
255 | depends on IP_MROUTE | 269 | depends on IP_MROUTE |
@@ -587,9 +601,15 @@ choice | |||
587 | config DEFAULT_HTCP | 601 | config DEFAULT_HTCP |
588 | bool "Htcp" if TCP_CONG_HTCP=y | 602 | bool "Htcp" if TCP_CONG_HTCP=y |
589 | 603 | ||
604 | config DEFAULT_HYBLA | ||
605 | bool "Hybla" if TCP_CONG_HYBLA=y | ||
606 | |||
590 | config DEFAULT_VEGAS | 607 | config DEFAULT_VEGAS |
591 | bool "Vegas" if TCP_CONG_VEGAS=y | 608 | bool "Vegas" if TCP_CONG_VEGAS=y |
592 | 609 | ||
610 | config DEFAULT_VENO | ||
611 | bool "Veno" if TCP_CONG_VENO=y | ||
612 | |||
593 | config DEFAULT_WESTWOOD | 613 | config DEFAULT_WESTWOOD |
594 | bool "Westwood" if TCP_CONG_WESTWOOD=y | 614 | bool "Westwood" if TCP_CONG_WESTWOOD=y |
595 | 615 | ||
@@ -610,8 +630,10 @@ config DEFAULT_TCP_CONG | |||
610 | default "bic" if DEFAULT_BIC | 630 | default "bic" if DEFAULT_BIC |
611 | default "cubic" if DEFAULT_CUBIC | 631 | default "cubic" if DEFAULT_CUBIC |
612 | default "htcp" if DEFAULT_HTCP | 632 | default "htcp" if DEFAULT_HTCP |
633 | default "hybla" if DEFAULT_HYBLA | ||
613 | default "vegas" if DEFAULT_VEGAS | 634 | default "vegas" if DEFAULT_VEGAS |
614 | default "westwood" if DEFAULT_WESTWOOD | 635 | default "westwood" if DEFAULT_WESTWOOD |
636 | default "veno" if DEFAULT_VENO | ||
615 | default "reno" if DEFAULT_RENO | 637 | default "reno" if DEFAULT_RENO |
616 | default "cubic" | 638 | default "cubic" |
617 | 639 | ||
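Note: Hybla and Veno become selectable as the build-time default congestion control alongside the existing choices; as before, the default can still be overridden at runtime through the net.ipv4.tcp_congestion_control sysctl when the corresponding module is available.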
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f71357422380..9f52880fae10 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk) | |||
154 | WARN_ON(sk->sk_forward_alloc); | 154 | WARN_ON(sk->sk_forward_alloc); |
155 | 155 | ||
156 | kfree(inet->opt); | 156 | kfree(inet->opt); |
157 | dst_release(sk->sk_dst_cache); | 157 | dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); |
158 | sk_refcnt_debug_dec(sk); | 158 | sk_refcnt_debug_dec(sk); |
159 | } | 159 | } |
160 | EXPORT_SYMBOL(inet_sock_destruct); | 160 | EXPORT_SYMBOL(inet_sock_destruct); |
@@ -419,6 +419,8 @@ int inet_release(struct socket *sock) | |||
419 | if (sk) { | 419 | if (sk) { |
420 | long timeout; | 420 | long timeout; |
421 | 421 | ||
422 | inet_rps_reset_flow(sk); | ||
423 | |||
422 | /* Applications forget to leave groups before exiting */ | 424 | /* Applications forget to leave groups before exiting */ |
423 | ip_mc_drop_socket(sk); | 425 | ip_mc_drop_socket(sk); |
424 | 426 | ||
@@ -546,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo) | |||
546 | { | 548 | { |
547 | DEFINE_WAIT(wait); | 549 | DEFINE_WAIT(wait); |
548 | 550 | ||
549 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 551 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
550 | 552 | ||
551 | /* Basic assumption: if someone sets sk->sk_err, he _must_ | 553 | /* Basic assumption: if someone sets sk->sk_err, he _must_ |
552 | * change state of the socket from TCP_SYN_*. | 554 | * change state of the socket from TCP_SYN_*. |
@@ -559,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo) | |||
559 | lock_sock(sk); | 561 | lock_sock(sk); |
560 | if (signal_pending(current) || !timeo) | 562 | if (signal_pending(current) || !timeo) |
561 | break; | 563 | break; |
562 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 564 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
563 | } | 565 | } |
564 | finish_wait(sk->sk_sleep, &wait); | 566 | finish_wait(sk_sleep(sk), &wait); |
565 | return timeo; | 567 | return timeo; |
566 | } | 568 | } |
567 | 569 | ||
@@ -720,6 +722,8 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
720 | { | 722 | { |
721 | struct sock *sk = sock->sk; | 723 | struct sock *sk = sock->sk; |
722 | 724 | ||
725 | inet_rps_record_flow(sk); | ||
726 | |||
723 | /* We may need to bind the socket. */ | 727 | /* We may need to bind the socket. */ |
724 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) | 728 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
725 | return -EAGAIN; | 729 | return -EAGAIN; |
@@ -728,12 +732,13 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
728 | } | 732 | } |
729 | EXPORT_SYMBOL(inet_sendmsg); | 733 | EXPORT_SYMBOL(inet_sendmsg); |
730 | 734 | ||
731 | |||
732 | static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, | 735 | static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, |
733 | size_t size, int flags) | 736 | size_t size, int flags) |
734 | { | 737 | { |
735 | struct sock *sk = sock->sk; | 738 | struct sock *sk = sock->sk; |
736 | 739 | ||
740 | inet_rps_record_flow(sk); | ||
741 | |||
737 | /* We may need to bind the socket. */ | 742 | /* We may need to bind the socket. */ |
738 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) | 743 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
739 | return -EAGAIN; | 744 | return -EAGAIN; |
@@ -743,6 +748,22 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, | |||
743 | return sock_no_sendpage(sock, page, offset, size, flags); | 748 | return sock_no_sendpage(sock, page, offset, size, flags); |
744 | } | 749 | } |
745 | 750 | ||
751 | int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | ||
752 | size_t size, int flags) | ||
753 | { | ||
754 | struct sock *sk = sock->sk; | ||
755 | int addr_len = 0; | ||
756 | int err; | ||
757 | |||
758 | inet_rps_record_flow(sk); | ||
759 | |||
760 | err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, | ||
761 | flags & ~MSG_DONTWAIT, &addr_len); | ||
762 | if (err >= 0) | ||
763 | msg->msg_namelen = addr_len; | ||
764 | return err; | ||
765 | } | ||
766 | EXPORT_SYMBOL(inet_recvmsg); | ||
746 | 767 | ||
747 | int inet_shutdown(struct socket *sock, int how) | 768 | int inet_shutdown(struct socket *sock, int how) |
748 | { | 769 | { |
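[Annotation] The new inet_recvmsg() above is a thin wrapper: record the flow for receive packet steering, delegate to the protocol's recvmsg, and copy the returned address length into the msghdr only on success. A hedged userspace sketch of that wrapper shape; proto_recvmsg, record_flow and family_recvmsg are stand-in names, not kernel symbols.

#include <stdio.h>

struct msg { int namelen; };

/* Stand-in for sk->sk_prot->recvmsg: returns bytes or -errno and
 * reports how much of the address buffer it filled. */
static int proto_recvmsg(struct msg *m, int *addr_len)
{
    (void)m;
    *addr_len = 16;     /* pretend a sockaddr_in was written */
    return 42;          /* pretend 42 payload bytes arrived */
}

static void record_flow(void) { /* inet_rps_record_flow() stand-in */ }

/* Wrapper: one hook before the call, one fix-up after it. */
static int family_recvmsg(struct msg *m)
{
    int addr_len = 0;
    int err;

    record_flow();
    err = proto_recvmsg(m, &addr_len);
    if (err >= 0)
        m->namelen = addr_len;  /* only trust addr_len on success */
    return err;
}

int main(void)
{
    struct msg m = { 0 };
    int n = family_recvmsg(&m);
    printf("got %d bytes, namelen %d\n", n, m.namelen);
    return 0;
}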
@@ -872,7 +893,7 @@ const struct proto_ops inet_stream_ops = { | |||
872 | .setsockopt = sock_common_setsockopt, | 893 | .setsockopt = sock_common_setsockopt, |
873 | .getsockopt = sock_common_getsockopt, | 894 | .getsockopt = sock_common_getsockopt, |
874 | .sendmsg = tcp_sendmsg, | 895 | .sendmsg = tcp_sendmsg, |
875 | .recvmsg = sock_common_recvmsg, | 896 | .recvmsg = inet_recvmsg, |
876 | .mmap = sock_no_mmap, | 897 | .mmap = sock_no_mmap, |
877 | .sendpage = tcp_sendpage, | 898 | .sendpage = tcp_sendpage, |
878 | .splice_read = tcp_splice_read, | 899 | .splice_read = tcp_splice_read, |
@@ -899,7 +920,7 @@ const struct proto_ops inet_dgram_ops = { | |||
899 | .setsockopt = sock_common_setsockopt, | 920 | .setsockopt = sock_common_setsockopt, |
900 | .getsockopt = sock_common_getsockopt, | 921 | .getsockopt = sock_common_getsockopt, |
901 | .sendmsg = inet_sendmsg, | 922 | .sendmsg = inet_sendmsg, |
902 | .recvmsg = sock_common_recvmsg, | 923 | .recvmsg = inet_recvmsg, |
903 | .mmap = sock_no_mmap, | 924 | .mmap = sock_no_mmap, |
904 | .sendpage = inet_sendpage, | 925 | .sendpage = inet_sendpage, |
905 | #ifdef CONFIG_COMPAT | 926 | #ifdef CONFIG_COMPAT |
@@ -929,7 +950,7 @@ static const struct proto_ops inet_sockraw_ops = { | |||
929 | .setsockopt = sock_common_setsockopt, | 950 | .setsockopt = sock_common_setsockopt, |
930 | .getsockopt = sock_common_getsockopt, | 951 | .getsockopt = sock_common_getsockopt, |
931 | .sendmsg = inet_sendmsg, | 952 | .sendmsg = inet_sendmsg, |
932 | .recvmsg = sock_common_recvmsg, | 953 | .recvmsg = inet_recvmsg, |
933 | .mmap = sock_no_mmap, | 954 | .mmap = sock_no_mmap, |
934 | .sendpage = inet_sendpage, | 955 | .sendpage = inet_sendpage, |
935 | #ifdef CONFIG_COMPAT | 956 | #ifdef CONFIG_COMPAT |
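[Annotation] All three proto_ops tables above switch .recvmsg from sock_common_recvmsg to inet_recvmsg in one place each; because dispatch goes through the ops struct, every stream, dgram and raw socket picks up the new hook without touching any caller. A minimal sketch of that dispatch style, with toy function names:

#include <stdio.h>

struct proto_ops {
    int (*recvmsg)(void);   /* much-reduced ops table */
};

static int common_recvmsg(void) { return 0; }
static int rps_recvmsg(void)    { return 1; }  /* new hooked version */

static const struct proto_ops stream_ops = { .recvmsg = rps_recvmsg };

/* Callers only ever see the table, so swapping one member
 * retargets every socket of that type at once. */
static int do_recv(const struct proto_ops *ops) { return ops->recvmsg(); }

int main(void)
{
    (void)common_recvmsg;   /* old entry simply no longer referenced */
    printf("recvmsg -> %d\n", do_recv(&stream_ops));
    return 0;
}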
@@ -1302,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1302 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) | 1323 | if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) |
1303 | goto out_unlock; | 1324 | goto out_unlock; |
1304 | 1325 | ||
1305 | id = ntohl(*(u32 *)&iph->id); | 1326 | id = ntohl(*(__be32 *)&iph->id); |
1306 | flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); | 1327 | flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); |
1307 | id >>= 16; | 1328 | id >>= 16; |
1308 | 1329 | ||
1309 | for (p = *head; p; p = p->next) { | 1330 | for (p = *head; p; p = p->next) { |
@@ -1316,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1316 | 1337 | ||
1317 | if ((iph->protocol ^ iph2->protocol) | | 1338 | if ((iph->protocol ^ iph2->protocol) | |
1318 | (iph->tos ^ iph2->tos) | | 1339 | (iph->tos ^ iph2->tos) | |
1319 | (iph->saddr ^ iph2->saddr) | | 1340 | ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | |
1320 | (iph->daddr ^ iph2->daddr)) { | 1341 | ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { |
1321 | NAPI_GRO_CB(p)->same_flow = 0; | 1342 | NAPI_GRO_CB(p)->same_flow = 0; |
1322 | continue; | 1343 | continue; |
1323 | } | 1344 | } |
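[Annotation] The GRO hunks above change only annotations: saddr/daddr are __be32, and XORing two values of the same byte order is a pure bit comparison, so no ntohl() is needed; the __force casts tell sparse the raw-u32 treatment is intentional. A small self-contained sketch of that XOR-OR flow-key test on raw bit patterns:

#include <stdint.h>
#include <stdio.h>

/* Network-byte-order value; annotation only, same bits either way. */
typedef uint32_t be32;

struct key { uint8_t proto, tos; be32 saddr, daddr; };

/* Nonzero iff any field differs.  Works on be32 without byte
 * swapping: equality of bit patterns is endianness-independent. */
static uint32_t flow_differs(const struct key *a, const struct key *b)
{
    return (a->proto ^ b->proto) |
           (a->tos   ^ b->tos)   |
           ((uint32_t)a->saddr ^ (uint32_t)b->saddr) |
           ((uint32_t)a->daddr ^ (uint32_t)b->daddr);
}

int main(void)
{
    struct key a = { 6, 0, 0x0100000a, 0x0200000a };
    struct key b = a;
    printf("same: %u\n", flow_differs(&a, &b));   /* 0 */
    b.daddr ^= 1;
    printf("diff: %u\n", flow_differs(&a, &b));   /* nonzero */
    return 0;
}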
@@ -1407,10 +1428,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field); | |||
1407 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) | 1428 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) |
1408 | { | 1429 | { |
1409 | BUG_ON(ptr == NULL); | 1430 | BUG_ON(ptr == NULL); |
1410 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); | 1431 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long)); |
1411 | if (!ptr[0]) | 1432 | if (!ptr[0]) |
1412 | goto err0; | 1433 | goto err0; |
1413 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); | 1434 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long)); |
1414 | if (!ptr[1]) | 1435 | if (!ptr[1]) |
1415 | goto err1; | 1436 | goto err1; |
1416 | return 0; | 1437 | return 0; |
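[Annotation] snmp_mib_init() above relaxes the requested per-cpu alignment from unsigned long long to unsigned long, which presumably shrinks the request on 32-bit targets where long is 4-byte aligned but long long may need 8. __alignof__ is a compile-time constant; a quick illustration of the two values:

#include <stdio.h>

int main(void)
{
    /* On typical 32-bit targets: 4 vs 8; on 64-bit: usually 8 vs 8. */
    printf("alignof(unsigned long)      = %zu\n",
           __alignof__(unsigned long));
    printf("alignof(unsigned long long) = %zu\n",
           __alignof__(unsigned long long));
    return 0;
}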
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 90e3d6379a42..382bc768ed56 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1096,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1096 | case NETDEV_DOWN: | 1096 | case NETDEV_DOWN: |
1097 | ip_mc_down(in_dev); | 1097 | ip_mc_down(in_dev); |
1098 | break; | 1098 | break; |
1099 | case NETDEV_BONDING_OLDTYPE: | 1099 | case NETDEV_PRE_TYPE_CHANGE: |
1100 | ip_mc_unmap(in_dev); | 1100 | ip_mc_unmap(in_dev); |
1101 | break; | 1101 | break; |
1102 | case NETDEV_BONDING_NEWTYPE: | 1102 | case NETDEV_POST_TYPE_CHANGE: |
1103 | ip_mc_remap(in_dev); | 1103 | ip_mc_remap(in_dev); |
1104 | break; | 1104 | break; |
1105 | case NETDEV_CHANGEMTU: | 1105 | case NETDEV_CHANGEMTU: |
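[Annotation] NETDEV_PRE_TYPE_CHANGE / NETDEV_POST_TYPE_CHANGE replace the bonding-specific event names: subscribers quiesce state before the device type flips and rebuild it afterwards, as the devinet hunk shows with ip_mc_unmap()/ip_mc_remap(). A compact sketch of the two-phase notification pattern; the enum and function names here are invented for illustration.

#include <stdio.h>

enum event { PRE_TYPE_CHANGE, POST_TYPE_CHANGE };

/* Subscriber mirroring the devinet hunk: unmap on PRE, remap on POST. */
static void mc_unmap(void) { puts("unmap multicast filters"); }
static void mc_remap(void) { puts("remap multicast filters"); }

static int inetdev_event(enum event ev)
{
    switch (ev) {
    case PRE_TYPE_CHANGE:  mc_unmap(); break;
    case POST_TYPE_CHANGE: mc_remap(); break;
    }
    return 0;
}

/* The publisher brackets the risky operation with both events. */
static void change_device_type(void)
{
    inetdev_event(PRE_TYPE_CHANGE);
    puts("...device type actually changes here...");
    inetdev_event(POST_TYPE_CHANGE);
}

int main(void) { change_device_type(); return 0; }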
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index ca2d07b1c706..3ec84fea5b71 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -213,7 +213,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
213 | { | 213 | { |
214 | struct fib4_rule *rule4 = (struct fib4_rule *) rule; | 214 | struct fib4_rule *rule4 = (struct fib4_rule *) rule; |
215 | 215 | ||
216 | frh->family = AF_INET; | ||
217 | frh->dst_len = rule4->dst_len; | 216 | frh->dst_len = rule4->dst_len; |
218 | frh->src_len = rule4->src_len; | 217 | frh->src_len = rule4->src_len; |
219 | frh->tos = rule4->tos; | 218 | frh->tos = rule4->tos; |
@@ -234,23 +233,6 @@ nla_put_failure: | |||
234 | return -ENOBUFS; | 233 | return -ENOBUFS; |
235 | } | 234 | } |
236 | 235 | ||
237 | static u32 fib4_rule_default_pref(struct fib_rules_ops *ops) | ||
238 | { | ||
239 | struct list_head *pos; | ||
240 | struct fib_rule *rule; | ||
241 | |||
242 | if (!list_empty(&ops->rules_list)) { | ||
243 | pos = ops->rules_list.next; | ||
244 | if (pos->next != &ops->rules_list) { | ||
245 | rule = list_entry(pos->next, struct fib_rule, list); | ||
246 | if (rule->pref) | ||
247 | return rule->pref - 1; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) | 236 | static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) |
255 | { | 237 | { |
256 | return nla_total_size(4) /* dst */ | 238 | return nla_total_size(4) /* dst */ |
@@ -264,7 +246,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops) | |||
264 | } | 246 | } |
265 | 247 | ||
266 | static struct fib_rules_ops fib4_rules_ops_template = { | 248 | static struct fib_rules_ops fib4_rules_ops_template = { |
267 | .family = AF_INET, | 249 | .family = FIB_RULES_IPV4, |
268 | .rule_size = sizeof(struct fib4_rule), | 250 | .rule_size = sizeof(struct fib4_rule), |
269 | .addr_size = sizeof(u32), | 251 | .addr_size = sizeof(u32), |
270 | .action = fib4_rule_action, | 252 | .action = fib4_rule_action, |
@@ -272,7 +254,7 @@ static struct fib_rules_ops fib4_rules_ops_template = { | |||
272 | .configure = fib4_rule_configure, | 254 | .configure = fib4_rule_configure, |
273 | .compare = fib4_rule_compare, | 255 | .compare = fib4_rule_compare, |
274 | .fill = fib4_rule_fill, | 256 | .fill = fib4_rule_fill, |
275 | .default_pref = fib4_rule_default_pref, | 257 | .default_pref = fib_default_rule_pref, |
276 | .nlmsg_payload = fib4_rule_nlmsg_payload, | 258 | .nlmsg_payload = fib4_rule_nlmsg_payload, |
277 | .flush_cache = fib4_rule_flush_cache, | 259 | .flush_cache = fib4_rule_flush_cache, |
278 | .nlgroup = RTNLGRP_IPV4_RULE, | 260 | .nlgroup = RTNLGRP_IPV4_RULE, |
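[Annotation] The deleted fib4_rule_default_pref() picked a preference just below the second rule in the list, and the template now points at the shared fib_default_rule_pref() so each family stops carrying its own copy. A sketch of that selection logic over a plain array, under the assumption that the shared helper behaves like the removed per-family one:

#include <stdio.h>

/* Rules sorted by preference; slot 0 is the local/default rule. */
static unsigned prefs[] = { 0, 0x7ffe, 0x7fff };
static unsigned nrules = 3;

/* Pick a preference one below the first user-visible rule, so a new
 * rule lands in front of existing ones; 0 if nothing fits. */
static unsigned default_pref(void)
{
    if (nrules >= 2 && prefs[1] > 0)
        return prefs[1] - 1;
    return 0;
}

int main(void)
{
    printf("next pref: %#x\n", default_pref());  /* 0x7ffd */
    return 0;
}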
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ac4dec132735..f3d339f728b0 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -331,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, | 331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, |
332 | icmp_param->data_len+icmp_param->head_len, | 332 | icmp_param->data_len+icmp_param->head_len, |
333 | icmp_param->head_len, | 333 | icmp_param->head_len, |
334 | ipc, rt, MSG_DONTWAIT) < 0) | 334 | ipc, rt, MSG_DONTWAIT) < 0) { |
335 | ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); | ||
335 | ip_flush_pending_frames(sk); | 336 | ip_flush_pending_frames(sk); |
336 | else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { | 337 | } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { |
337 | struct icmphdr *icmph = icmp_hdr(skb); | 338 | struct icmphdr *icmph = icmp_hdr(skb); |
338 | __wsum csum = 0; | 339 | __wsum csum = 0; |
339 | struct sk_buff *skb1; | 340 | struct sk_buff *skb1; |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 15d3eeda92f5..5fff865a4fa7 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -998,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) | |||
998 | --ANK | 998 | --ANK |
999 | */ | 999 | */ |
1000 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1000 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1001 | dev_mc_add(dev, buf, dev->addr_len, 0); | 1001 | dev_mc_add(dev, buf); |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | /* | 1004 | /* |
@@ -1011,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr) | |||
1011 | struct net_device *dev = in_dev->dev; | 1011 | struct net_device *dev = in_dev->dev; |
1012 | 1012 | ||
1013 | if (arp_mc_map(addr, buf, dev, 0) == 0) | 1013 | if (arp_mc_map(addr, buf, dev, 0) == 0) |
1014 | dev_mc_delete(dev, buf, dev->addr_len, 0); | 1014 | dev_mc_del(dev, buf); |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | #ifdef CONFIG_IP_MULTICAST | 1017 | #ifdef CONFIG_IP_MULTICAST |
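[Annotation] dev_mc_add()/dev_mc_del() lose the explicit length and trailing-flag arguments in the hunks above; the address length is now taken from the device itself. A sketch of the trimmed interface, with a toy fixed-size address list standing in for the real one:

#include <stdio.h>
#include <string.h>

#define MAX_ADDR_LEN 32

struct net_device {
    unsigned char addr_len;                 /* length of all hw addrs */
    unsigned char mc[4][MAX_ADDR_LEN];      /* tiny multicast list */
    int mc_count;
};

/* New-style: the device knows its own address length. */
static int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
    if (dev->mc_count >= 4)
        return -1;
    memcpy(dev->mc[dev->mc_count++], addr, dev->addr_len);
    return 0;
}

int main(void)
{
    struct net_device dev = { .addr_len = 6 };
    unsigned char grp[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

    dev_mc_add(&dev, grp);      /* no length, no extra flag */
    printf("mc entries: %d\n", dev.mc_count);
    return 0;
}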
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 14825eb09770..78cbc39f56c4 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -240,7 +240,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |||
240 | * having to remove and re-insert us on the wait queue. | 240 | * having to remove and re-insert us on the wait queue. |
241 | */ | 241 | */ |
242 | for (;;) { | 242 | for (;;) { |
243 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | 243 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
244 | TASK_INTERRUPTIBLE); | 244 | TASK_INTERRUPTIBLE); |
245 | release_sock(sk); | 245 | release_sock(sk); |
246 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) | 246 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) |
@@ -259,7 +259,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |||
259 | if (!timeo) | 259 | if (!timeo) |
260 | break; | 260 | break; |
261 | } | 261 | } |
262 | finish_wait(sk->sk_sleep, &wait); | 262 | finish_wait(sk_sleep(sk), &wait); |
263 | return err; | 263 | return err; |
264 | } | 264 | } |
265 | 265 | ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index d1bcc9f21d4f..f0392191740b 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -311,7 +311,7 @@ int ip_output(struct sk_buff *skb) | |||
311 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 311 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
312 | } | 312 | } |
313 | 313 | ||
314 | int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | 314 | int ip_queue_xmit(struct sk_buff *skb) |
315 | { | 315 | { |
316 | struct sock *sk = skb->sk; | 316 | struct sock *sk = skb->sk; |
317 | struct inet_sock *inet = inet_sk(sk); | 317 | struct inet_sock *inet = inet_sk(sk); |
@@ -370,7 +370,7 @@ packet_routed: | |||
370 | skb_reset_network_header(skb); | 370 | skb_reset_network_header(skb); |
371 | iph = ip_hdr(skb); | 371 | iph = ip_hdr(skb); |
372 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); | 372 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); |
373 | if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) | 373 | if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df) |
374 | iph->frag_off = htons(IP_DF); | 374 | iph->frag_off = htons(IP_DF); |
375 | else | 375 | else |
376 | iph->frag_off = 0; | 376 | iph->frag_off = 0; |
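[Annotation] ip_queue_xmit() drops its ipfragok parameter above; the "fragmentation allowed" hint now rides on the packet itself as skb->local_df, so intermediate callers no longer thread a flag through every signature. A minimal sketch of moving a parameter into per-packet state:

#include <stdio.h>

struct pkt {
    unsigned local_df : 1;  /* may we skip the DF bit? */
};

/* Old shape: void queue_xmit(struct pkt *p, int ipfragok);
 * New shape: the flag travels with the packet. */
static void queue_xmit(struct pkt *p)
{
    if (!p->local_df)
        puts("set IP_DF: do not fragment");
    else
        puts("DF clear: fragmentation permitted");
}

int main(void)
{
    struct pkt a = { .local_df = 0 };
    struct pkt b = { .local_df = 1 };
    queue_xmit(&a);
    queue_xmit(&b);
    return 0;
}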
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1e64dabbd232..b0aa0546a3b3 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -287,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
287 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | 287 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, |
288 | __be16 port, u32 info, u8 *payload) | 288 | __be16 port, u32 info, u8 *payload) |
289 | { | 289 | { |
290 | struct inet_sock *inet = inet_sk(sk); | ||
291 | struct sock_exterr_skb *serr; | 290 | struct sock_exterr_skb *serr; |
292 | 291 | ||
293 | if (!inet->recverr) | ||
294 | return; | ||
295 | |||
296 | skb = skb_clone(skb, GFP_ATOMIC); | 292 | skb = skb_clone(skb, GFP_ATOMIC); |
297 | if (!skb) | 293 | if (!skb) |
298 | return; | 294 | return; |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 067ce9e043dc..b9d84e800cf4 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -976,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
976 | /* Is it a reply for the device we are configuring? */ | 976 | /* Is it a reply for the device we are configuring? */ |
977 | if (b->xid != ic_dev_xid) { | 977 | if (b->xid != ic_dev_xid) { |
978 | if (net_ratelimit()) | 978 | if (net_ratelimit()) |
979 | printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n"); | 979 | printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n"); |
980 | goto drop_unlock; | 980 | goto drop_unlock; |
981 | } | 981 | } |
982 | 982 | ||
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d4f6d1340a4..a2df5012a1d0 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -63,11 +63,40 @@ | |||
63 | #include <net/ipip.h> | 63 | #include <net/ipip.h> |
64 | #include <net/checksum.h> | 64 | #include <net/checksum.h> |
65 | #include <net/netlink.h> | 65 | #include <net/netlink.h> |
66 | #include <net/fib_rules.h> | ||
66 | 67 | ||
67 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) | 68 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) |
68 | #define CONFIG_IP_PIMSM 1 | 69 | #define CONFIG_IP_PIMSM 1 |
69 | #endif | 70 | #endif |
70 | 71 | ||
72 | struct mr_table { | ||
73 | struct list_head list; | ||
74 | #ifdef CONFIG_NET_NS | ||
75 | struct net *net; | ||
76 | #endif | ||
77 | u32 id; | ||
78 | struct sock *mroute_sk; | ||
79 | struct timer_list ipmr_expire_timer; | ||
80 | struct list_head mfc_unres_queue; | ||
81 | struct list_head mfc_cache_array[MFC_LINES]; | ||
82 | struct vif_device vif_table[MAXVIFS]; | ||
83 | int maxvif; | ||
84 | atomic_t cache_resolve_queue_len; | ||
85 | int mroute_do_assert; | ||
86 | int mroute_do_pim; | ||
87 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) | ||
88 | int mroute_reg_vif_num; | ||
89 | #endif | ||
90 | }; | ||
91 | |||
92 | struct ipmr_rule { | ||
93 | struct fib_rule common; | ||
94 | }; | ||
95 | |||
96 | struct ipmr_result { | ||
97 | struct mr_table *mrt; | ||
98 | }; | ||
99 | |||
71 | /* Big lock, protecting vif table, mrt cache and mroute socket state. | 100 | /* Big lock, protecting vif table, mrt cache and mroute socket state. |
72 | Note that the changes are semaphored via rtnl_lock. | 101 | Note that the changes are semaphored via rtnl_lock. |
73 | */ | 102 | */ |
@@ -78,9 +107,7 @@ static DEFINE_RWLOCK(mrt_lock); | |||
78 | * Multicast router control variables | 107 | * Multicast router control variables |
79 | */ | 108 | */ |
80 | 109 | ||
81 | #define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) | 110 | #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) |
82 | |||
83 | static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */ | ||
84 | 111 | ||
85 | /* Special spinlock for queue of unresolved entries */ | 112 | /* Special spinlock for queue of unresolved entries */ |
86 | static DEFINE_SPINLOCK(mfc_unres_lock); | 113 | static DEFINE_SPINLOCK(mfc_unres_lock); |
@@ -95,12 +122,215 @@ static DEFINE_SPINLOCK(mfc_unres_lock); | |||
95 | 122 | ||
96 | static struct kmem_cache *mrt_cachep __read_mostly; | 123 | static struct kmem_cache *mrt_cachep __read_mostly; |
97 | 124 | ||
98 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); | 125 | static struct mr_table *ipmr_new_table(struct net *net, u32 id); |
99 | static int ipmr_cache_report(struct net *net, | 126 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, |
127 | struct sk_buff *skb, struct mfc_cache *cache, | ||
128 | int local); | ||
129 | static int ipmr_cache_report(struct mr_table *mrt, | ||
100 | struct sk_buff *pkt, vifi_t vifi, int assert); | 130 | struct sk_buff *pkt, vifi_t vifi, int assert); |
101 | static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); | 131 | static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
132 | struct mfc_cache *c, struct rtmsg *rtm); | ||
133 | static void ipmr_expire_process(unsigned long arg); | ||
134 | |||
135 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||
136 | #define ipmr_for_each_table(mrt, net) \ | ||
137 | list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) | ||
138 | |||
139 | static struct mr_table *ipmr_get_table(struct net *net, u32 id) | ||
140 | { | ||
141 | struct mr_table *mrt; | ||
142 | |||
143 | ipmr_for_each_table(mrt, net) { | ||
144 | if (mrt->id == id) | ||
145 | return mrt; | ||
146 | } | ||
147 | return NULL; | ||
148 | } | ||
149 | |||
150 | static int ipmr_fib_lookup(struct net *net, struct flowi *flp, | ||
151 | struct mr_table **mrt) | ||
152 | { | ||
153 | struct ipmr_result res; | ||
154 | struct fib_lookup_arg arg = { .result = &res, }; | ||
155 | int err; | ||
156 | |||
157 | err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg); | ||
158 | if (err < 0) | ||
159 | return err; | ||
160 | *mrt = res.mrt; | ||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, | ||
165 | int flags, struct fib_lookup_arg *arg) | ||
166 | { | ||
167 | struct ipmr_result *res = arg->result; | ||
168 | struct mr_table *mrt; | ||
169 | |||
170 | switch (rule->action) { | ||
171 | case FR_ACT_TO_TBL: | ||
172 | break; | ||
173 | case FR_ACT_UNREACHABLE: | ||
174 | return -ENETUNREACH; | ||
175 | case FR_ACT_PROHIBIT: | ||
176 | return -EACCES; | ||
177 | case FR_ACT_BLACKHOLE: | ||
178 | default: | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | |||
182 | mrt = ipmr_get_table(rule->fr_net, rule->table); | ||
183 | if (mrt == NULL) | ||
184 | return -EAGAIN; | ||
185 | res->mrt = mrt; | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) | ||
190 | { | ||
191 | return 1; | ||
192 | } | ||
193 | |||
194 | static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { | ||
195 | FRA_GENERIC_POLICY, | ||
196 | }; | ||
197 | |||
198 | static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | ||
199 | struct fib_rule_hdr *frh, struct nlattr **tb) | ||
200 | { | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | ||
205 | struct nlattr **tb) | ||
206 | { | ||
207 | return 1; | ||
208 | } | ||
209 | |||
210 | static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | ||
211 | struct fib_rule_hdr *frh) | ||
212 | { | ||
213 | frh->dst_len = 0; | ||
214 | frh->src_len = 0; | ||
215 | frh->tos = 0; | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static struct fib_rules_ops ipmr_rules_ops_template = { | ||
220 | .family = FIB_RULES_IPMR, | ||
221 | .rule_size = sizeof(struct ipmr_rule), | ||
222 | .addr_size = sizeof(u32), | ||
223 | .action = ipmr_rule_action, | ||
224 | .match = ipmr_rule_match, | ||
225 | .configure = ipmr_rule_configure, | ||
226 | .compare = ipmr_rule_compare, | ||
227 | .default_pref = fib_default_rule_pref, | ||
228 | .fill = ipmr_rule_fill, | ||
229 | .nlgroup = RTNLGRP_IPV4_RULE, | ||
230 | .policy = ipmr_rule_policy, | ||
231 | .owner = THIS_MODULE, | ||
232 | }; | ||
233 | |||
234 | static int __net_init ipmr_rules_init(struct net *net) | ||
235 | { | ||
236 | struct fib_rules_ops *ops; | ||
237 | struct mr_table *mrt; | ||
238 | int err; | ||
239 | |||
240 | ops = fib_rules_register(&ipmr_rules_ops_template, net); | ||
241 | if (IS_ERR(ops)) | ||
242 | return PTR_ERR(ops); | ||
243 | |||
244 | INIT_LIST_HEAD(&net->ipv4.mr_tables); | ||
245 | |||
246 | mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); | ||
247 | if (mrt == NULL) { | ||
248 | err = -ENOMEM; | ||
249 | goto err1; | ||
250 | } | ||
251 | |||
252 | err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); | ||
253 | if (err < 0) | ||
254 | goto err2; | ||
255 | |||
256 | net->ipv4.mr_rules_ops = ops; | ||
257 | return 0; | ||
258 | |||
259 | err2: | ||
260 | kfree(mrt); | ||
261 | err1: | ||
262 | fib_rules_unregister(ops); | ||
263 | return err; | ||
264 | } | ||
102 | 265 | ||
103 | static struct timer_list ipmr_expire_timer; | 266 | static void __net_exit ipmr_rules_exit(struct net *net) |
267 | { | ||
268 | struct mr_table *mrt, *next; | ||
269 | |||
270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) | ||
271 | kfree(mrt); | ||
272 | fib_rules_unregister(net->ipv4.mr_rules_ops); | ||
273 | } | ||
274 | #else | ||
275 | #define ipmr_for_each_table(mrt, net) \ | ||
276 | for (mrt = net->ipv4.mrt; mrt; mrt = NULL) | ||
277 | |||
278 | static struct mr_table *ipmr_get_table(struct net *net, u32 id) | ||
279 | { | ||
280 | return net->ipv4.mrt; | ||
281 | } | ||
282 | |||
283 | static int ipmr_fib_lookup(struct net *net, struct flowi *flp, | ||
284 | struct mr_table **mrt) | ||
285 | { | ||
286 | *mrt = net->ipv4.mrt; | ||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static int __net_init ipmr_rules_init(struct net *net) | ||
291 | { | ||
292 | net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); | ||
293 | return net->ipv4.mrt ? 0 : -ENOMEM; | ||
294 | } | ||
295 | |||
296 | static void __net_exit ipmr_rules_exit(struct net *net) | ||
297 | { | ||
298 | kfree(net->ipv4.mrt); | ||
299 | } | ||
300 | #endif | ||
301 | |||
302 | static struct mr_table *ipmr_new_table(struct net *net, u32 id) | ||
303 | { | ||
304 | struct mr_table *mrt; | ||
305 | unsigned int i; | ||
306 | |||
307 | mrt = ipmr_get_table(net, id); | ||
308 | if (mrt != NULL) | ||
309 | return mrt; | ||
310 | |||
311 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); | ||
312 | if (mrt == NULL) | ||
313 | return NULL; | ||
314 | write_pnet(&mrt->net, net); | ||
315 | mrt->id = id; | ||
316 | |||
317 | /* Forwarding cache */ | ||
318 | for (i = 0; i < MFC_LINES; i++) | ||
319 | INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); | ||
320 | |||
321 | INIT_LIST_HEAD(&mrt->mfc_unres_queue); | ||
322 | |||
323 | setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, | ||
324 | (unsigned long)mrt); | ||
325 | |||
326 | #ifdef CONFIG_IP_PIMSM | ||
327 | mrt->mroute_reg_vif_num = -1; | ||
328 | #endif | ||
329 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||
330 | list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); | ||
331 | #endif | ||
332 | return mrt; | ||
333 | } | ||
104 | 334 | ||
105 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ | 335 | /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ |
106 | 336 | ||
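[Annotation] The hunk above is the heart of the ipmr rework: per-net state that used to live in net->ipv4 moves into struct mr_table, and ipmr_new_table() is a get-or-create over a per-net list keyed by table id. A self-contained sketch of that registry shape, with a plain malloc'd singly-linked list standing in for the kernel's RCU list:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mr_table {
    struct mr_table *next;
    uint32_t id;
    /* ...per-table vif/mfc state would live here... */
};

static struct mr_table *tables;     /* per-net list in the real code */

static struct mr_table *get_table(uint32_t id)
{
    struct mr_table *t;
    for (t = tables; t; t = t->next)
        if (t->id == id)
            return t;
    return NULL;
}

/* Get-or-create, mirroring ipmr_new_table(). */
static struct mr_table *new_table(uint32_t id)
{
    struct mr_table *t = get_table(id);
    if (t)
        return t;
    t = calloc(1, sizeof(*t));
    if (!t)
        return NULL;
    t->id = id;
    t->next = tables;
    tables = t;
    return t;
}

int main(void)
{
    struct mr_table *a = new_table(254);    /* default-table-like id */
    struct mr_table *b = new_table(254);    /* second call finds it */
    printf("same table: %s\n", a == b ? "yes" : "no");
    return 0;
}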
@@ -201,12 +431,22 @@ failure: | |||
201 | static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 431 | static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) |
202 | { | 432 | { |
203 | struct net *net = dev_net(dev); | 433 | struct net *net = dev_net(dev); |
434 | struct mr_table *mrt; | ||
435 | struct flowi fl = { | ||
436 | .oif = dev->ifindex, | ||
437 | .iif = skb->skb_iif, | ||
438 | .mark = skb->mark, | ||
439 | }; | ||
440 | int err; | ||
441 | |||
442 | err = ipmr_fib_lookup(net, &fl, &mrt); | ||
443 | if (err < 0) | ||
444 | return err; | ||
204 | 445 | ||
205 | read_lock(&mrt_lock); | 446 | read_lock(&mrt_lock); |
206 | dev->stats.tx_bytes += skb->len; | 447 | dev->stats.tx_bytes += skb->len; |
207 | dev->stats.tx_packets++; | 448 | dev->stats.tx_packets++; |
208 | ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num, | 449 | ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); |
209 | IGMPMSG_WHOLEPKT); | ||
210 | read_unlock(&mrt_lock); | 450 | read_unlock(&mrt_lock); |
211 | kfree_skb(skb); | 451 | kfree_skb(skb); |
212 | return NETDEV_TX_OK; | 452 | return NETDEV_TX_OK; |
@@ -226,12 +466,18 @@ static void reg_vif_setup(struct net_device *dev) | |||
226 | dev->features |= NETIF_F_NETNS_LOCAL; | 466 | dev->features |= NETIF_F_NETNS_LOCAL; |
227 | } | 467 | } |
228 | 468 | ||
229 | static struct net_device *ipmr_reg_vif(struct net *net) | 469 | static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) |
230 | { | 470 | { |
231 | struct net_device *dev; | 471 | struct net_device *dev; |
232 | struct in_device *in_dev; | 472 | struct in_device *in_dev; |
473 | char name[IFNAMSIZ]; | ||
233 | 474 | ||
234 | dev = alloc_netdev(0, "pimreg", reg_vif_setup); | 475 | if (mrt->id == RT_TABLE_DEFAULT) |
476 | sprintf(name, "pimreg"); | ||
477 | else | ||
478 | sprintf(name, "pimreg%u", mrt->id); | ||
479 | |||
480 | dev = alloc_netdev(0, name, reg_vif_setup); | ||
235 | 481 | ||
236 | if (dev == NULL) | 482 | if (dev == NULL) |
237 | return NULL; | 483 | return NULL; |
@@ -276,17 +522,17 @@ failure: | |||
276 | * @notify: Set to 1, if the caller is a notifier_call | 522 | * @notify: Set to 1, if the caller is a notifier_call |
277 | */ | 523 | */ |
278 | 524 | ||
279 | static int vif_delete(struct net *net, int vifi, int notify, | 525 | static int vif_delete(struct mr_table *mrt, int vifi, int notify, |
280 | struct list_head *head) | 526 | struct list_head *head) |
281 | { | 527 | { |
282 | struct vif_device *v; | 528 | struct vif_device *v; |
283 | struct net_device *dev; | 529 | struct net_device *dev; |
284 | struct in_device *in_dev; | 530 | struct in_device *in_dev; |
285 | 531 | ||
286 | if (vifi < 0 || vifi >= net->ipv4.maxvif) | 532 | if (vifi < 0 || vifi >= mrt->maxvif) |
287 | return -EADDRNOTAVAIL; | 533 | return -EADDRNOTAVAIL; |
288 | 534 | ||
289 | v = &net->ipv4.vif_table[vifi]; | 535 | v = &mrt->vif_table[vifi]; |
290 | 536 | ||
291 | write_lock_bh(&mrt_lock); | 537 | write_lock_bh(&mrt_lock); |
292 | dev = v->dev; | 538 | dev = v->dev; |
@@ -298,17 +544,17 @@ static int vif_delete(struct net *net, int vifi, int notify, | |||
298 | } | 544 | } |
299 | 545 | ||
300 | #ifdef CONFIG_IP_PIMSM | 546 | #ifdef CONFIG_IP_PIMSM |
301 | if (vifi == net->ipv4.mroute_reg_vif_num) | 547 | if (vifi == mrt->mroute_reg_vif_num) |
302 | net->ipv4.mroute_reg_vif_num = -1; | 548 | mrt->mroute_reg_vif_num = -1; |
303 | #endif | 549 | #endif |
304 | 550 | ||
305 | if (vifi+1 == net->ipv4.maxvif) { | 551 | if (vifi+1 == mrt->maxvif) { |
306 | int tmp; | 552 | int tmp; |
307 | for (tmp=vifi-1; tmp>=0; tmp--) { | 553 | for (tmp=vifi-1; tmp>=0; tmp--) { |
308 | if (VIF_EXISTS(net, tmp)) | 554 | if (VIF_EXISTS(mrt, tmp)) |
309 | break; | 555 | break; |
310 | } | 556 | } |
311 | net->ipv4.maxvif = tmp+1; | 557 | mrt->maxvif = tmp+1; |
312 | } | 558 | } |
313 | 559 | ||
314 | write_unlock_bh(&mrt_lock); | 560 | write_unlock_bh(&mrt_lock); |
@@ -329,7 +575,6 @@ static int vif_delete(struct net *net, int vifi, int notify, | |||
329 | 575 | ||
330 | static inline void ipmr_cache_free(struct mfc_cache *c) | 576 | static inline void ipmr_cache_free(struct mfc_cache *c) |
331 | { | 577 | { |
332 | release_net(mfc_net(c)); | ||
333 | kmem_cache_free(mrt_cachep, c); | 578 | kmem_cache_free(mrt_cachep, c); |
334 | } | 579 | } |
335 | 580 | ||
@@ -337,13 +582,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c) | |||
337 | and reporting error to netlink readers. | 582 | and reporting error to netlink readers. |
338 | */ | 583 | */ |
339 | 584 | ||
340 | static void ipmr_destroy_unres(struct mfc_cache *c) | 585 | static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) |
341 | { | 586 | { |
587 | struct net *net = read_pnet(&mrt->net); | ||
342 | struct sk_buff *skb; | 588 | struct sk_buff *skb; |
343 | struct nlmsgerr *e; | 589 | struct nlmsgerr *e; |
344 | struct net *net = mfc_net(c); | ||
345 | 590 | ||
346 | atomic_dec(&net->ipv4.cache_resolve_queue_len); | 591 | atomic_dec(&mrt->cache_resolve_queue_len); |
347 | 592 | ||
348 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { | 593 | while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { |
349 | if (ip_hdr(skb)->version == 0) { | 594 | if (ip_hdr(skb)->version == 0) { |
@@ -364,42 +609,40 @@ static void ipmr_destroy_unres(struct mfc_cache *c) | |||
364 | } | 609 | } |
365 | 610 | ||
366 | 611 | ||
367 | /* Single timer process for all the unresolved queue. */ | 612 | /* Timer process for the unresolved queue. */ |
368 | 613 | ||
369 | static void ipmr_expire_process(unsigned long dummy) | 614 | static void ipmr_expire_process(unsigned long arg) |
370 | { | 615 | { |
616 | struct mr_table *mrt = (struct mr_table *)arg; | ||
371 | unsigned long now; | 617 | unsigned long now; |
372 | unsigned long expires; | 618 | unsigned long expires; |
373 | struct mfc_cache *c, **cp; | 619 | struct mfc_cache *c, *next; |
374 | 620 | ||
375 | if (!spin_trylock(&mfc_unres_lock)) { | 621 | if (!spin_trylock(&mfc_unres_lock)) { |
376 | mod_timer(&ipmr_expire_timer, jiffies+HZ/10); | 622 | mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); |
377 | return; | 623 | return; |
378 | } | 624 | } |
379 | 625 | ||
380 | if (mfc_unres_queue == NULL) | 626 | if (list_empty(&mrt->mfc_unres_queue)) |
381 | goto out; | 627 | goto out; |
382 | 628 | ||
383 | now = jiffies; | 629 | now = jiffies; |
384 | expires = 10*HZ; | 630 | expires = 10*HZ; |
385 | cp = &mfc_unres_queue; | ||
386 | 631 | ||
387 | while ((c=*cp) != NULL) { | 632 | list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { |
388 | if (time_after(c->mfc_un.unres.expires, now)) { | 633 | if (time_after(c->mfc_un.unres.expires, now)) { |
389 | unsigned long interval = c->mfc_un.unres.expires - now; | 634 | unsigned long interval = c->mfc_un.unres.expires - now; |
390 | if (interval < expires) | 635 | if (interval < expires) |
391 | expires = interval; | 636 | expires = interval; |
392 | cp = &c->next; | ||
393 | continue; | 637 | continue; |
394 | } | 638 | } |
395 | 639 | ||
396 | *cp = c->next; | 640 | list_del(&c->list); |
397 | 641 | ipmr_destroy_unres(mrt, c); | |
398 | ipmr_destroy_unres(c); | ||
399 | } | 642 | } |
400 | 643 | ||
401 | if (mfc_unres_queue != NULL) | 644 | if (!list_empty(&mrt->mfc_unres_queue)) |
402 | mod_timer(&ipmr_expire_timer, jiffies + expires); | 645 | mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); |
403 | 646 | ||
404 | out: | 647 | out: |
405 | spin_unlock(&mfc_unres_lock); | 648 | spin_unlock(&mfc_unres_lock); |
@@ -407,17 +650,17 @@ out: | |||
407 | 650 | ||
408 | /* Fill oifs list. It is called under write locked mrt_lock. */ | 651 | /* Fill oifs list. It is called under write locked mrt_lock. */ |
409 | 652 | ||
410 | static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) | 653 | static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, |
654 | unsigned char *ttls) | ||
411 | { | 655 | { |
412 | int vifi; | 656 | int vifi; |
413 | struct net *net = mfc_net(cache); | ||
414 | 657 | ||
415 | cache->mfc_un.res.minvif = MAXVIFS; | 658 | cache->mfc_un.res.minvif = MAXVIFS; |
416 | cache->mfc_un.res.maxvif = 0; | 659 | cache->mfc_un.res.maxvif = 0; |
417 | memset(cache->mfc_un.res.ttls, 255, MAXVIFS); | 660 | memset(cache->mfc_un.res.ttls, 255, MAXVIFS); |
418 | 661 | ||
419 | for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) { | 662 | for (vifi = 0; vifi < mrt->maxvif; vifi++) { |
420 | if (VIF_EXISTS(net, vifi) && | 663 | if (VIF_EXISTS(mrt, vifi) && |
421 | ttls[vifi] && ttls[vifi] < 255) { | 664 | ttls[vifi] && ttls[vifi] < 255) { |
422 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; | 665 | cache->mfc_un.res.ttls[vifi] = ttls[vifi]; |
423 | if (cache->mfc_un.res.minvif > vifi) | 666 | if (cache->mfc_un.res.minvif > vifi) |
@@ -428,16 +671,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls) | |||
428 | } | 671 | } |
429 | } | 672 | } |
430 | 673 | ||
431 | static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | 674 | static int vif_add(struct net *net, struct mr_table *mrt, |
675 | struct vifctl *vifc, int mrtsock) | ||
432 | { | 676 | { |
433 | int vifi = vifc->vifc_vifi; | 677 | int vifi = vifc->vifc_vifi; |
434 | struct vif_device *v = &net->ipv4.vif_table[vifi]; | 678 | struct vif_device *v = &mrt->vif_table[vifi]; |
435 | struct net_device *dev; | 679 | struct net_device *dev; |
436 | struct in_device *in_dev; | 680 | struct in_device *in_dev; |
437 | int err; | 681 | int err; |
438 | 682 | ||
439 | /* Is vif busy ? */ | 683 | /* Is vif busy ? */ |
440 | if (VIF_EXISTS(net, vifi)) | 684 | if (VIF_EXISTS(mrt, vifi)) |
441 | return -EADDRINUSE; | 685 | return -EADDRINUSE; |
442 | 686 | ||
443 | switch (vifc->vifc_flags) { | 687 | switch (vifc->vifc_flags) { |
@@ -447,9 +691,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | |||
447 | * Special Purpose VIF in PIM | 691 | * Special Purpose VIF in PIM |
448 | * All the packets will be sent to the daemon | 692 | * All the packets will be sent to the daemon |
449 | */ | 693 | */ |
450 | if (net->ipv4.mroute_reg_vif_num >= 0) | 694 | if (mrt->mroute_reg_vif_num >= 0) |
451 | return -EADDRINUSE; | 695 | return -EADDRINUSE; |
452 | dev = ipmr_reg_vif(net); | 696 | dev = ipmr_reg_vif(net, mrt); |
453 | if (!dev) | 697 | if (!dev) |
454 | return -ENOBUFS; | 698 | return -ENOBUFS; |
455 | err = dev_set_allmulti(dev, 1); | 699 | err = dev_set_allmulti(dev, 1); |
@@ -525,49 +769,47 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock) | |||
525 | v->dev = dev; | 769 | v->dev = dev; |
526 | #ifdef CONFIG_IP_PIMSM | 770 | #ifdef CONFIG_IP_PIMSM |
527 | if (v->flags&VIFF_REGISTER) | 771 | if (v->flags&VIFF_REGISTER) |
528 | net->ipv4.mroute_reg_vif_num = vifi; | 772 | mrt->mroute_reg_vif_num = vifi; |
529 | #endif | 773 | #endif |
530 | if (vifi+1 > net->ipv4.maxvif) | 774 | if (vifi+1 > mrt->maxvif) |
531 | net->ipv4.maxvif = vifi+1; | 775 | mrt->maxvif = vifi+1; |
532 | write_unlock_bh(&mrt_lock); | 776 | write_unlock_bh(&mrt_lock); |
533 | return 0; | 777 | return 0; |
534 | } | 778 | } |
535 | 779 | ||
536 | static struct mfc_cache *ipmr_cache_find(struct net *net, | 780 | static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, |
537 | __be32 origin, | 781 | __be32 origin, |
538 | __be32 mcastgrp) | 782 | __be32 mcastgrp) |
539 | { | 783 | { |
540 | int line = MFC_HASH(mcastgrp, origin); | 784 | int line = MFC_HASH(mcastgrp, origin); |
541 | struct mfc_cache *c; | 785 | struct mfc_cache *c; |
542 | 786 | ||
543 | for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) { | 787 | list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { |
544 | if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp) | 788 | if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) |
545 | break; | 789 | return c; |
546 | } | 790 | } |
547 | return c; | 791 | return NULL; |
548 | } | 792 | } |
549 | 793 | ||
550 | /* | 794 | /* |
551 | * Allocate a multicast cache entry | 795 | * Allocate a multicast cache entry |
552 | */ | 796 | */ |
553 | static struct mfc_cache *ipmr_cache_alloc(struct net *net) | 797 | static struct mfc_cache *ipmr_cache_alloc(void) |
554 | { | 798 | { |
555 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); | 799 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); |
556 | if (c == NULL) | 800 | if (c == NULL) |
557 | return NULL; | 801 | return NULL; |
558 | c->mfc_un.res.minvif = MAXVIFS; | 802 | c->mfc_un.res.minvif = MAXVIFS; |
559 | mfc_net_set(c, net); | ||
560 | return c; | 803 | return c; |
561 | } | 804 | } |
562 | 805 | ||
563 | static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) | 806 | static struct mfc_cache *ipmr_cache_alloc_unres(void) |
564 | { | 807 | { |
565 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); | 808 | struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); |
566 | if (c == NULL) | 809 | if (c == NULL) |
567 | return NULL; | 810 | return NULL; |
568 | skb_queue_head_init(&c->mfc_un.unres.unresolved); | 811 | skb_queue_head_init(&c->mfc_un.unres.unresolved); |
569 | c->mfc_un.unres.expires = jiffies + 10*HZ; | 812 | c->mfc_un.unres.expires = jiffies + 10*HZ; |
570 | mfc_net_set(c, net); | ||
571 | return c; | 813 | return c; |
572 | } | 814 | } |
573 | 815 | ||
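[Annotation] ipmr_cache_alloc() above no longer takes or stores a net pointer per cache entry; the owning table records it once via write_pnet() and readers recover it with read_pnet(), helpers that collapse to a constant when namespaces are compiled out. A simplified sketch of that conditional-member pattern (the real helpers operate on struct net ** rather than on the table):

#include <stdio.h>

struct net { int id; };
static struct net init_net = { 0 };

#define CONFIG_NET_NS 1   /* flip to 0 for the stubbed variant */

struct mr_table {
#if CONFIG_NET_NS
    struct net *net;      /* only stored when namespaces exist */
#endif
    unsigned id;
};

static void write_pnet(struct mr_table *t, struct net *net)
{
#if CONFIG_NET_NS
    t->net = net;
#else
    (void)t; (void)net;   /* everything lives in init_net */
#endif
}

static struct net *read_pnet(const struct mr_table *t)
{
#if CONFIG_NET_NS
    return t->net;
#else
    (void)t;
    return &init_net;
#endif
}

int main(void)
{
    struct net ns = { 7 };
    struct mr_table t = { 0 };

    (void)init_net;       /* referenced by the !NET_NS variant */
    write_pnet(&t, &ns);
    printf("table's net id: %d\n", read_pnet(&t)->id);
    return 0;
}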
@@ -575,7 +817,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net) | |||
575 | * A cache entry has gone into a resolved state from queued | 817 | * A cache entry has gone into a resolved state from queued |
576 | */ | 818 | */ |
577 | 819 | ||
578 | static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | 820 | static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, |
821 | struct mfc_cache *uc, struct mfc_cache *c) | ||
579 | { | 822 | { |
580 | struct sk_buff *skb; | 823 | struct sk_buff *skb; |
581 | struct nlmsgerr *e; | 824 | struct nlmsgerr *e; |
@@ -588,7 +831,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
588 | if (ip_hdr(skb)->version == 0) { | 831 | if (ip_hdr(skb)->version == 0) { |
589 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); | 832 | struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); |
590 | 833 | ||
591 | if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { | 834 | if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { |
592 | nlh->nlmsg_len = (skb_tail_pointer(skb) - | 835 | nlh->nlmsg_len = (skb_tail_pointer(skb) - |
593 | (u8 *)nlh); | 836 | (u8 *)nlh); |
594 | } else { | 837 | } else { |
@@ -600,9 +843,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
600 | memset(&e->msg, 0, sizeof(e->msg)); | 843 | memset(&e->msg, 0, sizeof(e->msg)); |
601 | } | 844 | } |
602 | 845 | ||
603 | rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid); | 846 | rtnl_unicast(skb, net, NETLINK_CB(skb).pid); |
604 | } else | 847 | } else |
605 | ip_mr_forward(skb, c, 0); | 848 | ip_mr_forward(net, mrt, skb, c, 0); |
606 | } | 849 | } |
607 | } | 850 | } |
608 | 851 | ||
@@ -613,7 +856,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
613 | * Called under mrt_lock. | 856 | * Called under mrt_lock. |
614 | */ | 857 | */ |
615 | 858 | ||
616 | static int ipmr_cache_report(struct net *net, | 859 | static int ipmr_cache_report(struct mr_table *mrt, |
617 | struct sk_buff *pkt, vifi_t vifi, int assert) | 860 | struct sk_buff *pkt, vifi_t vifi, int assert) |
618 | { | 861 | { |
619 | struct sk_buff *skb; | 862 | struct sk_buff *skb; |
@@ -646,7 +889,7 @@ static int ipmr_cache_report(struct net *net, | |||
646 | memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); | 889 | memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); |
647 | msg->im_msgtype = IGMPMSG_WHOLEPKT; | 890 | msg->im_msgtype = IGMPMSG_WHOLEPKT; |
648 | msg->im_mbz = 0; | 891 | msg->im_mbz = 0; |
649 | msg->im_vif = net->ipv4.mroute_reg_vif_num; | 892 | msg->im_vif = mrt->mroute_reg_vif_num; |
650 | ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; | 893 | ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; |
651 | ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + | 894 | ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + |
652 | sizeof(struct iphdr)); | 895 | sizeof(struct iphdr)); |
@@ -678,7 +921,7 @@ static int ipmr_cache_report(struct net *net, | |||
678 | skb->transport_header = skb->network_header; | 921 | skb->transport_header = skb->network_header; |
679 | } | 922 | } |
680 | 923 | ||
681 | if (net->ipv4.mroute_sk == NULL) { | 924 | if (mrt->mroute_sk == NULL) { |
682 | kfree_skb(skb); | 925 | kfree_skb(skb); |
683 | return -EINVAL; | 926 | return -EINVAL; |
684 | } | 927 | } |
@@ -686,7 +929,7 @@ static int ipmr_cache_report(struct net *net, | |||
686 | /* | 929 | /* |
687 | * Deliver to mrouted | 930 | * Deliver to mrouted |
688 | */ | 931 | */ |
689 | ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb); | 932 | ret = sock_queue_rcv_skb(mrt->mroute_sk, skb); |
690 | if (ret < 0) { | 933 | if (ret < 0) { |
691 | if (net_ratelimit()) | 934 | if (net_ratelimit()) |
692 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); | 935 | printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); |
@@ -701,27 +944,29 @@ static int ipmr_cache_report(struct net *net, | |||
701 | */ | 944 | */ |
702 | 945 | ||
703 | static int | 946 | static int |
704 | ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | 947 | ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) |
705 | { | 948 | { |
949 | bool found = false; | ||
706 | int err; | 950 | int err; |
707 | struct mfc_cache *c; | 951 | struct mfc_cache *c; |
708 | const struct iphdr *iph = ip_hdr(skb); | 952 | const struct iphdr *iph = ip_hdr(skb); |
709 | 953 | ||
710 | spin_lock_bh(&mfc_unres_lock); | 954 | spin_lock_bh(&mfc_unres_lock); |
711 | for (c=mfc_unres_queue; c; c=c->next) { | 955 | list_for_each_entry(c, &mrt->mfc_unres_queue, list) { |
712 | if (net_eq(mfc_net(c), net) && | 956 | if (c->mfc_mcastgrp == iph->daddr && |
713 | c->mfc_mcastgrp == iph->daddr && | 957 | c->mfc_origin == iph->saddr) { |
714 | c->mfc_origin == iph->saddr) | 958 | found = true; |
715 | break; | 959 | break; |
960 | } | ||
716 | } | 961 | } |
717 | 962 | ||
718 | if (c == NULL) { | 963 | if (!found) { |
719 | /* | 964 | /* |
720 | * Create a new entry if allowable | 965 | * Create a new entry if allowable |
721 | */ | 966 | */ |
722 | 967 | ||
723 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 || | 968 | if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || |
724 | (c = ipmr_cache_alloc_unres(net)) == NULL) { | 969 | (c = ipmr_cache_alloc_unres()) == NULL) { |
725 | spin_unlock_bh(&mfc_unres_lock); | 970 | spin_unlock_bh(&mfc_unres_lock); |
726 | 971 | ||
727 | kfree_skb(skb); | 972 | kfree_skb(skb); |
@@ -738,7 +983,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
738 | /* | 983 | /* |
739 | * Reflect first query at mrouted. | 984 | * Reflect first query at mrouted. |
740 | */ | 985 | */ |
741 | err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE); | 986 | err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); |
742 | if (err < 0) { | 987 | if (err < 0) { |
743 | /* If the report failed throw the cache entry | 988 | /* If the report failed throw the cache entry |
744 | out - Brad Parker | 989 | out - Brad Parker |
@@ -750,11 +995,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
750 | return err; | 995 | return err; |
751 | } | 996 | } |
752 | 997 | ||
753 | atomic_inc(&net->ipv4.cache_resolve_queue_len); | 998 | atomic_inc(&mrt->cache_resolve_queue_len); |
754 | c->next = mfc_unres_queue; | 999 | list_add(&c->list, &mrt->mfc_unres_queue); |
755 | mfc_unres_queue = c; | ||
756 | 1000 | ||
757 | mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires); | 1001 | mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); |
758 | } | 1002 | } |
759 | 1003 | ||
760 | /* | 1004 | /* |
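[Annotation] ipmr_cache_unresolved() above implements a classic pending-work pattern: look the flow up in the unresolved queue; if absent and the backlog is under the cap (10 entries here), allocate a stub, report the first packet to the daemon, and arm an expiry timer. A compressed sketch of just the lookup-or-create-with-cap logic; the timer and daemon report are left as comments.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct unres {
    struct unres *next;
    uint32_t origin, mcastgrp;  /* flow key: (source, group) */
};

static struct unres *queue;
static int queue_len;

/* Find or create a pending entry; refuse when the backlog is full,
 * mirroring the cache_resolve_queue_len >= 10 check. */
static struct unres *unresolved(uint32_t origin, uint32_t grp)
{
    struct unres *c;

    for (c = queue; c; c = c->next)
        if (c->origin == origin && c->mcastgrp == grp)
            return c;               /* queue behind existing entry */

    if (queue_len >= 10)
        return NULL;                /* drop: daemon isn't keeping up */

    c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;
    c->origin = origin;
    c->mcastgrp = grp;
    c->next = queue;
    queue = c;
    queue_len++;
    /* real code: report first packet to the daemon, arm expiry timer */
    return c;
}

int main(void)
{
    struct unres *a = unresolved(0x0a000001, 0xe0000001);
    struct unres *b = unresolved(0x0a000001, 0xe0000001);
    printf("reused entry: %s (len=%d)\n", a == b ? "yes" : "no", queue_len);
    return 0;
}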
@@ -776,19 +1020,18 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb) | |||
776 | * MFC cache manipulation by user space mroute daemon | 1020 | * MFC cache manipulation by user space mroute daemon |
777 | */ | 1021 | */ |
778 | 1022 | ||
779 | static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) | 1023 | static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) |
780 | { | 1024 | { |
781 | int line; | 1025 | int line; |
782 | struct mfc_cache *c, **cp; | 1026 | struct mfc_cache *c, *next; |
783 | 1027 | ||
784 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 1028 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
785 | 1029 | ||
786 | for (cp = &net->ipv4.mfc_cache_array[line]; | 1030 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { |
787 | (c = *cp) != NULL; cp = &c->next) { | ||
788 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 1031 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
789 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { | 1032 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { |
790 | write_lock_bh(&mrt_lock); | 1033 | write_lock_bh(&mrt_lock); |
791 | *cp = c->next; | 1034 | list_del(&c->list); |
792 | write_unlock_bh(&mrt_lock); | 1035 | write_unlock_bh(&mrt_lock); |
793 | 1036 | ||
794 | ipmr_cache_free(c); | 1037 | ipmr_cache_free(c); |
@@ -798,27 +1041,30 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc) | |||
798 | return -ENOENT; | 1041 | return -ENOENT; |
799 | } | 1042 | } |
800 | 1043 | ||
801 | static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | 1044 | static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, |
1045 | struct mfcctl *mfc, int mrtsock) | ||
802 | { | 1046 | { |
1047 | bool found = false; | ||
803 | int line; | 1048 | int line; |
804 | struct mfc_cache *uc, *c, **cp; | 1049 | struct mfc_cache *uc, *c; |
805 | 1050 | ||
806 | if (mfc->mfcc_parent >= MAXVIFS) | 1051 | if (mfc->mfcc_parent >= MAXVIFS) |
807 | return -ENFILE; | 1052 | return -ENFILE; |
808 | 1053 | ||
809 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); | 1054 | line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); |
810 | 1055 | ||
811 | for (cp = &net->ipv4.mfc_cache_array[line]; | 1056 | list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { |
812 | (c = *cp) != NULL; cp = &c->next) { | ||
813 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && | 1057 | if (c->mfc_origin == mfc->mfcc_origin.s_addr && |
814 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) | 1058 | c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { |
1059 | found = true; | ||
815 | break; | 1060 | break; |
1061 | } | ||
816 | } | 1062 | } |
817 | 1063 | ||
818 | if (c != NULL) { | 1064 | if (found) { |
819 | write_lock_bh(&mrt_lock); | 1065 | write_lock_bh(&mrt_lock); |
820 | c->mfc_parent = mfc->mfcc_parent; | 1066 | c->mfc_parent = mfc->mfcc_parent; |
821 | ipmr_update_thresholds(c, mfc->mfcc_ttls); | 1067 | ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); |
822 | if (!mrtsock) | 1068 | if (!mrtsock) |
823 | c->mfc_flags |= MFC_STATIC; | 1069 | c->mfc_flags |= MFC_STATIC; |
824 | write_unlock_bh(&mrt_lock); | 1070 | write_unlock_bh(&mrt_lock); |
@@ -828,43 +1074,42 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
828 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) | 1074 | if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) |
829 | return -EINVAL; | 1075 | return -EINVAL; |
830 | 1076 | ||
831 | c = ipmr_cache_alloc(net); | 1077 | c = ipmr_cache_alloc(); |
832 | if (c == NULL) | 1078 | if (c == NULL) |
833 | return -ENOMEM; | 1079 | return -ENOMEM; |
834 | 1080 | ||
835 | c->mfc_origin = mfc->mfcc_origin.s_addr; | 1081 | c->mfc_origin = mfc->mfcc_origin.s_addr; |
836 | c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; | 1082 | c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; |
837 | c->mfc_parent = mfc->mfcc_parent; | 1083 | c->mfc_parent = mfc->mfcc_parent; |
838 | ipmr_update_thresholds(c, mfc->mfcc_ttls); | 1084 | ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); |
839 | if (!mrtsock) | 1085 | if (!mrtsock) |
840 | c->mfc_flags |= MFC_STATIC; | 1086 | c->mfc_flags |= MFC_STATIC; |
841 | 1087 | ||
842 | write_lock_bh(&mrt_lock); | 1088 | write_lock_bh(&mrt_lock); |
843 | c->next = net->ipv4.mfc_cache_array[line]; | 1089 | list_add(&c->list, &mrt->mfc_cache_array[line]); |
844 | net->ipv4.mfc_cache_array[line] = c; | ||
845 | write_unlock_bh(&mrt_lock); | 1090 | write_unlock_bh(&mrt_lock); |
846 | 1091 | ||
847 | /* | 1092 | /* |
848 | * Check to see if we resolved a queued list. If so we | 1093 | * Check to see if we resolved a queued list. If so we |
849 | * need to send on the frames and tidy up. | 1094 | * need to send on the frames and tidy up. |
850 | */ | 1095 | */ |
1096 | found = false; | ||
851 | spin_lock_bh(&mfc_unres_lock); | 1097 | spin_lock_bh(&mfc_unres_lock); |
852 | for (cp = &mfc_unres_queue; (uc=*cp) != NULL; | 1098 | list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { |
853 | cp = &uc->next) { | 1099 | if (uc->mfc_origin == c->mfc_origin && |
854 | if (net_eq(mfc_net(uc), net) && | ||
855 | uc->mfc_origin == c->mfc_origin && | ||
856 | uc->mfc_mcastgrp == c->mfc_mcastgrp) { | 1100 | uc->mfc_mcastgrp == c->mfc_mcastgrp) { |
857 | *cp = uc->next; | 1101 | list_del(&uc->list); |
858 | atomic_dec(&net->ipv4.cache_resolve_queue_len); | 1102 | atomic_dec(&mrt->cache_resolve_queue_len); |
1103 | found = true; | ||
859 | break; | 1104 | break; |
860 | } | 1105 | } |
861 | } | 1106 | } |
862 | if (mfc_unres_queue == NULL) | 1107 | if (list_empty(&mrt->mfc_unres_queue)) |
863 | del_timer(&ipmr_expire_timer); | 1108 | del_timer(&mrt->ipmr_expire_timer); |
864 | spin_unlock_bh(&mfc_unres_lock); | 1109 | spin_unlock_bh(&mfc_unres_lock); |
865 | 1110 | ||
866 | if (uc) { | 1111 | if (found) { |
867 | ipmr_cache_resolve(uc, c); | 1112 | ipmr_cache_resolve(net, mrt, uc, c); |
868 | ipmr_cache_free(uc); | 1113 | ipmr_cache_free(uc); |
869 | } | 1114 | } |
870 | return 0; | 1115 | return 0; |
@@ -874,53 +1119,41 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock) | |||
874 | * Close the multicast socket, and clear the vif tables etc | 1119 | * Close the multicast socket, and clear the vif tables etc |
875 | */ | 1120 | */ |
876 | 1121 | ||
877 | static void mroute_clean_tables(struct net *net) | 1122 | static void mroute_clean_tables(struct mr_table *mrt) |
878 | { | 1123 | { |
879 | int i; | 1124 | int i; |
880 | LIST_HEAD(list); | 1125 | LIST_HEAD(list); |
1126 | struct mfc_cache *c, *next; | ||
881 | 1127 | ||
882 | /* | 1128 | /* |
883 | * Shut down all active vif entries | 1129 | * Shut down all active vif entries |
884 | */ | 1130 | */ |
885 | for (i = 0; i < net->ipv4.maxvif; i++) { | 1131 | for (i = 0; i < mrt->maxvif; i++) { |
886 | if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC)) | 1132 | if (!(mrt->vif_table[i].flags&VIFF_STATIC)) |
887 | vif_delete(net, i, 0, &list); | 1133 | vif_delete(mrt, i, 0, &list); |
888 | } | 1134 | } |
889 | unregister_netdevice_many(&list); | 1135 | unregister_netdevice_many(&list); |
890 | 1136 | ||
891 | /* | 1137 | /* |
892 | * Wipe the cache | 1138 | * Wipe the cache |
893 | */ | 1139 | */ |
894 | for (i=0; i<MFC_LINES; i++) { | 1140 | for (i = 0; i < MFC_LINES; i++) { |
895 | struct mfc_cache *c, **cp; | 1141 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { |
896 | 1142 | if (c->mfc_flags&MFC_STATIC) | |
897 | cp = &net->ipv4.mfc_cache_array[i]; | ||
898 | while ((c = *cp) != NULL) { | ||
899 | if (c->mfc_flags&MFC_STATIC) { | ||
900 | cp = &c->next; | ||
901 | continue; | 1143 | continue; |
902 | } | ||
903 | write_lock_bh(&mrt_lock); | 1144 | write_lock_bh(&mrt_lock); |
904 | *cp = c->next; | 1145 | list_del(&c->list); |
905 | write_unlock_bh(&mrt_lock); | 1146 | write_unlock_bh(&mrt_lock); |
906 | 1147 | ||
907 | ipmr_cache_free(c); | 1148 | ipmr_cache_free(c); |
908 | } | 1149 | } |
909 | } | 1150 | } |
910 | 1151 | ||
911 | if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) { | 1152 | if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { |
912 | struct mfc_cache *c, **cp; | ||
913 | |||
914 | spin_lock_bh(&mfc_unres_lock); | 1153 | spin_lock_bh(&mfc_unres_lock); |
915 | cp = &mfc_unres_queue; | 1154 | list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { |
916 | while ((c = *cp) != NULL) { | 1155 | list_del(&c->list); |
917 | if (!net_eq(mfc_net(c), net)) { | 1156 | ipmr_destroy_unres(mrt, c); |
918 | cp = &c->next; | ||
919 | continue; | ||
920 | } | ||
921 | *cp = c->next; | ||
922 | |||
923 | ipmr_destroy_unres(c); | ||
924 | } | 1157 | } |
925 | spin_unlock_bh(&mfc_unres_lock); | 1158 | spin_unlock_bh(&mfc_unres_lock); |
926 | } | 1159 | } |
@@ -929,16 +1162,19 @@ static void mroute_clean_tables(struct net *net) | |||
929 | static void mrtsock_destruct(struct sock *sk) | 1162 | static void mrtsock_destruct(struct sock *sk) |
930 | { | 1163 | { |
931 | struct net *net = sock_net(sk); | 1164 | struct net *net = sock_net(sk); |
1165 | struct mr_table *mrt; | ||
932 | 1166 | ||
933 | rtnl_lock(); | 1167 | rtnl_lock(); |
934 | if (sk == net->ipv4.mroute_sk) { | 1168 | ipmr_for_each_table(mrt, net) { |
935 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; | 1169 | if (sk == mrt->mroute_sk) { |
1170 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; | ||
936 | 1171 | ||
937 | write_lock_bh(&mrt_lock); | 1172 | write_lock_bh(&mrt_lock); |
938 | net->ipv4.mroute_sk = NULL; | 1173 | mrt->mroute_sk = NULL; |
939 | write_unlock_bh(&mrt_lock); | 1174 | write_unlock_bh(&mrt_lock); |
940 | 1175 | ||
941 | mroute_clean_tables(net); | 1176 | mroute_clean_tables(mrt); |
1177 | } | ||
942 | } | 1178 | } |
943 | rtnl_unlock(); | 1179 | rtnl_unlock(); |
944 | } | 1180 | } |
@@ -956,9 +1192,14 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
956 | struct vifctl vif; | 1192 | struct vifctl vif; |
957 | struct mfcctl mfc; | 1193 | struct mfcctl mfc; |
958 | struct net *net = sock_net(sk); | 1194 | struct net *net = sock_net(sk); |
1195 | struct mr_table *mrt; | ||
1196 | |||
1197 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | ||
1198 | if (mrt == NULL) | ||
1199 | return -ENOENT; | ||
959 | 1200 | ||
960 | if (optname != MRT_INIT) { | 1201 | if (optname != MRT_INIT) { |
961 | if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN)) | 1202 | if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN)) |
962 | return -EACCES; | 1203 | return -EACCES; |
963 | } | 1204 | } |
964 | 1205 | ||
@@ -971,7 +1212,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
971 | return -ENOPROTOOPT; | 1212 | return -ENOPROTOOPT; |
972 | 1213 | ||
973 | rtnl_lock(); | 1214 | rtnl_lock(); |
974 | if (net->ipv4.mroute_sk) { | 1215 | if (mrt->mroute_sk) { |
975 | rtnl_unlock(); | 1216 | rtnl_unlock(); |
976 | return -EADDRINUSE; | 1217 | return -EADDRINUSE; |
977 | } | 1218 | } |
@@ -979,7 +1220,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
979 | ret = ip_ra_control(sk, 1, mrtsock_destruct); | 1220 | ret = ip_ra_control(sk, 1, mrtsock_destruct); |
980 | if (ret == 0) { | 1221 | if (ret == 0) { |
981 | write_lock_bh(&mrt_lock); | 1222 | write_lock_bh(&mrt_lock); |
982 | net->ipv4.mroute_sk = sk; | 1223 | mrt->mroute_sk = sk; |
983 | write_unlock_bh(&mrt_lock); | 1224 | write_unlock_bh(&mrt_lock); |
984 | 1225 | ||
985 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; | 1226 | IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; |
@@ -987,7 +1228,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
987 | rtnl_unlock(); | 1228 | rtnl_unlock(); |
988 | return ret; | 1229 | return ret; |
989 | case MRT_DONE: | 1230 | case MRT_DONE: |
990 | if (sk != net->ipv4.mroute_sk) | 1231 | if (sk != mrt->mroute_sk) |
991 | return -EACCES; | 1232 | return -EACCES; |
992 | return ip_ra_control(sk, 0, NULL); | 1233 | return ip_ra_control(sk, 0, NULL); |
993 | case MRT_ADD_VIF: | 1234 | case MRT_ADD_VIF: |
@@ -1000,9 +1241,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1000 | return -ENFILE; | 1241 | return -ENFILE; |
1001 | rtnl_lock(); | 1242 | rtnl_lock(); |
1002 | if (optname == MRT_ADD_VIF) { | 1243 | if (optname == MRT_ADD_VIF) { |
1003 | ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk); | 1244 | ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk); |
1004 | } else { | 1245 | } else { |
1005 | ret = vif_delete(net, vif.vifc_vifi, 0, NULL); | 1246 | ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); |
1006 | } | 1247 | } |
1007 | rtnl_unlock(); | 1248 | rtnl_unlock(); |
1008 | return ret; | 1249 | return ret; |
@@ -1019,9 +1260,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1019 | return -EFAULT; | 1260 | return -EFAULT; |
1020 | rtnl_lock(); | 1261 | rtnl_lock(); |
1021 | if (optname == MRT_DEL_MFC) | 1262 | if (optname == MRT_DEL_MFC) |
1022 | ret = ipmr_mfc_delete(net, &mfc); | 1263 | ret = ipmr_mfc_delete(mrt, &mfc); |
1023 | else | 1264 | else |
1024 | ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk); | 1265 | ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk); |
1025 | rtnl_unlock(); | 1266 | rtnl_unlock(); |
1026 | return ret; | 1267 | return ret; |
1027 | /* | 1268 | /* |
@@ -1032,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1032 | int v; | 1273 | int v; |
1033 | if (get_user(v,(int __user *)optval)) | 1274 | if (get_user(v,(int __user *)optval)) |
1034 | return -EFAULT; | 1275 | return -EFAULT; |
1035 | net->ipv4.mroute_do_assert = (v) ? 1 : 0; | 1276 | mrt->mroute_do_assert = (v) ? 1 : 0; |
1036 | return 0; | 1277 | return 0; |
1037 | } | 1278 | } |
1038 | #ifdef CONFIG_IP_PIMSM | 1279 | #ifdef CONFIG_IP_PIMSM |
@@ -1046,14 +1287,35 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1046 | 1287 | ||
1047 | rtnl_lock(); | 1288 | rtnl_lock(); |
1048 | ret = 0; | 1289 | ret = 0; |
1049 | if (v != net->ipv4.mroute_do_pim) { | 1290 | if (v != mrt->mroute_do_pim) { |
1050 | net->ipv4.mroute_do_pim = v; | 1291 | mrt->mroute_do_pim = v; |
1051 | net->ipv4.mroute_do_assert = v; | 1292 | mrt->mroute_do_assert = v; |
1052 | } | 1293 | } |
1053 | rtnl_unlock(); | 1294 | rtnl_unlock(); |
1054 | return ret; | 1295 | return ret; |
1055 | } | 1296 | } |
1056 | #endif | 1297 | #endif |
1298 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | ||
1299 | case MRT_TABLE: | ||
1300 | { | ||
1301 | u32 v; | ||
1302 | |||
1303 | if (optlen != sizeof(u32)) | ||
1304 | return -EINVAL; | ||
1305 | if (get_user(v, (u32 __user *)optval)) | ||
1306 | return -EFAULT; | ||
1307 | if (sk == mrt->mroute_sk) | ||
1308 | return -EBUSY; | ||
1309 | |||
1310 | rtnl_lock(); | ||
1311 | ret = 0; | ||
1312 | if (!ipmr_new_table(net, v)) | ||
1313 | ret = -ENOMEM; | ||
1314 | raw_sk(sk)->ipmr_table = v; | ||
1315 | rtnl_unlock(); | ||
1316 | return ret; | ||
1317 | } | ||
1318 | #endif | ||
1057 | /* | 1319 | /* |
1058 | * Spurious command, or MRT_VERSION which you cannot | 1320 | * Spurious command, or MRT_VERSION which you cannot |
1059 | * set. | 1321 | * set. |
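
The MRT_TABLE case is the only user-visible addition to this function: a routing daemon selects its table with a u32 id and the kernel creates the table on demand via ipmr_new_table(). A hedged userspace sketch of the intended call order; open_mroute_sock is an illustrative name, and MRT_TABLE is assumed to be exported through linux/mroute.h by the matching uapi change:

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

static int open_mroute_sock(uint32_t table)
{
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
        int one = 1;

        if (s < 0)
                return -1;
        /* Table selection must precede MRT_INIT: once this socket is the
         * table's mroute_sk, the hunk above answers MRT_TABLE with -EBUSY. */
        if (setsockopt(s, IPPROTO_IP, MRT_TABLE, &table, sizeof(table)) < 0)
                perror("MRT_TABLE");    /* EINVAL/EFAULT/ENOMEM/EBUSY */
        if (setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
                perror("MRT_INIT");
        return s;
}
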
@@ -1072,6 +1334,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1072 | int olr; | 1334 | int olr; |
1073 | int val; | 1335 | int val; |
1074 | struct net *net = sock_net(sk); | 1336 | struct net *net = sock_net(sk); |
1337 | struct mr_table *mrt; | ||
1338 | |||
1339 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | ||
1340 | if (mrt == NULL) | ||
1341 | return -ENOENT; | ||
1075 | 1342 | ||
1076 | if (optname != MRT_VERSION && | 1343 | if (optname != MRT_VERSION && |
1077 | #ifdef CONFIG_IP_PIMSM | 1344 | #ifdef CONFIG_IP_PIMSM |
@@ -1093,10 +1360,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1093 | val = 0x0305; | 1360 | val = 0x0305; |
1094 | #ifdef CONFIG_IP_PIMSM | 1361 | #ifdef CONFIG_IP_PIMSM |
1095 | else if (optname == MRT_PIM) | 1362 | else if (optname == MRT_PIM) |
1096 | val = net->ipv4.mroute_do_pim; | 1363 | val = mrt->mroute_do_pim; |
1097 | #endif | 1364 | #endif |
1098 | else | 1365 | else |
1099 | val = net->ipv4.mroute_do_assert; | 1366 | val = mrt->mroute_do_assert; |
1100 | if (copy_to_user(optval, &val, olr)) | 1367 | if (copy_to_user(optval, &val, olr)) |
1101 | return -EFAULT; | 1368 | return -EFAULT; |
1102 | return 0; | 1369 | return 0; |
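
The read side is symmetric: MRT_VERSION still reports the fixed 0x0305 shown above, while MRT_PIM and MRT_ASSERT now reflect the socket's own table rather than global state. A fragment, reusing the socket s from the previous sketch:

int val = 0;
socklen_t len = sizeof(val);

if (getsockopt(s, IPPROTO_IP, MRT_VERSION, &val, &len) == 0)
        printf("mroute version %#x\n", val);    /* prints 0x305 */
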
@@ -1113,16 +1380,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1113 | struct vif_device *vif; | 1380 | struct vif_device *vif; |
1114 | struct mfc_cache *c; | 1381 | struct mfc_cache *c; |
1115 | struct net *net = sock_net(sk); | 1382 | struct net *net = sock_net(sk); |
1383 | struct mr_table *mrt; | ||
1384 | |||
1385 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | ||
1386 | if (mrt == NULL) | ||
1387 | return -ENOENT; | ||
1116 | 1388 | ||
1117 | switch (cmd) { | 1389 | switch (cmd) { |
1118 | case SIOCGETVIFCNT: | 1390 | case SIOCGETVIFCNT: |
1119 | if (copy_from_user(&vr, arg, sizeof(vr))) | 1391 | if (copy_from_user(&vr, arg, sizeof(vr))) |
1120 | return -EFAULT; | 1392 | return -EFAULT; |
1121 | if (vr.vifi >= net->ipv4.maxvif) | 1393 | if (vr.vifi >= mrt->maxvif) |
1122 | return -EINVAL; | 1394 | return -EINVAL; |
1123 | read_lock(&mrt_lock); | 1395 | read_lock(&mrt_lock); |
1124 | vif = &net->ipv4.vif_table[vr.vifi]; | 1396 | vif = &mrt->vif_table[vr.vifi]; |
1125 | if (VIF_EXISTS(net, vr.vifi)) { | 1397 | if (VIF_EXISTS(mrt, vr.vifi)) { |
1126 | vr.icount = vif->pkt_in; | 1398 | vr.icount = vif->pkt_in; |
1127 | vr.ocount = vif->pkt_out; | 1399 | vr.ocount = vif->pkt_out; |
1128 | vr.ibytes = vif->bytes_in; | 1400 | vr.ibytes = vif->bytes_in; |
@@ -1140,7 +1412,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1140 | return -EFAULT; | 1412 | return -EFAULT; |
1141 | 1413 | ||
1142 | read_lock(&mrt_lock); | 1414 | read_lock(&mrt_lock); |
1143 | c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr); | 1415 | c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); |
1144 | if (c) { | 1416 | if (c) { |
1145 | sr.pktcnt = c->mfc_un.res.pkt; | 1417 | sr.pktcnt = c->mfc_un.res.pkt; |
1146 | sr.bytecnt = c->mfc_un.res.bytes; | 1418 | sr.bytecnt = c->mfc_un.res.bytes; |
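
Both ioctls now read from the socket's own table, so daemons bound to different tables see disjoint counters. A small userspace sketch of the SIOCGETVIFCNT side, using the sioc_vif_req fields visible in the hunk (print_vif_counters is an illustrative name):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/mroute.h>

static void print_vif_counters(int s, vifi_t vifi)
{
        struct sioc_vif_req vr = { .vifi = vifi };

        if (ioctl(s, SIOCGETVIFCNT, &vr) == 0)
                printf("vif %u: %lu/%lu pkts in/out, %lu/%lu bytes\n",
                       (unsigned int)vifi, vr.icount, vr.ocount,
                       vr.ibytes, vr.obytes);
}
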
@@ -1163,16 +1435,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v | |||
1163 | { | 1435 | { |
1164 | struct net_device *dev = ptr; | 1436 | struct net_device *dev = ptr; |
1165 | struct net *net = dev_net(dev); | 1437 | struct net *net = dev_net(dev); |
1438 | struct mr_table *mrt; | ||
1166 | struct vif_device *v; | 1439 | struct vif_device *v; |
1167 | int ct; | 1440 | int ct; |
1168 | LIST_HEAD(list); | 1441 | LIST_HEAD(list); |
1169 | 1442 | ||
1170 | if (event != NETDEV_UNREGISTER) | 1443 | if (event != NETDEV_UNREGISTER) |
1171 | return NOTIFY_DONE; | 1444 | return NOTIFY_DONE; |
1172 | v = &net->ipv4.vif_table[0]; | 1445 | |
1173 | for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) { | 1446 | ipmr_for_each_table(mrt, net) { |
1174 | if (v->dev == dev) | 1447 | v = &mrt->vif_table[0]; |
1175 | vif_delete(net, ct, 1, &list); | 1448 | for (ct = 0; ct < mrt->maxvif; ct++, v++) { |
1449 | if (v->dev == dev) | ||
1450 | vif_delete(mrt, ct, 1, &list); | ||
1451 | } | ||
1176 | } | 1452 | } |
1177 | unregister_netdevice_many(&list); | 1453 | unregister_netdevice_many(&list); |
1178 | return NOTIFY_DONE; | 1454 | return NOTIFY_DONE; |
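
With tables multiplied, the unregister notifier has to sweep every table for vifs bound to the dying device, hence the new loop. ipmr_for_each_table() itself is defined near the top of ipmr.c, outside this excerpt; with CONFIG_IP_MROUTE_MULTIPLE_TABLES it walks the per-net list of tables, and the single-table build plausibly reduces to a one-iteration loop:

/* Plausible single-table fallback; the multiple-tables variant iterates
 * net->ipv4.mr_tables instead. */
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
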
@@ -1231,11 +1507,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1231 | * Processing handlers for ipmr_forward | 1507 | * Processing handlers for ipmr_forward |
1232 | */ | 1508 | */ |
1233 | 1509 | ||
1234 | static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | 1510 | static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, |
1511 | struct sk_buff *skb, struct mfc_cache *c, int vifi) | ||
1235 | { | 1512 | { |
1236 | struct net *net = mfc_net(c); | ||
1237 | const struct iphdr *iph = ip_hdr(skb); | 1513 | const struct iphdr *iph = ip_hdr(skb); |
1238 | struct vif_device *vif = &net->ipv4.vif_table[vifi]; | 1514 | struct vif_device *vif = &mrt->vif_table[vifi]; |
1239 | struct net_device *dev; | 1515 | struct net_device *dev; |
1240 | struct rtable *rt; | 1516 | struct rtable *rt; |
1241 | int encap = 0; | 1517 | int encap = 0; |
@@ -1249,7 +1525,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1249 | vif->bytes_out += skb->len; | 1525 | vif->bytes_out += skb->len; |
1250 | vif->dev->stats.tx_bytes += skb->len; | 1526 | vif->dev->stats.tx_bytes += skb->len; |
1251 | vif->dev->stats.tx_packets++; | 1527 | vif->dev->stats.tx_packets++; |
1252 | ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT); | 1528 | ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); |
1253 | goto out_free; | 1529 | goto out_free; |
1254 | } | 1530 | } |
1255 | #endif | 1531 | #endif |
@@ -1332,12 +1608,12 @@ out_free: | |||
1332 | return; | 1608 | return; |
1333 | } | 1609 | } |
1334 | 1610 | ||
1335 | static int ipmr_find_vif(struct net_device *dev) | 1611 | static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) |
1336 | { | 1612 | { |
1337 | struct net *net = dev_net(dev); | ||
1338 | int ct; | 1613 | int ct; |
1339 | for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) { | 1614 | |
1340 | if (net->ipv4.vif_table[ct].dev == dev) | 1615 | for (ct = mrt->maxvif-1; ct >= 0; ct--) { |
1616 | if (mrt->vif_table[ct].dev == dev) | ||
1341 | break; | 1617 | break; |
1342 | } | 1618 | } |
1343 | return ct; | 1619 | return ct; |
@@ -1345,11 +1621,12 @@ static int ipmr_find_vif(struct net_device *dev) | |||
1345 | 1621 | ||
1346 | /* "local" means that we should preserve one skb (for local delivery) */ | 1622 | /* "local" means that we should preserve one skb (for local delivery) */ |
1347 | 1623 | ||
1348 | static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local) | 1624 | static int ip_mr_forward(struct net *net, struct mr_table *mrt, |
1625 | struct sk_buff *skb, struct mfc_cache *cache, | ||
1626 | int local) | ||
1349 | { | 1627 | { |
1350 | int psend = -1; | 1628 | int psend = -1; |
1351 | int vif, ct; | 1629 | int vif, ct; |
1352 | struct net *net = mfc_net(cache); | ||
1353 | 1630 | ||
1354 | vif = cache->mfc_parent; | 1631 | vif = cache->mfc_parent; |
1355 | cache->mfc_un.res.pkt++; | 1632 | cache->mfc_un.res.pkt++; |
@@ -1358,7 +1635,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1358 | /* | 1635 | /* |
1359 | * Wrong interface: drop packet and (maybe) send PIM assert. | 1636 | * Wrong interface: drop packet and (maybe) send PIM assert. |
1360 | */ | 1637 | */ |
1361 | if (net->ipv4.vif_table[vif].dev != skb->dev) { | 1638 | if (mrt->vif_table[vif].dev != skb->dev) { |
1362 | int true_vifi; | 1639 | int true_vifi; |
1363 | 1640 | ||
1364 | if (skb_rtable(skb)->fl.iif == 0) { | 1641 | if (skb_rtable(skb)->fl.iif == 0) { |
@@ -1377,26 +1654,26 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1377 | } | 1654 | } |
1378 | 1655 | ||
1379 | cache->mfc_un.res.wrong_if++; | 1656 | cache->mfc_un.res.wrong_if++; |
1380 | true_vifi = ipmr_find_vif(skb->dev); | 1657 | true_vifi = ipmr_find_vif(mrt, skb->dev); |
1381 | 1658 | ||
1382 | if (true_vifi >= 0 && net->ipv4.mroute_do_assert && | 1659 | if (true_vifi >= 0 && mrt->mroute_do_assert && |
1383 | /* pimsm uses asserts, when switching from RPT to SPT, | 1660 | /* pimsm uses asserts, when switching from RPT to SPT, |
1384 | so that we cannot check that packet arrived on an oif. | 1661 | so that we cannot check that packet arrived on an oif. |
1385 | It is bad, but otherwise we would need to move pretty | 1662 | It is bad, but otherwise we would need to move pretty |
1386 | large chunk of pimd to kernel. Ough... --ANK | 1663 | large chunk of pimd to kernel. Ough... --ANK |
1387 | */ | 1664 | */ |
1388 | (net->ipv4.mroute_do_pim || | 1665 | (mrt->mroute_do_pim || |
1389 | cache->mfc_un.res.ttls[true_vifi] < 255) && | 1666 | cache->mfc_un.res.ttls[true_vifi] < 255) && |
1390 | time_after(jiffies, | 1667 | time_after(jiffies, |
1391 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { | 1668 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { |
1392 | cache->mfc_un.res.last_assert = jiffies; | 1669 | cache->mfc_un.res.last_assert = jiffies; |
1393 | ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF); | 1670 | ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); |
1394 | } | 1671 | } |
1395 | goto dont_forward; | 1672 | goto dont_forward; |
1396 | } | 1673 | } |
1397 | 1674 | ||
1398 | net->ipv4.vif_table[vif].pkt_in++; | 1675 | mrt->vif_table[vif].pkt_in++; |
1399 | net->ipv4.vif_table[vif].bytes_in += skb->len; | 1676 | mrt->vif_table[vif].bytes_in += skb->len; |
1400 | 1677 | ||
1401 | /* | 1678 | /* |
1402 | * Forward the frame | 1679 | * Forward the frame |
@@ -1406,7 +1683,8 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1406 | if (psend != -1) { | 1683 | if (psend != -1) { |
1407 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 1684 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1408 | if (skb2) | 1685 | if (skb2) |
1409 | ipmr_queue_xmit(skb2, cache, psend); | 1686 | ipmr_queue_xmit(net, mrt, skb2, cache, |
1687 | psend); | ||
1410 | } | 1688 | } |
1411 | psend = ct; | 1689 | psend = ct; |
1412 | } | 1690 | } |
@@ -1415,9 +1693,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1415 | if (local) { | 1693 | if (local) { |
1416 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 1694 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1417 | if (skb2) | 1695 | if (skb2) |
1418 | ipmr_queue_xmit(skb2, cache, psend); | 1696 | ipmr_queue_xmit(net, mrt, skb2, cache, psend); |
1419 | } else { | 1697 | } else { |
1420 | ipmr_queue_xmit(skb, cache, psend); | 1698 | ipmr_queue_xmit(net, mrt, skb, cache, psend); |
1421 | return 0; | 1699 | return 0; |
1422 | } | 1700 | } |
1423 | } | 1701 | } |
@@ -1438,6 +1716,8 @@ int ip_mr_input(struct sk_buff *skb) | |||
1438 | struct mfc_cache *cache; | 1716 | struct mfc_cache *cache; |
1439 | struct net *net = dev_net(skb->dev); | 1717 | struct net *net = dev_net(skb->dev); |
1440 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; | 1718 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; |
1719 | struct mr_table *mrt; | ||
1720 | int err; | ||
1441 | 1721 | ||
1442 | /* Packet is looped back after forward, it should not be | 1722 | /* Packet is looped back after forward, it should not be |
1443 | forwarded second time, but still can be delivered locally. | 1723 | forwarded second time, but still can be delivered locally. |
@@ -1445,6 +1725,10 @@ int ip_mr_input(struct sk_buff *skb) | |||
1445 | if (IPCB(skb)->flags&IPSKB_FORWARDED) | 1725 | if (IPCB(skb)->flags&IPSKB_FORWARDED) |
1446 | goto dont_forward; | 1726 | goto dont_forward; |
1447 | 1727 | ||
1728 | err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt); | ||
1729 | if (err < 0) | ||
1730 | return err; | ||
1731 | |||
1448 | if (!local) { | 1732 | if (!local) { |
1449 | if (IPCB(skb)->opt.router_alert) { | 1733 | if (IPCB(skb)->opt.router_alert) { |
1450 | if (ip_call_ra_chain(skb)) | 1734 | if (ip_call_ra_chain(skb)) |
@@ -1457,9 +1741,9 @@ int ip_mr_input(struct sk_buff *skb) | |||
1457 | that we can forward NO IGMP messages. | 1741 | that we can forward NO IGMP messages. |
1458 | */ | 1742 | */ |
1459 | read_lock(&mrt_lock); | 1743 | read_lock(&mrt_lock); |
1460 | if (net->ipv4.mroute_sk) { | 1744 | if (mrt->mroute_sk) { |
1461 | nf_reset(skb); | 1745 | nf_reset(skb); |
1462 | raw_rcv(net->ipv4.mroute_sk, skb); | 1746 | raw_rcv(mrt->mroute_sk, skb); |
1463 | read_unlock(&mrt_lock); | 1747 | read_unlock(&mrt_lock); |
1464 | return 0; | 1748 | return 0; |
1465 | } | 1749 | } |
@@ -1468,7 +1752,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1468 | } | 1752 | } |
1469 | 1753 | ||
1470 | read_lock(&mrt_lock); | 1754 | read_lock(&mrt_lock); |
1471 | cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | 1755 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
1472 | 1756 | ||
1473 | /* | 1757 | /* |
1474 | * No usable cache entry | 1758 | * No usable cache entry |
@@ -1486,19 +1770,19 @@ int ip_mr_input(struct sk_buff *skb) | |||
1486 | skb = skb2; | 1770 | skb = skb2; |
1487 | } | 1771 | } |
1488 | 1772 | ||
1489 | vif = ipmr_find_vif(skb->dev); | 1773 | vif = ipmr_find_vif(mrt, skb->dev); |
1490 | if (vif >= 0) { | 1774 | if (vif >= 0) { |
1491 | int err = ipmr_cache_unresolved(net, vif, skb); | 1775 | int err2 = ipmr_cache_unresolved(mrt, vif, skb); |
1492 | read_unlock(&mrt_lock); | 1776 | read_unlock(&mrt_lock); |
1493 | 1777 | ||
1494 | return err; | 1778 | return err2; |
1495 | } | 1779 | } |
1496 | read_unlock(&mrt_lock); | 1780 | read_unlock(&mrt_lock); |
1497 | kfree_skb(skb); | 1781 | kfree_skb(skb); |
1498 | return -ENODEV; | 1782 | return -ENODEV; |
1499 | } | 1783 | } |
1500 | 1784 | ||
1501 | ip_mr_forward(skb, cache, local); | 1785 | ip_mr_forward(net, mrt, skb, cache, local); |
1502 | 1786 | ||
1503 | read_unlock(&mrt_lock); | 1787 | read_unlock(&mrt_lock); |
1504 | 1788 | ||
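
ip_mr_input() now starts by mapping the packet's flow (taken from its route) to a table, which is what makes policy-routed multicast work: the same source/group pair can hit different forwarding caches depending on which fib rule matches. The helper is defined earlier in the patch; a plausible stub for the single-table build, consistent with the lookups above:

static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}
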
@@ -1515,11 +1799,11 @@ dont_forward: | |||
1515 | } | 1799 | } |
1516 | 1800 | ||
1517 | #ifdef CONFIG_IP_PIMSM | 1801 | #ifdef CONFIG_IP_PIMSM |
1518 | static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | 1802 | static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, |
1803 | unsigned int pimlen) | ||
1519 | { | 1804 | { |
1520 | struct net_device *reg_dev = NULL; | 1805 | struct net_device *reg_dev = NULL; |
1521 | struct iphdr *encap; | 1806 | struct iphdr *encap; |
1522 | struct net *net = dev_net(skb->dev); | ||
1523 | 1807 | ||
1524 | encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); | 1808 | encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); |
1525 | /* | 1809 | /* |
@@ -1534,8 +1818,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1534 | return 1; | 1818 | return 1; |
1535 | 1819 | ||
1536 | read_lock(&mrt_lock); | 1820 | read_lock(&mrt_lock); |
1537 | if (net->ipv4.mroute_reg_vif_num >= 0) | 1821 | if (mrt->mroute_reg_vif_num >= 0) |
1538 | reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev; | 1822 | reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; |
1539 | if (reg_dev) | 1823 | if (reg_dev) |
1540 | dev_hold(reg_dev); | 1824 | dev_hold(reg_dev); |
1541 | read_unlock(&mrt_lock); | 1825 | read_unlock(&mrt_lock); |
@@ -1570,17 +1854,21 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
1570 | { | 1854 | { |
1571 | struct igmphdr *pim; | 1855 | struct igmphdr *pim; |
1572 | struct net *net = dev_net(skb->dev); | 1856 | struct net *net = dev_net(skb->dev); |
1857 | struct mr_table *mrt; | ||
1573 | 1858 | ||
1574 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) | 1859 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) |
1575 | goto drop; | 1860 | goto drop; |
1576 | 1861 | ||
1577 | pim = igmp_hdr(skb); | 1862 | pim = igmp_hdr(skb); |
1578 | 1863 | ||
1579 | if (!net->ipv4.mroute_do_pim || | 1864 | if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) |
1865 | goto drop; | ||
1866 | |||
1867 | if (!mrt->mroute_do_pim || | ||
1580 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) | 1868 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) |
1581 | goto drop; | 1869 | goto drop; |
1582 | 1870 | ||
1583 | if (__pim_rcv(skb, sizeof(*pim))) { | 1871 | if (__pim_rcv(mrt, skb, sizeof(*pim))) { |
1584 | drop: | 1872 | drop: |
1585 | kfree_skb(skb); | 1873 | kfree_skb(skb); |
1586 | } | 1874 | } |
@@ -1592,6 +1880,8 @@ drop: | |||
1592 | static int pim_rcv(struct sk_buff * skb) | 1880 | static int pim_rcv(struct sk_buff * skb) |
1593 | { | 1881 | { |
1594 | struct pimreghdr *pim; | 1882 | struct pimreghdr *pim; |
1883 | struct net *net = dev_net(skb->dev); | ||
1884 | struct mr_table *mrt; | ||
1595 | 1885 | ||
1596 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) | 1886 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) |
1597 | goto drop; | 1887 | goto drop; |
@@ -1603,7 +1893,10 @@ static int pim_rcv(struct sk_buff * skb) | |||
1603 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) | 1893 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) |
1604 | goto drop; | 1894 | goto drop; |
1605 | 1895 | ||
1606 | if (__pim_rcv(skb, sizeof(*pim))) { | 1896 | if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0) |
1897 | goto drop; | ||
1898 | |||
1899 | if (__pim_rcv(mrt, skb, sizeof(*pim))) { | ||
1607 | drop: | 1900 | drop: |
1608 | kfree_skb(skb); | 1901 | kfree_skb(skb); |
1609 | } | 1902 | } |
@@ -1612,11 +1905,11 @@ drop: | |||
1612 | #endif | 1905 | #endif |
1613 | 1906 | ||
1614 | static int | 1907 | static int |
1615 | ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | 1908 | ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, |
1909 | struct rtmsg *rtm) | ||
1616 | { | 1910 | { |
1617 | int ct; | 1911 | int ct; |
1618 | struct rtnexthop *nhp; | 1912 | struct rtnexthop *nhp; |
1619 | struct net *net = mfc_net(c); | ||
1620 | u8 *b = skb_tail_pointer(skb); | 1913 | u8 *b = skb_tail_pointer(skb); |
1621 | struct rtattr *mp_head; | 1914 | struct rtattr *mp_head; |
1622 | 1915 | ||
@@ -1624,19 +1917,19 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) | |||
1624 | if (c->mfc_parent > MAXVIFS) | 1917 | if (c->mfc_parent > MAXVIFS) |
1625 | return -ENOENT; | 1918 | return -ENOENT; |
1626 | 1919 | ||
1627 | if (VIF_EXISTS(net, c->mfc_parent)) | 1920 | if (VIF_EXISTS(mrt, c->mfc_parent)) |
1628 | RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex); | 1921 | RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); |
1629 | 1922 | ||
1630 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); | 1923 | mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); |
1631 | 1924 | ||
1632 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { | 1925 | for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { |
1633 | if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { | 1926 | if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { |
1634 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) | 1927 | if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) |
1635 | goto rtattr_failure; | 1928 | goto rtattr_failure; |
1636 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); | 1929 | nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); |
1637 | nhp->rtnh_flags = 0; | 1930 | nhp->rtnh_flags = 0; |
1638 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; | 1931 | nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; |
1639 | nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex; | 1932 | nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; |
1640 | nhp->rtnh_len = sizeof(*nhp); | 1933 | nhp->rtnh_len = sizeof(*nhp); |
1641 | } | 1934 | } |
1642 | } | 1935 | } |
@@ -1654,11 +1947,16 @@ int ipmr_get_route(struct net *net, | |||
1654 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) | 1947 | struct sk_buff *skb, struct rtmsg *rtm, int nowait) |
1655 | { | 1948 | { |
1656 | int err; | 1949 | int err; |
1950 | struct mr_table *mrt; | ||
1657 | struct mfc_cache *cache; | 1951 | struct mfc_cache *cache; |
1658 | struct rtable *rt = skb_rtable(skb); | 1952 | struct rtable *rt = skb_rtable(skb); |
1659 | 1953 | ||
1954 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||
1955 | if (mrt == NULL) | ||
1956 | return -ENOENT; | ||
1957 | |||
1660 | read_lock(&mrt_lock); | 1958 | read_lock(&mrt_lock); |
1661 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); | 1959 | cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); |
1662 | 1960 | ||
1663 | if (cache == NULL) { | 1961 | if (cache == NULL) { |
1664 | struct sk_buff *skb2; | 1962 | struct sk_buff *skb2; |
@@ -1672,7 +1970,7 @@ int ipmr_get_route(struct net *net, | |||
1672 | } | 1970 | } |
1673 | 1971 | ||
1674 | dev = skb->dev; | 1972 | dev = skb->dev; |
1675 | if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) { | 1973 | if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) { |
1676 | read_unlock(&mrt_lock); | 1974 | read_unlock(&mrt_lock); |
1677 | return -ENODEV; | 1975 | return -ENODEV; |
1678 | } | 1976 | } |
@@ -1689,14 +1987,14 @@ int ipmr_get_route(struct net *net, | |||
1689 | iph->saddr = rt->rt_src; | 1987 | iph->saddr = rt->rt_src; |
1690 | iph->daddr = rt->rt_dst; | 1988 | iph->daddr = rt->rt_dst; |
1691 | iph->version = 0; | 1989 | iph->version = 0; |
1692 | err = ipmr_cache_unresolved(net, vif, skb2); | 1990 | err = ipmr_cache_unresolved(mrt, vif, skb2); |
1693 | read_unlock(&mrt_lock); | 1991 | read_unlock(&mrt_lock); |
1694 | return err; | 1992 | return err; |
1695 | } | 1993 | } |
1696 | 1994 | ||
1697 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) | 1995 | if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) |
1698 | cache->mfc_flags |= MFC_NOTIFY; | 1996 | cache->mfc_flags |= MFC_NOTIFY; |
1699 | err = ipmr_fill_mroute(skb, cache, rtm); | 1997 | err = ipmr_fill_mroute(mrt, skb, cache, rtm); |
1700 | read_unlock(&mrt_lock); | 1998 | read_unlock(&mrt_lock); |
1701 | return err; | 1999 | return err; |
1702 | } | 2000 | } |
@@ -1707,6 +2005,7 @@ int ipmr_get_route(struct net *net, | |||
1707 | */ | 2005 | */ |
1708 | struct ipmr_vif_iter { | 2006 | struct ipmr_vif_iter { |
1709 | struct seq_net_private p; | 2007 | struct seq_net_private p; |
2008 | struct mr_table *mrt; | ||
1710 | int ct; | 2009 | int ct; |
1711 | }; | 2010 | }; |
1712 | 2011 | ||
@@ -1714,11 +2013,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, | |||
1714 | struct ipmr_vif_iter *iter, | 2013 | struct ipmr_vif_iter *iter, |
1715 | loff_t pos) | 2014 | loff_t pos) |
1716 | { | 2015 | { |
1717 | for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) { | 2016 | struct mr_table *mrt = iter->mrt; |
1718 | if (!VIF_EXISTS(net, iter->ct)) | 2017 | |
2018 | for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { | ||
2019 | if (!VIF_EXISTS(mrt, iter->ct)) | ||
1719 | continue; | 2020 | continue; |
1720 | if (pos-- == 0) | 2021 | if (pos-- == 0) |
1721 | return &net->ipv4.vif_table[iter->ct]; | 2022 | return &mrt->vif_table[iter->ct]; |
1722 | } | 2023 | } |
1723 | return NULL; | 2024 | return NULL; |
1724 | } | 2025 | } |
@@ -1726,7 +2027,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net, | |||
1726 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) | 2027 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) |
1727 | __acquires(mrt_lock) | 2028 | __acquires(mrt_lock) |
1728 | { | 2029 | { |
2030 | struct ipmr_vif_iter *iter = seq->private; | ||
1729 | struct net *net = seq_file_net(seq); | 2031 | struct net *net = seq_file_net(seq); |
2032 | struct mr_table *mrt; | ||
2033 | |||
2034 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||
2035 | if (mrt == NULL) | ||
2036 | return ERR_PTR(-ENOENT); | ||
2037 | |||
2038 | iter->mrt = mrt; | ||
1730 | 2039 | ||
1731 | read_lock(&mrt_lock); | 2040 | read_lock(&mrt_lock); |
1732 | return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) | 2041 | return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) |
@@ -1737,15 +2046,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1737 | { | 2046 | { |
1738 | struct ipmr_vif_iter *iter = seq->private; | 2047 | struct ipmr_vif_iter *iter = seq->private; |
1739 | struct net *net = seq_file_net(seq); | 2048 | struct net *net = seq_file_net(seq); |
2049 | struct mr_table *mrt = iter->mrt; | ||
1740 | 2050 | ||
1741 | ++*pos; | 2051 | ++*pos; |
1742 | if (v == SEQ_START_TOKEN) | 2052 | if (v == SEQ_START_TOKEN) |
1743 | return ipmr_vif_seq_idx(net, iter, 0); | 2053 | return ipmr_vif_seq_idx(net, iter, 0); |
1744 | 2054 | ||
1745 | while (++iter->ct < net->ipv4.maxvif) { | 2055 | while (++iter->ct < mrt->maxvif) { |
1746 | if (!VIF_EXISTS(net, iter->ct)) | 2056 | if (!VIF_EXISTS(mrt, iter->ct)) |
1747 | continue; | 2057 | continue; |
1748 | return &net->ipv4.vif_table[iter->ct]; | 2058 | return &mrt->vif_table[iter->ct]; |
1749 | } | 2059 | } |
1750 | return NULL; | 2060 | return NULL; |
1751 | } | 2061 | } |
@@ -1758,7 +2068,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) | |||
1758 | 2068 | ||
1759 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | 2069 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) |
1760 | { | 2070 | { |
1761 | struct net *net = seq_file_net(seq); | 2071 | struct ipmr_vif_iter *iter = seq->private; |
2072 | struct mr_table *mrt = iter->mrt; | ||
1762 | 2073 | ||
1763 | if (v == SEQ_START_TOKEN) { | 2074 | if (v == SEQ_START_TOKEN) { |
1764 | seq_puts(seq, | 2075 | seq_puts(seq, |
@@ -1769,7 +2080,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | |||
1769 | 2080 | ||
1770 | seq_printf(seq, | 2081 | seq_printf(seq, |
1771 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", | 2082 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", |
1772 | vif - net->ipv4.vif_table, | 2083 | vif - mrt->vif_table, |
1773 | name, vif->bytes_in, vif->pkt_in, | 2084 | name, vif->bytes_in, vif->pkt_in, |
1774 | vif->bytes_out, vif->pkt_out, | 2085 | vif->bytes_out, vif->pkt_out, |
1775 | vif->flags, vif->local, vif->remote); | 2086 | vif->flags, vif->local, vif->remote); |
@@ -1800,7 +2111,8 @@ static const struct file_operations ipmr_vif_fops = { | |||
1800 | 2111 | ||
1801 | struct ipmr_mfc_iter { | 2112 | struct ipmr_mfc_iter { |
1802 | struct seq_net_private p; | 2113 | struct seq_net_private p; |
1803 | struct mfc_cache **cache; | 2114 | struct mr_table *mrt; |
2115 | struct list_head *cache; | ||
1804 | int ct; | 2116 | int ct; |
1805 | }; | 2117 | }; |
1806 | 2118 | ||
@@ -1808,22 +2120,22 @@ struct ipmr_mfc_iter { | |||
1808 | static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, | 2120 | static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, |
1809 | struct ipmr_mfc_iter *it, loff_t pos) | 2121 | struct ipmr_mfc_iter *it, loff_t pos) |
1810 | { | 2122 | { |
2123 | struct mr_table *mrt = it->mrt; | ||
1811 | struct mfc_cache *mfc; | 2124 | struct mfc_cache *mfc; |
1812 | 2125 | ||
1813 | it->cache = net->ipv4.mfc_cache_array; | ||
1814 | read_lock(&mrt_lock); | 2126 | read_lock(&mrt_lock); |
1815 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) | 2127 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { |
1816 | for (mfc = net->ipv4.mfc_cache_array[it->ct]; | 2128 | it->cache = &mrt->mfc_cache_array[it->ct]; |
1817 | mfc; mfc = mfc->next) | 2129 | list_for_each_entry(mfc, it->cache, list) |
1818 | if (pos-- == 0) | 2130 | if (pos-- == 0) |
1819 | return mfc; | 2131 | return mfc; |
2132 | } | ||
1820 | read_unlock(&mrt_lock); | 2133 | read_unlock(&mrt_lock); |
1821 | 2134 | ||
1822 | it->cache = &mfc_unres_queue; | ||
1823 | spin_lock_bh(&mfc_unres_lock); | 2135 | spin_lock_bh(&mfc_unres_lock); |
1824 | for (mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 2136 | it->cache = &mrt->mfc_unres_queue; |
1825 | if (net_eq(mfc_net(mfc), net) && | 2137 | list_for_each_entry(mfc, it->cache, list) |
1826 | pos-- == 0) | 2138 | if (pos-- == 0) |
1827 | return mfc; | 2139 | return mfc; |
1828 | spin_unlock_bh(&mfc_unres_lock); | 2140 | spin_unlock_bh(&mfc_unres_lock); |
1829 | 2141 | ||
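
The iterator rewrite follows from a data-structure change made elsewhere in the patch: mfc_cache entries now chain through an embedded struct list_head, and both the hash lines and the unresolved queue are per-table list heads. A lookup written against the new layout might read as below; MFC_HASH is the existing hash macro in ipmr.c:

/* Sketch: resolved-cache lookup over the per-table hash lines, using the
 * embedded mfc->list linkage this patch introduces. */
struct mfc_cache *c;
int line = MFC_HASH(mcastgrp, origin);

list_for_each_entry(c, &mrt->mfc_cache_array[line], list)
        if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
                return c;
return NULL;
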
@@ -1836,7 +2148,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | |||
1836 | { | 2148 | { |
1837 | struct ipmr_mfc_iter *it = seq->private; | 2149 | struct ipmr_mfc_iter *it = seq->private; |
1838 | struct net *net = seq_file_net(seq); | 2150 | struct net *net = seq_file_net(seq); |
2151 | struct mr_table *mrt; | ||
2152 | |||
2153 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | ||
2154 | if (mrt == NULL) | ||
2155 | return ERR_PTR(-ENOENT); | ||
1839 | 2156 | ||
2157 | it->mrt = mrt; | ||
1840 | it->cache = NULL; | 2158 | it->cache = NULL; |
1841 | it->ct = 0; | 2159 | it->ct = 0; |
1842 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) | 2160 | return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1) |
@@ -1848,37 +2166,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1848 | struct mfc_cache *mfc = v; | 2166 | struct mfc_cache *mfc = v; |
1849 | struct ipmr_mfc_iter *it = seq->private; | 2167 | struct ipmr_mfc_iter *it = seq->private; |
1850 | struct net *net = seq_file_net(seq); | 2168 | struct net *net = seq_file_net(seq); |
2169 | struct mr_table *mrt = it->mrt; | ||
1851 | 2170 | ||
1852 | ++*pos; | 2171 | ++*pos; |
1853 | 2172 | ||
1854 | if (v == SEQ_START_TOKEN) | 2173 | if (v == SEQ_START_TOKEN) |
1855 | return ipmr_mfc_seq_idx(net, seq->private, 0); | 2174 | return ipmr_mfc_seq_idx(net, seq->private, 0); |
1856 | 2175 | ||
1857 | if (mfc->next) | 2176 | if (mfc->list.next != it->cache) |
1858 | return mfc->next; | 2177 | return list_entry(mfc->list.next, struct mfc_cache, list); |
1859 | 2178 | ||
1860 | if (it->cache == &mfc_unres_queue) | 2179 | if (it->cache == &mrt->mfc_unres_queue) |
1861 | goto end_of_list; | 2180 | goto end_of_list; |
1862 | 2181 | ||
1863 | BUG_ON(it->cache != net->ipv4.mfc_cache_array); | 2182 | BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]); |
1864 | 2183 | ||
1865 | while (++it->ct < MFC_LINES) { | 2184 | while (++it->ct < MFC_LINES) { |
1866 | mfc = net->ipv4.mfc_cache_array[it->ct]; | 2185 | it->cache = &mrt->mfc_cache_array[it->ct]; |
1867 | if (mfc) | 2186 | if (list_empty(it->cache)) |
1868 | return mfc; | 2187 | continue; |
2188 | return list_first_entry(it->cache, struct mfc_cache, list); | ||
1869 | } | 2189 | } |
1870 | 2190 | ||
1871 | /* exhausted cache_array, show unresolved */ | 2191 | /* exhausted cache_array, show unresolved */ |
1872 | read_unlock(&mrt_lock); | 2192 | read_unlock(&mrt_lock); |
1873 | it->cache = &mfc_unres_queue; | 2193 | it->cache = &mrt->mfc_unres_queue; |
1874 | it->ct = 0; | 2194 | it->ct = 0; |
1875 | 2195 | ||
1876 | spin_lock_bh(&mfc_unres_lock); | 2196 | spin_lock_bh(&mfc_unres_lock); |
1877 | mfc = mfc_unres_queue; | 2197 | if (!list_empty(it->cache)) |
1878 | while (mfc && !net_eq(mfc_net(mfc), net)) | 2198 | return list_first_entry(it->cache, struct mfc_cache, list); |
1879 | mfc = mfc->next; | ||
1880 | if (mfc) | ||
1881 | return mfc; | ||
1882 | 2199 | ||
1883 | end_of_list: | 2200 | end_of_list: |
1884 | spin_unlock_bh(&mfc_unres_lock); | 2201 | spin_unlock_bh(&mfc_unres_lock); |
@@ -1890,18 +2207,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1890 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) | 2207 | static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) |
1891 | { | 2208 | { |
1892 | struct ipmr_mfc_iter *it = seq->private; | 2209 | struct ipmr_mfc_iter *it = seq->private; |
1893 | struct net *net = seq_file_net(seq); | 2210 | struct mr_table *mrt = it->mrt; |
1894 | 2211 | ||
1895 | if (it->cache == &mfc_unres_queue) | 2212 | if (it->cache == &mrt->mfc_unres_queue) |
1896 | spin_unlock_bh(&mfc_unres_lock); | 2213 | spin_unlock_bh(&mfc_unres_lock); |
1897 | else if (it->cache == net->ipv4.mfc_cache_array) | 2214 | else if (it->cache == &mrt->mfc_cache_array[it->ct]) |
1898 | read_unlock(&mrt_lock); | 2215 | read_unlock(&mrt_lock); |
1899 | } | 2216 | } |
1900 | 2217 | ||
1901 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | 2218 | static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) |
1902 | { | 2219 | { |
1903 | int n; | 2220 | int n; |
1904 | struct net *net = seq_file_net(seq); | ||
1905 | 2221 | ||
1906 | if (v == SEQ_START_TOKEN) { | 2222 | if (v == SEQ_START_TOKEN) { |
1907 | seq_puts(seq, | 2223 | seq_puts(seq, |
@@ -1909,20 +2225,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
1909 | } else { | 2225 | } else { |
1910 | const struct mfc_cache *mfc = v; | 2226 | const struct mfc_cache *mfc = v; |
1911 | const struct ipmr_mfc_iter *it = seq->private; | 2227 | const struct ipmr_mfc_iter *it = seq->private; |
2228 | const struct mr_table *mrt = it->mrt; | ||
1912 | 2229 | ||
1913 | seq_printf(seq, "%08lX %08lX %-3hd", | 2230 | seq_printf(seq, "%08X %08X %-3hd", |
1914 | (unsigned long) mfc->mfc_mcastgrp, | 2231 | (__force u32) mfc->mfc_mcastgrp, |
1915 | (unsigned long) mfc->mfc_origin, | 2232 | (__force u32) mfc->mfc_origin, |
1916 | mfc->mfc_parent); | 2233 | mfc->mfc_parent); |
1917 | 2234 | ||
1918 | if (it->cache != &mfc_unres_queue) { | 2235 | if (it->cache != &mrt->mfc_unres_queue) { |
1919 | seq_printf(seq, " %8lu %8lu %8lu", | 2236 | seq_printf(seq, " %8lu %8lu %8lu", |
1920 | mfc->mfc_un.res.pkt, | 2237 | mfc->mfc_un.res.pkt, |
1921 | mfc->mfc_un.res.bytes, | 2238 | mfc->mfc_un.res.bytes, |
1922 | mfc->mfc_un.res.wrong_if); | 2239 | mfc->mfc_un.res.wrong_if); |
1923 | for (n = mfc->mfc_un.res.minvif; | 2240 | for (n = mfc->mfc_un.res.minvif; |
1924 | n < mfc->mfc_un.res.maxvif; n++ ) { | 2241 | n < mfc->mfc_un.res.maxvif; n++ ) { |
1925 | if (VIF_EXISTS(net, n) && | 2242 | if (VIF_EXISTS(mrt, n) && |
1926 | mfc->mfc_un.res.ttls[n] < 255) | 2243 | mfc->mfc_un.res.ttls[n] < 255) |
1927 | seq_printf(seq, | 2244 | seq_printf(seq, |
1928 | " %2d:%-3d", | 2245 | " %2d:%-3d", |
@@ -1974,27 +2291,11 @@ static const struct net_protocol pim_protocol = { | |||
1974 | */ | 2291 | */ |
1975 | static int __net_init ipmr_net_init(struct net *net) | 2292 | static int __net_init ipmr_net_init(struct net *net) |
1976 | { | 2293 | { |
1977 | int err = 0; | 2294 | int err; |
1978 | 2295 | ||
1979 | net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), | 2296 | err = ipmr_rules_init(net); |
1980 | GFP_KERNEL); | 2297 | if (err < 0) |
1981 | if (!net->ipv4.vif_table) { | ||
1982 | err = -ENOMEM; | ||
1983 | goto fail; | 2298 | goto fail; |
1984 | } | ||
1985 | |||
1986 | /* Forwarding cache */ | ||
1987 | net->ipv4.mfc_cache_array = kcalloc(MFC_LINES, | ||
1988 | sizeof(struct mfc_cache *), | ||
1989 | GFP_KERNEL); | ||
1990 | if (!net->ipv4.mfc_cache_array) { | ||
1991 | err = -ENOMEM; | ||
1992 | goto fail_mfc_cache; | ||
1993 | } | ||
1994 | |||
1995 | #ifdef CONFIG_IP_PIMSM | ||
1996 | net->ipv4.mroute_reg_vif_num = -1; | ||
1997 | #endif | ||
1998 | 2299 | ||
1999 | #ifdef CONFIG_PROC_FS | 2300 | #ifdef CONFIG_PROC_FS |
2000 | err = -ENOMEM; | 2301 | err = -ENOMEM; |
@@ -2009,10 +2310,8 @@ static int __net_init ipmr_net_init(struct net *net) | |||
2009 | proc_cache_fail: | 2310 | proc_cache_fail: |
2010 | proc_net_remove(net, "ip_mr_vif"); | 2311 | proc_net_remove(net, "ip_mr_vif"); |
2011 | proc_vif_fail: | 2312 | proc_vif_fail: |
2012 | kfree(net->ipv4.mfc_cache_array); | 2313 | ipmr_rules_exit(net); |
2013 | #endif | 2314 | #endif |
2014 | fail_mfc_cache: | ||
2015 | kfree(net->ipv4.vif_table); | ||
2016 | fail: | 2315 | fail: |
2017 | return err; | 2316 | return err; |
2018 | } | 2317 | } |
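
The pernet setup shrinks because allocation of vif_table, mfc_cache_array and the PIM register-vif index all moved into the table constructor. ipmr_rules_init()/ipmr_rules_exit() are defined earlier in the patch; for the single-table build they plausibly amount to the following (ipmr_free_table is an assumed counterpart name):

static int __net_init ipmr_rules_init(struct net *net)
{
        net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        ipmr_free_table(net->ipv4.mrt);
}
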
@@ -2023,8 +2322,7 @@ static void __net_exit ipmr_net_exit(struct net *net) | |||
2023 | proc_net_remove(net, "ip_mr_cache"); | 2322 | proc_net_remove(net, "ip_mr_cache"); |
2024 | proc_net_remove(net, "ip_mr_vif"); | 2323 | proc_net_remove(net, "ip_mr_vif"); |
2025 | #endif | 2324 | #endif |
2026 | kfree(net->ipv4.mfc_cache_array); | 2325 | ipmr_rules_exit(net); |
2027 | kfree(net->ipv4.vif_table); | ||
2028 | } | 2326 | } |
2029 | 2327 | ||
2030 | static struct pernet_operations ipmr_net_ops = { | 2328 | static struct pernet_operations ipmr_net_ops = { |
@@ -2047,7 +2345,6 @@ int __init ip_mr_init(void) | |||
2047 | if (err) | 2345 | if (err) |
2048 | goto reg_pernet_fail; | 2346 | goto reg_pernet_fail; |
2049 | 2347 | ||
2050 | setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0); | ||
2051 | err = register_netdevice_notifier(&ip_mr_notifier); | 2348 | err = register_netdevice_notifier(&ip_mr_notifier); |
2052 | if (err) | 2349 | if (err) |
2053 | goto reg_notif_fail; | 2350 | goto reg_notif_fail; |
@@ -2065,7 +2362,6 @@ add_proto_fail: | |||
2065 | unregister_netdevice_notifier(&ip_mr_notifier); | 2362 | unregister_netdevice_notifier(&ip_mr_notifier); |
2066 | #endif | 2363 | #endif |
2067 | reg_notif_fail: | 2364 | reg_notif_fail: |
2068 | del_timer(&ipmr_expire_timer); | ||
2069 | unregister_pernet_subsys(&ipmr_net_ops); | 2365 | unregister_pernet_subsys(&ipmr_net_ops); |
2070 | reg_pernet_fail: | 2366 | reg_pernet_fail: |
2071 | kmem_cache_destroy(mrt_cachep); | 2367 | kmem_cache_destroy(mrt_cachep); |
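
Dropping the global ipmr_expire_timer is the flip side of the same move: each mr_table now owns the timer that reaps its unresolved queue, so it is armed in the table constructor rather than at module init. Roughly, with the field name assumed from this series:

/* In ipmr_new_table(): each table arms its own unresolved-queue reaper,
 * passing itself as the timer argument. */
setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
            (unsigned long)mrt);
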
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index ab828400ed71..a992dc826f1c 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -88,7 +88,7 @@ clusterip_config_entry_put(struct clusterip_config *c) | |||
88 | list_del(&c->list); | 88 | list_del(&c->list); |
89 | write_unlock_bh(&clusterip_lock); | 89 | write_unlock_bh(&clusterip_lock); |
90 | 90 | ||
91 | dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); | 91 | dev_mc_del(c->dev, c->clustermac); |
92 | dev_put(c->dev); | 92 | dev_put(c->dev); |
93 | 93 | ||
94 | /* In case anyone still accesses the file, the open/close | 94 | /* In case anyone still accesses the file, the open/close |
@@ -397,7 +397,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) | |||
397 | dev_put(dev); | 397 | dev_put(dev); |
398 | return false; | 398 | return false; |
399 | } | 399 | } |
400 | dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); | 400 | dev_mc_add(config->dev, config->clustermac); |
401 | } | 401 | } |
402 | } | 402 | } |
403 | cipinfo->config = config; | 403 | cipinfo->config = config; |
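
The CLUSTERIP change is the same mechanical conversion applied across this diff: the reworked net_device address lists take the address length from dev->addr_len and drop the old alen/glbl arguments, so callers shrink to a two-argument form. In isolation:

/* New-style multicast filter management: the length is implied by
 * dev->addr_len, so callers pass only the address. */
static const u8 clustermac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

err = dev_mc_add(dev, clustermac);
if (err)
        return err;
/* ... and on teardown ... */
dev_mc_del(dev, clustermac);
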
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4f1f337f4337..3dc9914c1dce 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
251 | SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), | 251 | SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), |
252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), | 252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), |
253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), | 253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), |
254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), | ||
254 | SNMP_MIB_SENTINEL | 255 | SNMP_MIB_SENTINEL |
255 | }; | 256 | }; |
256 | 257 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cb562fdd9b9a..a947428ef0ae 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -258,10 +258,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); | |||
258 | (__raw_get_cpu_var(rt_cache_stat).field++) | 258 | (__raw_get_cpu_var(rt_cache_stat).field++) |
259 | 259 | ||
260 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, | 260 | static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, |
261 | int genid) | 261 | int genid) |
262 | { | 262 | { |
263 | return jhash_3words((__force u32)(__be32)(daddr), | 263 | return jhash_3words((__force u32)daddr, (__force u32)saddr, |
264 | (__force u32)(__be32)(saddr), | ||
265 | idx, genid) | 264 | idx, genid) |
266 | & rt_hash_mask; | 265 | & rt_hash_mask; |
267 | } | 266 | } |
@@ -378,12 +377,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) | |||
378 | struct rtable *r = v; | 377 | struct rtable *r = v; |
379 | int len; | 378 | int len; |
380 | 379 | ||
381 | seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t" | 380 | seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" |
382 | "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", | 381 | "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", |
383 | r->u.dst.dev ? r->u.dst.dev->name : "*", | 382 | r->u.dst.dev ? r->u.dst.dev->name : "*", |
384 | (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway, | 383 | (__force u32)r->rt_dst, |
384 | (__force u32)r->rt_gateway, | ||
385 | r->rt_flags, atomic_read(&r->u.dst.__refcnt), | 385 | r->rt_flags, atomic_read(&r->u.dst.__refcnt), |
386 | r->u.dst.__use, 0, (unsigned long)r->rt_src, | 386 | r->u.dst.__use, 0, (__force u32)r->rt_src, |
387 | (dst_metric(&r->u.dst, RTAX_ADVMSS) ? | 387 | (dst_metric(&r->u.dst, RTAX_ADVMSS) ? |
388 | (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), | 388 | (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), |
389 | dst_metric(&r->u.dst, RTAX_WINDOW), | 389 | dst_metric(&r->u.dst, RTAX_WINDOW), |
@@ -685,18 +685,17 @@ static inline bool rt_caching(const struct net *net) | |||
685 | static inline bool compare_hash_inputs(const struct flowi *fl1, | 685 | static inline bool compare_hash_inputs(const struct flowi *fl1, |
686 | const struct flowi *fl2) | 686 | const struct flowi *fl2) |
687 | { | 687 | { |
688 | return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | 688 | return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | |
689 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | | 689 | ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | |
690 | (fl1->iif ^ fl2->iif)) == 0); | 690 | (fl1->iif ^ fl2->iif)) == 0); |
691 | } | 691 | } |
692 | 692 | ||
693 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | 693 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) |
694 | { | 694 | { |
695 | return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | 695 | return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) | |
696 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | | 696 | ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) | |
697 | (fl1->mark ^ fl2->mark) | | 697 | (fl1->mark ^ fl2->mark) | |
698 | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ | 698 | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) | |
699 | *(u16 *)&fl2->nl_u.ip4_u.tos) | | ||
700 | (fl1->oif ^ fl2->oif) | | 699 | (fl1->oif ^ fl2->oif) | |
701 | (fl1->iif ^ fl2->iif)) == 0; | 700 | (fl1->iif ^ fl2->iif)) == 0; |
702 | } | 701 | } |
@@ -2319,8 +2318,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2319 | rcu_read_lock(); | 2318 | rcu_read_lock(); |
2320 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 2319 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
2321 | rth = rcu_dereference(rth->u.dst.rt_next)) { | 2320 | rth = rcu_dereference(rth->u.dst.rt_next)) { |
2322 | if (((rth->fl.fl4_dst ^ daddr) | | 2321 | if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | |
2323 | (rth->fl.fl4_src ^ saddr) | | 2322 | ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | |
2324 | (rth->fl.iif ^ iif) | | 2323 | (rth->fl.iif ^ iif) | |
2325 | rth->fl.oif | | 2324 | rth->fl.oif | |
2326 | (rth->fl.fl4_tos ^ tos)) == 0 && | 2325 | (rth->fl.fl4_tos ^ tos)) == 0 && |
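
All three route.c hunks are sparse annotations rather than behaviour changes: __be32 is declared __bitwise, so mixing it into host-order arithmetic warns unless each operand is individually cast with __force. XOR comparisons only test equality and are therefore byte-order agnostic, which is why the casts are safe; the same pattern recurs in the TCP GRO and syncookie hunks below. Distilled into one helper:

/* Endian-safe equality test on wire-order values; __force silences the
 * sparse bitwise warning without changing the generated code. */
static inline int be32_differ(__be32 a, __be32 b)
{
        return ((__force u32)a ^ (__force u32)b) != 0;
}
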
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0f8caf64caa3..6689c61cab47 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
378 | struct sock *sk = sock->sk; | 378 | struct sock *sk = sock->sk; |
379 | struct tcp_sock *tp = tcp_sk(sk); | 379 | struct tcp_sock *tp = tcp_sk(sk); |
380 | 380 | ||
381 | sock_poll_wait(file, sk->sk_sleep, wait); | 381 | sock_poll_wait(file, sk_sleep(sk), wait); |
382 | if (sk->sk_state == TCP_LISTEN) | 382 | if (sk->sk_state == TCP_LISTEN) |
383 | return inet_csk_listen_poll(sk); | 383 | return inet_csk_listen_poll(sk); |
384 | 384 | ||
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2721 | struct tcphdr *th2; | 2721 | struct tcphdr *th2; |
2722 | unsigned int len; | 2722 | unsigned int len; |
2723 | unsigned int thlen; | 2723 | unsigned int thlen; |
2724 | unsigned int flags; | 2724 | __be32 flags; |
2725 | unsigned int mss = 1; | 2725 | unsigned int mss = 1; |
2726 | unsigned int hlen; | 2726 | unsigned int hlen; |
2727 | unsigned int off; | 2727 | unsigned int off; |
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2771 | 2771 | ||
2772 | found: | 2772 | found: |
2773 | flush = NAPI_GRO_CB(p)->flush; | 2773 | flush = NAPI_GRO_CB(p)->flush; |
2774 | flush |= flags & TCP_FLAG_CWR; | 2774 | flush |= (__force int)(flags & TCP_FLAG_CWR); |
2775 | flush |= (flags ^ tcp_flag_word(th2)) & | 2775 | flush |= (__force int)((flags ^ tcp_flag_word(th2)) & |
2776 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); | 2776 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); |
2777 | flush |= th->ack_seq ^ th2->ack_seq; | 2777 | flush |= (__force int)(th->ack_seq ^ th2->ack_seq); |
2778 | for (i = sizeof(*th); i < thlen; i += 4) | 2778 | for (i = sizeof(*th); i < thlen; i += 4) |
2779 | flush |= *(u32 *)((u8 *)th + i) ^ | 2779 | flush |= *(u32 *)((u8 *)th + i) ^ |
2780 | *(u32 *)((u8 *)th2 + i); | 2780 | *(u32 *)((u8 *)th2 + i); |
@@ -2795,8 +2795,9 @@ found: | |||
2795 | 2795 | ||
2796 | out_check_final: | 2796 | out_check_final: |
2797 | flush = len < mss; | 2797 | flush = len < mss; |
2798 | flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | | 2798 | flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | |
2799 | TCP_FLAG_SYN | TCP_FLAG_FIN); | 2799 | TCP_FLAG_RST | TCP_FLAG_SYN | |
2800 | TCP_FLAG_FIN)); | ||
2800 | 2801 | ||
2801 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) | 2802 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) |
2802 | pp = head; | 2803 | pp = head; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f240f57b2199..ae3ec15fb630 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3710,7 +3710,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
3710 | } | 3710 | } |
3711 | 3711 | ||
3712 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) | 3712 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) |
3713 | dst_confirm(sk->sk_dst_cache); | 3713 | dst_confirm(__sk_dst_get(sk)); |
3714 | 3714 | ||
3715 | return 1; | 3715 | return 1; |
3716 | 3716 | ||
@@ -4319,7 +4319,7 @@ static void tcp_ofo_queue(struct sock *sk) | |||
4319 | } | 4319 | } |
4320 | 4320 | ||
4321 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { | 4321 | if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { |
4322 | SOCK_DEBUG(sk, "ofo packet was already received \n"); | 4322 | SOCK_DEBUG(sk, "ofo packet was already received\n"); |
4323 | __skb_unlink(skb, &tp->out_of_order_queue); | 4323 | __skb_unlink(skb, &tp->out_of_order_queue); |
4324 | __kfree_skb(skb); | 4324 | __kfree_skb(skb); |
4325 | continue; | 4325 | continue; |
@@ -5833,7 +5833,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5833 | if (tp->snd_una == tp->write_seq) { | 5833 | if (tp->snd_una == tp->write_seq) { |
5834 | tcp_set_state(sk, TCP_FIN_WAIT2); | 5834 | tcp_set_state(sk, TCP_FIN_WAIT2); |
5835 | sk->sk_shutdown |= SEND_SHUTDOWN; | 5835 | sk->sk_shutdown |= SEND_SHUTDOWN; |
5836 | dst_confirm(sk->sk_dst_cache); | 5836 | dst_confirm(__sk_dst_get(sk)); |
5837 | 5837 | ||
5838 | if (!sock_flag(sk, SOCK_DEAD)) | 5838 | if (!sock_flag(sk, SOCK_DEAD)) |
5839 | /* Wake up lingering close() */ | 5839 | /* Wake up lingering close() */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3c23e70885f4..4d6717d1e61c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -519,26 +519,31 @@ out: | |||
519 | sock_put(sk); | 519 | sock_put(sk); |
520 | } | 520 | } |
521 | 521 | ||
522 | /* This routine computes an IPv4 TCP checksum. */ | 522 | static void __tcp_v4_send_check(struct sk_buff *skb, |
523 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | 523 | __be32 saddr, __be32 daddr) |
524 | { | 524 | { |
525 | struct inet_sock *inet = inet_sk(sk); | ||
526 | struct tcphdr *th = tcp_hdr(skb); | 525 | struct tcphdr *th = tcp_hdr(skb); |
527 | 526 | ||
528 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 527 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
529 | th->check = ~tcp_v4_check(len, inet->inet_saddr, | 528 | th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); |
530 | inet->inet_daddr, 0); | ||
531 | skb->csum_start = skb_transport_header(skb) - skb->head; | 529 | skb->csum_start = skb_transport_header(skb) - skb->head; |
532 | skb->csum_offset = offsetof(struct tcphdr, check); | 530 | skb->csum_offset = offsetof(struct tcphdr, check); |
533 | } else { | 531 | } else { |
534 | th->check = tcp_v4_check(len, inet->inet_saddr, | 532 | th->check = tcp_v4_check(skb->len, saddr, daddr, |
535 | inet->inet_daddr, | ||
536 | csum_partial(th, | 533 | csum_partial(th, |
537 | th->doff << 2, | 534 | th->doff << 2, |
538 | skb->csum)); | 535 | skb->csum)); |
539 | } | 536 | } |
540 | } | 537 | } |
541 | 538 | ||
539 | /* This routine computes an IPv4 TCP checksum. */ | ||
540 | void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) | ||
541 | { | ||
542 | struct inet_sock *inet = inet_sk(sk); | ||
543 | |||
544 | __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); | ||
545 | } | ||
546 | |||
542 | int tcp_v4_gso_send_check(struct sk_buff *skb) | 547 | int tcp_v4_gso_send_check(struct sk_buff *skb) |
543 | { | 548 | { |
544 | const struct iphdr *iph; | 549 | const struct iphdr *iph; |
@@ -551,10 +556,8 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) | |||
551 | th = tcp_hdr(skb); | 556 | th = tcp_hdr(skb); |
552 | 557 | ||
553 | th->check = 0; | 558 | th->check = 0; |
554 | th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0); | ||
555 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
556 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
557 | skb->ip_summed = CHECKSUM_PARTIAL; | 559 | skb->ip_summed = CHECKSUM_PARTIAL; |
560 | __tcp_v4_send_check(skb, iph->saddr, iph->daddr); | ||
558 | return 0; | 561 | return 0; |
559 | } | 562 | } |
560 | 563 | ||
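
Factoring out __tcp_v4_send_check() turns the pseudo-header addresses into parameters, so the GSO path (iph->saddr/daddr) and the SYN-ACK path in the next hunk (ireq->loc_addr/rmt_addr) share one copy of the checksum setup. The CHECKSUM_PARTIAL contract it implements, as a fragment with th, skb, saddr and daddr taken from context:

/* Stash the inverted pseudo-header sum in the TCP checksum field and
 * record where the final 16-bit sum must be written; the NIC (or
 * skb_checksum_help) folds the payload in later. */
th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
skb->csum_start  = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
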
@@ -763,13 +766,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, | |||
763 | skb = tcp_make_synack(sk, dst, req, rvp); | 766 | skb = tcp_make_synack(sk, dst, req, rvp); |
764 | 767 | ||
765 | if (skb) { | 768 | if (skb) { |
766 | struct tcphdr *th = tcp_hdr(skb); | 769 | __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); |
767 | |||
768 | th->check = tcp_v4_check(skb->len, | ||
769 | ireq->loc_addr, | ||
770 | ireq->rmt_addr, | ||
771 | csum_partial(th, skb->len, | ||
772 | skb->csum)); | ||
773 | 770 | ||
774 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | 771 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
775 | ireq->rmt_addr, | 772 | ireq->rmt_addr, |
@@ -1289,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1289 | goto drop_and_release; | 1286 | goto drop_and_release; |
1290 | 1287 | ||
1291 | /* Secret recipe starts with IP addresses */ | 1288 | /* Secret recipe starts with IP addresses */ |
1292 | *mess++ ^= daddr; | 1289 | *mess++ ^= (__force u32)daddr; |
1293 | *mess++ ^= saddr; | 1290 | *mess++ ^= (__force u32)saddr; |
1294 | 1291 | ||
1295 | /* plus variable length Initiator Cookie */ | 1292 | /* plus variable length Initiator Cookie */ |
1296 | c = (u8 *)mess; | 1293 | c = (u8 *)mess; |
@@ -1675,6 +1672,8 @@ process: | |||
1675 | 1672 | ||
1676 | skb->dev = NULL; | 1673 | skb->dev = NULL; |
1677 | 1674 | ||
1675 | inet_rps_save_rxhash(sk, skb->rxhash); | ||
1676 | |||
1678 | bh_lock_sock_nested(sk); | 1677 | bh_lock_sock_nested(sk); |
1679 | ret = 0; | 1678 | ret = 0; |
1680 | if (!sock_owned_by_user(sk)) { | 1679 | if (!sock_owned_by_user(sk)) { |
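
The one-liner added to tcp_v4_rcv() ties established TCP sockets into the receive-flow-steering work: the RX hash computed for this packet is remembered on the socket so later packets of the flow can be steered to the CPU the application runs on. A plausible, simplified shape of the accessor, which lives in the RPS patches rather than in this diff:

static inline void inet_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
        if (unlikely(sk->sk_rxhash != rxhash))
                sk->sk_rxhash = rxhash;
}
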
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 5fabff9ac6d6..794c2e122a41 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -672,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
672 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && | 672 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && |
673 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { | 673 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { |
674 | inet_rsk(req)->acked = 1; | 674 | inet_rsk(req)->acked = 1; |
675 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); | ||
675 | return NULL; | 676 | return NULL; |
676 | } | 677 | } |
677 | 678 | ||
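
This increment is the producer for the TCPDeferAcceptDrop counter exported through proc.c above: with TCP_DEFER_ACCEPT, the handshake-completing ACK carries no data, the request stays queued, and the (retransmitted) ACK is deliberately dropped, which previously looked like silent loss. The counter lands on the TcpExt lines of /proc/net/netstat; a minimal runnable reader:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[4096];
        FILE *f = fopen("/proc/net/netstat", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "TcpExt:", 7))
                        fputs(line, stdout);    /* header row + value row */
        fclose(f);
        return 0;
}
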
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 0dda86e72ad8..5db3a2c6cb33 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -350,6 +350,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, | |||
350 | */ | 350 | */ |
351 | static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) | 351 | static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) |
352 | { | 352 | { |
353 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
353 | skb->csum = 0; | 354 | skb->csum = 0; |
354 | 355 | ||
355 | TCP_SKB_CB(skb)->flags = flags; | 356 | TCP_SKB_CB(skb)->flags = flags; |
@@ -860,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
860 | th->urg_ptr = htons(tp->snd_up - tcb->seq); | 861 | th->urg_ptr = htons(tp->snd_up - tcb->seq); |
861 | th->urg = 1; | 862 | th->urg = 1; |
862 | } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { | 863 | } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { |
863 | th->urg_ptr = 0xFFFF; | 864 | th->urg_ptr = htons(0xFFFF); |
864 | th->urg = 1; | 865 | th->urg = 1; |
865 | } | 866 | } |
866 | } | 867 | } |
@@ -878,7 +879,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
878 | } | 879 | } |
879 | #endif | 880 | #endif |
880 | 881 | ||
881 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); | 882 | icsk->icsk_af_ops->send_check(sk, skb); |
882 | 883 | ||
883 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) | 884 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) |
884 | tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); | 885 | tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); |
@@ -887,9 +888,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
887 | tcp_event_data_sent(tp, skb, sk); | 888 | tcp_event_data_sent(tp, skb, sk); |
888 | 889 | ||
889 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) | 890 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) |
890 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); | 891 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, |
892 | tcp_skb_pcount(skb)); | ||
891 | 893 | ||
892 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 894 | err = icsk->icsk_af_ops->queue_xmit(skb); |
893 | if (likely(err <= 0)) | 895 | if (likely(err <= 0)) |
894 | return err; | 896 | return err; |
895 | 897 | ||
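[Editor's note] With GSO, one skb can stand for many on-the-wire segments; tcp_skb_pcount() reports that count, so OutSegs now grows by the number of real segments rather than by one per super-packet. The accounting reduces to a ceiling division, sketched here with illustrative names:

#include <stdio.h>

static unsigned int skb_pcount(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;	/* ceil(len / mss) wire segments */
}

int main(void)
{
	printf("64240 bytes at mss 1460 -> %u segments\n",
	       skb_pcount(64240, 1460));	/* prints 44 */
	return 0;
}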
@@ -2484,7 +2486,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2484 | *tail-- ^= TCP_SKB_CB(skb)->seq + 1; | 2486 | *tail-- ^= TCP_SKB_CB(skb)->seq + 1; |
2485 | 2487 | ||
2486 | /* recommended */ | 2488 | /* recommended */ |
2487 | *tail-- ^= ((th->dest << 16) | th->source); | 2489 | *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); |
2488 | *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ | 2490 | *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ |
2489 | 2491 | ||
2490 | sha_transform((__u32 *)&xvp->cookie_bakery[0], | 2492 | sha_transform((__u32 *)&xvp->cookie_bakery[0], |
@@ -2502,7 +2504,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2502 | th->window = htons(min(req->rcv_wnd, 65535U)); | 2504 | th->window = htons(min(req->rcv_wnd, 65535U)); |
2503 | tcp_options_write((__be32 *)(th + 1), tp, &opts); | 2505 | tcp_options_write((__be32 *)(th + 1), tp, &opts); |
2504 | th->doff = (tcp_header_size >> 2); | 2506 | th->doff = (tcp_header_size >> 2); |
2505 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); | 2507 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); |
2506 | 2508 | ||
2507 | #ifdef CONFIG_TCP_MD5SIG | 2509 | #ifdef CONFIG_TCP_MD5SIG |
2508 | /* Okay, we have all we need - do the md5 hash if needed */ | 2510 | /* Okay, we have all we need - do the md5 hash if needed */ |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 8a0ab2977f1f..c732be00606b 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -172,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
172 | 172 | ||
173 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 173 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
174 | if (icsk->icsk_retransmits) | 174 | if (icsk->icsk_retransmits) |
175 | dst_negative_advice(&sk->sk_dst_cache, sk); | 175 | dst_negative_advice(sk); |
176 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 176 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
177 | } else { | 177 | } else { |
178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { | 178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { |
179 | /* Black hole detection */ | 179 | /* Black hole detection */ |
180 | tcp_mtu_probing(icsk, sk); | 180 | tcp_mtu_probing(icsk, sk); |
181 | 181 | ||
182 | dst_negative_advice(&sk->sk_dst_cache, sk); | 182 | dst_negative_advice(sk); |
183 | } | 183 | } |
184 | 184 | ||
185 | retry_until = sysctl_tcp_retries2; | 185 | retry_until = sysctl_tcp_retries2; |
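[Editor's note] dst_negative_advice() now takes the socket rather than a pointer to its cached dst, so the route-cache pointer is manipulated in one place instead of at every call site. A minimal sketch of that refactor, assuming the helper owns the cache pointer internally; the types are stand-ins, not the kernel's:

#include <stdlib.h>
#include <stdio.h>

struct dst { int obsolete; };
struct sock { struct dst *dst_cache; };

static void dst_negative_advice(struct sock *sk)
{
	if (sk->dst_cache && sk->dst_cache->obsolete) {
		free(sk->dst_cache);	/* drop the stale route */
		sk->dst_cache = NULL;
	}
}

int main(void)
{
	struct sock sk = { .dst_cache = calloc(1, sizeof(struct dst)) };

	sk.dst_cache->obsolete = 1;
	dst_negative_advice(&sk);	/* old API passed &sk.dst_cache too */
	printf("cache %s\n", sk.dst_cache ? "kept" : "dropped");
	return 0;
}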
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8fef859db35d..1e18f9cc9247 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -307,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | |||
307 | static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, | 307 | static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, |
308 | unsigned int port) | 308 | unsigned int port) |
309 | { | 309 | { |
310 | return jhash_1word(saddr, net_hash_mix(net)) ^ port; | 310 | return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; |
311 | } | 311 | } |
312 | 312 | ||
313 | int udp_v4_get_port(struct sock *sk, unsigned short snum) | 313 | int udp_v4_get_port(struct sock *sk, unsigned short snum) |
314 | { | 314 | { |
315 | unsigned int hash2_nulladdr = | 315 | unsigned int hash2_nulladdr = |
316 | udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum); | 316 | udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); |
317 | unsigned int hash2_partial = | 317 | unsigned int hash2_partial = |
318 | udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); | 318 | udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); |
319 | 319 | ||
@@ -466,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |||
466 | daddr, hnum, dif, | 466 | daddr, hnum, dif, |
467 | hslot2, slot2); | 467 | hslot2, slot2); |
468 | if (!result) { | 468 | if (!result) { |
469 | hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum); | 469 | hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); |
470 | slot2 = hash2 & udptable->mask; | 470 | slot2 = hash2 & udptable->mask; |
471 | hslot2 = &udptable->hash2[slot2]; | 471 | hslot2 = &udptable->hash2[slot2]; |
472 | if (hslot->count < hslot2->count) | 472 | if (hslot->count < hslot2->count) |
473 | goto begin; | 473 | goto begin; |
474 | 474 | ||
475 | result = udp4_lib_lookup2(net, saddr, sport, | 475 | result = udp4_lib_lookup2(net, saddr, sport, |
476 | INADDR_ANY, hnum, dif, | 476 | htonl(INADDR_ANY), hnum, dif, |
477 | hslot2, slot2); | 477 | hslot2, slot2); |
478 | } | 478 | } |
479 | rcu_read_unlock(); | 479 | rcu_read_unlock(); |
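[Editor's note] INADDR_ANY is a host-order constant while udp4_portaddr_hash() is fed wire-order (__be32) addresses; since INADDR_ANY is zero the htonl() does not change the value, but it keeps the wildcard slot typed and computed the same way as hashes of real received addresses. A userspace stand-in for the bucket computation (the mixer is illustrative, not the kernel's jhash_1word()):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static unsigned int portaddr_hash(uint32_t saddr_be, unsigned int port)
{
	uint32_t h = saddr_be * 0x9e3779b9u;	/* cheap multiplicative mix */
	return h ^ port;
}

int main(void)
{
	printf("wildcard bucket: %u\n",
	       portaddr_hash(htonl(INADDR_ANY), 53) & 127);
	printf("10.0.0.1 bucket: %u\n",
	       portaddr_hash(htonl(0x0a000001), 53) & 127);
	return 0;
}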
@@ -1217,6 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags) | |||
1217 | sk->sk_state = TCP_CLOSE; | 1217 | sk->sk_state = TCP_CLOSE; |
1218 | inet->inet_daddr = 0; | 1218 | inet->inet_daddr = 0; |
1219 | inet->inet_dport = 0; | 1219 | inet->inet_dport = 0; |
1220 | inet_rps_save_rxhash(sk, 0); | ||
1220 | sk->sk_bound_dev_if = 0; | 1221 | sk->sk_bound_dev_if = 0; |
1221 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | 1222 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
1222 | inet_reset_saddr(sk); | 1223 | inet_reset_saddr(sk); |
@@ -1258,8 +1259,12 @@ EXPORT_SYMBOL(udp_lib_unhash); | |||
1258 | 1259 | ||
1259 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1260 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1260 | { | 1261 | { |
1261 | int rc = sock_queue_rcv_skb(sk, skb); | 1262 | int rc; |
1263 | |||
1264 | if (inet_sk(sk)->inet_daddr) | ||
1265 | inet_rps_save_rxhash(sk, skb->rxhash); | ||
1262 | 1266 | ||
1267 | rc = sock_queue_rcv_skb(sk, skb); | ||
1263 | if (rc < 0) { | 1268 | if (rc < 0) { |
1264 | int is_udplite = IS_UDPLITE(sk); | 1269 | int is_udplite = IS_UDPLITE(sk); |
1265 | 1270 | ||
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index e4a1483fba77..1705476670ef 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net, | |||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct dst_entry * | ||
63 | __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | ||
64 | { | ||
65 | struct dst_entry *dst; | ||
66 | |||
67 | read_lock_bh(&policy->lock); | ||
68 | for (dst = policy->bundles; dst; dst = dst->next) { | ||
69 | struct xfrm_dst *xdst = (struct xfrm_dst *)dst; | ||
70 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ | ||
71 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && | ||
72 | xdst->u.rt.fl.fl4_src == fl->fl4_src && | ||
73 | xdst->u.rt.fl.fl4_tos == fl->fl4_tos && | ||
74 | xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { | ||
75 | dst_clone(dst); | ||
76 | break; | ||
77 | } | ||
78 | } | ||
79 | read_unlock_bh(&policy->lock); | ||
80 | return dst; | ||
81 | } | ||
82 | |||
83 | static int xfrm4_get_tos(struct flowi *fl) | 62 | static int xfrm4_get_tos(struct flowi *fl) |
84 | { | 63 | { |
85 | return fl->fl4_tos; | 64 | return fl->fl4_tos; |
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | |||
259 | .dst_ops = &xfrm4_dst_ops, | 238 | .dst_ops = &xfrm4_dst_ops, |
260 | .dst_lookup = xfrm4_dst_lookup, | 239 | .dst_lookup = xfrm4_dst_lookup, |
261 | .get_saddr = xfrm4_get_saddr, | 240 | .get_saddr = xfrm4_get_saddr, |
262 | .find_bundle = __xfrm4_find_bundle, | ||
263 | .decode_session = _decode_session4, | 241 | .decode_session = _decode_session4, |
264 | .get_tos = xfrm4_get_tos, | 242 | .get_tos = xfrm4_get_tos, |
265 | .init_path = xfrm4_init_path, | 243 | .init_path = xfrm4_init_path, |
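[Editor's note] Dropping the .find_bundle hook from xfrm4_policy_afinfo suggests the per-family linear scan over policy->bundles has moved into the generic xfrm code, one copy for all address families. What the deleted __xfrm4_find_bundle() did, reduced to its core flow-key comparison with stand-in types:

#include <stdio.h>
#include <stdint.h>

struct flow { int oif; uint32_t dst, src; uint8_t tos; };
struct bundle { struct flow fl; struct bundle *next; };

static struct bundle *find_bundle(struct bundle *head, const struct flow *fl)
{
	for (struct bundle *b = head; b; b = b->next)
		if (b->fl.oif == fl->oif && b->fl.dst == fl->dst &&
		    b->fl.src == fl->src && b->fl.tos == fl->tos)
			return b;	/* reuse the cached route bundle */
	return NULL;
}

int main(void)
{
	struct bundle b = { .fl = { 1, 0x0a000001, 0x0a000002, 0 } };
	struct flow fl = b.fl;

	printf("%s\n", find_bundle(&b, &fl) ? "hit" : "miss");
	return 0;
}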
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 413054f02aab..34d2d649e396 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -82,7 +82,7 @@ | |||
82 | #include <linux/random.h> | 82 | #include <linux/random.h> |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #include <asm/uaccess.h> | 85 | #include <linux/uaccess.h> |
86 | #include <asm/unaligned.h> | 86 | #include <asm/unaligned.h> |
87 | 87 | ||
88 | #include <linux/proc_fs.h> | 88 | #include <linux/proc_fs.h> |
@@ -98,7 +98,11 @@ | |||
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF | 100 | #define INFINITY_LIFE_TIME 0xFFFFFFFF |
101 | #define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b))) | 101 | #define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) |
102 | |||
103 | #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) | ||
104 | #define ADDRCONF_TIMER_FUZZ (HZ / 4) | ||
105 | #define ADDRCONF_TIMER_FUZZ_MAX (HZ) | ||
102 | 106 | ||
103 | #ifdef CONFIG_SYSCTL | 107 | #ifdef CONFIG_SYSCTL |
104 | static void addrconf_sysctl_register(struct inet6_dev *idev); | 108 | static void addrconf_sysctl_register(struct inet6_dev *idev); |
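[Editor's note] The three new fuzz constants bound how far the address-validation timer may be rounded so its wakeups can be batched with others (see the round_jiffies_up() change in addrconf_verify() further down). Their values in ticks, evaluated for two common HZ settings:

#include <stdio.h>

static void show(int hz)
{
	int fuzz_minus = hz > 50 ? hz / 50 : 1;

	printf("HZ=%4d: FUZZ_MINUS=%d FUZZ=%d FUZZ_MAX=%d\n",
	       hz, fuzz_minus, hz / 4, hz);
}

int main(void)
{
	show(100);	/* FUZZ_MINUS=2  FUZZ=25  FUZZ_MAX=100 */
	show(1000);	/* FUZZ_MINUS=20 FUZZ=250 FUZZ_MAX=1000 */
	return 0;
}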
@@ -127,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev); | |||
127 | /* | 131 | /* |
128 | * Configured unicast address hash table | 132 | * Configured unicast address hash table |
129 | */ | 133 | */ |
130 | static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE]; | 134 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; |
131 | static DEFINE_RWLOCK(addrconf_hash_lock); | 135 | static DEFINE_SPINLOCK(addrconf_hash_lock); |
132 | 136 | ||
133 | static void addrconf_verify(unsigned long); | 137 | static void addrconf_verify(unsigned long); |
134 | 138 | ||
@@ -138,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock); | |||
138 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); | 142 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); |
139 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); | 143 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); |
140 | 144 | ||
141 | static void addrconf_bonding_change(struct net_device *dev, | 145 | static void addrconf_type_change(struct net_device *dev, |
142 | unsigned long event); | 146 | unsigned long event); |
143 | static int addrconf_ifdown(struct net_device *dev, int how); | 147 | static int addrconf_ifdown(struct net_device *dev, int how); |
144 | 148 | ||
145 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); | 149 | static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); |
@@ -152,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | |||
152 | 156 | ||
153 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, | 157 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, |
154 | struct prefix_info *pinfo); | 158 | struct prefix_info *pinfo); |
155 | static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 159 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
156 | struct net_device *dev); | 160 | struct net_device *dev); |
157 | 161 | ||
158 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | 162 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); |
159 | 163 | ||
@@ -250,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp) | |||
250 | __in6_ifa_put(ifp); | 254 | __in6_ifa_put(ifp); |
251 | } | 255 | } |
252 | 256 | ||
253 | enum addrconf_timer_t | 257 | enum addrconf_timer_t { |
254 | { | ||
255 | AC_NONE, | 258 | AC_NONE, |
256 | AC_DAD, | 259 | AC_DAD, |
257 | AC_RS, | 260 | AC_RS, |
@@ -271,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp, | |||
271 | case AC_RS: | 274 | case AC_RS: |
272 | ifp->timer.function = addrconf_rs_timer; | 275 | ifp->timer.function = addrconf_rs_timer; |
273 | break; | 276 | break; |
274 | default:; | 277 | default: |
278 | break; | ||
275 | } | 279 | } |
276 | ifp->timer.expires = jiffies + when; | 280 | ifp->timer.expires = jiffies + when; |
277 | add_timer(&ifp->timer); | 281 | add_timer(&ifp->timer); |
@@ -318,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
318 | { | 322 | { |
319 | struct net_device *dev = idev->dev; | 323 | struct net_device *dev = idev->dev; |
320 | 324 | ||
321 | WARN_ON(idev->addr_list != NULL); | 325 | WARN_ON(!list_empty(&idev->addr_list)); |
322 | WARN_ON(idev->mc_list != NULL); | 326 | WARN_ON(idev->mc_list != NULL); |
323 | 327 | ||
324 | #ifdef NET_REFCNT_DEBUG | 328 | #ifdef NET_REFCNT_DEBUG |
@@ -326,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) | |||
326 | #endif | 330 | #endif |
327 | dev_put(dev); | 331 | dev_put(dev); |
328 | if (!idev->dead) { | 332 | if (!idev->dead) { |
329 | printk("Freeing alive inet6 device %p\n", idev); | 333 | pr_warning("Freeing alive inet6 device %p\n", idev); |
330 | return; | 334 | return; |
331 | } | 335 | } |
332 | snmp6_free_dev(idev); | 336 | snmp6_free_dev(idev); |
@@ -351,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
351 | 355 | ||
352 | rwlock_init(&ndev->lock); | 356 | rwlock_init(&ndev->lock); |
353 | ndev->dev = dev; | 357 | ndev->dev = dev; |
358 | INIT_LIST_HEAD(&ndev->addr_list); | ||
359 | |||
354 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); | 360 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); |
355 | ndev->cnf.mtu6 = dev->mtu; | 361 | ndev->cnf.mtu6 = dev->mtu; |
356 | ndev->cnf.sysctl = NULL; | 362 | ndev->cnf.sysctl = NULL; |
@@ -402,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) | |||
402 | #endif | 408 | #endif |
403 | 409 | ||
404 | #ifdef CONFIG_IPV6_PRIVACY | 410 | #ifdef CONFIG_IPV6_PRIVACY |
411 | INIT_LIST_HEAD(&ndev->tempaddr_list); | ||
405 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); | 412 | setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); |
406 | if ((dev->flags&IFF_LOOPBACK) || | 413 | if ((dev->flags&IFF_LOOPBACK) || |
407 | dev->type == ARPHRD_TUNNEL || | 414 | dev->type == ARPHRD_TUNNEL || |
@@ -439,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev) | |||
439 | 446 | ||
440 | ASSERT_RTNL(); | 447 | ASSERT_RTNL(); |
441 | 448 | ||
442 | if ((idev = __in6_dev_get(dev)) == NULL) { | 449 | idev = __in6_dev_get(dev); |
443 | if ((idev = ipv6_add_dev(dev)) == NULL) | 450 | if (!idev) { |
451 | idev = ipv6_add_dev(dev); | ||
452 | if (!idev) | ||
444 | return NULL; | 453 | return NULL; |
445 | } | 454 | } |
446 | 455 | ||
@@ -466,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev) | |||
466 | else | 475 | else |
467 | ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); | 476 | ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); |
468 | } | 477 | } |
469 | for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) { | 478 | |
479 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | ||
470 | if (ifa->flags&IFA_F_TENTATIVE) | 480 | if (ifa->flags&IFA_F_TENTATIVE) |
471 | continue; | 481 | continue; |
472 | if (idev->cnf.forwarding) | 482 | if (idev->cnf.forwarding) |
@@ -523,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) | |||
523 | } | 533 | } |
524 | #endif | 534 | #endif |
525 | 535 | ||
526 | /* Nobody refers to this ifaddr, destroy it */ | 536 | static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head) |
537 | { | ||
538 | struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu); | ||
539 | kfree(ifp); | ||
540 | } | ||
527 | 541 | ||
542 | /* Nobody refers to this ifaddr, destroy it */ | ||
528 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | 543 | void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) |
529 | { | 544 | { |
530 | WARN_ON(ifp->if_next != NULL); | 545 | WARN_ON(!hlist_unhashed(&ifp->addr_lst)); |
531 | WARN_ON(ifp->lst_next != NULL); | ||
532 | 546 | ||
533 | #ifdef NET_REFCNT_DEBUG | 547 | #ifdef NET_REFCNT_DEBUG |
534 | printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); | 548 | printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); |
@@ -537,54 +551,46 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
537 | in6_dev_put(ifp->idev); | 551 | in6_dev_put(ifp->idev); |
538 | 552 | ||
539 | if (del_timer(&ifp->timer)) | 553 | if (del_timer(&ifp->timer)) |
540 | printk("Timer is still running, when freeing ifa=%p\n", ifp); | 554 | pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); |
541 | 555 | ||
542 | if (!ifp->dead) { | 556 | if (!ifp->dead) { |
543 | printk("Freeing alive inet6 address %p\n", ifp); | 557 | pr_warning("Freeing alive inet6 address %p\n", ifp); |
544 | return; | 558 | return; |
545 | } | 559 | } |
546 | dst_release(&ifp->rt->u.dst); | 560 | dst_release(&ifp->rt->u.dst); |
547 | 561 | ||
548 | kfree(ifp); | 562 | call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); |
549 | } | 563 | } |
550 | 564 | ||
551 | static void | 565 | static void |
552 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) | 566 | ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) |
553 | { | 567 | { |
554 | struct inet6_ifaddr *ifa, **ifap; | 568 | struct list_head *p; |
555 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); | 569 | int ifp_scope = ipv6_addr_src_scope(&ifp->addr); |
556 | 570 | ||
557 | /* | 571 | /* |
558 | * Each device address list is sorted in order of scope - | 572 | * Each device address list is sorted in order of scope - |
559 | * global before linklocal. | 573 | * global before linklocal. |
560 | */ | 574 | */ |
561 | for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; | 575 | list_for_each(p, &idev->addr_list) { |
562 | ifap = &ifa->if_next) { | 576 | struct inet6_ifaddr *ifa |
577 | = list_entry(p, struct inet6_ifaddr, if_list); | ||
563 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) | 578 | if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) |
564 | break; | 579 | break; |
565 | } | 580 | } |
566 | 581 | ||
567 | ifp->if_next = *ifap; | 582 | list_add_tail(&ifp->if_list, p); |
568 | *ifap = ifp; | ||
569 | } | 583 | } |
570 | 584 | ||
571 | /* | 585 | static u32 ipv6_addr_hash(const struct in6_addr *addr) |
572 | * Hash function taken from net_alias.c | ||
573 | */ | ||
574 | static u8 ipv6_addr_hash(const struct in6_addr *addr) | ||
575 | { | 586 | { |
576 | __u32 word; | ||
577 | |||
578 | /* | 587 | /* |
579 | * We perform the hash function over the last 64 bits of the address | 588 | * We perform the hash function over the last 64 bits of the address |
580 | * This will include the IEEE address token on links that support it. | 589 | * This will include the IEEE address token on links that support it. |
581 | */ | 590 | */ |
582 | 591 | return jhash_2words((__force u32)addr->s6_addr32[2], | |
583 | word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); | 592 | (__force u32)addr->s6_addr32[3], 0) |
584 | word ^= (word >> 16); | 593 | & (IN6_ADDR_HSIZE - 1); |
585 | word ^= (word >> 8); | ||
586 | |||
587 | return ((word ^ (word >> 4)) & 0x0f); | ||
588 | } | 594 | } |
589 | 595 | ||
590 | /* On success it returns ifp with increased reference count */ | 596 | /* On success it returns ifp with increased reference count */ |
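[Editor's note] The hand-rolled 4-bit fold (16 buckets at most) gives way to jhash_2words() over the low 64 bits of the address, masked to IN6_ADDR_HSIZE - 1; the mask form assumes the table size stays a power of two. Old versus new bucket spread in userspace form, with an illustrative mixer standing in for the kernel's jhash_2words():

#include <stdint.h>
#include <stdio.h>

#define HSIZE 16	/* IN6_ADDR_HSIZE stand-in, power of two */

static unsigned int old_hash(uint32_t w2, uint32_t w3)
{
	uint32_t word = w2 ^ w3;

	word ^= word >> 16;
	word ^= word >> 8;
	return (word ^ (word >> 4)) & 0x0f;	/* at most 16 buckets */
}

static unsigned int new_hash(uint32_t w2, uint32_t w3)
{
	uint64_t x = ((uint64_t)w2 << 32) | w3;

	x *= 0x9e3779b97f4a7c15ull;	/* multiplicative mix */
	return (unsigned int)(x >> 60) & (HSIZE - 1);
}

int main(void)
{
	printf("old=%u new=%u\n",
	       old_hash(0xfe800000, 0x1), new_hash(0xfe800000, 0x1));
	return 0;
}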
@@ -595,7 +601,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
595 | { | 601 | { |
596 | struct inet6_ifaddr *ifa = NULL; | 602 | struct inet6_ifaddr *ifa = NULL; |
597 | struct rt6_info *rt; | 603 | struct rt6_info *rt; |
598 | int hash; | 604 | unsigned int hash; |
599 | int err = 0; | 605 | int err = 0; |
600 | int addr_type = ipv6_addr_type(addr); | 606 | int addr_type = ipv6_addr_type(addr); |
601 | 607 | ||
@@ -616,7 +622,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
616 | goto out2; | 622 | goto out2; |
617 | } | 623 | } |
618 | 624 | ||
619 | write_lock(&addrconf_hash_lock); | 625 | spin_lock(&addrconf_hash_lock); |
620 | 626 | ||
621 | /* Ignore adding duplicate addresses on an interface */ | 627 | /* Ignore adding duplicate addresses on an interface */ |
622 | if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { | 628 | if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { |
@@ -643,6 +649,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
643 | 649 | ||
644 | spin_lock_init(&ifa->lock); | 650 | spin_lock_init(&ifa->lock); |
645 | init_timer(&ifa->timer); | 651 | init_timer(&ifa->timer); |
652 | INIT_HLIST_NODE(&ifa->addr_lst); | ||
646 | ifa->timer.data = (unsigned long) ifa; | 653 | ifa->timer.data = (unsigned long) ifa; |
647 | ifa->scope = scope; | 654 | ifa->scope = scope; |
648 | ifa->prefix_len = pfxlen; | 655 | ifa->prefix_len = pfxlen; |
@@ -669,10 +676,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
669 | /* Add to big hash table */ | 676 | /* Add to big hash table */ |
670 | hash = ipv6_addr_hash(addr); | 677 | hash = ipv6_addr_hash(addr); |
671 | 678 | ||
672 | ifa->lst_next = inet6_addr_lst[hash]; | 679 | hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]); |
673 | inet6_addr_lst[hash] = ifa; | 680 | spin_unlock(&addrconf_hash_lock); |
674 | in6_ifa_hold(ifa); | ||
675 | write_unlock(&addrconf_hash_lock); | ||
676 | 681 | ||
677 | write_lock(&idev->lock); | 682 | write_lock(&idev->lock); |
678 | /* Add to inet6_dev unicast addr list. */ | 683 | /* Add to inet6_dev unicast addr list. */ |
@@ -680,8 +685,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
680 | 685 | ||
681 | #ifdef CONFIG_IPV6_PRIVACY | 686 | #ifdef CONFIG_IPV6_PRIVACY |
682 | if (ifa->flags&IFA_F_TEMPORARY) { | 687 | if (ifa->flags&IFA_F_TEMPORARY) { |
683 | ifa->tmp_next = idev->tempaddr_list; | 688 | list_add(&ifa->tmp_list, &idev->tempaddr_list); |
684 | idev->tempaddr_list = ifa; | ||
685 | in6_ifa_hold(ifa); | 689 | in6_ifa_hold(ifa); |
686 | } | 690 | } |
687 | #endif | 691 | #endif |
@@ -700,7 +704,7 @@ out2: | |||
700 | 704 | ||
701 | return ifa; | 705 | return ifa; |
702 | out: | 706 | out: |
703 | write_unlock(&addrconf_hash_lock); | 707 | spin_unlock(&addrconf_hash_lock); |
704 | goto out2; | 708 | goto out2; |
705 | } | 709 | } |
706 | 710 | ||
@@ -708,7 +712,7 @@ out: | |||
708 | 712 | ||
709 | static void ipv6_del_addr(struct inet6_ifaddr *ifp) | 713 | static void ipv6_del_addr(struct inet6_ifaddr *ifp) |
710 | { | 714 | { |
711 | struct inet6_ifaddr *ifa, **ifap; | 715 | struct inet6_ifaddr *ifa, *ifn; |
712 | struct inet6_dev *idev = ifp->idev; | 716 | struct inet6_dev *idev = ifp->idev; |
713 | int hash; | 717 | int hash; |
714 | int deleted = 0, onlink = 0; | 718 | int deleted = 0, onlink = 0; |
@@ -718,42 +722,27 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
718 | 722 | ||
719 | ifp->dead = 1; | 723 | ifp->dead = 1; |
720 | 724 | ||
721 | write_lock_bh(&addrconf_hash_lock); | 725 | spin_lock_bh(&addrconf_hash_lock); |
722 | for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL; | 726 | hlist_del_init_rcu(&ifp->addr_lst); |
723 | ifap = &ifa->lst_next) { | 727 | spin_unlock_bh(&addrconf_hash_lock); |
724 | if (ifa == ifp) { | ||
725 | *ifap = ifa->lst_next; | ||
726 | __in6_ifa_put(ifp); | ||
727 | ifa->lst_next = NULL; | ||
728 | break; | ||
729 | } | ||
730 | } | ||
731 | write_unlock_bh(&addrconf_hash_lock); | ||
732 | 728 | ||
733 | write_lock_bh(&idev->lock); | 729 | write_lock_bh(&idev->lock); |
734 | #ifdef CONFIG_IPV6_PRIVACY | 730 | #ifdef CONFIG_IPV6_PRIVACY |
735 | if (ifp->flags&IFA_F_TEMPORARY) { | 731 | if (ifp->flags&IFA_F_TEMPORARY) { |
736 | for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL; | 732 | list_del(&ifp->tmp_list); |
737 | ifap = &ifa->tmp_next) { | 733 | if (ifp->ifpub) { |
738 | if (ifa == ifp) { | 734 | in6_ifa_put(ifp->ifpub); |
739 | *ifap = ifa->tmp_next; | 735 | ifp->ifpub = NULL; |
740 | if (ifp->ifpub) { | ||
741 | in6_ifa_put(ifp->ifpub); | ||
742 | ifp->ifpub = NULL; | ||
743 | } | ||
744 | __in6_ifa_put(ifp); | ||
745 | ifa->tmp_next = NULL; | ||
746 | break; | ||
747 | } | ||
748 | } | 736 | } |
737 | __in6_ifa_put(ifp); | ||
749 | } | 738 | } |
750 | #endif | 739 | #endif |
751 | 740 | ||
752 | for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) { | 741 | list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) { |
753 | if (ifa == ifp) { | 742 | if (ifa == ifp) { |
754 | *ifap = ifa->if_next; | 743 | list_del_init(&ifp->if_list); |
755 | __in6_ifa_put(ifp); | 744 | __in6_ifa_put(ifp); |
756 | ifa->if_next = NULL; | 745 | |
757 | if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) | 746 | if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) |
758 | break; | 747 | break; |
759 | deleted = 1; | 748 | deleted = 1; |
@@ -786,7 +775,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
786 | } | 775 | } |
787 | } | 776 | } |
788 | } | 777 | } |
789 | ifap = &ifa->if_next; | ||
790 | } | 778 | } |
791 | write_unlock_bh(&idev->lock); | 779 | write_unlock_bh(&idev->lock); |
792 | 780 | ||
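[Editor's note] list_for_each_entry_safe() caches the next node before the loop body runs, so the current entry can be unlinked mid-walk; that is what retires the open-coded pointer-to-pointer (ifap) bookkeeping above. A compilable miniature of the same idiom with local types (the demo frees without repairing links; the cached cursor n is what keeps the loop valid):

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

#define for_each_safe(pos, n, head) \
	for (pos = (head); pos && (n = pos->next, 1); pos = n)

int main(void)
{
	struct node *head = NULL, *pos, *n;

	for (int i = 3; i >= 1; i--) {	/* build 1 -> 2 -> 3 */
		struct node *e = malloc(sizeof(*e));
		e->v = i; e->next = head; head = e;
	}
	for_each_safe(pos, n, head) {
		if (pos->v == 2)	/* remove mid-walk; loop survives */
			free(pos);
		else
			printf("%d ", pos->v);
	}
	printf("\n");
	return 0;
}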
@@ -1165,7 +1153,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev, | |||
1165 | continue; | 1153 | continue; |
1166 | 1154 | ||
1167 | read_lock_bh(&idev->lock); | 1155 | read_lock_bh(&idev->lock); |
1168 | for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) { | 1156 | list_for_each_entry(score->ifa, &idev->addr_list, if_list) { |
1169 | int i; | 1157 | int i; |
1170 | 1158 | ||
1171 | /* | 1159 | /* |
@@ -1243,7 +1231,6 @@ try_nextdev: | |||
1243 | in6_ifa_put(hiscore->ifa); | 1231 | in6_ifa_put(hiscore->ifa); |
1244 | return 0; | 1232 | return 0; |
1245 | } | 1233 | } |
1246 | |||
1247 | EXPORT_SYMBOL(ipv6_dev_get_saddr); | 1234 | EXPORT_SYMBOL(ipv6_dev_get_saddr); |
1248 | 1235 | ||
1249 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | 1236 | int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, |
@@ -1253,12 +1240,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, | |||
1253 | int err = -EADDRNOTAVAIL; | 1240 | int err = -EADDRNOTAVAIL; |
1254 | 1241 | ||
1255 | rcu_read_lock(); | 1242 | rcu_read_lock(); |
1256 | if ((idev = __in6_dev_get(dev)) != NULL) { | 1243 | idev = __in6_dev_get(dev); |
1244 | if (idev) { | ||
1257 | struct inet6_ifaddr *ifp; | 1245 | struct inet6_ifaddr *ifp; |
1258 | 1246 | ||
1259 | read_lock_bh(&idev->lock); | 1247 | read_lock_bh(&idev->lock); |
1260 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { | 1248 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
1261 | if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { | 1249 | if (ifp->scope == IFA_LINK && |
1250 | !(ifp->flags & banned_flags)) { | ||
1262 | ipv6_addr_copy(addr, &ifp->addr); | 1251 | ipv6_addr_copy(addr, &ifp->addr); |
1263 | err = 0; | 1252 | err = 0; |
1264 | break; | 1253 | break; |
@@ -1276,7 +1265,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1276 | struct inet6_ifaddr *ifp; | 1265 | struct inet6_ifaddr *ifp; |
1277 | 1266 | ||
1278 | read_lock_bh(&idev->lock); | 1267 | read_lock_bh(&idev->lock); |
1279 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) | 1268 | list_for_each_entry(ifp, &idev->addr_list, if_list) |
1280 | cnt++; | 1269 | cnt++; |
1281 | read_unlock_bh(&idev->lock); | 1270 | read_unlock_bh(&idev->lock); |
1282 | return cnt; | 1271 | return cnt; |
@@ -1285,11 +1274,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev) | |||
1285 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | 1274 | int ipv6_chk_addr(struct net *net, struct in6_addr *addr, |
1286 | struct net_device *dev, int strict) | 1275 | struct net_device *dev, int strict) |
1287 | { | 1276 | { |
1288 | struct inet6_ifaddr * ifp; | 1277 | struct inet6_ifaddr *ifp = NULL; |
1289 | u8 hash = ipv6_addr_hash(addr); | 1278 | struct hlist_node *node; |
1279 | unsigned int hash = ipv6_addr_hash(addr); | ||
1290 | 1280 | ||
1291 | read_lock_bh(&addrconf_hash_lock); | 1281 | rcu_read_lock_bh(); |
1292 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1282 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1293 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1283 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1294 | continue; | 1284 | continue; |
1295 | if (ipv6_addr_equal(&ifp->addr, addr) && | 1285 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -1299,27 +1289,28 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr, | |||
1299 | break; | 1289 | break; |
1300 | } | 1290 | } |
1301 | } | 1291 | } |
1302 | read_unlock_bh(&addrconf_hash_lock); | 1292 | rcu_read_unlock_bh(); |
1293 | |||
1303 | return ifp != NULL; | 1294 | return ifp != NULL; |
1304 | } | 1295 | } |
1305 | EXPORT_SYMBOL(ipv6_chk_addr); | 1296 | EXPORT_SYMBOL(ipv6_chk_addr); |
1306 | 1297 | ||
1307 | static | 1298 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
1308 | int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 1299 | struct net_device *dev) |
1309 | struct net_device *dev) | ||
1310 | { | 1300 | { |
1311 | struct inet6_ifaddr * ifp; | 1301 | unsigned int hash = ipv6_addr_hash(addr); |
1312 | u8 hash = ipv6_addr_hash(addr); | 1302 | struct inet6_ifaddr *ifp; |
1303 | struct hlist_node *node; | ||
1313 | 1304 | ||
1314 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1305 | hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1315 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1306 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1316 | continue; | 1307 | continue; |
1317 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1308 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
1318 | if (dev == NULL || ifp->idev->dev == dev) | 1309 | if (dev == NULL || ifp->idev->dev == dev) |
1319 | break; | 1310 | return true; |
1320 | } | 1311 | } |
1321 | } | 1312 | } |
1322 | return ifp != NULL; | 1313 | return false; |
1323 | } | 1314 | } |
1324 | 1315 | ||
1325 | int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) | 1316 | int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) |
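[Editor's note] Address lookups now traverse an hlist under rcu_read_lock_bh() while writers serialize on the new spinlock, replacing the reader-writer lock entirely on the read side. The reader-side pattern in miniature, using C11 atomics as a stand-in: publication is a release store, traversal uses acquire loads, and no reader lock is taken (real RCU additionally defers frees until readers quiesce, which is elided here):

#include <stdatomic.h>
#include <stdio.h>

struct entry { int key; _Atomic(struct entry *) next; };

static _Atomic(struct entry *) bucket;

static void add_head(struct entry *e)		/* writer, under its own lock */
{
	atomic_store_explicit(&e->next,
			      atomic_load_explicit(&bucket, memory_order_relaxed),
			      memory_order_relaxed);
	atomic_store_explicit(&bucket, e, memory_order_release);	/* publish */
}

static struct entry *lookup(int key)		/* reader, lock-free */
{
	for (struct entry *e = atomic_load_explicit(&bucket, memory_order_acquire);
	     e; e = atomic_load_explicit(&e->next, memory_order_acquire))
		if (e->key == key)
			return e;
	return NULL;
}

int main(void)
{
	static struct entry e1 = { .key = 42 };

	add_head(&e1);
	printf("lookup(42): %s\n", lookup(42) ? "found" : "missing");
	return 0;
}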
@@ -1333,7 +1324,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) | |||
1333 | idev = __in6_dev_get(dev); | 1324 | idev = __in6_dev_get(dev); |
1334 | if (idev) { | 1325 | if (idev) { |
1335 | read_lock_bh(&idev->lock); | 1326 | read_lock_bh(&idev->lock); |
1336 | for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { | 1327 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
1337 | onlink = ipv6_prefix_equal(addr, &ifa->addr, | 1328 | onlink = ipv6_prefix_equal(addr, &ifa->addr, |
1338 | ifa->prefix_len); | 1329 | ifa->prefix_len); |
1339 | if (onlink) | 1330 | if (onlink) |
@@ -1350,24 +1341,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix); | |||
1350 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, | 1341 | struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, |
1351 | struct net_device *dev, int strict) | 1342 | struct net_device *dev, int strict) |
1352 | { | 1343 | { |
1353 | struct inet6_ifaddr * ifp; | 1344 | struct inet6_ifaddr *ifp, *result = NULL; |
1354 | u8 hash = ipv6_addr_hash(addr); | 1345 | unsigned int hash = ipv6_addr_hash(addr); |
1346 | struct hlist_node *node; | ||
1355 | 1347 | ||
1356 | read_lock_bh(&addrconf_hash_lock); | 1348 | rcu_read_lock_bh(); |
1357 | for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { | 1349 | hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) { |
1358 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 1350 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
1359 | continue; | 1351 | continue; |
1360 | if (ipv6_addr_equal(&ifp->addr, addr)) { | 1352 | if (ipv6_addr_equal(&ifp->addr, addr)) { |
1361 | if (dev == NULL || ifp->idev->dev == dev || | 1353 | if (dev == NULL || ifp->idev->dev == dev || |
1362 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { | 1354 | !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { |
1355 | result = ifp; | ||
1363 | in6_ifa_hold(ifp); | 1356 | in6_ifa_hold(ifp); |
1364 | break; | 1357 | break; |
1365 | } | 1358 | } |
1366 | } | 1359 | } |
1367 | } | 1360 | } |
1368 | read_unlock_bh(&addrconf_hash_lock); | 1361 | rcu_read_unlock_bh(); |
1369 | 1362 | ||
1370 | return ifp; | 1363 | return result; |
1371 | } | 1364 | } |
1372 | 1365 | ||
1373 | /* Gets referenced address, destroys ifaddr */ | 1366 | /* Gets referenced address, destroys ifaddr */ |
@@ -1570,7 +1563,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev) | |||
1570 | struct inet6_ifaddr *ifp; | 1563 | struct inet6_ifaddr *ifp; |
1571 | 1564 | ||
1572 | read_lock_bh(&idev->lock); | 1565 | read_lock_bh(&idev->lock); |
1573 | for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { | 1566 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
1574 | if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { | 1567 | if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { |
1575 | memcpy(eui, ifp->addr.s6_addr+8, 8); | 1568 | memcpy(eui, ifp->addr.s6_addr+8, 8); |
1576 | err = 0; | 1569 | err = 0; |
@@ -1738,7 +1731,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) | |||
1738 | 1731 | ||
1739 | ASSERT_RTNL(); | 1732 | ASSERT_RTNL(); |
1740 | 1733 | ||
1741 | if ((idev = ipv6_find_idev(dev)) == NULL) | 1734 | idev = ipv6_find_idev(dev); |
1735 | if (!idev) | ||
1742 | return NULL; | 1736 | return NULL; |
1743 | 1737 | ||
1744 | /* Add default multicast route */ | 1738 | /* Add default multicast route */ |
@@ -1971,7 +1965,7 @@ ok: | |||
1971 | #ifdef CONFIG_IPV6_PRIVACY | 1965 | #ifdef CONFIG_IPV6_PRIVACY |
1972 | read_lock_bh(&in6_dev->lock); | 1966 | read_lock_bh(&in6_dev->lock); |
1973 | /* update all temporary addresses in the list */ | 1967 | /* update all temporary addresses in the list */ |
1974 | for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) { | 1968 | list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) { |
1975 | /* | 1969 | /* |
1976 | * When adjusting the lifetimes of an existing | 1970 | * When adjusting the lifetimes of an existing |
1977 | * temporary address, only lower the lifetimes. | 1971 | * temporary address, only lower the lifetimes. |
@@ -2174,7 +2168,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2174 | return -ENXIO; | 2168 | return -ENXIO; |
2175 | 2169 | ||
2176 | read_lock_bh(&idev->lock); | 2170 | read_lock_bh(&idev->lock); |
2177 | for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) { | 2171 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2178 | if (ifp->prefix_len == plen && | 2172 | if (ifp->prefix_len == plen && |
2179 | ipv6_addr_equal(pfx, &ifp->addr)) { | 2173 | ipv6_addr_equal(pfx, &ifp->addr)) { |
2180 | in6_ifa_hold(ifp); | 2174 | in6_ifa_hold(ifp); |
@@ -2185,7 +2179,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, | |||
2185 | /* If the last address is deleted administratively, | 2179 | /* If the last address is deleted administratively, |
2186 | disable IPv6 on this interface. | 2180 | disable IPv6 on this interface. |
2187 | */ | 2181 | */ |
2188 | if (idev->addr_list == NULL) | 2182 | if (list_empty(&idev->addr_list)) |
2189 | addrconf_ifdown(idev->dev, 1); | 2183 | addrconf_ifdown(idev->dev, 1); |
2190 | return 0; | 2184 | return 0; |
2191 | } | 2185 | } |
@@ -2446,7 +2440,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) | |||
2446 | 2440 | ||
2447 | ASSERT_RTNL(); | 2441 | ASSERT_RTNL(); |
2448 | 2442 | ||
2449 | if ((idev = addrconf_add_dev(dev)) == NULL) { | 2443 | idev = addrconf_add_dev(dev); |
2444 | if (!idev) { | ||
2450 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); | 2445 | printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); |
2451 | return; | 2446 | return; |
2452 | } | 2447 | } |
@@ -2461,7 +2456,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2461 | int run_pending = 0; | 2456 | int run_pending = 0; |
2462 | int err; | 2457 | int err; |
2463 | 2458 | ||
2464 | switch(event) { | 2459 | switch (event) { |
2465 | case NETDEV_REGISTER: | 2460 | case NETDEV_REGISTER: |
2466 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { | 2461 | if (!idev && dev->mtu >= IPV6_MIN_MTU) { |
2467 | idev = ipv6_add_dev(dev); | 2462 | idev = ipv6_add_dev(dev); |
@@ -2469,6 +2464,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2469 | return notifier_from_errno(-ENOMEM); | 2464 | return notifier_from_errno(-ENOMEM); |
2470 | } | 2465 | } |
2471 | break; | 2466 | break; |
2467 | |||
2472 | case NETDEV_UP: | 2468 | case NETDEV_UP: |
2473 | case NETDEV_CHANGE: | 2469 | case NETDEV_CHANGE: |
2474 | if (dev->flags & IFF_SLAVE) | 2470 | if (dev->flags & IFF_SLAVE) |
@@ -2498,10 +2494,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2498 | } | 2494 | } |
2499 | 2495 | ||
2500 | if (idev) { | 2496 | if (idev) { |
2501 | if (idev->if_flags & IF_READY) { | 2497 | if (idev->if_flags & IF_READY) |
2502 | /* device is already configured. */ | 2498 | /* device is already configured. */ |
2503 | break; | 2499 | break; |
2504 | } | ||
2505 | idev->if_flags |= IF_READY; | 2500 | idev->if_flags |= IF_READY; |
2506 | } | 2501 | } |
2507 | 2502 | ||
@@ -2513,7 +2508,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2513 | run_pending = 1; | 2508 | run_pending = 1; |
2514 | } | 2509 | } |
2515 | 2510 | ||
2516 | switch(dev->type) { | 2511 | switch (dev->type) { |
2517 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2512 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) |
2518 | case ARPHRD_SIT: | 2513 | case ARPHRD_SIT: |
2519 | addrconf_sit_config(dev); | 2514 | addrconf_sit_config(dev); |
@@ -2530,25 +2525,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2530 | addrconf_dev_config(dev); | 2525 | addrconf_dev_config(dev); |
2531 | break; | 2526 | break; |
2532 | } | 2527 | } |
2528 | |||
2533 | if (idev) { | 2529 | if (idev) { |
2534 | if (run_pending) | 2530 | if (run_pending) |
2535 | addrconf_dad_run(idev); | 2531 | addrconf_dad_run(idev); |
2536 | 2532 | ||
2537 | /* If the MTU changed during the interface down, when the | 2533 | /* |
2538 | interface up, the changed MTU must be reflected in the | 2534 | * If the MTU changed during the interface down, |
2539 | idev as well as routers. | 2535 | * when the interface up, the changed MTU must be |
2536 | * reflected in the idev as well as routers. | ||
2540 | */ | 2537 | */ |
2541 | if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) { | 2538 | if (idev->cnf.mtu6 != dev->mtu && |
2539 | dev->mtu >= IPV6_MIN_MTU) { | ||
2542 | rt6_mtu_change(dev, dev->mtu); | 2540 | rt6_mtu_change(dev, dev->mtu); |
2543 | idev->cnf.mtu6 = dev->mtu; | 2541 | idev->cnf.mtu6 = dev->mtu; |
2544 | } | 2542 | } |
2545 | idev->tstamp = jiffies; | 2543 | idev->tstamp = jiffies; |
2546 | inet6_ifinfo_notify(RTM_NEWLINK, idev); | 2544 | inet6_ifinfo_notify(RTM_NEWLINK, idev); |
2547 | /* If the changed mtu during down is lower than IPV6_MIN_MTU | 2545 | |
2548 | stop IPv6 on this interface. | 2546 | /* |
2547 | * If the changed mtu during down is lower than | ||
2548 | * IPV6_MIN_MTU stop IPv6 on this interface. | ||
2549 | */ | 2549 | */ |
2550 | if (dev->mtu < IPV6_MIN_MTU) | 2550 | if (dev->mtu < IPV6_MIN_MTU) |
2551 | addrconf_ifdown(dev, event != NETDEV_DOWN); | 2551 | addrconf_ifdown(dev, 1); |
2552 | } | 2552 | } |
2553 | break; | 2553 | break; |
2554 | 2554 | ||
@@ -2565,7 +2565,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2565 | break; | 2565 | break; |
2566 | } | 2566 | } |
2567 | 2567 | ||
2568 | /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */ | 2568 | /* |
2569 | * MTU falled under IPV6_MIN_MTU. | ||
2570 | * Stop IPv6 on this interface. | ||
2571 | */ | ||
2569 | 2572 | ||
2570 | case NETDEV_DOWN: | 2573 | case NETDEV_DOWN: |
2571 | case NETDEV_UNREGISTER: | 2574 | case NETDEV_UNREGISTER: |
@@ -2585,9 +2588,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2585 | return notifier_from_errno(err); | 2588 | return notifier_from_errno(err); |
2586 | } | 2589 | } |
2587 | break; | 2590 | break; |
2588 | case NETDEV_BONDING_OLDTYPE: | 2591 | |
2589 | case NETDEV_BONDING_NEWTYPE: | 2592 | case NETDEV_PRE_TYPE_CHANGE: |
2590 | addrconf_bonding_change(dev, event); | 2593 | case NETDEV_POST_TYPE_CHANGE: |
2594 | addrconf_type_change(dev, event); | ||
2591 | break; | 2595 | break; |
2592 | } | 2596 | } |
2593 | 2597 | ||
@@ -2599,28 +2603,27 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2599 | */ | 2603 | */ |
2600 | static struct notifier_block ipv6_dev_notf = { | 2604 | static struct notifier_block ipv6_dev_notf = { |
2601 | .notifier_call = addrconf_notify, | 2605 | .notifier_call = addrconf_notify, |
2602 | .priority = 0 | ||
2603 | }; | 2606 | }; |
2604 | 2607 | ||
2605 | static void addrconf_bonding_change(struct net_device *dev, unsigned long event) | 2608 | static void addrconf_type_change(struct net_device *dev, unsigned long event) |
2606 | { | 2609 | { |
2607 | struct inet6_dev *idev; | 2610 | struct inet6_dev *idev; |
2608 | ASSERT_RTNL(); | 2611 | ASSERT_RTNL(); |
2609 | 2612 | ||
2610 | idev = __in6_dev_get(dev); | 2613 | idev = __in6_dev_get(dev); |
2611 | 2614 | ||
2612 | if (event == NETDEV_BONDING_NEWTYPE) | 2615 | if (event == NETDEV_POST_TYPE_CHANGE) |
2613 | ipv6_mc_remap(idev); | 2616 | ipv6_mc_remap(idev); |
2614 | else if (event == NETDEV_BONDING_OLDTYPE) | 2617 | else if (event == NETDEV_PRE_TYPE_CHANGE) |
2615 | ipv6_mc_unmap(idev); | 2618 | ipv6_mc_unmap(idev); |
2616 | } | 2619 | } |
2617 | 2620 | ||
2618 | static int addrconf_ifdown(struct net_device *dev, int how) | 2621 | static int addrconf_ifdown(struct net_device *dev, int how) |
2619 | { | 2622 | { |
2620 | struct inet6_dev *idev; | ||
2621 | struct inet6_ifaddr *ifa, *keep_list, **bifa; | ||
2622 | struct net *net = dev_net(dev); | 2623 | struct net *net = dev_net(dev); |
2623 | int i; | 2624 | struct inet6_dev *idev; |
2625 | struct inet6_ifaddr *ifa; | ||
2626 | LIST_HEAD(keep_list); | ||
2624 | 2627 | ||
2625 | ASSERT_RTNL(); | 2628 | ASSERT_RTNL(); |
2626 | 2629 | ||
@@ -2631,8 +2634,9 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2631 | if (idev == NULL) | 2634 | if (idev == NULL) |
2632 | return -ENODEV; | 2635 | return -ENODEV; |
2633 | 2636 | ||
2634 | /* Step 1: remove reference to ipv6 device from parent device. | 2637 | /* |
2635 | Do not dev_put! | 2638 | * Step 1: remove reference to ipv6 device from parent device. |
2639 | * Do not dev_put! | ||
2636 | */ | 2640 | */ |
2637 | if (how) { | 2641 | if (how) { |
2638 | idev->dead = 1; | 2642 | idev->dead = 1; |
@@ -2645,40 +2649,21 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2645 | 2649 | ||
2646 | } | 2650 | } |
2647 | 2651 | ||
2648 | /* Step 2: clear hash table */ | ||
2649 | for (i=0; i<IN6_ADDR_HSIZE; i++) { | ||
2650 | bifa = &inet6_addr_lst[i]; | ||
2651 | |||
2652 | write_lock_bh(&addrconf_hash_lock); | ||
2653 | while ((ifa = *bifa) != NULL) { | ||
2654 | if (ifa->idev == idev && | ||
2655 | (how || !(ifa->flags&IFA_F_PERMANENT) || | ||
2656 | ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { | ||
2657 | *bifa = ifa->lst_next; | ||
2658 | ifa->lst_next = NULL; | ||
2659 | __in6_ifa_put(ifa); | ||
2660 | continue; | ||
2661 | } | ||
2662 | bifa = &ifa->lst_next; | ||
2663 | } | ||
2664 | write_unlock_bh(&addrconf_hash_lock); | ||
2665 | } | ||
2666 | |||
2667 | write_lock_bh(&idev->lock); | 2652 | write_lock_bh(&idev->lock); |
2668 | 2653 | ||
2669 | /* Step 3: clear flags for stateless addrconf */ | 2654 | /* Step 2: clear flags for stateless addrconf */ |
2670 | if (!how) | 2655 | if (!how) |
2671 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); | 2656 | idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); |
2672 | 2657 | ||
2673 | /* Step 4: clear address list */ | ||
2674 | #ifdef CONFIG_IPV6_PRIVACY | 2658 | #ifdef CONFIG_IPV6_PRIVACY |
2675 | if (how && del_timer(&idev->regen_timer)) | 2659 | if (how && del_timer(&idev->regen_timer)) |
2676 | in6_dev_put(idev); | 2660 | in6_dev_put(idev); |
2677 | 2661 | ||
2678 | /* clear tempaddr list */ | 2662 | /* Step 3: clear tempaddr list */ |
2679 | while ((ifa = idev->tempaddr_list) != NULL) { | 2663 | while (!list_empty(&idev->tempaddr_list)) { |
2680 | idev->tempaddr_list = ifa->tmp_next; | 2664 | ifa = list_first_entry(&idev->tempaddr_list, |
2681 | ifa->tmp_next = NULL; | 2665 | struct inet6_ifaddr, tmp_list); |
2666 | list_del(&ifa->tmp_list); | ||
2682 | ifa->dead = 1; | 2667 | ifa->dead = 1; |
2683 | write_unlock_bh(&idev->lock); | 2668 | write_unlock_bh(&idev->lock); |
2684 | spin_lock_bh(&ifa->lock); | 2669 | spin_lock_bh(&ifa->lock); |
@@ -2692,23 +2677,18 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2692 | write_lock_bh(&idev->lock); | 2677 | write_lock_bh(&idev->lock); |
2693 | } | 2678 | } |
2694 | #endif | 2679 | #endif |
2695 | keep_list = NULL; | ||
2696 | bifa = &keep_list; | ||
2697 | while ((ifa = idev->addr_list) != NULL) { | ||
2698 | idev->addr_list = ifa->if_next; | ||
2699 | ifa->if_next = NULL; | ||
2700 | 2680 | ||
2681 | while (!list_empty(&idev->addr_list)) { | ||
2682 | ifa = list_first_entry(&idev->addr_list, | ||
2683 | struct inet6_ifaddr, if_list); | ||
2701 | addrconf_del_timer(ifa); | 2684 | addrconf_del_timer(ifa); |
2702 | 2685 | ||
2703 | /* If just doing link down, and address is permanent | 2686 | /* If just doing link down, and address is permanent |
2704 | and not link-local, then retain it. */ | 2687 | and not link-local, then retain it. */ |
2705 | if (how == 0 && | 2688 | if (!how && |
2706 | (ifa->flags&IFA_F_PERMANENT) && | 2689 | (ifa->flags&IFA_F_PERMANENT) && |
2707 | !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { | 2690 | !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { |
2708 | 2691 | list_move_tail(&ifa->if_list, &keep_list); | |
2709 | /* Move to holding list */ | ||
2710 | *bifa = ifa; | ||
2711 | bifa = &ifa->if_next; | ||
2712 | 2692 | ||
2713 | /* If not doing DAD on this address, just keep it. */ | 2693 | /* If not doing DAD on this address, just keep it. */ |
2714 | if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || | 2694 | if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || |
@@ -2723,24 +2703,32 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2723 | /* Flag it for later restoration when link comes up */ | 2703 | /* Flag it for later restoration when link comes up */ |
2724 | ifa->flags |= IFA_F_TENTATIVE; | 2704 | ifa->flags |= IFA_F_TENTATIVE; |
2725 | in6_ifa_hold(ifa); | 2705 | in6_ifa_hold(ifa); |
2706 | write_unlock_bh(&idev->lock); | ||
2726 | } else { | 2707 | } else { |
2708 | list_del(&ifa->if_list); | ||
2727 | ifa->dead = 1; | 2709 | ifa->dead = 1; |
2710 | write_unlock_bh(&idev->lock); | ||
2711 | |||
2712 | /* clear hash table */ | ||
2713 | spin_lock_bh(&addrconf_hash_lock); | ||
2714 | hlist_del_init_rcu(&ifa->addr_lst); | ||
2715 | spin_unlock_bh(&addrconf_hash_lock); | ||
2728 | } | 2716 | } |
2729 | write_unlock_bh(&idev->lock); | ||
2730 | 2717 | ||
2731 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2718 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2732 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); | 2719 | if (ifa->dead) |
2720 | atomic_notifier_call_chain(&inet6addr_chain, | ||
2721 | NETDEV_DOWN, ifa); | ||
2733 | in6_ifa_put(ifa); | 2722 | in6_ifa_put(ifa); |
2734 | 2723 | ||
2735 | write_lock_bh(&idev->lock); | 2724 | write_lock_bh(&idev->lock); |
2736 | } | 2725 | } |
2737 | 2726 | ||
2738 | idev->addr_list = keep_list; | 2727 | list_splice(&keep_list, &idev->addr_list); |
2739 | 2728 | ||
2740 | write_unlock_bh(&idev->lock); | 2729 | write_unlock_bh(&idev->lock); |
2741 | 2730 | ||
2742 | /* Step 5: Discard multicast list */ | 2731 | /* Step 5: Discard multicast list */ |
2743 | |||
2744 | if (how) | 2732 | if (how) |
2745 | ipv6_mc_destroy_dev(idev); | 2733 | ipv6_mc_destroy_dev(idev); |
2746 | else | 2734 | else |
@@ -2748,8 +2736,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2748 | 2736 | ||
2749 | idev->tstamp = jiffies; | 2737 | idev->tstamp = jiffies; |
2750 | 2738 | ||
2751 | /* Shot the device (if unregistered) */ | 2739 | /* Last: Shot the device (if unregistered) */ |
2752 | |||
2753 | if (how) { | 2740 | if (how) { |
2754 | addrconf_sysctl_unregister(idev); | 2741 | addrconf_sysctl_unregister(idev); |
2755 | neigh_parms_release(&nd_tbl, idev->nd_parms); | 2742 | neigh_parms_release(&nd_tbl, idev->nd_parms); |
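[Editor's note] The rewritten addrconf_ifdown() folds the old whole-table "clear hash table" pass into the per-address loop, parks survivors on a local LIST_HEAD(keep_list), and splices them back in one step, so the device list is never half-edited while idev->lock is dropped. The keep/drop split in miniature, with local types only:

#include <stdio.h>

struct item { int keep; struct item *next; };

static struct item *split_keep(struct item *head, struct item **dropped)
{
	struct item *keep = NULL, **ktail = &keep, *n;

	*dropped = NULL;
	for (; head; head = n) {
		n = head->next;
		head->next = NULL;
		if (head->keep) {
			*ktail = head;		/* list_move_tail(..., &keep_list) */
			ktail = &head->next;
		} else {
			head->next = *dropped;	/* per-address unlink path */
			*dropped = head;
		}
	}
	return keep;	/* caller splices this back: list_splice() */
}

int main(void)
{
	struct item c = { 1, NULL }, b = { 0, &c }, a = { 1, &b };
	struct item *dropped, *kept = split_keep(&a, &dropped);

	for (struct item *i = kept; i; i = i->next)
		printf("kept %p\n", (void *)i);
	return 0;
}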
@@ -2860,7 +2847,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2860 | * Optimistic nodes can start receiving | 2847 | * Optimistic nodes can start receiving |
2861 | * Frames right away | 2848 | * Frames right away |
2862 | */ | 2849 | */ |
2863 | if(ifp->flags & IFA_F_OPTIMISTIC) | 2850 | if (ifp->flags & IFA_F_OPTIMISTIC) |
2864 | ip6_ins_rt(ifp->rt); | 2851 | ip6_ins_rt(ifp->rt); |
2865 | 2852 | ||
2866 | addrconf_dad_kick(ifp); | 2853 | addrconf_dad_kick(ifp); |
@@ -2910,7 +2897,7 @@ out: | |||
2910 | 2897 | ||
2911 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | 2898 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) |
2912 | { | 2899 | { |
2913 | struct net_device * dev = ifp->idev->dev; | 2900 | struct net_device *dev = ifp->idev->dev; |
2914 | 2901 | ||
2915 | /* | 2902 | /* |
2916 | * Configure the address for reception. Now it is valid. | 2903 | * Configure the address for reception. Now it is valid. |
@@ -2941,11 +2928,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
2941 | } | 2928 | } |
2942 | } | 2929 | } |
2943 | 2930 | ||
2944 | static void addrconf_dad_run(struct inet6_dev *idev) { | 2931 | static void addrconf_dad_run(struct inet6_dev *idev) |
2932 | { | ||
2945 | struct inet6_ifaddr *ifp; | 2933 | struct inet6_ifaddr *ifp; |
2946 | 2934 | ||
2947 | read_lock_bh(&idev->lock); | 2935 | read_lock_bh(&idev->lock); |
2948 | for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { | 2936 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
2949 | spin_lock(&ifp->lock); | 2937 | spin_lock(&ifp->lock); |
2950 | if (!(ifp->flags & IFA_F_TENTATIVE)) { | 2938 | if (!(ifp->flags & IFA_F_TENTATIVE)) { |
2951 | spin_unlock(&ifp->lock); | 2939 | spin_unlock(&ifp->lock); |
@@ -2970,36 +2958,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq) | |||
2970 | struct net *net = seq_file_net(seq); | 2958 | struct net *net = seq_file_net(seq); |
2971 | 2959 | ||
2972 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { | 2960 | for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { |
2973 | ifa = inet6_addr_lst[state->bucket]; | 2961 | struct hlist_node *n; |
2974 | 2962 | hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket], | |
2975 | while (ifa && !net_eq(dev_net(ifa->idev->dev), net)) | 2963 | addr_lst) |
2976 | ifa = ifa->lst_next; | 2964 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2977 | if (ifa) | 2965 | return ifa; |
2978 | break; | ||
2979 | } | 2966 | } |
2980 | return ifa; | 2967 | return NULL; |
2981 | } | 2968 | } |
2982 | 2969 | ||
2983 | static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) | 2970 | static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, |
2971 | struct inet6_ifaddr *ifa) | ||
2984 | { | 2972 | { |
2985 | struct if6_iter_state *state = seq->private; | 2973 | struct if6_iter_state *state = seq->private; |
2986 | struct net *net = seq_file_net(seq); | 2974 | struct net *net = seq_file_net(seq); |
2975 | struct hlist_node *n = &ifa->addr_lst; | ||
2987 | 2976 | ||
2988 | ifa = ifa->lst_next; | 2977 | hlist_for_each_entry_continue_rcu(ifa, n, addr_lst) |
2989 | try_again: | 2978 | if (net_eq(dev_net(ifa->idev->dev), net)) |
2990 | if (ifa) { | 2979 | return ifa; |
2991 | if (!net_eq(dev_net(ifa->idev->dev), net)) { | ||
2992 | ifa = ifa->lst_next; | ||
2993 | goto try_again; | ||
2994 | } | ||
2995 | } | ||
2996 | 2980 | ||
2997 | if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) { | 2981 | while (++state->bucket < IN6_ADDR_HSIZE) { |
2998 | ifa = inet6_addr_lst[state->bucket]; | 2982 | hlist_for_each_entry(ifa, n, |
2999 | goto try_again; | 2983 | &inet6_addr_lst[state->bucket], addr_lst) { |
2984 | if (net_eq(dev_net(ifa->idev->dev), net)) | ||
2985 | return ifa; | ||
2986 | } | ||
3000 | } | 2987 | } |
3001 | 2988 | ||
3002 | return ifa; | 2989 | return NULL; |
3003 | } | 2990 | } |
3004 | 2991 | ||
3005 | static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | 2992 | static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) |
@@ -3007,15 +2994,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) | |||
3007 | struct inet6_ifaddr *ifa = if6_get_first(seq); | 2994 | struct inet6_ifaddr *ifa = if6_get_first(seq); |
3008 | 2995 | ||
3009 | if (ifa) | 2996 | if (ifa) |
3010 | while(pos && (ifa = if6_get_next(seq, ifa)) != NULL) | 2997 | while (pos && (ifa = if6_get_next(seq, ifa)) != NULL) |
3011 | --pos; | 2998 | --pos; |
3012 | return pos ? NULL : ifa; | 2999 | return pos ? NULL : ifa; |
3013 | } | 3000 | } |
3014 | 3001 | ||
3015 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) | 3002 | static void *if6_seq_start(struct seq_file *seq, loff_t *pos) |
3016 | __acquires(addrconf_hash_lock) | 3003 | __acquires(rcu) |
3017 | { | 3004 | { |
3018 | read_lock_bh(&addrconf_hash_lock); | 3005 | rcu_read_lock_bh(); |
3019 | return if6_get_idx(seq, *pos); | 3006 | return if6_get_idx(seq, *pos); |
3020 | } | 3007 | } |
3021 | 3008 | ||
@@ -3029,9 +3016,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
3029 | } | 3016 | } |
3030 | 3017 | ||
3031 | static void if6_seq_stop(struct seq_file *seq, void *v) | 3018 | static void if6_seq_stop(struct seq_file *seq, void *v) |
3032 | __releases(addrconf_hash_lock) | 3019 | __releases(rcu) |
3033 | { | 3020 | { |
3034 | read_unlock_bh(&addrconf_hash_lock); | 3021 | rcu_read_unlock_bh(); |
3035 | } | 3022 | } |
3036 | 3023 | ||
3037 | static int if6_seq_show(struct seq_file *seq, void *v) | 3024 | static int if6_seq_show(struct seq_file *seq, void *v) |
@@ -3101,10 +3088,12 @@ void if6_proc_exit(void) | |||
3101 | int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | 3088 | int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) |
3102 | { | 3089 | { |
3103 | int ret = 0; | 3090 | int ret = 0; |
3104 | struct inet6_ifaddr * ifp; | 3091 | struct inet6_ifaddr *ifp = NULL; |
3105 | u8 hash = ipv6_addr_hash(addr); | 3092 | struct hlist_node *n; |
3106 | read_lock_bh(&addrconf_hash_lock); | 3093 | unsigned int hash = ipv6_addr_hash(addr); |
3107 | for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { | 3094 | |
3095 | rcu_read_lock_bh(); | ||
3096 | hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) { | ||
3108 | if (!net_eq(dev_net(ifp->idev->dev), net)) | 3097 | if (!net_eq(dev_net(ifp->idev->dev), net)) |
3109 | continue; | 3098 | continue; |
3110 | if (ipv6_addr_equal(&ifp->addr, addr) && | 3099 | if (ipv6_addr_equal(&ifp->addr, addr) && |
@@ -3113,7 +3102,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3113 | break; | 3102 | break; |
3114 | } | 3103 | } |
3115 | } | 3104 | } |
3116 | read_unlock_bh(&addrconf_hash_lock); | 3105 | rcu_read_unlock_bh(); |
3117 | return ret; | 3106 | return ret; |
3118 | } | 3107 | } |
3119 | #endif | 3108 | #endif |
@@ -3124,43 +3113,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) | |||
3124 | 3113 | ||
3125 | static void addrconf_verify(unsigned long foo) | 3114 | static void addrconf_verify(unsigned long foo) |
3126 | { | 3115 | { |
3116 | unsigned long now, next, next_sec, next_sched; | ||
3127 | struct inet6_ifaddr *ifp; | 3117 | struct inet6_ifaddr *ifp; |
3128 | unsigned long now, next; | 3118 | struct hlist_node *node; |
3129 | int i; | 3119 | int i; |
3130 | 3120 | ||
3131 | spin_lock_bh(&addrconf_verify_lock); | 3121 | rcu_read_lock_bh(); |
3122 | spin_lock(&addrconf_verify_lock); | ||
3132 | now = jiffies; | 3123 | now = jiffies; |
3133 | next = now + ADDR_CHECK_FREQUENCY; | 3124 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
3134 | 3125 | ||
3135 | del_timer(&addr_chk_timer); | 3126 | del_timer(&addr_chk_timer); |
3136 | 3127 | ||
3137 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 3128 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
3138 | |||
3139 | restart: | 3129 | restart: |
3140 | read_lock(&addrconf_hash_lock); | 3130 | hlist_for_each_entry_rcu(ifp, node, |
3141 | for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) { | 3131 | &inet6_addr_lst[i], addr_lst) { |
3142 | unsigned long age; | 3132 | unsigned long age; |
3143 | #ifdef CONFIG_IPV6_PRIVACY | ||
3144 | unsigned long regen_advance; | ||
3145 | #endif | ||
3146 | 3133 | ||
3147 | if (ifp->flags & IFA_F_PERMANENT) | 3134 | if (ifp->flags & IFA_F_PERMANENT) |
3148 | continue; | 3135 | continue; |
3149 | 3136 | ||
3150 | spin_lock(&ifp->lock); | 3137 | spin_lock(&ifp->lock); |
3151 | age = (now - ifp->tstamp) / HZ; | 3138 | /* We try to batch several events at once. */ |
3152 | 3139 | age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; | |
3153 | #ifdef CONFIG_IPV6_PRIVACY | ||
3154 | regen_advance = ifp->idev->cnf.regen_max_retry * | ||
3155 | ifp->idev->cnf.dad_transmits * | ||
3156 | ifp->idev->nd_parms->retrans_time / HZ; | ||
3157 | #endif | ||
3158 | 3140 | ||
3159 | if (ifp->valid_lft != INFINITY_LIFE_TIME && | 3141 | if (ifp->valid_lft != INFINITY_LIFE_TIME && |
3160 | age >= ifp->valid_lft) { | 3142 | age >= ifp->valid_lft) { |
3161 | spin_unlock(&ifp->lock); | 3143 | spin_unlock(&ifp->lock); |
3162 | in6_ifa_hold(ifp); | 3144 | in6_ifa_hold(ifp); |
3163 | read_unlock(&addrconf_hash_lock); | ||
3164 | ipv6_del_addr(ifp); | 3145 | ipv6_del_addr(ifp); |
3165 | goto restart; | 3146 | goto restart; |
3166 | } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { | 3147 | } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { |
@@ -3182,7 +3163,6 @@ restart: | |||
3182 | 3163 | ||
3183 | if (deprecate) { | 3164 | if (deprecate) { |
3184 | in6_ifa_hold(ifp); | 3165 | in6_ifa_hold(ifp); |
3185 | read_unlock(&addrconf_hash_lock); | ||
3186 | 3166 | ||
3187 | ipv6_ifa_notify(0, ifp); | 3167 | ipv6_ifa_notify(0, ifp); |
3188 | in6_ifa_put(ifp); | 3168 | in6_ifa_put(ifp); |
@@ -3191,6 +3171,10 @@ restart: | |||
3191 | #ifdef CONFIG_IPV6_PRIVACY | 3171 | #ifdef CONFIG_IPV6_PRIVACY |
3192 | } else if ((ifp->flags&IFA_F_TEMPORARY) && | 3172 | } else if ((ifp->flags&IFA_F_TEMPORARY) && |
3193 | !(ifp->flags&IFA_F_TENTATIVE)) { | 3173 | !(ifp->flags&IFA_F_TENTATIVE)) { |
3174 | unsigned long regen_advance = ifp->idev->cnf.regen_max_retry * | ||
3175 | ifp->idev->cnf.dad_transmits * | ||
3176 | ifp->idev->nd_parms->retrans_time / HZ; | ||
3177 | |||
3194 | if (age >= ifp->prefered_lft - regen_advance) { | 3178 | if (age >= ifp->prefered_lft - regen_advance) { |
3195 | struct inet6_ifaddr *ifpub = ifp->ifpub; | 3179 | struct inet6_ifaddr *ifpub = ifp->ifpub; |
3196 | if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) | 3180 | if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) |
@@ -3200,7 +3184,7 @@ restart: | |||
3200 | in6_ifa_hold(ifp); | 3184 | in6_ifa_hold(ifp); |
3201 | in6_ifa_hold(ifpub); | 3185 | in6_ifa_hold(ifpub); |
3202 | spin_unlock(&ifp->lock); | 3186 | spin_unlock(&ifp->lock); |
3203 | read_unlock(&addrconf_hash_lock); | 3187 | |
3204 | spin_lock(&ifpub->lock); | 3188 | spin_lock(&ifpub->lock); |
3205 | ifpub->regen_count = 0; | 3189 | ifpub->regen_count = 0; |
3206 | spin_unlock(&ifpub->lock); | 3190 | spin_unlock(&ifpub->lock); |
@@ -3220,12 +3204,26 @@ restart: | |||
3220 | spin_unlock(&ifp->lock); | 3204 | spin_unlock(&ifp->lock); |
3221 | } | 3205 | } |
3222 | } | 3206 | } |
3223 | read_unlock(&addrconf_hash_lock); | ||
3224 | } | 3207 | } |
3225 | 3208 | ||
3226 | addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next; | 3209 | next_sec = round_jiffies_up(next); |
3210 | next_sched = next; | ||
3211 | |||
3212 | /* If rounded timeout is accurate enough, accept it. */ | ||
3213 | if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ)) | ||
3214 | next_sched = next_sec; | ||
3215 | |||
3216 | /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */ | ||
3217 | if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX)) | ||
3218 | next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX; | ||
3219 | |||
3220 | ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", | ||
3221 | now, next, next_sec, next_sched)); | ||
3222 | |||
3223 | addr_chk_timer.expires = next_sched; | ||
3227 | add_timer(&addr_chk_timer); | 3224 | add_timer(&addr_chk_timer); |
3228 | spin_unlock_bh(&addrconf_verify_lock); | 3225 | spin_unlock(&addrconf_verify_lock); |
3226 | rcu_read_unlock_bh(); | ||
3229 | } | 3227 | } |
3230 | 3228 | ||
3231 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) | 3229 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) |
@@ -3515,8 +3513,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, | |||
3515 | return nlmsg_end(skb, nlh); | 3513 | return nlmsg_end(skb, nlh); |
3516 | } | 3514 | } |
3517 | 3515 | ||
3518 | enum addr_type_t | 3516 | enum addr_type_t { |
3519 | { | ||
3520 | UNICAST_ADDR, | 3517 | UNICAST_ADDR, |
3521 | MULTICAST_ADDR, | 3518 | MULTICAST_ADDR, |
3522 | ANYCAST_ADDR, | 3519 | ANYCAST_ADDR, |
@@ -3527,7 +3524,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3527 | struct netlink_callback *cb, enum addr_type_t type, | 3524 | struct netlink_callback *cb, enum addr_type_t type, |
3528 | int s_ip_idx, int *p_ip_idx) | 3525 | int s_ip_idx, int *p_ip_idx) |
3529 | { | 3526 | { |
3530 | struct inet6_ifaddr *ifa; | ||
3531 | struct ifmcaddr6 *ifmca; | 3527 | struct ifmcaddr6 *ifmca; |
3532 | struct ifacaddr6 *ifaca; | 3528 | struct ifacaddr6 *ifaca; |
3533 | int err = 1; | 3529 | int err = 1; |
@@ -3535,11 +3531,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3535 | 3531 | ||
3536 | read_lock_bh(&idev->lock); | 3532 | read_lock_bh(&idev->lock); |
3537 | switch (type) { | 3533 | switch (type) { |
3538 | case UNICAST_ADDR: | 3534 | case UNICAST_ADDR: { |
3535 | struct inet6_ifaddr *ifa; | ||
3536 | |||
3539 | /* unicast address incl. temp addr */ | 3537 | /* unicast address incl. temp addr */ |
3540 | for (ifa = idev->addr_list; ifa; | 3538 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
3541 | ifa = ifa->if_next, ip_idx++) { | 3539 | if (++ip_idx < s_ip_idx) |
3542 | if (ip_idx < s_ip_idx) | ||
3543 | continue; | 3540 | continue; |
3544 | err = inet6_fill_ifaddr(skb, ifa, | 3541 | err = inet6_fill_ifaddr(skb, ifa, |
3545 | NETLINK_CB(cb->skb).pid, | 3542 | NETLINK_CB(cb->skb).pid, |
@@ -3550,6 +3547,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, | |||
3550 | break; | 3547 | break; |
3551 | } | 3548 | } |
3552 | break; | 3549 | break; |
3550 | } | ||
3553 | case MULTICAST_ADDR: | 3551 | case MULTICAST_ADDR: |
3554 | /* multicast address */ | 3552 | /* multicast address */ |
3555 | for (ifmca = idev->mc_list; ifmca; | 3553 | for (ifmca = idev->mc_list; ifmca; |
@@ -3614,7 +3612,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, | |||
3614 | if (h > s_h || idx > s_idx) | 3612 | if (h > s_h || idx > s_idx) |
3615 | s_ip_idx = 0; | 3613 | s_ip_idx = 0; |
3616 | ip_idx = 0; | 3614 | ip_idx = 0; |
3617 | if ((idev = __in6_dev_get(dev)) == NULL) | 3615 | idev = __in6_dev_get(dev); |
3616 | if (!idev) | ||
3618 | goto cont; | 3617 | goto cont; |
3619 | 3618 | ||
3620 | if (in6_dump_addrs(idev, skb, cb, type, | 3619 | if (in6_dump_addrs(idev, skb, cb, type, |
@@ -3681,12 +3680,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3681 | if (ifm->ifa_index) | 3680 | if (ifm->ifa_index) |
3682 | dev = __dev_get_by_index(net, ifm->ifa_index); | 3681 | dev = __dev_get_by_index(net, ifm->ifa_index); |
3683 | 3682 | ||
3684 | if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { | 3683 | ifa = ipv6_get_ifaddr(net, addr, dev, 1); |
3684 | if (!ifa) { | ||
3685 | err = -EADDRNOTAVAIL; | 3685 | err = -EADDRNOTAVAIL; |
3686 | goto errout; | 3686 | goto errout; |
3687 | } | 3687 | } |
3688 | 3688 | ||
3689 | if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) { | 3689 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL); |
3690 | if (!skb) { | ||
3690 | err = -ENOBUFS; | 3691 | err = -ENOBUFS; |
3691 | goto errout_ifa; | 3692 | goto errout_ifa; |
3692 | } | 3693 | } |
@@ -3811,7 +3812,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, | |||
3811 | static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, | 3812 | static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, |
3812 | int bytes) | 3813 | int bytes) |
3813 | { | 3814 | { |
3814 | switch(attrtype) { | 3815 | switch (attrtype) { |
3815 | case IFLA_INET6_STATS: | 3816 | case IFLA_INET6_STATS: |
3816 | __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); | 3817 | __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); |
3817 | break; | 3818 | break; |
@@ -4047,7 +4048,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4047 | addrconf_leave_anycast(ifp); | 4048 | addrconf_leave_anycast(ifp); |
4048 | addrconf_leave_solict(ifp->idev, &ifp->addr); | 4049 | addrconf_leave_solict(ifp->idev, &ifp->addr); |
4049 | dst_hold(&ifp->rt->u.dst); | 4050 | dst_hold(&ifp->rt->u.dst); |
4050 | if (ip6_del_rt(ifp->rt)) | 4051 | |
4052 | if (ifp->dead && ip6_del_rt(ifp->rt)) | ||
4051 | dst_free(&ifp->rt->u.dst); | 4053 | dst_free(&ifp->rt->u.dst); |
4052 | break; | 4054 | break; |
4053 | } | 4055 | } |
@@ -4163,211 +4165,211 @@ static struct addrconf_sysctl_table | |||
4163 | .sysctl_header = NULL, | 4165 | .sysctl_header = NULL, |
4164 | .addrconf_vars = { | 4166 | .addrconf_vars = { |
4165 | { | 4167 | { |
4166 | .procname = "forwarding", | 4168 | .procname = "forwarding", |
4167 | .data = &ipv6_devconf.forwarding, | 4169 | .data = &ipv6_devconf.forwarding, |
4168 | .maxlen = sizeof(int), | 4170 | .maxlen = sizeof(int), |
4169 | .mode = 0644, | 4171 | .mode = 0644, |
4170 | .proc_handler = addrconf_sysctl_forward, | 4172 | .proc_handler = addrconf_sysctl_forward, |
4171 | }, | 4173 | }, |
4172 | { | 4174 | { |
4173 | .procname = "hop_limit", | 4175 | .procname = "hop_limit", |
4174 | .data = &ipv6_devconf.hop_limit, | 4176 | .data = &ipv6_devconf.hop_limit, |
4175 | .maxlen = sizeof(int), | 4177 | .maxlen = sizeof(int), |
4176 | .mode = 0644, | 4178 | .mode = 0644, |
4177 | .proc_handler = proc_dointvec, | 4179 | .proc_handler = proc_dointvec, |
4178 | }, | 4180 | }, |
4179 | { | 4181 | { |
4180 | .procname = "mtu", | 4182 | .procname = "mtu", |
4181 | .data = &ipv6_devconf.mtu6, | 4183 | .data = &ipv6_devconf.mtu6, |
4182 | .maxlen = sizeof(int), | 4184 | .maxlen = sizeof(int), |
4183 | .mode = 0644, | 4185 | .mode = 0644, |
4184 | .proc_handler = proc_dointvec, | 4186 | .proc_handler = proc_dointvec, |
4185 | }, | 4187 | }, |
4186 | { | 4188 | { |
4187 | .procname = "accept_ra", | 4189 | .procname = "accept_ra", |
4188 | .data = &ipv6_devconf.accept_ra, | 4190 | .data = &ipv6_devconf.accept_ra, |
4189 | .maxlen = sizeof(int), | 4191 | .maxlen = sizeof(int), |
4190 | .mode = 0644, | 4192 | .mode = 0644, |
4191 | .proc_handler = proc_dointvec, | 4193 | .proc_handler = proc_dointvec, |
4192 | }, | 4194 | }, |
4193 | { | 4195 | { |
4194 | .procname = "accept_redirects", | 4196 | .procname = "accept_redirects", |
4195 | .data = &ipv6_devconf.accept_redirects, | 4197 | .data = &ipv6_devconf.accept_redirects, |
4196 | .maxlen = sizeof(int), | 4198 | .maxlen = sizeof(int), |
4197 | .mode = 0644, | 4199 | .mode = 0644, |
4198 | .proc_handler = proc_dointvec, | 4200 | .proc_handler = proc_dointvec, |
4199 | }, | 4201 | }, |
4200 | { | 4202 | { |
4201 | .procname = "autoconf", | 4203 | .procname = "autoconf", |
4202 | .data = &ipv6_devconf.autoconf, | 4204 | .data = &ipv6_devconf.autoconf, |
4203 | .maxlen = sizeof(int), | 4205 | .maxlen = sizeof(int), |
4204 | .mode = 0644, | 4206 | .mode = 0644, |
4205 | .proc_handler = proc_dointvec, | 4207 | .proc_handler = proc_dointvec, |
4206 | }, | 4208 | }, |
4207 | { | 4209 | { |
4208 | .procname = "dad_transmits", | 4210 | .procname = "dad_transmits", |
4209 | .data = &ipv6_devconf.dad_transmits, | 4211 | .data = &ipv6_devconf.dad_transmits, |
4210 | .maxlen = sizeof(int), | 4212 | .maxlen = sizeof(int), |
4211 | .mode = 0644, | 4213 | .mode = 0644, |
4212 | .proc_handler = proc_dointvec, | 4214 | .proc_handler = proc_dointvec, |
4213 | }, | 4215 | }, |
4214 | { | 4216 | { |
4215 | .procname = "router_solicitations", | 4217 | .procname = "router_solicitations", |
4216 | .data = &ipv6_devconf.rtr_solicits, | 4218 | .data = &ipv6_devconf.rtr_solicits, |
4217 | .maxlen = sizeof(int), | 4219 | .maxlen = sizeof(int), |
4218 | .mode = 0644, | 4220 | .mode = 0644, |
4219 | .proc_handler = proc_dointvec, | 4221 | .proc_handler = proc_dointvec, |
4220 | }, | 4222 | }, |
4221 | { | 4223 | { |
4222 | .procname = "router_solicitation_interval", | 4224 | .procname = "router_solicitation_interval", |
4223 | .data = &ipv6_devconf.rtr_solicit_interval, | 4225 | .data = &ipv6_devconf.rtr_solicit_interval, |
4224 | .maxlen = sizeof(int), | 4226 | .maxlen = sizeof(int), |
4225 | .mode = 0644, | 4227 | .mode = 0644, |
4226 | .proc_handler = proc_dointvec_jiffies, | 4228 | .proc_handler = proc_dointvec_jiffies, |
4227 | }, | 4229 | }, |
4228 | { | 4230 | { |
4229 | .procname = "router_solicitation_delay", | 4231 | .procname = "router_solicitation_delay", |
4230 | .data = &ipv6_devconf.rtr_solicit_delay, | 4232 | .data = &ipv6_devconf.rtr_solicit_delay, |
4231 | .maxlen = sizeof(int), | 4233 | .maxlen = sizeof(int), |
4232 | .mode = 0644, | 4234 | .mode = 0644, |
4233 | .proc_handler = proc_dointvec_jiffies, | 4235 | .proc_handler = proc_dointvec_jiffies, |
4234 | }, | 4236 | }, |
4235 | { | 4237 | { |
4236 | .procname = "force_mld_version", | 4238 | .procname = "force_mld_version", |
4237 | .data = &ipv6_devconf.force_mld_version, | 4239 | .data = &ipv6_devconf.force_mld_version, |
4238 | .maxlen = sizeof(int), | 4240 | .maxlen = sizeof(int), |
4239 | .mode = 0644, | 4241 | .mode = 0644, |
4240 | .proc_handler = proc_dointvec, | 4242 | .proc_handler = proc_dointvec, |
4241 | }, | 4243 | }, |
4242 | #ifdef CONFIG_IPV6_PRIVACY | 4244 | #ifdef CONFIG_IPV6_PRIVACY |
4243 | { | 4245 | { |
4244 | .procname = "use_tempaddr", | 4246 | .procname = "use_tempaddr", |
4245 | .data = &ipv6_devconf.use_tempaddr, | 4247 | .data = &ipv6_devconf.use_tempaddr, |
4246 | .maxlen = sizeof(int), | 4248 | .maxlen = sizeof(int), |
4247 | .mode = 0644, | 4249 | .mode = 0644, |
4248 | .proc_handler = proc_dointvec, | 4250 | .proc_handler = proc_dointvec, |
4249 | }, | 4251 | }, |
4250 | { | 4252 | { |
4251 | .procname = "temp_valid_lft", | 4253 | .procname = "temp_valid_lft", |
4252 | .data = &ipv6_devconf.temp_valid_lft, | 4254 | .data = &ipv6_devconf.temp_valid_lft, |
4253 | .maxlen = sizeof(int), | 4255 | .maxlen = sizeof(int), |
4254 | .mode = 0644, | 4256 | .mode = 0644, |
4255 | .proc_handler = proc_dointvec, | 4257 | .proc_handler = proc_dointvec, |
4256 | }, | 4258 | }, |
4257 | { | 4259 | { |
4258 | .procname = "temp_prefered_lft", | 4260 | .procname = "temp_prefered_lft", |
4259 | .data = &ipv6_devconf.temp_prefered_lft, | 4261 | .data = &ipv6_devconf.temp_prefered_lft, |
4260 | .maxlen = sizeof(int), | 4262 | .maxlen = sizeof(int), |
4261 | .mode = 0644, | 4263 | .mode = 0644, |
4262 | .proc_handler = proc_dointvec, | 4264 | .proc_handler = proc_dointvec, |
4263 | }, | 4265 | }, |
4264 | { | 4266 | { |
4265 | .procname = "regen_max_retry", | 4267 | .procname = "regen_max_retry", |
4266 | .data = &ipv6_devconf.regen_max_retry, | 4268 | .data = &ipv6_devconf.regen_max_retry, |
4267 | .maxlen = sizeof(int), | 4269 | .maxlen = sizeof(int), |
4268 | .mode = 0644, | 4270 | .mode = 0644, |
4269 | .proc_handler = proc_dointvec, | 4271 | .proc_handler = proc_dointvec, |
4270 | }, | 4272 | }, |
4271 | { | 4273 | { |
4272 | .procname = "max_desync_factor", | 4274 | .procname = "max_desync_factor", |
4273 | .data = &ipv6_devconf.max_desync_factor, | 4275 | .data = &ipv6_devconf.max_desync_factor, |
4274 | .maxlen = sizeof(int), | 4276 | .maxlen = sizeof(int), |
4275 | .mode = 0644, | 4277 | .mode = 0644, |
4276 | .proc_handler = proc_dointvec, | 4278 | .proc_handler = proc_dointvec, |
4277 | }, | 4279 | }, |
4278 | #endif | 4280 | #endif |
4279 | { | 4281 | { |
4280 | .procname = "max_addresses", | 4282 | .procname = "max_addresses", |
4281 | .data = &ipv6_devconf.max_addresses, | 4283 | .data = &ipv6_devconf.max_addresses, |
4282 | .maxlen = sizeof(int), | 4284 | .maxlen = sizeof(int), |
4283 | .mode = 0644, | 4285 | .mode = 0644, |
4284 | .proc_handler = proc_dointvec, | 4286 | .proc_handler = proc_dointvec, |
4285 | }, | 4287 | }, |
4286 | { | 4288 | { |
4287 | .procname = "accept_ra_defrtr", | 4289 | .procname = "accept_ra_defrtr", |
4288 | .data = &ipv6_devconf.accept_ra_defrtr, | 4290 | .data = &ipv6_devconf.accept_ra_defrtr, |
4289 | .maxlen = sizeof(int), | 4291 | .maxlen = sizeof(int), |
4290 | .mode = 0644, | 4292 | .mode = 0644, |
4291 | .proc_handler = proc_dointvec, | 4293 | .proc_handler = proc_dointvec, |
4292 | }, | 4294 | }, |
4293 | { | 4295 | { |
4294 | .procname = "accept_ra_pinfo", | 4296 | .procname = "accept_ra_pinfo", |
4295 | .data = &ipv6_devconf.accept_ra_pinfo, | 4297 | .data = &ipv6_devconf.accept_ra_pinfo, |
4296 | .maxlen = sizeof(int), | 4298 | .maxlen = sizeof(int), |
4297 | .mode = 0644, | 4299 | .mode = 0644, |
4298 | .proc_handler = proc_dointvec, | 4300 | .proc_handler = proc_dointvec, |
4299 | }, | 4301 | }, |
4300 | #ifdef CONFIG_IPV6_ROUTER_PREF | 4302 | #ifdef CONFIG_IPV6_ROUTER_PREF |
4301 | { | 4303 | { |
4302 | .procname = "accept_ra_rtr_pref", | 4304 | .procname = "accept_ra_rtr_pref", |
4303 | .data = &ipv6_devconf.accept_ra_rtr_pref, | 4305 | .data = &ipv6_devconf.accept_ra_rtr_pref, |
4304 | .maxlen = sizeof(int), | 4306 | .maxlen = sizeof(int), |
4305 | .mode = 0644, | 4307 | .mode = 0644, |
4306 | .proc_handler = proc_dointvec, | 4308 | .proc_handler = proc_dointvec, |
4307 | }, | 4309 | }, |
4308 | { | 4310 | { |
4309 | .procname = "router_probe_interval", | 4311 | .procname = "router_probe_interval", |
4310 | .data = &ipv6_devconf.rtr_probe_interval, | 4312 | .data = &ipv6_devconf.rtr_probe_interval, |
4311 | .maxlen = sizeof(int), | 4313 | .maxlen = sizeof(int), |
4312 | .mode = 0644, | 4314 | .mode = 0644, |
4313 | .proc_handler = proc_dointvec_jiffies, | 4315 | .proc_handler = proc_dointvec_jiffies, |
4314 | }, | 4316 | }, |
4315 | #ifdef CONFIG_IPV6_ROUTE_INFO | 4317 | #ifdef CONFIG_IPV6_ROUTE_INFO |
4316 | { | 4318 | { |
4317 | .procname = "accept_ra_rt_info_max_plen", | 4319 | .procname = "accept_ra_rt_info_max_plen", |
4318 | .data = &ipv6_devconf.accept_ra_rt_info_max_plen, | 4320 | .data = &ipv6_devconf.accept_ra_rt_info_max_plen, |
4319 | .maxlen = sizeof(int), | 4321 | .maxlen = sizeof(int), |
4320 | .mode = 0644, | 4322 | .mode = 0644, |
4321 | .proc_handler = proc_dointvec, | 4323 | .proc_handler = proc_dointvec, |
4322 | }, | 4324 | }, |
4323 | #endif | 4325 | #endif |
4324 | #endif | 4326 | #endif |
4325 | { | 4327 | { |
4326 | .procname = "proxy_ndp", | 4328 | .procname = "proxy_ndp", |
4327 | .data = &ipv6_devconf.proxy_ndp, | 4329 | .data = &ipv6_devconf.proxy_ndp, |
4328 | .maxlen = sizeof(int), | 4330 | .maxlen = sizeof(int), |
4329 | .mode = 0644, | 4331 | .mode = 0644, |
4330 | .proc_handler = proc_dointvec, | 4332 | .proc_handler = proc_dointvec, |
4331 | }, | 4333 | }, |
4332 | { | 4334 | { |
4333 | .procname = "accept_source_route", | 4335 | .procname = "accept_source_route", |
4334 | .data = &ipv6_devconf.accept_source_route, | 4336 | .data = &ipv6_devconf.accept_source_route, |
4335 | .maxlen = sizeof(int), | 4337 | .maxlen = sizeof(int), |
4336 | .mode = 0644, | 4338 | .mode = 0644, |
4337 | .proc_handler = proc_dointvec, | 4339 | .proc_handler = proc_dointvec, |
4338 | }, | 4340 | }, |
4339 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 4341 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
4340 | { | 4342 | { |
4341 | .procname = "optimistic_dad", | 4343 | .procname = "optimistic_dad", |
4342 | .data = &ipv6_devconf.optimistic_dad, | 4344 | .data = &ipv6_devconf.optimistic_dad, |
4343 | .maxlen = sizeof(int), | 4345 | .maxlen = sizeof(int), |
4344 | .mode = 0644, | 4346 | .mode = 0644, |
4345 | .proc_handler = proc_dointvec, | 4347 | .proc_handler = proc_dointvec, |
4346 | 4348 | ||
4347 | }, | 4349 | }, |
4348 | #endif | 4350 | #endif |
4349 | #ifdef CONFIG_IPV6_MROUTE | 4351 | #ifdef CONFIG_IPV6_MROUTE |
4350 | { | 4352 | { |
4351 | .procname = "mc_forwarding", | 4353 | .procname = "mc_forwarding", |
4352 | .data = &ipv6_devconf.mc_forwarding, | 4354 | .data = &ipv6_devconf.mc_forwarding, |
4353 | .maxlen = sizeof(int), | 4355 | .maxlen = sizeof(int), |
4354 | .mode = 0444, | 4356 | .mode = 0444, |
4355 | .proc_handler = proc_dointvec, | 4357 | .proc_handler = proc_dointvec, |
4356 | }, | 4358 | }, |
4357 | #endif | 4359 | #endif |
4358 | { | 4360 | { |
4359 | .procname = "disable_ipv6", | 4361 | .procname = "disable_ipv6", |
4360 | .data = &ipv6_devconf.disable_ipv6, | 4362 | .data = &ipv6_devconf.disable_ipv6, |
4361 | .maxlen = sizeof(int), | 4363 | .maxlen = sizeof(int), |
4362 | .mode = 0644, | 4364 | .mode = 0644, |
4363 | .proc_handler = addrconf_sysctl_disable, | 4365 | .proc_handler = addrconf_sysctl_disable, |
4364 | }, | 4366 | }, |
4365 | { | 4367 | { |
4366 | .procname = "accept_dad", | 4368 | .procname = "accept_dad", |
4367 | .data = &ipv6_devconf.accept_dad, | 4369 | .data = &ipv6_devconf.accept_dad, |
4368 | .maxlen = sizeof(int), | 4370 | .maxlen = sizeof(int), |
4369 | .mode = 0644, | 4371 | .mode = 0644, |
4370 | .proc_handler = proc_dointvec, | 4372 | .proc_handler = proc_dointvec, |
4371 | }, | 4373 | }, |
4372 | { | 4374 | { |
4373 | .procname = "force_tllao", | 4375 | .procname = "force_tllao", |
@@ -4403,8 +4405,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name, | |||
4403 | if (t == NULL) | 4405 | if (t == NULL) |
4404 | goto out; | 4406 | goto out; |
4405 | 4407 | ||
4406 | for (i=0; t->addrconf_vars[i].data; i++) { | 4408 | for (i = 0; t->addrconf_vars[i].data; i++) { |
4407 | t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; | 4409 | t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf; |
4408 | t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ | 4410 | t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ |
4409 | t->addrconf_vars[i].extra2 = net; | 4411 | t->addrconf_vars[i].extra2 = net; |
4410 | } | 4412 | } |
@@ -4541,14 +4543,12 @@ int register_inet6addr_notifier(struct notifier_block *nb) | |||
4541 | { | 4543 | { |
4542 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | 4544 | return atomic_notifier_chain_register(&inet6addr_chain, nb); |
4543 | } | 4545 | } |
4544 | |||
4545 | EXPORT_SYMBOL(register_inet6addr_notifier); | 4546 | EXPORT_SYMBOL(register_inet6addr_notifier); |
4546 | 4547 | ||
4547 | int unregister_inet6addr_notifier(struct notifier_block *nb) | 4548 | int unregister_inet6addr_notifier(struct notifier_block *nb) |
4548 | { | 4549 | { |
4549 | return atomic_notifier_chain_unregister(&inet6addr_chain,nb); | 4550 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); |
4550 | } | 4551 | } |
4551 | |||
4552 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | 4552 | EXPORT_SYMBOL(unregister_inet6addr_notifier); |
4553 | 4553 | ||
4554 | /* | 4554 | /* |
@@ -4557,11 +4557,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier); | |||
4557 | 4557 | ||
4558 | int __init addrconf_init(void) | 4558 | int __init addrconf_init(void) |
4559 | { | 4559 | { |
4560 | int err; | 4560 | int i, err; |
4561 | 4561 | ||
4562 | if ((err = ipv6_addr_label_init()) < 0) { | 4562 | err = ipv6_addr_label_init(); |
4563 | printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n", | 4563 | if (err < 0) { |
4564 | err); | 4564 | printk(KERN_CRIT "IPv6 Addrconf:" |
4565 | " cannot initialize default policy table: %d.\n", err); | ||
4565 | return err; | 4566 | return err; |
4566 | } | 4567 | } |
4567 | 4568 | ||
@@ -4592,6 +4593,9 @@ int __init addrconf_init(void) | |||
4592 | if (err) | 4593 | if (err) |
4593 | goto errlo; | 4594 | goto errlo; |
4594 | 4595 | ||
4596 | for (i = 0; i < IN6_ADDR_HSIZE; i++) | ||
4597 | INIT_HLIST_HEAD(&inet6_addr_lst[i]); | ||
4598 | |||
4595 | register_netdevice_notifier(&ipv6_dev_notf); | 4599 | register_netdevice_notifier(&ipv6_dev_notf); |
4596 | 4600 | ||
4597 | addrconf_verify(0); | 4601 | addrconf_verify(0); |
@@ -4620,7 +4624,6 @@ errlo: | |||
4620 | 4624 | ||
4621 | void addrconf_cleanup(void) | 4625 | void addrconf_cleanup(void) |
4622 | { | 4626 | { |
4623 | struct inet6_ifaddr *ifa; | ||
4624 | struct net_device *dev; | 4627 | struct net_device *dev; |
4625 | int i; | 4628 | int i; |
4626 | 4629 | ||
@@ -4640,20 +4643,10 @@ void addrconf_cleanup(void) | |||
4640 | /* | 4643 | /* |
4641 | * Check hash table. | 4644 | * Check hash table. |
4642 | */ | 4645 | */ |
4643 | write_lock_bh(&addrconf_hash_lock); | 4646 | spin_lock_bh(&addrconf_hash_lock); |
4644 | for (i=0; i < IN6_ADDR_HSIZE; i++) { | 4647 | for (i = 0; i < IN6_ADDR_HSIZE; i++) |
4645 | for (ifa=inet6_addr_lst[i]; ifa; ) { | 4648 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); |
4646 | struct inet6_ifaddr *bifa; | 4649 | spin_unlock_bh(&addrconf_hash_lock); |
4647 | |||
4648 | bifa = ifa; | ||
4649 | ifa = ifa->lst_next; | ||
4650 | printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa); | ||
4651 | /* Do not free it; something is wrong. | ||
4652 | Now we can investigate it with debugger. | ||
4653 | */ | ||
4654 | } | ||
4655 | } | ||
4656 | write_unlock_bh(&addrconf_hash_lock); | ||
4657 | 4650 | ||
4658 | del_timer(&addr_chk_timer); | 4651 | del_timer(&addr_chk_timer); |
4659 | rtnl_unlock(); | 4652 | rtnl_unlock(); |
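
(Sketch) The addrconf.c changes above replace the hand-rolled lst_next chains under the addrconf_hash_lock rwlock with per-bucket hlist heads walked under RCU. A minimal userspace re-implementation of the hlist shape, open-coded rather than taken from the kernel headers, with hypothetical types and a fixed hash just for illustration (IN6_ADDR_HSIZE of 16 matches the kernel value of the era, but treat it as an assumption):

#include <stdio.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define IN6_ADDR_HSIZE 16

static struct hlist_head addr_lst[IN6_ADDR_HSIZE];

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

struct ifaddr {
	unsigned int addr;           /* stand-in for struct in6_addr */
	struct hlist_node addr_lst;  /* embedded node, as in inet6_ifaddr */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct ifaddr a = { .addr = 1 }, b = { .addr = 2 };
	unsigned int hash = 3;       /* would be ipv6_addr_hash(addr) */

	hlist_add_head(&a.addr_lst, &addr_lst[hash]);
	hlist_add_head(&b.addr_lst, &addr_lst[hash]);

	for (struct hlist_node *n = addr_lst[hash].first; n; n = n->next) {
		struct ifaddr *ifa = container_of(n, struct ifaddr, addr_lst);
		printf("addr %u\n", ifa->addr);
	}
	return 0;
}

In the kernel the traversal side uses hlist_for_each_entry_rcu() under rcu_read_lock_bh(), and writers still serialize on addrconf_hash_lock (now a spinlock), which is why addrconf_cleanup() above can reduce to a WARN_ON(!hlist_empty(...)) sweep.
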
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 3192aa02ba5d..d2df3144429b 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -417,6 +417,9 @@ void inet6_destroy_sock(struct sock *sk) | |||
417 | if ((skb = xchg(&np->pktoptions, NULL)) != NULL) | 417 | if ((skb = xchg(&np->pktoptions, NULL)) != NULL) |
418 | kfree_skb(skb); | 418 | kfree_skb(skb); |
419 | 419 | ||
420 | if ((skb = xchg(&np->rxpmtu, NULL)) != NULL) | ||
421 | kfree_skb(skb); | ||
422 | |||
420 | /* Free flowlabels */ | 423 | /* Free flowlabels */ |
421 | fl6_free_socklist(sk); | 424 | fl6_free_socklist(sk); |
422 | 425 | ||
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 622dc7939a1b..5959230bc6c1 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -278,6 +278,45 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi *fl, u32 info) | |||
278 | kfree_skb(skb); | 278 | kfree_skb(skb); |
279 | } | 279 | } |
280 | 280 | ||
281 | void ipv6_local_rxpmtu(struct sock *sk, struct flowi *fl, u32 mtu) | ||
282 | { | ||
283 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
284 | struct ipv6hdr *iph; | ||
285 | struct sk_buff *skb; | ||
286 | struct ip6_mtuinfo *mtu_info; | ||
287 | |||
288 | if (!np->rxopt.bits.rxpmtu) | ||
289 | return; | ||
290 | |||
291 | skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC); | ||
292 | if (!skb) | ||
293 | return; | ||
294 | |||
295 | skb_put(skb, sizeof(struct ipv6hdr)); | ||
296 | skb_reset_network_header(skb); | ||
297 | iph = ipv6_hdr(skb); | ||
298 | ipv6_addr_copy(&iph->daddr, &fl->fl6_dst); | ||
299 | |||
300 | mtu_info = IP6CBMTU(skb); | ||
301 | if (!mtu_info) { | ||
302 | kfree_skb(skb); | ||
303 | return; | ||
304 | } | ||
305 | |||
306 | mtu_info->ip6m_mtu = mtu; | ||
307 | mtu_info->ip6m_addr.sin6_family = AF_INET6; | ||
308 | mtu_info->ip6m_addr.sin6_port = 0; | ||
309 | mtu_info->ip6m_addr.sin6_flowinfo = 0; | ||
310 | mtu_info->ip6m_addr.sin6_scope_id = fl->oif; | ||
311 | ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr); | ||
312 | |||
313 | __skb_pull(skb, skb_tail_pointer(skb) - skb->data); | ||
314 | skb_reset_transport_header(skb); | ||
315 | |||
316 | skb = xchg(&np->rxpmtu, skb); | ||
317 | kfree_skb(skb); | ||
318 | } | ||
319 | |||
281 | /* | 320 | /* |
282 | * Handle MSG_ERRQUEUE | 321 | * Handle MSG_ERRQUEUE |
283 | */ | 322 | */ |
@@ -381,6 +420,54 @@ out: | |||
381 | return err; | 420 | return err; |
382 | } | 421 | } |
383 | 422 | ||
423 | /* | ||
424 | * Handle IPV6_RECVPATHMTU | ||
425 | */ | ||
426 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) | ||
427 | { | ||
428 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
429 | struct sk_buff *skb; | ||
430 | struct sockaddr_in6 *sin; | ||
431 | struct ip6_mtuinfo mtu_info; | ||
432 | int err; | ||
433 | int copied; | ||
434 | |||
435 | err = -EAGAIN; | ||
436 | skb = xchg(&np->rxpmtu, NULL); | ||
437 | if (skb == NULL) | ||
438 | goto out; | ||
439 | |||
440 | copied = skb->len; | ||
441 | if (copied > len) { | ||
442 | msg->msg_flags |= MSG_TRUNC; | ||
443 | copied = len; | ||
444 | } | ||
445 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
446 | if (err) | ||
447 | goto out_free_skb; | ||
448 | |||
449 | sock_recv_timestamp(msg, sk, skb); | ||
450 | |||
451 | memcpy(&mtu_info, IP6CBMTU(skb), sizeof(mtu_info)); | ||
452 | |||
453 | sin = (struct sockaddr_in6 *)msg->msg_name; | ||
454 | if (sin) { | ||
455 | sin->sin6_family = AF_INET6; | ||
456 | sin->sin6_flowinfo = 0; | ||
457 | sin->sin6_port = 0; | ||
458 | sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; | ||
459 | ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr); | ||
460 | } | ||
461 | |||
462 | put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); | ||
463 | |||
464 | err = copied; | ||
465 | |||
466 | out_free_skb: | ||
467 | kfree_skb(skb); | ||
468 | out: | ||
469 | return err; | ||
470 | } | ||
384 | 471 | ||
385 | 472 | ||
386 | int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | 473 | int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) |
@@ -497,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | |||
497 | int datagram_send_ctl(struct net *net, | 584 | int datagram_send_ctl(struct net *net, |
498 | struct msghdr *msg, struct flowi *fl, | 585 | struct msghdr *msg, struct flowi *fl, |
499 | struct ipv6_txoptions *opt, | 586 | struct ipv6_txoptions *opt, |
500 | int *hlimit, int *tclass) | 587 | int *hlimit, int *tclass, int *dontfrag) |
501 | { | 588 | { |
502 | struct in6_pktinfo *src_info; | 589 | struct in6_pktinfo *src_info; |
503 | struct cmsghdr *cmsg; | 590 | struct cmsghdr *cmsg; |
@@ -737,6 +824,25 @@ int datagram_send_ctl(struct net *net, | |||
737 | 824 | ||
738 | break; | 825 | break; |
739 | } | 826 | } |
827 | |||
828 | case IPV6_DONTFRAG: | ||
829 | { | ||
830 | int df; | ||
831 | |||
832 | err = -EINVAL; | ||
833 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { | ||
834 | goto exit_f; | ||
835 | } | ||
836 | |||
837 | df = *(int *)CMSG_DATA(cmsg); | ||
838 | if (df < 0 || df > 1) | ||
839 | goto exit_f; | ||
840 | |||
841 | err = 0; | ||
842 | *dontfrag = df; | ||
843 | |||
844 | break; | ||
845 | } | ||
740 | default: | 846 | default: |
741 | LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", | 847 | LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", |
742 | cmsg->cmsg_type); | 848 | cmsg->cmsg_type); |
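
(Sketch) ipv6_local_rxpmtu() above keeps at most one queued path-MTU report per socket: the producer xchg()s the new skb into np->rxpmtu and frees whatever was there, and ipv6_recv_rxpmtu() xchg()s it back out, so the reader only ever sees the latest MTU. A userspace sketch of that one-deep mailbox, with C11 atomics standing in for the kernel's xchg() and all names hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct report { unsigned int mtu; };

static _Atomic(struct report *) mailbox;

static void post_report(unsigned int mtu)      /* producer side */
{
	struct report *r = malloc(sizeof(*r)), *old;

	if (!r)
		return;
	r->mtu = mtu;
	old = atomic_exchange(&mailbox, r);    /* like xchg(&np->rxpmtu, skb) */
	free(old);                             /* like kfree_skb(skb) */
}

static struct report *take_report(void)        /* consumer side */
{
	return atomic_exchange(&mailbox, NULL);
}

int main(void)
{
	post_report(1400);
	post_report(1280);                     /* overwrites the 1400 report */

	struct report *r = take_report();
	if (r) {
		printf("latest mtu: %u\n", r->mtu);  /* prints 1280 */
		free(r);
	}
	return 0;
}
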
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 5e463c43fcc2..8124f16f2ac2 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -208,7 +208,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
208 | { | 208 | { |
209 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; | 209 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; |
210 | 210 | ||
211 | frh->family = AF_INET6; | ||
212 | frh->dst_len = rule6->dst.plen; | 211 | frh->dst_len = rule6->dst.plen; |
213 | frh->src_len = rule6->src.plen; | 212 | frh->src_len = rule6->src.plen; |
214 | frh->tos = rule6->tclass; | 213 | frh->tos = rule6->tclass; |
@@ -239,7 +238,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) | |||
239 | } | 238 | } |
240 | 239 | ||
241 | static struct fib_rules_ops fib6_rules_ops_template = { | 240 | static struct fib_rules_ops fib6_rules_ops_template = { |
242 | .family = AF_INET6, | 241 | .family = FIB_RULES_IPV6, |
243 | .rule_size = sizeof(struct fib6_rule), | 242 | .rule_size = sizeof(struct fib6_rule), |
244 | .addr_size = sizeof(struct in6_addr), | 243 | .addr_size = sizeof(struct in6_addr), |
245 | .action = fib6_rule_action, | 244 | .action = fib6_rule_action, |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 3330a4bd6157..ce7992982557 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -481,8 +481,9 @@ route_done: | |||
481 | len + sizeof(struct icmp6hdr), | 481 | len + sizeof(struct icmp6hdr), |
482 | sizeof(struct icmp6hdr), hlimit, | 482 | sizeof(struct icmp6hdr), hlimit, |
483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
484 | MSG_DONTWAIT); | 484 | MSG_DONTWAIT, np->dontfrag); |
485 | if (err) { | 485 | if (err) { |
486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | ||
486 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
487 | goto out_put; | 488 | goto out_put; |
488 | } | 489 | } |
@@ -560,9 +561,11 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
560 | 561 | ||
561 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), | 562 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), |
562 | sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl, | 563 | sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl, |
563 | (struct rt6_info*)dst, MSG_DONTWAIT); | 564 | (struct rt6_info*)dst, MSG_DONTWAIT, |
565 | np->dontfrag); | ||
564 | 566 | ||
565 | if (err) { | 567 | if (err) { |
568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | ||
566 | ip6_flush_pending_frames(sk); | 569 | ip6_flush_pending_frames(sk); |
567 | goto out_put; | 570 | goto out_put; |
568 | } | 571 | } |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 3a4d92b5a83e..9ca1efc923a1 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -183,7 +183,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | |||
183 | return dst; | 183 | return dst; |
184 | } | 184 | } |
185 | 185 | ||
186 | int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | 186 | int inet6_csk_xmit(struct sk_buff *skb) |
187 | { | 187 | { |
188 | struct sock *sk = skb->sk; | 188 | struct sock *sk = skb->sk; |
189 | struct inet_sock *inet = inet_sk(sk); | 189 | struct inet_sock *inet = inet_sk(sk); |
@@ -239,7 +239,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
239 | /* Restore final destination back after routing done */ | 239 | /* Restore final destination back after routing done */ |
240 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 240 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
241 | 241 | ||
242 | return ip6_xmit(sk, skb, &fl, np->opt, 0); | 242 | return ip6_xmit(sk, skb, &fl, np->opt); |
243 | } | 243 | } |
244 | 244 | ||
245 | EXPORT_SYMBOL_GPL(inet6_csk_xmit); | 245 | EXPORT_SYMBOL_GPL(inet6_csk_xmit); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 6b82e02158c6..92a122b7795d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -128,12 +128,24 @@ static __inline__ u32 fib6_new_sernum(void) | |||
128 | /* | 128 | /* |
129 | * test bit | 129 | * test bit |
130 | */ | 130 | */ |
131 | #if defined(__LITTLE_ENDIAN) | ||
132 | # define BITOP_BE32_SWIZZLE (0x1F & ~7) | ||
133 | #else | ||
134 | # define BITOP_BE32_SWIZZLE 0 | ||
135 | #endif | ||
131 | 136 | ||
132 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) | 137 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) |
133 | { | 138 | { |
134 | __be32 *addr = token; | 139 | __be32 *addr = token; |
135 | 140 | /* | |
136 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; | 141 | * Here, |
142 | * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) | ||
143 | * is optimized version of | ||
144 | * htonl(1 << ((~fn_bit)&0x1F)) | ||
145 | * See include/asm-generic/bitops/le.h. | ||
146 | */ | ||
147 | return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) & | ||
148 | addr[fn_bit >> 5]; | ||
137 | } | 149 | } |
138 | 150 | ||
139 | static __inline__ struct fib6_node * node_alloc(void) | 151 | static __inline__ struct fib6_node * node_alloc(void) |
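
(Sketch) The rewritten addr_bit_set() avoids the byte swap entirely: on little-endian, moving a single set bit across htonl() is the same as XOR-ing its bit index with 0x18 (the byte-selector bits), since 3 - k == 3 ^ k for k in 0..3. A standalone check of that equivalence over all 128 prefix bits; it relies on the GCC/Clang __BYTE_ORDER__ predefines to pick the swizzle like the patch does:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define BITOP_BE32_SWIZZLE (0x1F & ~7)   /* 0x18 */
#else
# define BITOP_BE32_SWIZZLE 0
#endif

int main(void)
{
	int fn_bit;

	for (fn_bit = 0; fn_bit < 128; fn_bit++) {
		uint32_t swapped  = htonl(1u << ((~fn_bit) & 0x1F));
		uint32_t swizzled = 1u << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f);

		if (swapped != swizzled) {
			printf("mismatch at bit %d\n", fn_bit);
			return 1;
		}
	}
	printf("identical for all 128 prefix bits\n");
	return 0;
}
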
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 14e23216eb28..13654686aeab 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -360,7 +360,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, | |||
360 | msg.msg_control = (void*)(fl->opt+1); | 360 | msg.msg_control = (void*)(fl->opt+1); |
361 | flowi.oif = 0; | 361 | flowi.oif = 0; |
362 | 362 | ||
363 | err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk); | 363 | err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, |
364 | &junk, &junk); | ||
364 | if (err) | 365 | if (err) |
365 | goto done; | 366 | goto done; |
366 | err = -EINVAL; | 367 | err = -EINVAL; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 75d5ef830097..7db09c3f5289 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -181,11 +181,11 @@ int ip6_output(struct sk_buff *skb) | |||
181 | } | 181 | } |
182 | 182 | ||
183 | /* | 183 | /* |
184 | * xmit an sk_buff (used by TCP) | 184 | * xmit an sk_buff (used by TCP, SCTP and DCCP) |
185 | */ | 185 | */ |
186 | 186 | ||
187 | int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | 187 | int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, |
188 | struct ipv6_txoptions *opt, int ipfragok) | 188 | struct ipv6_txoptions *opt) |
189 | { | 189 | { |
190 | struct net *net = sock_net(sk); | 190 | struct net *net = sock_net(sk); |
191 | struct ipv6_pinfo *np = inet6_sk(sk); | 191 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -231,10 +231,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
231 | skb_reset_network_header(skb); | 231 | skb_reset_network_header(skb); |
232 | hdr = ipv6_hdr(skb); | 232 | hdr = ipv6_hdr(skb); |
233 | 233 | ||
234 | /* Allow local fragmentation. */ | ||
235 | if (ipfragok) | ||
236 | skb->local_df = 1; | ||
237 | |||
238 | /* | 234 | /* |
239 | * Fill in the IPv6 header | 235 | * Fill in the IPv6 header |
240 | */ | 236 | */ |
@@ -1109,7 +1105,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1109 | int offset, int len, int odd, struct sk_buff *skb), | 1105 | int offset, int len, int odd, struct sk_buff *skb), |
1110 | void *from, int length, int transhdrlen, | 1106 | void *from, int length, int transhdrlen, |
1111 | int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl, | 1107 | int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl, |
1112 | struct rt6_info *rt, unsigned int flags) | 1108 | struct rt6_info *rt, unsigned int flags, int dontfrag) |
1113 | { | 1109 | { |
1114 | struct inet_sock *inet = inet_sk(sk); | 1110 | struct inet_sock *inet = inet_sk(sk); |
1115 | struct ipv6_pinfo *np = inet6_sk(sk); | 1111 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -1223,15 +1219,23 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1223 | */ | 1219 | */ |
1224 | 1220 | ||
1225 | inet->cork.length += length; | 1221 | inet->cork.length += length; |
1226 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && | 1222 | if (length > mtu) { |
1227 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 1223 | int proto = sk->sk_protocol; |
1224 | if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ | ||
1225 | ipv6_local_rxpmtu(sk, fl, mtu-exthdrlen); | ||
1226 | return -EMSGSIZE; | ||
1227 | } | ||
1228 | 1228 | ||
1229 | err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, | 1229 | if (proto == IPPROTO_UDP && |
1230 | fragheaderlen, transhdrlen, mtu, | 1230 | (rt->u.dst.dev->features & NETIF_F_UFO)) { |
1231 | flags); | 1231 | |
1232 | if (err) | 1232 | err = ip6_ufo_append_data(sk, getfrag, from, length, |
1233 | goto error; | 1233 | hh_len, fragheaderlen, |
1234 | return 0; | 1234 | transhdrlen, mtu, flags); |
1235 | if (err) | ||
1236 | goto error; | ||
1237 | return 0; | ||
1238 | } | ||
1235 | } | 1239 | } |
1236 | 1240 | ||
1237 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) | 1241 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) |
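
(Sketch) The restructured length > mtu branch above gives IPV6_DONTFRAG priority over UFO: an oversized UDP or raw datagram on a dontfrag socket is bounced with -EMSGSIZE (after queueing a path-MTU report via ipv6_local_rxpmtu()), and only UDP on a UFO-capable device hands the oversized buffer to hardware. The decision logic, condensed into a hypothetical standalone helper rather than kernel code:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <netinet/in.h>

static int oversize_policy(int proto, bool dontfrag, bool dev_has_ufo,
			   int length, int mtu)
{
	if (length <= mtu)
		return 0;                 /* fits: normal append path */
	if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW))
		return -EMSGSIZE;         /* report PMTU, refuse to fragment */
	if (proto == IPPROTO_UDP && dev_has_ufo)
		return 1;                 /* hand segmentation to the device */
	return 0;                         /* software fragmentation later */
}

int main(void)
{
	/* dontfrag wins even on a UFO-capable device */
	printf("%d\n", oversize_policy(IPPROTO_UDP, true, true, 3000, 1500));
	return 0;
}
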
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 33f60fca7aa7..bd43f0152c21 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -114,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
114 | } | 114 | } |
115 | opt = xchg(&inet6_sk(sk)->opt, opt); | 115 | opt = xchg(&inet6_sk(sk)->opt, opt); |
116 | } else { | 116 | } else { |
117 | write_lock(&sk->sk_dst_lock); | 117 | spin_lock(&sk->sk_dst_lock); |
118 | opt = xchg(&inet6_sk(sk)->opt, opt); | 118 | opt = xchg(&inet6_sk(sk)->opt, opt); |
119 | write_unlock(&sk->sk_dst_lock); | 119 | spin_unlock(&sk->sk_dst_lock); |
120 | } | 120 | } |
121 | sk_dst_reset(sk); | 121 | sk_dst_reset(sk); |
122 | 122 | ||
@@ -337,6 +337,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
337 | retv = 0; | 337 | retv = 0; |
338 | break; | 338 | break; |
339 | 339 | ||
340 | case IPV6_RECVPATHMTU: | ||
341 | if (optlen < sizeof(int)) | ||
342 | goto e_inval; | ||
343 | np->rxopt.bits.rxpmtu = valbool; | ||
344 | retv = 0; | ||
345 | break; | ||
346 | |||
340 | case IPV6_HOPOPTS: | 347 | case IPV6_HOPOPTS: |
341 | case IPV6_RTHDRDSTOPTS: | 348 | case IPV6_RTHDRDSTOPTS: |
342 | case IPV6_RTHDR: | 349 | case IPV6_RTHDR: |
@@ -451,7 +458,8 @@ sticky_done: | |||
451 | msg.msg_controllen = optlen; | 458 | msg.msg_controllen = optlen; |
452 | msg.msg_control = (void*)(opt+1); | 459 | msg.msg_control = (void*)(opt+1); |
453 | 460 | ||
454 | retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk); | 461 | retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk, |
462 | &junk); | ||
455 | if (retv) | 463 | if (retv) |
456 | goto done; | 464 | goto done; |
457 | update: | 465 | update: |
@@ -767,6 +775,17 @@ pref_skip_coa: | |||
767 | 775 | ||
768 | break; | 776 | break; |
769 | } | 777 | } |
778 | case IPV6_MINHOPCOUNT: | ||
779 | if (optlen < sizeof(int)) | ||
780 | goto e_inval; | ||
781 | if (val < 0 || val > 255) | ||
782 | goto e_inval; | ||
783 | np->min_hopcount = val; | ||
784 | break; | ||
785 | case IPV6_DONTFRAG: | ||
786 | np->dontfrag = valbool; | ||
787 | retv = 0; | ||
788 | break; | ||
770 | } | 789 | } |
771 | 790 | ||
772 | release_sock(sk); | 791 | release_sock(sk); |
@@ -971,14 +990,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
971 | case IPV6_MTU: | 990 | case IPV6_MTU: |
972 | { | 991 | { |
973 | struct dst_entry *dst; | 992 | struct dst_entry *dst; |
993 | |||
974 | val = 0; | 994 | val = 0; |
975 | lock_sock(sk); | 995 | rcu_read_lock(); |
976 | dst = sk_dst_get(sk); | 996 | dst = __sk_dst_get(sk); |
977 | if (dst) { | 997 | if (dst) |
978 | val = dst_mtu(dst); | 998 | val = dst_mtu(dst); |
979 | dst_release(dst); | 999 | rcu_read_unlock(); |
980 | } | ||
981 | release_sock(sk); | ||
982 | if (!val) | 1000 | if (!val) |
983 | return -ENOTCONN; | 1001 | return -ENOTCONN; |
984 | break; | 1002 | break; |
@@ -1056,6 +1074,38 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1056 | val = np->rxopt.bits.rxflow; | 1074 | val = np->rxopt.bits.rxflow; |
1057 | break; | 1075 | break; |
1058 | 1076 | ||
1077 | case IPV6_RECVPATHMTU: | ||
1078 | val = np->rxopt.bits.rxpmtu; | ||
1079 | break; | ||
1080 | |||
1081 | case IPV6_PATHMTU: | ||
1082 | { | ||
1083 | struct dst_entry *dst; | ||
1084 | struct ip6_mtuinfo mtuinfo; | ||
1085 | |||
1086 | if (len < sizeof(mtuinfo)) | ||
1087 | return -EINVAL; | ||
1088 | |||
1089 | len = sizeof(mtuinfo); | ||
1090 | memset(&mtuinfo, 0, sizeof(mtuinfo)); | ||
1091 | |||
1092 | rcu_read_lock(); | ||
1093 | dst = __sk_dst_get(sk); | ||
1094 | if (dst) | ||
1095 | mtuinfo.ip6m_mtu = dst_mtu(dst); | ||
1096 | rcu_read_unlock(); | ||
1097 | if (!mtuinfo.ip6m_mtu) | ||
1098 | return -ENOTCONN; | ||
1099 | |||
1100 | if (put_user(len, optlen)) | ||
1101 | return -EFAULT; | ||
1102 | if (copy_to_user(optval, &mtuinfo, len)) | ||
1103 | return -EFAULT; | ||
1104 | |||
1105 | return 0; | ||
1106 | break; | ||
1107 | } | ||
1108 | |||
1059 | case IPV6_UNICAST_HOPS: | 1109 | case IPV6_UNICAST_HOPS: |
1060 | case IPV6_MULTICAST_HOPS: | 1110 | case IPV6_MULTICAST_HOPS: |
1061 | { | 1111 | { |
@@ -1066,12 +1116,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1066 | else | 1116 | else |
1067 | val = np->mcast_hops; | 1117 | val = np->mcast_hops; |
1068 | 1118 | ||
1069 | dst = sk_dst_get(sk); | 1119 | if (val < 0) { |
1070 | if (dst) { | 1120 | rcu_read_lock(); |
1071 | if (val < 0) | 1121 | dst = __sk_dst_get(sk); |
1122 | if (dst) | ||
1072 | val = ip6_dst_hoplimit(dst); | 1123 | val = ip6_dst_hoplimit(dst); |
1073 | dst_release(dst); | 1124 | rcu_read_unlock(); |
1074 | } | 1125 | } |
1126 | |||
1075 | if (val < 0) | 1127 | if (val < 0) |
1076 | val = sock_net(sk)->ipv6.devconf_all->hop_limit; | 1128 | val = sock_net(sk)->ipv6.devconf_all->hop_limit; |
1077 | break; | 1129 | break; |
@@ -1115,6 +1167,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1115 | val |= IPV6_PREFER_SRC_HOME; | 1167 | val |= IPV6_PREFER_SRC_HOME; |
1116 | break; | 1168 | break; |
1117 | 1169 | ||
1170 | case IPV6_MINHOPCOUNT: | ||
1171 | val = np->min_hopcount; | ||
1172 | break; | ||
1173 | |||
1174 | case IPV6_DONTFRAG: | ||
1175 | val = np->dontfrag; | ||
1176 | break; | ||
1177 | |||
1118 | default: | 1178 | default: |
1119 | return -ENOPROTOOPT; | 1179 | return -ENOPROTOOPT; |
1120 | } | 1180 | } |
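
(Sketch) From userspace, the two new options wired up above are ordinary socket options. The sketch below assumes the libc exports the IPV6_DONTFRAG, IPV6_RECVPATHMTU and IPV6_PATHMTU names (later glibc does); error handling is trimmed for brevity:

#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int on = 1;

	if (fd < 0)
		return 1;

	/* refuse local fragmentation on this socket */
	setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &on, sizeof(on));
	/* queue path-MTU reports for ipv6_recv_rxpmtu() */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on));

	/* ... sendmsg(); when it fails with EMSGSIZE, read the report */
	/* back with a recvmsg() that looks for a cmsg of type         */
	/* IPV6_PATHMTU carrying a struct ip6_mtuinfo ...              */

	close(fd);
	return 0;
}
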
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index c483ab9fd67b..006aee683a0f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/proc_fs.h> | 44 | #include <linux/proc_fs.h> |
45 | #include <linux/seq_file.h> | 45 | #include <linux/seq_file.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <net/mld.h> | ||
47 | 48 | ||
48 | #include <linux/netfilter.h> | 49 | #include <linux/netfilter.h> |
49 | #include <linux/netfilter_ipv6.h> | 50 | #include <linux/netfilter_ipv6.h> |
@@ -71,54 +72,11 @@ | |||
71 | #define MDBG(x) | 72 | #define MDBG(x) |
72 | #endif | 73 | #endif |
73 | 74 | ||
74 | /* | 75 | /* Ensure that we have struct in6_addr aligned on 32bit word. */ |
75 | * These header formats should be in a separate include file, but icmpv6.h | 76 | static void *__mld2_query_bugs[] __attribute__((__unused__)) = { |
76 | * doesn't have in6_addr defined in all cases, there is no __u128, and no | 77 | BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), |
77 | * other files reference these. | 78 | BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4), |
78 | * | 79 | BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4) |
79 | * +-DLS 4/14/03 | ||
80 | */ | ||
81 | |||
82 | /* Multicast Listener Discovery version 2 headers */ | ||
83 | |||
84 | struct mld2_grec { | ||
85 | __u8 grec_type; | ||
86 | __u8 grec_auxwords; | ||
87 | __be16 grec_nsrcs; | ||
88 | struct in6_addr grec_mca; | ||
89 | struct in6_addr grec_src[0]; | ||
90 | }; | ||
91 | |||
92 | struct mld2_report { | ||
93 | __u8 type; | ||
94 | __u8 resv1; | ||
95 | __sum16 csum; | ||
96 | __be16 resv2; | ||
97 | __be16 ngrec; | ||
98 | struct mld2_grec grec[0]; | ||
99 | }; | ||
100 | |||
101 | struct mld2_query { | ||
102 | __u8 type; | ||
103 | __u8 code; | ||
104 | __sum16 csum; | ||
105 | __be16 mrc; | ||
106 | __be16 resv1; | ||
107 | struct in6_addr mca; | ||
108 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
109 | __u8 qrv:3, | ||
110 | suppress:1, | ||
111 | resv2:4; | ||
112 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
113 | __u8 resv2:4, | ||
114 | suppress:1, | ||
115 | qrv:3; | ||
116 | #else | ||
117 | #error "Please fix <asm/byteorder.h>" | ||
118 | #endif | ||
119 | __u8 qqic; | ||
120 | __be16 nsrcs; | ||
121 | struct in6_addr srcs[0]; | ||
122 | }; | 80 | }; |
123 | 81 | ||
124 | static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; | 82 | static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; |
@@ -157,14 +115,6 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, | |||
157 | ((idev)->mc_v1_seen && \ | 115 | ((idev)->mc_v1_seen && \ |
158 | time_before(jiffies, (idev)->mc_v1_seen))) | 116 | time_before(jiffies, (idev)->mc_v1_seen))) |
159 | 117 | ||
160 | #define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value)) | ||
161 | #define MLDV2_EXP(thresh, nbmant, nbexp, value) \ | ||
162 | ((value) < (thresh) ? (value) : \ | ||
163 | ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \ | ||
164 | (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp)))) | ||
165 | |||
166 | #define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value) | ||
167 | |||
168 | #define IPV6_MLD_MAX_MSF 64 | 118 | #define IPV6_MLD_MAX_MSF 64 |
169 | 119 | ||
170 | int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; | 120 | int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; |
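
(Sketch) The MLDV2_MRC() macro removed here (it moves to <net/mld.h> along with the header structs, and is still used below) decodes the RFC 3810 section 5.1.3 Maximum Response Code: values below 32768 are literal milliseconds, larger ones a 3-bit-exponent / 12-bit-mantissa floating-point encoding. An equivalent open-coded sketch, not the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* open-coded MLDV2_EXP(0x8000, 12, 3, v) */
static unsigned int mldv2_mrc(uint16_t v)
{
	if (v < 0x8000)
		return v;                               /* literal value */
	/* 1eeemmmmmmmmmmmm -> (mant | 0x1000) << (exp + 3) */
	return ((v & 0x0fffu) | 0x1000u) << (((v >> 12) & 0x7) + 3);
}

int main(void)
{
	printf("%u\n", mldv2_mrc(1000));   /* 1000  (literal) */
	printf("%u\n", mldv2_mrc(0x8000)); /* 32768 (mant 0, exp 0) */
	printf("%u\n", mldv2_mrc(0xffff)); /* 8387584 = 0x1fff << 10 */
	return 0;
}
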
@@ -715,7 +665,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) | |||
715 | if (!(mc->mca_flags&MAF_LOADED)) { | 665 | if (!(mc->mca_flags&MAF_LOADED)) { |
716 | mc->mca_flags |= MAF_LOADED; | 666 | mc->mca_flags |= MAF_LOADED; |
717 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) | 667 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) |
718 | dev_mc_add(dev, buf, dev->addr_len, 0); | 668 | dev_mc_add(dev, buf); |
719 | } | 669 | } |
720 | spin_unlock_bh(&mc->mca_lock); | 670 | spin_unlock_bh(&mc->mca_lock); |
721 | 671 | ||
@@ -741,7 +691,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc) | |||
741 | if (mc->mca_flags&MAF_LOADED) { | 691 | if (mc->mca_flags&MAF_LOADED) { |
742 | mc->mca_flags &= ~MAF_LOADED; | 692 | mc->mca_flags &= ~MAF_LOADED; |
743 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) | 693 | if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) |
744 | dev_mc_delete(dev, buf, dev->addr_len, 0); | 694 | dev_mc_del(dev, buf); |
745 | } | 695 | } |
746 | 696 | ||
747 | if (mc->mca_flags & MAF_NOREPORT) | 697 | if (mc->mca_flags & MAF_NOREPORT) |
@@ -1161,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1161 | struct in6_addr *group; | 1111 | struct in6_addr *group; |
1162 | unsigned long max_delay; | 1112 | unsigned long max_delay; |
1163 | struct inet6_dev *idev; | 1113 | struct inet6_dev *idev; |
1164 | struct icmp6hdr *hdr; | 1114 | struct mld_msg *mld; |
1165 | int group_type; | 1115 | int group_type; |
1166 | int mark = 0; | 1116 | int mark = 0; |
1167 | int len; | 1117 | int len; |
@@ -1182,8 +1132,8 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1182 | if (idev == NULL) | 1132 | if (idev == NULL) |
1183 | return 0; | 1133 | return 0; |
1184 | 1134 | ||
1185 | hdr = icmp6_hdr(skb); | 1135 | mld = (struct mld_msg *)icmp6_hdr(skb); |
1186 | group = (struct in6_addr *) (hdr + 1); | 1136 | group = &mld->mld_mca; |
1187 | group_type = ipv6_addr_type(group); | 1137 | group_type = ipv6_addr_type(group); |
1188 | 1138 | ||
1189 | if (group_type != IPV6_ADDR_ANY && | 1139 | if (group_type != IPV6_ADDR_ANY && |
@@ -1197,7 +1147,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1197 | /* MLDv1 router present */ | 1147 | /* MLDv1 router present */ |
1198 | 1148 | ||
1199 | /* Translate milliseconds to jiffies */ | 1149 | /* Translate milliseconds to jiffies */ |
1200 | max_delay = (ntohs(hdr->icmp6_maxdelay)*HZ)/1000; | 1150 | max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000; |
1201 | 1151 | ||
1202 | switchback = (idev->mc_qrv + 1) * max_delay; | 1152 | switchback = (idev->mc_qrv + 1) * max_delay; |
1203 | idev->mc_v1_seen = jiffies + switchback; | 1153 | idev->mc_v1_seen = jiffies + switchback; |
@@ -1216,14 +1166,14 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1216 | return -EINVAL; | 1166 | return -EINVAL; |
1217 | } | 1167 | } |
1218 | mlh2 = (struct mld2_query *)skb_transport_header(skb); | 1168 | mlh2 = (struct mld2_query *)skb_transport_header(skb); |
1219 | max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000; | 1169 | max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; |
1220 | if (!max_delay) | 1170 | if (!max_delay) |
1221 | max_delay = 1; | 1171 | max_delay = 1; |
1222 | idev->mc_maxdelay = max_delay; | 1172 | idev->mc_maxdelay = max_delay; |
1223 | if (mlh2->qrv) | 1173 | if (mlh2->mld2q_qrv) |
1224 | idev->mc_qrv = mlh2->qrv; | 1174 | idev->mc_qrv = mlh2->mld2q_qrv; |
1225 | if (group_type == IPV6_ADDR_ANY) { /* general query */ | 1175 | if (group_type == IPV6_ADDR_ANY) { /* general query */ |
1226 | if (mlh2->nsrcs) { | 1176 | if (mlh2->mld2q_nsrcs) { |
1227 | in6_dev_put(idev); | 1177 | in6_dev_put(idev); |
1228 | return -EINVAL; /* no sources allowed */ | 1178 | return -EINVAL; /* no sources allowed */ |
1229 | } | 1179 | } |
@@ -1232,9 +1182,9 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1232 | return 0; | 1182 | return 0; |
1233 | } | 1183 | } |
1234 | /* mark sources to include, if group & source-specific */ | 1184 | /* mark sources to include, if group & source-specific */ |
1235 | if (mlh2->nsrcs != 0) { | 1185 | if (mlh2->mld2q_nsrcs != 0) { |
1236 | if (!pskb_may_pull(skb, srcs_offset + | 1186 | if (!pskb_may_pull(skb, srcs_offset + |
1237 | ntohs(mlh2->nsrcs) * sizeof(struct in6_addr))) { | 1187 | ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) { |
1238 | in6_dev_put(idev); | 1188 | in6_dev_put(idev); |
1239 | return -EINVAL; | 1189 | return -EINVAL; |
1240 | } | 1190 | } |
@@ -1270,7 +1220,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1270 | ma->mca_flags &= ~MAF_GSQUERY; | 1220 | ma->mca_flags &= ~MAF_GSQUERY; |
1271 | } | 1221 | } |
1272 | if (!(ma->mca_flags & MAF_GSQUERY) || | 1222 | if (!(ma->mca_flags & MAF_GSQUERY) || |
1273 | mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs)) | 1223 | mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs)) |
1274 | igmp6_group_queried(ma, max_delay); | 1224 | igmp6_group_queried(ma, max_delay); |
1275 | spin_unlock_bh(&ma->mca_lock); | 1225 | spin_unlock_bh(&ma->mca_lock); |
1276 | break; | 1226 | break; |
@@ -1286,9 +1236,8 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1286 | int igmp6_event_report(struct sk_buff *skb) | 1236 | int igmp6_event_report(struct sk_buff *skb) |
1287 | { | 1237 | { |
1288 | struct ifmcaddr6 *ma; | 1238 | struct ifmcaddr6 *ma; |
1289 | struct in6_addr *addrp; | ||
1290 | struct inet6_dev *idev; | 1239 | struct inet6_dev *idev; |
1291 | struct icmp6hdr *hdr; | 1240 | struct mld_msg *mld; |
1292 | int addr_type; | 1241 | int addr_type; |
1293 | 1242 | ||
1294 | /* Our own report looped back. Ignore it. */ | 1243 | /* Our own report looped back. Ignore it. */ |
@@ -1300,10 +1249,10 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1300 | skb->pkt_type != PACKET_BROADCAST) | 1249 | skb->pkt_type != PACKET_BROADCAST) |
1301 | return 0; | 1250 | return 0; |
1302 | 1251 | ||
1303 | if (!pskb_may_pull(skb, sizeof(struct in6_addr))) | 1252 | if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr))) |
1304 | return -EINVAL; | 1253 | return -EINVAL; |
1305 | 1254 | ||
1306 | hdr = icmp6_hdr(skb); | 1255 | mld = (struct mld_msg *)icmp6_hdr(skb); |
1307 | 1256 | ||
1308 | /* Drop reports with not link local source */ | 1257 | /* Drop reports with not link local source */ |
1309 | addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); | 1258 | addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); |
@@ -1311,8 +1260,6 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1311 | !(addr_type&IPV6_ADDR_LINKLOCAL)) | 1260 | !(addr_type&IPV6_ADDR_LINKLOCAL)) |
1312 | return -EINVAL; | 1261 | return -EINVAL; |
1313 | 1262 | ||
1314 | addrp = (struct in6_addr *) (hdr + 1); | ||
1315 | |||
1316 | idev = in6_dev_get(skb->dev); | 1263 | idev = in6_dev_get(skb->dev); |
1317 | if (idev == NULL) | 1264 | if (idev == NULL) |
1318 | return -ENODEV; | 1265 | return -ENODEV; |
@@ -1323,7 +1270,7 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1323 | 1270 | ||
1324 | read_lock_bh(&idev->lock); | 1271 | read_lock_bh(&idev->lock); |
1325 | for (ma = idev->mc_list; ma; ma=ma->next) { | 1272 | for (ma = idev->mc_list; ma; ma=ma->next) { |
1326 | if (ipv6_addr_equal(&ma->mca_addr, addrp)) { | 1273 | if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) { |
1327 | spin_lock(&ma->mca_lock); | 1274 | spin_lock(&ma->mca_lock); |
1328 | if (del_timer(&ma->mca_timer)) | 1275 | if (del_timer(&ma->mca_timer)) |
1329 | atomic_dec(&ma->mca_refcnt); | 1276 | atomic_dec(&ma->mca_refcnt); |
@@ -1432,11 +1379,11 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) | |||
1432 | skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); | 1379 | skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); |
1433 | skb_put(skb, sizeof(*pmr)); | 1380 | skb_put(skb, sizeof(*pmr)); |
1434 | pmr = (struct mld2_report *)skb_transport_header(skb); | 1381 | pmr = (struct mld2_report *)skb_transport_header(skb); |
1435 | pmr->type = ICMPV6_MLD2_REPORT; | 1382 | pmr->mld2r_type = ICMPV6_MLD2_REPORT; |
1436 | pmr->resv1 = 0; | 1383 | pmr->mld2r_resv1 = 0; |
1437 | pmr->csum = 0; | 1384 | pmr->mld2r_cksum = 0; |
1438 | pmr->resv2 = 0; | 1385 | pmr->mld2r_resv2 = 0; |
1439 | pmr->ngrec = 0; | 1386 | pmr->mld2r_ngrec = 0; |
1440 | return skb; | 1387 | return skb; |
1441 | } | 1388 | } |
1442 | 1389 | ||
@@ -1458,9 +1405,10 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1458 | mldlen = skb->tail - skb->transport_header; | 1405 | mldlen = skb->tail - skb->transport_header; |
1459 | pip6->payload_len = htons(payload_len); | 1406 | pip6->payload_len = htons(payload_len); |
1460 | 1407 | ||
1461 | pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, | 1408 | pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, |
1462 | IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), | 1409 | IPPROTO_ICMPV6, |
1463 | mldlen, 0)); | 1410 | csum_partial(skb_transport_header(skb), |
1411 | mldlen, 0)); | ||
1464 | 1412 | ||
1465 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | 1413 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); |
1466 | 1414 | ||
@@ -1521,7 +1469,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1521 | pgr->grec_nsrcs = 0; | 1469 | pgr->grec_nsrcs = 0; |
1522 | pgr->grec_mca = pmc->mca_addr; /* structure copy */ | 1470 | pgr->grec_mca = pmc->mca_addr; /* structure copy */ |
1523 | pmr = (struct mld2_report *)skb_transport_header(skb); | 1471 | pmr = (struct mld2_report *)skb_transport_header(skb); |
1524 | pmr->ngrec = htons(ntohs(pmr->ngrec)+1); | 1472 | pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1); |
1525 | *ppgr = pgr; | 1473 | *ppgr = pgr; |
1526 | return skb; | 1474 | return skb; |
1527 | } | 1475 | } |
@@ -1557,7 +1505,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
1557 | 1505 | ||
1558 | /* EX and TO_EX get a fresh packet, if needed */ | 1506 | /* EX and TO_EX get a fresh packet, if needed */ |
1559 | if (truncate) { | 1507 | if (truncate) { |
1560 | if (pmr && pmr->ngrec && | 1508 | if (pmr && pmr->mld2r_ngrec && |
1561 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { | 1509 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { |
1562 | if (skb) | 1510 | if (skb) |
1563 | mld_sendpack(skb); | 1511 | mld_sendpack(skb); |
@@ -1770,9 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1770 | struct sock *sk = net->ipv6.igmp_sk; | 1718 | struct sock *sk = net->ipv6.igmp_sk; |
1771 | struct inet6_dev *idev; | 1719 | struct inet6_dev *idev; |
1772 | struct sk_buff *skb; | 1720 | struct sk_buff *skb; |
1773 | struct icmp6hdr *hdr; | 1721 | struct mld_msg *hdr; |
1774 | const struct in6_addr *snd_addr, *saddr; | 1722 | const struct in6_addr *snd_addr, *saddr; |
1775 | struct in6_addr *addrp; | ||
1776 | struct in6_addr addr_buf; | 1723 | struct in6_addr addr_buf; |
1777 | int err, len, payload_len, full_len; | 1724 | int err, len, payload_len, full_len; |
1778 | u8 ra[8] = { IPPROTO_ICMPV6, 0, | 1725 | u8 ra[8] = { IPPROTO_ICMPV6, 0, |
@@ -1820,16 +1767,14 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1820 | 1767 | ||
1821 | memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); | 1768 | memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); |
1822 | 1769 | ||
1823 | hdr = (struct icmp6hdr *) skb_put(skb, sizeof(struct icmp6hdr)); | 1770 | hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg)); |
1824 | memset(hdr, 0, sizeof(struct icmp6hdr)); | 1771 | memset(hdr, 0, sizeof(struct mld_msg)); |
1825 | hdr->icmp6_type = type; | 1772 | hdr->mld_type = type; |
1773 | ipv6_addr_copy(&hdr->mld_mca, addr); | ||
1826 | 1774 | ||
1827 | addrp = (struct in6_addr *) skb_put(skb, sizeof(struct in6_addr)); | 1775 | hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len, |
1828 | ipv6_addr_copy(addrp, addr); | 1776 | IPPROTO_ICMPV6, |
1829 | 1777 | csum_partial(hdr, len, 0)); | |
1830 | hdr->icmp6_cksum = csum_ipv6_magic(saddr, snd_addr, len, | ||
1831 | IPPROTO_ICMPV6, | ||
1832 | csum_partial(hdr, len, 0)); | ||
1833 | 1778 | ||
1834 | idev = in6_dev_get(skb->dev); | 1779 | idev = in6_dev_get(skb->dev); |
1835 | 1780 | ||
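The mld_* / mld2q_* / mld2r_* renames in the mcast.c hunks above assume dedicated MLD message structures that simply overlay struct icmp6hdr, so fields such as the multicast address no longer have to be reached by pointer arithmetic past the ICMPv6 header. A minimal sketch of the layout those accessors imply (the field names come from the diff itself; the exact header file, the mld2q_suppress/mld2q_qqic fields, and the bitfield order are assumptions based on the RFC 3810 wire format):

	/* sketch; needs <linux/icmpv6.h> and <linux/in6.h> */
	struct mld_msg {
		struct icmp6hdr	mld_hdr;
		struct in6_addr	mld_mca;	/* group being queried/reported */
	};
	#define mld_type	mld_hdr.icmp6_type
	#define mld_cksum	mld_hdr.icmp6_cksum

	struct mld2_query {
		struct icmp6hdr	mld2q_hdr;
		struct in6_addr	mld2q_mca;
		__u8		mld2q_qrv:3,	/* querier's robustness variable */
				mld2q_suppress:1,
				mld2q_resv2:4;
		__u8		mld2q_qqic;
		__be16		mld2q_nsrcs;	/* number of mld2q_srcs[] entries */
		struct in6_addr	mld2q_srcs[0];
	};
	#define mld2q_mrc	mld2q_hdr.icmp6_maxdelay

struct mld2_report wraps struct icmp6hdr the same way, mapping mld2r_ngrec onto a 16-bit data word of the ICMPv6 header. With these overlays, casting the header once, as igmp6_event_report() now does with (struct mld_msg *)icmp6_hdr(skb), replaces the old "addrp = (struct in6_addr *)(hdr + 1)" pattern.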
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index cbe8dec9744b..e60677519e40 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c | |||
@@ -141,11 +141,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | /* Step to the next */ | 143 | /* Step to the next */ |
144 | pr_debug("len%04X \n", optlen); | 144 | pr_debug("len%04X\n", optlen); |
145 | 145 | ||
146 | if ((ptr > skb->len - optlen || hdrlen < optlen) && | 146 | if ((ptr > skb->len - optlen || hdrlen < optlen) && |
147 | temp < optinfo->optsnr - 1) { | 147 | temp < optinfo->optsnr - 1) { |
148 | pr_debug("new pointer is too large! \n"); | 148 | pr_debug("new pointer is too large!\n"); |
149 | break; | 149 | break; |
150 | } | 150 | } |
151 | ptr += optlen; | 151 | ptr += optlen; |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 58344c0fbd13..458eabfbe130 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = { | |||
97 | SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), | 97 | SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), |
98 | SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), | 98 | SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), |
99 | SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), | 99 | SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), |
100 | SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS), | ||
100 | SNMP_MIB_SENTINEL | 101 | SNMP_MIB_SENTINEL |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 8763b1a0814a..85627386cb02 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -461,6 +461,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
461 | if (flags & MSG_ERRQUEUE) | 461 | if (flags & MSG_ERRQUEUE) |
462 | return ipv6_recv_error(sk, msg, len); | 462 | return ipv6_recv_error(sk, msg, len); |
463 | 463 | ||
464 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | ||
465 | return ipv6_recv_rxpmtu(sk, msg, len); | ||
466 | |||
464 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 467 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
465 | if (!skb) | 468 | if (!skb) |
466 | goto out; | 469 | goto out; |
@@ -733,6 +736,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
733 | int addr_len = msg->msg_namelen; | 736 | int addr_len = msg->msg_namelen; |
734 | int hlimit = -1; | 737 | int hlimit = -1; |
735 | int tclass = -1; | 738 | int tclass = -1; |
739 | int dontfrag = -1; | ||
736 | u16 proto; | 740 | u16 proto; |
737 | int err; | 741 | int err; |
738 | 742 | ||
@@ -811,7 +815,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
811 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 815 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
812 | opt->tot_len = sizeof(struct ipv6_txoptions); | 816 | opt->tot_len = sizeof(struct ipv6_txoptions); |
813 | 817 | ||
814 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); | 818 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, |
819 | &tclass, &dontfrag); | ||
815 | if (err < 0) { | 820 | if (err < 0) { |
816 | fl6_sock_release(flowlabel); | 821 | fl6_sock_release(flowlabel); |
817 | return err; | 822 | return err; |
@@ -880,6 +885,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
880 | if (tclass < 0) | 885 | if (tclass < 0) |
881 | tclass = np->tclass; | 886 | tclass = np->tclass; |
882 | 887 | ||
888 | if (dontfrag < 0) | ||
889 | dontfrag = np->dontfrag; | ||
890 | |||
883 | if (msg->msg_flags&MSG_CONFIRM) | 891 | if (msg->msg_flags&MSG_CONFIRM) |
884 | goto do_confirm; | 892 | goto do_confirm; |
885 | 893 | ||
@@ -890,7 +898,7 @@ back_from_confirm: | |||
890 | lock_sock(sk); | 898 | lock_sock(sk); |
891 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, | 899 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, |
892 | len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, | 900 | len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, |
893 | msg->msg_flags); | 901 | msg->msg_flags, dontfrag); |
894 | 902 | ||
895 | if (err) | 903 | if (err) |
896 | ip6_flush_pending_frames(sk); | 904 | ip6_flush_pending_frames(sk); |
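Taken together with the udp.c hunks further down, the raw.c changes above wire up the RFC 3542 "don't fragment" behaviour for IPv6 datagram sockets: with dontfrag set, a send larger than the path MTU fails instead of being fragmented, and if rxpmtu is armed the queued MTU notice is returned to the next recvmsg() via ipv6_recv_rxpmtu(). A hedged userspace sketch of opting in (it assumes libc headers new enough to carry the IPV6_DONTFRAG and IPV6_RECVPATHMTU constants added alongside this series):

	#include <netinet/in.h>
	#include <sys/socket.h>

	/* Ask for don't-fragment sends plus queued path-MTU reports; after
	 * a send fails with EMSGSIZE, recvmsg() on the same socket yields
	 * an IPV6_PATHMTU ancillary message carrying a struct ip6_mtuinfo. */
	static int enable_pmtu_probing(int fd)
	{
		int on = 1;

		if (setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &on, sizeof(on)) < 0)
			return -1;
		return setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on));
	}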
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 075f540ec197..6603511e3673 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -75,6 +75,9 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
75 | struct request_sock *req); | 75 | struct request_sock *req); |
76 | 76 | ||
77 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | 77 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
78 | static void __tcp_v6_send_check(struct sk_buff *skb, | ||
79 | struct in6_addr *saddr, | ||
80 | struct in6_addr *daddr); | ||
78 | 81 | ||
79 | static const struct inet_connection_sock_af_ops ipv6_mapped; | 82 | static const struct inet_connection_sock_af_ops ipv6_mapped; |
80 | static const struct inet_connection_sock_af_ops ipv6_specific; | 83 | static const struct inet_connection_sock_af_ops ipv6_specific; |
@@ -350,6 +353,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
350 | if (sk->sk_state == TCP_CLOSE) | 353 | if (sk->sk_state == TCP_CLOSE) |
351 | goto out; | 354 | goto out; |
352 | 355 | ||
356 | if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { | ||
357 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | ||
358 | goto out; | ||
359 | } | ||
360 | |||
353 | tp = tcp_sk(sk); | 361 | tp = tcp_sk(sk); |
354 | seq = ntohl(th->seq); | 362 | seq = ntohl(th->seq); |
355 | if (sk->sk_state != TCP_LISTEN && | 363 | if (sk->sk_state != TCP_LISTEN && |
@@ -503,14 +511,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
503 | 511 | ||
504 | skb = tcp_make_synack(sk, dst, req, rvp); | 512 | skb = tcp_make_synack(sk, dst, req, rvp); |
505 | if (skb) { | 513 | if (skb) { |
506 | struct tcphdr *th = tcp_hdr(skb); | 514 | __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); |
507 | |||
508 | th->check = tcp_v6_check(skb->len, | ||
509 | &treq->loc_addr, &treq->rmt_addr, | ||
510 | csum_partial(th, skb->len, skb->csum)); | ||
511 | 515 | ||
512 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); | 516 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); |
513 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 517 | err = ip6_xmit(sk, skb, &fl, opt); |
514 | err = net_xmit_eval(err); | 518 | err = net_xmit_eval(err); |
515 | } | 519 | } |
516 | 520 | ||
@@ -918,22 +922,29 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { | |||
918 | .twsk_destructor= tcp_twsk_destructor, | 922 | .twsk_destructor= tcp_twsk_destructor, |
919 | }; | 923 | }; |
920 | 924 | ||
921 | static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | 925 | static void __tcp_v6_send_check(struct sk_buff *skb, |
926 | struct in6_addr *saddr, struct in6_addr *daddr) | ||
922 | { | 927 | { |
923 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
924 | struct tcphdr *th = tcp_hdr(skb); | 928 | struct tcphdr *th = tcp_hdr(skb); |
925 | 929 | ||
926 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 930 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
927 | th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); | 931 | th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0); |
928 | skb->csum_start = skb_transport_header(skb) - skb->head; | 932 | skb->csum_start = skb_transport_header(skb) - skb->head; |
929 | skb->csum_offset = offsetof(struct tcphdr, check); | 933 | skb->csum_offset = offsetof(struct tcphdr, check); |
930 | } else { | 934 | } else { |
931 | th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, | 935 | th->check = tcp_v6_check(skb->len, saddr, daddr, |
932 | csum_partial(th, th->doff<<2, | 936 | csum_partial(th, th->doff << 2, |
933 | skb->csum)); | 937 | skb->csum)); |
934 | } | 938 | } |
935 | } | 939 | } |
936 | 940 | ||
941 | static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) | ||
942 | { | ||
943 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
944 | |||
945 | __tcp_v6_send_check(skb, &np->saddr, &np->daddr); | ||
946 | } | ||
947 | |||
937 | static int tcp_v6_gso_send_check(struct sk_buff *skb) | 948 | static int tcp_v6_gso_send_check(struct sk_buff *skb) |
938 | { | 949 | { |
939 | struct ipv6hdr *ipv6h; | 950 | struct ipv6hdr *ipv6h; |
@@ -946,11 +957,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) | |||
946 | th = tcp_hdr(skb); | 957 | th = tcp_hdr(skb); |
947 | 958 | ||
948 | th->check = 0; | 959 | th->check = 0; |
949 | th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
950 | IPPROTO_TCP, 0); | ||
951 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
952 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
953 | skb->ip_summed = CHECKSUM_PARTIAL; | 960 | skb->ip_summed = CHECKSUM_PARTIAL; |
961 | __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); | ||
954 | return 0; | 962 | return 0; |
955 | } | 963 | } |
956 | 964 | ||
@@ -1047,15 +1055,14 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1047 | } | 1055 | } |
1048 | #endif | 1056 | #endif |
1049 | 1057 | ||
1050 | buff->csum = csum_partial(t1, tot_len, 0); | ||
1051 | |||
1052 | memset(&fl, 0, sizeof(fl)); | 1058 | memset(&fl, 0, sizeof(fl)); |
1053 | ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); | 1059 | ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); |
1054 | ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); | 1060 | ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); |
1055 | 1061 | ||
1056 | t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, | 1062 | buff->ip_summed = CHECKSUM_PARTIAL; |
1057 | tot_len, IPPROTO_TCP, | 1063 | buff->csum = 0; |
1058 | buff->csum); | 1064 | |
1065 | __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst); | ||
1059 | 1066 | ||
1060 | fl.proto = IPPROTO_TCP; | 1067 | fl.proto = IPPROTO_TCP; |
1061 | fl.oif = inet6_iif(skb); | 1068 | fl.oif = inet6_iif(skb); |
@@ -1070,7 +1077,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1070 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { | 1077 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { |
1071 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { | 1078 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { |
1072 | skb_dst_set(buff, dst); | 1079 | skb_dst_set(buff, dst); |
1073 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); | 1080 | ip6_xmit(ctl_sk, buff, &fl, NULL); |
1074 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); | 1081 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
1075 | if (rst) | 1082 | if (rst) |
1076 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); | 1083 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); |
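The three tcp_ipv6.c hunks above route every TCPv6 checksum through one helper: __tcp_v6_send_check() writes the inverted pseudo-header sum into th->check and records where the final sum must land (csum_start/csum_offset), and tcp_v6_send_response() now marks its reply CHECKSUM_PARTIAL instead of folding the sum itself. Hardware with TX checksum offload finishes the job; otherwise the core falls back to software completion. A sketch of that fallback (the hw_can_checksum flag and the wrapper name are stand-ins; the real feature test lives in the generic TX path, not in this diff):

	#include <linux/skbuff.h>

	static int finish_tx_checksum(struct sk_buff *skb, bool hw_can_checksum)
	{
		/* Offload-capable hardware writes th->check using
		 * csum_start/csum_offset; anything else folds the
		 * deferred sum in software. */
		if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_can_checksum)
			return skb_checksum_help(skb);
		return 0;
	}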
@@ -1233,12 +1240,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1233 | goto drop_and_free; | 1240 | goto drop_and_free; |
1234 | 1241 | ||
1235 | /* Secret recipe starts with IP addresses */ | 1242 | /* Secret recipe starts with IP addresses */ |
1236 | d = &ipv6_hdr(skb)->daddr.s6_addr32[0]; | 1243 | d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0]; |
1237 | *mess++ ^= *d++; | 1244 | *mess++ ^= *d++; |
1238 | *mess++ ^= *d++; | 1245 | *mess++ ^= *d++; |
1239 | *mess++ ^= *d++; | 1246 | *mess++ ^= *d++; |
1240 | *mess++ ^= *d++; | 1247 | *mess++ ^= *d++; |
1241 | d = &ipv6_hdr(skb)->saddr.s6_addr32[0]; | 1248 | d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0]; |
1242 | *mess++ ^= *d++; | 1249 | *mess++ ^= *d++; |
1243 | *mess++ ^= *d++; | 1250 | *mess++ ^= *d++; |
1244 | *mess++ ^= *d++; | 1251 | *mess++ ^= *d++; |
@@ -1676,6 +1683,7 @@ ipv6_pktoptions: | |||
1676 | static int tcp_v6_rcv(struct sk_buff *skb) | 1683 | static int tcp_v6_rcv(struct sk_buff *skb) |
1677 | { | 1684 | { |
1678 | struct tcphdr *th; | 1685 | struct tcphdr *th; |
1686 | struct ipv6hdr *hdr; | ||
1679 | struct sock *sk; | 1687 | struct sock *sk; |
1680 | int ret; | 1688 | int ret; |
1681 | struct net *net = dev_net(skb->dev); | 1689 | struct net *net = dev_net(skb->dev); |
@@ -1702,12 +1710,13 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1702 | goto bad_packet; | 1710 | goto bad_packet; |
1703 | 1711 | ||
1704 | th = tcp_hdr(skb); | 1712 | th = tcp_hdr(skb); |
1713 | hdr = ipv6_hdr(skb); | ||
1705 | TCP_SKB_CB(skb)->seq = ntohl(th->seq); | 1714 | TCP_SKB_CB(skb)->seq = ntohl(th->seq); |
1706 | TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + | 1715 | TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + |
1707 | skb->len - th->doff*4); | 1716 | skb->len - th->doff*4); |
1708 | TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); | 1717 | TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); |
1709 | TCP_SKB_CB(skb)->when = 0; | 1718 | TCP_SKB_CB(skb)->when = 0; |
1710 | TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb)); | 1719 | TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr); |
1711 | TCP_SKB_CB(skb)->sacked = 0; | 1720 | TCP_SKB_CB(skb)->sacked = 0; |
1712 | 1721 | ||
1713 | sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); | 1722 | sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); |
@@ -1718,6 +1727,11 @@ process: | |||
1718 | if (sk->sk_state == TCP_TIME_WAIT) | 1727 | if (sk->sk_state == TCP_TIME_WAIT) |
1719 | goto do_time_wait; | 1728 | goto do_time_wait; |
1720 | 1729 | ||
1730 | if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { | ||
1731 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | ||
1732 | goto discard_and_relse; | ||
1733 | } | ||
1734 | |||
1721 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 1735 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1722 | goto discard_and_relse; | 1736 | goto discard_and_relse; |
1723 | 1737 | ||
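The two min_hopcount hunks are the IPv6 half of the Generalized TTL Security Mechanism (RFC 5082): segments arriving with a hop limit below the per-socket floor are dropped and counted under LINUX_MIB_TCPMINTTLDROP, both in the error handler and in the main receive path. A hedged userspace sketch of arming it (assumes libc headers that define the IPV6_MINHOPCOUNT option this series introduces):

	#include <netinet/in.h>
	#include <sys/socket.h>

	/* Require a hop limit of 255, so only packets sent by a directly
	 * connected neighbour (which cannot be forged from farther away)
	 * reach this listening socket. */
	static int enable_gtsm(int fd)
	{
		int minhops = 255;

		return setsockopt(fd, IPPROTO_IPV6, IPV6_MINHOPCOUNT,
				  &minhops, sizeof(minhops));
	}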
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 90824852f598..2850e35cee3d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -91,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net, | |||
91 | if (ipv6_addr_any(addr6)) | 91 | if (ipv6_addr_any(addr6)) |
92 | hash = jhash_1word(0, mix); | 92 | hash = jhash_1word(0, mix); |
93 | else if (ipv6_addr_v4mapped(addr6)) | 93 | else if (ipv6_addr_v4mapped(addr6)) |
94 | hash = jhash_1word(addr6->s6_addr32[3], mix); | 94 | hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); |
95 | else | 95 | else |
96 | hash = jhash2(addr6->s6_addr32, 4, mix); | 96 | hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); |
97 | 97 | ||
98 | return hash ^ port; | 98 | return hash ^ port; |
99 | } | 99 | } |
@@ -335,6 +335,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
335 | if (flags & MSG_ERRQUEUE) | 335 | if (flags & MSG_ERRQUEUE) |
336 | return ipv6_recv_error(sk, msg, len); | 336 | return ipv6_recv_error(sk, msg, len); |
337 | 337 | ||
338 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | ||
339 | return ipv6_recv_rxpmtu(sk, msg, len); | ||
340 | |||
338 | try_again: | 341 | try_again: |
339 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | 342 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
340 | &peeked, &err); | 343 | &peeked, &err); |
@@ -919,6 +922,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
919 | int ulen = len; | 922 | int ulen = len; |
920 | int hlimit = -1; | 923 | int hlimit = -1; |
921 | int tclass = -1; | 924 | int tclass = -1; |
925 | int dontfrag = -1; | ||
922 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; | 926 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; |
923 | int err; | 927 | int err; |
924 | int connected = 0; | 928 | int connected = 0; |
@@ -1049,7 +1053,8 @@ do_udp_sendmsg: | |||
1049 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 1053 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
1050 | opt->tot_len = sizeof(*opt); | 1054 | opt->tot_len = sizeof(*opt); |
1051 | 1055 | ||
1052 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); | 1056 | err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, |
1057 | &tclass, &dontfrag); | ||
1053 | if (err < 0) { | 1058 | if (err < 0) { |
1054 | fl6_sock_release(flowlabel); | 1059 | fl6_sock_release(flowlabel); |
1055 | return err; | 1060 | return err; |
@@ -1120,6 +1125,9 @@ do_udp_sendmsg: | |||
1120 | if (tclass < 0) | 1125 | if (tclass < 0) |
1121 | tclass = np->tclass; | 1126 | tclass = np->tclass; |
1122 | 1127 | ||
1128 | if (dontfrag < 0) | ||
1129 | dontfrag = np->dontfrag; | ||
1130 | |||
1123 | if (msg->msg_flags&MSG_CONFIRM) | 1131 | if (msg->msg_flags&MSG_CONFIRM) |
1124 | goto do_confirm; | 1132 | goto do_confirm; |
1125 | back_from_confirm: | 1133 | back_from_confirm: |
@@ -1143,7 +1151,7 @@ do_append_data: | |||
1143 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, | 1151 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, |
1144 | sizeof(struct udphdr), hlimit, tclass, opt, &fl, | 1152 | sizeof(struct udphdr), hlimit, tclass, opt, &fl, |
1145 | (struct rt6_info*)dst, | 1153 | (struct rt6_info*)dst, |
1146 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); | 1154 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); |
1147 | if (err) | 1155 | if (err) |
1148 | udp_v6_flush_pending_frames(sk); | 1156 | udp_v6_flush_pending_frames(sk); |
1149 | else if (!corkreq) | 1157 | else if (!corkreq) |
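Because udpv6_sendmsg() now also threads dontfrag through datagram_send_ctl(), the flag can be flipped per datagram with ancillary data as well as per socket. A hedged sketch of a one-shot override (standard RFC 3542 cmsg plumbing; the helper name is illustrative):

	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Attach IPV6_DONTFRAG = 1 to a single sendmsg() call; cbuf must
	 * be at least CMSG_SPACE(sizeof(int)) bytes and clen set to that. */
	static void set_dontfrag_once(struct msghdr *msg, char *cbuf, size_t clen)
	{
		struct cmsghdr *cm;
		int on = 1;

		msg->msg_control = cbuf;
		msg->msg_controllen = clen;
		cm = CMSG_FIRSTHDR(msg);
		cm->cmsg_level = IPPROTO_IPV6;
		cm->cmsg_type = IPV6_DONTFRAG;
		cm->cmsg_len = CMSG_LEN(sizeof(on));
		memcpy(CMSG_DATA(cm), &on, sizeof(on));
	}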
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 00bf7c962b7e..4a0e77e14468 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net, | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct dst_entry * | ||
71 | __xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | ||
72 | { | ||
73 | struct dst_entry *dst; | ||
74 | |||
75 | /* Still not clear if we should set fl->fl6_{src,dst}... */ | ||
76 | read_lock_bh(&policy->lock); | ||
77 | for (dst = policy->bundles; dst; dst = dst->next) { | ||
78 | struct xfrm_dst *xdst = (struct xfrm_dst*)dst; | ||
79 | struct in6_addr fl_dst_prefix, fl_src_prefix; | ||
80 | |||
81 | ipv6_addr_prefix(&fl_dst_prefix, | ||
82 | &fl->fl6_dst, | ||
83 | xdst->u.rt6.rt6i_dst.plen); | ||
84 | ipv6_addr_prefix(&fl_src_prefix, | ||
85 | &fl->fl6_src, | ||
86 | xdst->u.rt6.rt6i_src.plen); | ||
87 | if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && | ||
88 | ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && | ||
89 | xfrm_bundle_ok(policy, xdst, fl, AF_INET6, | ||
90 | (xdst->u.rt6.rt6i_dst.plen != 128 || | ||
91 | xdst->u.rt6.rt6i_src.plen != 128))) { | ||
92 | dst_clone(dst); | ||
93 | break; | ||
94 | } | ||
95 | } | ||
96 | read_unlock_bh(&policy->lock); | ||
97 | return dst; | ||
98 | } | ||
99 | |||
100 | static int xfrm6_get_tos(struct flowi *fl) | 70 | static int xfrm6_get_tos(struct flowi *fl) |
101 | { | 71 | { |
102 | return 0; | 72 | return 0; |
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { | |||
291 | .dst_ops = &xfrm6_dst_ops, | 261 | .dst_ops = &xfrm6_dst_ops, |
292 | .dst_lookup = xfrm6_dst_lookup, | 262 | .dst_lookup = xfrm6_dst_lookup, |
293 | .get_saddr = xfrm6_get_saddr, | 263 | .get_saddr = xfrm6_get_saddr, |
294 | .find_bundle = __xfrm6_find_bundle, | ||
295 | .decode_session = _decode_session6, | 264 | .decode_session = _decode_session6, |
296 | .get_tos = xfrm6_get_tos, | 265 | .get_tos = xfrm6_get_tos, |
297 | .init_path = xfrm6_init_path, | 266 | .init_path = xfrm6_init_path, |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 2a4efcea3423..79986a674f6e 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -347,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | |||
347 | self->tx_flow = flow; | 347 | self->tx_flow = flow; |
348 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", | 348 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", |
349 | __func__); | 349 | __func__); |
350 | wake_up_interruptible(sk->sk_sleep); | 350 | wake_up_interruptible(sk_sleep(sk)); |
351 | break; | 351 | break; |
352 | default: | 352 | default: |
353 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); | 353 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); |
@@ -900,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | |||
900 | if (flags & O_NONBLOCK) | 900 | if (flags & O_NONBLOCK) |
901 | goto out; | 901 | goto out; |
902 | 902 | ||
903 | err = wait_event_interruptible(*(sk->sk_sleep), | 903 | err = wait_event_interruptible(*(sk_sleep(sk)), |
904 | skb_peek(&sk->sk_receive_queue)); | 904 | skb_peek(&sk->sk_receive_queue)); |
905 | if (err) | 905 | if (err) |
906 | goto out; | 906 | goto out; |
@@ -1066,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1066 | goto out; | 1066 | goto out; |
1067 | 1067 | ||
1068 | err = -ERESTARTSYS; | 1068 | err = -ERESTARTSYS; |
1069 | if (wait_event_interruptible(*(sk->sk_sleep), | 1069 | if (wait_event_interruptible(*(sk_sleep(sk)), |
1070 | (sk->sk_state != TCP_SYN_SENT))) | 1070 | (sk->sk_state != TCP_SYN_SENT))) |
1071 | goto out; | 1071 | goto out; |
1072 | 1072 | ||
@@ -1318,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1318 | 1318 | ||
1319 | /* Check if IrTTP wants us to slow down */ | 1319 |

1320 | 1320 | ||
1321 | if (wait_event_interruptible(*(sk->sk_sleep), | 1321 | if (wait_event_interruptible(*(sk_sleep(sk)), |
1322 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { | 1322 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { |
1323 | err = -ERESTARTSYS; | 1323 | err = -ERESTARTSYS; |
1324 | goto out; | 1324 | goto out; |
@@ -1477,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1477 | if (copied >= target) | 1477 | if (copied >= target) |
1478 | break; | 1478 | break; |
1479 | 1479 | ||
1480 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1480 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1481 | 1481 | ||
1482 | /* | 1482 | /* |
1483 | * POSIX 1003.1g mandates this order. | 1483 | * POSIX 1003.1g mandates this order. |
@@ -1497,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | |||
1497 | /* Wait process until data arrives */ | 1497 | /* Wait process until data arrives */ |
1498 | schedule(); | 1498 | schedule(); |
1499 | 1499 | ||
1500 | finish_wait(sk->sk_sleep, &wait); | 1500 | finish_wait(sk_sleep(sk), &wait); |
1501 | 1501 | ||
1502 | if (err) | 1502 | if (err) |
1503 | goto out; | 1503 | goto out; |
@@ -1787,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, | |||
1787 | IRDA_DEBUG(4, "%s()\n", __func__); | 1787 | IRDA_DEBUG(4, "%s()\n", __func__); |
1788 | 1788 | ||
1789 | lock_kernel(); | 1789 | lock_kernel(); |
1790 | poll_wait(file, sk->sk_sleep, wait); | 1790 | poll_wait(file, sk_sleep(sk), wait); |
1791 | mask = 0; | 1791 | mask = 0; |
1792 | 1792 | ||
1793 | /* Exceptional events? */ | 1793 | /* Exceptional events? */ |
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index e2e893b474e9..8b915f3ac3b9 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c | |||
@@ -475,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) | |||
475 | /* Check if any of the settings have changed */ | 475 | /* Check if any of the settings have changed */ |
476 | if (dce & 0x0f) { | 476 | if (dce & 0x0f) { |
477 | if (dce & IRCOMM_DELTA_CTS) { | 477 | if (dce & IRCOMM_DELTA_CTS) { |
478 | IRDA_DEBUG(2, "%s(), CTS \n", __func__ ); | 478 | IRDA_DEBUG(2, "%s(), CTS\n", __func__ ); |
479 | } | 479 | } |
480 | } | 480 | } |
481 | 481 | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index c18286a2167b..9636b7d27b48 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -59,7 +59,7 @@ do { \ | |||
59 | DEFINE_WAIT(__wait); \ | 59 | DEFINE_WAIT(__wait); \ |
60 | long __timeo = timeo; \ | 60 | long __timeo = timeo; \ |
61 | ret = 0; \ | 61 | ret = 0; \ |
62 | prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \ | 62 | prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \ |
63 | while (!(condition)) { \ | 63 | while (!(condition)) { \ |
64 | if (!__timeo) { \ | 64 | if (!__timeo) { \ |
65 | ret = -EAGAIN; \ | 65 | ret = -EAGAIN; \ |
@@ -76,7 +76,7 @@ do { \ | |||
76 | if (ret) \ | 76 | if (ret) \ |
77 | break; \ | 77 | break; \ |
78 | } \ | 78 | } \ |
79 | finish_wait(sk->sk_sleep, &__wait); \ | 79 | finish_wait(sk_sleep(sk), &__wait); \ |
80 | } while (0) | 80 | } while (0) |
81 | 81 | ||
82 | #define iucv_sock_wait(sk, condition, timeo) \ | 82 | #define iucv_sock_wait(sk, condition, timeo) \ |
@@ -307,7 +307,7 @@ static void iucv_sock_wake_msglim(struct sock *sk) | |||
307 | { | 307 | { |
308 | read_lock(&sk->sk_callback_lock); | 308 | read_lock(&sk->sk_callback_lock); |
309 | if (sk_has_sleeper(sk)) | 309 | if (sk_has_sleeper(sk)) |
310 | wake_up_interruptible_all(sk->sk_sleep); | 310 | wake_up_interruptible_all(sk_sleep(sk)); |
311 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 311 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
312 | read_unlock(&sk->sk_callback_lock); | 312 | read_unlock(&sk->sk_callback_lock); |
313 | } | 313 | } |
@@ -795,7 +795,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock, | |||
795 | timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); | 795 | timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); |
796 | 796 | ||
797 | /* Wait for an incoming connection */ | 797 | /* Wait for an incoming connection */ |
798 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 798 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
799 | while (!(nsk = iucv_accept_dequeue(sk, newsock))) { | 799 | while (!(nsk = iucv_accept_dequeue(sk, newsock))) { |
800 | set_current_state(TASK_INTERRUPTIBLE); | 800 | set_current_state(TASK_INTERRUPTIBLE); |
801 | if (!timeo) { | 801 | if (!timeo) { |
@@ -819,7 +819,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock, | |||
819 | } | 819 | } |
820 | 820 | ||
821 | set_current_state(TASK_RUNNING); | 821 | set_current_state(TASK_RUNNING); |
822 | remove_wait_queue(sk->sk_sleep, &wait); | 822 | remove_wait_queue(sk_sleep(sk), &wait); |
823 | 823 | ||
824 | if (err) | 824 | if (err) |
825 | goto done; | 825 | goto done; |
@@ -1269,7 +1269,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, | |||
1269 | struct sock *sk = sock->sk; | 1269 | struct sock *sk = sock->sk; |
1270 | unsigned int mask = 0; | 1270 | unsigned int mask = 0; |
1271 | 1271 | ||
1272 | sock_poll_wait(file, sk->sk_sleep, wait); | 1272 | sock_poll_wait(file, sk_sleep(sk), wait); |
1273 | 1273 | ||
1274 | if (sk->sk_state == IUCV_LISTEN) | 1274 | if (sk->sk_state == IUCV_LISTEN) |
1275 | return iucv_accept_poll(sk); | 1275 | return iucv_accept_poll(sk); |
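Every open-coded sk->sk_sleep dereference in the irda and iucv hunks above becomes a call through the sk_sleep() accessor. At this point in the series the helper is only a trivial wrapper, roughly the sketch below; the value of the indirection is that the wait-queue storage can later be moved out of struct sock without touching these call sites again.

	/* Sketch mirroring the accessor these conversions assume; the real
	 * definition lives in include/net/sock.h. */
	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}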
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig new file mode 100644 index 000000000000..4b1e71751e10 --- /dev/null +++ b/net/l2tp/Kconfig | |||
@@ -0,0 +1,107 @@ | |||
1 | # | ||
2 | # Layer Two Tunneling Protocol (L2TP) | ||
3 | # | ||
4 | |||
5 | menuconfig L2TP | ||
6 | tristate "Layer Two Tunneling Protocol (L2TP)" | ||
7 | depends on INET | ||
8 | ---help--- | ||
9 | Layer Two Tunneling Protocol | ||
10 | |||
11 | From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>. | ||
12 | |||
13 | L2TP facilitates the tunneling of packets across an | ||
14 | intervening network in a way that is as transparent as | ||
15 | possible to both end-users and applications. | ||
16 | |||
17 | L2TP is often used to tunnel PPP traffic over IP | ||
18 | tunnels. One IP tunnel may carry thousands of individual PPP | ||
19 | connections. L2TP is also used as a VPN protocol, popular | ||
20 | with home workers connecting to their offices. | ||
21 | |||
22 | L2TPv3 allows other protocols as well as PPP to be carried | ||
23 | over L2TP tunnels. L2TPv3 is defined in RFC 3931 | ||
24 | <http://www.ietf.org/rfc/rfc3931.txt>. | ||
25 | |||
26 | The kernel component handles only L2TP data packets: a | ||
27 | userland daemon handles the L2TP control protocol (tunnel | ||
28 | and session setup). One such daemon is OpenL2TP | ||
29 | (http://openl2tp.org/). | ||
30 | |||
31 | If you don't need L2TP, say N. To compile all L2TP code as | ||
32 | modules, choose M here. | ||
33 | |||
34 | config L2TP_DEBUGFS | ||
35 | tristate "L2TP debugfs support" | ||
36 | depends on L2TP && DEBUG_FS | ||
37 | help | ||
38 | Support for an l2tp directory in the debugfs filesystem. This | ||
39 | may be used to dump the internal state of the l2tp drivers for | ||
40 | problem analysis. | ||
41 | |||
42 | If unsure, say 'Y'. | ||
43 | |||
44 | To compile this driver as a module, choose M here. The module | ||
45 | will be called l2tp_debugfs. | ||
46 | |||
47 | config L2TP_V3 | ||
48 | bool "L2TPv3 support (EXPERIMENTAL)" | ||
49 | depends on EXPERIMENTAL && L2TP | ||
50 | help | ||
51 | Layer Two Tunneling Protocol Version 3 | ||
52 | |||
53 | From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>. | ||
54 | |||
55 | The Layer Two Tunneling Protocol (L2TP) provides a dynamic | ||
56 | mechanism for tunneling Layer 2 (L2) "circuits" across a | ||
57 | packet-oriented data network (e.g., over IP). L2TP, as | ||
58 | originally defined in RFC 2661, is a standard method for | ||
59 | tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions. | ||
60 | L2TP has since been adopted for tunneling a number of other | ||
61 | L2 protocols, including ATM, Frame Relay, HDLC and even raw | ||
62 | ethernet frames. | ||
63 | |||
64 | If you are connecting to L2TPv3 equipment, or you want to | ||
65 | tunnel raw ethernet frames using L2TP, say Y here. If | ||
66 | unsure, say N. | ||
67 | |||
68 | config L2TP_IP | ||
69 | tristate "L2TP IP encapsulation for L2TPv3" | ||
70 | depends on L2TP_V3 | ||
71 | help | ||
72 | Support for L2TP-over-IP socket family. | ||
73 | |||
74 | The L2TPv3 protocol defines two possible encapsulations for | ||
75 | L2TP frames, namely UDP and plain IP (without UDP). This | ||
76 | driver provides a new L2TPIP socket family with which | ||
77 | userspace L2TPv3 daemons may create L2TP/IP tunnel sockets | ||
78 | when UDP encapsulation is not required. When L2TP is carried | ||
79 | in IP packets, it uses IP protocol number 115, so this | ||
80 | protocol must be allowed through firewalls. | ||
81 | |||
82 | To compile this driver as a module, choose M here. The module | ||
83 | will be called l2tp_ip. | ||
84 | |||
85 | config L2TP_ETH | ||
86 | tristate "L2TP ethernet pseudowire support for L2TPv3" | ||
87 | depends on L2TP_V3 | ||
88 | help | ||
89 | Support for carrying raw ethernet frames over L2TPv3. | ||
90 | |||
91 | From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>. | ||
92 | |||
93 | The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be | ||
94 | used as a control protocol and for data encapsulation to set | ||
95 | up Pseudowires for transporting layer 2 Packet Data Units | ||
96 | across an IP network [RFC3931]. | ||
97 | |||
98 | This driver provides an ethernet virtual interface for each | ||
99 | L2TP ethernet pseudowire instance. Standard Linux tools may | ||
100 | be used to assign an IP address to the local virtual | ||
101 | interface, or add the interface to a bridge. | ||
102 | |||
103 | If you are using L2TPv3, you will almost certainly want to | ||
104 | enable this option. | ||
105 | |||
106 | To compile this driver as a module, choose M here. The module | ||
107 | will be called l2tp_eth. | ||
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile new file mode 100644 index 000000000000..110e7bc2de5e --- /dev/null +++ b/net/l2tp/Makefile | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # Makefile for the L2TP. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_L2TP) += l2tp_core.o | ||
6 | |||
7 | # Build l2tp as modules if L2TP is M | ||
8 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o | ||
9 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o | ||
10 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o | ||
11 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o | ||
12 | obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o | ||
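The $(subst ...) wrapper keeps each sub-option's tristate in step with the core module: every "y" in the sub-option's value is replaced by the value of CONFIG_L2TP, so with CONFIG_L2TP=m and CONFIG_L2TP_IP=y the line expands to obj-m += l2tp_ip.o. A dependent object therefore can never be built into the kernel while the l2tp_core it links against is a module.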
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c new file mode 100644 index 000000000000..1712af1c7b3f --- /dev/null +++ b/net/l2tp/l2tp_core.c | |||
@@ -0,0 +1,1666 @@ | |||
1 | /* | ||
2 | * L2TP core. | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This file contains some code of the original L2TPv2 pppol2tp | ||
7 | * driver, which has the following copyright: | ||
8 | * | ||
9 | * Authors: Martijn van Oosterhout <kleptog@svana.org> | ||
10 | * James Chapman (jchapman@katalix.com) | ||
11 | * Contributors: | ||
12 | * Michal Ostrowski <mostrows@speakeasy.net> | ||
13 | * Arnaldo Carvalho de Melo <acme@xconectiva.com.br> | ||
14 | * David S. Miller (davem@redhat.com) | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License version 2 as | ||
18 | * published by the Free Software Foundation. | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/rculist.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/kthread.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/errno.h> | ||
33 | #include <linux/jiffies.h> | ||
34 | |||
35 | #include <linux/netdevice.h> | ||
36 | #include <linux/net.h> | ||
37 | #include <linux/inetdevice.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | #include <linux/udp.h> | ||
43 | #include <linux/l2tp.h> | ||
44 | #include <linux/hash.h> | ||
45 | #include <linux/sort.h> | ||
46 | #include <linux/file.h> | ||
47 | #include <linux/nsproxy.h> | ||
48 | #include <net/net_namespace.h> | ||
49 | #include <net/netns/generic.h> | ||
50 | #include <net/dst.h> | ||
51 | #include <net/ip.h> | ||
52 | #include <net/udp.h> | ||
53 | #include <net/inet_common.h> | ||
54 | #include <net/xfrm.h> | ||
55 | #include <net/protocol.h> | ||
56 | |||
57 | #include <asm/byteorder.h> | ||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | #include "l2tp_core.h" | ||
61 | |||
62 | #define L2TP_DRV_VERSION "V2.0" | ||
63 | |||
64 | /* L2TP header constants */ | ||
65 | #define L2TP_HDRFLAG_T 0x8000 | ||
66 | #define L2TP_HDRFLAG_L 0x4000 | ||
67 | #define L2TP_HDRFLAG_S 0x0800 | ||
68 | #define L2TP_HDRFLAG_O 0x0200 | ||
69 | #define L2TP_HDRFLAG_P 0x0100 | ||
70 | |||
71 | #define L2TP_HDR_VER_MASK 0x000F | ||
72 | #define L2TP_HDR_VER_2 0x0002 | ||
73 | #define L2TP_HDR_VER_3 0x0003 | ||
74 | |||
75 | /* L2TPv3 default L2-specific sublayer */ | ||
76 | #define L2TP_SLFLAG_S 0x40000000 | ||
77 | #define L2TP_SL_SEQ_MASK 0x00ffffff | ||
78 | |||
79 | #define L2TP_HDR_SIZE_SEQ 10 | ||
80 | #define L2TP_HDR_SIZE_NOSEQ 6 | ||
81 | |||
82 | /* Default trace flags */ | ||
83 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 | ||
84 | |||
85 | #define PRINTK(_mask, _type, _lvl, _fmt, args...) \ | ||
86 | do { \ | ||
87 | if ((_mask) & (_type)) \ | ||
88 | printk(_lvl "L2TP: " _fmt, ##args); \ | ||
89 | } while (0) | ||
90 | |||
91 | /* Private data stored for received packets in the skb. | ||
92 | */ | ||
93 | struct l2tp_skb_cb { | ||
94 | u32 ns; | ||
95 | u16 has_seq; | ||
96 | u16 length; | ||
97 | unsigned long expires; | ||
98 | }; | ||
99 | |||
100 | #define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)]) | ||
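	/* L2TP_SKB_CB() parks this per-packet state in the tail of skb->cb[],
	 * after the IP layer's struct inet_skb_parm, so both users can share
	 * the same skb. A build-time check along these lines (a sketch, not
	 * part of the original file) captures the invariant the macro
	 * depends on:
	 *
	 *	BUILD_BUG_ON(sizeof(struct inet_skb_parm) +
	 *		     sizeof(struct l2tp_skb_cb) >
	 *		     sizeof(((struct sk_buff *)0)->cb));
	 */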
101 | |||
102 | static atomic_t l2tp_tunnel_count; | ||
103 | static atomic_t l2tp_session_count; | ||
104 | |||
105 | /* per-net private data for this module */ | ||
106 | static unsigned int l2tp_net_id; | ||
107 | struct l2tp_net { | ||
108 | struct list_head l2tp_tunnel_list; | ||
109 | spinlock_t l2tp_tunnel_list_lock; | ||
110 | struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; | ||
111 | spinlock_t l2tp_session_hlist_lock; | ||
112 | }; | ||
113 | |||
114 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | ||
115 | { | ||
116 | BUG_ON(!net); | ||
117 | |||
118 | return net_generic(net, l2tp_net_id); | ||
119 | } | ||
120 | |||
121 | /* Session hash global list for L2TPv3. | ||
122 | * The session_id SHOULD be random according to RFC3931, but several | ||
123 | * L2TP implementations use incrementing session_ids. So we do a real | ||
124 | * hash on the session_id, rather than a simple bitmask. | ||
125 | */ | ||
126 | static inline struct hlist_head * | ||
127 | l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | ||
128 | { | ||
129 | return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)]; | ||
130 | |||
131 | } | ||
132 | |||
133 | /* Lookup a session by id in the global session list | ||
134 | */ | ||
135 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | ||
136 | { | ||
137 | struct l2tp_net *pn = l2tp_pernet(net); | ||
138 | struct hlist_head *session_list = | ||
139 | l2tp_session_id_hash_2(pn, session_id); | ||
140 | struct l2tp_session *session; | ||
141 | struct hlist_node *walk; | ||
142 | |||
143 | rcu_read_lock_bh(); | ||
144 | hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) { | ||
145 | if (session->session_id == session_id) { | ||
146 | rcu_read_unlock_bh(); | ||
147 | return session; | ||
148 | } | ||
149 | } | ||
150 | rcu_read_unlock_bh(); | ||
151 | |||
152 | return NULL; | ||
153 | } | ||
154 | |||
155 | /* Session hash list. | ||
156 | * The session_id SHOULD be random according to RFC2661, but several | ||
157 | * L2TP implementations (Cisco and Microsoft) use incrementing | ||
158 | * session_ids. So we do a real hash on the session_id, rather than a | ||
159 | * simple bitmask. | ||
160 | */ | ||
161 | static inline struct hlist_head * | ||
162 | l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) | ||
163 | { | ||
164 | return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; | ||
165 | } | ||
166 | |||
167 | /* Lookup a session by id | ||
168 | */ | ||
169 | struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id) | ||
170 | { | ||
171 | struct hlist_head *session_list; | ||
172 | struct l2tp_session *session; | ||
173 | struct hlist_node *walk; | ||
174 | |||
175 | /* In L2TPv3, session_ids are unique over all tunnels and we | ||
176 | * sometimes need to look them up before we know the | ||
177 | * tunnel. | ||
178 | */ | ||
179 | if (tunnel == NULL) | ||
180 | return l2tp_session_find_2(net, session_id); | ||
181 | |||
182 | session_list = l2tp_session_id_hash(tunnel, session_id); | ||
183 | read_lock_bh(&tunnel->hlist_lock); | ||
184 | hlist_for_each_entry(session, walk, session_list, hlist) { | ||
185 | if (session->session_id == session_id) { | ||
186 | read_unlock_bh(&tunnel->hlist_lock); | ||
187 | return session; | ||
188 | } | ||
189 | } | ||
190 | read_unlock_bh(&tunnel->hlist_lock); | ||
191 | |||
192 | return NULL; | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(l2tp_session_find); | ||
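	/* Hypothetical caller sketch: because L2TPv3 session ids are unique
	 * across a namespace, a receive path may resolve the session before
	 * the tunnel is known by passing tunnel == NULL, which takes the
	 * per-net global hash branch above:
	 *
	 *	struct l2tp_session *session;
	 *
	 *	session = l2tp_session_find(net, NULL, session_id);
	 *	if (session == NULL)
	 *		goto discard;
	 */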
195 | |||
196 | struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) | ||
197 | { | ||
198 | int hash; | ||
199 | struct hlist_node *walk; | ||
200 | struct l2tp_session *session; | ||
201 | int count = 0; | ||
202 | |||
203 | read_lock_bh(&tunnel->hlist_lock); | ||
204 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
205 | hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) { | ||
206 | if (++count > nth) { | ||
207 | read_unlock_bh(&tunnel->hlist_lock); | ||
208 | return session; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
213 | read_unlock_bh(&tunnel->hlist_lock); | ||
214 | |||
215 | return NULL; | ||
216 | } | ||
217 | EXPORT_SYMBOL_GPL(l2tp_session_find_nth); | ||
218 | |||
219 | /* Lookup a session by interface name. | ||
220 | * This is very inefficient but is only used by management interfaces. | ||
221 | */ | ||
222 | struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) | ||
223 | { | ||
224 | struct l2tp_net *pn = l2tp_pernet(net); | ||
225 | int hash; | ||
226 | struct hlist_node *walk; | ||
227 | struct l2tp_session *session; | ||
228 | |||
229 | rcu_read_lock_bh(); | ||
230 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { | ||
231 | hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) { | ||
232 | if (!strcmp(session->ifname, ifname)) { | ||
233 | rcu_read_unlock_bh(); | ||
234 | return session; | ||
235 | } | ||
236 | } | ||
237 | } | ||
238 | |||
239 | rcu_read_unlock_bh(); | ||
240 | |||
241 | return NULL; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); | ||
244 | |||
245 | /* Lookup a tunnel by id | ||
246 | */ | ||
247 | struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) | ||
248 | { | ||
249 | struct l2tp_tunnel *tunnel; | ||
250 | struct l2tp_net *pn = l2tp_pernet(net); | ||
251 | |||
252 | rcu_read_lock_bh(); | ||
253 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
254 | if (tunnel->tunnel_id == tunnel_id) { | ||
255 | rcu_read_unlock_bh(); | ||
256 | return tunnel; | ||
257 | } | ||
258 | } | ||
259 | rcu_read_unlock_bh(); | ||
260 | |||
261 | return NULL; | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find); | ||
264 | |||
265 | struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth) | ||
266 | { | ||
267 | struct l2tp_net *pn = l2tp_pernet(net); | ||
268 | struct l2tp_tunnel *tunnel; | ||
269 | int count = 0; | ||
270 | |||
271 | rcu_read_lock_bh(); | ||
272 | list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { | ||
273 | if (++count > nth) { | ||
274 | rcu_read_unlock_bh(); | ||
275 | return tunnel; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | rcu_read_unlock_bh(); | ||
280 | |||
281 | return NULL; | ||
282 | } | ||
283 | EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); | ||
284 | |||
285 | /***************************************************************************** | ||
286 | * Receive data handling | ||
287 | *****************************************************************************/ | ||
288 | |||
289 | /* Queue a skb in order. We come here only if the skb has an L2TP sequence | ||
290 | * number. | ||
291 | */ | ||
292 | static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb) | ||
293 | { | ||
294 | struct sk_buff *skbp; | ||
295 | struct sk_buff *tmp; | ||
296 | u32 ns = L2TP_SKB_CB(skb)->ns; | ||
297 | |||
298 | spin_lock_bh(&session->reorder_q.lock); | ||
299 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { | ||
300 | if (L2TP_SKB_CB(skbp)->ns > ns) { | ||
301 | __skb_queue_before(&session->reorder_q, skbp, skb); | ||
302 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
303 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", | ||
304 | session->name, ns, L2TP_SKB_CB(skbp)->ns, | ||
305 | skb_queue_len(&session->reorder_q)); | ||
306 | session->stats.rx_oos_packets++; | ||
307 | goto out; | ||
308 | } | ||
309 | } | ||
310 | |||
311 | __skb_queue_tail(&session->reorder_q, skb); | ||
312 | |||
313 | out: | ||
314 | spin_unlock_bh(&session->reorder_q.lock); | ||
315 | } | ||
316 | |||
317 | /* Dequeue a single skb. | ||
318 | */ | ||
319 | static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb) | ||
320 | { | ||
321 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
322 | int length = L2TP_SKB_CB(skb)->length; | ||
323 | |||
324 | /* We're about to requeue the skb, so return resources | ||
325 | * to its current owner (a socket receive buffer). | ||
326 | */ | ||
327 | skb_orphan(skb); | ||
328 | |||
329 | tunnel->stats.rx_packets++; | ||
330 | tunnel->stats.rx_bytes += length; | ||
331 | session->stats.rx_packets++; | ||
332 | session->stats.rx_bytes += length; | ||
333 | |||
334 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
335 | /* Bump our Nr */ | ||
336 | session->nr++; | ||
337 | if (tunnel->version == L2TP_HDR_VER_2) | ||
338 | session->nr &= 0xffff; | ||
339 | else | ||
340 | session->nr &= 0xffffff; | ||
341 | |||
342 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
343 | "%s: updated nr to %hu\n", session->name, session->nr); | ||
344 | } | ||
345 | |||
346 | /* call private receive handler */ | ||
347 | if (session->recv_skb != NULL) | ||
348 | (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length); | ||
349 | else | ||
350 | kfree_skb(skb); | ||
351 | |||
352 | if (session->deref) | ||
353 | (*session->deref)(session); | ||
354 | } | ||
355 | |||
356 | /* Dequeue skbs from the session's reorder_q, subject to packet order. | ||
357 | * Skbs that have been in the queue for too long are simply discarded. | ||
358 | */ | ||
359 | static void l2tp_recv_dequeue(struct l2tp_session *session) | ||
360 | { | ||
361 | struct sk_buff *skb; | ||
362 | struct sk_buff *tmp; | ||
363 | |||
364 | /* If the pkt at the head of the queue has the nr that we | ||
365 | * expect to send up next, dequeue it and any other | ||
366 | * in-sequence packets behind it. | ||
367 | */ | ||
368 | spin_lock_bh(&session->reorder_q.lock); | ||
369 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { | ||
370 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { | ||
371 | session->stats.rx_seq_discards++; | ||
372 | session->stats.rx_errors++; | ||
373 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
374 | "%s: oos pkt %u len %d discarded (too old), " | ||
375 | "waiting for %u, reorder_q_len=%d\n", | ||
376 | session->name, L2TP_SKB_CB(skb)->ns, | ||
377 | L2TP_SKB_CB(skb)->length, session->nr, | ||
378 | skb_queue_len(&session->reorder_q)); | ||
379 | __skb_unlink(skb, &session->reorder_q); | ||
380 | kfree_skb(skb); | ||
381 | if (session->deref) | ||
382 | (*session->deref)(session); | ||
383 | continue; | ||
384 | } | ||
385 | |||
386 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
387 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | ||
388 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
389 | "%s: holding oos pkt %u len %d, " | ||
390 | "waiting for %u, reorder_q_len=%d\n", | ||
391 | session->name, L2TP_SKB_CB(skb)->ns, | ||
392 | L2TP_SKB_CB(skb)->length, session->nr, | ||
393 | skb_queue_len(&session->reorder_q)); | ||
394 | goto out; | ||
395 | } | ||
396 | } | ||
397 | __skb_unlink(skb, &session->reorder_q); | ||
398 | |||
399 | /* Process the skb. We release the queue lock while we | ||
400 | * do so to let other contexts process the queue. | ||
401 | */ | ||
402 | spin_unlock_bh(&session->reorder_q.lock); | ||
403 | l2tp_recv_dequeue_skb(session, skb); | ||
404 | spin_lock_bh(&session->reorder_q.lock); | ||
405 | } | ||
406 | |||
407 | out: | ||
408 | spin_unlock_bh(&session->reorder_q.lock); | ||
409 | } | ||
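	/* Worked example of the dequeue rules above (illustration only, not
	 * from the original source): with session->nr == 5 and reorder_q
	 * holding ns = {5, 6, 9}, the walk delivers 5 and then 6 (nr
	 * advances to 7 in l2tp_recv_dequeue_skb()), then parks on 9 until
	 * 7 and 8 arrive or 9's expires timestamp lapses, at which point 9
	 * is dropped and counted in rx_seq_discards. */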
410 | |||
411 | static inline int l2tp_verify_udp_checksum(struct sock *sk, | ||
412 | struct sk_buff *skb) | ||
413 | { | ||
414 | struct udphdr *uh = udp_hdr(skb); | ||
415 | u16 ulen = ntohs(uh->len); | ||
416 | struct inet_sock *inet; | ||
417 | __wsum psum; | ||
418 | |||
419 | if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) | ||
420 | return 0; | ||
421 | |||
422 | inet = inet_sk(sk); | ||
423 | psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen, | ||
424 | IPPROTO_UDP, 0); | ||
425 | |||
426 | if ((skb->ip_summed == CHECKSUM_COMPLETE) && | ||
427 | !csum_fold(csum_add(psum, skb->csum))) | ||
428 | return 0; | ||
429 | |||
430 | skb->csum = psum; | ||
431 | |||
432 | return __skb_checksum_complete(skb); | ||
433 | } | ||
434 | |||
435 | /* Do receive processing of L2TP data frames. We handle both L2TPv2 | ||
436 | * and L2TPv3 data frames here. | ||
437 | * | ||
438 | * L2TPv2 Data Message Header | ||
439 | * | ||
440 | * 0 1 2 3 | ||
441 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
442 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
443 | * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) | | ||
444 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
445 | * | Tunnel ID | Session ID | | ||
446 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
447 | * | Ns (opt) | Nr (opt) | | ||
448 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
449 | * | Offset Size (opt) | Offset pad... (opt) | ||
450 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
451 | * | ||
452 | * Data frames are marked by T=0. All other fields are the same as | ||
453 | * those in L2TP control frames. | ||
454 | * | ||
455 | * L2TPv3 Data Message Header | ||
456 | * | ||
457 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
458 | * | L2TP Session Header | | ||
459 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
460 | * | L2-Specific Sublayer | | ||
461 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
462 | * | Tunnel Payload ... | ||
463 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
464 | * | ||
465 | * L2TPv3 Session Header Over IP | ||
466 | * | ||
467 | * 0 1 2 3 | ||
468 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
469 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
470 | * | Session ID | | ||
471 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
472 | * | Cookie (optional, maximum 64 bits)... | ||
473 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
474 | * | | ||
475 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
476 | * | ||
477 | * L2TPv3 L2-Specific Sublayer Format | ||
478 | * | ||
479 | * 0 1 2 3 | ||
480 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
481 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
482 | * |x|S|x|x|x|x|x|x| Sequence Number | | ||
483 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
484 | * | ||
485 | * Cookie value, sublayer format and offset (pad) are negotiated with | ||
486 | * the peer when the session is set up. Unlike L2TPv2, we do not need | ||
487 | * to parse the packet header to determine if optional fields are | ||
488 | * present. | ||
489 | * | ||
490 | * Caller must already have parsed the frame and determined that it is | ||
491 | * a data (not control) frame before coming here. Fields up to the | ||
492 | * session-id have already been parsed and ptr points to the data | ||
493 | * after the session-id. | ||
494 | */ | ||
495 | void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | ||
496 | unsigned char *ptr, unsigned char *optr, u16 hdrflags, | ||
497 | int length, int (*payload_hook)(struct sk_buff *skb)) | ||
498 | { | ||
499 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
500 | int offset; | ||
501 | u32 ns, nr; | ||
502 | |||
503 | /* The ref count is increased since we now hold a pointer to | ||
504 | * the session. Take care to decrement the refcnt when exiting | ||
505 | * this function from now on... | ||
506 | */ | ||
507 | l2tp_session_inc_refcount(session); | ||
508 | if (session->ref) | ||
509 | (*session->ref)(session); | ||
510 | |||
511 | /* Parse and check optional cookie */ | ||
512 | if (session->peer_cookie_len > 0) { | ||
513 | if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { | ||
514 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
515 | "%s: cookie mismatch (%u/%u). Discarding.\n", | ||
516 | tunnel->name, tunnel->tunnel_id, session->session_id); | ||
517 | session->stats.rx_cookie_discards++; | ||
518 | goto discard; | ||
519 | } | ||
520 | ptr += session->peer_cookie_len; | ||
521 | } | ||
522 | |||
523 | /* Handle the optional sequence numbers. Sequence numbers are | ||
524 | * in different places for L2TPv2 and L2TPv3. | ||
525 | * | ||
526 | * If we are the LAC, enable/disable sequence numbers under | ||
527 | * the control of the LNS. If no sequence numbers are present | ||
528 | * but we were expecting them, discard the frame. | ||
529 | */ | ||
530 | ns = nr = 0; | ||
531 | L2TP_SKB_CB(skb)->has_seq = 0; | ||
532 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
533 | if (hdrflags & L2TP_HDRFLAG_S) { | ||
534 | ns = ntohs(*(__be16 *) ptr); | ||
535 | ptr += 2; | ||
536 | nr = ntohs(*(__be16 *) ptr); | ||
537 | ptr += 2; | ||
538 | |||
539 | /* Store L2TP info in the skb */ | ||
540 | L2TP_SKB_CB(skb)->ns = ns; | ||
541 | L2TP_SKB_CB(skb)->has_seq = 1; | ||
542 | |||
543 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
544 | "%s: recv data ns=%u, nr=%u, session nr=%u\n", | ||
545 | session->name, ns, nr, session->nr); | ||
546 | } | ||
547 | } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { | ||
548 | u32 l2h = ntohl(*(__be32 *) ptr); | ||
549 | |||
550 | if (l2h & 0x40000000) { | ||
551 | ns = l2h & 0x00ffffff; | ||
552 | |||
553 | /* Store L2TP info in the skb */ | ||
554 | L2TP_SKB_CB(skb)->ns = ns; | ||
555 | L2TP_SKB_CB(skb)->has_seq = 1; | ||
556 | |||
557 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
558 | "%s: recv data ns=%u, session nr=%u\n", | ||
559 | session->name, ns, session->nr); | ||
560 | } | ||
561 | } | ||
562 | |||
563 | /* Advance past L2-specific header, if present */ | ||
564 | ptr += session->l2specific_len; | ||
565 | |||
566 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
567 | /* Received a packet with sequence numbers. If we're the LAC | ||
568 | * and not already sending sequence numbers, the LNS has | ||
569 | * requested that we enable them, so do so now. | ||
570 | */ | ||
571 | if ((!session->lns_mode) && (!session->send_seq)) { | ||
572 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, | ||
573 | "%s: requested to enable seq numbers by LNS\n", | ||
574 | session->name); | ||
575 | session->send_seq = -1; | ||
576 | l2tp_session_set_header_len(session, tunnel->version); | ||
577 | } | ||
578 | } else { | ||
579 | /* No sequence numbers. | ||
580 | * If user has configured mandatory sequence numbers, discard. | ||
581 | */ | ||
582 | if (session->recv_seq) { | ||
583 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, | ||
584 | "%s: recv data has no seq numbers when required. " | ||
585 | "Discarding\n", session->name); | ||
586 | session->stats.rx_seq_discards++; | ||
587 | goto discard; | ||
588 | } | ||
589 | |||
590 | /* If we're the LAC and we're sending sequence numbers, the | ||
591 | * LNS has requested that we no longer send sequence numbers. | ||
592 | * If we're the LNS and we're sending sequence numbers, the | ||
593 | * LAC is broken. Discard the frame. | ||
594 | */ | ||
595 | if ((!session->lns_mode) && (session->send_seq)) { | ||
596 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO, | ||
597 | "%s: requested to disable seq numbers by LNS\n", | ||
598 | session->name); | ||
599 | session->send_seq = 0; | ||
600 | l2tp_session_set_header_len(session, tunnel->version); | ||
601 | } else if (session->send_seq) { | ||
602 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, | ||
603 | "%s: recv data has no seq numbers when required. " | ||
604 | "Discarding\n", session->name); | ||
605 | session->stats.rx_seq_discards++; | ||
606 | goto discard; | ||
607 | } | ||
608 | } | ||
609 | |||
610 | /* Session data offset is handled differently for L2TPv2 and | ||
611 | * L2TPv3. For L2TPv2, there is an optional 16-bit value in | ||
612 | * the header. For L2TPv3, the offset is negotiated using AVPs | ||
613 | * in the session setup control protocol. | ||
614 | */ | ||
615 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
616 | /* If offset bit set, skip it. */ | ||
617 | if (hdrflags & L2TP_HDRFLAG_O) { | ||
618 | offset = ntohs(*(__be16 *)ptr); | ||
619 | ptr += 2 + offset; | ||
620 | } | ||
621 | } else | ||
622 | ptr += session->offset; | ||
623 | |||
624 | offset = ptr - optr; | ||
625 | if (!pskb_may_pull(skb, offset)) | ||
626 | goto discard; | ||
627 | |||
628 | __skb_pull(skb, offset); | ||
629 | |||
630 | /* If caller wants to process the payload before we queue the | ||
631 | * packet, do so now. | ||
632 | */ | ||
633 | if (payload_hook) | ||
634 | if ((*payload_hook)(skb)) | ||
635 | goto discard; | ||
636 | |||
637 | /* Prepare skb for adding to the session's reorder_q. Hold | ||
638 | * packets for at most reorder_timeout, or for 1 second if | ||
639 | * reordering is disabled. | ||
640 | */ | ||
641 | L2TP_SKB_CB(skb)->length = length; | ||
642 | L2TP_SKB_CB(skb)->expires = jiffies + | ||
643 | (session->reorder_timeout ? session->reorder_timeout : HZ); | ||
644 | |||
645 | /* Add packet to the session's receive queue. Reordering is done here, if | ||
646 | * enabled. Saved L2TP protocol info is stored in skb->cb[]. | ||
647 | */ | ||
648 | if (L2TP_SKB_CB(skb)->has_seq) { | ||
649 | if (session->reorder_timeout != 0) { | ||
650 | /* Packet reordering enabled. Add skb to session's | ||
651 | * reorder queue, in order of ns. | ||
652 | */ | ||
653 | l2tp_recv_queue_skb(session, skb); | ||
654 | } else { | ||
655 | /* Packet reordering disabled. Discard out-of-sequence | ||
656 | * packets | ||
657 | */ | ||
658 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | ||
659 | session->stats.rx_seq_discards++; | ||
660 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
661 | "%s: oos pkt %u len %d discarded, " | ||
662 | "waiting for %u, reorder_q_len=%d\n", | ||
663 | session->name, L2TP_SKB_CB(skb)->ns, | ||
664 | L2TP_SKB_CB(skb)->length, session->nr, | ||
665 | skb_queue_len(&session->reorder_q)); | ||
666 | goto discard; | ||
667 | } | ||
668 | skb_queue_tail(&session->reorder_q, skb); | ||
669 | } | ||
670 | } else { | ||
671 | /* No sequence numbers. Add the skb to the tail of the | ||
672 | * reorder queue. This ensures that it will be | ||
673 | * delivered after all previous sequenced skbs. | ||
674 | */ | ||
675 | skb_queue_tail(&session->reorder_q, skb); | ||
676 | } | ||
677 | |||
678 | /* Try to dequeue as many skbs from reorder_q as we can. */ | ||
679 | l2tp_recv_dequeue(session); | ||
680 | |||
681 | l2tp_session_dec_refcount(session); | ||
682 | |||
683 | return; | ||
684 | |||
685 | discard: | ||
686 | session->stats.rx_errors++; | ||
687 | kfree_skb(skb); | ||
688 | |||
689 | if (session->deref) | ||
690 | (*session->deref)(session); | ||
691 | |||
692 | l2tp_session_dec_refcount(session); | ||
693 | } | ||
694 | EXPORT_SYMBOL(l2tp_recv_common); | ||
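The `l2h & 0x40000000` test in l2tp_recv_common() picks out the S bit of the default L2-specific sublayer diagrammed above, with Ns carried in the low 24 bits. A stand-alone sketch of the same decode (the example word is made up):

```c
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Decode the L2TPv3 default L2-specific sublayer: bit 30 is the S
 * (sequence) flag and the low 24 bits carry Ns. */
int main(void)
{
	uint32_t wire = htonl(0x40000007);	/* illustrative: S=1, Ns=7 */
	uint32_t l2h = ntohl(wire);

	if (l2h & 0x40000000)
		printf("sequenced frame, ns=%u\n", l2h & 0x00ffffff);
	else
		printf("unsequenced frame\n");
	return 0;
}
```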
695 | |||
696 | /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame | ||
697 | * here. The skb is not on a list when we get here. | ||
698 | * Returns 0 if the packet was a data packet and was successfully passed on. | ||
699 | * Returns 1 if the packet was not a good data packet and could not be | ||
700 | * forwarded. All such packets are passed up to userspace to deal with. | ||
701 | */ | ||
702 | int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | ||
703 | int (*payload_hook)(struct sk_buff *skb)) | ||
704 | { | ||
705 | struct l2tp_session *session = NULL; | ||
706 | unsigned char *ptr, *optr; | ||
707 | u16 hdrflags; | ||
708 | u32 tunnel_id, session_id; | ||
709 | int offset; | ||
710 | u16 version; | ||
711 | int length; | ||
712 | |||
713 | if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) | ||
714 | goto discard_bad_csum; | ||
715 | |||
716 | /* UDP always verifies the packet length. */ | ||
717 | __skb_pull(skb, sizeof(struct udphdr)); | ||
718 | |||
719 | /* Short packet? */ | ||
720 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { | ||
721 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
722 | "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); | ||
723 | goto error; | ||
724 | } | ||
725 | |||
726 | /* Point to L2TP header */ | ||
727 | optr = ptr = skb->data; | ||
728 | |||
729 | /* Trace packet contents, if enabled */ | ||
730 | if (tunnel->debug & L2TP_MSG_DATA) { | ||
731 | length = min(32u, skb->len); | ||
732 | if (!pskb_may_pull(skb, length)) | ||
733 | goto error; | ||
734 | |||
735 | printk(KERN_DEBUG "%s: recv: ", tunnel->name); | ||
736 | |||
737 | offset = 0; | ||
738 | do { | ||
739 | printk(" %02X", ptr[offset]); | ||
740 | } while (++offset < length); | ||
741 | |||
742 | printk("\n"); | ||
743 | } | ||
744 | |||
745 | /* Get L2TP header flags */ | ||
746 | hdrflags = ntohs(*(__be16 *) ptr); | ||
747 | |||
748 | /* Check protocol version */ | ||
749 | version = hdrflags & L2TP_HDR_VER_MASK; | ||
750 | if (version != tunnel->version) { | ||
751 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
752 | "%s: recv protocol version mismatch: got %d expected %d\n", | ||
753 | tunnel->name, version, tunnel->version); | ||
754 | goto error; | ||
755 | } | ||
756 | |||
757 | /* Get length of L2TP packet */ | ||
758 | length = skb->len; | ||
759 | |||
760 | /* If this is a control packet, it is handled by userspace. */ | ||
761 | if (hdrflags & L2TP_HDRFLAG_T) { | ||
762 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
763 | "%s: recv control packet, len=%d\n", tunnel->name, length); | ||
764 | goto error; | ||
765 | } | ||
766 | |||
767 | /* Skip flags */ | ||
768 | ptr += 2; | ||
769 | |||
770 | if (tunnel->version == L2TP_HDR_VER_2) { | ||
771 | /* If length is present, skip it */ | ||
772 | if (hdrflags & L2TP_HDRFLAG_L) | ||
773 | ptr += 2; | ||
774 | |||
775 | /* Extract tunnel and session ID */ | ||
776 | tunnel_id = ntohs(*(__be16 *) ptr); | ||
777 | ptr += 2; | ||
778 | session_id = ntohs(*(__be16 *) ptr); | ||
779 | ptr += 2; | ||
780 | } else { | ||
781 | ptr += 2; /* skip reserved bits */ | ||
782 | tunnel_id = tunnel->tunnel_id; | ||
783 | session_id = ntohl(*(__be32 *) ptr); | ||
784 | ptr += 4; | ||
785 | } | ||
786 | |||
787 | /* Find the session context */ | ||
788 | session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); | ||
789 | if (!session || !session->recv_skb) { | ||
790 | /* Not found? Pass to userspace to deal with */ | ||
791 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, | ||
792 | "%s: no session found (%u/%u). Passing up.\n", | ||
793 | tunnel->name, tunnel_id, session_id); | ||
794 | goto error; | ||
795 | } | ||
796 | |||
797 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); | ||
798 | |||
799 | return 0; | ||
800 | |||
801 | discard_bad_csum: | ||
802 | LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); | ||
803 | UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); | ||
804 | tunnel->stats.rx_errors++; | ||
805 | kfree_skb(skb); | ||
806 | |||
807 | return 0; | ||
808 | |||
809 | error: | ||
810 | /* Put UDP header back */ | ||
811 | __skb_push(skb, sizeof(struct udphdr)); | ||
812 | |||
813 | return 1; | ||
814 | } | ||
815 | EXPORT_SYMBOL_GPL(l2tp_udp_recv_core); | ||
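For reference, a user-space sketch that walks an L2TPv2 data header the same way the function above does: flags, optional length, then the 16-bit tunnel and session IDs. The packet bytes are invented; the flag constants mirror the L2TP_HDRFLAG_* values used above:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define HDRFLAG_T	0x8000	/* control message */
#define HDRFLAG_L	0x4000	/* length field present */
#define HDRFLAG_S	0x0800	/* sequence numbers present */
#define HDR_VER_MASK	0x000f

int main(void)
{
	/* flags = ver 2 with L bit, length 14, tunnel 9, session 17 */
	uint8_t pkt[] = { 0x40, 0x02, 0x00, 0x0e, 0x00, 0x09, 0x00, 0x11 };
	uint8_t *ptr = pkt;
	uint16_t flags, tid, sid;

	memcpy(&flags, ptr, 2); flags = ntohs(flags); ptr += 2;
	if (flags & HDRFLAG_T) { puts("control frame"); return 0; }
	if ((flags & HDR_VER_MASK) != 2) { puts("bad version"); return 1; }
	if (flags & HDRFLAG_L)
		ptr += 2;			/* skip the length field */

	memcpy(&tid, ptr, 2); ptr += 2;
	memcpy(&sid, ptr, 2); ptr += 2;
	printf("tunnel %u session %u\n", ntohs(tid), ntohs(sid));
	return 0;
}
```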
816 | |||
817 | /* UDP encapsulation receive handler. See net/ipv4/udp.c. | ||
818 | * Return codes: | ||
819 | * 0 : success. | ||
820 | * <0: error | ||
821 | * >0: skb should be passed up to userspace as UDP. | ||
822 | */ | ||
823 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | ||
824 | { | ||
825 | struct l2tp_tunnel *tunnel; | ||
826 | |||
827 | tunnel = l2tp_sock_to_tunnel(sk); | ||
828 | if (tunnel == NULL) | ||
829 | goto pass_up; | ||
830 | |||
831 | PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
832 | "%s: received %d bytes\n", tunnel->name, skb->len); | ||
833 | |||
834 | if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) | ||
835 | goto pass_up_put; | ||
836 | |||
837 | sock_put(sk); | ||
838 | return 0; | ||
839 | |||
840 | pass_up_put: | ||
841 | sock_put(sk); | ||
842 | pass_up: | ||
843 | return 1; | ||
844 | } | ||
845 | EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv); | ||
846 | |||
847 | /************************************************************************ | ||
848 | * Transmit handling | ||
849 | ***********************************************************************/ | ||
850 | |||
851 | /* Build an L2TP header for the session into the buffer provided. | ||
852 | */ | ||
853 | static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) | ||
854 | { | ||
855 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
856 | __be16 *bufp = buf; | ||
857 | __be16 *optr = buf; | ||
858 | u16 flags = L2TP_HDR_VER_2; | ||
859 | u32 tunnel_id = tunnel->peer_tunnel_id; | ||
860 | u32 session_id = session->peer_session_id; | ||
861 | |||
862 | if (session->send_seq) | ||
863 | flags |= L2TP_HDRFLAG_S; | ||
864 | |||
865 | /* Setup L2TP header. */ | ||
866 | *bufp++ = htons(flags); | ||
867 | *bufp++ = htons(tunnel_id); | ||
868 | *bufp++ = htons(session_id); | ||
869 | if (session->send_seq) { | ||
870 | *bufp++ = htons(session->ns); | ||
871 | *bufp++ = 0; | ||
872 | session->ns++; | ||
873 | session->ns &= 0xffff; | ||
874 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
875 | "%s: updated ns to %u\n", session->name, session->ns); | ||
876 | } | ||
877 | |||
878 | return bufp - optr; | ||
879 | } | ||
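With sequencing enabled, the builder above emits a 10-byte header. A hedged stand-alone re-creation with assumed values (peer tunnel 5, peer session 6, ns 1) that prints the resulting wire bytes:

```c
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define HDR_VER_2	0x0002
#define HDRFLAG_S	0x0800

int main(void)
{
	uint16_t hdr[5];

	hdr[0] = htons(HDR_VER_2 | HDRFLAG_S);	/* flags + version */
	hdr[1] = htons(5);			/* peer tunnel id */
	hdr[2] = htons(6);			/* peer session id */
	hdr[3] = htons(1);			/* Ns */
	hdr[4] = 0;				/* Nr, always 0 on xmit here */

	for (int i = 0; i < 10; i++)
		printf("%02x ", ((uint8_t *)hdr)[i]);
	printf("\n");	/* 08 02 00 05 00 06 00 01 00 00 */
	return 0;
}
```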
880 | |||
881 | static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) | ||
882 | { | ||
883 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
884 | char *bufp = buf; | ||
885 | char *optr = bufp; | ||
886 | |||
887 | /* Setup L2TP header. The header differs slightly for UDP and | ||
888 | * IP encapsulations. For UDP, there are 4 bytes of flags. | ||
889 | */ | ||
890 | if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { | ||
891 | u16 flags = L2TP_HDR_VER_3; | ||
892 | *((__be16 *) bufp) = htons(flags); | ||
893 | bufp += 2; | ||
894 | *((__be16 *) bufp) = 0; | ||
895 | bufp += 2; | ||
896 | } | ||
897 | |||
898 | *((__be32 *) bufp) = htonl(session->peer_session_id); | ||
899 | bufp += 4; | ||
900 | if (session->cookie_len) { | ||
901 | memcpy(bufp, &session->cookie[0], session->cookie_len); | ||
902 | bufp += session->cookie_len; | ||
903 | } | ||
904 | if (session->l2specific_len) { | ||
905 | if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { | ||
906 | u32 l2h = 0; | ||
907 | if (session->send_seq) { | ||
908 | l2h = 0x40000000 | session->ns; | ||
909 | session->ns++; | ||
910 | session->ns &= 0xffffff; | ||
911 | PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, | ||
912 | "%s: updated ns to %u\n", session->name, session->ns); | ||
913 | } | ||
914 | |||
915 | *((__be32 *) bufp) = htonl(l2h); | ||
916 | } | ||
917 | bufp += session->l2specific_len; | ||
918 | } | ||
919 | if (session->offset) | ||
920 | bufp += session->offset; | ||
921 | |||
922 | return bufp - optr; | ||
923 | } | ||
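Likewise for L2TPv3 over UDP: 4 bytes of flags/reserved, the 4-byte peer session ID, then cookie and L2-specific sublayer. A sketch with assumed values (peer session 0x11, 4-byte cookie, sequenced default sublayer with ns 2):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint8_t hdr[16], cookie[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t *p = hdr;
	uint16_t flags = htons(0x0003);			/* T=0, ver=3 */
	uint32_t sid = htonl(0x11);
	uint32_t l2h = htonl(0x40000000 | 2);		/* S bit, ns=2 */

	memcpy(p, &flags, 2); p += 2;
	memset(p, 0, 2); p += 2;			/* reserved */
	memcpy(p, &sid, 4); p += 4;
	memcpy(p, cookie, 4); p += 4;
	memcpy(p, &l2h, 4); p += 4;

	for (int i = 0; i < (int)(p - hdr); i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}
```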
924 | |||
925 | int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len) | ||
926 | { | ||
927 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
928 | unsigned int len = skb->len; | ||
929 | int error; | ||
930 | |||
931 | /* Debug */ | ||
932 | if (session->send_seq) | ||
933 | PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
934 | "%s: send %Zd bytes, ns=%u\n", session->name, | ||
935 | data_len, session->ns - 1); | ||
936 | else | ||
937 | PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG, | ||
938 | "%s: send %Zd bytes\n", session->name, data_len); | ||
939 | |||
940 | if (session->debug & L2TP_MSG_DATA) { | ||
941 | int i; | ||
942 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
943 | unsigned char *datap = skb->data + uhlen; | ||
944 | |||
945 | printk(KERN_DEBUG "%s: xmit:", session->name); | ||
946 | for (i = 0; i < (len - uhlen); i++) { | ||
947 | printk(" %02X", *datap++); | ||
948 | if (i == 31) { | ||
949 | printk(" ..."); | ||
950 | break; | ||
951 | } | ||
952 | } | ||
953 | printk("\n"); | ||
954 | } | ||
955 | |||
956 | /* Queue the packet to IP for output */ | ||
957 | skb->local_df = 1; | ||
958 | error = ip_queue_xmit(skb); | ||
959 | |||
960 | /* Update stats */ | ||
961 | if (error >= 0) { | ||
962 | tunnel->stats.tx_packets++; | ||
963 | tunnel->stats.tx_bytes += len; | ||
964 | session->stats.tx_packets++; | ||
965 | session->stats.tx_bytes += len; | ||
966 | } else { | ||
967 | tunnel->stats.tx_errors++; | ||
968 | session->stats.tx_errors++; | ||
969 | } | ||
970 | |||
971 | return 0; | ||
972 | } | ||
973 | EXPORT_SYMBOL_GPL(l2tp_xmit_core); | ||
974 | |||
975 | /* Automatically called when the skb is freed. | ||
976 | */ | ||
977 | static void l2tp_sock_wfree(struct sk_buff *skb) | ||
978 | { | ||
979 | sock_put(skb->sk); | ||
980 | } | ||
981 | |||
982 | /* Data skbs that we transmit are associated with the tunnel socket, | ||
983 | * but no socket accounting is done for them. | ||
984 | */ | ||
985 | static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
986 | { | ||
987 | sock_hold(sk); | ||
988 | skb->sk = sk; | ||
989 | skb->destructor = l2tp_sock_wfree; | ||
990 | } | ||
991 | |||
992 | /* If caller requires the skb to have a ppp header, the header must be | ||
993 | * inserted in the skb data before calling this function. | ||
994 | */ | ||
995 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len) | ||
996 | { | ||
997 | int data_len = skb->len; | ||
998 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
999 | struct sock *sk = tunnel->sock; | ||
1000 | struct udphdr *uh; | ||
1001 | struct inet_sock *inet; | ||
1002 | __wsum csum; | ||
1003 | int old_headroom; | ||
1004 | int new_headroom; | ||
1005 | int headroom; | ||
1006 | int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
1007 | int udp_len; | ||
1008 | |||
1009 | /* Check that there's enough headroom in the skb to insert IP, | ||
1010 | * UDP and L2TP headers. If not enough, expand it to | ||
1011 | * make room. Adjust truesize. | ||
1012 | */ | ||
1013 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | ||
1014 | uhlen + hdr_len; | ||
1015 | old_headroom = skb_headroom(skb); | ||
1016 | if (skb_cow_head(skb, headroom)) | ||
1017 | goto abort; | ||
1018 | |||
1019 | new_headroom = skb_headroom(skb); | ||
1020 | skb_orphan(skb); | ||
1021 | skb->truesize += new_headroom - old_headroom; | ||
1022 | |||
1023 | /* Setup L2TP header */ | ||
1024 | session->build_header(session, __skb_push(skb, hdr_len)); | ||
1025 | |||
1026 | /* Reset skb netfilter state */ | ||
1027 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
1028 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | ||
1029 | IPSKB_REROUTED); | ||
1030 | nf_reset(skb); | ||
1031 | |||
1032 | /* Get routing info from the tunnel socket */ | ||
1033 | skb_dst_drop(skb); | ||
1034 | skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); | ||
1035 | |||
1036 | switch (tunnel->encap) { | ||
1037 | case L2TP_ENCAPTYPE_UDP: | ||
1038 | /* Setup UDP header */ | ||
1039 | inet = inet_sk(sk); | ||
1040 | __skb_push(skb, sizeof(*uh)); | ||
1041 | skb_reset_transport_header(skb); | ||
1042 | uh = udp_hdr(skb); | ||
1043 | uh->source = inet->inet_sport; | ||
1044 | uh->dest = inet->inet_dport; | ||
1045 | udp_len = uhlen + hdr_len + data_len; | ||
1046 | uh->len = htons(udp_len); | ||
1047 | uh->check = 0; | ||
1048 | |||
1049 | /* Calculate UDP checksum if configured to do so */ | ||
1050 | if (sk->sk_no_check == UDP_CSUM_NOXMIT) | ||
1051 | skb->ip_summed = CHECKSUM_NONE; | ||
1052 | else if ((skb_dst(skb) && skb_dst(skb)->dev) && | ||
1053 | (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) { | ||
1054 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
1055 | csum = skb_checksum(skb, 0, udp_len, 0); | ||
1056 | uh->check = csum_tcpudp_magic(inet->inet_saddr, | ||
1057 | inet->inet_daddr, | ||
1058 | udp_len, IPPROTO_UDP, csum); | ||
1059 | if (uh->check == 0) | ||
1060 | uh->check = CSUM_MANGLED_0; | ||
1061 | } else { | ||
1062 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
1063 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
1064 | skb->csum_offset = offsetof(struct udphdr, check); | ||
1065 | uh->check = ~csum_tcpudp_magic(inet->inet_saddr, | ||
1066 | inet->inet_daddr, | ||
1067 | udp_len, IPPROTO_UDP, 0); | ||
1068 | } | ||
1069 | break; | ||
1070 | |||
1071 | case L2TP_ENCAPTYPE_IP: | ||
1072 | break; | ||
1073 | } | ||
1074 | |||
1075 | l2tp_skb_set_owner_w(skb, sk); | ||
1076 | |||
1077 | l2tp_xmit_core(session, skb, data_len); | ||
1078 | |||
1079 | abort: | ||
1080 | return 0; | ||
1081 | } | ||
1082 | EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | ||
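In the CHECKSUM_PARTIAL branch above, uh->check is seeded with ~csum_tcpudp_magic(..., 0), i.e. the folded but non-inverted pseudo-header sum, and the device completes the checksum over the payload. A user-space sketch of that pseudo-header sum (addresses and length are placeholders):

```c
#include <stdint.h>
#include <stdio.h>

/* Sum the UDP pseudo-header (saddr, daddr, proto, length) in
 * ones'-complement arithmetic, as csum_tcpudp_nofold() does
 * before folding. */
static uint32_t pseudo_sum(uint32_t saddr, uint32_t daddr,
			   uint16_t len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	return sum;
}

int main(void)
{
	uint32_t sum = pseudo_sum(0xc0a80001, 0xc0a80002, 64, 17);

	/* The CHECKSUM_PARTIAL seed is this sum, folded but NOT
	 * inverted; the hardware adds the rest of the datagram. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	printf("seed=%04x\n", (uint16_t)sum);
	return 0;
}
```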
1083 | |||
1084 | /***************************************************************************** | ||
1085 | * Tunnel and session create/destroy. | ||
1086 | *****************************************************************************/ | ||
1087 | |||
1088 | /* Tunnel socket destruct hook. | ||
1089 | * The tunnel context is deleted only when all session sockets have been | ||
1090 | * closed. | ||
1091 | */ | ||
1092 | void l2tp_tunnel_destruct(struct sock *sk) | ||
1093 | { | ||
1094 | struct l2tp_tunnel *tunnel; | ||
1095 | |||
1096 | tunnel = sk->sk_user_data; | ||
1097 | if (tunnel == NULL) | ||
1098 | goto end; | ||
1099 | |||
1100 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1101 | "%s: closing...\n", tunnel->name); | ||
1102 | |||
1103 | /* Close all sessions */ | ||
1104 | l2tp_tunnel_closeall(tunnel); | ||
1105 | |||
1106 | switch (tunnel->encap) { | ||
1107 | case L2TP_ENCAPTYPE_UDP: | ||
1108 | /* No longer an encapsulation socket. See net/ipv4/udp.c */ | ||
1109 | (udp_sk(sk))->encap_type = 0; | ||
1110 | (udp_sk(sk))->encap_rcv = NULL; | ||
1111 | break; | ||
1112 | case L2TP_ENCAPTYPE_IP: | ||
1113 | break; | ||
1114 | } | ||
1115 | |||
1116 | /* Remove hooks into tunnel socket */ | ||
1117 | tunnel->sock = NULL; | ||
1118 | sk->sk_destruct = tunnel->old_sk_destruct; | ||
1119 | sk->sk_user_data = NULL; | ||
1120 | |||
1121 | /* Call the original destructor */ | ||
1122 | if (sk->sk_destruct) | ||
1123 | (*sk->sk_destruct)(sk); | ||
1124 | |||
1125 | /* We're finished with the socket */ | ||
1126 | l2tp_tunnel_dec_refcount(tunnel); | ||
1127 | |||
1128 | end: | ||
1129 | return; | ||
1130 | } | ||
1131 | EXPORT_SYMBOL(l2tp_tunnel_destruct); | ||
1132 | |||
1133 | /* When the tunnel is closed, all the attached sessions need to go too. | ||
1134 | */ | ||
1135 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) | ||
1136 | { | ||
1137 | int hash; | ||
1138 | struct hlist_node *walk; | ||
1139 | struct hlist_node *tmp; | ||
1140 | struct l2tp_session *session; | ||
1141 | |||
1142 | BUG_ON(tunnel == NULL); | ||
1143 | |||
1144 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1145 | "%s: closing all sessions...\n", tunnel->name); | ||
1146 | |||
1147 | write_lock_bh(&tunnel->hlist_lock); | ||
1148 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
1149 | again: | ||
1150 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { | ||
1151 | session = hlist_entry(walk, struct l2tp_session, hlist); | ||
1152 | |||
1153 | PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1154 | "%s: closing session\n", session->name); | ||
1155 | |||
1156 | hlist_del_init(&session->hlist); | ||
1157 | |||
1158 | /* Since we should hold the sock lock while | ||
1159 | * doing any unbinding, we need to release the | ||
1160 | * lock we're holding before taking that lock. | ||
1161 | * Hold a reference to the session so it doesn't | ||
1162 | * disappear as we're jumping between locks. | ||
1163 | */ | ||
1164 | if (session->ref != NULL) | ||
1165 | (*session->ref)(session); | ||
1166 | |||
1167 | write_unlock_bh(&tunnel->hlist_lock); | ||
1168 | |||
1169 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1170 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1171 | |||
1172 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1173 | hlist_del_init_rcu(&session->global_hlist); | ||
1174 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1175 | synchronize_rcu(); | ||
1176 | } | ||
1177 | |||
1178 | if (session->session_close != NULL) | ||
1179 | (*session->session_close)(session); | ||
1180 | |||
1181 | if (session->deref != NULL) | ||
1182 | (*session->deref)(session); | ||
1183 | |||
1184 | write_lock_bh(&tunnel->hlist_lock); | ||
1185 | |||
1186 | /* Now restart from the beginning of this hash | ||
1187 | * chain. We always remove a session from the | ||
1188 | * list so we are guaranteed to make forward | ||
1189 | * progress. | ||
1190 | */ | ||
1191 | goto again; | ||
1192 | } | ||
1193 | } | ||
1194 | write_unlock_bh(&tunnel->hlist_lock); | ||
1195 | } | ||
1196 | EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | ||
1197 | |||
1198 | /* Really kill the tunnel. | ||
1199 | * Come here only when all sessions have been cleared from the tunnel. | ||
1200 | */ | ||
1201 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) | ||
1202 | { | ||
1203 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1204 | |||
1205 | BUG_ON(atomic_read(&tunnel->ref_count) != 0); | ||
1206 | BUG_ON(tunnel->sock != NULL); | ||
1207 | |||
1208 | PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO, | ||
1209 | "%s: free...\n", tunnel->name); | ||
1210 | |||
1211 | /* Remove from tunnel list */ | ||
1212 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1213 | list_del_rcu(&tunnel->list); | ||
1214 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1215 | synchronize_rcu(); | ||
1216 | |||
1217 | atomic_dec(&l2tp_tunnel_count); | ||
1218 | kfree(tunnel); | ||
1219 | } | ||
1220 | EXPORT_SYMBOL_GPL(l2tp_tunnel_free); | ||
1221 | |||
1222 | /* Create a socket for the tunnel, if one isn't set up by | ||
1223 | * userspace. This is used for static tunnels where there is no | ||
1224 | * managing L2TP daemon. | ||
1225 | */ | ||
1226 | static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp) | ||
1227 | { | ||
1228 | int err = -EINVAL; | ||
1229 | struct sockaddr_in udp_addr; | ||
1230 | struct sockaddr_l2tpip ip_addr; | ||
1231 | struct socket *sock = NULL; | ||
1232 | |||
1233 | switch (cfg->encap) { | ||
1234 | case L2TP_ENCAPTYPE_UDP: | ||
1235 | err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); | ||
1236 | if (err < 0) | ||
1237 | goto out; | ||
1238 | |||
1239 | sock = *sockp; | ||
1240 | |||
1241 | memset(&udp_addr, 0, sizeof(udp_addr)); | ||
1242 | udp_addr.sin_family = AF_INET; | ||
1243 | udp_addr.sin_addr = cfg->local_ip; | ||
1244 | udp_addr.sin_port = htons(cfg->local_udp_port); | ||
1245 | err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr)); | ||
1246 | if (err < 0) | ||
1247 | goto out; | ||
1248 | |||
1249 | udp_addr.sin_family = AF_INET; | ||
1250 | udp_addr.sin_addr = cfg->peer_ip; | ||
1251 | udp_addr.sin_port = htons(cfg->peer_udp_port); | ||
1252 | err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0); | ||
1253 | if (err < 0) | ||
1254 | goto out; | ||
1255 | |||
1256 | if (!cfg->use_udp_checksums) | ||
1257 | sock->sk->sk_no_check = UDP_CSUM_NOXMIT; | ||
1258 | |||
1259 | break; | ||
1260 | |||
1261 | case L2TP_ENCAPTYPE_IP: | ||
1262 | err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); | ||
1263 | if (err < 0) | ||
1264 | goto out; | ||
1265 | |||
1266 | sock = *sockp; | ||
1267 | |||
1268 | memset(&ip_addr, 0, sizeof(ip_addr)); | ||
1269 | ip_addr.l2tp_family = AF_INET; | ||
1270 | ip_addr.l2tp_addr = cfg->local_ip; | ||
1271 | ip_addr.l2tp_conn_id = tunnel_id; | ||
1272 | err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr)); | ||
1273 | if (err < 0) | ||
1274 | goto out; | ||
1275 | |||
1276 | ip_addr.l2tp_family = AF_INET; | ||
1277 | ip_addr.l2tp_addr = cfg->peer_ip; | ||
1278 | ip_addr.l2tp_conn_id = peer_tunnel_id; | ||
1279 | err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0); | ||
1280 | if (err < 0) | ||
1281 | goto out; | ||
1282 | |||
1283 | break; | ||
1284 | |||
1285 | default: | ||
1286 | goto out; | ||
1287 | } | ||
1288 | |||
1289 | out: | ||
1290 | if ((err < 0) && sock) { | ||
1291 | sock_release(sock); | ||
1292 | *sockp = NULL; | ||
1293 | } | ||
1294 | |||
1295 | return err; | ||
1296 | } | ||
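When fd >= 0 is passed to l2tp_tunnel_create() below, the managing daemon has already done the user-space equivalent of the kernel_bind()/kernel_connect() sequence above. A hedged sketch of that side (addresses are placeholders):

```c
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Create, bind and connect the UDP tunnel socket that a managing
 * daemon would hand to the kernel as "fd". Endpoints are placeholders. */
int main(void)
{
	struct sockaddr_in local = { .sin_family = AF_INET };
	struct sockaddr_in peer  = { .sin_family = AF_INET };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	local.sin_port = htons(1701);		/* well-known L2TP port */
	inet_pton(AF_INET, "192.0.2.1", &local.sin_addr);
	peer.sin_port = htons(1701);
	inet_pton(AF_INET, "192.0.2.2", &peer.sin_addr);

	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0 ||
	    connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		close(fd);
		return 1;
	}
	/* fd would now be passed in the netlink TUNNEL_CREATE request */
	return 0;
}
```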
1297 | |||
1298 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) | ||
1299 | { | ||
1300 | struct l2tp_tunnel *tunnel = NULL; | ||
1301 | int err; | ||
1302 | struct socket *sock = NULL; | ||
1303 | struct sock *sk = NULL; | ||
1304 | struct l2tp_net *pn; | ||
1305 | enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP; | ||
1306 | |||
1307 | /* Get the tunnel socket from the fd, which was opened by | ||
1308 | * the userspace L2TP daemon. If not specified, create a | ||
1309 | * kernel socket. | ||
1310 | */ | ||
1311 | if (fd < 0) { | ||
1312 | err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock); | ||
1313 | if (err < 0) | ||
1314 | goto err; | ||
1315 | } else { | ||
1316 | err = -EBADF; | ||
1317 | sock = sockfd_lookup(fd, &err); | ||
1318 | if (!sock) { | ||
1319 | printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", | ||
1320 | tunnel_id, fd, err); | ||
1321 | goto err; | ||
1322 | } | ||
1323 | } | ||
1324 | |||
1325 | sk = sock->sk; | ||
1326 | |||
1327 | if (cfg != NULL) | ||
1328 | encap = cfg->encap; | ||
1329 | |||
1330 | /* Quick sanity checks */ | ||
1331 | switch (encap) { | ||
1332 | case L2TP_ENCAPTYPE_UDP: | ||
1333 | err = -EPROTONOSUPPORT; | ||
1334 | if (sk->sk_protocol != IPPROTO_UDP) { | ||
1335 | printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | ||
1336 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); | ||
1337 | goto err; | ||
1338 | } | ||
1339 | break; | ||
1340 | case L2TP_ENCAPTYPE_IP: | ||
1341 | err = -EPROTONOSUPPORT; | ||
1342 | if (sk->sk_protocol != IPPROTO_L2TP) { | ||
1343 | printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n", | ||
1344 | tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); | ||
1345 | goto err; | ||
1346 | } | ||
1347 | break; | ||
1348 | } | ||
1349 | |||
1350 | /* Check if this socket has already been prepped */ | ||
1351 | tunnel = (struct l2tp_tunnel *)sk->sk_user_data; | ||
1352 | if (tunnel != NULL) { | ||
1353 | /* This socket has already been prepped */ | ||
1354 | err = -EBUSY; | ||
1355 | goto err; | ||
1356 | } | ||
1357 | |||
1358 | tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL); | ||
1359 | if (tunnel == NULL) { | ||
1360 | err = -ENOMEM; | ||
1361 | goto err; | ||
1362 | } | ||
1363 | |||
1364 | tunnel->version = version; | ||
1365 | tunnel->tunnel_id = tunnel_id; | ||
1366 | tunnel->peer_tunnel_id = peer_tunnel_id; | ||
1367 | tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS; | ||
1368 | |||
1369 | tunnel->magic = L2TP_TUNNEL_MAGIC; | ||
1370 | sprintf(&tunnel->name[0], "tunl %u", tunnel_id); | ||
1371 | rwlock_init(&tunnel->hlist_lock); | ||
1372 | |||
1373 | /* The net we belong to */ | ||
1374 | tunnel->l2tp_net = net; | ||
1375 | pn = l2tp_pernet(net); | ||
1376 | |||
1377 | if (cfg != NULL) | ||
1378 | tunnel->debug = cfg->debug; | ||
1379 | |||
1380 | /* Remember the encapsulation type for this tunnel */ | ||
1381 | tunnel->encap = encap; | ||
1382 | if (encap == L2TP_ENCAPTYPE_UDP) { | ||
1383 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | ||
1384 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; | ||
1385 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; | ||
1386 | } | ||
1387 | |||
1388 | sk->sk_user_data = tunnel; | ||
1389 | |||
1390 | /* Hook on the tunnel socket destructor so that we can cleanup | ||
1391 | * if the tunnel socket goes away. | ||
1392 | */ | ||
1393 | tunnel->old_sk_destruct = sk->sk_destruct; | ||
1394 | sk->sk_destruct = &l2tp_tunnel_destruct; | ||
1395 | tunnel->sock = sk; | ||
1396 | sk->sk_allocation = GFP_ATOMIC; | ||
1397 | |||
1398 | /* Add tunnel to our list */ | ||
1399 | INIT_LIST_HEAD(&tunnel->list); | ||
1400 | spin_lock_bh(&pn->l2tp_tunnel_list_lock); | ||
1401 | list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); | ||
1402 | spin_unlock_bh(&pn->l2tp_tunnel_list_lock); | ||
1403 | synchronize_rcu(); | ||
1404 | atomic_inc(&l2tp_tunnel_count); | ||
1405 | |||
1406 | /* Bump the reference count. The tunnel context is deleted | ||
1407 | * only when this drops to zero. | ||
1408 | */ | ||
1409 | l2tp_tunnel_inc_refcount(tunnel); | ||
1410 | |||
1411 | err = 0; | ||
1412 | err: | ||
1413 | if (tunnelp) | ||
1414 | *tunnelp = tunnel; | ||
1415 | |||
1416 | /* If tunnel's socket was created by the kernel, it doesn't | ||
1417 | * have a file. | ||
1418 | */ | ||
1419 | if (sock && sock->file) | ||
1420 | sockfd_put(sock); | ||
1421 | |||
1422 | return err; | ||
1423 | } | ||
1424 | EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | ||
1425 | |||
1426 | /* This function is used by the netlink TUNNEL_DELETE command. | ||
1427 | */ | ||
1428 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | ||
1429 | { | ||
1430 | int err = 0; | ||
1431 | struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; | ||
1432 | |||
1433 | /* Force the tunnel socket to close. This will eventually | ||
1434 | * cause the tunnel to be deleted via the normal socket close | ||
1435 | * mechanisms when userspace closes the tunnel socket. | ||
1436 | */ | ||
1437 | if (sock != NULL) { | ||
1438 | err = inet_shutdown(sock, 2); | ||
1439 | |||
1440 | /* If the tunnel's socket was created by the kernel, | ||
1441 | * close the socket here since the socket was not | ||
1442 | * created by userspace. | ||
1443 | */ | ||
1444 | if (sock->file == NULL) | ||
1445 | err = inet_release(sock); | ||
1446 | } | ||
1447 | |||
1448 | return err; | ||
1449 | } | ||
1450 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | ||
1451 | |||
1452 | /* Really kill the session. | ||
1453 | */ | ||
1454 | void l2tp_session_free(struct l2tp_session *session) | ||
1455 | { | ||
1456 | struct l2tp_tunnel *tunnel; | ||
1457 | |||
1458 | BUG_ON(atomic_read(&session->ref_count) != 0); | ||
1459 | |||
1460 | tunnel = session->tunnel; | ||
1461 | if (tunnel != NULL) { | ||
1462 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
1463 | |||
1464 | /* Delete the session from the hash */ | ||
1465 | write_lock_bh(&tunnel->hlist_lock); | ||
1466 | hlist_del_init(&session->hlist); | ||
1467 | write_unlock_bh(&tunnel->hlist_lock); | ||
1468 | |||
1469 | /* Unlink from the global hash if not L2TPv2 */ | ||
1470 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1471 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1472 | |||
1473 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1474 | hlist_del_init_rcu(&session->global_hlist); | ||
1475 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1476 | synchronize_rcu(); | ||
1477 | } | ||
1478 | |||
1479 | if (session->session_id != 0) | ||
1480 | atomic_dec(&l2tp_session_count); | ||
1481 | |||
1482 | sock_put(tunnel->sock); | ||
1483 | |||
1484 | /* This will delete the tunnel context if this | ||
1485 | * is the last session on the tunnel. | ||
1486 | */ | ||
1487 | session->tunnel = NULL; | ||
1488 | l2tp_tunnel_dec_refcount(tunnel); | ||
1489 | } | ||
1490 | |||
1491 | kfree(session); | ||
1492 | |||
1493 | return; | ||
1494 | } | ||
1495 | EXPORT_SYMBOL_GPL(l2tp_session_free); | ||
1496 | |||
1497 | /* This function is used by the netlink SESSION_DELETE command and by | ||
1498 | * pseudowire modules. | ||
1499 | */ | ||
1500 | int l2tp_session_delete(struct l2tp_session *session) | ||
1501 | { | ||
1502 | if (session->session_close != NULL) | ||
1503 | (*session->session_close)(session); | ||
1504 | |||
1505 | l2tp_session_dec_refcount(session); | ||
1506 | |||
1507 | return 0; | ||
1508 | } | ||
1509 | EXPORT_SYMBOL_GPL(l2tp_session_delete); | ||
1510 | |||
1511 | |||
1512 | /* We come here whenever a session's send_seq, cookie_len or | ||
1513 | * l2specific_len parameters are set. | ||
1514 | */ | ||
1515 | void l2tp_session_set_header_len(struct l2tp_session *session, int version) | ||
1516 | { | ||
1517 | if (version == L2TP_HDR_VER_2) { | ||
1518 | session->hdr_len = 6; | ||
1519 | if (session->send_seq) | ||
1520 | session->hdr_len += 4; | ||
1521 | } else { | ||
1522 | session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset; | ||
1523 | if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) | ||
1524 | session->hdr_len += 4; | ||
1525 | } | ||
1526 | |||
1527 | } | ||
1528 | EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); | ||
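As a worked check of the sizing above: a v2 session with sequencing gets 6 + 4 = 10 bytes; a v3 session over UDP with a 4-byte cookie, the 4-byte default sublayer and no offset gets 4 + 4 + 4 + 0 + 4 = 16. A small stand-alone mirror of the computation (not the kernel function itself):

```c
#include <stdio.h>

/* Mirror of l2tp_session_set_header_len() for quick sizing checks. */
static int hdr_len(int version, int send_seq, int udp_encap,
		   int cookie_len, int l2specific_len, int offset)
{
	if (version == 2)
		return 6 + (send_seq ? 4 : 0);
	return 4 + cookie_len + l2specific_len + offset +
	       (udp_encap ? 4 : 0);
}

int main(void)
{
	printf("v2 + seq: %d\n", hdr_len(2, 1, 1, 0, 0, 0));	/* 10 */
	printf("v3 / UDP: %d\n", hdr_len(3, 1, 1, 4, 4, 0));	/* 16 */
	return 0;
}
```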
1529 | |||
1530 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
1531 | { | ||
1532 | struct l2tp_session *session; | ||
1533 | |||
1534 | session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); | ||
1535 | if (session != NULL) { | ||
1536 | session->magic = L2TP_SESSION_MAGIC; | ||
1537 | session->tunnel = tunnel; | ||
1538 | |||
1539 | session->session_id = session_id; | ||
1540 | session->peer_session_id = peer_session_id; | ||
1541 | session->nr = 1; | ||
1542 | |||
1543 | sprintf(&session->name[0], "sess %u/%u", | ||
1544 | tunnel->tunnel_id, session->session_id); | ||
1545 | |||
1546 | skb_queue_head_init(&session->reorder_q); | ||
1547 | |||
1548 | INIT_HLIST_NODE(&session->hlist); | ||
1549 | INIT_HLIST_NODE(&session->global_hlist); | ||
1550 | |||
1551 | /* Inherit debug options from tunnel */ | ||
1552 | session->debug = tunnel->debug; | ||
1553 | |||
1554 | if (cfg) { | ||
1555 | session->pwtype = cfg->pw_type; | ||
1556 | session->debug = cfg->debug; | ||
1557 | session->mtu = cfg->mtu; | ||
1558 | session->mru = cfg->mru; | ||
1559 | session->send_seq = cfg->send_seq; | ||
1560 | session->recv_seq = cfg->recv_seq; | ||
1561 | session->lns_mode = cfg->lns_mode; | ||
1562 | session->reorder_timeout = cfg->reorder_timeout; | ||
1563 | session->offset = cfg->offset; | ||
1564 | session->l2specific_type = cfg->l2specific_type; | ||
1565 | session->l2specific_len = cfg->l2specific_len; | ||
1566 | session->cookie_len = cfg->cookie_len; | ||
1567 | memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len); | ||
1568 | session->peer_cookie_len = cfg->peer_cookie_len; | ||
1569 | memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); | ||
1570 | } | ||
1571 | |||
1572 | if (tunnel->version == L2TP_HDR_VER_2) | ||
1573 | session->build_header = l2tp_build_l2tpv2_header; | ||
1574 | else | ||
1575 | session->build_header = l2tp_build_l2tpv3_header; | ||
1576 | |||
1577 | l2tp_session_set_header_len(session, tunnel->version); | ||
1578 | |||
1579 | /* Bump the reference count. The session context is deleted | ||
1580 | * only when this drops to zero. | ||
1581 | */ | ||
1582 | l2tp_session_inc_refcount(session); | ||
1583 | l2tp_tunnel_inc_refcount(tunnel); | ||
1584 | |||
1585 | /* Ensure tunnel socket isn't deleted */ | ||
1586 | sock_hold(tunnel->sock); | ||
1587 | |||
1588 | /* Add session to the tunnel's hash list */ | ||
1589 | write_lock_bh(&tunnel->hlist_lock); | ||
1590 | hlist_add_head(&session->hlist, | ||
1591 | l2tp_session_id_hash(tunnel, session_id)); | ||
1592 | write_unlock_bh(&tunnel->hlist_lock); | ||
1593 | |||
1594 | /* And to the global session list if L2TPv3 */ | ||
1595 | if (tunnel->version != L2TP_HDR_VER_2) { | ||
1596 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | ||
1597 | |||
1598 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1599 | hlist_add_head_rcu(&session->global_hlist, | ||
1600 | l2tp_session_id_hash_2(pn, session_id)); | ||
1601 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1602 | synchronize_rcu(); | ||
1603 | } | ||
1604 | |||
1605 | /* Ignore management session in session count value */ | ||
1606 | if (session->session_id != 0) | ||
1607 | atomic_inc(&l2tp_session_count); | ||
1608 | } | ||
1609 | |||
1610 | return session; | ||
1611 | } | ||
1612 | EXPORT_SYMBOL_GPL(l2tp_session_create); | ||
1613 | |||
1614 | /***************************************************************************** | ||
1615 | * Init and cleanup | ||
1616 | *****************************************************************************/ | ||
1617 | |||
1618 | static __net_init int l2tp_init_net(struct net *net) | ||
1619 | { | ||
1620 | struct l2tp_net *pn = net_generic(net, l2tp_net_id); | ||
1621 | int hash; | ||
1622 | |||
1623 | INIT_LIST_HEAD(&pn->l2tp_tunnel_list); | ||
1624 | spin_lock_init(&pn->l2tp_tunnel_list_lock); | ||
1625 | |||
1626 | for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) | ||
1627 | INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); | ||
1628 | |||
1629 | spin_lock_init(&pn->l2tp_session_hlist_lock); | ||
1630 | |||
1631 | return 0; | ||
1632 | } | ||
1633 | |||
1634 | static struct pernet_operations l2tp_net_ops = { | ||
1635 | .init = l2tp_init_net, | ||
1636 | .id = &l2tp_net_id, | ||
1637 | .size = sizeof(struct l2tp_net), | ||
1638 | }; | ||
1639 | |||
1640 | static int __init l2tp_init(void) | ||
1641 | { | ||
1642 | int rc = 0; | ||
1643 | |||
1644 | rc = register_pernet_device(&l2tp_net_ops); | ||
1645 | if (rc) | ||
1646 | goto out; | ||
1647 | |||
1648 | printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION); | ||
1649 | |||
1650 | out: | ||
1651 | return rc; | ||
1652 | } | ||
1653 | |||
1654 | static void __exit l2tp_exit(void) | ||
1655 | { | ||
1656 | unregister_pernet_device(&l2tp_net_ops); | ||
1657 | } | ||
1658 | |||
1659 | module_init(l2tp_init); | ||
1660 | module_exit(l2tp_exit); | ||
1661 | |||
1662 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
1663 | MODULE_DESCRIPTION("L2TP core"); | ||
1664 | MODULE_LICENSE("GPL"); | ||
1665 | MODULE_VERSION(L2TP_DRV_VERSION); | ||
1666 | |||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h new file mode 100644 index 000000000000..f0f318edd3f1 --- /dev/null +++ b/net/l2tp/l2tp_core.h | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * L2TP internal definitions. | ||
3 | * | ||
4 | * Copyright (c) 2008,2009 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef _L2TP_CORE_H_ | ||
12 | #define _L2TP_CORE_H_ | ||
13 | |||
14 | /* Just some random numbers */ | ||
15 | #define L2TP_TUNNEL_MAGIC 0x42114DDA | ||
16 | #define L2TP_SESSION_MAGIC 0x0C04EB7D | ||
17 | |||
18 | /* Per tunnel, session hash table size */ | ||
19 | #define L2TP_HASH_BITS 4 | ||
20 | #define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS) | ||
21 | |||
22 | /* System-wide, session hash table size */ | ||
23 | #define L2TP_HASH_BITS_2 8 | ||
24 | #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2) | ||
25 | |||
26 | /* Debug message categories for the DEBUG socket option */ | ||
27 | enum { | ||
28 | L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if | ||
29 | * compiled in) */ | ||
30 | L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel | ||
31 | * interface */ | ||
32 | L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */ | ||
33 | L2TP_MSG_DATA = (1 << 3), /* data packets */ | ||
34 | }; | ||
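These bits are meant to be OR-ed together into the per-tunnel or per-session debug mask; a trivial sketch (values copied from the enum above so it stands alone):

```c
#include <stdio.h>

/* Values copied from the L2TP_MSG_* enum so the sketch compiles alone. */
enum {
	L2TP_MSG_DEBUG   = (1 << 0),
	L2TP_MSG_CONTROL = (1 << 1),
	L2TP_MSG_SEQ     = (1 << 2),
	L2TP_MSG_DATA    = (1 << 3),
};

int main(void)
{
	/* Trace control-plane and sequence-number events only. */
	int debug = L2TP_MSG_CONTROL | L2TP_MSG_SEQ;

	printf("debug mask = 0x%x\n", debug);	/* prints 0x6 */
	return 0;
}
```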
35 | |||
36 | struct sk_buff; | ||
37 | |||
38 | struct l2tp_stats { | ||
39 | u64 tx_packets; | ||
40 | u64 tx_bytes; | ||
41 | u64 tx_errors; | ||
42 | u64 rx_packets; | ||
43 | u64 rx_bytes; | ||
44 | u64 rx_seq_discards; | ||
45 | u64 rx_oos_packets; | ||
46 | u64 rx_errors; | ||
47 | u64 rx_cookie_discards; | ||
48 | }; | ||
49 | |||
50 | struct l2tp_tunnel; | ||
51 | |||
52 | /* Describes a session. Contains the information needed to identify | ||
53 | * incoming packets and to transmit outgoing ones. | ||
54 | */ | ||
55 | struct l2tp_session_cfg { | ||
56 | enum l2tp_pwtype pw_type; | ||
57 | unsigned data_seq:2; /* data sequencing level | ||
58 | * 0 => none, 1 => IP only, | ||
59 | * 2 => all | ||
60 | */ | ||
61 | unsigned recv_seq:1; /* expect receive packets with | ||
62 | * sequence numbers? */ | ||
63 | unsigned send_seq:1; /* send packets with sequence | ||
64 | * numbers? */ | ||
65 | unsigned lns_mode:1; /* behave as LNS? LAC enables | ||
66 | * sequence numbers under | ||
67 | * control of LNS. */ | ||
68 | int debug; /* bitmask of debug message | ||
69 | * categories */ | ||
70 | u16 vlan_id; /* VLAN pseudowire only */ | ||
71 | u16 offset; /* offset to payload */ | ||
72 | u16 l2specific_len; /* Layer 2 specific length */ | ||
73 | u16 l2specific_type; /* Layer 2 specific type */ | ||
74 | u8 cookie[8]; /* optional cookie */ | ||
75 | int cookie_len; /* 0, 4 or 8 bytes */ | ||
76 | u8 peer_cookie[8]; /* peer's cookie */ | ||
77 | int peer_cookie_len; /* 0, 4 or 8 bytes */ | ||
78 | int reorder_timeout; /* configured reorder timeout | ||
79 | * (in jiffies) */ | ||
80 | int mtu; | ||
81 | int mru; | ||
82 | char *ifname; | ||
83 | }; | ||
84 | |||
85 | struct l2tp_session { | ||
86 | int magic; /* should be | ||
87 | * L2TP_SESSION_MAGIC */ | ||
88 | |||
89 | struct l2tp_tunnel *tunnel; /* back pointer to tunnel | ||
90 | * context */ | ||
91 | u32 session_id; | ||
92 | u32 peer_session_id; | ||
93 | u8 cookie[8]; | ||
94 | int cookie_len; | ||
95 | u8 peer_cookie[8]; | ||
96 | int peer_cookie_len; | ||
97 | u16 offset; /* offset from end of L2TP header | ||
98 | to beginning of data */ | ||
99 | u16 l2specific_len; | ||
100 | u16 l2specific_type; | ||
101 | u16 hdr_len; | ||
102 | u32 nr; /* session Nr state (receive) */ | ||
103 | u32 ns; /* session Ns state (send) */ | ||
104 | struct sk_buff_head reorder_q; /* receive reorder queue */ | ||
105 | struct hlist_node hlist; /* Hash list node */ | ||
106 | atomic_t ref_count; | ||
107 | |||
108 | char name[32]; /* for logging */ | ||
109 | char ifname[IFNAMSIZ]; | ||
110 | unsigned data_seq:2; /* data sequencing level | ||
111 | * 0 => none, 1 => IP only, | ||
112 | * 2 => all | ||
113 | */ | ||
114 | unsigned recv_seq:1; /* expect receive packets with | ||
115 | * sequence numbers? */ | ||
116 | unsigned send_seq:1; /* send packets with sequence | ||
117 | * numbers? */ | ||
118 | unsigned lns_mode:1; /* behave as LNS? LAC enables | ||
119 | * sequence numbers under | ||
120 | * control of LNS. */ | ||
121 | int debug; /* bitmask of debug message | ||
122 | * categories */ | ||
123 | int reorder_timeout; /* configured reorder timeout | ||
124 | * (in jiffies) */ | ||
125 | int mtu; | ||
126 | int mru; | ||
127 | enum l2tp_pwtype pwtype; | ||
128 | struct l2tp_stats stats; | ||
129 | struct hlist_node global_hlist; /* Global hash list node */ | ||
130 | |||
131 | int (*build_header)(struct l2tp_session *session, void *buf); | ||
132 | void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len); | ||
133 | void (*session_close)(struct l2tp_session *session); | ||
134 | void (*ref)(struct l2tp_session *session); | ||
135 | void (*deref)(struct l2tp_session *session); | ||
136 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
137 | void (*show)(struct seq_file *m, void *priv); | ||
138 | #endif | ||
139 | uint8_t priv[0]; /* private data */ | ||
140 | }; | ||
141 | |||
142 | /* Describes the tunnel. It contains info to track all the associated | ||
143 | * sessions so incoming packets can be sorted out. | ||
144 | */ | ||
145 | struct l2tp_tunnel_cfg { | ||
146 | int debug; /* bitmask of debug message | ||
147 | * categories */ | ||
148 | enum l2tp_encap_type encap; | ||
149 | |||
150 | /* Used only for kernel-created sockets */ | ||
151 | struct in_addr local_ip; | ||
152 | struct in_addr peer_ip; | ||
153 | u16 local_udp_port; | ||
154 | u16 peer_udp_port; | ||
155 | unsigned int use_udp_checksums:1; | ||
156 | }; | ||
157 | |||
158 | struct l2tp_tunnel { | ||
159 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ | ||
160 | rwlock_t hlist_lock; /* protect session_hlist */ | ||
161 | struct hlist_head session_hlist[L2TP_HASH_SIZE]; | ||
162 | /* hashed list of sessions, | ||
163 | * hashed by id */ | ||
164 | u32 tunnel_id; | ||
165 | u32 peer_tunnel_id; | ||
166 | int version; /* 2=>L2TPv2, 3=>L2TPv3 */ | ||
167 | |||
168 | char name[20]; /* for logging */ | ||
169 | int debug; /* bitmask of debug message | ||
170 | * categories */ | ||
171 | enum l2tp_encap_type encap; | ||
172 | struct l2tp_stats stats; | ||
173 | |||
174 | struct list_head list; /* Keep a list of all tunnels */ | ||
175 | struct net *l2tp_net; /* the net we belong to */ | ||
176 | |||
177 | atomic_t ref_count; | ||
178 | #ifdef CONFIG_DEBUG_FS | ||
179 | void (*show)(struct seq_file *m, void *arg); | ||
180 | #endif | ||
181 | int (*recv_payload_hook)(struct sk_buff *skb); | ||
182 | void (*old_sk_destruct)(struct sock *); | ||
183 | struct sock *sock; /* Parent socket */ | ||
184 | int fd; | ||
185 | |||
186 | uint8_t priv[0]; /* private data */ | ||
187 | }; | ||
188 | |||
189 | struct l2tp_nl_cmd_ops { | ||
190 | int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | ||
191 | int (*session_delete)(struct l2tp_session *session); | ||
192 | }; | ||
193 | |||
194 | static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel) | ||
195 | { | ||
196 | return &tunnel->priv[0]; | ||
197 | } | ||
198 | |||
199 | static inline void *l2tp_session_priv(struct l2tp_session *session) | ||
200 | { | ||
201 | return &session->priv[0]; | ||
202 | } | ||
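l2tp_tunnel_create() and l2tp_session_create() allocate the core struct and the pseudowire's private data in one block, with priv[0] as the trailing array these helpers point into. A stand-alone illustration of the pattern (struct names here are hypothetical; priv[0] is the GNU zero-length-array idiom the kernel uses):

```c
#include <stdio.h>
#include <stdlib.h>

/* One allocation holds the core struct plus per-pseudowire private
 * data right behind it, as in kzalloc(sizeof(*s) + priv_size, ...). */
struct base {
	int magic;
	unsigned char priv[0];	/* private data follows */
};

struct pw_priv {		/* hypothetical pseudowire state */
	int pw_counter;
};

int main(void)
{
	struct base *s = calloc(1, sizeof(*s) + sizeof(struct pw_priv));
	struct pw_priv *p;

	if (s == NULL)
		return 1;
	p = (struct pw_priv *)&s->priv[0];	/* what *_priv() returns */
	p->pw_counter = 42;
	printf("priv counter = %d\n", p->pw_counter);
	free(s);
	return 0;
}
```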
203 | |||
204 | static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) | ||
205 | { | ||
206 | struct l2tp_tunnel *tunnel; | ||
207 | |||
208 | if (sk == NULL) | ||
209 | return NULL; | ||
210 | |||
211 | sock_hold(sk); | ||
212 | tunnel = (struct l2tp_tunnel *)(sk->sk_user_data); | ||
213 | if (tunnel == NULL) { | ||
214 | sock_put(sk); | ||
215 | goto out; | ||
216 | } | ||
217 | |||
218 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | ||
219 | |||
220 | out: | ||
221 | return tunnel; | ||
222 | } | ||
223 | |||
224 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); | ||
225 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); | ||
226 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); | ||
227 | extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); | ||
228 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); | ||
229 | |||
230 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); | ||
231 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | ||
232 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | ||
233 | extern int l2tp_session_delete(struct l2tp_session *session); | ||
234 | extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | ||
235 | extern void l2tp_session_free(struct l2tp_session *session); | ||
236 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); | ||
237 | extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb)); | ||
238 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | ||
239 | |||
240 | extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len); | ||
241 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); | ||
242 | extern void l2tp_tunnel_destruct(struct sock *sk); | ||
243 | extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
244 | extern void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
245 | |||
246 | extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); | ||
247 | extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); | ||
248 | |||
249 | /* Tunnel reference counts. Incremented per session that is added to | ||
250 | * the tunnel. | ||
251 | */ | ||
252 | static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) | ||
253 | { | ||
254 | atomic_inc(&tunnel->ref_count); | ||
255 | } | ||
256 | |||
257 | static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) | ||
258 | { | ||
259 | if (atomic_dec_and_test(&tunnel->ref_count)) | ||
260 | l2tp_tunnel_free(tunnel); | ||
261 | } | ||
262 | #ifdef L2TP_REFCNT_DEBUG | ||
263 | #define l2tp_tunnel_inc_refcount(_t) do { \ | ||
264 | printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
265 | l2tp_tunnel_inc_refcount_1(_t); \ | ||
266 | } while (0) | ||
267 | #define l2tp_tunnel_dec_refcount(_t) do { \ | ||
268 | printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \ | ||
269 | l2tp_tunnel_dec_refcount_1(_t); \ | ||
270 | } while (0) | ||
271 | #else | ||
272 | #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) | ||
273 | #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) | ||
274 | #endif | ||
275 | |||
276 | /* Session reference counts. Incremented when code obtains a reference | ||
277 | * to a session. | ||
278 | */ | ||
279 | static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session) | ||
280 | { | ||
281 | atomic_inc(&session->ref_count); | ||
282 | } | ||
283 | |||
284 | static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session) | ||
285 | { | ||
286 | if (atomic_dec_and_test(&session->ref_count)) | ||
287 | l2tp_session_free(session); | ||
288 | } | ||
289 | |||
290 | #ifdef L2TP_REFCNT_DEBUG | ||
291 | #define l2tp_session_inc_refcount(_s) do { \ | ||
292 | printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ | ||
293 | l2tp_session_inc_refcount_1(_s); \ | ||
294 | } while (0) | ||
295 | #define l2tp_session_dec_refcount(_s) do { \ | ||
296 | printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \ | ||
297 | l2tp_session_dec_refcount_1(_s); \ | ||
298 | } while (0) | ||
299 | #else | ||
300 | #define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s) | ||
301 | #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s) | ||
302 | #endif | ||
303 | |||
304 | #endif /* _L2TP_CORE_H_ */ | ||
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c new file mode 100644 index 000000000000..104ec3b283d4 --- /dev/null +++ b/net/l2tp/l2tp_debugfs.c | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * L2TP subsystem debugfs | ||
3 | * | ||
4 | * Copyright (c) 2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/socket.h> | ||
15 | #include <linux/hash.h> | ||
16 | #include <linux/l2tp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/debugfs.h> | ||
21 | #include <net/sock.h> | ||
22 | #include <net/ip.h> | ||
23 | #include <net/icmp.h> | ||
24 | #include <net/udp.h> | ||
25 | #include <net/inet_common.h> | ||
26 | #include <net/inet_hashtables.h> | ||
27 | #include <net/tcp_states.h> | ||
28 | #include <net/protocol.h> | ||
29 | #include <net/xfrm.h> | ||
30 | #include <net/net_namespace.h> | ||
31 | #include <net/netns/generic.h> | ||
32 | |||
33 | #include "l2tp_core.h" | ||
34 | |||
35 | static struct dentry *rootdir; | ||
36 | static struct dentry *tunnels; | ||
37 | |||
38 | struct l2tp_dfs_seq_data { | ||
39 | struct net *net; | ||
40 | int tunnel_idx; /* current tunnel */ | ||
41 | int session_idx; /* index of session within current tunnel */ | ||
42 | struct l2tp_tunnel *tunnel; | ||
43 | struct l2tp_session *session; /* NULL means get next tunnel */ | ||
44 | }; | ||
45 | |||
46 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) | ||
47 | { | ||
48 | pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); | ||
49 | pd->tunnel_idx++; | ||
50 | } | ||
51 | |||
52 | static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) | ||
53 | { | ||
54 | pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); | ||
55 | pd->session_idx++; | ||
56 | |||
57 | if (pd->session == NULL) { | ||
58 | pd->session_idx = 0; | ||
59 | l2tp_dfs_next_tunnel(pd); | ||
60 | } | ||
61 | |||
62 | } | ||
63 | |||
64 | static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs) | ||
65 | { | ||
66 | struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN; | ||
67 | loff_t pos = *offs; | ||
68 | |||
69 | if (!pos) | ||
70 | goto out; | ||
71 | |||
72 | BUG_ON(m->private == NULL); | ||
73 | pd = m->private; | ||
74 | |||
75 | if (pd->tunnel == NULL) | ||
76 | l2tp_dfs_next_tunnel(pd); | ||
77 | else | ||
78 | l2tp_dfs_next_session(pd); | ||
79 | |||
80 | /* NULL tunnel and session indicates end of list */ | ||
81 | if ((pd->tunnel == NULL) && (pd->session == NULL)) | ||
82 | pd = NULL; | ||
83 | |||
84 | out: | ||
85 | return pd; | ||
86 | } | ||
87 | |||
88 | |||
89 | static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
90 | { | ||
91 | (*pos)++; | ||
92 | return NULL; | ||
93 | } | ||
94 | |||
95 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) | ||
96 | { | ||
97 | /* nothing to do */ | ||
98 | } | ||
99 | |||
100 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | ||
101 | { | ||
102 | struct l2tp_tunnel *tunnel = v; | ||
103 | int session_count = 0; | ||
104 | int hash; | ||
105 | struct hlist_node *walk; | ||
106 | struct hlist_node *tmp; | ||
107 | |||
108 | read_lock_bh(&tunnel->hlist_lock); | ||
109 | for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { | ||
110 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { | ||
111 | struct l2tp_session *session; | ||
112 | |||
113 | session = hlist_entry(walk, struct l2tp_session, hlist); | ||
114 | if (session->session_id == 0) | ||
115 | continue; | ||
116 | |||
117 | session_count++; | ||
118 | } | ||
119 | } | ||
120 | read_unlock_bh(&tunnel->hlist_lock); | ||
121 | |||
122 | seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); | ||
123 | if (tunnel->sock) { | ||
124 | struct inet_sock *inet = inet_sk(tunnel->sock); | ||
125 | seq_printf(m, " from %pI4 to %pI4\n", | ||
126 | &inet->inet_saddr, &inet->inet_daddr); | ||
127 | if (tunnel->encap == L2TP_ENCAPTYPE_UDP) | ||
128 | seq_printf(m, " source port %hu, dest port %hu\n", | ||
129 | ntohs(inet->inet_sport), ntohs(inet->inet_dport)); | ||
130 | } | ||
131 | seq_printf(m, " L2TPv%d, %s\n", tunnel->version, | ||
132 | tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" : | ||
133 | tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" : | ||
134 | ""); | ||
135 | seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count, | ||
136 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, | ||
137 | atomic_read(&tunnel->ref_count)); | ||
138 | |||
139 | seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", | ||
140 | tunnel->debug, | ||
141 | (unsigned long long)tunnel->stats.tx_packets, | ||
142 | (unsigned long long)tunnel->stats.tx_bytes, | ||
143 | (unsigned long long)tunnel->stats.tx_errors, | ||
144 | (unsigned long long)tunnel->stats.rx_packets, | ||
145 | (unsigned long long)tunnel->stats.rx_bytes, | ||
146 | (unsigned long long)tunnel->stats.rx_errors); | ||
147 | |||
148 | if (tunnel->show != NULL) | ||
149 | tunnel->show(m, tunnel); | ||
150 | } | ||
151 | |||
152 | static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) | ||
153 | { | ||
154 | struct l2tp_session *session = v; | ||
155 | |||
156 | seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id, | ||
157 | session->peer_session_id, | ||
158 | session->pwtype == L2TP_PWTYPE_ETH ? "ETH" : | ||
159 | session->pwtype == L2TP_PWTYPE_PPP ? "PPP" : | ||
160 | ""); | ||
161 | if (session->send_seq || session->recv_seq) | ||
162 | seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns); | ||
163 | seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count)); | ||
164 | seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n", | ||
165 | session->mtu, session->mru, | ||
166 | session->recv_seq ? 'R' : '-', | ||
167 | session->send_seq ? 'S' : '-', | ||
168 | session->data_seq == 1 ? "IPSEQ" : | ||
169 | session->data_seq == 2 ? "DATASEQ" : "-", | ||
170 | session->lns_mode ? "LNS" : "LAC", | ||
171 | session->debug, | ||
172 | jiffies_to_msecs(session->reorder_timeout)); | ||
173 | seq_printf(m, " offset %hu l2specific %hu/%hu\n", | ||
174 | session->offset, session->l2specific_type, session->l2specific_len); | ||
175 | if (session->cookie_len) { | ||
176 | seq_printf(m, " cookie %02x%02x%02x%02x", | ||
177 | session->cookie[0], session->cookie[1], | ||
178 | session->cookie[2], session->cookie[3]); | ||
179 | if (session->cookie_len == 8) | ||
180 | seq_printf(m, "%02x%02x%02x%02x", | ||
181 | session->cookie[4], session->cookie[5], | ||
182 | session->cookie[6], session->cookie[7]); | ||
183 | seq_printf(m, "\n"); | ||
184 | } | ||
185 | if (session->peer_cookie_len) { | ||
186 | seq_printf(m, " peer cookie %02x%02x%02x%02x", | ||
187 | session->peer_cookie[0], session->peer_cookie[1], | ||
188 | session->peer_cookie[2], session->peer_cookie[3]); | ||
189 | if (session->peer_cookie_len == 8) | ||
190 | seq_printf(m, "%02x%02x%02x%02x", | ||
191 | session->peer_cookie[4], session->peer_cookie[5], | ||
192 | session->peer_cookie[6], session->peer_cookie[7]); | ||
193 | seq_printf(m, "\n"); | ||
194 | } | ||
195 | |||
196 | seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", | ||
197 | session->nr, session->ns, | ||
198 | (unsigned long long)session->stats.tx_packets, | ||
199 | (unsigned long long)session->stats.tx_bytes, | ||
200 | (unsigned long long)session->stats.tx_errors, | ||
201 | (unsigned long long)session->stats.rx_packets, | ||
202 | (unsigned long long)session->stats.rx_bytes, | ||
203 | (unsigned long long)session->stats.rx_errors); | ||
204 | |||
205 | if (session->show != NULL) | ||
206 | session->show(m, session); | ||
207 | } | ||
208 | |||
209 | static int l2tp_dfs_seq_show(struct seq_file *m, void *v) | ||
210 | { | ||
211 | struct l2tp_dfs_seq_data *pd = v; | ||
212 | |||
213 | /* display header on line 1 */ | ||
214 | if (v == SEQ_START_TOKEN) { | ||
215 | seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n"); | ||
216 | seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n"); | ||
217 | seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n"); | ||
218 | seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
219 | seq_puts(m, " SESSION ID, peer ID, PWTYPE\n"); | ||
220 | seq_puts(m, " refcnt cnt\n"); | ||
221 | seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n"); | ||
222 | seq_puts(m, " [ cookie ]\n"); | ||
223 | seq_puts(m, " [ peer cookie ]\n"); | ||
224 | seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n"); | ||
225 | seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
226 | goto out; | ||
227 | } | ||
228 | |||
229 | /* Show the tunnel or session context */ | ||
230 | if (pd->session == NULL) | ||
231 | l2tp_dfs_seq_tunnel_show(m, pd->tunnel); | ||
232 | else | ||
233 | l2tp_dfs_seq_session_show(m, pd->session); | ||
234 | |||
235 | out: | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static const struct seq_operations l2tp_dfs_seq_ops = { | ||
240 | .start = l2tp_dfs_seq_start, | ||
241 | .next = l2tp_dfs_seq_next, | ||
242 | .stop = l2tp_dfs_seq_stop, | ||
243 | .show = l2tp_dfs_seq_show, | ||
244 | }; | ||
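
Note the iteration style: ->next always bumps *pos and returns NULL, so each read() emits at most one record (the header on the first call), and the walk is actually driven by ->start advancing the tunnel/session cursor kept in the seq_file private data. Roughly, the call sequence per read() chunk is (a sketch, not code from the patch):

        v = l2tp_dfs_seq_start(m, &pos);        /* SEQ_START_TOKEN on first read */
        if (v != NULL) {
                l2tp_dfs_seq_show(m, v);        /* header, or one tunnel/session */
                v = l2tp_dfs_seq_next(m, v, &pos); /* always NULL: end the chunk */
        }
        l2tp_dfs_seq_stop(m, v);                /* next read() re-enters start() */
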
245 | |||
246 | static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) | ||
247 | { | ||
248 | struct l2tp_dfs_seq_data *pd; | ||
249 | struct seq_file *seq; | ||
250 | int rc = -ENOMEM; | ||
251 | |||
252 | pd = kzalloc(sizeof(*pd), GFP_KERNEL); | ||
253 | if (pd == NULL) | ||
254 | goto out; | ||
255 | |||
256 | /* Derive the network namespace from the pid opening the | ||
257 | * file. | ||
258 | */ | ||
259 | pd->net = get_net_ns_by_pid(current->pid); | ||
260 | if (IS_ERR(pd->net)) { | ||
261 | rc = PTR_ERR(pd->net); /* PTR_ERR() is already negative */ | ||
262 | goto err_free_pd; | ||
263 | } | ||
264 | |||
265 | rc = seq_open(file, &l2tp_dfs_seq_ops); | ||
266 | if (rc) | ||
267 | goto err_free_net; | ||
268 | |||
269 | seq = file->private_data; | ||
270 | seq->private = pd; | ||
271 | |||
272 | out: | ||
273 | return rc; | ||
274 | |||
275 | err_free_net: | ||
276 | put_net(pd->net); | ||
277 | err_free_pd: | ||
278 | kfree(pd); | ||
279 | goto out; | ||
280 | } | ||
281 | |||
282 | static int l2tp_dfs_seq_release(struct inode *inode, struct file *file) | ||
283 | { | ||
284 | struct l2tp_dfs_seq_data *pd; | ||
285 | struct seq_file *seq; | ||
286 | |||
287 | seq = file->private_data; | ||
288 | pd = seq->private; | ||
289 | if (pd->net) | ||
290 | put_net(pd->net); | ||
291 | kfree(pd); | ||
292 | seq_release(inode, file); | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | static const struct file_operations l2tp_dfs_fops = { | ||
298 | .owner = THIS_MODULE, | ||
299 | .open = l2tp_dfs_seq_open, | ||
300 | .read = seq_read, | ||
301 | .llseek = seq_lseek, | ||
302 | .release = l2tp_dfs_seq_release, | ||
303 | }; | ||
304 | |||
305 | static int __init l2tp_debugfs_init(void) | ||
306 | { | ||
307 | int rc = 0; | ||
308 | |||
309 | rootdir = debugfs_create_dir("l2tp", NULL); | ||
310 | if (IS_ERR(rootdir)) { | ||
311 | rc = PTR_ERR(rootdir); | ||
312 | rootdir = NULL; | ||
313 | goto out; | ||
314 | } | ||
315 | |||
316 | tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops); | ||
317 | if (tunnels == NULL) | ||
318 | rc = -EIO; | ||
319 | |||
320 | printk(KERN_INFO "L2TP debugfs support\n"); | ||
321 | |||
322 | out: | ||
323 | if (rc) | ||
324 | printk(KERN_WARNING "l2tp debugfs: unable to init\n"); | ||
325 | |||
326 | return rc; | ||
327 | } | ||
328 | |||
329 | static void __exit l2tp_debugfs_exit(void) | ||
330 | { | ||
331 | debugfs_remove(tunnels); | ||
332 | debugfs_remove(rootdir); | ||
333 | } | ||
334 | |||
335 | module_init(l2tp_debugfs_init); | ||
336 | module_exit(l2tp_debugfs_exit); | ||
337 | |||
338 | MODULE_LICENSE("GPL"); | ||
339 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
340 | MODULE_DESCRIPTION("L2TP debugfs driver"); | ||
341 | MODULE_VERSION("1.0"); | ||
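
Reading the resulting file from userspace needs nothing special; a minimal reader, assuming debugfs is mounted at the conventional /sys/kernel/debug (the file is created mode 0600, so this must run as root):

        #include <stdio.h>

        int main(void)
        {
                char line[256];
                FILE *f = fopen("/sys/kernel/debug/l2tp/tunnels", "r");

                if (f == NULL)
                        return 1;
                while (fgets(line, sizeof(line), f) != NULL)
                        fputs(line, stdout);    /* one tunnel/session per chunk */
                fclose(f);
                return 0;
        }
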
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c new file mode 100644 index 000000000000..58c6c4cda73b --- /dev/null +++ b/net/l2tp/l2tp_eth.c | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * L2TPv3 ethernet pseudowire driver | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/socket.h> | ||
15 | #include <linux/hash.h> | ||
16 | #include <linux/l2tp.h> | ||
17 | #include <linux/in.h> | ||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <net/sock.h> | ||
21 | #include <net/ip.h> | ||
22 | #include <net/icmp.h> | ||
23 | #include <net/udp.h> | ||
24 | #include <net/inet_common.h> | ||
25 | #include <net/inet_hashtables.h> | ||
26 | #include <net/tcp_states.h> | ||
27 | #include <net/protocol.h> | ||
28 | #include <net/xfrm.h> | ||
29 | #include <net/net_namespace.h> | ||
30 | #include <net/netns/generic.h> | ||
31 | |||
32 | #include "l2tp_core.h" | ||
33 | |||
34 | /* Default device name. May be overridden by name specified by user */ | ||
35 | #define L2TP_ETH_DEV_NAME "l2tpeth%d" | ||
36 | |||
37 | /* via netdev_priv() */ | ||
38 | struct l2tp_eth { | ||
39 | struct net_device *dev; | ||
40 | struct sock *tunnel_sock; | ||
41 | struct l2tp_session *session; | ||
42 | struct list_head list; | ||
43 | }; | ||
44 | |||
45 | /* via l2tp_session_priv() */ | ||
46 | struct l2tp_eth_sess { | ||
47 | struct net_device *dev; | ||
48 | }; | ||
49 | |||
50 | /* per-net private data for this module */ | ||
51 | static unsigned int l2tp_eth_net_id; | ||
52 | struct l2tp_eth_net { | ||
53 | struct list_head l2tp_eth_dev_list; | ||
54 | spinlock_t l2tp_eth_lock; | ||
55 | }; | ||
56 | |||
57 | static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) | ||
58 | { | ||
59 | return net_generic(net, l2tp_eth_net_id); | ||
60 | } | ||
61 | |||
62 | static int l2tp_eth_dev_init(struct net_device *dev) | ||
63 | { | ||
64 | struct l2tp_eth *priv = netdev_priv(dev); | ||
65 | |||
66 | priv->dev = dev; | ||
67 | random_ether_addr(dev->dev_addr); | ||
68 | memset(&dev->broadcast[0], 0xff, 6); | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static void l2tp_eth_dev_uninit(struct net_device *dev) | ||
74 | { | ||
75 | struct l2tp_eth *priv = netdev_priv(dev); | ||
76 | struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); | ||
77 | |||
78 | spin_lock(&pn->l2tp_eth_lock); | ||
79 | list_del_init(&priv->list); | ||
80 | spin_unlock(&pn->l2tp_eth_lock); | ||
81 | dev_put(dev); | ||
82 | } | ||
83 | |||
84 | static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) | ||
85 | { | ||
86 | struct l2tp_eth *priv = netdev_priv(dev); | ||
87 | struct l2tp_session *session = priv->session; | ||
88 | unsigned int len = skb->len; /* l2tp_xmit_skb() consumes the skb */ | ||
89 | |||
90 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
91 | dev->stats.tx_bytes += len; | ||
92 | dev->stats.tx_packets++; | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static const struct net_device_ops l2tp_eth_netdev_ops = { | ||
98 | .ndo_init = l2tp_eth_dev_init, | ||
99 | .ndo_uninit = l2tp_eth_dev_uninit, | ||
100 | .ndo_start_xmit = l2tp_eth_dev_xmit, | ||
101 | }; | ||
102 | |||
103 | static void l2tp_eth_dev_setup(struct net_device *dev) | ||
104 | { | ||
105 | ether_setup(dev); | ||
106 | |||
107 | dev->netdev_ops = &l2tp_eth_netdev_ops; | ||
108 | dev->destructor = free_netdev; | ||
109 | } | ||
110 | |||
111 | static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | ||
112 | { | ||
113 | struct l2tp_eth_sess *spriv = l2tp_session_priv(session); | ||
114 | struct net_device *dev = spriv->dev; | ||
115 | |||
116 | if (session->debug & L2TP_MSG_DATA) { | ||
117 | unsigned int length; | ||
118 | int offset; | ||
119 | u8 *ptr; | ||
120 | |||
121 | length = min(32u, skb->len); | ||
122 | if (!pskb_may_pull(skb, length)) | ||
123 | goto error; | ||
124 | ptr = skb->data; /* read only after the pull; it may move skb->data */ | ||
125 | printk(KERN_DEBUG "%s: eth recv: ", session->name); | ||
126 | |||
127 | offset = 0; | ||
128 | do { | ||
129 | printk(" %02X", ptr[offset]); | ||
130 | } while (++offset < length); | ||
131 | |||
132 | printk("\n"); | ||
133 | } | ||
134 | |||
135 | if (data_len < ETH_HLEN) | ||
136 | goto error; | ||
137 | |||
138 | secpath_reset(skb); | ||
139 | |||
140 | /* checksums verified by L2TP */ | ||
141 | skb->ip_summed = CHECKSUM_NONE; | ||
142 | |||
143 | skb_dst_drop(skb); | ||
144 | nf_reset(skb); | ||
145 | |||
146 | if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { | ||
147 | dev->last_rx = jiffies; | ||
148 | dev->stats.rx_packets++; | ||
149 | dev->stats.rx_bytes += data_len; | ||
150 | } else | ||
151 | dev->stats.rx_errors++; | ||
152 | |||
153 | return; | ||
154 | |||
155 | error: | ||
156 | dev->stats.rx_errors++; | ||
157 | kfree_skb(skb); | ||
158 | } | ||
159 | |||
160 | static void l2tp_eth_delete(struct l2tp_session *session) | ||
161 | { | ||
162 | struct l2tp_eth_sess *spriv; | ||
163 | struct net_device *dev; | ||
164 | |||
165 | if (session) { | ||
166 | spriv = l2tp_session_priv(session); | ||
167 | dev = spriv->dev; | ||
168 | if (dev) { | ||
169 | unregister_netdev(dev); | ||
170 | spriv->dev = NULL; | ||
171 | } | ||
172 | } | ||
173 | } | ||
174 | |||
175 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
176 | static void l2tp_eth_show(struct seq_file *m, void *arg) | ||
177 | { | ||
178 | struct l2tp_session *session = arg; | ||
179 | struct l2tp_eth_sess *spriv = l2tp_session_priv(session); | ||
180 | struct net_device *dev = spriv->dev; | ||
181 | |||
182 | seq_printf(m, " interface %s\n", dev->name); | ||
183 | } | ||
184 | #endif | ||
185 | |||
186 | static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
187 | { | ||
188 | struct net_device *dev; | ||
189 | char name[IFNAMSIZ]; | ||
190 | struct l2tp_tunnel *tunnel; | ||
191 | struct l2tp_session *session; | ||
192 | struct l2tp_eth *priv; | ||
193 | struct l2tp_eth_sess *spriv; | ||
194 | int rc; | ||
195 | struct l2tp_eth_net *pn; | ||
196 | |||
197 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
198 | if (!tunnel) { | ||
199 | rc = -ENODEV; | ||
200 | goto out; | ||
201 | } | ||
202 | |||
203 | session = l2tp_session_find(net, tunnel, session_id); | ||
204 | if (session) { | ||
205 | rc = -EEXIST; | ||
206 | goto out; | ||
207 | } | ||
208 | |||
209 | if (cfg->ifname) { | ||
210 | dev = dev_get_by_name(net, cfg->ifname); | ||
211 | if (dev) { | ||
212 | dev_put(dev); | ||
213 | rc = -EEXIST; | ||
214 | goto out; | ||
215 | } | ||
216 | strlcpy(name, cfg->ifname, IFNAMSIZ); | ||
217 | } else | ||
218 | strcpy(name, L2TP_ETH_DEV_NAME); | ||
219 | |||
220 | session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, | ||
221 | peer_session_id, cfg); | ||
222 | if (!session) { | ||
223 | rc = -ENOMEM; | ||
224 | goto out; | ||
225 | } | ||
226 | |||
227 | dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup); | ||
228 | if (!dev) { | ||
229 | rc = -ENOMEM; | ||
230 | goto out_del_session; | ||
231 | } | ||
232 | |||
233 | dev_net_set(dev, net); | ||
234 | if (session->mtu == 0) | ||
235 | session->mtu = dev->mtu - session->hdr_len; | ||
236 | dev->mtu = session->mtu; | ||
237 | dev->needed_headroom += session->hdr_len; | ||
238 | |||
239 | priv = netdev_priv(dev); | ||
240 | priv->dev = dev; | ||
241 | priv->session = session; | ||
242 | INIT_LIST_HEAD(&priv->list); | ||
243 | |||
244 | priv->tunnel_sock = tunnel->sock; | ||
245 | session->recv_skb = l2tp_eth_dev_recv; | ||
246 | session->session_close = l2tp_eth_delete; | ||
247 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
248 | session->show = l2tp_eth_show; | ||
249 | #endif | ||
250 | |||
251 | spriv = l2tp_session_priv(session); | ||
252 | spriv->dev = dev; | ||
253 | |||
254 | rc = register_netdev(dev); | ||
255 | if (rc < 0) | ||
256 | goto out_del_dev; | ||
257 | |||
258 | /* Must be done after register_netdev() */ | ||
259 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | ||
260 | |||
261 | dev_hold(dev); | ||
262 | pn = l2tp_eth_pernet(dev_net(dev)); | ||
263 | spin_lock(&pn->l2tp_eth_lock); | ||
264 | list_add(&priv->list, &pn->l2tp_eth_dev_list); | ||
265 | spin_unlock(&pn->l2tp_eth_lock); | ||
266 | |||
267 | return 0; | ||
268 | |||
269 | out_del_dev: | ||
270 | free_netdev(dev); | ||
271 | out_del_session: | ||
272 | l2tp_session_delete(session); | ||
273 | out: | ||
274 | return rc; | ||
275 | } | ||
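
One detail worth noting in l2tp_eth_create(): when the config carries no MTU, the session MTU defaults to the new device's MTU (1500, set by ether_setup()) minus the per-session L2TP header overhead, and the device MTU is then set to match. With a hypothetical hdr_len, purely to illustrate the arithmetic:

        dev->mtu = 1500;                /* Ethernet default from ether_setup() */
        session->hdr_len = 12;          /* depends on L2TPv3 encap and options */
        session->mtu = 1500 - 12;       /* l2tpethN comes up with MTU 1488 */
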
276 | |||
277 | static __net_init int l2tp_eth_init_net(struct net *net) | ||
278 | { | ||
279 | struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); | ||
280 | |||
281 | INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); | ||
282 | spin_lock_init(&pn->l2tp_eth_lock); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static __net_initdata struct pernet_operations l2tp_eth_net_ops = { | ||
288 | .init = l2tp_eth_init_net, | ||
289 | .id = &l2tp_eth_net_id, | ||
290 | .size = sizeof(struct l2tp_eth_net), | ||
291 | }; | ||
292 | |||
293 | |||
294 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { | ||
295 | .session_create = l2tp_eth_create, | ||
296 | .session_delete = l2tp_session_delete, | ||
297 | }; | ||
298 | |||
299 | |||
300 | static int __init l2tp_eth_init(void) | ||
301 | { | ||
302 | int err = 0; | ||
303 | |||
304 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); | ||
305 | if (err) | ||
306 | goto out; | ||
307 | |||
308 | err = register_pernet_device(&l2tp_eth_net_ops); | ||
309 | if (err) | ||
310 | goto out_unreg; | ||
311 | |||
312 | printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | out_unreg: | ||
317 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
318 | out: | ||
319 | return err; | ||
320 | } | ||
321 | |||
322 | static void __exit l2tp_eth_exit(void) | ||
323 | { | ||
324 | unregister_pernet_device(&l2tp_eth_net_ops); | ||
325 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
326 | } | ||
327 | |||
328 | module_init(l2tp_eth_init); | ||
329 | module_exit(l2tp_eth_exit); | ||
330 | |||
331 | MODULE_LICENSE("GPL"); | ||
332 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
333 | MODULE_DESCRIPTION("L2TP ethernet pseudowire driver"); | ||
334 | MODULE_VERSION("1.0"); | ||
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c new file mode 100644 index 000000000000..0852512d392c --- /dev/null +++ b/net/l2tp/l2tp_ip.c | |||
@@ -0,0 +1,679 @@ | |||
1 | /* | ||
2 | * L2TPv3 IP encapsulation support | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/icmp.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/random.h> | ||
16 | #include <linux/socket.h> | ||
17 | #include <linux/l2tp.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <net/sock.h> | ||
20 | #include <net/ip.h> | ||
21 | #include <net/icmp.h> | ||
22 | #include <net/udp.h> | ||
23 | #include <net/inet_common.h> | ||
24 | #include <net/inet_hashtables.h> | ||
25 | #include <net/tcp_states.h> | ||
26 | #include <net/protocol.h> | ||
27 | #include <net/xfrm.h> | ||
28 | |||
29 | #include "l2tp_core.h" | ||
30 | |||
31 | struct l2tp_ip_sock { | ||
32 | /* inet_sock has to be the first member of l2tp_ip_sock */ | ||
33 | struct inet_sock inet; | ||
34 | |||
35 | __u32 conn_id; | ||
36 | __u32 peer_conn_id; | ||
37 | |||
38 | __u64 tx_packets; | ||
39 | __u64 tx_bytes; | ||
40 | __u64 tx_errors; | ||
41 | __u64 rx_packets; | ||
42 | __u64 rx_bytes; | ||
43 | __u64 rx_errors; | ||
44 | }; | ||
45 | |||
46 | static DEFINE_RWLOCK(l2tp_ip_lock); | ||
47 | static struct hlist_head l2tp_ip_table; | ||
48 | static struct hlist_head l2tp_ip_bind_table; | ||
49 | |||
50 | static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) | ||
51 | { | ||
52 | return (struct l2tp_ip_sock *)sk; | ||
53 | } | ||
54 | |||
55 | static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | ||
56 | { | ||
57 | struct hlist_node *node; | ||
58 | struct sock *sk; | ||
59 | |||
60 | sk_for_each_bound(sk, node, &l2tp_ip_bind_table) { | ||
61 | struct inet_sock *inet = inet_sk(sk); | ||
62 | struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); | ||
63 | |||
64 | if (l2tp == NULL) | ||
65 | continue; | ||
66 | |||
67 | if ((l2tp->conn_id == tunnel_id) && | ||
68 | /* net_eq() is always true when CONFIG_NET_NS is off, so no #ifdef */ | ||
69 | net_eq(sock_net(sk), net) && | ||
71 | !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && | ||
72 | !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) | ||
73 | goto found; | ||
74 | } | ||
75 | |||
76 | sk = NULL; | ||
77 | found: | ||
78 | return sk; | ||
79 | } | ||
80 | |||
81 | static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) | ||
82 | { | ||
83 | struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); | ||
84 | if (sk) | ||
85 | sock_hold(sk); | ||
86 | |||
87 | return sk; | ||
88 | } | ||
89 | |||
90 | /* When processing receive frames, there are two cases to | ||
91 | * consider. Data frames consist of a non-zero session-id and an | ||
92 | * optional cookie. Control frames consist of a regular L2TP header | ||
93 | * preceded by 32-bits of zeros. | ||
94 | * | ||
95 | * L2TPv3 Session Header Over IP | ||
96 | * | ||
97 | * 0 1 2 3 | ||
98 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
99 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
100 | * | Session ID | | ||
101 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
102 | * | Cookie (optional, maximum 64 bits)... | ||
103 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
104 | * | | ||
105 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
106 | * | ||
107 | * L2TPv3 Control Message Header Over IP | ||
108 | * | ||
109 | * 0 1 2 3 | ||
110 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
111 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
112 | * | (32 bits of zeros) | | ||
113 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
114 | * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | | ||
115 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
116 | * | Control Connection ID | | ||
117 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
118 | * | Ns | Nr | | ||
119 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
120 | * | ||
121 | * All control frames are passed to userspace. | ||
122 | */ | ||
123 | static int l2tp_ip_recv(struct sk_buff *skb) | ||
124 | { | ||
125 | struct sock *sk; | ||
126 | u32 session_id; | ||
127 | u32 tunnel_id; | ||
128 | unsigned char *ptr, *optr; | ||
129 | struct l2tp_session *session; | ||
130 | struct l2tp_tunnel *tunnel = NULL; | ||
131 | int length; | ||
132 | int offset; | ||
133 | |||
134 | if (!pskb_may_pull(skb, 4)) | ||
135 | goto discard; | ||
136 | |||
137 | /* Point to the L2TP header only after the pull; it may move skb->data */ | ||
138 | optr = ptr = skb->data; | ||
139 | |||
140 | session_id = ntohl(*((__be32 *) ptr)); | ||
141 | ptr += 4; | ||
142 | |||
143 | /* RFC3931: L2TP/IP packets have the first 4 bytes containing | ||
144 | * the session_id. If it is 0, the packet is an L2TP control | ||
145 | * frame and the session_id value can be discarded. | ||
146 | */ | ||
147 | if (session_id == 0) { | ||
148 | __skb_pull(skb, 4); | ||
149 | goto pass_up; | ||
150 | } | ||
151 | |||
152 | /* Ok, this is a data packet. Lookup the session. */ | ||
153 | session = l2tp_session_find(&init_net, NULL, session_id); | ||
154 | if (session == NULL) | ||
155 | goto discard; | ||
156 | |||
157 | tunnel = session->tunnel; | ||
158 | if (tunnel == NULL) | ||
159 | goto discard; | ||
160 | |||
161 | /* Trace packet contents, if enabled */ | ||
162 | if (tunnel->debug & L2TP_MSG_DATA) { | ||
163 | length = min(32u, skb->len); | ||
164 | if (!pskb_may_pull(skb, length)) | ||
165 | goto discard; | ||
166 | ptr = skb->data; /* reload: the pull above may move skb->data */ | ||
167 | printk(KERN_DEBUG "%s: ip recv: ", tunnel->name); | ||
168 | |||
169 | offset = 0; | ||
170 | do { | ||
171 | printk(" %02X", ptr[offset]); | ||
172 | } while (++offset < length); | ||
173 | |||
174 | printk("\n"); | ||
175 | } | ||
176 | |||
177 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); | ||
178 | |||
179 | return 0; | ||
180 | |||
181 | pass_up: | ||
182 | /* Get the tunnel_id from the L2TP header */ | ||
183 | if (!pskb_may_pull(skb, 12)) | ||
184 | goto discard; | ||
185 | |||
186 | if ((skb->data[0] & 0xc0) != 0xc0) | ||
187 | goto discard; | ||
188 | |||
189 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); | ||
190 | tunnel = l2tp_tunnel_find(&init_net, tunnel_id); | ||
191 | if (tunnel != NULL) | ||
192 | sk = tunnel->sock; | ||
193 | else { | ||
194 | struct iphdr *iph = (struct iphdr *) skb_network_header(skb); | ||
195 | |||
196 | read_lock_bh(&l2tp_ip_lock); | ||
197 | sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id); | ||
198 | read_unlock_bh(&l2tp_ip_lock); | ||
199 | } | ||
200 | |||
201 | if (sk == NULL) | ||
202 | goto discard; | ||
203 | |||
204 | sock_hold(sk); | ||
205 | |||
206 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | ||
207 | goto discard_put; | ||
208 | |||
209 | nf_reset(skb); | ||
210 | |||
211 | return sk_receive_skb(sk, skb, 1); | ||
212 | |||
213 | discard_put: | ||
214 | sock_put(sk); | ||
215 | |||
216 | discard: | ||
217 | kfree_skb(skb); | ||
218 | return 0; | ||
219 | } | ||
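
The demux rule implemented above can be illustrated with two hypothetical L2TP/IP payloads (bytes as they follow the IP header):

        /* 00 00 00 00 c8 03 ...  first word zero: control frame. After the
         *                        4-byte pull, the T and L bits (0xc0 mask,
         *                        both set in 0xc8) are required in the first
         *                        header byte and the tunnel id is read from
         *                        bytes 4..7 before passing the frame up.
         * 00 00 04 d2 ...        first word 0x000004d2 (1234) is non-zero:
         *                        a data frame for session 1234, handed to
         *                        l2tp_recv_common().
         */
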
220 | |||
221 | static int l2tp_ip_open(struct sock *sk) | ||
222 | { | ||
223 | /* Prevent autobind. We don't have ports. */ | ||
224 | inet_sk(sk)->inet_num = IPPROTO_L2TP; | ||
225 | |||
226 | write_lock_bh(&l2tp_ip_lock); | ||
227 | sk_add_node(sk, &l2tp_ip_table); | ||
228 | write_unlock_bh(&l2tp_ip_lock); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static void l2tp_ip_close(struct sock *sk, long timeout) | ||
234 | { | ||
235 | write_lock_bh(&l2tp_ip_lock); | ||
236 | hlist_del_init(&sk->sk_bind_node); | ||
237 | hlist_del_init(&sk->sk_node); | ||
238 | write_unlock_bh(&l2tp_ip_lock); | ||
239 | sk_common_release(sk); | ||
240 | } | ||
241 | |||
242 | static void l2tp_ip_destroy_sock(struct sock *sk) | ||
243 | { | ||
244 | struct sk_buff *skb; | ||
245 | |||
246 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | ||
247 | kfree_skb(skb); | ||
248 | |||
249 | sk_refcnt_debug_dec(sk); | ||
250 | } | ||
251 | |||
252 | static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
253 | { | ||
254 | struct inet_sock *inet = inet_sk(sk); | ||
255 | struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; | ||
256 | int ret = -EINVAL; | ||
257 | int chk_addr_ret; | ||
258 | |||
259 | /* a conflicting bind fails with -EADDRINUSE at out_in_use */ | ||
260 | read_lock_bh(&l2tp_ip_lock); | ||
261 | if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) | ||
262 | goto out_in_use; | ||
263 | |||
264 | read_unlock_bh(&l2tp_ip_lock); | ||
265 | |||
266 | lock_sock(sk); | ||
267 | if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) | ||
268 | goto out; | ||
269 | |||
270 | chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr); | ||
271 | ret = -EADDRNOTAVAIL; | ||
272 | if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && | ||
273 | chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) | ||
274 | goto out; | ||
275 | |||
276 | inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; | ||
277 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) | ||
278 | inet->inet_saddr = 0; /* Use device */ | ||
279 | sk_dst_reset(sk); | ||
280 | |||
281 | l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; | ||
282 | |||
283 | write_lock_bh(&l2tp_ip_lock); | ||
284 | sk_add_bind_node(sk, &l2tp_ip_bind_table); | ||
285 | sk_del_node_init(sk); | ||
286 | write_unlock_bh(&l2tp_ip_lock); | ||
287 | ret = 0; | ||
288 | out: | ||
289 | release_sock(sk); | ||
290 | |||
291 | return ret; | ||
292 | |||
293 | out_in_use: | ||
294 | read_unlock_bh(&l2tp_ip_lock); | ||
295 | |||
296 | return -EADDRINUSE; | ||
297 | } | ||
298 | |||
299 | static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
300 | { | ||
301 | int rc; | ||
302 | struct inet_sock *inet = inet_sk(sk); | ||
303 | struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; | ||
304 | struct rtable *rt; | ||
305 | __be32 saddr; | ||
306 | int oif; | ||
307 | |||
308 | rc = -EINVAL; | ||
309 | if (addr_len < sizeof(*lsa)) | ||
310 | goto out; | ||
311 | |||
312 | rc = -EAFNOSUPPORT; | ||
313 | if (lsa->l2tp_family != AF_INET) | ||
314 | goto out; | ||
315 | |||
316 | sk_dst_reset(sk); | ||
317 | |||
318 | oif = sk->sk_bound_dev_if; | ||
319 | saddr = inet->inet_saddr; | ||
320 | |||
321 | rc = -EINVAL; | ||
322 | if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) | ||
323 | goto out; | ||
324 | |||
325 | rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr, | ||
326 | RT_CONN_FLAGS(sk), oif, | ||
327 | IPPROTO_L2TP, | ||
328 | 0, 0, sk, 1); | ||
329 | if (rc) { | ||
330 | if (rc == -ENETUNREACH) | ||
331 | IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES); | ||
332 | goto out; | ||
333 | } | ||
334 | |||
335 | rc = -ENETUNREACH; | ||
336 | if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { | ||
337 | ip_rt_put(rt); | ||
338 | goto out; | ||
339 | } | ||
340 | |||
341 | l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; | ||
342 | |||
343 | if (!inet->inet_saddr) | ||
344 | inet->inet_saddr = rt->rt_src; | ||
345 | if (!inet->inet_rcv_saddr) | ||
346 | inet->inet_rcv_saddr = rt->rt_src; | ||
347 | inet->inet_daddr = rt->rt_dst; | ||
348 | sk->sk_state = TCP_ESTABLISHED; | ||
349 | inet->inet_id = jiffies; | ||
350 | |||
351 | sk_dst_set(sk, &rt->u.dst); | ||
352 | |||
353 | write_lock_bh(&l2tp_ip_lock); | ||
354 | hlist_del_init(&sk->sk_bind_node); | ||
355 | sk_add_bind_node(sk, &l2tp_ip_bind_table); | ||
356 | write_unlock_bh(&l2tp_ip_lock); | ||
357 | |||
358 | rc = 0; | ||
359 | out: | ||
360 | return rc; | ||
361 | } | ||
362 | |||
363 | static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, | ||
364 | int *uaddr_len, int peer) | ||
365 | { | ||
366 | struct sock *sk = sock->sk; | ||
367 | struct inet_sock *inet = inet_sk(sk); | ||
368 | struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); | ||
369 | struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; | ||
370 | |||
371 | memset(lsa, 0, sizeof(*lsa)); | ||
372 | lsa->l2tp_family = AF_INET; | ||
373 | if (peer) { | ||
374 | if (!inet->inet_dport) | ||
375 | return -ENOTCONN; | ||
376 | lsa->l2tp_conn_id = lsk->peer_conn_id; | ||
377 | lsa->l2tp_addr.s_addr = inet->inet_daddr; | ||
378 | } else { | ||
379 | __be32 addr = inet->inet_rcv_saddr; | ||
380 | if (!addr) | ||
381 | addr = inet->inet_saddr; | ||
382 | lsa->l2tp_conn_id = lsk->conn_id; | ||
383 | lsa->l2tp_addr.s_addr = addr; | ||
384 | } | ||
385 | *uaddr_len = sizeof(*lsa); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
390 | { | ||
391 | int rc; | ||
392 | |||
393 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | ||
394 | goto drop; | ||
395 | |||
396 | nf_reset(skb); | ||
397 | |||
398 | /* Charge it to the socket, dropping if the queue is full. */ | ||
399 | rc = sock_queue_rcv_skb(sk, skb); | ||
400 | if (rc < 0) | ||
401 | goto drop; | ||
402 | |||
403 | return 0; | ||
404 | |||
405 | drop: | ||
406 | IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); | ||
407 | kfree_skb(skb); | ||
408 | return -1; | ||
409 | } | ||
410 | |||
411 | /* Userspace will call sendmsg() on the tunnel socket to send L2TP | ||
412 | * control frames. | ||
413 | */ | ||
414 | static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) | ||
415 | { | ||
416 | struct sk_buff *skb; | ||
417 | int rc; | ||
418 | struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); | ||
419 | struct inet_sock *inet = inet_sk(sk); | ||
420 | struct ip_options *opt = inet->opt; | ||
421 | struct rtable *rt = NULL; | ||
422 | int connected = 0; | ||
423 | __be32 daddr; | ||
424 | |||
425 | if (sock_flag(sk, SOCK_DEAD)) | ||
426 | return -ENOTCONN; | ||
427 | |||
428 | /* Get and verify the address. */ | ||
429 | if (msg->msg_name) { | ||
430 | struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name; | ||
431 | if (msg->msg_namelen < sizeof(*lip)) | ||
432 | return -EINVAL; | ||
433 | |||
434 | if (lip->l2tp_family != AF_INET) { | ||
435 | if (lip->l2tp_family != AF_UNSPEC) | ||
436 | return -EAFNOSUPPORT; | ||
437 | } | ||
438 | |||
439 | daddr = lip->l2tp_addr.s_addr; | ||
440 | } else { | ||
441 | if (sk->sk_state != TCP_ESTABLISHED) | ||
442 | return -EDESTADDRREQ; | ||
443 | |||
444 | daddr = inet->inet_daddr; | ||
445 | connected = 1; | ||
446 | } | ||
447 | |||
448 | /* Allocate a socket buffer */ | ||
449 | rc = -ENOMEM; | ||
450 | skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + | ||
451 | 4 + len, 0, GFP_KERNEL); | ||
452 | if (!skb) | ||
453 | goto error; | ||
454 | |||
455 | /* Reserve space for headers, putting IP header on 4-byte boundary. */ | ||
456 | skb_reserve(skb, 2 + NET_SKB_PAD); | ||
457 | skb_reset_network_header(skb); | ||
458 | skb_reserve(skb, sizeof(struct iphdr)); | ||
459 | skb_reset_transport_header(skb); | ||
460 | |||
461 | /* Insert 0 session_id */ | ||
462 | *((__be32 *) skb_put(skb, 4)) = 0; | ||
463 | |||
464 | /* Copy user data into skb */ | ||
465 | rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); | ||
466 | if (rc < 0) { | ||
467 | kfree_skb(skb); | ||
468 | goto error; | ||
469 | } | ||
470 | |||
471 | if (connected) | ||
472 | rt = (struct rtable *) __sk_dst_check(sk, 0); | ||
473 | |||
474 | if (rt == NULL) { | ||
475 | /* Use correct destination address if we have options. */ | ||
476 | if (opt && opt->srr) | ||
477 | daddr = opt->faddr; | ||
478 | |||
479 | { | ||
480 | struct flowi fl = { .oif = sk->sk_bound_dev_if, | ||
481 | .nl_u = { .ip4_u = { | ||
482 | .daddr = daddr, | ||
483 | .saddr = inet->inet_saddr, | ||
484 | .tos = RT_CONN_FLAGS(sk) } }, | ||
485 | .proto = sk->sk_protocol, | ||
486 | .flags = inet_sk_flowi_flags(sk), | ||
487 | .uli_u = { .ports = { | ||
488 | .sport = inet->inet_sport, | ||
489 | .dport = inet->inet_dport } } }; | ||
490 | |||
491 | /* If this fails, the retransmit mechanism of the transport | ||
492 | * layer will keep trying until a route appears or the | ||
493 | * connection times out. | ||
494 | */ | ||
495 | security_sk_classify_flow(sk, &fl); | ||
496 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) | ||
497 | goto no_route; | ||
498 | } | ||
499 | sk_setup_caps(sk, &rt->u.dst); | ||
500 | } | ||
501 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | ||
502 | |||
503 | /* Queue the packet to IP for output */ | ||
504 | rc = ip_queue_xmit(skb); | ||
505 | |||
506 | error: | ||
507 | /* Update stats */ | ||
508 | if (rc >= 0) { | ||
509 | lsa->tx_packets++; | ||
510 | lsa->tx_bytes += len; | ||
511 | rc = len; | ||
512 | } else { | ||
513 | lsa->tx_errors++; | ||
514 | } | ||
515 | |||
516 | return rc; | ||
517 | |||
518 | no_route: | ||
519 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | ||
520 | kfree_skb(skb); | ||
521 | return -EHOSTUNREACH; | ||
522 | } | ||
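
For reference, a hypothetical userspace counterpart of this control path: an AF_INET datagram socket with protocol IPPROTO_L2TP, bound through struct sockaddr_l2tpip (both from <linux/l2tp.h>), after which sendmsg()/recvmsg() carry raw control frames:

        #include <string.h>
        #include <stdint.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <linux/l2tp.h>

        static int open_l2tp_ctl(uint32_t conn_id)
        {
                struct sockaddr_l2tpip sa;
                int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);

                if (fd < 0)
                        return -1;
                memset(&sa, 0, sizeof(sa));
                sa.l2tp_family = AF_INET;
                sa.l2tp_addr.s_addr = htonl(INADDR_ANY);
                sa.l2tp_conn_id = conn_id;      /* local tunnel (connection) id */
                if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                        return -1;
                return fd;
        }

The MODULE_ALIAS_NET_PF_PROTO_TYPE() at the end of the file is what lets such a socket() call autoload this module.
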
523 | |||
524 | static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | ||
525 | size_t len, int noblock, int flags, int *addr_len) | ||
526 | { | ||
527 | struct inet_sock *inet = inet_sk(sk); | ||
528 | struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); | ||
529 | size_t copied = 0; | ||
530 | int err = -EOPNOTSUPP; | ||
531 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | ||
532 | struct sk_buff *skb; | ||
533 | |||
534 | if (flags & MSG_OOB) | ||
535 | goto out; | ||
536 | |||
537 | if (addr_len) | ||
538 | *addr_len = sizeof(*sin); | ||
539 | |||
540 | skb = skb_recv_datagram(sk, flags, noblock, &err); | ||
541 | if (!skb) | ||
542 | goto out; | ||
543 | |||
544 | copied = skb->len; | ||
545 | if (len < copied) { | ||
546 | msg->msg_flags |= MSG_TRUNC; | ||
547 | copied = len; | ||
548 | } | ||
549 | |||
550 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
551 | if (err) | ||
552 | goto done; | ||
553 | |||
554 | sock_recv_timestamp(msg, sk, skb); | ||
555 | |||
556 | /* Copy the address. */ | ||
557 | if (sin) { | ||
558 | sin->sin_family = AF_INET; | ||
559 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | ||
560 | sin->sin_port = 0; | ||
561 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
562 | } | ||
563 | if (inet->cmsg_flags) | ||
564 | ip_cmsg_recv(msg, skb); | ||
565 | if (flags & MSG_TRUNC) | ||
566 | copied = skb->len; | ||
567 | done: | ||
568 | skb_free_datagram(sk, skb); | ||
569 | out: | ||
570 | if (err) { | ||
571 | lsk->rx_errors++; | ||
572 | return err; | ||
573 | } | ||
574 | |||
575 | lsk->rx_packets++; | ||
576 | lsk->rx_bytes += copied; | ||
577 | |||
578 | return copied; | ||
579 | } | ||
580 | |||
581 | struct proto l2tp_ip_prot = { | ||
582 | .name = "L2TP/IP", | ||
583 | .owner = THIS_MODULE, | ||
584 | .init = l2tp_ip_open, | ||
585 | .close = l2tp_ip_close, | ||
586 | .bind = l2tp_ip_bind, | ||
587 | .connect = l2tp_ip_connect, | ||
588 | .disconnect = udp_disconnect, | ||
589 | .ioctl = udp_ioctl, | ||
590 | .destroy = l2tp_ip_destroy_sock, | ||
591 | .setsockopt = ip_setsockopt, | ||
592 | .getsockopt = ip_getsockopt, | ||
593 | .sendmsg = l2tp_ip_sendmsg, | ||
594 | .recvmsg = l2tp_ip_recvmsg, | ||
595 | .backlog_rcv = l2tp_ip_backlog_recv, | ||
596 | .hash = inet_hash, | ||
597 | .unhash = inet_unhash, | ||
598 | .obj_size = sizeof(struct l2tp_ip_sock), | ||
599 | #ifdef CONFIG_COMPAT | ||
600 | .compat_setsockopt = compat_ip_setsockopt, | ||
601 | .compat_getsockopt = compat_ip_getsockopt, | ||
602 | #endif | ||
603 | }; | ||
604 | |||
605 | static const struct proto_ops l2tp_ip_ops = { | ||
606 | .family = PF_INET, | ||
607 | .owner = THIS_MODULE, | ||
608 | .release = inet_release, | ||
609 | .bind = inet_bind, | ||
610 | .connect = inet_dgram_connect, | ||
611 | .socketpair = sock_no_socketpair, | ||
612 | .accept = sock_no_accept, | ||
613 | .getname = l2tp_ip_getname, | ||
614 | .poll = datagram_poll, | ||
615 | .ioctl = inet_ioctl, | ||
616 | .listen = sock_no_listen, | ||
617 | .shutdown = inet_shutdown, | ||
618 | .setsockopt = sock_common_setsockopt, | ||
619 | .getsockopt = sock_common_getsockopt, | ||
620 | .sendmsg = inet_sendmsg, | ||
621 | .recvmsg = sock_common_recvmsg, | ||
622 | .mmap = sock_no_mmap, | ||
623 | .sendpage = sock_no_sendpage, | ||
624 | #ifdef CONFIG_COMPAT | ||
625 | .compat_setsockopt = compat_sock_common_setsockopt, | ||
626 | .compat_getsockopt = compat_sock_common_getsockopt, | ||
627 | #endif | ||
628 | }; | ||
629 | |||
630 | static struct inet_protosw l2tp_ip_protosw = { | ||
631 | .type = SOCK_DGRAM, | ||
632 | .protocol = IPPROTO_L2TP, | ||
633 | .prot = &l2tp_ip_prot, | ||
634 | .ops = &l2tp_ip_ops, | ||
635 | .no_check = 0, | ||
636 | }; | ||
637 | |||
638 | static struct net_protocol l2tp_ip_protocol __read_mostly = { | ||
639 | .handler = l2tp_ip_recv, | ||
640 | }; | ||
641 | |||
642 | static int __init l2tp_ip_init(void) | ||
643 | { | ||
644 | int err; | ||
645 | |||
646 | printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n"); | ||
647 | |||
648 | err = proto_register(&l2tp_ip_prot, 1); | ||
649 | if (err != 0) | ||
650 | goto out; | ||
651 | |||
652 | err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); | ||
653 | if (err) | ||
654 | goto out1; | ||
655 | |||
656 | inet_register_protosw(&l2tp_ip_protosw); | ||
657 | return 0; | ||
658 | |||
659 | out1: | ||
660 | proto_unregister(&l2tp_ip_prot); | ||
661 | out: | ||
662 | return err; | ||
663 | } | ||
664 | |||
665 | static void __exit l2tp_ip_exit(void) | ||
666 | { | ||
667 | inet_unregister_protosw(&l2tp_ip_protosw); | ||
668 | inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); | ||
669 | proto_unregister(&l2tp_ip_prot); | ||
670 | } | ||
671 | |||
672 | module_init(l2tp_ip_init); | ||
673 | module_exit(l2tp_ip_exit); | ||
674 | |||
675 | MODULE_LICENSE("GPL"); | ||
676 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
677 | MODULE_DESCRIPTION("L2TP over IP"); | ||
678 | MODULE_VERSION("1.0"); | ||
679 | MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); | ||
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c new file mode 100644 index 000000000000..4c1e540732d7 --- /dev/null +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -0,0 +1,840 @@ | |||
1 | /* | ||
2 | * L2TP netlink layer, for management | ||
3 | * | ||
4 | * Copyright (c) 2008,2009,2010 Katalix Systems Ltd | ||
5 | * | ||
6 | * Partly based on the IrDA netlink implementation | ||
7 | * (see net/irda/irnetlink.c) which is: | ||
8 | * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org> | ||
9 | * which is in turn partly based on the wireless netlink code: | ||
10 | * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <net/sock.h> | ||
18 | #include <net/genetlink.h> | ||
19 | #include <net/udp.h> | ||
20 | #include <linux/in.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/socket.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/list.h> | ||
25 | #include <net/net_namespace.h> | ||
26 | |||
27 | #include <linux/l2tp.h> | ||
28 | |||
29 | #include "l2tp_core.h" | ||
30 | |||
31 | |||
32 | static struct genl_family l2tp_nl_family = { | ||
33 | .id = GENL_ID_GENERATE, | ||
34 | .name = L2TP_GENL_NAME, | ||
35 | .version = L2TP_GENL_VERSION, | ||
36 | .hdrsize = 0, | ||
37 | .maxattr = L2TP_ATTR_MAX, | ||
38 | }; | ||
39 | |||
40 | /* Accessed under genl lock */ | ||
41 | static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; | ||
42 | |||
43 | static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) | ||
44 | { | ||
45 | u32 tunnel_id; | ||
46 | u32 session_id; | ||
47 | char *ifname; | ||
48 | struct l2tp_tunnel *tunnel; | ||
49 | struct l2tp_session *session = NULL; | ||
50 | struct net *net = genl_info_net(info); | ||
51 | |||
52 | if (info->attrs[L2TP_ATTR_IFNAME]) { | ||
53 | ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); | ||
54 | session = l2tp_session_find_by_ifname(net, ifname); | ||
55 | } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && | ||
56 | (info->attrs[L2TP_ATTR_CONN_ID])) { | ||
57 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
58 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | ||
59 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
60 | if (tunnel) | ||
61 | session = l2tp_session_find(net, tunnel, session_id); | ||
62 | } | ||
63 | |||
64 | return session; | ||
65 | } | ||
66 | |||
67 | static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) | ||
68 | { | ||
69 | struct sk_buff *msg; | ||
70 | void *hdr; | ||
71 | int ret = -ENOBUFS; | ||
72 | |||
73 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
74 | if (!msg) { | ||
75 | ret = -ENOMEM; | ||
76 | goto out; | ||
77 | } | ||
78 | |||
79 | hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, | ||
80 | &l2tp_nl_family, 0, L2TP_CMD_NOOP); | ||
81 | if (!hdr) { /* genlmsg_put() returns NULL on error, not ERR_PTR() */ | ||
82 | ret = -EMSGSIZE; | ||
83 | goto err_out; | ||
84 | } | ||
85 | |||
86 | genlmsg_end(msg, hdr); | ||
87 | |||
88 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
89 | |||
90 | err_out: | ||
91 | nlmsg_free(msg); | ||
92 | |||
93 | out: | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info) | ||
98 | { | ||
99 | u32 tunnel_id; | ||
100 | u32 peer_tunnel_id; | ||
101 | int proto_version; | ||
102 | int fd; | ||
103 | int ret = 0; | ||
104 | struct l2tp_tunnel_cfg cfg = { 0, }; | ||
105 | struct l2tp_tunnel *tunnel; | ||
106 | struct net *net = genl_info_net(info); | ||
107 | |||
108 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
109 | ret = -EINVAL; | ||
110 | goto out; | ||
111 | } | ||
112 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
113 | |||
114 | if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) { | ||
115 | ret = -EINVAL; | ||
116 | goto out; | ||
117 | } | ||
118 | peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]); | ||
119 | |||
120 | if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) { | ||
121 | ret = -EINVAL; | ||
122 | goto out; | ||
123 | } | ||
124 | proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]); | ||
125 | |||
126 | if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) { | ||
127 | ret = -EINVAL; | ||
128 | goto out; | ||
129 | } | ||
130 | cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]); | ||
131 | |||
132 | fd = -1; | ||
133 | if (info->attrs[L2TP_ATTR_FD]) { | ||
134 | fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); | ||
135 | } else { | ||
136 | if (info->attrs[L2TP_ATTR_IP_SADDR]) | ||
137 | cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); | ||
138 | if (info->attrs[L2TP_ATTR_IP_DADDR]) | ||
139 | cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); | ||
140 | if (info->attrs[L2TP_ATTR_UDP_SPORT]) | ||
141 | cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); | ||
142 | if (info->attrs[L2TP_ATTR_UDP_DPORT]) | ||
143 | cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); | ||
144 | if (info->attrs[L2TP_ATTR_UDP_CSUM]) | ||
145 | cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]); | ||
146 | } | ||
147 | |||
148 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
149 | cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
150 | |||
151 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
152 | if (tunnel != NULL) { | ||
153 | ret = -EEXIST; | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | ret = -EINVAL; | ||
158 | switch (cfg.encap) { | ||
159 | case L2TP_ENCAPTYPE_UDP: | ||
160 | case L2TP_ENCAPTYPE_IP: | ||
161 | ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id, | ||
162 | peer_tunnel_id, &cfg, &tunnel); | ||
163 | break; | ||
164 | } | ||
165 | |||
166 | out: | ||
167 | return ret; | ||
168 | } | ||
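
A hypothetical userspace counterpart using libnl-3 (the helper name is illustrative; the attributes mirror the mandatory checks in the handler above, with family obtained from genl_ctrl_resolve(sk, L2TP_GENL_NAME)):

        #include <errno.h>
        #include <netlink/genl/genl.h>
        #include <netlink/genl/ctrl.h>
        #include <linux/l2tp.h>

        static int create_tunnel(struct nl_sock *sk, int family, int udp_fd)
        {
                struct nl_msg *msg = nlmsg_alloc();
                int err;

                if (msg == NULL)
                        return -ENOMEM;
                genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                            L2TP_CMD_TUNNEL_CREATE, L2TP_GENL_VERSION);
                nla_put_u32(msg, L2TP_ATTR_CONN_ID, 1);         /* mandatory */
                nla_put_u32(msg, L2TP_ATTR_PEER_CONN_ID, 1);    /* mandatory */
                nla_put_u8(msg, L2TP_ATTR_PROTO_VERSION, 3);    /* mandatory */
                nla_put_u16(msg, L2TP_ATTR_ENCAP_TYPE, L2TP_ENCAPTYPE_UDP);
                nla_put_u32(msg, L2TP_ATTR_FD, udp_fd); /* pre-opened UDP socket */
                err = nl_send_auto(sk, msg);
                nlmsg_free(msg);
                return err;
        }
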
169 | |||
170 | static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info) | ||
171 | { | ||
172 | struct l2tp_tunnel *tunnel; | ||
173 | u32 tunnel_id; | ||
174 | int ret = 0; | ||
175 | struct net *net = genl_info_net(info); | ||
176 | |||
177 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
178 | ret = -EINVAL; | ||
179 | goto out; | ||
180 | } | ||
181 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
182 | |||
183 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
184 | if (tunnel == NULL) { | ||
185 | ret = -ENODEV; | ||
186 | goto out; | ||
187 | } | ||
188 | |||
189 | (void) l2tp_tunnel_delete(tunnel); | ||
190 | |||
191 | out: | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info) | ||
196 | { | ||
197 | struct l2tp_tunnel *tunnel; | ||
198 | u32 tunnel_id; | ||
199 | int ret = 0; | ||
200 | struct net *net = genl_info_net(info); | ||
201 | |||
202 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
203 | ret = -EINVAL; | ||
204 | goto out; | ||
205 | } | ||
206 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
207 | |||
208 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
209 | if (tunnel == NULL) { | ||
210 | ret = -ENODEV; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
215 | tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
216 | |||
217 | out: | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | ||
222 | struct l2tp_tunnel *tunnel) | ||
223 | { | ||
224 | void *hdr; | ||
225 | struct nlattr *nest; | ||
226 | struct sock *sk = NULL; | ||
227 | struct inet_sock *inet; | ||
228 | |||
229 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, | ||
230 | L2TP_CMD_TUNNEL_GET); | ||
231 | if (!hdr) /* genlmsg_put() returns NULL on error, not ERR_PTR() */ | ||
232 | return -EMSGSIZE; | ||
233 | |||
234 | NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); | ||
235 | NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); | ||
236 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); | ||
237 | NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); | ||
238 | NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); | ||
239 | |||
240 | nest = nla_nest_start(skb, L2TP_ATTR_STATS); | ||
241 | if (nest == NULL) | ||
242 | goto nla_put_failure; | ||
243 | |||
244 | NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); | ||
245 | NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); | ||
246 | NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); | ||
247 | NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); | ||
248 | NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); | ||
249 | NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); | ||
250 | NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); | ||
251 | NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); | ||
252 | nla_nest_end(skb, nest); | ||
253 | |||
254 | sk = tunnel->sock; | ||
255 | if (!sk) | ||
256 | goto out; | ||
257 | |||
258 | inet = inet_sk(sk); | ||
259 | |||
260 | switch (tunnel->encap) { | ||
261 | case L2TP_ENCAPTYPE_UDP: | ||
262 | NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); | ||
263 | NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); | ||
264 | NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); | ||
265 | /* NOBREAK */ | ||
266 | case L2TP_ENCAPTYPE_IP: | ||
267 | NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); | ||
268 | NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | out: | ||
273 | return genlmsg_end(skb, hdr); | ||
274 | |||
275 | nla_put_failure: | ||
276 | genlmsg_cancel(skb, hdr); | ||
277 | return -1; | ||
278 | } | ||
279 | |||
280 | static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) | ||
281 | { | ||
282 | struct l2tp_tunnel *tunnel; | ||
283 | struct sk_buff *msg; | ||
284 | u32 tunnel_id; | ||
285 | int ret = -ENOBUFS; | ||
286 | struct net *net = genl_info_net(info); | ||
287 | |||
288 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
289 | ret = -EINVAL; | ||
290 | goto out; | ||
291 | } | ||
292 | |||
293 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
294 | |||
295 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
296 | if (tunnel == NULL) { | ||
297 | ret = -ENODEV; | ||
298 | goto out; | ||
299 | } | ||
300 | |||
301 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
302 | if (!msg) { | ||
303 | ret = -ENOMEM; | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq, | ||
308 | NLM_F_ACK, tunnel); | ||
309 | if (ret < 0) | ||
310 | goto err_out; | ||
311 | |||
312 | return genlmsg_unicast(net, msg, info->snd_pid); | ||
313 | |||
314 | err_out: | ||
315 | nlmsg_free(msg); | ||
316 | |||
317 | out: | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb) | ||
322 | { | ||
323 | int ti = cb->args[0]; | ||
324 | struct l2tp_tunnel *tunnel; | ||
325 | struct net *net = sock_net(skb->sk); | ||
326 | |||
327 | for (;;) { | ||
328 | tunnel = l2tp_tunnel_find_nth(net, ti); | ||
329 | if (tunnel == NULL) | ||
330 | goto out; | ||
331 | |||
332 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid, | ||
333 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
334 | tunnel) <= 0) | ||
335 | goto out; | ||
336 | |||
337 | ti++; | ||
338 | } | ||
339 | |||
340 | out: | ||
341 | cb->args[0] = ti; | ||
342 | |||
343 | return skb->len; | ||
344 | } | ||
345 | |||
346 | static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info) | ||
347 | { | ||
348 | u32 tunnel_id = 0; | ||
349 | u32 session_id; | ||
350 | u32 peer_session_id; | ||
351 | int ret = 0; | ||
352 | struct l2tp_tunnel *tunnel; | ||
353 | struct l2tp_session *session; | ||
354 | struct l2tp_session_cfg cfg = { 0, }; | ||
355 | struct net *net = genl_info_net(info); | ||
356 | |||
357 | if (!info->attrs[L2TP_ATTR_CONN_ID]) { | ||
358 | ret = -EINVAL; | ||
359 | goto out; | ||
360 | } | ||
361 | tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); | ||
362 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
363 | if (!tunnel) { | ||
364 | ret = -ENODEV; | ||
365 | goto out; | ||
366 | } | ||
367 | |||
368 | if (!info->attrs[L2TP_ATTR_SESSION_ID]) { | ||
369 | ret = -EINVAL; | ||
370 | goto out; | ||
371 | } | ||
372 | session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); | ||
373 | session = l2tp_session_find(net, tunnel, session_id); | ||
374 | if (session) { | ||
375 | ret = -EEXIST; | ||
376 | goto out; | ||
377 | } | ||
378 | |||
379 | if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { | ||
380 | ret = -EINVAL; | ||
381 | goto out; | ||
382 | } | ||
383 | peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); | ||
384 | |||
385 | if (!info->attrs[L2TP_ATTR_PW_TYPE]) { | ||
386 | ret = -EINVAL; | ||
387 | goto out; | ||
388 | } | ||
389 | cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); | ||
390 | if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { | ||
391 | ret = -EINVAL; | ||
392 | goto out; | ||
393 | } | ||
394 | |||
395 | if (tunnel->version > 2) { | ||
396 | if (info->attrs[L2TP_ATTR_OFFSET]) | ||
397 | cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]); | ||
398 | |||
399 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | ||
400 | cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | ||
401 | |||
402 | cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT; | ||
403 | if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) | ||
404 | cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]); | ||
405 | |||
406 | cfg.l2specific_len = 4; | ||
407 | if (info->attrs[L2TP_ATTR_L2SPEC_LEN]) | ||
408 | cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]); | ||
409 | |||
410 | if (info->attrs[L2TP_ATTR_COOKIE]) { | ||
411 | u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); | ||
412 | if (len > 8) { | ||
413 | ret = -EINVAL; | ||
414 | goto out; | ||
415 | } | ||
416 | cfg.cookie_len = len; | ||
417 | memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); | ||
418 | } | ||
419 | if (info->attrs[L2TP_ATTR_PEER_COOKIE]) { | ||
420 | u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); | ||
421 | if (len > 8) { | ||
422 | ret = -EINVAL; | ||
423 | goto out; | ||
424 | } | ||
425 | cfg.peer_cookie_len = len; | ||
426 | memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); | ||
427 | } | ||
428 | if (info->attrs[L2TP_ATTR_IFNAME]) | ||
429 | cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); | ||
430 | |||
431 | if (info->attrs[L2TP_ATTR_VLAN_ID]) | ||
432 | cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]); | ||
433 | } | ||
434 | |||
435 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
436 | cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
437 | |||
438 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | ||
439 | cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | ||
440 | |||
441 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | ||
442 | cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | ||
443 | |||
444 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | ||
445 | cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | ||
446 | |||
447 | if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) | ||
448 | cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); | ||
449 | |||
450 | if (info->attrs[L2TP_ATTR_MTU]) | ||
451 | cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); | ||
452 | |||
453 | if (info->attrs[L2TP_ATTR_MRU]) | ||
454 | cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); | ||
455 | |||
456 | if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || | ||
457 | (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { | ||
458 | ret = -EPROTONOSUPPORT; | ||
459 | goto out; | ||
460 | } | ||
461 | |||
462 | /* Check that pseudowire-specific params are present */ | ||
463 | switch (cfg.pw_type) { | ||
464 | case L2TP_PWTYPE_NONE: | ||
465 | break; | ||
466 | case L2TP_PWTYPE_ETH_VLAN: | ||
467 | if (!info->attrs[L2TP_ATTR_VLAN_ID]) { | ||
468 | ret = -EINVAL; | ||
469 | goto out; | ||
470 | } | ||
471 | break; | ||
472 | case L2TP_PWTYPE_ETH: | ||
473 | break; | ||
474 | case L2TP_PWTYPE_PPP: | ||
475 | case L2TP_PWTYPE_PPP_AC: | ||
476 | break; | ||
477 | case L2TP_PWTYPE_IP: | ||
478 | default: | ||
479 | ret = -EPROTONOSUPPORT; | ||
480 | break; | ||
481 | } | ||
482 | |||
483 | ret = -EPROTONOSUPPORT; | ||
484 | if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) | ||
485 | ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, | ||
486 | session_id, peer_session_id, &cfg); | ||
487 | |||
488 | out: | ||
489 | return ret; | ||
490 | } | ||
491 | |||
492 | static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info) | ||
493 | { | ||
494 | int ret = 0; | ||
495 | struct l2tp_session *session; | ||
496 | u16 pw_type; | ||
497 | |||
498 | session = l2tp_nl_session_find(info); | ||
499 | if (session == NULL) { | ||
500 | ret = -ENODEV; | ||
501 | goto out; | ||
502 | } | ||
503 | |||
504 | pw_type = session->pwtype; | ||
505 | if (pw_type < __L2TP_PWTYPE_MAX) | ||
506 | if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) | ||
507 | ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); | ||
508 | |||
509 | out: | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info) | ||
514 | { | ||
515 | int ret = 0; | ||
516 | struct l2tp_session *session; | ||
517 | |||
518 | session = l2tp_nl_session_find(info); | ||
519 | if (session == NULL) { | ||
520 | ret = -ENODEV; | ||
521 | goto out; | ||
522 | } | ||
523 | |||
524 | if (info->attrs[L2TP_ATTR_DEBUG]) | ||
525 | session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); | ||
526 | |||
527 | if (info->attrs[L2TP_ATTR_DATA_SEQ]) | ||
528 | session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); | ||
529 | |||
530 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | ||
531 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | ||
532 | |||
533 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | ||
534 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | ||
535 | |||
536 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | ||
537 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | ||
538 | |||
539 | if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) | ||
540 | session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); | ||
541 | |||
542 | if (info->attrs[L2TP_ATTR_MTU]) | ||
543 | session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); | ||
544 | |||
545 | if (info->attrs[L2TP_ATTR_MRU]) | ||
546 | session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); | ||
547 | |||
548 | out: | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, | ||
553 | struct l2tp_session *session) | ||
554 | { | ||
555 | void *hdr; | ||
556 | struct nlattr *nest; | ||
557 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
558 | struct sock *sk = NULL; | ||
559 | |||
560 | sk = tunnel->sock; | ||
561 | |||
562 | hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); | ||
563 | if (!hdr) /* genlmsg_put() returns NULL on error, not an ERR_PTR */ | ||
564 | return -EMSGSIZE; | ||
565 | |||
566 | NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); | ||
567 | NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); | ||
568 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); | ||
569 | NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); | ||
570 | NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); | ||
571 | NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); | ||
572 | NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); | ||
573 | if (session->mru) | ||
574 | NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); | ||
575 | |||
576 | if (session->ifname && session->ifname[0]) | ||
577 | NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); | ||
578 | if (session->cookie_len) | ||
579 | NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); | ||
580 | if (session->peer_cookie_len) | ||
581 | NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); | ||
582 | NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); | ||
583 | NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); | ||
584 | NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); | ||
585 | #ifdef CONFIG_XFRM | ||
586 | if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) | ||
587 | NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); | ||
588 | #endif | ||
589 | if (session->reorder_timeout) | ||
590 | NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); | ||
591 | |||
592 | nest = nla_nest_start(skb, L2TP_ATTR_STATS); | ||
593 | if (nest == NULL) | ||
594 | goto nla_put_failure; | ||
595 | NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); | ||
596 | NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); | ||
597 | NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); | ||
598 | NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); | ||
599 | NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); | ||
600 | NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); | ||
601 | NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); | ||
602 | NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); | ||
603 | nla_nest_end(skb, nest); | ||
604 | |||
605 | return genlmsg_end(skb, hdr); | ||
606 | |||
607 | nla_put_failure: | ||
608 | genlmsg_cancel(skb, hdr); | ||
609 | return -1; | ||
610 | } | ||
611 | |||
612 | static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) | ||
613 | { | ||
614 | struct l2tp_session *session; | ||
615 | struct sk_buff *msg; | ||
616 | int ret; | ||
617 | |||
618 | session = l2tp_nl_session_find(info); | ||
619 | if (session == NULL) { | ||
620 | ret = -ENODEV; | ||
621 | goto out; | ||
622 | } | ||
623 | |||
624 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
625 | if (!msg) { | ||
626 | ret = -ENOMEM; | ||
627 | goto out; | ||
628 | } | ||
629 | |||
630 | ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq, | ||
631 | 0, session); | ||
632 | if (ret < 0) | ||
633 | goto err_out; | ||
634 | |||
635 | return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); | ||
636 | |||
637 | err_out: | ||
638 | nlmsg_free(msg); | ||
639 | |||
640 | out: | ||
641 | return ret; | ||
642 | } | ||
643 | |||
644 | static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb) | ||
645 | { | ||
646 | struct net *net = sock_net(skb->sk); | ||
647 | struct l2tp_session *session; | ||
648 | struct l2tp_tunnel *tunnel = NULL; | ||
649 | int ti = cb->args[0]; | ||
650 | int si = cb->args[1]; | ||
651 | |||
652 | for (;;) { | ||
653 | if (tunnel == NULL) { | ||
654 | tunnel = l2tp_tunnel_find_nth(net, ti); | ||
655 | if (tunnel == NULL) | ||
656 | goto out; | ||
657 | } | ||
658 | |||
659 | session = l2tp_session_find_nth(tunnel, si); | ||
660 | if (session == NULL) { | ||
661 | ti++; | ||
662 | tunnel = NULL; | ||
663 | si = 0; | ||
664 | continue; | ||
665 | } | ||
666 | |||
667 | if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid, | ||
668 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | ||
669 | session) <= 0) | ||
670 | break; | ||
671 | |||
672 | si++; | ||
673 | } | ||
674 | |||
675 | out: | ||
676 | cb->args[0] = ti; | ||
677 | cb->args[1] = si; | ||
678 | |||
679 | return skb->len; | ||
680 | } | ||
681 | |||
682 | static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { | ||
683 | [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, }, | ||
684 | [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, }, | ||
685 | [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, }, | ||
686 | [L2TP_ATTR_OFFSET] = { .type = NLA_U16, }, | ||
687 | [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, }, | ||
688 | [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, }, | ||
689 | [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, }, | ||
690 | [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, }, | ||
691 | [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, }, | ||
692 | [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, }, | ||
693 | [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, }, | ||
694 | [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, }, | ||
695 | [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, }, | ||
696 | [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, }, | ||
697 | [L2TP_ATTR_DEBUG] = { .type = NLA_U32, }, | ||
698 | [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, }, | ||
699 | [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, }, | ||
700 | [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, }, | ||
701 | [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, }, | ||
702 | [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, }, | ||
703 | [L2TP_ATTR_FD] = { .type = NLA_U32, }, | ||
704 | [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, }, | ||
705 | [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, }, | ||
706 | [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, }, | ||
707 | [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, }, | ||
708 | [L2TP_ATTR_MTU] = { .type = NLA_U16, }, | ||
709 | [L2TP_ATTR_MRU] = { .type = NLA_U16, }, | ||
710 | [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, | ||
711 | [L2TP_ATTR_IFNAME] = { | ||
712 | .type = NLA_NUL_STRING, | ||
713 | .len = IFNAMSIZ - 1, | ||
714 | }, | ||
715 | [L2TP_ATTR_COOKIE] = { | ||
716 | .type = NLA_BINARY, | ||
717 | .len = 8, | ||
718 | }, | ||
719 | [L2TP_ATTR_PEER_COOKIE] = { | ||
720 | .type = NLA_BINARY, | ||
721 | .len = 8, | ||
722 | }, | ||
723 | }; | ||
724 | |||
725 | static struct genl_ops l2tp_nl_ops[] = { | ||
726 | { | ||
727 | .cmd = L2TP_CMD_NOOP, | ||
728 | .doit = l2tp_nl_cmd_noop, | ||
729 | .policy = l2tp_nl_policy, | ||
730 | /* can be retrieved by unprivileged users */ | ||
731 | }, | ||
732 | { | ||
733 | .cmd = L2TP_CMD_TUNNEL_CREATE, | ||
734 | .doit = l2tp_nl_cmd_tunnel_create, | ||
735 | .policy = l2tp_nl_policy, | ||
736 | .flags = GENL_ADMIN_PERM, | ||
737 | }, | ||
738 | { | ||
739 | .cmd = L2TP_CMD_TUNNEL_DELETE, | ||
740 | .doit = l2tp_nl_cmd_tunnel_delete, | ||
741 | .policy = l2tp_nl_policy, | ||
742 | .flags = GENL_ADMIN_PERM, | ||
743 | }, | ||
744 | { | ||
745 | .cmd = L2TP_CMD_TUNNEL_MODIFY, | ||
746 | .doit = l2tp_nl_cmd_tunnel_modify, | ||
747 | .policy = l2tp_nl_policy, | ||
748 | .flags = GENL_ADMIN_PERM, | ||
749 | }, | ||
750 | { | ||
751 | .cmd = L2TP_CMD_TUNNEL_GET, | ||
752 | .doit = l2tp_nl_cmd_tunnel_get, | ||
753 | .dumpit = l2tp_nl_cmd_tunnel_dump, | ||
754 | .policy = l2tp_nl_policy, | ||
755 | .flags = GENL_ADMIN_PERM, | ||
756 | }, | ||
757 | { | ||
758 | .cmd = L2TP_CMD_SESSION_CREATE, | ||
759 | .doit = l2tp_nl_cmd_session_create, | ||
760 | .policy = l2tp_nl_policy, | ||
761 | .flags = GENL_ADMIN_PERM, | ||
762 | }, | ||
763 | { | ||
764 | .cmd = L2TP_CMD_SESSION_DELETE, | ||
765 | .doit = l2tp_nl_cmd_session_delete, | ||
766 | .policy = l2tp_nl_policy, | ||
767 | .flags = GENL_ADMIN_PERM, | ||
768 | }, | ||
769 | { | ||
770 | .cmd = L2TP_CMD_SESSION_MODIFY, | ||
771 | .doit = l2tp_nl_cmd_session_modify, | ||
772 | .policy = l2tp_nl_policy, | ||
773 | .flags = GENL_ADMIN_PERM, | ||
774 | }, | ||
775 | { | ||
776 | .cmd = L2TP_CMD_SESSION_GET, | ||
777 | .doit = l2tp_nl_cmd_session_get, | ||
778 | .dumpit = l2tp_nl_cmd_session_dump, | ||
779 | .policy = l2tp_nl_policy, | ||
780 | .flags = GENL_ADMIN_PERM, | ||
781 | }, | ||
782 | }; | ||
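These commands are driven from userspace over generic netlink. As a hedged illustration (assuming libnl-3; the helper name, the placeholder tunnel id and the abbreviated error handling are not part of this patch), issuing L2TP_CMD_TUNNEL_GET for a single tunnel could look like:

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/l2tp.h>

/* Hypothetical helper: request one tunnel's attributes. The reply,
 * built by l2tp_nl_tunnel_send() above, would be collected with
 * nl_recvmsgs() and a registered callback (omitted here). */
static int query_l2tp_tunnel(uint32_t tunnel_id)
{
	struct nl_sock *sock = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, err = -1;

	if (sock == NULL)
		return -1;
	if (genl_connect(sock) < 0)
		goto out;

	family = genl_ctrl_resolve(sock, L2TP_GENL_NAME);	/* "l2tp" */
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (msg == NULL)
		goto out;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    L2TP_CMD_TUNNEL_GET, L2TP_GENL_VERSION);
	nla_put_u32(msg, L2TP_ATTR_CONN_ID, tunnel_id);

	err = (nl_send_auto(sock, msg) < 0) ? -1 : 0;
	nlmsg_free(msg);
out:
	nl_socket_free(sock);
	return err;
}

Omitting L2TP_ATTR_CONN_ID here would trip the -EINVAL check in l2tp_nl_cmd_tunnel_get() above; sending the same command with NLM_F_DUMP instead walks all tunnels through l2tp_nl_cmd_tunnel_dump().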
783 | |||
784 | int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops) | ||
785 | { | ||
786 | int ret; | ||
787 | |||
788 | ret = -EINVAL; | ||
789 | if (pw_type >= __L2TP_PWTYPE_MAX) | ||
790 | goto err; | ||
791 | |||
792 | genl_lock(); | ||
793 | ret = -EBUSY; | ||
794 | if (l2tp_nl_cmd_ops[pw_type]) | ||
795 | goto out; | ||
796 | |||
797 | l2tp_nl_cmd_ops[pw_type] = ops; | ||
798 | ret = 0; | ||
799 | out: | ||
800 | genl_unlock(); | ||
801 | err: | ||
802 | return ret; | ||
803 | } | ||
804 | EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); | ||
805 | |||
806 | void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type) | ||
807 | { | ||
808 | if (pw_type < __L2TP_PWTYPE_MAX) { | ||
809 | genl_lock(); | ||
810 | l2tp_nl_cmd_ops[pw_type] = NULL; | ||
811 | genl_unlock(); | ||
812 | } | ||
813 | } | ||
814 | EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops); | ||
815 | |||
816 | static int l2tp_nl_init(void) | ||
817 | { | ||
818 | int err; | ||
819 | |||
820 | printk(KERN_INFO "L2TP netlink interface\n"); | ||
821 | err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, | ||
822 | ARRAY_SIZE(l2tp_nl_ops)); | ||
823 | |||
824 | return err; | ||
825 | } | ||
826 | |||
827 | static void l2tp_nl_cleanup(void) | ||
828 | { | ||
829 | genl_unregister_family(&l2tp_nl_family); | ||
830 | } | ||
831 | |||
832 | module_init(l2tp_nl_init); | ||
833 | module_exit(l2tp_nl_cleanup); | ||
834 | |||
835 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
836 | MODULE_DESCRIPTION("L2TP netlink"); | ||
837 | MODULE_LICENSE("GPL"); | ||
838 | MODULE_VERSION("1.0"); | ||
839 | MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \ | ||
840 | __stringify(NETLINK_GENERIC) "-type-" "l2tp"); | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c new file mode 100644 index 000000000000..90d82b3f2889 --- /dev/null +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -0,0 +1,1837 @@ | |||
1 | /***************************************************************************** | ||
2 | * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets | ||
3 | * | ||
4 | * PPPoX --- Generic PPP encapsulation socket family | ||
5 | * PPPoL2TP --- PPP over L2TP (RFC 2661) | ||
6 | * | ||
7 | * Version: 2.0.0 | ||
8 | * | ||
9 | * Authors: James Chapman (jchapman@katalix.com) | ||
10 | * | ||
11 | * Based on original work by Martijn van Oosterhout <kleptog@svana.org> | ||
12 | * | ||
13 | * License: | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | /* This driver handles only L2TP data frames; control frames are handled by a | ||
22 | * userspace application. | ||
23 | * | ||
24 | * To send data in an L2TP session, userspace opens a PPPoL2TP socket and | ||
25 | * attaches it to a bound UDP socket with local tunnel_id / session_id and | ||
26 | * peer tunnel_id / session_id set. Data can then be sent or received using | ||
27 | * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket | ||
28 | * can be read or modified using ioctl() or [gs]etsockopt() calls. | ||
29 | * | ||
30 | * When a PPPoL2TP socket is connected with local and peer session_id values | ||
31 | * zero, the socket is treated as a special tunnel management socket. | ||
32 | * | ||
33 | * Here's example userspace code to create a socket for sending/receiving data | ||
34 | * over an L2TP session: | ||
35 | * | ||
36 | * struct sockaddr_pppol2tp sax; | ||
37 | * int fd; | ||
38 | * int session_fd; | ||
39 | * | ||
40 | * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); | ||
41 | * | ||
42 | * sax.sa_family = AF_PPPOX; | ||
43 | * sax.sa_protocol = PX_PROTO_OL2TP; | ||
44 | * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket | ||
45 | * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; | ||
46 | * sax.pppol2tp.addr.sin_port = addr->sin_port; | ||
47 | * sax.pppol2tp.addr.sin_family = AF_INET; | ||
48 | * sax.pppol2tp.s_tunnel = tunnel_id; | ||
49 | * sax.pppol2tp.s_session = session_id; | ||
50 | * sax.pppol2tp.d_tunnel = peer_tunnel_id; | ||
51 | * sax.pppol2tp.d_session = peer_session_id; | ||
52 | * | ||
53 | * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); | ||
54 | * | ||
55 | * A pppd plugin that allows PPP traffic to be carried over L2TP using | ||
56 | * this driver is available from the OpenL2TP project at | ||
57 | * http://openl2tp.sourceforge.net. | ||
58 | */ | ||
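Fleshing that fragment out: a minimal, self-contained sketch (hedged: the helper name and IDs are placeholders, and tunnel_fd is assumed to be a UDP socket already bound and connected to the L2TP peer):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

/* Hypothetical helper: attach a new PPPoL2TP session socket to an
 * existing UDP tunnel socket. Returns the session fd or -1 on error. */
static int open_l2tp_session(int tunnel_fd, const struct sockaddr_in *peer,
			     uint16_t tid, uint16_t sid,
			     uint16_t peer_tid, uint16_t peer_sid)
{
	struct sockaddr_pppol2tp sax;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = tunnel_fd;	/* the bound UDP socket */
	sax.pppol2tp.addr = *peer;	/* peer address, AF_INET */
	sax.pppol2tp.s_tunnel = tid;
	sax.pppol2tp.s_session = sid;
	sax.pppol2tp.d_tunnel = peer_tid;
	sax.pppol2tp.d_session = peer_sid;

	if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* sendmsg()/recvmsg() now carry session data */
}

Connecting with s_session == d_session == 0 through the same call produces the tunnel management socket mentioned above, used only for ioctl()/sockopt() control.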
59 | |||
60 | #include <linux/module.h> | ||
61 | #include <linux/string.h> | ||
62 | #include <linux/list.h> | ||
63 | #include <linux/uaccess.h> | ||
64 | |||
65 | #include <linux/kernel.h> | ||
66 | #include <linux/spinlock.h> | ||
67 | #include <linux/kthread.h> | ||
68 | #include <linux/sched.h> | ||
69 | #include <linux/slab.h> | ||
70 | #include <linux/errno.h> | ||
71 | #include <linux/jiffies.h> | ||
72 | |||
73 | #include <linux/netdevice.h> | ||
74 | #include <linux/net.h> | ||
75 | #include <linux/inetdevice.h> | ||
76 | #include <linux/skbuff.h> | ||
77 | #include <linux/init.h> | ||
78 | #include <linux/ip.h> | ||
79 | #include <linux/udp.h> | ||
80 | #include <linux/if_pppox.h> | ||
81 | #include <linux/if_pppol2tp.h> | ||
82 | #include <net/sock.h> | ||
83 | #include <linux/ppp_channel.h> | ||
84 | #include <linux/ppp_defs.h> | ||
85 | #include <linux/if_ppp.h> | ||
86 | #include <linux/file.h> | ||
87 | #include <linux/hash.h> | ||
88 | #include <linux/sort.h> | ||
89 | #include <linux/proc_fs.h> | ||
90 | #include <linux/l2tp.h> | ||
91 | #include <linux/nsproxy.h> | ||
92 | #include <net/net_namespace.h> | ||
93 | #include <net/netns/generic.h> | ||
94 | #include <net/dst.h> | ||
95 | #include <net/ip.h> | ||
96 | #include <net/udp.h> | ||
97 | #include <net/xfrm.h> | ||
98 | |||
99 | #include <asm/byteorder.h> | ||
100 | #include <asm/atomic.h> | ||
101 | |||
102 | #include "l2tp_core.h" | ||
103 | |||
104 | #define PPPOL2TP_DRV_VERSION "V2.0" | ||
105 | |||
106 | /* Space for UDP, L2TP and PPP headers */ | ||
107 | #define PPPOL2TP_HEADER_OVERHEAD 40 | ||
108 | |||
109 | #define PRINTK(_mask, _type, _lvl, _fmt, args...) \ | ||
110 | do { \ | ||
111 | if ((_mask) & (_type)) \ | ||
112 | printk(_lvl "PPPOL2TP: " _fmt, ##args); \ | ||
113 | } while (0) | ||
114 | |||
115 | /* Number of bytes needed to build transmit L2TP headers. | ||
116 | * Unfortunately the size differs depending on whether sequence numbers are | ||
117 | * enabled: the base L2TPv2 data header (flags/version, tunnel id, session | ||
118 | * id) is 6 bytes, and the optional Ns/Nr sequence fields add 4 more. | ||
119 | #define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10 | ||
120 | #define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6 | ||
121 | |||
122 | /* Private data of each session. This data lives at the end of struct | ||
123 | * l2tp_session, referenced via session->priv[]. | ||
124 | */ | ||
125 | struct pppol2tp_session { | ||
126 | int owner; /* pid that opened the socket */ | ||
127 | |||
128 | struct sock *sock; /* Pointer to the session | ||
129 | * PPPoX socket */ | ||
130 | struct sock *tunnel_sock; /* Pointer to the tunnel UDP | ||
131 | * socket */ | ||
132 | int flags; /* accessed by PPPIOCGFLAGS. | ||
133 | * Unused. */ | ||
134 | }; | ||
135 | |||
136 | static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); | ||
137 | |||
138 | static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL }; | ||
139 | static const struct proto_ops pppol2tp_ops; | ||
140 | |||
141 | /* Helpers to obtain tunnel/session contexts from sockets. | ||
142 | */ | ||
143 | static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) | ||
144 | { | ||
145 | struct l2tp_session *session; | ||
146 | |||
147 | if (sk == NULL) | ||
148 | return NULL; | ||
149 | |||
150 | sock_hold(sk); | ||
151 | session = (struct l2tp_session *)(sk->sk_user_data); | ||
152 | if (session == NULL) { | ||
153 | sock_put(sk); | ||
154 | goto out; | ||
155 | } | ||
156 | |||
157 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
158 | |||
159 | out: | ||
160 | return session; | ||
161 | } | ||
162 | |||
163 | /***************************************************************************** | ||
164 | * Receive data handling | ||
165 | *****************************************************************************/ | ||
166 | |||
167 | static int pppol2tp_recv_payload_hook(struct sk_buff *skb) | ||
168 | { | ||
169 | /* Skip PPP header, if present. In testing, Microsoft L2TP clients | ||
170 | * don't send the PPP header (PPP header compression enabled), but | ||
171 | * other clients can include the header. So we cope with both cases | ||
172 | * here. The PPP header is always FF03 when using L2TP. | ||
173 | * | ||
174 | * Note that skb->data[] isn't dereferenced from a u16 ptr here since | ||
175 | * the field may be unaligned. | ||
176 | */ | ||
177 | if (!pskb_may_pull(skb, 2)) | ||
178 | return 1; | ||
179 | |||
180 | if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) | ||
181 | skb_pull(skb, 2); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | /* Receive message. This is the recvmsg for the PPPoL2TP socket. | ||
187 | */ | ||
188 | static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
189 | struct msghdr *msg, size_t len, | ||
190 | int flags) | ||
191 | { | ||
192 | int err; | ||
193 | struct sk_buff *skb; | ||
194 | struct sock *sk = sock->sk; | ||
195 | |||
196 | err = -EIO; | ||
197 | if (sk->sk_state & PPPOX_BOUND) | ||
198 | goto end; | ||
199 | |||
200 | msg->msg_namelen = 0; | ||
201 | |||
202 | err = 0; | ||
203 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | ||
204 | flags & MSG_DONTWAIT, &err); | ||
205 | if (!skb) | ||
206 | goto end; | ||
207 | |||
208 | if (len > skb->len) | ||
209 | len = skb->len; | ||
210 | else if (len < skb->len) | ||
211 | msg->msg_flags |= MSG_TRUNC; | ||
212 | |||
213 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); | ||
214 | if (likely(err == 0)) | ||
215 | err = len; | ||
216 | |||
217 | kfree_skb(skb); | ||
218 | end: | ||
219 | return err; | ||
220 | } | ||
221 | |||
222 | static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) | ||
223 | { | ||
224 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
225 | struct sock *sk = NULL; | ||
226 | |||
227 | /* If the socket is bound, send it in to PPP's input queue. Otherwise | ||
228 | * queue it on the session socket. | ||
229 | */ | ||
230 | sk = ps->sock; | ||
231 | if (sk == NULL) | ||
232 | goto no_sock; | ||
233 | |||
234 | if (sk->sk_state & PPPOX_BOUND) { | ||
235 | struct pppox_sock *po; | ||
236 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, | ||
237 | "%s: recv %d byte data frame, passing to ppp\n", | ||
238 | session->name, data_len); | ||
239 | |||
240 | /* We need to forget all info related to the L2TP packet | ||
241 | * gathered in the skb as we are going to reuse the same | ||
242 | * skb for the inner packet. | ||
243 | * Namely we need to: | ||
244 | * - reset xfrm (IPSec) information as it applies to | ||
245 | * the outer L2TP packet and not to the inner one | ||
246 | * - release the dst to force a route lookup on the inner | ||
247 | * IP packet since skb->dst currently points to the dst | ||
248 | * of the UDP tunnel | ||
249 | * - reset netfilter information as it doesn't apply | ||
250 | * to the inner packet either | ||
251 | */ | ||
252 | secpath_reset(skb); | ||
253 | skb_dst_drop(skb); | ||
254 | nf_reset(skb); | ||
255 | |||
256 | po = pppox_sk(sk); | ||
257 | ppp_input(&po->chan, skb); | ||
258 | } else { | ||
259 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
260 | "%s: socket not bound\n", session->name); | ||
261 | |||
262 | /* Not bound. Nothing we can do, so discard. */ | ||
263 | session->stats.rx_errors++; | ||
264 | kfree_skb(skb); | ||
265 | } | ||
266 | |||
267 | return; | ||
268 | |||
269 | no_sock: | ||
270 | PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, | ||
271 | "%s: no socket\n", session->name); | ||
272 | kfree_skb(skb); | ||
273 | } | ||
274 | |||
275 | static void pppol2tp_session_sock_hold(struct l2tp_session *session) | ||
276 | { | ||
277 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
278 | |||
279 | if (ps->sock) | ||
280 | sock_hold(ps->sock); | ||
281 | } | ||
282 | |||
283 | static void pppol2tp_session_sock_put(struct l2tp_session *session) | ||
284 | { | ||
285 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
286 | |||
287 | if (ps->sock) | ||
288 | sock_put(ps->sock); | ||
289 | } | ||
290 | |||
291 | /************************************************************************ | ||
292 | * Transmit handling | ||
293 | ***********************************************************************/ | ||
294 | |||
295 | /* This is the sendmsg handler for the PPPoL2TP session socket. We come here | ||
296 | * when a user application does a sendmsg() on the session socket. L2TP and | ||
297 | * PPP headers must be inserted into the user's data. | ||
298 | */ | ||
299 | static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | ||
300 | size_t total_len) | ||
301 | { | ||
302 | static const unsigned char ppph[2] = { 0xff, 0x03 }; | ||
303 | struct sock *sk = sock->sk; | ||
304 | struct sk_buff *skb; | ||
305 | int error; | ||
306 | struct l2tp_session *session; | ||
307 | struct l2tp_tunnel *tunnel; | ||
308 | struct pppol2tp_session *ps; | ||
309 | int uhlen; | ||
310 | |||
311 | error = -ENOTCONN; | ||
312 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | ||
313 | goto error; | ||
314 | |||
315 | /* Get session and tunnel contexts */ | ||
316 | error = -EBADF; | ||
317 | session = pppol2tp_sock_to_session(sk); | ||
318 | if (session == NULL) | ||
319 | goto error; | ||
320 | |||
321 | ps = l2tp_session_priv(session); | ||
322 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
323 | if (tunnel == NULL) | ||
324 | goto error_put_sess; | ||
325 | |||
326 | uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; | ||
327 | |||
328 | /* Allocate a socket buffer */ | ||
329 | error = -ENOMEM; | ||
330 | skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + | ||
331 | uhlen + session->hdr_len + | ||
332 | sizeof(ppph) + total_len, | ||
333 | 0, GFP_KERNEL); | ||
334 | if (!skb) | ||
335 | goto error_put_sess_tun; | ||
336 | |||
337 | /* Reserve space for headers. */ | ||
338 | skb_reserve(skb, NET_SKB_PAD); | ||
339 | skb_reset_network_header(skb); | ||
340 | skb_reserve(skb, sizeof(struct iphdr)); | ||
341 | skb_reset_transport_header(skb); | ||
342 | skb_reserve(skb, uhlen); | ||
343 | |||
344 | /* Add PPP header */ | ||
345 | skb->data[0] = ppph[0]; | ||
346 | skb->data[1] = ppph[1]; | ||
347 | skb_put(skb, 2); | ||
348 | |||
349 | /* Copy user data into skb, after the PPP header; copying to skb->data | ||
350 | * would overwrite the two header bytes just written above. */ | ||
351 | error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, total_len); | ||
352 | if (error < 0) { | ||
353 | kfree_skb(skb); | ||
354 | goto error_put_sess_tun; | ||
355 | } | ||
356 | |||
357 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
358 | |||
359 | sock_put(ps->tunnel_sock); | ||
360 | sock_put(sk); /* drop the reference taken by pppol2tp_sock_to_session() */ | ||
361 | return total_len; | ||
362 | |||
363 | error_put_sess_tun: | ||
364 | sock_put(ps->tunnel_sock); | ||
365 | error_put_sess: | ||
366 | sock_put(sk); | ||
367 | error: | ||
368 | return error; | ||
369 | } | ||
370 | |||
371 | /* Transmit function called by generic PPP driver. Sends PPP frame | ||
372 | * over PPPoL2TP socket. | ||
373 | * | ||
374 | * This is almost the same as pppol2tp_sendmsg(), but rather than | ||
375 | * being called with a msghdr from userspace, it is called with a skb | ||
376 | * from the kernel. | ||
377 | * | ||
378 | * The supplied skb from ppp doesn't have enough headroom for the | ||
379 | * insertion of L2TP, UDP and IP headers so we need to allocate more | ||
380 | * headroom in the skb. This will create a cloned skb. But we must be | ||
381 | * careful in the error case because the caller will expect to free | ||
382 | * the skb it supplied, not our cloned skb. So we take care to always | ||
383 | * leave the original skb unfreed if we return an error. | ||
384 | */ | ||
385 | static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | ||
386 | { | ||
387 | static const u8 ppph[2] = { 0xff, 0x03 }; | ||
388 | struct sock *sk = (struct sock *) chan->private; | ||
389 | struct sock *sk_tun; | ||
390 | struct l2tp_session *session; | ||
391 | struct l2tp_tunnel *tunnel; | ||
392 | struct pppol2tp_session *ps; | ||
393 | int old_headroom; | ||
394 | int new_headroom; | ||
395 | |||
396 | if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) | ||
397 | goto abort; | ||
398 | |||
399 | /* Get session and tunnel contexts from the socket */ | ||
400 | session = pppol2tp_sock_to_session(sk); | ||
401 | if (session == NULL) | ||
402 | goto abort; | ||
403 | |||
404 | ps = l2tp_session_priv(session); | ||
405 | sk_tun = ps->tunnel_sock; | ||
406 | if (sk_tun == NULL) | ||
407 | goto abort_put_sess; | ||
408 | tunnel = l2tp_sock_to_tunnel(sk_tun); | ||
409 | if (tunnel == NULL) | ||
410 | goto abort_put_sess; | ||
411 | |||
412 | old_headroom = skb_headroom(skb); | ||
413 | if (skb_cow_head(skb, sizeof(ppph))) | ||
414 | goto abort_put_sess_tun; | ||
415 | |||
416 | new_headroom = skb_headroom(skb); | ||
417 | skb->truesize += new_headroom - old_headroom; | ||
418 | |||
419 | /* Setup PPP header */ | ||
420 | __skb_push(skb, sizeof(ppph)); | ||
421 | skb->data[0] = ppph[0]; | ||
422 | skb->data[1] = ppph[1]; | ||
423 | |||
424 | l2tp_xmit_skb(session, skb, session->hdr_len); | ||
425 | |||
426 | sock_put(sk_tun); | ||
427 | sock_put(sk); | ||
428 | return 1; | ||
429 | |||
430 | abort_put_sess_tun: | ||
431 | sock_put(sk_tun); | ||
432 | abort_put_sess: | ||
433 | sock_put(sk); | ||
434 | abort: | ||
435 | /* Free the original skb */ | ||
436 | kfree_skb(skb); | ||
437 | return 1; | ||
438 | } | ||
439 | |||
440 | /***************************************************************************** | ||
441 | * Session (and tunnel control) socket create/destroy. | ||
442 | *****************************************************************************/ | ||
443 | |||
444 | /* Called by l2tp_core when a session socket is being closed. | ||
445 | */ | ||
446 | static void pppol2tp_session_close(struct l2tp_session *session) | ||
447 | { | ||
448 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
449 | struct sock *sk = ps->sock; | ||
450 | struct sk_buff *skb; | ||
451 | |||
452 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
453 | |||
454 | if (session->session_id == 0) | ||
455 | goto out; | ||
456 | |||
457 | if (sk != NULL) { | ||
458 | lock_sock(sk); | ||
459 | |||
460 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
461 | pppox_unbind_sock(sk); | ||
462 | sk->sk_state = PPPOX_DEAD; | ||
463 | sk->sk_state_change(sk); | ||
464 | } | ||
465 | |||
466 | /* Purge any queued data */ | ||
467 | skb_queue_purge(&sk->sk_receive_queue); | ||
468 | skb_queue_purge(&sk->sk_write_queue); | ||
469 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
470 | kfree_skb(skb); | ||
471 | sock_put(sk); | ||
472 | } | ||
473 | |||
474 | release_sock(sk); | ||
475 | } | ||
476 | |||
477 | out: | ||
478 | return; | ||
479 | } | ||
480 | |||
481 | /* Really kill the session socket. (Called from sock_put() if | ||
482 | * refcnt == 0.) | ||
483 | */ | ||
484 | static void pppol2tp_session_destruct(struct sock *sk) | ||
485 | { | ||
486 | struct l2tp_session *session; | ||
487 | |||
488 | if (sk->sk_user_data != NULL) { | ||
489 | session = sk->sk_user_data; | ||
490 | if (session == NULL) | ||
491 | goto out; | ||
492 | |||
493 | sk->sk_user_data = NULL; | ||
494 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
495 | l2tp_session_dec_refcount(session); | ||
496 | } | ||
497 | |||
498 | out: | ||
499 | return; | ||
500 | } | ||
501 | |||
502 | /* Called when the PPPoX socket (session) is closed. | ||
503 | */ | ||
504 | static int pppol2tp_release(struct socket *sock) | ||
505 | { | ||
506 | struct sock *sk = sock->sk; | ||
507 | struct l2tp_session *session; | ||
508 | int error; | ||
509 | |||
510 | if (!sk) | ||
511 | return 0; | ||
512 | |||
513 | error = -EBADF; | ||
514 | lock_sock(sk); | ||
515 | if (sock_flag(sk, SOCK_DEAD) != 0) | ||
516 | goto error; | ||
517 | |||
518 | pppox_unbind_sock(sk); | ||
519 | |||
520 | /* Signal the death of the socket. */ | ||
521 | sk->sk_state = PPPOX_DEAD; | ||
522 | sock_orphan(sk); | ||
523 | sock->sk = NULL; | ||
524 | |||
525 | session = pppol2tp_sock_to_session(sk); | ||
526 | |||
527 | /* Purge any queued data */ | ||
528 | skb_queue_purge(&sk->sk_receive_queue); | ||
529 | skb_queue_purge(&sk->sk_write_queue); | ||
530 | if (session != NULL) { | ||
531 | struct sk_buff *skb; | ||
532 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
533 | kfree_skb(skb); | ||
534 | sock_put(sk); | ||
535 | } | ||
536 | sock_put(sk); | ||
537 | } | ||
538 | |||
539 | release_sock(sk); | ||
540 | |||
541 | /* This will delete the session context via | ||
542 | * pppol2tp_session_destruct() if the socket's refcnt drops to | ||
543 | * zero. | ||
544 | */ | ||
545 | sock_put(sk); | ||
546 | |||
547 | return 0; | ||
548 | |||
549 | error: | ||
550 | release_sock(sk); | ||
551 | return error; | ||
552 | } | ||
553 | |||
554 | static struct proto pppol2tp_sk_proto = { | ||
555 | .name = "PPPOL2TP", | ||
556 | .owner = THIS_MODULE, | ||
557 | .obj_size = sizeof(struct pppox_sock), | ||
558 | }; | ||
559 | |||
560 | static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb) | ||
561 | { | ||
562 | int rc; | ||
563 | |||
564 | rc = l2tp_udp_encap_recv(sk, skb); | ||
565 | if (rc) | ||
566 | kfree_skb(skb); | ||
567 | |||
568 | return NET_RX_SUCCESS; | ||
569 | } | ||
570 | |||
571 | /* socket() handler. Initialize a new struct sock. | ||
572 | */ | ||
573 | static int pppol2tp_create(struct net *net, struct socket *sock) | ||
574 | { | ||
575 | int error = -ENOMEM; | ||
576 | struct sock *sk; | ||
577 | |||
578 | sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto); | ||
579 | if (!sk) | ||
580 | goto out; | ||
581 | |||
582 | sock_init_data(sock, sk); | ||
583 | |||
584 | sock->state = SS_UNCONNECTED; | ||
585 | sock->ops = &pppol2tp_ops; | ||
586 | |||
587 | sk->sk_backlog_rcv = pppol2tp_backlog_recv; | ||
588 | sk->sk_protocol = PX_PROTO_OL2TP; | ||
589 | sk->sk_family = PF_PPPOX; | ||
590 | sk->sk_state = PPPOX_NONE; | ||
591 | sk->sk_type = SOCK_STREAM; | ||
592 | sk->sk_destruct = pppol2tp_session_destruct; | ||
593 | |||
594 | error = 0; | ||
595 | |||
596 | out: | ||
597 | return error; | ||
598 | } | ||
599 | |||
600 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
601 | static void pppol2tp_show(struct seq_file *m, void *arg) | ||
602 | { | ||
603 | struct l2tp_session *session = arg; | ||
604 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
605 | |||
606 | if (ps) { | ||
607 | struct pppox_sock *po = pppox_sk(ps->sock); | ||
608 | if (po) | ||
609 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | ||
610 | } | ||
611 | } | ||
612 | #endif | ||
613 | |||
614 | /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket. | ||
615 | */ | ||
616 | static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | ||
617 | int sockaddr_len, int flags) | ||
618 | { | ||
619 | struct sock *sk = sock->sk; | ||
620 | struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; | ||
621 | struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr; | ||
622 | struct pppox_sock *po = pppox_sk(sk); | ||
623 | struct l2tp_session *session = NULL; | ||
624 | struct l2tp_tunnel *tunnel; | ||
625 | struct pppol2tp_session *ps; | ||
626 | struct dst_entry *dst; | ||
627 | struct l2tp_session_cfg cfg = { 0, }; | ||
628 | int error = 0; | ||
629 | u32 tunnel_id, peer_tunnel_id; | ||
630 | u32 session_id, peer_session_id; | ||
631 | int ver = 2; | ||
632 | int fd; | ||
633 | |||
634 | lock_sock(sk); | ||
635 | |||
636 | error = -EINVAL; | ||
637 | if (sp->sa_protocol != PX_PROTO_OL2TP) | ||
638 | goto end; | ||
639 | |||
640 | /* Check for already bound sockets */ | ||
641 | error = -EBUSY; | ||
642 | if (sk->sk_state & PPPOX_CONNECTED) | ||
643 | goto end; | ||
644 | |||
645 | /* We don't support rebinding anyway */ | ||
646 | error = -EALREADY; | ||
647 | if (sk->sk_user_data) | ||
648 | goto end; /* socket is already attached */ | ||
649 | |||
650 | /* Get params from socket address. Handle L2TPv2 and L2TPv3 */ | ||
651 | if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { | ||
652 | fd = sp->pppol2tp.fd; | ||
653 | tunnel_id = sp->pppol2tp.s_tunnel; | ||
654 | peer_tunnel_id = sp->pppol2tp.d_tunnel; | ||
655 | session_id = sp->pppol2tp.s_session; | ||
656 | peer_session_id = sp->pppol2tp.d_session; | ||
657 | } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { | ||
658 | ver = 3; | ||
659 | fd = sp3->pppol2tp.fd; | ||
660 | tunnel_id = sp3->pppol2tp.s_tunnel; | ||
661 | peer_tunnel_id = sp3->pppol2tp.d_tunnel; | ||
662 | session_id = sp3->pppol2tp.s_session; | ||
663 | peer_session_id = sp3->pppol2tp.d_session; | ||
664 | } else { | ||
665 | error = -EINVAL; | ||
666 | goto end; /* bad socket address */ | ||
667 | } | ||
668 | |||
669 | /* Don't bind if tunnel_id is 0 */ | ||
670 | error = -EINVAL; | ||
671 | if (tunnel_id == 0) | ||
672 | goto end; | ||
673 | |||
674 | tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id); | ||
675 | |||
676 | /* Special case: create the tunnel context if session_id and | ||
677 | * peer_session_id are both 0. Otherwise look up the tunnel using | ||
678 | * the supplied tunnel id. | ||
679 | */ | ||
680 | if ((session_id == 0) && (peer_session_id == 0)) { | ||
681 | if (tunnel == NULL) { | ||
682 | struct l2tp_tunnel_cfg tcfg = { | ||
683 | .encap = L2TP_ENCAPTYPE_UDP, | ||
684 | .debug = 0, | ||
685 | }; | ||
686 | error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel); | ||
687 | if (error < 0) | ||
688 | goto end; | ||
689 | } | ||
690 | } else { | ||
691 | /* Error if we can't find the tunnel */ | ||
692 | error = -ENOENT; | ||
693 | if (tunnel == NULL) | ||
694 | goto end; | ||
695 | |||
696 | /* Error if socket is not prepped */ | ||
697 | if (tunnel->sock == NULL) | ||
698 | goto end; | ||
699 | } | ||
700 | |||
701 | if (tunnel->recv_payload_hook == NULL) | ||
702 | tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; | ||
703 | |||
704 | if (tunnel->peer_tunnel_id == 0) { | ||
705 | if (ver == 2) | ||
706 | tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel; | ||
707 | else | ||
708 | tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel; | ||
709 | } | ||
710 | |||
711 | /* Create session if it doesn't already exist. We handle the | ||
712 | * case where a session was previously created by the netlink | ||
713 | * interface by checking that the session doesn't already have | ||
714 | * a socket and that its tunnel socket is what we expect. If | ||
715 | * either check fails, return EEXIST to the caller. | ||
716 | */ | ||
717 | session = l2tp_session_find(sock_net(sk), tunnel, session_id); | ||
718 | if (session == NULL) { | ||
719 | /* Default MTU must allow space for UDP/L2TP/PPP | ||
720 | * headers. | ||
721 | */ | ||
722 | cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; | ||
723 | |||
724 | /* Allocate and initialize a new session context. */ | ||
725 | session = l2tp_session_create(sizeof(struct pppol2tp_session), | ||
726 | tunnel, session_id, | ||
727 | peer_session_id, &cfg); | ||
728 | if (session == NULL) { | ||
729 | error = -ENOMEM; | ||
730 | goto end; | ||
731 | } | ||
732 | } else { | ||
733 | ps = l2tp_session_priv(session); | ||
734 | error = -EEXIST; | ||
735 | if (ps->sock != NULL) | ||
736 | goto end; | ||
737 | |||
738 | /* consistency checks */ | ||
739 | if (ps->tunnel_sock != tunnel->sock) | ||
740 | goto end; | ||
741 | } | ||
742 | |||
743 | /* Associate session with its PPPoL2TP socket */ | ||
744 | ps = l2tp_session_priv(session); | ||
745 | ps->owner = current->pid; | ||
746 | ps->sock = sk; | ||
747 | ps->tunnel_sock = tunnel->sock; | ||
748 | |||
749 | session->recv_skb = pppol2tp_recv; | ||
750 | session->session_close = pppol2tp_session_close; | ||
751 | #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) | ||
752 | session->show = pppol2tp_show; | ||
753 | #endif | ||
754 | |||
755 | /* We need to know each time a skb is dropped from the reorder | ||
756 | * queue. | ||
757 | */ | ||
758 | session->ref = pppol2tp_session_sock_hold; | ||
759 | session->deref = pppol2tp_session_sock_put; | ||
760 | |||
761 | /* If PMTU discovery was enabled, use the MTU that was discovered */ | ||
762 | dst = sk_dst_get(sk); | ||
763 | if (dst != NULL) { | ||
764 | u32 pmtu = dst_mtu(__sk_dst_get(sk)); | ||
765 | if (pmtu != 0) | ||
766 | session->mtu = session->mru = pmtu - | ||
767 | PPPOL2TP_HEADER_OVERHEAD; | ||
768 | dst_release(dst); | ||
769 | } | ||
770 | |||
771 | /* Special case: if source & dest session_id == 0x0000, this | ||
772 | * socket is being created to manage the tunnel. Just set up | ||
773 | * the internal context for use by ioctl() and sockopt() | ||
774 | * handlers. | ||
775 | */ | ||
776 | if ((session->session_id == 0) && | ||
777 | (session->peer_session_id == 0)) { | ||
778 | error = 0; | ||
779 | goto out_no_ppp; | ||
780 | } | ||
781 | |||
782 | /* The only header we need to worry about is the L2TP | ||
783 | * header. This size is different depending on whether | ||
784 | * sequence numbers are enabled for the data channel. | ||
785 | */ | ||
786 | po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; | ||
787 | |||
788 | po->chan.private = sk; | ||
789 | po->chan.ops = &pppol2tp_chan_ops; | ||
790 | po->chan.mtu = session->mtu; | ||
791 | |||
792 | error = ppp_register_net_channel(sock_net(sk), &po->chan); | ||
793 | if (error) | ||
794 | goto end; | ||
795 | |||
796 | out_no_ppp: | ||
797 | /* This is how we get the session context from the socket. */ | ||
798 | sk->sk_user_data = session; | ||
799 | sk->sk_state = PPPOX_CONNECTED; | ||
800 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
801 | "%s: created\n", session->name); | ||
802 | |||
803 | end: | ||
804 | release_sock(sk); | ||
805 | |||
806 | return error; | ||
807 | } | ||
808 | |||
809 | #ifdef CONFIG_L2TP_V3 | ||
810 | |||
811 | /* Called when creating sessions via the netlink interface. | ||
812 | */ | ||
813 | static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | ||
814 | { | ||
815 | int error; | ||
816 | struct l2tp_tunnel *tunnel; | ||
817 | struct l2tp_session *session; | ||
818 | struct pppol2tp_session *ps; | ||
819 | |||
820 | tunnel = l2tp_tunnel_find(net, tunnel_id); | ||
821 | |||
822 | /* Error if we can't find the tunnel */ | ||
823 | error = -ENOENT; | ||
824 | if (tunnel == NULL) | ||
825 | goto out; | ||
826 | |||
827 | /* Error if tunnel socket is not prepped */ | ||
828 | if (tunnel->sock == NULL) | ||
829 | goto out; | ||
830 | |||
831 | /* Check that this session doesn't already exist */ | ||
832 | error = -EEXIST; | ||
833 | session = l2tp_session_find(net, tunnel, session_id); | ||
834 | if (session != NULL) | ||
835 | goto out; | ||
836 | |||
837 | /* Default MTU values. */ | ||
838 | if (cfg->mtu == 0) | ||
839 | cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; | ||
840 | if (cfg->mru == 0) | ||
841 | cfg->mru = cfg->mtu; | ||
842 | |||
843 | /* Allocate and initialize a new session context. */ | ||
844 | error = -ENOMEM; | ||
845 | session = l2tp_session_create(sizeof(struct pppol2tp_session), | ||
846 | tunnel, session_id, | ||
847 | peer_session_id, cfg); | ||
848 | if (session == NULL) | ||
849 | goto out; | ||
850 | |||
851 | ps = l2tp_session_priv(session); | ||
852 | ps->tunnel_sock = tunnel->sock; | ||
853 | |||
854 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
855 | "%s: created\n", session->name); | ||
856 | |||
857 | error = 0; | ||
858 | |||
859 | out: | ||
860 | return error; | ||
861 | } | ||
862 | |||
863 | /* Called when deleting sessions via the netlink interface. | ||
864 | */ | ||
865 | static int pppol2tp_session_delete(struct l2tp_session *session) | ||
866 | { | ||
867 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
868 | |||
869 | if (ps->sock == NULL) | ||
870 | l2tp_session_dec_refcount(session); | ||
871 | |||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | #endif /* CONFIG_L2TP_V3 */ | ||
876 | |||
877 | /* getname() support. | ||
878 | */ | ||
879 | static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, | ||
880 | int *usockaddr_len, int peer) | ||
881 | { | ||
882 | int len = 0; | ||
883 | int error = 0; | ||
884 | struct l2tp_session *session; | ||
885 | struct l2tp_tunnel *tunnel; | ||
886 | struct sock *sk = sock->sk; | ||
887 | struct inet_sock *inet; | ||
888 | struct pppol2tp_session *pls; | ||
889 | |||
890 | error = -ENOTCONN; | ||
891 | if (sk == NULL) | ||
892 | goto end; | ||
893 | if (sk->sk_state != PPPOX_CONNECTED) | ||
894 | goto end; | ||
895 | |||
896 | error = -EBADF; | ||
897 | session = pppol2tp_sock_to_session(sk); | ||
898 | if (session == NULL) | ||
899 | goto end; | ||
900 | |||
901 | pls = l2tp_session_priv(session); | ||
902 | tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock); | ||
903 | if (tunnel == NULL) { | ||
904 | error = -EBADF; | ||
905 | goto end_put_sess; | ||
906 | } | ||
907 | |||
908 | inet = inet_sk(sk); | ||
909 | if (tunnel->version == 2) { | ||
910 | struct sockaddr_pppol2tp sp; | ||
911 | len = sizeof(sp); | ||
912 | memset(&sp, 0, len); | ||
913 | sp.sa_family = AF_PPPOX; | ||
914 | sp.sa_protocol = PX_PROTO_OL2TP; | ||
915 | sp.pppol2tp.fd = tunnel->fd; | ||
916 | sp.pppol2tp.pid = pls->owner; | ||
917 | sp.pppol2tp.s_tunnel = tunnel->tunnel_id; | ||
918 | sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; | ||
919 | sp.pppol2tp.s_session = session->session_id; | ||
920 | sp.pppol2tp.d_session = session->peer_session_id; | ||
921 | sp.pppol2tp.addr.sin_family = AF_INET; | ||
922 | sp.pppol2tp.addr.sin_port = inet->inet_dport; | ||
923 | sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; | ||
924 | memcpy(uaddr, &sp, len); | ||
925 | } else if (tunnel->version == 3) { | ||
926 | struct sockaddr_pppol2tpv3 sp; | ||
927 | len = sizeof(sp); | ||
928 | memset(&sp, 0, len); | ||
929 | sp.sa_family = AF_PPPOX; | ||
930 | sp.sa_protocol = PX_PROTO_OL2TP; | ||
931 | sp.pppol2tp.fd = tunnel->fd; | ||
932 | sp.pppol2tp.pid = pls->owner; | ||
933 | sp.pppol2tp.s_tunnel = tunnel->tunnel_id; | ||
934 | sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; | ||
935 | sp.pppol2tp.s_session = session->session_id; | ||
936 | sp.pppol2tp.d_session = session->peer_session_id; | ||
937 | sp.pppol2tp.addr.sin_family = AF_INET; | ||
938 | sp.pppol2tp.addr.sin_port = inet->inet_dport; | ||
939 | sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; | ||
940 | memcpy(uaddr, &sp, len); | ||
941 | } | ||
942 | |||
943 | *usockaddr_len = len; | ||
944 | |||
945 | sock_put(pls->tunnel_sock); | ||
946 | end_put_sess: | ||
947 | sock_put(sk); | ||
948 | error = 0; | ||
949 | |||
950 | end: | ||
951 | return error; | ||
952 | } | ||
953 | |||
954 | /**************************************************************************** | ||
955 | * ioctl() handlers. | ||
956 | * | ||
957 | * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP | ||
958 | * sockets. However, in order to control kernel tunnel features, we allow | ||
959 | * userspace to create a special "tunnel" PPPoX socket which is used for | ||
960 | * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow | ||
961 | * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() | ||
962 | * calls. | ||
963 | ****************************************************************************/ | ||
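To make this concrete, a hedged userspace sketch (the helper name is a placeholder; tunnel_mgmt_fd is a PPPoX socket connected with both session IDs zero, as described above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_pppol2tp.h>

/* Hypothetical helper: fetch tunnel-level stats or, when session_id is
 * non-zero, session-level stats -- pppol2tp_tunnel_ioctl() below
 * redirects such requests to the session ioctl handler. */
static int get_l2tp_stats(int tunnel_mgmt_fd, uint16_t session_id,
			  struct pppol2tp_ioc_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->session_id = session_id;
	return ioctl(tunnel_mgmt_fd, PPPIOCGL2TPSTATS, stats);
}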
964 | |||
965 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, | ||
966 | struct l2tp_stats *stats) | ||
967 | { | ||
968 | dest->tx_packets = stats->tx_packets; | ||
969 | dest->tx_bytes = stats->tx_bytes; | ||
970 | dest->tx_errors = stats->tx_errors; | ||
971 | dest->rx_packets = stats->rx_packets; | ||
972 | dest->rx_bytes = stats->rx_bytes; | ||
973 | dest->rx_seq_discards = stats->rx_seq_discards; | ||
974 | dest->rx_oos_packets = stats->rx_oos_packets; | ||
975 | dest->rx_errors = stats->rx_errors; | ||
976 | } | ||
977 | |||
978 | /* Session ioctl helper. | ||
979 | */ | ||
980 | static int pppol2tp_session_ioctl(struct l2tp_session *session, | ||
981 | unsigned int cmd, unsigned long arg) | ||
982 | { | ||
983 | struct ifreq ifr; | ||
984 | int err = 0; | ||
985 | struct sock *sk; | ||
986 | int val = (int) arg; | ||
987 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
988 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
989 | struct pppol2tp_ioc_stats stats; | ||
990 | |||
991 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, | ||
992 | "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", | ||
993 | session->name, cmd, arg); | ||
994 | |||
995 | sk = ps->sock; | ||
996 | sock_hold(sk); | ||
997 | |||
998 | switch (cmd) { | ||
999 | case SIOCGIFMTU: | ||
1000 | err = -ENXIO; | ||
1001 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1002 | break; | ||
1003 | |||
1004 | err = -EFAULT; | ||
1005 | if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) | ||
1006 | break; | ||
1007 | ifr.ifr_mtu = session->mtu; | ||
1008 | if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) | ||
1009 | break; | ||
1010 | |||
1011 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1012 | "%s: get mtu=%d\n", session->name, session->mtu); | ||
1013 | err = 0; | ||
1014 | break; | ||
1015 | |||
1016 | case SIOCSIFMTU: | ||
1017 | err = -ENXIO; | ||
1018 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1019 | break; | ||
1020 | |||
1021 | err = -EFAULT; | ||
1022 | if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) | ||
1023 | break; | ||
1024 | |||
1025 | session->mtu = ifr.ifr_mtu; | ||
1026 | |||
1027 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1028 | "%s: set mtu=%d\n", session->name, session->mtu); | ||
1029 | err = 0; | ||
1030 | break; | ||
1031 | |||
1032 | case PPPIOCGMRU: | ||
1033 | err = -ENXIO; | ||
1034 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1035 | break; | ||
1036 | |||
1037 | err = -EFAULT; | ||
1038 | if (put_user(session->mru, (int __user *) arg)) | ||
1039 | break; | ||
1040 | |||
1041 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1042 | "%s: get mru=%d\n", session->name, session->mru); | ||
1043 | err = 0; | ||
1044 | break; | ||
1045 | |||
1046 | case PPPIOCSMRU: | ||
1047 | err = -ENXIO; | ||
1048 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1049 | break; | ||
1050 | |||
1051 | err = -EFAULT; | ||
1052 | if (get_user(val, (int __user *) arg)) | ||
1053 | break; | ||
1054 | |||
1055 | session->mru = val; | ||
1056 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1057 | "%s: set mru=%d\n", session->name, session->mru); | ||
1058 | err = 0; | ||
1059 | break; | ||
1060 | |||
1061 | case PPPIOCGFLAGS: | ||
1062 | err = -EFAULT; | ||
1063 | if (put_user(ps->flags, (int __user *) arg)) | ||
1064 | break; | ||
1065 | |||
1066 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1067 | "%s: get flags=%d\n", session->name, ps->flags); | ||
1068 | err = 0; | ||
1069 | break; | ||
1070 | |||
1071 | case PPPIOCSFLAGS: | ||
1072 | err = -EFAULT; | ||
1073 | if (get_user(val, (int __user *) arg)) | ||
1074 | break; | ||
1075 | ps->flags = val; | ||
1076 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1077 | "%s: set flags=%d\n", session->name, ps->flags); | ||
1078 | err = 0; | ||
1079 | break; | ||
1080 | |||
1081 | case PPPIOCGL2TPSTATS: | ||
1082 | err = -ENXIO; | ||
1083 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1084 | break; | ||
1085 | |||
1086 | memset(&stats, 0, sizeof(stats)); | ||
1087 | stats.tunnel_id = tunnel->tunnel_id; | ||
1088 | stats.session_id = session->session_id; | ||
1089 | pppol2tp_copy_stats(&stats, &session->stats); | ||
1090 | if (copy_to_user((void __user *) arg, &stats, | ||
1091 | sizeof(stats))) | ||
1092 | break; | ||
1093 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1094 | "%s: get L2TP stats\n", session->name); | ||
1095 | err = 0; | ||
1096 | break; | ||
1097 | |||
1098 | default: | ||
1099 | err = -ENOSYS; | ||
1100 | break; | ||
1101 | } | ||
1102 | |||
1103 | sock_put(sk); | ||
1104 | |||
1105 | return err; | ||
1106 | } | ||
1107 | |||
1108 | /* Tunnel ioctl helper. | ||
1109 | * | ||
1110 | * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data | ||
1111 | * specifies a session_id, the session ioctl handler is called. This allows an | ||
1112 | * application to retrieve session stats via a tunnel socket. | ||
1113 | */ | ||
1114 | static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, | ||
1115 | unsigned int cmd, unsigned long arg) | ||
1116 | { | ||
1117 | int err = 0; | ||
1118 | struct sock *sk; | ||
1119 | struct pppol2tp_ioc_stats stats; | ||
1120 | |||
1121 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, | ||
1122 | "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", | ||
1123 | tunnel->name, cmd, arg); | ||
1124 | |||
1125 | sk = tunnel->sock; | ||
1126 | sock_hold(sk); | ||
1127 | |||
1128 | switch (cmd) { | ||
1129 | case PPPIOCGL2TPSTATS: | ||
1130 | err = -ENXIO; | ||
1131 | if (!(sk->sk_state & PPPOX_CONNECTED)) | ||
1132 | break; | ||
1133 | |||
1134 | if (copy_from_user(&stats, (void __user *) arg, | ||
1135 | sizeof(stats))) { | ||
1136 | err = -EFAULT; | ||
1137 | break; | ||
1138 | } | ||
1139 | if (stats.session_id != 0) { | ||
1140 | /* resend to session ioctl handler */ | ||
1141 | struct l2tp_session *session = | ||
1142 | l2tp_session_find(sock_net(sk), tunnel, stats.session_id); | ||
1143 | if (session != NULL) | ||
1144 | err = pppol2tp_session_ioctl(session, cmd, arg); | ||
1145 | else | ||
1146 | err = -EBADR; | ||
1147 | break; | ||
1148 | } | ||
1149 | #ifdef CONFIG_XFRM | ||
1150 | stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0; | ||
1151 | #endif | ||
1152 | pppol2tp_copy_stats(&stats, &tunnel->stats); | ||
1153 | if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) { | ||
1154 | err = -EFAULT; | ||
1155 | break; | ||
1156 | } | ||
1157 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1158 | "%s: get L2TP stats\n", tunnel->name); | ||
1159 | err = 0; | ||
1160 | break; | ||
1161 | |||
1162 | default: | ||
1163 | err = -ENOSYS; | ||
1164 | break; | ||
1165 | } | ||
1166 | |||
1167 | sock_put(sk); | ||
1168 | |||
1169 | return err; | ||
1170 | } | ||
1171 | |||
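
A hedged userspace sketch of the redirection described above: per-session
stats can be fetched through the tunnel management socket by filling in a
non-zero session_id before the ioctl. The fd/ID parameters and the exact
header set are assumptions; PPPIOCGL2TPSTATS and struct pppol2tp_ioc_stats
come from the kernel uapi headers of this era.

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/if_pppol2tp.h>
    #include <linux/if_ppp.h>           /* PPPIOCGL2TPSTATS */

    /* tunfd: connected tunnel PPPoX socket; sid: target session id */
    static int dump_session_stats(int tunfd, __u16 sid)
    {
            struct pppol2tp_ioc_stats stats;

            memset(&stats, 0, sizeof(stats));
            stats.session_id = sid;     /* non-zero: the kernel redirects
                                           to the session ioctl handler */
            if (ioctl(tunfd, PPPIOCGL2TPSTATS, &stats) < 0)
                    return -1;
            printf("tx %llu rx %llu\n",
                   (unsigned long long)stats.tx_packets,
                   (unsigned long long)stats.rx_packets);
            return 0;
    }
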
1172 | /* Main ioctl() handler. | ||
1173 | * Dispatch to tunnel or session helpers depending on the socket. | ||
1174 | */ | ||
1175 | static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd, | ||
1176 | unsigned long arg) | ||
1177 | { | ||
1178 | struct sock *sk = sock->sk; | ||
1179 | struct l2tp_session *session; | ||
1180 | struct l2tp_tunnel *tunnel; | ||
1181 | struct pppol2tp_session *ps; | ||
1182 | int err; | ||
1183 | |||
1184 | if (!sk) | ||
1185 | return 0; | ||
1186 | |||
1187 | err = -EBADF; | ||
1188 | if (sock_flag(sk, SOCK_DEAD) != 0) | ||
1189 | goto end; | ||
1190 | |||
1191 | err = -ENOTCONN; | ||
1192 | if ((sk->sk_user_data == NULL) || | ||
1193 | (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)))) | ||
1194 | goto end; | ||
1195 | |||
1196 | /* Get session context from the socket */ | ||
1197 | err = -EBADF; | ||
1198 | session = pppol2tp_sock_to_session(sk); | ||
1199 | if (session == NULL) | ||
1200 | goto end; | ||
1201 | |||
1202 | /* Special case: if session's session_id is zero, treat ioctl as a | ||
1203 | * tunnel ioctl | ||
1204 | */ | ||
1205 | ps = l2tp_session_priv(session); | ||
1206 | if ((session->session_id == 0) && | ||
1207 | (session->peer_session_id == 0)) { | ||
1208 | err = -EBADF; | ||
1209 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1210 | if (tunnel == NULL) | ||
1211 | goto end_put_sess; | ||
1212 | |||
1213 | err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); | ||
1214 | sock_put(ps->tunnel_sock); | ||
1215 | goto end_put_sess; | ||
1216 | } | ||
1217 | |||
1218 | err = pppol2tp_session_ioctl(session, cmd, arg); | ||
1219 | |||
1220 | end_put_sess: | ||
1221 | sock_put(sk); | ||
1222 | end: | ||
1223 | return err; | ||
1224 | } | ||
1225 | |||
1226 | /***************************************************************************** | ||
1227 | * setsockopt() / getsockopt() support. | ||
1228 | * | ||
1229 | * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP | ||
1230 | * sockets. In order to control kernel tunnel features, we allow userspace to | ||
1231 | * create a special "tunnel" PPPoX socket which is used for control only. | ||
1232 | * Tunnel PPPoX sockets have session_id == 0 and simply allow the user | ||
1233 | * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls. | ||
1234 | *****************************************************************************/ | ||
1235 | |||
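
What the comment above looks like from userspace, as a hedged sketch: the
control-only tunnel socket is obtained by connecting an AF_PPPOX socket
with s_session/d_session left at zero, after which SOL_PPPOL2TP options
apply to the tunnel. udp_fd and the IDs come from the application's own
tunnel setup; error handling is pared down.

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_pppox.h>
    #include <linux/if_pppol2tp.h>

    #ifndef SOL_PPPOL2TP
    #define SOL_PPPOL2TP 273            /* from linux/socket.h */
    #endif

    static int open_tunnel_ctl(int udp_fd, __u16 tid, __u16 peer_tid)
    {
            struct sockaddr_pppol2tp sax;
            int fd, dbg = PPPOL2TP_MSG_CONTROL;

            fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
            if (fd < 0)
                    return -1;

            memset(&sax, 0, sizeof(sax));
            sax.sa_family = AF_PPPOX;
            sax.sa_protocol = PX_PROTO_OL2TP;
            sax.pppol2tp.fd = udp_fd;   /* the tunnel's UDP socket */
            sax.pppol2tp.s_tunnel = tid;
            sax.pppol2tp.d_tunnel = peer_tid;
            /* s_session/d_session stay 0: this is a tunnel socket */
            if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0)
                    return -1;

            setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
                       &dbg, sizeof(dbg));
            return fd;
    }
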
1236 | /* Tunnel setsockopt() helper. | ||
1237 | */ | ||
1238 | static int pppol2tp_tunnel_setsockopt(struct sock *sk, | ||
1239 | struct l2tp_tunnel *tunnel, | ||
1240 | int optname, int val) | ||
1241 | { | ||
1242 | int err = 0; | ||
1243 | |||
1244 | switch (optname) { | ||
1245 | case PPPOL2TP_SO_DEBUG: | ||
1246 | tunnel->debug = val; | ||
1247 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1248 | "%s: set debug=%x\n", tunnel->name, tunnel->debug); | ||
1249 | break; | ||
1250 | |||
1251 | default: | ||
1252 | err = -ENOPROTOOPT; | ||
1253 | break; | ||
1254 | } | ||
1255 | |||
1256 | return err; | ||
1257 | } | ||
1258 | |||
1259 | /* Session setsockopt helper. | ||
1260 | */ | ||
1261 | static int pppol2tp_session_setsockopt(struct sock *sk, | ||
1262 | struct l2tp_session *session, | ||
1263 | int optname, int val) | ||
1264 | { | ||
1265 | int err = 0; | ||
1266 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
1267 | |||
1268 | switch (optname) { | ||
1269 | case PPPOL2TP_SO_RECVSEQ: | ||
1270 | if ((val != 0) && (val != 1)) { | ||
1271 | err = -EINVAL; | ||
1272 | break; | ||
1273 | } | ||
1274 | session->recv_seq = val ? -1 : 0; | ||
1275 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1276 | "%s: set recv_seq=%d\n", session->name, session->recv_seq); | ||
1277 | break; | ||
1278 | |||
1279 | case PPPOL2TP_SO_SENDSEQ: | ||
1280 | if ((val != 0) && (val != 1)) { | ||
1281 | err = -EINVAL; | ||
1282 | break; | ||
1283 | } | ||
1284 | session->send_seq = val ? -1 : 0; | ||
1285 | { | ||
1286 | struct sock *ssk = ps->sock; | ||
1287 | struct pppox_sock *po = pppox_sk(ssk); | ||
1288 | po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : | ||
1289 | PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; | ||
1290 | } | ||
1291 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1292 | "%s: set send_seq=%d\n", session->name, session->send_seq); | ||
1293 | break; | ||
1294 | |||
1295 | case PPPOL2TP_SO_LNSMODE: | ||
1296 | if ((val != 0) && (val != 1)) { | ||
1297 | err = -EINVAL; | ||
1298 | break; | ||
1299 | } | ||
1300 | session->lns_mode = val ? -1 : 0; | ||
1301 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1302 | "%s: set lns_mode=%d\n", session->name, session->lns_mode); | ||
1303 | break; | ||
1304 | |||
1305 | case PPPOL2TP_SO_DEBUG: | ||
1306 | session->debug = val; | ||
1307 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1308 | "%s: set debug=%x\n", session->name, session->debug); | ||
1309 | break; | ||
1310 | |||
1311 | case PPPOL2TP_SO_REORDERTO: | ||
1312 | session->reorder_timeout = msecs_to_jiffies(val); | ||
1313 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1314 | "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); | ||
1315 | break; | ||
1316 | |||
1317 | default: | ||
1318 | err = -ENOPROTOOPT; | ||
1319 | break; | ||
1320 | } | ||
1321 | |||
1322 | return err; | ||
1323 | } | ||
1324 | |||
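
For the per-session options handled above, a minimal sketch (same headers
as the tunnel sketch earlier; sess_fd is a hypothetical connected session
socket). Values are plain ints, as the helper expects.

    static int enable_seq_numbers(int sess_fd)
    {
            int on = 1;

            if (setsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
                           &on, sizeof(on)) < 0)
                    return -1;
            return setsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_RECVSEQ,
                              &on, sizeof(on));
    }
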
1325 | /* Main setsockopt() entry point. | ||
1326 | * Does API checks, then calls either the tunnel or session setsockopt | ||
1327 | * handler, according to whether the PPPoL2TP socket is for a regular | ||
1328 | * session or the special tunnel type. | ||
1329 | */ | ||
1330 | static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, | ||
1331 | char __user *optval, unsigned int optlen) | ||
1332 | { | ||
1333 | struct sock *sk = sock->sk; | ||
1334 | struct l2tp_session *session; | ||
1335 | struct l2tp_tunnel *tunnel; | ||
1336 | struct pppol2tp_session *ps; | ||
1337 | int val; | ||
1338 | int err; | ||
1339 | |||
1340 | if (level != SOL_PPPOL2TP) | ||
1341 | return udp_prot.setsockopt(sk, level, optname, optval, optlen); | ||
1342 | |||
1343 | if (optlen < sizeof(int)) | ||
1344 | return -EINVAL; | ||
1345 | |||
1346 | if (get_user(val, (int __user *)optval)) | ||
1347 | return -EFAULT; | ||
1348 | |||
1349 | err = -ENOTCONN; | ||
1350 | if (sk->sk_user_data == NULL) | ||
1351 | goto end; | ||
1352 | |||
1353 | /* Get session context from the socket */ | ||
1354 | err = -EBADF; | ||
1355 | session = pppol2tp_sock_to_session(sk); | ||
1356 | if (session == NULL) | ||
1357 | goto end; | ||
1358 | |||
1359 | /* Special case: if session_id == 0x0000, treat as operation on tunnel | ||
1360 | */ | ||
1361 | ps = l2tp_session_priv(session); | ||
1362 | if ((session->session_id == 0) && | ||
1363 | (session->peer_session_id == 0)) { | ||
1364 | err = -EBADF; | ||
1365 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1366 | if (tunnel == NULL) | ||
1367 | goto end_put_sess; | ||
1368 | |||
1369 | err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); | ||
1370 | sock_put(ps->tunnel_sock); | ||
1371 | } else | ||
1372 | err = pppol2tp_session_setsockopt(sk, session, optname, val); | ||
1373 | |||
1374 | err = 0; | ||
1375 | |||
1376 | end_put_sess: | ||
1377 | sock_put(sk); | ||
1378 | end: | ||
1379 | return err; | ||
1380 | } | ||
1381 | |||
1382 | /* Tunnel getsockopt helper. Called with sock locked. | ||
1383 | */ | ||
1384 | static int pppol2tp_tunnel_getsockopt(struct sock *sk, | ||
1385 | struct l2tp_tunnel *tunnel, | ||
1386 | int optname, int *val) | ||
1387 | { | ||
1388 | int err = 0; | ||
1389 | |||
1390 | switch (optname) { | ||
1391 | case PPPOL2TP_SO_DEBUG: | ||
1392 | *val = tunnel->debug; | ||
1393 | PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1394 | "%s: get debug=%x\n", tunnel->name, tunnel->debug); | ||
1395 | break; | ||
1396 | |||
1397 | default: | ||
1398 | err = -ENOPROTOOPT; | ||
1399 | break; | ||
1400 | } | ||
1401 | |||
1402 | return err; | ||
1403 | } | ||
1404 | |||
1405 | /* Session getsockopt helper. Called with sock locked. | ||
1406 | */ | ||
1407 | static int pppol2tp_session_getsockopt(struct sock *sk, | ||
1408 | struct l2tp_session *session, | ||
1409 | int optname, int *val) | ||
1410 | { | ||
1411 | int err = 0; | ||
1412 | |||
1413 | switch (optname) { | ||
1414 | case PPPOL2TP_SO_RECVSEQ: | ||
1415 | *val = session->recv_seq; | ||
1416 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1417 | "%s: get recv_seq=%d\n", session->name, *val); | ||
1418 | break; | ||
1419 | |||
1420 | case PPPOL2TP_SO_SENDSEQ: | ||
1421 | *val = session->send_seq; | ||
1422 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1423 | "%s: get send_seq=%d\n", session->name, *val); | ||
1424 | break; | ||
1425 | |||
1426 | case PPPOL2TP_SO_LNSMODE: | ||
1427 | *val = session->lns_mode; | ||
1428 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1429 | "%s: get lns_mode=%d\n", session->name, *val); | ||
1430 | break; | ||
1431 | |||
1432 | case PPPOL2TP_SO_DEBUG: | ||
1433 | *val = session->debug; | ||
1434 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1435 | "%s: get debug=%d\n", session->name, *val); | ||
1436 | break; | ||
1437 | |||
1438 | case PPPOL2TP_SO_REORDERTO: | ||
1439 | *val = (int) jiffies_to_msecs(session->reorder_timeout); | ||
1440 | PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, | ||
1441 | "%s: get reorder_timeout=%d\n", session->name, *val); | ||
1442 | break; | ||
1443 | |||
1444 | default: | ||
1445 | err = -ENOPROTOOPT; | ||
1446 | } | ||
1447 | |||
1448 | return err; | ||
1449 | } | ||
1450 | |||
1451 | /* Main getsockopt() entry point. | ||
1452 | * Does API checks, then calls either the tunnel or session getsockopt | ||
1453 | * handler, according to whether the PPPoX socket is for a regular session | ||
1454 | * or the special tunnel type. | ||
1455 | */ | ||
1456 | static int pppol2tp_getsockopt(struct socket *sock, int level, | ||
1457 | int optname, char __user *optval, int __user *optlen) | ||
1458 | { | ||
1459 | struct sock *sk = sock->sk; | ||
1460 | struct l2tp_session *session; | ||
1461 | struct l2tp_tunnel *tunnel; | ||
1462 | int val, len; | ||
1463 | int err; | ||
1464 | struct pppol2tp_session *ps; | ||
1465 | |||
1466 | if (level != SOL_PPPOL2TP) | ||
1467 | return udp_prot.getsockopt(sk, level, optname, optval, optlen); | ||
1468 | |||
1469 | if (get_user(len, (int __user *) optlen)) | ||
1470 | return -EFAULT; | ||
1471 | |||
1472 | len = min_t(unsigned int, len, sizeof(int)); | ||
1473 | |||
1474 | if (len < 0) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | err = -ENOTCONN; | ||
1478 | if (sk->sk_user_data == NULL) | ||
1479 | goto end; | ||
1480 | |||
1481 | /* Get the session context */ | ||
1482 | err = -EBADF; | ||
1483 | session = pppol2tp_sock_to_session(sk); | ||
1484 | if (session == NULL) | ||
1485 | goto end; | ||
1486 | |||
1487 | /* Special case: if session_id == 0x0000, treat as operation on tunnel */ | ||
1488 | ps = l2tp_session_priv(session); | ||
1489 | if ((session->session_id == 0) && | ||
1490 | (session->peer_session_id == 0)) { | ||
1491 | err = -EBADF; | ||
1492 | tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); | ||
1493 | if (tunnel == NULL) | ||
1494 | goto end_put_sess; | ||
1495 | |||
1496 | err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); | ||
1497 | sock_put(ps->tunnel_sock); | ||
1498 | } else | ||
1499 | err = pppol2tp_session_getsockopt(sk, session, optname, &val); | ||
1500 | |||
1501 | err = -EFAULT; | ||
1502 | if (put_user(len, (int __user *) optlen)) | ||
1503 | goto end_put_sess; | ||
1504 | |||
1505 | if (copy_to_user((void __user *) optval, &val, len)) | ||
1506 | goto end_put_sess; | ||
1507 | |||
1508 | err = 0; | ||
1509 | |||
1510 | end_put_sess: | ||
1511 | sock_put(sk); | ||
1512 | end: | ||
1513 | return err; | ||
1514 | } | ||
1515 | |||
1516 | /***************************************************************************** | ||
1517 | * /proc filesystem for debug | ||
1518 | * Since the original pppol2tp driver provided /proc/net/pppol2tp for | ||
1519 | * L2TPv2, we dump only L2TPv2 tunnels and sessions here. | ||
1520 | *****************************************************************************/ | ||
1521 | |||
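
The file registered below is plain text, so any pager works; for
completeness, a trivial reader:

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/net/pppol2tp", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
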
1522 | static unsigned int pppol2tp_net_id; | ||
1523 | |||
1524 | #ifdef CONFIG_PROC_FS | ||
1525 | |||
1526 | struct pppol2tp_seq_data { | ||
1527 | struct seq_net_private p; | ||
1528 | int tunnel_idx; /* current tunnel */ | ||
1529 | int session_idx; /* index of session within current tunnel */ | ||
1530 | struct l2tp_tunnel *tunnel; | ||
1531 | struct l2tp_session *session; /* NULL means get next tunnel */ | ||
1532 | }; | ||
1533 | |||
1534 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) | ||
1535 | { | ||
1536 | for (;;) { | ||
1537 | pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); | ||
1538 | pd->tunnel_idx++; | ||
1539 | |||
1540 | if (pd->tunnel == NULL) | ||
1541 | break; | ||
1542 | |||
1543 | /* Ignore L2TPv3 tunnels */ | ||
1544 | if (pd->tunnel->version < 3) | ||
1545 | break; | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) | ||
1550 | { | ||
1551 | pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); | ||
1552 | pd->session_idx++; | ||
1553 | |||
1554 | if (pd->session == NULL) { | ||
1555 | pd->session_idx = 0; | ||
1556 | pppol2tp_next_tunnel(net, pd); | ||
1557 | } | ||
1558 | } | ||
1559 | |||
1560 | static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) | ||
1561 | { | ||
1562 | struct pppol2tp_seq_data *pd = SEQ_START_TOKEN; | ||
1563 | loff_t pos = *offs; | ||
1564 | struct net *net; | ||
1565 | |||
1566 | if (!pos) | ||
1567 | goto out; | ||
1568 | |||
1569 | BUG_ON(m->private == NULL); | ||
1570 | pd = m->private; | ||
1571 | net = seq_file_net(m); | ||
1572 | |||
1573 | if (pd->tunnel == NULL) | ||
1574 | pppol2tp_next_tunnel(net, pd); | ||
1575 | else | ||
1576 | pppol2tp_next_session(net, pd); | ||
1577 | |||
1578 | /* NULL tunnel and session indicate the end of the list */ | ||
1579 | if ((pd->tunnel == NULL) && (pd->session == NULL)) | ||
1580 | pd = NULL; | ||
1581 | |||
1582 | out: | ||
1583 | return pd; | ||
1584 | } | ||
1585 | |||
1586 | static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
1587 | { | ||
1588 | (*pos)++; | ||
1589 | return NULL; | ||
1590 | } | ||
1591 | |||
1592 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) | ||
1593 | { | ||
1594 | /* nothing to do */ | ||
1595 | } | ||
1596 | |||
1597 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | ||
1598 | { | ||
1599 | struct l2tp_tunnel *tunnel = v; | ||
1600 | |||
1601 | seq_printf(m, "\nTUNNEL '%s', %c %d\n", | ||
1602 | tunnel->name, | ||
1603 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', | ||
1604 | atomic_read(&tunnel->ref_count) - 1); | ||
1605 | seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", | ||
1606 | tunnel->debug, | ||
1607 | (unsigned long long)tunnel->stats.tx_packets, | ||
1608 | (unsigned long long)tunnel->stats.tx_bytes, | ||
1609 | (unsigned long long)tunnel->stats.tx_errors, | ||
1610 | (unsigned long long)tunnel->stats.rx_packets, | ||
1611 | (unsigned long long)tunnel->stats.rx_bytes, | ||
1612 | (unsigned long long)tunnel->stats.rx_errors); | ||
1613 | } | ||
1614 | |||
1615 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | ||
1616 | { | ||
1617 | struct l2tp_session *session = v; | ||
1618 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
1619 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
1620 | struct pppox_sock *po = pppox_sk(ps->sock); | ||
1621 | u32 ip = 0; | ||
1622 | u16 port = 0; | ||
1623 | |||
1624 | if (tunnel->sock) { | ||
1625 | struct inet_sock *inet = inet_sk(tunnel->sock); | ||
1626 | ip = ntohl(inet->inet_saddr); | ||
1627 | port = ntohs(inet->inet_sport); | ||
1628 | } | ||
1629 | |||
1630 | seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " | ||
1631 | "%04X/%04X %d %c\n", | ||
1632 | session->name, ip, port, | ||
1633 | tunnel->tunnel_id, | ||
1634 | session->session_id, | ||
1635 | tunnel->peer_tunnel_id, | ||
1636 | session->peer_session_id, | ||
1637 | ps->sock->sk_state, | ||
1638 | (session == ps->sock->sk_user_data) ? | ||
1639 | 'Y' : 'N'); | ||
1640 | seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", | ||
1641 | session->mtu, session->mru, | ||
1642 | session->recv_seq ? 'R' : '-', | ||
1643 | session->send_seq ? 'S' : '-', | ||
1644 | session->lns_mode ? "LNS" : "LAC", | ||
1645 | session->debug, | ||
1646 | jiffies_to_msecs(session->reorder_timeout)); | ||
1647 | seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", | ||
1648 | session->nr, session->ns, | ||
1649 | (unsigned long long)session->stats.tx_packets, | ||
1650 | (unsigned long long)session->stats.tx_bytes, | ||
1651 | (unsigned long long)session->stats.tx_errors, | ||
1652 | (unsigned long long)session->stats.rx_packets, | ||
1653 | (unsigned long long)session->stats.rx_bytes, | ||
1654 | (unsigned long long)session->stats.rx_errors); | ||
1655 | |||
1656 | if (po) | ||
1657 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | ||
1658 | } | ||
1659 | |||
1660 | static int pppol2tp_seq_show(struct seq_file *m, void *v) | ||
1661 | { | ||
1662 | struct pppol2tp_seq_data *pd = v; | ||
1663 | |||
1664 | /* display header on line 1 */ | ||
1665 | if (v == SEQ_START_TOKEN) { | ||
1666 | seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n"); | ||
1667 | seq_puts(m, "TUNNEL name, user-data-ok session-count\n"); | ||
1668 | seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
1669 | seq_puts(m, " SESSION name, addr/port src-tid/sid " | ||
1670 | "dest-tid/sid state user-data-ok\n"); | ||
1671 | seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n"); | ||
1672 | seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); | ||
1673 | goto out; | ||
1674 | } | ||
1675 | |||
1676 | /* Show the tunnel or session context. | ||
1677 | */ | ||
1678 | if (pd->session == NULL) | ||
1679 | pppol2tp_seq_tunnel_show(m, pd->tunnel); | ||
1680 | else | ||
1681 | pppol2tp_seq_session_show(m, pd->session); | ||
1682 | |||
1683 | out: | ||
1684 | return 0; | ||
1685 | } | ||
1686 | |||
1687 | static const struct seq_operations pppol2tp_seq_ops = { | ||
1688 | .start = pppol2tp_seq_start, | ||
1689 | .next = pppol2tp_seq_next, | ||
1690 | .stop = pppol2tp_seq_stop, | ||
1691 | .show = pppol2tp_seq_show, | ||
1692 | }; | ||
1693 | |||
1694 | /* Called when our /proc file is opened. We allocate data for use when | ||
1695 | * iterating our tunnel / session contexts and store it in the private | ||
1696 | * data of the seq_file. | ||
1697 | */ | ||
1698 | static int pppol2tp_proc_open(struct inode *inode, struct file *file) | ||
1699 | { | ||
1700 | return seq_open_net(inode, file, &pppol2tp_seq_ops, | ||
1701 | sizeof(struct pppol2tp_seq_data)); | ||
1702 | } | ||
1703 | |||
1704 | static const struct file_operations pppol2tp_proc_fops = { | ||
1705 | .owner = THIS_MODULE, | ||
1706 | .open = pppol2tp_proc_open, | ||
1707 | .read = seq_read, | ||
1708 | .llseek = seq_lseek, | ||
1709 | .release = seq_release_net, | ||
1710 | }; | ||
1711 | |||
1712 | #endif /* CONFIG_PROC_FS */ | ||
1713 | |||
1714 | /***************************************************************************** | ||
1715 | * Network namespace | ||
1716 | *****************************************************************************/ | ||
1717 | |||
1718 | static __net_init int pppol2tp_init_net(struct net *net) | ||
1719 | { | ||
1720 | struct proc_dir_entry *pde; | ||
1721 | int err = 0; | ||
1722 | |||
1723 | pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops); | ||
1724 | if (!pde) { | ||
1725 | err = -ENOMEM; | ||
1726 | goto out; | ||
1727 | } | ||
1728 | |||
1729 | out: | ||
1730 | return err; | ||
1731 | } | ||
1732 | |||
1733 | static __net_exit void pppol2tp_exit_net(struct net *net) | ||
1734 | { | ||
1735 | proc_net_remove(net, "pppol2tp"); | ||
1736 | } | ||
1737 | |||
1738 | static struct pernet_operations pppol2tp_net_ops = { | ||
1739 | .init = pppol2tp_init_net, | ||
1740 | .exit = pppol2tp_exit_net, | ||
1741 | .id = &pppol2tp_net_id, | ||
1742 | }; | ||
1743 | |||
1744 | /***************************************************************************** | ||
1745 | * Init and cleanup | ||
1746 | *****************************************************************************/ | ||
1747 | |||
1748 | static const struct proto_ops pppol2tp_ops = { | ||
1749 | .family = AF_PPPOX, | ||
1750 | .owner = THIS_MODULE, | ||
1751 | .release = pppol2tp_release, | ||
1752 | .bind = sock_no_bind, | ||
1753 | .connect = pppol2tp_connect, | ||
1754 | .socketpair = sock_no_socketpair, | ||
1755 | .accept = sock_no_accept, | ||
1756 | .getname = pppol2tp_getname, | ||
1757 | .poll = datagram_poll, | ||
1758 | .listen = sock_no_listen, | ||
1759 | .shutdown = sock_no_shutdown, | ||
1760 | .setsockopt = pppol2tp_setsockopt, | ||
1761 | .getsockopt = pppol2tp_getsockopt, | ||
1762 | .sendmsg = pppol2tp_sendmsg, | ||
1763 | .recvmsg = pppol2tp_recvmsg, | ||
1764 | .mmap = sock_no_mmap, | ||
1765 | .ioctl = pppox_ioctl, | ||
1766 | }; | ||
1767 | |||
1768 | static struct pppox_proto pppol2tp_proto = { | ||
1769 | .create = pppol2tp_create, | ||
1770 | .ioctl = pppol2tp_ioctl | ||
1771 | }; | ||
1772 | |||
1773 | #ifdef CONFIG_L2TP_V3 | ||
1774 | |||
1775 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { | ||
1776 | .session_create = pppol2tp_session_create, | ||
1777 | .session_delete = pppol2tp_session_delete, | ||
1778 | }; | ||
1779 | |||
1780 | #endif /* CONFIG_L2TP_V3 */ | ||
1781 | |||
1782 | static int __init pppol2tp_init(void) | ||
1783 | { | ||
1784 | int err; | ||
1785 | |||
1786 | err = register_pernet_device(&pppol2tp_net_ops); | ||
1787 | if (err) | ||
1788 | goto out; | ||
1789 | |||
1790 | err = proto_register(&pppol2tp_sk_proto, 0); | ||
1791 | if (err) | ||
1792 | goto out_unregister_pppol2tp_pernet; | ||
1793 | |||
1794 | err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto); | ||
1795 | if (err) | ||
1796 | goto out_unregister_pppol2tp_proto; | ||
1797 | |||
1798 | #ifdef CONFIG_L2TP_V3 | ||
1799 | err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops); | ||
1800 | if (err) | ||
1801 | goto out_unregister_pppox; | ||
1802 | #endif | ||
1803 | |||
1804 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", | ||
1805 | PPPOL2TP_DRV_VERSION); | ||
1806 | |||
1807 | out: | ||
1808 | return err; | ||
1809 | |||
1810 | #ifdef CONFIG_L2TP_V3 | ||
1811 | out_unregister_pppox: | ||
1812 | unregister_pppox_proto(PX_PROTO_OL2TP); | ||
1813 | #endif | ||
1814 | out_unregister_pppol2tp_proto: | ||
1815 | proto_unregister(&pppol2tp_sk_proto); | ||
1816 | out_unregister_pppol2tp_pernet: | ||
1817 | unregister_pernet_device(&pppol2tp_net_ops); | ||
1818 | goto out; | ||
1819 | } | ||
1820 | |||
1821 | static void __exit pppol2tp_exit(void) | ||
1822 | { | ||
1823 | #ifdef CONFIG_L2TP_V3 | ||
1824 | l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP); | ||
1825 | #endif | ||
1826 | unregister_pppox_proto(PX_PROTO_OL2TP); | ||
1827 | proto_unregister(&pppol2tp_sk_proto); | ||
1828 | unregister_pernet_device(&pppol2tp_net_ops); | ||
1829 | } | ||
1830 | |||
1831 | module_init(pppol2tp_init); | ||
1832 | module_exit(pppol2tp_exit); | ||
1833 | |||
1834 | MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); | ||
1835 | MODULE_DESCRIPTION("PPP over L2TP over UDP"); | ||
1836 | MODULE_LICENSE("GPL"); | ||
1837 | MODULE_VERSION(PPPOL2TP_DRV_VERSION); | ||
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 2db6a9f75913..023ba820236f 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -536,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout) | |||
536 | int rc = 0; | 536 | int rc = 0; |
537 | 537 | ||
538 | while (1) { | 538 | while (1) { |
539 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 539 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
540 | if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) | 540 | if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) |
541 | break; | 541 | break; |
542 | rc = -ERESTARTSYS; | 542 | rc = -ERESTARTSYS; |
@@ -547,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout) | |||
547 | break; | 547 | break; |
548 | rc = 0; | 548 | rc = 0; |
549 | } | 549 | } |
550 | finish_wait(sk->sk_sleep, &wait); | 550 | finish_wait(sk_sleep(sk), &wait); |
551 | return rc; | 551 | return rc; |
552 | } | 552 | } |
553 | 553 | ||
@@ -556,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout) | |||
556 | DEFINE_WAIT(wait); | 556 | DEFINE_WAIT(wait); |
557 | 557 | ||
558 | while (1) { | 558 | while (1) { |
559 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 559 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
560 | if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) | 560 | if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) |
561 | break; | 561 | break; |
562 | if (signal_pending(current) || !timeout) | 562 | if (signal_pending(current) || !timeout) |
563 | break; | 563 | break; |
564 | } | 564 | } |
565 | finish_wait(sk->sk_sleep, &wait); | 565 | finish_wait(sk_sleep(sk), &wait); |
566 | return timeout; | 566 | return timeout; |
567 | } | 567 | } |
568 | 568 | ||
@@ -573,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) | |||
573 | int rc; | 573 | int rc; |
574 | 574 | ||
575 | while (1) { | 575 | while (1) { |
576 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 576 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
577 | rc = 0; | 577 | rc = 0; |
578 | if (sk_wait_event(sk, &timeout, | 578 | if (sk_wait_event(sk, &timeout, |
579 | (sk->sk_shutdown & RCV_SHUTDOWN) || | 579 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
@@ -588,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) | |||
588 | if (!timeout) | 588 | if (!timeout) |
589 | break; | 589 | break; |
590 | } | 590 | } |
591 | finish_wait(sk->sk_sleep, &wait); | 591 | finish_wait(sk_sleep(sk), &wait); |
592 | return rc; | 592 | return rc; |
593 | } | 593 | } |
594 | 594 | ||
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 78167e81dfeb..2bb0ddff8c0f 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c | |||
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = { | |||
144 | 144 | ||
145 | static int __init llc_init(void) | 145 | static int __init llc_init(void) |
146 | { | 146 | { |
147 | struct net_device *dev; | ||
148 | |||
149 | dev = first_net_device(&init_net); | ||
150 | if (dev != NULL) | ||
151 | dev = next_net_device(dev); | ||
152 | |||
153 | dev_add_pack(&llc_packet_type); | 147 | dev_add_pack(&llc_packet_type); |
154 | dev_add_pack(&llc_tr_packet_type); | 148 | dev_add_pack(&llc_tr_packet_type); |
155 | return 0; | 149 | return 0; |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index a952b7f8c648..8a91f6c0bb18 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211" | |||
15 | 15 | ||
16 | if MAC80211 != n | 16 | if MAC80211 != n |
17 | 17 | ||
18 | config MAC80211_HAS_RC | ||
19 | def_bool n | ||
20 | |||
18 | config MAC80211_RC_PID | 21 | config MAC80211_RC_PID |
19 | bool "PID controller based rate control algorithm" if EMBEDDED | 22 | bool "PID controller based rate control algorithm" if EMBEDDED |
23 | select MAC80211_HAS_RC | ||
20 | ---help--- | 24 | ---help--- |
21 | This option enables a TX rate control algorithm for | 25 | This option enables a TX rate control algorithm for |
22 | mac80211 that uses a PID controller to select the TX | 26 | mac80211 that uses a PID controller to select the TX |
@@ -24,12 +28,14 @@ config MAC80211_RC_PID | |||
24 | 28 | ||
25 | config MAC80211_RC_MINSTREL | 29 | config MAC80211_RC_MINSTREL |
26 | bool "Minstrel" if EMBEDDED | 30 | bool "Minstrel" if EMBEDDED |
31 | select MAC80211_HAS_RC | ||
27 | default y | 32 | default y |
28 | ---help--- | 33 | ---help--- |
29 | This option enables the 'minstrel' TX rate control algorithm | 34 | This option enables the 'minstrel' TX rate control algorithm |
30 | 35 | ||
31 | choice | 36 | choice |
32 | prompt "Default rate control algorithm" | 37 | prompt "Default rate control algorithm" |
38 | depends on MAC80211_HAS_RC | ||
33 | default MAC80211_RC_DEFAULT_MINSTREL | 39 | default MAC80211_RC_DEFAULT_MINSTREL |
34 | ---help--- | 40 | ---help--- |
35 | This option selects the default rate control algorithm | 41 | This option selects the default rate control algorithm |
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT | |||
62 | 68 | ||
63 | endif | 69 | endif |
64 | 70 | ||
71 | comment "Some wireless drivers require a rate control algorithm" | ||
72 | depends on MAC80211_HAS_RC=n | ||
73 | |||
65 | config MAC80211_MESH | 74 | config MAC80211_MESH |
66 | bool "Enable mac80211 mesh networking (pre-802.11s) support" | 75 | bool "Enable mac80211 mesh networking (pre-802.11s) support" |
67 | depends on MAC80211 && EXPERIMENTAL | 76 | depends on MAC80211 && EXPERIMENTAL |
@@ -212,8 +221,8 @@ config MAC80211_DRIVER_API_TRACER | |||
212 | depends on EVENT_TRACING | 221 | depends on EVENT_TRACING |
213 | help | 222 | help |
214 | Say Y here to make mac80211 register with the ftrace | 223 | Say Y here to make mac80211 register with the ftrace |
215 | framework for the driver API -- you can see which | 224 | framework for the driver API -- you can then see which |
216 | driver methods it is calling then by looking at the | 225 | driver methods it is calling and which API functions |
217 | trace. | 226 | drivers are calling by looking at the trace. |
218 | 227 | ||
219 | If unsure, say N. | 228 | If unsure, say Y. |
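
Every built-in rate-control algorithm now selects MAC80211_HAS_RC, so
other Kconfig symbols can depend on it. A hypothetical driver entry that
refuses to build without a rate-control algorithm:

    # WLAN_FOO is an invented symbol, shown only to illustrate the
    # dependency; a real driver would follow the same pattern.
    config WLAN_FOO
            tristate "Foo wireless driver"
            depends on MAC80211
            depends on MAC80211_HAS_RC
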
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index f9516a27e233..6bb9a9a94960 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -19,23 +19,25 @@ | |||
19 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
20 | #include "driver-ops.h" | 20 | #include "driver-ops.h" |
21 | 21 | ||
22 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 22 | static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
23 | u16 initiator, u16 reason) | 23 | u16 initiator, u16 reason, |
24 | bool from_timer) | ||
24 | { | 25 | { |
25 | struct ieee80211_local *local = sta->local; | 26 | struct ieee80211_local *local = sta->local; |
27 | struct tid_ampdu_rx *tid_rx; | ||
26 | int i; | 28 | int i; |
27 | 29 | ||
28 | /* check if TID is in operational state */ | ||
29 | spin_lock_bh(&sta->lock); | 30 | spin_lock_bh(&sta->lock); |
30 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) { | 31 | |
32 | /* check if TID is in operational state */ | ||
33 | if (!sta->ampdu_mlme.tid_active_rx[tid]) { | ||
31 | spin_unlock_bh(&sta->lock); | 34 | spin_unlock_bh(&sta->lock); |
32 | return; | 35 | return; |
33 | } | 36 | } |
34 | 37 | ||
35 | sta->ampdu_mlme.tid_state_rx[tid] = | 38 | sta->ampdu_mlme.tid_active_rx[tid] = false; |
36 | HT_AGG_STATE_REQ_STOP_BA_MSK | | 39 | |
37 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | 40 | tid_rx = sta->ampdu_mlme.tid_rx[tid]; |
38 | spin_unlock_bh(&sta->lock); | ||
39 | 41 | ||
40 | #ifdef CONFIG_MAC80211_HT_DEBUG | 42 | #ifdef CONFIG_MAC80211_HT_DEBUG |
41 | printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", | 43 | printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", |
@@ -47,61 +49,42 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | |||
47 | printk(KERN_DEBUG "HW problem - can not stop rx " | 49 | printk(KERN_DEBUG "HW problem - can not stop rx " |
48 | "aggregation for tid %d\n", tid); | 50 | "aggregation for tid %d\n", tid); |
49 | 51 | ||
50 | /* shutdown timer has not expired */ | ||
51 | if (initiator != WLAN_BACK_TIMER) | ||
52 | del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
53 | |||
54 | /* check if this is a self generated aggregation halt */ | 52 | /* check if this is a self generated aggregation halt */ |
55 | if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) | 53 | if (initiator == WLAN_BACK_RECIPIENT) |
56 | ieee80211_send_delba(sta->sdata, sta->sta.addr, | 54 | ieee80211_send_delba(sta->sdata, sta->sta.addr, |
57 | tid, 0, reason); | 55 | tid, 0, reason); |
58 | 56 | ||
59 | /* free the reordering buffer */ | 57 | /* free the reordering buffer */ |
60 | for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { | 58 | for (i = 0; i < tid_rx->buf_size; i++) { |
61 | if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { | 59 | if (tid_rx->reorder_buf[i]) { |
62 | /* release the reordered frames */ | 60 | /* release the reordered frames */ |
63 | dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); | 61 | dev_kfree_skb(tid_rx->reorder_buf[i]); |
64 | sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; | 62 | tid_rx->stored_mpdu_num--; |
65 | sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; | 63 | tid_rx->reorder_buf[i] = NULL; |
66 | } | 64 | } |
67 | } | 65 | } |
68 | 66 | ||
69 | spin_lock_bh(&sta->lock); | ||
70 | /* free resources */ | 67 | /* free resources */ |
71 | kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); | 68 | kfree(tid_rx->reorder_buf); |
72 | kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time); | 69 | kfree(tid_rx->reorder_time); |
73 | 70 | sta->ampdu_mlme.tid_rx[tid] = NULL; | |
74 | if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) { | ||
75 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
76 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
77 | } | ||
78 | 71 | ||
79 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; | ||
80 | spin_unlock_bh(&sta->lock); | 72 | spin_unlock_bh(&sta->lock); |
73 | |||
74 | if (!from_timer) | ||
75 | del_timer_sync(&tid_rx->session_timer); | ||
76 | kfree(tid_rx); | ||
81 | } | 77 | } |
82 | 78 | ||
83 | void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, | 79 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
84 | u16 initiator, u16 reason) | 80 | u16 initiator, u16 reason) |
85 | { | 81 | { |
86 | struct sta_info *sta; | 82 | ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false); |
87 | |||
88 | rcu_read_lock(); | ||
89 | |||
90 | sta = sta_info_get(sdata, ra); | ||
91 | if (!sta) { | ||
92 | rcu_read_unlock(); | ||
93 | return; | ||
94 | } | ||
95 | |||
96 | __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason); | ||
97 | |||
98 | rcu_read_unlock(); | ||
99 | } | 83 | } |
100 | 84 | ||
101 | /* | 85 | /* |
102 | * After accepting the AddBA Request we activated a timer, | 86 | * After accepting the AddBA Request we activated a timer, |
103 | * resetting it after each frame that arrives from the originator. | 87 | * resetting it after each frame that arrives from the originator. |
104 | * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. | ||
105 | */ | 88 | */ |
106 | static void sta_rx_agg_session_timer_expired(unsigned long data) | 89 | static void sta_rx_agg_session_timer_expired(unsigned long data) |
107 | { | 90 | { |
@@ -117,9 +100,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) | |||
117 | #ifdef CONFIG_MAC80211_HT_DEBUG | 100 | #ifdef CONFIG_MAC80211_HT_DEBUG |
118 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 101 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
119 | #endif | 102 | #endif |
120 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, | 103 | ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT, |
121 | (u16)*ptid, WLAN_BACK_TIMER, | 104 | WLAN_REASON_QSTA_TIMEOUT, true); |
122 | WLAN_REASON_QSTA_TIMEOUT); | ||
123 | } | 105 | } |
124 | 106 | ||
125 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, | 107 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, |
@@ -194,7 +176,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
194 | 176 | ||
195 | status = WLAN_STATUS_REQUEST_DECLINED; | 177 | status = WLAN_STATUS_REQUEST_DECLINED; |
196 | 178 | ||
197 | if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { | 179 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { |
198 | #ifdef CONFIG_MAC80211_HT_DEBUG | 180 | #ifdef CONFIG_MAC80211_HT_DEBUG |
199 | printk(KERN_DEBUG "Suspend in progress. " | 181 | printk(KERN_DEBUG "Suspend in progress. " |
200 | "Denying ADDBA request\n"); | 182 | "Denying ADDBA request\n"); |
@@ -232,7 +214,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
232 | /* examine state machine */ | 214 | /* examine state machine */ |
233 | spin_lock_bh(&sta->lock); | 215 | spin_lock_bh(&sta->lock); |
234 | 216 | ||
235 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { | 217 | if (sta->ampdu_mlme.tid_active_rx[tid]) { |
236 | #ifdef CONFIG_MAC80211_HT_DEBUG | 218 | #ifdef CONFIG_MAC80211_HT_DEBUG |
237 | if (net_ratelimit()) | 219 | if (net_ratelimit()) |
238 | printk(KERN_DEBUG "unexpected AddBA Req from " | 220 | printk(KERN_DEBUG "unexpected AddBA Req from " |
@@ -294,7 +276,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
294 | } | 276 | } |
295 | 277 | ||
296 | /* change state and send addba resp */ | 278 | /* change state and send addba resp */ |
297 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; | 279 | sta->ampdu_mlme.tid_active_rx[tid] = true; |
298 | tid_agg_rx->dialog_token = dialog_token; | 280 | tid_agg_rx->dialog_token = dialog_token; |
299 | tid_agg_rx->ssn = start_seq_num; | 281 | tid_agg_rx->ssn = start_seq_num; |
300 | tid_agg_rx->head_seq_num = start_seq_num; | 282 | tid_agg_rx->head_seq_num = start_seq_num; |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 87782a4bb541..c163d0a149f4 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -186,7 +186,7 @@ static void sta_addba_resp_timer_expired(unsigned long data) | |||
186 | spin_unlock_bh(&sta->lock); | 186 | spin_unlock_bh(&sta->lock); |
187 | #ifdef CONFIG_MAC80211_HT_DEBUG | 187 | #ifdef CONFIG_MAC80211_HT_DEBUG |
188 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 188 | printk(KERN_DEBUG "timer expired on tid %d but we are not " |
189 | "(or no longer) expecting addBA response there", | 189 | "(or no longer) expecting addBA response there\n", |
190 | tid); | 190 | tid); |
191 | #endif | 191 | #endif |
192 | return; | 192 | return; |
@@ -214,6 +214,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
214 | int ret = 0; | 214 | int ret = 0; |
215 | u16 start_seq_num; | 215 | u16 start_seq_num; |
216 | 216 | ||
217 | trace_api_start_tx_ba_session(pubsta, tid); | ||
218 | |||
217 | if (WARN_ON(!local->ops->ampdu_action)) | 219 | if (WARN_ON(!local->ops->ampdu_action)) |
218 | return -EINVAL; | 220 | return -EINVAL; |
219 | 221 | ||
@@ -245,7 +247,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
245 | return -EINVAL; | 247 | return -EINVAL; |
246 | } | 248 | } |
247 | 249 | ||
248 | if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { | 250 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { |
249 | #ifdef CONFIG_MAC80211_HT_DEBUG | 251 | #ifdef CONFIG_MAC80211_HT_DEBUG |
250 | printk(KERN_DEBUG "Suspend in progress. " | 252 | printk(KERN_DEBUG "Suspend in progress. " |
251 | "Denying BA session request\n"); | 253 | "Denying BA session request\n"); |
@@ -414,7 +416,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, | |||
414 | struct sta_info *sta, u16 tid) | 416 | struct sta_info *sta, u16 tid) |
415 | { | 417 | { |
416 | #ifdef CONFIG_MAC80211_HT_DEBUG | 418 | #ifdef CONFIG_MAC80211_HT_DEBUG |
417 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | 419 | printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); |
418 | #endif | 420 | #endif |
419 | 421 | ||
420 | spin_lock(&local->ampdu_lock); | 422 | spin_lock(&local->ampdu_lock); |
@@ -440,6 +442,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | |||
440 | struct sta_info *sta; | 442 | struct sta_info *sta; |
441 | u8 *state; | 443 | u8 *state; |
442 | 444 | ||
445 | trace_api_start_tx_ba_cb(sdata, ra, tid); | ||
446 | |||
443 | if (tid >= STA_TID_NUM) { | 447 | if (tid >= STA_TID_NUM) { |
444 | #ifdef CONFIG_MAC80211_HT_DEBUG | 448 | #ifdef CONFIG_MAC80211_HT_DEBUG |
445 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | 449 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", |
@@ -541,6 +545,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
541 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 545 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
542 | struct ieee80211_local *local = sdata->local; | 546 | struct ieee80211_local *local = sdata->local; |
543 | 547 | ||
548 | trace_api_stop_tx_ba_session(pubsta, tid, initiator); | ||
549 | |||
544 | if (!local->ops->ampdu_action) | 550 | if (!local->ops->ampdu_action) |
545 | return -EINVAL; | 551 | return -EINVAL; |
546 | 552 | ||
@@ -558,6 +564,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
558 | struct sta_info *sta; | 564 | struct sta_info *sta; |
559 | u8 *state; | 565 | u8 *state; |
560 | 566 | ||
567 | trace_api_stop_tx_ba_cb(sdata, ra, tid); | ||
568 | |||
561 | if (tid >= STA_TID_NUM) { | 569 | if (tid >= STA_TID_NUM) { |
562 | #ifdef CONFIG_MAC80211_HT_DEBUG | 570 | #ifdef CONFIG_MAC80211_HT_DEBUG |
563 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | 571 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", |
@@ -674,7 +682,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
674 | del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 682 | del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); |
675 | 683 | ||
676 | #ifdef CONFIG_MAC80211_HT_DEBUG | 684 | #ifdef CONFIG_MAC80211_HT_DEBUG |
677 | printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); | 685 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); |
678 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 686 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
679 | 687 | ||
680 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | 688 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) |
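
The trace_api_* events added above instrument the aggregation API that
drivers call into mac80211. For orientation, a hedged sketch of the usual
caller side (the function name and threshold are invented; the
ieee80211_start_tx_ba_session() signature matches the one traced above):

    /* Called from a driver's TX status path once a station/TID has
     * produced enough aggregatable traffic. */
    static void foo_maybe_start_agg(struct ieee80211_sta *sta, u16 tid,
                                    unsigned int nframes)
    {
            if (nframes > 16)           /* invented threshold */
                    ieee80211_start_tx_ba_session(sta, tid);
    }
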
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index edc872e22c9b..845a6e6b9d89 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -411,6 +411,17 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, | |||
411 | return ret; | 411 | return ret; |
412 | } | 412 | } |
413 | 413 | ||
414 | static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, | ||
415 | int idx, struct survey_info *survey) | ||
416 | { | ||
417 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
418 | |||
419 | if (!local->ops->get_survey) | ||
420 | return -EOPNOTSUPP; | ||
421 | |||
422 | return drv_get_survey(local, idx, survey); | ||
423 | } | ||
424 | |||
414 | static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, | 425 | static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, |
415 | u8 *mac, struct station_info *sinfo) | 426 | u8 *mac, struct station_info *sinfo) |
416 | { | 427 | { |
@@ -1137,6 +1148,10 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, | |||
1137 | return -EINVAL; | 1148 | return -EINVAL; |
1138 | } | 1149 | } |
1139 | 1150 | ||
1151 | /* enable WMM or activate new settings */ | ||
1152 | local->hw.conf.flags |= IEEE80211_CONF_QOS; | ||
1153 | drv_config(local, IEEE80211_CONF_CHANGE_QOS); | ||
1154 | |||
1140 | return 0; | 1155 | return 0; |
1141 | } | 1156 | } |
1142 | 1157 | ||
@@ -1403,6 +1418,35 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1403 | return 0; | 1418 | return 0; |
1404 | } | 1419 | } |
1405 | 1420 | ||
1421 | static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, | ||
1422 | struct net_device *dev, | ||
1423 | s32 rssi_thold, u32 rssi_hyst) | ||
1424 | { | ||
1425 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1426 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1427 | struct ieee80211_vif *vif = &sdata->vif; | ||
1428 | struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; | ||
1429 | |||
1430 | if (rssi_thold == bss_conf->cqm_rssi_thold && | ||
1431 | rssi_hyst == bss_conf->cqm_rssi_hyst) | ||
1432 | return 0; | ||
1433 | |||
1434 | bss_conf->cqm_rssi_thold = rssi_thold; | ||
1435 | bss_conf->cqm_rssi_hyst = rssi_hyst; | ||
1436 | |||
1437 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { | ||
1438 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
1439 | return -EOPNOTSUPP; | ||
1440 | return 0; | ||
1441 | } | ||
1442 | |||
1443 | /* tell the driver upon association, unless already associated */ | ||
1444 | if (sdata->u.mgd.associated) | ||
1445 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); | ||
1446 | |||
1447 | return 0; | ||
1448 | } | ||
1449 | |||
1406 | static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | 1450 | static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, |
1407 | struct net_device *dev, | 1451 | struct net_device *dev, |
1408 | const u8 *addr, | 1452 | const u8 *addr, |
@@ -1475,6 +1519,7 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1475 | .change_station = ieee80211_change_station, | 1519 | .change_station = ieee80211_change_station, |
1476 | .get_station = ieee80211_get_station, | 1520 | .get_station = ieee80211_get_station, |
1477 | .dump_station = ieee80211_dump_station, | 1521 | .dump_station = ieee80211_dump_station, |
1522 | .dump_survey = ieee80211_dump_survey, | ||
1478 | #ifdef CONFIG_MAC80211_MESH | 1523 | #ifdef CONFIG_MAC80211_MESH |
1479 | .add_mpath = ieee80211_add_mpath, | 1524 | .add_mpath = ieee80211_add_mpath, |
1480 | .del_mpath = ieee80211_del_mpath, | 1525 | .del_mpath = ieee80211_del_mpath, |
@@ -1507,4 +1552,5 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1507 | .remain_on_channel = ieee80211_remain_on_channel, | 1552 | .remain_on_channel = ieee80211_remain_on_channel, |
1508 | .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, | 1553 | .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, |
1509 | .action = ieee80211_action, | 1554 | .action = ieee80211_action, |
1555 | .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, | ||
1510 | }; | 1556 | }; |
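
The new set_cqm_rssi_config handler stores the threshold and hysteresis in
bss_conf and defers to hardware when IEEE80211_HW_SUPPORTS_CQM_RSSI is
set. A sketch of the driver half under that flag (the foo_ names are
assumptions):

    /* At registration time: */
    hw->flags |= IEEE80211_HW_SUPPORTS_CQM_RSSI;

    /* In the driver's RSSI interrupt/notification path: */
    static void foo_rssi_event(struct ieee80211_vif *vif, bool low)
    {
            ieee80211_cqm_rssi_notify(vif,
                    low ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW
                        : NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
                    GFP_ATOMIC);
    }
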
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 83d4289d954b..20b2998fa0ed 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -100,6 +100,14 @@ static ssize_t ieee80211_if_fmt_##name( \ | |||
100 | return scnprintf(buf, buflen, "%pM\n", sdata->field); \ | 100 | return scnprintf(buf, buflen, "%pM\n", sdata->field); \ |
101 | } | 101 | } |
102 | 102 | ||
103 | #define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \ | ||
104 | static ssize_t ieee80211_if_fmt_##name( \ | ||
105 | const struct ieee80211_sub_if_data *sdata, \ | ||
106 | char *buf, int buflen) \ | ||
107 | { \ | ||
108 | return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \ | ||
109 | } | ||
110 | |||
103 | #define __IEEE80211_IF_FILE(name, _write) \ | 111 | #define __IEEE80211_IF_FILE(name, _write) \ |
104 | static ssize_t ieee80211_if_read_##name(struct file *file, \ | 112 | static ssize_t ieee80211_if_read_##name(struct file *file, \ |
105 | char __user *userbuf, \ | 113 | char __user *userbuf, \ |
@@ -140,6 +148,8 @@ IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ], | |||
140 | /* STA attributes */ | 148 | /* STA attributes */ |
141 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); | 149 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); |
142 | IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); | 150 | IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); |
151 | IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC); | ||
152 | IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16); | ||
143 | 153 | ||
144 | static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, | 154 | static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, |
145 | enum ieee80211_smps_mode smps_mode) | 155 | enum ieee80211_smps_mode smps_mode) |
@@ -276,6 +286,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
276 | 286 | ||
277 | DEBUGFS_ADD(bssid); | 287 | DEBUGFS_ADD(bssid); |
278 | DEBUGFS_ADD(aid); | 288 | DEBUGFS_ADD(aid); |
289 | DEBUGFS_ADD(last_beacon); | ||
290 | DEBUGFS_ADD(ave_beacon); | ||
279 | DEBUGFS_ADD_MODE(smps, 0600); | 291 | DEBUGFS_ADD_MODE(smps, 0600); |
280 | } | 292 | } |
281 | 293 | ||
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index d92800bb2d2f..6bc9b07c3eda 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU); | |||
57 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | 57 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); |
58 | STA_FILE(tx_retry_count, tx_retry_count, LU); | 58 | STA_FILE(tx_retry_count, tx_retry_count, LU); |
59 | STA_FILE(last_signal, last_signal, D); | 59 | STA_FILE(last_signal, last_signal, D); |
60 | STA_FILE(last_noise, last_noise, D); | ||
61 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); | 60 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); |
62 | 61 | ||
63 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 62 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
@@ -120,7 +119,7 @@ STA_OPS(last_seq_ctrl); | |||
120 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | 119 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, |
121 | size_t count, loff_t *ppos) | 120 | size_t count, loff_t *ppos) |
122 | { | 121 | { |
123 | char buf[64 + STA_TID_NUM * 40], *p = buf; | 122 | char buf[71 + STA_TID_NUM * 40], *p = buf; |
124 | int i; | 123 | int i; |
125 | struct sta_info *sta = file->private_data; | 124 | struct sta_info *sta = file->private_data; |
126 | 125 | ||
@@ -128,16 +127,16 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | |||
128 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", | 127 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", |
129 | sta->ampdu_mlme.dialog_token_allocator + 1); | 128 | sta->ampdu_mlme.dialog_token_allocator + 1); |
130 | p += scnprintf(p, sizeof(buf) + buf - p, | 129 | p += scnprintf(p, sizeof(buf) + buf - p, |
131 | "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); | 130 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); |
132 | for (i = 0; i < STA_TID_NUM; i++) { | 131 | for (i = 0; i < STA_TID_NUM; i++) { |
133 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); | 132 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); |
134 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", | 133 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", |
135 | sta->ampdu_mlme.tid_state_rx[i]); | 134 | sta->ampdu_mlme.tid_active_rx[i]); |
136 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 135 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
137 | sta->ampdu_mlme.tid_state_rx[i] ? | 136 | sta->ampdu_mlme.tid_active_rx[i] ? |
138 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); | 137 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); |
139 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", | 138 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", |
140 | sta->ampdu_mlme.tid_state_rx[i] ? | 139 | sta->ampdu_mlme.tid_active_rx[i] ? |
141 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); | 140 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); |
142 | 141 | ||
143 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", | 142 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", |
@@ -177,7 +176,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, | |||
177 | if (htc->ht_supported) { | 176 | if (htc->ht_supported) { |
178 | p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); | 177 | p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); |
179 | 178 | ||
180 | PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP"); | 179 | PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); |
181 | PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); | 180 | PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); |
182 | PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); | 181 | PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); |
183 | 182 | ||
@@ -289,7 +288,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
289 | DEBUGFS_ADD(tx_retry_failed); | 288 | DEBUGFS_ADD(tx_retry_failed); |
290 | DEBUGFS_ADD(tx_retry_count); | 289 | DEBUGFS_ADD(tx_retry_count); |
291 | DEBUGFS_ADD(last_signal); | 290 | DEBUGFS_ADD(last_signal); |
292 | DEBUGFS_ADD(last_noise); | ||
293 | DEBUGFS_ADD(wep_weak_iv_count); | 291 | DEBUGFS_ADD(wep_weak_iv_count); |
294 | DEBUGFS_ADD(ht_capa); | 292 | DEBUGFS_ADD(ht_capa); |
295 | } | 293 | } |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c3d844093a2f..35e1e581e806 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | 86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, |
87 | int mc_count, | 87 | struct netdev_hw_addr_list *mc_list) |
88 | struct dev_addr_list *mc_list) | ||
89 | { | 88 | { |
90 | u64 ret = 0; | 89 | u64 ret = 0; |
91 | 90 | ||
92 | if (local->ops->prepare_multicast) | 91 | if (local->ops->prepare_multicast) |
93 | ret = local->ops->prepare_multicast(&local->hw, mc_count, | 92 | ret = local->ops->prepare_multicast(&local->hw, mc_list); |
94 | mc_list); | ||
95 | 93 | ||
96 | trace_drv_prepare_multicast(local, mc_count, ret); | 94 | trace_drv_prepare_multicast(local, mc_list->count, ret); |
97 | 95 | ||
98 | return ret; | 96 | return ret; |
99 | } | 97 | } |
@@ -346,6 +344,15 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, | |||
346 | return ret; | 344 | return ret; |
347 | } | 345 | } |
348 | 346 | ||
347 | static inline int drv_get_survey(struct ieee80211_local *local, int idx, | ||
348 | struct survey_info *survey) | ||
349 | { | ||
350 | int ret = -EOPNOTSUPP; | ||
351 | if (local->ops->conf_tx) | ||
352 | ret = local->ops->get_survey(&local->hw, idx, survey); | ||
353 | /* trace_drv_get_survey(local, idx, survey, ret); */ | ||
354 | return ret; | ||
355 | } | ||
349 | 356 | ||
350 | static inline void drv_rfkill_poll(struct ieee80211_local *local) | 357 | static inline void drv_rfkill_poll(struct ieee80211_local *local) |
351 | { | 358 | { |
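
A hedged sketch of a driver filling in the new get_survey op for the
single currently-tuned channel (struct foo_priv and its last_noise field
are assumptions):

    static int foo_get_survey(struct ieee80211_hw *hw, int idx,
                              struct survey_info *survey)
    {
            struct foo_priv *priv = hw->priv;   /* assumed driver data */

            if (idx != 0)                       /* only one entry */
                    return -ENOENT;

            survey->channel = hw->conf.channel;
            survey->filled = SURVEY_INFO_NOISE_DBM;
            survey->noise = priv->last_noise;   /* dBm */
            return 0;
    }
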
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 41baf730a5c7..e209cb82ff29 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h | |||
@@ -32,6 +32,10 @@ static inline void trace_ ## name(proto) {} | |||
32 | #define VIF_PR_FMT " vif:%s(%d)" | 32 | #define VIF_PR_FMT " vif:%s(%d)" |
33 | #define VIF_PR_ARG __get_str(vif_name), __entry->vif_type | 33 | #define VIF_PR_ARG __get_str(vif_name), __entry->vif_type |
34 | 34 | ||
35 | /* | ||
36 | * Tracing for driver callbacks. | ||
37 | */ | ||
38 | |||
35 | TRACE_EVENT(drv_start, | 39 | TRACE_EVENT(drv_start, |
36 | TP_PROTO(struct ieee80211_local *local, int ret), | 40 | TP_PROTO(struct ieee80211_local *local, int ret), |
37 | 41 | ||
@@ -766,6 +770,277 @@ TRACE_EVENT(drv_flush, | |||
766 | LOCAL_PR_ARG, __entry->drop | 770 | LOCAL_PR_ARG, __entry->drop |
767 | ) | 771 | ) |
768 | ); | 772 | ); |
773 | |||
774 | /* | ||
775 | * Tracing for API calls that drivers call. | ||
776 | */ | ||
777 | |||
778 | TRACE_EVENT(api_start_tx_ba_session, | ||
779 | TP_PROTO(struct ieee80211_sta *sta, u16 tid), | ||
780 | |||
781 | TP_ARGS(sta, tid), | ||
782 | |||
783 | TP_STRUCT__entry( | ||
784 | STA_ENTRY | ||
785 | __field(u16, tid) | ||
786 | ), | ||
787 | |||
788 | TP_fast_assign( | ||
789 | STA_ASSIGN; | ||
790 | __entry->tid = tid; | ||
791 | ), | ||
792 | |||
793 | TP_printk( | ||
794 | STA_PR_FMT " tid:%d", | ||
795 | STA_PR_ARG, __entry->tid | ||
796 | ) | ||
797 | ); | ||
798 | |||
799 | TRACE_EVENT(api_start_tx_ba_cb, | ||
800 | TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid), | ||
801 | |||
802 | TP_ARGS(sdata, ra, tid), | ||
803 | |||
804 | TP_STRUCT__entry( | ||
805 | VIF_ENTRY | ||
806 | __array(u8, ra, ETH_ALEN) | ||
807 | __field(u16, tid) | ||
808 | ), | ||
809 | |||
810 | TP_fast_assign( | ||
811 | VIF_ASSIGN; | ||
812 | memcpy(__entry->ra, ra, ETH_ALEN); | ||
813 | __entry->tid = tid; | ||
814 | ), | ||
815 | |||
816 | TP_printk( | ||
817 | VIF_PR_FMT " ra:%pM tid:%d", | ||
818 | VIF_PR_ARG, __entry->ra, __entry->tid | ||
819 | ) | ||
820 | ); | ||
821 | |||
822 | TRACE_EVENT(api_stop_tx_ba_session, | ||
823 | TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator), | ||
824 | |||
825 | TP_ARGS(sta, tid, initiator), | ||
826 | |||
827 | TP_STRUCT__entry( | ||
828 | STA_ENTRY | ||
829 | __field(u16, tid) | ||
830 | __field(u16, initiator) | ||
831 | ), | ||
832 | |||
833 | TP_fast_assign( | ||
834 | STA_ASSIGN; | ||
835 | __entry->tid = tid; | ||
836 | __entry->initiator = initiator; | ||
837 | ), | ||
838 | |||
839 | TP_printk( | ||
840 | STA_PR_FMT " tid:%d initiator:%d", | ||
841 | STA_PR_ARG, __entry->tid, __entry->initiator | ||
842 | ) | ||
843 | ); | ||
844 | |||
845 | TRACE_EVENT(api_stop_tx_ba_cb, | ||
846 | TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid), | ||
847 | |||
848 | TP_ARGS(sdata, ra, tid), | ||
849 | |||
850 | TP_STRUCT__entry( | ||
851 | VIF_ENTRY | ||
852 | __array(u8, ra, ETH_ALEN) | ||
853 | __field(u16, tid) | ||
854 | ), | ||
855 | |||
856 | TP_fast_assign( | ||
857 | VIF_ASSIGN; | ||
858 | memcpy(__entry->ra, ra, ETH_ALEN); | ||
859 | __entry->tid = tid; | ||
860 | ), | ||
861 | |||
862 | TP_printk( | ||
863 | VIF_PR_FMT " ra:%pM tid:%d", | ||
864 | VIF_PR_ARG, __entry->ra, __entry->tid | ||
865 | ) | ||
866 | ); | ||
867 | |||
868 | TRACE_EVENT(api_restart_hw, | ||
869 | TP_PROTO(struct ieee80211_local *local), | ||
870 | |||
871 | TP_ARGS(local), | ||
872 | |||
873 | TP_STRUCT__entry( | ||
874 | LOCAL_ENTRY | ||
875 | ), | ||
876 | |||
877 | TP_fast_assign( | ||
878 | LOCAL_ASSIGN; | ||
879 | ), | ||
880 | |||
881 | TP_printk( | ||
882 | LOCAL_PR_FMT, | ||
883 | LOCAL_PR_ARG | ||
884 | ) | ||
885 | ); | ||
886 | |||
887 | TRACE_EVENT(api_beacon_loss, | ||
888 | TP_PROTO(struct ieee80211_sub_if_data *sdata), | ||
889 | |||
890 | TP_ARGS(sdata), | ||
891 | |||
892 | TP_STRUCT__entry( | ||
893 | VIF_ENTRY | ||
894 | ), | ||
895 | |||
896 | TP_fast_assign( | ||
897 | VIF_ASSIGN; | ||
898 | ), | ||
899 | |||
900 | TP_printk( | ||
901 | VIF_PR_FMT, | ||
902 | VIF_PR_ARG | ||
903 | ) | ||
904 | ); | ||
905 | |||
906 | TRACE_EVENT(api_connection_loss, | ||
907 | TP_PROTO(struct ieee80211_sub_if_data *sdata), | ||
908 | |||
909 | TP_ARGS(sdata), | ||
910 | |||
911 | TP_STRUCT__entry( | ||
912 | VIF_ENTRY | ||
913 | ), | ||
914 | |||
915 | TP_fast_assign( | ||
916 | VIF_ASSIGN; | ||
917 | ), | ||
918 | |||
919 | TP_printk( | ||
920 | VIF_PR_FMT, | ||
921 | VIF_PR_ARG | ||
922 | ) | ||
923 | ); | ||
924 | |||
925 | TRACE_EVENT(api_cqm_rssi_notify, | ||
926 | TP_PROTO(struct ieee80211_sub_if_data *sdata, | ||
927 | enum nl80211_cqm_rssi_threshold_event rssi_event), | ||
928 | |||
929 | TP_ARGS(sdata, rssi_event), | ||
930 | |||
931 | TP_STRUCT__entry( | ||
932 | VIF_ENTRY | ||
933 | __field(u32, rssi_event) | ||
934 | ), | ||
935 | |||
936 | TP_fast_assign( | ||
937 | VIF_ASSIGN; | ||
938 | __entry->rssi_event = rssi_event; | ||
939 | ), | ||
940 | |||
941 | TP_printk( | ||
942 | VIF_PR_FMT " event:%d", | ||
943 | VIF_PR_ARG, __entry->rssi_event | ||
944 | ) | ||
945 | ); | ||
946 | |||
947 | TRACE_EVENT(api_scan_completed, | ||
948 | TP_PROTO(struct ieee80211_local *local, bool aborted), | ||
949 | |||
950 | TP_ARGS(local, aborted), | ||
951 | |||
952 | TP_STRUCT__entry( | ||
953 | LOCAL_ENTRY | ||
954 | __field(bool, aborted) | ||
955 | ), | ||
956 | |||
957 | TP_fast_assign( | ||
958 | LOCAL_ASSIGN; | ||
959 | __entry->aborted = aborted; | ||
960 | ), | ||
961 | |||
962 | TP_printk( | ||
963 | LOCAL_PR_FMT " aborted:%d", | ||
964 | LOCAL_PR_ARG, __entry->aborted | ||
965 | ) | ||
966 | ); | ||
967 | |||
968 | TRACE_EVENT(api_sta_block_awake, | ||
969 | TP_PROTO(struct ieee80211_local *local, | ||
970 | struct ieee80211_sta *sta, bool block), | ||
971 | |||
972 | TP_ARGS(local, sta, block), | ||
973 | |||
974 | TP_STRUCT__entry( | ||
975 | LOCAL_ENTRY | ||
976 | STA_ENTRY | ||
977 | __field(bool, block) | ||
978 | ), | ||
979 | |||
980 | TP_fast_assign( | ||
981 | LOCAL_ASSIGN; | ||
982 | STA_ASSIGN; | ||
983 | __entry->block = block; | ||
984 | ), | ||
985 | |||
986 | TP_printk( | ||
987 | LOCAL_PR_FMT STA_PR_FMT " block:%d", | ||
988 | LOCAL_PR_ARG, STA_PR_ARG, __entry->block | ||
989 | ) | ||
990 | ); | ||
991 | |||
992 | /* | ||
993 | * Tracing for internal functions | ||
994 | * (which may also be called in response to driver calls) | ||
995 | */ | ||
996 | |||
997 | TRACE_EVENT(wake_queue, | ||
998 | TP_PROTO(struct ieee80211_local *local, u16 queue, | ||
999 | enum queue_stop_reason reason), | ||
1000 | |||
1001 | TP_ARGS(local, queue, reason), | ||
1002 | |||
1003 | TP_STRUCT__entry( | ||
1004 | LOCAL_ENTRY | ||
1005 | __field(u16, queue) | ||
1006 | __field(u32, reason) | ||
1007 | ), | ||
1008 | |||
1009 | TP_fast_assign( | ||
1010 | LOCAL_ASSIGN; | ||
1011 | __entry->queue = queue; | ||
1012 | __entry->reason = reason; | ||
1013 | ), | ||
1014 | |||
1015 | TP_printk( | ||
1016 | LOCAL_PR_FMT " queue:%d, reason:%d", | ||
1017 | LOCAL_PR_ARG, __entry->queue, __entry->reason | ||
1018 | ) | ||
1019 | ); | ||
1020 | |||
1021 | TRACE_EVENT(stop_queue, | ||
1022 | TP_PROTO(struct ieee80211_local *local, u16 queue, | ||
1023 | enum queue_stop_reason reason), | ||
1024 | |||
1025 | TP_ARGS(local, queue, reason), | ||
1026 | |||
1027 | TP_STRUCT__entry( | ||
1028 | LOCAL_ENTRY | ||
1029 | __field(u16, queue) | ||
1030 | __field(u32, reason) | ||
1031 | ), | ||
1032 | |||
1033 | TP_fast_assign( | ||
1034 | LOCAL_ASSIGN; | ||
1035 | __entry->queue = queue; | ||
1036 | __entry->reason = reason; | ||
1037 | ), | ||
1038 | |||
1039 | TP_printk( | ||
1040 | LOCAL_PR_FMT " queue:%d, reason:%d", | ||
1041 | LOCAL_PR_ARG, __entry->queue, __entry->reason | ||
1042 | ) | ||
1043 | ); | ||
769 | #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ | 1044 | #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ |
770 | 1045 | ||
771 | #undef TRACE_INCLUDE_PATH | 1046 | #undef TRACE_INCLUDE_PATH |
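
Each TRACE_EVENT() above expands into a trace_<name>() inline that call sites invoke directly. As a sketch (assuming the usual mac80211 locals; the real call site lives in scan.c), reporting scan completion looks roughly like:

	/* Sketch: firing one of the api_* tracepoints defined above. */
	void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
	{
		struct ieee80211_local *local = hw_to_local(hw);

		trace_api_scan_completed(local, aborted); /* generated by TRACE_EVENT */
		/* ... schedule the scan work to finish teardown ... */
	}
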
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index bb677a73b7c9..2ab106a0a491 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
@@ -175,8 +175,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | |||
175 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 175 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
176 | 176 | ||
177 | if (initiator == WLAN_BACK_INITIATOR) | 177 | if (initiator == WLAN_BACK_INITIATOR) |
178 | ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid, | 178 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0); |
179 | WLAN_BACK_INITIATOR, 0); | ||
180 | else { /* WLAN_BACK_RECIPIENT */ | 179 | else { /* WLAN_BACK_RECIPIENT */ |
181 | spin_lock_bh(&sta->lock); | 180 | spin_lock_bh(&sta->lock); |
182 | if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) | 181 | if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index e2976da4e0d9..e6f3b0c7a71f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -265,17 +265,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
265 | sta->sta.supp_rates[band] = supp_rates | | 265 | sta->sta.supp_rates[band] = supp_rates | |
266 | ieee80211_mandatory_rates(local, band); | 266 | ieee80211_mandatory_rates(local, band); |
267 | 267 | ||
268 | if (sta->sta.supp_rates[band] != prev_rates) { | ||
268 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 269 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
269 | if (sta->sta.supp_rates[band] != prev_rates) | ||
270 | printk(KERN_DEBUG "%s: updated supp_rates set " | 270 | printk(KERN_DEBUG "%s: updated supp_rates set " |
271 | "for %pM based on beacon info (0x%llx | " | 271 | "for %pM based on beacon/probe_response " |
272 | "0x%llx -> 0x%llx)\n", | 272 | "(0x%x -> 0x%x)\n", |
273 | sdata->name, | 273 | sdata->name, sta->sta.addr, |
274 | sta->sta.addr, | 274 | prev_rates, sta->sta.supp_rates[band]); |
275 | (unsigned long long) prev_rates, | ||
276 | (unsigned long long) supp_rates, | ||
277 | (unsigned long long) sta->sta.supp_rates[band]); | ||
278 | #endif | 275 | #endif |
276 | rate_control_rate_init(sta); | ||
277 | } | ||
279 | rcu_read_unlock(); | 278 | rcu_read_unlock(); |
280 | } else { | 279 | } else { |
281 | rcu_read_unlock(); | 280 | rcu_read_unlock(); |
@@ -371,6 +370,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
371 | sdata->name, mgmt->bssid); | 370 | sdata->name, mgmt->bssid); |
372 | #endif | 371 | #endif |
373 | ieee80211_sta_join_ibss(sdata, bss); | 372 | ieee80211_sta_join_ibss(sdata, bss); |
373 | supp_rates = ieee80211_sta_get_rates(local, elems, band); | ||
374 | ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, | 374 | ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, |
375 | supp_rates, GFP_KERNEL); | 375 | supp_rates, GFP_KERNEL); |
376 | } | 376 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 241533e1bc03..c9712f35e596 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -317,6 +317,7 @@ enum ieee80211_sta_flags { | |||
317 | IEEE80211_STA_MFP_ENABLED = BIT(6), | 317 | IEEE80211_STA_MFP_ENABLED = BIT(6), |
318 | IEEE80211_STA_UAPSD_ENABLED = BIT(7), | 318 | IEEE80211_STA_UAPSD_ENABLED = BIT(7), |
319 | IEEE80211_STA_NULLFUNC_ACKED = BIT(8), | 319 | IEEE80211_STA_NULLFUNC_ACKED = BIT(8), |
320 | IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), | ||
320 | }; | 321 | }; |
321 | 322 | ||
322 | struct ieee80211_if_managed { | 323 | struct ieee80211_if_managed { |
@@ -327,7 +328,7 @@ struct ieee80211_if_managed { | |||
327 | struct work_struct work; | 328 | struct work_struct work; |
328 | struct work_struct monitor_work; | 329 | struct work_struct monitor_work; |
329 | struct work_struct chswitch_work; | 330 | struct work_struct chswitch_work; |
330 | struct work_struct beacon_loss_work; | 331 | struct work_struct beacon_connection_loss_work; |
331 | 332 | ||
332 | unsigned long probe_timeout; | 333 | unsigned long probe_timeout; |
333 | int probe_send_count; | 334 | int probe_send_count; |
@@ -359,6 +360,24 @@ struct ieee80211_if_managed { | |||
359 | int wmm_last_param_set; | 360 | int wmm_last_param_set; |
360 | 361 | ||
361 | u8 use_4addr; | 362 | u8 use_4addr; |
363 | |||
364 | /* Signal strength from the last Beacon frame in the current BSS. */ | ||
365 | int last_beacon_signal; | ||
366 | |||
367 | /* | ||
368 | * Weighted average of the signal strength from Beacon frames in the | ||
369 | * current BSS. This is in units of 1/16 of the signal unit to maintain | ||
370 | * accuracy and to speed up calculations, i.e., the value need to be | ||
371 | * divided by 16 to get the actual value. | ||
372 | */ | ||
373 | int ave_beacon_signal; | ||
374 | |||
375 | /* | ||
376 | * Last Beacon frame signal strength average (ave_beacon_signal / 16) | ||
377 | * that triggered a cqm event. 0 indicates that no event has been | ||
378 | * generated for the current association. | ||
379 | */ | ||
380 | int last_cqm_event_signal; | ||
362 | }; | 381 | }; |
363 | 382 | ||
364 | enum ieee80211_ibss_request { | 383 | enum ieee80211_ibss_request { |
@@ -646,8 +665,7 @@ struct ieee80211_local { | |||
646 | struct work_struct recalc_smps; | 665 | struct work_struct recalc_smps; |
647 | 666 | ||
648 | /* aggregated multicast list */ | 667 | /* aggregated multicast list */ |
649 | struct dev_addr_list *mc_list; | 668 | struct netdev_hw_addr_list mc_list; |
650 | int mc_count; | ||
651 | 669 | ||
652 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ | 670 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ |
653 | 671 | ||
@@ -745,6 +763,7 @@ struct ieee80211_local { | |||
745 | int scan_channel_idx; | 763 | int scan_channel_idx; |
746 | int scan_ies_len; | 764 | int scan_ies_len; |
747 | 765 | ||
766 | unsigned long leave_oper_channel_time; | ||
748 | enum mac80211_scan_state next_scan_state; | 767 | enum mac80211_scan_state next_scan_state; |
749 | struct delayed_work scan_work; | 768 | struct delayed_work scan_work; |
750 | struct ieee80211_sub_if_data *scan_sdata; | 769 | struct ieee80211_sub_if_data *scan_sdata; |
@@ -1078,8 +1097,6 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, | |||
1078 | enum ieee80211_smps_mode smps, const u8 *da, | 1097 | enum ieee80211_smps_mode smps, const u8 *da, |
1079 | const u8 *bssid); | 1098 | const u8 *bssid); |
1080 | 1099 | ||
1081 | void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, | ||
1082 | u16 tid, u16 initiator, u16 reason); | ||
1083 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 1100 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
1084 | u16 initiator, u16 reason); | 1101 | u16 initiator, u16 reason); |
1085 | void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); | 1102 | void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); |
@@ -1155,7 +1172,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, | |||
1155 | int powersave); | 1172 | int powersave); |
1156 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | 1173 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, |
1157 | struct ieee80211_hdr *hdr); | 1174 | struct ieee80211_hdr *hdr); |
1158 | void ieee80211_beacon_loss_work(struct work_struct *work); | 1175 | void ieee80211_beacon_connection_loss_work(struct work_struct *work); |
1159 | 1176 | ||
1160 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, | 1177 | void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, |
1161 | enum queue_stop_reason reason); | 1178 | enum queue_stop_reason reason); |
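
The ave_beacon_signal field introduced above keeps the running average scaled by 16 so the weighted update stays in integer arithmetic. A standalone sketch of the update step (weight 3 mirrors the IEEE80211_SIGNAL_AVE_WEIGHT constant added in mlme.c below):

	/* Sketch: beacon-signal EWMA in 1/16 fixed point, weight = 3 of 16. */
	#define SIGNAL_AVE_WEIGHT 3

	static int signal_ave_update(int ave16, int sig)
	{
		/* ave16 is the average scaled by 16; sig is in plain dBm */
		return (SIGNAL_AVE_WEIGHT * sig * 16 +
			(16 - SIGNAL_AVE_WEIGHT) * ave16) / 16;
	}

	/* Example: ave16 = -65 dBm * 16 = -1040; one -60 dBm beacon gives
	 * (3 * -60 * 16 + 13 * -1040) / 16 = -1025, i.e. about -64.06 dBm. */
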
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index e08fa8eda1b3..50deb017fd6e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -413,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
413 | 413 | ||
414 | netif_addr_lock_bh(dev); | 414 | netif_addr_lock_bh(dev); |
415 | spin_lock_bh(&local->filter_lock); | 415 | spin_lock_bh(&local->filter_lock); |
416 | __dev_addr_unsync(&local->mc_list, &local->mc_count, | 416 | __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len); |
417 | &dev->mc_list, &dev->mc_count); | ||
418 | spin_unlock_bh(&local->filter_lock); | 417 | spin_unlock_bh(&local->filter_lock); |
419 | netif_addr_unlock_bh(dev); | 418 | netif_addr_unlock_bh(dev); |
420 | 419 | ||
@@ -487,7 +486,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
487 | cancel_work_sync(&sdata->u.mgd.work); | 486 | cancel_work_sync(&sdata->u.mgd.work); |
488 | cancel_work_sync(&sdata->u.mgd.chswitch_work); | 487 | cancel_work_sync(&sdata->u.mgd.chswitch_work); |
489 | cancel_work_sync(&sdata->u.mgd.monitor_work); | 488 | cancel_work_sync(&sdata->u.mgd.monitor_work); |
490 | cancel_work_sync(&sdata->u.mgd.beacon_loss_work); | 489 | cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); |
491 | 490 | ||
492 | /* | 491 | /* |
493 | * When we get here, the interface is marked down. | 492 | * When we get here, the interface is marked down. |
@@ -597,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev) | |||
597 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | 596 | sdata->flags ^= IEEE80211_SDATA_PROMISC; |
598 | } | 597 | } |
599 | spin_lock_bh(&local->filter_lock); | 598 | spin_lock_bh(&local->filter_lock); |
600 | __dev_addr_sync(&local->mc_list, &local->mc_count, | 599 | __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); |
601 | &dev->mc_list, &dev->mc_count); | ||
602 | spin_unlock_bh(&local->filter_lock); | 600 | spin_unlock_bh(&local->filter_lock); |
603 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); | 601 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); |
604 | } | 602 | } |
@@ -816,6 +814,118 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
816 | return 0; | 814 | return 0; |
817 | } | 815 | } |
818 | 816 | ||
817 | static void ieee80211_assign_perm_addr(struct ieee80211_local *local, | ||
818 | struct net_device *dev, | ||
819 | enum nl80211_iftype type) | ||
820 | { | ||
821 | struct ieee80211_sub_if_data *sdata; | ||
822 | u64 mask, start, addr, val, inc; | ||
823 | u8 *m; | ||
824 | u8 tmp_addr[ETH_ALEN]; | ||
825 | int i; | ||
826 | |||
827 | /* default: fall back to the wiphy permanent address */ | ||
828 | memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | ||
829 | |||
830 | if (is_zero_ether_addr(local->hw.wiphy->addr_mask) && | ||
831 | local->hw.wiphy->n_addresses <= 1) | ||
832 | return; | ||
833 | |||
834 | |||
835 | mutex_lock(&local->iflist_mtx); | ||
836 | |||
837 | switch (type) { | ||
838 | case NL80211_IFTYPE_MONITOR: | ||
839 | /* doesn't matter */ | ||
840 | break; | ||
841 | case NL80211_IFTYPE_WDS: | ||
842 | case NL80211_IFTYPE_AP_VLAN: | ||
843 | /* match up with an AP interface */ | ||
844 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
845 | if (sdata->vif.type != NL80211_IFTYPE_AP) | ||
846 | continue; | ||
847 | memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN); | ||
848 | break; | ||
849 | } | ||
850 | /* keep default if no AP interface present */ | ||
851 | break; | ||
852 | default: | ||
853 | /* assign a new address if possible -- try n_addresses first */ | ||
854 | for (i = 0; i < local->hw.wiphy->n_addresses; i++) { | ||
855 | bool used = false; | ||
856 | |||
857 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
858 | if (memcmp(local->hw.wiphy->addresses[i].addr, | ||
859 | sdata->vif.addr, ETH_ALEN) == 0) { | ||
860 | used = true; | ||
861 | break; | ||
862 | } | ||
863 | } | ||
864 | |||
865 | if (!used) { | ||
866 | memcpy(dev->perm_addr, | ||
867 | local->hw.wiphy->addresses[i].addr, | ||
868 | ETH_ALEN); | ||
869 | break; | ||
870 | } | ||
871 | } | ||
872 | |||
873 | /* try mask if available */ | ||
874 | if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) | ||
875 | break; | ||
876 | |||
877 | m = local->hw.wiphy->addr_mask; | ||
878 | mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | | ||
879 | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | | ||
880 | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); | ||
881 | |||
882 | if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { | ||
883 | /* not a contiguous mask ... not handled now! */ | ||
884 | printk(KERN_DEBUG "wiphy addr_mask is not contiguous\n"); | ||
885 | break; | ||
886 | } | ||
887 | |||
888 | m = local->hw.wiphy->perm_addr; | ||
889 | start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | | ||
890 | ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | | ||
891 | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); | ||
892 | |||
893 | inc = 1ULL<<__ffs64(mask); | ||
894 | val = (start & mask); | ||
895 | addr = (start & ~mask) | (val & mask); | ||
896 | do { | ||
897 | bool used = false; | ||
898 | |||
899 | tmp_addr[5] = addr >> 0*8; | ||
900 | tmp_addr[4] = addr >> 1*8; | ||
901 | tmp_addr[3] = addr >> 2*8; | ||
902 | tmp_addr[2] = addr >> 3*8; | ||
903 | tmp_addr[1] = addr >> 4*8; | ||
904 | tmp_addr[0] = addr >> 5*8; | ||
905 | |||
906 | val += inc; | ||
907 | |||
908 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
909 | if (memcmp(tmp_addr, sdata->vif.addr, | ||
910 | ETH_ALEN) == 0) { | ||
911 | used = true; | ||
912 | break; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | if (!used) { | ||
917 | memcpy(dev->perm_addr, tmp_addr, ETH_ALEN); | ||
918 | break; | ||
919 | } | ||
920 | addr = (start & ~mask) | (val & mask); | ||
921 | } while (addr != start); | ||
922 | |||
923 | break; | ||
924 | } | ||
925 | |||
926 | mutex_unlock(&local->iflist_mtx); | ||
927 | } | ||
928 | |||
819 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 929 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
820 | struct net_device **new_dev, enum nl80211_iftype type, | 930 | struct net_device **new_dev, enum nl80211_iftype type, |
821 | struct vif_params *params) | 931 | struct vif_params *params) |
@@ -845,8 +955,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
845 | if (ret < 0) | 955 | if (ret < 0) |
846 | goto fail; | 956 | goto fail; |
847 | 957 | ||
848 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | 958 | ieee80211_assign_perm_addr(local, ndev, type); |
849 | memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN); | 959 | memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); |
850 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); | 960 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); |
851 | 961 | ||
852 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ | 962 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ |
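
The contiguity test in ieee80211_assign_perm_addr() relies on the identity that, for a mask whose set bits form a single run, the lowest set-bit index plus the population count equals the index just past the highest set bit. A userspace sketch with the equivalent GCC builtins standing in for __ffs64()/hweight64()/fls64():

	/* Sketch: single-run bitmask test, mirroring the kernel helpers. */
	#include <stdbool.h>
	#include <stdint.h>

	static bool mask_is_contiguous(uint64_t mask)
	{
		if (!mask)
			return false;
		/* __ffs64() ~ ctz (0-based), hweight64() ~ popcount,
		 * fls64() ~ 64 - clz (1-based msb position) */
		return __builtin_ctzll(mask) + __builtin_popcountll(mask) ==
		       64 - __builtin_clzll(mask);
	}

	/* 0xff00 -> true (one run); 0xf0f0 -> false, so the address-increment
	 * scheme above bails out rather than enumerate a sparse mask. */
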
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index e8f6e3b252d8..8d4b41787dcf 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -140,6 +140,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
140 | struct ieee80211_sub_if_data, | 140 | struct ieee80211_sub_if_data, |
141 | u.ap); | 141 | u.ap); |
142 | 142 | ||
143 | key->conf.ap_addr = sdata->dev->dev_addr; | ||
143 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); | 144 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); |
144 | 145 | ||
145 | if (!ret) { | 146 | if (!ret) { |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index b887e484ae04..011ee85bcd57 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local) | |||
71 | spin_lock_bh(&local->filter_lock); | 71 | spin_lock_bh(&local->filter_lock); |
72 | changed_flags = local->filter_flags ^ new_flags; | 72 | changed_flags = local->filter_flags ^ new_flags; |
73 | 73 | ||
74 | mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); | 74 | mc = drv_prepare_multicast(local, &local->mc_list); |
75 | spin_unlock_bh(&local->filter_lock); | 75 | spin_unlock_bh(&local->filter_lock); |
76 | 76 | ||
77 | /* be a bit nasty */ | 77 | /* be a bit nasty */ |
@@ -309,6 +309,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw) | |||
309 | { | 309 | { |
310 | struct ieee80211_local *local = hw_to_local(hw); | 310 | struct ieee80211_local *local = hw_to_local(hw); |
311 | 311 | ||
312 | trace_api_restart_hw(local); | ||
313 | |||
312 | /* use this reason, __ieee80211_resume will unblock it */ | 314 | /* use this reason, __ieee80211_resume will unblock it */ |
313 | ieee80211_stop_queues_by_reason(hw, | 315 | ieee80211_stop_queues_by_reason(hw, |
314 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); | 316 | IEEE80211_QUEUE_STOP_REASON_SUSPEND); |
@@ -388,6 +390,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
388 | local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; | 390 | local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; |
389 | 391 | ||
390 | INIT_LIST_HEAD(&local->interfaces); | 392 | INIT_LIST_HEAD(&local->interfaces); |
393 | |||
394 | __hw_addr_init(&local->mc_list); | ||
395 | |||
391 | mutex_init(&local->iflist_mtx); | 396 | mutex_init(&local->iflist_mtx); |
392 | mutex_init(&local->scan_mtx); | 397 | mutex_init(&local->scan_mtx); |
393 | 398 | ||
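
With the multicast list now carried as a struct netdev_hw_addr_list, a driver's prepare_multicast() callback walks it with netdev_hw_addr_list_for_each() instead of chasing dev_addr_list pointers. A driver-side sketch (the hash fold is a placeholder, not any real device's filter):

	/* Sketch: driver prepare_multicast() over the new hw-addr list. */
	static u64 example_prepare_multicast(struct ieee80211_hw *hw,
					     struct netdev_hw_addr_list *mc_list)
	{
		struct netdev_hw_addr *ha;
		u64 mchash = 0;

		netdev_hw_addr_list_for_each(ha, mc_list) {
			/* fold each ETH_ALEN address into the filter hash */
			mchash |= 1ULL << (ha->addr[5] & 0x3f); /* placeholder */
		}

		return mchash; /* passed back as 'multicast' to configure_filter() */
	}
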
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 859ee5f3d941..7e93524459fc 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -601,10 +601,10 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, | |||
601 | struct ieee80211_rx_status *rx_status) | 601 | struct ieee80211_rx_status *rx_status) |
602 | { | 602 | { |
603 | switch (mgmt->u.action.category) { | 603 | switch (mgmt->u.action.category) { |
604 | case MESH_PLINK_CATEGORY: | 604 | case WLAN_CATEGORY_MESH_PLINK: |
605 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); | 605 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); |
606 | break; | 606 | break; |
607 | case MESH_PATH_SEL_CATEGORY: | 607 | case WLAN_CATEGORY_MESH_PATH_SEL: |
608 | mesh_rx_path_sel_frame(sdata, mgmt, len); | 608 | mesh_rx_path_sel_frame(sdata, mgmt, len); |
609 | break; | 609 | break; |
610 | } | 610 | } |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 85562c59d7d6..c88087f1cd0f 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -209,8 +209,6 @@ struct mesh_rmc { | |||
209 | #define MESH_MAX_MPATHS 1024 | 209 | #define MESH_MAX_MPATHS 1024 |
210 | 210 | ||
211 | /* Pending ANA approval */ | 211 | /* Pending ANA approval */ |
212 | #define MESH_PLINK_CATEGORY 30 | ||
213 | #define MESH_PATH_SEL_CATEGORY 32 | ||
214 | #define MESH_PATH_SEL_ACTION 0 | 212 | #define MESH_PATH_SEL_ACTION 0 |
215 | 213 | ||
216 | /* PERR reason codes */ | 214 | /* PERR reason codes */ |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index fefc45c4b4e8..d89ed7f2592b 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -132,7 +132,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
132 | memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); | 132 | memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); |
133 | /* BSSID == SA */ | 133 | /* BSSID == SA */ |
134 | memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); | 134 | memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); |
135 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 135 | mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL; |
136 | mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; | 136 | mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; |
137 | 137 | ||
138 | switch (action) { | 138 | switch (action) { |
@@ -225,7 +225,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, | |||
225 | memcpy(mgmt->da, ra, ETH_ALEN); | 225 | memcpy(mgmt->da, ra, ETH_ALEN); |
226 | memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); | 226 | memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); |
227 | /* BSSID is left zeroed, wildcard value */ | 227 | /* BSSID is left zeroed, wildcard value */ |
228 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 228 | mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL; |
229 | mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; | 229 | mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; |
230 | ie_len = 15; | 230 | ie_len = 15; |
231 | pos = skb_put(skb, 2 + ie_len); | 231 | pos = skb_put(skb, 2 + ie_len); |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 7b7080e2b49f..3cd5f7b5d693 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
172 | memcpy(mgmt->da, da, ETH_ALEN); | 172 | memcpy(mgmt->da, da, ETH_ALEN); |
173 | memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); | 173 | memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); |
174 | /* BSSID is left zeroed, wildcard value */ | 174 | /* BSSID is left zeroed, wildcard value */ |
175 | mgmt->u.action.category = MESH_PLINK_CATEGORY; | 175 | mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK; |
176 | mgmt->u.action.u.plink_action.action_code = action; | 176 | mgmt->u.action.u.plink_action.action_code = action; |
177 | 177 | ||
178 | if (action == PLINK_CLOSE) | 178 | if (action == PLINK_CLOSE) |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 4aefa6dc3091..425f66c70013 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -47,6 +47,13 @@ | |||
47 | */ | 47 | */ |
48 | #define IEEE80211_PROBE_WAIT (HZ / 2) | 48 | #define IEEE80211_PROBE_WAIT (HZ / 2) |
49 | 49 | ||
50 | /* | ||
51 | * Weight given to the latest Beacon frame when calculating average signal | ||
52 | * strength for Beacon frames received in the current BSS. This must be | ||
53 | * between 1 and 15. | ||
54 | */ | ||
55 | #define IEEE80211_SIGNAL_AVE_WEIGHT 3 | ||
56 | |||
50 | #define TMR_RUNNING_TIMER 0 | 57 | #define TMR_RUNNING_TIMER 0 |
51 | #define TMR_RUNNING_CHANSW 1 | 58 | #define TMR_RUNNING_CHANSW 1 |
52 | 59 | ||
@@ -206,7 +213,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
206 | 213 | ||
207 | static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | 214 | static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, |
208 | const u8 *bssid, u16 stype, u16 reason, | 215 | const u8 *bssid, u16 stype, u16 reason, |
209 | void *cookie) | 216 | void *cookie, bool send_frame) |
210 | { | 217 | { |
211 | struct ieee80211_local *local = sdata->local; | 218 | struct ieee80211_local *local = sdata->local; |
212 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 219 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
@@ -243,7 +250,11 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | |||
243 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); | 250 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); |
244 | if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) | 251 | if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) |
245 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | 252 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
246 | ieee80211_tx_skb(sdata, skb); | 253 | |
254 | if (send_frame) | ||
255 | ieee80211_tx_skb(sdata, skb); | ||
256 | else | ||
257 | kfree_skb(skb); | ||
247 | } | 258 | } |
248 | 259 | ||
249 | void ieee80211_send_pspoll(struct ieee80211_local *local, | 260 | void ieee80211_send_pspoll(struct ieee80211_local *local, |
@@ -592,6 +603,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
592 | int count; | 603 | int count; |
593 | u8 *pos, uapsd_queues = 0; | 604 | u8 *pos, uapsd_queues = 0; |
594 | 605 | ||
606 | if (!local->ops->conf_tx) | ||
607 | return; | ||
608 | |||
595 | if (local->hw.queues < 4) | 609 | if (local->hw.queues < 4) |
596 | return; | 610 | return; |
597 | 611 | ||
@@ -666,11 +680,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
666 | params.aifs, params.cw_min, params.cw_max, params.txop, | 680 | params.aifs, params.cw_min, params.cw_max, params.txop, |
667 | params.uapsd); | 681 | params.uapsd); |
668 | #endif | 682 | #endif |
669 | if (drv_conf_tx(local, queue, ¶ms) && local->ops->conf_tx) | 683 | if (drv_conf_tx(local, queue, ¶ms)) |
670 | printk(KERN_DEBUG "%s: failed to set TX queue " | 684 | printk(KERN_DEBUG "%s: failed to set TX queue " |
671 | "parameters for queue %d\n", | 685 | "parameters for queue %d\n", |
672 | wiphy_name(local->hw.wiphy), queue); | 686 | wiphy_name(local->hw.wiphy), queue); |
673 | } | 687 | } |
688 | |||
689 | /* enable WMM or activate new settings */ | ||
690 | local->hw.conf.flags |= IEEE80211_CONF_QOS; | ||
691 | drv_config(local, IEEE80211_CONF_CHANGE_QOS); | ||
674 | } | 692 | } |
675 | 693 | ||
676 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | 694 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, |
@@ -731,6 +749,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
731 | sdata->u.mgd.associated = cbss; | 749 | sdata->u.mgd.associated = cbss; |
732 | memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN); | 750 | memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN); |
733 | 751 | ||
752 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; | ||
753 | |||
734 | /* just to be sure */ | 754 | /* just to be sure */ |
735 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | 755 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | |
736 | IEEE80211_STA_BEACON_POLL); | 756 | IEEE80211_STA_BEACON_POLL); |
@@ -756,6 +776,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
756 | /* And the BSSID changed - we're associated now */ | 776 | /* And the BSSID changed - we're associated now */ |
757 | bss_info_changed |= BSS_CHANGED_BSSID; | 777 | bss_info_changed |= BSS_CHANGED_BSSID; |
758 | 778 | ||
779 | /* Tell the driver to monitor connection quality (if supported) */ | ||
780 | if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && | ||
781 | sdata->vif.bss_conf.cqm_rssi_thold) | ||
782 | bss_info_changed |= BSS_CHANGED_CQM; | ||
783 | |||
759 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); | 784 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); |
760 | 785 | ||
761 | mutex_lock(&local->iflist_mtx); | 786 | mutex_lock(&local->iflist_mtx); |
@@ -767,7 +792,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
767 | netif_carrier_on(sdata->dev); | 792 | netif_carrier_on(sdata->dev); |
768 | } | 793 | } |
769 | 794 | ||
770 | static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata) | 795 | static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, |
796 | bool remove_sta) | ||
771 | { | 797 | { |
772 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 798 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
773 | struct ieee80211_local *local = sdata->local; | 799 | struct ieee80211_local *local = sdata->local; |
@@ -840,7 +866,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata) | |||
840 | changed |= BSS_CHANGED_BSSID; | 866 | changed |= BSS_CHANGED_BSSID; |
841 | ieee80211_bss_info_change_notify(sdata, changed); | 867 | ieee80211_bss_info_change_notify(sdata, changed); |
842 | 868 | ||
843 | sta_info_destroy_addr(sdata, bssid); | 869 | if (remove_sta) |
870 | sta_info_destroy_addr(sdata, bssid); | ||
844 | } | 871 | } |
845 | 872 | ||
846 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | 873 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, |
@@ -857,6 +884,9 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | |||
857 | if (is_multicast_ether_addr(hdr->addr1)) | 884 | if (is_multicast_ether_addr(hdr->addr1)) |
858 | return; | 885 | return; |
859 | 886 | ||
887 | if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) | ||
888 | return; | ||
889 | |||
860 | mod_timer(&sdata->u.mgd.conn_mon_timer, | 890 | mod_timer(&sdata->u.mgd.conn_mon_timer, |
861 | round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); | 891 | round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); |
862 | } | 892 | } |
@@ -934,23 +964,72 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, | |||
934 | mutex_unlock(&ifmgd->mtx); | 964 | mutex_unlock(&ifmgd->mtx); |
935 | } | 965 | } |
936 | 966 | ||
937 | void ieee80211_beacon_loss_work(struct work_struct *work) | 967 | static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) |
968 | { | ||
969 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
970 | struct ieee80211_local *local = sdata->local; | ||
971 | u8 bssid[ETH_ALEN]; | ||
972 | |||
973 | mutex_lock(&ifmgd->mtx); | ||
974 | if (!ifmgd->associated) { | ||
975 | mutex_unlock(&ifmgd->mtx); | ||
976 | return; | ||
977 | } | ||
978 | |||
979 | memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); | ||
980 | |||
981 | printk(KERN_DEBUG "%s: connection to AP %pM lost\n", sdata->name, bssid); | ||
982 | |||
983 | ieee80211_set_disassoc(sdata, true); | ||
984 | ieee80211_recalc_idle(local); | ||
985 | mutex_unlock(&ifmgd->mtx); | ||
986 | /* | ||
987 | * must be outside lock due to cfg80211, | ||
988 | * but that's not a problem. | ||
989 | */ | ||
990 | ieee80211_send_deauth_disassoc(sdata, bssid, | ||
991 | IEEE80211_STYPE_DEAUTH, | ||
992 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | ||
993 | NULL, true); | ||
994 | } | ||
995 | |||
996 | void ieee80211_beacon_connection_loss_work(struct work_struct *work) | ||
938 | { | 997 | { |
939 | struct ieee80211_sub_if_data *sdata = | 998 | struct ieee80211_sub_if_data *sdata = |
940 | container_of(work, struct ieee80211_sub_if_data, | 999 | container_of(work, struct ieee80211_sub_if_data, |
941 | u.mgd.beacon_loss_work); | 1000 | u.mgd.beacon_connection_loss_work); |
942 | 1001 | ||
943 | ieee80211_mgd_probe_ap(sdata, true); | 1002 | if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) |
1003 | __ieee80211_connection_loss(sdata); | ||
1004 | else | ||
1005 | ieee80211_mgd_probe_ap(sdata, true); | ||
944 | } | 1006 | } |
945 | 1007 | ||
946 | void ieee80211_beacon_loss(struct ieee80211_vif *vif) | 1008 | void ieee80211_beacon_loss(struct ieee80211_vif *vif) |
947 | { | 1009 | { |
948 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 1010 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
1011 | struct ieee80211_hw *hw = &sdata->local->hw; | ||
1012 | |||
1013 | trace_api_beacon_loss(sdata); | ||
949 | 1014 | ||
950 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); | 1015 | WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR); |
1016 | ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); | ||
951 | } | 1017 | } |
952 | EXPORT_SYMBOL(ieee80211_beacon_loss); | 1018 | EXPORT_SYMBOL(ieee80211_beacon_loss); |
953 | 1019 | ||
1020 | void ieee80211_connection_loss(struct ieee80211_vif *vif) | ||
1021 | { | ||
1022 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | ||
1023 | struct ieee80211_hw *hw = &sdata->local->hw; | ||
1024 | |||
1025 | trace_api_connection_loss(sdata); | ||
1026 | |||
1027 | WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR)); | ||
1028 | ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); | ||
1029 | } | ||
1030 | EXPORT_SYMBOL(ieee80211_connection_loss); | ||
1031 | |||
1032 | |||
954 | static enum rx_mgmt_action __must_check | 1033 | static enum rx_mgmt_action __must_check |
955 | ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, | 1034 | ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, |
956 | struct ieee80211_mgmt *mgmt, size_t len) | 1035 | struct ieee80211_mgmt *mgmt, size_t len) |
@@ -971,7 +1050,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, | |||
971 | printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", | 1050 | printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", |
972 | sdata->name, bssid, reason_code); | 1051 | sdata->name, bssid, reason_code); |
973 | 1052 | ||
974 | ieee80211_set_disassoc(sdata); | 1053 | ieee80211_set_disassoc(sdata, true); |
975 | ieee80211_recalc_idle(sdata->local); | 1054 | ieee80211_recalc_idle(sdata->local); |
976 | 1055 | ||
977 | return RX_MGMT_CFG80211_DEAUTH; | 1056 | return RX_MGMT_CFG80211_DEAUTH; |
@@ -1001,7 +1080,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1001 | printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", | 1080 | printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", |
1002 | sdata->name, mgmt->sa, reason_code); | 1081 | sdata->name, mgmt->sa, reason_code); |
1003 | 1082 | ||
1004 | ieee80211_set_disassoc(sdata); | 1083 | ieee80211_set_disassoc(sdata, true); |
1005 | ieee80211_recalc_idle(sdata->local); | 1084 | ieee80211_recalc_idle(sdata->local); |
1006 | return RX_MGMT_CFG80211_DISASSOC; | 1085 | return RX_MGMT_CFG80211_DISASSOC; |
1007 | } | 1086 | } |
@@ -1254,12 +1333,17 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, | |||
1254 | mutex_lock(&sdata->local->iflist_mtx); | 1333 | mutex_lock(&sdata->local->iflist_mtx); |
1255 | ieee80211_recalc_ps(sdata->local, -1); | 1334 | ieee80211_recalc_ps(sdata->local, -1); |
1256 | mutex_unlock(&sdata->local->iflist_mtx); | 1335 | mutex_unlock(&sdata->local->iflist_mtx); |
1336 | |||
1337 | if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) | ||
1338 | return; | ||
1339 | |||
1257 | /* | 1340 | /* |
1258 | * We've received a probe response, but are not sure whether | 1341 | * We've received a probe response, but are not sure whether |
1259 | * we have or will be receiving any beacons or data, so let's | 1342 | * we have or will be receiving any beacons or data, so let's |
1260 | * schedule the timers again, just in case. | 1343 | * schedule the timers again, just in case. |
1261 | */ | 1344 | */ |
1262 | mod_beacon_timer(sdata); | 1345 | mod_beacon_timer(sdata); |
1346 | |||
1263 | mod_timer(&ifmgd->conn_mon_timer, | 1347 | mod_timer(&ifmgd->conn_mon_timer, |
1264 | round_jiffies_up(jiffies + | 1348 | round_jiffies_up(jiffies + |
1265 | IEEE80211_CONNECTION_IDLE_TIME)); | 1349 | IEEE80211_CONNECTION_IDLE_TIME)); |
@@ -1293,6 +1377,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1293 | struct ieee80211_rx_status *rx_status) | 1377 | struct ieee80211_rx_status *rx_status) |
1294 | { | 1378 | { |
1295 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1379 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1380 | struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; | ||
1296 | size_t baselen; | 1381 | size_t baselen; |
1297 | struct ieee802_11_elems elems; | 1382 | struct ieee802_11_elems elems; |
1298 | struct ieee80211_local *local = sdata->local; | 1383 | struct ieee80211_local *local = sdata->local; |
@@ -1328,6 +1413,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1328 | if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0) | 1413 | if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0) |
1329 | return; | 1414 | return; |
1330 | 1415 | ||
1416 | /* Track average RSSI from the Beacon frames of the current AP */ | ||
1417 | ifmgd->last_beacon_signal = rx_status->signal; | ||
1418 | if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) { | ||
1419 | ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE; | ||
1420 | ifmgd->ave_beacon_signal = rx_status->signal; | ||
1421 | ifmgd->last_cqm_event_signal = 0; | ||
1422 | } else { | ||
1423 | ifmgd->ave_beacon_signal = | ||
1424 | (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 + | ||
1425 | (16 - IEEE80211_SIGNAL_AVE_WEIGHT) * | ||
1426 | ifmgd->ave_beacon_signal) / 16; | ||
1427 | } | ||
1428 | if (bss_conf->cqm_rssi_thold && | ||
1429 | !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { | ||
1430 | int sig = ifmgd->ave_beacon_signal / 16; | ||
1431 | int last_event = ifmgd->last_cqm_event_signal; | ||
1432 | int thold = bss_conf->cqm_rssi_thold; | ||
1433 | int hyst = bss_conf->cqm_rssi_hyst; | ||
1434 | if (sig < thold && | ||
1435 | (last_event == 0 || sig < last_event - hyst)) { | ||
1436 | ifmgd->last_cqm_event_signal = sig; | ||
1437 | ieee80211_cqm_rssi_notify( | ||
1438 | &sdata->vif, | ||
1439 | NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, | ||
1440 | GFP_KERNEL); | ||
1441 | } else if (sig > thold && | ||
1442 | (last_event == 0 || sig > last_event + hyst)) { | ||
1443 | ifmgd->last_cqm_event_signal = sig; | ||
1444 | ieee80211_cqm_rssi_notify( | ||
1445 | &sdata->vif, | ||
1446 | NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, | ||
1447 | GFP_KERNEL); | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1331 | if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { | 1451 | if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { |
1332 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1452 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1333 | if (net_ratelimit()) { | 1453 | if (net_ratelimit()) { |
@@ -1613,7 +1733,7 @@ static void ieee80211_sta_work(struct work_struct *work) | |||
1613 | printk(KERN_DEBUG "No probe response from AP %pM" | 1733 | printk(KERN_DEBUG "No probe response from AP %pM" |
1614 | " after %dms, disconnecting.\n", | 1734 | " after %dms, disconnecting.\n", |
1615 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); | 1735 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); |
1616 | ieee80211_set_disassoc(sdata); | 1736 | ieee80211_set_disassoc(sdata, true); |
1617 | ieee80211_recalc_idle(local); | 1737 | ieee80211_recalc_idle(local); |
1618 | mutex_unlock(&ifmgd->mtx); | 1738 | mutex_unlock(&ifmgd->mtx); |
1619 | /* | 1739 | /* |
@@ -1623,7 +1743,7 @@ static void ieee80211_sta_work(struct work_struct *work) | |||
1623 | ieee80211_send_deauth_disassoc(sdata, bssid, | 1743 | ieee80211_send_deauth_disassoc(sdata, bssid, |
1624 | IEEE80211_STYPE_DEAUTH, | 1744 | IEEE80211_STYPE_DEAUTH, |
1625 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | 1745 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, |
1626 | NULL); | 1746 | NULL, true); |
1627 | mutex_lock(&ifmgd->mtx); | 1747 | mutex_lock(&ifmgd->mtx); |
1628 | } | 1748 | } |
1629 | } | 1749 | } |
@@ -1640,7 +1760,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data) | |||
1640 | if (local->quiescing) | 1760 | if (local->quiescing) |
1641 | return; | 1761 | return; |
1642 | 1762 | ||
1643 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); | 1763 | ieee80211_queue_work(&sdata->local->hw, |
1764 | &sdata->u.mgd.beacon_connection_loss_work); | ||
1644 | } | 1765 | } |
1645 | 1766 | ||
1646 | static void ieee80211_sta_conn_mon_timer(unsigned long data) | 1767 | static void ieee80211_sta_conn_mon_timer(unsigned long data) |
@@ -1692,7 +1813,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
1692 | */ | 1813 | */ |
1693 | 1814 | ||
1694 | cancel_work_sync(&ifmgd->work); | 1815 | cancel_work_sync(&ifmgd->work); |
1695 | cancel_work_sync(&ifmgd->beacon_loss_work); | 1816 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); |
1696 | if (del_timer_sync(&ifmgd->timer)) | 1817 | if (del_timer_sync(&ifmgd->timer)) |
1697 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); | 1818 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); |
1698 | 1819 | ||
@@ -1726,7 +1847,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
1726 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); | 1847 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); |
1727 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); | 1848 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); |
1728 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); | 1849 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); |
1729 | INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); | 1850 | INIT_WORK(&ifmgd->beacon_connection_loss_work, |
1851 | ieee80211_beacon_connection_loss_work); | ||
1730 | setup_timer(&ifmgd->timer, ieee80211_sta_timer, | 1852 | setup_timer(&ifmgd->timer, ieee80211_sta_timer, |
1731 | (unsigned long) sdata); | 1853 | (unsigned long) sdata); |
1732 | setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, | 1854 | setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, |
@@ -1805,6 +1927,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
1805 | struct ieee80211_work *wk; | 1927 | struct ieee80211_work *wk; |
1806 | u16 auth_alg; | 1928 | u16 auth_alg; |
1807 | 1929 | ||
1930 | if (req->local_state_change) | ||
1931 | return 0; /* no need to update mac80211 state */ | ||
1932 | |||
1808 | switch (req->auth_type) { | 1933 | switch (req->auth_type) { |
1809 | case NL80211_AUTHTYPE_OPEN_SYSTEM: | 1934 | case NL80211_AUTHTYPE_OPEN_SYSTEM: |
1810 | auth_alg = WLAN_AUTH_OPEN; | 1935 | auth_alg = WLAN_AUTH_OPEN; |
@@ -1913,7 +2038,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
1913 | } | 2038 | } |
1914 | 2039 | ||
1915 | /* Trying to reassociate - clear previous association state */ | 2040 | /* Trying to reassociate - clear previous association state */ |
1916 | ieee80211_set_disassoc(sdata); | 2041 | ieee80211_set_disassoc(sdata, true); |
1917 | } | 2042 | } |
1918 | mutex_unlock(&ifmgd->mtx); | 2043 | mutex_unlock(&ifmgd->mtx); |
1919 | 2044 | ||
@@ -2017,7 +2142,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2017 | 2142 | ||
2018 | if (ifmgd->associated == req->bss) { | 2143 | if (ifmgd->associated == req->bss) { |
2019 | bssid = req->bss->bssid; | 2144 | bssid = req->bss->bssid; |
2020 | ieee80211_set_disassoc(sdata); | 2145 | ieee80211_set_disassoc(sdata, true); |
2021 | mutex_unlock(&ifmgd->mtx); | 2146 | mutex_unlock(&ifmgd->mtx); |
2022 | } else { | 2147 | } else { |
2023 | bool not_auth_yet = false; | 2148 | bool not_auth_yet = false; |
@@ -2060,9 +2185,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2060 | printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", | 2185 | printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", |
2061 | sdata->name, bssid, req->reason_code); | 2186 | sdata->name, bssid, req->reason_code); |
2062 | 2187 | ||
2063 | ieee80211_send_deauth_disassoc(sdata, bssid, | 2188 | ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, |
2064 | IEEE80211_STYPE_DEAUTH, req->reason_code, | 2189 | req->reason_code, cookie, |
2065 | cookie); | 2190 | !req->local_state_change); |
2066 | 2191 | ||
2067 | ieee80211_recalc_idle(sdata->local); | 2192 | ieee80211_recalc_idle(sdata->local); |
2068 | 2193 | ||
@@ -2074,6 +2199,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2074 | void *cookie) | 2199 | void *cookie) |
2075 | { | 2200 | { |
2076 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2201 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2202 | u8 bssid[ETH_ALEN]; | ||
2077 | 2203 | ||
2078 | mutex_lock(&ifmgd->mtx); | 2204 | mutex_lock(&ifmgd->mtx); |
2079 | 2205 | ||
@@ -2091,13 +2217,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2091 | printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", | 2217 | printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", |
2092 | sdata->name, req->bss->bssid, req->reason_code); | 2218 | sdata->name, req->bss->bssid, req->reason_code); |
2093 | 2219 | ||
2094 | ieee80211_set_disassoc(sdata); | 2220 | memcpy(bssid, req->bss->bssid, ETH_ALEN); |
2221 | ieee80211_set_disassoc(sdata, false); | ||
2095 | 2222 | ||
2096 | mutex_unlock(&ifmgd->mtx); | 2223 | mutex_unlock(&ifmgd->mtx); |
2097 | 2224 | ||
2098 | ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, | 2225 | ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, |
2099 | IEEE80211_STYPE_DISASSOC, req->reason_code, | 2226 | IEEE80211_STYPE_DISASSOC, req->reason_code, |
2100 | cookie); | 2227 | cookie, !req->local_state_change); |
2228 | sta_info_destroy_addr(sdata, bssid); | ||
2101 | 2229 | ||
2102 | ieee80211_recalc_idle(sdata->local); | 2230 | ieee80211_recalc_idle(sdata->local); |
2103 | 2231 | ||
@@ -2138,3 +2266,15 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | |||
2138 | *cookie = (unsigned long) skb; | 2266 | *cookie = (unsigned long) skb; |
2139 | return 0; | 2267 | return 0; |
2140 | } | 2268 | } |
2269 | |||
2270 | void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, | ||
2271 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
2272 | gfp_t gfp) | ||
2273 | { | ||
2274 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | ||
2275 | |||
2276 | trace_api_cqm_rssi_notify(sdata, rssi_event); | ||
2277 | |||
2278 | cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); | ||
2279 | } | ||
2280 | EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); | ||
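
The software CQM path added to ieee80211_rx_mgmt_beacon() only reports a threshold crossing when the averaged signal has moved past the threshold by more than the hysteresis since the last reported value. The decision, extracted as a standalone sketch (values in dBm; last_event of 0 means nothing has been reported yet this association):

	/* Sketch: CQM RSSI threshold/hysteresis decision. */
	enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

	static enum cqm_event cqm_check(int sig, int thold, int hyst,
					int *last_event)
	{
		if (sig < thold && (*last_event == 0 || sig < *last_event - hyst)) {
			*last_event = sig;
			return CQM_LOW;
		}
		if (sig > thold && (*last_event == 0 || sig > *last_event + hyst)) {
			*last_event = sig;
			return CQM_HIGH;
		}
		return CQM_NONE; /* inside the hysteresis band: stay quiet */
	}
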
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 0e64484e861c..75202b295a4e 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -46,7 +46,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
46 | 46 | ||
47 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | 47 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { |
48 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | 48 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
49 | set_sta_flags(sta, WLAN_STA_SUSPEND); | 49 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); |
50 | ieee80211_sta_tear_down_BA_sessions(sta); | 50 | ieee80211_sta_tear_down_BA_sessions(sta); |
51 | } | 51 | } |
52 | } | 52 | } |
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 818abfae9007..f65ce6dcc8e2 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -542,7 +542,7 @@ minstrel_free(void *priv) | |||
542 | kfree(priv); | 542 | kfree(priv); |
543 | } | 543 | } |
544 | 544 | ||
545 | static struct rate_control_ops mac80211_minstrel = { | 545 | struct rate_control_ops mac80211_minstrel = { |
546 | .name = "minstrel", | 546 | .name = "minstrel", |
547 | .tx_status = minstrel_tx_status, | 547 | .tx_status = minstrel_tx_status, |
548 | .get_rate = minstrel_get_rate, | 548 | .get_rate = minstrel_get_rate, |
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 38bf4168fc3a..0f5a83370aa6 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h | |||
@@ -80,7 +80,18 @@ struct minstrel_priv { | |||
80 | unsigned int lookaround_rate_mrr; | 80 | unsigned int lookaround_rate_mrr; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | struct minstrel_debugfs_info { | ||
84 | size_t len; | ||
85 | char buf[]; | ||
86 | }; | ||
87 | |||
88 | extern struct rate_control_ops mac80211_minstrel; | ||
83 | void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); | 89 | void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); |
84 | void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); | 90 | void minstrel_remove_sta_debugfs(void *priv, void *priv_sta); |
85 | 91 | ||
92 | /* debugfs */ | ||
93 | int minstrel_stats_open(struct inode *inode, struct file *file); | ||
94 | ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos); | ||
95 | int minstrel_stats_release(struct inode *inode, struct file *file); | ||
96 | |||
86 | #endif | 97 | #endif |
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 0e1f12b1b6dd..241e76f3fdf2 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c | |||
@@ -53,21 +53,15 @@ | |||
53 | #include <net/mac80211.h> | 53 | #include <net/mac80211.h> |
54 | #include "rc80211_minstrel.h" | 54 | #include "rc80211_minstrel.h" |
55 | 55 | ||
56 | struct minstrel_stats_info { | 56 | int |
57 | struct minstrel_sta_info *mi; | ||
58 | char buf[4096]; | ||
59 | size_t len; | ||
60 | }; | ||
61 | |||
62 | static int | ||
63 | minstrel_stats_open(struct inode *inode, struct file *file) | 57 | minstrel_stats_open(struct inode *inode, struct file *file) |
64 | { | 58 | { |
65 | struct minstrel_sta_info *mi = inode->i_private; | 59 | struct minstrel_sta_info *mi = inode->i_private; |
66 | struct minstrel_stats_info *ms; | 60 | struct minstrel_debugfs_info *ms; |
67 | unsigned int i, tp, prob, eprob; | 61 | unsigned int i, tp, prob, eprob; |
68 | char *p; | 62 | char *p; |
69 | 63 | ||
70 | ms = kmalloc(sizeof(*ms), GFP_KERNEL); | 64 | ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL); |
71 | if (!ms) | 65 | if (!ms) |
72 | return -ENOMEM; | 66 | return -ENOMEM; |
73 | 67 | ||
@@ -107,36 +101,19 @@ minstrel_stats_open(struct inode *inode, struct file *file) | |||
107 | return 0; | 101 | return 0; |
108 | } | 102 | } |
109 | 103 | ||
110 | static ssize_t | 104 | ssize_t |
111 | minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o) | 105 | minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) |
112 | { | 106 | { |
113 | struct minstrel_stats_info *ms; | 107 | struct minstrel_debugfs_info *ms; |
114 | char *src; | ||
115 | 108 | ||
116 | ms = file->private_data; | 109 | ms = file->private_data; |
117 | src = ms->buf; | 110 | return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len); |
118 | |||
119 | len = min(len, ms->len); | ||
120 | if (len <= *o) | ||
121 | return 0; | ||
122 | |||
123 | src += *o; | ||
124 | len -= *o; | ||
125 | *o += len; | ||
126 | |||
127 | if (copy_to_user(buf, src, len)) | ||
128 | return -EFAULT; | ||
129 | |||
130 | return len; | ||
131 | } | 111 | } |
132 | 112 | ||
133 | static int | 113 | int |
134 | minstrel_stats_release(struct inode *inode, struct file *file) | 114 | minstrel_stats_release(struct inode *inode, struct file *file) |
135 | { | 115 | { |
136 | struct minstrel_stats_info *ms = file->private_data; | 116 | kfree(file->private_data); |
137 | |||
138 | kfree(ms); | ||
139 | |||
140 | return 0; | 117 | return 0; |
141 | } | 118 | } |
142 | 119 | ||
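Two things happen in this conversion. First, the hand-rolled read loop is replaced by simple_read_from_buffer(), which handles the offset bookkeeping, short reads, and copy_to_user() failure in one call; note that the old code clamped len to ms->len before comparing it with the file offset, so reading the stats in chunks smaller than the buffer returned 0 after the first chunk. Second, the handlers lose their static qualifiers (and are declared in the header above) so other rate-control code can reuse them. A sketch of wiring the now-exported handlers into a file_operations table, assuming the usual debugfs registration:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include "rc80211_minstrel.h"

    static const struct file_operations minstrel_stat_fops = {
            .owner   = THIS_MODULE,
            .open    = minstrel_stats_open,
            .read    = minstrel_stats_read,
            .release = minstrel_stats_release,
    };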
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 04ea07f0e78a..72efbd87c1eb 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -39,7 +39,7 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, | |||
39 | { | 39 | { |
40 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { | 40 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { |
41 | if (likely(skb->len > FCS_LEN)) | 41 | if (likely(skb->len > FCS_LEN)) |
42 | skb_trim(skb, skb->len - FCS_LEN); | 42 | __pskb_trim(skb, skb->len - FCS_LEN); |
43 | else { | 43 | else { |
44 | /* driver bug */ | 44 | /* driver bug */ |
45 | WARN_ON(1); | 45 | WARN_ON(1); |
@@ -179,14 +179,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
179 | pos++; | 179 | pos++; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* IEEE80211_RADIOTAP_DBM_ANTNOISE */ | ||
183 | if (local->hw.flags & IEEE80211_HW_NOISE_DBM) { | ||
184 | *pos = status->noise; | ||
185 | rthdr->it_present |= | ||
186 | cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE); | ||
187 | pos++; | ||
188 | } | ||
189 | |||
190 | /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ | 182 | /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ |
191 | 183 | ||
192 | /* IEEE80211_RADIOTAP_ANTENNA */ | 184 | /* IEEE80211_RADIOTAP_ANTENNA */ |
@@ -236,6 +228,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
236 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | 228 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) |
237 | present_fcs_len = FCS_LEN; | 229 | present_fcs_len = FCS_LEN; |
238 | 230 | ||
231 | /* make sure hdr->frame_control is on the linear part */ | ||
232 | if (!pskb_may_pull(origskb, 2)) { | ||
233 | dev_kfree_skb(origskb); | ||
234 | return NULL; | ||
235 | } | ||
236 | |||
239 | if (!local->monitors) { | 237 | if (!local->monitors) { |
240 | if (should_drop_frame(origskb, present_fcs_len)) { | 238 | if (should_drop_frame(origskb, present_fcs_len)) { |
241 | dev_kfree_skb(origskb); | 239 | dev_kfree_skb(origskb); |
@@ -493,7 +491,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
493 | 491 | ||
494 | if (ieee80211_is_action(hdr->frame_control)) { | 492 | if (ieee80211_is_action(hdr->frame_control)) { |
495 | mgmt = (struct ieee80211_mgmt *)hdr; | 493 | mgmt = (struct ieee80211_mgmt *)hdr; |
496 | if (mgmt->u.action.category != MESH_PLINK_CATEGORY) | 494 | if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK) |
497 | return RX_DROP_MONITOR; | 495 | return RX_DROP_MONITOR; |
498 | return RX_CONTINUE; | 496 | return RX_CONTINUE; |
499 | } | 497 | } |
@@ -723,14 +721,16 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
723 | 721 | ||
724 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | 722 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; |
725 | 723 | ||
726 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) | 724 | spin_lock(&sta->lock); |
727 | goto dont_reorder; | 725 | |
726 | if (!sta->ampdu_mlme.tid_active_rx[tid]) | ||
727 | goto dont_reorder_unlock; | ||
728 | 728 | ||
729 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | 729 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; |
730 | 730 | ||
731 | /* qos null data frames are excluded */ | 731 | /* qos null data frames are excluded */ |
732 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) | 732 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) |
733 | goto dont_reorder; | 733 | goto dont_reorder_unlock; |
734 | 734 | ||
735 | /* new, potentially un-ordered, ampdu frame - process it */ | 735 | /* new, potentially un-ordered, ampdu frame - process it */ |
736 | 736 | ||
@@ -742,15 +742,20 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
742 | /* if this mpdu is fragmented - terminate rx aggregation session */ | 742 | /* if this mpdu is fragmented - terminate rx aggregation session */ |
743 | sc = le16_to_cpu(hdr->seq_ctrl); | 743 | sc = le16_to_cpu(hdr->seq_ctrl); |
744 | if (sc & IEEE80211_SCTL_FRAG) { | 744 | if (sc & IEEE80211_SCTL_FRAG) { |
745 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, | 745 | spin_unlock(&sta->lock); |
746 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); | 746 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, |
747 | WLAN_REASON_QSTA_REQUIRE_SETUP); | ||
747 | dev_kfree_skb(skb); | 748 | dev_kfree_skb(skb); |
748 | return; | 749 | return; |
749 | } | 750 | } |
750 | 751 | ||
751 | if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) | 752 | if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) { |
753 | spin_unlock(&sta->lock); | ||
752 | return; | 754 | return; |
755 | } | ||
753 | 756 | ||
757 | dont_reorder_unlock: | ||
758 | spin_unlock(&sta->lock); | ||
754 | dont_reorder: | 759 | dont_reorder: |
755 | __skb_queue_tail(frames, skb); | 760 | __skb_queue_tail(frames, skb); |
756 | } | 761 | } |
@@ -897,6 +902,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
897 | rx->key = key; | 902 | rx->key = key; |
898 | return RX_CONTINUE; | 903 | return RX_CONTINUE; |
899 | } else { | 904 | } else { |
905 | u8 keyid; | ||
900 | /* | 906 | /* |
901 | * The device doesn't give us the IV so we won't be | 907 | * The device doesn't give us the IV so we won't be |
902 | * able to look up the key. That's ok though, we | 908 | * able to look up the key. That's ok though, we |
@@ -919,7 +925,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
919 | * no need to call ieee80211_wep_get_keyidx, | 925 | * no need to call ieee80211_wep_get_keyidx, |
920 | * it verifies a bunch of things we've done already | 926 | * it verifies a bunch of things we've done already |
921 | */ | 927 | */ |
922 | keyidx = rx->skb->data[hdrlen + 3] >> 6; | 928 | skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); |
929 | keyidx = keyid >> 6; | ||
923 | 930 | ||
924 | rx->key = rcu_dereference(rx->sdata->keys[keyidx]); | 931 | rx->key = rcu_dereference(rx->sdata->keys[keyidx]); |
925 | 932 | ||
@@ -940,6 +947,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
940 | return RX_DROP_MONITOR; | 947 | return RX_DROP_MONITOR; |
941 | } | 948 | } |
942 | 949 | ||
950 | if (skb_linearize(rx->skb)) | ||
951 | return RX_DROP_UNUSABLE; | ||
952 | |||
953 | hdr = (struct ieee80211_hdr *)rx->skb->data; | ||
954 | |||
943 | /* Check for weak IVs if possible */ | 955 | /* Check for weak IVs if possible */ |
944 | if (rx->sta && rx->key->conf.alg == ALG_WEP && | 956 | if (rx->sta && rx->key->conf.alg == ALG_WEP && |
945 | ieee80211_is_data(hdr->frame_control) && | 957 | ieee80211_is_data(hdr->frame_control) && |
@@ -1078,7 +1090,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1078 | sta->rx_fragments++; | 1090 | sta->rx_fragments++; |
1079 | sta->rx_bytes += rx->skb->len; | 1091 | sta->rx_bytes += rx->skb->len; |
1080 | sta->last_signal = status->signal; | 1092 | sta->last_signal = status->signal; |
1081 | sta->last_noise = status->noise; | ||
1082 | 1093 | ||
1083 | /* | 1094 | /* |
1084 | * Change STA power saving mode only at the end of a frame | 1095 | * Change STA power saving mode only at the end of a frame |
@@ -1241,6 +1252,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1241 | } | 1252 | } |
1242 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); | 1253 | I802_DEBUG_INC(rx->local->rx_handlers_fragments); |
1243 | 1254 | ||
1255 | if (skb_linearize(rx->skb)) | ||
1256 | return RX_DROP_UNUSABLE; | ||
1257 | |||
1244 | seq = (sc & IEEE80211_SCTL_SEQ) >> 4; | 1258 | seq = (sc & IEEE80211_SCTL_SEQ) >> 4; |
1245 | 1259 | ||
1246 | if (frag == 0) { | 1260 | if (frag == 0) { |
@@ -1406,21 +1420,24 @@ static int | |||
1406 | ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) | 1420 | ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) |
1407 | { | 1421 | { |
1408 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 1422 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
1423 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
1409 | __le16 fc = hdr->frame_control; | 1424 | __le16 fc = hdr->frame_control; |
1410 | int res; | ||
1411 | 1425 | ||
1412 | res = ieee80211_drop_unencrypted(rx, fc); | 1426 | /* |
1413 | if (unlikely(res)) | 1427 | * Pass through unencrypted frames if the hardware has |
1414 | return res; | 1428 | * decrypted them already. |
1429 | */ | ||
1430 | if (status->flag & RX_FLAG_DECRYPTED) | ||
1431 | return 0; | ||
1415 | 1432 | ||
1416 | if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { | 1433 | if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { |
1417 | if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && | 1434 | if (unlikely(!ieee80211_has_protected(fc) && |
1435 | ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && | ||
1418 | rx->key)) | 1436 | rx->key)) |
1419 | return -EACCES; | 1437 | return -EACCES; |
1420 | /* BIP does not use Protected field, so need to check MMIE */ | 1438 | /* BIP does not use Protected field, so need to check MMIE */ |
1421 | if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && | 1439 | if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && |
1422 | ieee80211_get_mmie_keyidx(rx->skb) < 0 && | 1440 | ieee80211_get_mmie_keyidx(rx->skb) < 0)) |
1423 | rx->key)) | ||
1424 | return -EACCES; | 1441 | return -EACCES; |
1425 | /* | 1442 | /* |
1426 | * When using MFP, Action frames are not allowed prior to | 1443 | * When using MFP, Action frames are not allowed prior to |
@@ -1598,6 +1615,9 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1598 | skb->dev = dev; | 1615 | skb->dev = dev; |
1599 | __skb_queue_head_init(&frame_list); | 1616 | __skb_queue_head_init(&frame_list); |
1600 | 1617 | ||
1618 | if (skb_linearize(skb)) | ||
1619 | return RX_DROP_UNUSABLE; | ||
1620 | |||
1601 | ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, | 1621 | ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, |
1602 | rx->sdata->vif.type, | 1622 | rx->sdata->vif.type, |
1603 | rx->local->hw.extra_tx_headroom); | 1623 | rx->local->hw.extra_tx_headroom); |
@@ -1796,10 +1816,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1796 | if (ieee80211_is_back_req(bar->frame_control)) { | 1816 | if (ieee80211_is_back_req(bar->frame_control)) { |
1797 | if (!rx->sta) | 1817 | if (!rx->sta) |
1798 | return RX_DROP_MONITOR; | 1818 | return RX_DROP_MONITOR; |
1819 | spin_lock(&rx->sta->lock); | ||
1799 | tid = le16_to_cpu(bar->control) >> 12; | 1820 | tid = le16_to_cpu(bar->control) >> 12; |
1800 | if (rx->sta->ampdu_mlme.tid_state_rx[tid] | 1821 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { |
1801 | != HT_AGG_STATE_OPERATIONAL) | 1822 | spin_unlock(&rx->sta->lock); |
1802 | return RX_DROP_MONITOR; | 1823 | return RX_DROP_MONITOR; |
1824 | } | ||
1803 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | 1825 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; |
1804 | 1826 | ||
1805 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; | 1827 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; |
@@ -1813,6 +1835,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1813 | ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, | 1835 | ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, |
1814 | frames); | 1836 | frames); |
1815 | kfree_skb(skb); | 1837 | kfree_skb(skb); |
1838 | spin_unlock(&rx->sta->lock); | ||
1816 | return RX_QUEUED; | 1839 | return RX_QUEUED; |
1817 | } | 1840 | } |
1818 | 1841 | ||
@@ -1974,8 +1997,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1974 | goto handled; | 1997 | goto handled; |
1975 | } | 1998 | } |
1976 | break; | 1999 | break; |
1977 | case MESH_PLINK_CATEGORY: | 2000 | case WLAN_CATEGORY_MESH_PLINK: |
1978 | case MESH_PATH_SEL_CATEGORY: | 2001 | case WLAN_CATEGORY_MESH_PATH_SEL: |
1979 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 2002 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
1980 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | 2003 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); |
1981 | break; | 2004 | break; |
@@ -2372,29 +2395,42 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2372 | struct ieee80211_local *local = hw_to_local(hw); | 2395 | struct ieee80211_local *local = hw_to_local(hw); |
2373 | struct ieee80211_sub_if_data *sdata; | 2396 | struct ieee80211_sub_if_data *sdata; |
2374 | struct ieee80211_hdr *hdr; | 2397 | struct ieee80211_hdr *hdr; |
2398 | __le16 fc; | ||
2375 | struct ieee80211_rx_data rx; | 2399 | struct ieee80211_rx_data rx; |
2376 | int prepares; | 2400 | int prepares; |
2377 | struct ieee80211_sub_if_data *prev = NULL; | 2401 | struct ieee80211_sub_if_data *prev = NULL; |
2378 | struct sk_buff *skb_new; | 2402 | struct sk_buff *skb_new; |
2379 | struct sta_info *sta, *tmp; | 2403 | struct sta_info *sta, *tmp; |
2380 | bool found_sta = false; | 2404 | bool found_sta = false; |
2405 | int err = 0; | ||
2381 | 2406 | ||
2382 | hdr = (struct ieee80211_hdr *)skb->data; | 2407 | fc = ((struct ieee80211_hdr *)skb->data)->frame_control; |
2383 | memset(&rx, 0, sizeof(rx)); | 2408 | memset(&rx, 0, sizeof(rx)); |
2384 | rx.skb = skb; | 2409 | rx.skb = skb; |
2385 | rx.local = local; | 2410 | rx.local = local; |
2386 | 2411 | ||
2387 | if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) | 2412 | if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) |
2388 | local->dot11ReceivedFragmentCount++; | 2413 | local->dot11ReceivedFragmentCount++; |
2389 | 2414 | ||
2390 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || | 2415 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || |
2391 | test_bit(SCAN_OFF_CHANNEL, &local->scanning))) | 2416 | test_bit(SCAN_OFF_CHANNEL, &local->scanning))) |
2392 | rx.flags |= IEEE80211_RX_IN_SCAN; | 2417 | rx.flags |= IEEE80211_RX_IN_SCAN; |
2393 | 2418 | ||
2419 | if (ieee80211_is_mgmt(fc)) | ||
2420 | err = skb_linearize(skb); | ||
2421 | else | ||
2422 | err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); | ||
2423 | |||
2424 | if (err) { | ||
2425 | dev_kfree_skb(skb); | ||
2426 | return; | ||
2427 | } | ||
2428 | |||
2429 | hdr = (struct ieee80211_hdr *)skb->data; | ||
2394 | ieee80211_parse_qos(&rx); | 2430 | ieee80211_parse_qos(&rx); |
2395 | ieee80211_verify_alignment(&rx); | 2431 | ieee80211_verify_alignment(&rx); |
2396 | 2432 | ||
2397 | if (ieee80211_is_data(hdr->frame_control)) { | 2433 | if (ieee80211_is_data(fc)) { |
2398 | for_each_sta_info(local, hdr->addr2, sta, tmp) { | 2434 | for_each_sta_info(local, hdr->addr2, sta, tmp) { |
2399 | rx.sta = sta; | 2435 | rx.sta = sta; |
2400 | found_sta = true; | 2436 | found_sta = true; |
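The common thread in these rx.c hunks is that received skbs may be non-linear, so every direct dereference of skb->data is now preceded by a guarantee that the needed bytes sit in the linear area: pskb_may_pull() for a short prefix, skb_linearize() where a whole frame must be contiguous (defragmentation, A-MSDU splitting, software crypto), and skb_copy_bits() where a single byte such as the WEP key index suffices. One subtlety, visible in the decrypt handler: pulling or linearizing may reallocate the skb head, which is why hdr is re-derived from skb->data afterwards. A sketch of the guard pattern with a hypothetical helper:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/ieee80211.h>

    /* sketch: safely peek at frame_control on a possibly paged skb */
    static int peek_frame_control(struct sk_buff *skb, __le16 *fc)
    {
            /* make sure the first two bytes are in the linear area */
            if (!pskb_may_pull(skb, 2))
                    return -EINVAL;

            /* read skb->data only after the pull: the head may have moved */
            *fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
            return 0;
    }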
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 85507bd9e341..e1a3defdf581 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -14,6 +14,8 @@ | |||
14 | 14 | ||
15 | #include <linux/if_arp.h> | 15 | #include <linux/if_arp.h> |
16 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
17 | #include <linux/pm_qos_params.h> | ||
18 | #include <net/sch_generic.h> | ||
17 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
18 | #include <net/mac80211.h> | 20 | #include <net/mac80211.h> |
19 | 21 | ||
@@ -246,6 +248,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
246 | struct ieee80211_local *local = hw_to_local(hw); | 248 | struct ieee80211_local *local = hw_to_local(hw); |
247 | bool was_hw_scan; | 249 | bool was_hw_scan; |
248 | 250 | ||
251 | trace_api_scan_completed(local, aborted); | ||
252 | |||
249 | mutex_lock(&local->scan_mtx); | 253 | mutex_lock(&local->scan_mtx); |
250 | 254 | ||
251 | /* | 255 | /* |
@@ -322,6 +326,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
322 | 326 | ||
323 | ieee80211_offchannel_stop_beaconing(local); | 327 | ieee80211_offchannel_stop_beaconing(local); |
324 | 328 | ||
329 | local->leave_oper_channel_time = 0; | ||
325 | local->next_scan_state = SCAN_DECISION; | 330 | local->next_scan_state = SCAN_DECISION; |
326 | local->scan_channel_idx = 0; | 331 | local->scan_channel_idx = 0; |
327 | 332 | ||
@@ -426,11 +431,28 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
426 | return rc; | 431 | return rc; |
427 | } | 432 | } |
428 | 433 | ||
434 | static unsigned long | ||
435 | ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) | ||
436 | { | ||
437 | /* | ||
438 | * TODO: channel switching also consumes quite some time, | ||
439 | * add that delay as well to get a better estimation | ||
440 | */ | ||
441 | if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
442 | return IEEE80211_PASSIVE_CHANNEL_TIME; | ||
443 | return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; | ||
444 | } | ||
445 | |||
429 | static int ieee80211_scan_state_decision(struct ieee80211_local *local, | 446 | static int ieee80211_scan_state_decision(struct ieee80211_local *local, |
430 | unsigned long *next_delay) | 447 | unsigned long *next_delay) |
431 | { | 448 | { |
432 | bool associated = false; | 449 | bool associated = false; |
450 | bool tx_empty = true; | ||
451 | bool bad_latency; | ||
452 | bool listen_int_exceeded; | ||
453 | unsigned long min_beacon_int = 0; | ||
433 | struct ieee80211_sub_if_data *sdata; | 454 | struct ieee80211_sub_if_data *sdata; |
455 | struct ieee80211_channel *next_chan; | ||
434 | 456 | ||
435 | /* if no more bands/channels left, complete scan and advance to the idle state */ | 457 | /* if no more bands/channels left, complete scan and advance to the idle state */ |
436 | if (local->scan_channel_idx >= local->scan_req->n_channels) { | 458 | if (local->scan_channel_idx >= local->scan_req->n_channels) { |
@@ -438,7 +460,11 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
438 | return 1; | 460 | return 1; |
439 | } | 461 | } |
440 | 462 | ||
441 | /* check if at least one STA interface is associated */ | 463 | /* |
464 | * check if at least one STA interface is associated, | ||
465 | * check if at least one STA interface has pending tx frames | ||
466 | * and grab the lowest used beacon interval | ||
467 | */ | ||
442 | mutex_lock(&local->iflist_mtx); | 468 | mutex_lock(&local->iflist_mtx); |
443 | list_for_each_entry(sdata, &local->interfaces, list) { | 469 | list_for_each_entry(sdata, &local->interfaces, list) { |
444 | if (!ieee80211_sdata_running(sdata)) | 470 | if (!ieee80211_sdata_running(sdata)) |
@@ -447,7 +473,16 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
447 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 473 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
448 | if (sdata->u.mgd.associated) { | 474 | if (sdata->u.mgd.associated) { |
449 | associated = true; | 475 | associated = true; |
450 | break; | 476 | |
477 | if (sdata->vif.bss_conf.beacon_int < | ||
478 | min_beacon_int || min_beacon_int == 0) | ||
479 | min_beacon_int = | ||
480 | sdata->vif.bss_conf.beacon_int; | ||
481 | |||
482 | if (!qdisc_all_tx_empty(sdata->dev)) { | ||
483 | tx_empty = false; | ||
484 | break; | ||
485 | } | ||
451 | } | 486 | } |
452 | } | 487 | } |
453 | } | 488 | } |
@@ -456,11 +491,34 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
456 | if (local->scan_channel) { | 491 | if (local->scan_channel) { |
457 | /* | 492 | /* |
458 | * we're currently scanning a different channel, let's | 493 | * we're currently scanning a different channel, let's |
459 | * switch back to the operating channel now if at least | 494 | * see if we can scan another channel without interfering |
460 | * one interface is associated. Otherwise just scan the | 495 | * with the current traffic situation. |
461 | * next channel | 496 | * |
497 | * Since we don't know if the AP has pending frames for us | ||
498 | * we can only check for our tx queues and use the current | ||
499 | * pm_qos requirements for rx. Hence, if no tx traffic occurs | ||
500 | * at all we will scan as many channels in a row as the pm_qos | ||
501 | * latency allows us to. Additionally we also check for the | ||
502 | * currently negotiated listen interval to prevent losing | ||
503 | * frames unnecessarily. | ||
504 | * | ||
505 | * Otherwise switch back to the operating channel. | ||
462 | */ | 506 | */ |
463 | if (associated) | 507 | next_chan = local->scan_req->channels[local->scan_channel_idx]; |
508 | |||
509 | bad_latency = time_after(jiffies + | ||
510 | ieee80211_scan_get_channel_time(next_chan), | ||
511 | local->leave_oper_channel_time + | ||
512 | usecs_to_jiffies(pm_qos_requirement(PM_QOS_NETWORK_LATENCY))); | ||
513 | |||
514 | listen_int_exceeded = time_after(jiffies + | ||
515 | ieee80211_scan_get_channel_time(next_chan), | ||
516 | local->leave_oper_channel_time + | ||
517 | usecs_to_jiffies(min_beacon_int * 1024) * | ||
518 | local->hw.conf.listen_interval); | ||
519 | |||
520 | if (associated && ( !tx_empty || bad_latency || | ||
521 | listen_int_exceeded)) | ||
464 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; | 522 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; |
465 | else | 523 | else |
466 | local->next_scan_state = SCAN_SET_CHANNEL; | 524 | local->next_scan_state = SCAN_SET_CHANNEL; |
@@ -492,6 +550,9 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca | |||
492 | else | 550 | else |
493 | *next_delay = HZ / 10; | 551 | *next_delay = HZ / 10; |
494 | 552 | ||
553 | /* remember when we left the operating channel */ | ||
554 | local->leave_oper_channel_time = jiffies; | ||
555 | |||
495 | /* advance to the next channel to be scanned */ | 556 | /* advance to the next channel to be scanned */ |
496 | local->next_scan_state = SCAN_SET_CHANNEL; | 557 | local->next_scan_state = SCAN_SET_CHANNEL; |
497 | } | 558 | } |
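The scan decision now estimates how long the next channel visit will take and weighs that against two budgets: the PM QoS network latency requirement and the negotiated listen interval (beacon intervals are counted in TUs, hence the * 1024 converting them to microseconds). The scan only detours back to the operating channel when traffic is pending, the latency budget would be exceeded, or the listen interval would be overrun. The core jiffies arithmetic, as a sketch (exceeds_budget is a hypothetical helper; leave_time corresponds to leave_oper_channel_time above):

    #include <linux/types.h>
    #include <linux/jiffies.h>

    /*
     * sketch: would dwelling dwell_jiffies on a scan channel keep us away
     * from the operating channel longer than budget_usec allows?
     */
    static bool exceeds_budget(unsigned long leave_time,
                               unsigned long dwell_jiffies,
                               unsigned int budget_usec)
    {
            return time_after(jiffies + dwell_jiffies,
                              leave_time + usecs_to_jiffies(budget_usec));
    }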
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index fb12cec4d333..3de7a2260d65 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -250,9 +250,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
250 | * enable session_timer's data differentiation. Refer to | 250 | * enable session_timer's data differentiation. Refer to |
251 | * sta_rx_agg_session_timer_expired for usage */ | 251 | * sta_rx_agg_session_timer_expired for usage */ |
252 | sta->timer_to_tid[i] = i; | 252 | sta->timer_to_tid[i] = i; |
253 | /* rx */ | ||
254 | sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; | ||
255 | sta->ampdu_mlme.tid_rx[i] = NULL; | ||
256 | /* tx */ | 253 | /* tx */ |
257 | sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; | 254 | sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; |
258 | sta->ampdu_mlme.tid_tx[i] = NULL; | 255 | sta->ampdu_mlme.tid_tx[i] = NULL; |
@@ -578,7 +575,7 @@ static int sta_info_buffer_expired(struct sta_info *sta, | |||
578 | } | 575 | } |
579 | 576 | ||
580 | 577 | ||
581 | static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | 578 | static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, |
582 | struct sta_info *sta) | 579 | struct sta_info *sta) |
583 | { | 580 | { |
584 | unsigned long flags; | 581 | unsigned long flags; |
@@ -586,7 +583,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
586 | struct ieee80211_sub_if_data *sdata; | 583 | struct ieee80211_sub_if_data *sdata; |
587 | 584 | ||
588 | if (skb_queue_empty(&sta->ps_tx_buf)) | 585 | if (skb_queue_empty(&sta->ps_tx_buf)) |
589 | return; | 586 | return false; |
590 | 587 | ||
591 | for (;;) { | 588 | for (;;) { |
592 | spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); | 589 | spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); |
@@ -611,6 +608,8 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
611 | if (skb_queue_empty(&sta->ps_tx_buf)) | 608 | if (skb_queue_empty(&sta->ps_tx_buf)) |
612 | sta_info_clear_tim_bit(sta); | 609 | sta_info_clear_tim_bit(sta); |
613 | } | 610 | } |
611 | |||
612 | return true; | ||
614 | } | 613 | } |
615 | 614 | ||
616 | static int __must_check __sta_info_destroy(struct sta_info *sta) | 615 | static int __must_check __sta_info_destroy(struct sta_info *sta) |
@@ -619,7 +618,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
619 | struct ieee80211_sub_if_data *sdata; | 618 | struct ieee80211_sub_if_data *sdata; |
620 | struct sk_buff *skb; | 619 | struct sk_buff *skb; |
621 | unsigned long flags; | 620 | unsigned long flags; |
622 | int ret, i; | 621 | int ret; |
623 | 622 | ||
624 | might_sleep(); | 623 | might_sleep(); |
625 | 624 | ||
@@ -629,6 +628,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
629 | local = sta->local; | 628 | local = sta->local; |
630 | sdata = sta->sdata; | 629 | sdata = sta->sdata; |
631 | 630 | ||
631 | /* | ||
632 | * Before removing the station from the driver and | ||
633 | * rate control, it might still start new aggregation | ||
634 | * sessions -- block that to make sure the tear-down | ||
635 | * will be sufficient. | ||
636 | */ | ||
637 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); | ||
638 | ieee80211_sta_tear_down_BA_sessions(sta); | ||
639 | |||
632 | spin_lock_irqsave(&local->sta_lock, flags); | 640 | spin_lock_irqsave(&local->sta_lock, flags); |
633 | ret = sta_info_hash_del(local, sta); | 641 | ret = sta_info_hash_del(local, sta); |
634 | /* this might still be the pending list ... which is fine */ | 642 | /* this might still be the pending list ... which is fine */ |
@@ -645,9 +653,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
645 | * may mean it is removed from hardware which requires that | 653 | * may mean it is removed from hardware which requires that |
646 | * the key->sta pointer is still valid, so flush the key todo | 654 | * the key->sta pointer is still valid, so flush the key todo |
647 | * list here. | 655 | * list here. |
648 | * | ||
649 | * ieee80211_key_todo() will synchronize_rcu() so after this | ||
650 | * nothing can reference this sta struct any more. | ||
651 | */ | 656 | */ |
652 | ieee80211_key_todo(); | 657 | ieee80211_key_todo(); |
653 | 658 | ||
@@ -679,11 +684,17 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
679 | sdata = sta->sdata; | 684 | sdata = sta->sdata; |
680 | } | 685 | } |
681 | 686 | ||
687 | /* | ||
688 | * At this point, after we wait for an RCU grace period, | ||
689 | * neither mac80211 nor the driver can reference this | ||
690 | * sta struct any more except by still existing timers | ||
691 | * associated with this station that we clean up below. | ||
692 | */ | ||
693 | synchronize_rcu(); | ||
694 | |||
682 | #ifdef CONFIG_MAC80211_MESH | 695 | #ifdef CONFIG_MAC80211_MESH |
683 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 696 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
684 | mesh_accept_plinks_update(sdata); | 697 | mesh_accept_plinks_update(sdata); |
685 | del_timer(&sta->plink_timer); | ||
686 | } | ||
687 | #endif | 698 | #endif |
688 | 699 | ||
689 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 700 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
@@ -710,50 +721,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
710 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) | 721 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) |
711 | dev_kfree_skb_any(skb); | 722 | dev_kfree_skb_any(skb); |
712 | 723 | ||
713 | for (i = 0; i < STA_TID_NUM; i++) { | ||
714 | struct tid_ampdu_rx *tid_rx; | ||
715 | struct tid_ampdu_tx *tid_tx; | ||
716 | |||
717 | spin_lock_bh(&sta->lock); | ||
718 | tid_rx = sta->ampdu_mlme.tid_rx[i]; | ||
719 | /* Make sure timer won't free the tid_rx struct, see below */ | ||
720 | if (tid_rx) | ||
721 | tid_rx->shutdown = true; | ||
722 | |||
723 | spin_unlock_bh(&sta->lock); | ||
724 | |||
725 | /* | ||
726 | * Outside spinlock - shutdown is true now so that the timer | ||
727 | * won't free tid_rx, we have to do that now. Can't let the | ||
728 | * timer do it because we have to sync the timer outside the | ||
729 | * lock that it takes itself. | ||
730 | */ | ||
731 | if (tid_rx) { | ||
732 | del_timer_sync(&tid_rx->session_timer); | ||
733 | kfree(tid_rx); | ||
734 | } | ||
735 | |||
736 | /* | ||
737 | * No need to do such complications for TX agg sessions, the | ||
738 | * path leading to freeing the tid_tx struct goes via a call | ||
739 | * from the driver, and thus needs to look up the sta struct | ||
740 | * again, which cannot be found when we get here. Hence, we | ||
741 | * just need to delete the timer and free the aggregation | ||
742 | * info; we won't be telling the peer about it then but that | ||
743 | * doesn't matter if we're not talking to it again anyway. | ||
744 | */ | ||
745 | tid_tx = sta->ampdu_mlme.tid_tx[i]; | ||
746 | if (tid_tx) { | ||
747 | del_timer_sync(&tid_tx->addba_resp_timer); | ||
748 | /* | ||
749 | * STA removed while aggregation session being | ||
750 | * started? Bit odd, but purge frames anyway. | ||
751 | */ | ||
752 | skb_queue_purge(&tid_tx->pending); | ||
753 | kfree(tid_tx); | ||
754 | } | ||
755 | } | ||
756 | |||
757 | __sta_info_free(local, sta); | 724 | __sta_info_free(local, sta); |
758 | 725 | ||
759 | return 0; | 726 | return 0; |
@@ -790,15 +757,20 @@ static void sta_info_cleanup(unsigned long data) | |||
790 | { | 757 | { |
791 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 758 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
792 | struct sta_info *sta; | 759 | struct sta_info *sta; |
760 | bool timer_needed = false; | ||
793 | 761 | ||
794 | rcu_read_lock(); | 762 | rcu_read_lock(); |
795 | list_for_each_entry_rcu(sta, &local->sta_list, list) | 763 | list_for_each_entry_rcu(sta, &local->sta_list, list) |
796 | sta_info_cleanup_expire_buffered(local, sta); | 764 | if (sta_info_cleanup_expire_buffered(local, sta)) |
765 | timer_needed = true; | ||
797 | rcu_read_unlock(); | 766 | rcu_read_unlock(); |
798 | 767 | ||
799 | if (local->quiescing) | 768 | if (local->quiescing) |
800 | return; | 769 | return; |
801 | 770 | ||
771 | if (!timer_needed) | ||
772 | return; | ||
773 | |||
802 | local->sta_cleanup.expires = | 774 | local->sta_cleanup.expires = |
803 | round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); | 775 | round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); |
804 | add_timer(&local->sta_cleanup); | 776 | add_timer(&local->sta_cleanup); |
@@ -992,6 +964,8 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw, | |||
992 | { | 964 | { |
993 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 965 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
994 | 966 | ||
967 | trace_api_sta_block_awake(sta->local, pubsta, block); | ||
968 | |||
995 | if (block) | 969 | if (block) |
996 | set_sta_flags(sta, WLAN_STA_PS_DRIVER); | 970 | set_sta_flags(sta, WLAN_STA_PS_DRIVER); |
997 | else | 971 | else |
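The station destroy path is much simpler now: instead of hand-freeing per-TID aggregation state with careful timer shutdown flags, it blocks new BA sessions (WLAN_STA_BLOCK_BA), tears down the existing ones through the normal path, removes the station from the hash, and then waits out a full RCU grace period before freeing, so no reader can be mid-use when the memory goes away. The general shape of that ordering, as a sketch (struct my_obj and the helpers other than synchronize_rcu(), del_timer_sync(), and kfree() are placeholders):

    #include <linux/rcupdate.h>
    #include <linux/timer.h>
    #include <linux/slab.h>

    /* sketch of the block/tear-down/unpublish/wait/free ordering */
    static void remove_object(struct my_obj *obj)
    {
            block_new_users(obj);           /* e.g. set a BLOCK flag */
            tear_down_sessions(obj);        /* stop what is already running */
            unpublish(obj);                 /* hash/list removal under lock */
            synchronize_rcu();              /* all RCU readers are done */
            del_timer_sync(&obj->timer);    /* timers held the last refs */
            kfree(obj);
    }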
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 822d84522937..48a5e80957f0 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -35,8 +35,8 @@ | |||
35 | * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next | 35 | * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next |
36 | * frame to this station is transmitted. | 36 | * frame to this station is transmitted. |
37 | * @WLAN_STA_MFP: Management frame protection is used with this STA. | 37 | * @WLAN_STA_MFP: Management frame protection is used with this STA. |
38 | * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle. | 38 | * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX) |
39 | * Used to deny ADDBA requests (both TX and RX). | 39 | * during suspend/resume and station removal. |
40 | * @WLAN_STA_PS_DRIVER: driver requires keeping this station in | 40 | * @WLAN_STA_PS_DRIVER: driver requires keeping this station in |
41 | * power-save mode logically to flush frames that might still | 41 | * power-save mode logically to flush frames that might still |
42 | * be in the queues | 42 | * be in the queues |
@@ -57,7 +57,7 @@ enum ieee80211_sta_info_flags { | |||
57 | WLAN_STA_WDS = 1<<7, | 57 | WLAN_STA_WDS = 1<<7, |
58 | WLAN_STA_CLEAR_PS_FILT = 1<<9, | 58 | WLAN_STA_CLEAR_PS_FILT = 1<<9, |
59 | WLAN_STA_MFP = 1<<10, | 59 | WLAN_STA_MFP = 1<<10, |
60 | WLAN_STA_SUSPEND = 1<<11, | 60 | WLAN_STA_BLOCK_BA = 1<<11, |
61 | WLAN_STA_PS_DRIVER = 1<<12, | 61 | WLAN_STA_PS_DRIVER = 1<<12, |
62 | WLAN_STA_PSPOLL = 1<<13, | 62 | WLAN_STA_PSPOLL = 1<<13, |
63 | WLAN_STA_DISASSOC = 1<<14, | 63 | WLAN_STA_DISASSOC = 1<<14, |
@@ -106,7 +106,6 @@ struct tid_ampdu_tx { | |||
106 | * @buf_size: buffer size for incoming A-MPDUs | 106 | * @buf_size: buffer size for incoming A-MPDUs |
107 | * @timeout: reset timer value (in TUs). | 107 | * @timeout: reset timer value (in TUs). |
108 | * @dialog_token: dialog token for aggregation session | 108 | * @dialog_token: dialog token for aggregation session |
109 | * @shutdown: this session is being shut down due to STA removal | ||
110 | */ | 109 | */ |
111 | struct tid_ampdu_rx { | 110 | struct tid_ampdu_rx { |
112 | struct sk_buff **reorder_buf; | 111 | struct sk_buff **reorder_buf; |
@@ -118,7 +117,6 @@ struct tid_ampdu_rx { | |||
118 | u16 buf_size; | 117 | u16 buf_size; |
119 | u16 timeout; | 118 | u16 timeout; |
120 | u8 dialog_token; | 119 | u8 dialog_token; |
121 | bool shutdown; | ||
122 | }; | 120 | }; |
123 | 121 | ||
124 | /** | 122 | /** |
@@ -156,7 +154,7 @@ enum plink_state { | |||
156 | */ | 154 | */ |
157 | struct sta_ampdu_mlme { | 155 | struct sta_ampdu_mlme { |
158 | /* rx */ | 156 | /* rx */ |
159 | u8 tid_state_rx[STA_TID_NUM]; | 157 | bool tid_active_rx[STA_TID_NUM]; |
160 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | 158 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; |
161 | /* tx */ | 159 | /* tx */ |
162 | u8 tid_state_tx[STA_TID_NUM]; | 160 | u8 tid_state_tx[STA_TID_NUM]; |
@@ -200,7 +198,6 @@ struct sta_ampdu_mlme { | |||
200 | * @rx_fragments: number of received MPDUs | 198 | * @rx_fragments: number of received MPDUs |
201 | * @rx_dropped: number of dropped MPDUs from this STA | 199 | * @rx_dropped: number of dropped MPDUs from this STA |
202 | * @last_signal: signal of last received frame from this STA | 200 | * @last_signal: signal of last received frame from this STA |
203 | * @last_noise: noise of last received frame from this STA | ||
204 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) | 201 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) |
205 | * @tx_filtered_count: number of frames the hardware filtered for this STA | 202 | * @tx_filtered_count: number of frames the hardware filtered for this STA |
206 | * @tx_retry_failed: number of frames that failed retry | 203 | * @tx_retry_failed: number of frames that failed retry |
@@ -267,7 +264,6 @@ struct sta_info { | |||
267 | unsigned long rx_fragments; | 264 | unsigned long rx_fragments; |
268 | unsigned long rx_dropped; | 265 | unsigned long rx_dropped; |
269 | int last_signal; | 266 | int last_signal; |
270 | int last_noise; | ||
271 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; | 267 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; |
272 | 268 | ||
273 | /* Updated from TX status path only, no locking requirements */ | 269 | /* Updated from TX status path only, no locking requirements */ |
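tid_state_rx shrinks from a multi-valued state array to a plain tid_active_rx bool because, per the rx.c hunks above, all RX-side BA state is now consulted and modified under sta->lock instead of relying on intermediate state values. A sketch of the lookup idiom (real callers such as the reorder path keep the lock held while they also use tid_rx[tid]):

    #include <linux/spinlock.h>

    /* sketch: RX BA activity is a bool guarded by sta->lock */
    static bool rx_ba_active(struct sta_info *sta, int tid)
    {
            bool active;

            spin_lock(&sta->lock);
            active = sta->ampdu_mlme.tid_active_rx[tid];
            spin_unlock(&sta->lock);
            return active;
    }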
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 56d5b9a6ec5b..11805a3a626f 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -171,7 +171,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
171 | struct net_device *prev_dev = NULL; | 171 | struct net_device *prev_dev = NULL; |
172 | struct sta_info *sta, *tmp; | 172 | struct sta_info *sta, *tmp; |
173 | int retry_count = -1, i; | 173 | int retry_count = -1, i; |
174 | bool injected; | 174 | bool send_to_cooked; |
175 | 175 | ||
176 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { | 176 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { |
177 | /* the HW cannot have attempted that rate */ | 177 | /* the HW cannot have attempted that rate */ |
@@ -296,11 +296,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
296 | /* this was a transmitted frame, but now we want to reuse it */ | 296 | /* this was a transmitted frame, but now we want to reuse it */ |
297 | skb_orphan(skb); | 297 | skb_orphan(skb); |
298 | 298 | ||
299 | /* Need to make a copy before skb->cb gets cleared */ | ||
300 | send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) || | ||
301 | (type != IEEE80211_FTYPE_DATA); | ||
302 | |||
299 | /* | 303 | /* |
300 | * This is a bit racy but we can avoid a lot of work | 304 | * This is a bit racy but we can avoid a lot of work |
301 | * with this test... | 305 | * with this test... |
302 | */ | 306 | */ |
303 | if (!local->monitors && !local->cooked_mntrs) { | 307 | if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) { |
304 | dev_kfree_skb(skb); | 308 | dev_kfree_skb(skb); |
305 | return; | 309 | return; |
306 | } | 310 | } |
@@ -345,9 +349,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
345 | /* for now report the total retry_count */ | 349 | /* for now report the total retry_count */ |
346 | rthdr->data_retries = retry_count; | 350 | rthdr->data_retries = retry_count; |
347 | 351 | ||
348 | /* Need to make a copy before skb->cb gets cleared */ | ||
349 | injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED); | ||
350 | |||
351 | /* XXX: is this sufficient for BPF? */ | 352 | /* XXX: is this sufficient for BPF? */ |
352 | skb_set_mac_header(skb, 0); | 353 | skb_set_mac_header(skb, 0); |
353 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 354 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -362,8 +363,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
362 | continue; | 363 | continue; |
363 | 364 | ||
364 | if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && | 365 | if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && |
365 | !injected && | 366 | !send_to_cooked) |
366 | (type == IEEE80211_FTYPE_DATA)) | ||
367 | continue; | 367 | continue; |
368 | 368 | ||
369 | if (prev_dev) { | 369 | if (prev_dev) { |
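The fix here hinges on info pointing into skb->cb, an area the monitor delivery path reuses: whether the frame should reach cooked monitors (it was injected, or it is not a data frame) has to be latched while cb is still intact, and the early bail-out can then also skip work when no cooked monitor wants the frame. A sketch of the latch:

    #include <net/mac80211.h>

    /* sketch: read what is needed from skb->cb before it gets reused */
    static bool wants_cooked_monitor(struct sk_buff *skb, u16 type)
    {
            struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

            return (info->flags & IEEE80211_TX_CTL_INJECTED) ||
                   type != IEEE80211_FTYPE_DATA;
    }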
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cfc473e1b050..e2aa972d584f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -429,6 +429,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
429 | struct sta_info *sta = tx->sta; | 429 | struct sta_info *sta = tx->sta; |
430 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 430 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
431 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 431 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
432 | struct ieee80211_local *local = tx->local; | ||
432 | u32 staflags; | 433 | u32 staflags; |
433 | 434 | ||
434 | if (unlikely(!sta || | 435 | if (unlikely(!sta || |
@@ -476,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
476 | info->control.vif = &tx->sdata->vif; | 477 | info->control.vif = &tx->sdata->vif; |
477 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 478 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
478 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); | 479 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); |
480 | |||
481 | if (!timer_pending(&local->sta_cleanup)) | ||
482 | mod_timer(&local->sta_cleanup, | ||
483 | round_jiffies(jiffies + | ||
484 | STA_INFO_CLEANUP_INTERVAL)); | ||
485 | |||
479 | return TX_QUEUED; | 486 | return TX_QUEUED; |
480 | } | 487 | } |
481 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 488 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
@@ -513,6 +520,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
513 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) | 520 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) |
514 | tx->key = key; | 521 | tx->key = key; |
515 | else if (ieee80211_is_mgmt(hdr->frame_control) && | 522 | else if (ieee80211_is_mgmt(hdr->frame_control) && |
523 | is_multicast_ether_addr(hdr->addr1) && | ||
524 | ieee80211_is_robust_mgmt_frame(hdr) && | ||
516 | (key = rcu_dereference(tx->sdata->default_mgmt_key))) | 525 | (key = rcu_dereference(tx->sdata->default_mgmt_key))) |
517 | tx->key = key; | 526 | tx->key = key; |
518 | else if ((key = rcu_dereference(tx->sdata->default_key))) | 527 | else if ((key = rcu_dereference(tx->sdata->default_key))) |
@@ -1142,13 +1151,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1142 | 1151 | ||
1143 | if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && | 1152 | if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && |
1144 | (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { | 1153 | (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { |
1145 | unsigned long flags; | ||
1146 | struct tid_ampdu_tx *tid_tx; | 1154 | struct tid_ampdu_tx *tid_tx; |
1147 | 1155 | ||
1148 | qc = ieee80211_get_qos_ctl(hdr); | 1156 | qc = ieee80211_get_qos_ctl(hdr); |
1149 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; | 1157 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; |
1150 | 1158 | ||
1151 | spin_lock_irqsave(&tx->sta->lock, flags); | 1159 | spin_lock(&tx->sta->lock); |
1152 | /* | 1160 | /* |
1153 | * XXX: This spinlock could be fairly expensive, but see the | 1161 | * XXX: This spinlock could be fairly expensive, but see the |
1154 | * comment in agg-tx.c:ieee80211_agg_tx_operational(). | 1162 | * comment in agg-tx.c:ieee80211_agg_tx_operational(). |
@@ -1173,7 +1181,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1173 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 1181 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
1174 | __skb_queue_tail(&tid_tx->pending, skb); | 1182 | __skb_queue_tail(&tid_tx->pending, skb); |
1175 | } | 1183 | } |
1176 | spin_unlock_irqrestore(&tx->sta->lock, flags); | 1184 | spin_unlock(&tx->sta->lock); |
1177 | 1185 | ||
1178 | if (unlikely(queued)) | 1186 | if (unlikely(queued)) |
1179 | return TX_QUEUED; | 1187 | return TX_QUEUED; |
@@ -2011,14 +2019,12 @@ void ieee80211_tx_pending(unsigned long data) | |||
2011 | while (!skb_queue_empty(&local->pending[i])) { | 2019 | while (!skb_queue_empty(&local->pending[i])) { |
2012 | struct sk_buff *skb = __skb_dequeue(&local->pending[i]); | 2020 | struct sk_buff *skb = __skb_dequeue(&local->pending[i]); |
2013 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2021 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2014 | struct ieee80211_sub_if_data *sdata; | ||
2015 | 2022 | ||
2016 | if (WARN_ON(!info->control.vif)) { | 2023 | if (WARN_ON(!info->control.vif)) { |
2017 | kfree_skb(skb); | 2024 | kfree_skb(skb); |
2018 | continue; | 2025 | continue; |
2019 | } | 2026 | } |
2020 | 2027 | ||
2021 | sdata = vif_to_sdata(info->control.vif); | ||
2022 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, | 2028 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, |
2023 | flags); | 2029 | flags); |
2024 | 2030 | ||
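Buffering a frame for a dozing station now arms local->sta_cleanup on demand; together with sta_info_cleanup() above only re-arming itself while some station still has buffered frames, the periodic timer stops firing on an idle system instead of ticking forever. The lazy periodic-timer idiom in isolation:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* sketch: arm the cleanup timer only when work was just queued */
    static void note_work_queued(struct timer_list *cleanup,
                                 unsigned long interval)
    {
            if (!timer_pending(cleanup))
                    mod_timer(cleanup, round_jiffies(jiffies + interval));
    }

The handler then simply declines to re-arm when a pass finds nothing left to expire.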
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 53af57047435..2b75b4fb68f4 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -270,6 +270,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | |||
270 | struct ieee80211_local *local = hw_to_local(hw); | 270 | struct ieee80211_local *local = hw_to_local(hw); |
271 | struct ieee80211_sub_if_data *sdata; | 271 | struct ieee80211_sub_if_data *sdata; |
272 | 272 | ||
273 | trace_wake_queue(local, queue, reason); | ||
274 | |||
273 | if (WARN_ON(queue >= hw->queues)) | 275 | if (WARN_ON(queue >= hw->queues)) |
274 | return; | 276 | return; |
275 | 277 | ||
@@ -312,6 +314,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, | |||
312 | struct ieee80211_local *local = hw_to_local(hw); | 314 | struct ieee80211_local *local = hw_to_local(hw); |
313 | struct ieee80211_sub_if_data *sdata; | 315 | struct ieee80211_sub_if_data *sdata; |
314 | 316 | ||
317 | trace_stop_queue(local, queue, reason); | ||
318 | |||
315 | if (WARN_ON(queue >= hw->queues)) | 319 | if (WARN_ON(queue >= hw->queues)) |
316 | return; | 320 | return; |
317 | 321 | ||
@@ -796,6 +800,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) | |||
796 | 800 | ||
797 | drv_conf_tx(local, queue, &qparam); | 801 | drv_conf_tx(local, queue, &qparam); |
798 | } | 802 | } |
803 | |||
804 | /* after reinitializing the QoS TX queues to their defaults, | ||
805 | * disable QoS entirely */ | ||
806 | local->hw.conf.flags &= ~IEEE80211_CONF_QOS; | ||
807 | drv_config(local, IEEE80211_CONF_CHANGE_QOS); | ||
799 | } | 808 | } |
800 | 809 | ||
801 | void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, | 810 | void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, |
@@ -1135,7 +1144,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1135 | 1144 | ||
1136 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | 1145 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { |
1137 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | 1146 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
1138 | clear_sta_flags(sta, WLAN_STA_SUSPEND); | 1147 | clear_sta_flags(sta, WLAN_STA_BLOCK_BA); |
1139 | } | 1148 | } |
1140 | } | 1149 | } |
1141 | 1150 | ||
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 15e1ba931b87..bdb1d05b16fc 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -920,11 +920,16 @@ static void ieee80211_work_work(struct work_struct *work) | |||
920 | run_again(local, jiffies + HZ/2); | 920 | run_again(local, jiffies + HZ/2); |
921 | } | 921 | } |
922 | 922 | ||
923 | if (list_empty(&local->work_list) && local->scan_req) | 923 | mutex_lock(&local->scan_mtx); |
924 | |||
925 | if (list_empty(&local->work_list) && local->scan_req && | ||
926 | !local->scanning) | ||
924 | ieee80211_queue_delayed_work(&local->hw, | 927 | ieee80211_queue_delayed_work(&local->hw, |
925 | &local->scan_work, | 928 | &local->scan_work, |
926 | round_jiffies_relative(0)); | 929 | round_jiffies_relative(0)); |
927 | 930 | ||
931 | mutex_unlock(&local->scan_mtx); | ||
932 | |||
928 | mutex_unlock(&local->work_mtx); | 933 | mutex_unlock(&local->work_mtx); |
929 | 934 | ||
930 | ieee80211_recalc_idle(local); | 935 | ieee80211_recalc_idle(local); |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 8fb0ae616761..7ba06939829f 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data) | |||
802 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); | 802 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); |
803 | 803 | ||
804 | while (!kthread_should_stop()) { | 804 | while (!kthread_should_stop()) { |
805 | wait_event_interruptible(*tinfo->sock->sk->sk_sleep, | 805 | wait_event_interruptible(*sk_sleep(tinfo->sock->sk), |
806 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) | 806 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) |
807 | || kthread_should_stop()); | 807 | || kthread_should_stop()); |
808 | 808 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 795424396aff..6464a1972a69 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock) | |||
545 | struct hlist_head *head; | 545 | struct hlist_head *head; |
546 | struct sock *osk; | 546 | struct sock *osk; |
547 | struct hlist_node *node; | 547 | struct hlist_node *node; |
548 | s32 pid = current->tgid; | 548 | s32 pid = task_tgid_vnr(current); |
549 | int err; | 549 | int err; |
550 | static s32 rover = -4097; | 550 | static s32 rover = -4097; |
551 | 551 | ||
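current->tgid is the global thread-group id; task_tgid_vnr() returns the same id as seen from the task's own pid namespace, which presumably keeps autobound netlink addresses meaningful for processes running inside pid namespaces. The accessor in isolation:

    #include <linux/types.h>
    #include <linux/sched.h>

    /* sketch: namespace-relative tgid as the autobind starting point */
    static s32 autobind_seed(void)
    {
            return task_tgid_vnr(current);
    }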
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 06438fa2b1e5..aa4308afcc7f 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -21,15 +21,17 @@ | |||
21 | 21 | ||
22 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ | 22 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ |
23 | 23 | ||
24 | static inline void genl_lock(void) | 24 | void genl_lock(void) |
25 | { | 25 | { |
26 | mutex_lock(&genl_mutex); | 26 | mutex_lock(&genl_mutex); |
27 | } | 27 | } |
28 | EXPORT_SYMBOL(genl_lock); | ||
28 | 29 | ||
29 | static inline void genl_unlock(void) | 30 | void genl_unlock(void) |
30 | { | 31 | { |
31 | mutex_unlock(&genl_mutex); | 32 | mutex_unlock(&genl_mutex); |
32 | } | 33 | } |
34 | EXPORT_SYMBOL(genl_unlock); | ||
33 | 35 | ||
34 | #define GENL_FAM_TAB_SIZE 16 | 36 | #define GENL_FAM_TAB_SIZE 16 |
35 | #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) | 37 | #define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index fa07f044b599..06cb02796a0e 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -739,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, | |||
739 | DEFINE_WAIT(wait); | 739 | DEFINE_WAIT(wait); |
740 | 740 | ||
741 | for (;;) { | 741 | for (;;) { |
742 | prepare_to_wait(sk->sk_sleep, &wait, | 742 | prepare_to_wait(sk_sleep(sk), &wait, |
743 | TASK_INTERRUPTIBLE); | 743 | TASK_INTERRUPTIBLE); |
744 | if (sk->sk_state != TCP_SYN_SENT) | 744 | if (sk->sk_state != TCP_SYN_SENT) |
745 | break; | 745 | break; |
@@ -752,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr, | |||
752 | err = -ERESTARTSYS; | 752 | err = -ERESTARTSYS; |
753 | break; | 753 | break; |
754 | } | 754 | } |
755 | finish_wait(sk->sk_sleep, &wait); | 755 | finish_wait(sk_sleep(sk), &wait); |
756 | if (err) | 756 | if (err) |
757 | goto out_release; | 757 | goto out_release; |
758 | } | 758 | } |
@@ -798,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
798 | * hooked into the SABM we saved | 798 | * hooked into the SABM we saved |
799 | */ | 799 | */ |
800 | for (;;) { | 800 | for (;;) { |
801 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 801 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
802 | skb = skb_dequeue(&sk->sk_receive_queue); | 802 | skb = skb_dequeue(&sk->sk_receive_queue); |
803 | if (skb) | 803 | if (skb) |
804 | break; | 804 | break; |
@@ -816,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags) | |||
816 | err = -ERESTARTSYS; | 816 | err = -ERESTARTSYS; |
817 | break; | 817 | break; |
818 | } | 818 | } |
819 | finish_wait(sk->sk_sleep, &wait); | 819 | finish_wait(sk_sleep(sk), &wait); |
820 | if (err) | 820 | if (err) |
821 | goto out_release; | 821 | goto out_release; |
822 | 822 | ||
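All the open-coded sk->sk_sleep dereferences (here and in the ip_vs_sync hunk above) become sk_sleep(sk) calls; the accessor hides where the wait queue actually lives inside the socket. The canonical interruptible wait loop with the accessor, as a sketch (condition_met is a placeholder predicate, and any socket lock handling around schedule() is elided):

    #include <net/sock.h>
    #include <linux/wait.h>
    #include <linux/sched.h>

    /* sketch: interruptible wait on a socket's wait queue via sk_sleep() */
    static int wait_on_sock(struct sock *sk)
    {
            DEFINE_WAIT(wait);
            int err = 0;

            for (;;) {
                    prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                    if (condition_met(sk))          /* placeholder */
                            break;
                    if (signal_pending(current)) {
                            err = -ERESTARTSYS;
                            break;
                    }
                    schedule();
            }
            finish_wait(sk_sleep(sk), &wait);
            return err;
    }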
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 243946d4809d..2078a277e06b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -82,6 +82,7 @@ | |||
82 | #include <linux/mutex.h> | 82 | #include <linux/mutex.h> |
83 | #include <linux/if_vlan.h> | 83 | #include <linux/if_vlan.h> |
84 | #include <linux/virtio_net.h> | 84 | #include <linux/virtio_net.h> |
85 | #include <linux/errqueue.h> | ||
85 | 86 | ||
86 | #ifdef CONFIG_INET | 87 | #ifdef CONFIG_INET |
87 | #include <net/inet_common.h> | 88 | #include <net/inet_common.h> |
@@ -315,6 +316,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk) | |||
315 | 316 | ||
316 | static void packet_sock_destruct(struct sock *sk) | 317 | static void packet_sock_destruct(struct sock *sk) |
317 | { | 318 | { |
319 | skb_queue_purge(&sk->sk_error_queue); | ||
320 | |||
318 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); | 321 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); |
319 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); | 322 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); |
320 | 323 | ||
@@ -483,6 +486,9 @@ retry: | |||
483 | skb->dev = dev; | 486 | skb->dev = dev; |
484 | skb->priority = sk->sk_priority; | 487 | skb->priority = sk->sk_priority; |
485 | skb->mark = sk->sk_mark; | 488 | skb->mark = sk->sk_mark; |
489 | err = sock_tx_timestamp(msg, sk, skb_tx(skb)); | ||
490 | if (err < 0) | ||
491 | goto out_unlock; | ||
486 | 492 | ||
487 | dev_queue_xmit(skb); | 493 | dev_queue_xmit(skb); |
488 | rcu_read_unlock(); | 494 | rcu_read_unlock(); |
@@ -1188,6 +1194,9 @@ static int packet_snd(struct socket *sock, | |||
1188 | err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); | 1194 | err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); |
1189 | if (err) | 1195 | if (err) |
1190 | goto out_free; | 1196 | goto out_free; |
1197 | err = sock_tx_timestamp(msg, sk, skb_tx(skb)); | ||
1198 | if (err < 0) | ||
1199 | goto out_free; | ||
1191 | 1200 | ||
1192 | skb->protocol = proto; | 1201 | skb->protocol = proto; |
1193 | skb->dev = dev; | 1202 | skb->dev = dev; |
@@ -1487,6 +1496,51 @@ out: | |||
1487 | return err; | 1496 | return err; |
1488 | } | 1497 | } |
1489 | 1498 | ||
1499 | static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len) | ||
1500 | { | ||
1501 | struct sock_exterr_skb *serr; | ||
1502 | struct sk_buff *skb, *skb2; | ||
1503 | int copied, err; | ||
1504 | |||
1505 | err = -EAGAIN; | ||
1506 | skb = skb_dequeue(&sk->sk_error_queue); | ||
1507 | if (skb == NULL) | ||
1508 | goto out; | ||
1509 | |||
1510 | copied = skb->len; | ||
1511 | if (copied > len) { | ||
1512 | msg->msg_flags |= MSG_TRUNC; | ||
1513 | copied = len; | ||
1514 | } | ||
1515 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
1516 | if (err) | ||
1517 | goto out_free_skb; | ||
1518 | |||
1519 | sock_recv_timestamp(msg, sk, skb); | ||
1520 | |||
1521 | serr = SKB_EXT_ERR(skb); | ||
1522 | put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP, | ||
1523 | sizeof(serr->ee), &serr->ee); | ||
1524 | |||
1525 | msg->msg_flags |= MSG_ERRQUEUE; | ||
1526 | err = copied; | ||
1527 | |||
1528 | /* Reset and regenerate socket error */ | ||
1529 | spin_lock_bh(&sk->sk_error_queue.lock); | ||
1530 | sk->sk_err = 0; | ||
1531 | if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { | ||
1532 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; | ||
1533 | spin_unlock_bh(&sk->sk_error_queue.lock); | ||
1534 | sk->sk_error_report(sk); | ||
1535 | } else | ||
1536 | spin_unlock_bh(&sk->sk_error_queue.lock); | ||
1537 | |||
1538 | out_free_skb: | ||
1539 | kfree_skb(skb); | ||
1540 | out: | ||
1541 | return err; | ||
1542 | } | ||
1543 | |||
1490 | /* | 1544 | /* |
1491 | * Pull a packet from our receive queue and hand it to the user. | 1545 | * Pull a packet from our receive queue and hand it to the user. |
1492 | * If necessary we block. | 1546 | * If necessary we block. |
@@ -1502,7 +1556,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1502 | int vnet_hdr_len = 0; | 1556 | int vnet_hdr_len = 0; |
1503 | 1557 | ||
1504 | err = -EINVAL; | 1558 | err = -EINVAL; |
1505 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) | 1559 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) |
1506 | goto out; | 1560 | goto out; |
1507 | 1561 | ||
1508 | #if 0 | 1562 | #if 0 |
@@ -1511,6 +1565,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1511 | return -ENODEV; | 1565 | return -ENODEV; |
1512 | #endif | 1566 | #endif |
1513 | 1567 | ||
1568 | if (flags & MSG_ERRQUEUE) { | ||
1569 | err = packet_recv_error(sk, msg, len); | ||
1570 | goto out; | ||
1571 | } | ||
1572 | |||
1514 | /* | 1573 | /* |
1515 | * Call the generic datagram receiver. This handles all sorts | 1574 | * Call the generic datagram receiver. This handles all sorts |
1516 | * of horrible races and re-entrancy so we can forget about it | 1575 | * of horrible races and re-entrancy so we can forget about it |
@@ -1692,9 +1751,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1692 | if (i->alen != dev->addr_len) | 1751 | if (i->alen != dev->addr_len) |
1693 | return -EINVAL; | 1752 | return -EINVAL; |
1694 | if (what > 0) | 1753 | if (what > 0) |
1695 | return dev_mc_add(dev, i->addr, i->alen, 0); | 1754 | return dev_mc_add(dev, i->addr); |
1696 | else | 1755 | else |
1697 | return dev_mc_delete(dev, i->addr, i->alen, 0); | 1756 | return dev_mc_del(dev, i->addr); |
1698 | break; | 1757 | break; |
1699 | case PACKET_MR_PROMISC: | 1758 | case PACKET_MR_PROMISC: |
1700 | return dev_set_promiscuity(dev, what); | 1759 | return dev_set_promiscuity(dev, what); |
@@ -1706,9 +1765,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1706 | if (i->alen != dev->addr_len) | 1765 | if (i->alen != dev->addr_len) |
1707 | return -EINVAL; | 1766 | return -EINVAL; |
1708 | if (what > 0) | 1767 | if (what > 0) |
1709 | return dev_unicast_add(dev, i->addr); | 1768 | return dev_uc_add(dev, i->addr); |
1710 | else | 1769 | else |
1711 | return dev_unicast_delete(dev, i->addr); | 1770 | return dev_uc_del(dev, i->addr); |
1712 | break; | 1771 | break; |
1713 | default: | 1772 | default: |
1714 | break; | 1773 | break; |
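The packet_dev_mc() hunks above are part of a tree-wide rename visible throughout this diff: dev_mc_add()/dev_mc_delete() drop their alen and glbl parameters (the length now comes from dev->addr_len), and the dev_unicast_* entry points become dev_uc_add()/dev_uc_del(). In caller terms, a sketch with hypothetical join/leave helpers:

#include <linux/netdevice.h>

/* Sketch: joining and leaving a link-layer group with the reworked
 * address-list API; no explicit length or "global" flag any more. */
static int join_group(struct net_device *dev, const unsigned char *group)
{
        return dev_mc_add(dev, group);  /* was dev_mc_add(dev, group, ETH_ALEN, 0) */
}

static void leave_group(struct net_device *dev, const unsigned char *group)
{
        dev_mc_del(dev, group);         /* was dev_mc_delete(dev, group, ETH_ALEN, 0) */
}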
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 9b4ced6e0968..c33da6576942 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -46,9 +46,16 @@ struct phonet_net { | |||
46 | 46 | ||
47 | int phonet_net_id __read_mostly; | 47 | int phonet_net_id __read_mostly; |
48 | 48 | ||
49 | static struct phonet_net *phonet_pernet(struct net *net) | ||
50 | { | ||
51 | BUG_ON(!net); | ||
52 | |||
53 | return net_generic(net, phonet_net_id); | ||
54 | } | ||
55 | |||
49 | struct phonet_device_list *phonet_device_list(struct net *net) | 56 | struct phonet_device_list *phonet_device_list(struct net *net) |
50 | { | 57 | { |
51 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 58 | struct phonet_net *pnn = phonet_pernet(net); |
52 | return &pnn->pndevs; | 59 | return &pnn->pndevs; |
53 | } | 60 | } |
54 | 61 | ||
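phonet_pernet() is a thin, BUG_ON-checked wrapper around net_generic(). For context, the registration side of this pattern looks roughly like the following; the foo_* names are hypothetical, the net_generic()/register_pernet_subsys() machinery is real:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {
        int counter;            /* per-namespace state */
};

static int foo_net_id __read_mostly;

static int __net_init foo_init_net(struct net *net)
{
        struct foo_net *fn = net_generic(net, foo_net_id);

        fn->counter = 0;
        return 0;
}

static struct pernet_operations foo_net_ops = {
        .init = foo_init_net,
        .id   = &foo_net_id,
        .size = sizeof(struct foo_net),
};

/* register_pernet_subsys(&foo_net_ops) allocates one struct foo_net per
 * namespace and stores it where net_generic(net, foo_net_id) finds it. */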
@@ -261,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev) | |||
261 | 268 | ||
262 | static void phonet_route_autodel(struct net_device *dev) | 269 | static void phonet_route_autodel(struct net_device *dev) |
263 | { | 270 | { |
264 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | 271 | struct phonet_net *pnn = phonet_pernet(dev_net(dev)); |
265 | unsigned i; | 272 | unsigned i; |
266 | DECLARE_BITMAP(deleted, 64); | 273 | DECLARE_BITMAP(deleted, 64); |
267 | 274 | ||
@@ -313,7 +320,7 @@ static struct notifier_block phonet_device_notifier = { | |||
313 | /* Per-namespace Phonet devices handling */ | 320 | /* Per-namespace Phonet devices handling */ |
314 | static int __net_init phonet_init_net(struct net *net) | 321 | static int __net_init phonet_init_net(struct net *net) |
315 | { | 322 | { |
316 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 323 | struct phonet_net *pnn = phonet_pernet(net); |
317 | 324 | ||
318 | if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) | 325 | if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) |
319 | return -ENOMEM; | 326 | return -ENOMEM; |
@@ -326,7 +333,7 @@ static int __net_init phonet_init_net(struct net *net) | |||
326 | 333 | ||
327 | static void __net_exit phonet_exit_net(struct net *net) | 334 | static void __net_exit phonet_exit_net(struct net *net) |
328 | { | 335 | { |
329 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 336 | struct phonet_net *pnn = phonet_pernet(net); |
330 | struct net_device *dev; | 337 | struct net_device *dev; |
331 | unsigned i; | 338 | unsigned i; |
332 | 339 | ||
@@ -376,7 +383,7 @@ void phonet_device_exit(void) | |||
376 | 383 | ||
377 | int phonet_route_add(struct net_device *dev, u8 daddr) | 384 | int phonet_route_add(struct net_device *dev, u8 daddr) |
378 | { | 385 | { |
379 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | 386 | struct phonet_net *pnn = phonet_pernet(dev_net(dev)); |
380 | struct phonet_routes *routes = &pnn->routes; | 387 | struct phonet_routes *routes = &pnn->routes; |
381 | int err = -EEXIST; | 388 | int err = -EEXIST; |
382 | 389 | ||
@@ -393,7 +400,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr) | |||
393 | 400 | ||
394 | int phonet_route_del(struct net_device *dev, u8 daddr) | 401 | int phonet_route_del(struct net_device *dev, u8 daddr) |
395 | { | 402 | { |
396 | struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id); | 403 | struct phonet_net *pnn = phonet_pernet(dev_net(dev)); |
397 | struct phonet_routes *routes = &pnn->routes; | 404 | struct phonet_routes *routes = &pnn->routes; |
398 | 405 | ||
399 | daddr = daddr >> 2; | 406 | daddr = daddr >> 2; |
@@ -413,7 +420,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr) | |||
413 | 420 | ||
414 | struct net_device *phonet_route_get(struct net *net, u8 daddr) | 421 | struct net_device *phonet_route_get(struct net *net, u8 daddr) |
415 | { | 422 | { |
416 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 423 | struct phonet_net *pnn = phonet_pernet(net); |
417 | struct phonet_routes *routes = &pnn->routes; | 424 | struct phonet_routes *routes = &pnn->routes; |
418 | struct net_device *dev; | 425 | struct net_device *dev; |
419 | 426 | ||
@@ -428,7 +435,7 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr) | |||
428 | 435 | ||
429 | struct net_device *phonet_route_output(struct net *net, u8 daddr) | 436 | struct net_device *phonet_route_output(struct net *net, u8 daddr) |
430 | { | 437 | { |
431 | struct phonet_net *pnn = net_generic(net, phonet_net_id); | 438 | struct phonet_net *pnn = phonet_pernet(net); |
432 | struct phonet_routes *routes = &pnn->routes; | 439 | struct phonet_routes *routes = &pnn->routes; |
433 | struct net_device *dev; | 440 | struct net_device *dev; |
434 | 441 | ||
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index f81862baf4d0..aebfecbdb841 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -158,9 +158,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
158 | unsigned int mask = 0; | 158 | unsigned int mask = 0; |
159 | unsigned long flags; | 159 | unsigned long flags; |
160 | 160 | ||
161 | poll_wait(file, sk->sk_sleep, wait); | 161 | poll_wait(file, sk_sleep(sk), wait); |
162 | 162 | ||
163 | poll_wait(file, &rds_poll_waitq, wait); | 163 | if (rs->rs_seen_congestion) |
164 | poll_wait(file, &rds_poll_waitq, wait); | ||
164 | 165 | ||
165 | read_lock_irqsave(&rs->rs_recv_lock, flags); | 166 | read_lock_irqsave(&rs->rs_recv_lock, flags); |
166 | if (!rs->rs_cong_monitor) { | 167 | if (!rs->rs_cong_monitor) { |
@@ -182,6 +183,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock, | |||
182 | mask |= (POLLOUT | POLLWRNORM); | 183 | mask |= (POLLOUT | POLLWRNORM); |
183 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); | 184 | read_unlock_irqrestore(&rs->rs_recv_lock, flags); |
184 | 185 | ||
186 | /* clear state any time we wake a seen-congested socket */ | ||
187 | if (mask) | ||
188 | rs->rs_seen_congestion = 0; | ||
189 | |||
185 | return mask; | 190 | return mask; |
186 | } | 191 | } |
187 | 192 | ||
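The rds_poll() change keeps sockets off the global rds_poll_waitq unless they have actually seen congestion (rs_seen_congestion is set in the rds_sendmsg() hunk further down and cleared here on any wake). The general shape of that conditional-waitqueue idiom, with the predicate helpers hypothetical:

#include <linux/poll.h>
#include <linux/wait.h>
#include <net/sock.h>

static DECLARE_WAIT_QUEUE_HEAD(shared_waitq);   /* expensive, global */

static int foo_saw_congestion(struct sock *sk);
static void foo_clear_congestion(struct sock *sk);

static unsigned int foo_poll(struct file *file, struct socket *sock,
                             poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk_sleep(sk), wait);    /* always: per-socket queue */
        if (foo_saw_congestion(sk))             /* hypothetical predicate */
                poll_wait(file, &shared_waitq, wait);

        /* ... compute mask from socket state ... */

        if (mask)
                foo_clear_congestion(sk);       /* hypothetical */
        return mask;
}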
@@ -447,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, | |||
447 | struct rds_info_lengths *lens) | 452 | struct rds_info_lengths *lens) |
448 | { | 453 | { |
449 | struct rds_sock *rs; | 454 | struct rds_sock *rs; |
450 | struct sock *sk; | ||
451 | struct rds_incoming *inc; | 455 | struct rds_incoming *inc; |
452 | unsigned long flags; | 456 | unsigned long flags; |
453 | unsigned int total = 0; | 457 | unsigned int total = 0; |
@@ -457,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len, | |||
457 | spin_lock_irqsave(&rds_sock_lock, flags); | 461 | spin_lock_irqsave(&rds_sock_lock, flags); |
458 | 462 | ||
459 | list_for_each_entry(rs, &rds_sock_list, rs_item) { | 463 | list_for_each_entry(rs, &rds_sock_list, rs_item) { |
460 | sk = rds_rs_to_sk(rs); | ||
461 | read_lock(&rs->rs_recv_lock); | 464 | read_lock(&rs->rs_recv_lock); |
462 | 465 | ||
463 | /* XXX too lazy to maintain counts.. */ | 466 | /* XXX too lazy to maintain counts.. */ |
diff --git a/net/rds/cong.c b/net/rds/cong.c index f1da27ceb064..0871a29f0780 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -219,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map) | |||
219 | spin_lock_irqsave(&rds_cong_lock, flags); | 219 | spin_lock_irqsave(&rds_cong_lock, flags); |
220 | 220 | ||
221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { | 221 | list_for_each_entry(conn, &map->m_conn_list, c_map_item) { |
222 | if (conn->c_loopback) | ||
223 | continue; | ||
224 | if (!test_and_set_bit(0, &conn->c_map_queued)) { | 222 | if (!test_and_set_bit(0, &conn->c_map_queued)) { |
225 | rds_stats_inc(s_cong_update_queued); | 223 | rds_stats_inc(s_cong_update_queued); |
226 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 224 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 88d0856cb797..10ed0d55f759 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -204,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) | |||
204 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); | 204 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); |
205 | break; | 205 | break; |
206 | default: | 206 | default: |
207 | rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u " | 207 | rdsdebug("Fatal QP Event %u " |
208 | "- connection %pI4->%pI4, reconnecting\n", | 208 | "- connection %pI4->%pI4, reconnecting\n", |
209 | event->event, &conn->c_laddr, &conn->c_faddr); | 209 | event->event, &conn->c_laddr, &conn->c_faddr); |
210 | rds_conn_drop(conn); | ||
210 | break; | 211 | break; |
211 | } | 212 | } |
212 | } | 213 | } |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 059989fdb7d7..a54cd63f9e35 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -235,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) | |||
235 | { | 235 | { |
236 | flush_workqueue(rds_wq); | 236 | flush_workqueue(rds_wq); |
237 | rds_ib_flush_mr_pool(pool, 1); | 237 | rds_ib_flush_mr_pool(pool, 1); |
238 | BUG_ON(atomic_read(&pool->item_count)); | 238 | WARN_ON(atomic_read(&pool->item_count)); |
239 | BUG_ON(atomic_read(&pool->free_pinned)); | 239 | WARN_ON(atomic_read(&pool->free_pinned)); |
240 | kfree(pool); | 240 | kfree(pool); |
241 | } | 241 | } |
242 | 242 | ||
@@ -441,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr) | |||
441 | 441 | ||
442 | /* FIXME we need a way to tell a r/w MR | 442 | /* FIXME we need a way to tell a r/w MR |
443 | * from a r/o MR */ | 443 | * from a r/o MR */ |
444 | BUG_ON(in_interrupt()); | ||
444 | set_page_dirty(page); | 445 | set_page_dirty(page); |
445 | put_page(page); | 446 | put_page(page); |
446 | } | 447 | } |
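The BUG_ON(in_interrupt()) added here (and in the matching rds_rdma_free_op() hunk below) makes an implicit rule explicit: set_page_dirty() can take sleeping locks, so pages a remote peer may have written must be released from process context. Condensed to a sketch:

#include <linux/mm.h>
#include <linux/hardirq.h>

/* Release a page that RDMA may have written to. Must run in process
 * context: set_page_dirty() is not safe from interrupt context. */
static void unpin_page(struct page *page, bool remote_wrote)
{
        if (remote_wrote) {
                BUG_ON(in_interrupt());
                set_page_dirty(page);
        }
        put_page(page);
}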
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index c7dd11b835f0..c74e9904a6b2 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -469,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi | |||
469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
470 | 470 | ||
471 | rds_ib_stats_inc(s_ib_ack_send_failure); | 471 | rds_ib_stats_inc(s_ib_ack_send_failure); |
472 | /* Need to finesse this later. */ | 472 | |
473 | BUG(); | 473 | rds_ib_conn_error(ic->conn, "sending ack failed\n"); |
474 | } else | 474 | } else |
475 | rds_ib_stats_inc(s_ib_ack_sent); | 475 | rds_ib_stats_inc(s_ib_ack_sent); |
476 | } | 476 | } |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index a10fab6886d1..17fa80803ab0 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
243 | struct rds_message *rm; | 243 | struct rds_message *rm; |
244 | 244 | ||
245 | rm = rds_send_get_message(conn, send->s_op); | 245 | rm = rds_send_get_message(conn, send->s_op); |
246 | if (rm) | 246 | if (rm) { |
247 | if (rm->m_rdma_op) | ||
248 | rds_ib_send_unmap_rdma(ic, rm->m_rdma_op); | ||
247 | rds_ib_send_rdma_complete(rm, wc.status); | 249 | rds_ib_send_rdma_complete(rm, wc.status); |
250 | rds_message_put(rm); | ||
251 | } | ||
248 | } | 252 | } |
249 | 253 | ||
250 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; | 254 | oldest = (oldest + 1) % ic->i_send_ring.w_nr; |
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
482 | BUG_ON(off % RDS_FRAG_SIZE); | 486 | BUG_ON(off % RDS_FRAG_SIZE); |
483 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); | 487 | BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); |
484 | 488 | ||
489 | /* Do not send cong updates to IB loopback */ | ||
490 | if (conn->c_loopback | ||
491 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | ||
492 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | ||
493 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | ||
494 | } | ||
495 | |||
485 | /* FIXME we may overallocate here */ | 496 | /* FIXME we may overallocate here */ |
486 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) | 497 | if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) |
487 | i = 1; | 498 | i = 1; |
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
574 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 585 | rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
575 | adv_credits += posted; | 586 | adv_credits += posted; |
576 | BUG_ON(adv_credits > 255); | 587 | BUG_ON(adv_credits > 255); |
577 | } else if (ic->i_rm != rm) | 588 | } |
578 | BUG(); | ||
579 | 589 | ||
580 | send = &ic->i_sends[pos]; | 590 | send = &ic->i_sends[pos]; |
581 | first = send; | 591 | first = send; |
@@ -714,8 +724,8 @@ add_header: | |||
714 | ic->i_rm = prev->s_rm; | 724 | ic->i_rm = prev->s_rm; |
715 | prev->s_rm = NULL; | 725 | prev->s_rm = NULL; |
716 | } | 726 | } |
717 | /* Finesse this later */ | 727 | |
718 | BUG(); | 728 | rds_ib_conn_error(ic->conn, "ib_post_send failed\n"); |
719 | goto out; | 729 | goto out; |
720 | } | 730 | } |
721 | 731 | ||
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 3e9460f935d8..a9d951b4fbae 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -157,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data) | |||
157 | case IB_EVENT_QP_REQ_ERR: | 157 | case IB_EVENT_QP_REQ_ERR: |
158 | case IB_EVENT_QP_FATAL: | 158 | case IB_EVENT_QP_FATAL: |
159 | default: | 159 | default: |
160 | rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n", | 160 | rdsdebug("Fatal QP Event %u " |
161 | "- connection %pI4->%pI4, reconnecting\n", | ||
161 | event->event, &conn->c_laddr, | 162 | event->event, &conn->c_laddr, |
162 | &conn->c_faddr); | 163 | &conn->c_faddr); |
164 | rds_conn_drop(conn); | ||
163 | break; | 165 | break; |
164 | } | 166 | } |
165 | } | 167 | } |
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index da43ee840ca3..3d479067d54d 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c | |||
@@ -469,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi | |||
469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); | 469 | set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); |
470 | 470 | ||
471 | rds_iw_stats_inc(s_iw_ack_send_failure); | 471 | rds_iw_stats_inc(s_iw_ack_send_failure); |
472 | /* Need to finesse this later. */ | 472 | |
473 | BUG(); | 473 | rds_iw_conn_error(ic->conn, "sending ack failed\n"); |
474 | } else | 474 | } else |
475 | rds_iw_stats_inc(s_iw_ack_sent); | 475 | rds_iw_stats_inc(s_iw_ack_sent); |
476 | } | 476 | } |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 1379e9d66a78..52182ff7519e 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); | 616 | rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); |
617 | adv_credits += posted; | 617 | adv_credits += posted; |
618 | BUG_ON(adv_credits > 255); | 618 | BUG_ON(adv_credits > 255); |
619 | } else if (ic->i_rm != rm) | 619 | } |
620 | BUG(); | ||
621 | 620 | ||
622 | send = &ic->i_sends[pos]; | 621 | send = &ic->i_sends[pos]; |
623 | first = send; | 622 | first = send; |
diff --git a/net/rds/loop.c b/net/rds/loop.c index 0d7a159158b8..dd9879379457 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -81,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn, | |||
81 | struct rds_cong_map *map, | 81 | struct rds_cong_map *map, |
82 | unsigned long offset) | 82 | unsigned long offset) |
83 | { | 83 | { |
84 | unsigned long i; | ||
85 | |||
86 | BUG_ON(offset); | 84 | BUG_ON(offset); |
87 | BUG_ON(map != conn->c_lcong); | 85 | BUG_ON(map != conn->c_lcong); |
88 | 86 | ||
89 | for (i = 0; i < RDS_CONG_MAP_PAGES; i++) { | ||
90 | memcpy((void *)conn->c_fcong->m_page_addrs[i], | ||
91 | (void *)map->m_page_addrs[i], PAGE_SIZE); | ||
92 | } | ||
93 | |||
94 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 87 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
95 | 88 | ||
96 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 89 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; |
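Taken together, the cong.c, rds_ib_xmit() and loop.c hunks move the loopback special case into the transmit path: congestion updates are queued for loopback connections like any other, but on a loopback connection the far map is the local map, so there is nothing to copy and the transport just marks the whole map updated. The shortcut the IB transport now takes, restated:

/* Sketch: loopback congestion updates never hit the wire. */
if (conn->c_loopback &&
    (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP)) {
        rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
        return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}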
diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 5ce9437cad67..75fd13bb631b 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c | |||
@@ -439,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro) | |||
439 | /* Mark page dirty if it was possibly modified, which | 439 | /* Mark page dirty if it was possibly modified, which |
440 | * is the case for a RDMA_READ which copies from remote | 440 | * is the case for a RDMA_READ which copies from remote |
441 | * to local memory */ | 441 | * to local memory */ |
442 | if (!ro->r_write) | 442 | if (!ro->r_write) { |
443 | BUG_ON(in_interrupt()); | ||
443 | set_page_dirty(page); | 444 | set_page_dirty(page); |
445 | } | ||
444 | put_page(page); | 446 | put_page(page); |
445 | } | 447 | } |
446 | 448 | ||
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 7b155081b4dc..e599ba2f950d 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
101 | break; | 101 | break; |
102 | 102 | ||
103 | case RDMA_CM_EVENT_DISCONNECTED: | 103 | case RDMA_CM_EVENT_DISCONNECTED: |
104 | printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection " | 104 | rdsdebug("DISCONNECT event - dropping connection " |
105 | "%pI4->%pI4\n", &conn->c_laddr, | 105 | "%pI4->%pI4\n", &conn->c_laddr, |
106 | &conn->c_faddr); | 106 | &conn->c_faddr); |
107 | rds_conn_drop(conn); | 107 | rds_conn_drop(conn); |
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
109 | 109 | ||
110 | default: | 110 | default: |
111 | /* things like device disconnect? */ | 111 | /* things like device disconnect? */ |
112 | printk(KERN_ERR "unknown event %u\n", event->event); | 112 | printk(KERN_ERR "RDS: unknown event %u!\n", event->event); |
113 | BUG(); | ||
114 | break; | 113 | break; |
115 | } | 114 | } |
116 | 115 | ||
diff --git a/net/rds/rds.h b/net/rds/rds.h index 85d6f897ecc7..c224b5bb3ba9 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
@@ -388,6 +388,8 @@ struct rds_sock { | |||
388 | 388 | ||
389 | /* flag indicating we were congested or not */ | 389 | /* flag indicating we were congested or not */ |
390 | int rs_congested; | 390 | int rs_congested; |
391 | /* seen congestion (ENOBUFS) when sending? */ | ||
392 | int rs_seen_congestion; | ||
391 | 393 | ||
392 | /* rs_lock protects all these adjacent members before the newline */ | 394 | /* rs_lock protects all these adjacent members before the newline */ |
393 | spinlock_t rs_lock; | 395 | spinlock_t rs_lock; |
@@ -490,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs); | |||
490 | void rds_wake_sk_sleep(struct rds_sock *rs); | 492 | void rds_wake_sk_sleep(struct rds_sock *rs); |
491 | static inline void __rds_wake_sk_sleep(struct sock *sk) | 493 | static inline void __rds_wake_sk_sleep(struct sock *sk) |
492 | { | 494 | { |
493 | wait_queue_head_t *waitq = sk->sk_sleep; | 495 | wait_queue_head_t *waitq = sk_sleep(sk); |
494 | 496 | ||
495 | if (!sock_flag(sk, SOCK_DEAD) && waitq) | 497 | if (!sock_flag(sk, SOCK_DEAD) && waitq) |
496 | wake_up(waitq); | 498 | wake_up(waitq); |
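Every sk->sk_sleep dereference in this series becomes an sk_sleep(sk) call. At this point in the tree the helper is just an accessor; the point of the indirection is that the waitqueue can later move out of struct sock without touching the many callers converted here:

/* include/net/sock.h, roughly: */
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
        return sk->sk_sleep;
}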
diff --git a/net/rds/recv.c b/net/rds/recv.c index e2a2b9344f7b..795a00b7f2cb 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
432 | break; | 432 | break; |
433 | } | 433 | } |
434 | 434 | ||
435 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, | 435 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), |
436 | (!list_empty(&rs->rs_notify_queue) || | 436 | (!list_empty(&rs->rs_notify_queue) || |
437 | rs->rs_cong_notify || | 437 | rs->rs_cong_notify || |
438 | rds_next_incoming(rs, &inc)), timeo); | 438 | rds_next_incoming(rs, &inc)), timeo); |
diff --git a/net/rds/send.c b/net/rds/send.c index f04b929ded92..9c1c6bcaa6c9 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -508,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message); | |||
508 | */ | 508 | */ |
509 | void rds_send_remove_from_sock(struct list_head *messages, int status) | 509 | void rds_send_remove_from_sock(struct list_head *messages, int status) |
510 | { | 510 | { |
511 | unsigned long flags = 0; /* silence gcc :P */ | 511 | unsigned long flags; |
512 | struct rds_sock *rs = NULL; | 512 | struct rds_sock *rs = NULL; |
513 | struct rds_message *rm; | 513 | struct rds_message *rm; |
514 | 514 | ||
515 | local_irq_save(flags); | ||
516 | while (!list_empty(messages)) { | 515 | while (!list_empty(messages)) { |
516 | int was_on_sock = 0; | ||
517 | |||
517 | rm = list_entry(messages->next, struct rds_message, | 518 | rm = list_entry(messages->next, struct rds_message, |
518 | m_conn_item); | 519 | m_conn_item); |
519 | list_del_init(&rm->m_conn_item); | 520 | list_del_init(&rm->m_conn_item); |
@@ -528,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
528 | * while we're messing with it. It does not prevent the | 529 | * while we're messing with it. It does not prevent the |
529 | * message from being removed from the socket, though. | 530 | * message from being removed from the socket, though. |
530 | */ | 531 | */ |
531 | spin_lock(&rm->m_rs_lock); | 532 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
532 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) | 533 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) |
533 | goto unlock_and_drop; | 534 | goto unlock_and_drop; |
534 | 535 | ||
535 | if (rs != rm->m_rs) { | 536 | if (rs != rm->m_rs) { |
536 | if (rs) { | 537 | if (rs) { |
537 | spin_unlock(&rs->rs_lock); | ||
538 | rds_wake_sk_sleep(rs); | 538 | rds_wake_sk_sleep(rs); |
539 | sock_put(rds_rs_to_sk(rs)); | 539 | sock_put(rds_rs_to_sk(rs)); |
540 | } | 540 | } |
541 | rs = rm->m_rs; | 541 | rs = rm->m_rs; |
542 | spin_lock(&rs->rs_lock); | ||
543 | sock_hold(rds_rs_to_sk(rs)); | 542 | sock_hold(rds_rs_to_sk(rs)); |
544 | } | 543 | } |
544 | spin_lock(&rs->rs_lock); | ||
545 | 545 | ||
546 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { | 546 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { |
547 | struct rds_rdma_op *ro = rm->m_rdma_op; | 547 | struct rds_rdma_op *ro = rm->m_rdma_op; |
@@ -558,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status) | |||
558 | notifier->n_status = status; | 558 | notifier->n_status = status; |
559 | rm->m_rdma_op->r_notifier = NULL; | 559 | rm->m_rdma_op->r_notifier = NULL; |
560 | } | 560 | } |
561 | rds_message_put(rm); | 561 | was_on_sock = 1; |
562 | rm->m_rs = NULL; | 562 | rm->m_rs = NULL; |
563 | } | 563 | } |
564 | spin_unlock(&rs->rs_lock); | ||
564 | 565 | ||
565 | unlock_and_drop: | 566 | unlock_and_drop: |
566 | spin_unlock(&rm->m_rs_lock); | 567 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
567 | rds_message_put(rm); | 568 | rds_message_put(rm); |
569 | if (was_on_sock) | ||
570 | rds_message_put(rm); | ||
568 | } | 571 | } |
569 | 572 | ||
570 | if (rs) { | 573 | if (rs) { |
571 | spin_unlock(&rs->rs_lock); | ||
572 | rds_wake_sk_sleep(rs); | 574 | rds_wake_sk_sleep(rs); |
573 | sock_put(rds_rs_to_sk(rs)); | 575 | sock_put(rds_rs_to_sk(rs)); |
574 | } | 576 | } |
575 | local_irq_restore(flags); | ||
576 | } | 577 | } |
577 | 578 | ||
578 | /* | 579 | /* |
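Two fixes share the rds_send_remove_from_sock() hunk above: the function-wide local_irq_save() is gone (m_rs_lock now saves IRQ state itself, and rs_lock is taken per message), and the socket's message reference is dropped via was_on_sock only after the locks are released, never under them. The deferred-put shape in miniature, with all foo_* names hypothetical:

#include <linux/spinlock.h>

struct foo_obj {
        spinlock_t lock;
        unsigned long state;
        /* ... refcount, list linkage ... */
};

/* Sketch: never run a potential destructor under the list lock. */
static void remove_and_put(struct foo_obj *obj)
{
        unsigned long flags;
        int was_on_list = 0;

        spin_lock_irqsave(&obj->lock, flags);
        if (test_and_clear_bit(0 /* FOO_ON_LIST */, &obj->state))
                was_on_list = 1;        /* defer the list's reference */
        spin_unlock_irqrestore(&obj->lock, flags);

        foo_obj_put(obj);               /* the caller's reference */
        if (was_on_list)
                foo_obj_put(obj);       /* the reference the list held */
}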
@@ -634,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
634 | list_move(&rm->m_sock_item, &list); | 635 | list_move(&rm->m_sock_item, &list); |
635 | rds_send_sndbuf_remove(rs, rm); | 636 | rds_send_sndbuf_remove(rs, rm); |
636 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | 637 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); |
637 | |||
638 | /* If this is a RDMA operation, notify the app. */ | ||
639 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
640 | } | 638 | } |
641 | 639 | ||
642 | /* order flag updates with the rs lock */ | 640 | /* order flag updates with the rs lock */ |
@@ -645,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
645 | 643 | ||
646 | spin_unlock_irqrestore(&rs->rs_lock, flags); | 644 | spin_unlock_irqrestore(&rs->rs_lock, flags); |
647 | 645 | ||
648 | if (wake) | ||
649 | rds_wake_sk_sleep(rs); | ||
650 | |||
651 | conn = NULL; | 646 | conn = NULL; |
652 | 647 | ||
653 | /* now remove the messages from the conn list as needed */ | 648 | /* now remove the messages from the conn list as needed */ |
@@ -655,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
655 | /* We do this here rather than in the loop above, so that | 650 | /* We do this here rather than in the loop above, so that |
656 | * we don't have to nest m_rs_lock under rs->rs_lock */ | 651 | * we don't have to nest m_rs_lock under rs->rs_lock */ |
657 | spin_lock_irqsave(&rm->m_rs_lock, flags2); | 652 | spin_lock_irqsave(&rm->m_rs_lock, flags2); |
653 | /* If this is a RDMA operation, notify the app. */ | ||
654 | spin_lock(&rs->rs_lock); | ||
655 | __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED); | ||
656 | spin_unlock(&rs->rs_lock); | ||
658 | rm->m_rs = NULL; | 657 | rm->m_rs = NULL; |
659 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); | 658 | spin_unlock_irqrestore(&rm->m_rs_lock, flags2); |
660 | 659 | ||
@@ -683,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | |||
683 | if (conn) | 682 | if (conn) |
684 | spin_unlock_irqrestore(&conn->c_lock, flags); | 683 | spin_unlock_irqrestore(&conn->c_lock, flags); |
685 | 684 | ||
685 | if (wake) | ||
686 | rds_wake_sk_sleep(rs); | ||
687 | |||
686 | while (!list_empty(&list)) { | 688 | while (!list_empty(&list)) { |
687 | rm = list_entry(list.next, struct rds_message, m_sock_item); | 689 | rm = list_entry(list.next, struct rds_message, m_sock_item); |
688 | list_del_init(&rm->m_sock_item); | 690 | list_del_init(&rm->m_sock_item); |
@@ -816,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
816 | int ret = 0; | 818 | int ret = 0; |
817 | int queued = 0, allocated_mr = 0; | 819 | int queued = 0, allocated_mr = 0; |
818 | int nonblock = msg->msg_flags & MSG_DONTWAIT; | 820 | int nonblock = msg->msg_flags & MSG_DONTWAIT; |
819 | long timeo = sock_rcvtimeo(sk, nonblock); | 821 | long timeo = sock_sndtimeo(sk, nonblock); |
820 | 822 | ||
821 | /* Mirror Linux UDP mirror of BSD error message compatibility */ | 823 | /* Mirror Linux UDP mirror of BSD error message compatibility */ |
822 | /* XXX: Perhaps MSG_MORE someday */ | 824 | /* XXX: Perhaps MSG_MORE someday */ |
@@ -895,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
895 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); | 897 | queue_delayed_work(rds_wq, &conn->c_conn_w, 0); |
896 | 898 | ||
897 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | 899 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); |
898 | if (ret) | 900 | if (ret) { |
901 | rs->rs_seen_congestion = 1; | ||
899 | goto out; | 902 | goto out; |
903 | } | ||
900 | 904 | ||
901 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, | 905 | while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, |
902 | dport, &queued)) { | 906 | dport, &queued)) { |
@@ -911,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
911 | goto out; | 915 | goto out; |
912 | } | 916 | } |
913 | 917 | ||
914 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, | 918 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), |
915 | rds_send_queue_rm(rs, conn, rm, | 919 | rds_send_queue_rm(rs, conn, rm, |
916 | rs->rs_bound_port, | 920 | rs->rs_bound_port, |
917 | dport, | 921 | dport, |
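Note the one-line fix buried in the rds_sendmsg() hunk above: the send path was bounding its wait with sock_rcvtimeo(), i.e. SO_RCVTIMEO, when it should honor SO_SNDTIMEO. The two helpers are symmetric, roughly:

/* include/net/sock.h, roughly: */
static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
        return noblock ? 0 : sk->sk_sndtimeo;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
        return noblock ? 0 : sk->sk_rcvtimeo;
}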
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index e08ec912d8b0..1aba6878fa5d 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
@@ -98,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, | |||
98 | goto out; | 98 | goto out; |
99 | } | 99 | } |
100 | 100 | ||
101 | rds_stats_add(s_copy_to_user, to_copy); | ||
101 | size -= to_copy; | 102 | size -= to_copy; |
102 | ret += to_copy; | 103 | ret += to_copy; |
103 | skb_off += to_copy; | 104 | skb_off += to_copy; |
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 34fdcc059e54..a28b895ff0d1 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c | |||
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk) | |||
240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); | 240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); |
241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); | 241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); |
242 | 242 | ||
243 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 243 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) |
244 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | ||
245 | |||
244 | out: | 246 | out: |
245 | read_unlock(&sk->sk_callback_lock); | 247 | read_unlock(&sk->sk_callback_lock); |
246 | 248 | ||
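The new guard in rds_tcp_write_space() stops the send worker from being requeued on every ACK; it fires only once at least half the send buffer has drained, which is the same half-full threshold the core's sock_writeable() uses:

/* include/net/sock.h, roughly: */
static inline int sock_writeable(const struct sock *sk)
{
        return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}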
diff --git a/net/rds/threads.c b/net/rds/threads.c index 00fa10e59af8..786c20eaaf5e 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -259,7 +259,7 @@ void rds_threads_exit(void) | |||
259 | 259 | ||
260 | int __init rds_threads_init(void) | 260 | int __init rds_threads_init(void) |
261 | { | 261 | { |
262 | rds_wq = create_singlethread_workqueue("krdsd"); | 262 | rds_wq = create_workqueue("krdsd"); |
263 | if (rds_wq == NULL) | 263 | if (rds_wq == NULL) |
264 | return -ENOMEM; | 264 | return -ENOMEM; |
265 | 265 | ||
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index a9fa86f65983..51875a0c5d48 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -629,6 +629,49 @@ static ssize_t rfkill_persistent_show(struct device *dev, | |||
629 | return sprintf(buf, "%d\n", rfkill->persistent); | 629 | return sprintf(buf, "%d\n", rfkill->persistent); |
630 | } | 630 | } |
631 | 631 | ||
632 | static ssize_t rfkill_hard_show(struct device *dev, | ||
633 | struct device_attribute *attr, | ||
634 | char *buf) | ||
635 | { | ||
636 | struct rfkill *rfkill = to_rfkill(dev); | ||
637 | |||
638 | return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 ); | ||
639 | } | ||
640 | |||
641 | static ssize_t rfkill_soft_show(struct device *dev, | ||
642 | struct device_attribute *attr, | ||
643 | char *buf) | ||
644 | { | ||
645 | struct rfkill *rfkill = to_rfkill(dev); | ||
646 | |||
647 | return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 ); | ||
648 | } | ||
649 | |||
650 | static ssize_t rfkill_soft_store(struct device *dev, | ||
651 | struct device_attribute *attr, | ||
652 | const char *buf, size_t count) | ||
653 | { | ||
654 | struct rfkill *rfkill = to_rfkill(dev); | ||
655 | unsigned long state; | ||
656 | int err; | ||
657 | |||
658 | if (!capable(CAP_NET_ADMIN)) | ||
659 | return -EPERM; | ||
660 | |||
661 | err = strict_strtoul(buf, 0, &state); | ||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | if (state > 1 ) | ||
666 | return -EINVAL; | ||
667 | |||
668 | mutex_lock(&rfkill_global_mutex); | ||
669 | rfkill_set_block(rfkill, state); | ||
670 | mutex_unlock(&rfkill_global_mutex); | ||
671 | |||
672 | return err ?: count; | ||
673 | } | ||
674 | |||
632 | static u8 user_state_from_blocked(unsigned long state) | 675 | static u8 user_state_from_blocked(unsigned long state) |
633 | { | 676 | { |
634 | if (state & RFKILL_BLOCK_HW) | 677 | if (state & RFKILL_BLOCK_HW) |
@@ -644,14 +687,8 @@ static ssize_t rfkill_state_show(struct device *dev, | |||
644 | char *buf) | 687 | char *buf) |
645 | { | 688 | { |
646 | struct rfkill *rfkill = to_rfkill(dev); | 689 | struct rfkill *rfkill = to_rfkill(dev); |
647 | unsigned long flags; | ||
648 | u32 state; | ||
649 | |||
650 | spin_lock_irqsave(&rfkill->lock, flags); | ||
651 | state = rfkill->state; | ||
652 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
653 | 690 | ||
654 | return sprintf(buf, "%d\n", user_state_from_blocked(state)); | 691 | return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state)); |
655 | } | 692 | } |
656 | 693 | ||
657 | static ssize_t rfkill_state_store(struct device *dev, | 694 | static ssize_t rfkill_state_store(struct device *dev, |
@@ -701,6 +738,8 @@ static struct device_attribute rfkill_dev_attrs[] = { | |||
701 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), | 738 | __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), |
702 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | 739 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), |
703 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | 740 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), |
741 | __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store), | ||
742 | __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL), | ||
704 | __ATTR_NULL | 743 | __ATTR_NULL |
705 | }; | 744 | }; |
706 | 745 | ||
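The new soft and hard attributes expose the two block bits as plain 0/1 files, unlike the legacy combined state attribute; hard stays read-only since a hardware kill switch cannot be overridden from software. A userspace sketch (the rfkill0 path is illustrative, devices enumerate under /sys/class/rfkill):

#include <fcntl.h>
#include <unistd.h>

/* Soft-block or unblock an rfkill device via its new sysfs knob. */
static int soft_block(int block)
{
        int fd = open("/sys/class/rfkill/rfkill0/soft", O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -1;
        n = write(fd, block ? "1" : "0", 1);
        close(fd);
        return n == 1 ? 0 : -1;
}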
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 4fb711a035f4..8e45e76a95f5 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -845,7 +845,7 @@ rose_try_next_neigh: | |||
845 | DEFINE_WAIT(wait); | 845 | DEFINE_WAIT(wait); |
846 | 846 | ||
847 | for (;;) { | 847 | for (;;) { |
848 | prepare_to_wait(sk->sk_sleep, &wait, | 848 | prepare_to_wait(sk_sleep(sk), &wait, |
849 | TASK_INTERRUPTIBLE); | 849 | TASK_INTERRUPTIBLE); |
850 | if (sk->sk_state != TCP_SYN_SENT) | 850 | if (sk->sk_state != TCP_SYN_SENT) |
851 | break; | 851 | break; |
@@ -858,7 +858,7 @@ rose_try_next_neigh: | |||
858 | err = -ERESTARTSYS; | 858 | err = -ERESTARTSYS; |
859 | break; | 859 | break; |
860 | } | 860 | } |
861 | finish_wait(sk->sk_sleep, &wait); | 861 | finish_wait(sk_sleep(sk), &wait); |
862 | 862 | ||
863 | if (err) | 863 | if (err) |
864 | goto out_release; | 864 | goto out_release; |
@@ -911,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags) | |||
911 | * hooked into the SABM we saved | 911 | * hooked into the SABM we saved |
912 | */ | 912 | */ |
913 | for (;;) { | 913 | for (;;) { |
914 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 914 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
915 | 915 | ||
916 | skb = skb_dequeue(&sk->sk_receive_queue); | 916 | skb = skb_dequeue(&sk->sk_receive_queue); |
917 | if (skb) | 917 | if (skb) |
@@ -930,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags) | |||
930 | err = -ERESTARTSYS; | 930 | err = -ERESTARTSYS; |
931 | break; | 931 | break; |
932 | } | 932 | } |
933 | finish_wait(sk->sk_sleep, &wait); | 933 | finish_wait(sk_sleep(sk), &wait); |
934 | if (err) | 934 | if (err) |
935 | goto out_release; | 935 | goto out_release; |
936 | 936 | ||
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index c060095b27ce..c432d76f415e 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -65,7 +65,7 @@ static void rxrpc_write_space(struct sock *sk) | |||
65 | read_lock(&sk->sk_callback_lock); | 65 | read_lock(&sk->sk_callback_lock); |
66 | if (rxrpc_writable(sk)) { | 66 | if (rxrpc_writable(sk)) { |
67 | if (sk_has_sleeper(sk)) | 67 | if (sk_has_sleeper(sk)) |
68 | wake_up_interruptible(sk->sk_sleep); | 68 | wake_up_interruptible(sk_sleep(sk)); |
69 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 69 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
70 | } | 70 | } |
71 | read_unlock(&sk->sk_callback_lock); | 71 | read_unlock(&sk->sk_callback_lock); |
@@ -589,7 +589,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock, | |||
589 | unsigned int mask; | 589 | unsigned int mask; |
590 | struct sock *sk = sock->sk; | 590 | struct sock *sk = sock->sk; |
591 | 591 | ||
592 | sock_poll_wait(file, sk->sk_sleep, wait); | 592 | sock_poll_wait(file, sk_sleep(sk), wait); |
593 | mask = 0; | 593 | mask = 0; |
594 | 594 | ||
595 | /* the socket is readable if there are any messages waiting on the Rx | 595 | /* the socket is readable if there are any messages waiting on the Rx |
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 60c2b94e6b54..0c65013e3bfe 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -91,7 +91,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
91 | 91 | ||
92 | /* wait for a message to turn up */ | 92 | /* wait for a message to turn up */ |
93 | release_sock(&rx->sk); | 93 | release_sock(&rx->sk); |
94 | prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait, | 94 | prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait, |
95 | TASK_INTERRUPTIBLE); | 95 | TASK_INTERRUPTIBLE); |
96 | ret = sock_error(&rx->sk); | 96 | ret = sock_error(&rx->sk); |
97 | if (ret) | 97 | if (ret) |
@@ -102,7 +102,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
102 | goto wait_interrupted; | 102 | goto wait_interrupted; |
103 | timeo = schedule_timeout(timeo); | 103 | timeo = schedule_timeout(timeo); |
104 | } | 104 | } |
105 | finish_wait(rx->sk.sk_sleep, &wait); | 105 | finish_wait(sk_sleep(&rx->sk), &wait); |
106 | lock_sock(&rx->sk); | 106 | lock_sock(&rx->sk); |
107 | continue; | 107 | continue; |
108 | } | 108 | } |
@@ -356,7 +356,7 @@ csum_copy_error: | |||
356 | wait_interrupted: | 356 | wait_interrupted: |
357 | ret = sock_intr_errno(timeo); | 357 | ret = sock_intr_errno(timeo); |
358 | wait_error: | 358 | wait_error: |
359 | finish_wait(rx->sk.sk_sleep, &wait); | 359 | finish_wait(sk_sleep(&rx->sk), &wait); |
360 | if (continue_call) | 360 | if (continue_call) |
361 | rxrpc_put_call(continue_call); | 361 | rxrpc_put_call(continue_call); |
362 | if (copied) | 362 | if (copied) |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index d8e0171d9a4b..019045174fc3 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -668,7 +668,8 @@ nlmsg_failure: | |||
668 | } | 668 | } |
669 | 669 | ||
670 | static int | 670 | static int |
671 | act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) | 671 | act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n, |
672 | struct tc_action *a, int event) | ||
672 | { | 673 | { |
673 | struct sk_buff *skb; | 674 | struct sk_buff *skb; |
674 | 675 | ||
@@ -680,7 +681,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event) | |||
680 | return -EINVAL; | 681 | return -EINVAL; |
681 | } | 682 | } |
682 | 683 | ||
683 | return rtnl_unicast(skb, &init_net, pid); | 684 | return rtnl_unicast(skb, net, pid); |
684 | } | 685 | } |
685 | 686 | ||
686 | static struct tc_action * | 687 | static struct tc_action * |
@@ -750,7 +751,8 @@ static struct tc_action *create_a(int i) | |||
750 | return act; | 751 | return act; |
751 | } | 752 | } |
752 | 753 | ||
753 | static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | 754 | static int tca_action_flush(struct net *net, struct nlattr *nla, |
755 | struct nlmsghdr *n, u32 pid) | ||
754 | { | 756 | { |
755 | struct sk_buff *skb; | 757 | struct sk_buff *skb; |
756 | unsigned char *b; | 758 | unsigned char *b; |
@@ -809,7 +811,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid) | |||
809 | nlh->nlmsg_flags |= NLM_F_ROOT; | 811 | nlh->nlmsg_flags |= NLM_F_ROOT; |
810 | module_put(a->ops->owner); | 812 | module_put(a->ops->owner); |
811 | kfree(a); | 813 | kfree(a); |
812 | err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 814 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
813 | if (err > 0) | 815 | if (err > 0) |
814 | return 0; | 816 | return 0; |
815 | 817 | ||
@@ -826,7 +828,8 @@ noflush_out: | |||
826 | } | 828 | } |
827 | 829 | ||
828 | static int | 830 | static int |
829 | tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | 831 | tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, |
832 | u32 pid, int event) | ||
830 | { | 833 | { |
831 | int i, ret; | 834 | int i, ret; |
832 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; | 835 | struct nlattr *tb[TCA_ACT_MAX_PRIO+1]; |
@@ -838,7 +841,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
838 | 841 | ||
839 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { | 842 | if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) { |
840 | if (tb[1] != NULL) | 843 | if (tb[1] != NULL) |
841 | return tca_action_flush(tb[1], n, pid); | 844 | return tca_action_flush(net, tb[1], n, pid); |
842 | else | 845 | else |
843 | return -EINVAL; | 846 | return -EINVAL; |
844 | } | 847 | } |
@@ -859,7 +862,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
859 | } | 862 | } |
860 | 863 | ||
861 | if (event == RTM_GETACTION) | 864 | if (event == RTM_GETACTION) |
862 | ret = act_get_notify(pid, n, head, event); | 865 | ret = act_get_notify(net, pid, n, head, event); |
863 | else { /* delete */ | 866 | else { /* delete */ |
864 | struct sk_buff *skb; | 867 | struct sk_buff *skb; |
865 | 868 | ||
@@ -878,7 +881,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event) | |||
878 | 881 | ||
879 | /* now do the delete */ | 882 | /* now do the delete */ |
880 | tcf_action_destroy(head, 0); | 883 | tcf_action_destroy(head, 0); |
881 | ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, | 884 | ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
882 | n->nlmsg_flags&NLM_F_ECHO); | 885 | n->nlmsg_flags&NLM_F_ECHO); |
883 | if (ret > 0) | 886 | if (ret > 0) |
884 | return 0; | 887 | return 0; |
@@ -889,8 +892,8 @@ err: | |||
889 | return ret; | 892 | return ret; |
890 | } | 893 | } |
891 | 894 | ||
892 | static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | 895 | static int tcf_add_notify(struct net *net, struct tc_action *a, |
893 | u16 flags) | 896 | u32 pid, u32 seq, int event, u16 flags) |
894 | { | 897 | { |
895 | struct tcamsg *t; | 898 | struct tcamsg *t; |
896 | struct nlmsghdr *nlh; | 899 | struct nlmsghdr *nlh; |
@@ -923,7 +926,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | |||
923 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 926 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
924 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; | 927 | NETLINK_CB(skb).dst_group = RTNLGRP_TC; |
925 | 928 | ||
926 | err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); | 929 | err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO); |
927 | if (err > 0) | 930 | if (err > 0) |
928 | err = 0; | 931 | err = 0; |
929 | return err; | 932 | return err; |
@@ -936,7 +939,8 @@ nlmsg_failure: | |||
936 | 939 | ||
937 | 940 | ||
938 | static int | 941 | static int |
939 | tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) | 942 | tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, |
943 | u32 pid, int ovr) | ||
940 | { | 944 | { |
941 | int ret = 0; | 945 | int ret = 0; |
942 | struct tc_action *act; | 946 | struct tc_action *act; |
@@ -954,7 +958,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr) | |||
954 | /* dump then free all the actions after update; inserted policy | 958 | /* dump then free all the actions after update; inserted policy |
955 | * stays intact | 959 | * stays intact |
956 | * */ | 960 | * */ |
957 | ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); | 961 | ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); |
958 | for (a = act; a; a = act) { | 962 | for (a = act; a; a = act) { |
959 | act = a->next; | 963 | act = a->next; |
960 | kfree(a); | 964 | kfree(a); |
@@ -970,9 +974,6 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
970 | u32 pid = skb ? NETLINK_CB(skb).pid : 0; | 974 | u32 pid = skb ? NETLINK_CB(skb).pid : 0; |
971 | int ret = 0, ovr = 0; | 975 | int ret = 0, ovr = 0; |
972 | 976 | ||
973 | if (!net_eq(net, &init_net)) | ||
974 | return -EINVAL; | ||
975 | |||
976 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); | 977 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); |
977 | if (ret < 0) | 978 | if (ret < 0) |
978 | return ret; | 979 | return ret; |
@@ -995,15 +996,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
995 | if (n->nlmsg_flags&NLM_F_REPLACE) | 996 | if (n->nlmsg_flags&NLM_F_REPLACE) |
996 | ovr = 1; | 997 | ovr = 1; |
997 | replay: | 998 | replay: |
998 | ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr); | 999 | ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr); |
999 | if (ret == -EAGAIN) | 1000 | if (ret == -EAGAIN) |
1000 | goto replay; | 1001 | goto replay; |
1001 | break; | 1002 | break; |
1002 | case RTM_DELACTION: | 1003 | case RTM_DELACTION: |
1003 | ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION); | 1004 | ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, |
1005 | pid, RTM_DELACTION); | ||
1004 | break; | 1006 | break; |
1005 | case RTM_GETACTION: | 1007 | case RTM_GETACTION: |
1006 | ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION); | 1008 | ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, |
1009 | pid, RTM_GETACTION); | ||
1007 | break; | 1010 | break; |
1008 | default: | 1011 | default: |
1009 | BUG(); | 1012 | BUG(); |
@@ -1043,7 +1046,6 @@ find_dump_kind(const struct nlmsghdr *n) | |||
1043 | static int | 1046 | static int |
1044 | tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | 1047 | tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) |
1045 | { | 1048 | { |
1046 | struct net *net = sock_net(skb->sk); | ||
1047 | struct nlmsghdr *nlh; | 1049 | struct nlmsghdr *nlh; |
1048 | unsigned char *b = skb_tail_pointer(skb); | 1050 | unsigned char *b = skb_tail_pointer(skb); |
1049 | struct nlattr *nest; | 1051 | struct nlattr *nest; |
@@ -1053,9 +1055,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1053 | struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); | 1055 | struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh); |
1054 | struct nlattr *kind = find_dump_kind(cb->nlh); | 1056 | struct nlattr *kind = find_dump_kind(cb->nlh); |
1055 | 1057 | ||
1056 | if (!net_eq(net, &init_net)) | ||
1057 | return 0; | ||
1058 | |||
1059 | if (kind == NULL) { | 1058 | if (kind == NULL) { |
1060 | printk("tc_dump_action: action bad kind\n"); | 1059 | printk("tc_dump_action: action bad kind\n"); |
1061 | return 0; | 1060 | return 0; |
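The act_api hunks above (and the cls_api and sch_api hunks that follow) are one transformation applied repeatedly: stop rejecting non-initial namespaces, derive the namespace from the requesting socket, and thread it through to every device lookup and netlink reply in place of &init_net. The core of it, sketched with a hypothetical handler:

#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Sketch: a namespace-aware rtnetlink handler. */
static int foo_ctl(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
        struct net *net = sock_net(skb->sk);    /* requester's namespace */
        struct net_device *dev;

        dev = __dev_get_by_index(net, 2);       /* was (&init_net, ifindex) */
        if (dev == NULL)
                return -ENODEV;

        /* ... act on dev, then reply into the same namespace ... */
        return 0;
}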
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f082b27ff46d..5fd0c28ef79a 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -99,8 +99,9 @@ out: | |||
99 | } | 99 | } |
100 | EXPORT_SYMBOL(unregister_tcf_proto_ops); | 100 | EXPORT_SYMBOL(unregister_tcf_proto_ops); |
101 | 101 | ||
102 | static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 102 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
103 | struct tcf_proto *tp, unsigned long fh, int event); | 103 | struct nlmsghdr *n, struct tcf_proto *tp, |
104 | unsigned long fh, int event); | ||
104 | 105 | ||
105 | 106 | ||
106 | /* Select new prio value from the range, managed by kernel. */ | 107 | /* Select new prio value from the range, managed by kernel. */ |
@@ -138,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
138 | int err; | 139 | int err; |
139 | int tp_created = 0; | 140 | int tp_created = 0; |
140 | 141 | ||
141 | if (!net_eq(net, &init_net)) | ||
142 | return -EINVAL; | ||
143 | |||
144 | replay: | 142 | replay: |
145 | t = NLMSG_DATA(n); | 143 | t = NLMSG_DATA(n); |
146 | protocol = TC_H_MIN(t->tcm_info); | 144 | protocol = TC_H_MIN(t->tcm_info); |
@@ -159,7 +157,7 @@ replay: | |||
159 | /* Find head of filter chain. */ | 157 | /* Find head of filter chain. */ |
160 | 158 | ||
161 | /* Find link */ | 159 | /* Find link */ |
162 | dev = __dev_get_by_index(&init_net, t->tcm_ifindex); | 160 | dev = __dev_get_by_index(net, t->tcm_ifindex); |
163 | if (dev == NULL) | 161 | if (dev == NULL) |
164 | return -ENODEV; | 162 | return -ENODEV; |
165 | 163 | ||
@@ -283,7 +281,7 @@ replay: | |||
283 | *back = tp->next; | 281 | *back = tp->next; |
284 | spin_unlock_bh(root_lock); | 282 | spin_unlock_bh(root_lock); |
285 | 283 | ||
286 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 284 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
287 | tcf_destroy(tp); | 285 | tcf_destroy(tp); |
288 | err = 0; | 286 | err = 0; |
289 | goto errout; | 287 | goto errout; |
@@ -306,10 +304,10 @@ replay: | |||
306 | case RTM_DELTFILTER: | 304 | case RTM_DELTFILTER: |
307 | err = tp->ops->delete(tp, fh); | 305 | err = tp->ops->delete(tp, fh); |
308 | if (err == 0) | 306 | if (err == 0) |
309 | tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); | 307 | tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); |
310 | goto errout; | 308 | goto errout; |
311 | case RTM_GETTFILTER: | 309 | case RTM_GETTFILTER: |
312 | err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 310 | err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
313 | goto errout; | 311 | goto errout; |
314 | default: | 312 | default: |
315 | err = -EINVAL; | 313 | err = -EINVAL; |
@@ -325,7 +323,7 @@ replay: | |||
325 | *back = tp; | 323 | *back = tp; |
326 | spin_unlock_bh(root_lock); | 324 | spin_unlock_bh(root_lock); |
327 | } | 325 | } |
328 | tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 326 | tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); |
329 | } else { | 327 | } else { |
330 | if (tp_created) | 328 | if (tp_created) |
331 | tcf_destroy(tp); | 329 | tcf_destroy(tp); |
@@ -371,8 +369,9 @@ nla_put_failure: | |||
371 | return -1; | 369 | return -1; |
372 | } | 370 | } |
373 | 371 | ||
374 | static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 372 | static int tfilter_notify(struct net *net, struct sk_buff *oskb, |
375 | struct tcf_proto *tp, unsigned long fh, int event) | 373 | struct nlmsghdr *n, struct tcf_proto *tp, |
374 | unsigned long fh, int event) | ||
376 | { | 375 | { |
377 | struct sk_buff *skb; | 376 | struct sk_buff *skb; |
378 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 377 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -386,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
386 | return -EINVAL; | 385 | return -EINVAL; |
387 | } | 386 | } |
388 | 387 | ||
389 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, | 388 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, |
390 | n->nlmsg_flags & NLM_F_ECHO); | 389 | n->nlmsg_flags & NLM_F_ECHO); |
391 | } | 390 | } |
392 | 391 | ||
@@ -419,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
419 | const struct Qdisc_class_ops *cops; | 418 | const struct Qdisc_class_ops *cops; |
420 | struct tcf_dump_args arg; | 419 | struct tcf_dump_args arg; |
421 | 420 | ||
422 | if (!net_eq(net, &init_net)) | ||
423 | return 0; | ||
424 | |||
425 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 421 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
426 | return skb->len; | 422 | return skb->len; |
427 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 423 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
428 | return skb->len; | 424 | return skb->len; |
429 | 425 | ||
430 | if (!tcm->tcm_parent) | 426 | if (!tcm->tcm_parent) |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 17c5dfc67320..593eac056e8d 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -773,10 +773,10 @@ static int __init init_u32(void) | |||
773 | printk(" Performance counters on\n"); | 773 | printk(" Performance counters on\n"); |
774 | #endif | 774 | #endif |
775 | #ifdef CONFIG_NET_CLS_IND | 775 | #ifdef CONFIG_NET_CLS_IND |
776 | printk(" input device check on \n"); | 776 | printk(" input device check on\n"); |
777 | #endif | 777 | #endif |
778 | #ifdef CONFIG_NET_CLS_ACT | 778 | #ifdef CONFIG_NET_CLS_ACT |
779 | printk(" Actions configured \n"); | 779 | printk(" Actions configured\n"); |
780 | #endif | 780 | #endif |
781 | return register_tcf_proto_ops(&cls_u32_ops); | 781 | return register_tcf_proto_ops(&cls_u32_ops); |
782 | } | 782 | } |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 145268ca57cf..9839b26674f4 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -35,10 +35,12 @@ | |||
35 | #include <net/netlink.h> | 35 | #include <net/netlink.h> |
36 | #include <net/pkt_sched.h> | 36 | #include <net/pkt_sched.h> |
37 | 37 | ||
38 | static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, | 38 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
39 | struct nlmsghdr *n, u32 clid, | ||
39 | struct Qdisc *old, struct Qdisc *new); | 40 | struct Qdisc *old, struct Qdisc *new); |
40 | static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 41 | static int tclass_notify(struct net *net, struct sk_buff *oskb, |
41 | struct Qdisc *q, unsigned long cl, int event); | 42 | struct nlmsghdr *n, struct Qdisc *q, |
43 | unsigned long cl, int event); | ||
42 | 44 | ||
43 | /* | 45 | /* |
44 | 46 | ||
@@ -639,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
639 | } | 641 | } |
640 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | 642 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); |
641 | 643 | ||
642 | static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid, | 644 | static void notify_and_destroy(struct net *net, struct sk_buff *skb, |
645 | struct nlmsghdr *n, u32 clid, | ||
643 | struct Qdisc *old, struct Qdisc *new) | 646 | struct Qdisc *old, struct Qdisc *new) |
644 | { | 647 | { |
645 | if (new || old) | 648 | if (new || old) |
646 | qdisc_notify(skb, n, clid, old, new); | 649 | qdisc_notify(net, skb, n, clid, old, new); |
647 | 650 | ||
648 | if (old) | 651 | if (old) |
649 | qdisc_destroy(old); | 652 | qdisc_destroy(old); |
@@ -663,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
663 | struct Qdisc *new, struct Qdisc *old) | 666 | struct Qdisc *new, struct Qdisc *old) |
664 | { | 667 | { |
665 | struct Qdisc *q = old; | 668 | struct Qdisc *q = old; |
669 | struct net *net = dev_net(dev); | ||
666 | int err = 0; | 670 | int err = 0; |
667 | 671 | ||
668 | if (parent == NULL) { | 672 | if (parent == NULL) { |
@@ -699,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
699 | } | 703 | } |
700 | 704 | ||
701 | if (!ingress) { | 705 | if (!ingress) { |
702 | notify_and_destroy(skb, n, classid, dev->qdisc, new); | 706 | notify_and_destroy(net, skb, n, classid, |
707 | dev->qdisc, new); | ||
703 | if (new && !new->ops->attach) | 708 | if (new && !new->ops->attach) |
704 | atomic_inc(&new->refcnt); | 709 | atomic_inc(&new->refcnt); |
705 | dev->qdisc = new ? : &noop_qdisc; | 710 | dev->qdisc = new ? : &noop_qdisc; |
706 | } else { | 711 | } else { |
707 | notify_and_destroy(skb, n, classid, old, new); | 712 | notify_and_destroy(net, skb, n, classid, old, new); |
708 | } | 713 | } |
709 | 714 | ||
710 | if (dev->flags & IFF_UP) | 715 | if (dev->flags & IFF_UP) |
@@ -722,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
722 | err = -ENOENT; | 727 | err = -ENOENT; |
723 | } | 728 | } |
724 | if (!err) | 729 | if (!err) |
725 | notify_and_destroy(skb, n, classid, old, new); | 730 | notify_and_destroy(net, skb, n, classid, old, new); |
726 | } | 731 | } |
727 | return err; | 732 | return err; |
728 | } | 733 | } |
@@ -948,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
948 | struct Qdisc *p = NULL; | 953 | struct Qdisc *p = NULL; |
949 | int err; | 954 | int err; |
950 | 955 | ||
951 | if (!net_eq(net, &init_net)) | 956 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
952 | return -EINVAL; | ||
953 | |||
954 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | ||
955 | return -ENODEV; | 957 | return -ENODEV; |
956 | 958 | ||
957 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 959 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -991,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
991 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) | 993 | if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) |
992 | return err; | 994 | return err; |
993 | } else { | 995 | } else { |
994 | qdisc_notify(skb, n, clid, NULL, q); | 996 | qdisc_notify(net, skb, n, clid, NULL, q); |
995 | } | 997 | } |
996 | return 0; | 998 | return 0; |
997 | } | 999 | } |
@@ -1010,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1010 | struct Qdisc *q, *p; | 1012 | struct Qdisc *q, *p; |
1011 | int err; | 1013 | int err; |
1012 | 1014 | ||
1013 | if (!net_eq(net, &init_net)) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | replay: | 1015 | replay: |
1017 | /* Reinit, just in case something touches this. */ | 1016 | /* Reinit, just in case something touches this. */ |
1018 | tcm = NLMSG_DATA(n); | 1017 | tcm = NLMSG_DATA(n); |
1019 | clid = tcm->tcm_parent; | 1018 | clid = tcm->tcm_parent; |
1020 | q = p = NULL; | 1019 | q = p = NULL; |
1021 | 1020 | ||
1022 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 1021 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1023 | return -ENODEV; | 1022 | return -ENODEV; |
1024 | 1023 | ||
1025 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1024 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1106,7 +1105,7 @@ replay: | |||
1106 | return -EINVAL; | 1105 | return -EINVAL; |
1107 | err = qdisc_change(q, tca); | 1106 | err = qdisc_change(q, tca); |
1108 | if (err == 0) | 1107 | if (err == 0) |
1109 | qdisc_notify(skb, n, clid, NULL, q); | 1108 | qdisc_notify(net, skb, n, clid, NULL, q); |
1110 | return err; | 1109 | return err; |
1111 | 1110 | ||
1112 | create_n_graft: | 1111 | create_n_graft: |
@@ -1196,8 +1195,9 @@ nla_put_failure: | |||
1196 | return -1; | 1195 | return -1; |
1197 | } | 1196 | } |
1198 | 1197 | ||
1199 | static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 1198 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
1200 | u32 clid, struct Qdisc *old, struct Qdisc *new) | 1199 | struct nlmsghdr *n, u32 clid, |
1200 | struct Qdisc *old, struct Qdisc *new) | ||
1201 | { | 1201 | { |
1202 | struct sk_buff *skb; | 1202 | struct sk_buff *skb; |
1203 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 1203 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -1216,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
1216 | } | 1216 | } |
1217 | 1217 | ||
1218 | if (skb->len) | 1218 | if (skb->len) |
1219 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1219 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
1220 | 1220 | ||
1221 | err_out: | 1221 | err_out: |
1222 | kfree_skb(skb); | 1222 | kfree_skb(skb); |
@@ -1275,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
1275 | int s_idx, s_q_idx; | 1275 | int s_idx, s_q_idx; |
1276 | struct net_device *dev; | 1276 | struct net_device *dev; |
1277 | 1277 | ||
1278 | if (!net_eq(net, &init_net)) | ||
1279 | return 0; | ||
1280 | |||
1281 | s_idx = cb->args[0]; | 1278 | s_idx = cb->args[0]; |
1282 | s_q_idx = q_idx = cb->args[1]; | 1279 | s_q_idx = q_idx = cb->args[1]; |
1283 | 1280 | ||
1284 | rcu_read_lock(); | 1281 | rcu_read_lock(); |
1285 | idx = 0; | 1282 | idx = 0; |
1286 | for_each_netdev_rcu(&init_net, dev) { | 1283 | for_each_netdev_rcu(net, dev) { |
1287 | struct netdev_queue *dev_queue; | 1284 | struct netdev_queue *dev_queue; |
1288 | 1285 | ||
1289 | if (idx < s_idx) | 1286 | if (idx < s_idx) |
@@ -1335,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1335 | u32 qid = TC_H_MAJ(clid); | 1332 | u32 qid = TC_H_MAJ(clid); |
1336 | int err; | 1333 | int err; |
1337 | 1334 | ||
1338 | if (!net_eq(net, &init_net)) | 1335 | if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1339 | return -EINVAL; | ||
1340 | |||
1341 | if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | ||
1342 | return -ENODEV; | 1336 | return -ENODEV; |
1343 | 1337 | ||
1344 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); | 1338 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
@@ -1419,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1419 | if (cops->delete) | 1413 | if (cops->delete) |
1420 | err = cops->delete(q, cl); | 1414 | err = cops->delete(q, cl); |
1421 | if (err == 0) | 1415 | if (err == 0) |
1422 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); | 1416 | tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS); |
1423 | goto out; | 1417 | goto out; |
1424 | case RTM_GETTCLASS: | 1418 | case RTM_GETTCLASS: |
1425 | err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); | 1419 | err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); |
1426 | goto out; | 1420 | goto out; |
1427 | default: | 1421 | default: |
1428 | err = -EINVAL; | 1422 | err = -EINVAL; |
@@ -1435,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1435 | if (cops->change) | 1429 | if (cops->change) |
1436 | err = cops->change(q, clid, pid, tca, &new_cl); | 1430 | err = cops->change(q, clid, pid, tca, &new_cl); |
1437 | if (err == 0) | 1431 | if (err == 0) |
1438 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); | 1432 | tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); |
1439 | 1433 | ||
1440 | out: | 1434 | out: |
1441 | if (cl) | 1435 | if (cl) |
@@ -1487,8 +1481,9 @@ nla_put_failure: | |||
1487 | return -1; | 1481 | return -1; |
1488 | } | 1482 | } |
1489 | 1483 | ||
1490 | static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | 1484 | static int tclass_notify(struct net *net, struct sk_buff *oskb, |
1491 | struct Qdisc *q, unsigned long cl, int event) | 1485 | struct nlmsghdr *n, struct Qdisc *q, |
1486 | unsigned long cl, int event) | ||
1492 | { | 1487 | { |
1493 | struct sk_buff *skb; | 1488 | struct sk_buff *skb; |
1494 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; | 1489 | u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; |
@@ -1502,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, | |||
1502 | return -EINVAL; | 1497 | return -EINVAL; |
1503 | } | 1498 | } |
1504 | 1499 | ||
1505 | return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | 1500 | return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); |
1506 | } | 1501 | } |
1507 | 1502 | ||
1508 | struct qdisc_dump_args | 1503 | struct qdisc_dump_args |
@@ -1577,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1577 | struct net_device *dev; | 1572 | struct net_device *dev; |
1578 | int t, s_t; | 1573 | int t, s_t; |
1579 | 1574 | ||
1580 | if (!net_eq(net, &init_net)) | ||
1581 | return 0; | ||
1582 | |||
1583 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) | 1575 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
1584 | return 0; | 1576 | return 0; |
1585 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 1577 | if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) |
1586 | return 0; | 1578 | return 0; |
1587 | 1579 | ||
1588 | s_t = cb->args[0]; | 1580 | s_t = cb->args[0]; |
@@ -1692,7 +1684,7 @@ static int psched_show(struct seq_file *seq, void *v) | |||
1692 | 1684 | ||
1693 | static int psched_open(struct inode *inode, struct file *file) | 1685 | static int psched_open(struct inode *inode, struct file *file) |
1694 | { | 1686 | { |
1695 | return single_open(file, psched_show, PDE(inode)->data); | 1687 | return single_open(file, psched_show, NULL); |
1696 | } | 1688 | } |
1697 | 1689 | ||
1698 | static const struct file_operations psched_fops = { | 1690 | static const struct file_operations psched_fops = { |
@@ -1702,15 +1694,53 @@ static const struct file_operations psched_fops = { | |||
1702 | .llseek = seq_lseek, | 1694 | .llseek = seq_lseek, |
1703 | .release = single_release, | 1695 | .release = single_release, |
1704 | }; | 1696 | }; |
1697 | |||
1698 | static int __net_init psched_net_init(struct net *net) | ||
1699 | { | ||
1700 | struct proc_dir_entry *e; | ||
1701 | |||
1702 | e = proc_net_fops_create(net, "psched", 0, &psched_fops); | ||
1703 | if (e == NULL) | ||
1704 | return -ENOMEM; | ||
1705 | |||
1706 | return 0; | ||
1707 | } | ||
1708 | |||
1709 | static void __net_exit psched_net_exit(struct net *net) | ||
1710 | { | ||
1711 | proc_net_remove(net, "psched"); | ||
1712 | } | ||
1713 | #else | ||
1714 | static int __net_init psched_net_init(struct net *net) | ||
1715 | { | ||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | static void __net_exit psched_net_exit(struct net *net) | ||
1720 | { | ||
1721 | } | ||
1705 | #endif | 1722 | #endif |
1706 | 1723 | ||
1724 | static struct pernet_operations psched_net_ops = { | ||
1725 | .init = psched_net_init, | ||
1726 | .exit = psched_net_exit, | ||
1727 | }; | ||
1728 | |||
1707 | static int __init pktsched_init(void) | 1729 | static int __init pktsched_init(void) |
1708 | { | 1730 | { |
1731 | int err; | ||
1732 | |||
1733 | err = register_pernet_subsys(&psched_net_ops); | ||
1734 | if (err) { | ||
1735 | printk(KERN_ERR "pktsched_init: " | ||
1736 | "cannot initialize per netns operations\n"); | ||
1737 | return err; | ||
1738 | } | ||
1739 | |||
1709 | register_qdisc(&pfifo_qdisc_ops); | 1740 | register_qdisc(&pfifo_qdisc_ops); |
1710 | register_qdisc(&bfifo_qdisc_ops); | 1741 | register_qdisc(&bfifo_qdisc_ops); |
1711 | register_qdisc(&pfifo_head_drop_qdisc_ops); | 1742 | register_qdisc(&pfifo_head_drop_qdisc_ops); |
1712 | register_qdisc(&mq_qdisc_ops); | 1743 | register_qdisc(&mq_qdisc_ops); |
1713 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); | ||
1714 | 1744 | ||
1715 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); | 1745 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); |
1716 | rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); | 1746 | rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); |
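
The sch_api.c hunks above convert the packet scheduler core to be namespace-aware: qdisc_notify(), tclass_notify() and notify_and_destroy() gain a struct net argument, device lookups use the caller's namespace instead of init_net, and the /proc/net/psched entry is created per namespace via pernet_operations. A minimal sketch of that registration pattern follows, using illustrative foo_* names rather than anything from this patch:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <net/net_namespace.h>

	static int __net_init foo_net_init(struct net *net)
	{
		/* set up state that must exist once per namespace */
		return 0;
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		/* release the per-namespace state */
	}

	static struct pernet_operations foo_net_ops = {
		.init = foo_net_init,
		.exit = foo_net_exit,
	};

	static int __init foo_init(void)
	{
		/* .init runs for every existing and future namespace */
		return register_pernet_subsys(&foo_net_ops);
	}

	static void __exit foo_exit(void)
	{
		unregister_pernet_subsys(&foo_net_ops);
	}

	module_init(foo_init);
	module_exit(foo_exit);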
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ff4dd53eeff0..aeddabfb8e4e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -529,7 +529,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | |||
529 | unsigned int size; | 529 | unsigned int size; |
530 | int err = -ENOBUFS; | 530 | int err = -ENOBUFS; |
531 | 531 | ||
532 | /* ensure that the Qdisc and the private data are 32-byte aligned */ | 532 | /* ensure that the Qdisc and the private data are 64-byte aligned */ |
533 | size = QDISC_ALIGN(sizeof(*sch)); | 533 | size = QDISC_ALIGN(sizeof(*sch)); |
534 | size += ops->priv_size + (QDISC_ALIGNTO - 1); | 534 | size += ops->priv_size + (QDISC_ALIGNTO - 1); |
535 | 535 | ||
@@ -591,6 +591,13 @@ void qdisc_reset(struct Qdisc *qdisc) | |||
591 | } | 591 | } |
592 | EXPORT_SYMBOL(qdisc_reset); | 592 | EXPORT_SYMBOL(qdisc_reset); |
593 | 593 | ||
594 | static void qdisc_rcu_free(struct rcu_head *head) | ||
595 | { | ||
596 | struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head); | ||
597 | |||
598 | kfree((char *) qdisc - qdisc->padded); | ||
599 | } | ||
600 | |||
594 | void qdisc_destroy(struct Qdisc *qdisc) | 601 | void qdisc_destroy(struct Qdisc *qdisc) |
595 | { | 602 | { |
596 | const struct Qdisc_ops *ops = qdisc->ops; | 603 | const struct Qdisc_ops *ops = qdisc->ops; |
@@ -614,7 +621,11 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
614 | dev_put(qdisc_dev(qdisc)); | 621 | dev_put(qdisc_dev(qdisc)); |
615 | 622 | ||
616 | kfree_skb(qdisc->gso_skb); | 623 | kfree_skb(qdisc->gso_skb); |
617 | kfree((char *) qdisc - qdisc->padded); | 624 | /* |
625 | * gen_estimator est_timer() might access qdisc->q.lock, | ||
626 | * wait a RCU grace period before freeing qdisc. | ||
627 | */ | ||
628 | call_rcu(&qdisc->rcu_head, qdisc_rcu_free); | ||
618 | } | 629 | } |
619 | EXPORT_SYMBOL(qdisc_destroy); | 630 | EXPORT_SYMBOL(qdisc_destroy); |
620 | 631 | ||
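
The qdisc_destroy() change defers the final kfree() through call_rcu() because, as the added comment notes, gen_estimator's est_timer() may still dereference qdisc->q.lock. The general shape of the pattern, sketched with an assumed struct foo rather than struct Qdisc:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int payload;
		struct rcu_head rcu_head;	/* storage for the deferred callback */
	};

	static void foo_rcu_free(struct rcu_head *head)
	{
		/* runs only after all pre-existing RCU read-side sections end */
		struct foo *f = container_of(head, struct foo, rcu_head);

		kfree(f);
	}

	static void foo_destroy(struct foo *f)
	{
		/* readers found f under rcu_read_lock(); never free it synchronously */
		call_rcu(&f->rcu_head, foo_rcu_free);
	}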
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index c5a9ac566007..c65762823f5e 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -123,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
123 | case htons(ETH_P_IP): | 123 | case htons(ETH_P_IP): |
124 | { | 124 | { |
125 | const struct iphdr *iph = ip_hdr(skb); | 125 | const struct iphdr *iph = ip_hdr(skb); |
126 | h = iph->daddr; | 126 | h = (__force u32)iph->daddr; |
127 | h2 = iph->saddr ^ iph->protocol; | 127 | h2 = (__force u32)iph->saddr ^ iph->protocol; |
128 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && | 128 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && |
129 | (iph->protocol == IPPROTO_TCP || | 129 | (iph->protocol == IPPROTO_TCP || |
130 | iph->protocol == IPPROTO_UDP || | 130 | iph->protocol == IPPROTO_UDP || |
@@ -138,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
138 | case htons(ETH_P_IPV6): | 138 | case htons(ETH_P_IPV6): |
139 | { | 139 | { |
140 | struct ipv6hdr *iph = ipv6_hdr(skb); | 140 | struct ipv6hdr *iph = ipv6_hdr(skb); |
141 | h = iph->daddr.s6_addr32[3]; | 141 | h = (__force u32)iph->daddr.s6_addr32[3]; |
142 | h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr; | 142 | h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; |
143 | if (iph->nexthdr == IPPROTO_TCP || | 143 | if (iph->nexthdr == IPPROTO_TCP || |
144 | iph->nexthdr == IPPROTO_UDP || | 144 | iph->nexthdr == IPPROTO_UDP || |
145 | iph->nexthdr == IPPROTO_UDPLITE || | 145 | iph->nexthdr == IPPROTO_UDPLITE || |
@@ -150,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
150 | break; | 150 | break; |
151 | } | 151 | } |
152 | default: | 152 | default: |
153 | h = (unsigned long)skb_dst(skb) ^ skb->protocol; | 153 | h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; |
154 | h2 = (unsigned long)skb->sk; | 154 | h2 = (unsigned long)skb->sk; |
155 | } | 155 | } |
156 | 156 | ||
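
The (__force u32) casts in sfq_hash() do not change the generated code; they tell sparse that feeding big-endian __be32 values into a host-order hash is intentional (the same annotation appears in sunrpc's xprt_alloc_xid() further down). A standalone illustration, assuming as the hash does that byte order is irrelevant:

	#include <linux/types.h>

	/* sparse would warn on a plain __be32 -> u32 assignment; __force
	 * records that mixing byte orders is deliberate, since a hash only
	 * needs the bits, not a meaningful integer value. */
	static inline u32 hash_be32_pair(__be32 a, __be32 b)
	{
		u32 h = (__force u32)a;

		h ^= (__force u32)b;
		return h;
	}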
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 9fb5d37c37ad..732689140fb8 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -232,7 +232,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
232 | if (!(transport->param_flags & SPP_PMTUD_ENABLE)) | 232 | if (!(transport->param_flags & SPP_PMTUD_ENABLE)) |
233 | skb->local_df = 1; | 233 | skb->local_df = 1; |
234 | 234 | ||
235 | return ip6_xmit(sk, skb, &fl, np->opt, 0); | 235 | return ip6_xmit(sk, skb, &fl, np->opt); |
236 | } | 236 | } |
237 | 237 | ||
238 | /* Returns the dst cache entry for the given source and destination ip | 238 | /* Returns the dst cache entry for the given source and destination ip |
@@ -277,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, | |||
277 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, | 277 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, |
278 | union sctp_addr *s2) | 278 | union sctp_addr *s2) |
279 | { | 279 | { |
280 | struct in6_addr *a1 = &s1->v6.sin6_addr; | 280 | return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr); |
281 | struct in6_addr *a2 = &s2->v6.sin6_addr; | ||
282 | int i, j; | ||
283 | |||
284 | for (i = 0; i < 4 ; i++) { | ||
285 | __be32 a1xora2; | ||
286 | |||
287 | a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i]; | ||
288 | |||
289 | if ((j = fls(ntohl(a1xora2)))) | ||
290 | return (i * 32 + 32 - j); | ||
291 | } | ||
292 | |||
293 | return (i*32); | ||
294 | } | 281 | } |
295 | 282 | ||
296 | /* Fills in the source address(saddr) based on the destination address(daddr) | 283 | /* Fills in the source address(saddr) based on the destination address(daddr) |
@@ -372,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, | |||
372 | } | 359 | } |
373 | 360 | ||
374 | read_lock_bh(&in6_dev->lock); | 361 | read_lock_bh(&in6_dev->lock); |
375 | for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { | 362 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
376 | /* Add the address to the local list. */ | 363 | /* Add the address to the local list. */ |
377 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); | 364 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); |
378 | if (addr) { | 365 | if (addr) { |
379 | addr->a.v6.sin6_family = AF_INET6; | 366 | addr->a.v6.sin6_family = AF_INET6; |
380 | addr->a.v6.sin6_port = 0; | 367 | addr->a.v6.sin6_port = 0; |
381 | addr->a.v6.sin6_addr = ifp->addr; | 368 | ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr); |
382 | addr->a.v6.sin6_scope_id = dev->ifindex; | 369 | addr->a.v6.sin6_scope_id = dev->ifindex; |
383 | addr->valid = 1; | 370 | addr->valid = 1; |
384 | INIT_LIST_HEAD(&addr->list); | 371 | INIT_LIST_HEAD(&addr->list); |
@@ -419,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) | |||
419 | { | 406 | { |
420 | addr->v6.sin6_family = AF_INET6; | 407 | addr->v6.sin6_family = AF_INET6; |
421 | addr->v6.sin6_port = 0; | 408 | addr->v6.sin6_port = 0; |
422 | addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; | 409 | ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr); |
423 | } | 410 | } |
424 | 411 | ||
425 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | 412 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ |
@@ -432,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | |||
432 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = | 419 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = |
433 | addr->v4.sin_addr.s_addr; | 420 | addr->v4.sin_addr.s_addr; |
434 | } else { | 421 | } else { |
435 | inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; | 422 | ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr); |
436 | } | 423 | } |
437 | } | 424 | } |
438 | 425 | ||
@@ -445,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | |||
445 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); | 432 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); |
446 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; | 433 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; |
447 | } else { | 434 | } else { |
448 | inet6_sk(sk)->daddr = addr->v6.sin6_addr; | 435 | ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr); |
449 | } | 436 | } |
450 | } | 437 | } |
451 | 438 | ||
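
sctp_v6_addr_match_len() previously open-coded a longest-common-prefix walk over the four 32-bit words of two IPv6 addresses; the patch replaces it with ipv6_addr_diff() from <net/ipv6.h>, which computes the same quantity. A brief usage sketch (the sample addresses are illustrative, not from the patch):

	#include <net/ipv6.h>

	/* ipv6_addr_diff() returns the length in bits of the common prefix,
	 * e.g. 2001:db8::1 vs 2001:db8::2 first differ at bit 126, so the
	 * shared prefix is 126 bits; identical addresses yield 128. */
	static int foo_prefix_match_len(const struct in6_addr *a1,
					const struct in6_addr *a2)
	{
		return ipv6_addr_diff(a1, a2);
	}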
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index a56f98e82f92..704298f4b284 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -854,7 +854,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
854 | IP_PMTUDISC_DO : IP_PMTUDISC_DONT; | 854 | IP_PMTUDISC_DO : IP_PMTUDISC_DONT; |
855 | 855 | ||
856 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | 856 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); |
857 | return ip_queue_xmit(skb, 0); | 857 | return ip_queue_xmit(skb); |
858 | } | 858 | } |
859 | 859 | ||
860 | static struct sctp_af sctp_af_inet; | 860 | static struct sctp_af sctp_af_inet; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 007e8baba089..f34adcca8a8c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -5482,7 +5482,6 @@ pp_found: | |||
5482 | */ | 5482 | */ |
5483 | int reuse = sk->sk_reuse; | 5483 | int reuse = sk->sk_reuse; |
5484 | struct sock *sk2; | 5484 | struct sock *sk2; |
5485 | struct hlist_node *node; | ||
5486 | 5485 | ||
5487 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); | 5486 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); |
5488 | if (pp->fastreuse && sk->sk_reuse && | 5487 | if (pp->fastreuse && sk->sk_reuse && |
@@ -5703,7 +5702,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
5703 | struct sctp_sock *sp = sctp_sk(sk); | 5702 | struct sctp_sock *sp = sctp_sk(sk); |
5704 | unsigned int mask; | 5703 | unsigned int mask; |
5705 | 5704 | ||
5706 | poll_wait(file, sk->sk_sleep, wait); | 5705 | poll_wait(file, sk_sleep(sk), wait); |
5707 | 5706 | ||
5708 | /* A TCP-style listening socket becomes readable when the accept queue | 5707 | /* A TCP-style listening socket becomes readable when the accept queue |
5709 | * is not empty. | 5708 | * is not empty. |
@@ -5944,7 +5943,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) | |||
5944 | int error; | 5943 | int error; |
5945 | DEFINE_WAIT(wait); | 5944 | DEFINE_WAIT(wait); |
5946 | 5945 | ||
5947 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 5946 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
5948 | 5947 | ||
5949 | /* Socket errors? */ | 5948 | /* Socket errors? */ |
5950 | error = sock_error(sk); | 5949 | error = sock_error(sk); |
@@ -5981,14 +5980,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) | |||
5981 | sctp_lock_sock(sk); | 5980 | sctp_lock_sock(sk); |
5982 | 5981 | ||
5983 | ready: | 5982 | ready: |
5984 | finish_wait(sk->sk_sleep, &wait); | 5983 | finish_wait(sk_sleep(sk), &wait); |
5985 | return 0; | 5984 | return 0; |
5986 | 5985 | ||
5987 | interrupted: | 5986 | interrupted: |
5988 | error = sock_intr_errno(*timeo_p); | 5987 | error = sock_intr_errno(*timeo_p); |
5989 | 5988 | ||
5990 | out: | 5989 | out: |
5991 | finish_wait(sk->sk_sleep, &wait); | 5990 | finish_wait(sk_sleep(sk), &wait); |
5992 | *err = error; | 5991 | *err = error; |
5993 | return error; | 5992 | return error; |
5994 | } | 5993 | } |
@@ -6062,8 +6061,8 @@ static void __sctp_write_space(struct sctp_association *asoc) | |||
6062 | wake_up_interruptible(&asoc->wait); | 6061 | wake_up_interruptible(&asoc->wait); |
6063 | 6062 | ||
6064 | if (sctp_writeable(sk)) { | 6063 | if (sctp_writeable(sk)) { |
6065 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 6064 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
6066 | wake_up_interruptible(sk->sk_sleep); | 6065 | wake_up_interruptible(sk_sleep(sk)); |
6067 | 6066 | ||
6068 | /* Note that we try to include the Async I/O support | 6067 | /* Note that we try to include the Async I/O support |
6069 | * here by modeling from the current TCP/UDP code. | 6068 | * here by modeling from the current TCP/UDP code. |
@@ -6297,7 +6296,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo) | |||
6297 | 6296 | ||
6298 | 6297 | ||
6299 | for (;;) { | 6298 | for (;;) { |
6300 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | 6299 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
6301 | TASK_INTERRUPTIBLE); | 6300 | TASK_INTERRUPTIBLE); |
6302 | 6301 | ||
6303 | if (list_empty(&ep->asocs)) { | 6302 | if (list_empty(&ep->asocs)) { |
@@ -6323,7 +6322,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo) | |||
6323 | break; | 6322 | break; |
6324 | } | 6323 | } |
6325 | 6324 | ||
6326 | finish_wait(sk->sk_sleep, &wait); | 6325 | finish_wait(sk_sleep(sk), &wait); |
6327 | 6326 | ||
6328 | return err; | 6327 | return err; |
6329 | } | 6328 | } |
@@ -6333,7 +6332,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout) | |||
6333 | DEFINE_WAIT(wait); | 6332 | DEFINE_WAIT(wait); |
6334 | 6333 | ||
6335 | do { | 6334 | do { |
6336 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 6335 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
6337 | if (list_empty(&sctp_sk(sk)->ep->asocs)) | 6336 | if (list_empty(&sctp_sk(sk)->ep->asocs)) |
6338 | break; | 6337 | break; |
6339 | sctp_release_sock(sk); | 6338 | sctp_release_sock(sk); |
@@ -6341,7 +6340,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout) | |||
6341 | sctp_lock_sock(sk); | 6340 | sctp_lock_sock(sk); |
6342 | } while (!signal_pending(current) && timeout); | 6341 | } while (!signal_pending(current) && timeout); |
6343 | 6342 | ||
6344 | finish_wait(sk->sk_sleep, &wait); | 6343 | finish_wait(sk_sleep(sk), &wait); |
6345 | } | 6344 | } |
6346 | 6345 | ||
6347 | static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) | 6346 | static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) |
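
The recurring sk->sk_sleep to sk_sleep(sk) substitutions throughout sctp (and in sunrpc, tipc and unix below) move callers onto an accessor, so the wait queue's placement inside struct sock can later change without touching every user. A sketch of the waiter/waker pair through the accessor, with illustrative foo_* names:

	#include <linux/sched.h>
	#include <linux/wait.h>
	#include <net/sock.h>

	static void foo_wait_for_data(struct sock *sk)
	{
		DEFINE_WAIT(wait);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (skb_queue_empty(&sk->sk_receive_queue))
			schedule();
		finish_wait(sk_sleep(sk), &wait);
	}

	static void foo_data_ready(struct sock *sk)
	{
		/* the NULL check mirrors the converted call sites above */
		if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
	}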
diff --git a/net/socket.c b/net/socket.c index 5e8d0af3c0e7..9822081eab38 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -620,10 +620,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
620 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, | 620 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, |
621 | sizeof(tv), &tv); | 621 | sizeof(tv), &tv); |
622 | } else { | 622 | } else { |
623 | struct timespec ts; | 623 | skb_get_timestampns(skb, &ts[0]); |
624 | skb_get_timestampns(skb, &ts); | ||
625 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, | 624 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, |
626 | sizeof(ts), &ts); | 625 | sizeof(ts[0]), &ts[0]); |
627 | } | 626 | } |
628 | } | 627 | } |
629 | 628 | ||
@@ -1068,78 +1067,27 @@ static int sock_close(struct inode *inode, struct file *filp) | |||
1068 | * 1. fasync_list is modified only under process context socket lock | 1067 | * 1. fasync_list is modified only under process context socket lock |
1069 | * i.e. under semaphore. | 1068 | * i.e. under semaphore. |
1070 | * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) | 1069 | * 2. fasync_list is used under read_lock(&sk->sk_callback_lock) |
1071 | * or under socket lock. | 1070 | * or under socket lock |
1072 | * 3. fasync_list can be used from softirq context, so that | ||
1073 | * modification under socket lock have to be enhanced with | ||
1074 | * write_lock_bh(&sk->sk_callback_lock). | ||
1075 | * --ANK (990710) | ||
1076 | */ | 1071 | */ |
1077 | 1072 | ||
1078 | static int sock_fasync(int fd, struct file *filp, int on) | 1073 | static int sock_fasync(int fd, struct file *filp, int on) |
1079 | { | 1074 | { |
1080 | struct fasync_struct *fa, *fna = NULL, **prev; | 1075 | struct socket *sock = filp->private_data; |
1081 | struct socket *sock; | 1076 | struct sock *sk = sock->sk; |
1082 | struct sock *sk; | ||
1083 | |||
1084 | if (on) { | ||
1085 | fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL); | ||
1086 | if (fna == NULL) | ||
1087 | return -ENOMEM; | ||
1088 | } | ||
1089 | |||
1090 | sock = filp->private_data; | ||
1091 | 1077 | ||
1092 | sk = sock->sk; | 1078 | if (sk == NULL) |
1093 | if (sk == NULL) { | ||
1094 | kfree(fna); | ||
1095 | return -EINVAL; | 1079 | return -EINVAL; |
1096 | } | ||
1097 | 1080 | ||
1098 | lock_sock(sk); | 1081 | lock_sock(sk); |
1099 | 1082 | ||
1100 | spin_lock(&filp->f_lock); | 1083 | fasync_helper(fd, filp, on, &sock->fasync_list); |
1101 | if (on) | ||
1102 | filp->f_flags |= FASYNC; | ||
1103 | else | ||
1104 | filp->f_flags &= ~FASYNC; | ||
1105 | spin_unlock(&filp->f_lock); | ||
1106 | |||
1107 | prev = &(sock->fasync_list); | ||
1108 | 1084 | ||
1109 | for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev) | 1085 | if (!sock->fasync_list) |
1110 | if (fa->fa_file == filp) | 1086 | sock_reset_flag(sk, SOCK_FASYNC); |
1111 | break; | 1087 | else |
1112 | |||
1113 | if (on) { | ||
1114 | if (fa != NULL) { | ||
1115 | write_lock_bh(&sk->sk_callback_lock); | ||
1116 | fa->fa_fd = fd; | ||
1117 | write_unlock_bh(&sk->sk_callback_lock); | ||
1118 | |||
1119 | kfree(fna); | ||
1120 | goto out; | ||
1121 | } | ||
1122 | fna->fa_file = filp; | ||
1123 | fna->fa_fd = fd; | ||
1124 | fna->magic = FASYNC_MAGIC; | ||
1125 | fna->fa_next = sock->fasync_list; | ||
1126 | write_lock_bh(&sk->sk_callback_lock); | ||
1127 | sock->fasync_list = fna; | ||
1128 | sock_set_flag(sk, SOCK_FASYNC); | 1088 | sock_set_flag(sk, SOCK_FASYNC); |
1129 | write_unlock_bh(&sk->sk_callback_lock); | ||
1130 | } else { | ||
1131 | if (fa != NULL) { | ||
1132 | write_lock_bh(&sk->sk_callback_lock); | ||
1133 | *prev = fa->fa_next; | ||
1134 | if (!sock->fasync_list) | ||
1135 | sock_reset_flag(sk, SOCK_FASYNC); | ||
1136 | write_unlock_bh(&sk->sk_callback_lock); | ||
1137 | kfree(fa); | ||
1138 | } | ||
1139 | } | ||
1140 | 1089 | ||
1141 | out: | 1090 | release_sock(sk); |
1142 | release_sock(sock->sk); | ||
1143 | return 0; | 1091 | return 0; |
1144 | } | 1092 | } |
1145 | 1093 | ||
@@ -1160,10 +1108,10 @@ int sock_wake_async(struct socket *sock, int how, int band) | |||
1160 | /* fall through */ | 1108 | /* fall through */ |
1161 | case SOCK_WAKE_IO: | 1109 | case SOCK_WAKE_IO: |
1162 | call_kill: | 1110 | call_kill: |
1163 | __kill_fasync(sock->fasync_list, SIGIO, band); | 1111 | kill_fasync(&sock->fasync_list, SIGIO, band); |
1164 | break; | 1112 | break; |
1165 | case SOCK_WAKE_URG: | 1113 | case SOCK_WAKE_URG: |
1166 | __kill_fasync(sock->fasync_list, SIGURG, band); | 1114 | kill_fasync(&sock->fasync_list, SIGURG, band); |
1167 | } | 1115 | } |
1168 | return 0; | 1116 | return 0; |
1169 | } | 1117 | } |
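
The rewritten sock_fasync() drops its hand-rolled fasync list manipulation in favor of fasync_helper(), and sock_wake_async() now signals through kill_fasync(), which takes the list head and performs its own locking. A driver-style sketch of the pair (foo_* names are illustrative):

	#include <linux/fs.h>
	#include <linux/signal.h>

	static struct fasync_struct *foo_fasync_list;

	static int foo_fasync(int fd, struct file *filp, int on)
	{
		/* adds or removes filp on the list and maintains FASYNC for us */
		return fasync_helper(fd, filp, on, &foo_fasync_list);
	}

	static void foo_notify(void)
	{
		/* sends SIGIO to every registered owner; safe on an empty list */
		kill_fasync(&foo_fasync_list, SIGIO, POLL_IN);
	}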
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c index 3308157436d2..a99825d7caa0 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_token.c +++ b/net/sunrpc/auth_gss/gss_spkm3_token.c | |||
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck | |||
223 | 223 | ||
224 | /* only support SPKM_MIC_TOK */ | 224 | /* only support SPKM_MIC_TOK */ |
225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { | 225 | if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { |
226 | dprintk("RPC: ERROR unsupported SPKM3 token \n"); | 226 | dprintk("RPC: ERROR unsupported SPKM3 token\n"); |
227 | goto out; | 227 | goto out; |
228 | } | 228 | } |
229 | 229 | ||
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c index f0c05d3311c1..7dcfe0cc3500 100644 --- a/net/sunrpc/bc_svc.c +++ b/net/sunrpc/bc_svc.c | |||
@@ -60,7 +60,7 @@ int bc_send(struct rpc_rqst *req) | |||
60 | rpc_put_task(task); | 60 | rpc_put_task(task); |
61 | } | 61 | } |
62 | return ret; | 62 | return ret; |
63 | dprintk("RPC: bc_send ret= %d \n", ret); | 63 | dprintk("RPC: bc_send ret= %d\n", ret); |
64 | } | 64 | } |
65 | 65 | ||
66 | #endif /* CONFIG_NFS_V4_1 */ | 66 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index a29f259204e6..ce0d5b35c2ac 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -419,8 +419,8 @@ static void svc_udp_data_ready(struct sock *sk, int count) | |||
419 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 419 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
420 | svc_xprt_enqueue(&svsk->sk_xprt); | 420 | svc_xprt_enqueue(&svsk->sk_xprt); |
421 | } | 421 | } |
422 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 422 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
423 | wake_up_interruptible(sk->sk_sleep); | 423 | wake_up_interruptible(sk_sleep(sk)); |
424 | } | 424 | } |
425 | 425 | ||
426 | /* | 426 | /* |
@@ -436,10 +436,10 @@ static void svc_write_space(struct sock *sk) | |||
436 | svc_xprt_enqueue(&svsk->sk_xprt); | 436 | svc_xprt_enqueue(&svsk->sk_xprt); |
437 | } | 437 | } |
438 | 438 | ||
439 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { | 439 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) { |
440 | dprintk("RPC svc_write_space: someone sleeping on %p\n", | 440 | dprintk("RPC svc_write_space: someone sleeping on %p\n", |
441 | svsk); | 441 | svsk); |
442 | wake_up_interruptible(sk->sk_sleep); | 442 | wake_up_interruptible(sk_sleep(sk)); |
443 | } | 443 | } |
444 | } | 444 | } |
445 | 445 | ||
@@ -757,8 +757,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) | |||
757 | printk("svc: socket %p: no user data\n", sk); | 757 | printk("svc: socket %p: no user data\n", sk); |
758 | } | 758 | } |
759 | 759 | ||
760 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 760 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
761 | wake_up_interruptible_all(sk->sk_sleep); | 761 | wake_up_interruptible_all(sk_sleep(sk)); |
762 | } | 762 | } |
763 | 763 | ||
764 | /* | 764 | /* |
@@ -777,8 +777,8 @@ static void svc_tcp_state_change(struct sock *sk) | |||
777 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 777 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
778 | svc_xprt_enqueue(&svsk->sk_xprt); | 778 | svc_xprt_enqueue(&svsk->sk_xprt); |
779 | } | 779 | } |
780 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 780 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
781 | wake_up_interruptible_all(sk->sk_sleep); | 781 | wake_up_interruptible_all(sk_sleep(sk)); |
782 | } | 782 | } |
783 | 783 | ||
784 | static void svc_tcp_data_ready(struct sock *sk, int count) | 784 | static void svc_tcp_data_ready(struct sock *sk, int count) |
@@ -791,8 +791,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count) | |||
791 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 791 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
792 | svc_xprt_enqueue(&svsk->sk_xprt); | 792 | svc_xprt_enqueue(&svsk->sk_xprt); |
793 | } | 793 | } |
794 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 794 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
795 | wake_up_interruptible(sk->sk_sleep); | 795 | wake_up_interruptible(sk_sleep(sk)); |
796 | } | 796 | } |
797 | 797 | ||
798 | /* | 798 | /* |
@@ -1494,8 +1494,8 @@ static void svc_sock_detach(struct svc_xprt *xprt) | |||
1494 | sk->sk_data_ready = svsk->sk_odata; | 1494 | sk->sk_data_ready = svsk->sk_odata; |
1495 | sk->sk_write_space = svsk->sk_owspace; | 1495 | sk->sk_write_space = svsk->sk_owspace; |
1496 | 1496 | ||
1497 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | 1497 | if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) |
1498 | wake_up_interruptible(sk->sk_sleep); | 1498 | wake_up_interruptible(sk_sleep(sk)); |
1499 | } | 1499 | } |
1500 | 1500 | ||
1501 | /* | 1501 | /* |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 42f09ade0044..699ade68aac1 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -974,7 +974,7 @@ void xprt_reserve(struct rpc_task *task) | |||
974 | 974 | ||
975 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) | 975 | static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) |
976 | { | 976 | { |
977 | return xprt->xid++; | 977 | return (__force __be32)xprt->xid++; |
978 | } | 978 | } |
979 | 979 | ||
980 | static inline void xprt_init_xid(struct rpc_xprt *xprt) | 980 | static inline void xprt_init_xid(struct rpc_xprt *xprt) |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index a3bfd4064912..90a051912c03 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
558 | struct tipc_bearer *unused1, | 558 | struct tipc_bearer *unused1, |
559 | struct tipc_media_addr *unused2) | 559 | struct tipc_media_addr *unused2) |
560 | { | 560 | { |
561 | static int send_count = 0; | ||
562 | |||
563 | int bp_index; | 561 | int bp_index; |
564 | int swap_time; | ||
565 | 562 | ||
566 | /* Prepare buffer for broadcasting (if first time trying to send it) */ | 563 | /* Prepare buffer for broadcasting (if first time trying to send it) */ |
567 | 564 | ||
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
575 | msg_set_mc_netid(msg, tipc_net_id); | 572 | msg_set_mc_netid(msg, tipc_net_id); |
576 | } | 573 | } |
577 | 574 | ||
578 | /* Determine if bearer pairs should be swapped following this attempt */ | ||
579 | |||
580 | if ((swap_time = (++send_count >= 10))) | ||
581 | send_count = 0; | ||
582 | |||
583 | /* Send buffer over bearers until all targets reached */ | 575 | /* Send buffer over bearers until all targets reached */ |
584 | 576 | ||
585 | bcbearer->remains = tipc_cltr_bcast_nodes; | 577 | bcbearer->remains = tipc_cltr_bcast_nodes; |
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
595 | if (bcbearer->remains_new.count == bcbearer->remains.count) | 587 | if (bcbearer->remains_new.count == bcbearer->remains.count) |
596 | continue; /* bearer pair doesn't add anything */ | 588 | continue; /* bearer pair doesn't add anything */ |
597 | 589 | ||
598 | if (!p->publ.blocked && | 590 | if (p->publ.blocked || |
599 | !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { | 591 | p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { |
600 | if (swap_time && s && !s->publ.blocked) | 592 | /* unable to send on primary bearer */ |
601 | goto swap; | 593 | if (!s || s->publ.blocked || |
602 | else | 594 | s->media->send_msg(buf, &s->publ, |
603 | goto update; | 595 | &s->media->bcast_addr)) { |
596 | /* unable to send on either bearer */ | ||
597 | continue; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | if (s) { | ||
602 | bcbearer->bpairs[bp_index].primary = s; | ||
603 | bcbearer->bpairs[bp_index].secondary = p; | ||
604 | } | 604 | } |
605 | 605 | ||
606 | if (!s || s->publ.blocked || | ||
607 | s->media->send_msg(buf, &s->publ, &s->media->bcast_addr)) | ||
608 | continue; /* unable to send using bearer pair */ | ||
609 | swap: | ||
610 | bcbearer->bpairs[bp_index].primary = s; | ||
611 | bcbearer->bpairs[bp_index].secondary = p; | ||
612 | update: | ||
613 | if (bcbearer->remains_new.count == 0) | 606 | if (bcbearer->remains_new.count == 0) |
614 | return 0; | 607 | return 0; |
615 | 608 | ||
diff --git a/net/tipc/core.c b/net/tipc/core.c index 52c571fedbe0..4e84c8431f32 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -49,7 +49,7 @@ | |||
49 | #include "config.h" | 49 | #include "config.h" |
50 | 50 | ||
51 | 51 | ||
52 | #define TIPC_MOD_VER "1.6.4" | 52 | #define TIPC_MOD_VER "2.0.0" |
53 | 53 | ||
54 | #ifndef CONFIG_TIPC_ZONES | 54 | #ifndef CONFIG_TIPC_ZONES |
55 | #define CONFIG_TIPC_ZONES 3 | 55 | #define CONFIG_TIPC_ZONES 3 |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 1a7e4665af80..c76e82e5f982 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -877,7 +877,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
877 | case TIMEOUT_EVT: | 877 | case TIMEOUT_EVT: |
878 | dbg_link("TIM "); | 878 | dbg_link("TIM "); |
879 | if (l_ptr->next_in_no != l_ptr->checkpoint) { | 879 | if (l_ptr->next_in_no != l_ptr->checkpoint) { |
880 | dbg_link("-> WW \n"); | 880 | dbg_link("-> WW\n"); |
881 | l_ptr->state = WORKING_WORKING; | 881 | l_ptr->state = WORKING_WORKING; |
882 | l_ptr->fsm_msg_cnt = 0; | 882 | l_ptr->fsm_msg_cnt = 0; |
883 | l_ptr->checkpoint = l_ptr->next_in_no; | 883 | l_ptr->checkpoint = l_ptr->next_in_no; |
@@ -934,7 +934,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
934 | link_set_timer(l_ptr, cont_intv); | 934 | link_set_timer(l_ptr, cont_intv); |
935 | break; | 935 | break; |
936 | case RESET_MSG: | 936 | case RESET_MSG: |
937 | dbg_link("RES \n"); | 937 | dbg_link("RES\n"); |
938 | dbg_link(" -> RR\n"); | 938 | dbg_link(" -> RR\n"); |
939 | l_ptr->state = RESET_RESET; | 939 | l_ptr->state = RESET_RESET; |
940 | l_ptr->fsm_msg_cnt = 0; | 940 | l_ptr->fsm_msg_cnt = 0; |
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event) | |||
947 | l_ptr->started = 1; | 947 | l_ptr->started = 1; |
948 | /* fall through */ | 948 | /* fall through */ |
949 | case TIMEOUT_EVT: | 949 | case TIMEOUT_EVT: |
950 | dbg_link("TIM \n"); | 950 | dbg_link("TIM\n"); |
951 | tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); | 951 | tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); |
952 | l_ptr->fsm_msg_cnt++; | 952 | l_ptr->fsm_msg_cnt++; |
953 | link_set_timer(l_ptr, cont_intv); | 953 | link_set_timer(l_ptr, cont_intv); |
@@ -1553,7 +1553,7 @@ u32 tipc_link_push_packet(struct link *l_ptr) | |||
1553 | 1553 | ||
1554 | /* Continue retransmission now, if there is anything: */ | 1554 | /* Continue retransmission now, if there is anything: */ |
1555 | 1555 | ||
1556 | if (r_q_size && buf && !skb_cloned(buf)) { | 1556 | if (r_q_size && buf) { |
1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); | 1557 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); |
1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); | 1558 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); |
1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1559 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { |
@@ -1722,15 +1722,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); | 1722 | dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); |
1723 | 1723 | ||
1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { | 1724 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { |
1725 | if (!skb_cloned(buf)) { | 1725 | if (l_ptr->retransm_queue_size == 0) { |
1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); | 1726 | msg_dbg(msg, ">NO_RETR->BCONG>"); |
1727 | dbg_print_link(l_ptr, " "); | 1727 | dbg_print_link(l_ptr, " "); |
1728 | l_ptr->retransm_queue_head = msg_seqno(msg); | 1728 | l_ptr->retransm_queue_head = msg_seqno(msg); |
1729 | l_ptr->retransm_queue_size = retransmits; | 1729 | l_ptr->retransm_queue_size = retransmits; |
1730 | return; | ||
1731 | } else { | 1730 | } else { |
1732 | /* Don't retransmit if driver already has the buffer */ | 1731 | err("Unexpected retransmit on link %s (qsize=%d)\n", |
1732 | l_ptr->name, l_ptr->retransm_queue_size); | ||
1733 | } | 1733 | } |
1734 | return; | ||
1734 | } else { | 1735 | } else { |
1735 | /* Detect repeated retransmit failures on uncongested bearer */ | 1736 | /* Detect repeated retransmit failures on uncongested bearer */ |
1736 | 1737 | ||
@@ -1745,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, | |||
1745 | } | 1746 | } |
1746 | } | 1747 | } |
1747 | 1748 | ||
1748 | while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { | 1749 | while (retransmits && (buf != l_ptr->next_out) && buf) { |
1749 | msg = buf_msg(buf); | 1750 | msg = buf_msg(buf); |
1750 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1751 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1751 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1752 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
@@ -3294,7 +3295,7 @@ static void link_dump_rec_queue(struct link *l_ptr) | |||
3294 | info("buffer %x invalid\n", crs); | 3295 | info("buffer %x invalid\n", crs); |
3295 | return; | 3296 | return; |
3296 | } | 3297 | } |
3297 | msg_dbg(buf_msg(crs), "In rec queue: \n"); | 3298 | msg_dbg(buf_msg(crs), "In rec queue:\n"); |
3298 | crs = crs->next; | 3299 | crs = crs->next; |
3299 | } | 3300 | } |
3300 | } | 3301 | } |
diff --git a/net/tipc/net.c b/net/tipc/net.c index f25b1cdb64eb..d7cd1e064a80 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c | |||
@@ -116,7 +116,7 @@ | |||
116 | */ | 116 | */ |
117 | 117 | ||
118 | DEFINE_RWLOCK(tipc_net_lock); | 118 | DEFINE_RWLOCK(tipc_net_lock); |
119 | struct _zone *tipc_zones[256] = { NULL, }; | 119 | static struct _zone *tipc_zones[256] = { NULL, }; |
120 | struct network tipc_net = { tipc_zones }; | 120 | struct network tipc_net = { tipc_zones }; |
121 | 121 | ||
122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) | 122 | struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) |
@@ -291,6 +291,6 @@ void tipc_net_stop(void) | |||
291 | tipc_bclink_stop(); | 291 | tipc_bclink_stop(); |
292 | net_stop(); | 292 | net_stop(); |
293 | write_unlock_bh(&tipc_net_lock); | 293 | write_unlock_bh(&tipc_net_lock); |
294 | info("Left network mode \n"); | 294 | info("Left network mode\n"); |
295 | } | 295 | } |
296 | 296 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 2c24e7d6d950..17cc394f424f 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -278,7 +278,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr) | |||
278 | n_ptr->link_cnt++; | 278 | n_ptr->link_cnt++; |
279 | return n_ptr; | 279 | return n_ptr; |
280 | } | 280 | } |
281 | err("Attempt to establish second link on <%s> to %s \n", | 281 | err("Attempt to establish second link on <%s> to %s\n", |
282 | l_ptr->b_ptr->publ.name, | 282 | l_ptr->b_ptr->publ.name, |
283 | addr_string_fill(addr_string, l_ptr->addr)); | 283 | addr_string_fill(addr_string, l_ptr->addr)); |
284 | } | 284 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index cfb20b80b3a1..66e889ba48fd 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock, | |||
446 | struct sock *sk = sock->sk; | 446 | struct sock *sk = sock->sk; |
447 | u32 mask; | 447 | u32 mask; |
448 | 448 | ||
449 | poll_wait(file, sk->sk_sleep, wait); | 449 | poll_wait(file, sk_sleep(sk), wait); |
450 | 450 | ||
451 | if (!skb_queue_empty(&sk->sk_receive_queue) || | 451 | if (!skb_queue_empty(&sk->sk_receive_queue) || |
452 | (sock->state == SS_UNCONNECTED) || | 452 | (sock->state == SS_UNCONNECTED) || |
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock, | |||
591 | break; | 591 | break; |
592 | } | 592 | } |
593 | release_sock(sk); | 593 | release_sock(sk); |
594 | res = wait_event_interruptible(*sk->sk_sleep, | 594 | res = wait_event_interruptible(*sk_sleep(sk), |
595 | !tport->congested); | 595 | !tport->congested); |
596 | lock_sock(sk); | 596 | lock_sock(sk); |
597 | if (res) | 597 | if (res) |
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock, | |||
650 | break; | 650 | break; |
651 | } | 651 | } |
652 | release_sock(sk); | 652 | release_sock(sk); |
653 | res = wait_event_interruptible(*sk->sk_sleep, | 653 | res = wait_event_interruptible(*sk_sleep(sk), |
654 | (!tport->congested || !tport->connected)); | 654 | (!tport->congested || !tport->connected)); |
655 | lock_sock(sk); | 655 | lock_sock(sk); |
656 | if (res) | 656 | if (res) |
@@ -931,7 +931,7 @@ restart: | |||
931 | goto exit; | 931 | goto exit; |
932 | } | 932 | } |
933 | release_sock(sk); | 933 | release_sock(sk); |
934 | res = wait_event_interruptible(*sk->sk_sleep, | 934 | res = wait_event_interruptible(*sk_sleep(sk), |
935 | (!skb_queue_empty(&sk->sk_receive_queue) || | 935 | (!skb_queue_empty(&sk->sk_receive_queue) || |
936 | (sock->state == SS_DISCONNECTING))); | 936 | (sock->state == SS_DISCONNECTING))); |
937 | lock_sock(sk); | 937 | lock_sock(sk); |
@@ -1064,7 +1064,7 @@ restart: | |||
1064 | goto exit; | 1064 | goto exit; |
1065 | } | 1065 | } |
1066 | release_sock(sk); | 1066 | release_sock(sk); |
1067 | res = wait_event_interruptible(*sk->sk_sleep, | 1067 | res = wait_event_interruptible(*sk_sleep(sk), |
1068 | (!skb_queue_empty(&sk->sk_receive_queue) || | 1068 | (!skb_queue_empty(&sk->sk_receive_queue) || |
1069 | (sock->state == SS_DISCONNECTING))); | 1069 | (sock->state == SS_DISCONNECTING))); |
1070 | lock_sock(sk); | 1070 | lock_sock(sk); |
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) | |||
1271 | tipc_disconnect_port(tipc_sk_port(sk)); | 1271 | tipc_disconnect_port(tipc_sk_port(sk)); |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | if (waitqueue_active(sk->sk_sleep)) | 1274 | if (waitqueue_active(sk_sleep(sk))) |
1275 | wake_up_interruptible(sk->sk_sleep); | 1275 | wake_up_interruptible(sk_sleep(sk)); |
1276 | return TIPC_OK; | 1276 | return TIPC_OK; |
1277 | } | 1277 | } |
1278 | 1278 | ||
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport) | |||
1343 | { | 1343 | { |
1344 | struct sock *sk = (struct sock *)tport->usr_handle; | 1344 | struct sock *sk = (struct sock *)tport->usr_handle; |
1345 | 1345 | ||
1346 | if (waitqueue_active(sk->sk_sleep)) | 1346 | if (waitqueue_active(sk_sleep(sk))) |
1347 | wake_up_interruptible(sk->sk_sleep); | 1347 | wake_up_interruptible(sk_sleep(sk)); |
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | /** | 1350 | /** |
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, | |||
1426 | /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ | 1426 | /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ |
1427 | 1427 | ||
1428 | release_sock(sk); | 1428 | release_sock(sk); |
1429 | res = wait_event_interruptible_timeout(*sk->sk_sleep, | 1429 | res = wait_event_interruptible_timeout(*sk_sleep(sk), |
1430 | (!skb_queue_empty(&sk->sk_receive_queue) || | 1430 | (!skb_queue_empty(&sk->sk_receive_queue) || |
1431 | (sock->state != SS_CONNECTING)), | 1431 | (sock->state != SS_CONNECTING)), |
1432 | sk->sk_rcvtimeo); | 1432 | sk->sk_rcvtimeo); |
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags) | |||
1521 | goto exit; | 1521 | goto exit; |
1522 | } | 1522 | } |
1523 | release_sock(sk); | 1523 | release_sock(sk); |
1524 | res = wait_event_interruptible(*sk->sk_sleep, | 1524 | res = wait_event_interruptible(*sk_sleep(sk), |
1525 | (!skb_queue_empty(&sk->sk_receive_queue))); | 1525 | (!skb_queue_empty(&sk->sk_receive_queue))); |
1526 | lock_sock(sk); | 1526 | lock_sock(sk); |
1527 | if (res) | 1527 | if (res) |
@@ -1632,8 +1632,8 @@ restart: | |||
1632 | /* Discard any unreceived messages; wake up sleeping tasks */ | 1632 | /* Discard any unreceived messages; wake up sleeping tasks */ |
1633 | 1633 | ||
1634 | discard_rx_queue(sk); | 1634 | discard_rx_queue(sk); |
1635 | if (waitqueue_active(sk->sk_sleep)) | 1635 | if (waitqueue_active(sk_sleep(sk))) |
1636 | wake_up_interruptible(sk->sk_sleep); | 1636 | wake_up_interruptible(sk_sleep(sk)); |
1637 | res = 0; | 1637 | res = 0; |
1638 | break; | 1638 | break; |
1639 | 1639 | ||
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ff123e56114a..ab6eab4c45e2 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
274 | { | 274 | { |
275 | struct subscription *sub; | 275 | struct subscription *sub; |
276 | struct subscription *sub_temp; | 276 | struct subscription *sub_temp; |
277 | __u32 type, lower, upper; | 277 | __u32 type, lower, upper, timeout, filter; |
278 | int found = 0; | 278 | int found = 0; |
279 | 279 | ||
280 | /* Find first matching subscription, exit if not found */ | 280 | /* Find first matching subscription, exit if not found */ |
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
282 | type = ntohl(s->seq.type); | 282 | type = ntohl(s->seq.type); |
283 | lower = ntohl(s->seq.lower); | 283 | lower = ntohl(s->seq.lower); |
284 | upper = ntohl(s->seq.upper); | 284 | upper = ntohl(s->seq.upper); |
285 | timeout = ntohl(s->timeout); | ||
286 | filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL; | ||
285 | 287 | ||
286 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 288 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
287 | subscription_list) { | 289 | subscription_list) { |
288 | if ((type == sub->seq.type) && | 290 | if ((type == sub->seq.type) && |
289 | (lower == sub->seq.lower) && | 291 | (lower == sub->seq.lower) && |
290 | (upper == sub->seq.upper)) { | 292 | (upper == sub->seq.upper) && |
293 | (timeout == sub->timeout) && | ||
294 | (filter == sub->filter) && | ||
295 | !memcmp(s->usr_handle,sub->evt.s.usr_handle, | ||
296 | sizeof(s->usr_handle)) ){ | ||
291 | found = 1; | 297 | found = 1; |
292 | break; | 298 | break; |
293 | } | 299 | } |
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
304 | k_term_timer(&sub->timer); | 310 | k_term_timer(&sub->timer); |
305 | spin_lock_bh(subscriber->lock); | 311 | spin_lock_bh(subscriber->lock); |
306 | } | 312 | } |
307 | dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n", | 313 | dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n", |
308 | sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); | 314 | sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); |
309 | subscr_del(sub); | 315 | subscr_del(sub); |
310 | } | 316 | } |
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, | |||
352 | sub->seq.upper = ntohl(s->seq.upper); | 358 | sub->seq.upper = ntohl(s->seq.upper); |
353 | sub->timeout = ntohl(s->timeout); | 359 | sub->timeout = ntohl(s->timeout); |
354 | sub->filter = ntohl(s->filter); | 360 | sub->filter = ntohl(s->filter); |
355 | if ((!(sub->filter & TIPC_SUB_PORTS) == | 361 | if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) || |
356 | !(sub->filter & TIPC_SUB_SERVICE)) || | ||
357 | (sub->seq.lower > sub->seq.upper)) { | 362 | (sub->seq.lower > sub->seq.upper)) { |
358 | warn("Subscription rejected, illegal request\n"); | 363 | warn("Subscription rejected, illegal request\n"); |
359 | kfree(sub); | 364 | kfree(sub); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3d9122e78f41..87c0360eaa25 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -316,7 +316,7 @@ static void unix_write_space(struct sock *sk) | |||
316 | read_lock(&sk->sk_callback_lock); | 316 | read_lock(&sk->sk_callback_lock); |
317 | if (unix_writable(sk)) { | 317 | if (unix_writable(sk)) { |
318 | if (sk_has_sleeper(sk)) | 318 | if (sk_has_sleeper(sk)) |
319 | wake_up_interruptible_sync(sk->sk_sleep); | 319 | wake_up_interruptible_sync(sk_sleep(sk)); |
320 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); | 320 | sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); |
321 | } | 321 | } |
322 | read_unlock(&sk->sk_callback_lock); | 322 | read_unlock(&sk->sk_callback_lock); |
@@ -1736,7 +1736,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo) | |||
1736 | unix_state_lock(sk); | 1736 | unix_state_lock(sk); |
1737 | 1737 | ||
1738 | for (;;) { | 1738 | for (;;) { |
1739 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | 1739 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1740 | 1740 | ||
1741 | if (!skb_queue_empty(&sk->sk_receive_queue) || | 1741 | if (!skb_queue_empty(&sk->sk_receive_queue) || |
1742 | sk->sk_err || | 1742 | sk->sk_err || |
@@ -1752,7 +1752,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo) | |||
1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1753 | } | 1753 | } |
1754 | 1754 | ||
1755 | finish_wait(sk->sk_sleep, &wait); | 1755 | finish_wait(sk_sleep(sk), &wait); |
1756 | unix_state_unlock(sk); | 1756 | unix_state_unlock(sk); |
1757 | return timeo; | 1757 | return timeo; |
1758 | } | 1758 | } |
@@ -1991,7 +1991,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table | |||
1991 | struct sock *sk = sock->sk; | 1991 | struct sock *sk = sock->sk; |
1992 | unsigned int mask; | 1992 | unsigned int mask; |
1993 | 1993 | ||
1994 | sock_poll_wait(file, sk->sk_sleep, wait); | 1994 | sock_poll_wait(file, sk_sleep(sk), wait); |
1995 | mask = 0; | 1995 | mask = 0; |
1996 | 1996 | ||
1997 | /* exceptional events? */ | 1997 | /* exceptional events? */ |
@@ -2028,7 +2028,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, | |||
2028 | struct sock *sk = sock->sk, *other; | 2028 | struct sock *sk = sock->sk, *other; |
2029 | unsigned int mask, writable; | 2029 | unsigned int mask, writable; |
2030 | 2030 | ||
2031 | sock_poll_wait(file, sk->sk_sleep, wait); | 2031 | sock_poll_wait(file, sk_sleep(sk), wait); |
2032 | mask = 0; | 2032 | mask = 0; |
2033 | 2033 | ||
2034 | /* exceptional events? */ | 2034 | /* exceptional events? */ |
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c index 4dc82a54ba30..68bedf3e5443 100644 --- a/net/wimax/op-reset.c +++ b/net/wimax/op-reset.c | |||
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | |||
110 | { | 110 | { |
111 | int result, ifindex; | 111 | int result, ifindex; |
112 | struct wimax_dev *wimax_dev; | 112 | struct wimax_dev *wimax_dev; |
113 | struct device *dev; | ||
114 | 113 | ||
115 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | 114 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); |
116 | result = -ENODEV; | 115 | result = -ENODEV; |
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info) | |||
123 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | 122 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); |
124 | if (wimax_dev == NULL) | 123 | if (wimax_dev == NULL) |
125 | goto error_no_wimax_dev; | 124 | goto error_no_wimax_dev; |
126 | dev = wimax_dev_to_dev(wimax_dev); | ||
127 | /* Execute the operation and send the result back to user space */ | 125 | /* Execute the operation and send the result back to user space */ |
128 | result = wimax_reset(wimax_dev); | 126 | result = wimax_reset(wimax_dev); |
129 | dev_put(wimax_dev->net_dev); | 127 | dev_put(wimax_dev->net_dev); |
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c index 11ad3356eb56..aff8776e2d41 100644 --- a/net/wimax/op-state-get.c +++ b/net/wimax/op-state-get.c | |||
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | |||
53 | { | 53 | { |
54 | int result, ifindex; | 54 | int result, ifindex; |
55 | struct wimax_dev *wimax_dev; | 55 | struct wimax_dev *wimax_dev; |
56 | struct device *dev; | ||
57 | 56 | ||
58 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | 57 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); |
59 | result = -ENODEV; | 58 | result = -ENODEV; |
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | |||
66 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | 65 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); |
67 | if (wimax_dev == NULL) | 66 | if (wimax_dev == NULL) |
68 | goto error_no_wimax_dev; | 67 | goto error_no_wimax_dev; |
69 | dev = wimax_dev_to_dev(wimax_dev); | ||
70 | /* Execute the operation and send the result back to user space */ | 68 | /* Execute the operation and send the result back to user space */ |
71 | result = wimax_state_get(wimax_dev); | 69 | result = wimax_state_get(wimax_dev); |
72 | dev_put(wimax_dev->net_dev); | 70 | dev_put(wimax_dev->net_dev); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index d52da913145a..b2234b436ead 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -293,13 +293,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
293 | const u8 *bssid, | 293 | const u8 *bssid, |
294 | const u8 *ssid, int ssid_len, | 294 | const u8 *ssid, int ssid_len, |
295 | const u8 *ie, int ie_len, | 295 | const u8 *ie, int ie_len, |
296 | const u8 *key, int key_len, int key_idx); | 296 | const u8 *key, int key_len, int key_idx, |
297 | bool local_state_change); | ||
297 | int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | 298 | int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, |
298 | struct net_device *dev, struct ieee80211_channel *chan, | 299 | struct net_device *dev, struct ieee80211_channel *chan, |
299 | enum nl80211_auth_type auth_type, const u8 *bssid, | 300 | enum nl80211_auth_type auth_type, const u8 *bssid, |
300 | const u8 *ssid, int ssid_len, | 301 | const u8 *ssid, int ssid_len, |
301 | const u8 *ie, int ie_len, | 302 | const u8 *ie, int ie_len, |
302 | const u8 *key, int key_len, int key_idx); | 303 | const u8 *key, int key_len, int key_idx, |
304 | bool local_state_change); | ||
303 | int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | 305 | int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, |
304 | struct net_device *dev, | 306 | struct net_device *dev, |
305 | struct ieee80211_channel *chan, | 307 | struct ieee80211_channel *chan, |
@@ -315,13 +317,16 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | |||
315 | struct cfg80211_crypto_settings *crypt); | 317 | struct cfg80211_crypto_settings *crypt); |
316 | int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | 318 | int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, |
317 | struct net_device *dev, const u8 *bssid, | 319 | struct net_device *dev, const u8 *bssid, |
318 | const u8 *ie, int ie_len, u16 reason); | 320 | const u8 *ie, int ie_len, u16 reason, |
321 | bool local_state_change); | ||
319 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | 322 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, |
320 | struct net_device *dev, const u8 *bssid, | 323 | struct net_device *dev, const u8 *bssid, |
321 | const u8 *ie, int ie_len, u16 reason); | 324 | const u8 *ie, int ie_len, u16 reason, |
325 | bool local_state_change); | ||
322 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | 326 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, |
323 | struct net_device *dev, const u8 *bssid, | 327 | struct net_device *dev, const u8 *bssid, |
324 | const u8 *ie, int ie_len, u16 reason); | 328 | const u8 *ie, int ie_len, u16 reason, |
329 | bool local_state_change); | ||
325 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | 330 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, |
326 | struct net_device *dev); | 331 | struct net_device *dev); |
327 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | 332 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 22139fa46115..48ead6f0426d 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -378,7 +378,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
378 | const u8 *bssid, | 378 | const u8 *bssid, |
379 | const u8 *ssid, int ssid_len, | 379 | const u8 *ssid, int ssid_len, |
380 | const u8 *ie, int ie_len, | 380 | const u8 *ie, int ie_len, |
381 | const u8 *key, int key_len, int key_idx) | 381 | const u8 *key, int key_len, int key_idx, |
382 | bool local_state_change) | ||
382 | { | 383 | { |
383 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 384 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
384 | struct cfg80211_auth_request req; | 385 | struct cfg80211_auth_request req; |
@@ -408,6 +409,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
408 | 409 | ||
409 | memset(&req, 0, sizeof(req)); | 410 | memset(&req, 0, sizeof(req)); |
410 | 411 | ||
412 | req.local_state_change = local_state_change; | ||
411 | req.ie = ie; | 413 | req.ie = ie; |
412 | req.ie_len = ie_len; | 414 | req.ie_len = ie_len; |
413 | req.auth_type = auth_type; | 415 | req.auth_type = auth_type; |
@@ -434,12 +436,18 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
434 | goto out; | 436 | goto out; |
435 | } | 437 | } |
436 | 438 | ||
437 | wdev->authtry_bsses[slot] = bss; | 439 | if (local_state_change) |
440 | wdev->auth_bsses[slot] = bss; | ||
441 | else | ||
442 | wdev->authtry_bsses[slot] = bss; | ||
438 | cfg80211_hold_bss(bss); | 443 | cfg80211_hold_bss(bss); |
439 | 444 | ||
440 | err = rdev->ops->auth(&rdev->wiphy, dev, &req); | 445 | err = rdev->ops->auth(&rdev->wiphy, dev, &req); |
441 | if (err) { | 446 | if (err) { |
442 | wdev->authtry_bsses[slot] = NULL; | 447 | if (local_state_change) |
448 | wdev->auth_bsses[slot] = NULL; | ||
449 | else | ||
450 | wdev->authtry_bsses[slot] = NULL; | ||
443 | cfg80211_unhold_bss(bss); | 451 | cfg80211_unhold_bss(bss); |
444 | } | 452 | } |
445 | 453 | ||
@@ -454,14 +462,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
454 | enum nl80211_auth_type auth_type, const u8 *bssid, | 462 | enum nl80211_auth_type auth_type, const u8 *bssid, |
455 | const u8 *ssid, int ssid_len, | 463 | const u8 *ssid, int ssid_len, |
456 | const u8 *ie, int ie_len, | 464 | const u8 *ie, int ie_len, |
457 | const u8 *key, int key_len, int key_idx) | 465 | const u8 *key, int key_len, int key_idx, |
466 | bool local_state_change) | ||
458 | { | 467 | { |
459 | int err; | 468 | int err; |
460 | 469 | ||
461 | wdev_lock(dev->ieee80211_ptr); | 470 | wdev_lock(dev->ieee80211_ptr); |
462 | err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, | 471 | err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, |
463 | ssid, ssid_len, ie, ie_len, | 472 | ssid, ssid_len, ie, ie_len, |
464 | key, key_len, key_idx); | 473 | key, key_len, key_idx, local_state_change); |
465 | wdev_unlock(dev->ieee80211_ptr); | 474 | wdev_unlock(dev->ieee80211_ptr); |
466 | 475 | ||
467 | return err; | 476 | return err; |
@@ -555,7 +564,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | |||
555 | 564 | ||
556 | int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | 565 | int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, |
557 | struct net_device *dev, const u8 *bssid, | 566 | struct net_device *dev, const u8 *bssid, |
558 | const u8 *ie, int ie_len, u16 reason) | 567 | const u8 *ie, int ie_len, u16 reason, |
568 | bool local_state_change) | ||
559 | { | 569 | { |
560 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 570 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
561 | struct cfg80211_deauth_request req; | 571 | struct cfg80211_deauth_request req; |
@@ -565,6 +575,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | |||
565 | 575 | ||
566 | memset(&req, 0, sizeof(req)); | 576 | memset(&req, 0, sizeof(req)); |
567 | req.reason_code = reason; | 577 | req.reason_code = reason; |
578 | req.local_state_change = local_state_change; | ||
568 | req.ie = ie; | 579 | req.ie = ie; |
569 | req.ie_len = ie_len; | 580 | req.ie_len = ie_len; |
570 | if (wdev->current_bss && | 581 | if (wdev->current_bss && |
@@ -591,13 +602,15 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | |||
591 | 602 | ||
592 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | 603 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, |
593 | struct net_device *dev, const u8 *bssid, | 604 | struct net_device *dev, const u8 *bssid, |
594 | const u8 *ie, int ie_len, u16 reason) | 605 | const u8 *ie, int ie_len, u16 reason, |
606 | bool local_state_change) | ||
595 | { | 607 | { |
596 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 608 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
597 | int err; | 609 | int err; |
598 | 610 | ||
599 | wdev_lock(wdev); | 611 | wdev_lock(wdev); |
600 | err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason); | 612 | err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason, |
613 | local_state_change); | ||
601 | wdev_unlock(wdev); | 614 | wdev_unlock(wdev); |
602 | 615 | ||
603 | return err; | 616 | return err; |
@@ -605,7 +618,8 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | |||
605 | 618 | ||
606 | static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | 619 | static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, |
607 | struct net_device *dev, const u8 *bssid, | 620 | struct net_device *dev, const u8 *bssid, |
608 | const u8 *ie, int ie_len, u16 reason) | 621 | const u8 *ie, int ie_len, u16 reason, |
622 | bool local_state_change) | ||
609 | { | 623 | { |
610 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 624 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
611 | struct cfg80211_disassoc_request req; | 625 | struct cfg80211_disassoc_request req; |
@@ -620,6 +634,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | |||
620 | 634 | ||
621 | memset(&req, 0, sizeof(req)); | 635 | memset(&req, 0, sizeof(req)); |
622 | req.reason_code = reason; | 636 | req.reason_code = reason; |
637 | req.local_state_change = local_state_change; | ||
623 | req.ie = ie; | 638 | req.ie = ie; |
624 | req.ie_len = ie_len; | 639 | req.ie_len = ie_len; |
625 | if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) | 640 | if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) |
@@ -632,13 +647,15 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | |||
632 | 647 | ||
633 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | 648 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, |
634 | struct net_device *dev, const u8 *bssid, | 649 | struct net_device *dev, const u8 *bssid, |
635 | const u8 *ie, int ie_len, u16 reason) | 650 | const u8 *ie, int ie_len, u16 reason, |
651 | bool local_state_change) | ||
636 | { | 652 | { |
637 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 653 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
638 | int err; | 654 | int err; |
639 | 655 | ||
640 | wdev_lock(wdev); | 656 | wdev_lock(wdev); |
641 | err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason); | 657 | err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason, |
658 | local_state_change); | ||
642 | wdev_unlock(wdev); | 659 | wdev_unlock(wdev); |
643 | 660 | ||
644 | return err; | 661 | return err; |
@@ -895,3 +912,16 @@ void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, | |||
895 | nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); | 912 | nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp); |
896 | } | 913 | } |
897 | EXPORT_SYMBOL(cfg80211_action_tx_status); | 914 | EXPORT_SYMBOL(cfg80211_action_tx_status); |
915 | |||
916 | void cfg80211_cqm_rssi_notify(struct net_device *dev, | ||
917 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
918 | gfp_t gfp) | ||
919 | { | ||
920 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
921 | struct wiphy *wiphy = wdev->wiphy; | ||
922 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
923 | |||
924 | /* Indicate roaming trigger event to user space */ | ||
925 | nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); | ||
926 | } | ||
927 | EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); | ||
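cfg80211_cqm_rssi_notify() is the driver-facing half of the new connection quality monitoring support: once user space has configured a threshold, the driver reports each crossing and cfg80211 forwards it as an NL80211_CMD_NOTIFY_CQM event. A hedged sketch of a caller; the function and its parameters are invented for illustration:

```c
/* Illustrative only: a driver would invoke this from its RX or
 * firmware-event path when the configured RSSI threshold is crossed. */
static void example_report_rssi(struct net_device *dev, s32 rssi, s32 thold)
{
	enum nl80211_cqm_rssi_threshold_event ev;

	ev = (rssi < thold) ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW
			    : NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
	cfg80211_cqm_rssi_notify(dev, ev, GFP_ATOMIC);
}
```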
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 030cf153bea2..356a84a5daee 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -150,6 +150,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { | |||
150 | .len = IEEE80211_MAX_DATA_LEN }, | 150 | .len = IEEE80211_MAX_DATA_LEN }, |
151 | [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, | 151 | [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, |
152 | [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, | 152 | [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, |
153 | [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, | ||
154 | [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, | ||
153 | }; | 155 | }; |
154 | 156 | ||
155 | /* policy for the attributes */ | 157 | /* policy for the attributes */ |
@@ -2096,7 +2098,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) | |||
2096 | goto out_rtnl; | 2098 | goto out_rtnl; |
2097 | 2099 | ||
2098 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && | 2100 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && |
2099 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { | 2101 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && |
2102 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { | ||
2100 | err = -EINVAL; | 2103 | err = -EINVAL; |
2101 | goto out; | 2104 | goto out; |
2102 | } | 2105 | } |
@@ -3392,6 +3395,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
3392 | int err, ssid_len, ie_len = 0; | 3395 | int err, ssid_len, ie_len = 0; |
3393 | enum nl80211_auth_type auth_type; | 3396 | enum nl80211_auth_type auth_type; |
3394 | struct key_parse key; | 3397 | struct key_parse key; |
3398 | bool local_state_change; | ||
3395 | 3399 | ||
3396 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3400 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3397 | return -EINVAL; | 3401 | return -EINVAL; |
@@ -3470,9 +3474,12 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
3470 | goto out; | 3474 | goto out; |
3471 | } | 3475 | } |
3472 | 3476 | ||
3477 | local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; | ||
3478 | |||
3473 | err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, | 3479 | err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, |
3474 | ssid, ssid_len, ie, ie_len, | 3480 | ssid, ssid_len, ie, ie_len, |
3475 | key.p.key, key.p.key_len, key.idx); | 3481 | key.p.key, key.p.key_len, key.idx, |
3482 | local_state_change); | ||
3476 | 3483 | ||
3477 | out: | 3484 | out: |
3478 | cfg80211_unlock_rdev(rdev); | 3485 | cfg80211_unlock_rdev(rdev); |
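NL80211_ATTR_LOCAL_STATE_CHANGE is declared NLA_FLAG in the policy above, so it carries no payload; its mere presence asks the MLME layer to update local state without sending the corresponding management frame over the air. The deauthenticate and disassociate handlers below repeat the same presence test. Both sides of the flag, sketched with libnl-style helper names that are not part of this diff:

```c
/* Requester side: flag attributes are put with no value. */
nla_put_flag(msg, NL80211_ATTR_LOCAL_STATE_CHANGE);

/* Kernel side, as in the handlers in this file: presence becomes bool. */
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
```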
@@ -3649,6 +3656,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) | |||
3649 | const u8 *ie = NULL, *bssid; | 3656 | const u8 *ie = NULL, *bssid; |
3650 | int err, ie_len = 0; | 3657 | int err, ie_len = 0; |
3651 | u16 reason_code; | 3658 | u16 reason_code; |
3659 | bool local_state_change; | ||
3652 | 3660 | ||
3653 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3661 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3654 | return -EINVAL; | 3662 | return -EINVAL; |
@@ -3694,7 +3702,10 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) | |||
3694 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 3702 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
3695 | } | 3703 | } |
3696 | 3704 | ||
3697 | err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code); | 3705 | local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; |
3706 | |||
3707 | err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, | ||
3708 | local_state_change); | ||
3698 | 3709 | ||
3699 | out: | 3710 | out: |
3700 | cfg80211_unlock_rdev(rdev); | 3711 | cfg80211_unlock_rdev(rdev); |
@@ -3711,6 +3722,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) | |||
3711 | const u8 *ie = NULL, *bssid; | 3722 | const u8 *ie = NULL, *bssid; |
3712 | int err, ie_len = 0; | 3723 | int err, ie_len = 0; |
3713 | u16 reason_code; | 3724 | u16 reason_code; |
3725 | bool local_state_change; | ||
3714 | 3726 | ||
3715 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3727 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3716 | return -EINVAL; | 3728 | return -EINVAL; |
@@ -3756,7 +3768,10 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) | |||
3756 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 3768 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
3757 | } | 3769 | } |
3758 | 3770 | ||
3759 | err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code); | 3771 | local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; |
3772 | |||
3773 | err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, | ||
3774 | local_state_change); | ||
3760 | 3775 | ||
3761 | out: | 3776 | out: |
3762 | cfg80211_unlock_rdev(rdev); | 3777 | cfg80211_unlock_rdev(rdev); |
@@ -4779,6 +4794,84 @@ unlock_rtnl: | |||
4779 | return err; | 4794 | return err; |
4780 | } | 4795 | } |
4781 | 4796 | ||
4797 | static struct nla_policy | ||
4798 | nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = { | ||
4799 | [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 }, | ||
4800 | [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 }, | ||
4801 | [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 }, | ||
4802 | }; | ||
4803 | |||
4804 | static int nl80211_set_cqm_rssi(struct genl_info *info, | ||
4805 | s32 threshold, u32 hysteresis) | ||
4806 | { | ||
4807 | struct cfg80211_registered_device *rdev; | ||
4808 | struct wireless_dev *wdev; | ||
4809 | struct net_device *dev; | ||
4810 | int err; | ||
4811 | |||
4812 | if (threshold > 0) | ||
4813 | return -EINVAL; | ||
4814 | |||
4815 | rtnl_lock(); | ||
4816 | |||
4817 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); | ||
4818 | if (err) | ||
4819 | goto unlock_rdev; | ||
4820 | |||
4821 | wdev = dev->ieee80211_ptr; | ||
4822 | |||
4823 | if (!rdev->ops->set_cqm_rssi_config) { | ||
4824 | err = -EOPNOTSUPP; | ||
4825 | goto unlock_rdev; | ||
4826 | } | ||
4827 | |||
4828 | if (wdev->iftype != NL80211_IFTYPE_STATION) { | ||
4829 | err = -EOPNOTSUPP; | ||
4830 | goto unlock_rdev; | ||
4831 | } | ||
4832 | |||
4833 | err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev, | ||
4834 | threshold, hysteresis); | ||
4835 | |||
4836 | unlock_rdev: | ||
4837 | cfg80211_unlock_rdev(rdev); | ||
4838 | dev_put(dev); | ||
4839 | rtnl_unlock(); | ||
4840 | |||
4841 | return err; | ||
4842 | } | ||
4843 | |||
4844 | static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) | ||
4845 | { | ||
4846 | struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1]; | ||
4847 | struct nlattr *cqm; | ||
4848 | int err; | ||
4849 | |||
4850 | cqm = info->attrs[NL80211_ATTR_CQM]; | ||
4851 | if (!cqm) { | ||
4852 | err = -EINVAL; | ||
4853 | goto out; | ||
4854 | } | ||
4855 | |||
4856 | err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm, | ||
4857 | nl80211_attr_cqm_policy); | ||
4858 | if (err) | ||
4859 | goto out; | ||
4860 | |||
4861 | if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] && | ||
4862 | attrs[NL80211_ATTR_CQM_RSSI_HYST]) { | ||
4863 | s32 threshold; | ||
4864 | u32 hysteresis; | ||
4865 | threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); | ||
4866 | hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); | ||
4867 | err = nl80211_set_cqm_rssi(info, threshold, hysteresis); | ||
4868 | } else | ||
4869 | err = -EINVAL; | ||
4870 | |||
4871 | out: | ||
4872 | return err; | ||
4873 | } | ||
4874 | |||
4782 | static struct genl_ops nl80211_ops[] = { | 4875 | static struct genl_ops nl80211_ops[] = { |
4783 | { | 4876 | { |
4784 | .cmd = NL80211_CMD_GET_WIPHY, | 4877 | .cmd = NL80211_CMD_GET_WIPHY, |
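nl80211_set_cqm() requires both RSSI attributes inside a single NL80211_ATTR_CQM nest and re-validates that nest against its own sub-policy. A hedged sketch of how a requester lays out the message, using libnl-style helpers (message header setup omitted; not part of this diff):

```c
/* Ask for an event at -70 dBm with 2 dB of hysteresis. The threshold
 * travels as a u32 and is reinterpreted as s32 by the kernel, which
 * then rejects any value above 0. */
struct nlattr *cqm = nla_nest_start(msg, NL80211_ATTR_CQM);

nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THOLD, (u32)-70);
nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_HYST, 2);
nla_nest_end(msg, cqm);
```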
@@ -5083,6 +5176,12 @@ static struct genl_ops nl80211_ops[] = { | |||
5083 | .policy = nl80211_policy, | 5176 | .policy = nl80211_policy, |
5084 | /* can be retrieved by unprivileged users */ | 5177 | /* can be retrieved by unprivileged users */ |
5085 | }, | 5178 | }, |
5179 | { | ||
5180 | .cmd = NL80211_CMD_SET_CQM, | ||
5181 | .doit = nl80211_set_cqm, | ||
5182 | .policy = nl80211_policy, | ||
5183 | .flags = GENL_ADMIN_PERM, | ||
5184 | }, | ||
5086 | }; | 5185 | }; |
5087 | 5186 | ||
5088 | static struct genl_multicast_group nl80211_mlme_mcgrp = { | 5187 | static struct genl_multicast_group nl80211_mlme_mcgrp = { |
@@ -5833,6 +5932,52 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, | |||
5833 | nlmsg_free(msg); | 5932 | nlmsg_free(msg); |
5834 | } | 5933 | } |
5835 | 5934 | ||
5935 | void | ||
5936 | nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, | ||
5937 | struct net_device *netdev, | ||
5938 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
5939 | gfp_t gfp) | ||
5940 | { | ||
5941 | struct sk_buff *msg; | ||
5942 | struct nlattr *pinfoattr; | ||
5943 | void *hdr; | ||
5944 | |||
5945 | msg = nlmsg_new(NLMSG_GOODSIZE, gfp); | ||
5946 | if (!msg) | ||
5947 | return; | ||
5948 | |||
5949 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); | ||
5950 | if (!hdr) { | ||
5951 | nlmsg_free(msg); | ||
5952 | return; | ||
5953 | } | ||
5954 | |||
5955 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | ||
5956 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | ||
5957 | |||
5958 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); | ||
5959 | if (!pinfoattr) | ||
5960 | goto nla_put_failure; | ||
5961 | |||
5962 | NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, | ||
5963 | rssi_event); | ||
5964 | |||
5965 | nla_nest_end(msg, pinfoattr); | ||
5966 | |||
5967 | if (genlmsg_end(msg, hdr) < 0) { | ||
5968 | nlmsg_free(msg); | ||
5969 | return; | ||
5970 | } | ||
5971 | |||
5972 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | ||
5973 | nl80211_mlme_mcgrp.id, gfp); | ||
5974 | return; | ||
5975 | |||
5976 | nla_put_failure: | ||
5977 | genlmsg_cancel(msg, hdr); | ||
5978 | nlmsg_free(msg); | ||
5979 | } | ||
5980 | |||
5836 | static int nl80211_netlink_notify(struct notifier_block * nb, | 5981 | static int nl80211_netlink_notify(struct notifier_block * nb, |
5837 | unsigned long state, | 5982 | unsigned long state, |
5838 | void *_notify) | 5983 | void *_notify) |
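On the driver side, the request dispatched by nl80211_set_cqm_rssi() lands on the new set_cqm_rssi_config member of struct cfg80211_ops, whose presence is probed before the call. A minimal stub showing the contract; a real implementation would program the hardware or firmware RSSI monitor:

```c
/* By the time this runs, nl80211 has verified thold <= 0 (dBm) and
 * that the interface is a station. */
static int example_set_cqm_rssi_config(struct wiphy *wiphy,
				       struct net_device *dev,
				       s32 rssi_thold, u32 rssi_hyst)
{
	/* hand rssi_thold/rssi_hyst to the device here */
	return 0;
}
```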
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 4ca511102c6c..2ad7fbc7d9f1 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h | |||
@@ -82,4 +82,10 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, | |||
82 | const u8 *buf, size_t len, bool ack, | 82 | const u8 *buf, size_t len, bool ack, |
83 | gfp_t gfp); | 83 | gfp_t gfp); |
84 | 84 | ||
85 | void | ||
86 | nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, | ||
87 | struct net_device *netdev, | ||
88 | enum nl80211_cqm_rssi_threshold_event rssi_event, | ||
89 | gfp_t gfp); | ||
90 | |||
85 | #endif /* __NET_WIRELESS_NL80211_H */ | 91 | #endif /* __NET_WIRELESS_NL80211_H */ |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 422da20d1e5b..8f0d97dd3109 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -2356,10 +2356,10 @@ static void print_regdomain(const struct ieee80211_regdomain *rd) | |||
2356 | rdev->country_ie_alpha2[1]); | 2356 | rdev->country_ie_alpha2[1]); |
2357 | } else | 2357 | } else |
2358 | printk(KERN_INFO "cfg80211: Current regulatory " | 2358 | printk(KERN_INFO "cfg80211: Current regulatory " |
2359 | "domain intersected: \n"); | 2359 | "domain intersected:\n"); |
2360 | } else | 2360 | } else |
2361 | printk(KERN_INFO "cfg80211: Current regulatory " | 2361 | printk(KERN_INFO "cfg80211: Current regulatory " |
2362 | "domain intersected: \n"); | 2362 | "domain intersected:\n"); |
2363 | } else if (is_world_regdom(rd->alpha2)) | 2363 | } else if (is_world_regdom(rd->alpha2)) |
2364 | printk(KERN_INFO "cfg80211: World regulatory " | 2364 | printk(KERN_INFO "cfg80211: World regulatory " |
2365 | "domain updated:\n"); | 2365 | "domain updated:\n"); |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f4dfd5f5f2ea..8ddf5ae0dd03 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -171,7 +171,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) | |||
171 | params->ssid, params->ssid_len, | 171 | params->ssid, params->ssid_len, |
172 | NULL, 0, | 172 | NULL, 0, |
173 | params->key, params->key_len, | 173 | params->key, params->key_len, |
174 | params->key_idx); | 174 | params->key_idx, false); |
175 | case CFG80211_CONN_ASSOCIATE_NEXT: | 175 | case CFG80211_CONN_ASSOCIATE_NEXT: |
176 | BUG_ON(!rdev->ops->assoc); | 176 | BUG_ON(!rdev->ops->assoc); |
177 | wdev->conn->state = CFG80211_CONN_ASSOCIATING; | 177 | wdev->conn->state = CFG80211_CONN_ASSOCIATING; |
@@ -186,12 +186,13 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) | |||
186 | if (err) | 186 | if (err) |
187 | __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, | 187 | __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, |
188 | NULL, 0, | 188 | NULL, 0, |
189 | WLAN_REASON_DEAUTH_LEAVING); | 189 | WLAN_REASON_DEAUTH_LEAVING, |
190 | false); | ||
190 | return err; | 191 | return err; |
191 | case CFG80211_CONN_DEAUTH_ASSOC_FAIL: | 192 | case CFG80211_CONN_DEAUTH_ASSOC_FAIL: |
192 | __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, | 193 | __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, |
193 | NULL, 0, | 194 | NULL, 0, |
194 | WLAN_REASON_DEAUTH_LEAVING); | 195 | WLAN_REASON_DEAUTH_LEAVING, false); |
195 | /* return an error so that we call __cfg80211_connect_result() */ | 196 | /* return an error so that we call __cfg80211_connect_result() */ |
196 | return -EINVAL; | 197 | return -EINVAL; |
197 | default: | 198 | default: |
@@ -517,12 +518,16 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
517 | ev->type = EVENT_CONNECT_RESULT; | 518 | ev->type = EVENT_CONNECT_RESULT; |
518 | if (bssid) | 519 | if (bssid) |
519 | memcpy(ev->cr.bssid, bssid, ETH_ALEN); | 520 | memcpy(ev->cr.bssid, bssid, ETH_ALEN); |
520 | ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); | 521 | if (req_ie_len) { |
521 | ev->cr.req_ie_len = req_ie_len; | 522 | ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); |
522 | memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); | 523 | ev->cr.req_ie_len = req_ie_len; |
523 | ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; | 524 | memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); |
524 | ev->cr.resp_ie_len = resp_ie_len; | 525 | } |
525 | memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); | 526 | if (resp_ie_len) { |
527 | ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; | ||
528 | ev->cr.resp_ie_len = resp_ie_len; | ||
529 | memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); | ||
530 | } | ||
526 | ev->cr.status = status; | 531 | ev->cr.status = status; |
527 | 532 | ||
528 | spin_lock_irqsave(&wdev->event_lock, flags); | 533 | spin_lock_irqsave(&wdev->event_lock, flags); |
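The connect-result event is one allocation with the IE blobs packed behind the struct, and the old code set the pointers and ran memcpy() even when an IE buffer was NULL with length zero. Guarding on the lengths leaves absent IEs as NULL/0 (the buffer is zeroed at allocation earlier in the function, outside this hunk), so consumers can simply test the pointers. The layout assumed by the offsets:

```c
/*
 * ev -> +------------------------+
 *       | struct cfg80211_event  |  sizeof(*ev)
 *       +------------------------+  <- ev->cr.req_ie (if req_ie_len)
 *       | req_ie bytes           |
 *       +------------------------+  <- ev->cr.resp_ie (if resp_ie_len)
 *       | resp_ie bytes          |
 *       +------------------------+
 */
```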
@@ -676,7 +681,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
676 | continue; | 681 | continue; |
677 | bssid = wdev->auth_bsses[i]->pub.bssid; | 682 | bssid = wdev->auth_bsses[i]->pub.bssid; |
678 | ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, | 683 | ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, |
679 | WLAN_REASON_DEAUTH_LEAVING); | 684 | WLAN_REASON_DEAUTH_LEAVING, |
685 | false); | ||
680 | WARN(ret, "deauth failed: %d\n", ret); | 686 | WARN(ret, "deauth failed: %d\n", ret); |
681 | } | 687 | } |
682 | } | 688 | } |
@@ -935,7 +941,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, | |||
935 | /* wdev->conn->params.bssid must be set if > SCANNING */ | 941 | /* wdev->conn->params.bssid must be set if > SCANNING */ |
936 | err = __cfg80211_mlme_deauth(rdev, dev, | 942 | err = __cfg80211_mlme_deauth(rdev, dev, |
937 | wdev->conn->params.bssid, | 943 | wdev->conn->params.bssid, |
938 | NULL, 0, reason); | 944 | NULL, 0, reason, false); |
939 | if (err) | 945 | if (err) |
940 | return err; | 946 | return err; |
941 | } else { | 947 | } else { |
@@ -991,7 +997,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx) | |||
991 | 997 | ||
992 | memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); | 998 | memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); |
993 | if (__cfg80211_mlme_deauth(rdev, dev, bssid, | 999 | if (__cfg80211_mlme_deauth(rdev, dev, bssid, |
994 | NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) { | 1000 | NULL, 0, WLAN_REASON_DEAUTH_LEAVING, |
1001 | false)) { | ||
995 | /* whatever -- assume gone anyway */ | 1002 | /* whatever -- assume gone anyway */ |
996 | cfg80211_unhold_bss(wdev->auth_bsses[idx]); | 1003 | cfg80211_unhold_bss(wdev->auth_bsses[idx]); |
997 | cfg80211_put_bss(&wdev->auth_bsses[idx]->pub); | 1004 | cfg80211_put_bss(&wdev->auth_bsses[idx]->pub); |
diff --git a/net/wireless/util.c b/net/wireless/util.c index d3574a4eb3ba..3416373a9c0c 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -331,11 +331,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | |||
331 | if (iftype == NL80211_IFTYPE_MESH_POINT) { | 331 | if (iftype == NL80211_IFTYPE_MESH_POINT) { |
332 | struct ieee80211s_hdr *meshdr = | 332 | struct ieee80211s_hdr *meshdr = |
333 | (struct ieee80211s_hdr *) (skb->data + hdrlen); | 333 | (struct ieee80211s_hdr *) (skb->data + hdrlen); |
334 | hdrlen += ieee80211_get_mesh_hdrlen(meshdr); | 334 | /* make sure meshdr->flags is on the linear part */ |
335 | if (!pskb_may_pull(skb, hdrlen + 1)) | ||
336 | return -1; | ||
335 | if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { | 337 | if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { |
336 | memcpy(dst, meshdr->eaddr1, ETH_ALEN); | 338 | skb_copy_bits(skb, hdrlen + |
337 | memcpy(src, meshdr->eaddr2, ETH_ALEN); | 339 | offsetof(struct ieee80211s_hdr, eaddr1), |
340 | dst, ETH_ALEN); | ||
341 | skb_copy_bits(skb, hdrlen + | ||
342 | offsetof(struct ieee80211s_hdr, eaddr2), | ||
343 | src, ETH_ALEN); | ||
338 | } | 344 | } |
345 | hdrlen += ieee80211_get_mesh_hdrlen(meshdr); | ||
339 | } | 346 | } |
340 | break; | 347 | break; |
341 | case cpu_to_le16(IEEE80211_FCTL_FROMDS): | 348 | case cpu_to_le16(IEEE80211_FCTL_FROMDS): |
@@ -347,9 +354,14 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | |||
347 | if (iftype == NL80211_IFTYPE_MESH_POINT) { | 354 | if (iftype == NL80211_IFTYPE_MESH_POINT) { |
348 | struct ieee80211s_hdr *meshdr = | 355 | struct ieee80211s_hdr *meshdr = |
349 | (struct ieee80211s_hdr *) (skb->data + hdrlen); | 356 | (struct ieee80211s_hdr *) (skb->data + hdrlen); |
350 | hdrlen += ieee80211_get_mesh_hdrlen(meshdr); | 357 | /* make sure meshdr->flags is on the linear part */ |
358 | if (!pskb_may_pull(skb, hdrlen + 1)) | ||
359 | return -1; | ||
351 | if (meshdr->flags & MESH_FLAGS_AE_A4) | 360 | if (meshdr->flags & MESH_FLAGS_AE_A4) |
352 | memcpy(src, meshdr->eaddr1, ETH_ALEN); | 361 | skb_copy_bits(skb, hdrlen + |
362 | offsetof(struct ieee80211s_hdr, eaddr1), | ||
363 | src, ETH_ALEN); | ||
364 | hdrlen += ieee80211_get_mesh_hdrlen(meshdr); | ||
353 | } | 365 | } |
354 | break; | 366 | break; |
355 | case cpu_to_le16(0): | 367 | case cpu_to_le16(0): |
@@ -358,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, | |||
358 | break; | 370 | break; |
359 | } | 371 | } |
360 | 372 | ||
361 | if (unlikely(skb->len - hdrlen < 8)) | 373 | if (!pskb_may_pull(skb, hdrlen + 8)) |
362 | return -1; | 374 | return -1; |
363 | 375 | ||
364 | payload = skb->data + hdrlen; | 376 | payload = skb->data + hdrlen; |
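Both mesh hunks fix the same class of bug: an skb's payload can live in paged fragments, so dereferencing skb->data + offset is only valid for bytes inside the linear head. pskb_may_pull() guarantees, pulling fragment data into the head if needed, that the first N bytes are linear; skb_copy_bits() copies across fragments without requiring linearity at all. The final hunk applies the same reasoning to the 8-byte header check that follows. The pattern in isolation:

```c
/* Read one byte at 'offset' from a possibly non-linear skb; a sketch
 * of the idiom used above, not code from this diff. */
static int skb_read_u8(struct sk_buff *skb, unsigned int offset, u8 *val)
{
	if (!pskb_may_pull(skb, offset + 1))	/* make the byte linear */
		return -1;
	*val = skb->data[offset];
	return 0;
}
```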
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 4f5a47091fde..0ef17bc42bac 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
@@ -29,226 +29,226 @@ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, | |||
29 | * know about. | 29 | * know about. |
30 | */ | 30 | */ |
31 | static const struct iw_ioctl_description standard_ioctl[] = { | 31 | static const struct iw_ioctl_description standard_ioctl[] = { |
32 | [SIOCSIWCOMMIT - SIOCIWFIRST] = { | 32 | [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = { |
33 | .header_type = IW_HEADER_TYPE_NULL, | 33 | .header_type = IW_HEADER_TYPE_NULL, |
34 | }, | 34 | }, |
35 | [SIOCGIWNAME - SIOCIWFIRST] = { | 35 | [IW_IOCTL_IDX(SIOCGIWNAME)] = { |
36 | .header_type = IW_HEADER_TYPE_CHAR, | 36 | .header_type = IW_HEADER_TYPE_CHAR, |
37 | .flags = IW_DESCR_FLAG_DUMP, | 37 | .flags = IW_DESCR_FLAG_DUMP, |
38 | }, | 38 | }, |
39 | [SIOCSIWNWID - SIOCIWFIRST] = { | 39 | [IW_IOCTL_IDX(SIOCSIWNWID)] = { |
40 | .header_type = IW_HEADER_TYPE_PARAM, | 40 | .header_type = IW_HEADER_TYPE_PARAM, |
41 | .flags = IW_DESCR_FLAG_EVENT, | 41 | .flags = IW_DESCR_FLAG_EVENT, |
42 | }, | 42 | }, |
43 | [SIOCGIWNWID - SIOCIWFIRST] = { | 43 | [IW_IOCTL_IDX(SIOCGIWNWID)] = { |
44 | .header_type = IW_HEADER_TYPE_PARAM, | 44 | .header_type = IW_HEADER_TYPE_PARAM, |
45 | .flags = IW_DESCR_FLAG_DUMP, | 45 | .flags = IW_DESCR_FLAG_DUMP, |
46 | }, | 46 | }, |
47 | [SIOCSIWFREQ - SIOCIWFIRST] = { | 47 | [IW_IOCTL_IDX(SIOCSIWFREQ)] = { |
48 | .header_type = IW_HEADER_TYPE_FREQ, | 48 | .header_type = IW_HEADER_TYPE_FREQ, |
49 | .flags = IW_DESCR_FLAG_EVENT, | 49 | .flags = IW_DESCR_FLAG_EVENT, |
50 | }, | 50 | }, |
51 | [SIOCGIWFREQ - SIOCIWFIRST] = { | 51 | [IW_IOCTL_IDX(SIOCGIWFREQ)] = { |
52 | .header_type = IW_HEADER_TYPE_FREQ, | 52 | .header_type = IW_HEADER_TYPE_FREQ, |
53 | .flags = IW_DESCR_FLAG_DUMP, | 53 | .flags = IW_DESCR_FLAG_DUMP, |
54 | }, | 54 | }, |
55 | [SIOCSIWMODE - SIOCIWFIRST] = { | 55 | [IW_IOCTL_IDX(SIOCSIWMODE)] = { |
56 | .header_type = IW_HEADER_TYPE_UINT, | 56 | .header_type = IW_HEADER_TYPE_UINT, |
57 | .flags = IW_DESCR_FLAG_EVENT, | 57 | .flags = IW_DESCR_FLAG_EVENT, |
58 | }, | 58 | }, |
59 | [SIOCGIWMODE - SIOCIWFIRST] = { | 59 | [IW_IOCTL_IDX(SIOCGIWMODE)] = { |
60 | .header_type = IW_HEADER_TYPE_UINT, | 60 | .header_type = IW_HEADER_TYPE_UINT, |
61 | .flags = IW_DESCR_FLAG_DUMP, | 61 | .flags = IW_DESCR_FLAG_DUMP, |
62 | }, | 62 | }, |
63 | [SIOCSIWSENS - SIOCIWFIRST] = { | 63 | [IW_IOCTL_IDX(SIOCSIWSENS)] = { |
64 | .header_type = IW_HEADER_TYPE_PARAM, | 64 | .header_type = IW_HEADER_TYPE_PARAM, |
65 | }, | 65 | }, |
66 | [SIOCGIWSENS - SIOCIWFIRST] = { | 66 | [IW_IOCTL_IDX(SIOCGIWSENS)] = { |
67 | .header_type = IW_HEADER_TYPE_PARAM, | 67 | .header_type = IW_HEADER_TYPE_PARAM, |
68 | }, | 68 | }, |
69 | [SIOCSIWRANGE - SIOCIWFIRST] = { | 69 | [IW_IOCTL_IDX(SIOCSIWRANGE)] = { |
70 | .header_type = IW_HEADER_TYPE_NULL, | 70 | .header_type = IW_HEADER_TYPE_NULL, |
71 | }, | 71 | }, |
72 | [SIOCGIWRANGE - SIOCIWFIRST] = { | 72 | [IW_IOCTL_IDX(SIOCGIWRANGE)] = { |
73 | .header_type = IW_HEADER_TYPE_POINT, | 73 | .header_type = IW_HEADER_TYPE_POINT, |
74 | .token_size = 1, | 74 | .token_size = 1, |
75 | .max_tokens = sizeof(struct iw_range), | 75 | .max_tokens = sizeof(struct iw_range), |
76 | .flags = IW_DESCR_FLAG_DUMP, | 76 | .flags = IW_DESCR_FLAG_DUMP, |
77 | }, | 77 | }, |
78 | [SIOCSIWPRIV - SIOCIWFIRST] = { | 78 | [IW_IOCTL_IDX(SIOCSIWPRIV)] = { |
79 | .header_type = IW_HEADER_TYPE_NULL, | 79 | .header_type = IW_HEADER_TYPE_NULL, |
80 | }, | 80 | }, |
81 | [SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */ | 81 | [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */ |
82 | .header_type = IW_HEADER_TYPE_POINT, | 82 | .header_type = IW_HEADER_TYPE_POINT, |
83 | .token_size = sizeof(struct iw_priv_args), | 83 | .token_size = sizeof(struct iw_priv_args), |
84 | .max_tokens = 16, | 84 | .max_tokens = 16, |
85 | .flags = IW_DESCR_FLAG_NOMAX, | 85 | .flags = IW_DESCR_FLAG_NOMAX, |
86 | }, | 86 | }, |
87 | [SIOCSIWSTATS - SIOCIWFIRST] = { | 87 | [IW_IOCTL_IDX(SIOCSIWSTATS)] = { |
88 | .header_type = IW_HEADER_TYPE_NULL, | 88 | .header_type = IW_HEADER_TYPE_NULL, |
89 | }, | 89 | }, |
90 | [SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */ | 90 | [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */ |
91 | .header_type = IW_HEADER_TYPE_POINT, | 91 | .header_type = IW_HEADER_TYPE_POINT, |
92 | .token_size = 1, | 92 | .token_size = 1, |
93 | .max_tokens = sizeof(struct iw_statistics), | 93 | .max_tokens = sizeof(struct iw_statistics), |
94 | .flags = IW_DESCR_FLAG_DUMP, | 94 | .flags = IW_DESCR_FLAG_DUMP, |
95 | }, | 95 | }, |
96 | [SIOCSIWSPY - SIOCIWFIRST] = { | 96 | [IW_IOCTL_IDX(SIOCSIWSPY)] = { |
97 | .header_type = IW_HEADER_TYPE_POINT, | 97 | .header_type = IW_HEADER_TYPE_POINT, |
98 | .token_size = sizeof(struct sockaddr), | 98 | .token_size = sizeof(struct sockaddr), |
99 | .max_tokens = IW_MAX_SPY, | 99 | .max_tokens = IW_MAX_SPY, |
100 | }, | 100 | }, |
101 | [SIOCGIWSPY - SIOCIWFIRST] = { | 101 | [IW_IOCTL_IDX(SIOCGIWSPY)] = { |
102 | .header_type = IW_HEADER_TYPE_POINT, | 102 | .header_type = IW_HEADER_TYPE_POINT, |
103 | .token_size = sizeof(struct sockaddr) + | 103 | .token_size = sizeof(struct sockaddr) + |
104 | sizeof(struct iw_quality), | 104 | sizeof(struct iw_quality), |
105 | .max_tokens = IW_MAX_SPY, | 105 | .max_tokens = IW_MAX_SPY, |
106 | }, | 106 | }, |
107 | [SIOCSIWTHRSPY - SIOCIWFIRST] = { | 107 | [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = { |
108 | .header_type = IW_HEADER_TYPE_POINT, | 108 | .header_type = IW_HEADER_TYPE_POINT, |
109 | .token_size = sizeof(struct iw_thrspy), | 109 | .token_size = sizeof(struct iw_thrspy), |
110 | .min_tokens = 1, | 110 | .min_tokens = 1, |
111 | .max_tokens = 1, | 111 | .max_tokens = 1, |
112 | }, | 112 | }, |
113 | [SIOCGIWTHRSPY - SIOCIWFIRST] = { | 113 | [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = { |
114 | .header_type = IW_HEADER_TYPE_POINT, | 114 | .header_type = IW_HEADER_TYPE_POINT, |
115 | .token_size = sizeof(struct iw_thrspy), | 115 | .token_size = sizeof(struct iw_thrspy), |
116 | .min_tokens = 1, | 116 | .min_tokens = 1, |
117 | .max_tokens = 1, | 117 | .max_tokens = 1, |
118 | }, | 118 | }, |
119 | [SIOCSIWAP - SIOCIWFIRST] = { | 119 | [IW_IOCTL_IDX(SIOCSIWAP)] = { |
120 | .header_type = IW_HEADER_TYPE_ADDR, | 120 | .header_type = IW_HEADER_TYPE_ADDR, |
121 | }, | 121 | }, |
122 | [SIOCGIWAP - SIOCIWFIRST] = { | 122 | [IW_IOCTL_IDX(SIOCGIWAP)] = { |
123 | .header_type = IW_HEADER_TYPE_ADDR, | 123 | .header_type = IW_HEADER_TYPE_ADDR, |
124 | .flags = IW_DESCR_FLAG_DUMP, | 124 | .flags = IW_DESCR_FLAG_DUMP, |
125 | }, | 125 | }, |
126 | [SIOCSIWMLME - SIOCIWFIRST] = { | 126 | [IW_IOCTL_IDX(SIOCSIWMLME)] = { |
127 | .header_type = IW_HEADER_TYPE_POINT, | 127 | .header_type = IW_HEADER_TYPE_POINT, |
128 | .token_size = 1, | 128 | .token_size = 1, |
129 | .min_tokens = sizeof(struct iw_mlme), | 129 | .min_tokens = sizeof(struct iw_mlme), |
130 | .max_tokens = sizeof(struct iw_mlme), | 130 | .max_tokens = sizeof(struct iw_mlme), |
131 | }, | 131 | }, |
132 | [SIOCGIWAPLIST - SIOCIWFIRST] = { | 132 | [IW_IOCTL_IDX(SIOCGIWAPLIST)] = { |
133 | .header_type = IW_HEADER_TYPE_POINT, | 133 | .header_type = IW_HEADER_TYPE_POINT, |
134 | .token_size = sizeof(struct sockaddr) + | 134 | .token_size = sizeof(struct sockaddr) + |
135 | sizeof(struct iw_quality), | 135 | sizeof(struct iw_quality), |
136 | .max_tokens = IW_MAX_AP, | 136 | .max_tokens = IW_MAX_AP, |
137 | .flags = IW_DESCR_FLAG_NOMAX, | 137 | .flags = IW_DESCR_FLAG_NOMAX, |
138 | }, | 138 | }, |
139 | [SIOCSIWSCAN - SIOCIWFIRST] = { | 139 | [IW_IOCTL_IDX(SIOCSIWSCAN)] = { |
140 | .header_type = IW_HEADER_TYPE_POINT, | 140 | .header_type = IW_HEADER_TYPE_POINT, |
141 | .token_size = 1, | 141 | .token_size = 1, |
142 | .min_tokens = 0, | 142 | .min_tokens = 0, |
143 | .max_tokens = sizeof(struct iw_scan_req), | 143 | .max_tokens = sizeof(struct iw_scan_req), |
144 | }, | 144 | }, |
145 | [SIOCGIWSCAN - SIOCIWFIRST] = { | 145 | [IW_IOCTL_IDX(SIOCGIWSCAN)] = { |
146 | .header_type = IW_HEADER_TYPE_POINT, | 146 | .header_type = IW_HEADER_TYPE_POINT, |
147 | .token_size = 1, | 147 | .token_size = 1, |
148 | .max_tokens = IW_SCAN_MAX_DATA, | 148 | .max_tokens = IW_SCAN_MAX_DATA, |
149 | .flags = IW_DESCR_FLAG_NOMAX, | 149 | .flags = IW_DESCR_FLAG_NOMAX, |
150 | }, | 150 | }, |
151 | [SIOCSIWESSID - SIOCIWFIRST] = { | 151 | [IW_IOCTL_IDX(SIOCSIWESSID)] = { |
152 | .header_type = IW_HEADER_TYPE_POINT, | 152 | .header_type = IW_HEADER_TYPE_POINT, |
153 | .token_size = 1, | 153 | .token_size = 1, |
154 | .max_tokens = IW_ESSID_MAX_SIZE, | 154 | .max_tokens = IW_ESSID_MAX_SIZE, |
155 | .flags = IW_DESCR_FLAG_EVENT, | 155 | .flags = IW_DESCR_FLAG_EVENT, |
156 | }, | 156 | }, |
157 | [SIOCGIWESSID - SIOCIWFIRST] = { | 157 | [IW_IOCTL_IDX(SIOCGIWESSID)] = { |
158 | .header_type = IW_HEADER_TYPE_POINT, | 158 | .header_type = IW_HEADER_TYPE_POINT, |
159 | .token_size = 1, | 159 | .token_size = 1, |
160 | .max_tokens = IW_ESSID_MAX_SIZE, | 160 | .max_tokens = IW_ESSID_MAX_SIZE, |
161 | .flags = IW_DESCR_FLAG_DUMP, | 161 | .flags = IW_DESCR_FLAG_DUMP, |
162 | }, | 162 | }, |
163 | [SIOCSIWNICKN - SIOCIWFIRST] = { | 163 | [IW_IOCTL_IDX(SIOCSIWNICKN)] = { |
164 | .header_type = IW_HEADER_TYPE_POINT, | 164 | .header_type = IW_HEADER_TYPE_POINT, |
165 | .token_size = 1, | 165 | .token_size = 1, |
166 | .max_tokens = IW_ESSID_MAX_SIZE, | 166 | .max_tokens = IW_ESSID_MAX_SIZE, |
167 | }, | 167 | }, |
168 | [SIOCGIWNICKN - SIOCIWFIRST] = { | 168 | [IW_IOCTL_IDX(SIOCGIWNICKN)] = { |
169 | .header_type = IW_HEADER_TYPE_POINT, | 169 | .header_type = IW_HEADER_TYPE_POINT, |
170 | .token_size = 1, | 170 | .token_size = 1, |
171 | .max_tokens = IW_ESSID_MAX_SIZE, | 171 | .max_tokens = IW_ESSID_MAX_SIZE, |
172 | }, | 172 | }, |
173 | [SIOCSIWRATE - SIOCIWFIRST] = { | 173 | [IW_IOCTL_IDX(SIOCSIWRATE)] = { |
174 | .header_type = IW_HEADER_TYPE_PARAM, | 174 | .header_type = IW_HEADER_TYPE_PARAM, |
175 | }, | 175 | }, |
176 | [SIOCGIWRATE - SIOCIWFIRST] = { | 176 | [IW_IOCTL_IDX(SIOCGIWRATE)] = { |
177 | .header_type = IW_HEADER_TYPE_PARAM, | 177 | .header_type = IW_HEADER_TYPE_PARAM, |
178 | }, | 178 | }, |
179 | [SIOCSIWRTS - SIOCIWFIRST] = { | 179 | [IW_IOCTL_IDX(SIOCSIWRTS)] = { |
180 | .header_type = IW_HEADER_TYPE_PARAM, | 180 | .header_type = IW_HEADER_TYPE_PARAM, |
181 | }, | 181 | }, |
182 | [SIOCGIWRTS - SIOCIWFIRST] = { | 182 | [IW_IOCTL_IDX(SIOCGIWRTS)] = { |
183 | .header_type = IW_HEADER_TYPE_PARAM, | 183 | .header_type = IW_HEADER_TYPE_PARAM, |
184 | }, | 184 | }, |
185 | [SIOCSIWFRAG - SIOCIWFIRST] = { | 185 | [IW_IOCTL_IDX(SIOCSIWFRAG)] = { |
186 | .header_type = IW_HEADER_TYPE_PARAM, | 186 | .header_type = IW_HEADER_TYPE_PARAM, |
187 | }, | 187 | }, |
188 | [SIOCGIWFRAG - SIOCIWFIRST] = { | 188 | [IW_IOCTL_IDX(SIOCGIWFRAG)] = { |
189 | .header_type = IW_HEADER_TYPE_PARAM, | 189 | .header_type = IW_HEADER_TYPE_PARAM, |
190 | }, | 190 | }, |
191 | [SIOCSIWTXPOW - SIOCIWFIRST] = { | 191 | [IW_IOCTL_IDX(SIOCSIWTXPOW)] = { |
192 | .header_type = IW_HEADER_TYPE_PARAM, | 192 | .header_type = IW_HEADER_TYPE_PARAM, |
193 | }, | 193 | }, |
194 | [SIOCGIWTXPOW - SIOCIWFIRST] = { | 194 | [IW_IOCTL_IDX(SIOCGIWTXPOW)] = { |
195 | .header_type = IW_HEADER_TYPE_PARAM, | 195 | .header_type = IW_HEADER_TYPE_PARAM, |
196 | }, | 196 | }, |
197 | [SIOCSIWRETRY - SIOCIWFIRST] = { | 197 | [IW_IOCTL_IDX(SIOCSIWRETRY)] = { |
198 | .header_type = IW_HEADER_TYPE_PARAM, | 198 | .header_type = IW_HEADER_TYPE_PARAM, |
199 | }, | 199 | }, |
200 | [SIOCGIWRETRY - SIOCIWFIRST] = { | 200 | [IW_IOCTL_IDX(SIOCGIWRETRY)] = { |
201 | .header_type = IW_HEADER_TYPE_PARAM, | 201 | .header_type = IW_HEADER_TYPE_PARAM, |
202 | }, | 202 | }, |
203 | [SIOCSIWENCODE - SIOCIWFIRST] = { | 203 | [IW_IOCTL_IDX(SIOCSIWENCODE)] = { |
204 | .header_type = IW_HEADER_TYPE_POINT, | 204 | .header_type = IW_HEADER_TYPE_POINT, |
205 | .token_size = 1, | 205 | .token_size = 1, |
206 | .max_tokens = IW_ENCODING_TOKEN_MAX, | 206 | .max_tokens = IW_ENCODING_TOKEN_MAX, |
207 | .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, | 207 | .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, |
208 | }, | 208 | }, |
209 | [SIOCGIWENCODE - SIOCIWFIRST] = { | 209 | [IW_IOCTL_IDX(SIOCGIWENCODE)] = { |
210 | .header_type = IW_HEADER_TYPE_POINT, | 210 | .header_type = IW_HEADER_TYPE_POINT, |
211 | .token_size = 1, | 211 | .token_size = 1, |
212 | .max_tokens = IW_ENCODING_TOKEN_MAX, | 212 | .max_tokens = IW_ENCODING_TOKEN_MAX, |
213 | .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, | 213 | .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, |
214 | }, | 214 | }, |
215 | [SIOCSIWPOWER - SIOCIWFIRST] = { | 215 | [IW_IOCTL_IDX(SIOCSIWPOWER)] = { |
216 | .header_type = IW_HEADER_TYPE_PARAM, | 216 | .header_type = IW_HEADER_TYPE_PARAM, |
217 | }, | 217 | }, |
218 | [SIOCGIWPOWER - SIOCIWFIRST] = { | 218 | [IW_IOCTL_IDX(SIOCGIWPOWER)] = { |
219 | .header_type = IW_HEADER_TYPE_PARAM, | 219 | .header_type = IW_HEADER_TYPE_PARAM, |
220 | }, | 220 | }, |
221 | [SIOCSIWGENIE - SIOCIWFIRST] = { | 221 | [IW_IOCTL_IDX(SIOCSIWGENIE)] = { |
222 | .header_type = IW_HEADER_TYPE_POINT, | 222 | .header_type = IW_HEADER_TYPE_POINT, |
223 | .token_size = 1, | 223 | .token_size = 1, |
224 | .max_tokens = IW_GENERIC_IE_MAX, | 224 | .max_tokens = IW_GENERIC_IE_MAX, |
225 | }, | 225 | }, |
226 | [SIOCGIWGENIE - SIOCIWFIRST] = { | 226 | [IW_IOCTL_IDX(SIOCGIWGENIE)] = { |
227 | .header_type = IW_HEADER_TYPE_POINT, | 227 | .header_type = IW_HEADER_TYPE_POINT, |
228 | .token_size = 1, | 228 | .token_size = 1, |
229 | .max_tokens = IW_GENERIC_IE_MAX, | 229 | .max_tokens = IW_GENERIC_IE_MAX, |
230 | }, | 230 | }, |
231 | [SIOCSIWAUTH - SIOCIWFIRST] = { | 231 | [IW_IOCTL_IDX(SIOCSIWAUTH)] = { |
232 | .header_type = IW_HEADER_TYPE_PARAM, | 232 | .header_type = IW_HEADER_TYPE_PARAM, |
233 | }, | 233 | }, |
234 | [SIOCGIWAUTH - SIOCIWFIRST] = { | 234 | [IW_IOCTL_IDX(SIOCGIWAUTH)] = { |
235 | .header_type = IW_HEADER_TYPE_PARAM, | 235 | .header_type = IW_HEADER_TYPE_PARAM, |
236 | }, | 236 | }, |
237 | [SIOCSIWENCODEEXT - SIOCIWFIRST] = { | 237 | [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = { |
238 | .header_type = IW_HEADER_TYPE_POINT, | 238 | .header_type = IW_HEADER_TYPE_POINT, |
239 | .token_size = 1, | 239 | .token_size = 1, |
240 | .min_tokens = sizeof(struct iw_encode_ext), | 240 | .min_tokens = sizeof(struct iw_encode_ext), |
241 | .max_tokens = sizeof(struct iw_encode_ext) + | 241 | .max_tokens = sizeof(struct iw_encode_ext) + |
242 | IW_ENCODING_TOKEN_MAX, | 242 | IW_ENCODING_TOKEN_MAX, |
243 | }, | 243 | }, |
244 | [SIOCGIWENCODEEXT - SIOCIWFIRST] = { | 244 | [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = { |
245 | .header_type = IW_HEADER_TYPE_POINT, | 245 | .header_type = IW_HEADER_TYPE_POINT, |
246 | .token_size = 1, | 246 | .token_size = 1, |
247 | .min_tokens = sizeof(struct iw_encode_ext), | 247 | .min_tokens = sizeof(struct iw_encode_ext), |
248 | .max_tokens = sizeof(struct iw_encode_ext) + | 248 | .max_tokens = sizeof(struct iw_encode_ext) + |
249 | IW_ENCODING_TOKEN_MAX, | 249 | IW_ENCODING_TOKEN_MAX, |
250 | }, | 250 | }, |
251 | [SIOCSIWPMKSA - SIOCIWFIRST] = { | 251 | [IW_IOCTL_IDX(SIOCSIWPMKSA)] = { |
252 | .header_type = IW_HEADER_TYPE_POINT, | 252 | .header_type = IW_HEADER_TYPE_POINT, |
253 | .token_size = 1, | 253 | .token_size = 1, |
254 | .min_tokens = sizeof(struct iw_pmksa), | 254 | .min_tokens = sizeof(struct iw_pmksa), |
@@ -262,44 +262,44 @@ static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); | |||
262 | * we know about. | 262 | * we know about. |
263 | */ | 263 | */ |
264 | static const struct iw_ioctl_description standard_event[] = { | 264 | static const struct iw_ioctl_description standard_event[] = { |
265 | [IWEVTXDROP - IWEVFIRST] = { | 265 | [IW_EVENT_IDX(IWEVTXDROP)] = { |
266 | .header_type = IW_HEADER_TYPE_ADDR, | 266 | .header_type = IW_HEADER_TYPE_ADDR, |
267 | }, | 267 | }, |
268 | [IWEVQUAL - IWEVFIRST] = { | 268 | [IW_EVENT_IDX(IWEVQUAL)] = { |
269 | .header_type = IW_HEADER_TYPE_QUAL, | 269 | .header_type = IW_HEADER_TYPE_QUAL, |
270 | }, | 270 | }, |
271 | [IWEVCUSTOM - IWEVFIRST] = { | 271 | [IW_EVENT_IDX(IWEVCUSTOM)] = { |
272 | .header_type = IW_HEADER_TYPE_POINT, | 272 | .header_type = IW_HEADER_TYPE_POINT, |
273 | .token_size = 1, | 273 | .token_size = 1, |
274 | .max_tokens = IW_CUSTOM_MAX, | 274 | .max_tokens = IW_CUSTOM_MAX, |
275 | }, | 275 | }, |
276 | [IWEVREGISTERED - IWEVFIRST] = { | 276 | [IW_EVENT_IDX(IWEVREGISTERED)] = { |
277 | .header_type = IW_HEADER_TYPE_ADDR, | 277 | .header_type = IW_HEADER_TYPE_ADDR, |
278 | }, | 278 | }, |
279 | [IWEVEXPIRED - IWEVFIRST] = { | 279 | [IW_EVENT_IDX(IWEVEXPIRED)] = { |
280 | .header_type = IW_HEADER_TYPE_ADDR, | 280 | .header_type = IW_HEADER_TYPE_ADDR, |
281 | }, | 281 | }, |
282 | [IWEVGENIE - IWEVFIRST] = { | 282 | [IW_EVENT_IDX(IWEVGENIE)] = { |
283 | .header_type = IW_HEADER_TYPE_POINT, | 283 | .header_type = IW_HEADER_TYPE_POINT, |
284 | .token_size = 1, | 284 | .token_size = 1, |
285 | .max_tokens = IW_GENERIC_IE_MAX, | 285 | .max_tokens = IW_GENERIC_IE_MAX, |
286 | }, | 286 | }, |
287 | [IWEVMICHAELMICFAILURE - IWEVFIRST] = { | 287 | [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = { |
288 | .header_type = IW_HEADER_TYPE_POINT, | 288 | .header_type = IW_HEADER_TYPE_POINT, |
289 | .token_size = 1, | 289 | .token_size = 1, |
290 | .max_tokens = sizeof(struct iw_michaelmicfailure), | 290 | .max_tokens = sizeof(struct iw_michaelmicfailure), |
291 | }, | 291 | }, |
292 | [IWEVASSOCREQIE - IWEVFIRST] = { | 292 | [IW_EVENT_IDX(IWEVASSOCREQIE)] = { |
293 | .header_type = IW_HEADER_TYPE_POINT, | 293 | .header_type = IW_HEADER_TYPE_POINT, |
294 | .token_size = 1, | 294 | .token_size = 1, |
295 | .max_tokens = IW_GENERIC_IE_MAX, | 295 | .max_tokens = IW_GENERIC_IE_MAX, |
296 | }, | 296 | }, |
297 | [IWEVASSOCRESPIE - IWEVFIRST] = { | 297 | [IW_EVENT_IDX(IWEVASSOCRESPIE)] = { |
298 | .header_type = IW_HEADER_TYPE_POINT, | 298 | .header_type = IW_HEADER_TYPE_POINT, |
299 | .token_size = 1, | 299 | .token_size = 1, |
300 | .max_tokens = IW_GENERIC_IE_MAX, | 300 | .max_tokens = IW_GENERIC_IE_MAX, |
301 | }, | 301 | }, |
302 | [IWEVPMKIDCAND - IWEVFIRST] = { | 302 | [IW_EVENT_IDX(IWEVPMKIDCAND)] = { |
303 | .header_type = IW_HEADER_TYPE_POINT, | 303 | .header_type = IW_HEADER_TYPE_POINT, |
304 | .token_size = 1, | 304 | .token_size = 1, |
305 | .max_tokens = sizeof(struct iw_pmkid_cand), | 305 | .max_tokens = sizeof(struct iw_pmkid_cand), |
@@ -450,11 +450,11 @@ void wireless_send_event(struct net_device * dev, | |||
450 | 450 | ||
451 | /* Get the description of the Event */ | 451 | /* Get the description of the Event */ |
452 | if (cmd <= SIOCIWLAST) { | 452 | if (cmd <= SIOCIWLAST) { |
453 | cmd_index = cmd - SIOCIWFIRST; | 453 | cmd_index = IW_IOCTL_IDX(cmd); |
454 | if (cmd_index < standard_ioctl_num) | 454 | if (cmd_index < standard_ioctl_num) |
455 | descr = &(standard_ioctl[cmd_index]); | 455 | descr = &(standard_ioctl[cmd_index]); |
456 | } else { | 456 | } else { |
457 | cmd_index = cmd - IWEVFIRST; | 457 | cmd_index = IW_EVENT_IDX(cmd); |
458 | if (cmd_index < standard_event_num) | 458 | if (cmd_index < standard_event_num) |
459 | descr = &(standard_event[cmd_index]); | 459 | descr = &(standard_event[cmd_index]); |
460 | } | 460 | } |
@@ -663,7 +663,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd) | |||
663 | return NULL; | 663 | return NULL; |
664 | 664 | ||
665 | /* Try as a standard command */ | 665 | /* Try as a standard command */ |
666 | index = cmd - SIOCIWFIRST; | 666 | index = IW_IOCTL_IDX(cmd); |
667 | if (index < handlers->num_standard) | 667 | if (index < handlers->num_standard) |
668 | return handlers->standard[index]; | 668 | return handlers->standard[index]; |
669 | 669 | ||
@@ -955,9 +955,9 @@ static int ioctl_standard_call(struct net_device * dev, | |||
955 | int ret = -EINVAL; | 955 | int ret = -EINVAL; |
956 | 956 | ||
957 | /* Get the description of the IOCTL */ | 957 | /* Get the description of the IOCTL */ |
958 | if ((cmd - SIOCIWFIRST) >= standard_ioctl_num) | 958 | if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num) |
959 | return -EOPNOTSUPP; | 959 | return -EOPNOTSUPP; |
960 | descr = &(standard_ioctl[cmd - SIOCIWFIRST]); | 960 | descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]); |
961 | 961 | ||
962 | /* Check if we have a pointer to user space data or not */ | 962 | /* Check if we have a pointer to user space data or not */ |
963 | if (descr->header_type != IW_HEADER_TYPE_POINT) { | 963 | if (descr->header_type != IW_HEADER_TYPE_POINT) { |
@@ -1013,7 +1013,7 @@ static int compat_standard_call(struct net_device *dev, | |||
1013 | struct iw_point iwp; | 1013 | struct iw_point iwp; |
1014 | int err; | 1014 | int err; |
1015 | 1015 | ||
1016 | descr = standard_ioctl + (cmd - SIOCIWFIRST); | 1016 | descr = standard_ioctl + IW_IOCTL_IDX(cmd); |
1017 | 1017 | ||
1018 | if (descr->header_type != IW_HEADER_TYPE_POINT) | 1018 | if (descr->header_type != IW_HEADER_TYPE_POINT) |
1019 | return ioctl_standard_call(dev, iwr, cmd, info, handler); | 1019 | return ioctl_standard_call(dev, iwr, cmd, info, handler); |
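Every table index and bounds check in wext-core.c previously open-coded the '- SIOCIWFIRST' and '- IWEVFIRST' offsets; the conversion funnels that arithmetic through two macros so it is written exactly once. Their assumed definitions, from include/net/iw_handler.h in this same series:

```c
/* Map a wireless-extensions ioctl/event number to its table slot. */
#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)
```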
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 36e84e13c6aa..296e65e01064 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -719,7 +719,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk) | |||
719 | DECLARE_WAITQUEUE(wait, current); | 719 | DECLARE_WAITQUEUE(wait, current); |
720 | int rc; | 720 | int rc; |
721 | 721 | ||
722 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 722 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
723 | for (;;) { | 723 | for (;;) { |
724 | __set_current_state(TASK_INTERRUPTIBLE); | 724 | __set_current_state(TASK_INTERRUPTIBLE); |
725 | rc = -ERESTARTSYS; | 725 | rc = -ERESTARTSYS; |
@@ -739,7 +739,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk) | |||
739 | break; | 739 | break; |
740 | } | 740 | } |
741 | __set_current_state(TASK_RUNNING); | 741 | __set_current_state(TASK_RUNNING); |
742 | remove_wait_queue(sk->sk_sleep, &wait); | 742 | remove_wait_queue(sk_sleep(sk), &wait); |
743 | return rc; | 743 | return rc; |
744 | } | 744 | } |
745 | 745 | ||
@@ -839,7 +839,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout) | |||
839 | DECLARE_WAITQUEUE(wait, current); | 839 | DECLARE_WAITQUEUE(wait, current); |
840 | int rc = 0; | 840 | int rc = 0; |
841 | 841 | ||
842 | add_wait_queue_exclusive(sk->sk_sleep, &wait); | 842 | add_wait_queue_exclusive(sk_sleep(sk), &wait); |
843 | for (;;) { | 843 | for (;;) { |
844 | __set_current_state(TASK_INTERRUPTIBLE); | 844 | __set_current_state(TASK_INTERRUPTIBLE); |
845 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 845 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
@@ -859,7 +859,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout) | |||
859 | break; | 859 | break; |
860 | } | 860 | } |
861 | __set_current_state(TASK_RUNNING); | 861 | __set_current_state(TASK_RUNNING); |
862 | remove_wait_queue(sk->sk_sleep, &wait); | 862 | remove_wait_queue(sk_sleep(sk), &wait); |
863 | return rc; | 863 | return rc; |
864 | } | 864 | } |
865 | 865 | ||
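The af_x25.c changes swap direct sk->sk_sleep dereferences for the sk_sleep() accessor; the wait loops themselves are untouched. As a hedged sketch (the real definition is in include/net/sock.h; this body is an assumption about what it amounts to at this point):

/* Presumed equivalent of the accessor: callers wait on whatever
 * queue it returns, which frees the socket core to relocate the
 * wait queue behind this one indirection later on. */
static inline wait_queue_head_t *sk_sleep_equiv(struct sock *sk)
{
	return sk->sk_sleep;
}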
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index b9ef682230a0..9005f6daeab5 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <net/sock.h> | 24 | #include <net/sock.h> |
25 | #include <linux/if_arp.h> | 25 | #include <linux/if_arp.h> |
26 | #include <net/x25.h> | 26 | #include <net/x25.h> |
27 | #include <net/x25device.h> | ||
27 | 28 | ||
28 | static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) | 29 | static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) |
29 | { | 30 | { |
@@ -115,19 +116,22 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, | |||
115 | } | 116 | } |
116 | 117 | ||
117 | switch (skb->data[0]) { | 118 | switch (skb->data[0]) { |
118 | case 0x00: | 119 | |
119 | skb_pull(skb, 1); | 120 | case X25_IFACE_DATA: |
120 | if (x25_receive_data(skb, nb)) { | 121 | skb_pull(skb, 1); |
121 | x25_neigh_put(nb); | 122 | if (x25_receive_data(skb, nb)) { |
122 | goto out; | 123 | x25_neigh_put(nb); |
123 | } | 124 | goto out; |
124 | break; | 125 | } |
125 | case 0x01: | 126 | break; |
126 | x25_link_established(nb); | 127 | |
127 | break; | 128 | case X25_IFACE_CONNECT: |
128 | case 0x02: | 129 | x25_link_established(nb); |
129 | x25_link_terminated(nb); | 130 | break; |
130 | break; | 131 | |
132 | case X25_IFACE_DISCONNECT: | ||
133 | x25_link_terminated(nb); | ||
134 | break; | ||
131 | } | 135 | } |
132 | x25_neigh_put(nb); | 136 | x25_neigh_put(nb); |
133 | drop: | 137 | drop: |
@@ -148,7 +152,7 @@ void x25_establish_link(struct x25_neigh *nb) | |||
148 | return; | 152 | return; |
149 | } | 153 | } |
150 | ptr = skb_put(skb, 1); | 154 | ptr = skb_put(skb, 1); |
151 | *ptr = 0x01; | 155 | *ptr = X25_IFACE_CONNECT; |
152 | break; | 156 | break; |
153 | 157 | ||
154 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) | 158 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) |
@@ -184,7 +188,7 @@ void x25_terminate_link(struct x25_neigh *nb) | |||
184 | } | 188 | } |
185 | 189 | ||
186 | ptr = skb_put(skb, 1); | 190 | ptr = skb_put(skb, 1); |
187 | *ptr = 0x02; | 191 | *ptr = X25_IFACE_DISCONNECT; |
188 | 192 | ||
189 | skb->protocol = htons(ETH_P_X25); | 193 | skb->protocol = htons(ETH_P_X25); |
190 | skb->dev = nb->dev; | 194 | skb->dev = nb->dev; |
@@ -200,7 +204,7 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) | |||
200 | switch (nb->dev->type) { | 204 | switch (nb->dev->type) { |
201 | case ARPHRD_X25: | 205 | case ARPHRD_X25: |
202 | dptr = skb_push(skb, 1); | 206 | dptr = skb_push(skb, 1); |
203 | *dptr = 0x00; | 207 | *dptr = X25_IFACE_DATA; |
204 | break; | 208 | break; |
205 | 209 | ||
206 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) | 210 | #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) |
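Throughout x25_dev.c the magic first-octet values 0x00/0x01/0x02 become named X25_IFACE_* constants pulled in through <net/x25device.h>. Judging from the one-for-one substitutions in the hunks above, the definitions are presumably equivalent to:

/* Values inferred from the replaced literals, not quoted from the
 * header: the first octet of a pseudo-frame selects the event. */
#define X25_IFACE_DATA		0x00	/* payload frame follows */
#define X25_IFACE_CONNECT	0x01	/* link established */
#define X25_IFACE_DISCONNECT	0x02	/* link terminated */

The switch in x25_lapb_receive_frame() and the senders in x25_establish_link(), x25_terminate_link() and x25_send_frame() now agree on the same symbolic names.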
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h index e5195c99f71e..1396572d2ade 100644 --- a/net/xfrm/xfrm_hash.h +++ b/net/xfrm/xfrm_hash.h | |||
@@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr) | |||
16 | 16 | ||
17 | static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) | 17 | static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) |
18 | { | 18 | { |
19 | return ntohl(daddr->a4 + saddr->a4); | 19 | u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4; |
20 | return ntohl((__force __be32)sum); | ||
20 | } | 21 | } |
21 | 22 | ||
22 | static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) | 23 | static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr) |
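The xfrm_hash.h hunk is a sparse-annotation fix rather than a behavioral change: adding two __be32 values directly trips sparse's endianness checker, so the sum is performed in __force u32 space and reinterpreted as __be32 only for the final ntohl(). A self-contained sketch of the same pattern:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sum two big-endian words without sparse warnings: do the addition
 * on plain u32 and cast back only for the byte swap. The generated
 * code is identical to the old ntohl(a + b). */
static inline unsigned int be32_pair_hash(__be32 a, __be32 b)
{
	u32 sum = (__force u32)a + (__force u32)b;

	return ntohl((__force __be32)sum);
}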
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 843e066649cb..7430ac26ec49 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -37,6 +37,8 @@ | |||
37 | DEFINE_MUTEX(xfrm_cfg_mutex); | 37 | DEFINE_MUTEX(xfrm_cfg_mutex); |
38 | EXPORT_SYMBOL(xfrm_cfg_mutex); | 38 | EXPORT_SYMBOL(xfrm_cfg_mutex); |
39 | 39 | ||
40 | static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); | ||
41 | static struct dst_entry *xfrm_policy_sk_bundles; | ||
40 | static DEFINE_RWLOCK(xfrm_policy_lock); | 42 | static DEFINE_RWLOCK(xfrm_policy_lock); |
41 | 43 | ||
42 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); | 44 | static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); |
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; | |||
44 | 46 | ||
45 | static struct kmem_cache *xfrm_dst_cache __read_mostly; | 47 | static struct kmem_cache *xfrm_dst_cache __read_mostly; |
46 | 48 | ||
47 | static HLIST_HEAD(xfrm_policy_gc_list); | ||
48 | static DEFINE_SPINLOCK(xfrm_policy_gc_lock); | ||
49 | |||
50 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); | 49 | static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); |
51 | static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); | 50 | static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); |
52 | static void xfrm_init_pmtu(struct dst_entry *dst); | 51 | static void xfrm_init_pmtu(struct dst_entry *dst); |
52 | static int stale_bundle(struct dst_entry *dst); | ||
53 | 53 | ||
54 | static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, | 54 | static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, |
55 | int dir); | 55 | int dir); |
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data) | |||
156 | 156 | ||
157 | read_lock(&xp->lock); | 157 | read_lock(&xp->lock); |
158 | 158 | ||
159 | if (xp->walk.dead) | 159 | if (unlikely(xp->walk.dead)) |
160 | goto out; | 160 | goto out; |
161 | 161 | ||
162 | dir = xfrm_policy_id2dir(xp->index); | 162 | dir = xfrm_policy_id2dir(xp->index); |
@@ -216,6 +216,35 @@ expired: | |||
216 | xfrm_pol_put(xp); | 216 | xfrm_pol_put(xp); |
217 | } | 217 | } |
218 | 218 | ||
219 | static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) | ||
220 | { | ||
221 | struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); | ||
222 | |||
223 | if (unlikely(pol->walk.dead)) | ||
224 | flo = NULL; | ||
225 | else | ||
226 | xfrm_pol_hold(pol); | ||
227 | |||
228 | return flo; | ||
229 | } | ||
230 | |||
231 | static int xfrm_policy_flo_check(struct flow_cache_object *flo) | ||
232 | { | ||
233 | struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); | ||
234 | |||
235 | return !pol->walk.dead; | ||
236 | } | ||
237 | |||
238 | static void xfrm_policy_flo_delete(struct flow_cache_object *flo) | ||
239 | { | ||
240 | xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); | ||
241 | } | ||
242 | |||
243 | static const struct flow_cache_ops xfrm_policy_fc_ops = { | ||
244 | .get = xfrm_policy_flo_get, | ||
245 | .check = xfrm_policy_flo_check, | ||
246 | .delete = xfrm_policy_flo_delete, | ||
247 | }; | ||
219 | 248 | ||
220 | /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 | 249 | /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 |
221 | * SPD calls. | 250 | * SPD calls. |
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) | |||
236 | atomic_set(&policy->refcnt, 1); | 265 | atomic_set(&policy->refcnt, 1); |
237 | setup_timer(&policy->timer, xfrm_policy_timer, | 266 | setup_timer(&policy->timer, xfrm_policy_timer, |
238 | (unsigned long)policy); | 267 | (unsigned long)policy); |
268 | policy->flo.ops = &xfrm_policy_fc_ops; | ||
239 | } | 269 | } |
240 | return policy; | 270 | return policy; |
241 | } | 271 | } |
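The new flow_cache_ops vtable is the contract that lets the generic flow cache hold xfrm_policy entries without knowing their type: get() takes a reference (refusing a dead policy), check() reports whether a cached entry is still usable, and delete() drops the cache's reference. A sketch of how a cache consumer would drive the ops on a hit; the flow cache internals here are assumed for illustration:

/* Illustrative only: roughly what a cache hit path does with a
 * cached flow_cache_object before returning it to the caller. */
static struct flow_cache_object *
cache_hit_sketch(struct flow_cache_object *flo)
{
	if (!flo->ops->check(flo)) {	/* entry went stale */
		flo->ops->delete(flo);	/* cache drops its reference */
		return NULL;		/* caller re-resolves */
	}
	return flo->ops->get(flo);	/* NULL if it died meanwhile */
}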
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
247 | { | 277 | { |
248 | BUG_ON(!policy->walk.dead); | 278 | BUG_ON(!policy->walk.dead); |
249 | 279 | ||
250 | BUG_ON(policy->bundles); | ||
251 | |||
252 | if (del_timer(&policy->timer)) | 280 | if (del_timer(&policy->timer)) |
253 | BUG(); | 281 | BUG(); |
254 | 282 | ||
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
257 | } | 285 | } |
258 | EXPORT_SYMBOL(xfrm_policy_destroy); | 286 | EXPORT_SYMBOL(xfrm_policy_destroy); |
259 | 287 | ||
260 | static void xfrm_policy_gc_kill(struct xfrm_policy *policy) | ||
261 | { | ||
262 | struct dst_entry *dst; | ||
263 | |||
264 | while ((dst = policy->bundles) != NULL) { | ||
265 | policy->bundles = dst->next; | ||
266 | dst_free(dst); | ||
267 | } | ||
268 | |||
269 | if (del_timer(&policy->timer)) | ||
270 | atomic_dec(&policy->refcnt); | ||
271 | |||
272 | if (atomic_read(&policy->refcnt) > 1) | ||
273 | flow_cache_flush(); | ||
274 | |||
275 | xfrm_pol_put(policy); | ||
276 | } | ||
277 | |||
278 | static void xfrm_policy_gc_task(struct work_struct *work) | ||
279 | { | ||
280 | struct xfrm_policy *policy; | ||
281 | struct hlist_node *entry, *tmp; | ||
282 | struct hlist_head gc_list; | ||
283 | |||
284 | spin_lock_bh(&xfrm_policy_gc_lock); | ||
285 | gc_list.first = xfrm_policy_gc_list.first; | ||
286 | INIT_HLIST_HEAD(&xfrm_policy_gc_list); | ||
287 | spin_unlock_bh(&xfrm_policy_gc_lock); | ||
288 | |||
289 | hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst) | ||
290 | xfrm_policy_gc_kill(policy); | ||
291 | } | ||
292 | static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task); | ||
293 | |||
294 | /* Rule must be locked. Release descendant resources, announce | 288 | /* Rule must be locked. Release descendant resources, announce |
295 | * entry dead. The rule must be unlinked from lists by this moment. | 289 | * entry dead. The rule must be unlinked from lists by this moment. |
296 | */ | 290 | */ |
297 | 291 | ||
298 | static void xfrm_policy_kill(struct xfrm_policy *policy) | 292 | static void xfrm_policy_kill(struct xfrm_policy *policy) |
299 | { | 293 | { |
300 | int dead; | ||
301 | |||
302 | write_lock_bh(&policy->lock); | ||
303 | dead = policy->walk.dead; | ||
304 | policy->walk.dead = 1; | 294 | policy->walk.dead = 1; |
305 | write_unlock_bh(&policy->lock); | ||
306 | 295 | ||
307 | if (unlikely(dead)) { | 296 | atomic_inc(&policy->genid); |
308 | WARN_ON(1); | ||
309 | return; | ||
310 | } | ||
311 | 297 | ||
312 | spin_lock_bh(&xfrm_policy_gc_lock); | 298 | if (del_timer(&policy->timer)) |
313 | hlist_add_head(&policy->bydst, &xfrm_policy_gc_list); | 299 | xfrm_pol_put(policy); |
314 | spin_unlock_bh(&xfrm_policy_gc_lock); | ||
315 | 300 | ||
316 | schedule_work(&xfrm_policy_gc_work); | 301 | xfrm_pol_put(policy); |
317 | } | 302 | } |
318 | 303 | ||
319 | static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; | 304 | static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; |
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
555 | struct xfrm_policy *delpol; | 540 | struct xfrm_policy *delpol; |
556 | struct hlist_head *chain; | 541 | struct hlist_head *chain; |
557 | struct hlist_node *entry, *newpos; | 542 | struct hlist_node *entry, *newpos; |
558 | struct dst_entry *gc_list; | ||
559 | u32 mark = policy->mark.v & policy->mark.m; | 543 | u32 mark = policy->mark.v & policy->mark.m; |
560 | 544 | ||
561 | write_lock_bh(&xfrm_policy_lock); | 545 | write_lock_bh(&xfrm_policy_lock); |
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
605 | else if (xfrm_bydst_should_resize(net, dir, NULL)) | 589 | else if (xfrm_bydst_should_resize(net, dir, NULL)) |
606 | schedule_work(&net->xfrm.policy_hash_work); | 590 | schedule_work(&net->xfrm.policy_hash_work); |
607 | 591 | ||
608 | read_lock_bh(&xfrm_policy_lock); | ||
609 | gc_list = NULL; | ||
610 | entry = &policy->bydst; | ||
611 | hlist_for_each_entry_continue(policy, entry, bydst) { | ||
612 | struct dst_entry *dst; | ||
613 | |||
614 | write_lock(&policy->lock); | ||
615 | dst = policy->bundles; | ||
616 | if (dst) { | ||
617 | struct dst_entry *tail = dst; | ||
618 | while (tail->next) | ||
619 | tail = tail->next; | ||
620 | tail->next = gc_list; | ||
621 | gc_list = dst; | ||
622 | |||
623 | policy->bundles = NULL; | ||
624 | } | ||
625 | write_unlock(&policy->lock); | ||
626 | } | ||
627 | read_unlock_bh(&xfrm_policy_lock); | ||
628 | |||
629 | while (gc_list) { | ||
630 | struct dst_entry *dst = gc_list; | ||
631 | |||
632 | gc_list = dst->next; | ||
633 | dst_free(dst); | ||
634 | } | ||
635 | |||
636 | return 0; | 592 | return 0; |
637 | } | 593 | } |
638 | EXPORT_SYMBOL(xfrm_policy_insert); | 594 | EXPORT_SYMBOL(xfrm_policy_insert); |
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, | |||
671 | } | 627 | } |
672 | write_unlock_bh(&xfrm_policy_lock); | 628 | write_unlock_bh(&xfrm_policy_lock); |
673 | 629 | ||
674 | if (ret && delete) { | 630 | if (ret && delete) |
675 | atomic_inc(&flow_cache_genid); | ||
676 | xfrm_policy_kill(ret); | 631 | xfrm_policy_kill(ret); |
677 | } | ||
678 | return ret; | 632 | return ret; |
679 | } | 633 | } |
680 | EXPORT_SYMBOL(xfrm_policy_bysel_ctx); | 634 | EXPORT_SYMBOL(xfrm_policy_bysel_ctx); |
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, | |||
713 | } | 667 | } |
714 | write_unlock_bh(&xfrm_policy_lock); | 668 | write_unlock_bh(&xfrm_policy_lock); |
715 | 669 | ||
716 | if (ret && delete) { | 670 | if (ret && delete) |
717 | atomic_inc(&flow_cache_genid); | ||
718 | xfrm_policy_kill(ret); | 671 | xfrm_policy_kill(ret); |
719 | } | ||
720 | return ret; | 672 | return ret; |
721 | } | 673 | } |
722 | EXPORT_SYMBOL(xfrm_policy_byid); | 674 | EXPORT_SYMBOL(xfrm_policy_byid); |
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi | |||
776 | int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | 728 | int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) |
777 | { | 729 | { |
778 | int dir, err = 0, cnt = 0; | 730 | int dir, err = 0, cnt = 0; |
779 | struct xfrm_policy *dp; | ||
780 | 731 | ||
781 | write_lock_bh(&xfrm_policy_lock); | 732 | write_lock_bh(&xfrm_policy_lock); |
782 | 733 | ||
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
794 | &net->xfrm.policy_inexact[dir], bydst) { | 745 | &net->xfrm.policy_inexact[dir], bydst) { |
795 | if (pol->type != type) | 746 | if (pol->type != type) |
796 | continue; | 747 | continue; |
797 | dp = __xfrm_policy_unlink(pol, dir); | 748 | __xfrm_policy_unlink(pol, dir); |
798 | write_unlock_bh(&xfrm_policy_lock); | 749 | write_unlock_bh(&xfrm_policy_lock); |
799 | if (dp) | 750 | cnt++; |
800 | cnt++; | ||
801 | 751 | ||
802 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, | 752 | xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, |
803 | audit_info->sessionid, | 753 | audit_info->sessionid, |
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
816 | bydst) { | 766 | bydst) { |
817 | if (pol->type != type) | 767 | if (pol->type != type) |
818 | continue; | 768 | continue; |
819 | dp = __xfrm_policy_unlink(pol, dir); | 769 | __xfrm_policy_unlink(pol, dir); |
820 | write_unlock_bh(&xfrm_policy_lock); | 770 | write_unlock_bh(&xfrm_policy_lock); |
821 | if (dp) | 771 | cnt++; |
822 | cnt++; | ||
823 | 772 | ||
824 | xfrm_audit_policy_delete(pol, 1, | 773 | xfrm_audit_policy_delete(pol, 1, |
825 | audit_info->loginuid, | 774 | audit_info->loginuid, |
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) | |||
835 | } | 784 | } |
836 | if (!cnt) | 785 | if (!cnt) |
837 | err = -ESRCH; | 786 | err = -ESRCH; |
838 | atomic_inc(&flow_cache_genid); | ||
839 | out: | 787 | out: |
840 | write_unlock_bh(&xfrm_policy_lock); | 788 | write_unlock_bh(&xfrm_policy_lock); |
841 | return err; | 789 | return err; |
@@ -989,32 +937,37 @@ fail: | |||
989 | return ret; | 937 | return ret; |
990 | } | 938 | } |
991 | 939 | ||
992 | static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, | 940 | static struct xfrm_policy * |
993 | u8 dir, void **objp, atomic_t **obj_refp) | 941 | __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir) |
994 | { | 942 | { |
943 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
995 | struct xfrm_policy *pol; | 944 | struct xfrm_policy *pol; |
996 | int err = 0; | ||
997 | 945 | ||
998 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
999 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); | 946 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); |
1000 | if (IS_ERR(pol)) { | 947 | if (pol != NULL) |
1001 | err = PTR_ERR(pol); | 948 | return pol; |
1002 | pol = NULL; | ||
1003 | } | ||
1004 | if (pol || err) | ||
1005 | goto end; | ||
1006 | #endif | ||
1007 | pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); | ||
1008 | if (IS_ERR(pol)) { | ||
1009 | err = PTR_ERR(pol); | ||
1010 | pol = NULL; | ||
1011 | } | ||
1012 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1013 | end: | ||
1014 | #endif | 949 | #endif |
1015 | if ((*objp = (void *) pol) != NULL) | 950 | return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); |
1016 | *obj_refp = &pol->refcnt; | 951 | } |
1017 | return err; | 952 | |
953 | static struct flow_cache_object * | ||
954 | xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, | ||
955 | u8 dir, struct flow_cache_object *old_obj, void *ctx) | ||
956 | { | ||
957 | struct xfrm_policy *pol; | ||
958 | |||
959 | if (old_obj) | ||
960 | xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); | ||
961 | |||
962 | pol = __xfrm_policy_lookup(net, fl, family, dir); | ||
963 | if (IS_ERR_OR_NULL(pol)) | ||
964 | return ERR_CAST(pol); | ||
965 | |||
966 | /* Resolver returns two references: | ||
967 | * one for the cache and one for the caller of flow_cache_lookup() */ ||
968 | xfrm_pol_hold(pol); | ||
969 | |||
970 | return &pol->flo; | ||
1018 | } | 971 | } |
1019 | 972 | ||
1020 | static inline int policy_to_flow_dir(int dir) | 973 | static inline int policy_to_flow_dir(int dir) |
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir) | |||
1104 | pol = __xfrm_policy_unlink(pol, dir); | 1057 | pol = __xfrm_policy_unlink(pol, dir); |
1105 | write_unlock_bh(&xfrm_policy_lock); | 1058 | write_unlock_bh(&xfrm_policy_lock); |
1106 | if (pol) { | 1059 | if (pol) { |
1107 | if (dir < XFRM_POLICY_MAX) | ||
1108 | atomic_inc(&flow_cache_genid); | ||
1109 | xfrm_policy_kill(pol); | 1060 | xfrm_policy_kill(pol); |
1110 | return 0; | 1061 | return 0; |
1111 | } | 1062 | } |
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
1132 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); | 1083 | __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); |
1133 | } | 1084 | } |
1134 | if (old_pol) | 1085 | if (old_pol) |
1086 | /* Unlinking always succeeds. This is the only function ||
1087 | * allowed to delete or replace a socket policy. ||
1088 | */ | ||
1135 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); | 1089 | __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); |
1136 | write_unlock_bh(&xfrm_policy_lock); | 1090 | write_unlock_bh(&xfrm_policy_lock); |
1137 | 1091 | ||
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl, | |||
1300 | * still valid. | 1254 | * still valid. |
1301 | */ | 1255 | */ |
1302 | 1256 | ||
1303 | static struct dst_entry * | ||
1304 | xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family) | ||
1305 | { | ||
1306 | struct dst_entry *x; | ||
1307 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | ||
1308 | if (unlikely(afinfo == NULL)) | ||
1309 | return ERR_PTR(-EINVAL); | ||
1310 | x = afinfo->find_bundle(fl, policy); | ||
1311 | xfrm_policy_put_afinfo(afinfo); | ||
1312 | return x; | ||
1313 | } | ||
1314 | |||
1315 | static inline int xfrm_get_tos(struct flowi *fl, int family) | 1257 | static inline int xfrm_get_tos(struct flowi *fl, int family) |
1316 | { | 1258 | { |
1317 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1259 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family) | |||
1327 | return tos; | 1269 | return tos; |
1328 | } | 1270 | } |
1329 | 1271 | ||
1272 | static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) | ||
1273 | { | ||
1274 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1275 | struct dst_entry *dst = &xdst->u.dst; | ||
1276 | |||
1277 | if (xdst->route == NULL) { | ||
1278 | /* Dummy bundle - if it has xfrms we were not | ||
1279 | * able to build a bundle because template resolution failed. ||
1280 | * It means we need to retry resolving. */ ||
1281 | if (xdst->num_xfrms > 0) | ||
1282 | return NULL; | ||
1283 | } else { | ||
1284 | /* Real bundle */ | ||
1285 | if (stale_bundle(dst)) | ||
1286 | return NULL; | ||
1287 | } | ||
1288 | |||
1289 | dst_hold(dst); | ||
1290 | return flo; | ||
1291 | } | ||
1292 | |||
1293 | static int xfrm_bundle_flo_check(struct flow_cache_object *flo) | ||
1294 | { | ||
1295 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1296 | struct dst_entry *dst = &xdst->u.dst; | ||
1297 | |||
1298 | if (!xdst->route) | ||
1299 | return 0; | ||
1300 | if (stale_bundle(dst)) | ||
1301 | return 0; | ||
1302 | |||
1303 | return 1; | ||
1304 | } | ||
1305 | |||
1306 | static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) | ||
1307 | { | ||
1308 | struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); | ||
1309 | struct dst_entry *dst = &xdst->u.dst; | ||
1310 | |||
1311 | dst_free(dst); | ||
1312 | } | ||
1313 | |||
1314 | static const struct flow_cache_ops xfrm_bundle_fc_ops = { | ||
1315 | .get = xfrm_bundle_flo_get, | ||
1316 | .check = xfrm_bundle_flo_check, | ||
1317 | .delete = xfrm_bundle_flo_delete, | ||
1318 | }; | ||
1319 | |||
1330 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | 1320 | static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) |
1331 | { | 1321 | { |
1332 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); | 1322 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); |
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1349 | BUG(); | 1339 | BUG(); |
1350 | } | 1340 | } |
1351 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | 1341 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); |
1352 | |||
1353 | xfrm_policy_put_afinfo(afinfo); | 1342 | xfrm_policy_put_afinfo(afinfo); |
1354 | 1343 | ||
1344 | xdst->flo.ops = &xfrm_bundle_fc_ops; | ||
1345 | |||
1355 | return xdst; | 1346 | return xdst; |
1356 | } | 1347 | } |
1357 | 1348 | ||
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
1389 | return err; | 1380 | return err; |
1390 | } | 1381 | } |
1391 | 1382 | ||
1383 | |||
1392 | /* Allocate chain of dst_entry's, attach known xfrm's, calculate | 1384 | /* Allocate chain of dst_entry's, attach known xfrm's, calculate |
1393 | * all the metrics... Shortly, bundle a bundle. | 1385 | * all the metrics... Shortly, bundle a bundle. |
1394 | */ | 1386 | */ |
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1452 | dst_hold(dst); | 1444 | dst_hold(dst); |
1453 | 1445 | ||
1454 | dst1->xfrm = xfrm[i]; | 1446 | dst1->xfrm = xfrm[i]; |
1455 | xdst->genid = xfrm[i]->genid; | 1447 | xdst->xfrm_genid = xfrm[i]->genid; |
1456 | 1448 | ||
1457 | dst1->obsolete = -1; | 1449 | dst1->obsolete = -1; |
1458 | dst1->flags |= DST_HOST; | 1450 | dst1->flags |= DST_HOST; |
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl) | |||
1545 | #endif | 1537 | #endif |
1546 | } | 1538 | } |
1547 | 1539 | ||
1548 | static int stale_bundle(struct dst_entry *dst); | 1540 | static int xfrm_expand_policies(struct flowi *fl, u16 family, |
1541 | struct xfrm_policy **pols, | ||
1542 | int *num_pols, int *num_xfrms) | ||
1543 | { | ||
1544 | int i; | ||
1545 | |||
1546 | if (*num_pols == 0 || !pols[0]) { | ||
1547 | *num_pols = 0; | ||
1548 | *num_xfrms = 0; | ||
1549 | return 0; | ||
1550 | } | ||
1551 | if (IS_ERR(pols[0])) | ||
1552 | return PTR_ERR(pols[0]); | ||
1553 | |||
1554 | *num_xfrms = pols[0]->xfrm_nr; | ||
1555 | |||
1556 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1557 | if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && | ||
1558 | pols[0]->type != XFRM_POLICY_TYPE_MAIN) { | ||
1559 | pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), | ||
1560 | XFRM_POLICY_TYPE_MAIN, | ||
1561 | fl, family, | ||
1562 | XFRM_POLICY_OUT); | ||
1563 | if (pols[1]) { | ||
1564 | if (IS_ERR(pols[1])) { | ||
1565 | xfrm_pols_put(pols, *num_pols); | ||
1566 | return PTR_ERR(pols[1]); | ||
1567 | } | ||
1568 | (*num_pols) ++; | ||
1569 | (*num_xfrms) += pols[1]->xfrm_nr; | ||
1570 | } | ||
1571 | } | ||
1572 | #endif | ||
1573 | for (i = 0; i < *num_pols; i++) { | ||
1574 | if (pols[i]->action != XFRM_POLICY_ALLOW) { | ||
1575 | *num_xfrms = -1; | ||
1576 | break; | ||
1577 | } | ||
1578 | } | ||
1579 | |||
1580 | return 0; | ||
1581 | |||
1582 | } | ||
1583 | |||
1584 | static struct xfrm_dst * | ||
1585 | xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, | ||
1586 | struct flowi *fl, u16 family, | ||
1587 | struct dst_entry *dst_orig) | ||
1588 | { | ||
1589 | struct net *net = xp_net(pols[0]); | ||
1590 | struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; | ||
1591 | struct dst_entry *dst; | ||
1592 | struct xfrm_dst *xdst; | ||
1593 | int err; | ||
1594 | |||
1595 | /* Try to instantiate a bundle */ | ||
1596 | err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); | ||
1597 | if (err < 0) { | ||
1598 | if (err != -EAGAIN) | ||
1599 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1600 | return ERR_PTR(err); | ||
1601 | } | ||
1602 | |||
1603 | dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); | ||
1604 | if (IS_ERR(dst)) { | ||
1605 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); | ||
1606 | return ERR_CAST(dst); | ||
1607 | } | ||
1608 | |||
1609 | xdst = (struct xfrm_dst *)dst; | ||
1610 | xdst->num_xfrms = err; | ||
1611 | if (num_pols > 1) | ||
1612 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1613 | else | ||
1614 | err = xfrm_dst_update_origin(dst, fl); | ||
1615 | if (unlikely(err)) { | ||
1616 | dst_free(dst); | ||
1617 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1618 | return ERR_PTR(err); | ||
1619 | } | ||
1620 | |||
1621 | xdst->num_pols = num_pols; | ||
1622 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1623 | xdst->policy_genid = atomic_read(&pols[0]->genid); | ||
1624 | |||
1625 | return xdst; | ||
1626 | } | ||
1627 | |||
1628 | static struct flow_cache_object * | ||
1629 | xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir, | ||
1630 | struct flow_cache_object *oldflo, void *ctx) | ||
1631 | { | ||
1632 | struct dst_entry *dst_orig = (struct dst_entry *)ctx; | ||
1633 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | ||
1634 | struct xfrm_dst *xdst, *new_xdst; | ||
1635 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; | ||
1636 | |||
1637 | /* Check if the policies from old bundle are usable */ | ||
1638 | xdst = NULL; | ||
1639 | if (oldflo) { | ||
1640 | xdst = container_of(oldflo, struct xfrm_dst, flo); | ||
1641 | num_pols = xdst->num_pols; | ||
1642 | num_xfrms = xdst->num_xfrms; | ||
1643 | pol_dead = 0; | ||
1644 | for (i = 0; i < num_pols; i++) { | ||
1645 | pols[i] = xdst->pols[i]; | ||
1646 | pol_dead |= pols[i]->walk.dead; | ||
1647 | } | ||
1648 | if (pol_dead) { | ||
1649 | dst_free(&xdst->u.dst); | ||
1650 | xdst = NULL; | ||
1651 | num_pols = 0; | ||
1652 | num_xfrms = 0; | ||
1653 | oldflo = NULL; | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | /* Resolve policies to use if we couldn't get them from | ||
1658 | * the previous cache entry */ ||
1659 | if (xdst == NULL) { | ||
1660 | num_pols = 1; | ||
1661 | pols[0] = __xfrm_policy_lookup(net, fl, family, dir); | ||
1662 | err = xfrm_expand_policies(fl, family, pols, | ||
1663 | &num_pols, &num_xfrms); | ||
1664 | if (err < 0) | ||
1665 | goto inc_error; | ||
1666 | if (num_pols == 0) | ||
1667 | return NULL; | ||
1668 | if (num_xfrms <= 0) | ||
1669 | goto make_dummy_bundle; | ||
1670 | } | ||
1671 | |||
1672 | new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); | ||
1673 | if (IS_ERR(new_xdst)) { | ||
1674 | err = PTR_ERR(new_xdst); | ||
1675 | if (err != -EAGAIN) | ||
1676 | goto error; | ||
1677 | if (oldflo == NULL) | ||
1678 | goto make_dummy_bundle; | ||
1679 | dst_hold(&xdst->u.dst); | ||
1680 | return oldflo; | ||
1681 | } | ||
1682 | |||
1683 | /* Kill the previous bundle */ | ||
1684 | if (xdst) { | ||
1685 | /* The policies were stolen for newly generated bundle */ | ||
1686 | xdst->num_pols = 0; | ||
1687 | dst_free(&xdst->u.dst); | ||
1688 | } | ||
1689 | |||
1690 | /* Flow cache does not hold a reference, it dst_free()'s, ||
1691 | * but we do need to return one reference for the original caller */ ||
1692 | dst_hold(&new_xdst->u.dst); | ||
1693 | return &new_xdst->flo; | ||
1694 | |||
1695 | make_dummy_bundle: | ||
1696 | /* We found policies, but there are no bundles to instantiate: ||
1697 | * either because the policy blocks, has no transformations, or ||
1698 | * we could not build a template (no xfrm_states). */ ||
1699 | xdst = xfrm_alloc_dst(net, family); | ||
1700 | if (IS_ERR(xdst)) { | ||
1701 | xfrm_pols_put(pols, num_pols); | ||
1702 | return ERR_CAST(xdst); | ||
1703 | } | ||
1704 | xdst->num_pols = num_pols; | ||
1705 | xdst->num_xfrms = num_xfrms; | ||
1706 | memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1707 | |||
1708 | dst_hold(&xdst->u.dst); | ||
1709 | return &xdst->flo; | ||
1710 | |||
1711 | inc_error: | ||
1712 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1713 | error: | ||
1714 | if (xdst != NULL) | ||
1715 | dst_free(&xdst->u.dst); | ||
1716 | else | ||
1717 | xfrm_pols_put(pols, num_pols); | ||
1718 | return ERR_PTR(err); | ||
1719 | } | ||
1549 | 1720 | ||
1550 | /* Main function: finds/creates a bundle for a given flow. | 1721 | /* Main function: finds/creates a bundle for a given flow. |
1551 | * | 1722 | * |
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst); | |||
1555 | int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, | 1726 | int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, |
1556 | struct sock *sk, int flags) | 1727 | struct sock *sk, int flags) |
1557 | { | 1728 | { |
1558 | struct xfrm_policy *policy; | ||
1559 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; | 1729 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
1560 | int npols; | 1730 | struct flow_cache_object *flo; |
1561 | int pol_dead; | 1731 | struct xfrm_dst *xdst; |
1562 | int xfrm_nr; | 1732 | struct dst_entry *dst, *dst_orig = *dst_p, *route; |
1563 | int pi; | 1733 | u16 family = dst_orig->ops->family; |
1564 | struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; | ||
1565 | struct dst_entry *dst, *dst_orig = *dst_p; | ||
1566 | int nx = 0; | ||
1567 | int err; | ||
1568 | u32 genid; | ||
1569 | u16 family; | ||
1570 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); | 1734 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); |
1735 | int i, err, num_pols, num_xfrms, drop_pols = 0; | ||
1571 | 1736 | ||
1572 | restart: | 1737 | restart: |
1573 | genid = atomic_read(&flow_cache_genid); | 1738 | dst = NULL; |
1574 | policy = NULL; | 1739 | xdst = NULL; |
1575 | for (pi = 0; pi < ARRAY_SIZE(pols); pi++) | 1740 | route = NULL; |
1576 | pols[pi] = NULL; | ||
1577 | npols = 0; | ||
1578 | pol_dead = 0; | ||
1579 | xfrm_nr = 0; | ||
1580 | 1741 | ||
1581 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { | 1742 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { |
1582 | policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); | 1743 | num_pols = 1; |
1583 | err = PTR_ERR(policy); | 1744 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); |
1584 | if (IS_ERR(policy)) { | 1745 | err = xfrm_expand_policies(fl, family, pols, |
1585 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1746 | &num_pols, &num_xfrms); |
1747 | if (err < 0) | ||
1586 | goto dropdst; | 1748 | goto dropdst; |
1749 | |||
1750 | if (num_pols) { | ||
1751 | if (num_xfrms <= 0) { | ||
1752 | drop_pols = num_pols; | ||
1753 | goto no_transform; | ||
1754 | } | ||
1755 | |||
1756 | xdst = xfrm_resolve_and_create_bundle( | ||
1757 | pols, num_pols, fl, | ||
1758 | family, dst_orig); | ||
1759 | if (IS_ERR(xdst)) { | ||
1760 | xfrm_pols_put(pols, num_pols); | ||
1761 | err = PTR_ERR(xdst); | ||
1762 | goto dropdst; | ||
1763 | } | ||
1764 | |||
1765 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); | ||
1766 | xdst->u.dst.next = xfrm_policy_sk_bundles; | ||
1767 | xfrm_policy_sk_bundles = &xdst->u.dst; | ||
1768 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | ||
1769 | |||
1770 | route = xdst->route; | ||
1587 | } | 1771 | } |
1588 | } | 1772 | } |
1589 | 1773 | ||
1590 | if (!policy) { | 1774 | if (xdst == NULL) { |
1591 | /* To accelerate a bit... */ | 1775 | /* To accelerate a bit... */ |
1592 | if ((dst_orig->flags & DST_NOXFRM) || | 1776 | if ((dst_orig->flags & DST_NOXFRM) || |
1593 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) | 1777 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) |
1594 | goto nopol; | 1778 | goto nopol; |
1595 | 1779 | ||
1596 | policy = flow_cache_lookup(net, fl, dst_orig->ops->family, | 1780 | flo = flow_cache_lookup(net, fl, family, dir, |
1597 | dir, xfrm_policy_lookup); | 1781 | xfrm_bundle_lookup, dst_orig); |
1598 | err = PTR_ERR(policy); | 1782 | if (flo == NULL) |
1599 | if (IS_ERR(policy)) { | 1783 | goto nopol; |
1600 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1784 | if (IS_ERR(flo)) { |
1785 | err = PTR_ERR(flo); | ||
1601 | goto dropdst; | 1786 | goto dropdst; |
1602 | } | 1787 | } |
1788 | xdst = container_of(flo, struct xfrm_dst, flo); | ||
1789 | |||
1790 | num_pols = xdst->num_pols; | ||
1791 | num_xfrms = xdst->num_xfrms; | ||
1792 | memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); | ||
1793 | route = xdst->route; | ||
1794 | } | ||
1795 | |||
1796 | dst = &xdst->u.dst; | ||
1797 | if (route == NULL && num_xfrms > 0) { | ||
1798 | /* The only case when xfrm_bundle_lookup() returns a | ||
1799 | * bundle with null route, is when the template could | ||
1800 | * not be resolved. It means policies are there, but | ||
1801 | * bundle could not be created, since we don't yet | ||
1802 | * have the xfrm_state's. We need to wait for KM to | ||
1803 | * negotiate new SA's or bail out with error.*/ | ||
1804 | if (net->xfrm.sysctl_larval_drop) { | ||
1805 | /* EREMOTE tells the caller to generate | ||
1806 | * a one-shot blackhole route. */ | ||
1807 | dst_release(dst); | ||
1808 | xfrm_pols_put(pols, num_pols); | ||
1809 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1810 | return -EREMOTE; | ||
1811 | } | ||
1812 | if (flags & XFRM_LOOKUP_WAIT) { | ||
1813 | DECLARE_WAITQUEUE(wait, current); | ||
1814 | |||
1815 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1816 | set_current_state(TASK_INTERRUPTIBLE); | ||
1817 | schedule(); | ||
1818 | set_current_state(TASK_RUNNING); | ||
1819 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1820 | |||
1821 | if (!signal_pending(current)) { | ||
1822 | dst_release(dst); | ||
1823 | goto restart; | ||
1824 | } | ||
1825 | |||
1826 | err = -ERESTART; | ||
1827 | } else | ||
1828 | err = -EAGAIN; | ||
1829 | |||
1830 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1831 | goto error; | ||
1603 | } | 1832 | } |
1604 | 1833 | ||
1605 | if (!policy) | 1834 | no_transform: |
1835 | if (num_pols == 0) | ||
1606 | goto nopol; | 1836 | goto nopol; |
1607 | 1837 | ||
1608 | family = dst_orig->ops->family; | 1838 | if ((flags & XFRM_LOOKUP_ICMP) && |
1609 | pols[0] = policy; | 1839 | !(pols[0]->flags & XFRM_POLICY_ICMP)) { |
1610 | npols ++; | 1840 | err = -ENOENT; |
1611 | xfrm_nr += pols[0]->xfrm_nr; | ||
1612 | |||
1613 | err = -ENOENT; | ||
1614 | if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP)) | ||
1615 | goto error; | 1841 | goto error; |
1842 | } | ||
1616 | 1843 | ||
1617 | policy->curlft.use_time = get_seconds(); | 1844 | for (i = 0; i < num_pols; i++) |
1845 | pols[i]->curlft.use_time = get_seconds(); | ||
1618 | 1846 | ||
1619 | switch (policy->action) { | 1847 | if (num_xfrms < 0) { |
1620 | default: | ||
1621 | case XFRM_POLICY_BLOCK: | ||
1622 | /* Prohibit the flow */ | 1848 | /* Prohibit the flow */ |
1623 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); | 1849 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); |
1624 | err = -EPERM; | 1850 | err = -EPERM; |
1625 | goto error; | 1851 | goto error; |
1626 | 1852 | } else if (num_xfrms > 0) { | |
1627 | case XFRM_POLICY_ALLOW: | 1853 | /* Flow transformed */ |
1628 | #ifndef CONFIG_XFRM_SUB_POLICY | 1854 | *dst_p = dst; |
1629 | if (policy->xfrm_nr == 0) { | 1855 | dst_release(dst_orig); |
1630 | /* Flow passes not transformed. */ | 1856 | } else { |
1631 | xfrm_pol_put(policy); | 1857 | /* Flow passes untransformed */ |
1632 | return 0; | 1858 | dst_release(dst); |
1633 | } | ||
1634 | #endif | ||
1635 | |||
1636 | /* Try to find matching bundle. | ||
1637 | * | ||
1638 | * LATER: help from flow cache. It is optional, this | ||
1639 | * is required only for output policy. | ||
1640 | */ | ||
1641 | dst = xfrm_find_bundle(fl, policy, family); | ||
1642 | if (IS_ERR(dst)) { | ||
1643 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1644 | err = PTR_ERR(dst); | ||
1645 | goto error; | ||
1646 | } | ||
1647 | |||
1648 | if (dst) | ||
1649 | break; | ||
1650 | |||
1651 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
1652 | if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { | ||
1653 | pols[1] = xfrm_policy_lookup_bytype(net, | ||
1654 | XFRM_POLICY_TYPE_MAIN, | ||
1655 | fl, family, | ||
1656 | XFRM_POLICY_OUT); | ||
1657 | if (pols[1]) { | ||
1658 | if (IS_ERR(pols[1])) { | ||
1659 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | ||
1660 | err = PTR_ERR(pols[1]); | ||
1661 | goto error; | ||
1662 | } | ||
1663 | if (pols[1]->action == XFRM_POLICY_BLOCK) { | ||
1664 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); | ||
1665 | err = -EPERM; | ||
1666 | goto error; | ||
1667 | } | ||
1668 | npols ++; | ||
1669 | xfrm_nr += pols[1]->xfrm_nr; | ||
1670 | } | ||
1671 | } | ||
1672 | |||
1673 | /* | ||
1674 | * Because neither flowi nor bundle information knows about | ||
1675 | * transformation template size. On more than one policy usage | ||
1676 | * we can realize whether all of them is bypass or not after | ||
1677 | * they are searched. See above not-transformed bypass | ||
1678 | * is surrounded by non-sub policy configuration, too. | ||
1679 | */ | ||
1680 | if (xfrm_nr == 0) { | ||
1681 | /* Flow passes not transformed. */ | ||
1682 | xfrm_pols_put(pols, npols); | ||
1683 | return 0; | ||
1684 | } | ||
1685 | |||
1686 | #endif | ||
1687 | nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family); | ||
1688 | |||
1689 | if (unlikely(nx<0)) { | ||
1690 | err = nx; | ||
1691 | if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) { | ||
1692 | /* EREMOTE tells the caller to generate | ||
1693 | * a one-shot blackhole route. | ||
1694 | */ | ||
1695 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1696 | xfrm_pol_put(policy); | ||
1697 | return -EREMOTE; | ||
1698 | } | ||
1699 | if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) { | ||
1700 | DECLARE_WAITQUEUE(wait, current); | ||
1701 | |||
1702 | add_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1703 | set_current_state(TASK_INTERRUPTIBLE); | ||
1704 | schedule(); | ||
1705 | set_current_state(TASK_RUNNING); | ||
1706 | remove_wait_queue(&net->xfrm.km_waitq, &wait); | ||
1707 | |||
1708 | nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family); | ||
1709 | |||
1710 | if (nx == -EAGAIN && signal_pending(current)) { | ||
1711 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1712 | err = -ERESTART; | ||
1713 | goto error; | ||
1714 | } | ||
1715 | if (nx == -EAGAIN || | ||
1716 | genid != atomic_read(&flow_cache_genid)) { | ||
1717 | xfrm_pols_put(pols, npols); | ||
1718 | goto restart; | ||
1719 | } | ||
1720 | err = nx; | ||
1721 | } | ||
1722 | if (err < 0) { | ||
1723 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | ||
1724 | goto error; | ||
1725 | } | ||
1726 | } | ||
1727 | if (nx == 0) { | ||
1728 | /* Flow passes not transformed. */ | ||
1729 | xfrm_pols_put(pols, npols); | ||
1730 | return 0; | ||
1731 | } | ||
1732 | |||
1733 | dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig); | ||
1734 | err = PTR_ERR(dst); | ||
1735 | if (IS_ERR(dst)) { | ||
1736 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); | ||
1737 | goto error; | ||
1738 | } | ||
1739 | |||
1740 | for (pi = 0; pi < npols; pi++) { | ||
1741 | read_lock_bh(&pols[pi]->lock); | ||
1742 | pol_dead |= pols[pi]->walk.dead; | ||
1743 | read_unlock_bh(&pols[pi]->lock); | ||
1744 | } | ||
1745 | |||
1746 | write_lock_bh(&policy->lock); | ||
1747 | if (unlikely(pol_dead || stale_bundle(dst))) { | ||
1748 | /* Wow! While we worked on resolving, this | ||
1749 | * policy has gone. Retry. It is not paranoia, | ||
1750 | * we just cannot enlist new bundle to dead object. | ||
1751 | * We can't enlist stable bundles either. | ||
1752 | */ | ||
1753 | write_unlock_bh(&policy->lock); | ||
1754 | dst_free(dst); | ||
1755 | |||
1756 | if (pol_dead) | ||
1757 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD); | ||
1758 | else | ||
1759 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1760 | err = -EHOSTUNREACH; | ||
1761 | goto error; | ||
1762 | } | ||
1763 | |||
1764 | if (npols > 1) | ||
1765 | err = xfrm_dst_update_parent(dst, &pols[1]->selector); | ||
1766 | else | ||
1767 | err = xfrm_dst_update_origin(dst, fl); | ||
1768 | if (unlikely(err)) { | ||
1769 | write_unlock_bh(&policy->lock); | ||
1770 | dst_free(dst); | ||
1771 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); | ||
1772 | goto error; | ||
1773 | } | ||
1774 | |||
1775 | dst->next = policy->bundles; | ||
1776 | policy->bundles = dst; | ||
1777 | dst_hold(dst); | ||
1778 | write_unlock_bh(&policy->lock); | ||
1779 | } | 1859 | } |
1780 | *dst_p = dst; | 1860 | ok: |
1781 | dst_release(dst_orig); | 1861 | xfrm_pols_put(pols, drop_pols); |
1782 | xfrm_pols_put(pols, npols); | ||
1783 | return 0; | 1862 | return 0; |
1784 | 1863 | ||
1864 | nopol: | ||
1865 | if (!(flags & XFRM_LOOKUP_ICMP)) | ||
1866 | goto ok; | ||
1867 | err = -ENOENT; | ||
1785 | error: | 1868 | error: |
1786 | xfrm_pols_put(pols, npols); | 1869 | dst_release(dst); |
1787 | dropdst: | 1870 | dropdst: |
1788 | dst_release(dst_orig); | 1871 | dst_release(dst_orig); |
1789 | *dst_p = NULL; | 1872 | *dst_p = NULL; |
1873 | xfrm_pols_put(pols, drop_pols); | ||
1790 | return err; | 1874 | return err; |
1791 | |||
1792 | nopol: | ||
1793 | err = -ENOENT; | ||
1794 | if (flags & XFRM_LOOKUP_ICMP) | ||
1795 | goto dropdst; | ||
1796 | return 0; | ||
1797 | } | 1875 | } |
1798 | EXPORT_SYMBOL(__xfrm_lookup); | 1876 | EXPORT_SYMBOL(__xfrm_lookup); |
1799 | 1877 | ||
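After the rewrite, __xfrm_lookup() no longer walks policies and bundle lists by hand: for non-socket flows it defers to the flow cache, handing in xfrm_bundle_lookup as the resolver and the original dst as opaque ctx. The caller's side of that contract, condensed from the hunk above (error paths elided):

/* Cache miss: the resolver builds or rebuilds the bundle.
 * Cache hit: ops->get() already validated it.
 * Either way the caller receives exactly one reference. */
flo = flow_cache_lookup(net, fl, family, dir,
			xfrm_bundle_lookup, dst_orig);
if (flo == NULL)	/* no policy applies to this flow */
	goto nopol;
if (IS_ERR(flo))	/* resolver or cache error */
	goto dropdst;	/* err = PTR_ERR(flo) in the real code */
xdst = container_of(flo, struct xfrm_dst, flo);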
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
1952 | } | 2030 | } |
1953 | } | 2031 | } |
1954 | 2032 | ||
1955 | if (!pol) | 2033 | if (!pol) { |
1956 | pol = flow_cache_lookup(net, &fl, family, fl_dir, | 2034 | struct flow_cache_object *flo; |
1957 | xfrm_policy_lookup); | 2035 | |
2036 | flo = flow_cache_lookup(net, &fl, family, fl_dir, | ||
2037 | xfrm_policy_lookup, NULL); | ||
2038 | if (IS_ERR_OR_NULL(flo)) | ||
2039 | pol = ERR_CAST(flo); | ||
2040 | else | ||
2041 | pol = container_of(flo, struct xfrm_policy, flo); | ||
2042 | } | ||
1958 | 2043 | ||
1959 | if (IS_ERR(pol)) { | 2044 | if (IS_ERR(pol)) { |
1960 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); | 2045 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); |
@@ -2138,71 +2223,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) | |||
2138 | return dst; | 2223 | return dst; |
2139 | } | 2224 | } |
2140 | 2225 | ||
2141 | static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p) | 2226 | static void __xfrm_garbage_collect(struct net *net) |
2142 | { | ||
2143 | struct dst_entry *dst, **dstp; | ||
2144 | |||
2145 | write_lock(&pol->lock); | ||
2146 | dstp = &pol->bundles; | ||
2147 | while ((dst=*dstp) != NULL) { | ||
2148 | if (func(dst)) { | ||
2149 | *dstp = dst->next; | ||
2150 | dst->next = *gc_list_p; | ||
2151 | *gc_list_p = dst; | ||
2152 | } else { | ||
2153 | dstp = &dst->next; | ||
2154 | } | ||
2155 | } | ||
2156 | write_unlock(&pol->lock); | ||
2157 | } | ||
2158 | |||
2159 | static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *)) | ||
2160 | { | 2227 | { |
2161 | struct dst_entry *gc_list = NULL; | 2228 | struct dst_entry *head, *next; |
2162 | int dir; | ||
2163 | 2229 | ||
2164 | read_lock_bh(&xfrm_policy_lock); | 2230 | flow_cache_flush(); |
2165 | for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { | ||
2166 | struct xfrm_policy *pol; | ||
2167 | struct hlist_node *entry; | ||
2168 | struct hlist_head *table; | ||
2169 | int i; | ||
2170 | 2231 | ||
2171 | hlist_for_each_entry(pol, entry, | 2232 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); |
2172 | &net->xfrm.policy_inexact[dir], bydst) | 2233 | head = xfrm_policy_sk_bundles; |
2173 | prune_one_bundle(pol, func, &gc_list); | 2234 | xfrm_policy_sk_bundles = NULL; |
2235 | spin_unlock_bh(&xfrm_policy_sk_bundle_lock); | ||
2174 | 2236 | ||
2175 | table = net->xfrm.policy_bydst[dir].table; | 2237 | while (head) { |
2176 | for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { | 2238 | next = head->next; |
2177 | hlist_for_each_entry(pol, entry, table + i, bydst) | 2239 | dst_free(head); |
2178 | prune_one_bundle(pol, func, &gc_list); | 2240 | head = next; |
2179 | } | ||
2180 | } | ||
2181 | read_unlock_bh(&xfrm_policy_lock); | ||
2182 | |||
2183 | while (gc_list) { | ||
2184 | struct dst_entry *dst = gc_list; | ||
2185 | gc_list = dst->next; | ||
2186 | dst_free(dst); | ||
2187 | } | 2241 | } |
2188 | } | 2242 | } |
2189 | 2243 | ||
2190 | static int unused_bundle(struct dst_entry *dst) | ||
2191 | { | ||
2192 | return !atomic_read(&dst->__refcnt); | ||
2193 | } | ||
2194 | |||
2195 | static void __xfrm_garbage_collect(struct net *net) | ||
2196 | { | ||
2197 | xfrm_prune_bundles(net, unused_bundle); | ||
2198 | } | ||
2199 | |||
2200 | static int xfrm_flush_bundles(struct net *net) | ||
2201 | { | ||
2202 | xfrm_prune_bundles(net, stale_bundle); | ||
2203 | return 0; | ||
2204 | } | ||
2205 | |||
2206 | static void xfrm_init_pmtu(struct dst_entry *dst) | 2244 | static void xfrm_init_pmtu(struct dst_entry *dst) |
2207 | { | 2245 | { |
2208 | do { | 2246 | do { |
@@ -2260,7 +2298,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
2260 | return 0; | 2298 | return 0; |
2261 | if (dst->xfrm->km.state != XFRM_STATE_VALID) | 2299 | if (dst->xfrm->km.state != XFRM_STATE_VALID) |
2262 | return 0; | 2300 | return 0; |
2263 | if (xdst->genid != dst->xfrm->genid) | 2301 | if (xdst->xfrm_genid != dst->xfrm->genid) |
2302 | return 0; | ||
2303 | if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | ||
2264 | return 0; | 2304 | return 0; |
2265 | 2305 | ||
2266 | if (strict && fl && | 2306 | if (strict && fl && |
@@ -2425,7 +2465,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void | |||
2425 | 2465 | ||
2426 | switch (event) { | 2466 | switch (event) { |
2427 | case NETDEV_DOWN: | 2467 | case NETDEV_DOWN: |
2428 | xfrm_flush_bundles(dev_net(dev)); | 2468 | __xfrm_garbage_collect(dev_net(dev)); |
2429 | } | 2469 | } |
2430 | return NOTIFY_DONE; | 2470 | return NOTIFY_DONE; |
2431 | } | 2471 | } |
@@ -2531,7 +2571,6 @@ static void xfrm_policy_fini(struct net *net) | |||
2531 | audit_info.sessionid = -1; | 2571 | audit_info.sessionid = -1; |
2532 | audit_info.secid = 0; | 2572 | audit_info.secid = 0; |
2533 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); | 2573 | xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); |
2534 | flush_work(&xfrm_policy_gc_work); | ||
2535 | 2574 | ||
2536 | WARN_ON(!list_empty(&net->xfrm.policy_all)); | 2575 | WARN_ON(!list_empty(&net->xfrm.policy_all)); |
2537 | 2576 | ||
@@ -2757,7 +2796,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, | |||
2757 | struct xfrm_migrate *m, int num_migrate) | 2796 | struct xfrm_migrate *m, int num_migrate) |
2758 | { | 2797 | { |
2759 | struct xfrm_migrate *mp; | 2798 | struct xfrm_migrate *mp; |
2760 | struct dst_entry *dst; | ||
2761 | int i, j, n = 0; | 2799 | int i, j, n = 0; |
2762 | 2800 | ||
2763 | write_lock_bh(&pol->lock); | 2801 | write_lock_bh(&pol->lock); |
@@ -2782,10 +2820,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol, | |||
2782 | sizeof(pol->xfrm_vec[i].saddr)); | 2820 | sizeof(pol->xfrm_vec[i].saddr)); |
2783 | pol->xfrm_vec[i].encap_family = mp->new_family; | 2821 | pol->xfrm_vec[i].encap_family = mp->new_family; |
2784 | /* flush bundles */ | 2822 | /* flush bundles */ |
2785 | while ((dst = pol->bundles) != NULL) { | 2823 | atomic_inc(&pol->genid); |
2786 | pol->bundles = dst->next; | ||
2787 | dst_free(dst); | ||
2788 | } | ||
2789 | } | 2824 | } |
2790 | } | 2825 | } |
2791 | 2826 | ||
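With per-policy bundle lists gone, invalidation becomes generation-based: instead of walking and freeing pol->bundles, xfrm_policy_kill() and the migrate path just bump pol->genid, and xfrm_bundle_ok() compares that counter against the snapshot taken when the bundle was built. The three sides of the scheme, collected from the hunks above:

/* At bundle creation (xfrm_resolve_and_create_bundle): */
xdst->policy_genid = atomic_read(&pols[0]->genid);

/* On invalidation (xfrm_policy_kill, xfrm_policy_migrate): */
atomic_inc(&pol->genid);	/* every existing bundle goes stale */

/* On each use (xfrm_bundle_ok): */
if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
	return 0;		/* stale; the flow cache rebuilds it */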
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index add77ecb8ac4..5208b12fbfb4 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -38,7 +38,6 @@ | |||
38 | static DEFINE_SPINLOCK(xfrm_state_lock); | 38 | static DEFINE_SPINLOCK(xfrm_state_lock); |
39 | 39 | ||
40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
41 | static unsigned int xfrm_state_genid; | ||
42 | 41 | ||
43 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | 42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); |
44 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | 43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); |
@@ -924,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x) | |||
924 | struct net *net = xs_net(x); | 923 | struct net *net = xs_net(x); |
925 | unsigned int h; | 924 | unsigned int h; |
926 | 925 | ||
927 | x->genid = ++xfrm_state_genid; | ||
928 | |||
929 | list_add(&x->km.all, &net->xfrm.state_all); | 926 | list_add(&x->km.all, &net->xfrm.state_all); |
930 | 927 | ||
931 | h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, | 928 | h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, |
@@ -971,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) | |||
971 | (mark & x->mark.m) == x->mark.v && | 968 | (mark & x->mark.m) == x->mark.v && |
972 | !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && | 969 | !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && |
973 | !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) | 970 | !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) |
974 | x->genid = xfrm_state_genid; | 971 | x->genid++; |
975 | } | 972 | } |
976 | } | 973 | } |
977 | 974 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6106b72826d3..a267fbdda525 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1741 | if (err) | 1741 | if (err) |
1742 | return err; | 1742 | return err; |
1743 | 1743 | ||
1744 | err = verify_policy_dir(p->dir); | ||
1745 | if (err) | ||
1746 | return err; | ||
1747 | |||
1744 | if (p->index) | 1748 | if (p->index) |
1745 | xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); | 1749 | xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); |
1746 | else { | 1750 | else { |
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1766 | if (xp == NULL) | 1770 | if (xp == NULL) |
1767 | return -ENOENT; | 1771 | return -ENOENT; |
1768 | 1772 | ||
1769 | read_lock(&xp->lock); | 1773 | if (unlikely(xp->walk.dead)) |
1770 | if (xp->walk.dead) { | ||
1771 | read_unlock(&xp->lock); | ||
1772 | goto out; | 1774 | goto out; |
1773 | } | ||
1774 | 1775 | ||
1775 | read_unlock(&xp->lock); | ||
1776 | err = 0; | 1776 | err = 0; |
1777 | if (up->hard) { | 1777 | if (up->hard) { |
1778 | uid_t loginuid = NETLINK_CB(skb).loginuid; | 1778 | uid_t loginuid = NETLINK_CB(skb).loginuid; |