author	Patrick McHardy <kaber@trash.net>	2009-06-11 10:00:49 -0400
committer	Patrick McHardy <kaber@trash.net>	2009-06-11 10:00:49 -0400
commit	36432dae73cf2c90a59b39c8df9fd8219272b005 (patch)
tree	660b9104305a809ec4fdeb295ca13d6e90790ecc /net
parent	440f0d588555892601cfe511728a0fc0c8204063 (diff)
parent	bb400801c2f40bbd9a688818323ad09abfc4e581 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
Diffstat (limited to 'net')
171 files changed, 5357 insertions, 2846 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index d1e10546eb85..fe649081fbdc 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -378,13 +378,13 @@ static void vlan_sync_address(struct net_device *dev,
 	 * the new address */
 	if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_delete(dev, vlandev->dev_addr, ETH_ALEN);
+		dev_unicast_delete(dev, vlandev->dev_addr);
 
 	/* vlan address was equal to the old address and is different from
 	 * the new address */
 	if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
 	    compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
-		dev_unicast_add(dev, vlandev->dev_addr, ETH_ALEN);
+		dev_unicast_add(dev, vlandev->dev_addr);
 
 	memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
 }
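This hunk is part of a tree-wide API change in net-next: dev_unicast_add() and dev_unicast_delete() no longer take an explicit address length, since the core can use the device's own addr_len. A minimal call-site sketch of the migration, using a hypothetical helper that is not part of this patch:

#include <linux/netdevice.h>

/* Hypothetical helper showing the old vs. new calling convention. */
static int example_sync_secondary_addr(struct net_device *lower, u8 *addr)
{
	int err;

	/* old: err = dev_unicast_add(lower, addr, ETH_ALEN); */
	err = dev_unicast_add(lower, addr);
	if (err < 0)
		return err;

	/* old: dev_unicast_delete(lower, addr, ETH_ALEN); */
	dev_unicast_delete(lower, addr);
	return 0;
}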
@@ -758,7 +758,7 @@ static void __exit vlan_cleanup_module(void)
 		BUG_ON(!hlist_empty(&vlan_group_hash[i]));
 
 	unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
-	synchronize_net();
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
 	vlan_gvrp_uninit();
 }
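The replaced synchronize_net() only waits for RCU read-side critical sections; it does not wait for pending call_rcu() callbacks, which could still run and touch module memory after the module is freed. rcu_barrier() waits for all queued callbacks to complete. A minimal module-exit sketch of the pattern, with hypothetical names:

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;	/* hypothetical cache */

static void __exit example_exit(void)
{
	/* ... unregister hooks first, so no new call_rcu() is queued ... */

	rcu_barrier();	/* wait for all outstanding call_rcu() callbacks */

	kmem_cache_destroy(example_cache);	/* now safe to tear down */
}
module_exit(example_exit);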
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1e2ad4c7c59b..96bad8f233e2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -441,7 +441,7 @@ static int vlan_dev_open(struct net_device *dev)
 		return -ENETDOWN;
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN);
+		err = dev_unicast_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
 	}
@@ -470,7 +470,7 @@ clear_allmulti:
 	dev_set_allmulti(real_dev, -1);
 del_unicast:
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
+		dev_unicast_delete(real_dev, dev->dev_addr);
 out:
 	netif_carrier_off(dev);
 	return err;
@@ -492,7 +492,7 @@ static int vlan_dev_stop(struct net_device *dev)
 	dev_set_promiscuity(real_dev, -1);
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
+		dev_unicast_delete(real_dev, dev->dev_addr);
 
 	netif_carrier_off(dev);
 	return 0;
@@ -511,13 +511,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 		goto out;
 
 	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
-		err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN);
+		err = dev_unicast_add(real_dev, addr->sa_data);
 		if (err < 0)
 			return err;
 	}
 
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-		dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
+		dev_unicast_delete(real_dev, dev->dev_addr);
 
 out:
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
diff --git a/net/Kconfig b/net/Kconfig
index c19f549c8e74..7051b9710675 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -179,6 +179,7 @@ source "net/lapb/Kconfig"
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/phonet/Kconfig"
+source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 
diff --git a/net/Makefile b/net/Makefile
index 9e00a55a901b..ba324aefda73 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_NET_9P) += 9p/
 ifneq ($(CONFIG_DCB),)
 obj-y += dcb/
 endif
+obj-y += ieee802154/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d6a9243641af..b603cbacdc58 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -939,6 +939,7 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 				   int len, unsigned long sum)
 {
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
 	int i, copy;
 
 	/* checksum stuff in header space */
@@ -977,26 +978,22 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				sum = atalk_sum_skb(list, offset - start,
-						    copy, sum);
-				if ((len -= copy) == 0)
-					return sum;
-				offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			sum = atalk_sum_skb(frag_iter, offset - start,
+					    copy, sum);
+			if ((len -= copy) == 0)
+				return sum;
+			offset += copy;
+		}
+		start = end;
 	}
 
 	BUG_ON(len > 0);
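skb_walk_frags() is the helper used throughout this merge to iterate an skb's frag_list instead of open-coding the walk. Roughly (see linux/skbuff.h for the real definition) it expands to a plain for loop, so the conversion is mechanical:

/* Approximate shape of the helper from linux/skbuff.h: */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

/* Usage sketch (hypothetical function): total length of all fragments. */
static unsigned int example_frag_total(const struct sk_buff *skb)
{
	struct sk_buff *frag_iter;
	unsigned int total = 0;

	skb_walk_frags(skb, frag_iter)
		total += frag_iter->len;

	return total;
}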
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 3100a8940afc..2912665fc58c 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -228,7 +228,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct br2684_dev *brdev = BRPRIV(dev);
 	struct br2684_vcc *brvcc;
 
-	pr_debug("br2684_start_xmit, skb->dst=%p\n", skb->dst);
+	pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb));
 	read_lock(&devs_lock);
 	brvcc = pick_outgoing_vcc(skb, brdev);
 	if (brvcc == NULL) {
@@ -445,9 +445,10 @@ free_skb:
  */
 static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 {
+	struct sk_buff_head queue;
 	int err;
 	struct br2684_vcc *brvcc;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *tmp;
 	struct sk_buff_head *rq;
 	struct br2684_dev *brdev;
 	struct net_device *net_dev;
@@ -505,29 +506,20 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 	barrier();
 	atmvcc->push = br2684_push;
 
+	__skb_queue_head_init(&queue);
 	rq = &sk_atm(atmvcc)->sk_receive_queue;
 
 	spin_lock_irqsave(&rq->lock, flags);
-	if (skb_queue_empty(rq)) {
-		skb = NULL;
-	} else {
-		/* NULL terminate the list. */
-		rq->prev->next = NULL;
-		skb = rq->next;
-	}
-	rq->prev = rq->next = (struct sk_buff *)rq;
-	rq->qlen = 0;
+	skb_queue_splice_init(rq, &queue);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
-	while (skb) {
-		struct sk_buff *next = skb->next;
+	skb_queue_walk_safe(&queue, skb, tmp) {
+		struct net_device *dev = skb->dev;
 
-		skb->next = skb->prev = NULL;
-		br2684_push(atmvcc, skb);
-		skb->dev->stats.rx_bytes -= skb->len;
-		skb->dev->stats.rx_packets--;
+		dev->stats.rx_bytes -= skb->len;
+		dev->stats.rx_packets--;
 
-		skb = next;
+		br2684_push(atmvcc, skb);
 	}
 	__module_get(THIS_MODULE);
 	return 0;
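The old code emptied the receive queue by hand: NULL-terminating the list, resetting the head's prev/next pointers and qlen, then chasing raw next pointers. skb_queue_splice_init() performs the same move in O(1) and leaves the source queue empty, so the lock is held only for the splice and the private list can be walked afterwards. A sketch of the drain pattern with hypothetical names:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical drain helper: splice under the lock, process lock-free. */
static void example_drain(struct sk_buff_head *rq,
			  void (*handle)(struct sk_buff *skb))
{
	struct sk_buff_head queue;
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	__skb_queue_head_init(&queue);

	spin_lock_irqsave(&rq->lock, flags);
	skb_queue_splice_init(rq, &queue);	/* rq is now empty */
	spin_unlock_irqrestore(&rq->lock, flags);

	skb_queue_walk_safe(&queue, skb, tmp) {
		__skb_unlink(skb, &queue);	/* detach before handing off */
		handle(skb);
	}
}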
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 3dc0a3a42a57..e65a3b1477f8 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -369,16 +369,16 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 
 	pr_debug("clip_start_xmit (skb %p)\n", skb);
-	if (!skb->dst) {
-		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
+	if (!skb_dst(skb)) {
+		printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n");
 		dev_kfree_skb(skb);
 		dev->stats.tx_dropped++;
 		return 0;
 	}
-	if (!skb->dst->neighbour) {
+	if (!skb_dst(skb)->neighbour) {
 #if 0
-		skb->dst->neighbour = clip_find_neighbour(skb->dst, 1);
-		if (!skb->dst->neighbour) {
+		skb_dst(skb)->neighbour = clip_find_neighbour(skb_dst(skb), 1);
+		if (!skb_dst(skb)->neighbour) {
 			dev_kfree_skb(skb);	/* lost that one */
 			dev->stats.tx_dropped++;
 			return 0;
@@ -389,7 +389,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev->stats.tx_dropped++;
 		return 0;
 	}
-	entry = NEIGH2ENTRY(skb->dst->neighbour);
+	entry = NEIGH2ENTRY(skb_dst(skb)->neighbour);
 	if (!entry->vccs) {
 		if (time_after(jiffies, entry->expires)) {
 			/* should be resolved */
@@ -406,7 +406,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
 	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
-	pr_debug("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
+	pr_debug("using neighbour %p, vcc %p\n", skb_dst(skb)->neighbour, vcc);
 	if (entry->vccs->encap) {
 		void *here;
 
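These conversions replace direct skb->dst reads with the skb_dst() accessor; the dst pointer is being folded into an opaque word inside struct sk_buff, so all users must go through the helper. In this kernel generation the accessor is, roughly:

/* Approximate shape (the real definition lives in linux/skbuff.h): */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->_skb_dst;
}

/* Call-site migration:
 *   old: if (!skb->dst) ...
 *   new: if (!skb_dst(skb)) ...
 */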
@@ -445,9 +445,9 @@
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
+	struct sk_buff_head *rq, queue;
 	struct clip_vcc *clip_vcc;
-	struct sk_buff *skb;
-	struct sk_buff_head *rq;
+	struct sk_buff *skb, *tmp;
 	unsigned long flags;
 
 	if (!vcc->push)
@@ -469,39 +469,28 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 	vcc->push = clip_push;
 	vcc->pop = clip_pop;
 
+	__skb_queue_head_init(&queue);
 	rq = &sk_atm(vcc)->sk_receive_queue;
 
 	spin_lock_irqsave(&rq->lock, flags);
-	if (skb_queue_empty(rq)) {
-		skb = NULL;
-	} else {
-		/* NULL terminate the list. */
-		rq->prev->next = NULL;
-		skb = rq->next;
-	}
-	rq->prev = rq->next = (struct sk_buff *)rq;
-	rq->qlen = 0;
+	skb_queue_splice_init(rq, &queue);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* re-process everything received between connection setup and MKIP */
-	while (skb) {
-		struct sk_buff *next = skb->next;
-
-		skb->next = skb->prev = NULL;
+	skb_queue_walk_safe(&queue, skb, tmp) {
 		if (!clip_devs) {
 			atm_return(vcc, skb->truesize);
 			kfree_skb(skb);
 		} else {
+			struct net_device *dev = skb->dev;
 			unsigned int len = skb->len;
 
 			skb_get(skb);
 			clip_push(vcc, skb);
-			skb->dev->stats.rx_packets--;
-			skb->dev->stats.rx_bytes -= len;
+			dev->stats.rx_packets--;
+			dev->stats.rx_bytes -= len;
 			kfree_skb(skb);
 		}
-
-		skb = next;
 	}
 	return 0;
 }
@@ -568,6 +557,7 @@ static void clip_setup(struct net_device *dev)
 	/* without any more elaborate queuing. 100 is a reasonable */
 	/* compromise between decent burst-tolerance and protection */
 	/* against memory hogs. */
+	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
 static int clip_create(int number)
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 78958c0f9a40..97f8d68d574d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -382,7 +382,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
 
 	BT_DBG("ctrl %p", ctrl);
 
-	capi_ctr_reseted(ctrl);
+	capi_ctr_down(ctrl);
 
 	atomic_inc(&session->terminate);
 	cmtp_schedule(session);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cd061510b6bd..406ad07cdea1 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -39,6 +39,7 @@
 #include <linux/skbuff.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
+#include <linux/rfkill.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -476,6 +477,11 @@ int hci_dev_open(__u16 dev)
 
 	hci_req_lock(hdev);
 
+	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
+		ret = -ERFKILL;
+		goto done;
+	}
+
 	if (test_bit(HCI_UP, &hdev->flags)) {
 		ret = -EALREADY;
 		goto done;
@@ -813,6 +819,24 @@ int hci_get_dev_info(void __user *arg)
 
 /* ---- Interface to HCI drivers ---- */
 
+static int hci_rfkill_set_block(void *data, bool blocked)
+{
+	struct hci_dev *hdev = data;
+
+	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
+
+	if (!blocked)
+		return 0;
+
+	hci_dev_do_close(hdev);
+
+	return 0;
+}
+
+static const struct rfkill_ops hci_rfkill_ops = {
+	.set_block = hci_rfkill_set_block,
+};
+
 /* Alloc HCI device */
 struct hci_dev *hci_alloc_dev(void)
 {
@@ -844,7 +868,8 @@ int hci_register_dev(struct hci_dev *hdev)
 	struct list_head *head = &hci_dev_list, *p;
 	int i, id = 0;
 
-	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
+	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
+						hdev->type, hdev->owner);
 
 	if (!hdev->open || !hdev->close || !hdev->destruct)
 		return -EINVAL;
@@ -900,6 +925,15 @@
 
 	hci_register_sysfs(hdev);
 
+	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
+				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
+	if (hdev->rfkill) {
+		if (rfkill_register(hdev->rfkill) < 0) {
+			rfkill_destroy(hdev->rfkill);
+			hdev->rfkill = NULL;
+		}
+	}
+
 	hci_notify(hdev, HCI_DEV_REG);
 
 	return id;
@@ -924,6 +958,11 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
+	if (hdev->rfkill) {
+		rfkill_unregister(hdev->rfkill);
+		rfkill_destroy(hdev->rfkill);
+	}
+
 	hci_unregister_sysfs(hdev);
 
 	__hci_dev_put(hdev);
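These hunks attach an rfkill switch to every HCI device: allocate it with rfkill_alloc(), register it, and tear both down symmetrically in unregister; a registration failure is tolerated by simply running without rfkill. A condensed lifecycle sketch for a hypothetical driver:

#include <linux/rfkill.h>

struct example_dev {			/* hypothetical driver state */
	struct device dev;
	struct rfkill *rfkill;
};

static int example_set_block(void *data, bool blocked)
{
	struct example_dev *edev = data;

	if (blocked) {
		/* power the radio down here, e.g. close the device */
	}
	return 0;
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,
};

static void example_register_rfkill(struct example_dev *edev)
{
	edev->rfkill = rfkill_alloc("example", &edev->dev,
				    RFKILL_TYPE_BLUETOOTH,
				    &example_rfkill_ops, edev);
	if (edev->rfkill && rfkill_register(edev->rfkill) < 0) {
		rfkill_destroy(edev->rfkill);
		edev->rfkill = NULL;	/* tolerate failure, run unblocked */
	}
}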
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 4cc3624bd22d..95f7a7a544b4 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -90,9 +90,6 @@ static void add_conn(struct work_struct *work)
 	struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
 	struct hci_dev *hdev = conn->hdev;
 
-	/* ensure previous del is complete */
-	flush_work(&conn->work_del);
-
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
 	if (device_add(&conn->dev) < 0) {
@@ -118,9 +115,6 @@ static void del_conn(struct work_struct *work)
 	struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
 	struct hci_dev *hdev = conn->hdev;
 
-	/* ensure previous add is complete */
-	flush_work(&conn->work_add);
-
 	if (!device_is_registered(&conn->dev))
 		return;
 
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index ca4d3b40d5ce..bd0a4c1bced0 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -40,10 +40,10 @@
 #include <linux/skbuff.h>
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/uaccess.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -52,7 +52,7 @@
 
 #define VERSION "2.13"
 
-static u32 l2cap_feat_mask = 0x0080;
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { 0x02, };
 
 static const struct proto_ops l2cap_sock_ops;
@@ -134,7 +134,8 @@ static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16
 	struct sock *s;
 	read_lock(&l->lock);
 	s = __l2cap_get_chan_by_scid(l, cid);
-	if (s) bh_lock_sock(s);
+	if (s)
+		bh_lock_sock(s);
 	read_unlock(&l->lock);
 	return s;
 }
@@ -154,17 +155,18 @@ static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8
 	struct sock *s;
 	read_lock(&l->lock);
 	s = __l2cap_get_chan_by_ident(l, ident);
-	if (s) bh_lock_sock(s);
+	if (s)
+		bh_lock_sock(s);
 	read_unlock(&l->lock);
 	return s;
 }
 
 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
 {
-	u16 cid = 0x0040;
+	u16 cid = L2CAP_CID_DYN_START;
 
-	for (; cid < 0xffff; cid++) {
-		if(!__l2cap_get_chan_by_scid(l, cid))
+	for (; cid < L2CAP_CID_DYN_END; cid++) {
+		if (!__l2cap_get_chan_by_scid(l, cid))
 			return cid;
 	}
 
@@ -204,7 +206,8 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
 {
 	struct l2cap_chan_list *l = &conn->chan_list;
 
-	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
+	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
+			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
 
 	conn->disc_reason = 0x13;
 
@@ -215,13 +218,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
 	} else if (sk->sk_type == SOCK_DGRAM) {
 		/* Connectionless socket */
-		l2cap_pi(sk)->scid = 0x0002;
-		l2cap_pi(sk)->dcid = 0x0002;
+		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
+		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
 	} else {
 		/* Raw socket can send/recv signalling messages only */
-		l2cap_pi(sk)->scid = 0x0001;
-		l2cap_pi(sk)->dcid = 0x0001;
+		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
+		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
 	}
 
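The magic channel IDs scattered through l2cap.c are replaced by named constants matching the L2CAP spec. From the values the hunks substitute, the new defines (kept in include/net/bluetooth/l2cap.h) presumably look like:

#define L2CAP_CID_SIGNALING	0x0001	/* signalling channel */
#define L2CAP_CID_CONN_LESS	0x0002	/* connectionless reception */
#define L2CAP_CID_DYN_START	0x0040	/* first dynamically allocated CID */
#define L2CAP_CID_DYN_END	0xffff	/* upper bound for allocation */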
@@ -272,7 +275,7 @@ static inline int l2cap_check_security(struct sock *sk)
 	if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
 		auth_type = HCI_AT_NO_BONDING_MITM;
 	else
-		auth_type = HCI_AT_NO_BONDING;
+		auth_type = HCI_AT_NO_BONDING;
 
 	if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
 		l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
@@ -588,7 +591,8 @@ static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t
 	struct sock *s;
 	read_lock(&l2cap_sk_list.lock);
 	s = __l2cap_get_sock_by_psm(state, psm, src);
-	if (s) bh_lock_sock(s);
+	if (s)
+		bh_lock_sock(s);
 	read_unlock(&l2cap_sk_list.lock);
 	return s;
 }
@@ -808,7 +812,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 		goto done;
 	}
 
-	if (la.l2_psm && btohs(la.l2_psm) < 0x1001 &&
+	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
 				!capable(CAP_NET_BIND_SERVICE)) {
 		err = -EACCES;
 		goto done;
@@ -825,7 +829,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 	l2cap_pi(sk)->sport = la.l2_psm;
 	sk->sk_state = BT_BOUND;
 
-	if (btohs(la.l2_psm) == 0x0001 || btohs(la.l2_psm) == 0x0003)
+	if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+			__le16_to_cpu(la.l2_psm) == 0x0003)
 		l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
 }
 
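The series also drops the Bluetooth-local byte-order macros in favour of the generic ones. btohs() and htobs() are, roughly, aliases for __le16_to_cpu() and cpu_to_le16() in the bluetooth headers, so the change is cosmetic, but the generic names make the byte order explicit and keep sparse's __le16 annotations meaningful. A two-function sketch of the convention:

/* PSMs and CIDs travel little-endian on the wire: */
static __le16 example_psm_to_wire(u16 psm)
{
	return cpu_to_le16(psm);	/* CPU order -> little endian */
}

static u16 example_psm_to_host(__le16 psm)
{
	return __le16_to_cpu(psm);	/* little endian -> CPU order */
}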
@@ -844,12 +849,13 @@ static int l2cap_do_connect(struct sock *sk)
 	struct hci_conn *hcon;
 	struct hci_dev *hdev;
 	__u8 auth_type;
-	int err = 0;
+	int err;
 
 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
 							l2cap_pi(sk)->psm);
 
-	if (!(hdev = hci_get_route(dst, src)))
+	hdev = hci_get_route(dst, src);
+	if (!hdev)
 		return -EHOSTUNREACH;
 
 	hci_dev_lock_bh(hdev);
@@ -950,7 +956,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 		goto done;
 	}
 
-	switch(sk->sk_state) {
+	switch (sk->sk_state) {
 	case BT_CONNECT:
 	case BT_CONNECT2:
 	case BT_CONFIG:
@@ -975,7 +981,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
 	l2cap_pi(sk)->psm = la.l2_psm;
 
-	if ((err = l2cap_do_connect(sk)))
+	err = l2cap_do_connect(sk);
+	if (err)
 		goto done;
 
 wait:
@@ -1009,9 +1016,9 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
 	write_lock_bh(&l2cap_sk_list.lock);
 
 	for (psm = 0x1001; psm < 0x1100; psm += 2)
-		if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
-			l2cap_pi(sk)->psm = htobs(psm);
-			l2cap_pi(sk)->sport = htobs(psm);
+		if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
+			l2cap_pi(sk)->psm = cpu_to_le16(psm);
+			l2cap_pi(sk)->sport = cpu_to_le16(psm);
 			err = 0;
 			break;
 		}
@@ -1100,11 +1107,11 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
 	if (peer) {
 		la->l2_psm = l2cap_pi(sk)->psm;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
-		la->l2_cid = htobs(l2cap_pi(sk)->dcid);
+		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
 	} else {
 		la->l2_psm = l2cap_pi(sk)->sport;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
-		la->l2_cid = htobs(l2cap_pi(sk)->scid);
+		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
 	}
 
 	return 0;
@@ -1114,7 +1121,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
 {
 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
 	struct sk_buff *skb, **frag;
-	int err, hlen, count, sent=0;
+	int err, hlen, count, sent = 0;
 	struct l2cap_hdr *lh;
 
 	BT_DBG("sk %p len %d", sk, len);
@@ -1167,8 +1174,8 @@
 
 		frag = &(*frag)->next;
 	}
-
-	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
+	err = hci_send_acl(conn->hcon, skb, 0);
+	if (err < 0)
 		goto fail;
 
 	return sent;
@@ -1556,7 +1563,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct l2cap_chan_list *l = &conn->chan_list;
 	struct sk_buff *nskb;
-	struct sock * sk;
+	struct sock *sk;
 
 	BT_DBG("conn %p", conn);
 
@@ -1568,8 +1575,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
 		/* Don't send frame to the socket it came from */
 		if (skb->sk == sk)
 			continue;
-
-		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (!nskb)
 			continue;
 
 		if (sock_queue_rcv_skb(sk, nskb))
@@ -1587,7 +1594,8 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 	struct l2cap_hdr *lh;
 	int len, count;
 
-	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
+	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
+			conn, code, ident, dlen);
 
 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
 	count = min_t(unsigned int, conn->mtu, len);
@@ -1598,7 +1606,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
-	lh->cid = cpu_to_le16(0x0001);
+	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
 
 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
 	cmd->code = code;
@@ -1739,8 +1747,8 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 
-		hint = type & 0x80;
-		type &= 0x7f;
+		hint = type & L2CAP_CONF_HINT;
+		type &= L2CAP_CONF_MASK;
 
 		switch (type) {
 		case L2CAP_CONF_MTU:
@@ -1966,10 +1974,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
 
 	if (scid) {
-		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
+		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+		if (!sk)
 			return 0;
 	} else {
-		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
+		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
+		if (!sk)
 			return 0;
 	}
 
@@ -2012,7 +2022,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
 
-	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
+	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+	if (!sk)
 		return -ENOENT;
 
 	if (sk->sk_state == BT_DISCONN)
@@ -2079,9 +2090,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	flags = __le16_to_cpu(rsp->flags);
 	result = __le16_to_cpu(rsp->result);
 
-	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
+	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
+			scid, flags, result);
 
-	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
+	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+	if (!sk)
 		return 0;
 
 	switch (result) {
@@ -2142,7 +2155,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 
 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
 
-	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
+	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
+	if (!sk)
 		return 0;
 
 	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
@@ -2169,7 +2183,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
 
-	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
+	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
+	if (!sk)
 		return 0;
 
 	l2cap_chan_del(sk, 0);
@@ -2230,7 +2245,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
 	if (type == L2CAP_IT_FEAT_MASK) {
 		conn->feat_mask = get_unaligned_le32(rsp->data);
 
-		if (conn->feat_mask & 0x0080) {
+		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
 			struct l2cap_info_req req;
 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
 
@@ -2403,7 +2418,8 @@ drop:
 	kfree_skb(skb);
 
 done:
-	if (sk) bh_unlock_sock(sk);
+	if (sk)
+		bh_unlock_sock(sk);
 	return 0;
 }
 
@@ -2420,11 +2436,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
 
 	switch (cid) {
-	case 0x0001:
+	case L2CAP_CID_SIGNALING:
 		l2cap_sig_channel(conn, skb);
 		break;
 
-	case 0x0002:
+	case L2CAP_CID_CONN_LESS:
 		psm = get_unaligned((__le16 *) skb->data);
 		skb_pull(skb, 2);
 		l2cap_conless_channel(conn, psm, skb);
@@ -2650,7 +2666,8 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
 	}
 
 	/* Allocate skb for the complete frame (with header) */
-	if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
+	conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
+	if (!conn->rx_skb)
 		goto drop;
 
 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
@@ -2704,13 +2721,13 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
 
 		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
 				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
-				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
-				pi->imtu, pi->omtu, pi->sec_level);
+				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
+				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
 	}
 
 	read_unlock_bh(&l2cap_sk_list.lock);
 
-	return (str - buf);
+	return str - buf;
 }
 
 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 374536e050aa..e50566ebf9f9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -679,7 +679,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
 
 	bacpy(&addr.l2_bdaddr, dst);
 	addr.l2_family = AF_BLUETOOTH;
-	addr.l2_psm = htobs(RFCOMM_PSM);
+	addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
 	addr.l2_cid = 0;
 	*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
 	if (*err == 0 || *err == -EINPROGRESS)
@@ -852,9 +852,9 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
 	}
 
 	if (cr && channel_mtu >= 0)
-		pn->mtu = htobs(channel_mtu);
+		pn->mtu = cpu_to_le16(channel_mtu);
 	else
-		pn->mtu = htobs(d->mtu);
+		pn->mtu = cpu_to_le16(d->mtu);
 
 	*ptr = __fcs(buf); ptr++;
 
@@ -1056,7 +1056,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
 
 	if (len > 127) {
 		hdr = (void *) skb_push(skb, 4);
-		put_unaligned(htobs(__len16(len)), (__le16 *) &hdr->len);
+		put_unaligned(cpu_to_le16(__len16(len)), (__le16 *) &hdr->len);
 	} else {
 		hdr = (void *) skb_push(skb, 3);
 		hdr->len = __len8(len);
@@ -1289,7 +1289,7 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
 
 	d->priority = pn->priority;
 
-	d->mtu = btohs(pn->mtu);
+	d->mtu = __le16_to_cpu(pn->mtu);
 
 	if (cr && d->mtu > s->mtu)
 		d->mtu = s->mtu;
@@ -1922,7 +1922,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
 	/* Bind socket */
 	bacpy(&addr.l2_bdaddr, ba);
 	addr.l2_family = AF_BLUETOOTH;
-	addr.l2_psm = htobs(RFCOMM_PSM);
+	addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
 	addr.l2_cid = 0;
 	err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
 	if (err < 0) {
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index a48f5efdb6bf..cb3e97b93aeb 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -398,7 +398,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (unlikely(fdb->is_local)) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "%s: received packet with "
-			       " own address as source address\n",
+			       "own address as source address\n",
 			       source->dev->name);
 	} else {
 		/* fastpath: update of existing entry */
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e4a418fcb35b..d22f611e4004 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -228,6 +228,7 @@ int nf_bridge_copy_header(struct sk_buff *skb)
 static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 {
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+	struct rtable *rt;
 
 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
 		skb->pkt_type = PACKET_OTHERHOST;
@@ -235,12 +236,13 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 	}
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 
-	skb->rtable = bridge_parent_rtable(nf_bridge->physindev);
-	if (!skb->rtable) {
+	rt = bridge_parent_rtable(nf_bridge->physindev);
+	if (!rt) {
 		kfree_skb(skb);
 		return 0;
 	}
-	dst_hold(&skb->rtable->u.dst);
+	dst_hold(&rt->u.dst);
+	skb_dst_set(skb, &rt->u.dst);
 
 	skb->dev = nf_bridge->physindev;
 	nf_bridge_push_encap_header(skb);
@@ -320,7 +322,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 
 	skb->dev = bridge_parent(skb->dev);
 	if (skb->dev) {
-		struct dst_entry *dst = skb->dst;
+		struct dst_entry *dst = skb_dst(skb);
 
 		nf_bridge_pull_encap_header(skb);
 
@@ -338,6 +340,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	struct iphdr *iph = ip_hdr(skb);
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+	struct rtable *rt;
 	int err;
 
 	if (nf_bridge->mask & BRNF_PKT_TYPE) {
@@ -347,7 +350,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 	if (dnat_took_place(skb)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
-			struct rtable *rt;
 			struct flowi fl = {
 				.nl_u = {
 					.ip4_u = {
@@ -373,7 +375,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 			/* - Bridged-and-DNAT'ed traffic doesn't
 			 *   require ip_forwarding. */
 			if (((struct dst_entry *)rt)->dev == dev) {
-				skb->dst = (struct dst_entry *)rt;
+				skb_dst_set(skb, (struct dst_entry *)rt);
 				goto bridged_dnat;
 			}
 			/* we are sure that forwarding is disabled, so printing
@@ -387,7 +389,7 @@ free_skb:
 			kfree_skb(skb);
 			return 0;
 		} else {
-			if (skb->dst->dev == dev) {
+			if (skb_dst(skb)->dev == dev) {
 bridged_dnat:
 				/* Tell br_nf_local_out this is a
 				 * bridged frame */
@@ -404,12 +406,13 @@ bridged_dnat:
 			skb->pkt_type = PACKET_HOST;
 		}
 	} else {
-		skb->rtable = bridge_parent_rtable(nf_bridge->physindev);
-		if (!skb->rtable) {
+		rt = bridge_parent_rtable(nf_bridge->physindev);
+		if (!rt) {
 			kfree_skb(skb);
 			return 0;
 		}
-		dst_hold(&skb->rtable->u.dst);
+		dst_hold(&rt->u.dst);
+		skb_dst_set(skb, &rt->u.dst);
 	}
 
 	skb->dev = nf_bridge->physindev;
@@ -628,10 +631,10 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
 {
-	if (skb->rtable && skb->rtable == bridge_parent_rtable(in)) {
-		dst_release(&skb->rtable->u.dst);
-		skb->rtable = NULL;
-	}
+	struct rtable *rt = skb_rtable(skb);
+
+	if (rt && rt == bridge_parent_rtable(in))
+		skb_dst_drop(skb);
 
 	return NF_ACCEPT;
 }
@@ -846,7 +849,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 		return NF_ACCEPT;
 
 #ifdef CONFIG_NETFILTER_DEBUG
-	if (skb->dst == NULL) {
+	if (skb_dst(skb) == NULL) {
 		printk(KERN_INFO "br_netfilter post_routing: skb->dst == NULL\n");
 		goto print_error;
 	}
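This file also shows the write side of the skb->dst conversion: instead of assigning skb->rtable or skb->dst directly, the code takes a reference with dst_hold() and attaches it via skb_dst_set(); skb_dst_drop() releases whatever is attached. A minimal attach/detach sketch with hypothetical helpers, assuming the route was already looked up:

#include <linux/skbuff.h>
#include <net/route.h>

/* Hypothetical helpers illustrating the new attach/detach pattern. */
static void example_attach_route(struct sk_buff *skb, struct rtable *rt)
{
	dst_hold(&rt->u.dst);		/* reference owned by the skb */
	skb_dst_set(skb, &rt->u.dst);	/* old: skb->rtable = rt; */
}

static void example_detach_route(struct sk_buff *skb)
{
	/* old: dst_release(&skb->rtable->u.dst); skb->rtable = NULL; */
	skb_dst_drop(skb);
}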
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 10f0528c3bf5..e733725b11d4 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -903,6 +903,8 @@ static __exit void can_exit(void)
 	}
 	spin_unlock(&can_rcvlists_lock);
 
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
 	kmem_cache_destroy(rcv_cache);
 }
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e2a36f05cdf7..58abee1f1df1 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -282,6 +282,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -322,28 +323,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_iovec(list,
-							    offset - start,
-							    to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_iovec(frag_iter,
+						    offset - start,
+						    to, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -369,6 +366,7 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -411,30 +409,26 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_datagram_const_iovec(list,
-								  offset - start,
-								  to, to_offset,
-								  copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to_offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_datagram_const_iovec(frag_iter,
+							  offset - start,
+							  to, to_offset,
+							  copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to_offset += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -461,12 +455,14 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 
 	/* Copy header. */
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		if (memcpy_fromiovecend(skb->data + offset, from, 0, copy))
+		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
+					copy))
 			goto fault;
 		if ((len -= copy) == 0)
 			return 0;
@@ -505,31 +501,27 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, | |||
505 | start = end; | 501 | start = end; |
506 | } | 502 | } |
507 | 503 | ||
508 | if (skb_shinfo(skb)->frag_list) { | 504 | skb_walk_frags(skb, frag_iter) { |
509 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 505 | int end; |
510 | 506 | ||
511 | for (; list; list = list->next) { | 507 | WARN_ON(start > offset + len); |
512 | int end; | 508 | |
513 | 509 | end = start + frag_iter->len; | |
514 | WARN_ON(start > offset + len); | 510 | if ((copy = end - offset) > 0) { |
515 | 511 | if (copy > len) | |
516 | end = start + list->len; | 512 | copy = len; |
517 | if ((copy = end - offset) > 0) { | 513 | if (skb_copy_datagram_from_iovec(frag_iter, |
518 | if (copy > len) | 514 | offset - start, |
519 | copy = len; | 515 | from, |
520 | if (skb_copy_datagram_from_iovec(list, | 516 | from_offset, |
521 | offset - start, | 517 | copy)) |
522 | from, | 518 | goto fault; |
523 | from_offset, | 519 | if ((len -= copy) == 0) |
524 | copy)) | 520 | return 0; |
525 | goto fault; | 521 | offset += copy; |
526 | if ((len -= copy) == 0) | 522 | from_offset += copy; |
527 | return 0; | ||
528 | offset += copy; | ||
529 | from_offset += copy; | ||
530 | } | ||
531 | start = end; | ||
532 | } | 523 | } |
524 | start = end; | ||
533 | } | 525 | } |
534 | if (!len) | 526 | if (!len) |
535 | return 0; | 527 | return 0; |
@@ -544,8 +536,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, | |||
544 | __wsum *csump) | 536 | __wsum *csump) |
545 | { | 537 | { |
546 | int start = skb_headlen(skb); | 538 | int start = skb_headlen(skb); |
547 | int pos = 0; | ||
548 | int i, copy = start - offset; | 539 | int i, copy = start - offset; |
540 | struct sk_buff *frag_iter; | ||
541 | int pos = 0; | ||
549 | 542 | ||
550 | /* Copy header. */ | 543 | /* Copy header. */ |
551 | if (copy > 0) { | 544 | if (copy > 0) { |
@@ -596,33 +589,29 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, | |||
596 | start = end; | 589 | start = end; |
597 | } | 590 | } |
598 | 591 | ||
599 | if (skb_shinfo(skb)->frag_list) { | 592 | skb_walk_frags(skb, frag_iter) { |
600 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 593 | int end; |
601 | 594 | ||
602 | for (; list; list=list->next) { | 595 | WARN_ON(start > offset + len); |
603 | int end; | 596 | |
604 | 597 | end = start + frag_iter->len; | |
605 | WARN_ON(start > offset + len); | 598 | if ((copy = end - offset) > 0) { |
606 | 599 | __wsum csum2 = 0; | |
607 | end = start + list->len; | 600 | if (copy > len) |
608 | if ((copy = end - offset) > 0) { | 601 | copy = len; |
609 | __wsum csum2 = 0; | 602 | if (skb_copy_and_csum_datagram(frag_iter, |
610 | if (copy > len) | 603 | offset - start, |
611 | copy = len; | 604 | to, copy, |
612 | if (skb_copy_and_csum_datagram(list, | 605 | &csum2)) |
613 | offset - start, | 606 | goto fault; |
614 | to, copy, | 607 | *csump = csum_block_add(*csump, csum2, pos); |
615 | &csum2)) | 608 | if ((len -= copy) == 0) |
616 | goto fault; | 609 | return 0; |
617 | *csump = csum_block_add(*csump, csum2, pos); | 610 | offset += copy; |
618 | if ((len -= copy) == 0) | 611 | to += copy; |
619 | return 0; | 612 | pos += copy; |
620 | offset += copy; | ||
621 | to += copy; | ||
622 | pos += copy; | ||
623 | } | ||
624 | start = end; | ||
625 | } | 613 | } |
614 | start = end; | ||
626 | } | 615 | } |
627 | if (!len) | 616 | if (!len) |
628 | return 0; | 617 | return 0; |
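The datagram.c hunks above all apply one refactoring: the open-coded frag_list walk is replaced by the skb_walk_frags() iterator, and presence tests become skb_has_frags(). A minimal sketch of these helpers, assuming definitions along the lines this series adds to include/linux/skbuff.h:

    #define skb_walk_frags(skb, iter) \
            for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

    static inline bool skb_has_frags(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->frag_list != NULL;
    }

    /* used further below in skb_segment() */
    #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb))

The behaviour of each converted loop is unchanged; only the iteration boilerplate moves behind the macro.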
diff --git a/net/core/dev.c b/net/core/dev.c index ed4550fd9ece..11560e3258b5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -269,7 +269,8 @@ static const unsigned short netdev_lock_type[] = | |||
269 | ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, | 269 | ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, |
270 | ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, | 270 | ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, |
271 | ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, | 271 | ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, |
272 | ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE}; | 272 | ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY, |
273 | ARPHRD_VOID, ARPHRD_NONE}; | ||
273 | 274 | ||
274 | static const char *netdev_lock_name[] = | 275 | static const char *netdev_lock_name[] = |
275 | {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", | 276 | {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", |
@@ -286,7 +287,8 @@ static const char *netdev_lock_name[] = | |||
286 | "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", | 287 | "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", |
287 | "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", | 288 | "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", |
288 | "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", | 289 | "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", |
289 | "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"}; | 290 | "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY", |
291 | "_xmit_VOID", "_xmit_NONE"}; | ||
290 | 292 | ||
291 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; | 293 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
292 | static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; | 294 | static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
@@ -1048,7 +1050,7 @@ void dev_load(struct net *net, const char *name) | |||
1048 | int dev_open(struct net_device *dev) | 1050 | int dev_open(struct net_device *dev) |
1049 | { | 1051 | { |
1050 | const struct net_device_ops *ops = dev->netdev_ops; | 1052 | const struct net_device_ops *ops = dev->netdev_ops; |
1051 | int ret = 0; | 1053 | int ret; |
1052 | 1054 | ||
1053 | ASSERT_RTNL(); | 1055 | ASSERT_RTNL(); |
1054 | 1056 | ||
@@ -1065,6 +1067,11 @@ int dev_open(struct net_device *dev) | |||
1065 | if (!netif_device_present(dev)) | 1067 | if (!netif_device_present(dev)) |
1066 | return -ENODEV; | 1068 | return -ENODEV; |
1067 | 1069 | ||
1070 | ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev); | ||
1071 | ret = notifier_to_errno(ret); | ||
1072 | if (ret) | ||
1073 | return ret; | ||
1074 | |||
1068 | /* | 1075 | /* |
1069 | * Call device private open method | 1076 | * Call device private open method |
1070 | */ | 1077 | */ |
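The new NETDEV_PRE_UP event gives notifier listeners a chance to veto dev_open() before ndo_open runs; a non-zero notifier status is converted back into an errno with notifier_to_errno(). A hypothetical listener, as a sketch (my_policy_allows() is an assumption, not kernel API):

    static int my_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            struct net_device *dev = ptr;   /* NETDEV_* events pass the device */

            if (event == NETDEV_PRE_UP && !my_policy_allows(dev))
                    return notifier_from_errno(-EPERM); /* dev_open() fails */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_netdev_nb = {
            .notifier_call = my_netdev_event,
    };
    /* registered elsewhere via register_netdevice_notifier(&my_netdev_nb) */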
@@ -1693,10 +1700,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1693 | * If device doesnt need skb->dst, release it right now while | 1700 | * If device doesnt need skb->dst, release it right now while |
1694 | * its hot in this cpu cache | 1701 | * its hot in this cpu cache |
1695 | */ | 1702 | */ |
1696 | if ((dev->priv_flags & IFF_XMIT_DST_RELEASE) && skb->dst) { | 1703 | if (dev->priv_flags & IFF_XMIT_DST_RELEASE) |
1697 | dst_release(skb->dst); | 1704 | skb_dst_drop(skb); |
1698 | skb->dst = NULL; | 1705 | |
1699 | } | ||
1700 | rc = ops->ndo_start_xmit(skb, dev); | 1706 | rc = ops->ndo_start_xmit(skb, dev); |
1701 | if (rc == 0) | 1707 | if (rc == 0) |
1702 | txq_trans_update(txq); | 1708 | txq_trans_update(txq); |
@@ -1816,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
1816 | if (netif_needs_gso(dev, skb)) | 1822 | if (netif_needs_gso(dev, skb)) |
1817 | goto gso; | 1823 | goto gso; |
1818 | 1824 | ||
1819 | if (skb_shinfo(skb)->frag_list && | 1825 | if (skb_has_frags(skb) && |
1820 | !(dev->features & NETIF_F_FRAGLIST) && | 1826 | !(dev->features & NETIF_F_FRAGLIST) && |
1821 | __skb_linearize(skb)) | 1827 | __skb_linearize(skb)) |
1822 | goto out_kfree_skb; | 1828 | goto out_kfree_skb; |
@@ -2403,7 +2409,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
2403 | if (!(skb->dev->features & NETIF_F_GRO)) | 2409 | if (!(skb->dev->features & NETIF_F_GRO)) |
2404 | goto normal; | 2410 | goto normal; |
2405 | 2411 | ||
2406 | if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list) | 2412 | if (skb_is_gso(skb) || skb_has_frags(skb)) |
2407 | goto normal; | 2413 | goto normal; |
2408 | 2414 | ||
2409 | rcu_read_lock(); | 2415 | rcu_read_lock(); |
@@ -3473,8 +3479,9 @@ void dev_set_rx_mode(struct net_device *dev) | |||
3473 | 3479 | ||
3474 | /* hw addresses list handling functions */ | 3480 | /* hw addresses list handling functions */ |
3475 | 3481 | ||
3476 | static int __hw_addr_add(struct list_head *list, unsigned char *addr, | 3482 | static int __hw_addr_add(struct list_head *list, int *delta, |
3477 | int addr_len, unsigned char addr_type) | 3483 | unsigned char *addr, int addr_len, |
3484 | unsigned char addr_type) | ||
3478 | { | 3485 | { |
3479 | struct netdev_hw_addr *ha; | 3486 | struct netdev_hw_addr *ha; |
3480 | int alloc_size; | 3487 | int alloc_size; |
@@ -3482,6 +3489,15 @@ static int __hw_addr_add(struct list_head *list, unsigned char *addr, | |||
3482 | if (addr_len > MAX_ADDR_LEN) | 3489 | if (addr_len > MAX_ADDR_LEN) |
3483 | return -EINVAL; | 3490 | return -EINVAL; |
3484 | 3491 | ||
3492 | list_for_each_entry(ha, list, list) { | ||
3493 | if (!memcmp(ha->addr, addr, addr_len) && | ||
3494 | ha->type == addr_type) { | ||
3495 | ha->refcount++; | ||
3496 | return 0; | ||
3497 | } | ||
3498 | } | ||
3499 | |||
3500 | |||
3485 | alloc_size = sizeof(*ha); | 3501 | alloc_size = sizeof(*ha); |
3486 | if (alloc_size < L1_CACHE_BYTES) | 3502 | if (alloc_size < L1_CACHE_BYTES) |
3487 | alloc_size = L1_CACHE_BYTES; | 3503 | alloc_size = L1_CACHE_BYTES; |
@@ -3490,7 +3506,11 @@ static int __hw_addr_add(struct list_head *list, unsigned char *addr, | |||
3490 | return -ENOMEM; | 3506 | return -ENOMEM; |
3491 | memcpy(ha->addr, addr, addr_len); | 3507 | memcpy(ha->addr, addr, addr_len); |
3492 | ha->type = addr_type; | 3508 | ha->type = addr_type; |
3509 | ha->refcount = 1; | ||
3510 | ha->synced = false; | ||
3493 | list_add_tail_rcu(&ha->list, list); | 3511 | list_add_tail_rcu(&ha->list, list); |
3512 | if (delta) | ||
3513 | (*delta)++; | ||
3494 | return 0; | 3514 | return 0; |
3495 | } | 3515 | } |
3496 | 3516 | ||
@@ -3502,29 +3522,30 @@ static void ha_rcu_free(struct rcu_head *head) | |||
3502 | kfree(ha); | 3522 | kfree(ha); |
3503 | } | 3523 | } |
3504 | 3524 | ||
3505 | static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr, | 3525 | static int __hw_addr_del(struct list_head *list, int *delta, |
3506 | int addr_len, unsigned char addr_type, | 3526 | unsigned char *addr, int addr_len, |
3507 | int ignore_index) | 3527 | unsigned char addr_type) |
3508 | { | 3528 | { |
3509 | struct netdev_hw_addr *ha; | 3529 | struct netdev_hw_addr *ha; |
3510 | int i = 0; | ||
3511 | 3530 | ||
3512 | list_for_each_entry(ha, list, list) { | 3531 | list_for_each_entry(ha, list, list) { |
3513 | if (i++ != ignore_index && | 3532 | if (!memcmp(ha->addr, addr, addr_len) && |
3514 | !memcmp(ha->addr, addr, addr_len) && | ||
3515 | (ha->type == addr_type || !addr_type)) { | 3533 | (ha->type == addr_type || !addr_type)) { |
3534 | if (--ha->refcount) | ||
3535 | return 0; | ||
3516 | list_del_rcu(&ha->list); | 3536 | list_del_rcu(&ha->list); |
3517 | call_rcu(&ha->rcu_head, ha_rcu_free); | 3537 | call_rcu(&ha->rcu_head, ha_rcu_free); |
3538 | if (delta) | ||
3539 | (*delta)--; | ||
3518 | return 0; | 3540 | return 0; |
3519 | } | 3541 | } |
3520 | } | 3542 | } |
3521 | return -ENOENT; | 3543 | return -ENOENT; |
3522 | } | 3544 | } |
3523 | 3545 | ||
3524 | static int __hw_addr_add_multiple_ii(struct list_head *to_list, | 3546 | static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta, |
3525 | struct list_head *from_list, | 3547 | struct list_head *from_list, int addr_len, |
3526 | int addr_len, unsigned char addr_type, | 3548 | unsigned char addr_type) |
3527 | int ignore_index) | ||
3528 | { | 3549 | { |
3529 | int err; | 3550 | int err; |
3530 | struct netdev_hw_addr *ha, *ha2; | 3551 | struct netdev_hw_addr *ha, *ha2; |
@@ -3532,7 +3553,8 @@ static int __hw_addr_add_multiple_ii(struct list_head *to_list, | |||
3532 | 3553 | ||
3533 | list_for_each_entry(ha, from_list, list) { | 3554 | list_for_each_entry(ha, from_list, list) { |
3534 | type = addr_type ? addr_type : ha->type; | 3555 | type = addr_type ? addr_type : ha->type; |
3535 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | 3556 | err = __hw_addr_add(to_list, to_delta, ha->addr, |
3557 | addr_len, type); | ||
3536 | if (err) | 3558 | if (err) |
3537 | goto unroll; | 3559 | goto unroll; |
3538 | } | 3560 | } |
@@ -3543,27 +3565,69 @@ unroll: | |||
3543 | if (ha2 == ha) | 3565 | if (ha2 == ha) |
3544 | break; | 3566 | break; |
3545 | type = addr_type ? addr_type : ha2->type; | 3567 | type = addr_type ? addr_type : ha2->type; |
3546 | __hw_addr_del_ii(to_list, ha2->addr, addr_len, type, | 3568 | __hw_addr_del(to_list, to_delta, ha2->addr, |
3547 | ignore_index); | 3569 | addr_len, type); |
3548 | } | 3570 | } |
3549 | return err; | 3571 | return err; |
3550 | } | 3572 | } |
3551 | 3573 | ||
3552 | static void __hw_addr_del_multiple_ii(struct list_head *to_list, | 3574 | static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta, |
3553 | struct list_head *from_list, | 3575 | struct list_head *from_list, int addr_len, |
3554 | int addr_len, unsigned char addr_type, | 3576 | unsigned char addr_type) |
3555 | int ignore_index) | ||
3556 | { | 3577 | { |
3557 | struct netdev_hw_addr *ha; | 3578 | struct netdev_hw_addr *ha; |
3558 | unsigned char type; | 3579 | unsigned char type; |
3559 | 3580 | ||
3560 | list_for_each_entry(ha, from_list, list) { | 3581 | list_for_each_entry(ha, from_list, list) { |
3561 | type = addr_type ? addr_type : ha->type; | 3582 | type = addr_type ? addr_type : ha->type; |
3562 | __hw_addr_del_ii(to_list, ha->addr, addr_len, addr_type, | 3583 | __hw_addr_del(to_list, to_delta, ha->addr, |
3563 | ignore_index); | 3584 | addr_len, addr_type); |
3585 | } | ||
3586 | } | ||
3587 | |||
3588 | static int __hw_addr_sync(struct list_head *to_list, int *to_delta, | ||
3589 | struct list_head *from_list, int *from_delta, | ||
3590 | int addr_len) | ||
3591 | { | ||
3592 | int err = 0; | ||
3593 | struct netdev_hw_addr *ha, *tmp; | ||
3594 | |||
3595 | list_for_each_entry_safe(ha, tmp, from_list, list) { | ||
3596 | if (!ha->synced) { | ||
3597 | err = __hw_addr_add(to_list, to_delta, ha->addr, | ||
3598 | addr_len, ha->type); | ||
3599 | if (err) | ||
3600 | break; | ||
3601 | ha->synced = true; | ||
3602 | ha->refcount++; | ||
3603 | } else if (ha->refcount == 1) { | ||
3604 | __hw_addr_del(to_list, to_delta, ha->addr, | ||
3605 | addr_len, ha->type); | ||
3606 | __hw_addr_del(from_list, from_delta, ha->addr, | ||
3607 | addr_len, ha->type); | ||
3608 | } | ||
3609 | } | ||
3610 | return err; | ||
3611 | } | ||
3612 | |||
3613 | static void __hw_addr_unsync(struct list_head *to_list, int *to_delta, | ||
3614 | struct list_head *from_list, int *from_delta, | ||
3615 | int addr_len) | ||
3616 | { | ||
3617 | struct netdev_hw_addr *ha, *tmp; | ||
3618 | |||
3619 | list_for_each_entry_safe(ha, tmp, from_list, list) { | ||
3620 | if (ha->synced) { | ||
3621 | __hw_addr_del(to_list, to_delta, ha->addr, | ||
3622 | addr_len, ha->type); | ||
3623 | ha->synced = false; | ||
3624 | __hw_addr_del(from_list, from_delta, ha->addr, | ||
3625 | addr_len, ha->type); | ||
3626 | } | ||
3564 | } | 3627 | } |
3565 | } | 3628 | } |
3566 | 3629 | ||
3630 | |||
3567 | static void __hw_addr_flush(struct list_head *list) | 3631 | static void __hw_addr_flush(struct list_head *list) |
3568 | { | 3632 | { |
3569 | struct netdev_hw_addr *ha, *tmp; | 3633 | struct netdev_hw_addr *ha, *tmp; |
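__hw_addr_sync()/__hw_addr_unsync() combine the refcount with the new synced flag so a layered device can mirror its unicast list into a lower device and later withdraw exactly the addresses it pushed. Lifecycle of one entry on the upper (from_list) side, as a sketch:

    /*
     * add on upper device:     refcount = 1, synced = false
     * __hw_addr_sync():        added to lower list, refcount = 2, synced = true
     * upper deletes address:   refcount = 1 (entry survives, still synced)
     * next __hw_addr_sync():   refcount == 1 && synced
     *                          -> removed from both lists
     * __hw_addr_unsync():      removes every synced entry from both lists
     */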
@@ -3593,8 +3657,8 @@ static int dev_addr_init(struct net_device *dev) | |||
3593 | /* rtnl_mutex must be held here */ | 3657 | /* rtnl_mutex must be held here */ |
3594 | 3658 | ||
3595 | INIT_LIST_HEAD(&dev->dev_addr_list); | 3659 | INIT_LIST_HEAD(&dev->dev_addr_list); |
3596 | memset(addr, 0, sizeof(*addr)); | 3660 | memset(addr, 0, sizeof(addr)); |
3597 | err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr), | 3661 | err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr), |
3598 | NETDEV_HW_ADDR_T_LAN); | 3662 | NETDEV_HW_ADDR_T_LAN); |
3599 | if (!err) { | 3663 | if (!err) { |
3600 | /* | 3664 | /* |
@@ -3626,7 +3690,7 @@ int dev_addr_add(struct net_device *dev, unsigned char *addr, | |||
3626 | 3690 | ||
3627 | ASSERT_RTNL(); | 3691 | ASSERT_RTNL(); |
3628 | 3692 | ||
3629 | err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len, | 3693 | err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len, |
3630 | addr_type); | 3694 | addr_type); |
3631 | if (!err) | 3695 | if (!err) |
3632 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | 3696 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
@@ -3649,11 +3713,20 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr, | |||
3649 | unsigned char addr_type) | 3713 | unsigned char addr_type) |
3650 | { | 3714 | { |
3651 | int err; | 3715 | int err; |
3716 | struct netdev_hw_addr *ha; | ||
3652 | 3717 | ||
3653 | ASSERT_RTNL(); | 3718 | ASSERT_RTNL(); |
3654 | 3719 | ||
3655 | err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len, | 3720 | /* |
3656 | addr_type, 0); | 3721 | * We can not remove the first address from the list because |
3722 | * dev->dev_addr points to that. | ||
3723 | */ | ||
3724 | ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list); | ||
3725 | if (ha->addr == dev->dev_addr && ha->refcount == 1) | ||
3726 | return -ENOENT; | ||
3727 | |||
3728 | err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len, | ||
3729 | addr_type); | ||
3657 | if (!err) | 3730 | if (!err) |
3658 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | 3731 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
3659 | return err; | 3732 | return err; |
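The new guard in dev_addr_del() exists because dev_addr_init() aliases dev->dev_addr to the storage of the first list entry, so removing that entry while it has no other users would leave dev->dev_addr dangling. Roughly (a sketch of the aliasing the comment above refers to):

    ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
    dev->dev_addr = ha->addr;   /* dev->dev_addr points into the entry */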
@@ -3680,9 +3753,9 @@ int dev_addr_add_multiple(struct net_device *to_dev, | |||
3680 | 3753 | ||
3681 | if (from_dev->addr_len != to_dev->addr_len) | 3754 | if (from_dev->addr_len != to_dev->addr_len) |
3682 | return -EINVAL; | 3755 | return -EINVAL; |
3683 | err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list, | 3756 | err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL, |
3684 | &from_dev->dev_addr_list, | 3757 | &from_dev->dev_addr_list, |
3685 | to_dev->addr_len, addr_type, 0); | 3758 | to_dev->addr_len, addr_type); |
3686 | if (!err) | 3759 | if (!err) |
3687 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | 3760 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); |
3688 | return err; | 3761 | return err; |
@@ -3707,9 +3780,9 @@ int dev_addr_del_multiple(struct net_device *to_dev, | |||
3707 | 3780 | ||
3708 | if (from_dev->addr_len != to_dev->addr_len) | 3781 | if (from_dev->addr_len != to_dev->addr_len) |
3709 | return -EINVAL; | 3782 | return -EINVAL; |
3710 | __hw_addr_del_multiple_ii(&to_dev->dev_addr_list, | 3783 | __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL, |
3711 | &from_dev->dev_addr_list, | 3784 | &from_dev->dev_addr_list, |
3712 | to_dev->addr_len, addr_type, 0); | 3785 | to_dev->addr_len, addr_type); |
3713 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | 3786 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); |
3714 | return 0; | 3787 | return 0; |
3715 | } | 3788 | } |
@@ -3779,24 +3852,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count, | |||
3779 | * dev_unicast_delete - Release secondary unicast address. | 3852 | * dev_unicast_delete - Release secondary unicast address. |
3780 | * @dev: device | 3853 | * @dev: device |
3781 | * @addr: address to delete | 3854 | * @addr: address to delete |
3782 | * @alen: length of @addr | ||
3783 | * | 3855 | * |
3784 | * Release reference to a secondary unicast address and remove it | 3856 | * Release reference to a secondary unicast address and remove it |
3785 | * from the device if the reference count drops to zero. | 3857 | * from the device if the reference count drops to zero. |
3786 | * | 3858 | * |
3787 | * The caller must hold the rtnl_mutex. | 3859 | * The caller must hold the rtnl_mutex. |
3788 | */ | 3860 | */ |
3789 | int dev_unicast_delete(struct net_device *dev, void *addr, int alen) | 3861 | int dev_unicast_delete(struct net_device *dev, void *addr) |
3790 | { | 3862 | { |
3791 | int err; | 3863 | int err; |
3792 | 3864 | ||
3793 | ASSERT_RTNL(); | 3865 | ASSERT_RTNL(); |
3794 | 3866 | ||
3795 | netif_addr_lock_bh(dev); | 3867 | err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr, |
3796 | err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0); | 3868 | dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); |
3797 | if (!err) | 3869 | if (!err) |
3798 | __dev_set_rx_mode(dev); | 3870 | __dev_set_rx_mode(dev); |
3799 | netif_addr_unlock_bh(dev); | ||
3800 | return err; | 3871 | return err; |
3801 | } | 3872 | } |
3802 | EXPORT_SYMBOL(dev_unicast_delete); | 3873 | EXPORT_SYMBOL(dev_unicast_delete); |
@@ -3805,24 +3876,22 @@ EXPORT_SYMBOL(dev_unicast_delete); | |||
3805 | * dev_unicast_add - add a secondary unicast address | 3876 | * dev_unicast_add - add a secondary unicast address |
3806 | * @dev: device | 3877 | * @dev: device |
3807 | * @addr: address to add | 3878 | * @addr: address to add |
3808 | * @alen: length of @addr | ||
3809 | * | 3879 | * |
3810 | * Add a secondary unicast address to the device or increase | 3880 | * Add a secondary unicast address to the device or increase |
3811 | * the reference count if it already exists. | 3881 | * the reference count if it already exists. |
3812 | * | 3882 | * |
3813 | * The caller must hold the rtnl_mutex. | 3883 | * The caller must hold the rtnl_mutex. |
3814 | */ | 3884 | */ |
3815 | int dev_unicast_add(struct net_device *dev, void *addr, int alen) | 3885 | int dev_unicast_add(struct net_device *dev, void *addr) |
3816 | { | 3886 | { |
3817 | int err; | 3887 | int err; |
3818 | 3888 | ||
3819 | ASSERT_RTNL(); | 3889 | ASSERT_RTNL(); |
3820 | 3890 | ||
3821 | netif_addr_lock_bh(dev); | 3891 | err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr, |
3822 | err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0); | 3892 | dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); |
3823 | if (!err) | 3893 | if (!err) |
3824 | __dev_set_rx_mode(dev); | 3894 | __dev_set_rx_mode(dev); |
3825 | netif_addr_unlock_bh(dev); | ||
3826 | return err; | 3895 | return err; |
3827 | } | 3896 | } |
3828 | EXPORT_SYMBOL(dev_unicast_add); | 3897 | EXPORT_SYMBOL(dev_unicast_add); |
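dev_unicast_delete() and dev_unicast_add() drop the netif_addr_lock_bh() spinlock and instead assert RTNL, matching the list helpers, which allocate and are serialized under rtnl_mutex. Caller sketch:

    rtnl_lock();
    err = dev_unicast_add(dev, addr);       /* addr_len now taken from dev */
    if (err)
            pr_debug("unicast add failed: %d\n", err);
    rtnl_unlock();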
@@ -3879,8 +3948,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | |||
3879 | * @from: source device | 3948 | * @from: source device |
3880 | * | 3949 | * |
3881 | * Add newly added addresses to the destination device and release | 3950 | * Add newly added addresses to the destination device and release |
3882 | * addresses that have no users left. The source device must be | 3951 | * addresses that have no users left. |
3883 | * locked by netif_tx_lock_bh. | ||
3884 | * | 3952 | * |
3885 | * This function is intended to be called from the dev->set_rx_mode | 3953 | * This function is intended to be called from the dev->set_rx_mode |
3886 | * function of layered software devices. | 3954 | * function of layered software devices. |
@@ -3889,12 +3957,15 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from) | |||
3889 | { | 3957 | { |
3890 | int err = 0; | 3958 | int err = 0; |
3891 | 3959 | ||
3892 | netif_addr_lock_bh(to); | 3960 | ASSERT_RTNL(); |
3893 | err = __dev_addr_sync(&to->uc_list, &to->uc_count, | 3961 | |
3894 | &from->uc_list, &from->uc_count); | 3962 | if (to->addr_len != from->addr_len) |
3963 | return -EINVAL; | ||
3964 | |||
3965 | err = __hw_addr_sync(&to->uc_list, &to->uc_count, | ||
3966 | &from->uc_list, &from->uc_count, to->addr_len); | ||
3895 | if (!err) | 3967 | if (!err) |
3896 | __dev_set_rx_mode(to); | 3968 | __dev_set_rx_mode(to); |
3897 | netif_addr_unlock_bh(to); | ||
3898 | return err; | 3969 | return err; |
3899 | } | 3970 | } |
3900 | EXPORT_SYMBOL(dev_unicast_sync); | 3971 | EXPORT_SYMBOL(dev_unicast_sync); |
@@ -3910,18 +3981,33 @@ EXPORT_SYMBOL(dev_unicast_sync); | |||
3910 | */ | 3981 | */ |
3911 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) | 3982 | void dev_unicast_unsync(struct net_device *to, struct net_device *from) |
3912 | { | 3983 | { |
3913 | netif_addr_lock_bh(from); | 3984 | ASSERT_RTNL(); |
3914 | netif_addr_lock(to); | ||
3915 | 3985 | ||
3916 | __dev_addr_unsync(&to->uc_list, &to->uc_count, | 3986 | if (to->addr_len != from->addr_len) |
3917 | &from->uc_list, &from->uc_count); | 3987 | return; |
3918 | __dev_set_rx_mode(to); | ||
3919 | 3988 | ||
3920 | netif_addr_unlock(to); | 3989 | __hw_addr_unsync(&to->uc_list, &to->uc_count, |
3921 | netif_addr_unlock_bh(from); | 3990 | &from->uc_list, &from->uc_count, to->addr_len); |
3991 | __dev_set_rx_mode(to); | ||
3922 | } | 3992 | } |
3923 | EXPORT_SYMBOL(dev_unicast_unsync); | 3993 | EXPORT_SYMBOL(dev_unicast_unsync); |
3924 | 3994 | ||
3995 | static void dev_unicast_flush(struct net_device *dev) | ||
3996 | { | ||
3997 | /* rtnl_mutex must be held here */ | ||
3998 | |||
3999 | __hw_addr_flush(&dev->uc_list); | ||
4000 | dev->uc_count = 0; | ||
4001 | } | ||
4002 | |||
4003 | static void dev_unicast_init(struct net_device *dev) | ||
4004 | { | ||
4005 | /* rtnl_mutex must be held here */ | ||
4006 | |||
4007 | INIT_LIST_HEAD(&dev->uc_list); | ||
4008 | } | ||
4009 | |||
4010 | |||
3925 | static void __dev_addr_discard(struct dev_addr_list **list) | 4011 | static void __dev_addr_discard(struct dev_addr_list **list) |
3926 | { | 4012 | { |
3927 | struct dev_addr_list *tmp; | 4013 | struct dev_addr_list *tmp; |
@@ -3940,9 +4026,6 @@ static void dev_addr_discard(struct net_device *dev) | |||
3940 | { | 4026 | { |
3941 | netif_addr_lock_bh(dev); | 4027 | netif_addr_lock_bh(dev); |
3942 | 4028 | ||
3943 | __dev_addr_discard(&dev->uc_list); | ||
3944 | dev->uc_count = 0; | ||
3945 | |||
3946 | __dev_addr_discard(&dev->mc_list); | 4029 | __dev_addr_discard(&dev->mc_list); |
3947 | dev->mc_count = 0; | 4030 | dev->mc_count = 0; |
3948 | 4031 | ||
@@ -4535,6 +4618,7 @@ static void rollback_registered(struct net_device *dev) | |||
4535 | /* | 4618 | /* |
4536 | * Flush the unicast and multicast chains | 4619 | * Flush the unicast and multicast chains |
4537 | */ | 4620 | */ |
4621 | dev_unicast_flush(dev); | ||
4538 | dev_addr_discard(dev); | 4622 | dev_addr_discard(dev); |
4539 | 4623 | ||
4540 | if (dev->netdev_ops->ndo_uninit) | 4624 | if (dev->netdev_ops->ndo_uninit) |
@@ -4988,18 +5072,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
4988 | struct netdev_queue *tx; | 5072 | struct netdev_queue *tx; |
4989 | struct net_device *dev; | 5073 | struct net_device *dev; |
4990 | size_t alloc_size; | 5074 | size_t alloc_size; |
4991 | void *p; | 5075 | struct net_device *p; |
4992 | 5076 | ||
4993 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5077 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
4994 | 5078 | ||
4995 | alloc_size = sizeof(struct net_device); | 5079 | alloc_size = sizeof(struct net_device); |
4996 | if (sizeof_priv) { | 5080 | if (sizeof_priv) { |
4997 | /* ensure 32-byte alignment of private area */ | 5081 | /* ensure 32-byte alignment of private area */ |
4998 | alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; | 5082 | alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); |
4999 | alloc_size += sizeof_priv; | 5083 | alloc_size += sizeof_priv; |
5000 | } | 5084 | } |
5001 | /* ensure 32-byte alignment of whole construct */ | 5085 | /* ensure 32-byte alignment of whole construct */ |
5002 | alloc_size += NETDEV_ALIGN_CONST; | 5086 | alloc_size += NETDEV_ALIGN - 1; |
5003 | 5087 | ||
5004 | p = kzalloc(alloc_size, GFP_KERNEL); | 5088 | p = kzalloc(alloc_size, GFP_KERNEL); |
5005 | if (!p) { | 5089 | if (!p) { |
@@ -5014,13 +5098,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5014 | goto free_p; | 5098 | goto free_p; |
5015 | } | 5099 | } |
5016 | 5100 | ||
5017 | dev = (struct net_device *) | 5101 | dev = PTR_ALIGN(p, NETDEV_ALIGN); |
5018 | (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); | ||
5019 | dev->padded = (char *)dev - (char *)p; | 5102 | dev->padded = (char *)dev - (char *)p; |
5020 | 5103 | ||
5021 | if (dev_addr_init(dev)) | 5104 | if (dev_addr_init(dev)) |
5022 | goto free_tx; | 5105 | goto free_tx; |
5023 | 5106 | ||
5107 | dev_unicast_init(dev); | ||
5108 | |||
5024 | dev_net_set(dev, &init_net); | 5109 | dev_net_set(dev, &init_net); |
5025 | 5110 | ||
5026 | dev->_tx = tx; | 5111 | dev->_tx = tx; |
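alloc_netdev_mq() now uses the generic alignment helpers instead of the open-coded NETDEV_ALIGN_CONST masks. Assuming the usual kernel definitions, roughly:

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
    #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))

With NETDEV_ALIGN == 32, the kzalloc'ed pointer p is rounded up to the next 32-byte boundary and the slack is recorded in dev->padded so free_netdev() can recover the original allocation.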
@@ -5224,6 +5309,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5224 | /* | 5309 | /* |
5225 | * Flush the unicast and multicast chains | 5310 | * Flush the unicast and multicast chains |
5226 | */ | 5311 | */ |
5312 | dev_unicast_flush(dev); | ||
5227 | dev_addr_discard(dev); | 5313 | dev_addr_discard(dev); |
5228 | 5314 | ||
5229 | netdev_unregister_kobject(dev); | 5315 | netdev_unregister_kobject(dev); |
diff --git a/net/core/iovec.c b/net/core/iovec.c index 40a76ce19d9f..16ad45d4882b 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
@@ -112,9 +112,9 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, | |||
112 | continue; | 112 | continue; |
113 | } | 113 | } |
114 | copy = min_t(unsigned int, iov->iov_len - offset, len); | 114 | copy = min_t(unsigned int, iov->iov_len - offset, len); |
115 | offset = 0; | 115 | if (copy_to_user(iov->iov_base + offset, kdata, copy)) |
116 | if (copy_to_user(iov->iov_base, kdata, copy)) | ||
117 | return -EFAULT; | 116 | return -EFAULT; |
117 | offset = 0; | ||
118 | kdata += copy; | 118 | kdata += copy; |
119 | len -= copy; | 119 | len -= copy; |
120 | } | 120 | } |
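The iovec.c change fixes an offset bug in memcpy_toiovecend(): offset was zeroed before the first copy_to_user(), so the first chunk always landed at iov_base rather than iov_base + offset. Worked example (sketch):

    /* iov = { .iov_base = buf, .iov_len = 8 }, offset = 4, copy = 4
     *   before: copy_to_user(buf,     kdata, 4)  -- clobbers buf[0..3]
     *   after:  copy_to_user(buf + 4, kdata, 4)  -- honours the offset
     * offset is reset to 0 only afterwards, for subsequent iovec entries */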
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index a1cbce7fdae5..163b4f5b0365 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -771,6 +771,28 @@ static __inline__ int neigh_max_probes(struct neighbour *n) | |||
771 | p->ucast_probes + p->app_probes + p->mcast_probes); | 771 | p->ucast_probes + p->app_probes + p->mcast_probes); |
772 | } | 772 | } |
773 | 773 | ||
774 | static void neigh_invalidate(struct neighbour *neigh) | ||
775 | { | ||
776 | struct sk_buff *skb; | ||
777 | |||
778 | NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); | ||
779 | NEIGH_PRINTK2("neigh %p is failed.\n", neigh); | ||
780 | neigh->updated = jiffies; | ||
781 | |||
782 | /* It is very thin place. report_unreachable is very complicated | ||
783 | routine. Particularly, it can hit the same neighbour entry! | ||
784 | |||
785 | So that, we try to be accurate and avoid dead loop. --ANK | ||
786 | */ | ||
787 | while (neigh->nud_state == NUD_FAILED && | ||
788 | (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { | ||
789 | write_unlock(&neigh->lock); | ||
790 | neigh->ops->error_report(neigh, skb); | ||
791 | write_lock(&neigh->lock); | ||
792 | } | ||
793 | skb_queue_purge(&neigh->arp_queue); | ||
794 | } | ||
795 | |||
774 | /* Called when a timer expires for a neighbour entry. */ | 796 | /* Called when a timer expires for a neighbour entry. */ |
775 | 797 | ||
776 | static void neigh_timer_handler(unsigned long arg) | 798 | static void neigh_timer_handler(unsigned long arg) |
@@ -835,26 +857,9 @@ static void neigh_timer_handler(unsigned long arg) | |||
835 | 857 | ||
836 | if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && | 858 | if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && |
837 | atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { | 859 | atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { |
838 | struct sk_buff *skb; | ||
839 | |||
840 | neigh->nud_state = NUD_FAILED; | 860 | neigh->nud_state = NUD_FAILED; |
841 | neigh->updated = jiffies; | ||
842 | notify = 1; | 861 | notify = 1; |
843 | NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); | 862 | neigh_invalidate(neigh); |
844 | NEIGH_PRINTK2("neigh %p is failed.\n", neigh); | ||
845 | |||
846 | /* It is very thin place. report_unreachable is very complicated | ||
847 | routine. Particularly, it can hit the same neighbour entry! | ||
848 | |||
849 | So that, we try to be accurate and avoid dead loop. --ANK | ||
850 | */ | ||
851 | while (neigh->nud_state == NUD_FAILED && | ||
852 | (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { | ||
853 | write_unlock(&neigh->lock); | ||
854 | neigh->ops->error_report(neigh, skb); | ||
855 | write_lock(&neigh->lock); | ||
856 | } | ||
857 | skb_queue_purge(&neigh->arp_queue); | ||
858 | } | 863 | } |
859 | 864 | ||
860 | if (neigh->nud_state & NUD_IN_TIMER) { | 865 | if (neigh->nud_state & NUD_IN_TIMER) { |
@@ -1001,6 +1006,11 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1001 | neigh->nud_state = new; | 1006 | neigh->nud_state = new; |
1002 | err = 0; | 1007 | err = 0; |
1003 | notify = old & NUD_VALID; | 1008 | notify = old & NUD_VALID; |
1009 | if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && | ||
1010 | (new & NUD_FAILED)) { | ||
1011 | neigh_invalidate(neigh); | ||
1012 | notify = 1; | ||
1013 | } | ||
1004 | goto out; | 1014 | goto out; |
1005 | } | 1015 | } |
1006 | 1016 | ||
@@ -1088,8 +1098,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1088 | struct neighbour *n1 = neigh; | 1098 | struct neighbour *n1 = neigh; |
1089 | write_unlock_bh(&neigh->lock); | 1099 | write_unlock_bh(&neigh->lock); |
1090 | /* On shaper/eql skb->dst->neighbour != neigh :( */ | 1100 | /* On shaper/eql skb->dst->neighbour != neigh :( */ |
1091 | if (skb->dst && skb->dst->neighbour) | 1101 | if (skb_dst(skb) && skb_dst(skb)->neighbour) |
1092 | n1 = skb->dst->neighbour; | 1102 | n1 = skb_dst(skb)->neighbour; |
1093 | n1->output(skb); | 1103 | n1->output(skb); |
1094 | write_lock_bh(&neigh->lock); | 1104 | write_lock_bh(&neigh->lock); |
1095 | } | 1105 | } |
@@ -1182,7 +1192,7 @@ EXPORT_SYMBOL(neigh_compat_output); | |||
1182 | 1192 | ||
1183 | int neigh_resolve_output(struct sk_buff *skb) | 1193 | int neigh_resolve_output(struct sk_buff *skb) |
1184 | { | 1194 | { |
1185 | struct dst_entry *dst = skb->dst; | 1195 | struct dst_entry *dst = skb_dst(skb); |
1186 | struct neighbour *neigh; | 1196 | struct neighbour *neigh; |
1187 | int rc = 0; | 1197 | int rc = 0; |
1188 | 1198 | ||
@@ -1229,7 +1239,7 @@ EXPORT_SYMBOL(neigh_resolve_output); | |||
1229 | int neigh_connected_output(struct sk_buff *skb) | 1239 | int neigh_connected_output(struct sk_buff *skb) |
1230 | { | 1240 | { |
1231 | int err; | 1241 | int err; |
1232 | struct dst_entry *dst = skb->dst; | 1242 | struct dst_entry *dst = skb_dst(skb); |
1233 | struct neighbour *neigh = dst->neighbour; | 1243 | struct neighbour *neigh = dst->neighbour; |
1234 | struct net_device *dev = neigh->dev; | 1244 | struct net_device *dev = neigh->dev; |
1235 | 1245 | ||
@@ -1298,8 +1308,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, | |||
1298 | if (time_before(tbl->proxy_timer.expires, sched_next)) | 1308 | if (time_before(tbl->proxy_timer.expires, sched_next)) |
1299 | sched_next = tbl->proxy_timer.expires; | 1309 | sched_next = tbl->proxy_timer.expires; |
1300 | } | 1310 | } |
1301 | dst_release(skb->dst); | 1311 | skb_dst_drop(skb); |
1302 | skb->dst = NULL; | ||
1303 | dev_hold(skb->dev); | 1312 | dev_hold(skb->dev); |
1304 | __skb_queue_tail(&tbl->proxy_queue, skb); | 1313 | __skb_queue_tail(&tbl->proxy_queue, skb); |
1305 | mod_timer(&tbl->proxy_timer, sched_next); | 1314 | mod_timer(&tbl->proxy_timer, sched_next); |
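The neighbour.c conversions use the new skb->dst accessor layer rather than touching the field directly; assuming helpers along these lines (a sketch of what this series converts to):

    static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
    {
            return skb->_skb_dst;
    }

    static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
    {
            skb->_skb_dst = dst;
    }

    static inline void skb_dst_drop(struct sk_buff *skb)
    {
            dst_release(skb_dst(skb));
            skb->_skb_dst = NULL;
    }

skb_dst_drop() replaces the repeated dst_release()-then-NULL idiom seen in the removed lines here and in the dev.c and skbuff.c hunks.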
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b8ccd3c88d63..19b8c20e98a4 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -3691,8 +3691,7 @@ out1: | |||
3691 | #ifdef CONFIG_XFRM | 3691 | #ifdef CONFIG_XFRM |
3692 | free_SAs(pkt_dev); | 3692 | free_SAs(pkt_dev); |
3693 | #endif | 3693 | #endif |
3694 | if (pkt_dev->flows) | 3694 | vfree(pkt_dev->flows); |
3695 | vfree(pkt_dev->flows); | ||
3696 | kfree(pkt_dev); | 3695 | kfree(pkt_dev); |
3697 | return err; | 3696 | return err; |
3698 | } | 3697 | } |
@@ -3791,8 +3790,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, | |||
3791 | #ifdef CONFIG_XFRM | 3790 | #ifdef CONFIG_XFRM |
3792 | free_SAs(pkt_dev); | 3791 | free_SAs(pkt_dev); |
3793 | #endif | 3792 | #endif |
3794 | if (pkt_dev->flows) | 3793 | vfree(pkt_dev->flows); |
3795 | vfree(pkt_dev->flows); | ||
3796 | kfree(pkt_dev); | 3794 | kfree(pkt_dev); |
3797 | return 0; | 3795 | return 0; |
3798 | } | 3796 | } |
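Both pktgen hunks rely on vfree() being a no-op for a NULL pointer, like kfree(), so the guard was redundant:

    if (pkt_dev->flows)          /* before: superfluous NULL test */
            vfree(pkt_dev->flows);

    vfree(pkt_dev->flows);       /* after: equivalent behaviour   */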
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c index 86234923a3b7..79687dfd6957 100644 --- a/net/core/skb_dma_map.c +++ b/net/core/skb_dma_map.c | |||
@@ -20,7 +20,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb, | |||
20 | if (dma_mapping_error(dev, map)) | 20 | if (dma_mapping_error(dev, map)) |
21 | goto out_err; | 21 | goto out_err; |
22 | 22 | ||
23 | sp->dma_maps[0] = map; | 23 | sp->dma_head = map; |
24 | for (i = 0; i < sp->nr_frags; i++) { | 24 | for (i = 0; i < sp->nr_frags; i++) { |
25 | skb_frag_t *fp = &sp->frags[i]; | 25 | skb_frag_t *fp = &sp->frags[i]; |
26 | 26 | ||
@@ -28,9 +28,8 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb, | |||
28 | fp->size, dir); | 28 | fp->size, dir); |
29 | if (dma_mapping_error(dev, map)) | 29 | if (dma_mapping_error(dev, map)) |
30 | goto unwind; | 30 | goto unwind; |
31 | sp->dma_maps[i + 1] = map; | 31 | sp->dma_maps[i] = map; |
32 | } | 32 | } |
33 | sp->num_dma_maps = i + 1; | ||
34 | 33 | ||
35 | return 0; | 34 | return 0; |
36 | 35 | ||
@@ -38,10 +37,10 @@ unwind: | |||
38 | while (--i >= 0) { | 37 | while (--i >= 0) { |
39 | skb_frag_t *fp = &sp->frags[i]; | 38 | skb_frag_t *fp = &sp->frags[i]; |
40 | 39 | ||
41 | dma_unmap_page(dev, sp->dma_maps[i + 1], | 40 | dma_unmap_page(dev, sp->dma_maps[i], |
42 | fp->size, dir); | 41 | fp->size, dir); |
43 | } | 42 | } |
44 | dma_unmap_single(dev, sp->dma_maps[0], | 43 | dma_unmap_single(dev, sp->dma_head, |
45 | skb_headlen(skb), dir); | 44 | skb_headlen(skb), dir); |
46 | out_err: | 45 | out_err: |
47 | return -ENOMEM; | 46 | return -ENOMEM; |
@@ -54,12 +53,12 @@ void skb_dma_unmap(struct device *dev, struct sk_buff *skb, | |||
54 | struct skb_shared_info *sp = skb_shinfo(skb); | 53 | struct skb_shared_info *sp = skb_shinfo(skb); |
55 | int i; | 54 | int i; |
56 | 55 | ||
57 | dma_unmap_single(dev, sp->dma_maps[0], | 56 | dma_unmap_single(dev, sp->dma_head, |
58 | skb_headlen(skb), dir); | 57 | skb_headlen(skb), dir); |
59 | for (i = 0; i < sp->nr_frags; i++) { | 58 | for (i = 0; i < sp->nr_frags; i++) { |
60 | skb_frag_t *fp = &sp->frags[i]; | 59 | skb_frag_t *fp = &sp->frags[i]; |
61 | 60 | ||
62 | dma_unmap_page(dev, sp->dma_maps[i + 1], | 61 | dma_unmap_page(dev, sp->dma_maps[i], |
63 | fp->size, dir); | 62 | fp->size, dir); |
64 | } | 63 | } |
65 | } | 64 | } |
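skb_dma_map()/skb_dma_unmap() now keep the head mapping in its own field, so fragment i maps to slot i instead of i + 1 and the num_dma_maps counter goes away. Sketch of the skb_shared_info fields this assumes (see include/linux/skbuff.h in this tree for the authoritative layout):

    dma_addr_t dma_head;                  /* mapping of the linear skb->data */
    dma_addr_t dma_maps[MAX_SKB_FRAGS];   /* one slot per page fragment      */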
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8e815e685f28..b94d777e3eb4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, | |||
210 | shinfo->gso_type = 0; | 210 | shinfo->gso_type = 0; |
211 | shinfo->ip6_frag_id = 0; | 211 | shinfo->ip6_frag_id = 0; |
212 | shinfo->tx_flags.flags = 0; | 212 | shinfo->tx_flags.flags = 0; |
213 | shinfo->frag_list = NULL; | 213 | skb_frag_list_init(skb); |
214 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); | 214 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); |
215 | 215 | ||
216 | if (fclone) { | 216 | if (fclone) { |
@@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb) | |||
323 | { | 323 | { |
324 | struct sk_buff *list; | 324 | struct sk_buff *list; |
325 | 325 | ||
326 | for (list = skb_shinfo(skb)->frag_list; list; list = list->next) | 326 | skb_walk_frags(skb, list) |
327 | skb_get(list); | 327 | skb_get(list); |
328 | } | 328 | } |
329 | 329 | ||
@@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb) | |||
338 | put_page(skb_shinfo(skb)->frags[i].page); | 338 | put_page(skb_shinfo(skb)->frags[i].page); |
339 | } | 339 | } |
340 | 340 | ||
341 | if (skb_shinfo(skb)->frag_list) | 341 | if (skb_has_frags(skb)) |
342 | skb_drop_fraglist(skb); | 342 | skb_drop_fraglist(skb); |
343 | 343 | ||
344 | kfree(skb->head); | 344 | kfree(skb->head); |
@@ -381,7 +381,7 @@ static void kfree_skbmem(struct sk_buff *skb) | |||
381 | 381 | ||
382 | static void skb_release_head_state(struct sk_buff *skb) | 382 | static void skb_release_head_state(struct sk_buff *skb) |
383 | { | 383 | { |
384 | dst_release(skb->dst); | 384 | skb_dst_drop(skb); |
385 | #ifdef CONFIG_XFRM | 385 | #ifdef CONFIG_XFRM |
386 | secpath_put(skb->sp); | 386 | secpath_put(skb->sp); |
387 | #endif | 387 | #endif |
@@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
503 | shinfo->gso_type = 0; | 503 | shinfo->gso_type = 0; |
504 | shinfo->ip6_frag_id = 0; | 504 | shinfo->ip6_frag_id = 0; |
505 | shinfo->tx_flags.flags = 0; | 505 | shinfo->tx_flags.flags = 0; |
506 | shinfo->frag_list = NULL; | 506 | skb_frag_list_init(skb); |
507 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); | 507 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); |
508 | 508 | ||
509 | memset(skb, 0, offsetof(struct sk_buff, tail)); | 509 | memset(skb, 0, offsetof(struct sk_buff, tail)); |
@@ -521,7 +521,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
521 | new->transport_header = old->transport_header; | 521 | new->transport_header = old->transport_header; |
522 | new->network_header = old->network_header; | 522 | new->network_header = old->network_header; |
523 | new->mac_header = old->mac_header; | 523 | new->mac_header = old->mac_header; |
524 | new->dst = dst_clone(old->dst); | 524 | skb_dst_set(new, dst_clone(skb_dst(old))); |
525 | #ifdef CONFIG_XFRM | 525 | #ifdef CONFIG_XFRM |
526 | new->sp = secpath_get(old->sp); | 526 | new->sp = secpath_get(old->sp); |
527 | #endif | 527 | #endif |
@@ -552,7 +552,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
552 | new->vlan_tci = old->vlan_tci; | 552 | new->vlan_tci = old->vlan_tci; |
553 | #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) | 553 | #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) |
554 | new->do_not_encrypt = old->do_not_encrypt; | 554 | new->do_not_encrypt = old->do_not_encrypt; |
555 | new->requeue = old->requeue; | ||
556 | #endif | 555 | #endif |
557 | 556 | ||
558 | skb_copy_secmark(new, old); | 557 | skb_copy_secmark(new, old); |
@@ -758,7 +757,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) | |||
758 | skb_shinfo(n)->nr_frags = i; | 757 | skb_shinfo(n)->nr_frags = i; |
759 | } | 758 | } |
760 | 759 | ||
761 | if (skb_shinfo(skb)->frag_list) { | 760 | if (skb_has_frags(skb)) { |
762 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; | 761 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
763 | skb_clone_fraglist(n); | 762 | skb_clone_fraglist(n); |
764 | } | 763 | } |
@@ -821,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
821 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 820 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
822 | get_page(skb_shinfo(skb)->frags[i].page); | 821 | get_page(skb_shinfo(skb)->frags[i].page); |
823 | 822 | ||
824 | if (skb_shinfo(skb)->frag_list) | 823 | if (skb_has_frags(skb)) |
825 | skb_clone_fraglist(skb); | 824 | skb_clone_fraglist(skb); |
826 | 825 | ||
827 | skb_release_data(skb); | 826 | skb_release_data(skb); |
@@ -1093,7 +1092,7 @@ drop_pages: | |||
1093 | for (; i < nfrags; i++) | 1092 | for (; i < nfrags; i++) |
1094 | put_page(skb_shinfo(skb)->frags[i].page); | 1093 | put_page(skb_shinfo(skb)->frags[i].page); |
1095 | 1094 | ||
1096 | if (skb_shinfo(skb)->frag_list) | 1095 | if (skb_has_frags(skb)) |
1097 | skb_drop_fraglist(skb); | 1096 | skb_drop_fraglist(skb); |
1098 | goto done; | 1097 | goto done; |
1099 | } | 1098 | } |
@@ -1188,7 +1187,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) | |||
1188 | /* Optimization: no fragments, no reasons to preestimate | 1187 | /* Optimization: no fragments, no reasons to preestimate |
1189 | * size of pulled pages. Superb. | 1188 | * size of pulled pages. Superb. |
1190 | */ | 1189 | */ |
1191 | if (!skb_shinfo(skb)->frag_list) | 1190 | if (!skb_has_frags(skb)) |
1192 | goto pull_pages; | 1191 | goto pull_pages; |
1193 | 1192 | ||
1194 | /* Estimate size of pulled pages. */ | 1193 | /* Estimate size of pulled pages. */ |
@@ -1285,8 +1284,9 @@ EXPORT_SYMBOL(__pskb_pull_tail); | |||
1285 | 1284 | ||
1286 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | 1285 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
1287 | { | 1286 | { |
1288 | int i, copy; | ||
1289 | int start = skb_headlen(skb); | 1287 | int start = skb_headlen(skb); |
1288 | struct sk_buff *frag_iter; | ||
1289 | int i, copy; | ||
1290 | 1290 | ||
1291 | if (offset > (int)skb->len - len) | 1291 | if (offset > (int)skb->len - len) |
1292 | goto fault; | 1292 | goto fault; |
@@ -1328,28 +1328,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | |||
1328 | start = end; | 1328 | start = end; |
1329 | } | 1329 | } |
1330 | 1330 | ||
1331 | if (skb_shinfo(skb)->frag_list) { | 1331 | skb_walk_frags(skb, frag_iter) { |
1332 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1332 | int end; |
1333 | 1333 | ||
1334 | for (; list; list = list->next) { | 1334 | WARN_ON(start > offset + len); |
1335 | int end; | 1335 | |
1336 | 1336 | end = start + frag_iter->len; | |
1337 | WARN_ON(start > offset + len); | 1337 | if ((copy = end - offset) > 0) { |
1338 | 1338 | if (copy > len) | |
1339 | end = start + list->len; | 1339 | copy = len; |
1340 | if ((copy = end - offset) > 0) { | 1340 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
1341 | if (copy > len) | 1341 | goto fault; |
1342 | copy = len; | 1342 | if ((len -= copy) == 0) |
1343 | if (skb_copy_bits(list, offset - start, | 1343 | return 0; |
1344 | to, copy)) | 1344 | offset += copy; |
1345 | goto fault; | 1345 | to += copy; |
1346 | if ((len -= copy) == 0) | ||
1347 | return 0; | ||
1348 | offset += copy; | ||
1349 | to += copy; | ||
1350 | } | ||
1351 | start = end; | ||
1352 | } | 1346 | } |
1347 | start = end; | ||
1353 | } | 1348 | } |
1354 | if (!len) | 1349 | if (!len) |
1355 | return 0; | 1350 | return 0; |
@@ -1534,6 +1529,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1534 | .ops = &sock_pipe_buf_ops, | 1529 | .ops = &sock_pipe_buf_ops, |
1535 | .spd_release = sock_spd_release, | 1530 | .spd_release = sock_spd_release, |
1536 | }; | 1531 | }; |
1532 | struct sk_buff *frag_iter; | ||
1537 | struct sock *sk = skb->sk; | 1533 | struct sock *sk = skb->sk; |
1538 | 1534 | ||
1539 | /* | 1535 | /* |
@@ -1548,13 +1544,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1548 | /* | 1544 | /* |
1549 | * now see if we have a frag_list to map | 1545 | * now see if we have a frag_list to map |
1550 | */ | 1546 | */ |
1551 | if (skb_shinfo(skb)->frag_list) { | 1547 | skb_walk_frags(skb, frag_iter) { |
1552 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1548 | if (!tlen) |
1553 | 1549 | break; | |
1554 | for (; list && tlen; list = list->next) { | 1550 | if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk)) |
1555 | if (__skb_splice_bits(list, &offset, &tlen, &spd, sk)) | 1551 | break; |
1556 | break; | ||
1557 | } | ||
1558 | } | 1552 | } |
1559 | 1553 | ||
1560 | done: | 1554 | done: |
@@ -1593,8 +1587,9 @@ done: | |||
1593 | 1587 | ||
1594 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | 1588 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
1595 | { | 1589 | { |
1596 | int i, copy; | ||
1597 | int start = skb_headlen(skb); | 1590 | int start = skb_headlen(skb); |
1591 | struct sk_buff *frag_iter; | ||
1592 | int i, copy; | ||
1598 | 1593 | ||
1599 | if (offset > (int)skb->len - len) | 1594 | if (offset > (int)skb->len - len) |
1600 | goto fault; | 1595 | goto fault; |
@@ -1635,28 +1630,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | |||
1635 | start = end; | 1630 | start = end; |
1636 | } | 1631 | } |
1637 | 1632 | ||
1638 | if (skb_shinfo(skb)->frag_list) { | 1633 | skb_walk_frags(skb, frag_iter) { |
1639 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1634 | int end; |
1640 | 1635 | ||
1641 | for (; list; list = list->next) { | 1636 | WARN_ON(start > offset + len); |
1642 | int end; | 1637 | |
1643 | 1638 | end = start + frag_iter->len; | |
1644 | WARN_ON(start > offset + len); | 1639 | if ((copy = end - offset) > 0) { |
1645 | 1640 | if (copy > len) | |
1646 | end = start + list->len; | 1641 | copy = len; |
1647 | if ((copy = end - offset) > 0) { | 1642 | if (skb_store_bits(frag_iter, offset - start, |
1648 | if (copy > len) | 1643 | from, copy)) |
1649 | copy = len; | 1644 | goto fault; |
1650 | if (skb_store_bits(list, offset - start, | 1645 | if ((len -= copy) == 0) |
1651 | from, copy)) | 1646 | return 0; |
1652 | goto fault; | 1647 | offset += copy; |
1653 | if ((len -= copy) == 0) | 1648 | from += copy; |
1654 | return 0; | ||
1655 | offset += copy; | ||
1656 | from += copy; | ||
1657 | } | ||
1658 | start = end; | ||
1659 | } | 1649 | } |
1650 | start = end; | ||
1660 | } | 1651 | } |
1661 | if (!len) | 1652 | if (!len) |
1662 | return 0; | 1653 | return 0; |
@@ -1673,6 +1664,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1673 | { | 1664 | { |
1674 | int start = skb_headlen(skb); | 1665 | int start = skb_headlen(skb); |
1675 | int i, copy = start - offset; | 1666 | int i, copy = start - offset; |
1667 | struct sk_buff *frag_iter; | ||
1676 | int pos = 0; | 1668 | int pos = 0; |
1677 | 1669 | ||
1678 | /* Checksum header. */ | 1670 | /* Checksum header. */ |
@@ -1712,29 +1704,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1712 | start = end; | 1704 | start = end; |
1713 | } | 1705 | } |
1714 | 1706 | ||
1715 | if (skb_shinfo(skb)->frag_list) { | 1707 | skb_walk_frags(skb, frag_iter) { |
1716 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1708 | int end; |
1717 | 1709 | ||
1718 | for (; list; list = list->next) { | 1710 | WARN_ON(start > offset + len); |
1719 | int end; | 1711 | |
1720 | 1712 | end = start + frag_iter->len; | |
1721 | WARN_ON(start > offset + len); | 1713 | if ((copy = end - offset) > 0) { |
1722 | 1714 | __wsum csum2; | |
1723 | end = start + list->len; | 1715 | if (copy > len) |
1724 | if ((copy = end - offset) > 0) { | 1716 | copy = len; |
1725 | __wsum csum2; | 1717 | csum2 = skb_checksum(frag_iter, offset - start, |
1726 | if (copy > len) | 1718 | copy, 0); |
1727 | copy = len; | 1719 | csum = csum_block_add(csum, csum2, pos); |
1728 | csum2 = skb_checksum(list, offset - start, | 1720 | if ((len -= copy) == 0) |
1729 | copy, 0); | 1721 | return csum; |
1730 | csum = csum_block_add(csum, csum2, pos); | 1722 | offset += copy; |
1731 | if ((len -= copy) == 0) | 1723 | pos += copy; |
1732 | return csum; | ||
1733 | offset += copy; | ||
1734 | pos += copy; | ||
1735 | } | ||
1736 | start = end; | ||
1737 | } | 1724 | } |
1725 | start = end; | ||
1738 | } | 1726 | } |
1739 | BUG_ON(len); | 1727 | BUG_ON(len); |
1740 | 1728 | ||
@@ -1749,6 +1737,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1749 | { | 1737 | { |
1750 | int start = skb_headlen(skb); | 1738 | int start = skb_headlen(skb); |
1751 | int i, copy = start - offset; | 1739 | int i, copy = start - offset; |
1740 | struct sk_buff *frag_iter; | ||
1752 | int pos = 0; | 1741 | int pos = 0; |
1753 | 1742 | ||
1754 | /* Copy header. */ | 1743 | /* Copy header. */ |
@@ -1793,31 +1782,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1793 | start = end; | 1782 | start = end; |
1794 | } | 1783 | } |
1795 | 1784 | ||
1796 | if (skb_shinfo(skb)->frag_list) { | 1785 | skb_walk_frags(skb, frag_iter) { |
1797 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1786 | __wsum csum2; |
1787 | int end; | ||
1798 | 1788 | ||
1799 | for (; list; list = list->next) { | 1789 | WARN_ON(start > offset + len); |
1800 | __wsum csum2; | 1790 | |
1801 | int end; | 1791 | end = start + frag_iter->len; |
1802 | 1792 | if ((copy = end - offset) > 0) { | |
1803 | WARN_ON(start > offset + len); | 1793 | if (copy > len) |
1804 | 1794 | copy = len; | |
1805 | end = start + list->len; | 1795 | csum2 = skb_copy_and_csum_bits(frag_iter, |
1806 | if ((copy = end - offset) > 0) { | 1796 | offset - start, |
1807 | if (copy > len) | 1797 | to, copy, 0); |
1808 | copy = len; | 1798 | csum = csum_block_add(csum, csum2, pos); |
1809 | csum2 = skb_copy_and_csum_bits(list, | 1799 | if ((len -= copy) == 0) |
1810 | offset - start, | 1800 | return csum; |
1811 | to, copy, 0); | 1801 | offset += copy; |
1812 | csum = csum_block_add(csum, csum2, pos); | 1802 | to += copy; |
1813 | if ((len -= copy) == 0) | 1803 | pos += copy; |
1814 | return csum; | ||
1815 | offset += copy; | ||
1816 | to += copy; | ||
1817 | pos += copy; | ||
1818 | } | ||
1819 | start = end; | ||
1820 | } | 1804 | } |
1805 | start = end; | ||
1821 | } | 1806 | } |
1822 | BUG_ON(len); | 1807 | BUG_ON(len); |
1823 | return csum; | 1808 | return csum; |
@@ -2327,8 +2312,7 @@ next_skb: | |||
2327 | st->frag_data = NULL; | 2312 | st->frag_data = NULL; |
2328 | } | 2313 | } |
2329 | 2314 | ||
2330 | if (st->root_skb == st->cur_skb && | 2315 | if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) { |
2331 | skb_shinfo(st->root_skb)->frag_list) { | ||
2332 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; | 2316 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
2333 | st->frag_idx = 0; | 2317 | st->frag_idx = 0; |
2334 | goto next_skb; | 2318 | goto next_skb; |
@@ -2639,7 +2623,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) | |||
2639 | } else | 2623 | } else |
2640 | skb_get(fskb2); | 2624 | skb_get(fskb2); |
2641 | 2625 | ||
2642 | BUG_ON(skb_shinfo(nskb)->frag_list); | 2626 | SKB_FRAG_ASSERT(nskb); |
2643 | skb_shinfo(nskb)->frag_list = fskb2; | 2627 | skb_shinfo(nskb)->frag_list = fskb2; |
2644 | } | 2628 | } |
2645 | 2629 | ||
@@ -2796,6 +2780,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2796 | { | 2780 | { |
2797 | int start = skb_headlen(skb); | 2781 | int start = skb_headlen(skb); |
2798 | int i, copy = start - offset; | 2782 | int i, copy = start - offset; |
2783 | struct sk_buff *frag_iter; | ||
2799 | int elt = 0; | 2784 | int elt = 0; |
2800 | 2785 | ||
2801 | if (copy > 0) { | 2786 | if (copy > 0) { |
@@ -2829,26 +2814,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2829 | start = end; | 2814 | start = end; |
2830 | } | 2815 | } |
2831 | 2816 | ||
2832 | if (skb_shinfo(skb)->frag_list) { | 2817 | skb_walk_frags(skb, frag_iter) { |
2833 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 2818 | int end; |
2834 | |||
2835 | for (; list; list = list->next) { | ||
2836 | int end; | ||
2837 | 2819 | ||
2838 | WARN_ON(start > offset + len); | 2820 | WARN_ON(start > offset + len); |
2839 | 2821 | ||
2840 | end = start + list->len; | 2822 | end = start + frag_iter->len; |
2841 | if ((copy = end - offset) > 0) { | 2823 | if ((copy = end - offset) > 0) { |
2842 | if (copy > len) | 2824 | if (copy > len) |
2843 | copy = len; | 2825 | copy = len; |
2844 | elt += __skb_to_sgvec(list, sg+elt, offset - start, | 2826 | elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
2845 | copy); | 2827 | copy); |
2846 | if ((len -= copy) == 0) | 2828 | if ((len -= copy) == 0) |
2847 | return elt; | 2829 | return elt; |
2848 | offset += copy; | 2830 | offset += copy; |
2849 | } | ||
2850 | start = end; | ||
2851 | } | 2831 | } |
2832 | start = end; | ||
2852 | } | 2833 | } |
2853 | BUG_ON(len); | 2834 | BUG_ON(len); |
2854 | return elt; | 2835 | return elt; |
@@ -2896,7 +2877,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2896 | return -ENOMEM; | 2877 | return -ENOMEM; |
2897 | 2878 | ||
2898 | /* Easy case. Most of packets will go this way. */ | 2879 | /* Easy case. Most of packets will go this way. */ |
2899 | if (!skb_shinfo(skb)->frag_list) { | 2880 | if (!skb_has_frags(skb)) { |
2900 | /* A little of trouble, not enough of space for trailer. | 2881 | /* A little of trouble, not enough of space for trailer. |
2901 | * This should not happen, when stack is tuned to generate | 2882 | * This should not happen, when stack is tuned to generate |
2902 | * good frames. OK, on miss we reallocate and reserve even more | 2883 | * good frames. OK, on miss we reallocate and reserve even more |
@@ -2931,7 +2912,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2931 | 2912 | ||
2932 | if (skb1->next == NULL && tailbits) { | 2913 | if (skb1->next == NULL && tailbits) { |
2933 | if (skb_shinfo(skb1)->nr_frags || | 2914 | if (skb_shinfo(skb1)->nr_frags || |
2934 | skb_shinfo(skb1)->frag_list || | 2915 | skb_has_frags(skb1) || |
2935 | skb_tailroom(skb1) < tailbits) | 2916 | skb_tailroom(skb1) < tailbits) |
2936 | ntail = tailbits + 128; | 2917 | ntail = tailbits + 128; |
2937 | } | 2918 | } |
@@ -2940,7 +2921,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2940 | skb_cloned(skb1) || | 2921 | skb_cloned(skb1) || |
2941 | ntail || | 2922 | ntail || |
2942 | skb_shinfo(skb1)->nr_frags || | 2923 | skb_shinfo(skb1)->nr_frags || |
2943 | skb_shinfo(skb1)->frag_list) { | 2924 | skb_has_frags(skb1)) { |
2944 | struct sk_buff *skb2; | 2925 | struct sk_buff *skb2; |
2945 | 2926 | ||
2946 | /* Fuck, we are miserable poor guys... */ | 2927 | /* Fuck, we are miserable poor guys... */ |
@@ -3026,12 +3007,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx); | |||
3026 | */ | 3007 | */ |
3027 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) | 3008 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
3028 | { | 3009 | { |
3029 | if (unlikely(start > skb->len - 2) || | 3010 | if (unlikely(start > skb_headlen(skb)) || |
3030 | unlikely((int)start + off > skb->len - 2)) { | 3011 | unlikely((int)start + off > skb_headlen(skb) - 2)) { |
3031 | if (net_ratelimit()) | 3012 | if (net_ratelimit()) |
3032 | printk(KERN_WARNING | 3013 | printk(KERN_WARNING |
3033 | "bad partial csum: csum=%u/%u len=%u\n", | 3014 | "bad partial csum: csum=%u/%u len=%u\n", |
3034 | start, off, skb->len); | 3015 | start, off, skb_headlen(skb)); |
3035 | return false; | 3016 | return false; |
3036 | } | 3017 | } |
3037 | skb->ip_summed = CHECKSUM_PARTIAL; | 3018 | skb->ip_summed = CHECKSUM_PARTIAL; |
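The skb_partial_csum_set() fix tightens the bounds: csum_start and the 16-bit checksum slot it writes must land in the linear header (skb_headlen()), not merely within skb->len, which also counts paged frags that cannot be patched in place. From a caller's side the contract is unchanged; a sketch with hypothetical offsets taken from an untrusted header:

	/* start/off: hypothetical offsets parsed from an untrusted
	 * (e.g. guest-supplied) header. On success skb->ip_summed is
	 * CHECKSUM_PARTIAL and both offsets are known to sit in the
	 * linear area.
	 */
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;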
diff --git a/net/core/sock.c b/net/core/sock.c index 7dbf3ffb35cc..06e26b77ad9e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -155,6 +155,7 @@ static const char *af_family_key_strings[AF_MAX+1] = { | |||
155 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , | 155 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , |
156 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , | 156 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , |
157 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , | 157 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , |
158 | "sk_lock-AF_IEEE802154", | ||
158 | "sk_lock-AF_MAX" | 159 | "sk_lock-AF_MAX" |
159 | }; | 160 | }; |
160 | static const char *af_family_slock_key_strings[AF_MAX+1] = { | 161 | static const char *af_family_slock_key_strings[AF_MAX+1] = { |
@@ -170,6 +171,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = { | |||
170 | "slock-27" , "slock-28" , "slock-AF_CAN" , | 171 | "slock-27" , "slock-28" , "slock-AF_CAN" , |
171 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , | 172 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , |
172 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , | 173 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , |
174 | "slock-AF_IEEE802154", | ||
173 | "slock-AF_MAX" | 175 | "slock-AF_MAX" |
174 | }; | 176 | }; |
175 | static const char *af_family_clock_key_strings[AF_MAX+1] = { | 177 | static const char *af_family_clock_key_strings[AF_MAX+1] = { |
@@ -185,6 +187,7 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = { | |||
185 | "clock-27" , "clock-28" , "clock-AF_CAN" , | 187 | "clock-27" , "clock-28" , "clock-AF_CAN" , |
186 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , | 188 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , |
187 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , | 189 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , |
190 | "clock-AF_IEEE802154", | ||
188 | "clock-AF_MAX" | 191 | "clock-AF_MAX" |
189 | }; | 192 | }; |
190 | 193 | ||
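The three additions above keep the lockdep name tables in step with the address-family space: each array is indexed by sk->sk_family, so a family added without matching entries shifts every later family onto the wrong label, with "AF_MAX" falling off the end. Elsewhere in this file the arrays are consumed roughly like this (sketch of the indexing, not a quoted hunk):

	/* Sketch: lockdep class naming keyed by the socket's family.
	 * AF_IEEE802154 is the family these entries are added for.
	 */
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);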
@@ -212,6 +215,7 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; | |||
212 | 215 | ||
213 | /* Maximal space eaten by iovec or ancillary data plus some space */ | 216 | /* Maximal space eaten by iovec or ancillary data plus some space */ |
214 | int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); | 217 | int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); |
218 | EXPORT_SYMBOL(sysctl_optmem_max); | ||
215 | 219 | ||
216 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) | 220 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) |
217 | { | 221 | { |
@@ -444,7 +448,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | |||
444 | int sock_setsockopt(struct socket *sock, int level, int optname, | 448 | int sock_setsockopt(struct socket *sock, int level, int optname, |
445 | char __user *optval, int optlen) | 449 | char __user *optval, int optlen) |
446 | { | 450 | { |
447 | struct sock *sk=sock->sk; | 451 | struct sock *sk = sock->sk; |
448 | int val; | 452 | int val; |
449 | int valbool; | 453 | int valbool; |
450 | struct linger ling; | 454 | struct linger ling; |
@@ -463,15 +467,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname, | |||
463 | if (get_user(val, (int __user *)optval)) | 467 | if (get_user(val, (int __user *)optval)) |
464 | return -EFAULT; | 468 | return -EFAULT; |
465 | 469 | ||
466 | valbool = val?1:0; | 470 | valbool = val ? 1 : 0; |
467 | 471 | ||
468 | lock_sock(sk); | 472 | lock_sock(sk); |
469 | 473 | ||
470 | switch(optname) { | 474 | switch (optname) { |
471 | case SO_DEBUG: | 475 | case SO_DEBUG: |
472 | if (val && !capable(CAP_NET_ADMIN)) { | 476 | if (val && !capable(CAP_NET_ADMIN)) |
473 | ret = -EACCES; | 477 | ret = -EACCES; |
474 | } else | 478 | else |
475 | sock_valbool_flag(sk, SOCK_DBG, valbool); | 479 | sock_valbool_flag(sk, SOCK_DBG, valbool); |
476 | break; | 480 | break; |
477 | case SO_REUSEADDR: | 481 | case SO_REUSEADDR: |
@@ -582,7 +586,7 @@ set_rcvbuf: | |||
582 | ret = -EINVAL; /* 1003.1g */ | 586 | ret = -EINVAL; /* 1003.1g */ |
583 | break; | 587 | break; |
584 | } | 588 | } |
585 | if (copy_from_user(&ling,optval,sizeof(ling))) { | 589 | if (copy_from_user(&ling, optval, sizeof(ling))) { |
586 | ret = -EFAULT; | 590 | ret = -EFAULT; |
587 | break; | 591 | break; |
588 | } | 592 | } |
@@ -690,9 +694,8 @@ set_rcvbuf: | |||
690 | case SO_MARK: | 694 | case SO_MARK: |
691 | if (!capable(CAP_NET_ADMIN)) | 695 | if (!capable(CAP_NET_ADMIN)) |
692 | ret = -EPERM; | 696 | ret = -EPERM; |
693 | else { | 697 | else |
694 | sk->sk_mark = val; | 698 | sk->sk_mark = val; |
695 | } | ||
696 | break; | 699 | break; |
697 | 700 | ||
698 | /* We implement the SO_SNDLOWAT etc to | 701 | /* We implement the SO_SNDLOWAT etc to |
@@ -704,6 +707,7 @@ set_rcvbuf: | |||
704 | release_sock(sk); | 707 | release_sock(sk); |
705 | return ret; | 708 | return ret; |
706 | } | 709 | } |
710 | EXPORT_SYMBOL(sock_setsockopt); | ||
707 | 711 | ||
708 | 712 | ||
709 | int sock_getsockopt(struct socket *sock, int level, int optname, | 713 | int sock_getsockopt(struct socket *sock, int level, int optname, |
@@ -727,7 +731,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
727 | 731 | ||
728 | memset(&v, 0, sizeof(v)); | 732 | memset(&v, 0, sizeof(v)); |
729 | 733 | ||
730 | switch(optname) { | 734 | switch (optname) { |
731 | case SO_DEBUG: | 735 | case SO_DEBUG: |
732 | v.val = sock_flag(sk, SOCK_DBG); | 736 | v.val = sock_flag(sk, SOCK_DBG); |
733 | break; | 737 | break; |
@@ -762,7 +766,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
762 | 766 | ||
763 | case SO_ERROR: | 767 | case SO_ERROR: |
764 | v.val = -sock_error(sk); | 768 | v.val = -sock_error(sk); |
765 | if (v.val==0) | 769 | if (v.val == 0) |
766 | v.val = xchg(&sk->sk_err_soft, 0); | 770 | v.val = xchg(&sk->sk_err_soft, 0); |
767 | break; | 771 | break; |
768 | 772 | ||
@@ -816,7 +820,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
816 | break; | 820 | break; |
817 | 821 | ||
818 | case SO_RCVTIMEO: | 822 | case SO_RCVTIMEO: |
819 | lv=sizeof(struct timeval); | 823 | lv = sizeof(struct timeval); |
820 | if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { | 824 | if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { |
821 | v.tm.tv_sec = 0; | 825 | v.tm.tv_sec = 0; |
822 | v.tm.tv_usec = 0; | 826 | v.tm.tv_usec = 0; |
@@ -827,7 +831,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
827 | break; | 831 | break; |
828 | 832 | ||
829 | case SO_SNDTIMEO: | 833 | case SO_SNDTIMEO: |
830 | lv=sizeof(struct timeval); | 834 | lv = sizeof(struct timeval); |
831 | if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { | 835 | if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { |
832 | v.tm.tv_sec = 0; | 836 | v.tm.tv_sec = 0; |
833 | v.tm.tv_usec = 0; | 837 | v.tm.tv_usec = 0; |
@@ -842,7 +846,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
842 | break; | 846 | break; |
843 | 847 | ||
844 | case SO_SNDLOWAT: | 848 | case SO_SNDLOWAT: |
845 | v.val=1; | 849 | v.val = 1; |
846 | break; | 850 | break; |
847 | 851 | ||
848 | case SO_PASSCRED: | 852 | case SO_PASSCRED: |
@@ -1002,8 +1006,9 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | |||
1002 | 1006 | ||
1003 | return sk; | 1007 | return sk; |
1004 | } | 1008 | } |
1009 | EXPORT_SYMBOL(sk_alloc); | ||
1005 | 1010 | ||
1006 | void sk_free(struct sock *sk) | 1011 | static void __sk_free(struct sock *sk) |
1007 | { | 1012 | { |
1008 | struct sk_filter *filter; | 1013 | struct sk_filter *filter; |
1009 | 1014 | ||
@@ -1027,6 +1032,18 @@ void sk_free(struct sock *sk) | |||
1027 | sk_prot_free(sk->sk_prot_creator, sk); | 1032 | sk_prot_free(sk->sk_prot_creator, sk); |
1028 | } | 1033 | } |
1029 | 1034 | ||
1035 | void sk_free(struct sock *sk) | ||
1036 | { | ||
1037 | /* | ||
1038 | * We subtract one from sk_wmem_alloc and can tell whether |||
1039 | * some packets are still in some tx queue. |||
1040 | * If not zero, sock_wfree() will call __sk_free(sk) later |||
1041 | */ | ||
1042 | if (atomic_dec_and_test(&sk->sk_wmem_alloc)) | ||
1043 | __sk_free(sk); | ||
1044 | } | ||
1045 | EXPORT_SYMBOL(sk_free); | ||
1046 | |||
1030 | /* | 1047 | /* |
1031 | * Last sock_put should drop reference to sk->sk_net. It has already | 1048 | * Last sock_put should drop reference to sk->sk_net. It has already |
1032 | * been dropped in sk_change_net. Taking reference to stopping namespace | 1049 | * been dropped in sk_change_net. Taking reference to stopping namespace |
@@ -1065,7 +1082,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1065 | newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; | 1082 | newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; |
1066 | 1083 | ||
1067 | atomic_set(&newsk->sk_rmem_alloc, 0); | 1084 | atomic_set(&newsk->sk_rmem_alloc, 0); |
1068 | atomic_set(&newsk->sk_wmem_alloc, 0); | 1085 | /* |
1086 | * sk_wmem_alloc set to one (see sk_free() and sock_wfree()) | ||
1087 | */ | ||
1088 | atomic_set(&newsk->sk_wmem_alloc, 1); | ||
1069 | atomic_set(&newsk->sk_omem_alloc, 0); | 1089 | atomic_set(&newsk->sk_omem_alloc, 0); |
1070 | skb_queue_head_init(&newsk->sk_receive_queue); | 1090 | skb_queue_head_init(&newsk->sk_receive_queue); |
1071 | skb_queue_head_init(&newsk->sk_write_queue); | 1091 | skb_queue_head_init(&newsk->sk_write_queue); |
@@ -1126,7 +1146,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | |||
1126 | out: | 1146 | out: |
1127 | return newsk; | 1147 | return newsk; |
1128 | } | 1148 | } |
1129 | |||
1130 | EXPORT_SYMBOL_GPL(sk_clone); | 1149 | EXPORT_SYMBOL_GPL(sk_clone); |
1131 | 1150 | ||
1132 | void sk_setup_caps(struct sock *sk, struct dst_entry *dst) | 1151 | void sk_setup_caps(struct sock *sk, struct dst_entry *dst) |
@@ -1170,13 +1189,20 @@ void __init sk_init(void) | |||
1170 | void sock_wfree(struct sk_buff *skb) | 1189 | void sock_wfree(struct sk_buff *skb) |
1171 | { | 1190 | { |
1172 | struct sock *sk = skb->sk; | 1191 | struct sock *sk = skb->sk; |
1192 | int res; | ||
1173 | 1193 | ||
1174 | /* In case it might be waiting for more memory. */ | 1194 | /* In case it might be waiting for more memory. */ |
1175 | atomic_sub(skb->truesize, &sk->sk_wmem_alloc); | 1195 | res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc); |
1176 | if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) | 1196 | if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) |
1177 | sk->sk_write_space(sk); | 1197 | sk->sk_write_space(sk); |
1178 | sock_put(sk); | 1198 | /* |
1199 | * if sk_wmem_alloc reached 0, we are last user and should | ||
1200 | * free this sock, as sk_free() call could not do it. | ||
1201 | */ | ||
1202 | if (res == 0) | ||
1203 | __sk_free(sk); | ||
1179 | } | 1204 | } |
1205 | EXPORT_SYMBOL(sock_wfree); | ||
1180 | 1206 | ||
1181 | /* | 1207 | /* |
1182 | * Read buffer destructor automatically called from kfree_skb. | 1208 | * Read buffer destructor automatically called from kfree_skb. |
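The pieces above form one scheme: sk_clone() now starts sk_wmem_alloc at 1 (sock_init_data() gains the same atomic_set further down), sk_free() only drops that bias, and sock_wfree() frees the socket when the counter actually reaches zero. sk_wmem_alloc thus doubles as a reference count: a socket whose transmitted skbs still sit in a qdisc or driver queue cannot be destroyed under them. A condensed restatement of the hunks, not a new API:

	/* The +1 bias means: sk_wmem_alloc == 1 <=> no tx skb in flight. */
	atomic_set(&sk->sk_wmem_alloc, 1);	/* at socket creation */

	void sk_free(struct sock *sk)		/* owner drops the bias */
	{
		if (atomic_dec_and_test(&sk->sk_wmem_alloc))
			__sk_free(sk);		/* nothing left in flight */
	}

	void sock_wfree(struct sk_buff *skb)	/* per-skb tx destructor */
	{
		struct sock *sk = skb->sk;
		int res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc);

		if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
			sk->sk_write_space(sk);
		if (res == 0)	/* last skb gone and sk_free() already ran */
			__sk_free(sk);
	}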
@@ -1188,6 +1214,7 @@ void sock_rfree(struct sk_buff *skb) | |||
1188 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | 1214 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
1189 | sk_mem_uncharge(skb->sk, skb->truesize); | 1215 | sk_mem_uncharge(skb->sk, skb->truesize); |
1190 | } | 1216 | } |
1217 | EXPORT_SYMBOL(sock_rfree); | ||
1191 | 1218 | ||
1192 | 1219 | ||
1193 | int sock_i_uid(struct sock *sk) | 1220 | int sock_i_uid(struct sock *sk) |
@@ -1199,6 +1226,7 @@ int sock_i_uid(struct sock *sk) | |||
1199 | read_unlock(&sk->sk_callback_lock); | 1226 | read_unlock(&sk->sk_callback_lock); |
1200 | return uid; | 1227 | return uid; |
1201 | } | 1228 | } |
1229 | EXPORT_SYMBOL(sock_i_uid); | ||
1202 | 1230 | ||
1203 | unsigned long sock_i_ino(struct sock *sk) | 1231 | unsigned long sock_i_ino(struct sock *sk) |
1204 | { | 1232 | { |
@@ -1209,6 +1237,7 @@ unsigned long sock_i_ino(struct sock *sk) | |||
1209 | read_unlock(&sk->sk_callback_lock); | 1237 | read_unlock(&sk->sk_callback_lock); |
1210 | return ino; | 1238 | return ino; |
1211 | } | 1239 | } |
1240 | EXPORT_SYMBOL(sock_i_ino); | ||
1212 | 1241 | ||
1213 | /* | 1242 | /* |
1214 | * Allocate a skb from the socket's send buffer. | 1243 | * Allocate a skb from the socket's send buffer. |
@@ -1217,7 +1246,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, | |||
1217 | gfp_t priority) | 1246 | gfp_t priority) |
1218 | { | 1247 | { |
1219 | if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { | 1248 | if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { |
1220 | struct sk_buff * skb = alloc_skb(size, priority); | 1249 | struct sk_buff *skb = alloc_skb(size, priority); |
1221 | if (skb) { | 1250 | if (skb) { |
1222 | skb_set_owner_w(skb, sk); | 1251 | skb_set_owner_w(skb, sk); |
1223 | return skb; | 1252 | return skb; |
@@ -1225,6 +1254,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, | |||
1225 | } | 1254 | } |
1226 | return NULL; | 1255 | return NULL; |
1227 | } | 1256 | } |
1257 | EXPORT_SYMBOL(sock_wmalloc); | ||
1228 | 1258 | ||
1229 | /* | 1259 | /* |
1230 | * Allocate a skb from the socket's receive buffer. | 1260 | * Allocate a skb from the socket's receive buffer. |
@@ -1261,6 +1291,7 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) | |||
1261 | } | 1291 | } |
1262 | return NULL; | 1292 | return NULL; |
1263 | } | 1293 | } |
1294 | EXPORT_SYMBOL(sock_kmalloc); | ||
1264 | 1295 | ||
1265 | /* | 1296 | /* |
1266 | * Free an option memory block. | 1297 | * Free an option memory block. |
@@ -1270,11 +1301,12 @@ void sock_kfree_s(struct sock *sk, void *mem, int size) | |||
1270 | kfree(mem); | 1301 | kfree(mem); |
1271 | atomic_sub(size, &sk->sk_omem_alloc); | 1302 | atomic_sub(size, &sk->sk_omem_alloc); |
1272 | } | 1303 | } |
1304 | EXPORT_SYMBOL(sock_kfree_s); | ||
1273 | 1305 | ||
1274 | /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. | 1306 | /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. |
1275 | I think, these locks should be removed for datagram sockets. | 1307 | I think, these locks should be removed for datagram sockets. |
1276 | */ | 1308 | */ |
1277 | static long sock_wait_for_wmem(struct sock * sk, long timeo) | 1309 | static long sock_wait_for_wmem(struct sock *sk, long timeo) |
1278 | { | 1310 | { |
1279 | DEFINE_WAIT(wait); | 1311 | DEFINE_WAIT(wait); |
1280 | 1312 | ||
@@ -1392,6 +1424,7 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, | |||
1392 | { | 1424 | { |
1393 | return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); | 1425 | return sock_alloc_send_pskb(sk, size, 0, noblock, errcode); |
1394 | } | 1426 | } |
1427 | EXPORT_SYMBOL(sock_alloc_send_skb); | ||
1395 | 1428 | ||
1396 | static void __lock_sock(struct sock *sk) | 1429 | static void __lock_sock(struct sock *sk) |
1397 | { | 1430 | { |
@@ -1460,7 +1493,6 @@ int sk_wait_data(struct sock *sk, long *timeo) | |||
1460 | finish_wait(sk->sk_sleep, &wait); | 1493 | finish_wait(sk->sk_sleep, &wait); |
1461 | return rc; | 1494 | return rc; |
1462 | } | 1495 | } |
1463 | |||
1464 | EXPORT_SYMBOL(sk_wait_data); | 1496 | EXPORT_SYMBOL(sk_wait_data); |
1465 | 1497 | ||
1466 | /** | 1498 | /** |
@@ -1541,7 +1573,6 @@ suppress_allocation: | |||
1541 | atomic_sub(amt, prot->memory_allocated); | 1573 | atomic_sub(amt, prot->memory_allocated); |
1542 | return 0; | 1574 | return 0; |
1543 | } | 1575 | } |
1544 | |||
1545 | EXPORT_SYMBOL(__sk_mem_schedule); | 1576 | EXPORT_SYMBOL(__sk_mem_schedule); |
1546 | 1577 | ||
1547 | /** | 1578 | /** |
@@ -1560,7 +1591,6 @@ void __sk_mem_reclaim(struct sock *sk) | |||
1560 | (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) | 1591 | (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) |
1561 | *prot->memory_pressure = 0; | 1592 | *prot->memory_pressure = 0; |
1562 | } | 1593 | } |
1563 | |||
1564 | EXPORT_SYMBOL(__sk_mem_reclaim); | 1594 | EXPORT_SYMBOL(__sk_mem_reclaim); |
1565 | 1595 | ||
1566 | 1596 | ||
@@ -1575,78 +1605,92 @@ int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) | |||
1575 | { | 1605 | { |
1576 | return -EOPNOTSUPP; | 1606 | return -EOPNOTSUPP; |
1577 | } | 1607 | } |
1608 | EXPORT_SYMBOL(sock_no_bind); | ||
1578 | 1609 | ||
1579 | int sock_no_connect(struct socket *sock, struct sockaddr *saddr, | 1610 | int sock_no_connect(struct socket *sock, struct sockaddr *saddr, |
1580 | int len, int flags) | 1611 | int len, int flags) |
1581 | { | 1612 | { |
1582 | return -EOPNOTSUPP; | 1613 | return -EOPNOTSUPP; |
1583 | } | 1614 | } |
1615 | EXPORT_SYMBOL(sock_no_connect); | ||
1584 | 1616 | ||
1585 | int sock_no_socketpair(struct socket *sock1, struct socket *sock2) | 1617 | int sock_no_socketpair(struct socket *sock1, struct socket *sock2) |
1586 | { | 1618 | { |
1587 | return -EOPNOTSUPP; | 1619 | return -EOPNOTSUPP; |
1588 | } | 1620 | } |
1621 | EXPORT_SYMBOL(sock_no_socketpair); | ||
1589 | 1622 | ||
1590 | int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) | 1623 | int sock_no_accept(struct socket *sock, struct socket *newsock, int flags) |
1591 | { | 1624 | { |
1592 | return -EOPNOTSUPP; | 1625 | return -EOPNOTSUPP; |
1593 | } | 1626 | } |
1627 | EXPORT_SYMBOL(sock_no_accept); | ||
1594 | 1628 | ||
1595 | int sock_no_getname(struct socket *sock, struct sockaddr *saddr, | 1629 | int sock_no_getname(struct socket *sock, struct sockaddr *saddr, |
1596 | int *len, int peer) | 1630 | int *len, int peer) |
1597 | { | 1631 | { |
1598 | return -EOPNOTSUPP; | 1632 | return -EOPNOTSUPP; |
1599 | } | 1633 | } |
1634 | EXPORT_SYMBOL(sock_no_getname); | ||
1600 | 1635 | ||
1601 | unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt) | 1636 | unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt) |
1602 | { | 1637 | { |
1603 | return 0; | 1638 | return 0; |
1604 | } | 1639 | } |
1640 | EXPORT_SYMBOL(sock_no_poll); | ||
1605 | 1641 | ||
1606 | int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 1642 | int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1607 | { | 1643 | { |
1608 | return -EOPNOTSUPP; | 1644 | return -EOPNOTSUPP; |
1609 | } | 1645 | } |
1646 | EXPORT_SYMBOL(sock_no_ioctl); | ||
1610 | 1647 | ||
1611 | int sock_no_listen(struct socket *sock, int backlog) | 1648 | int sock_no_listen(struct socket *sock, int backlog) |
1612 | { | 1649 | { |
1613 | return -EOPNOTSUPP; | 1650 | return -EOPNOTSUPP; |
1614 | } | 1651 | } |
1652 | EXPORT_SYMBOL(sock_no_listen); | ||
1615 | 1653 | ||
1616 | int sock_no_shutdown(struct socket *sock, int how) | 1654 | int sock_no_shutdown(struct socket *sock, int how) |
1617 | { | 1655 | { |
1618 | return -EOPNOTSUPP; | 1656 | return -EOPNOTSUPP; |
1619 | } | 1657 | } |
1658 | EXPORT_SYMBOL(sock_no_shutdown); | ||
1620 | 1659 | ||
1621 | int sock_no_setsockopt(struct socket *sock, int level, int optname, | 1660 | int sock_no_setsockopt(struct socket *sock, int level, int optname, |
1622 | char __user *optval, int optlen) | 1661 | char __user *optval, int optlen) |
1623 | { | 1662 | { |
1624 | return -EOPNOTSUPP; | 1663 | return -EOPNOTSUPP; |
1625 | } | 1664 | } |
1665 | EXPORT_SYMBOL(sock_no_setsockopt); | ||
1626 | 1666 | ||
1627 | int sock_no_getsockopt(struct socket *sock, int level, int optname, | 1667 | int sock_no_getsockopt(struct socket *sock, int level, int optname, |
1628 | char __user *optval, int __user *optlen) | 1668 | char __user *optval, int __user *optlen) |
1629 | { | 1669 | { |
1630 | return -EOPNOTSUPP; | 1670 | return -EOPNOTSUPP; |
1631 | } | 1671 | } |
1672 | EXPORT_SYMBOL(sock_no_getsockopt); | ||
1632 | 1673 | ||
1633 | int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | 1674 | int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, |
1634 | size_t len) | 1675 | size_t len) |
1635 | { | 1676 | { |
1636 | return -EOPNOTSUPP; | 1677 | return -EOPNOTSUPP; |
1637 | } | 1678 | } |
1679 | EXPORT_SYMBOL(sock_no_sendmsg); | ||
1638 | 1680 | ||
1639 | int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, | 1681 | int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, |
1640 | size_t len, int flags) | 1682 | size_t len, int flags) |
1641 | { | 1683 | { |
1642 | return -EOPNOTSUPP; | 1684 | return -EOPNOTSUPP; |
1643 | } | 1685 | } |
1686 | EXPORT_SYMBOL(sock_no_recvmsg); | ||
1644 | 1687 | ||
1645 | int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) | 1688 | int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) |
1646 | { | 1689 | { |
1647 | /* Mirror missing mmap method error code */ | 1690 | /* Mirror missing mmap method error code */ |
1648 | return -ENODEV; | 1691 | return -ENODEV; |
1649 | } | 1692 | } |
1693 | EXPORT_SYMBOL(sock_no_mmap); | ||
1650 | 1694 | ||
1651 | ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) | 1695 | ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) |
1652 | { | 1696 | { |
@@ -1660,6 +1704,7 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, siz | |||
1660 | kunmap(page); | 1704 | kunmap(page); |
1661 | return res; | 1705 | return res; |
1662 | } | 1706 | } |
1707 | EXPORT_SYMBOL(sock_no_sendpage); | ||
1663 | 1708 | ||
1664 | /* | 1709 | /* |
1665 | * Default Socket Callbacks | 1710 | * Default Socket Callbacks |
@@ -1723,6 +1768,7 @@ void sk_send_sigurg(struct sock *sk) | |||
1723 | if (send_sigurg(&sk->sk_socket->file->f_owner)) | 1768 | if (send_sigurg(&sk->sk_socket->file->f_owner)) |
1724 | sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); | 1769 | sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); |
1725 | } | 1770 | } |
1771 | EXPORT_SYMBOL(sk_send_sigurg); | ||
1726 | 1772 | ||
1727 | void sk_reset_timer(struct sock *sk, struct timer_list* timer, | 1773 | void sk_reset_timer(struct sock *sk, struct timer_list* timer, |
1728 | unsigned long expires) | 1774 | unsigned long expires) |
@@ -1730,7 +1776,6 @@ void sk_reset_timer(struct sock *sk, struct timer_list* timer, | |||
1730 | if (!mod_timer(timer, expires)) | 1776 | if (!mod_timer(timer, expires)) |
1731 | sock_hold(sk); | 1777 | sock_hold(sk); |
1732 | } | 1778 | } |
1733 | |||
1734 | EXPORT_SYMBOL(sk_reset_timer); | 1779 | EXPORT_SYMBOL(sk_reset_timer); |
1735 | 1780 | ||
1736 | void sk_stop_timer(struct sock *sk, struct timer_list* timer) | 1781 | void sk_stop_timer(struct sock *sk, struct timer_list* timer) |
@@ -1738,7 +1783,6 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer) | |||
1738 | if (timer_pending(timer) && del_timer(timer)) | 1783 | if (timer_pending(timer) && del_timer(timer)) |
1739 | __sock_put(sk); | 1784 | __sock_put(sk); |
1740 | } | 1785 | } |
1741 | |||
1742 | EXPORT_SYMBOL(sk_stop_timer); | 1786 | EXPORT_SYMBOL(sk_stop_timer); |
1743 | 1787 | ||
1744 | void sock_init_data(struct socket *sock, struct sock *sk) | 1788 | void sock_init_data(struct socket *sock, struct sock *sk) |
@@ -1795,8 +1839,10 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1795 | sk->sk_stamp = ktime_set(-1L, 0); | 1839 | sk->sk_stamp = ktime_set(-1L, 0); |
1796 | 1840 | ||
1797 | atomic_set(&sk->sk_refcnt, 1); | 1841 | atomic_set(&sk->sk_refcnt, 1); |
1842 | atomic_set(&sk->sk_wmem_alloc, 1); | ||
1798 | atomic_set(&sk->sk_drops, 0); | 1843 | atomic_set(&sk->sk_drops, 0); |
1799 | } | 1844 | } |
1845 | EXPORT_SYMBOL(sock_init_data); | ||
1800 | 1846 | ||
1801 | void lock_sock_nested(struct sock *sk, int subclass) | 1847 | void lock_sock_nested(struct sock *sk, int subclass) |
1802 | { | 1848 | { |
@@ -1812,7 +1858,6 @@ void lock_sock_nested(struct sock *sk, int subclass) | |||
1812 | mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); | 1858 | mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); |
1813 | local_bh_enable(); | 1859 | local_bh_enable(); |
1814 | } | 1860 | } |
1815 | |||
1816 | EXPORT_SYMBOL(lock_sock_nested); | 1861 | EXPORT_SYMBOL(lock_sock_nested); |
1817 | 1862 | ||
1818 | void release_sock(struct sock *sk) | 1863 | void release_sock(struct sock *sk) |
@@ -1895,7 +1940,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, | |||
1895 | 1940 | ||
1896 | return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); | 1941 | return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); |
1897 | } | 1942 | } |
1898 | |||
1899 | EXPORT_SYMBOL(sock_common_getsockopt); | 1943 | EXPORT_SYMBOL(sock_common_getsockopt); |
1900 | 1944 | ||
1901 | #ifdef CONFIG_COMPAT | 1945 | #ifdef CONFIG_COMPAT |
@@ -1925,7 +1969,6 @@ int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1925 | msg->msg_namelen = addr_len; | 1969 | msg->msg_namelen = addr_len; |
1926 | return err; | 1970 | return err; |
1927 | } | 1971 | } |
1928 | |||
1929 | EXPORT_SYMBOL(sock_common_recvmsg); | 1972 | EXPORT_SYMBOL(sock_common_recvmsg); |
1930 | 1973 | ||
1931 | /* | 1974 | /* |
@@ -1938,7 +1981,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname, | |||
1938 | 1981 | ||
1939 | return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); | 1982 | return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); |
1940 | } | 1983 | } |
1941 | |||
1942 | EXPORT_SYMBOL(sock_common_setsockopt); | 1984 | EXPORT_SYMBOL(sock_common_setsockopt); |
1943 | 1985 | ||
1944 | #ifdef CONFIG_COMPAT | 1986 | #ifdef CONFIG_COMPAT |
@@ -1989,7 +2031,6 @@ void sk_common_release(struct sock *sk) | |||
1989 | sk_refcnt_debug_release(sk); | 2031 | sk_refcnt_debug_release(sk); |
1990 | sock_put(sk); | 2032 | sock_put(sk); |
1991 | } | 2033 | } |
1992 | |||
1993 | EXPORT_SYMBOL(sk_common_release); | 2034 | EXPORT_SYMBOL(sk_common_release); |
1994 | 2035 | ||
1995 | static DEFINE_RWLOCK(proto_list_lock); | 2036 | static DEFINE_RWLOCK(proto_list_lock); |
@@ -2171,7 +2212,6 @@ out_free_sock_slab: | |||
2171 | out: | 2212 | out: |
2172 | return -ENOBUFS; | 2213 | return -ENOBUFS; |
2173 | } | 2214 | } |
2174 | |||
2175 | EXPORT_SYMBOL(proto_register); | 2215 | EXPORT_SYMBOL(proto_register); |
2176 | 2216 | ||
2177 | void proto_unregister(struct proto *prot) | 2217 | void proto_unregister(struct proto *prot) |
@@ -2198,7 +2238,6 @@ void proto_unregister(struct proto *prot) | |||
2198 | prot->twsk_prot->twsk_slab = NULL; | 2238 | prot->twsk_prot->twsk_slab = NULL; |
2199 | } | 2239 | } |
2200 | } | 2240 | } |
2201 | |||
2202 | EXPORT_SYMBOL(proto_unregister); | 2241 | EXPORT_SYMBOL(proto_unregister); |
2203 | 2242 | ||
2204 | #ifdef CONFIG_PROC_FS | 2243 | #ifdef CONFIG_PROC_FS |
@@ -2324,33 +2363,3 @@ static int __init proto_init(void) | |||
2324 | subsys_initcall(proto_init); | 2363 | subsys_initcall(proto_init); |
2325 | 2364 | ||
2326 | #endif /* PROC_FS */ | 2365 | #endif /* PROC_FS */ |
2327 | |||
2328 | EXPORT_SYMBOL(sk_alloc); | ||
2329 | EXPORT_SYMBOL(sk_free); | ||
2330 | EXPORT_SYMBOL(sk_send_sigurg); | ||
2331 | EXPORT_SYMBOL(sock_alloc_send_skb); | ||
2332 | EXPORT_SYMBOL(sock_init_data); | ||
2333 | EXPORT_SYMBOL(sock_kfree_s); | ||
2334 | EXPORT_SYMBOL(sock_kmalloc); | ||
2335 | EXPORT_SYMBOL(sock_no_accept); | ||
2336 | EXPORT_SYMBOL(sock_no_bind); | ||
2337 | EXPORT_SYMBOL(sock_no_connect); | ||
2338 | EXPORT_SYMBOL(sock_no_getname); | ||
2339 | EXPORT_SYMBOL(sock_no_getsockopt); | ||
2340 | EXPORT_SYMBOL(sock_no_ioctl); | ||
2341 | EXPORT_SYMBOL(sock_no_listen); | ||
2342 | EXPORT_SYMBOL(sock_no_mmap); | ||
2343 | EXPORT_SYMBOL(sock_no_poll); | ||
2344 | EXPORT_SYMBOL(sock_no_recvmsg); | ||
2345 | EXPORT_SYMBOL(sock_no_sendmsg); | ||
2346 | EXPORT_SYMBOL(sock_no_sendpage); | ||
2347 | EXPORT_SYMBOL(sock_no_setsockopt); | ||
2348 | EXPORT_SYMBOL(sock_no_shutdown); | ||
2349 | EXPORT_SYMBOL(sock_no_socketpair); | ||
2350 | EXPORT_SYMBOL(sock_rfree); | ||
2351 | EXPORT_SYMBOL(sock_setsockopt); | ||
2352 | EXPORT_SYMBOL(sock_wfree); | ||
2353 | EXPORT_SYMBOL(sock_wmalloc); | ||
2354 | EXPORT_SYMBOL(sock_i_uid); | ||
2355 | EXPORT_SYMBOL(sock_i_ino); | ||
2356 | EXPORT_SYMBOL(sysctl_optmem_max); | ||
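The tail-of-file EXPORT_SYMBOL block is dissolved into per-function exports, which is why so many hunks above add a single EXPORT_SYMBOL line right under a closing brace. The preferred pattern, shown with one of the functions from this file:

	int sock_no_listen(struct socket *sock, int backlog)
	{
		return -EOPNOTSUPP;
	}
	EXPORT_SYMBOL(sock_no_listen);

Keeping the export next to the definition makes it obvious at a glance which functions are part of the module-visible API.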
diff --git a/net/core/user_dma.c b/net/core/user_dma.c index 164b090d5ac3..25d717ebc92e 100644 --- a/net/core/user_dma.c +++ b/net/core/user_dma.c | |||
@@ -51,6 +51,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan, | |||
51 | { | 51 | { |
52 | int start = skb_headlen(skb); | 52 | int start = skb_headlen(skb); |
53 | int i, copy = start - offset; | 53 | int i, copy = start - offset; |
54 | struct sk_buff *frag_iter; | ||
54 | dma_cookie_t cookie = 0; | 55 | dma_cookie_t cookie = 0; |
55 | 56 | ||
56 | /* Copy header. */ | 57 | /* Copy header. */ |
@@ -94,31 +95,28 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan, | |||
94 | start = end; | 95 | start = end; |
95 | } | 96 | } |
96 | 97 | ||
97 | if (skb_shinfo(skb)->frag_list) { | 98 | skb_walk_frags(skb, frag_iter) { |
98 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 99 | int end; |
99 | 100 | ||
100 | for (; list; list = list->next) { | 101 | WARN_ON(start > offset + len); |
101 | int end; | 102 | |
102 | 103 | end = start + frag_iter->len; | |
103 | WARN_ON(start > offset + len); | 104 | copy = end - offset; |
104 | 105 | if (copy > 0) { | |
105 | end = start + list->len; | 106 | if (copy > len) |
106 | copy = end - offset; | 107 | copy = len; |
107 | if (copy > 0) { | 108 | cookie = dma_skb_copy_datagram_iovec(chan, frag_iter, |
108 | if (copy > len) | 109 | offset - start, |
109 | copy = len; | 110 | to, copy, |
110 | cookie = dma_skb_copy_datagram_iovec(chan, list, | 111 | pinned_list); |
111 | offset - start, to, copy, | 112 | if (cookie < 0) |
112 | pinned_list); | 113 | goto fault; |
113 | if (cookie < 0) | 114 | len -= copy; |
114 | goto fault; | 115 | if (len == 0) |
115 | len -= copy; | 116 | goto end; |
116 | if (len == 0) | 117 | offset += copy; |
117 | goto end; | ||
118 | offset += copy; | ||
119 | } | ||
120 | start = end; | ||
121 | } | 118 | } |
119 | start = end; | ||
122 | } | 120 | } |
123 | 121 | ||
124 | end: | 122 | end: |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index d1dd95289b89..a0a36c9e6cce 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -452,7 +452,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, | |||
452 | struct sk_buff *skb) | 452 | struct sk_buff *skb) |
453 | { | 453 | { |
454 | struct rtable *rt; | 454 | struct rtable *rt; |
455 | struct flowi fl = { .oif = skb->rtable->rt_iif, | 455 | struct flowi fl = { .oif = skb_rtable(skb)->rt_iif, |
456 | .nl_u = { .ip4_u = | 456 | .nl_u = { .ip4_u = |
457 | { .daddr = ip_hdr(skb)->saddr, | 457 | { .daddr = ip_hdr(skb)->saddr, |
458 | .saddr = ip_hdr(skb)->daddr, | 458 | .saddr = ip_hdr(skb)->daddr, |
@@ -507,14 +507,14 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
507 | const struct iphdr *rxiph; | 507 | const struct iphdr *rxiph; |
508 | struct sk_buff *skb; | 508 | struct sk_buff *skb; |
509 | struct dst_entry *dst; | 509 | struct dst_entry *dst; |
510 | struct net *net = dev_net(rxskb->dst->dev); | 510 | struct net *net = dev_net(skb_dst(rxskb)->dev); |
511 | struct sock *ctl_sk = net->dccp.v4_ctl_sk; | 511 | struct sock *ctl_sk = net->dccp.v4_ctl_sk; |
512 | 512 | ||
513 | /* Never send a reset in response to a reset. */ | 513 | /* Never send a reset in response to a reset. */ |
514 | if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) | 514 | if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) |
515 | return; | 515 | return; |
516 | 516 | ||
517 | if (rxskb->rtable->rt_type != RTN_LOCAL) | 517 | if (skb_rtable(rxskb)->rt_type != RTN_LOCAL) |
518 | return; | 518 | return; |
519 | 519 | ||
520 | dst = dccp_v4_route_skb(net, ctl_sk, rxskb); | 520 | dst = dccp_v4_route_skb(net, ctl_sk, rxskb); |
@@ -528,7 +528,7 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
528 | rxiph = ip_hdr(rxskb); | 528 | rxiph = ip_hdr(rxskb); |
529 | dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr, | 529 | dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr, |
530 | rxiph->daddr); | 530 | rxiph->daddr); |
531 | skb->dst = dst_clone(dst); | 531 | skb_dst_set(skb, dst_clone(dst)); |
532 | 532 | ||
533 | bh_lock_sock(ctl_sk); | 533 | bh_lock_sock(ctl_sk); |
534 | err = ip_build_and_send_pkt(skb, ctl_sk, | 534 | err = ip_build_and_send_pkt(skb, ctl_sk, |
@@ -567,7 +567,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
567 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | 567 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); |
568 | 568 | ||
569 | /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ | 569 | /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ |
570 | if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | 570 | if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
571 | return 0; /* discard, don't send a reset here */ | 571 | return 0; /* discard, don't send a reset here */ |
572 | 572 | ||
573 | if (dccp_bad_service_code(sk, service)) { | 573 | if (dccp_bad_service_code(sk, service)) { |
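From the dccp hunks onward, direct skb->dst pokes are funneled through accessors: skb_dst() to read, skb_dst_set() to assign, skb_rtable() for the dst viewed as an IPv4 route, and skb_dst_drop() (used in the decnet hunks below) to release and clear. Hedged sketches consistent with the call sites; the actual field representation behind them is an implementation detail of this series:

	static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
	{
		return skb->dst;		/* field layout assumed */
	}

	static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
	{
		skb->dst = dst;
	}

	static inline void skb_dst_drop(struct sk_buff *skb)
	{
		dst_release(skb_dst(skb));
		skb_dst_set(skb, NULL);
	}

	static inline struct rtable *skb_rtable(const struct sk_buff *skb)
	{
		return (struct rtable *)skb_dst(skb);
	}

Routing code gains one indirection point, so the dst pointer can later change representation without touching every protocol.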
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index b963f35c65f6..05ea7440d9e5 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -314,8 +314,9 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
314 | struct ipv6hdr *rxip6h; | 314 | struct ipv6hdr *rxip6h; |
315 | struct sk_buff *skb; | 315 | struct sk_buff *skb; |
316 | struct flowi fl; | 316 | struct flowi fl; |
317 | struct net *net = dev_net(rxskb->dst->dev); | 317 | struct net *net = dev_net(skb_dst(rxskb)->dev); |
318 | struct sock *ctl_sk = net->dccp.v6_ctl_sk; | 318 | struct sock *ctl_sk = net->dccp.v6_ctl_sk; |
319 | struct dst_entry *dst; | ||
319 | 320 | ||
320 | if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) | 321 | if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) |
321 | return; | 322 | return; |
@@ -342,8 +343,9 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | |||
342 | security_skb_classify_flow(rxskb, &fl); | 343 | security_skb_classify_flow(rxskb, &fl); |
343 | 344 | ||
344 | /* sk = NULL, but it is safe for now. RST socket required. */ | 345 | /* sk = NULL, but it is safe for now. RST socket required. */ |
345 | if (!ip6_dst_lookup(ctl_sk, &skb->dst, &fl)) { | 346 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { |
346 | if (xfrm_lookup(net, &skb->dst, &fl, NULL, 0) >= 0) { | 347 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { |
348 | skb_dst_set(skb, dst); | ||
347 | ip6_xmit(ctl_sk, skb, &fl, NULL, 0); | 349 | ip6_xmit(ctl_sk, skb, &fl, NULL, 0); |
348 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | 350 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); |
349 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | 351 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); |
diff --git a/net/dccp/output.c b/net/dccp/output.c index 36bcc00654d3..c0e88c16d088 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -350,7 +350,7 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
350 | /* Reserve space for headers. */ | 350 | /* Reserve space for headers. */ |
351 | skb_reserve(skb, sk->sk_prot->max_header); | 351 | skb_reserve(skb, sk->sk_prot->max_header); |
352 | 352 | ||
353 | skb->dst = dst_clone(dst); | 353 | skb_dst_set(skb, dst_clone(dst)); |
354 | 354 | ||
355 | dreq = dccp_rsk(req); | 355 | dreq = dccp_rsk(req); |
356 | if (inet_rsk(req)->acked) /* increase ISS upon retransmission */ | 356 | if (inet_rsk(req)->acked) /* increase ISS upon retransmission */ |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 9647d911f916..a5e3a593e472 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -1075,6 +1075,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1075 | int err = 0; | 1075 | int err = 0; |
1076 | unsigned char type; | 1076 | unsigned char type; |
1077 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); | 1077 | long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); |
1078 | struct dst_entry *dst; | ||
1078 | 1079 | ||
1079 | lock_sock(sk); | 1080 | lock_sock(sk); |
1080 | 1081 | ||
@@ -1102,8 +1103,9 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1102 | } | 1103 | } |
1103 | release_sock(sk); | 1104 | release_sock(sk); |
1104 | 1105 | ||
1105 | dst_release(xchg(&newsk->sk_dst_cache, skb->dst)); | 1106 | dst = skb_dst(skb); |
1106 | skb->dst = NULL; | 1107 | dst_release(xchg(&newsk->sk_dst_cache, dst)); |
1108 | skb_dst_set(skb, NULL); | ||
1107 | 1109 | ||
1108 | DN_SK(newsk)->state = DN_CR; | 1110 | DN_SK(newsk)->state = DN_CR; |
1109 | DN_SK(newsk)->addrrem = cb->src_port; | 1111 | DN_SK(newsk)->addrrem = cb->src_port; |
@@ -1250,14 +1252,8 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
1250 | if (skb) { | 1252 | if (skb) { |
1251 | amount = skb->len; | 1253 | amount = skb->len; |
1252 | } else { | 1254 | } else { |
1253 | skb = sk->sk_receive_queue.next; | 1255 | skb_queue_walk(&sk->sk_receive_queue, skb) |
1254 | for (;;) { | ||
1255 | if (skb == | ||
1256 | (struct sk_buff *)&sk->sk_receive_queue) | ||
1257 | break; | ||
1258 | amount += skb->len; | 1256 | amount += skb->len; |
1259 | skb = skb->next; | ||
1260 | } | ||
1261 | } | 1257 | } |
1262 | release_sock(sk); | 1258 | release_sock(sk); |
1263 | err = put_user(amount, (int __user *)arg); | 1259 | err = put_user(amount, (int __user *)arg); |
@@ -1644,13 +1640,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us | |||
1644 | 1640 | ||
1645 | static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) | 1641 | static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) |
1646 | { | 1642 | { |
1647 | struct sk_buff *skb = q->next; | 1643 | struct sk_buff *skb; |
1648 | int len = 0; | 1644 | int len = 0; |
1649 | 1645 | ||
1650 | if (flags & MSG_OOB) | 1646 | if (flags & MSG_OOB) |
1651 | return !skb_queue_empty(q) ? 1 : 0; | 1647 | return !skb_queue_empty(q) ? 1 : 0; |
1652 | 1648 | ||
1653 | while(skb != (struct sk_buff *)q) { | 1649 | skb_queue_walk(q, skb) { |
1654 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 1650 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
1655 | len += skb->len; | 1651 | len += skb->len; |
1656 | 1652 | ||
@@ -1666,8 +1662,6 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int | |||
1666 | /* minimum data length for read exceeded */ | 1662 | /* minimum data length for read exceeded */ |
1667 | if (len >= target) | 1663 | if (len >= target) |
1668 | return 1; | 1664 | return 1; |
1669 | |||
1670 | skb = skb->next; | ||
1671 | } | 1665 | } |
1672 | 1666 | ||
1673 | return 0; | 1667 | return 0; |
@@ -1683,7 +1677,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1683 | size_t target = size > 1 ? 1 : 0; | 1677 | size_t target = size > 1 ? 1 : 0; |
1684 | size_t copied = 0; | 1678 | size_t copied = 0; |
1685 | int rv = 0; | 1679 | int rv = 0; |
1686 | struct sk_buff *skb, *nskb; | 1680 | struct sk_buff *skb, *n; |
1687 | struct dn_skb_cb *cb = NULL; | 1681 | struct dn_skb_cb *cb = NULL; |
1688 | unsigned char eor = 0; | 1682 | unsigned char eor = 0; |
1689 | long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 1683 | long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
@@ -1758,7 +1752,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1758 | finish_wait(sk->sk_sleep, &wait); | 1752 | finish_wait(sk->sk_sleep, &wait); |
1759 | } | 1753 | } |
1760 | 1754 | ||
1761 | for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) { | 1755 | skb_queue_walk_safe(queue, skb, n) { |
1762 | unsigned int chunk = skb->len; | 1756 | unsigned int chunk = skb->len; |
1763 | cb = DN_SKB_CB(skb); | 1757 | cb = DN_SKB_CB(skb); |
1764 | 1758 | ||
@@ -1775,7 +1769,6 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1775 | skb_pull(skb, chunk); | 1769 | skb_pull(skb, chunk); |
1776 | 1770 | ||
1777 | eor = cb->nsp_flags & 0x40; | 1771 | eor = cb->nsp_flags & 0x40; |
1778 | nskb = skb->next; | ||
1779 | 1772 | ||
1780 | if (skb->len == 0) { | 1773 | if (skb->len == 0) { |
1781 | skb_unlink(skb, queue); | 1774 | skb_unlink(skb, queue); |
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 05b5aa05e50e..923786bd6d01 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c | |||
@@ -204,7 +204,7 @@ static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb) | |||
204 | 204 | ||
205 | static int dn_neigh_output_packet(struct sk_buff *skb) | 205 | static int dn_neigh_output_packet(struct sk_buff *skb) |
206 | { | 206 | { |
207 | struct dst_entry *dst = skb->dst; | 207 | struct dst_entry *dst = skb_dst(skb); |
208 | struct dn_route *rt = (struct dn_route *)dst; | 208 | struct dn_route *rt = (struct dn_route *)dst; |
209 | struct neighbour *neigh = dst->neighbour; | 209 | struct neighbour *neigh = dst->neighbour; |
210 | struct net_device *dev = neigh->dev; | 210 | struct net_device *dev = neigh->dev; |
@@ -224,7 +224,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb) | |||
224 | 224 | ||
225 | static int dn_long_output(struct sk_buff *skb) | 225 | static int dn_long_output(struct sk_buff *skb) |
226 | { | 226 | { |
227 | struct dst_entry *dst = skb->dst; | 227 | struct dst_entry *dst = skb_dst(skb); |
228 | struct neighbour *neigh = dst->neighbour; | 228 | struct neighbour *neigh = dst->neighbour; |
229 | struct net_device *dev = neigh->dev; | 229 | struct net_device *dev = neigh->dev; |
230 | int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; | 230 | int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; |
@@ -270,7 +270,7 @@ static int dn_long_output(struct sk_buff *skb) | |||
270 | 270 | ||
271 | static int dn_short_output(struct sk_buff *skb) | 271 | static int dn_short_output(struct sk_buff *skb) |
272 | { | 272 | { |
273 | struct dst_entry *dst = skb->dst; | 273 | struct dst_entry *dst = skb_dst(skb); |
274 | struct neighbour *neigh = dst->neighbour; | 274 | struct neighbour *neigh = dst->neighbour; |
275 | struct net_device *dev = neigh->dev; | 275 | struct net_device *dev = neigh->dev; |
276 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; | 276 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; |
@@ -313,7 +313,7 @@ static int dn_short_output(struct sk_buff *skb) | |||
313 | */ | 313 | */ |
314 | static int dn_phase3_output(struct sk_buff *skb) | 314 | static int dn_phase3_output(struct sk_buff *skb) |
315 | { | 315 | { |
316 | struct dst_entry *dst = skb->dst; | 316 | struct dst_entry *dst = skb_dst(skb); |
317 | struct neighbour *neigh = dst->neighbour; | 317 | struct neighbour *neigh = dst->neighbour; |
318 | struct net_device *dev = neigh->dev; | 318 | struct net_device *dev = neigh->dev; |
319 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; | 319 | int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; |
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 2013c25b7f5a..a65e929ce76c 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c | |||
@@ -85,7 +85,7 @@ static void dn_nsp_send(struct sk_buff *skb) | |||
85 | dst = sk_dst_check(sk, 0); | 85 | dst = sk_dst_check(sk, 0); |
86 | if (dst) { | 86 | if (dst) { |
87 | try_again: | 87 | try_again: |
88 | skb->dst = dst; | 88 | skb_dst_set(skb, dst); |
89 | dst_output(skb); | 89 | dst_output(skb); |
90 | return; | 90 | return; |
91 | } | 91 | } |
@@ -382,7 +382,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff | |||
382 | { | 382 | { |
383 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 383 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
384 | struct dn_scp *scp = DN_SK(sk); | 384 | struct dn_scp *scp = DN_SK(sk); |
385 | struct sk_buff *skb2, *list, *ack = NULL; | 385 | struct sk_buff *skb2, *n, *ack = NULL; |
386 | int wakeup = 0; | 386 | int wakeup = 0; |
387 | int try_retrans = 0; | 387 | int try_retrans = 0; |
388 | unsigned long reftime = cb->stamp; | 388 | unsigned long reftime = cb->stamp; |
@@ -390,9 +390,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff | |||
390 | unsigned short xmit_count; | 390 | unsigned short xmit_count; |
391 | unsigned short segnum; | 391 | unsigned short segnum; |
392 | 392 | ||
393 | skb2 = q->next; | 393 | skb_queue_walk_safe(q, skb2, n) { |
394 | list = (struct sk_buff *)q; | ||
395 | while(list != skb2) { | ||
396 | struct dn_skb_cb *cb2 = DN_SKB_CB(skb2); | 394 | struct dn_skb_cb *cb2 = DN_SKB_CB(skb2); |
397 | 395 | ||
398 | if (dn_before_or_equal(cb2->segnum, acknum)) | 396 | if (dn_before_or_equal(cb2->segnum, acknum)) |
@@ -400,8 +398,6 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff | |||
400 | 398 | ||
401 | /* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */ | 399 | /* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */ |
402 | 400 | ||
403 | skb2 = skb2->next; | ||
404 | |||
405 | if (ack == NULL) | 401 | if (ack == NULL) |
406 | continue; | 402 | continue; |
407 | 403 | ||
@@ -586,7 +582,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, | |||
586 | * to be able to send disc packets out which have no socket | 582 | * to be able to send disc packets out which have no socket |
587 | * associations. | 583 | * associations. |
588 | */ | 584 | */ |
589 | skb->dst = dst_clone(dst); | 585 | skb_dst_set(skb, dst_clone(dst)); |
590 | dst_output(skb); | 586 | dst_output(skb); |
591 | } | 587 | } |
592 | 588 | ||
@@ -615,7 +611,7 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, | |||
615 | int ddl = 0; | 611 | int ddl = 0; |
616 | gfp_t gfp = GFP_ATOMIC; | 612 | gfp_t gfp = GFP_ATOMIC; |
617 | 613 | ||
618 | dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, | 614 | dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl, |
619 | NULL, cb->src_port, cb->dst_port); | 615 | NULL, cb->src_port, cb->dst_port); |
620 | } | 616 | } |
621 | 617 | ||
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 0cc4394117df..1d6ca8a98dc6 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -678,7 +678,7 @@ out: | |||
678 | 678 | ||
679 | static int dn_output(struct sk_buff *skb) | 679 | static int dn_output(struct sk_buff *skb) |
680 | { | 680 | { |
681 | struct dst_entry *dst = skb->dst; | 681 | struct dst_entry *dst = skb_dst(skb); |
682 | struct dn_route *rt = (struct dn_route *)dst; | 682 | struct dn_route *rt = (struct dn_route *)dst; |
683 | struct net_device *dev = dst->dev; | 683 | struct net_device *dev = dst->dev; |
684 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 684 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
@@ -717,7 +717,7 @@ error: | |||
717 | static int dn_forward(struct sk_buff *skb) | 717 | static int dn_forward(struct sk_buff *skb) |
718 | { | 718 | { |
719 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 719 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
720 | struct dst_entry *dst = skb->dst; | 720 | struct dst_entry *dst = skb_dst(skb); |
721 | struct dn_dev *dn_db = dst->dev->dn_ptr; | 721 | struct dn_dev *dn_db = dst->dev->dn_ptr; |
722 | struct dn_route *rt; | 722 | struct dn_route *rt; |
723 | struct neighbour *neigh = dst->neighbour; | 723 | struct neighbour *neigh = dst->neighbour; |
@@ -730,7 +730,7 @@ static int dn_forward(struct sk_buff *skb) | |||
730 | goto drop; | 730 | goto drop; |
731 | 731 | ||
732 | /* Ensure that we have enough space for headers */ | 732 | /* Ensure that we have enough space for headers */ |
733 | rt = (struct dn_route *)skb->dst; | 733 | rt = (struct dn_route *)skb_dst(skb); |
734 | header_len = dn_db->use_long ? 21 : 6; | 734 | header_len = dn_db->use_long ? 21 : 6; |
735 | if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len)) | 735 | if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len)) |
736 | goto drop; | 736 | goto drop; |
@@ -1392,7 +1392,8 @@ make_route: | |||
1392 | goto e_neighbour; | 1392 | goto e_neighbour; |
1393 | 1393 | ||
1394 | hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); | 1394 | hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); |
1395 | dn_insert_route(rt, hash, (struct dn_route **)&skb->dst); | 1395 | dn_insert_route(rt, hash, &rt); |
1396 | skb_dst_set(skb, &rt->u.dst); | ||
1396 | 1397 | ||
1397 | done: | 1398 | done: |
1398 | if (neigh) | 1399 | if (neigh) |
@@ -1424,7 +1425,7 @@ static int dn_route_input(struct sk_buff *skb) | |||
1424 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 1425 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
1425 | unsigned hash = dn_hash(cb->src, cb->dst); | 1426 | unsigned hash = dn_hash(cb->src, cb->dst); |
1426 | 1427 | ||
1427 | if (skb->dst) | 1428 | if (skb_dst(skb)) |
1428 | return 0; | 1429 | return 0; |
1429 | 1430 | ||
1430 | rcu_read_lock(); | 1431 | rcu_read_lock(); |
@@ -1437,7 +1438,7 @@ static int dn_route_input(struct sk_buff *skb) | |||
1437 | (rt->fl.iif == cb->iif)) { | 1438 | (rt->fl.iif == cb->iif)) { |
1438 | dst_use(&rt->u.dst, jiffies); | 1439 | dst_use(&rt->u.dst, jiffies); |
1439 | rcu_read_unlock(); | 1440 | rcu_read_unlock(); |
1440 | skb->dst = (struct dst_entry *)rt; | 1441 | skb_dst_set(skb, (struct dst_entry *)rt); |
1441 | return 0; | 1442 | return 0; |
1442 | } | 1443 | } |
1443 | } | 1444 | } |
@@ -1449,7 +1450,7 @@ static int dn_route_input(struct sk_buff *skb) | |||
1449 | static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | 1450 | static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, |
1450 | int event, int nowait, unsigned int flags) | 1451 | int event, int nowait, unsigned int flags) |
1451 | { | 1452 | { |
1452 | struct dn_route *rt = (struct dn_route *)skb->dst; | 1453 | struct dn_route *rt = (struct dn_route *)skb_dst(skb); |
1453 | struct rtmsg *r; | 1454 | struct rtmsg *r; |
1454 | struct nlmsghdr *nlh; | 1455 | struct nlmsghdr *nlh; |
1455 | unsigned char *b = skb_tail_pointer(skb); | 1456 | unsigned char *b = skb_tail_pointer(skb); |
@@ -1554,7 +1555,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void | |||
1554 | err = dn_route_input(skb); | 1555 | err = dn_route_input(skb); |
1555 | local_bh_enable(); | 1556 | local_bh_enable(); |
1556 | memset(cb, 0, sizeof(struct dn_skb_cb)); | 1557 | memset(cb, 0, sizeof(struct dn_skb_cb)); |
1557 | rt = (struct dn_route *)skb->dst; | 1558 | rt = (struct dn_route *)skb_dst(skb); |
1558 | if (!err && -rt->u.dst.error) | 1559 | if (!err && -rt->u.dst.error) |
1559 | err = rt->u.dst.error; | 1560 | err = rt->u.dst.error; |
1560 | } else { | 1561 | } else { |
@@ -1570,7 +1571,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void | |||
1570 | skb->dev = NULL; | 1571 | skb->dev = NULL; |
1571 | if (err) | 1572 | if (err) |
1572 | goto out_free; | 1573 | goto out_free; |
1573 | skb->dst = &rt->u.dst; | 1574 | skb_dst_set(skb, &rt->u.dst); |
1574 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 1575 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
1575 | rt->rt_flags |= RTCF_NOTIFY; | 1576 | rt->rt_flags |= RTCF_NOTIFY; |
1576 | 1577 | ||
@@ -1622,15 +1623,15 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1622 | rt = rcu_dereference(rt->u.dst.dn_next), idx++) { | 1623 | rt = rcu_dereference(rt->u.dst.dn_next), idx++) { |
1623 | if (idx < s_idx) | 1624 | if (idx < s_idx) |
1624 | continue; | 1625 | continue; |
1625 | skb->dst = dst_clone(&rt->u.dst); | 1626 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
1626 | if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, | 1627 | if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, |
1627 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 1628 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
1628 | 1, NLM_F_MULTI) <= 0) { | 1629 | 1, NLM_F_MULTI) <= 0) { |
1629 | dst_release(xchg(&skb->dst, NULL)); | 1630 | skb_dst_drop(skb); |
1630 | rcu_read_unlock_bh(); | 1631 | rcu_read_unlock_bh(); |
1631 | goto done; | 1632 | goto done; |
1632 | } | 1633 | } |
1633 | dst_release(xchg(&skb->dst, NULL)); | 1634 | skb_dst_drop(skb); |
1634 | } | 1635 | } |
1635 | rcu_read_unlock_bh(); | 1636 | rcu_read_unlock_bh(); |
1636 | } | 1637 | } |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ed131181215d..2175e6d5cc8d 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev) | |||
67 | return -ENETDOWN; | 67 | return -ENETDOWN; |
68 | 68 | ||
69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { | 69 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { |
70 | err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN); | 70 | err = dev_unicast_add(master, dev->dev_addr); |
71 | if (err < 0) | 71 | if (err < 0) |
72 | goto out; | 72 | goto out; |
73 | } | 73 | } |
@@ -90,7 +90,7 @@ clear_allmulti: | |||
90 | dev_set_allmulti(master, -1); | 90 | dev_set_allmulti(master, -1); |
91 | del_unicast: | 91 | del_unicast: |
92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 92 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
93 | dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); | 93 | dev_unicast_delete(master, dev->dev_addr); |
94 | out: | 94 | out: |
95 | return err; | 95 | return err; |
96 | } | 96 | } |
@@ -108,7 +108,7 @@ static int dsa_slave_close(struct net_device *dev) | |||
108 | dev_set_promiscuity(master, -1); | 108 | dev_set_promiscuity(master, -1); |
109 | 109 | ||
110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 110 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
111 | dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); | 111 | dev_unicast_delete(master, dev->dev_addr); |
112 | 112 | ||
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a) | |||
147 | goto out; | 147 | goto out; |
148 | 148 | ||
149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { | 149 | if (compare_ether_addr(addr->sa_data, master->dev_addr)) { |
150 | err = dev_unicast_add(master, addr->sa_data, ETH_ALEN); | 150 | err = dev_unicast_add(master, addr->sa_data); |
151 | if (err < 0) | 151 | if (err < 0) |
152 | return err; | 152 | return err; |
153 | } | 153 | } |
154 | 154 | ||
155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) | 155 | if (compare_ether_addr(dev->dev_addr, master->dev_addr)) |
156 | dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); | 156 | dev_unicast_delete(master, dev->dev_addr); |
157 | 157 | ||
158 | out: | 158 | out: |
159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 159 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
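
These dsa hunks are part of the same API change seen throughout this merge: secondary unicast addresses are always dev->addr_len bytes, so dev_unicast_add()/dev_unicast_delete() no longer take an explicit length argument. The caller-side change is mechanical:

    /* before this series */
    err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN);

    /* after: the core derives the length from master->addr_len */
    err = dev_unicast_add(master, dev->dev_addr);
    if (err < 0)
        goto out;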
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 6f479fa522c3..8121bf0029e3 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -901,15 +901,10 @@ static void aun_tx_ack(unsigned long seq, int result) | |||
901 | struct ec_cb *eb; | 901 | struct ec_cb *eb; |
902 | 902 | ||
903 | spin_lock_irqsave(&aun_queue_lock, flags); | 903 | spin_lock_irqsave(&aun_queue_lock, flags); |
904 | skb = skb_peek(&aun_queue); | 904 | skb_queue_walk(&aun_queue, skb) { |
905 | while (skb && skb != (struct sk_buff *)&aun_queue) | ||
906 | { | ||
907 | struct sk_buff *newskb = skb->next; | ||
908 | eb = (struct ec_cb *)&skb->cb; | 905 | eb = (struct ec_cb *)&skb->cb; |
909 | if (eb->seq == seq) | 906 | if (eb->seq == seq) |
910 | goto foundit; | 907 | goto foundit; |
911 | |||
912 | skb = newskb; | ||
913 | } | 908 | } |
914 | spin_unlock_irqrestore(&aun_queue_lock, flags); | 909 | spin_unlock_irqrestore(&aun_queue_lock, flags); |
915 | printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq); | 910 | printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq); |
@@ -982,23 +977,18 @@ static void aun_data_available(struct sock *sk, int slen) | |||
982 | 977 | ||
983 | static void ab_cleanup(unsigned long h) | 978 | static void ab_cleanup(unsigned long h) |
984 | { | 979 | { |
985 | struct sk_buff *skb; | 980 | struct sk_buff *skb, *n; |
986 | unsigned long flags; | 981 | unsigned long flags; |
987 | 982 | ||
988 | spin_lock_irqsave(&aun_queue_lock, flags); | 983 | spin_lock_irqsave(&aun_queue_lock, flags); |
989 | skb = skb_peek(&aun_queue); | 984 | skb_queue_walk_safe(&aun_queue, skb, n) { |
990 | while (skb && skb != (struct sk_buff *)&aun_queue) | ||
991 | { | ||
992 | struct sk_buff *newskb = skb->next; | ||
993 | struct ec_cb *eb = (struct ec_cb *)&skb->cb; | 985 | struct ec_cb *eb = (struct ec_cb *)&skb->cb; |
994 | if ((jiffies - eb->start) > eb->timeout) | 986 | if ((jiffies - eb->start) > eb->timeout) { |
995 | { | ||
996 | tx_result(skb->sk, eb->cookie, | 987 | tx_result(skb->sk, eb->cookie, |
997 | ECTYPE_TRANSMIT_NOT_PRESENT); | 988 | ECTYPE_TRANSMIT_NOT_PRESENT); |
998 | skb_unlink(skb, &aun_queue); | 989 | skb_unlink(skb, &aun_queue); |
999 | kfree_skb(skb); | 990 | kfree_skb(skb); |
1000 | } | 991 | } |
1001 | skb = newskb; | ||
1002 | } | 992 | } |
1003 | spin_unlock_irqrestore(&aun_queue_lock, flags); | 993 | spin_unlock_irqrestore(&aun_queue_lock, flags); |
1004 | 994 | ||
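
The econet hunks replace hand-rolled sk_buff queue walking with the skb_queue_walk()/skb_queue_walk_safe() iterators. The _safe variant caches skb->next in a second cursor before the loop body runs, so the current skb may be unlinked and freed mid-walk. The general pattern, as a sketch (should_drop() is a hypothetical predicate standing in for the timeout test above):

    static void purge_queue(struct sk_buff_head *q)
    {
        struct sk_buff *skb, *n;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb_queue_walk_safe(q, skb, n) {
            if (should_drop(skb)) {
                skb_unlink(skb, q);
                kfree_skb(skb);    /* safe: the iterator already holds n */
            }
        }
        spin_unlock_irqrestore(&q->lock, flags);
    }

Plain skb_queue_walk(), used in aun_tx_ack(), is only valid while the loop does not remove entries.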
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig new file mode 100644 index 000000000000..1c1de97d264a --- /dev/null +++ b/net/ieee802154/Kconfig | |||
@@ -0,0 +1,12 @@ | |||
1 | config IEEE802154 | ||
2 | tristate "IEEE Std 802.15.4 Low-Rate Wireless Personal Area Networks support (EXPERIMENTAL)" | ||
3 | depends on EXPERIMENTAL | ||
4 | ---help--- | ||
5 | IEEE Std 802.15.4 defines low data rate, low power and low | ||
6 | complexity short range wireless personal area networks (LR-WPANs). | ||
7 | It was designed to organise networks of sensors, switches and other | ||
8 | automation devices. The maximum allowed data rate is 250 kb/s and | ||
9 | the typical personal operating space is around 10 m. | ||
10 | |||
11 | Say Y here to compile LR-WPAN support into the kernel or say M to | ||
12 | compile it as modules. | ||
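
Once this option is enabled, the new address family implemented in the files below becomes available to userspace. A minimal sketch, assuming the AF_IEEE802154 constant (36 in this series) is not yet in the libc headers:

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef AF_IEEE802154
    #define AF_IEEE802154 36    /* value assigned by this series */
    #endif

    int main(void)
    {
        /* SOCK_DGRAM uses the MAC data service (dgram.c),
         * SOCK_RAW sends/receives whole frames (raw.c). */
        int fd = socket(AF_IEEE802154, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket(AF_IEEE802154, SOCK_DGRAM)");
            return 1;
        }
        return 0;
    }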
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile new file mode 100644 index 000000000000..f99338a26100 --- /dev/null +++ b/net/ieee802154/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o | ||
2 | nl802154-y := netlink.o nl_policy.o | ||
3 | af_802154-y := af_ieee802154.o raw.o dgram.o | ||
4 | |||
5 | ccflags-y += -Wall -DDEBUG | ||
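
The -DDEBUG flag deserves a note: it force-enables every pr_debug() call in this directory, so the socket and netlink code below logs at KERN_DEBUG even on kernels without dynamic debug. For example:

    /* With -DDEBUG this prints; without it (and without
     * CONFIG_DYNAMIC_DEBUG) the call compiles away entirely. */
    pr_debug("got frame, type %d, dev %p\n", dev->type, dev);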
diff --git a/net/ieee802154/af802154.h b/net/ieee802154/af802154.h new file mode 100644 index 000000000000..b1ec52537522 --- /dev/null +++ b/net/ieee802154/af802154.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Internal interfaces for ieee 802.15.4 address family. | ||
3 | * | ||
4 | * Copyright 2007, 2008, 2009 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
22 | */ | ||
23 | |||
24 | #ifndef AF802154_H | ||
25 | #define AF802154_H | ||
26 | |||
27 | struct sk_buff; | ||
28 | struct net_device; | ||
29 | extern struct proto ieee802154_raw_prot; | ||
30 | extern struct proto ieee802154_dgram_prot; | ||
31 | void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb); | ||
32 | int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb); | ||
33 | struct net_device *ieee802154_get_dev(struct net *net, | ||
34 | struct ieee802154_addr *addr); | ||
35 | |||
36 | #endif | ||
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c new file mode 100644 index 000000000000..882a927cefae --- /dev/null +++ b/net/ieee802154/af_ieee802154.c | |||
@@ -0,0 +1,372 @@ | |||
1 | /* | ||
2 | * IEEE 802.15.4 socket interface | ||
3 | * | ||
4 | * Copyright 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Maxim Gorbachyov <maxim.gorbachev@siemens.com> | ||
22 | */ | ||
23 | |||
24 | #include <linux/net.h> | ||
25 | #include <linux/capability.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/if_arp.h> | ||
28 | #include <linux/if.h> | ||
29 | #include <linux/termios.h> /* For TIOCOUTQ/INQ */ | ||
30 | #include <linux/list.h> | ||
31 | #include <net/datalink.h> | ||
32 | #include <net/psnap.h> | ||
33 | #include <net/sock.h> | ||
34 | #include <net/tcp_states.h> | ||
35 | #include <net/route.h> | ||
36 | |||
37 | #include <net/ieee802154/af_ieee802154.h> | ||
38 | #include <net/ieee802154/netdevice.h> | ||
39 | |||
40 | #include "af802154.h" | ||
41 | |||
42 | #define DBG_DUMP(data, len) { \ | ||
43 | int i; \ | ||
44 | pr_debug("function: %s: data: len %d:\n", __func__, len); \ | ||
45 | for (i = 0; i < len; i++) {\ | ||
46 | pr_debug("%02x: %02x\n", i, (data)[i]); \ | ||
47 | } \ | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Utility function shared by the raw and dgram families | ||
52 | */ | ||
53 | struct net_device *ieee802154_get_dev(struct net *net, | ||
54 | struct ieee802154_addr *addr) | ||
55 | { | ||
56 | struct net_device *dev = NULL; | ||
57 | struct net_device *tmp; | ||
58 | u16 pan_id, short_addr; | ||
59 | |||
60 | switch (addr->addr_type) { | ||
61 | case IEEE802154_ADDR_LONG: | ||
62 | rtnl_lock(); | ||
63 | dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr); | ||
64 | if (dev) | ||
65 | dev_hold(dev); | ||
66 | rtnl_unlock(); | ||
67 | break; | ||
68 | case IEEE802154_ADDR_SHORT: | ||
69 | if (addr->pan_id == 0xffff || | ||
70 | addr->short_addr == IEEE802154_ADDR_UNDEF || | ||
71 | addr->short_addr == 0xffff) | ||
72 | break; | ||
73 | |||
74 | rtnl_lock(); | ||
75 | |||
76 | for_each_netdev(net, tmp) { | ||
77 | if (tmp->type != ARPHRD_IEEE802154) | ||
78 | continue; | ||
79 | |||
80 | pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp); | ||
81 | short_addr = | ||
82 | ieee802154_mlme_ops(tmp)->get_short_addr(tmp); | ||
83 | |||
84 | if (pan_id == addr->pan_id && | ||
85 | short_addr == addr->short_addr) { | ||
86 | dev = tmp; | ||
87 | dev_hold(dev); | ||
88 | break; | ||
89 | } | ||
90 | } | ||
91 | |||
92 | rtnl_unlock(); | ||
93 | break; | ||
94 | default: | ||
95 | pr_warning("Unsupported ieee802154 address type: %d\n", | ||
96 | addr->addr_type); | ||
97 | break; | ||
98 | } | ||
99 | |||
100 | return dev; | ||
101 | } | ||
102 | |||
103 | static int ieee802154_sock_release(struct socket *sock) | ||
104 | { | ||
105 | struct sock *sk = sock->sk; | ||
106 | |||
107 | if (sk) { | ||
108 | sock->sk = NULL; | ||
109 | sk->sk_prot->close(sk, 0); | ||
110 | } | ||
111 | return 0; | ||
112 | } | ||
113 | static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | ||
114 | struct msghdr *msg, size_t len) | ||
115 | { | ||
116 | struct sock *sk = sock->sk; | ||
117 | |||
118 | return sk->sk_prot->sendmsg(iocb, sk, msg, len); | ||
119 | } | ||
120 | |||
121 | static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr, | ||
122 | int addr_len) | ||
123 | { | ||
124 | struct sock *sk = sock->sk; | ||
125 | |||
126 | if (sk->sk_prot->bind) | ||
127 | return sk->sk_prot->bind(sk, uaddr, addr_len); | ||
128 | |||
129 | return sock_no_bind(sock, uaddr, addr_len); | ||
130 | } | ||
131 | |||
132 | static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, | ||
133 | int addr_len, int flags) | ||
134 | { | ||
135 | struct sock *sk = sock->sk; | ||
136 | |||
137 | if (uaddr->sa_family == AF_UNSPEC) | ||
138 | return sk->sk_prot->disconnect(sk, flags); | ||
139 | |||
140 | return sk->sk_prot->connect(sk, uaddr, addr_len); | ||
141 | } | ||
142 | |||
143 | static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, | ||
144 | unsigned int cmd) | ||
145 | { | ||
146 | struct ifreq ifr; | ||
147 | int ret = -EINVAL; | ||
148 | struct net_device *dev; | ||
149 | |||
150 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | ||
151 | return -EFAULT; | ||
152 | |||
153 | ifr.ifr_name[IFNAMSIZ-1] = 0; | ||
154 | |||
155 | dev_load(sock_net(sk), ifr.ifr_name); | ||
156 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); | ||
157 | if (!dev) | ||
158 | return -ENODEV; | ||
159 | if (dev->type == ARPHRD_IEEE802154 || dev->type == ARPHRD_IEEE802154_PHY) | ||
160 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); | ||
161 | if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) | ||
162 | ret = -EFAULT; | ||
163 | dev_put(dev); | ||
164 | |||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd, | ||
169 | unsigned long arg) | ||
170 | { | ||
171 | struct sock *sk = sock->sk; | ||
172 | |||
173 | switch (cmd) { | ||
174 | case SIOCGSTAMP: | ||
175 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | ||
176 | case SIOCGSTAMPNS: | ||
177 | return sock_get_timestampns(sk, (struct timespec __user *)arg); | ||
178 | case SIOCGIFADDR: | ||
179 | case SIOCSIFADDR: | ||
180 | return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg, | ||
181 | cmd); | ||
182 | default: | ||
183 | if (!sk->sk_prot->ioctl) | ||
184 | return -ENOIOCTLCMD; | ||
185 | return sk->sk_prot->ioctl(sk, cmd, arg); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static const struct proto_ops ieee802154_raw_ops = { | ||
190 | .family = PF_IEEE802154, | ||
191 | .owner = THIS_MODULE, | ||
192 | .release = ieee802154_sock_release, | ||
193 | .bind = ieee802154_sock_bind, | ||
194 | .connect = ieee802154_sock_connect, | ||
195 | .socketpair = sock_no_socketpair, | ||
196 | .accept = sock_no_accept, | ||
197 | .getname = sock_no_getname, | ||
198 | .poll = datagram_poll, | ||
199 | .ioctl = ieee802154_sock_ioctl, | ||
200 | .listen = sock_no_listen, | ||
201 | .shutdown = sock_no_shutdown, | ||
202 | .setsockopt = sock_common_setsockopt, | ||
203 | .getsockopt = sock_common_getsockopt, | ||
204 | .sendmsg = ieee802154_sock_sendmsg, | ||
205 | .recvmsg = sock_common_recvmsg, | ||
206 | .mmap = sock_no_mmap, | ||
207 | .sendpage = sock_no_sendpage, | ||
208 | #ifdef CONFIG_COMPAT | ||
209 | .compat_setsockopt = compat_sock_common_setsockopt, | ||
210 | .compat_getsockopt = compat_sock_common_getsockopt, | ||
211 | #endif | ||
212 | }; | ||
213 | |||
214 | static const struct proto_ops ieee802154_dgram_ops = { | ||
215 | .family = PF_IEEE802154, | ||
216 | .owner = THIS_MODULE, | ||
217 | .release = ieee802154_sock_release, | ||
218 | .bind = ieee802154_sock_bind, | ||
219 | .connect = ieee802154_sock_connect, | ||
220 | .socketpair = sock_no_socketpair, | ||
221 | .accept = sock_no_accept, | ||
222 | .getname = sock_no_getname, | ||
223 | .poll = datagram_poll, | ||
224 | .ioctl = ieee802154_sock_ioctl, | ||
225 | .listen = sock_no_listen, | ||
226 | .shutdown = sock_no_shutdown, | ||
227 | .setsockopt = sock_common_setsockopt, | ||
228 | .getsockopt = sock_common_getsockopt, | ||
229 | .sendmsg = ieee802154_sock_sendmsg, | ||
230 | .recvmsg = sock_common_recvmsg, | ||
231 | .mmap = sock_no_mmap, | ||
232 | .sendpage = sock_no_sendpage, | ||
233 | #ifdef CONFIG_COMPAT | ||
234 | .compat_setsockopt = compat_sock_common_setsockopt, | ||
235 | .compat_getsockopt = compat_sock_common_getsockopt, | ||
236 | #endif | ||
237 | }; | ||
238 | |||
239 | |||
240 | /* | ||
241 | * Create a socket. Initialise the socket, blank the addresses | ||
242 | * set the state. | ||
243 | */ | ||
244 | static int ieee802154_create(struct net *net, struct socket *sock, | ||
245 | int protocol) | ||
246 | { | ||
247 | struct sock *sk; | ||
248 | int rc; | ||
249 | struct proto *proto; | ||
250 | const struct proto_ops *ops; | ||
251 | |||
252 | if (net != &init_net) | ||
253 | return -EAFNOSUPPORT; | ||
254 | |||
255 | switch (sock->type) { | ||
256 | case SOCK_RAW: | ||
257 | proto = &ieee802154_raw_prot; | ||
258 | ops = &ieee802154_raw_ops; | ||
259 | break; | ||
260 | case SOCK_DGRAM: | ||
261 | proto = &ieee802154_dgram_prot; | ||
262 | ops = &ieee802154_dgram_ops; | ||
263 | break; | ||
264 | default: | ||
265 | rc = -ESOCKTNOSUPPORT; | ||
266 | goto out; | ||
267 | } | ||
268 | |||
269 | rc = -ENOMEM; | ||
270 | sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto); | ||
271 | if (!sk) | ||
272 | goto out; | ||
273 | rc = 0; | ||
274 | |||
275 | sock->ops = ops; | ||
276 | |||
277 | sock_init_data(sock, sk); | ||
278 | /* FIXME: sk->sk_destruct */ | ||
279 | sk->sk_family = PF_IEEE802154; | ||
280 | |||
281 | /* Checksums on by default */ | ||
282 | sock_set_flag(sk, SOCK_ZAPPED); | ||
283 | |||
284 | if (sk->sk_prot->hash) | ||
285 | sk->sk_prot->hash(sk); | ||
286 | |||
287 | if (sk->sk_prot->init) { | ||
288 | rc = sk->sk_prot->init(sk); | ||
289 | if (rc) | ||
290 | sk_common_release(sk); | ||
291 | } | ||
292 | out: | ||
293 | return rc; | ||
294 | } | ||
295 | |||
296 | static struct net_proto_family ieee802154_family_ops = { | ||
297 | .family = PF_IEEE802154, | ||
298 | .create = ieee802154_create, | ||
299 | .owner = THIS_MODULE, | ||
300 | }; | ||
301 | |||
302 | static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev, | ||
303 | struct packet_type *pt, struct net_device *orig_dev) | ||
304 | { | ||
305 | DBG_DUMP(skb->data, skb->len); | ||
306 | if (!netif_running(dev)) | ||
307 | goto drop; | ||
308 | pr_debug("got frame, type %d, dev %p\n", dev->type, dev); | ||
309 | |||
310 | if (!net_eq(dev_net(dev), &init_net)) | ||
311 | goto drop; | ||
312 | |||
313 | ieee802154_raw_deliver(dev, skb); | ||
314 | |||
315 | if (dev->type != ARPHRD_IEEE802154) | ||
316 | goto drop; | ||
317 | |||
318 | if (skb->pkt_type != PACKET_OTHERHOST) | ||
319 | return ieee802154_dgram_deliver(dev, skb); | ||
320 | |||
321 | drop: | ||
322 | kfree_skb(skb); | ||
323 | return NET_RX_DROP; | ||
324 | } | ||
325 | |||
326 | |||
327 | static struct packet_type ieee802154_packet_type = { | ||
328 | .type = __constant_htons(ETH_P_IEEE802154), | ||
329 | .func = ieee802154_rcv, | ||
330 | }; | ||
331 | |||
332 | static int __init af_ieee802154_init(void) | ||
333 | { | ||
334 | int rc = -EINVAL; | ||
335 | |||
336 | rc = proto_register(&ieee802154_raw_prot, 1); | ||
337 | if (rc) | ||
338 | goto out; | ||
339 | |||
340 | rc = proto_register(&ieee802154_dgram_prot, 1); | ||
341 | if (rc) | ||
342 | goto err_dgram; | ||
343 | |||
344 | /* Tell SOCKET that we are alive */ | ||
345 | rc = sock_register(&ieee802154_family_ops); | ||
346 | if (rc) | ||
347 | goto err_sock; | ||
348 | dev_add_pack(&ieee802154_packet_type); | ||
349 | |||
350 | rc = 0; | ||
351 | goto out; | ||
352 | |||
353 | err_sock: | ||
354 | proto_unregister(&ieee802154_dgram_prot); | ||
355 | err_dgram: | ||
356 | proto_unregister(&ieee802154_raw_prot); | ||
357 | out: | ||
358 | return rc; | ||
359 | } | ||
360 | static void __exit af_ieee802154_remove(void) | ||
361 | { | ||
362 | dev_remove_pack(&ieee802154_packet_type); | ||
363 | sock_unregister(PF_IEEE802154); | ||
364 | proto_unregister(&ieee802154_dgram_prot); | ||
365 | proto_unregister(&ieee802154_raw_prot); | ||
366 | } | ||
367 | |||
368 | module_init(af_ieee802154_init); | ||
369 | module_exit(af_ieee802154_remove); | ||
370 | |||
371 | MODULE_LICENSE("GPL"); | ||
372 | MODULE_ALIAS_NETPROTO(PF_IEEE802154); | ||
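
Receive-path wiring in this file follows the standard packet_type pattern: dev_add_pack() registers ieee802154_rcv() for ETH_P_IEEE802154 frames, raw sockets always see a clone, and dgram delivery is attempted only on real MAC devices. Condensed to its skeleton (names prefixed my_ are hypothetical):

    static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
    {
        if (!netif_running(dev) || !net_eq(dev_net(dev), &init_net)) {
            kfree_skb(skb);
            return NET_RX_DROP;
        }
        return my_deliver(dev, skb);    /* hypothetical delivery hook */
    }

    static struct packet_type my_packet_type = {
        .type = __constant_htons(ETH_P_802_2),    /* stand-in ethertype */
        .func = my_rcv,
    };

    /* module init: dev_add_pack(&my_packet_type);
     * module exit: dev_remove_pack(&my_packet_type); */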
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c new file mode 100644 index 000000000000..1779677aed46 --- /dev/null +++ b/net/ieee802154/dgram.c | |||
@@ -0,0 +1,394 @@ | |||
1 | /* | ||
2 | * IEEE 802.15.4 dgram socket interface | ||
3 | * | ||
4 | * Copyright 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
22 | */ | ||
23 | |||
24 | #include <linux/net.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/if_arp.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <net/sock.h> | ||
29 | #include <net/ieee802154/af_ieee802154.h> | ||
30 | #include <net/ieee802154/mac_def.h> | ||
31 | #include <net/ieee802154/netdevice.h> | ||
32 | |||
33 | #include <asm/ioctls.h> | ||
34 | |||
35 | #include "af802154.h" | ||
36 | |||
37 | static HLIST_HEAD(dgram_head); | ||
38 | static DEFINE_RWLOCK(dgram_lock); | ||
39 | |||
40 | struct dgram_sock { | ||
41 | struct sock sk; | ||
42 | |||
43 | int bound; | ||
44 | struct ieee802154_addr src_addr; | ||
45 | struct ieee802154_addr dst_addr; | ||
46 | }; | ||
47 | |||
48 | static inline struct dgram_sock *dgram_sk(const struct sock *sk) | ||
49 | { | ||
50 | return container_of(sk, struct dgram_sock, sk); | ||
51 | } | ||
52 | |||
53 | |||
54 | static void dgram_hash(struct sock *sk) | ||
55 | { | ||
56 | write_lock_bh(&dgram_lock); | ||
57 | sk_add_node(sk, &dgram_head); | ||
58 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | ||
59 | write_unlock_bh(&dgram_lock); | ||
60 | } | ||
61 | |||
62 | static void dgram_unhash(struct sock *sk) | ||
63 | { | ||
64 | write_lock_bh(&dgram_lock); | ||
65 | if (sk_del_node_init(sk)) | ||
66 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | ||
67 | write_unlock_bh(&dgram_lock); | ||
68 | } | ||
69 | |||
70 | static int dgram_init(struct sock *sk) | ||
71 | { | ||
72 | struct dgram_sock *ro = dgram_sk(sk); | ||
73 | |||
74 | ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; | ||
75 | ro->dst_addr.pan_id = 0xffff; | ||
76 | memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static void dgram_close(struct sock *sk, long timeout) | ||
81 | { | ||
82 | sk_common_release(sk); | ||
83 | } | ||
84 | |||
85 | static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) | ||
86 | { | ||
87 | struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; | ||
88 | struct dgram_sock *ro = dgram_sk(sk); | ||
89 | int err = 0; | ||
90 | struct net_device *dev; | ||
91 | |||
92 | ro->bound = 0; | ||
93 | |||
94 | if (len < sizeof(*addr)) | ||
95 | return -EINVAL; | ||
96 | |||
97 | if (addr->family != AF_IEEE802154) | ||
98 | return -EINVAL; | ||
99 | |||
100 | lock_sock(sk); | ||
101 | |||
102 | dev = ieee802154_get_dev(sock_net(sk), &addr->addr); | ||
103 | if (!dev) { | ||
104 | err = -ENODEV; | ||
105 | goto out; | ||
106 | } | ||
107 | |||
108 | if (dev->type != ARPHRD_IEEE802154) { | ||
109 | err = -ENODEV; | ||
110 | goto out_put; | ||
111 | } | ||
112 | |||
113 | memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr)); | ||
114 | |||
115 | ro->bound = 1; | ||
116 | out_put: | ||
117 | dev_put(dev); | ||
118 | out: | ||
119 | release_sock(sk); | ||
120 | |||
121 | return err; | ||
122 | } | ||
123 | |||
124 | static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) | ||
125 | { | ||
126 | switch (cmd) { | ||
127 | case SIOCOUTQ: | ||
128 | { | ||
129 | int amount = atomic_read(&sk->sk_wmem_alloc); | ||
130 | return put_user(amount, (int __user *)arg); | ||
131 | } | ||
132 | |||
133 | case SIOCINQ: | ||
134 | { | ||
135 | struct sk_buff *skb; | ||
136 | unsigned long amount; | ||
137 | |||
138 | amount = 0; | ||
139 | spin_lock_bh(&sk->sk_receive_queue.lock); | ||
140 | skb = skb_peek(&sk->sk_receive_queue); | ||
141 | if (skb != NULL) { | ||
142 | /* | ||
143 | * We will only return the amount | ||
144 | * of this packet since that is all | ||
145 | * that will be read. | ||
146 | */ | ||
147 | /* FIXME: parse the header for more correct value */ | ||
148 | amount = skb->len - (3+8+8); | ||
149 | } | ||
150 | spin_unlock_bh(&sk->sk_receive_queue.lock); | ||
151 | return put_user(amount, (int __user *)arg); | ||
152 | } | ||
153 | |||
154 | } | ||
155 | return -ENOIOCTLCMD; | ||
156 | } | ||
157 | |||
158 | /* FIXME: autobind */ | ||
159 | static int dgram_connect(struct sock *sk, struct sockaddr *uaddr, | ||
160 | int len) | ||
161 | { | ||
162 | struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; | ||
163 | struct dgram_sock *ro = dgram_sk(sk); | ||
164 | int err = 0; | ||
165 | |||
166 | if (len < sizeof(*addr)) | ||
167 | return -EINVAL; | ||
168 | |||
169 | if (addr->family != AF_IEEE802154) | ||
170 | return -EINVAL; | ||
171 | |||
172 | lock_sock(sk); | ||
173 | |||
174 | if (!ro->bound) { | ||
175 | err = -ENETUNREACH; | ||
176 | goto out; | ||
177 | } | ||
178 | |||
179 | memcpy(&ro->dst_addr, &addr->addr, sizeof(struct ieee802154_addr)); | ||
180 | |||
181 | out: | ||
182 | release_sock(sk); | ||
183 | return err; | ||
184 | } | ||
185 | |||
186 | static int dgram_disconnect(struct sock *sk, int flags) | ||
187 | { | ||
188 | struct dgram_sock *ro = dgram_sk(sk); | ||
189 | |||
190 | lock_sock(sk); | ||
191 | |||
192 | ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; | ||
193 | memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); | ||
194 | |||
195 | release_sock(sk); | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, | ||
201 | struct msghdr *msg, size_t size) | ||
202 | { | ||
203 | struct net_device *dev; | ||
204 | unsigned mtu; | ||
205 | struct sk_buff *skb; | ||
206 | struct dgram_sock *ro = dgram_sk(sk); | ||
207 | int err; | ||
208 | |||
209 | if (msg->msg_flags & MSG_OOB) { | ||
210 | pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags); | ||
211 | return -EOPNOTSUPP; | ||
212 | } | ||
213 | |||
214 | if (!ro->bound) | ||
215 | dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); | ||
216 | else | ||
217 | dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr); | ||
218 | |||
219 | if (!dev) { | ||
220 | pr_debug("no dev\n"); | ||
221 | err = -ENXIO; | ||
222 | goto out; | ||
223 | } | ||
224 | mtu = dev->mtu; | ||
225 | pr_debug("name = %s, mtu = %u\n", dev->name, mtu); | ||
226 | |||
227 | skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, | ||
228 | msg->msg_flags & MSG_DONTWAIT, | ||
229 | &err); | ||
230 | if (!skb) | ||
231 | goto out_dev; | ||
232 | |||
233 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | ||
234 | |||
235 | skb_reset_network_header(skb); | ||
236 | |||
237 | mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA | MAC_CB_FLAG_ACKREQ; | ||
238 | mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); | ||
239 | err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, | ||
240 | ro->bound ? &ro->src_addr : NULL, size); | ||
241 | if (err < 0) | ||
242 | goto out_skb; | ||
243 | |||
244 | skb_reset_mac_header(skb); | ||
245 | |||
246 | err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); | ||
247 | if (err < 0) | ||
248 | goto out_skb; | ||
249 | |||
250 | if (size > mtu) { | ||
251 | pr_debug("size = %Zu, mtu = %u\n", size, mtu); | ||
252 | err = -EINVAL; | ||
253 | goto out_skb; | ||
254 | } | ||
255 | |||
256 | skb->dev = dev; | ||
257 | skb->sk = sk; | ||
258 | skb->protocol = htons(ETH_P_IEEE802154); | ||
259 | |||
260 | dev_put(dev); | ||
261 | |||
262 | err = dev_queue_xmit(skb); | ||
263 | if (err > 0) | ||
264 | err = net_xmit_errno(err); | ||
265 | |||
266 | return err ?: size; | ||
267 | |||
268 | out_skb: | ||
269 | kfree_skb(skb); | ||
270 | out_dev: | ||
271 | dev_put(dev); | ||
272 | out: | ||
273 | return err; | ||
274 | } | ||
275 | |||
276 | static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, | ||
277 | struct msghdr *msg, size_t len, int noblock, int flags, | ||
278 | int *addr_len) | ||
279 | { | ||
280 | size_t copied = 0; | ||
281 | int err = -EOPNOTSUPP; | ||
282 | struct sk_buff *skb; | ||
283 | |||
284 | skb = skb_recv_datagram(sk, flags, noblock, &err); | ||
285 | if (!skb) | ||
286 | goto out; | ||
287 | |||
288 | copied = skb->len; | ||
289 | if (len < copied) { | ||
290 | msg->msg_flags |= MSG_TRUNC; | ||
291 | copied = len; | ||
292 | } | ||
293 | |||
294 | /* FIXME: skip headers if necessary ?! */ | ||
295 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
296 | if (err) | ||
297 | goto done; | ||
298 | |||
299 | sock_recv_timestamp(msg, sk, skb); | ||
300 | |||
301 | if (flags & MSG_TRUNC) | ||
302 | copied = skb->len; | ||
303 | done: | ||
304 | skb_free_datagram(sk, skb); | ||
305 | out: | ||
306 | if (err) | ||
307 | return err; | ||
308 | return copied; | ||
309 | } | ||
310 | |||
311 | static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
312 | { | ||
313 | if (sock_queue_rcv_skb(sk, skb) < 0) { | ||
314 | atomic_inc(&sk->sk_drops); | ||
315 | kfree_skb(skb); | ||
316 | return NET_RX_DROP; | ||
317 | } | ||
318 | |||
319 | return NET_RX_SUCCESS; | ||
320 | } | ||
321 | |||
322 | static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id, | ||
323 | u16 short_addr, struct dgram_sock *ro) | ||
324 | { | ||
325 | if (!ro->bound) | ||
326 | return 1; | ||
327 | |||
328 | if (ro->src_addr.addr_type == IEEE802154_ADDR_LONG && | ||
329 | !memcmp(ro->src_addr.hwaddr, hw_addr, IEEE802154_ADDR_LEN)) | ||
330 | return 1; | ||
331 | |||
332 | if (ro->src_addr.addr_type == IEEE802154_ADDR_SHORT && | ||
333 | pan_id == ro->src_addr.pan_id && | ||
334 | short_addr == ro->src_addr.short_addr) | ||
335 | return 1; | ||
336 | |||
337 | return 0; | ||
338 | } | ||
339 | |||
340 | int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) | ||
341 | { | ||
342 | struct sock *sk, *prev = NULL; | ||
343 | struct hlist_node *node; | ||
344 | int ret = NET_RX_SUCCESS; | ||
345 | u16 pan_id, short_addr; | ||
346 | |||
347 | /* Data frame processing */ | ||
348 | BUG_ON(dev->type != ARPHRD_IEEE802154); | ||
349 | |||
350 | pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
351 | short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); | ||
352 | |||
353 | read_lock(&dgram_lock); | ||
354 | sk_for_each(sk, node, &dgram_head) { | ||
355 | if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr, | ||
356 | dgram_sk(sk))) { | ||
357 | if (prev) { | ||
358 | struct sk_buff *clone; | ||
359 | clone = skb_clone(skb, GFP_ATOMIC); | ||
360 | if (clone) | ||
361 | dgram_rcv_skb(prev, clone); | ||
362 | } | ||
363 | |||
364 | prev = sk; | ||
365 | } | ||
366 | } | ||
367 | |||
368 | if (prev) | ||
369 | dgram_rcv_skb(prev, skb); | ||
370 | else { | ||
371 | kfree_skb(skb); | ||
372 | ret = NET_RX_DROP; | ||
373 | } | ||
374 | read_unlock(&dgram_lock); | ||
375 | |||
376 | return ret; | ||
377 | } | ||
378 | |||
379 | struct proto ieee802154_dgram_prot = { | ||
380 | .name = "IEEE-802.15.4-MAC", | ||
381 | .owner = THIS_MODULE, | ||
382 | .obj_size = sizeof(struct dgram_sock), | ||
383 | .init = dgram_init, | ||
384 | .close = dgram_close, | ||
385 | .bind = dgram_bind, | ||
386 | .sendmsg = dgram_sendmsg, | ||
387 | .recvmsg = dgram_recvmsg, | ||
388 | .hash = dgram_hash, | ||
389 | .unhash = dgram_unhash, | ||
390 | .connect = dgram_connect, | ||
391 | .disconnect = dgram_disconnect, | ||
392 | .ioctl = dgram_ioctl, | ||
393 | }; | ||
394 | |||
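
From userspace, a dgram socket in this version must first be bound to a local address; the destination is then fixed with connect(), since dgram_sendmsg() takes it from ro->dst_addr rather than from sendto()'s address argument. A hedged sketch, with fd, src, payload and payload_len assumed, using the sockaddr layout from this series' af_ieee802154.h:

    struct sockaddr_ieee802154 dst;

    memset(&dst, 0, sizeof(dst));
    dst.family = AF_IEEE802154;
    dst.addr.addr_type = IEEE802154_ADDR_SHORT;
    dst.addr.pan_id = 0x0001;        /* example PAN id */
    dst.addr.short_addr = 0x0002;    /* example peer */

    bind(fd, (struct sockaddr *)&src, sizeof(src));    /* local address */
    connect(fd, (struct sockaddr *)&dst, sizeof(dst));
    send(fd, payload, payload_len, 0);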
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c new file mode 100644 index 000000000000..105ad10876af --- /dev/null +++ b/net/ieee802154/netlink.c | |||
@@ -0,0 +1,523 @@ | |||
1 | /* | ||
2 | * Netlink interface for IEEE 802.15.4 stack | ||
3 | * | ||
4 | * Copyright 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
22 | */ | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/if_arp.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | #include <net/netlink.h> | ||
28 | #include <net/genetlink.h> | ||
29 | #include <linux/nl802154.h> | ||
30 | #include <net/ieee802154/af_ieee802154.h> | ||
31 | #include <net/ieee802154/nl802154.h> | ||
32 | #include <net/ieee802154/netdevice.h> | ||
33 | |||
34 | static unsigned int ieee802154_seq_num; | ||
35 | |||
36 | static struct genl_family ieee802154_coordinator_family = { | ||
37 | .id = GENL_ID_GENERATE, | ||
38 | .hdrsize = 0, | ||
39 | .name = IEEE802154_NL_NAME, | ||
40 | .version = 1, | ||
41 | .maxattr = IEEE802154_ATTR_MAX, | ||
42 | }; | ||
43 | |||
44 | static struct genl_multicast_group ieee802154_coord_mcgrp = { | ||
45 | .name = IEEE802154_MCAST_COORD_NAME, | ||
46 | }; | ||
47 | |||
48 | static struct genl_multicast_group ieee802154_beacon_mcgrp = { | ||
49 | .name = IEEE802154_MCAST_BEACON_NAME, | ||
50 | }; | ||
51 | |||
52 | /* Requests to userspace */ | ||
53 | static struct sk_buff *ieee802154_nl_create(int flags, u8 req) | ||
54 | { | ||
55 | void *hdr; | ||
56 | struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); | ||
57 | |||
58 | if (!msg) | ||
59 | return NULL; | ||
60 | |||
61 | hdr = genlmsg_put(msg, 0, ieee802154_seq_num++, | ||
62 | &ieee802154_coordinator_family, flags, req); | ||
63 | if (!hdr) { | ||
64 | nlmsg_free(msg); | ||
65 | return NULL; | ||
66 | } | ||
67 | |||
68 | return msg; | ||
69 | } | ||
70 | |||
71 | static int ieee802154_nl_finish(struct sk_buff *msg) | ||
72 | { | ||
73 | /* XXX: nlh is right at the start of msg */ | ||
74 | void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); | ||
75 | |||
76 | if (!genlmsg_end(msg, hdr)) | ||
77 | goto out; | ||
78 | |||
79 | return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id, | ||
80 | GFP_ATOMIC); | ||
81 | out: | ||
82 | nlmsg_free(msg); | ||
83 | return -ENOBUFS; | ||
84 | } | ||
85 | |||
86 | int ieee802154_nl_assoc_indic(struct net_device *dev, | ||
87 | struct ieee802154_addr *addr, u8 cap) | ||
88 | { | ||
89 | struct sk_buff *msg; | ||
90 | |||
91 | pr_debug("%s\n", __func__); | ||
92 | |||
93 | if (addr->addr_type != IEEE802154_ADDR_LONG) { | ||
94 | pr_err("%s: received non-long source address!\n", __func__); | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC); | ||
99 | if (!msg) | ||
100 | return -ENOBUFS; | ||
101 | |||
102 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
103 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
104 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
105 | dev->dev_addr); | ||
106 | |||
107 | NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, | ||
108 | addr->hwaddr); | ||
109 | |||
110 | NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap); | ||
111 | |||
112 | return ieee802154_nl_finish(msg); | ||
113 | |||
114 | nla_put_failure: | ||
115 | nlmsg_free(msg); | ||
116 | return -ENOBUFS; | ||
117 | } | ||
118 | EXPORT_SYMBOL(ieee802154_nl_assoc_indic); | ||
119 | |||
120 | int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, | ||
121 | u8 status) | ||
122 | { | ||
123 | struct sk_buff *msg; | ||
124 | |||
125 | pr_debug("%s\n", __func__); | ||
126 | |||
127 | msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF); | ||
128 | if (!msg) | ||
129 | return -ENOBUFS; | ||
130 | |||
131 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
132 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
133 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
134 | dev->dev_addr); | ||
135 | |||
136 | NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); | ||
137 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
138 | |||
139 | return ieee802154_nl_finish(msg); | ||
140 | |||
141 | nla_put_failure: | ||
142 | nlmsg_free(msg); | ||
143 | return -ENOBUFS; | ||
144 | } | ||
145 | EXPORT_SYMBOL(ieee802154_nl_assoc_confirm); | ||
146 | |||
147 | int ieee802154_nl_disassoc_indic(struct net_device *dev, | ||
148 | struct ieee802154_addr *addr, u8 reason) | ||
149 | { | ||
150 | struct sk_buff *msg; | ||
151 | |||
152 | pr_debug("%s\n", __func__); | ||
153 | |||
154 | msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC); | ||
155 | if (!msg) | ||
156 | return -ENOBUFS; | ||
157 | |||
158 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
159 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
160 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
161 | dev->dev_addr); | ||
162 | |||
163 | if (addr->addr_type == IEEE802154_ADDR_LONG) | ||
164 | NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, | ||
165 | addr->hwaddr); | ||
166 | else | ||
167 | NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, | ||
168 | addr->short_addr); | ||
169 | |||
170 | NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); | ||
171 | |||
172 | return ieee802154_nl_finish(msg); | ||
173 | |||
174 | nla_put_failure: | ||
175 | nlmsg_free(msg); | ||
176 | return -ENOBUFS; | ||
177 | } | ||
178 | EXPORT_SYMBOL(ieee802154_nl_disassoc_indic); | ||
179 | |||
180 | int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) | ||
181 | { | ||
182 | struct sk_buff *msg; | ||
183 | |||
184 | pr_debug("%s\n", __func__); | ||
185 | |||
186 | msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF); | ||
187 | if (!msg) | ||
188 | return -ENOBUFS; | ||
189 | |||
190 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
191 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
192 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
193 | dev->dev_addr); | ||
194 | |||
195 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
196 | |||
197 | return ieee802154_nl_finish(msg); | ||
198 | |||
199 | nla_put_failure: | ||
200 | nlmsg_free(msg); | ||
201 | return -ENOBUFS; | ||
202 | } | ||
203 | EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm); | ||
204 | |||
205 | int ieee802154_nl_beacon_indic(struct net_device *dev, | ||
206 | u16 panid, u16 coord_addr) | ||
207 | { | ||
208 | struct sk_buff *msg; | ||
209 | |||
210 | pr_debug("%s\n", __func__); | ||
211 | |||
212 | msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC); | ||
213 | if (!msg) | ||
214 | return -ENOBUFS; | ||
215 | |||
216 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
217 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
218 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
219 | dev->dev_addr); | ||
220 | NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); | ||
221 | NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); | ||
222 | |||
223 | return ieee802154_nl_finish(msg); | ||
224 | |||
225 | nla_put_failure: | ||
226 | nlmsg_free(msg); | ||
227 | return -ENOBUFS; | ||
228 | } | ||
229 | EXPORT_SYMBOL(ieee802154_nl_beacon_indic); | ||
230 | |||
231 | int ieee802154_nl_scan_confirm(struct net_device *dev, | ||
232 | u8 status, u8 scan_type, u32 unscanned, | ||
233 | u8 *edl/* , struct list_head *pan_desc_list */) | ||
234 | { | ||
235 | struct sk_buff *msg; | ||
236 | |||
237 | pr_debug("%s\n", __func__); | ||
238 | |||
239 | msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF); | ||
240 | if (!msg) | ||
241 | return -ENOBUFS; | ||
242 | |||
243 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
244 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
245 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
246 | dev->dev_addr); | ||
247 | |||
248 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
249 | NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); | ||
250 | NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); | ||
251 | |||
252 | if (edl) | ||
253 | NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); | ||
254 | |||
255 | return ieee802154_nl_finish(msg); | ||
256 | |||
257 | nla_put_failure: | ||
258 | nlmsg_free(msg); | ||
259 | return -ENOBUFS; | ||
260 | } | ||
261 | EXPORT_SYMBOL(ieee802154_nl_scan_confirm); | ||
262 | |||
263 | /* Requests from userspace */ | ||
264 | static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) | ||
265 | { | ||
266 | struct net_device *dev; | ||
267 | |||
268 | if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { | ||
269 | char name[IFNAMSIZ + 1]; | ||
270 | nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME], | ||
271 | sizeof(name)); | ||
272 | dev = dev_get_by_name(&init_net, name); | ||
273 | } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) | ||
274 | dev = dev_get_by_index(&init_net, | ||
275 | nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); | ||
276 | else | ||
277 | return NULL; | ||
278 | |||
279 | if (!dev || dev->type != ARPHRD_IEEE802154) { | ||
280 | if (dev) | ||
281 | dev_put(dev); | ||
282 | return NULL; | ||
283 | } | ||
284 | return dev; | ||
285 | } | ||
286 | |||
287 | static int ieee802154_associate_req(struct sk_buff *skb, | ||
288 | struct genl_info *info) | ||
289 | { | ||
290 | struct net_device *dev; | ||
291 | struct ieee802154_addr addr; | ||
292 | int ret = -EINVAL; | ||
293 | |||
294 | if (!info->attrs[IEEE802154_ATTR_CHANNEL] || | ||
295 | !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || | ||
296 | (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] && | ||
297 | !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) || | ||
298 | !info->attrs[IEEE802154_ATTR_CAPABILITY]) | ||
299 | return -EINVAL; | ||
300 | |||
301 | dev = ieee802154_nl_get_dev(info); | ||
302 | if (!dev) | ||
303 | return -ENODEV; | ||
304 | |||
305 | if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) { | ||
306 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
307 | nla_memcpy(addr.hwaddr, | ||
308 | info->attrs[IEEE802154_ATTR_COORD_HW_ADDR], | ||
309 | IEEE802154_ADDR_LEN); | ||
310 | } else { | ||
311 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
312 | addr.short_addr = nla_get_u16( | ||
313 | info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); | ||
314 | } | ||
315 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | ||
316 | |||
317 | ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, | ||
318 | nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), | ||
319 | nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); | ||
320 | |||
321 | dev_put(dev); | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | static int ieee802154_associate_resp(struct sk_buff *skb, | ||
326 | struct genl_info *info) | ||
327 | { | ||
328 | struct net_device *dev; | ||
329 | struct ieee802154_addr addr; | ||
330 | int ret = -EINVAL; | ||
331 | |||
332 | if (!info->attrs[IEEE802154_ATTR_STATUS] || | ||
333 | !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] || | ||
334 | !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) | ||
335 | return -EINVAL; | ||
336 | |||
337 | dev = ieee802154_nl_get_dev(info); | ||
338 | if (!dev) | ||
339 | return -ENODEV; | ||
340 | |||
341 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
342 | nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], | ||
343 | IEEE802154_ADDR_LEN); | ||
344 | addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
345 | |||
346 | |||
347 | ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, | ||
348 | nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), | ||
349 | nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS])); | ||
350 | |||
351 | dev_put(dev); | ||
352 | return ret; | ||
353 | } | ||
354 | |||
355 | static int ieee802154_disassociate_req(struct sk_buff *skb, | ||
356 | struct genl_info *info) | ||
357 | { | ||
358 | struct net_device *dev; | ||
359 | struct ieee802154_addr addr; | ||
360 | int ret = -EINVAL; | ||
361 | |||
362 | if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] && | ||
363 | !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) || | ||
364 | !info->attrs[IEEE802154_ATTR_REASON]) | ||
365 | return -EINVAL; | ||
366 | |||
367 | dev = ieee802154_nl_get_dev(info); | ||
368 | if (!dev) | ||
369 | return -ENODEV; | ||
370 | |||
371 | if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) { | ||
372 | addr.addr_type = IEEE802154_ADDR_LONG; | ||
373 | nla_memcpy(addr.hwaddr, | ||
374 | info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], | ||
375 | IEEE802154_ADDR_LEN); | ||
376 | } else { | ||
377 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
378 | addr.short_addr = nla_get_u16( | ||
379 | info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); | ||
380 | } | ||
381 | addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); | ||
382 | |||
383 | ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, | ||
384 | nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); | ||
385 | |||
386 | dev_put(dev); | ||
387 | return ret; | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * PANid, channel, beacon_order = 15, superframe_order = 15, | ||
392 | * PAN_coordinator, battery_life_extension = 0, | ||
393 | * coord_realignment = 0, security_enable = 0 | ||
394 | */ | ||
395 | static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) | ||
396 | { | ||
397 | struct net_device *dev; | ||
398 | struct ieee802154_addr addr; | ||
399 | |||
400 | u8 channel, bcn_ord, sf_ord; | ||
401 | int pan_coord, blx, coord_realign; | ||
402 | int ret; | ||
403 | |||
404 | if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || | ||
405 | !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] || | ||
406 | !info->attrs[IEEE802154_ATTR_CHANNEL] || | ||
407 | !info->attrs[IEEE802154_ATTR_BCN_ORD] || | ||
408 | !info->attrs[IEEE802154_ATTR_SF_ORD] || | ||
409 | !info->attrs[IEEE802154_ATTR_PAN_COORD] || | ||
410 | !info->attrs[IEEE802154_ATTR_BAT_EXT] || | ||
411 | !info->attrs[IEEE802154_ATTR_COORD_REALIGN] | ||
412 | ) | ||
413 | return -EINVAL; | ||
414 | |||
415 | dev = ieee802154_nl_get_dev(info); | ||
416 | if (!dev) | ||
417 | return -ENODEV; | ||
418 | |||
419 | addr.addr_type = IEEE802154_ADDR_SHORT; | ||
420 | addr.short_addr = nla_get_u16( | ||
421 | info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); | ||
422 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | ||
423 | |||
424 | channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]); | ||
425 | bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]); | ||
426 | sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]); | ||
427 | pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]); | ||
428 | blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); | ||
429 | coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); | ||
430 | |||
431 | ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, | ||
432 | bcn_ord, sf_ord, pan_coord, blx, coord_realign); | ||
433 | |||
434 | dev_put(dev); | ||
435 | return ret; | ||
436 | } | ||
437 | |||
438 | static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) | ||
439 | { | ||
440 | struct net_device *dev; | ||
441 | int ret; | ||
442 | u8 type; | ||
443 | u32 channels; | ||
444 | u8 duration; | ||
445 | |||
446 | if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || | ||
447 | !info->attrs[IEEE802154_ATTR_CHANNELS] || | ||
448 | !info->attrs[IEEE802154_ATTR_DURATION]) | ||
449 | return -EINVAL; | ||
450 | |||
451 | dev = ieee802154_nl_get_dev(info); | ||
452 | if (!dev) | ||
453 | return -ENODEV; | ||
454 | |||
455 | type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]); | ||
456 | channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); | ||
457 | duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); | ||
458 | |||
459 | ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, | ||
460 | duration); | ||
461 | |||
462 | dev_put(dev); | ||
463 | return ret; | ||
464 | } | ||
465 | |||
466 | #define IEEE802154_OP(_cmd, _func) \ | ||
467 | { \ | ||
468 | .cmd = _cmd, \ | ||
469 | .policy = ieee802154_policy, \ | ||
470 | .doit = _func, \ | ||
471 | .dumpit = NULL, \ | ||
472 | .flags = GENL_ADMIN_PERM, \ | ||
473 | } | ||
474 | |||
475 | static struct genl_ops ieee802154_coordinator_ops[] = { | ||
476 | IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), | ||
477 | IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), | ||
478 | IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), | ||
479 | IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), | ||
480 | IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), | ||
481 | }; | ||
482 | |||
483 | static int __init ieee802154_nl_init(void) | ||
484 | { | ||
485 | int rc; | ||
486 | int i; | ||
487 | |||
488 | rc = genl_register_family(&ieee802154_coordinator_family); | ||
489 | if (rc) | ||
490 | goto fail; | ||
491 | |||
492 | rc = genl_register_mc_group(&ieee802154_coordinator_family, | ||
493 | &ieee802154_coord_mcgrp); | ||
494 | if (rc) | ||
495 | goto fail; | ||
496 | |||
497 | rc = genl_register_mc_group(&ieee802154_coordinator_family, | ||
498 | &ieee802154_beacon_mcgrp); | ||
499 | if (rc) | ||
500 | goto fail; | ||
501 | |||
502 | |||
503 | for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) { | ||
504 | rc = genl_register_ops(&ieee802154_coordinator_family, | ||
505 | &ieee802154_coordinator_ops[i]); | ||
506 | if (rc) | ||
507 | goto fail; | ||
508 | } | ||
509 | |||
510 | return 0; | ||
511 | |||
512 | fail: | ||
513 | genl_unregister_family(&ieee802154_coordinator_family); | ||
514 | return rc; | ||
515 | } | ||
516 | module_init(ieee802154_nl_init); | ||
517 | |||
518 | static void __exit ieee802154_nl_exit(void) | ||
519 | { | ||
520 | genl_unregister_family(&ieee802154_coordinator_family); | ||
521 | } | ||
522 | module_exit(ieee802154_nl_exit); | ||
523 | |||
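
The exported ieee802154_nl_* helpers are the driver-facing half of this interface: MAC or driver code calls them to broadcast MLME indications and confirmations to the coordinator multicast group. For instance, a driver that has just parsed a beacon might do (sketch; my_... names are hypothetical):

    static void my_handle_beacon(struct net_device *dev,
                                 u16 src_pan_id, u16 coord_short_addr)
    {
        /* Emits IEEE802154_BEACON_NOTIFY_INDIC to listeners of the
         * IEEE802154_MCAST_COORD_NAME genetlink group. */
        ieee802154_nl_beacon_indic(dev, src_pan_id, coord_short_addr);
    }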
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c new file mode 100644 index 000000000000..c7d71d1adcac --- /dev/null +++ b/net/ieee802154/nl_policy.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Netlink attribute policy for the IEEE 802.15.4 stack | ||
3 | * | ||
4 | * Copyright (C) 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <net/netlink.h> | ||
23 | #include <linux/nl802154.h> | ||
24 | |||
25 | #define NLA_HW_ADDR NLA_U64 | ||
26 | |||
27 | struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { | ||
28 | [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, | ||
29 | [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, | ||
30 | |||
31 | [IEEE802154_ATTR_STATUS] = { .type = NLA_U8, }, | ||
32 | [IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, }, | ||
33 | [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, }, | ||
34 | [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, }, | ||
35 | [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, }, | ||
36 | [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, }, | ||
37 | [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, }, | ||
38 | [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, }, | ||
39 | [IEEE802154_ATTR_SRC_SHORT_ADDR] = { .type = NLA_U16, }, | ||
40 | [IEEE802154_ATTR_SRC_HW_ADDR] = { .type = NLA_HW_ADDR, }, | ||
41 | [IEEE802154_ATTR_SRC_PAN_ID] = { .type = NLA_U16, }, | ||
42 | [IEEE802154_ATTR_DEST_SHORT_ADDR] = { .type = NLA_U16, }, | ||
43 | [IEEE802154_ATTR_DEST_HW_ADDR] = { .type = NLA_HW_ADDR, }, | ||
44 | [IEEE802154_ATTR_DEST_PAN_ID] = { .type = NLA_U16, }, | ||
45 | |||
46 | [IEEE802154_ATTR_CAPABILITY] = { .type = NLA_U8, }, | ||
47 | [IEEE802154_ATTR_REASON] = { .type = NLA_U8, }, | ||
48 | [IEEE802154_ATTR_SCAN_TYPE] = { .type = NLA_U8, }, | ||
49 | [IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, }, | ||
50 | [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, | ||
51 | [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, | ||
52 | }; | ||
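
Every genl_ops entry in netlink.c points .policy at this table, so generic netlink validates attribute types and lengths before any doit() handler runs. Handlers can therefore use the nla_get_*() accessors directly; only attribute presence still needs explicit checks, which is what the !info->attrs[...] tests above are for:

    /* Inside a doit() handler, after policy validation: */
    u16 pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
    u8 channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
    /* info->attrs[i] may still be NULL if the attribute was omitted. */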
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c new file mode 100644 index 000000000000..fca44d59f97e --- /dev/null +++ b/net/ieee802154/raw.c | |||
@@ -0,0 +1,254 @@ | |||
1 | /* | ||
2 | * Raw IEEE 802.15.4 sockets | ||
3 | * | ||
4 | * Copyright 2007, 2008 Siemens AG | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | * | ||
19 | * Written by: | ||
20 | * Sergey Lapin <slapin@ossfans.org> | ||
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | ||
22 | */ | ||
23 | |||
24 | #include <linux/net.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/if_arp.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <net/sock.h> | ||
29 | #include <net/ieee802154/af_ieee802154.h> | ||
30 | |||
31 | #include "af802154.h" | ||
32 | |||
33 | static HLIST_HEAD(raw_head); | ||
34 | static DEFINE_RWLOCK(raw_lock); | ||
35 | |||
36 | static void raw_hash(struct sock *sk) | ||
37 | { | ||
38 | write_lock_bh(&raw_lock); | ||
39 | sk_add_node(sk, &raw_head); | ||
40 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | ||
41 | write_unlock_bh(&raw_lock); | ||
42 | } | ||
43 | |||
44 | static void raw_unhash(struct sock *sk) | ||
45 | { | ||
46 | write_lock_bh(&raw_lock); | ||
47 | if (sk_del_node_init(sk)) | ||
48 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | ||
49 | write_unlock_bh(&raw_lock); | ||
50 | } | ||
51 | |||
52 | static void raw_close(struct sock *sk, long timeout) | ||
53 | { | ||
54 | sk_common_release(sk); | ||
55 | } | ||
56 | |||
57 | static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len) | ||
58 | { | ||
59 | struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; | ||
60 | int err = 0; | ||
61 | struct net_device *dev = NULL; | ||
62 | |||
63 | if (len < sizeof(*addr)) | ||
64 | return -EINVAL; | ||
65 | |||
66 | if (addr->family != AF_IEEE802154) | ||
67 | return -EINVAL; | ||
68 | |||
69 | lock_sock(sk); | ||
70 | |||
71 | dev = ieee802154_get_dev(sock_net(sk), &addr->addr); | ||
72 | if (!dev) { | ||
73 | err = -ENODEV; | ||
74 | goto out; | ||
75 | } | ||
76 | |||
77 | if (dev->type != ARPHRD_IEEE802154_PHY && | ||
78 | dev->type != ARPHRD_IEEE802154) { | ||
79 | err = -ENODEV; | ||
80 | goto out_put; | ||
81 | } | ||
82 | |||
83 | sk->sk_bound_dev_if = dev->ifindex; | ||
84 | sk_dst_reset(sk); | ||
85 | |||
86 | out_put: | ||
87 | dev_put(dev); | ||
88 | out: | ||
89 | release_sock(sk); | ||
90 | |||
91 | return err; | ||
92 | } | ||
93 | |||
94 | static int raw_connect(struct sock *sk, struct sockaddr *uaddr, | ||
95 | int addr_len) | ||
96 | { | ||
97 | return -ENOTSUPP; | ||
98 | } | ||
99 | |||
100 | static int raw_disconnect(struct sock *sk, int flags) | ||
101 | { | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | ||
106 | size_t size) | ||
107 | { | ||
108 | struct net_device *dev; | ||
109 | unsigned mtu; | ||
110 | struct sk_buff *skb; | ||
111 | int err; | ||
112 | |||
113 | if (msg->msg_flags & MSG_OOB) { | ||
114 | pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags); | ||
115 | return -EOPNOTSUPP; | ||
116 | } | ||
117 | |||
118 | lock_sock(sk); | ||
119 | if (!sk->sk_bound_dev_if) | ||
120 | dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); | ||
121 | else | ||
122 | dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); | ||
123 | release_sock(sk); | ||
124 | |||
125 | if (!dev) { | ||
126 | pr_debug("no dev\n"); | ||
127 | err = -ENXIO; | ||
128 | goto out; | ||
129 | } | ||
130 | |||
131 | mtu = dev->mtu; | ||
132 | pr_debug("name = %s, mtu = %u\n", dev->name, mtu); | ||
133 | |||
134 | if (size > mtu) { | ||
135 | pr_debug("size = %zu, mtu = %u\n", size, mtu); | ||
136 | err = -EINVAL; | ||
137 | goto out_dev; | ||
138 | } | ||
139 | |||
140 | skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, | ||
141 | msg->msg_flags & MSG_DONTWAIT, &err); | ||
142 | if (!skb) | ||
143 | goto out_dev; | ||
144 | |||
145 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | ||
146 | |||
147 | skb_reset_mac_header(skb); | ||
148 | skb_reset_network_header(skb); | ||
149 | |||
150 | err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); | ||
151 | if (err < 0) | ||
152 | goto out_skb; | ||
153 | |||
154 | skb->dev = dev; | ||
155 | skb->sk = sk; | ||
156 | skb->protocol = htons(ETH_P_IEEE802154); | ||
157 | |||
158 | dev_put(dev); | ||
159 | |||
160 | err = dev_queue_xmit(skb); | ||
161 | if (err > 0) | ||
162 | err = net_xmit_errno(err); | ||
163 | |||
164 | return err ?: size; | ||
165 | |||
166 | out_skb: | ||
167 | kfree_skb(skb); | ||
168 | out_dev: | ||
169 | dev_put(dev); | ||
170 | out: | ||
171 | return err; | ||
172 | } | ||
173 | |||
174 | static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | ||
175 | size_t len, int noblock, int flags, int *addr_len) | ||
176 | { | ||
177 | size_t copied = 0; | ||
178 | int err = -EOPNOTSUPP; | ||
179 | struct sk_buff *skb; | ||
180 | |||
181 | skb = skb_recv_datagram(sk, flags, noblock, &err); | ||
182 | if (!skb) | ||
183 | goto out; | ||
184 | |||
185 | copied = skb->len; | ||
186 | if (len < copied) { | ||
187 | msg->msg_flags |= MSG_TRUNC; | ||
188 | copied = len; | ||
189 | } | ||
190 | |||
191 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
192 | if (err) | ||
193 | goto done; | ||
194 | |||
195 | sock_recv_timestamp(msg, sk, skb); | ||
196 | |||
197 | if (flags & MSG_TRUNC) | ||
198 | copied = skb->len; | ||
199 | done: | ||
200 | skb_free_datagram(sk, skb); | ||
201 | out: | ||
202 | if (err) | ||
203 | return err; | ||
204 | return copied; | ||
205 | } | ||
206 | |||
207 | static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
208 | { | ||
209 | if (sock_queue_rcv_skb(sk, skb) < 0) { | ||
210 | atomic_inc(&sk->sk_drops); | ||
211 | kfree_skb(skb); | ||
212 | return NET_RX_DROP; | ||
213 | } | ||
214 | |||
215 | return NET_RX_SUCCESS; | ||
216 | } | ||
217 | |||
218 | |||
219 | void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) | ||
220 | { | ||
221 | struct sock *sk; | ||
222 | struct hlist_node *node; | ||
223 | |||
224 | read_lock(&raw_lock); | ||
225 | sk_for_each(sk, node, &raw_head) { | ||
226 | bh_lock_sock(sk); | ||
227 | if (!sk->sk_bound_dev_if || | ||
228 | sk->sk_bound_dev_if == dev->ifindex) { | ||
229 | |||
230 | struct sk_buff *clone; | ||
231 | |||
232 | clone = skb_clone(skb, GFP_ATOMIC); | ||
233 | if (clone) | ||
234 | raw_rcv_skb(sk, clone); | ||
235 | } | ||
236 | bh_unlock_sock(sk); | ||
237 | } | ||
238 | read_unlock(&raw_lock); | ||
239 | } | ||
240 | |||
241 | struct proto ieee802154_raw_prot = { | ||
242 | .name = "IEEE-802.15.4-RAW", | ||
243 | .owner = THIS_MODULE, | ||
244 | .obj_size = sizeof(struct sock), | ||
245 | .close = raw_close, | ||
246 | .bind = raw_bind, | ||
247 | .sendmsg = raw_sendmsg, | ||
248 | .recvmsg = raw_recvmsg, | ||
249 | .hash = raw_hash, | ||
250 | .unhash = raw_unhash, | ||
251 | .connect = raw_connect, | ||
252 | .disconnect = raw_disconnect, | ||
253 | }; | ||
254 | |||
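For context, a minimal userspace sketch of driving the raw socket registered above. Everything in it is illustrative rather than quoted from the patch: the PF_IEEE802154 fallback value, the payload bytes, and the assumption that an ARPHRD_IEEE802154 interface is up. Since raw_sendmsg() never reads msg_name and falls back to the first 802.15.4 device when the socket is unbound, a plain send() is enough:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>

	#ifndef PF_IEEE802154
	#define PF_IEEE802154 36	/* assumption: family number from this series */
	#endif

	int main(void)
	{
		unsigned char frame[] = { 0x01, 0x80, 0x42 };	/* illustrative bytes only */
		int fd = socket(PF_IEEE802154, SOCK_RAW, 0);

		if (fd < 0) {
			perror("socket");	/* family unknown or module not loaded */
			return 1;
		}
		/* Unbound socket: raw_sendmsg() picks the first 802.15.4 device,
		 * so expect ENXIO here when no such interface exists. */
		if (send(fd, frame, sizeof(frame), 0) < 0)
			perror("send");
		close(fd);
		return 0;
	}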
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 5abee4c97449..566ea6c4321d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -116,7 +116,6 @@ | |||
116 | #include <linux/mroute.h> | 116 | #include <linux/mroute.h> |
117 | #endif | 117 | #endif |
118 | 118 | ||
119 | extern void ip_mc_drop_socket(struct sock *sk); | ||
120 | 119 | ||
121 | /* The inetsw table contains everything that inet_create needs to | 120 | /* The inetsw table contains everything that inet_create needs to |
122 | * build a new socket. | 121 | * build a new socket. |
@@ -375,6 +374,7 @@ lookup_protocol: | |||
375 | inet->uc_ttl = -1; | 374 | inet->uc_ttl = -1; |
376 | inet->mc_loop = 1; | 375 | inet->mc_loop = 1; |
377 | inet->mc_ttl = 1; | 376 | inet->mc_ttl = 1; |
377 | inet->mc_all = 1; | ||
378 | inet->mc_index = 0; | 378 | inet->mc_index = 0; |
379 | inet->mc_list = NULL; | 379 | inet->mc_list = NULL; |
380 | 380 | ||
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index f11931c18381..8a3881e28aca 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -468,13 +468,13 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb) | |||
468 | __be32 paddr; | 468 | __be32 paddr; |
469 | struct neighbour *n; | 469 | struct neighbour *n; |
470 | 470 | ||
471 | if (!skb->dst) { | 471 | if (!skb_dst(skb)) { |
472 | printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); | 472 | printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); |
473 | kfree_skb(skb); | 473 | kfree_skb(skb); |
474 | return 1; | 474 | return 1; |
475 | } | 475 | } |
476 | 476 | ||
477 | paddr = skb->rtable->rt_gateway; | 477 | paddr = skb_rtable(skb)->rt_gateway; |
478 | 478 | ||
479 | if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) | 479 | if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) |
480 | return 0; | 480 | return 0; |
@@ -817,7 +817,7 @@ static int arp_process(struct sk_buff *skb) | |||
817 | if (arp->ar_op == htons(ARPOP_REQUEST) && | 817 | if (arp->ar_op == htons(ARPOP_REQUEST) && |
818 | ip_route_input(skb, tip, sip, 0, dev) == 0) { | 818 | ip_route_input(skb, tip, sip, 0, dev) == 0) { |
819 | 819 | ||
820 | rt = skb->rtable; | 820 | rt = skb_rtable(skb); |
821 | addr_type = rt->rt_type; | 821 | addr_type = rt->rt_type; |
822 | 822 | ||
823 | if (addr_type == RTN_LOCAL) { | 823 | if (addr_type == RTN_LOCAL) { |
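The arp.c hunks above and most of the net/ipv4 hunks below are one mechanical conversion: direct skb->dst and skb->rtable dereferences become skb_dst(), skb_dst_set(), skb_dst_drop() and skb_rtable() calls. A sketch of what those accessors presumably look like, inferred from the call sites in this diff rather than quoted from skbuff.h (the _skb_dst storage field name is an assumption):

	/* Sketch only: the accessor layer these hunks convert to. */
	static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
	{
		return (struct dst_entry *)skb->_skb_dst;	/* assumed field name */
	}

	static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
	{
		skb->_skb_dst = (unsigned long)dst;
	}

	static inline void skb_dst_drop(struct sk_buff *skb)
	{
		/* Replaces the open-coded dst_release(skb->dst); skb->dst = NULL; */
		dst_release(skb_dst(skb));
		skb_dst_set(skb, NULL);
	}

	static inline struct rtable *skb_rtable(const struct sk_buff *skb)
	{
		/* Valid because struct rtable embeds its dst_entry first. */
		return (struct rtable *)skb_dst(skb);
	}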
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 3f50807237e0..97c410e84388 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -356,7 +356,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
356 | static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | 356 | static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) |
357 | { | 357 | { |
358 | struct ipcm_cookie ipc; | 358 | struct ipcm_cookie ipc; |
359 | struct rtable *rt = skb->rtable; | 359 | struct rtable *rt = skb_rtable(skb); |
360 | struct net *net = dev_net(rt->u.dst.dev); | 360 | struct net *net = dev_net(rt->u.dst.dev); |
361 | struct sock *sk; | 361 | struct sock *sk; |
362 | struct inet_sock *inet; | 362 | struct inet_sock *inet; |
@@ -416,7 +416,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
416 | struct iphdr *iph; | 416 | struct iphdr *iph; |
417 | int room; | 417 | int room; |
418 | struct icmp_bxm icmp_param; | 418 | struct icmp_bxm icmp_param; |
419 | struct rtable *rt = skb_in->rtable; | 419 | struct rtable *rt = skb_rtable(skb_in); |
420 | struct ipcm_cookie ipc; | 420 | struct ipcm_cookie ipc; |
421 | __be32 saddr; | 421 | __be32 saddr; |
422 | u8 tos; | 422 | u8 tos; |
@@ -591,13 +591,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
591 | goto relookup_failed; | 591 | goto relookup_failed; |
592 | 592 | ||
593 | /* Ugh! */ | 593 | /* Ugh! */ |
594 | odst = skb_in->dst; | 594 | odst = skb_dst(skb_in); |
595 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, | 595 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, |
596 | RT_TOS(tos), rt2->u.dst.dev); | 596 | RT_TOS(tos), rt2->u.dst.dev); |
597 | 597 | ||
598 | dst_release(&rt2->u.dst); | 598 | dst_release(&rt2->u.dst); |
599 | rt2 = skb_in->rtable; | 599 | rt2 = skb_rtable(skb_in); |
600 | skb_in->dst = odst; | 600 | skb_dst_set(skb_in, odst); |
601 | } | 601 | } |
602 | 602 | ||
603 | if (err) | 603 | if (err) |
@@ -659,7 +659,7 @@ static void icmp_unreach(struct sk_buff *skb) | |||
659 | u32 info = 0; | 659 | u32 info = 0; |
660 | struct net *net; | 660 | struct net *net; |
661 | 661 | ||
662 | net = dev_net(skb->dst->dev); | 662 | net = dev_net(skb_dst(skb)->dev); |
663 | 663 | ||
664 | /* | 664 | /* |
665 | * Incomplete header ? | 665 | * Incomplete header ? |
@@ -822,7 +822,7 @@ static void icmp_echo(struct sk_buff *skb) | |||
822 | { | 822 | { |
823 | struct net *net; | 823 | struct net *net; |
824 | 824 | ||
825 | net = dev_net(skb->dst->dev); | 825 | net = dev_net(skb_dst(skb)->dev); |
826 | if (!net->ipv4.sysctl_icmp_echo_ignore_all) { | 826 | if (!net->ipv4.sysctl_icmp_echo_ignore_all) { |
827 | struct icmp_bxm icmp_param; | 827 | struct icmp_bxm icmp_param; |
828 | 828 | ||
@@ -873,7 +873,7 @@ static void icmp_timestamp(struct sk_buff *skb) | |||
873 | out: | 873 | out: |
874 | return; | 874 | return; |
875 | out_err: | 875 | out_err: |
876 | ICMP_INC_STATS_BH(dev_net(skb->dst->dev), ICMP_MIB_INERRORS); | 876 | ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); |
877 | goto out; | 877 | goto out; |
878 | } | 878 | } |
879 | 879 | ||
@@ -926,7 +926,7 @@ static void icmp_address(struct sk_buff *skb) | |||
926 | 926 | ||
927 | static void icmp_address_reply(struct sk_buff *skb) | 927 | static void icmp_address_reply(struct sk_buff *skb) |
928 | { | 928 | { |
929 | struct rtable *rt = skb->rtable; | 929 | struct rtable *rt = skb_rtable(skb); |
930 | struct net_device *dev = skb->dev; | 930 | struct net_device *dev = skb->dev; |
931 | struct in_device *in_dev; | 931 | struct in_device *in_dev; |
932 | struct in_ifaddr *ifa; | 932 | struct in_ifaddr *ifa; |
@@ -970,7 +970,7 @@ static void icmp_discard(struct sk_buff *skb) | |||
970 | int icmp_rcv(struct sk_buff *skb) | 970 | int icmp_rcv(struct sk_buff *skb) |
971 | { | 971 | { |
972 | struct icmphdr *icmph; | 972 | struct icmphdr *icmph; |
973 | struct rtable *rt = skb->rtable; | 973 | struct rtable *rt = skb_rtable(skb); |
974 | struct net *net = dev_net(rt->u.dst.dev); | 974 | struct net *net = dev_net(rt->u.dst.dev); |
975 | 975 | ||
976 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 976 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 9eb6219af615..01b4284ed694 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -311,7 +311,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
311 | return NULL; | 311 | return NULL; |
312 | } | 312 | } |
313 | 313 | ||
314 | skb->dst = &rt->u.dst; | 314 | skb_dst_set(skb, &rt->u.dst); |
315 | skb->dev = dev; | 315 | skb->dev = dev; |
316 | 316 | ||
317 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 317 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
@@ -659,7 +659,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
659 | return -1; | 659 | return -1; |
660 | } | 660 | } |
661 | 661 | ||
662 | skb->dst = &rt->u.dst; | 662 | skb_dst_set(skb, &rt->u.dst); |
663 | 663 | ||
664 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 664 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
665 | 665 | ||
@@ -948,7 +948,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
948 | case IGMPV2_HOST_MEMBERSHIP_REPORT: | 948 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
949 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 949 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
950 | /* Is it our report looped back? */ | 950 | /* Is it our report looped back? */ |
951 | if (skb->rtable->fl.iif == 0) | 951 | if (skb_rtable(skb)->fl.iif == 0) |
952 | break; | 952 | break; |
953 | /* don't rely on MC router hearing unicast reports */ | 953 | /* don't rely on MC router hearing unicast reports */ |
954 | if (skb->pkt_type == PACKET_MULTICAST || | 954 | if (skb->pkt_type == PACKET_MULTICAST || |
@@ -2196,7 +2196,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif) | |||
2196 | break; | 2196 | break; |
2197 | } | 2197 | } |
2198 | if (!pmc) | 2198 | if (!pmc) |
2199 | return 1; | 2199 | return inet->mc_all; |
2200 | psl = pmc->sflist; | 2200 | psl = pmc->sflist; |
2201 | if (!psl) | 2201 | if (!psl) |
2202 | return pmc->sfmode == MCAST_EXCLUDE; | 2202 | return pmc->sfmode == MCAST_EXCLUDE; |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index df3fe50bbf0d..a2991bc8e32e 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -42,7 +42,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
42 | { | 42 | { |
43 | struct ip_options * opt = &(IPCB(skb)->opt); | 43 | struct ip_options * opt = &(IPCB(skb)->opt); |
44 | 44 | ||
45 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 45 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
46 | 46 | ||
47 | if (unlikely(opt->optlen)) | 47 | if (unlikely(opt->optlen)) |
48 | ip_forward_options(skb); | 48 | ip_forward_options(skb); |
@@ -81,7 +81,7 @@ int ip_forward(struct sk_buff *skb) | |||
81 | if (!xfrm4_route_forward(skb)) | 81 | if (!xfrm4_route_forward(skb)) |
82 | goto drop; | 82 | goto drop; |
83 | 83 | ||
84 | rt = skb->rtable; | 84 | rt = skb_rtable(skb); |
85 | 85 | ||
86 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 86 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
87 | goto sr_failed; | 87 | goto sr_failed; |
@@ -123,7 +123,7 @@ sr_failed: | |||
123 | 123 | ||
124 | too_many_hops: | 124 | too_many_hops: |
125 | /* Tell the sender its packet died... */ | 125 | /* Tell the sender its packet died... */ |
126 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_INHDRERRORS); | 126 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS); |
127 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); | 127 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); |
128 | drop: | 128 | drop: |
129 | kfree_skb(skb); | 129 | kfree_skb(skb); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 7985346653bd..575f9bd51ccd 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -507,7 +507,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
507 | /* If the first fragment is fragmented itself, we split | 507 | /* If the first fragment is fragmented itself, we split |
508 | * it to two chunks: the first with data and paged part | 508 | * it to two chunks: the first with data and paged part |
509 | * and the second, holding only fragments. */ | 509 | * and the second, holding only fragments. */ |
510 | if (skb_shinfo(head)->frag_list) { | 510 | if (skb_has_frags(head)) { |
511 | struct sk_buff *clone; | 511 | struct sk_buff *clone; |
512 | int i, plen = 0; | 512 | int i, plen = 0; |
513 | 513 | ||
@@ -516,7 +516,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
516 | clone->next = head->next; | 516 | clone->next = head->next; |
517 | head->next = clone; | 517 | head->next = clone; |
518 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; | 518 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; |
519 | skb_shinfo(head)->frag_list = NULL; | 519 | skb_frag_list_init(head); |
520 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) | 520 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) |
521 | plen += skb_shinfo(head)->frags[i].size; | 521 | plen += skb_shinfo(head)->frags[i].size; |
522 | clone->len = clone->data_len = head->data_len - plen; | 522 | clone->len = clone->data_len = head->data_len - plen; |
@@ -573,7 +573,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
573 | struct ipq *qp; | 573 | struct ipq *qp; |
574 | struct net *net; | 574 | struct net *net; |
575 | 575 | ||
576 | net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); | 576 | net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); |
577 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); | 577 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); |
578 | 578 | ||
579 | /* Start by cleaning up the memory. */ | 579 | /* Start by cleaning up the memory. */ |
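The ip_fragment.c hunks above, like the ip_output.c hunks further down, swap open-coded frag_list manipulation for named helpers. Presumably these are defined along the following lines (inferred from usage here, not quoted from the header):

	static inline int skb_has_frags(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->frag_list != NULL;
	}

	static inline void skb_frag_list_init(struct sk_buff *skb)
	{
		skb_shinfo(skb)->frag_list = NULL;
	}

	/* Iterate the fragment chain without touching frag_list directly. */
	#define skb_walk_frags(skb, iter) \
		for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)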
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e62510d5ea5a..44e2a3d2359a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -602,7 +602,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
602 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 602 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
603 | if (ipv4_is_multicast(iph->daddr)) { | 603 | if (ipv4_is_multicast(iph->daddr)) { |
604 | /* Looped back packet, drop it! */ | 604 | /* Looped back packet, drop it! */ |
605 | if (skb->rtable->fl.iif == 0) | 605 | if (skb_rtable(skb)->fl.iif == 0) |
606 | goto drop; | 606 | goto drop; |
607 | stats->multicast++; | 607 | stats->multicast++; |
608 | skb->pkt_type = PACKET_BROADCAST; | 608 | skb->pkt_type = PACKET_BROADCAST; |
@@ -643,8 +643,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
643 | stats->rx_packets++; | 643 | stats->rx_packets++; |
644 | stats->rx_bytes += len; | 644 | stats->rx_bytes += len; |
645 | skb->dev = tunnel->dev; | 645 | skb->dev = tunnel->dev; |
646 | dst_release(skb->dst); | 646 | skb_dst_drop(skb); |
647 | skb->dst = NULL; | ||
648 | nf_reset(skb); | 647 | nf_reset(skb); |
649 | 648 | ||
650 | skb_reset_network_header(skb); | 649 | skb_reset_network_header(skb); |
@@ -698,13 +697,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
698 | if ((dst = tiph->daddr) == 0) { | 697 | if ((dst = tiph->daddr) == 0) { |
699 | /* NBMA tunnel */ | 698 | /* NBMA tunnel */ |
700 | 699 | ||
701 | if (skb->dst == NULL) { | 700 | if (skb_dst(skb) == NULL) { |
702 | stats->tx_fifo_errors++; | 701 | stats->tx_fifo_errors++; |
703 | goto tx_error; | 702 | goto tx_error; |
704 | } | 703 | } |
705 | 704 | ||
706 | if (skb->protocol == htons(ETH_P_IP)) { | 705 | if (skb->protocol == htons(ETH_P_IP)) { |
707 | rt = skb->rtable; | 706 | rt = skb_rtable(skb); |
708 | if ((dst = rt->rt_gateway) == 0) | 707 | if ((dst = rt->rt_gateway) == 0) |
709 | goto tx_error_icmp; | 708 | goto tx_error_icmp; |
710 | } | 709 | } |
@@ -712,7 +711,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
712 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 711 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
713 | struct in6_addr *addr6; | 712 | struct in6_addr *addr6; |
714 | int addr_type; | 713 | int addr_type; |
715 | struct neighbour *neigh = skb->dst->neighbour; | 714 | struct neighbour *neigh = skb_dst(skb)->neighbour; |
716 | 715 | ||
717 | if (neigh == NULL) | 716 | if (neigh == NULL) |
718 | goto tx_error; | 717 | goto tx_error; |
@@ -766,10 +765,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
766 | if (df) | 765 | if (df) |
767 | mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; | 766 | mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; |
768 | else | 767 | else |
769 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 768 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
770 | 769 | ||
771 | if (skb->dst) | 770 | if (skb_dst(skb)) |
772 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 771 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
773 | 772 | ||
774 | if (skb->protocol == htons(ETH_P_IP)) { | 773 | if (skb->protocol == htons(ETH_P_IP)) { |
775 | df |= (old_iph->frag_off&htons(IP_DF)); | 774 | df |= (old_iph->frag_off&htons(IP_DF)); |
@@ -783,14 +782,14 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
783 | } | 782 | } |
784 | #ifdef CONFIG_IPV6 | 783 | #ifdef CONFIG_IPV6 |
785 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 784 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
786 | struct rt6_info *rt6 = (struct rt6_info *)skb->dst; | 785 | struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); |
787 | 786 | ||
788 | if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) { | 787 | if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) { |
789 | if ((tunnel->parms.iph.daddr && | 788 | if ((tunnel->parms.iph.daddr && |
790 | !ipv4_is_multicast(tunnel->parms.iph.daddr)) || | 789 | !ipv4_is_multicast(tunnel->parms.iph.daddr)) || |
791 | rt6->rt6i_dst.plen == 128) { | 790 | rt6->rt6i_dst.plen == 128) { |
792 | rt6->rt6i_flags |= RTF_MODIFIED; | 791 | rt6->rt6i_flags |= RTF_MODIFIED; |
793 | skb->dst->metrics[RTAX_MTU-1] = mtu; | 792 | skb_dst(skb)->metrics[RTAX_MTU-1] = mtu; |
794 | } | 793 | } |
795 | } | 794 | } |
796 | 795 | ||
@@ -837,8 +836,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
837 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 836 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
838 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 837 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
839 | IPSKB_REROUTED); | 838 | IPSKB_REROUTED); |
840 | dst_release(skb->dst); | 839 | skb_dst_drop(skb); |
841 | skb->dst = &rt->u.dst; | 840 | skb_dst_set(skb, &rt->u.dst); |
842 | 841 | ||
843 | /* | 842 | /* |
844 | * Push down and install the IPIP header. | 843 | * Push down and install the IPIP header. |
@@ -1238,6 +1237,7 @@ static void ipgre_tunnel_setup(struct net_device *dev) | |||
1238 | dev->iflink = 0; | 1237 | dev->iflink = 0; |
1239 | dev->addr_len = 4; | 1238 | dev->addr_len = 4; |
1240 | dev->features |= NETIF_F_NETNS_LOCAL; | 1239 | dev->features |= NETIF_F_NETNS_LOCAL; |
1240 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
1241 | } | 1241 | } |
1242 | 1242 | ||
1243 | static int ipgre_tunnel_init(struct net_device *dev) | 1243 | static int ipgre_tunnel_init(struct net_device *dev) |
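ipgre_tunnel_setup() now clears IFF_XMIT_DST_RELEASE. The flag presumably lets the core transmit path drop the route attached to an skb as early as possible; a tunnel must opt out because ipgre_tunnel_xmit() above still reads skb_dst()/skb_rtable() to route and PMTU-clamp the inner packet. A hedged sketch of the gate, with the shape inferred from the flag name and these call sites rather than from the actual core code:

	static inline void xmit_maybe_release_dst(struct sk_buff *skb,
						  struct net_device *dev)
	{
		/* Ordinary devices never look at the route again, so the
		 * core can drop it before queueing; tunnels keep it. */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);
	}

The same opt-out appears in the ipip.c hunk further down.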
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 40f6206b2aa9..490ce20faf38 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -329,7 +329,7 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
329 | * Initialise the virtual path cache for the packet. It describes | 329 | * Initialise the virtual path cache for the packet. It describes |
330 | * how the packet travels inside Linux networking. | 330 | * how the packet travels inside Linux networking. |
331 | */ | 331 | */ |
332 | if (skb->dst == NULL) { | 332 | if (skb_dst(skb) == NULL) { |
333 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 333 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
334 | skb->dev); | 334 | skb->dev); |
335 | if (unlikely(err)) { | 335 | if (unlikely(err)) { |
@@ -344,9 +344,9 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
344 | } | 344 | } |
345 | 345 | ||
346 | #ifdef CONFIG_NET_CLS_ROUTE | 346 | #ifdef CONFIG_NET_CLS_ROUTE |
347 | if (unlikely(skb->dst->tclassid)) { | 347 | if (unlikely(skb_dst(skb)->tclassid)) { |
348 | struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); | 348 | struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); |
349 | u32 idx = skb->dst->tclassid; | 349 | u32 idx = skb_dst(skb)->tclassid; |
350 | st[idx&0xFF].o_packets++; | 350 | st[idx&0xFF].o_packets++; |
351 | st[idx&0xFF].o_bytes += skb->len; | 351 | st[idx&0xFF].o_bytes += skb->len; |
352 | st[(idx>>16)&0xFF].i_packets++; | 352 | st[(idx>>16)&0xFF].i_packets++; |
@@ -357,7 +357,7 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
357 | if (iph->ihl > 5 && ip_rcv_options(skb)) | 357 | if (iph->ihl > 5 && ip_rcv_options(skb)) |
358 | goto drop; | 358 | goto drop; |
359 | 359 | ||
360 | rt = skb->rtable; | 360 | rt = skb_rtable(skb); |
361 | if (rt->rt_type == RTN_MULTICAST) { | 361 | if (rt->rt_type == RTN_MULTICAST) { |
362 | IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, | 362 | IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, |
363 | skb->len); | 363 | skb->len); |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 2c88da6e7862..94bf105ef3c9 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -102,7 +102,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
102 | sptr = skb_network_header(skb); | 102 | sptr = skb_network_header(skb); |
103 | dptr = dopt->__data; | 103 | dptr = dopt->__data; |
104 | 104 | ||
105 | daddr = skb->rtable->rt_spec_dst; | 105 | daddr = skb_rtable(skb)->rt_spec_dst; |
106 | 106 | ||
107 | if (sopt->rr) { | 107 | if (sopt->rr) { |
108 | optlen = sptr[sopt->rr+1]; | 108 | optlen = sptr[sopt->rr+1]; |
@@ -143,7 +143,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
143 | __be32 addr; | 143 | __be32 addr; |
144 | 144 | ||
145 | memcpy(&addr, sptr+soffset-1, 4); | 145 | memcpy(&addr, sptr+soffset-1, 4); |
146 | if (inet_addr_type(dev_net(skb->dst->dev), addr) != RTN_LOCAL) { | 146 | if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_LOCAL) { |
147 | dopt->ts_needtime = 1; | 147 | dopt->ts_needtime = 1; |
148 | soffset += 8; | 148 | soffset += 8; |
149 | } | 149 | } |
@@ -257,7 +257,7 @@ int ip_options_compile(struct net *net, | |||
257 | struct rtable *rt = NULL; | 257 | struct rtable *rt = NULL; |
258 | 258 | ||
259 | if (skb != NULL) { | 259 | if (skb != NULL) { |
260 | rt = skb->rtable; | 260 | rt = skb_rtable(skb); |
261 | optptr = (unsigned char *)&(ip_hdr(skb)[1]); | 261 | optptr = (unsigned char *)&(ip_hdr(skb)[1]); |
262 | } else | 262 | } else |
263 | optptr = opt->__data; | 263 | optptr = opt->__data; |
@@ -550,7 +550,7 @@ void ip_forward_options(struct sk_buff *skb) | |||
550 | { | 550 | { |
551 | struct ip_options * opt = &(IPCB(skb)->opt); | 551 | struct ip_options * opt = &(IPCB(skb)->opt); |
552 | unsigned char * optptr; | 552 | unsigned char * optptr; |
553 | struct rtable *rt = skb->rtable; | 553 | struct rtable *rt = skb_rtable(skb); |
554 | unsigned char *raw = skb_network_header(skb); | 554 | unsigned char *raw = skb_network_header(skb); |
555 | 555 | ||
556 | if (opt->rr_needaddr) { | 556 | if (opt->rr_needaddr) { |
@@ -598,7 +598,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
598 | __be32 nexthop; | 598 | __be32 nexthop; |
599 | struct iphdr *iph = ip_hdr(skb); | 599 | struct iphdr *iph = ip_hdr(skb); |
600 | unsigned char *optptr = skb_network_header(skb) + opt->srr; | 600 | unsigned char *optptr = skb_network_header(skb) + opt->srr; |
601 | struct rtable *rt = skb->rtable; | 601 | struct rtable *rt = skb_rtable(skb); |
602 | struct rtable *rt2; | 602 | struct rtable *rt2; |
603 | int err; | 603 | int err; |
604 | 604 | ||
@@ -623,13 +623,13 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
623 | } | 623 | } |
624 | memcpy(&nexthop, &optptr[srrptr-1], 4); | 624 | memcpy(&nexthop, &optptr[srrptr-1], 4); |
625 | 625 | ||
626 | rt = skb->rtable; | 626 | rt = skb_rtable(skb); |
627 | skb->rtable = NULL; | 627 | skb_dst_set(skb, NULL); |
628 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); | 628 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); |
629 | rt2 = skb->rtable; | 629 | rt2 = skb_rtable(skb); |
630 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { | 630 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { |
631 | ip_rt_put(rt2); | 631 | ip_rt_put(rt2); |
632 | skb->rtable = rt; | 632 | skb_dst_set(skb, &rt->u.dst); |
633 | return -EINVAL; | 633 | return -EINVAL; |
634 | } | 634 | } |
635 | ip_rt_put(rt); | 635 | ip_rt_put(rt); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ea19c37ccc0c..247026282669 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -95,7 +95,7 @@ int __ip_local_out(struct sk_buff *skb) | |||
95 | 95 | ||
96 | iph->tot_len = htons(skb->len); | 96 | iph->tot_len = htons(skb->len); |
97 | ip_send_check(iph); | 97 | ip_send_check(iph); |
98 | return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev, | 98 | return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, |
99 | dst_output); | 99 | dst_output); |
100 | } | 100 | } |
101 | 101 | ||
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) | |||
118 | __skb_pull(newskb, skb_network_offset(newskb)); | 118 | __skb_pull(newskb, skb_network_offset(newskb)); |
119 | newskb->pkt_type = PACKET_LOOPBACK; | 119 | newskb->pkt_type = PACKET_LOOPBACK; |
120 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 120 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
121 | WARN_ON(!newskb->dst); | 121 | WARN_ON(!skb_dst(newskb)); |
122 | netif_rx(newskb); | 122 | netif_rx(newskb); |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
@@ -140,7 +140,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, | |||
140 | __be32 saddr, __be32 daddr, struct ip_options *opt) | 140 | __be32 saddr, __be32 daddr, struct ip_options *opt) |
141 | { | 141 | { |
142 | struct inet_sock *inet = inet_sk(sk); | 142 | struct inet_sock *inet = inet_sk(sk); |
143 | struct rtable *rt = skb->rtable; | 143 | struct rtable *rt = skb_rtable(skb); |
144 | struct iphdr *iph; | 144 | struct iphdr *iph; |
145 | 145 | ||
146 | /* Build the IP header. */ | 146 | /* Build the IP header. */ |
@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); | |||
176 | 176 | ||
177 | static inline int ip_finish_output2(struct sk_buff *skb) | 177 | static inline int ip_finish_output2(struct sk_buff *skb) |
178 | { | 178 | { |
179 | struct dst_entry *dst = skb->dst; | 179 | struct dst_entry *dst = skb_dst(skb); |
180 | struct rtable *rt = (struct rtable *)dst; | 180 | struct rtable *rt = (struct rtable *)dst; |
181 | struct net_device *dev = dst->dev; | 181 | struct net_device *dev = dst->dev; |
182 | unsigned int hh_len = LL_RESERVED_SPACE(dev); | 182 | unsigned int hh_len = LL_RESERVED_SPACE(dev); |
@@ -217,14 +217,14 @@ static inline int ip_skb_dst_mtu(struct sk_buff *skb) | |||
217 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; | 217 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; |
218 | 218 | ||
219 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? | 219 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? |
220 | skb->dst->dev->mtu : dst_mtu(skb->dst); | 220 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
221 | } | 221 | } |
222 | 222 | ||
223 | static int ip_finish_output(struct sk_buff *skb) | 223 | static int ip_finish_output(struct sk_buff *skb) |
224 | { | 224 | { |
225 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 225 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
226 | /* Policy lookup after SNAT yielded a new policy */ | 226 | /* Policy lookup after SNAT yielded a new policy */ |
227 | if (skb->dst->xfrm != NULL) { | 227 | if (skb_dst(skb)->xfrm != NULL) { |
228 | IPCB(skb)->flags |= IPSKB_REROUTED; | 228 | IPCB(skb)->flags |= IPSKB_REROUTED; |
229 | return dst_output(skb); | 229 | return dst_output(skb); |
230 | } | 230 | } |
@@ -238,7 +238,7 @@ static int ip_finish_output(struct sk_buff *skb) | |||
238 | int ip_mc_output(struct sk_buff *skb) | 238 | int ip_mc_output(struct sk_buff *skb) |
239 | { | 239 | { |
240 | struct sock *sk = skb->sk; | 240 | struct sock *sk = skb->sk; |
241 | struct rtable *rt = skb->rtable; | 241 | struct rtable *rt = skb_rtable(skb); |
242 | struct net_device *dev = rt->u.dst.dev; | 242 | struct net_device *dev = rt->u.dst.dev; |
243 | 243 | ||
244 | /* | 244 | /* |
@@ -296,7 +296,7 @@ int ip_mc_output(struct sk_buff *skb) | |||
296 | 296 | ||
297 | int ip_output(struct sk_buff *skb) | 297 | int ip_output(struct sk_buff *skb) |
298 | { | 298 | { |
299 | struct net_device *dev = skb->dst->dev; | 299 | struct net_device *dev = skb_dst(skb)->dev; |
300 | 300 | ||
301 | IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); | 301 | IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); |
302 | 302 | ||
@@ -319,7 +319,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
319 | /* Skip all of this if the packet is already routed, | 319 | /* Skip all of this if the packet is already routed, |
320 | * f.e. by something like SCTP. | 320 | * f.e. by something like SCTP. |
321 | */ | 321 | */ |
322 | rt = skb->rtable; | 322 | rt = skb_rtable(skb); |
323 | if (rt != NULL) | 323 | if (rt != NULL) |
324 | goto packet_routed; | 324 | goto packet_routed; |
325 | 325 | ||
@@ -355,7 +355,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
355 | } | 355 | } |
356 | sk_setup_caps(sk, &rt->u.dst); | 356 | sk_setup_caps(sk, &rt->u.dst); |
357 | } | 357 | } |
358 | skb->dst = dst_clone(&rt->u.dst); | 358 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
359 | 359 | ||
360 | packet_routed: | 360 | packet_routed: |
361 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 361 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
@@ -401,8 +401,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
401 | to->pkt_type = from->pkt_type; | 401 | to->pkt_type = from->pkt_type; |
402 | to->priority = from->priority; | 402 | to->priority = from->priority; |
403 | to->protocol = from->protocol; | 403 | to->protocol = from->protocol; |
404 | dst_release(to->dst); | 404 | skb_dst_drop(to); |
405 | to->dst = dst_clone(from->dst); | 405 | skb_dst_set(to, dst_clone(skb_dst(from))); |
406 | to->dev = from->dev; | 406 | to->dev = from->dev; |
407 | to->mark = from->mark; | 407 | to->mark = from->mark; |
408 | 408 | ||
@@ -440,7 +440,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
440 | unsigned int mtu, hlen, left, len, ll_rs, pad; | 440 | unsigned int mtu, hlen, left, len, ll_rs, pad; |
441 | int offset; | 441 | int offset; |
442 | __be16 not_last_frag; | 442 | __be16 not_last_frag; |
443 | struct rtable *rt = skb->rtable; | 443 | struct rtable *rt = skb_rtable(skb); |
444 | int err = 0; | 444 | int err = 0; |
445 | 445 | ||
446 | dev = rt->u.dst.dev; | 446 | dev = rt->u.dst.dev; |
@@ -474,7 +474,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
474 | * LATER: this step can be merged to real generation of fragments, | 474 | * LATER: this step can be merged to real generation of fragments, |
475 | * we can switch to copy when see the first bad fragment. | 475 | * we can switch to copy when see the first bad fragment. |
476 | */ | 476 | */ |
477 | if (skb_shinfo(skb)->frag_list) { | 477 | if (skb_has_frags(skb)) { |
478 | struct sk_buff *frag; | 478 | struct sk_buff *frag; |
479 | int first_len = skb_pagelen(skb); | 479 | int first_len = skb_pagelen(skb); |
480 | int truesizes = 0; | 480 | int truesizes = 0; |
@@ -485,7 +485,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
485 | skb_cloned(skb)) | 485 | skb_cloned(skb)) |
486 | goto slow_path; | 486 | goto slow_path; |
487 | 487 | ||
488 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | 488 | skb_walk_frags(skb, frag) { |
489 | /* Correct geometry. */ | 489 | /* Correct geometry. */ |
490 | if (frag->len > mtu || | 490 | if (frag->len > mtu || |
491 | ((frag->len & 7) && frag->next) || | 491 | ((frag->len & 7) && frag->next) || |
@@ -498,7 +498,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
498 | 498 | ||
499 | BUG_ON(frag->sk); | 499 | BUG_ON(frag->sk); |
500 | if (skb->sk) { | 500 | if (skb->sk) { |
501 | sock_hold(skb->sk); | ||
502 | frag->sk = skb->sk; | 501 | frag->sk = skb->sk; |
503 | frag->destructor = sock_wfree; | 502 | frag->destructor = sock_wfree; |
504 | truesizes += frag->truesize; | 503 | truesizes += frag->truesize; |
@@ -510,7 +509,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
510 | err = 0; | 509 | err = 0; |
511 | offset = 0; | 510 | offset = 0; |
512 | frag = skb_shinfo(skb)->frag_list; | 511 | frag = skb_shinfo(skb)->frag_list; |
513 | skb_shinfo(skb)->frag_list = NULL; | 512 | skb_frag_list_init(skb); |
514 | skb->data_len = first_len - skb_headlen(skb); | 513 | skb->data_len = first_len - skb_headlen(skb); |
515 | skb->truesize -= truesizes; | 514 | skb->truesize -= truesizes; |
516 | skb->len = first_len; | 515 | skb->len = first_len; |
@@ -1294,7 +1293,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1294 | * on dst refcount | 1293 | * on dst refcount |
1295 | */ | 1294 | */ |
1296 | inet->cork.dst = NULL; | 1295 | inet->cork.dst = NULL; |
1297 | skb->dst = &rt->u.dst; | 1296 | skb_dst_set(skb, &rt->u.dst); |
1298 | 1297 | ||
1299 | if (iph->protocol == IPPROTO_ICMP) | 1298 | if (iph->protocol == IPPROTO_ICMP) |
1300 | icmp_out_count(net, ((struct icmphdr *) | 1299 | icmp_out_count(net, ((struct icmphdr *) |
@@ -1362,7 +1361,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar | |||
1362 | } replyopts; | 1361 | } replyopts; |
1363 | struct ipcm_cookie ipc; | 1362 | struct ipcm_cookie ipc; |
1364 | __be32 daddr; | 1363 | __be32 daddr; |
1365 | struct rtable *rt = skb->rtable; | 1364 | struct rtable *rt = skb_rtable(skb); |
1366 | 1365 | ||
1367 | if (ip_options_echo(&replyopts.opt, skb)) | 1366 | if (ip_options_echo(&replyopts.opt, skb)) |
1368 | return; | 1367 | return; |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 43c05854d752..fc7993e9061f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -57,7 +57,7 @@ | |||
57 | static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) | 57 | static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) |
58 | { | 58 | { |
59 | struct in_pktinfo info; | 59 | struct in_pktinfo info; |
60 | struct rtable *rt = skb->rtable; | 60 | struct rtable *rt = skb_rtable(skb); |
61 | 61 | ||
62 | info.ipi_addr.s_addr = ip_hdr(skb)->daddr; | 62 | info.ipi_addr.s_addr = ip_hdr(skb)->daddr; |
63 | if (rt) { | 63 | if (rt) { |
@@ -157,38 +157,39 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) | |||
157 | /* Ordered by supposed usage frequency */ | 157 | /* Ordered by supposed usage frequency */ |
158 | if (flags & 1) | 158 | if (flags & 1) |
159 | ip_cmsg_recv_pktinfo(msg, skb); | 159 | ip_cmsg_recv_pktinfo(msg, skb); |
160 | if ((flags>>=1) == 0) | 160 | if ((flags >>= 1) == 0) |
161 | return; | 161 | return; |
162 | 162 | ||
163 | if (flags & 1) | 163 | if (flags & 1) |
164 | ip_cmsg_recv_ttl(msg, skb); | 164 | ip_cmsg_recv_ttl(msg, skb); |
165 | if ((flags>>=1) == 0) | 165 | if ((flags >>= 1) == 0) |
166 | return; | 166 | return; |
167 | 167 | ||
168 | if (flags & 1) | 168 | if (flags & 1) |
169 | ip_cmsg_recv_tos(msg, skb); | 169 | ip_cmsg_recv_tos(msg, skb); |
170 | if ((flags>>=1) == 0) | 170 | if ((flags >>= 1) == 0) |
171 | return; | 171 | return; |
172 | 172 | ||
173 | if (flags & 1) | 173 | if (flags & 1) |
174 | ip_cmsg_recv_opts(msg, skb); | 174 | ip_cmsg_recv_opts(msg, skb); |
175 | if ((flags>>=1) == 0) | 175 | if ((flags >>= 1) == 0) |
176 | return; | 176 | return; |
177 | 177 | ||
178 | if (flags & 1) | 178 | if (flags & 1) |
179 | ip_cmsg_recv_retopts(msg, skb); | 179 | ip_cmsg_recv_retopts(msg, skb); |
180 | if ((flags>>=1) == 0) | 180 | if ((flags >>= 1) == 0) |
181 | return; | 181 | return; |
182 | 182 | ||
183 | if (flags & 1) | 183 | if (flags & 1) |
184 | ip_cmsg_recv_security(msg, skb); | 184 | ip_cmsg_recv_security(msg, skb); |
185 | 185 | ||
186 | if ((flags>>=1) == 0) | 186 | if ((flags >>= 1) == 0) |
187 | return; | 187 | return; |
188 | if (flags & 1) | 188 | if (flags & 1) |
189 | ip_cmsg_recv_dstaddr(msg, skb); | 189 | ip_cmsg_recv_dstaddr(msg, skb); |
190 | 190 | ||
191 | } | 191 | } |
192 | EXPORT_SYMBOL(ip_cmsg_recv); | ||
192 | 193 | ||
193 | int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | 194 | int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) |
194 | { | 195 | { |
@@ -203,7 +204,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | |||
203 | switch (cmsg->cmsg_type) { | 204 | switch (cmsg->cmsg_type) { |
204 | case IP_RETOPTS: | 205 | case IP_RETOPTS: |
205 | err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); | 206 | err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); |
206 | err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); | 207 | err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), |
208 | err < 40 ? err : 40); | ||
207 | if (err) | 209 | if (err) |
208 | return err; | 210 | return err; |
209 | break; | 211 | break; |
@@ -238,7 +240,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | |||
238 | struct ip_ra_chain *ip_ra_chain; | 240 | struct ip_ra_chain *ip_ra_chain; |
239 | DEFINE_RWLOCK(ip_ra_lock); | 241 | DEFINE_RWLOCK(ip_ra_lock); |
240 | 242 | ||
241 | int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) | 243 | int ip_ra_control(struct sock *sk, unsigned char on, |
244 | void (*destructor)(struct sock *)) | ||
242 | { | 245 | { |
243 | struct ip_ra_chain *ra, *new_ra, **rap; | 246 | struct ip_ra_chain *ra, *new_ra, **rap; |
244 | 247 | ||
@@ -248,7 +251,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s | |||
248 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 251 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
249 | 252 | ||
250 | write_lock_bh(&ip_ra_lock); | 253 | write_lock_bh(&ip_ra_lock); |
251 | for (rap = &ip_ra_chain; (ra=*rap) != NULL; rap = &ra->next) { | 254 | for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { |
252 | if (ra->sk == sk) { | 255 | if (ra->sk == sk) { |
253 | if (on) { | 256 | if (on) { |
254 | write_unlock_bh(&ip_ra_lock); | 257 | write_unlock_bh(&ip_ra_lock); |
@@ -416,7 +419,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
416 | /* Reset and regenerate socket error */ | 419 | /* Reset and regenerate socket error */ |
417 | spin_lock_bh(&sk->sk_error_queue.lock); | 420 | spin_lock_bh(&sk->sk_error_queue.lock); |
418 | sk->sk_err = 0; | 421 | sk->sk_err = 0; |
419 | if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { | 422 | skb2 = skb_peek(&sk->sk_error_queue); |
423 | if (skb2 != NULL) { | ||
420 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; | 424 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; |
421 | spin_unlock_bh(&sk->sk_error_queue.lock); | 425 | spin_unlock_bh(&sk->sk_error_queue.lock); |
422 | sk->sk_error_report(sk); | 426 | sk->sk_error_report(sk); |
@@ -431,8 +435,8 @@ out: | |||
431 | 435 | ||
432 | 436 | ||
433 | /* | 437 | /* |
434 | * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on | 438 | * Socket option code for IP. This is the end of the line after any |
435 | * an IP socket. | 439 | * TCP,UDP etc options on an IP socket. |
436 | */ | 440 | */ |
437 | 441 | ||
438 | static int do_ip_setsockopt(struct sock *sk, int level, | 442 | static int do_ip_setsockopt(struct sock *sk, int level, |
@@ -449,6 +453,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
449 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | | 453 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | |
450 | (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || | 454 | (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || |
451 | optname == IP_MULTICAST_TTL || | 455 | optname == IP_MULTICAST_TTL || |
456 | optname == IP_MULTICAST_ALL || | ||
452 | optname == IP_MULTICAST_LOOP || | 457 | optname == IP_MULTICAST_LOOP || |
453 | optname == IP_RECVORIGDSTADDR) { | 458 | optname == IP_RECVORIGDSTADDR) { |
454 | if (optlen >= sizeof(int)) { | 459 | if (optlen >= sizeof(int)) { |
@@ -474,7 +479,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
474 | switch (optname) { | 479 | switch (optname) { |
475 | case IP_OPTIONS: | 480 | case IP_OPTIONS: |
476 | { | 481 | { |
477 | struct ip_options * opt = NULL; | 482 | struct ip_options *opt = NULL; |
478 | if (optlen > 40 || optlen < 0) | 483 | if (optlen > 40 || optlen < 0) |
479 | goto e_inval; | 484 | goto e_inval; |
480 | err = ip_options_get_from_user(sock_net(sk), &opt, | 485 | err = ip_options_get_from_user(sock_net(sk), &opt, |
@@ -556,9 +561,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
556 | } | 561 | } |
557 | break; | 562 | break; |
558 | case IP_TTL: | 563 | case IP_TTL: |
559 | if (optlen<1) | 564 | if (optlen < 1) |
560 | goto e_inval; | 565 | goto e_inval; |
561 | if (val != -1 && (val < 1 || val>255)) | 566 | if (val != -1 && (val < 0 || val > 255)) |
562 | goto e_inval; | 567 | goto e_inval; |
563 | inet->uc_ttl = val; | 568 | inet->uc_ttl = val; |
564 | break; | 569 | break; |
@@ -570,7 +575,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
570 | inet->hdrincl = val ? 1 : 0; | 575 | inet->hdrincl = val ? 1 : 0; |
571 | break; | 576 | break; |
572 | case IP_MTU_DISCOVER: | 577 | case IP_MTU_DISCOVER: |
573 | if (val<0 || val>3) | 578 | if (val < 0 || val > 3) |
574 | goto e_inval; | 579 | goto e_inval; |
575 | inet->pmtudisc = val; | 580 | inet->pmtudisc = val; |
576 | break; | 581 | break; |
@@ -582,7 +587,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
582 | case IP_MULTICAST_TTL: | 587 | case IP_MULTICAST_TTL: |
583 | if (sk->sk_type == SOCK_STREAM) | 588 | if (sk->sk_type == SOCK_STREAM) |
584 | goto e_inval; | 589 | goto e_inval; |
585 | if (optlen<1) | 590 | if (optlen < 1) |
586 | goto e_inval; | 591 | goto e_inval; |
587 | if (val == -1) | 592 | if (val == -1) |
588 | val = 1; | 593 | val = 1; |
@@ -591,7 +596,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
591 | inet->mc_ttl = val; | 596 | inet->mc_ttl = val; |
592 | break; | 597 | break; |
593 | case IP_MULTICAST_LOOP: | 598 | case IP_MULTICAST_LOOP: |
594 | if (optlen<1) | 599 | if (optlen < 1) |
595 | goto e_inval; | 600 | goto e_inval; |
596 | inet->mc_loop = !!val; | 601 | inet->mc_loop = !!val; |
597 | break; | 602 | break; |
@@ -613,7 +618,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
613 | } else { | 618 | } else { |
614 | memset(&mreq, 0, sizeof(mreq)); | 619 | memset(&mreq, 0, sizeof(mreq)); |
615 | if (optlen >= sizeof(struct in_addr) && | 620 | if (optlen >= sizeof(struct in_addr) && |
616 | copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) | 621 | copy_from_user(&mreq.imr_address, optval, |
622 | sizeof(struct in_addr))) | ||
617 | break; | 623 | break; |
618 | } | 624 | } |
619 | 625 | ||
@@ -677,7 +683,6 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
677 | } | 683 | } |
678 | case IP_MSFILTER: | 684 | case IP_MSFILTER: |
679 | { | 685 | { |
680 | extern int sysctl_igmp_max_msf; | ||
681 | struct ip_msfilter *msf; | 686 | struct ip_msfilter *msf; |
682 | 687 | ||
683 | if (optlen < IP_MSFILTER_SIZE(0)) | 688 | if (optlen < IP_MSFILTER_SIZE(0)) |
@@ -831,7 +836,6 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
831 | } | 836 | } |
832 | case MCAST_MSFILTER: | 837 | case MCAST_MSFILTER: |
833 | { | 838 | { |
834 | extern int sysctl_igmp_max_msf; | ||
835 | struct sockaddr_in *psin; | 839 | struct sockaddr_in *psin; |
836 | struct ip_msfilter *msf = NULL; | 840 | struct ip_msfilter *msf = NULL; |
837 | struct group_filter *gsf = NULL; | 841 | struct group_filter *gsf = NULL; |
@@ -849,9 +853,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
849 | break; | 853 | break; |
850 | } | 854 | } |
851 | err = -EFAULT; | 855 | err = -EFAULT; |
852 | if (copy_from_user(gsf, optval, optlen)) { | 856 | if (copy_from_user(gsf, optval, optlen)) |
853 | goto mc_msf_out; | 857 | goto mc_msf_out; |
854 | } | 858 | |
855 | /* numsrc >= (4G-140)/128 overflow in 32 bits */ | 859 | /* numsrc >= (4G-140)/128 overflow in 32 bits */ |
856 | if (gsf->gf_numsrc >= 0x1ffffff || | 860 | if (gsf->gf_numsrc >= 0x1ffffff || |
857 | gsf->gf_numsrc > sysctl_igmp_max_msf) { | 861 | gsf->gf_numsrc > sysctl_igmp_max_msf) { |
@@ -879,7 +883,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
879 | msf->imsf_fmode = gsf->gf_fmode; | 883 | msf->imsf_fmode = gsf->gf_fmode; |
880 | msf->imsf_numsrc = gsf->gf_numsrc; | 884 | msf->imsf_numsrc = gsf->gf_numsrc; |
881 | err = -EADDRNOTAVAIL; | 885 | err = -EADDRNOTAVAIL; |
882 | for (i=0; i<gsf->gf_numsrc; ++i) { | 886 | for (i = 0; i < gsf->gf_numsrc; ++i) { |
883 | psin = (struct sockaddr_in *)&gsf->gf_slist[i]; | 887 | psin = (struct sockaddr_in *)&gsf->gf_slist[i]; |
884 | 888 | ||
885 | if (psin->sin_family != AF_INET) | 889 | if (psin->sin_family != AF_INET) |
@@ -890,17 +894,24 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
890 | gsf = NULL; | 894 | gsf = NULL; |
891 | 895 | ||
892 | err = ip_mc_msfilter(sk, msf, ifindex); | 896 | err = ip_mc_msfilter(sk, msf, ifindex); |
893 | mc_msf_out: | 897 | mc_msf_out: |
894 | kfree(msf); | 898 | kfree(msf); |
895 | kfree(gsf); | 899 | kfree(gsf); |
896 | break; | 900 | break; |
897 | } | 901 | } |
902 | case IP_MULTICAST_ALL: | ||
903 | if (optlen < 1) | ||
904 | goto e_inval; | ||
905 | if (val != 0 && val != 1) | ||
906 | goto e_inval; | ||
907 | inet->mc_all = val; | ||
908 | break; | ||
898 | case IP_ROUTER_ALERT: | 909 | case IP_ROUTER_ALERT: |
899 | err = ip_ra_control(sk, val ? 1 : 0, NULL); | 910 | err = ip_ra_control(sk, val ? 1 : 0, NULL); |
900 | break; | 911 | break; |
901 | 912 | ||
902 | case IP_FREEBIND: | 913 | case IP_FREEBIND: |
903 | if (optlen<1) | 914 | if (optlen < 1) |
904 | goto e_inval; | 915 | goto e_inval; |
905 | inet->freebind = !!val; | 916 | inet->freebind = !!val; |
906 | break; | 917 | break; |
@@ -957,6 +968,7 @@ int ip_setsockopt(struct sock *sk, int level, | |||
957 | #endif | 968 | #endif |
958 | return err; | 969 | return err; |
959 | } | 970 | } |
971 | EXPORT_SYMBOL(ip_setsockopt); | ||
960 | 972 | ||
961 | #ifdef CONFIG_COMPAT | 973 | #ifdef CONFIG_COMPAT |
962 | int compat_ip_setsockopt(struct sock *sk, int level, int optname, | 974 | int compat_ip_setsockopt(struct sock *sk, int level, int optname, |
@@ -986,13 +998,12 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, | |||
986 | #endif | 998 | #endif |
987 | return err; | 999 | return err; |
988 | } | 1000 | } |
989 | |||
990 | EXPORT_SYMBOL(compat_ip_setsockopt); | 1001 | EXPORT_SYMBOL(compat_ip_setsockopt); |
991 | #endif | 1002 | #endif |
992 | 1003 | ||
993 | /* | 1004 | /* |
994 | * Get the options. Note for future reference. The GET of IP options gets the | 1005 | * Get the options. Note for future reference. The GET of IP options gets |
995 | * _received_ ones. The set sets the _sent_ ones. | 1006 | * the _received_ ones. The set sets the _sent_ ones. |
996 | */ | 1007 | */ |
997 | 1008 | ||
998 | static int do_ip_getsockopt(struct sock *sk, int level, int optname, | 1009 | static int do_ip_getsockopt(struct sock *sk, int level, int optname, |
@@ -1143,10 +1154,14 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1143 | return -EFAULT; | 1154 | return -EFAULT; |
1144 | } | 1155 | } |
1145 | err = ip_mc_gsfget(sk, &gsf, | 1156 | err = ip_mc_gsfget(sk, &gsf, |
1146 | (struct group_filter __user *)optval, optlen); | 1157 | (struct group_filter __user *)optval, |
1158 | optlen); | ||
1147 | release_sock(sk); | 1159 | release_sock(sk); |
1148 | return err; | 1160 | return err; |
1149 | } | 1161 | } |
1162 | case IP_MULTICAST_ALL: | ||
1163 | val = inet->mc_all; | ||
1164 | break; | ||
1150 | case IP_PKTOPTIONS: | 1165 | case IP_PKTOPTIONS: |
1151 | { | 1166 | { |
1152 | struct msghdr msg; | 1167 | struct msghdr msg; |
@@ -1187,7 +1202,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1187 | } | 1202 | } |
1188 | release_sock(sk); | 1203 | release_sock(sk); |
1189 | 1204 | ||
1190 | if (len < sizeof(int) && len > 0 && val>=0 && val<=255) { | 1205 | if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { |
1191 | unsigned char ucval = (unsigned char)val; | 1206 | unsigned char ucval = (unsigned char)val; |
1192 | len = 1; | 1207 | len = 1; |
1193 | if (put_user(len, optlen)) | 1208 | if (put_user(len, optlen)) |
@@ -1230,6 +1245,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1230 | #endif | 1245 | #endif |
1231 | return err; | 1246 | return err; |
1232 | } | 1247 | } |
1248 | EXPORT_SYMBOL(ip_getsockopt); | ||
1233 | 1249 | ||
1234 | #ifdef CONFIG_COMPAT | 1250 | #ifdef CONFIG_COMPAT |
1235 | int compat_ip_getsockopt(struct sock *sk, int level, int optname, | 1251 | int compat_ip_getsockopt(struct sock *sk, int level, int optname, |
@@ -1262,11 +1278,5 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1262 | #endif | 1278 | #endif |
1263 | return err; | 1279 | return err; |
1264 | } | 1280 | } |
1265 | |||
1266 | EXPORT_SYMBOL(compat_ip_getsockopt); | 1281 | EXPORT_SYMBOL(compat_ip_getsockopt); |
1267 | #endif | 1282 | #endif |
1268 | |||
1269 | EXPORT_SYMBOL(ip_cmsg_recv); | ||
1270 | |||
1271 | EXPORT_SYMBOL(ip_getsockopt); | ||
1272 | EXPORT_SYMBOL(ip_setsockopt); | ||
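Tying the af_inet.c, igmp.c and ip_sockglue.c pieces together: sockets start with inet->mc_all = 1, ip_mc_sf_allow() now falls back to that flag when a datagram arrives for a group the socket never joined, and the new IP_MULTICAST_ALL option toggles it. A minimal userspace sketch follows; the fallback option number is an assumption for older headers:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IP_MULTICAST_ALL
	#define IP_MULTICAST_ALL 49	/* assumption: value assigned by this series */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int off = 0;

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		/* After this, the socket only sees traffic for groups it joined
		 * via IP_ADD_MEMBERSHIP, instead of every group bound to the port. */
		if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_ALL, &off, sizeof(off)) < 0)
			perror("setsockopt(IP_MULTICAST_ALL)");
		return 0;
	}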
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 9054139795af..93e2b787da20 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -370,8 +370,7 @@ static int ipip_rcv(struct sk_buff *skb) | |||
370 | tunnel->dev->stats.rx_packets++; | 370 | tunnel->dev->stats.rx_packets++; |
371 | tunnel->dev->stats.rx_bytes += skb->len; | 371 | tunnel->dev->stats.rx_bytes += skb->len; |
372 | skb->dev = tunnel->dev; | 372 | skb->dev = tunnel->dev; |
373 | dst_release(skb->dst); | 373 | skb_dst_drop(skb); |
374 | skb->dst = NULL; | ||
375 | nf_reset(skb); | 374 | nf_reset(skb); |
376 | ipip_ecn_decapsulate(iph, skb); | 375 | ipip_ecn_decapsulate(iph, skb); |
377 | netif_rx(skb); | 376 | netif_rx(skb); |
@@ -416,7 +415,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
416 | 415 | ||
417 | if (!dst) { | 416 | if (!dst) { |
418 | /* NBMA tunnel */ | 417 | /* NBMA tunnel */ |
419 | if ((rt = skb->rtable) == NULL) { | 418 | if ((rt = skb_rtable(skb)) == NULL) { |
420 | stats->tx_fifo_errors++; | 419 | stats->tx_fifo_errors++; |
421 | goto tx_error; | 420 | goto tx_error; |
422 | } | 421 | } |
@@ -447,15 +446,15 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
447 | if (tiph->frag_off) | 446 | if (tiph->frag_off) |
448 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 447 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); |
449 | else | 448 | else |
450 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 449 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
451 | 450 | ||
452 | if (mtu < 68) { | 451 | if (mtu < 68) { |
453 | stats->collisions++; | 452 | stats->collisions++; |
454 | ip_rt_put(rt); | 453 | ip_rt_put(rt); |
455 | goto tx_error; | 454 | goto tx_error; |
456 | } | 455 | } |
457 | if (skb->dst) | 456 | if (skb_dst(skb)) |
458 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 457 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
459 | 458 | ||
460 | df |= (old_iph->frag_off&htons(IP_DF)); | 459 | df |= (old_iph->frag_off&htons(IP_DF)); |
461 | 460 | ||
@@ -502,8 +501,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
502 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 501 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
503 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 502 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
504 | IPSKB_REROUTED); | 503 | IPSKB_REROUTED); |
505 | dst_release(skb->dst); | 504 | skb_dst_drop(skb); |
506 | skb->dst = &rt->u.dst; | 505 | skb_dst_set(skb, &rt->u.dst); |
507 | 506 | ||
508 | /* | 507 | /* |
509 | * Push down and install the IPIP header. | 508 | * Push down and install the IPIP header. |
@@ -713,6 +712,7 @@ static void ipip_tunnel_setup(struct net_device *dev) | |||
713 | dev->iflink = 0; | 712 | dev->iflink = 0; |
714 | dev->addr_len = 4; | 713 | dev->addr_len = 4; |
715 | dev->features |= NETIF_F_NETNS_LOCAL; | 714 | dev->features |= NETIF_F_NETNS_LOCAL; |
715 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
716 | } | 716 | } |
717 | 717 | ||
718 | static void ipip_tunnel_init(struct net_device *dev) | 718 | static void ipip_tunnel_init(struct net_device *dev) |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 13e9dd3012b3..ffd986104468 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -651,7 +651,7 @@ static int ipmr_cache_report(struct net *net, | |||
651 | ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ | 651 | ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ |
652 | msg = (struct igmpmsg *)skb_network_header(skb); | 652 | msg = (struct igmpmsg *)skb_network_header(skb); |
653 | msg->im_vif = vifi; | 653 | msg->im_vif = vifi; |
654 | skb->dst = dst_clone(pkt->dst); | 654 | skb_dst_set(skb, dst_clone(skb_dst(pkt))); |
655 | 655 | ||
656 | /* | 656 | /* |
657 | * Add our header | 657 | * Add our header |
@@ -1201,7 +1201,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) | |||
1201 | iph->protocol = IPPROTO_IPIP; | 1201 | iph->protocol = IPPROTO_IPIP; |
1202 | iph->ihl = 5; | 1202 | iph->ihl = 5; |
1203 | iph->tot_len = htons(skb->len); | 1203 | iph->tot_len = htons(skb->len); |
1204 | ip_select_ident(iph, skb->dst, NULL); | 1204 | ip_select_ident(iph, skb_dst(skb), NULL); |
1205 | ip_send_check(iph); | 1205 | ip_send_check(iph); |
1206 | 1206 | ||
1207 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1207 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
@@ -1212,7 +1212,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1212 | { | 1212 | { |
1213 | struct ip_options * opt = &(IPCB(skb)->opt); | 1213 | struct ip_options * opt = &(IPCB(skb)->opt); |
1214 | 1214 | ||
1215 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 1215 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
1216 | 1216 | ||
1217 | if (unlikely(opt->optlen)) | 1217 | if (unlikely(opt->optlen)) |
1218 | ip_forward_options(skb); | 1218 | ip_forward_options(skb); |
@@ -1290,8 +1290,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1290 | vif->pkt_out++; | 1290 | vif->pkt_out++; |
1291 | vif->bytes_out += skb->len; | 1291 | vif->bytes_out += skb->len; |
1292 | 1292 | ||
1293 | dst_release(skb->dst); | 1293 | skb_dst_drop(skb); |
1294 | skb->dst = &rt->u.dst; | 1294 | skb_dst_set(skb, &rt->u.dst); |
1295 | ip_decrease_ttl(ip_hdr(skb)); | 1295 | ip_decrease_ttl(ip_hdr(skb)); |
1296 | 1296 | ||
1297 | /* FIXME: forward and output firewalls used to be called here. | 1297 | /* FIXME: forward and output firewalls used to be called here. |
@@ -1354,7 +1354,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1354 | if (net->ipv4.vif_table[vif].dev != skb->dev) { | 1354 | if (net->ipv4.vif_table[vif].dev != skb->dev) { |
1355 | int true_vifi; | 1355 | int true_vifi; |
1356 | 1356 | ||
1357 | if (skb->rtable->fl.iif == 0) { | 1357 | if (skb_rtable(skb)->fl.iif == 0) { |
1358 | /* It is our own packet, looped back. | 1358 | /* It is our own packet, looped back. |
1359 | Very complicated situation... | 1359 | Very complicated situation... |
1360 | 1360 | ||
@@ -1430,7 +1430,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1430 | { | 1430 | { |
1431 | struct mfc_cache *cache; | 1431 | struct mfc_cache *cache; |
1432 | struct net *net = dev_net(skb->dev); | 1432 | struct net *net = dev_net(skb->dev); |
1433 | int local = skb->rtable->rt_flags&RTCF_LOCAL; | 1433 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; |
1434 | 1434 | ||
1435 | /* Packet is looped back after forward, it should not be | 1435 | /* Packet is looped back after forward, it should not be |
1436 | forwarded second time, but still can be delivered locally. | 1436 | forwarded second time, but still can be delivered locally. |
@@ -1543,8 +1543,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1543 | skb->protocol = htons(ETH_P_IP); | 1543 | skb->protocol = htons(ETH_P_IP); |
1544 | skb->ip_summed = 0; | 1544 | skb->ip_summed = 0; |
1545 | skb->pkt_type = PACKET_HOST; | 1545 | skb->pkt_type = PACKET_HOST; |
1546 | dst_release(skb->dst); | 1546 | skb_dst_drop(skb); |
1547 | skb->dst = NULL; | ||
1548 | reg_dev->stats.rx_bytes += skb->len; | 1547 | reg_dev->stats.rx_bytes += skb->len; |
1549 | reg_dev->stats.rx_packets++; | 1548 | reg_dev->stats.rx_packets++; |
1550 | nf_reset(skb); | 1549 | nf_reset(skb); |
@@ -1646,7 +1645,7 @@ int ipmr_get_route(struct net *net, | |||
1646 | { | 1645 | { |
1647 | int err; | 1646 | int err; |
1648 | struct mfc_cache *cache; | 1647 | struct mfc_cache *cache; |
1649 | struct rtable *rt = skb->rtable; | 1648 | struct rtable *rt = skb_rtable(skb); |
1650 | 1649 | ||
1651 | read_lock(&mrt_lock); | 1650 | read_lock(&mrt_lock); |
1652 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); | 1651 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); |
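The ipmr_cache_report() hunk above is the dst reference-counting idiom in one line: dst_clone() takes an extra reference on the forwarded packet's dst and skb_dst_set() stores it on the report skb, so each skb releases exactly one reference when freed. A sketch of the pattern:

	/* share one dst between two skbs, one refcount apiece */
	skb_dst_set(report_skb, dst_clone(skb_dst(pkt)));
	/* pkt keeps its original reference; freeing either skb
	 * ends up in skb_dst_drop() and drops only that skb's count */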
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index fdf6811c31a2..1725dc0ef688 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -12,7 +12,7 @@ | |||
12 | /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ | 12 | /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ |
13 | int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | 13 | int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) |
14 | { | 14 | { |
15 | struct net *net = dev_net(skb->dst->dev); | 15 | struct net *net = dev_net(skb_dst(skb)->dev); |
16 | const struct iphdr *iph = ip_hdr(skb); | 16 | const struct iphdr *iph = ip_hdr(skb); |
17 | struct rtable *rt; | 17 | struct rtable *rt; |
18 | struct flowi fl = {}; | 18 | struct flowi fl = {}; |
@@ -41,8 +41,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
41 | return -1; | 41 | return -1; |
42 | 42 | ||
43 | /* Drop old route. */ | 43 | /* Drop old route. */ |
44 | dst_release(skb->dst); | 44 | skb_dst_drop(skb); |
45 | skb->dst = &rt->u.dst; | 45 | skb_dst_set(skb, &rt->u.dst); |
46 | } else { | 46 | } else { |
47 | /* non-local src, find valid iif to satisfy | 47 | /* non-local src, find valid iif to satisfy |
48 | * rp-filter when calling ip_route_input. */ | 48 | * rp-filter when calling ip_route_input. */ |
@@ -50,7 +50,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
50 | if (ip_route_output_key(net, &rt, &fl) != 0) | 50 | if (ip_route_output_key(net, &rt, &fl) != 0) |
51 | return -1; | 51 | return -1; |
52 | 52 | ||
53 | odst = skb->dst; | 53 | odst = skb_dst(skb); |
54 | if (ip_route_input(skb, iph->daddr, iph->saddr, | 54 | if (ip_route_input(skb, iph->daddr, iph->saddr, |
55 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { | 55 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { |
56 | dst_release(&rt->u.dst); | 56 | dst_release(&rt->u.dst); |
@@ -60,18 +60,22 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
60 | dst_release(odst); | 60 | dst_release(odst); |
61 | } | 61 | } |
62 | 62 | ||
63 | if (skb->dst->error) | 63 | if (skb_dst(skb)->error) |
64 | return -1; | 64 | return -1; |
65 | 65 | ||
66 | #ifdef CONFIG_XFRM | 66 | #ifdef CONFIG_XFRM |
67 | if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && | 67 | if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && |
68 | xfrm_decode_session(skb, &fl, AF_INET) == 0) | 68 | xfrm_decode_session(skb, &fl, AF_INET) == 0) { |
69 | if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0)) | 69 | struct dst_entry *dst = skb_dst(skb); |
70 | skb_dst_set(skb, NULL); | ||
71 | if (xfrm_lookup(net, &dst, &fl, skb->sk, 0)) | ||
70 | return -1; | 72 | return -1; |
73 | skb_dst_set(skb, dst); | ||
74 | } | ||
71 | #endif | 75 | #endif |
72 | 76 | ||
73 | /* Change in oif may mean change in hh_len. */ | 77 | /* Change in oif may mean change in hh_len. */ |
74 | hh_len = skb->dst->dev->hard_header_len; | 78 | hh_len = skb_dst(skb)->dev->hard_header_len; |
75 | if (skb_headroom(skb) < hh_len && | 79 | if (skb_headroom(skb) < hh_len && |
76 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) | 80 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) |
77 | return -1; | 81 | return -1; |
@@ -92,7 +96,7 @@ int ip_xfrm_me_harder(struct sk_buff *skb) | |||
92 | if (xfrm_decode_session(skb, &fl, AF_INET) < 0) | 96 | if (xfrm_decode_session(skb, &fl, AF_INET) < 0) |
93 | return -1; | 97 | return -1; |
94 | 98 | ||
95 | dst = skb->dst; | 99 | dst = skb_dst(skb); |
96 | if (dst->xfrm) | 100 | if (dst->xfrm) |
97 | dst = ((struct xfrm_dst *)dst)->route; | 101 | dst = ((struct xfrm_dst *)dst)->route; |
98 | dst_hold(dst); | 102 | dst_hold(dst); |
@@ -100,11 +104,11 @@ int ip_xfrm_me_harder(struct sk_buff *skb) | |||
100 | if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) | 104 | if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) |
101 | return -1; | 105 | return -1; |
102 | 106 | ||
103 | dst_release(skb->dst); | 107 | skb_dst_drop(skb); |
104 | skb->dst = dst; | 108 | skb_dst_set(skb, dst); |
105 | 109 | ||
106 | /* Change in oif may mean change in hh_len. */ | 110 | /* Change in oif may mean change in hh_len. */ |
107 | hh_len = skb->dst->dev->hard_header_len; | 111 | hh_len = skb_dst(skb)->dev->hard_header_len; |
108 | if (skb_headroom(skb) < hh_len && | 112 | if (skb_headroom(skb) < hh_len && |
109 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) | 113 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) |
110 | return -1; | 114 | return -1; |
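One subtlety in the ip_route_me_harder() conversion: xfrm_lookup() takes a struct dst_entry ** and may both consume and replace the entry it is given. With the dst now hidden behind an accessor there is no field whose address can be passed, so the code snapshots the pointer into a local, detaches it from the skb, and reinstalls whatever xfrm hands back:

	struct dst_entry *dst = skb_dst(skb);

	skb_dst_set(skb, NULL);			/* xfrm owns it from here */
	if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
		return -1;			/* dst already released on error */
	skb_dst_set(skb, dst);			/* possibly a new xfrm bundle */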
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 855505d480d2..dada0863946d 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -69,7 +69,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
69 | return NF_ACCEPT; | 69 | return NF_ACCEPT; |
70 | 70 | ||
71 | mr = par->targinfo; | 71 | mr = par->targinfo; |
72 | rt = skb->rtable; | 72 | rt = skb_rtable(skb); |
73 | newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); | 73 | newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); |
74 | if (!newsrc) { | 74 | if (!newsrc) { |
75 | printk("MASQUERADE: %s ate my IP address\n", par->out->name); | 75 | printk("MASQUERADE: %s ate my IP address\n", par->out->name); |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 0b4b6e0ff2b9..c93ae44bff2a 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -108,17 +108,16 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
108 | addr_type = RTN_LOCAL; | 108 | addr_type = RTN_LOCAL; |
109 | 109 | ||
110 | /* ip_route_me_harder expects skb->dst to be set */ | 110 | /* ip_route_me_harder expects skb->dst to be set */ |
111 | dst_hold(oldskb->dst); | 111 | skb_dst_set(nskb, dst_clone(skb_dst(oldskb))); |
112 | nskb->dst = oldskb->dst; | ||
113 | 112 | ||
114 | if (ip_route_me_harder(nskb, addr_type)) | 113 | if (ip_route_me_harder(nskb, addr_type)) |
115 | goto free_nskb; | 114 | goto free_nskb; |
116 | 115 | ||
117 | niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); | 116 | niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT); |
118 | nskb->ip_summed = CHECKSUM_NONE; | 117 | nskb->ip_summed = CHECKSUM_NONE; |
119 | 118 | ||
120 | /* "Never happens" */ | 119 | /* "Never happens" */ |
121 | if (nskb->len > dst_mtu(nskb->dst)) | 120 | if (nskb->len > dst_mtu(skb_dst(nskb))) |
122 | goto free_nskb; | 121 | goto free_nskb; |
123 | 122 | ||
124 | nf_ct_attach(nskb, oldskb); | 123 | nf_ct_attach(nskb, oldskb); |
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index cf7a42bf9820..155c008626c8 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -140,7 +140,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb, | |||
140 | const char *rep_buffer, | 140 | const char *rep_buffer, |
141 | unsigned int rep_len) | 141 | unsigned int rep_len) |
142 | { | 142 | { |
143 | struct rtable *rt = skb->rtable; | 143 | struct rtable *rt = skb_rtable(skb); |
144 | struct iphdr *iph; | 144 | struct iphdr *iph; |
145 | struct tcphdr *tcph; | 145 | struct tcphdr *tcph; |
146 | int oldlen, datalen; | 146 | int oldlen, datalen; |
@@ -218,7 +218,7 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, | |||
218 | const char *rep_buffer, | 218 | const char *rep_buffer, |
219 | unsigned int rep_len) | 219 | unsigned int rep_len) |
220 | { | 220 | { |
221 | struct rtable *rt = skb->rtable; | 221 | struct rtable *rt = skb_rtable(skb); |
222 | struct iphdr *iph; | 222 | struct iphdr *iph; |
223 | struct udphdr *udph; | 223 | struct udphdr *udph; |
224 | int datalen, oldlen; | 224 | int datalen, oldlen; |
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c index 65e470bc6123..3fc598eeeb1a 100644 --- a/net/ipv4/netfilter/nf_nat_proto_sctp.c +++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c | |||
@@ -33,6 +33,7 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
33 | enum nf_nat_manip_type maniptype) | 33 | enum nf_nat_manip_type maniptype) |
34 | { | 34 | { |
35 | const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); | 35 | const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); |
36 | struct sk_buff *frag; | ||
36 | sctp_sctphdr_t *hdr; | 37 | sctp_sctphdr_t *hdr; |
37 | unsigned int hdroff = iphdroff + iph->ihl*4; | 38 | unsigned int hdroff = iphdroff + iph->ihl*4; |
38 | __be32 oldip, newip; | 39 | __be32 oldip, newip; |
@@ -57,8 +58,8 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
57 | } | 58 | } |
58 | 59 | ||
59 | crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); | 60 | crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); |
60 | for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) | 61 | skb_walk_frags(skb, frag) |
61 | crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb), | 62 | crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag), |
62 | crc32); | 63 | crc32); |
63 | crc32 = sctp_end_cksum(crc32); | 64 | crc32 = sctp_end_cksum(crc32); |
64 | hdr->checksum = crc32; | 65 | hdr->checksum = crc32; |
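The sctp_manip_pkt() fix is more than a helper swap: the old loop reused skb itself as the iterator, leaving the caller's pointer clobbered (NULL) after the walk. skb_walk_frags() iterates through a separate cursor; its definition in this series is essentially:

	/* walk the frag_list without touching the head skb pointer */
	#define skb_walk_frags(skb, iter)	\
		for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

so skb remains usable for the rest of the function after the checksum walk.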
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index b7dd695691a0..5567bd0d0750 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -167,10 +167,9 @@ nf_nat_in(unsigned int hooknum, | |||
167 | 167 | ||
168 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); | 168 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); |
169 | if (ret != NF_DROP && ret != NF_STOLEN && | 169 | if (ret != NF_DROP && ret != NF_STOLEN && |
170 | daddr != ip_hdr(skb)->daddr) { | 170 | daddr != ip_hdr(skb)->daddr) |
171 | dst_release(skb->dst); | 171 | skb_dst_drop(skb); |
172 | skb->dst = NULL; | 172 | |
173 | } | ||
174 | return ret; | 173 | return ret; |
175 | } | 174 | } |
176 | 175 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index f774651f0a47..3dc9171a272f 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -343,7 +343,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
343 | 343 | ||
344 | skb->priority = sk->sk_priority; | 344 | skb->priority = sk->sk_priority; |
345 | skb->mark = sk->sk_mark; | 345 | skb->mark = sk->sk_mark; |
346 | skb->dst = dst_clone(&rt->u.dst); | 346 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
347 | 347 | ||
348 | skb_reset_network_header(skb); | 348 | skb_reset_network_header(skb); |
349 | iph = ip_hdr(skb); | 349 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 28205e5bfa9b..a849bb15d864 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1064,7 +1064,8 @@ work_done: | |||
1064 | out: return 0; | 1064 | out: return 0; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) | 1067 | static int rt_intern_hash(unsigned hash, struct rtable *rt, |
1068 | struct rtable **rp, struct sk_buff *skb) | ||
1068 | { | 1069 | { |
1069 | struct rtable *rth, **rthp; | 1070 | struct rtable *rth, **rthp; |
1070 | unsigned long now; | 1071 | unsigned long now; |
@@ -1114,7 +1115,10 @@ restart: | |||
1114 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1115 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1115 | 1116 | ||
1116 | rt_drop(rt); | 1117 | rt_drop(rt); |
1117 | *rp = rth; | 1118 | if (rp) |
1119 | *rp = rth; | ||
1120 | else | ||
1121 | skb_dst_set(skb, &rth->u.dst); | ||
1118 | return 0; | 1122 | return 0; |
1119 | } | 1123 | } |
1120 | 1124 | ||
@@ -1210,7 +1214,10 @@ restart: | |||
1210 | rcu_assign_pointer(rt_hash_table[hash].chain, rt); | 1214 | rcu_assign_pointer(rt_hash_table[hash].chain, rt); |
1211 | 1215 | ||
1212 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1216 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1213 | *rp = rt; | 1217 | if (rp) |
1218 | *rp = rt; | ||
1219 | else | ||
1220 | skb_dst_set(skb, &rt->u.dst); | ||
1214 | return 0; | 1221 | return 0; |
1215 | } | 1222 | } |
1216 | 1223 | ||
@@ -1407,7 +1414,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1407 | &netevent); | 1414 | &netevent); |
1408 | 1415 | ||
1409 | rt_del(hash, rth); | 1416 | rt_del(hash, rth); |
1410 | if (!rt_intern_hash(hash, rt, &rt)) | 1417 | if (!rt_intern_hash(hash, rt, &rt, NULL)) |
1411 | ip_rt_put(rt); | 1418 | ip_rt_put(rt); |
1412 | goto do_next; | 1419 | goto do_next; |
1413 | } | 1420 | } |
@@ -1473,7 +1480,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1473 | 1480 | ||
1474 | void ip_rt_send_redirect(struct sk_buff *skb) | 1481 | void ip_rt_send_redirect(struct sk_buff *skb) |
1475 | { | 1482 | { |
1476 | struct rtable *rt = skb->rtable; | 1483 | struct rtable *rt = skb_rtable(skb); |
1477 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); | 1484 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); |
1478 | 1485 | ||
1479 | if (!in_dev) | 1486 | if (!in_dev) |
@@ -1521,7 +1528,7 @@ out: | |||
1521 | 1528 | ||
1522 | static int ip_error(struct sk_buff *skb) | 1529 | static int ip_error(struct sk_buff *skb) |
1523 | { | 1530 | { |
1524 | struct rtable *rt = skb->rtable; | 1531 | struct rtable *rt = skb_rtable(skb); |
1525 | unsigned long now; | 1532 | unsigned long now; |
1526 | int code; | 1533 | int code; |
1527 | 1534 | ||
@@ -1698,7 +1705,7 @@ static void ipv4_link_failure(struct sk_buff *skb) | |||
1698 | 1705 | ||
1699 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); | 1706 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); |
1700 | 1707 | ||
1701 | rt = skb->rtable; | 1708 | rt = skb_rtable(skb); |
1702 | if (rt) | 1709 | if (rt) |
1703 | dst_set_expires(&rt->u.dst, 0); | 1710 | dst_set_expires(&rt->u.dst, 0); |
1704 | } | 1711 | } |
@@ -1858,7 +1865,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1858 | 1865 | ||
1859 | in_dev_put(in_dev); | 1866 | in_dev_put(in_dev); |
1860 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1867 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1861 | return rt_intern_hash(hash, rth, &skb->rtable); | 1868 | return rt_intern_hash(hash, rth, NULL, skb); |
1862 | 1869 | ||
1863 | e_nobufs: | 1870 | e_nobufs: |
1864 | in_dev_put(in_dev); | 1871 | in_dev_put(in_dev); |
@@ -2019,7 +2026,7 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
2019 | /* put it into the cache */ | 2026 | /* put it into the cache */ |
2020 | hash = rt_hash(daddr, saddr, fl->iif, | 2027 | hash = rt_hash(daddr, saddr, fl->iif, |
2021 | rt_genid(dev_net(rth->u.dst.dev))); | 2028 | rt_genid(dev_net(rth->u.dst.dev))); |
2022 | return rt_intern_hash(hash, rth, &skb->rtable); | 2029 | return rt_intern_hash(hash, rth, NULL, skb); |
2023 | } | 2030 | } |
2024 | 2031 | ||
2025 | /* | 2032 | /* |
@@ -2175,7 +2182,7 @@ local_input: | |||
2175 | } | 2182 | } |
2176 | rth->rt_type = res.type; | 2183 | rth->rt_type = res.type; |
2177 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); | 2184 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); |
2178 | err = rt_intern_hash(hash, rth, &skb->rtable); | 2185 | err = rt_intern_hash(hash, rth, NULL, skb); |
2179 | goto done; | 2186 | goto done; |
2180 | 2187 | ||
2181 | no_route: | 2188 | no_route: |
@@ -2244,7 +2251,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2244 | dst_use(&rth->u.dst, jiffies); | 2251 | dst_use(&rth->u.dst, jiffies); |
2245 | RT_CACHE_STAT_INC(in_hit); | 2252 | RT_CACHE_STAT_INC(in_hit); |
2246 | rcu_read_unlock(); | 2253 | rcu_read_unlock(); |
2247 | skb->rtable = rth; | 2254 | skb_dst_set(skb, &rth->u.dst); |
2248 | return 0; | 2255 | return 0; |
2249 | } | 2256 | } |
2250 | RT_CACHE_STAT_INC(in_hlist_search); | 2257 | RT_CACHE_STAT_INC(in_hlist_search); |
@@ -2420,7 +2427,7 @@ static int ip_mkroute_output(struct rtable **rp, | |||
2420 | if (err == 0) { | 2427 | if (err == 0) { |
2421 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, | 2428 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, |
2422 | rt_genid(dev_net(dev_out))); | 2429 | rt_genid(dev_net(dev_out))); |
2423 | err = rt_intern_hash(hash, rth, rp); | 2430 | err = rt_intern_hash(hash, rth, rp, NULL); |
2424 | } | 2431 | } |
2425 | 2432 | ||
2426 | return err; | 2433 | return err; |
@@ -2763,7 +2770,7 @@ static int rt_fill_info(struct net *net, | |||
2763 | struct sk_buff *skb, u32 pid, u32 seq, int event, | 2770 | struct sk_buff *skb, u32 pid, u32 seq, int event, |
2764 | int nowait, unsigned int flags) | 2771 | int nowait, unsigned int flags) |
2765 | { | 2772 | { |
2766 | struct rtable *rt = skb->rtable; | 2773 | struct rtable *rt = skb_rtable(skb); |
2767 | struct rtmsg *r; | 2774 | struct rtmsg *r; |
2768 | struct nlmsghdr *nlh; | 2775 | struct nlmsghdr *nlh; |
2769 | long expires; | 2776 | long expires; |
@@ -2907,7 +2914,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2907 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); | 2914 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); |
2908 | local_bh_enable(); | 2915 | local_bh_enable(); |
2909 | 2916 | ||
2910 | rt = skb->rtable; | 2917 | rt = skb_rtable(skb); |
2911 | if (err == 0 && rt->u.dst.error) | 2918 | if (err == 0 && rt->u.dst.error) |
2912 | err = -rt->u.dst.error; | 2919 | err = -rt->u.dst.error; |
2913 | } else { | 2920 | } else { |
@@ -2927,7 +2934,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2927 | if (err) | 2934 | if (err) |
2928 | goto errout_free; | 2935 | goto errout_free; |
2929 | 2936 | ||
2930 | skb->rtable = rt; | 2937 | skb_dst_set(skb, &rt->u.dst); |
2931 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 2938 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
2932 | rt->rt_flags |= RTCF_NOTIFY; | 2939 | rt->rt_flags |= RTCF_NOTIFY; |
2933 | 2940 | ||
@@ -2968,15 +2975,15 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2968 | continue; | 2975 | continue; |
2969 | if (rt_is_expired(rt)) | 2976 | if (rt_is_expired(rt)) |
2970 | continue; | 2977 | continue; |
2971 | skb->dst = dst_clone(&rt->u.dst); | 2978 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
2972 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, | 2979 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, |
2973 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 2980 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
2974 | 1, NLM_F_MULTI) <= 0) { | 2981 | 1, NLM_F_MULTI) <= 0) { |
2975 | dst_release(xchg(&skb->dst, NULL)); | 2982 | skb_dst_drop(skb); |
2976 | rcu_read_unlock_bh(); | 2983 | rcu_read_unlock_bh(); |
2977 | goto done; | 2984 | goto done; |
2978 | } | 2985 | } |
2979 | dst_release(xchg(&skb->dst, NULL)); | 2986 | skb_dst_drop(skb); |
2980 | } | 2987 | } |
2981 | rcu_read_unlock_bh(); | 2988 | rcu_read_unlock_bh(); |
2982 | } | 2989 | } |
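After this route.c change, rt_intern_hash() has a simple contract: exactly one of rp and skb is non-NULL. Output-path callers still receive the cached rtable through rp, while input-path callers have the result attached to the skb directly, replacing the old skb->rtable assignment:

	/* output route: hand the result back to the caller */
	err = rt_intern_hash(hash, rth, rp, NULL);

	/* input route: install the result on the skb */
	err = rt_intern_hash(hash, rth, NULL, skb);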
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0fb8b441f1f9..17b89c523f9d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
439 | !tp->urg_data || | 439 | !tp->urg_data || |
440 | before(tp->urg_seq, tp->copied_seq) || | 440 | before(tp->urg_seq, tp->copied_seq) || |
441 | !before(tp->urg_seq, tp->rcv_nxt)) { | 441 | !before(tp->urg_seq, tp->rcv_nxt)) { |
442 | struct sk_buff *skb; | ||
443 | |||
442 | answ = tp->rcv_nxt - tp->copied_seq; | 444 | answ = tp->rcv_nxt - tp->copied_seq; |
443 | 445 | ||
444 | /* Subtract 1, if FIN is in queue. */ | 446 | /* Subtract 1, if FIN is in queue. */ |
445 | if (answ && !skb_queue_empty(&sk->sk_receive_queue)) | 447 | skb = skb_peek_tail(&sk->sk_receive_queue); |
446 | answ -= | 448 | if (answ && skb) |
447 | tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; | 449 | answ -= tcp_hdr(skb)->fin; |
448 | } else | 450 | } else |
449 | answ = tp->urg_seq - tp->copied_seq; | 451 | answ = tp->urg_seq - tp->copied_seq; |
450 | release_sock(sk); | 452 | release_sock(sk); |
@@ -1382,11 +1384,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1382 | 1384 | ||
1383 | /* Next get a buffer. */ | 1385 | /* Next get a buffer. */ |
1384 | 1386 | ||
1385 | skb = skb_peek(&sk->sk_receive_queue); | 1387 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
1386 | do { | ||
1387 | if (!skb) | ||
1388 | break; | ||
1389 | |||
1390 | /* Now that we have two receive queues this | 1388 | /* Now that we have two receive queues this |
1391 | * shouldn't happen. | 1389 | * shouldn't happen. |
1392 | */ | 1390 | */ |
@@ -1403,8 +1401,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1403 | if (tcp_hdr(skb)->fin) | 1401 | if (tcp_hdr(skb)->fin) |
1404 | goto found_fin_ok; | 1402 | goto found_fin_ok; |
1405 | WARN_ON(!(flags & MSG_PEEK)); | 1403 | WARN_ON(!(flags & MSG_PEEK)); |
1406 | skb = skb->next; | 1404 | } |
1407 | } while (skb != (struct sk_buff *)&sk->sk_receive_queue); | ||
1408 | 1405 | ||
1409 | /* Well, if we have backlog, try to process it now yet. */ | 1406 | /* Well, if we have backlog, try to process it now yet. */ |
1410 | 1407 | ||
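Both tcp.c hunks retire open-coded sk_buff list walking, where the queue head is cast to struct sk_buff * and used as a sentinel. Roughly as the helpers read in skbuff.h of this era (illustrative, from memory):

	#define skb_queue_walk(queue, skb)					\
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

	static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
	{
		struct sk_buff *skb = ((struct sk_buff *)list_)->prev;

		if (skb == (struct sk_buff *)list_)
			skb = NULL;	/* empty queue */
		return skb;
	}

In tcp_ioctl() this also removes the raw sk_receive_queue.prev cast: skb_peek_tail() returns NULL on an empty queue, and the "answ && skb" test covers that case.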
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eeb8a92aa416..2bdb0da237e6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4426,7 +4426,7 @@ drop: | |||
4426 | } | 4426 | } |
4427 | __skb_queue_head(&tp->out_of_order_queue, skb); | 4427 | __skb_queue_head(&tp->out_of_order_queue, skb); |
4428 | } else { | 4428 | } else { |
4429 | struct sk_buff *skb1 = tp->out_of_order_queue.prev; | 4429 | struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue); |
4430 | u32 seq = TCP_SKB_CB(skb)->seq; | 4430 | u32 seq = TCP_SKB_CB(skb)->seq; |
4431 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 4431 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
4432 | 4432 | ||
@@ -4443,15 +4443,18 @@ drop: | |||
4443 | } | 4443 | } |
4444 | 4444 | ||
4445 | /* Find place to insert this segment. */ | 4445 | /* Find place to insert this segment. */ |
4446 | do { | 4446 | while (1) { |
4447 | if (!after(TCP_SKB_CB(skb1)->seq, seq)) | 4447 | if (!after(TCP_SKB_CB(skb1)->seq, seq)) |
4448 | break; | 4448 | break; |
4449 | } while ((skb1 = skb1->prev) != | 4449 | if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { |
4450 | (struct sk_buff *)&tp->out_of_order_queue); | 4450 | skb1 = NULL; |
4451 | break; | ||
4452 | } | ||
4453 | skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); | ||
4454 | } | ||
4451 | 4455 | ||
4452 | /* Do skb overlap to previous one? */ | 4456 | /* Do skb overlap to previous one? */ |
4453 | if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && | 4457 | if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { |
4454 | before(seq, TCP_SKB_CB(skb1)->end_seq)) { | ||
4455 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4458 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
4456 | /* All the bits are present. Drop. */ | 4459 | /* All the bits are present. Drop. */ |
4457 | __kfree_skb(skb); | 4460 | __kfree_skb(skb); |
@@ -4463,15 +4466,26 @@ drop: | |||
4463 | tcp_dsack_set(sk, seq, | 4466 | tcp_dsack_set(sk, seq, |
4464 | TCP_SKB_CB(skb1)->end_seq); | 4467 | TCP_SKB_CB(skb1)->end_seq); |
4465 | } else { | 4468 | } else { |
4466 | skb1 = skb1->prev; | 4469 | if (skb_queue_is_first(&tp->out_of_order_queue, |
4470 | skb1)) | ||
4471 | skb1 = NULL; | ||
4472 | else | ||
4473 | skb1 = skb_queue_prev( | ||
4474 | &tp->out_of_order_queue, | ||
4475 | skb1); | ||
4467 | } | 4476 | } |
4468 | } | 4477 | } |
4469 | __skb_queue_after(&tp->out_of_order_queue, skb1, skb); | 4478 | if (!skb1) |
4479 | __skb_queue_head(&tp->out_of_order_queue, skb); | ||
4480 | else | ||
4481 | __skb_queue_after(&tp->out_of_order_queue, skb1, skb); | ||
4470 | 4482 | ||
4471 | /* And clean segments covered by new one as whole. */ | 4483 | /* And clean segments covered by new one as whole. */ |
4472 | while ((skb1 = skb->next) != | 4484 | while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { |
4473 | (struct sk_buff *)&tp->out_of_order_queue && | 4485 | skb1 = skb_queue_next(&tp->out_of_order_queue, skb); |
4474 | after(end_seq, TCP_SKB_CB(skb1)->seq)) { | 4486 | |
4487 | if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) | ||
4488 | break; | ||
4475 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4489 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
4476 | tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, | 4490 | tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, |
4477 | end_seq); | 4491 | end_seq); |
@@ -4492,7 +4506,10 @@ add_sack: | |||
4492 | static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, | 4506 | static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, |
4493 | struct sk_buff_head *list) | 4507 | struct sk_buff_head *list) |
4494 | { | 4508 | { |
4495 | struct sk_buff *next = skb->next; | 4509 | struct sk_buff *next = NULL; |
4510 | |||
4511 | if (!skb_queue_is_last(list, skb)) | ||
4512 | next = skb_queue_next(list, skb); | ||
4496 | 4513 | ||
4497 | __skb_unlink(skb, list); | 4514 | __skb_unlink(skb, list); |
4498 | __kfree_skb(skb); | 4515 | __kfree_skb(skb); |
@@ -4503,6 +4520,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, | |||
4503 | 4520 | ||
4504 | /* Collapse contiguous sequence of skbs head..tail with | 4521 | /* Collapse contiguous sequence of skbs head..tail with |
4505 | * sequence numbers start..end. | 4522 | * sequence numbers start..end. |
4523 | * | ||
4524 | * If tail is NULL, this means until the end of the list. | ||
4525 | * | ||
4506 | * Segments with FIN/SYN are not collapsed (only because this | 4526 | * Segments with FIN/SYN are not collapsed (only because this |
4507 | * simplifies code) | 4527 | * simplifies code) |
4508 | */ | 4528 | */ |
@@ -4511,15 +4531,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4511 | struct sk_buff *head, struct sk_buff *tail, | 4531 | struct sk_buff *head, struct sk_buff *tail, |
4512 | u32 start, u32 end) | 4532 | u32 start, u32 end) |
4513 | { | 4533 | { |
4514 | struct sk_buff *skb; | 4534 | struct sk_buff *skb, *n; |
4535 | bool end_of_skbs; | ||
4515 | 4536 | ||
4516 | /* First, check that queue is collapsible and find | 4537 | /* First, check that queue is collapsible and find |
4517 | * the point where collapsing can be useful. */ | 4538 | * the point where collapsing can be useful. */ |
4518 | for (skb = head; skb != tail;) { | 4539 | skb = head; |
4540 | restart: | ||
4541 | end_of_skbs = true; | ||
4542 | skb_queue_walk_from_safe(list, skb, n) { | ||
4543 | if (skb == tail) | ||
4544 | break; | ||
4519 | /* No new bits? It is possible on ofo queue. */ | 4545 | /* No new bits? It is possible on ofo queue. */ |
4520 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4546 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4521 | skb = tcp_collapse_one(sk, skb, list); | 4547 | skb = tcp_collapse_one(sk, skb, list); |
4522 | continue; | 4548 | if (!skb) |
4549 | break; | ||
4550 | goto restart; | ||
4523 | } | 4551 | } |
4524 | 4552 | ||
4525 | /* The first skb to collapse is: | 4553 | /* The first skb to collapse is: |
@@ -4529,16 +4557,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4529 | */ | 4557 | */ |
4530 | if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && | 4558 | if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && |
4531 | (tcp_win_from_space(skb->truesize) > skb->len || | 4559 | (tcp_win_from_space(skb->truesize) > skb->len || |
4532 | before(TCP_SKB_CB(skb)->seq, start) || | 4560 | before(TCP_SKB_CB(skb)->seq, start))) { |
4533 | (skb->next != tail && | 4561 | end_of_skbs = false; |
4534 | TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq))) | ||
4535 | break; | 4562 | break; |
4563 | } | ||
4564 | |||
4565 | if (!skb_queue_is_last(list, skb)) { | ||
4566 | struct sk_buff *next = skb_queue_next(list, skb); | ||
4567 | if (next != tail && | ||
4568 | TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { | ||
4569 | end_of_skbs = false; | ||
4570 | break; | ||
4571 | } | ||
4572 | } | ||
4536 | 4573 | ||
4537 | /* Decided to skip this, advance start seq. */ | 4574 | /* Decided to skip this, advance start seq. */ |
4538 | start = TCP_SKB_CB(skb)->end_seq; | 4575 | start = TCP_SKB_CB(skb)->end_seq; |
4539 | skb = skb->next; | ||
4540 | } | 4576 | } |
4541 | if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) | 4577 | if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) |
4542 | return; | 4578 | return; |
4543 | 4579 | ||
4544 | while (before(start, end)) { | 4580 | while (before(start, end)) { |
@@ -4583,7 +4619,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4583 | } | 4619 | } |
4584 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4620 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4585 | skb = tcp_collapse_one(sk, skb, list); | 4621 | skb = tcp_collapse_one(sk, skb, list); |
4586 | if (skb == tail || | 4622 | if (!skb || |
4623 | skb == tail || | ||
4587 | tcp_hdr(skb)->syn || | 4624 | tcp_hdr(skb)->syn || |
4588 | tcp_hdr(skb)->fin) | 4625 | tcp_hdr(skb)->fin) |
4589 | return; | 4626 | return; |
@@ -4610,17 +4647,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk) | |||
4610 | head = skb; | 4647 | head = skb; |
4611 | 4648 | ||
4612 | for (;;) { | 4649 | for (;;) { |
4613 | skb = skb->next; | 4650 | struct sk_buff *next = NULL; |
4651 | |||
4652 | if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) | ||
4653 | next = skb_queue_next(&tp->out_of_order_queue, skb); | ||
4654 | skb = next; | ||
4614 | 4655 | ||
4615 | /* Segment is terminated when we see gap or when | 4656 | /* Segment is terminated when we see gap or when |
4616 | * we are at the end of all the queue. */ | 4657 | * we are at the end of all the queue. */ |
4617 | if (skb == (struct sk_buff *)&tp->out_of_order_queue || | 4658 | if (!skb || |
4618 | after(TCP_SKB_CB(skb)->seq, end) || | 4659 | after(TCP_SKB_CB(skb)->seq, end) || |
4619 | before(TCP_SKB_CB(skb)->end_seq, start)) { | 4660 | before(TCP_SKB_CB(skb)->end_seq, start)) { |
4620 | tcp_collapse(sk, &tp->out_of_order_queue, | 4661 | tcp_collapse(sk, &tp->out_of_order_queue, |
4621 | head, skb, start, end); | 4662 | head, skb, start, end); |
4622 | head = skb; | 4663 | head = skb; |
4623 | if (skb == (struct sk_buff *)&tp->out_of_order_queue) | 4664 | if (!skb) |
4624 | break; | 4665 | break; |
4625 | /* Start new segment */ | 4666 | /* Start new segment */ |
4626 | start = TCP_SKB_CB(skb)->seq; | 4667 | start = TCP_SKB_CB(skb)->seq; |
@@ -4681,10 +4722,11 @@ static int tcp_prune_queue(struct sock *sk) | |||
4681 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); | 4722 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); |
4682 | 4723 | ||
4683 | tcp_collapse_ofo_queue(sk); | 4724 | tcp_collapse_ofo_queue(sk); |
4684 | tcp_collapse(sk, &sk->sk_receive_queue, | 4725 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
4685 | sk->sk_receive_queue.next, | 4726 | tcp_collapse(sk, &sk->sk_receive_queue, |
4686 | (struct sk_buff *)&sk->sk_receive_queue, | 4727 | skb_peek(&sk->sk_receive_queue), |
4687 | tp->copied_seq, tp->rcv_nxt); | 4728 | NULL, |
4729 | tp->copied_seq, tp->rcv_nxt); | ||
4688 | sk_mem_reclaim(sk); | 4730 | sk_mem_reclaim(sk); |
4689 | 4731 | ||
4690 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) | 4732 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) |
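The out-of-order queue rework follows from the same sentinel removal: rather than comparing against (struct sk_buff *)&tp->out_of_order_queue, boundaries are expressed through queue helpers, and a NULL skb1 or tail now means "front of the queue" or "through the end of the list". Approximately:

	static inline bool skb_queue_is_last(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
	{
		return skb->next == (struct sk_buff *)list;
	}

	static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
						     const struct sk_buff *skb)
	{
		/* caller must have checked skb_queue_is_last() first */
		BUG_ON(skb_queue_is_last(list, skb));
		return skb->next;
	}

(skb_queue_prev()/skb_queue_is_first() mirror these toward ->prev.) It is also why tcp_prune_queue() now guards with !skb_queue_empty() before calling tcp_collapse() with skb_peek() and a NULL tail.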
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fc79e3416288..5a1ca2698c88 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -546,7 +546,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
546 | if (th->rst) | 546 | if (th->rst) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | if (skb->rtable->rt_type != RTN_LOCAL) | 549 | if (skb_rtable(skb)->rt_type != RTN_LOCAL) |
550 | return; | 550 | return; |
551 | 551 | ||
552 | /* Swap the send and the receive. */ | 552 | /* Swap the send and the receive. */ |
@@ -590,7 +590,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
590 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 590 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
591 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; | 591 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; |
592 | 592 | ||
593 | net = dev_net(skb->dst->dev); | 593 | net = dev_net(skb_dst(skb)->dev); |
594 | ip_send_reply(net->ipv4.tcp_sock, skb, | 594 | ip_send_reply(net->ipv4.tcp_sock, skb, |
595 | &arg, arg.iov[0].iov_len); | 595 | &arg, arg.iov[0].iov_len); |
596 | 596 | ||
@@ -617,7 +617,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
617 | ]; | 617 | ]; |
618 | } rep; | 618 | } rep; |
619 | struct ip_reply_arg arg; | 619 | struct ip_reply_arg arg; |
620 | struct net *net = dev_net(skb->dst->dev); | 620 | struct net *net = dev_net(skb_dst(skb)->dev); |
621 | 621 | ||
622 | memset(&rep.th, 0, sizeof(struct tcphdr)); | 622 | memset(&rep.th, 0, sizeof(struct tcphdr)); |
623 | memset(&arg, 0, sizeof(arg)); | 623 | memset(&arg, 0, sizeof(arg)); |
@@ -1185,7 +1185,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1185 | #endif | 1185 | #endif |
1186 | 1186 | ||
1187 | /* Never answer to SYNs send to broadcast or multicast */ | 1187 | /* Never answer to SYNs send to broadcast or multicast */ |
1188 | if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | 1188 | if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
1189 | goto drop; | 1189 | goto drop; |
1190 | 1190 | ||
1191 | /* TW buckets are converted to open requests without | 1191 | /* TW buckets are converted to open requests without |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 79c39dc9b01c..416fc4c2e7eb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2202,7 +2202,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2202 | /* Reserve space for headers. */ | 2202 | /* Reserve space for headers. */ |
2203 | skb_reserve(skb, MAX_TCP_HEADER); | 2203 | skb_reserve(skb, MAX_TCP_HEADER); |
2204 | 2204 | ||
2205 | skb->dst = dst_clone(dst); | 2205 | skb_dst_set(skb, dst_clone(dst)); |
2206 | 2206 | ||
2207 | mss = dst_metric(dst, RTAX_ADVMSS); | 2207 | mss = dst_metric(dst, RTAX_ADVMSS); |
2208 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) | 2208 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) |
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index a453aac91bd3..c6743eec9b7d 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
@@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) | |||
158 | } | 158 | } |
159 | EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); | 159 | EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); |
160 | 160 | ||
161 | static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) | ||
162 | { | ||
163 | return min(tp->snd_ssthresh, tp->snd_cwnd-1); | ||
164 | } | ||
165 | |||
161 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | 166 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) |
162 | { | 167 | { |
163 | struct tcp_sock *tp = tcp_sk(sk); | 168 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
221 | */ | 226 | */ |
222 | diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; | 227 | diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; |
223 | 228 | ||
224 | if (diff > gamma && tp->snd_ssthresh > 2 ) { | 229 | if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) { |
225 | /* Going too fast. Time to slow down | 230 | /* Going too fast. Time to slow down |
226 | * and switch to congestion avoidance. | 231 | * and switch to congestion avoidance. |
227 | */ | 232 | */ |
228 | tp->snd_ssthresh = 2; | ||
229 | 233 | ||
230 | /* Set cwnd to match the actual rate | 234 | /* Set cwnd to match the actual rate |
231 | * exactly: | 235 | * exactly: |
@@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
235 | * utilization. | 239 | * utilization. |
236 | */ | 240 | */ |
237 | tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); | 241 | tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); |
242 | tp->snd_ssthresh = tcp_vegas_ssthresh(tp); | ||
238 | 243 | ||
239 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { | 244 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { |
240 | /* Slow start. */ | 245 | /* Slow start. */ |
@@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
250 | * we slow down. | 255 | * we slow down. |
251 | */ | 256 | */ |
252 | tp->snd_cwnd--; | 257 | tp->snd_cwnd--; |
258 | tp->snd_ssthresh | ||
259 | = tcp_vegas_ssthresh(tp); | ||
253 | } else if (diff < alpha) { | 260 | } else if (diff < alpha) { |
254 | /* We don't have enough extra packets | 261 | /* We don't have enough extra packets |
255 | * in the network, so speed up. | 262 | * in the network, so speed up. |
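The Vegas change stops collapsing ssthresh to 2 whenever the estimator says "too fast"; tcp_vegas_ssthresh() clamps it just under the current window instead. A quick worked case: with snd_cwnd = 40 and snd_ssthresh = 64 at the moment diff > gamma, ssthresh becomes min(64, 40 - 1) = 39, so the connection drops out of slow start near its present rate rather than being left with ssthresh pinned at its floor, which previously capped every later slow start at a minimal window.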
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7a1d1ce22e66..8f4158d7c9a6 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -328,7 +328,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, | |||
328 | if (unlikely(sk = skb_steal_sock(skb))) | 328 | if (unlikely(sk = skb_steal_sock(skb))) |
329 | return sk; | 329 | return sk; |
330 | else | 330 | else |
331 | return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport, | 331 | return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, |
332 | iph->daddr, dport, inet_iif(skb), | 332 | iph->daddr, dport, inet_iif(skb), |
333 | udptable); | 333 | udptable); |
334 | } | 334 | } |
@@ -1237,7 +1237,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1237 | struct sock *sk; | 1237 | struct sock *sk; |
1238 | struct udphdr *uh; | 1238 | struct udphdr *uh; |
1239 | unsigned short ulen; | 1239 | unsigned short ulen; |
1240 | struct rtable *rt = (struct rtable*)skb->dst; | 1240 | struct rtable *rt = skb_rtable(skb); |
1241 | __be32 saddr, daddr; | 1241 | __be32 saddr, daddr; |
1242 | struct net *net = dev_net(skb->dev); | 1242 | struct net *net = dev_net(skb->dev); |
1243 | 1243 | ||
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 4ec2162a437e..f9f922a0ba88 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -23,7 +23,7 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) | |||
23 | 23 | ||
24 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) | 24 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) |
25 | { | 25 | { |
26 | if (skb->dst == NULL) { | 26 | if (skb_dst(skb) == NULL) { |
27 | const struct iphdr *iph = ip_hdr(skb); | 27 | const struct iphdr *iph = ip_hdr(skb); |
28 | 28 | ||
29 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 29 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 7135279f3f84..3444f3b34eca 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c | |||
@@ -28,7 +28,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb) | |||
28 | */ | 28 | */ |
29 | static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | 29 | static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) |
30 | { | 30 | { |
31 | struct dst_entry *dst = skb->dst; | 31 | struct dst_entry *dst = skb_dst(skb); |
32 | struct iphdr *top_iph; | 32 | struct iphdr *top_iph; |
33 | int flags; | 33 | int flags; |
34 | 34 | ||
@@ -41,7 +41,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | |||
41 | top_iph->ihl = 5; | 41 | top_iph->ihl = 5; |
42 | top_iph->version = 4; | 42 | top_iph->version = 4; |
43 | 43 | ||
44 | top_iph->protocol = xfrm_af2proto(skb->dst->ops->family); | 44 | top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family); |
45 | 45 | ||
46 | /* DS disclosed */ | 46 | /* DS disclosed */ |
47 | top_iph->tos = INET_ECN_encapsulate(XFRM_MODE_SKB_CB(skb)->tos, | 47 | top_iph->tos = INET_ECN_encapsulate(XFRM_MODE_SKB_CB(skb)->tos, |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 8c3180adddbf..c908bd99bcba 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -29,7 +29,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
29 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) | 29 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) |
30 | goto out; | 30 | goto out; |
31 | 31 | ||
32 | dst = skb->dst; | 32 | dst = skb_dst(skb); |
33 | mtu = dst_mtu(dst); | 33 | mtu = dst_mtu(dst); |
34 | if (skb->len > mtu) { | 34 | if (skb->len > mtu) { |
35 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | 35 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(xfrm4_prepare_output); | |||
72 | static int xfrm4_output_finish(struct sk_buff *skb) | 72 | static int xfrm4_output_finish(struct sk_buff *skb) |
73 | { | 73 | { |
74 | #ifdef CONFIG_NETFILTER | 74 | #ifdef CONFIG_NETFILTER |
75 | if (!skb->dst->xfrm) { | 75 | if (!skb_dst(skb)->xfrm) { |
76 | IPCB(skb)->flags |= IPSKB_REROUTED; | 76 | IPCB(skb)->flags |= IPSKB_REROUTED; |
77 | return dst_output(skb); | 77 | return dst_output(skb); |
78 | } | 78 | } |
@@ -87,6 +87,6 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
87 | int xfrm4_output(struct sk_buff *skb) | 87 | int xfrm4_output(struct sk_buff *skb) |
88 | { | 88 | { |
89 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, | 89 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, |
90 | NULL, skb->dst->dev, xfrm4_output_finish, | 90 | NULL, skb_dst(skb)->dev, xfrm4_output_finish, |
91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
92 | } | 92 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 31938e5fb220..c3488372f12d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -591,7 +591,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
591 | { | 591 | { |
592 | struct inet6_ifaddr *ifa = NULL; | 592 | struct inet6_ifaddr *ifa = NULL; |
593 | struct rt6_info *rt; | 593 | struct rt6_info *rt; |
594 | struct net *net = dev_net(idev->dev); | ||
595 | int hash; | 594 | int hash; |
596 | int err = 0; | 595 | int err = 0; |
597 | int addr_type = ipv6_addr_type(addr); | 596 | int addr_type = ipv6_addr_type(addr); |
@@ -608,7 +607,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, | |||
608 | goto out2; | 607 | goto out2; |
609 | } | 608 | } |
610 | 609 | ||
611 | if (idev->cnf.disable_ipv6 || net->ipv6.devconf_all->disable_ipv6) { | 610 | if (idev->cnf.disable_ipv6) { |
612 | err = -EACCES; | 611 | err = -EACCES; |
613 | goto out2; | 612 | goto out2; |
614 | } | 613 | } |
@@ -1752,6 +1751,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1752 | __u32 prefered_lft; | 1751 | __u32 prefered_lft; |
1753 | int addr_type; | 1752 | int addr_type; |
1754 | struct inet6_dev *in6_dev; | 1753 | struct inet6_dev *in6_dev; |
1754 | struct net *net = dev_net(dev); | ||
1755 | 1755 | ||
1756 | pinfo = (struct prefix_info *) opt; | 1756 | pinfo = (struct prefix_info *) opt; |
1757 | 1757 | ||
@@ -1809,7 +1809,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1809 | if (addrconf_finite_timeout(rt_expires)) | 1809 | if (addrconf_finite_timeout(rt_expires)) |
1810 | rt_expires *= HZ; | 1810 | rt_expires *= HZ; |
1811 | 1811 | ||
1812 | rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, | 1812 | rt = rt6_lookup(net, &pinfo->prefix, NULL, |
1813 | dev->ifindex, 1); | 1813 | dev->ifindex, 1); |
1814 | 1814 | ||
1815 | if (rt && addrconf_is_prefix_route(rt)) { | 1815 | if (rt && addrconf_is_prefix_route(rt)) { |
@@ -1846,7 +1846,6 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1846 | struct inet6_ifaddr * ifp; | 1846 | struct inet6_ifaddr * ifp; |
1847 | struct in6_addr addr; | 1847 | struct in6_addr addr; |
1848 | int create = 0, update_lft = 0; | 1848 | int create = 0, update_lft = 0; |
1849 | struct net *net = dev_net(dev); | ||
1850 | 1849 | ||
1851 | if (pinfo->prefix_len == 64) { | 1850 | if (pinfo->prefix_len == 64) { |
1852 | memcpy(&addr, &pinfo->prefix, 8); | 1851 | memcpy(&addr, &pinfo->prefix, 8); |
@@ -3988,6 +3987,75 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table, | |||
3988 | return addrconf_fixup_forwarding(table, valp, val); | 3987 | return addrconf_fixup_forwarding(table, valp, val); |
3989 | } | 3988 | } |
3990 | 3989 | ||
3990 | static void dev_disable_change(struct inet6_dev *idev) | ||
3991 | { | ||
3992 | if (!idev || !idev->dev) | ||
3993 | return; | ||
3994 | |||
3995 | if (idev->cnf.disable_ipv6) | ||
3996 | addrconf_notify(NULL, NETDEV_DOWN, idev->dev); | ||
3997 | else | ||
3998 | addrconf_notify(NULL, NETDEV_UP, idev->dev); | ||
3999 | } | ||
4000 | |||
4001 | static void addrconf_disable_change(struct net *net, __s32 newf) | ||
4002 | { | ||
4003 | struct net_device *dev; | ||
4004 | struct inet6_dev *idev; | ||
4005 | |||
4006 | read_lock(&dev_base_lock); | ||
4007 | for_each_netdev(net, dev) { | ||
4008 | rcu_read_lock(); | ||
4009 | idev = __in6_dev_get(dev); | ||
4010 | if (idev) { | ||
4011 | int changed = (!idev->cnf.disable_ipv6) ^ (!newf); | ||
4012 | idev->cnf.disable_ipv6 = newf; | ||
4013 | if (changed) | ||
4014 | dev_disable_change(idev); | ||
4015 | } | ||
4016 | rcu_read_unlock(); | ||
4017 | } | ||
4018 | read_unlock(&dev_base_lock); | ||
4019 | } | ||
4020 | |||
4021 | static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) | ||
4022 | { | ||
4023 | struct net *net; | ||
4024 | |||
4025 | net = (struct net *)table->extra2; | ||
4026 | |||
4027 | if (p == &net->ipv6.devconf_dflt->disable_ipv6) | ||
4028 | return 0; | ||
4029 | |||
4030 | if (!rtnl_trylock()) | ||
4031 | return restart_syscall(); | ||
4032 | |||
4033 | if (p == &net->ipv6.devconf_all->disable_ipv6) { | ||
4034 | __s32 newf = net->ipv6.devconf_all->disable_ipv6; | ||
4035 | net->ipv6.devconf_dflt->disable_ipv6 = newf; | ||
4036 | addrconf_disable_change(net, newf); | ||
4037 | } else if ((!*p) ^ (!old)) | ||
4038 | dev_disable_change((struct inet6_dev *)table->extra1); | ||
4039 | |||
4040 | rtnl_unlock(); | ||
4041 | return 0; | ||
4042 | } | ||
4043 | |||
4044 | static | ||
4045 | int addrconf_sysctl_disable(ctl_table *ctl, int write, struct file * filp, | ||
4046 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
4047 | { | ||
4048 | int *valp = ctl->data; | ||
4049 | int val = *valp; | ||
4050 | int ret; | ||
4051 | |||
4052 | ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | ||
4053 | |||
4054 | if (write) | ||
4055 | ret = addrconf_disable_ipv6(ctl, valp, val); | ||
4056 | return ret; | ||
4057 | } | ||
4058 | |||
3991 | static struct addrconf_sysctl_table | 4059 | static struct addrconf_sysctl_table |
3992 | { | 4060 | { |
3993 | struct ctl_table_header *sysctl_header; | 4061 | struct ctl_table_header *sysctl_header; |
@@ -4225,7 +4293,8 @@ static struct addrconf_sysctl_table | |||
4225 | .data = &ipv6_devconf.disable_ipv6, | 4293 | .data = &ipv6_devconf.disable_ipv6, |
4226 | .maxlen = sizeof(int), | 4294 | .maxlen = sizeof(int), |
4227 | .mode = 0644, | 4295 | .mode = 0644, |
4228 | .proc_handler = proc_dointvec, | 4296 | .proc_handler = addrconf_sysctl_disable, |
4297 | .strategy = sysctl_intvec, | ||
4229 | }, | 4298 | }, |
4230 | { | 4299 | { |
4231 | .ctl_name = CTL_UNNUMBERED, | 4300 | .ctl_name = CTL_UNNUMBERED, |
@@ -4346,6 +4415,10 @@ static int addrconf_init_net(struct net *net) | |||
4346 | dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); | 4415 | dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); |
4347 | if (dflt == NULL) | 4416 | if (dflt == NULL) |
4348 | goto err_alloc_dflt; | 4417 | goto err_alloc_dflt; |
4418 | } else { | ||
4419 | /* these will be inherited by all namespaces */ | ||
4420 | dflt->autoconf = ipv6_defaults.autoconf; | ||
4421 | dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; | ||
4349 | } | 4422 | } |
4350 | 4423 | ||
4351 | net->ipv6.devconf_all = all; | 4424 | net->ipv6.devconf_all = all; |
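Semantics of the new disable_ipv6 sysctl, as wired above: a write to conf/all/disable_ipv6 copies the value into conf/default and walks every device in the namespace, a per-device write toggles only that idev, and a write to conf/default alone just stores the value for future devices. Any real transition is replayed to the stack as NETDEV_DOWN or NETDEV_UP, so addresses and routes are torn down or re-created accordingly. The rtnl_trylock()/restart_syscall() pair is the usual way to take RTNL from a sysctl handler without deadlock: if the lock is contended, the write(2) is transparently restarted instead of blocking with the sysctl machinery held.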
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b6215be0963f..85b3d0036afd 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -72,9 +72,21 @@ MODULE_LICENSE("GPL"); | |||
72 | static struct list_head inetsw6[SOCK_MAX]; | 72 | static struct list_head inetsw6[SOCK_MAX]; |
73 | static DEFINE_SPINLOCK(inetsw6_lock); | 73 | static DEFINE_SPINLOCK(inetsw6_lock); |
74 | 74 | ||
75 | static int disable_ipv6 = 0; | 75 | struct ipv6_params ipv6_defaults = { |
76 | module_param_named(disable, disable_ipv6, int, 0); | 76 | .disable_ipv6 = 0, |
77 | MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional"); | 77 | .autoconf = 1, |
78 | }; | ||
79 | |||
80 | static int disable_ipv6_mod = 0; | ||
81 | |||
82 | module_param_named(disable, disable_ipv6_mod, int, 0444); | ||
83 | MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional"); | ||
84 | |||
85 | module_param_named(disable_ipv6, ipv6_defaults.disable_ipv6, int, 0444); | ||
86 | MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces"); | ||
87 | |||
88 | module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444); | ||
89 | MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces"); | ||
78 | 90 | ||
79 | static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) | 91 | static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) |
80 | { | 92 | { |
@@ -1038,7 +1050,7 @@ static int __init inet6_init(void) | |||
1038 | for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) | 1050 | for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) |
1039 | INIT_LIST_HEAD(r); | 1051 | INIT_LIST_HEAD(r); |
1040 | 1052 | ||
1041 | if (disable_ipv6) { | 1053 | if (disable_ipv6_mod) { |
1042 | printk(KERN_INFO | 1054 | printk(KERN_INFO |
1043 | "IPv6: Loaded, but administratively disabled, " | 1055 | "IPv6: Loaded, but administratively disabled, " |
1044 | "reboot required to enable\n"); | 1056 | "reboot required to enable\n"); |
@@ -1227,7 +1239,7 @@ module_init(inet6_init); | |||
1227 | 1239 | ||
1228 | static void __exit inet6_exit(void) | 1240 | static void __exit inet6_exit(void) |
1229 | { | 1241 | { |
1230 | if (disable_ipv6) | 1242 | if (disable_ipv6_mod) |
1231 | return; | 1243 | return; |
1232 | 1244 | ||
1233 | /* First of all disallow new sockets creation. */ | 1245 | /* First of all disallow new sockets creation. */ |
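Usage note on the reworked module parameters (hypothetical invocations): "modprobe ipv6 disable=1" behaves as before under its new internal name, loading the module but leaving the stack non-functional until reboot; "modprobe ipv6 disable_ipv6=1" keeps the stack functional but starts every interface with IPv6 disabled, recoverable at runtime through the disable_ipv6 sysctl above; "autoconf=0" suppresses address autoconfiguration on all interfaces. All three are declared with mode 0444, so they are read-only after load, and the disable_ipv6/autoconf defaults flow into ipv6_defaults, which per the addrconf comment above is inherited by all namespaces.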
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 1c7f400a3cfe..4aae658e5501 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -277,7 +277,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) | |||
277 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || | 277 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || |
278 | !pskb_may_pull(skb, (skb_transport_offset(skb) + | 278 | !pskb_may_pull(skb, (skb_transport_offset(skb) + |
279 | ((skb_transport_header(skb)[1] + 1) << 3)))) { | 279 | ((skb_transport_header(skb)[1] + 1) << 3)))) { |
280 | IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst), | 280 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
281 | IPSTATS_MIB_INHDRERRORS); | 281 | IPSTATS_MIB_INHDRERRORS); |
282 | kfree_skb(skb); | 282 | kfree_skb(skb); |
283 | return -1; | 283 | return -1; |
@@ -288,7 +288,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) | |||
288 | dstbuf = opt->dst1; | 288 | dstbuf = opt->dst1; |
289 | #endif | 289 | #endif |
290 | 290 | ||
291 | dst = dst_clone(skb->dst); | 291 | dst = dst_clone(skb_dst(skb)); |
292 | if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { | 292 | if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { |
293 | dst_release(dst); | 293 | dst_release(dst); |
294 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; | 294 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; |
@@ -333,7 +333,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) | |||
333 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || | 333 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || |
334 | !pskb_may_pull(skb, (skb_transport_offset(skb) + | 334 | !pskb_may_pull(skb, (skb_transport_offset(skb) + |
335 | ((skb_transport_header(skb)[1] + 1) << 3)))) { | 335 | ((skb_transport_header(skb)[1] + 1) << 3)))) { |
336 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 336 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
337 | IPSTATS_MIB_INHDRERRORS); | 337 | IPSTATS_MIB_INHDRERRORS); |
338 | kfree_skb(skb); | 338 | kfree_skb(skb); |
339 | return -1; | 339 | return -1; |
@@ -343,7 +343,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) | |||
343 | 343 | ||
344 | if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || | 344 | if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || |
345 | skb->pkt_type != PACKET_HOST) { | 345 | skb->pkt_type != PACKET_HOST) { |
346 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 346 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
347 | IPSTATS_MIB_INADDRERRORS); | 347 | IPSTATS_MIB_INADDRERRORS); |
348 | kfree_skb(skb); | 348 | kfree_skb(skb); |
349 | return -1; | 349 | return -1; |
@@ -358,7 +358,7 @@ looped_back: | |||
358 | * processed by own | 358 | * processed by own |
359 | */ | 359 | */ |
360 | if (!addr) { | 360 | if (!addr) { |
361 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 361 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
362 | IPSTATS_MIB_INADDRERRORS); | 362 | IPSTATS_MIB_INADDRERRORS); |
363 | kfree_skb(skb); | 363 | kfree_skb(skb); |
364 | return -1; | 364 | return -1; |
@@ -384,7 +384,7 @@ looped_back: | |||
384 | goto unknown_rh; | 384 | goto unknown_rh; |
385 | /* Silently discard invalid RTH type 2 */ | 385 | /* Silently discard invalid RTH type 2 */ |
386 | if (hdr->hdrlen != 2 || hdr->segments_left != 1) { | 386 | if (hdr->hdrlen != 2 || hdr->segments_left != 1) { |
387 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 387 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
388 | IPSTATS_MIB_INHDRERRORS); | 388 | IPSTATS_MIB_INHDRERRORS); |
389 | kfree_skb(skb); | 389 | kfree_skb(skb); |
390 | return -1; | 390 | return -1; |
@@ -403,7 +403,7 @@ looped_back: | |||
403 | n = hdr->hdrlen >> 1; | 403 | n = hdr->hdrlen >> 1; |
404 | 404 | ||
405 | if (hdr->segments_left > n) { | 405 | if (hdr->segments_left > n) { |
406 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 406 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
407 | IPSTATS_MIB_INHDRERRORS); | 407 | IPSTATS_MIB_INHDRERRORS); |
408 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 408 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
409 | ((&hdr->segments_left) - | 409 | ((&hdr->segments_left) - |
@@ -417,7 +417,7 @@ looped_back: | |||
417 | if (skb_cloned(skb)) { | 417 | if (skb_cloned(skb)) { |
418 | /* the copy is a forwarded packet */ | 418 | /* the copy is a forwarded packet */ |
419 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 419 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
420 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 420 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
421 | IPSTATS_MIB_OUTDISCARDS); | 421 | IPSTATS_MIB_OUTDISCARDS); |
422 | kfree_skb(skb); | 422 | kfree_skb(skb); |
423 | return -1; | 423 | return -1; |
@@ -440,13 +440,13 @@ looped_back: | |||
440 | if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, | 440 | if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, |
441 | (xfrm_address_t *)&ipv6_hdr(skb)->saddr, | 441 | (xfrm_address_t *)&ipv6_hdr(skb)->saddr, |
442 | IPPROTO_ROUTING) < 0) { | 442 | IPPROTO_ROUTING) < 0) { |
443 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 443 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
444 | IPSTATS_MIB_INADDRERRORS); | 444 | IPSTATS_MIB_INADDRERRORS); |
445 | kfree_skb(skb); | 445 | kfree_skb(skb); |
446 | return -1; | 446 | return -1; |
447 | } | 447 | } |
448 | if (!ipv6_chk_home_addr(dev_net(skb->dst->dev), addr)) { | 448 | if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) { |
449 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 449 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
450 | IPSTATS_MIB_INADDRERRORS); | 450 | IPSTATS_MIB_INADDRERRORS); |
451 | kfree_skb(skb); | 451 | kfree_skb(skb); |
452 | return -1; | 452 | return -1; |
@@ -458,7 +458,7 @@ looped_back: | |||
458 | } | 458 | } |
459 | 459 | ||
460 | if (ipv6_addr_is_multicast(addr)) { | 460 | if (ipv6_addr_is_multicast(addr)) { |
461 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 461 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
462 | IPSTATS_MIB_INADDRERRORS); | 462 | IPSTATS_MIB_INADDRERRORS); |
463 | kfree_skb(skb); | 463 | kfree_skb(skb); |
464 | return -1; | 464 | return -1; |
@@ -468,17 +468,17 @@ looped_back: | |||
468 | ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr); | 468 | ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr); |
469 | ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr); | 469 | ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr); |
470 | 470 | ||
471 | dst_release(xchg(&skb->dst, NULL)); | 471 | skb_dst_drop(skb); |
472 | ip6_route_input(skb); | 472 | ip6_route_input(skb); |
473 | if (skb->dst->error) { | 473 | if (skb_dst(skb)->error) { |
474 | skb_push(skb, skb->data - skb_network_header(skb)); | 474 | skb_push(skb, skb->data - skb_network_header(skb)); |
475 | dst_input(skb); | 475 | dst_input(skb); |
476 | return -1; | 476 | return -1; |
477 | } | 477 | } |
478 | 478 | ||
479 | if (skb->dst->dev->flags&IFF_LOOPBACK) { | 479 | if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) { |
480 | if (ipv6_hdr(skb)->hop_limit <= 1) { | 480 | if (ipv6_hdr(skb)->hop_limit <= 1) { |
481 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 481 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
482 | IPSTATS_MIB_INHDRERRORS); | 482 | IPSTATS_MIB_INHDRERRORS); |
483 | icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, | 483 | icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, |
484 | 0, skb->dev); | 484 | 0, skb->dev); |
@@ -494,7 +494,7 @@ looped_back: | |||
494 | return -1; | 494 | return -1; |
495 | 495 | ||
496 | unknown_rh: | 496 | unknown_rh: |
497 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); | 497 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); |
498 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 498 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
499 | (&hdr->type) - skb_network_header(skb)); | 499 | (&hdr->type) - skb_network_header(skb)); |
500 | return -1; | 500 | return -1; |
@@ -552,11 +552,11 @@ void ipv6_exthdrs_exit(void) | |||
552 | **********************************/ | 552 | **********************************/ |
553 | 553 | ||
554 | /* | 554 | /* |
555 | * Note: we cannot rely on skb->dst before we assign it in ip6_route_input(). | 555 | * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input(). |
556 | */ | 556 | */ |
557 | static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) | 557 | static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) |
558 | { | 558 | { |
559 | return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev); | 559 | return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); |
560 | } | 560 | } |
561 | 561 | ||
562 | /* Router Alert as of RFC 2711 */ | 562 | /* Router Alert as of RFC 2711 */ |
@@ -581,7 +581,7 @@ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) | |||
581 | { | 581 | { |
582 | const unsigned char *nh = skb_network_header(skb); | 582 | const unsigned char *nh = skb_network_header(skb); |
583 | u32 pkt_len; | 583 | u32 pkt_len; |
584 | struct net *net = dev_net(skb->dst->dev); | 584 | struct net *net = dev_net(skb_dst(skb)->dev); |
585 | 585 | ||
586 | if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { | 586 | if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { |
587 | LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", | 587 | LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", |
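Every exthdrs.c hunk is the same mechanical conversion: direct reads of skb->dst become skb_dst(skb), and assignments become skb_dst_set()/skb_dst_drop(). The point of the accessors is to stop open-coded field access so the representation can change behind them (the dst moves into an unsigned long slot on the skb). A simplified sketch of the trio, assumed to match include/linux/skbuff.h and include/net/dst.h of this era:

/* Simplified sketch of the accessor trio this series converts to. */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->_skb_dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_dst = (unsigned long)dst;
}

static inline void skb_dst_drop(struct sk_buff *skb)
{
	/* Release the route reference, if any, and clear the slot. */
	if (skb->_skb_dst)
		dst_release(skb_dst(skb));
	skb->_skb_dst = 0UL;
}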
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 3c3732d50c1a..cc4797dd8325 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -228,7 +228,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | |||
228 | __inet6_csk_dst_store(sk, dst, NULL, NULL); | 228 | __inet6_csk_dst_store(sk, dst, NULL, NULL); |
229 | } | 229 | } |
230 | 230 | ||
231 | skb->dst = dst_clone(dst); | 231 | skb_dst_set(skb, dst_clone(dst)); |
232 | 232 | ||
233 | /* Restore final destination back after routing done */ | 233 | /* Restore final destination back after routing done */ |
234 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | 234 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index bc1a920c34a1..c3a07d75b5f5 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | inline int ip6_rcv_finish( struct sk_buff *skb) | 49 | inline int ip6_rcv_finish( struct sk_buff *skb) |
50 | { | 50 | { |
51 | if (skb->dst == NULL) | 51 | if (skb_dst(skb) == NULL) |
52 | ip6_route_input(skb); | 52 | ip6_route_input(skb); |
53 | 53 | ||
54 | return dst_input(skb); | 54 | return dst_input(skb); |
@@ -91,7 +91,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
91 | * arrived via the sending interface (ethX), because of the | 91 | * arrived via the sending interface (ethX), because of the |
92 | * nature of scoping architecture. --yoshfuji | 92 | * nature of scoping architecture. --yoshfuji |
93 | */ | 93 | */ |
94 | IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex; | 94 | IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex; |
95 | 95 | ||
96 | if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) | 96 | if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) |
97 | goto err; | 97 | goto err; |
@@ -161,7 +161,7 @@ static int ip6_input_finish(struct sk_buff *skb) | |||
161 | int nexthdr, raw; | 161 | int nexthdr, raw; |
162 | u8 hash; | 162 | u8 hash; |
163 | struct inet6_dev *idev; | 163 | struct inet6_dev *idev; |
164 | struct net *net = dev_net(skb->dst->dev); | 164 | struct net *net = dev_net(skb_dst(skb)->dev); |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * Parse extension headers | 167 | * Parse extension headers |
@@ -169,7 +169,7 @@ static int ip6_input_finish(struct sk_buff *skb) | |||
169 | 169 | ||
170 | rcu_read_lock(); | 170 | rcu_read_lock(); |
171 | resubmit: | 171 | resubmit: |
172 | idev = ip6_dst_idev(skb->dst); | 172 | idev = ip6_dst_idev(skb_dst(skb)); |
173 | if (!pskb_pull(skb, skb_transport_offset(skb))) | 173 | if (!pskb_pull(skb, skb_transport_offset(skb))) |
174 | goto discard; | 174 | goto discard; |
175 | nhoff = IP6CB(skb)->nhoff; | 175 | nhoff = IP6CB(skb)->nhoff; |
@@ -242,8 +242,8 @@ int ip6_mc_input(struct sk_buff *skb) | |||
242 | struct ipv6hdr *hdr; | 242 | struct ipv6hdr *hdr; |
243 | int deliver; | 243 | int deliver; |
244 | 244 | ||
245 | IP6_UPD_PO_STATS_BH(dev_net(skb->dst->dev), | 245 | IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), |
246 | ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCAST, | 246 | ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, |
247 | skb->len); | 247 | skb->len); |
248 | 248 | ||
249 | hdr = ipv6_hdr(skb); | 249 | hdr = ipv6_hdr(skb); |
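ip6_rcv_finish() above shows the receive-side contract: if nothing (for example an earlier netfilter reroute) has attached a dst yet, ip6_route_input() performs the lookup and stores the result via skb_dst_set(); dst_input() then merely dispatches through whichever input handler the lookup installed. Roughly, per include/net/dst.h of this era:

/* Rough shape of the dispatch: local delivery and forwarding differ
 * only in which input handler the route lookup installed. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);	/* e.g. ip6_input or ip6_forward */
}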
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 735a2bf4b5f1..7c76e3d18215 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -78,7 +78,7 @@ int __ip6_local_out(struct sk_buff *skb) | |||
78 | len = 0; | 78 | len = 0; |
79 | ipv6_hdr(skb)->payload_len = htons(len); | 79 | ipv6_hdr(skb)->payload_len = htons(len); |
80 | 80 | ||
81 | return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev, | 81 | return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, |
82 | dst_output); | 82 | dst_output); |
83 | } | 83 | } |
84 | 84 | ||
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(ip6_local_out); | |||
96 | 96 | ||
97 | static int ip6_output_finish(struct sk_buff *skb) | 97 | static int ip6_output_finish(struct sk_buff *skb) |
98 | { | 98 | { |
99 | struct dst_entry *dst = skb->dst; | 99 | struct dst_entry *dst = skb_dst(skb); |
100 | 100 | ||
101 | if (dst->hh) | 101 | if (dst->hh) |
102 | return neigh_hh_output(dst->hh, skb); | 102 | return neigh_hh_output(dst->hh, skb); |
@@ -117,7 +117,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | |||
117 | __skb_pull(newskb, skb_network_offset(newskb)); | 117 | __skb_pull(newskb, skb_network_offset(newskb)); |
118 | newskb->pkt_type = PACKET_LOOPBACK; | 118 | newskb->pkt_type = PACKET_LOOPBACK; |
119 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 119 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
120 | WARN_ON(!newskb->dst); | 120 | WARN_ON(!skb_dst(newskb)); |
121 | 121 | ||
122 | netif_rx(newskb); | 122 | netif_rx(newskb); |
123 | return 0; | 123 | return 0; |
@@ -126,7 +126,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) | |||
126 | 126 | ||
127 | static int ip6_output2(struct sk_buff *skb) | 127 | static int ip6_output2(struct sk_buff *skb) |
128 | { | 128 | { |
129 | struct dst_entry *dst = skb->dst; | 129 | struct dst_entry *dst = skb_dst(skb); |
130 | struct net_device *dev = dst->dev; | 130 | struct net_device *dev = dst->dev; |
131 | 131 | ||
132 | skb->protocol = htons(ETH_P_IPV6); | 132 | skb->protocol = htons(ETH_P_IPV6); |
@@ -134,7 +134,7 @@ static int ip6_output2(struct sk_buff *skb) | |||
134 | 134 | ||
135 | if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { | 135 | if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { |
136 | struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; | 136 | struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; |
137 | struct inet6_dev *idev = ip6_dst_idev(skb->dst); | 137 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
138 | 138 | ||
139 | if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && | 139 | if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && |
140 | ((mroute6_socket(dev_net(dev)) && | 140 | ((mroute6_socket(dev_net(dev)) && |
@@ -172,21 +172,21 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) | |||
172 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 172 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; |
173 | 173 | ||
174 | return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ? | 174 | return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ? |
175 | skb->dst->dev->mtu : dst_mtu(skb->dst); | 175 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
176 | } | 176 | } |
177 | 177 | ||
178 | int ip6_output(struct sk_buff *skb) | 178 | int ip6_output(struct sk_buff *skb) |
179 | { | 179 | { |
180 | struct inet6_dev *idev = ip6_dst_idev(skb->dst); | 180 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
181 | if (unlikely(idev->cnf.disable_ipv6)) { | 181 | if (unlikely(idev->cnf.disable_ipv6)) { |
182 | IP6_INC_STATS(dev_net(skb->dst->dev), idev, | 182 | IP6_INC_STATS(dev_net(skb_dst(skb)->dev), idev, |
183 | IPSTATS_MIB_OUTDISCARDS); | 183 | IPSTATS_MIB_OUTDISCARDS); |
184 | kfree_skb(skb); | 184 | kfree_skb(skb); |
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | 187 | ||
188 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || | 188 | if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || |
189 | dst_allfrag(skb->dst)) | 189 | dst_allfrag(skb_dst(skb))) |
190 | return ip6_fragment(skb, ip6_output2); | 190 | return ip6_fragment(skb, ip6_output2); |
191 | else | 191 | else |
192 | return ip6_output2(skb); | 192 | return ip6_output2(skb); |
@@ -202,7 +202,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
202 | struct net *net = sock_net(sk); | 202 | struct net *net = sock_net(sk); |
203 | struct ipv6_pinfo *np = inet6_sk(sk); | 203 | struct ipv6_pinfo *np = inet6_sk(sk); |
204 | struct in6_addr *first_hop = &fl->fl6_dst; | 204 | struct in6_addr *first_hop = &fl->fl6_dst; |
205 | struct dst_entry *dst = skb->dst; | 205 | struct dst_entry *dst = skb_dst(skb); |
206 | struct ipv6hdr *hdr; | 206 | struct ipv6hdr *hdr; |
207 | u8 proto = fl->proto; | 207 | u8 proto = fl->proto; |
208 | int seg_len = skb->len; | 208 | int seg_len = skb->len; |
@@ -222,7 +222,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
222 | if (skb_headroom(skb) < head_room) { | 222 | if (skb_headroom(skb) < head_room) { |
223 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); | 223 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); |
224 | if (skb2 == NULL) { | 224 | if (skb2 == NULL) { |
225 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 225 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
226 | IPSTATS_MIB_OUTDISCARDS); | 226 | IPSTATS_MIB_OUTDISCARDS); |
227 | kfree_skb(skb); | 227 | kfree_skb(skb); |
228 | return -ENOBUFS; | 228 | return -ENOBUFS; |
@@ -276,7 +276,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
276 | 276 | ||
277 | mtu = dst_mtu(dst); | 277 | mtu = dst_mtu(dst); |
278 | if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { | 278 | if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { |
279 | IP6_UPD_PO_STATS(net, ip6_dst_idev(skb->dst), | 279 | IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), |
280 | IPSTATS_MIB_OUT, skb->len); | 280 | IPSTATS_MIB_OUT, skb->len); |
281 | return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, | 281 | return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, |
282 | dst_output); | 282 | dst_output); |
@@ -286,7 +286,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
286 | printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); | 286 | printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); |
287 | skb->dev = dst->dev; | 287 | skb->dev = dst->dev; |
288 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 288 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
289 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); | 289 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); |
290 | kfree_skb(skb); | 290 | kfree_skb(skb); |
291 | return -EMSGSIZE; | 291 | return -EMSGSIZE; |
292 | } | 292 | } |
@@ -416,7 +416,7 @@ static inline int ip6_forward_finish(struct sk_buff *skb) | |||
416 | 416 | ||
417 | int ip6_forward(struct sk_buff *skb) | 417 | int ip6_forward(struct sk_buff *skb) |
418 | { | 418 | { |
419 | struct dst_entry *dst = skb->dst; | 419 | struct dst_entry *dst = skb_dst(skb); |
420 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 420 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
421 | struct inet6_skb_parm *opt = IP6CB(skb); | 421 | struct inet6_skb_parm *opt = IP6CB(skb); |
422 | struct net *net = dev_net(dst->dev); | 422 | struct net *net = dev_net(dst->dev); |
@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb) | |||
485 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); | 485 | IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); |
486 | goto drop; | 486 | goto drop; |
487 | } | 487 | } |
488 | dst = skb->dst; | 488 | dst = skb_dst(skb); |
489 | 489 | ||
490 | /* IPv6 specs say nothing about it, but it is clear that we cannot | 490 | /* IPv6 specs say nothing about it, but it is clear that we cannot |
491 | send redirects to source routed frames. | 491 | send redirects to source routed frames. |
@@ -566,8 +566,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
566 | to->pkt_type = from->pkt_type; | 566 | to->pkt_type = from->pkt_type; |
567 | to->priority = from->priority; | 567 | to->priority = from->priority; |
568 | to->protocol = from->protocol; | 568 | to->protocol = from->protocol; |
569 | dst_release(to->dst); | 569 | skb_dst_drop(to); |
570 | to->dst = dst_clone(from->dst); | 570 | skb_dst_set(to, dst_clone(skb_dst(from))); |
571 | to->dev = from->dev; | 571 | to->dev = from->dev; |
572 | to->mark = from->mark; | 572 | to->mark = from->mark; |
573 | 573 | ||
@@ -624,7 +624,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | |||
624 | static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | 624 | static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) |
625 | { | 625 | { |
626 | struct sk_buff *frag; | 626 | struct sk_buff *frag; |
627 | struct rt6_info *rt = (struct rt6_info*)skb->dst; | 627 | struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); |
628 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 628 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; |
629 | struct ipv6hdr *tmp_hdr; | 629 | struct ipv6hdr *tmp_hdr; |
630 | struct frag_hdr *fh; | 630 | struct frag_hdr *fh; |
@@ -632,7 +632,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
632 | __be32 frag_id = 0; | 632 | __be32 frag_id = 0; |
633 | int ptr, offset = 0, err=0; | 633 | int ptr, offset = 0, err=0; |
634 | u8 *prevhdr, nexthdr = 0; | 634 | u8 *prevhdr, nexthdr = 0; |
635 | struct net *net = dev_net(skb->dst->dev); | 635 | struct net *net = dev_net(skb_dst(skb)->dev); |
636 | 636 | ||
637 | hlen = ip6_find_1stfragopt(skb, &prevhdr); | 637 | hlen = ip6_find_1stfragopt(skb, &prevhdr); |
638 | nexthdr = *prevhdr; | 638 | nexthdr = *prevhdr; |
@@ -644,9 +644,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
644 | * check should be redundant, but it's free.) | 644 | * check should be redundant, but it's free.) |
645 | */ | 645 | */ |
646 | if (!skb->local_df) { | 646 | if (!skb->local_df) { |
647 | skb->dev = skb->dst->dev; | 647 | skb->dev = skb_dst(skb)->dev; |
648 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 648 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
649 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 649 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
650 | IPSTATS_MIB_FRAGFAILS); | 650 | IPSTATS_MIB_FRAGFAILS); |
651 | kfree_skb(skb); | 651 | kfree_skb(skb); |
652 | return -EMSGSIZE; | 652 | return -EMSGSIZE; |
@@ -658,7 +658,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
658 | } | 658 | } |
659 | mtu -= hlen + sizeof(struct frag_hdr); | 659 | mtu -= hlen + sizeof(struct frag_hdr); |
660 | 660 | ||
661 | if (skb_shinfo(skb)->frag_list) { | 661 | if (skb_has_frags(skb)) { |
662 | int first_len = skb_pagelen(skb); | 662 | int first_len = skb_pagelen(skb); |
663 | int truesizes = 0; | 663 | int truesizes = 0; |
664 | 664 | ||
@@ -667,7 +667,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
667 | skb_cloned(skb)) | 667 | skb_cloned(skb)) |
668 | goto slow_path; | 668 | goto slow_path; |
669 | 669 | ||
670 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | 670 | skb_walk_frags(skb, frag) { |
671 | /* Correct geometry. */ | 671 | /* Correct geometry. */ |
672 | if (frag->len > mtu || | 672 | if (frag->len > mtu || |
673 | ((frag->len & 7) && frag->next) || | 673 | ((frag->len & 7) && frag->next) || |
@@ -680,7 +680,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
680 | 680 | ||
681 | BUG_ON(frag->sk); | 681 | BUG_ON(frag->sk); |
682 | if (skb->sk) { | 682 | if (skb->sk) { |
683 | sock_hold(skb->sk); | ||
684 | frag->sk = skb->sk; | 683 | frag->sk = skb->sk; |
685 | frag->destructor = sock_wfree; | 684 | frag->destructor = sock_wfree; |
686 | truesizes += frag->truesize; | 685 | truesizes += frag->truesize; |
@@ -690,13 +689,13 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
690 | err = 0; | 689 | err = 0; |
691 | offset = 0; | 690 | offset = 0; |
692 | frag = skb_shinfo(skb)->frag_list; | 691 | frag = skb_shinfo(skb)->frag_list; |
693 | skb_shinfo(skb)->frag_list = NULL; | 692 | skb_frag_list_init(skb); |
694 | /* BUILD HEADER */ | 693 | /* BUILD HEADER */ |
695 | 694 | ||
696 | *prevhdr = NEXTHDR_FRAGMENT; | 695 | *prevhdr = NEXTHDR_FRAGMENT; |
697 | tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); | 696 | tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); |
698 | if (!tmp_hdr) { | 697 | if (!tmp_hdr) { |
699 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 698 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
700 | IPSTATS_MIB_FRAGFAILS); | 699 | IPSTATS_MIB_FRAGFAILS); |
701 | return -ENOMEM; | 700 | return -ENOMEM; |
702 | } | 701 | } |
@@ -809,7 +808,7 @@ slow_path: | |||
809 | 808 | ||
810 | if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { | 809 | if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { |
811 | NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); | 810 | NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); |
812 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 811 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
813 | IPSTATS_MIB_FRAGFAILS); | 812 | IPSTATS_MIB_FRAGFAILS); |
814 | err = -ENOMEM; | 813 | err = -ENOMEM; |
815 | goto fail; | 814 | goto fail; |
@@ -873,16 +872,16 @@ slow_path: | |||
873 | if (err) | 872 | if (err) |
874 | goto fail; | 873 | goto fail; |
875 | 874 | ||
876 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 875 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
877 | IPSTATS_MIB_FRAGCREATES); | 876 | IPSTATS_MIB_FRAGCREATES); |
878 | } | 877 | } |
879 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 878 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
880 | IPSTATS_MIB_FRAGOKS); | 879 | IPSTATS_MIB_FRAGOKS); |
881 | kfree_skb(skb); | 880 | kfree_skb(skb); |
882 | return err; | 881 | return err; |
883 | 882 | ||
884 | fail: | 883 | fail: |
885 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 884 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
886 | IPSTATS_MIB_FRAGFAILS); | 885 | IPSTATS_MIB_FRAGFAILS); |
887 | kfree_skb(skb); | 886 | kfree_skb(skb); |
888 | return err; | 887 | return err; |
@@ -1516,10 +1515,10 @@ int ip6_push_pending_frames(struct sock *sk) | |||
1516 | skb->priority = sk->sk_priority; | 1515 | skb->priority = sk->sk_priority; |
1517 | skb->mark = sk->sk_mark; | 1516 | skb->mark = sk->sk_mark; |
1518 | 1517 | ||
1519 | skb->dst = dst_clone(&rt->u.dst); | 1518 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
1520 | IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); | 1519 | IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); |
1521 | if (proto == IPPROTO_ICMPV6) { | 1520 | if (proto == IPPROTO_ICMPV6) { |
1522 | struct inet6_dev *idev = ip6_dst_idev(skb->dst); | 1521 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
1523 | 1522 | ||
1524 | ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); | 1523 | ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); |
1525 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 1524 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); |
@@ -1545,8 +1544,8 @@ void ip6_flush_pending_frames(struct sock *sk) | |||
1545 | struct sk_buff *skb; | 1544 | struct sk_buff *skb; |
1546 | 1545 | ||
1547 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { | 1546 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { |
1548 | if (skb->dst) | 1547 | if (skb_dst(skb)) |
1549 | IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb->dst), | 1548 | IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)), |
1550 | IPSTATS_MIB_OUTDISCARDS); | 1549 | IPSTATS_MIB_OUTDISCARDS); |
1551 | kfree_skb(skb); | 1550 | kfree_skb(skb); |
1552 | } | 1551 | } |
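Besides the dst conversion, the ip6_fragment() hunks above replace direct pokes at skb_shinfo(skb)->frag_list with dedicated helpers: skb_has_frags() to test, skb_walk_frags() to iterate, and skb_frag_list_init() to detach the list. Their shapes, simplified and assumed to match include/linux/skbuff.h of this era:

/* Sketch of the frag-list helpers used by the fast fragmentation path. */
static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

/* Iterate over every skb chained on the frag list. */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)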
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index af256d47fd35..404d16a97d5c 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -532,8 +532,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
532 | if (!skb2) | 532 | if (!skb2) |
533 | return 0; | 533 | return 0; |
534 | 534 | ||
535 | dst_release(skb2->dst); | 535 | skb_dst_drop(skb2); |
536 | skb2->dst = NULL; | 536 | |
537 | skb_pull(skb2, offset); | 537 | skb_pull(skb2, offset); |
538 | skb_reset_network_header(skb2); | 538 | skb_reset_network_header(skb2); |
539 | eiph = ip_hdr(skb2); | 539 | eiph = ip_hdr(skb2); |
@@ -560,21 +560,21 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
560 | ip_rt_put(rt); | 560 | ip_rt_put(rt); |
561 | goto out; | 561 | goto out; |
562 | } | 562 | } |
563 | skb2->dst = (struct dst_entry *)rt; | 563 | skb_dst_set(skb2, (struct dst_entry *)rt); |
564 | } else { | 564 | } else { |
565 | ip_rt_put(rt); | 565 | ip_rt_put(rt); |
566 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, | 566 | if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, |
567 | skb2->dev) || | 567 | skb2->dev) || |
568 | skb2->dst->dev->type != ARPHRD_TUNNEL) | 568 | skb_dst(skb2)->dev->type != ARPHRD_TUNNEL) |
569 | goto out; | 569 | goto out; |
570 | } | 570 | } |
571 | 571 | ||
572 | /* change mtu on this route */ | 572 | /* change mtu on this route */ |
573 | if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { | 573 | if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) { |
574 | if (rel_info > dst_mtu(skb2->dst)) | 574 | if (rel_info > dst_mtu(skb_dst(skb2))) |
575 | goto out; | 575 | goto out; |
576 | 576 | ||
577 | skb2->dst->ops->update_pmtu(skb2->dst, rel_info); | 577 | skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info); |
578 | } | 578 | } |
579 | 579 | ||
580 | icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); | 580 | icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); |
@@ -606,8 +606,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
606 | if (!skb2) | 606 | if (!skb2) |
607 | return 0; | 607 | return 0; |
608 | 608 | ||
609 | dst_release(skb2->dst); | 609 | skb_dst_drop(skb2); |
610 | skb2->dst = NULL; | ||
611 | skb_pull(skb2, offset); | 610 | skb_pull(skb2, offset); |
612 | skb_reset_network_header(skb2); | 611 | skb_reset_network_header(skb2); |
613 | 612 | ||
@@ -720,8 +719,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
720 | skb->pkt_type = PACKET_HOST; | 719 | skb->pkt_type = PACKET_HOST; |
721 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); | 720 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); |
722 | skb->dev = t->dev; | 721 | skb->dev = t->dev; |
723 | dst_release(skb->dst); | 722 | skb_dst_drop(skb); |
724 | skb->dst = NULL; | ||
725 | nf_reset(skb); | 723 | nf_reset(skb); |
726 | 724 | ||
727 | dscp_ecn_decapsulate(t, ipv6h, skb); | 725 | dscp_ecn_decapsulate(t, ipv6h, skb); |
@@ -885,8 +883,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
885 | } | 883 | } |
886 | if (mtu < IPV6_MIN_MTU) | 884 | if (mtu < IPV6_MIN_MTU) |
887 | mtu = IPV6_MIN_MTU; | 885 | mtu = IPV6_MIN_MTU; |
888 | if (skb->dst) | 886 | if (skb_dst(skb)) |
889 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 887 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
890 | if (skb->len > mtu) { | 888 | if (skb->len > mtu) { |
891 | *pmtu = mtu; | 889 | *pmtu = mtu; |
892 | err = -EMSGSIZE; | 890 | err = -EMSGSIZE; |
@@ -910,8 +908,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
910 | kfree_skb(skb); | 908 | kfree_skb(skb); |
911 | skb = new_skb; | 909 | skb = new_skb; |
912 | } | 910 | } |
913 | dst_release(skb->dst); | 911 | skb_dst_drop(skb); |
914 | skb->dst = dst_clone(dst); | 912 | skb_dst_set(skb, dst_clone(dst)); |
915 | 913 | ||
916 | skb->transport_header = skb->network_header; | 914 | skb->transport_header = skb->network_header; |
917 | 915 | ||
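The ip6_tnl_rcv() hunk above is the standard decapsulation idiom, now expressed through skb_dst_drop(): the route cached on the outer packet is meaningless for the inner one, so it is released before the packet re-enters the stack. The same two-line removal recurs in ip6mr.c (pim6_rcv) and sit.c (ipip6_rcv) below. A condensed sketch, function name illustrative:

/* Sketch: hand a decapsulated packet back to the stack with no stale
 * routing state attached. */
static void reinject_decapsulated(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb_dst_drop(skb);	/* was: dst_release(skb->dst); skb->dst = NULL; */
	nf_reset(skb);		/* conntrack state belongs to the outer packet */
	netif_rx(skb);
}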
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 228be551e9c1..a35d8fc55b04 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -398,10 +398,9 @@ static int pim6_rcv(struct sk_buff *skb) | |||
398 | skb->protocol = htons(ETH_P_IPV6); | 398 | skb->protocol = htons(ETH_P_IPV6); |
399 | skb->ip_summed = 0; | 399 | skb->ip_summed = 0; |
400 | skb->pkt_type = PACKET_HOST; | 400 | skb->pkt_type = PACKET_HOST; |
401 | dst_release(skb->dst); | 401 | skb_dst_drop(skb); |
402 | reg_dev->stats.rx_bytes += skb->len; | 402 | reg_dev->stats.rx_bytes += skb->len; |
403 | reg_dev->stats.rx_packets++; | 403 | reg_dev->stats.rx_packets++; |
404 | skb->dst = NULL; | ||
405 | nf_reset(skb); | 404 | nf_reset(skb); |
406 | netif_rx(skb); | 405 | netif_rx(skb); |
407 | dev_put(reg_dev); | 406 | dev_put(reg_dev); |
@@ -849,7 +848,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi, | |||
849 | ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); | 848 | ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); |
850 | ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); | 849 | ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); |
851 | 850 | ||
852 | skb->dst = dst_clone(pkt->dst); | 851 | skb_dst_set(skb, dst_clone(skb_dst(pkt))); |
853 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 852 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
854 | } | 853 | } |
855 | 854 | ||
@@ -1487,7 +1486,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1487 | 1486 | ||
1488 | static inline int ip6mr_forward2_finish(struct sk_buff *skb) | 1487 | static inline int ip6mr_forward2_finish(struct sk_buff *skb) |
1489 | { | 1488 | { |
1490 | IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst), | 1489 | IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), |
1491 | IPSTATS_MIB_OUTFORWDATAGRAMS); | 1490 | IPSTATS_MIB_OUTFORWDATAGRAMS); |
1492 | return dst_output(skb); | 1491 | return dst_output(skb); |
1493 | } | 1492 | } |
@@ -1532,8 +1531,8 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi) | |||
1532 | if (!dst) | 1531 | if (!dst) |
1533 | goto out_free; | 1532 | goto out_free; |
1534 | 1533 | ||
1535 | dst_release(skb->dst); | 1534 | skb_dst_drop(skb); |
1536 | skb->dst = dst; | 1535 | skb_dst_set(skb, dst); |
1537 | 1536 | ||
1538 | /* | 1537 | /* |
1539 | * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally | 1538 | * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally |
@@ -1722,7 +1721,7 @@ int ip6mr_get_route(struct net *net, | |||
1722 | { | 1721 | { |
1723 | int err; | 1722 | int err; |
1724 | struct mfc6_cache *cache; | 1723 | struct mfc6_cache *cache; |
1725 | struct rt6_info *rt = (struct rt6_info *)skb->dst; | 1724 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); |
1726 | 1725 | ||
1727 | read_lock(&mrt_lock); | 1726 | read_lock(&mrt_lock); |
1728 | cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); | 1727 | cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 4b48819a5b8d..4b264ed40a8c 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1448,6 +1448,7 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1448 | struct net *net = dev_net(skb->dev); | 1448 | struct net *net = dev_net(skb->dev); |
1449 | int err; | 1449 | int err; |
1450 | struct flowi fl; | 1450 | struct flowi fl; |
1451 | struct dst_entry *dst; | ||
1451 | 1452 | ||
1452 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 1453 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
1453 | 1454 | ||
@@ -1459,9 +1460,9 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1459 | IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), | 1460 | IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), |
1460 | mldlen, 0)); | 1461 | mldlen, 0)); |
1461 | 1462 | ||
1462 | skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | 1463 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); |
1463 | 1464 | ||
1464 | if (!skb->dst) { | 1465 | if (!dst) { |
1465 | err = -ENOMEM; | 1466 | err = -ENOMEM; |
1466 | goto err_out; | 1467 | goto err_out; |
1467 | } | 1468 | } |
@@ -1470,7 +1471,8 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1470 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, | 1471 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, |
1471 | skb->dev->ifindex); | 1472 | skb->dev->ifindex); |
1472 | 1473 | ||
1473 | err = xfrm_lookup(net, &skb->dst, &fl, NULL, 0); | 1474 | err = xfrm_lookup(net, &dst, &fl, NULL, 0); |
1475 | skb_dst_set(skb, dst); | ||
1474 | if (err) | 1476 | if (err) |
1475 | goto err_out; | 1477 | goto err_out; |
1476 | 1478 | ||
@@ -1775,6 +1777,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1775 | IPV6_TLV_ROUTERALERT, 2, 0, 0, | 1777 | IPV6_TLV_ROUTERALERT, 2, 0, 0, |
1776 | IPV6_TLV_PADN, 0 }; | 1778 | IPV6_TLV_PADN, 0 }; |
1777 | struct flowi fl; | 1779 | struct flowi fl; |
1780 | struct dst_entry *dst; | ||
1778 | 1781 | ||
1779 | if (type == ICMPV6_MGM_REDUCTION) | 1782 | if (type == ICMPV6_MGM_REDUCTION) |
1780 | snd_addr = &in6addr_linklocal_allrouters; | 1783 | snd_addr = &in6addr_linklocal_allrouters; |
@@ -1828,8 +1831,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1828 | 1831 | ||
1829 | idev = in6_dev_get(skb->dev); | 1832 | idev = in6_dev_get(skb->dev); |
1830 | 1833 | ||
1831 | skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | 1834 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); |
1832 | if (!skb->dst) { | 1835 | if (!dst) { |
1833 | err = -ENOMEM; | 1836 | err = -ENOMEM; |
1834 | goto err_out; | 1837 | goto err_out; |
1835 | } | 1838 | } |
@@ -1838,11 +1841,11 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1838 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, | 1841 | &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, |
1839 | skb->dev->ifindex); | 1842 | skb->dev->ifindex); |
1840 | 1843 | ||
1841 | err = xfrm_lookup(net, &skb->dst, &fl, NULL, 0); | 1844 | err = xfrm_lookup(net, &dst, &fl, NULL, 0); |
1842 | if (err) | 1845 | if (err) |
1843 | goto err_out; | 1846 | goto err_out; |
1844 | 1847 | ||
1845 | 1848 | skb_dst_set(skb, dst); | |
1846 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, | 1849 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, |
1847 | dst_output); | 1850 | dst_output); |
1848 | out: | 1851 | out: |
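mld_sendpack() and igmp6_send() above gain a local struct dst_entry *dst because xfrm_lookup() takes a struct dst_entry ** that it may rewrite; with the dst behind accessors there is no addressable skb field to pass, and attaching the dst only after a successful lookup also avoids parking a half-validated route on the skb. The resulting pattern, condensed into a sketch (the helper name is illustrative, not from this patch):

/* Sketch of the route-then-attach pattern used by the MLD/IGMPv6
 * senders above; mld_attach_dst is an illustrative name. */
static int mld_attach_dst(struct net *net, struct sk_buff *skb,
			  struct flowi *fl)
{
	struct dst_entry *dst;
	int err;

	dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
	if (!dst)
		return -ENOMEM;

	err = xfrm_lookup(net, &dst, fl, NULL, 0);	/* may replace dst */
	if (err)
		return err;

	skb_dst_set(skb, dst);	/* attach only once the route is valid */
	return 0;
}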
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index e09f12ee57cf..9eb68e92cc18 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -465,8 +465,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, | |||
465 | 1, &err); | 465 | 1, &err); |
466 | if (!skb) { | 466 | if (!skb) { |
467 | ND_PRINTK0(KERN_ERR | 467 | ND_PRINTK0(KERN_ERR |
468 | "ICMPv6 ND: %s() failed to allocate an skb.\n", | 468 | "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n", |
469 | __func__); | 469 | __func__, err); |
470 | return NULL; | 470 | return NULL; |
471 | } | 471 | } |
472 | 472 | ||
@@ -530,7 +530,7 @@ void ndisc_send_skb(struct sk_buff *skb, | |||
530 | return; | 530 | return; |
531 | } | 531 | } |
532 | 532 | ||
533 | skb->dst = dst; | 533 | skb_dst_set(skb, dst); |
534 | 534 | ||
535 | idev = in6_dev_get(dst->dev); | 535 | idev = in6_dev_get(dst->dev); |
536 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 536 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
@@ -1562,8 +1562,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1562 | 1, &err); | 1562 | 1, &err); |
1563 | if (buff == NULL) { | 1563 | if (buff == NULL) { |
1564 | ND_PRINTK0(KERN_ERR | 1564 | ND_PRINTK0(KERN_ERR |
1565 | "ICMPv6 Redirect: %s() failed to allocate an skb.\n", | 1565 | "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n", |
1566 | __func__); | 1566 | __func__, err); |
1567 | goto release; | 1567 | goto release; |
1568 | } | 1568 | } |
1569 | 1569 | ||
@@ -1612,7 +1612,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1612 | len, IPPROTO_ICMPV6, | 1612 | len, IPPROTO_ICMPV6, |
1613 | csum_partial(icmph, len, 0)); | 1613 | csum_partial(icmph, len, 0)); |
1614 | 1614 | ||
1615 | buff->dst = dst; | 1615 | skb_dst_set(buff, dst); |
1616 | idev = in6_dev_get(dst->dev); | 1616 | idev = in6_dev_get(dst->dev); |
1617 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 1617 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
1618 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, | 1618 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev, |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 834cea69fb53..d5ed92b14346 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | int ip6_route_me_harder(struct sk_buff *skb) | 13 | int ip6_route_me_harder(struct sk_buff *skb) |
14 | { | 14 | { |
15 | struct net *net = dev_net(skb->dst->dev); | 15 | struct net *net = dev_net(skb_dst(skb)->dev); |
16 | struct ipv6hdr *iph = ipv6_hdr(skb); | 16 | struct ipv6hdr *iph = ipv6_hdr(skb); |
17 | struct dst_entry *dst; | 17 | struct dst_entry *dst; |
18 | struct flowi fl = { | 18 | struct flowi fl = { |
@@ -28,9 +28,15 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
28 | 28 | ||
29 | #ifdef CONFIG_XFRM | 29 | #ifdef CONFIG_XFRM |
30 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && | 30 | if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && |
31 | xfrm_decode_session(skb, &fl, AF_INET6) == 0) | 31 | xfrm_decode_session(skb, &fl, AF_INET6) == 0) { |
32 | if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0)) | 32 | struct dst_entry *dst2 = skb_dst(skb); |
33 | |||
34 | if (xfrm_lookup(net, &dst2, &fl, skb->sk, 0)) { | ||
35 | skb_dst_set(skb, NULL); | ||
33 | return -1; | 36 | return -1; |
37 | } | ||
38 | skb_dst_set(skb, dst2); | ||
39 | } | ||
34 | #endif | 40 | #endif |
35 | 41 | ||
36 | if (dst->error) { | 42 | if (dst->error) { |
@@ -41,9 +47,9 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
41 | } | 47 | } |
42 | 48 | ||
43 | /* Drop old route. */ | 49 | /* Drop old route. */ |
44 | dst_release(skb->dst); | 50 | skb_dst_drop(skb); |
45 | 51 | ||
46 | skb->dst = dst; | 52 | skb_dst_set(skb, dst); |
47 | return 0; | 53 | return 0; |
48 | } | 54 | } |
49 | EXPORT_SYMBOL(ip6_route_me_harder); | 55 | EXPORT_SYMBOL(ip6_route_me_harder); |
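The ip6_route_me_harder() hunk grows for the same reason: xfrm_lookup() needs a pointer it can rewrite, and the failure path now explicitly clears the skb's dst slot, on the assumption that the lookup consumed the reference it was handed. Condensed into a sketch (helper name illustrative):

/* Sketch of the XFRM rerouting step above; assumes xfrm_lookup()
 * drops the passed-in reference on failure. */
static int reroute_through_xfrm(struct net *net, struct sk_buff *skb,
				struct flowi *fl)
{
	struct dst_entry *dst = skb_dst(skb);

	if (xfrm_lookup(net, &dst, fl, skb->sk, 0)) {
		skb_dst_set(skb, NULL);	/* don't keep a dangling pointer */
		return -1;
	}
	skb_dst_set(skb, dst);		/* the lookup may have replaced it */
	return 0;
}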
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 5a2d0a41694a..5a7f00cd15ce 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -112,7 +112,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) | |||
112 | return; | 112 | return; |
113 | } | 113 | } |
114 | 114 | ||
115 | nskb->dst = dst; | 115 | skb_dst_set(nskb, dst); |
116 | 116 | ||
117 | skb_reserve(nskb, hh_len + dst->header_len); | 117 | skb_reserve(nskb, hh_len + dst->header_len); |
118 | 118 | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 058a5e4a60c3..f3aba255ad9f 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -409,7 +409,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
409 | /* If the first fragment is fragmented itself, we split | 409 | /* If the first fragment is fragmented itself, we split |
410 | * it to two chunks: the first with data and paged part | 410 | * it to two chunks: the first with data and paged part |
411 | * and the second, holding only fragments. */ | 411 | * and the second, holding only fragments. */ |
412 | if (skb_shinfo(head)->frag_list) { | 412 | if (skb_has_frags(head)) { |
413 | struct sk_buff *clone; | 413 | struct sk_buff *clone; |
414 | int i, plen = 0; | 414 | int i, plen = 0; |
415 | 415 | ||
@@ -420,7 +420,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) | |||
420 | clone->next = head->next; | 420 | clone->next = head->next; |
421 | head->next = clone; | 421 | head->next = clone; |
422 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; | 422 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; |
423 | skb_shinfo(head)->frag_list = NULL; | 423 | skb_frag_list_init(head); |
424 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) | 424 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) |
425 | plen += skb_shinfo(head)->frags[i].size; | 425 | plen += skb_shinfo(head)->frags[i].size; |
426 | clone->len = clone->data_len = head->data_len - plen; | 426 | clone->len = clone->data_len = head->data_len - plen; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index e99307fba0b1..36a090d87a3d 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -625,7 +625,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
625 | 625 | ||
626 | skb->priority = sk->sk_priority; | 626 | skb->priority = sk->sk_priority; |
627 | skb->mark = sk->sk_mark; | 627 | skb->mark = sk->sk_mark; |
628 | skb->dst = dst_clone(&rt->u.dst); | 628 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
629 | 629 | ||
630 | skb_put(skb, length); | 630 | skb_put(skb, length); |
631 | skb_reset_network_header(skb); | 631 | skb_reset_network_header(skb); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index e9ac7a12f595..2642a41a8535 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -267,7 +267,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
267 | struct sk_buff *prev, *next; | 267 | struct sk_buff *prev, *next; |
268 | struct net_device *dev; | 268 | struct net_device *dev; |
269 | int offset, end; | 269 | int offset, end; |
270 | struct net *net = dev_net(skb->dst->dev); | 270 | struct net *net = dev_net(skb_dst(skb)->dev); |
271 | 271 | ||
272 | if (fq->q.last_in & INET_FRAG_COMPLETE) | 272 | if (fq->q.last_in & INET_FRAG_COMPLETE) |
273 | goto err; | 273 | goto err; |
@@ -277,7 +277,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
277 | ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); | 277 | ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); |
278 | 278 | ||
279 | if ((unsigned int)end > IPV6_MAXPLEN) { | 279 | if ((unsigned int)end > IPV6_MAXPLEN) { |
280 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 280 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
281 | IPSTATS_MIB_INHDRERRORS); | 281 | IPSTATS_MIB_INHDRERRORS); |
282 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 282 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
283 | ((u8 *)&fhdr->frag_off - | 283 | ((u8 *)&fhdr->frag_off - |
@@ -310,7 +310,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
310 | /* RFC2460 says always send parameter problem in | 310 | /* RFC2460 says always send parameter problem in |
311 | * this case. -DaveM | 311 | * this case. -DaveM |
312 | */ | 312 | */ |
313 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), | 313 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), |
314 | IPSTATS_MIB_INHDRERRORS); | 314 | IPSTATS_MIB_INHDRERRORS); |
315 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 315 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
316 | offsetof(struct ipv6hdr, payload_len)); | 316 | offsetof(struct ipv6hdr, payload_len)); |
@@ -434,7 +434,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
434 | return -1; | 434 | return -1; |
435 | 435 | ||
436 | err: | 436 | err: |
437 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), | 437 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
438 | IPSTATS_MIB_REASMFAILS); | 438 | IPSTATS_MIB_REASMFAILS); |
439 | kfree_skb(skb); | 439 | kfree_skb(skb); |
440 | return -1; | 440 | return -1; |
@@ -494,7 +494,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
494 | /* If the first fragment is fragmented itself, we split | 494 | /* If the first fragment is fragmented itself, we split |
495 | * it to two chunks: the first with data and paged part | 495 | * it to two chunks: the first with data and paged part |
496 | * and the second, holding only fragments. */ | 496 | * and the second, holding only fragments. */ |
497 | if (skb_shinfo(head)->frag_list) { | 497 | if (skb_has_frags(head)) { |
498 | struct sk_buff *clone; | 498 | struct sk_buff *clone; |
499 | int i, plen = 0; | 499 | int i, plen = 0; |
500 | 500 | ||
@@ -503,7 +503,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
503 | clone->next = head->next; | 503 | clone->next = head->next; |
504 | head->next = clone; | 504 | head->next = clone; |
505 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; | 505 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; |
506 | skb_shinfo(head)->frag_list = NULL; | 506 | skb_frag_list_init(head); |
507 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) | 507 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) |
508 | plen += skb_shinfo(head)->frags[i].size; | 508 | plen += skb_shinfo(head)->frags[i].size; |
509 | clone->len = clone->data_len = head->data_len - plen; | 509 | clone->len = clone->data_len = head->data_len - plen; |
@@ -576,9 +576,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
576 | struct frag_hdr *fhdr; | 576 | struct frag_hdr *fhdr; |
577 | struct frag_queue *fq; | 577 | struct frag_queue *fq; |
578 | struct ipv6hdr *hdr = ipv6_hdr(skb); | 578 | struct ipv6hdr *hdr = ipv6_hdr(skb); |
579 | struct net *net = dev_net(skb->dst->dev); | 579 | struct net *net = dev_net(skb_dst(skb)->dev); |
580 | 580 | ||
581 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS); | 581 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); |
582 | 582 | ||
583 | /* Jumbo payload inhibits frag. header */ | 583 | /* Jumbo payload inhibits frag. header */ |
584 | if (hdr->payload_len==0) | 584 | if (hdr->payload_len==0) |
@@ -595,17 +595,17 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
595 | /* It is not a fragmented frame */ | 595 | /* It is not a fragmented frame */ |
596 | skb->transport_header += sizeof(struct frag_hdr); | 596 | skb->transport_header += sizeof(struct frag_hdr); |
597 | IP6_INC_STATS_BH(net, | 597 | IP6_INC_STATS_BH(net, |
598 | ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS); | 598 | ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); |
599 | 599 | ||
600 | IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); | 600 | IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); |
601 | return 1; | 601 | return 1; |
602 | } | 602 | } |
603 | 603 | ||
604 | if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) | 604 | if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) |
605 | ip6_evictor(net, ip6_dst_idev(skb->dst)); | 605 | ip6_evictor(net, ip6_dst_idev(skb_dst(skb))); |
606 | 606 | ||
607 | if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, | 607 | if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, |
608 | ip6_dst_idev(skb->dst))) != NULL) { | 608 | ip6_dst_idev(skb_dst(skb)))) != NULL) { |
609 | int ret; | 609 | int ret; |
610 | 610 | ||
611 | spin_lock(&fq->q.lock); | 611 | spin_lock(&fq->q.lock); |
@@ -617,12 +617,12 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
617 | return ret; | 617 | return ret; |
618 | } | 618 | } |
619 | 619 | ||
620 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); | 620 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); |
621 | kfree_skb(skb); | 621 | kfree_skb(skb); |
622 | return -1; | 622 | return -1; |
623 | 623 | ||
624 | fail_hdr: | 624 | fail_hdr: |
625 | IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); | 625 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); |
626 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb)); | 626 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb)); |
627 | return -1; | 627 | return -1; |
628 | } | 628 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 032a5ec391c5..658293ea05ba 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -800,7 +800,7 @@ void ip6_route_input(struct sk_buff *skb) | |||
800 | if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) | 800 | if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG) |
801 | flags |= RT6_LOOKUP_F_IFACE; | 801 | flags |= RT6_LOOKUP_F_IFACE; |
802 | 802 | ||
803 | skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input); | 803 | skb_dst_set(skb, fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input)); |
804 | } | 804 | } |
805 | 805 | ||
806 | static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, | 806 | static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, |
@@ -911,7 +911,7 @@ static void ip6_link_failure(struct sk_buff *skb) | |||
911 | 911 | ||
912 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev); | 912 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev); |
913 | 913 | ||
914 | rt = (struct rt6_info *) skb->dst; | 914 | rt = (struct rt6_info *) skb_dst(skb); |
915 | if (rt) { | 915 | if (rt) { |
916 | if (rt->rt6i_flags&RTF_CACHE) { | 916 | if (rt->rt6i_flags&RTF_CACHE) { |
917 | dst_set_expires(&rt->u.dst, 0); | 917 | dst_set_expires(&rt->u.dst, 0); |
@@ -1868,7 +1868,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
1868 | static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes) | 1868 | static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes) |
1869 | { | 1869 | { |
1870 | int type; | 1870 | int type; |
1871 | struct dst_entry *dst = skb->dst; | 1871 | struct dst_entry *dst = skb_dst(skb); |
1872 | switch (ipstats_mib_noroutes) { | 1872 | switch (ipstats_mib_noroutes) { |
1873 | case IPSTATS_MIB_INNOROUTES: | 1873 | case IPSTATS_MIB_INNOROUTES: |
1874 | type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); | 1874 | type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); |
@@ -1895,7 +1895,7 @@ static int ip6_pkt_discard(struct sk_buff *skb) | |||
1895 | 1895 | ||
1896 | static int ip6_pkt_discard_out(struct sk_buff *skb) | 1896 | static int ip6_pkt_discard_out(struct sk_buff *skb) |
1897 | { | 1897 | { |
1898 | skb->dev = skb->dst->dev; | 1898 | skb->dev = skb_dst(skb)->dev; |
1899 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); | 1899 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); |
1900 | } | 1900 | } |
1901 | 1901 | ||
@@ -1908,7 +1908,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb) | |||
1908 | 1908 | ||
1909 | static int ip6_pkt_prohibit_out(struct sk_buff *skb) | 1909 | static int ip6_pkt_prohibit_out(struct sk_buff *skb) |
1910 | { | 1910 | { |
1911 | skb->dev = skb->dst->dev; | 1911 | skb->dev = skb_dst(skb)->dev; |
1912 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); | 1912 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); |
1913 | } | 1913 | } |
1914 | 1914 | ||
@@ -2366,7 +2366,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2366 | skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); | 2366 | skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); |
2367 | 2367 | ||
2368 | rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); | 2368 | rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); |
2369 | skb->dst = &rt->u.dst; | 2369 | skb_dst_set(skb, &rt->u.dst); |
2370 | 2370 | ||
2371 | err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, | 2371 | err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, |
2372 | RTM_NEWROUTE, NETLINK_CB(in_skb).pid, | 2372 | RTM_NEWROUTE, NETLINK_CB(in_skb).pid, |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index b3a59bd40f01..68e52308e552 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -575,8 +575,7 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
575 | tunnel->dev->stats.rx_packets++; | 575 | tunnel->dev->stats.rx_packets++; |
576 | tunnel->dev->stats.rx_bytes += skb->len; | 576 | tunnel->dev->stats.rx_bytes += skb->len; |
577 | skb->dev = tunnel->dev; | 577 | skb->dev = tunnel->dev; |
578 | dst_release(skb->dst); | 578 | skb_dst_drop(skb); |
579 | skb->dst = NULL; | ||
580 | nf_reset(skb); | 579 | nf_reset(skb); |
581 | ipip6_ecn_decapsulate(iph, skb); | 580 | ipip6_ecn_decapsulate(iph, skb); |
582 | netif_rx(skb); | 581 | netif_rx(skb); |
@@ -638,8 +637,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
638 | if (dev->priv_flags & IFF_ISATAP) { | 637 | if (dev->priv_flags & IFF_ISATAP) { |
639 | struct neighbour *neigh = NULL; | 638 | struct neighbour *neigh = NULL; |
640 | 639 | ||
641 | if (skb->dst) | 640 | if (skb_dst(skb)) |
642 | neigh = skb->dst->neighbour; | 641 | neigh = skb_dst(skb)->neighbour; |
643 | 642 | ||
644 | if (neigh == NULL) { | 643 | if (neigh == NULL) { |
645 | if (net_ratelimit()) | 644 | if (net_ratelimit()) |
@@ -663,8 +662,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
663 | if (!dst) { | 662 | if (!dst) { |
664 | struct neighbour *neigh = NULL; | 663 | struct neighbour *neigh = NULL; |
665 | 664 | ||
666 | if (skb->dst) | 665 | if (skb_dst(skb)) |
667 | neigh = skb->dst->neighbour; | 666 | neigh = skb_dst(skb)->neighbour; |
668 | 667 | ||
669 | if (neigh == NULL) { | 668 | if (neigh == NULL) { |
670 | if (net_ratelimit()) | 669 | if (net_ratelimit()) |
@@ -714,7 +713,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
714 | if (tiph->frag_off) | 713 | if (tiph->frag_off) |
715 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 714 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); |
716 | else | 715 | else |
717 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 716 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
718 | 717 | ||
719 | if (mtu < 68) { | 718 | if (mtu < 68) { |
720 | stats->collisions++; | 719 | stats->collisions++; |
@@ -723,8 +722,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
723 | } | 722 | } |
724 | if (mtu < IPV6_MIN_MTU) | 723 | if (mtu < IPV6_MIN_MTU) |
725 | mtu = IPV6_MIN_MTU; | 724 | mtu = IPV6_MIN_MTU; |
726 | if (tunnel->parms.iph.daddr && skb->dst) | 725 | if (tunnel->parms.iph.daddr && skb_dst(skb)) |
727 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 726 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
728 | 727 | ||
729 | if (skb->len > mtu) { | 728 | if (skb->len > mtu) { |
730 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); | 729 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); |
@@ -768,8 +767,8 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
768 | skb_reset_network_header(skb); | 767 | skb_reset_network_header(skb); |
769 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 768 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
770 | IPCB(skb)->flags = 0; | 769 | IPCB(skb)->flags = 0; |
771 | dst_release(skb->dst); | 770 | skb_dst_drop(skb); |
772 | skb->dst = &rt->u.dst; | 771 | skb_dst_set(skb, &rt->u.dst); |
773 | 772 | ||
774 | /* | 773 | /* |
775 | * Push down and install the IPIP header. | 774 | * Push down and install the IPIP header. |
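
ipip6_tunnel_xmit() derives the payload MTU by subtracting the outer IPv4 header from the route MTU and then clamping to IPV6_MIN_MTU. The same arithmetic as a runnable sketch (20-byte option-less IPv4 header, 1280-byte IPv6 minimum per RFC 2460):

    #include <stdio.h>

    #define IPV4_HDR_LEN 20      /* sizeof(struct iphdr) without options */
    #define IPV6_MIN_MTU 1280    /* every IPv6 link must carry this much */

    static int tunnel_mtu(int route_mtu)
    {
            int mtu = route_mtu - IPV4_HDR_LEN;   /* room for outer header */
            if (mtu < IPV6_MIN_MTU)
                    mtu = IPV6_MIN_MTU;           /* never advertise less */
            return mtu;
    }

    int main(void)
    {
            printf("%d\n", tunnel_mtu(1500));     /* 1480 on plain Ethernet */
            printf("%d\n", tunnel_mtu(1290));     /* 1270 clamps up to 1280 */
            return 0;
    }

Frames longer than the result take the ICMPV6_PKT_TOOBIG path seen above.
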
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ea37741062a9..53b6a4192b16 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -981,9 +981,10 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
981 | struct tcphdr *th = tcp_hdr(skb), *t1; | 981 | struct tcphdr *th = tcp_hdr(skb), *t1; |
982 | struct sk_buff *buff; | 982 | struct sk_buff *buff; |
983 | struct flowi fl; | 983 | struct flowi fl; |
984 | struct net *net = dev_net(skb->dst->dev); | 984 | struct net *net = dev_net(skb_dst(skb)->dev); |
985 | struct sock *ctl_sk = net->ipv6.tcp_sk; | 985 | struct sock *ctl_sk = net->ipv6.tcp_sk; |
986 | unsigned int tot_len = sizeof(struct tcphdr); | 986 | unsigned int tot_len = sizeof(struct tcphdr); |
987 | struct dst_entry *dst; | ||
987 | __be32 *topt; | 988 | __be32 *topt; |
988 | 989 | ||
989 | if (ts) | 990 | if (ts) |
@@ -1052,8 +1053,9 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1052 | * Underlying function will use this to retrieve the network | 1053 | * Underlying function will use this to retrieve the network |
1053 | * namespace | 1054 | * namespace |
1054 | */ | 1055 | */ |
1055 | if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) { | 1056 | if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { |
1056 | if (xfrm_lookup(net, &buff->dst, &fl, NULL, 0) >= 0) { | 1057 | if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { |
1058 | skb_dst_set(buff, dst); | ||
1057 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); | 1059 | ip6_xmit(ctl_sk, buff, &fl, NULL, 0); |
1058 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); | 1060 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); |
1059 | if (rst) | 1061 | if (rst) |
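
tcp_v6_send_response() now resolves the route into a local dst variable and attaches it with skb_dst_set() only after both lookups succeed, so an error path never leaves a stale pointer on the reply skb. The ordering, reduced to a sketch with error handling elided:

    /* Sketch of the lookup-then-attach ordering used above. */
    struct dst_entry *dst;

    if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {            /* 0 means success */
            if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
                    skb_dst_set(buff, dst);              /* attach only now */
                    ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
            }
    }
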
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8905712cfbb8..fc333d854728 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -177,10 +177,9 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, | |||
177 | 177 | ||
178 | if (unlikely(sk = skb_steal_sock(skb))) | 178 | if (unlikely(sk = skb_steal_sock(skb))) |
179 | return sk; | 179 | return sk; |
180 | else | 180 | return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, |
181 | return __udp6_lib_lookup(dev_net(skb->dst->dev), &iph->saddr, sport, | 181 | &iph->daddr, dport, inet6_iif(skb), |
182 | &iph->daddr, dport, inet6_iif(skb), | 182 | udptable); |
183 | udptable); | ||
184 | } | 183 | } |
185 | 184 | ||
186 | /* | 185 | /* |
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index e20529b4c825..3927832227b9 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c | |||
@@ -31,7 +31,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) | |||
31 | */ | 31 | */ |
32 | static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | 32 | static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) |
33 | { | 33 | { |
34 | struct dst_entry *dst = skb->dst; | 34 | struct dst_entry *dst = skb_dst(skb); |
35 | struct ipv6hdr *top_iph; | 35 | struct ipv6hdr *top_iph; |
36 | int dsfield; | 36 | int dsfield; |
37 | 37 | ||
@@ -45,7 +45,7 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | |||
45 | 45 | ||
46 | memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl, | 46 | memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl, |
47 | sizeof(top_iph->flow_lbl)); | 47 | sizeof(top_iph->flow_lbl)); |
48 | top_iph->nexthdr = xfrm_af2proto(skb->dst->ops->family); | 48 | top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family); |
49 | 49 | ||
50 | dsfield = XFRM_MODE_SKB_CB(skb)->tos; | 50 | dsfield = XFRM_MODE_SKB_CB(skb)->tos; |
51 | dsfield = INET_ECN_encapsulate(dsfield, dsfield); | 51 | dsfield = INET_ECN_encapsulate(dsfield, dsfield); |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 5ee5a031bc93..c4f4eef032a3 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(xfrm6_find_1stfragopt); | |||
30 | static int xfrm6_tunnel_check_size(struct sk_buff *skb) | 30 | static int xfrm6_tunnel_check_size(struct sk_buff *skb) |
31 | { | 31 | { |
32 | int mtu, ret = 0; | 32 | int mtu, ret = 0; |
33 | struct dst_entry *dst = skb->dst; | 33 | struct dst_entry *dst = skb_dst(skb); |
34 | 34 | ||
35 | mtu = dst_mtu(dst); | 35 | mtu = dst_mtu(dst); |
36 | if (mtu < IPV6_MIN_MTU) | 36 | if (mtu < IPV6_MIN_MTU) |
@@ -90,6 +90,6 @@ static int xfrm6_output_finish(struct sk_buff *skb) | |||
90 | 90 | ||
91 | int xfrm6_output(struct sk_buff *skb) | 91 | int xfrm6_output(struct sk_buff *skb) |
92 | { | 92 | { |
93 | return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dst->dev, | 93 | return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb_dst(skb)->dev, |
94 | xfrm6_output_finish); | 94 | xfrm6_output_finish); |
95 | } | 95 | } |
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 2562ebc1b22c..7af2e74deda8 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c | |||
@@ -982,17 +982,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) | |||
982 | { | 982 | { |
983 | struct sk_buff *tx_skb; | 983 | struct sk_buff *tx_skb; |
984 | struct sk_buff *skb; | 984 | struct sk_buff *skb; |
985 | int count; | ||
986 | 985 | ||
987 | IRDA_ASSERT(self != NULL, return;); | 986 | IRDA_ASSERT(self != NULL, return;); |
988 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); | 987 | IRDA_ASSERT(self->magic == LAP_MAGIC, return;); |
989 | 988 | ||
990 | /* Initialize variables */ | ||
991 | count = skb_queue_len(&self->wx_list); | ||
992 | |||
993 | /* Resend unacknowledged frame(s) */ | 989 | /* Resend unacknowledged frame(s) */ |
994 | skb = skb_peek(&self->wx_list); | 990 | skb_queue_walk(&self->wx_list, skb) { |
995 | while (skb != NULL) { | ||
996 | irlap_wait_min_turn_around(self, &self->qos_tx); | 991 | irlap_wait_min_turn_around(self, &self->qos_tx); |
997 | 992 | ||
998 | /* We copy the skb to be retransmitted since we will have to | 993 | /* We copy the skb to be retransmitted since we will have to |
@@ -1011,21 +1006,12 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) | |||
1011 | /* | 1006 | /* |
1012 | * Set poll bit on the last frame retransmitted | 1007 | * Set poll bit on the last frame retransmitted |
1013 | */ | 1008 | */ |
1014 | if (count-- == 1) | 1009 | if (skb_queue_is_last(&self->wx_list, skb)) |
1015 | tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ | 1010 | tx_skb->data[1] |= PF_BIT; /* Set p/f bit */ |
1016 | else | 1011 | else |
1017 | tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */ | 1012 | tx_skb->data[1] &= ~PF_BIT; /* Clear p/f bit */ |
1018 | 1013 | ||
1019 | irlap_send_i_frame(self, tx_skb, command); | 1014 | irlap_send_i_frame(self, tx_skb, command); |
1020 | |||
1021 | /* | ||
1022 | * If our skb is the last buffer in the list, then | ||
1023 | * we are finished, if not, move to the next sk-buffer | ||
1024 | */ | ||
1025 | if (skb == skb_peek_tail(&self->wx_list)) | ||
1026 | skb = NULL; | ||
1027 | else | ||
1028 | skb = skb->next; | ||
1029 | } | 1015 | } |
1030 | #if 0 /* Not yet */ | 1016 | #if 0 /* Not yet */ |
1031 | /* | 1017 | /* |
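
The irlap rewrite replaces a hand-rolled peek/next walk, plus a separate element counter, with skb_queue_walk() and skb_queue_is_last(). Both iterate a circular list whose head acts as a sentinel. A runnable userspace model of that idiom (struct node and the macro names here are stand-ins, not kernel API):

    #include <stdio.h>

    struct node { struct node *next; int id; };

    /* Same shape as the kernel's skb queue helpers: the head is a
     * sentinel, so the walk ends when we come back around to it. */
    #define queue_walk(head, n) \
            for ((n) = (head)->next; (n) != (head); (n) = (n)->next)
    #define queue_is_last(head, n) ((n)->next == (head))

    int main(void)
    {
            struct node head, a = { .id = 1 }, b = { .id = 2 };
            struct node *n;

            head.next = &a;
            a.next = &b;
            b.next = &head;                     /* close the circle */

            queue_walk(&head, n)
                    printf("frame %d%s\n", n->id,
                           queue_is_last(&head, n) ? " (last: set P/F)" : "");
            return 0;
    }

The "is last" test is what lets the loop set the poll/final bit without tracking a count of its own.
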
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 3477624a4906..c6bab39b018e 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c | |||
@@ -79,10 +79,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) | |||
79 | 79 | ||
80 | if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { | 80 | if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { |
81 | /* indicate or confirm not required */ | 81 | /* indicate or confirm not required */ |
82 | /* XXX this is not very pretty, perhaps we should store | ||
83 | * XXX indicate/confirm-needed state in the llc_conn_state_ev | ||
84 | * XXX control block of the SKB instead? -DaveM | ||
85 | */ | ||
86 | if (!skb->next) | 82 | if (!skb->next) |
87 | goto out_kfree_skb; | 83 | goto out_kfree_skb; |
88 | goto out_skb_put; | 84 | goto out_skb_put; |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 9cbf545e95a2..ba2643a43c73 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -1,16 +1,19 @@ | |||
1 | config MAC80211 | 1 | config MAC80211 |
2 | tristate "Generic IEEE 802.11 Networking Stack (mac80211)" | 2 | tristate "Generic IEEE 802.11 Networking Stack (mac80211)" |
3 | depends on CFG80211 | ||
3 | select CRYPTO | 4 | select CRYPTO |
4 | select CRYPTO_ECB | 5 | select CRYPTO_ECB |
5 | select CRYPTO_ARC4 | 6 | select CRYPTO_ARC4 |
6 | select CRYPTO_AES | 7 | select CRYPTO_AES |
7 | select CRC32 | 8 | select CRC32 |
8 | select WIRELESS_EXT | 9 | select WIRELESS_EXT |
9 | select CFG80211 | ||
10 | ---help--- | 10 | ---help--- |
11 | This option enables the hardware independent IEEE 802.11 | 11 | This option enables the hardware independent IEEE 802.11 |
12 | networking stack. | 12 | networking stack. |
13 | 13 | ||
14 | comment "CFG80211 needs to be enabled for MAC80211" | ||
15 | depends on CFG80211=n | ||
16 | |||
14 | config MAC80211_DEFAULT_PS | 17 | config MAC80211_DEFAULT_PS |
15 | bool "enable powersave by default" | 18 | bool "enable powersave by default" |
16 | depends on MAC80211 | 19 | depends on MAC80211 |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 43d00ffd3988..9e5762ad307d 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -132,6 +132,9 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
132 | 132 | ||
133 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 133 | state = &sta->ampdu_mlme.tid_state_tx[tid]; |
134 | 134 | ||
135 | if (*state == HT_AGG_STATE_OPERATIONAL) | ||
136 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
137 | |||
135 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | 138 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | |
136 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | 139 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); |
137 | 140 | ||
@@ -337,6 +340,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | |||
337 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 340 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, |
338 | sta->ampdu_mlme.tid_tx[tid]->ssn, | 341 | sta->ampdu_mlme.tid_tx[tid]->ssn, |
339 | 0x40, 5000); | 342 | 0x40, 5000); |
343 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
340 | /* activate the timer for the recipient's addBA response */ | 344 | /* activate the timer for the recipient's addBA response */ |
341 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | 345 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = |
342 | jiffies + ADDBA_RESP_INTERVAL; | 346 | jiffies + ADDBA_RESP_INTERVAL; |
@@ -606,7 +610,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | |||
606 | 610 | ||
607 | *state = HT_AGG_STATE_IDLE; | 611 | *state = HT_AGG_STATE_IDLE; |
608 | /* from now on packets are no longer put onto sta->pending */ | 612 | /* from now on packets are no longer put onto sta->pending */ |
609 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
610 | kfree(sta->ampdu_mlme.tid_tx[tid]); | 613 | kfree(sta->ampdu_mlme.tid_tx[tid]); |
611 | sta->ampdu_mlme.tid_tx[tid] = NULL; | 614 | sta->ampdu_mlme.tid_tx[tid] = NULL; |
612 | 615 | ||
@@ -689,7 +692,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
689 | 692 | ||
690 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 693 | sta->ampdu_mlme.addba_req_num[tid] = 0; |
691 | } else { | 694 | } else { |
692 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
693 | ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); | 695 | ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); |
694 | } | 696 | } |
695 | spin_unlock_bh(&sta->lock); | 697 | spin_unlock_bh(&sta->lock); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 77e9ff5ec4f3..3f47276caeb8 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -664,18 +664,19 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
664 | spin_unlock_bh(&sta->lock); | 664 | spin_unlock_bh(&sta->lock); |
665 | 665 | ||
666 | /* | 666 | /* |
667 | * cfg80211 validates this (1-2007) and allows setting the AID | ||
668 | * only when creating a new station entry | ||
669 | */ | ||
670 | if (params->aid) | ||
671 | sta->sta.aid = params->aid; | ||
672 | |||
673 | /* | ||
667 | * FIXME: updating the following information is racy when this | 674 | * FIXME: updating the following information is racy when this |
668 | * function is called from ieee80211_change_station(). | 675 | * function is called from ieee80211_change_station(). |
669 | * However, all this information should be static so | 676 | * However, all this information should be static so |
670 | * maybe we should just reject attempts to change it. | 677 |
671 | */ | 678 | */ |
672 | 679 | ||
673 | if (params->aid) { | ||
674 | sta->sta.aid = params->aid; | ||
675 | if (sta->sta.aid > IEEE80211_MAX_AID) | ||
676 | sta->sta.aid = 0; /* XXX: should this be an error? */ | ||
677 | } | ||
678 | |||
679 | if (params->listen_interval >= 0) | 680 | if (params->listen_interval >= 0) |
680 | sta->listen_interval = params->listen_interval; | 681 | sta->listen_interval = params->listen_interval; |
681 | 682 | ||
@@ -1121,8 +1122,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy, | |||
1121 | p.txop = params->txop; | 1122 | p.txop = params->txop; |
1122 | if (drv_conf_tx(local, params->queue, &p)) { | 1123 | if (drv_conf_tx(local, params->queue, &p)) { |
1123 | printk(KERN_DEBUG "%s: failed to set TX queue " | 1124 | printk(KERN_DEBUG "%s: failed to set TX queue " |
1124 | "parameters for queue %d\n", local->mdev->name, | 1125 | "parameters for queue %d\n", |
1125 | params->queue); | 1126 | wiphy_name(local->hw.wiphy), params->queue); |
1126 | return -EINVAL; | 1127 | return -EINVAL; |
1127 | } | 1128 | } |
1128 | 1129 | ||
@@ -1255,7 +1256,7 @@ static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, | |||
1255 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL; | 1256 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL; |
1256 | 1257 | ||
1257 | ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len); | 1258 | ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len); |
1258 | if (ret) | 1259 | if (ret && ret != -EALREADY) |
1259 | return ret; | 1260 | return ret; |
1260 | 1261 | ||
1261 | if (req->use_mfp) { | 1262 | if (req->use_mfp) { |
@@ -1333,6 +1334,53 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) | |||
1333 | return 0; | 1334 | return 0; |
1334 | } | 1335 | } |
1335 | 1336 | ||
1337 | static int ieee80211_set_tx_power(struct wiphy *wiphy, | ||
1338 | enum tx_power_setting type, int dbm) | ||
1339 | { | ||
1340 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1341 | struct ieee80211_channel *chan = local->hw.conf.channel; | ||
1342 | u32 changes = 0; | ||
1343 | |||
1344 | switch (type) { | ||
1345 | case TX_POWER_AUTOMATIC: | ||
1346 | local->user_power_level = -1; | ||
1347 | break; | ||
1348 | case TX_POWER_LIMITED: | ||
1349 | if (dbm < 0) | ||
1350 | return -EINVAL; | ||
1351 | local->user_power_level = dbm; | ||
1352 | break; | ||
1353 | case TX_POWER_FIXED: | ||
1354 | if (dbm < 0) | ||
1355 | return -EINVAL; | ||
1356 | /* TODO: move to cfg80211 when it knows the channel */ | ||
1357 | if (dbm > chan->max_power) | ||
1358 | return -EINVAL; | ||
1359 | local->user_power_level = dbm; | ||
1360 | break; | ||
1361 | } | ||
1362 | |||
1363 | ieee80211_hw_config(local, changes); | ||
1364 | |||
1365 | return 0; | ||
1366 | } | ||
1367 | |||
1368 | static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm) | ||
1369 | { | ||
1370 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1371 | |||
1372 | *dbm = local->hw.conf.power_level; | ||
1373 | |||
1374 | return 0; | ||
1375 | } | ||
1376 | |||
1377 | static void ieee80211_rfkill_poll(struct wiphy *wiphy) | ||
1378 | { | ||
1379 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1380 | |||
1381 | drv_rfkill_poll(local); | ||
1382 | } | ||
1383 | |||
1336 | struct cfg80211_ops mac80211_config_ops = { | 1384 | struct cfg80211_ops mac80211_config_ops = { |
1337 | .add_virtual_intf = ieee80211_add_iface, | 1385 | .add_virtual_intf = ieee80211_add_iface, |
1338 | .del_virtual_intf = ieee80211_del_iface, | 1386 | .del_virtual_intf = ieee80211_del_iface, |
@@ -1372,4 +1420,7 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1372 | .join_ibss = ieee80211_join_ibss, | 1420 | .join_ibss = ieee80211_join_ibss, |
1373 | .leave_ibss = ieee80211_leave_ibss, | 1421 | .leave_ibss = ieee80211_leave_ibss, |
1374 | .set_wiphy_params = ieee80211_set_wiphy_params, | 1422 | .set_wiphy_params = ieee80211_set_wiphy_params, |
1423 | .set_tx_power = ieee80211_set_tx_power, | ||
1424 | .get_tx_power = ieee80211_get_tx_power, | ||
1425 | .rfkill_poll = ieee80211_rfkill_poll, | ||
1375 | }; | 1426 | }; |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 3912b5334b9c..b13446afd48f 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -181,4 +181,11 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, | |||
181 | sta, tid, ssn); | 181 | sta, tid, ssn); |
182 | return -EOPNOTSUPP; | 182 | return -EOPNOTSUPP; |
183 | } | 183 | } |
184 | |||
185 | |||
186 | static inline void drv_rfkill_poll(struct ieee80211_local *local) | ||
187 | { | ||
188 | if (local->ops->rfkill_poll) | ||
189 | local->ops->rfkill_poll(&local->hw); | ||
190 | } | ||
184 | #endif /* __MAC80211_DRIVER_OPS */ | 191 | #endif /* __MAC80211_DRIVER_OPS */ |
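
drv_rfkill_poll() follows the standard optional-callback convention: the wrapper checks the ops pointer and quietly does nothing when a driver leaves the hook unimplemented. The same guard pattern in a self-contained form (all names below are illustrative, not mac80211 API):

    #include <stdio.h>

    struct dev_ops {
            void (*poll)(void *hw);            /* optional hook, may be NULL */
    };

    static void drv_poll(const struct dev_ops *ops, void *hw)
    {
            if (ops->poll)                     /* NULL means not implemented */
                    ops->poll(hw);
    }

    static void my_poll(void *hw)
    {
            printf("polled device at %p\n", hw);
    }

    int main(void)
    {
            struct dev_ops with_hook = { .poll = my_poll };
            struct dev_ops without_hook = { 0 };
            int hw;

            drv_poll(&with_hook, &hw);         /* invokes my_poll() */
            drv_poll(&without_hook, &hw);      /* silently a no-op */
            return 0;
    }

Centralizing the NULL check in one inline keeps every caller free of per-driver conditionals.
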
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c088c46704a3..4dbc28964196 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -589,6 +589,7 @@ enum queue_stop_reason { | |||
589 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION, | 589 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION, |
590 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, | 590 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, |
591 | IEEE80211_QUEUE_STOP_REASON_PENDING, | 591 | IEEE80211_QUEUE_STOP_REASON_PENDING, |
592 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, | ||
592 | }; | 593 | }; |
593 | 594 | ||
594 | struct ieee80211_master_priv { | 595 | struct ieee80211_master_priv { |
@@ -1121,6 +1122,10 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | |||
1121 | enum queue_stop_reason reason); | 1122 | enum queue_stop_reason reason); |
1122 | void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, | 1123 | void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, |
1123 | enum queue_stop_reason reason); | 1124 | enum queue_stop_reason reason); |
1125 | void ieee80211_add_pending_skb(struct ieee80211_local *local, | ||
1126 | struct sk_buff *skb); | ||
1127 | int ieee80211_add_pending_skbs(struct ieee80211_local *local, | ||
1128 | struct sk_buff_head *skbs); | ||
1124 | 1129 | ||
1125 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | 1130 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
1126 | u16 transaction, u16 auth_alg, | 1131 | u16 transaction, u16 auth_alg, |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8c9f1c722cdb..b7c8a4484298 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -170,7 +170,7 @@ static int ieee80211_open(struct net_device *dev) | |||
170 | goto err_del_bss; | 170 | goto err_del_bss; |
171 | /* we're brought up, everything changes */ | 171 | /* we're brought up, everything changes */ |
172 | hw_reconf_flags = ~0; | 172 | hw_reconf_flags = ~0; |
173 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); | 173 | ieee80211_led_radio(local, true); |
174 | } | 174 | } |
175 | 175 | ||
176 | /* | 176 | /* |
@@ -560,7 +560,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
560 | 560 | ||
561 | drv_stop(local); | 561 | drv_stop(local); |
562 | 562 | ||
563 | ieee80211_led_radio(local, 0); | 563 | ieee80211_led_radio(local, false); |
564 | 564 | ||
565 | flush_workqueue(local->hw.workqueue); | 565 | flush_workqueue(local->hw.workqueue); |
566 | 566 | ||
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 6b7e92eaab47..092a017b237e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -289,16 +289,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
289 | drv_bss_info_changed(local, &sdata->vif, | 289 | drv_bss_info_changed(local, &sdata->vif, |
290 | &sdata->vif.bss_conf, changed); | 290 | &sdata->vif.bss_conf, changed); |
291 | 291 | ||
292 | /* | 292 | /* DEPRECATED */ |
293 | * DEPRECATED | 293 | local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int; |
294 | * | ||
295 | * ~changed is just there to not do this at resume time | ||
296 | */ | ||
297 | if (changed & BSS_CHANGED_BEACON_INT && ~changed) { | ||
298 | local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int; | ||
299 | ieee80211_hw_config(local, | ||
300 | _IEEE80211_CONF_CHANGE_BEACON_INTERVAL); | ||
301 | } | ||
302 | } | 294 | } |
303 | 295 | ||
304 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) | 296 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) |
@@ -377,60 +369,12 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
377 | } | 369 | } |
378 | } | 370 | } |
379 | 371 | ||
380 | /* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to | ||
381 | * make a prepared TX frame (one that has been given to hw) to look like brand | ||
382 | * new IEEE 802.11 frame that is ready to go through TX processing again. | ||
383 | */ | ||
384 | static void ieee80211_remove_tx_extra(struct ieee80211_local *local, | ||
385 | struct ieee80211_key *key, | ||
386 | struct sk_buff *skb) | ||
387 | { | ||
388 | unsigned int hdrlen, iv_len, mic_len; | ||
389 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
390 | |||
391 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
392 | |||
393 | if (!key) | ||
394 | goto no_key; | ||
395 | |||
396 | switch (key->conf.alg) { | ||
397 | case ALG_WEP: | ||
398 | iv_len = WEP_IV_LEN; | ||
399 | mic_len = WEP_ICV_LEN; | ||
400 | break; | ||
401 | case ALG_TKIP: | ||
402 | iv_len = TKIP_IV_LEN; | ||
403 | mic_len = TKIP_ICV_LEN; | ||
404 | break; | ||
405 | case ALG_CCMP: | ||
406 | iv_len = CCMP_HDR_LEN; | ||
407 | mic_len = CCMP_MIC_LEN; | ||
408 | break; | ||
409 | default: | ||
410 | goto no_key; | ||
411 | } | ||
412 | |||
413 | if (skb->len >= hdrlen + mic_len && | ||
414 | !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | ||
415 | skb_trim(skb, skb->len - mic_len); | ||
416 | if (skb->len >= hdrlen + iv_len) { | ||
417 | memmove(skb->data + iv_len, skb->data, hdrlen); | ||
418 | hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len); | ||
419 | } | ||
420 | |||
421 | no_key: | ||
422 | if (ieee80211_is_data_qos(hdr->frame_control)) { | ||
423 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | ||
424 | memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data, | ||
425 | hdrlen - IEEE80211_QOS_CTL_LEN); | ||
426 | skb_pull(skb, IEEE80211_QOS_CTL_LEN); | ||
427 | } | ||
428 | } | ||
429 | |||
430 | static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | 372 | static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, |
431 | struct sta_info *sta, | 373 | struct sta_info *sta, |
432 | struct sk_buff *skb) | 374 | struct sk_buff *skb) |
433 | { | 375 | { |
376 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
377 | |||
434 | sta->tx_filtered_count++; | 378 | sta->tx_filtered_count++; |
435 | 379 | ||
436 | /* | 380 | /* |
@@ -472,16 +416,15 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
472 | */ | 416 | */ |
473 | if (test_sta_flags(sta, WLAN_STA_PS) && | 417 | if (test_sta_flags(sta, WLAN_STA_PS) && |
474 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { | 418 | skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) { |
475 | ieee80211_remove_tx_extra(local, sta->key, skb); | ||
476 | skb_queue_tail(&sta->tx_filtered, skb); | 419 | skb_queue_tail(&sta->tx_filtered, skb); |
477 | return; | 420 | return; |
478 | } | 421 | } |
479 | 422 | ||
480 | if (!test_sta_flags(sta, WLAN_STA_PS) && !skb->requeue) { | 423 | if (!test_sta_flags(sta, WLAN_STA_PS) && |
424 | !(info->flags & IEEE80211_TX_INTFL_RETRIED)) { | ||
481 | /* Software retry the packet once */ | 425 | /* Software retry the packet once */ |
482 | skb->requeue = 1; | 426 | info->flags |= IEEE80211_TX_INTFL_RETRIED; |
483 | ieee80211_remove_tx_extra(local, sta->key, skb); | 427 | ieee80211_add_pending_skb(local, skb); |
484 | dev_queue_xmit(skb); | ||
485 | return; | 428 | return; |
486 | } | 429 | } |
487 | 430 | ||
@@ -735,9 +678,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
735 | * +-------------------------+ | 678 | * +-------------------------+ |
736 | * | 679 | * |
737 | */ | 680 | */ |
738 | priv_size = ((sizeof(struct ieee80211_local) + | 681 | priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; |
739 | NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) + | ||
740 | priv_data_len; | ||
741 | 682 | ||
742 | wiphy = wiphy_new(&mac80211_config_ops, priv_size); | 683 | wiphy = wiphy_new(&mac80211_config_ops, priv_size); |
743 | 684 | ||
@@ -754,9 +695,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
754 | 695 | ||
755 | local->hw.wiphy = wiphy; | 696 | local->hw.wiphy = wiphy; |
756 | 697 | ||
757 | local->hw.priv = (char *)local + | 698 | local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); |
758 | ((sizeof(struct ieee80211_local) + | ||
759 | NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); | ||
760 | 699 | ||
761 | BUG_ON(!ops->tx); | 700 | BUG_ON(!ops->tx); |
762 | BUG_ON(!ops->start); | 701 | BUG_ON(!ops->start); |
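
The open-coded (size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST rounding is replaced by the kernel's ALIGN() macro, which rounds up to the next multiple of a power-of-two boundary. Its classic definition, with a couple of worked values (a runnable sketch, not the kernel header verbatim):

    #include <stdio.h>

    /* Round x up to the next multiple of a; a must be a power of two. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            printf("%d\n", ALIGN(100, 32));    /* prints 128 */
            printf("%d\n", ALIGN(128, 32));    /* already aligned: 128 */
            printf("%d\n", ALIGN(1, 32));      /* prints 32 */
            return 0;
    }

So local->hw.priv lands at the first aligned offset past struct ieee80211_local, exactly what the removed arithmetic computed by hand.
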
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 509469cb9265..d779c57a8220 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -621,9 +621,6 @@ static void ieee80211_change_ps(struct ieee80211_local *local) | |||
621 | struct ieee80211_conf *conf = &local->hw.conf; | 621 | struct ieee80211_conf *conf = &local->hw.conf; |
622 | 622 | ||
623 | if (local->ps_sdata) { | 623 | if (local->ps_sdata) { |
624 | if (!(local->ps_sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED)) | ||
625 | return; | ||
626 | |||
627 | ieee80211_enable_ps(local, local->ps_sdata); | 624 | ieee80211_enable_ps(local, local->ps_sdata); |
628 | } else if (conf->flags & IEEE80211_CONF_PS) { | 625 | } else if (conf->flags & IEEE80211_CONF_PS) { |
629 | conf->flags &= ~IEEE80211_CONF_PS; | 626 | conf->flags &= ~IEEE80211_CONF_PS; |
@@ -653,7 +650,9 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) | |||
653 | count++; | 650 | count++; |
654 | } | 651 | } |
655 | 652 | ||
656 | if (count == 1 && found->u.mgd.powersave) { | 653 | if (count == 1 && found->u.mgd.powersave && |
654 | (found->u.mgd.flags & IEEE80211_STA_ASSOCIATED) && | ||
655 | !(found->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL)) { | ||
657 | s32 beaconint_us; | 656 | s32 beaconint_us; |
658 | 657 | ||
659 | if (latency < 0) | 658 | if (latency < 0) |
@@ -793,13 +792,13 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |||
793 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 792 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
794 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | 793 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " |
795 | "cWmin=%d cWmax=%d txop=%d\n", | 794 | "cWmin=%d cWmax=%d txop=%d\n", |
796 | local->mdev->name, queue, aci, acm, params.aifs, params.cw_min, | 795 | wiphy_name(local->hw.wiphy), queue, aci, acm, |
797 | params.cw_max, params.txop); | 796 | params.aifs, params.cw_min, params.cw_max, params.txop); |
798 | #endif | 797 | #endif |
799 | if (drv_conf_tx(local, queue, ¶ms) && local->ops->conf_tx) | 798 | if (drv_conf_tx(local, queue, ¶ms) && local->ops->conf_tx) |
800 | printk(KERN_DEBUG "%s: failed to set TX queue " | 799 | printk(KERN_DEBUG "%s: failed to set TX queue " |
801 | "parameters for queue %d\n", local->mdev->name, | 800 | "parameters for queue %d\n", |
802 | queue); | 801 | wiphy_name(local->hw.wiphy), queue); |
803 | } | 802 | } |
804 | } | 803 | } |
805 | 804 | ||
@@ -1322,6 +1321,11 @@ void ieee80211_beacon_loss_work(struct work_struct *work) | |||
1322 | #endif | 1321 | #endif |
1323 | 1322 | ||
1324 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; | 1323 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; |
1324 | |||
1325 | mutex_lock(&sdata->local->iflist_mtx); | ||
1326 | ieee80211_recalc_ps(sdata->local, -1); | ||
1327 | mutex_unlock(&sdata->local->iflist_mtx); | ||
1328 | |||
1325 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, | 1329 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, |
1326 | ifmgd->ssid_len, NULL, 0); | 1330 | ifmgd->ssid_len, NULL, 0); |
1327 | 1331 | ||
@@ -1342,6 +1346,7 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) | |||
1342 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1346 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1343 | struct ieee80211_local *local = sdata->local; | 1347 | struct ieee80211_local *local = sdata->local; |
1344 | struct sta_info *sta; | 1348 | struct sta_info *sta; |
1349 | unsigned long last_rx; | ||
1345 | bool disassoc = false; | 1350 | bool disassoc = false; |
1346 | 1351 | ||
1347 | /* TODO: start monitoring current AP signal quality and number of | 1352 | /* TODO: start monitoring current AP signal quality and number of |
@@ -1358,17 +1363,21 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) | |||
1358 | printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", | 1363 | printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", |
1359 | sdata->dev->name, ifmgd->bssid); | 1364 | sdata->dev->name, ifmgd->bssid); |
1360 | disassoc = true; | 1365 | disassoc = true; |
1361 | goto unlock; | 1366 | rcu_read_unlock(); |
1367 | goto out; | ||
1362 | } | 1368 | } |
1363 | 1369 | ||
1370 | last_rx = sta->last_rx; | ||
1371 | rcu_read_unlock(); | ||
1372 | |||
1364 | if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && | 1373 | if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && |
1365 | time_after(jiffies, sta->last_rx + IEEE80211_PROBE_WAIT)) { | 1374 | time_after(jiffies, last_rx + IEEE80211_PROBE_WAIT)) { |
1366 | printk(KERN_DEBUG "%s: no probe response from AP %pM " | 1375 | printk(KERN_DEBUG "%s: no probe response from AP %pM " |
1367 | "- disassociating\n", | 1376 | "- disassociating\n", |
1368 | sdata->dev->name, ifmgd->bssid); | 1377 | sdata->dev->name, ifmgd->bssid); |
1369 | disassoc = true; | 1378 | disassoc = true; |
1370 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | 1379 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; |
1371 | goto unlock; | 1380 | goto out; |
1372 | } | 1381 | } |
1373 | 1382 | ||
1374 | /* | 1383 | /* |
@@ -1387,26 +1396,29 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) | |||
1387 | } | 1396 | } |
1388 | #endif | 1397 | #endif |
1389 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; | 1398 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; |
1399 | mutex_lock(&local->iflist_mtx); | ||
1400 | ieee80211_recalc_ps(local, -1); | ||
1401 | mutex_unlock(&local->iflist_mtx); | ||
1390 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, | 1402 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, |
1391 | ifmgd->ssid_len, NULL, 0); | 1403 | ifmgd->ssid_len, NULL, 0); |
1392 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT); | 1404 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT); |
1393 | goto unlock; | 1405 | goto out; |
1394 | } | 1406 | } |
1395 | 1407 | ||
1396 | if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) { | 1408 | if (time_after(jiffies, last_rx + IEEE80211_PROBE_IDLE_TIME)) { |
1397 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; | 1409 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; |
1410 | mutex_lock(&local->iflist_mtx); | ||
1411 | ieee80211_recalc_ps(local, -1); | ||
1412 | mutex_unlock(&local->iflist_mtx); | ||
1398 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, | 1413 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, |
1399 | ifmgd->ssid_len, NULL, 0); | 1414 | ifmgd->ssid_len, NULL, 0); |
1400 | } | 1415 | } |
1401 | 1416 | ||
1417 | out: | ||
1402 | if (!disassoc) | 1418 | if (!disassoc) |
1403 | mod_timer(&ifmgd->timer, | 1419 | mod_timer(&ifmgd->timer, |
1404 | jiffies + IEEE80211_MONITORING_INTERVAL); | 1420 | jiffies + IEEE80211_MONITORING_INTERVAL); |
1405 | 1421 | else | |
1406 | unlock: | ||
1407 | rcu_read_unlock(); | ||
1408 | |||
1409 | if (disassoc) | ||
1410 | ieee80211_set_disassoc(sdata, true, true, | 1422 | ieee80211_set_disassoc(sdata, true, true, |
1411 | WLAN_REASON_PREV_AUTH_NOT_VALID); | 1423 | WLAN_REASON_PREV_AUTH_NOT_VALID); |
1412 | } | 1424 | } |
@@ -1889,8 +1901,12 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, | |||
1889 | ieee80211_authenticate(sdata); | 1901 | ieee80211_authenticate(sdata); |
1890 | } | 1902 | } |
1891 | 1903 | ||
1892 | if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) | 1904 | if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) { |
1893 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | 1905 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; |
1906 | mutex_lock(&sdata->local->iflist_mtx); | ||
1907 | ieee80211_recalc_ps(sdata->local, -1); | ||
1908 | mutex_unlock(&sdata->local->iflist_mtx); | ||
1909 | } | ||
1894 | } | 1910 | } |
1895 | 1911 | ||
1896 | /* | 1912 | /* |
@@ -1948,6 +1964,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1948 | } | 1964 | } |
1949 | #endif | 1965 | #endif |
1950 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | 1966 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; |
1967 | mutex_lock(&local->iflist_mtx); | ||
1968 | ieee80211_recalc_ps(local, -1); | ||
1969 | mutex_unlock(&local->iflist_mtx); | ||
1951 | } | 1970 | } |
1952 | 1971 | ||
1953 | ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); | 1972 | ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); |
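
ieee80211_associated() now copies sta->last_rx while the RCU read lock pins the station entry, then drops the lock before the timer math and any disassociation work. The general shape of that snapshot pattern, as a sketch (the lookup itself is elided):

    unsigned long last_rx;

    rcu_read_lock();
    /* ... RCU-protected lookup placing the entry in `sta` ... */
    if (!sta) {
            rcu_read_unlock();
            goto out;                        /* handle the miss */
    }
    last_rx = sta->last_rx;                  /* copy while protected */
    rcu_read_unlock();

    /* Only the snapshot is used from here on; `sta` must not be
     * dereferenced once the read-side critical section has ended. */
    if (time_after(jiffies, last_rx + IEEE80211_PROBE_WAIT))
            /* ... probe or disassociate ... */;

Shrinking the critical section matters here because the follow-up work, such as ieee80211_set_disassoc(), may block, which is not allowed inside a read-side critical section.
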
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 0a11515341ba..b218b98fba7f 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -215,7 +215,7 @@ minstrel_get_next_sample(struct minstrel_sta_info *mi) | |||
215 | unsigned int sample_ndx; | 215 | unsigned int sample_ndx; |
216 | sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column); | 216 | sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column); |
217 | mi->sample_idx++; | 217 | mi->sample_idx++; |
218 | if (mi->sample_idx > (mi->n_rates - 2)) { | 218 | if ((int) mi->sample_idx > (mi->n_rates - 2)) { |
219 | mi->sample_idx = 0; | 219 | mi->sample_idx = 0; |
220 | mi->sample_column++; | 220 | mi->sample_column++; |
221 | if (mi->sample_column >= SAMPLE_COLUMNS) | 221 | if (mi->sample_column >= SAMPLE_COLUMNS) |
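
The minstrel fix casts sample_idx to int before comparing: with an unsigned left operand, a right-hand side that goes negative (n_rates - 2 when fewer than two rates exist) is converted to a huge unsigned value, so the bound check never fires and the index runs past the end. A runnable illustration of the conversion rule:

    #include <stdio.h>

    int main(void)
    {
            unsigned int idx = 0;
            int n_rates = 1;

            /* Usual arithmetic conversions turn -1 into UINT_MAX here,
             * so the guard is never taken. */
            if (idx > n_rates - 2)
                    printf("unsigned compare: taken\n");   /* not printed */

            /* Forcing a signed compare restores the intended 0 > -1. */
            if ((int)idx > n_rates - 2)
                    printf("signed compare: taken\n");     /* printed */
            return 0;
    }
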
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6a9b8e63a6bf..de5bba7f910a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -797,8 +797,7 @@ static int ap_sta_ps_end(struct sta_info *sta) | |||
797 | { | 797 | { |
798 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 798 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
799 | struct ieee80211_local *local = sdata->local; | 799 | struct ieee80211_local *local = sdata->local; |
800 | struct sk_buff *skb; | 800 | int sent, buffered; |
801 | int sent = 0; | ||
802 | 801 | ||
803 | atomic_dec(&sdata->bss->num_sta_ps); | 802 | atomic_dec(&sdata->bss->num_sta_ps); |
804 | 803 | ||
@@ -814,22 +813,16 @@ static int ap_sta_ps_end(struct sta_info *sta) | |||
814 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 813 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
815 | 814 | ||
816 | /* Send all buffered frames to the station */ | 815 | /* Send all buffered frames to the station */ |
817 | while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { | 816 | sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered); |
818 | sent++; | 817 | buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf); |
819 | skb->requeue = 1; | 818 | sent += buffered; |
820 | dev_queue_xmit(skb); | 819 | local->total_ps_buffered -= buffered; |
821 | } | 820 | |
822 | while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { | ||
823 | local->total_ps_buffered--; | ||
824 | sent++; | ||
825 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 821 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
826 | printk(KERN_DEBUG "%s: STA %pM aid %d send PS frame " | 822 | printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " |
827 | "since STA not sleeping anymore\n", sdata->dev->name, | 823 | "since STA not sleeping anymore\n", sdata->dev->name, |
828 | sta->sta.addr, sta->sta.aid); | 824 | sta->sta.addr, sta->sta.aid, sent - buffered, buffered); |
829 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 825 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
830 | skb->requeue = 1; | ||
831 | dev_queue_xmit(skb); | ||
832 | } | ||
833 | 826 | ||
834 | return sent; | 827 | return sent; |
835 | } | 828 | } |
@@ -1335,7 +1328,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1335 | * mac80211. That also explains the __skb_push() | 1328 | * mac80211. That also explains the __skb_push() |
1336 | * below. | 1329 | * below. |
1337 | */ | 1330 | */ |
1338 | align = (unsigned long)skb->data & 3; | 1331 | align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; |
1339 | if (align) { | 1332 | if (align) { |
1340 | if (WARN_ON(skb_headroom(skb) < 3)) { | 1333 | if (WARN_ON(skb_headroom(skb) < 3)) { |
1341 | dev_kfree_skb(skb); | 1334 | dev_kfree_skb(skb); |
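
The corrected receive-path test measures where the payload that follows the 14-byte Ethernet header will land, not where skb->data itself sits: a buffer whose start is 4-byte aligned still leaves the next header 2 bytes off, because 14 mod 4 is 2. A small worked example:

    #include <stdio.h>

    #define ETH_HLEN 14    /* sizeof(struct ethhdr), no VLAN tag */

    int main(void)
    {
            unsigned long data = 0x1000;                /* 4-byte aligned */

            unsigned long old_test = data & 3;                  /* 0 */
            unsigned long new_test = (data + ETH_HLEN) & 3;     /* 2 */

            printf("old: %lu, new: %lu\n", old_test, new_test);
            /* The new result is the shim the code must insert so the
             * header after the ethhdr starts on a 4-byte boundary. */
            return 0;
    }
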
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index d5611d8fd0d6..a360bceeba59 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -44,6 +44,15 @@ | |||
44 | * When the insertion fails (sta_info_insert() returns non-zero), the | 44 | * When the insertion fails (sta_info_insert() returns non-zero), the |
45 | * structure will have been freed by sta_info_insert()! | 45 | * structure will have been freed by sta_info_insert()! |
46 | * | 46 | * |
47 | * sta entries are added by mac80211 when you establish a link with a | ||
48 | * peer. This means different things for the different type of interfaces | ||
49 | * we support. For a regular station this means we add the AP sta when we | ||
50 | * receive an association response from the AP. For IBSS this occurs when | ||
51 | * we receive a probe response or a beacon from the target IBSS network. For | ||
52 | * WDS we add the sta for the peer immediately upon device open. When using | ||
53 | * AP mode we add stations for each respective station upon request from | ||
54 | * userspace through nl80211. | ||
55 | * | ||
47 | * Because there are debugfs entries for each station, and adding those | 56 | * Because there are debugfs entries for each station, and adding those |
48 | * must be able to sleep, it is also possible to "pin" a station entry, | 57 | * must be able to sleep, it is also possible to "pin" a station entry, |
49 | * that means it can be removed from the hash table but not be freed. | 58 | * that means it can be removed from the hash table but not be freed. |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a910148b8228..364222bfb10d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -400,6 +400,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
400 | sta_info_set_tim_bit(sta); | 400 | sta_info_set_tim_bit(sta); |
401 | 401 | ||
402 | info->control.jiffies = jiffies; | 402 | info->control.jiffies = jiffies; |
403 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
403 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); | 404 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); |
404 | return TX_QUEUED; | 405 | return TX_QUEUED; |
405 | } | 406 | } |
@@ -420,7 +421,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
420 | * frame filtering and keeps a station blacklist on its own | 421 | * frame filtering and keeps a station blacklist on its own |
421 | * (e.g: p54), so that frames can be delivered unimpeded. | 422 | * (e.g: p54), so that frames can be delivered unimpeded. |
422 | * | 423 | * |
423 | * Note: It should be save to disable the filter now. | 424 | * Note: It should be safe to disable the filter now. |
424 | * As it is really unlikely that we still have any pending | 425 |
425 | * frame for this station in the hw's buffers/fifos left, | 426 |
426 | * that is not rejected with an unsuccessful tx_status yet. | 427 |
@@ -907,9 +908,8 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) | |||
907 | * deal with packet injection down monitor interface | 908 | * deal with packet injection down monitor interface |
908 | * with Radiotap Header -- only called for monitor mode interface | 909 | * with Radiotap Header -- only called for monitor mode interface |
909 | */ | 910 | */ |
910 | static ieee80211_tx_result | 911 | static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, |
911 | __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | 912 | struct sk_buff *skb) |
912 | struct sk_buff *skb) | ||
913 | { | 913 | { |
914 | /* | 914 | /* |
915 | * this is the moment to interpret and discard the radiotap header that | 915 | * this is the moment to interpret and discard the radiotap header that |
@@ -960,7 +960,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
960 | * on transmission | 960 | * on transmission |
961 | */ | 961 | */ |
962 | if (skb->len < (iterator.max_length + FCS_LEN)) | 962 | if (skb->len < (iterator.max_length + FCS_LEN)) |
963 | return TX_DROP; | 963 | return false; |
964 | 964 | ||
965 | skb_trim(skb, skb->len - FCS_LEN); | 965 | skb_trim(skb, skb->len - FCS_LEN); |
966 | } | 966 | } |
@@ -982,7 +982,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
982 | } | 982 | } |
983 | 983 | ||
984 | if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ | 984 | if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ |
985 | return TX_DROP; | 985 | return false; |
986 | 986 | ||
987 | /* | 987 | /* |
988 | * remove the radiotap header | 988 | * remove the radiotap header |
@@ -991,7 +991,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
991 | */ | 991 | */ |
992 | skb_pull(skb, iterator.max_length); | 992 | skb_pull(skb, iterator.max_length); |
993 | 993 | ||
994 | return TX_CONTINUE; | 994 | return true; |
995 | } | 995 | } |
996 | 996 | ||
997 | /* | 997 | /* |
@@ -1025,7 +1025,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1025 | /* process and remove the injection radiotap header */ | 1025 | /* process and remove the injection radiotap header */ |
1026 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1026 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1027 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { | 1027 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { |
1028 | if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) | 1028 | if (!__ieee80211_parse_tx_radiotap(tx, skb)) |
1029 | return TX_DROP; | 1029 | return TX_DROP; |
1030 | 1030 | ||
1031 | /* | 1031 | /* |
@@ -1238,7 +1238,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1238 | bool txpending) | 1238 | bool txpending) |
1239 | { | 1239 | { |
1240 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1240 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1241 | struct sta_info *sta; | ||
1242 | struct ieee80211_tx_data tx; | 1241 | struct ieee80211_tx_data tx; |
1243 | ieee80211_tx_result res_prepare; | 1242 | ieee80211_tx_result res_prepare; |
1244 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1243 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
@@ -1270,7 +1269,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1270 | return; | 1269 | return; |
1271 | } | 1270 | } |
1272 | 1271 | ||
1273 | sta = tx.sta; | ||
1274 | tx.channel = local->hw.conf.channel; | 1272 | tx.channel = local->hw.conf.channel; |
1275 | info->band = tx.channel->band; | 1273 | info->band = tx.channel->band; |
1276 | 1274 | ||
@@ -1417,7 +1415,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1417 | } | 1415 | } |
1418 | 1416 | ||
1419 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && | 1417 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && |
1420 | local->hw.conf.dynamic_ps_timeout > 0) { | 1418 | local->hw.conf.dynamic_ps_timeout > 0 && |
1419 | !local->sw_scanning && !local->hw_scanning && local->ps_sdata) { | ||
1421 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | 1420 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { |
1422 | ieee80211_stop_queues_by_reason(&local->hw, | 1421 | ieee80211_stop_queues_by_reason(&local->hw, |
1423 | IEEE80211_QUEUE_STOP_REASON_PS); | 1422 | IEEE80211_QUEUE_STOP_REASON_PS); |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 949d857debd8..66ce96a69f31 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -341,6 +341,52 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) | |||
341 | } | 341 | } |
342 | EXPORT_SYMBOL(ieee80211_stop_queue); | 342 | EXPORT_SYMBOL(ieee80211_stop_queue); |
343 | 343 | ||
344 | void ieee80211_add_pending_skb(struct ieee80211_local *local, | ||
345 | struct sk_buff *skb) | ||
346 | { | ||
347 | struct ieee80211_hw *hw = &local->hw; | ||
348 | unsigned long flags; | ||
349 | int queue = skb_get_queue_mapping(skb); | ||
350 | |||
351 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | ||
352 | __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | ||
353 | __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_PENDING); | ||
354 | skb_queue_tail(&local->pending[queue], skb); | ||
355 | __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | ||
356 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
357 | } | ||
358 | |||
359 | int ieee80211_add_pending_skbs(struct ieee80211_local *local, | ||
360 | struct sk_buff_head *skbs) | ||
361 | { | ||
362 | struct ieee80211_hw *hw = &local->hw; | ||
363 | struct sk_buff *skb; | ||
364 | unsigned long flags; | ||
365 | int queue, ret = 0, i; | ||
366 | |||
367 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | ||
368 | for (i = 0; i < hw->queues; i++) | ||
369 | __ieee80211_stop_queue(hw, i, | ||
370 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | ||
371 | |||
372 | while ((skb = skb_dequeue(skbs))) { | ||
373 | ret++; | ||
374 | queue = skb_get_queue_mapping(skb); | ||
375 | skb_queue_tail(&local->pending[queue], skb); | ||
376 | } | ||
377 | |||
378 | for (i = 0; i < hw->queues; i++) { | ||
379 | if (ret) | ||
380 | __ieee80211_stop_queue(hw, i, | ||
381 | IEEE80211_QUEUE_STOP_REASON_PENDING); | ||
382 | __ieee80211_wake_queue(hw, i, | ||
383 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | ||
384 | } | ||
385 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
386 | |||
387 | return ret; | ||
388 | } | ||
389 | |||
344 | void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, | 390 | void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, |
345 | enum queue_stop_reason reason) | 391 | enum queue_stop_reason reason) |
346 | { | 392 | { |
@@ -657,15 +703,15 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) | |||
657 | 703 | ||
658 | switch (queue) { | 704 | switch (queue) { |
659 | case 3: /* AC_BK */ | 705 | case 3: /* AC_BK */ |
660 | qparam.cw_max = aCWmin; | 706 | qparam.cw_max = aCWmax; |
661 | qparam.cw_min = aCWmax; | 707 | qparam.cw_min = aCWmin; |
662 | qparam.txop = 0; | 708 | qparam.txop = 0; |
663 | qparam.aifs = 7; | 709 | qparam.aifs = 7; |
664 | break; | 710 | break; |
665 | default: /* never happens but let's not leave undefined */ | 711 | default: /* never happens but let's not leave undefined */ |
666 | case 2: /* AC_BE */ | 712 | case 2: /* AC_BE */ |
667 | qparam.cw_max = aCWmin; | 713 | qparam.cw_max = aCWmax; |
668 | qparam.cw_min = aCWmax; | 714 | qparam.cw_min = aCWmin; |
669 | qparam.txop = 0; | 715 | qparam.txop = 0; |
670 | qparam.aifs = 3; | 716 | qparam.aifs = 3; |
671 | break; | 717 | break; |
@@ -973,7 +1019,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
973 | if (local->open_count) { | 1019 | if (local->open_count) { |
974 | res = drv_start(local); | 1020 | res = drv_start(local); |
975 | 1021 | ||
976 | ieee80211_led_radio(local, hw->conf.radio_enabled); | 1022 | ieee80211_led_radio(local, true); |
977 | } | 1023 | } |
978 | 1024 | ||
979 | /* add interfaces */ | 1025 | /* add interfaces */ |
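
ieee80211_add_pending_skb() brackets the enqueue with two different stop reasons so the queue stays frozen for ..._PENDING even after ..._SKB_ADD is released. The mechanism behind that is a per-queue bitmask of stop reasons: the queue may run only while the mask is empty. A reduced, runnable model (names invented for illustration):

    #include <stdio.h>

    enum { REASON_SKB_ADD = 1 << 0, REASON_PENDING = 1 << 1 };

    static unsigned int stop_reasons;   /* one queue's mask of reasons */

    static void stop_queue(unsigned int reason)
    {
            stop_reasons |= reason;
    }

    static void wake_queue(unsigned int reason)
    {
            stop_reasons &= ~reason;
            if (!stop_reasons)                /* runs only when all clear */
                    printf("queue running again\n");
    }

    int main(void)
    {
            stop_queue(REASON_SKB_ADD);       /* freeze while list is edited */
            stop_queue(REASON_PENDING);       /* keep stopped: work queued */
            wake_queue(REASON_SKB_ADD);       /* still stopped */
            wake_queue(REASON_PENDING);       /* prints: mask now empty */
            return 0;
    }

The batched ieee80211_add_pending_skbs() applies the same bracket across all hardware queues and reports how many frames it moved, which ap_sta_ps_end() uses for its debug accounting.
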
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index a01154e127f0..d2d81b103341 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c | |||
@@ -306,82 +306,6 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev, | |||
306 | return 0; | 306 | return 0; |
307 | } | 307 | } |
308 | 308 | ||
309 | static int ieee80211_ioctl_siwtxpower(struct net_device *dev, | ||
310 | struct iw_request_info *info, | ||
311 | union iwreq_data *data, char *extra) | ||
312 | { | ||
313 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
314 | struct ieee80211_channel* chan = local->hw.conf.channel; | ||
315 | bool reconf = false; | ||
316 | u32 reconf_flags = 0; | ||
317 | int new_power_level; | ||
318 | |||
319 | if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) | ||
320 | return -EINVAL; | ||
321 | if (data->txpower.flags & IW_TXPOW_RANGE) | ||
322 | return -EINVAL; | ||
323 | if (!chan) | ||
324 | return -EINVAL; | ||
325 | |||
326 | /* only change when not disabling */ | ||
327 | if (!data->txpower.disabled) { | ||
328 | if (data->txpower.fixed) { | ||
329 | if (data->txpower.value < 0) | ||
330 | return -EINVAL; | ||
331 | new_power_level = data->txpower.value; | ||
332 | /* | ||
333 | * Debatable, but we cannot do a fixed power | ||
334 | * level above the regulatory constraint. | ||
335 | * Use "iwconfig wlan0 txpower 15dBm" instead. | ||
336 | */ | ||
337 | if (new_power_level > chan->max_power) | ||
338 | return -EINVAL; | ||
339 | } else { | ||
340 | /* | ||
341 | * Automatic power level setting, max being the value | ||
342 | * passed in from userland. | ||
343 | */ | ||
344 | if (data->txpower.value < 0) | ||
345 | new_power_level = -1; | ||
346 | else | ||
347 | new_power_level = data->txpower.value; | ||
348 | } | ||
349 | |||
350 | reconf = true; | ||
351 | |||
352 | /* | ||
353 | * ieee80211_hw_config() will limit to the channel's | ||
354 | * max power and possibly power constraint from AP. | ||
355 | */ | ||
356 | local->user_power_level = new_power_level; | ||
357 | } | ||
358 | |||
359 | if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) { | ||
360 | local->hw.conf.radio_enabled = !(data->txpower.disabled); | ||
361 | reconf_flags |= IEEE80211_CONF_CHANGE_RADIO_ENABLED; | ||
362 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); | ||
363 | } | ||
364 | |||
365 | if (reconf || reconf_flags) | ||
366 | ieee80211_hw_config(local, reconf_flags); | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int ieee80211_ioctl_giwtxpower(struct net_device *dev, | ||
372 | struct iw_request_info *info, | ||
373 | union iwreq_data *data, char *extra) | ||
374 | { | ||
375 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
376 | |||
377 | data->txpower.fixed = 1; | ||
378 | data->txpower.disabled = !(local->hw.conf.radio_enabled); | ||
379 | data->txpower.value = local->hw.conf.power_level; | ||
380 | data->txpower.flags = IW_TXPOW_DBM; | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | static int ieee80211_ioctl_siwpower(struct net_device *dev, | 309 | static int ieee80211_ioctl_siwpower(struct net_device *dev, |
386 | struct iw_request_info *info, | 310 | struct iw_request_info *info, |
387 | struct iw_param *wrq, | 311 | struct iw_param *wrq, |
@@ -658,8 +582,8 @@ static const iw_handler ieee80211_handler[] = | |||
658 | (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */ | 582 | (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */ |
659 | (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */ | 583 | (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */ |
660 | (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */ | 584 | (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */ |
661 | (iw_handler) ieee80211_ioctl_siwtxpower, /* SIOCSIWTXPOW */ | 585 | (iw_handler) cfg80211_wext_siwtxpower, /* SIOCSIWTXPOW */ |
662 | (iw_handler) ieee80211_ioctl_giwtxpower, /* SIOCGIWTXPOW */ | 586 | (iw_handler) cfg80211_wext_giwtxpower, /* SIOCGIWTXPOW */ |
663 | (iw_handler) cfg80211_wext_siwretry, /* SIOCSIWRETRY */ | 587 | (iw_handler) cfg80211_wext_siwretry, /* SIOCSIWRETRY */ |
664 | (iw_handler) cfg80211_wext_giwretry, /* SIOCGIWRETRY */ | 588 | (iw_handler) cfg80211_wext_giwretry, /* SIOCGIWRETRY */ |
665 | (iw_handler) cfg80211_wext_siwencode, /* SIOCSIWENCODE */ | 589 | (iw_handler) cfg80211_wext_siwencode, /* SIOCSIWENCODE */ |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 694343b9102b..116a923b14d6 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -101,7 +101,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
101 | * Now we know the 1d priority, fill in the QoS header if | 101 | * Now we know the 1d priority, fill in the QoS header if |
102 | * there is one (and we haven't done this before). | 102 | * there is one (and we haven't done this before). |
103 | */ | 103 | */ |
104 | if (!skb->requeue && ieee80211_is_data_qos(hdr->frame_control)) { | 104 | if (ieee80211_is_data_qos(hdr->frame_control)) { |
105 | u8 *p = ieee80211_get_qos_ctl(hdr); | 105 | u8 *p = ieee80211_get_qos_ctl(hdr); |
106 | u8 ack_policy = 0; | 106 | u8 ack_policy = 0; |
107 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; | 107 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 425ab144f15d..5874657af7f2 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -260,8 +260,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
260 | ip_send_check(ip_hdr(skb)); | 260 | ip_send_check(ip_hdr(skb)); |
261 | 261 | ||
262 | /* drop old route */ | 262 | /* drop old route */ |
263 | dst_release(skb->dst); | 263 | skb_dst_drop(skb); |
264 | skb->dst = &rt->u.dst; | 264 | skb_dst_set(skb, &rt->u.dst); |
265 | 265 | ||
266 | /* Another hack: avoid icmp_send in ip_fragment */ | 266 | /* Another hack: avoid icmp_send in ip_fragment */ |
267 | skb->local_df = 1; | 267 | skb->local_df = 1; |
@@ -324,8 +324,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
324 | } | 324 | } |
325 | 325 | ||
326 | /* drop old route */ | 326 | /* drop old route */ |
327 | dst_release(skb->dst); | 327 | skb_dst_drop(skb); |
328 | skb->dst = &rt->u.dst; | 328 | skb_dst_set(skb, &rt->u.dst); |
329 | 329 | ||
330 | /* Another hack: avoid icmp_send in ip_fragment */ | 330 | /* Another hack: avoid icmp_send in ip_fragment */ |
331 | skb->local_df = 1; | 331 | skb->local_df = 1; |
@@ -388,8 +388,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
388 | goto tx_error_put; | 388 | goto tx_error_put; |
389 | 389 | ||
390 | /* drop old route */ | 390 | /* drop old route */ |
391 | dst_release(skb->dst); | 391 | skb_dst_drop(skb); |
392 | skb->dst = &rt->u.dst; | 392 | skb_dst_set(skb, &rt->u.dst); |
393 | 393 | ||
394 | /* mangle the packet */ | 394 | /* mangle the packet */ |
395 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 395 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
@@ -465,8 +465,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
465 | goto tx_error_put; | 465 | goto tx_error_put; |
466 | 466 | ||
467 | /* drop old route */ | 467 | /* drop old route */ |
468 | dst_release(skb->dst); | 468 | skb_dst_drop(skb); |
469 | skb->dst = &rt->u.dst; | 469 | skb_dst_set(skb, &rt->u.dst); |
470 | 470 | ||
471 | /* mangle the packet */ | 471 | /* mangle the packet */ |
472 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 472 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
@@ -553,8 +553,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
553 | IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n"); | 553 | IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n"); |
554 | goto tx_error; | 554 | goto tx_error; |
555 | } | 555 | } |
556 | if (skb->dst) | 556 | if (skb_dst(skb)) |
557 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 557 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
558 | 558 | ||
559 | df |= (old_iph->frag_off & htons(IP_DF)); | 559 | df |= (old_iph->frag_off & htons(IP_DF)); |
560 | 560 | ||
@@ -596,8 +596,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
596 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 596 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
597 | 597 | ||
598 | /* drop old route */ | 598 | /* drop old route */ |
599 | dst_release(skb->dst); | 599 | skb_dst_drop(skb); |
600 | skb->dst = &rt->u.dst; | 600 | skb_dst_set(skb, &rt->u.dst); |
601 | 601 | ||
602 | /* | 602 | /* |
603 | * Push down and install the IPIP header. | 603 | * Push down and install the IPIP header. |
@@ -665,8 +665,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
665 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n"); | 665 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n"); |
666 | goto tx_error; | 666 | goto tx_error; |
667 | } | 667 | } |
668 | if (skb->dst) | 668 | if (skb_dst(skb)) |
669 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 669 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
670 | 670 | ||
671 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { | 671 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { |
672 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 672 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
@@ -702,8 +702,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
702 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 702 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
703 | 703 | ||
704 | /* drop old route */ | 704 | /* drop old route */ |
705 | dst_release(skb->dst); | 705 | skb_dst_drop(skb); |
706 | skb->dst = &rt->u.dst; | 706 | skb_dst_set(skb, &rt->u.dst); |
707 | 707 | ||
708 | /* | 708 | /* |
709 | * Push down and install the IPIP header. | 709 | * Push down and install the IPIP header. |
@@ -775,8 +775,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
775 | ip_send_check(ip_hdr(skb)); | 775 | ip_send_check(ip_hdr(skb)); |
776 | 776 | ||
777 | /* drop old route */ | 777 | /* drop old route */ |
778 | dst_release(skb->dst); | 778 | skb_dst_drop(skb); |
779 | skb->dst = &rt->u.dst; | 779 | skb_dst_set(skb, &rt->u.dst); |
780 | 780 | ||
781 | /* Another hack: avoid icmp_send in ip_fragment */ | 781 | /* Another hack: avoid icmp_send in ip_fragment */ |
782 | skb->local_df = 1; | 782 | skb->local_df = 1; |
@@ -828,8 +828,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
828 | } | 828 | } |
829 | 829 | ||
830 | /* drop old route */ | 830 | /* drop old route */ |
831 | dst_release(skb->dst); | 831 | skb_dst_drop(skb); |
832 | skb->dst = &rt->u.dst; | 832 | skb_dst_set(skb, &rt->u.dst); |
833 | 833 | ||
834 | /* Another hack: avoid icmp_send in ip_fragment */ | 834 | /* Another hack: avoid icmp_send in ip_fragment */ |
835 | skb->local_df = 1; | 835 | skb->local_df = 1; |
@@ -900,8 +900,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
900 | goto tx_error_put; | 900 | goto tx_error_put; |
901 | 901 | ||
902 | /* drop the old route when skb is not shared */ | 902 | /* drop the old route when skb is not shared */ |
903 | dst_release(skb->dst); | 903 | skb_dst_drop(skb); |
904 | skb->dst = &rt->u.dst; | 904 | skb_dst_set(skb, &rt->u.dst); |
905 | 905 | ||
906 | ip_vs_nat_icmp(skb, pp, cp, 0); | 906 | ip_vs_nat_icmp(skb, pp, cp, 0); |
907 | 907 | ||
@@ -975,8 +975,8 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
975 | goto tx_error_put; | 975 | goto tx_error_put; |
976 | 976 | ||
977 | /* drop the old route when skb is not shared */ | 977 | /* drop the old route when skb is not shared */ |
978 | dst_release(skb->dst); | 978 | skb_dst_drop(skb); |
979 | skb->dst = &rt->u.dst; | 979 | skb_dst_set(skb, &rt->u.dst); |
980 | 980 | ||
981 | ip_vs_nat_icmp_v6(skb, pp, cp, 0); | 981 | ip_vs_nat_icmp_v6(skb, pp, cp, 0); |
982 | 982 | ||
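All of the ip_vs_xmit.c hunks above are the same mechanical conversion: open-coded skb->dst manipulation becomes the new skb_dst_drop()/skb_dst_set()/skb_dst() accessors. A minimal sketch of the before/after pattern, with the route lookup elided:

	/* before: open-coded dst replacement */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;

	/* after: accessor-based equivalent used throughout this merge */
	skb_dst_drop(skb);		/* drop the old route, if any */
	skb_dst_set(skb, &rt->u.dst);	/* install the new one */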
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 8a3875e36ec2..497b2224536f 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -48,7 +48,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, | |||
48 | { | 48 | { |
49 | struct nf_conntrack_expect *exp; | 49 | struct nf_conntrack_expect *exp; |
50 | struct iphdr *iph = ip_hdr(skb); | 50 | struct iphdr *iph = ip_hdr(skb); |
51 | struct rtable *rt = skb->rtable; | 51 | struct rtable *rt = skb_rtable(skb); |
52 | struct in_device *in_dev; | 52 | struct in_device *in_dev; |
53 | __be32 mask = 0; | 53 | __be32 mask = 0; |
54 | 54 | ||
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 6b08d3277965..1b816a2ea813 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/netfilter/nfnetlink_conntrack.h> | 22 | #include <linux/netfilter/nfnetlink_conntrack.h> |
23 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
24 | #include <net/netfilter/nf_conntrack_l4proto.h> | 24 | #include <net/netfilter/nf_conntrack_l4proto.h> |
25 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
25 | #include <net/netfilter/nf_log.h> | 26 | #include <net/netfilter/nf_log.h> |
26 | 27 | ||
27 | /* Timeouts are based on values from RFC4340: | 28 | /* Timeouts are based on values from RFC4340: |
@@ -551,6 +552,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, | |||
551 | ct->proto.dccp.state = new_state; | 552 | ct->proto.dccp.state = new_state; |
552 | spin_unlock_bh(&ct->lock); | 553 | spin_unlock_bh(&ct->lock); |
553 | 554 | ||
555 | if (new_state != old_state) | ||
556 | nf_conntrack_event_cache(IPCT_PROTOINFO, ct); | ||
557 | |||
554 | dn = dccp_pernet(net); | 558 | dn = dccp_pernet(net); |
555 | nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]); | 559 | nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]); |
556 | 560 | ||
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 175a28c96168..a54a0af0edba 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -176,7 +176,7 @@ static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple, | |||
176 | static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, | 176 | static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, |
177 | struct nf_conntrack_tuple *tuple) | 177 | struct nf_conntrack_tuple *tuple) |
178 | { | 178 | { |
179 | struct net *net = dev_net(skb->dev ? skb->dev : skb->dst->dev); | 179 | struct net *net = dev_net(skb->dev ? skb->dev : skb_dst(skb)->dev); |
180 | const struct gre_hdr_pptp *pgrehdr; | 180 | const struct gre_hdr_pptp *pgrehdr; |
181 | struct gre_hdr_pptp _pgrehdr; | 181 | struct gre_hdr_pptp _pgrehdr; |
182 | __be16 srckey; | 182 | __be16 srckey; |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 5c5739c741f1..5142e60af540 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -648,6 +648,14 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
648 | sender->td_end = end; | 648 | sender->td_end = end; |
649 | sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; | 649 | sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; |
650 | } | 650 | } |
651 | if (tcph->ack) { | ||
652 | if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { | ||
653 | sender->td_maxack = ack; | ||
654 | sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; | ||
655 | } else if (after(ack, sender->td_maxack)) | ||
656 | sender->td_maxack = ack; | ||
657 | } | ||
658 | |||
651 | /* | 659 | /* |
652 | * Update receiver data. | 660 | * Update receiver data. |
653 | */ | 661 | */ |
@@ -933,6 +941,16 @@ static int tcp_packet(struct nf_conn *ct, | |||
933 | return -NF_ACCEPT; | 941 | return -NF_ACCEPT; |
934 | case TCP_CONNTRACK_CLOSE: | 942 | case TCP_CONNTRACK_CLOSE: |
935 | if (index == TCP_RST_SET | 943 | if (index == TCP_RST_SET |
944 | && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) | ||
945 | && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) { | ||
946 | /* Invalid RST */ | ||
947 | write_unlock_bh(&tcp_lock); | ||
948 | if (LOG_INVALID(net, IPPROTO_TCP)) | ||
949 | nf_log_packet(pf, 0, skb, NULL, NULL, NULL, | ||
950 | "nf_ct_tcp: invalid RST "); | ||
951 | return -NF_ACCEPT; | ||
952 | } | ||
953 | if (index == TCP_RST_SET | ||
936 | && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status) | 954 | && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status) |
937 | && ct->proto.tcp.last_index == TCP_SYN_SET) | 955 | && ct->proto.tcp.last_index == TCP_SYN_SET) |
938 | || (!test_bit(IPS_ASSURED_BIT, &ct->status) | 956 | || (!test_bit(IPS_ASSURED_BIT, &ct->status) |
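The td_maxack bookkeeping added above lets the CLOSE-state handler reject forged resets: a RST whose sequence number lies before the highest ACK seen from the other direction cannot belong to the tracked connection. Condensed, using the wraparound-safe before()/after() helpers from <net/tcp.h>:

	/* remember the highest ACK this sender has issued */
	if (tcph->ack) {
		if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
			sender->td_maxack = ack;
			sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
		} else if (after(ack, sender->td_maxack))
			sender->td_maxack = ack;
	}

	/* in TCP_CONNTRACK_CLOSE: a RST sequenced below that ACK is
	 * spoofed or stale, so log it and return -NF_ACCEPT */
	if (index == TCP_RST_SET &&
	    (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
	    before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack))
		return -NF_ACCEPT;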
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index fd326ac27ec8..66a6dd5c519a 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -581,6 +581,12 @@ nfulnl_log_packet(u_int8_t pf, | |||
581 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) | 581 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) |
582 | + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); | 582 | + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); |
583 | 583 | ||
584 | if (in && skb_mac_header_was_set(skb)) { | ||
585 | size += nla_total_size(skb->dev->hard_header_len) | ||
586 | + nla_total_size(sizeof(u_int16_t)) /* hwtype */ | ||
587 | + nla_total_size(sizeof(u_int16_t)); /* hwlen */ | ||
588 | } | ||
589 | |||
584 | spin_lock_bh(&inst->lock); | 590 | spin_lock_bh(&inst->lock); |
585 | 591 | ||
586 | if (inst->flags & NFULNL_CFG_F_SEQ) | 592 | if (inst->flags & NFULNL_CFG_F_SEQ) |
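The extra accounting above exists because nfulnl_log_packet() sizes the netlink skb before any attributes are built; every optional attribute must be counted with nla_total_size(), which includes the nlattr header plus alignment padding. A sketch of the idiom:

	size_t size = 0;

	/* each attribute costs a padded header plus its payload */
	size += nla_total_size(sizeof(struct nfulnl_msg_packet_hdr));
	if (in && skb_mac_header_was_set(skb))
		size += nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */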
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 8c860112ce05..71daa0934b6c 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * This is a module which is used for queueing packets and communicating with | 2 | * This is a module which is used for queueing packets and communicating with |
3 | * userspace via nfetlink. | 3 | * userspace via nfnetlink. |
4 | * | 4 | * |
5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> | 5 | * (C) 2005 by Harald Welte <laforge@netfilter.org> |
6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> | 6 | * (C) 2007 by Patrick McHardy <kaber@trash.net> |
@@ -932,6 +932,8 @@ static void __exit nfnetlink_queue_fini(void) | |||
932 | #endif | 932 | #endif |
933 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 933 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
934 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 934 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
935 | |||
936 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
935 | } | 937 | } |
936 | 938 | ||
937 | MODULE_DESCRIPTION("netfilter packet queue handler"); | 939 | MODULE_DESCRIPTION("netfilter packet queue handler"); |
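The rcu_barrier() here matches the vlan cleanup change earlier in this merge: synchronize_net() only waits for RCU read sides to finish, while rcu_barrier() also waits for every pending call_rcu() callback to run, which a module must do before its callback code is unloaded. A sketch of the required exit ordering (helper name hypothetical):

	static void __exit example_exit(void)
	{
		/* 1. unregister, so nothing new can queue call_rcu() */
		example_unregister_everything();	/* hypothetical */

		/* 2. wait for callbacks already queued to complete */
		rcu_barrier();

		/* 3. only now may the module text and data go away */
	}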
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 4f3b1f808795..eda64c1cb1e5 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -73,11 +73,11 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
73 | } | 73 | } |
74 | 74 | ||
75 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { | 75 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { |
76 | if (dst_mtu(skb->dst) <= minlen) { | 76 | if (dst_mtu(skb_dst(skb)) <= minlen) { |
77 | if (net_ratelimit()) | 77 | if (net_ratelimit()) |
78 | printk(KERN_ERR "xt_TCPMSS: " | 78 | printk(KERN_ERR "xt_TCPMSS: " |
79 | "unknown or invalid path-MTU (%u)\n", | 79 | "unknown or invalid path-MTU (%u)\n", |
80 | dst_mtu(skb->dst)); | 80 | dst_mtu(skb_dst(skb))); |
81 | return -1; | 81 | return -1; |
82 | } | 82 | } |
83 | if (in_mtu <= minlen) { | 83 | if (in_mtu <= minlen) { |
@@ -86,7 +86,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
86 | "invalid path-MTU (%u)\n", in_mtu); | 86 | "invalid path-MTU (%u)\n", in_mtu); |
87 | return -1; | 87 | return -1; |
88 | } | 88 | } |
89 | newmss = min(dst_mtu(skb->dst), in_mtu) - minlen; | 89 | newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen; |
90 | } else | 90 | } else |
91 | newmss = info->mss; | 91 | newmss = info->mss; |
92 | 92 | ||
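In the clamp-to-PMTU case the new MSS is the smaller of the forward-path MTU (dst_mtu(skb_dst(skb))) and the reverse-path MTU (in_mtu), minus the IP and TCP header lengths in minlen. A worked example of the arithmetic:

	/* e.g. a PPPoE uplink: dst_mtu = 1492, in_mtu = 1500,
	 * minlen = sizeof(struct iphdr) + sizeof(struct tcphdr) = 40 */
	newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
	/* = 1492 - 40 = 1452, written back into the SYN's MSS option */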
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index a5b5369c30f9..219dcdbe388c 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -926,7 +926,7 @@ static int dl_seq_show(struct seq_file *s, void *v) | |||
926 | if (!hlist_empty(&htable->hash[*bucket])) { | 926 | if (!hlist_empty(&htable->hash[*bucket])) { |
927 | hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) | 927 | hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) |
928 | if (dl_seq_real_show(ent, htable->family, s)) | 928 | if (dl_seq_real_show(ent, htable->family, s)) |
929 | return 1; | 929 | return -1; |
930 | } | 930 | } |
931 | return 0; | 931 | return 0; |
932 | } | 932 | } |
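The one-character fix above is about the seq_file ->show() convention: returning 1 means SEQ_SKIP, which silently discards the record, whereas a negative return lets the core notice the overflowed buffer, enlarge it and retry. A sketch of the convention (the record being printed is hypothetical):

	static int example_seq_show(struct seq_file *s, void *v)
	{
		if (seq_printf(s, "%lu\n", *(unsigned long *)v) < 0)
			return -1;	/* overflow: retried with a bigger buffer */
		return 0;		/* emitted; returning 1 would discard it */
	}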
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c index 328bd20ddd25..4cbfebda8fa1 100644 --- a/net/netfilter/xt_policy.c +++ b/net/netfilter/xt_policy.c | |||
@@ -86,7 +86,7 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info, | |||
86 | unsigned short family) | 86 | unsigned short family) |
87 | { | 87 | { |
88 | const struct xt_policy_elem *e; | 88 | const struct xt_policy_elem *e; |
89 | const struct dst_entry *dst = skb->dst; | 89 | const struct dst_entry *dst = skb_dst(skb); |
90 | int strict = info->flags & XT_POLICY_MATCH_STRICT; | 90 | int strict = info->flags & XT_POLICY_MATCH_STRICT; |
91 | int i, pos; | 91 | int i, pos; |
92 | 92 | ||
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c index 67419287bc7e..484d1689bfde 100644 --- a/net/netfilter/xt_realm.c +++ b/net/netfilter/xt_realm.c | |||
@@ -25,7 +25,7 @@ static bool | |||
25 | realm_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 25 | realm_mt(const struct sk_buff *skb, const struct xt_match_param *par) |
26 | { | 26 | { |
27 | const struct xt_realm_info *info = par->matchinfo; | 27 | const struct xt_realm_info *info = par->matchinfo; |
28 | const struct dst_entry *dst = skb->dst; | 28 | const struct dst_entry *dst = skb_dst(skb); |
29 | 29 | ||
30 | return (info->id == (dst->tclassid & info->mask)) ^ info->invert; | 30 | return (info->id == (dst->tclassid & info->mask)) ^ info->invert; |
31 | } | 31 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c7c5d524967e..4f76e5552d8e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -372,8 +372,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct | |||
372 | goto oom; | 372 | goto oom; |
373 | 373 | ||
374 | /* drop any routing info */ | 374 | /* drop any routing info */ |
375 | dst_release(skb->dst); | 375 | skb_dst_drop(skb); |
376 | skb->dst = NULL; | ||
377 | 376 | ||
378 | /* drop conntrack reference */ | 377 | /* drop conntrack reference */ |
379 | nf_reset(skb); | 378 | nf_reset(skb); |
@@ -621,8 +620,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet | |||
621 | 620 | ||
622 | skb_set_owner_r(skb, sk); | 621 | skb_set_owner_r(skb, sk); |
623 | skb->dev = NULL; | 622 | skb->dev = NULL; |
624 | dst_release(skb->dst); | 623 | skb_dst_drop(skb); |
625 | skb->dst = NULL; | ||
626 | 624 | ||
627 | /* drop conntrack reference */ | 625 | /* drop conntrack reference */ |
628 | nf_reset(skb); | 626 | nf_reset(skb); |
@@ -1582,9 +1580,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1582 | break; | 1580 | break; |
1583 | case PACKET_MR_UNICAST: | 1581 | case PACKET_MR_UNICAST: |
1584 | if (what > 0) | 1582 | if (what > 0) |
1585 | return dev_unicast_add(dev, i->addr, i->alen); | 1583 | return dev_unicast_add(dev, i->addr); |
1586 | else | 1584 | else |
1587 | return dev_unicast_delete(dev, i->addr, i->alen); | 1585 | return dev_unicast_delete(dev, i->addr); |
1588 | break; | 1586 | break; |
1589 | default:; | 1587 | default:; |
1590 | } | 1588 | } |
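Same API change as the vlan hunks at the top of this merge: dev_unicast_add() and dev_unicast_delete() no longer take a length argument, the address is assumed to be dev->addr_len bytes. The calling convention after the change (error handling elided):

	err = dev_unicast_add(dev, addr);	/* was (dev, addr, ETH_ALEN) */
	...
	dev_unicast_delete(dev, addr);		/* ditto */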
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index 4aa888584d20..480839dfc560 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c | |||
@@ -115,10 +115,10 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb) | |||
115 | rskb->truesize += rskb->len; | 115 | rskb->truesize += rskb->len; |
116 | 116 | ||
117 | /* Avoid nested fragments */ | 117 | /* Avoid nested fragments */ |
118 | for (fs = skb_shinfo(skb)->frag_list; fs; fs = fs->next) | 118 | skb_walk_frags(skb, fs) |
119 | flen += fs->len; | 119 | flen += fs->len; |
120 | skb->next = skb_shinfo(skb)->frag_list; | 120 | skb->next = skb_shinfo(skb)->frag_list; |
121 | skb_shinfo(skb)->frag_list = NULL; | 121 | skb_frag_list_init(skb); |
122 | skb->len -= flen; | 122 | skb->len -= flen; |
123 | skb->data_len -= flen; | 123 | skb->data_len -= flen; |
124 | skb->truesize -= flen; | 124 | skb->truesize -= flen; |
@@ -212,8 +212,9 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) | |||
212 | dev->stats.tx_bytes += len; | 212 | dev->stats.tx_bytes += len; |
213 | } | 213 | } |
214 | 214 | ||
215 | if (!pep_writeable(sk)) | 215 | netif_stop_queue(dev); |
216 | netif_stop_queue(dev); | 216 | if (pep_writeable(sk)) |
217 | netif_wake_queue(dev); | ||
217 | return 0; | 218 | return 0; |
218 | } | 219 | } |
219 | 220 | ||
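Both phonet hunks replace the open-coded frag_list walk with the new skb_walk_frags() iterator and the skb_frag_list_init() helper. The idiom, as used above to take ownership of nested fragments:

	struct sk_buff *fs;
	int flen = 0;

	skb_walk_frags(skb, fs)		/* each skb on skb's frag_list */
		flen += fs->len;

	skb->next = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);	/* clears frag_list via the helper */
	skb->len -= flen;
	skb->data_len -= flen;
	skb->truesize -= flen;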
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 8ad2b5333881..eef833ea6d7b 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -940,10 +940,10 @@ int pep_write(struct sock *sk, struct sk_buff *skb) | |||
940 | rskb->truesize += rskb->len; | 940 | rskb->truesize += rskb->len; |
941 | 941 | ||
942 | /* Avoid nested fragments */ | 942 | /* Avoid nested fragments */ |
943 | for (fs = skb_shinfo(skb)->frag_list; fs; fs = fs->next) | 943 | skb_walk_frags(skb, fs) |
944 | flen += fs->len; | 944 | flen += fs->len; |
945 | skb->next = skb_shinfo(skb)->frag_list; | 945 | skb->next = skb_shinfo(skb)->frag_list; |
946 | skb_shinfo(skb)->frag_list = NULL; | 946 | skb_frag_list_init(skb); |
947 | skb->len -= flen; | 947 | skb->len -= flen; |
948 | skb->data_len -= flen; | 948 | skb->data_len -= flen; |
949 | skb->truesize -= flen; | 949 | skb->truesize -= flen; |
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 7f807b30cfbb..eaf765876458 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig | |||
@@ -10,22 +10,15 @@ menuconfig RFKILL | |||
10 | To compile this driver as a module, choose M here: the | 10 | To compile this driver as a module, choose M here: the |
11 | module will be called rfkill. | 11 | module will be called rfkill. |
12 | 12 | ||
13 | config RFKILL_INPUT | ||
14 | tristate "Input layer to RF switch connector" | ||
15 | depends on RFKILL && INPUT | ||
16 | help | ||
17 | Say Y here if you want kernel automatically toggle state | ||
18 | of RF switches on and off when user presses appropriate | ||
19 | button or a key on the keyboard. Without this module you | ||
20 | need a some kind of userspace application to control | ||
21 | state of the switches. | ||
22 | |||
23 | To compile this driver as a module, choose M here: the | ||
24 | module will be called rfkill-input. | ||
25 | |||
26 | # LED trigger support | 13 | # LED trigger support |
27 | config RFKILL_LEDS | 14 | config RFKILL_LEDS |
28 | bool | 15 | bool |
29 | depends on RFKILL && LEDS_TRIGGERS | 16 | depends on RFKILL |
17 | depends on LEDS_TRIGGERS = y || RFKILL = LEDS_TRIGGERS | ||
30 | default y | 18 | default y |
31 | 19 | ||
20 | config RFKILL_INPUT | ||
21 | bool "RF switch input support" if EMBEDDED | ||
22 | depends on RFKILL | ||
23 | depends on INPUT = y || RFKILL = INPUT | ||
24 | default y if !EMBEDDED | ||
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile index b38c430be057..662105352691 100644 --- a/net/rfkill/Makefile +++ b/net/rfkill/Makefile | |||
@@ -2,5 +2,6 @@ | |||
2 | # Makefile for the RF switch subsystem. | 2 | # Makefile for the RF switch subsystem. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_RFKILL) += rfkill.o | 5 | rfkill-y += core.o |
6 | obj-$(CONFIG_RFKILL_INPUT) += rfkill-input.o | 6 | rfkill-$(CONFIG_RFKILL_INPUT) += input.o |
7 | obj-$(CONFIG_RFKILL) += rfkill.o | ||
diff --git a/net/rfkill/core.c b/net/rfkill/core.c new file mode 100644 index 000000000000..4e68ab439d5d --- /dev/null +++ b/net/rfkill/core.c | |||
@@ -0,0 +1,1205 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., | ||
19 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/capability.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/rfkill.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/miscdevice.h> | ||
32 | #include <linux/wait.h> | ||
33 | #include <linux/poll.h> | ||
34 | #include <linux/fs.h> | ||
35 | |||
36 | #include "rfkill.h" | ||
37 | |||
38 | #define POLL_INTERVAL (5 * HZ) | ||
39 | |||
40 | #define RFKILL_BLOCK_HW BIT(0) | ||
41 | #define RFKILL_BLOCK_SW BIT(1) | ||
42 | #define RFKILL_BLOCK_SW_PREV BIT(2) | ||
43 | #define RFKILL_BLOCK_ANY (RFKILL_BLOCK_HW |\ | ||
44 | RFKILL_BLOCK_SW |\ | ||
45 | RFKILL_BLOCK_SW_PREV) | ||
46 | #define RFKILL_BLOCK_SW_SETCALL BIT(31) | ||
47 | |||
48 | struct rfkill { | ||
49 | spinlock_t lock; | ||
50 | |||
51 | const char *name; | ||
52 | enum rfkill_type type; | ||
53 | |||
54 | unsigned long state; | ||
55 | |||
56 | u32 idx; | ||
57 | |||
58 | bool registered; | ||
59 | bool suspended; | ||
60 | bool persistent; | ||
61 | |||
62 | const struct rfkill_ops *ops; | ||
63 | void *data; | ||
64 | |||
65 | #ifdef CONFIG_RFKILL_LEDS | ||
66 | struct led_trigger led_trigger; | ||
67 | const char *ledtrigname; | ||
68 | #endif | ||
69 | |||
70 | struct device dev; | ||
71 | struct list_head node; | ||
72 | |||
73 | struct delayed_work poll_work; | ||
74 | struct work_struct uevent_work; | ||
75 | struct work_struct sync_work; | ||
76 | }; | ||
77 | #define to_rfkill(d) container_of(d, struct rfkill, dev) | ||
78 | |||
79 | struct rfkill_int_event { | ||
80 | struct list_head list; | ||
81 | struct rfkill_event ev; | ||
82 | }; | ||
83 | |||
84 | struct rfkill_data { | ||
85 | struct list_head list; | ||
86 | struct list_head events; | ||
87 | struct mutex mtx; | ||
88 | wait_queue_head_t read_wait; | ||
89 | bool input_handler; | ||
90 | }; | ||
91 | |||
92 | |||
93 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
94 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
95 | MODULE_DESCRIPTION("RF switch support"); | ||
96 | MODULE_LICENSE("GPL"); | ||
97 | |||
98 | |||
99 | /* | ||
100 | * The locking here should be made much smarter, we currently have | ||
101 | * a bit of a stupid situation because drivers might want to register | ||
102 | * the rfkill struct under their own lock, and take this lock during | ||
103 | * rfkill method calls -- which will cause an AB-BA deadlock situation. | ||
104 | * | ||
105 | * To fix that, we need to rework this code here to be mostly lock-free | ||
106 | * and only use the mutex for list manipulations, not to protect the | ||
107 | * various other global variables. Then we can avoid holding the mutex | ||
108 | * around driver operations, and all is happy. | ||
109 | */ | ||
110 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
111 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
112 | static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */ | ||
113 | |||
114 | static unsigned int rfkill_default_state = 1; | ||
115 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
116 | MODULE_PARM_DESC(default_state, | ||
117 | "Default initial state for all radio types, 0 = radio off"); | ||
118 | |||
119 | static struct { | ||
120 | bool cur, sav; | ||
121 | } rfkill_global_states[NUM_RFKILL_TYPES]; | ||
122 | |||
123 | static bool rfkill_epo_lock_active; | ||
124 | |||
125 | |||
126 | #ifdef CONFIG_RFKILL_LEDS | ||
127 | static void rfkill_led_trigger_event(struct rfkill *rfkill) | ||
128 | { | ||
129 | struct led_trigger *trigger; | ||
130 | |||
131 | if (!rfkill->registered) | ||
132 | return; | ||
133 | |||
134 | trigger = &rfkill->led_trigger; | ||
135 | |||
136 | if (rfkill->state & RFKILL_BLOCK_ANY) | ||
137 | led_trigger_event(trigger, LED_OFF); | ||
138 | else | ||
139 | led_trigger_event(trigger, LED_FULL); | ||
140 | } | ||
141 | |||
142 | static void rfkill_led_trigger_activate(struct led_classdev *led) | ||
143 | { | ||
144 | struct rfkill *rfkill; | ||
145 | |||
146 | rfkill = container_of(led->trigger, struct rfkill, led_trigger); | ||
147 | |||
148 | rfkill_led_trigger_event(rfkill); | ||
149 | } | ||
150 | |||
151 | const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) | ||
152 | { | ||
153 | return rfkill->led_trigger.name; | ||
154 | } | ||
155 | EXPORT_SYMBOL(rfkill_get_led_trigger_name); | ||
156 | |||
157 | void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) | ||
158 | { | ||
159 | BUG_ON(!rfkill); | ||
160 | |||
161 | rfkill->ledtrigname = name; | ||
162 | } | ||
163 | EXPORT_SYMBOL(rfkill_set_led_trigger_name); | ||
164 | |||
165 | static int rfkill_led_trigger_register(struct rfkill *rfkill) | ||
166 | { | ||
167 | rfkill->led_trigger.name = rfkill->ledtrigname | ||
168 | ? : dev_name(&rfkill->dev); | ||
169 | rfkill->led_trigger.activate = rfkill_led_trigger_activate; | ||
170 | return led_trigger_register(&rfkill->led_trigger); | ||
171 | } | ||
172 | |||
173 | static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
174 | { | ||
175 | led_trigger_unregister(&rfkill->led_trigger); | ||
176 | } | ||
177 | #else | ||
178 | static void rfkill_led_trigger_event(struct rfkill *rfkill) | ||
179 | { | ||
180 | } | ||
181 | |||
182 | static inline int rfkill_led_trigger_register(struct rfkill *rfkill) | ||
183 | { | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
188 | { | ||
189 | } | ||
190 | #endif /* CONFIG_RFKILL_LEDS */ | ||
191 | |||
192 | static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, | ||
193 | enum rfkill_operation op) | ||
194 | { | ||
195 | unsigned long flags; | ||
196 | |||
197 | ev->idx = rfkill->idx; | ||
198 | ev->type = rfkill->type; | ||
199 | ev->op = op; | ||
200 | |||
201 | spin_lock_irqsave(&rfkill->lock, flags); | ||
202 | ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
203 | ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW | | ||
204 | RFKILL_BLOCK_SW_PREV)); | ||
205 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
206 | } | ||
207 | |||
208 | static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) | ||
209 | { | ||
210 | struct rfkill_data *data; | ||
211 | struct rfkill_int_event *ev; | ||
212 | |||
213 | list_for_each_entry(data, &rfkill_fds, list) { | ||
214 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
215 | if (!ev) | ||
216 | continue; | ||
217 | rfkill_fill_event(&ev->ev, rfkill, op); | ||
218 | mutex_lock(&data->mtx); | ||
219 | list_add_tail(&ev->list, &data->events); | ||
220 | mutex_unlock(&data->mtx); | ||
221 | wake_up_interruptible(&data->read_wait); | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static void rfkill_event(struct rfkill *rfkill) | ||
226 | { | ||
227 | if (!rfkill->registered || rfkill->suspended) | ||
228 | return; | ||
229 | |||
230 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
231 | |||
232 | /* also send event to /dev/rfkill */ | ||
233 | rfkill_send_events(rfkill, RFKILL_OP_CHANGE); | ||
234 | } | ||
235 | |||
236 | static bool __rfkill_set_hw_state(struct rfkill *rfkill, | ||
237 | bool blocked, bool *change) | ||
238 | { | ||
239 | unsigned long flags; | ||
240 | bool prev, any; | ||
241 | |||
242 | BUG_ON(!rfkill); | ||
243 | |||
244 | spin_lock_irqsave(&rfkill->lock, flags); | ||
245 | prev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
246 | if (blocked) | ||
247 | rfkill->state |= RFKILL_BLOCK_HW; | ||
248 | else | ||
249 | rfkill->state &= ~RFKILL_BLOCK_HW; | ||
250 | *change = prev != blocked; | ||
251 | any = rfkill->state & RFKILL_BLOCK_ANY; | ||
252 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
253 | |||
254 | rfkill_led_trigger_event(rfkill); | ||
255 | |||
256 | return any; | ||
257 | } | ||
258 | |||
259 | /** | ||
260 | * rfkill_set_block - wrapper for set_block method | ||
261 | * | ||
262 | * @rfkill: the rfkill struct to use | ||
263 | * @blocked: the new software state | ||
264 | * | ||
265 | * Calls the set_block method (when applicable) and handles notifications | ||
266 | * etc. as well. | ||
267 | */ | ||
268 | static void rfkill_set_block(struct rfkill *rfkill, bool blocked) | ||
269 | { | ||
270 | unsigned long flags; | ||
271 | int err; | ||
272 | |||
273 | /* | ||
274 | * Some platforms (...!) generate input events which affect the | ||
275 | * _hard_ kill state -- whenever something tries to change the | ||
276 | * current software state, query the hardware state too. | ||
277 | */ | ||
278 | if (rfkill->ops->query) | ||
279 | rfkill->ops->query(rfkill, rfkill->data); | ||
280 | |||
281 | spin_lock_irqsave(&rfkill->lock, flags); | ||
282 | if (rfkill->state & RFKILL_BLOCK_SW) | ||
283 | rfkill->state |= RFKILL_BLOCK_SW_PREV; | ||
284 | else | ||
285 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
286 | |||
287 | if (blocked) | ||
288 | rfkill->state |= RFKILL_BLOCK_SW; | ||
289 | else | ||
290 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
291 | |||
292 | rfkill->state |= RFKILL_BLOCK_SW_SETCALL; | ||
293 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
294 | |||
295 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
296 | return; | ||
297 | |||
298 | err = rfkill->ops->set_block(rfkill->data, blocked); | ||
299 | |||
300 | spin_lock_irqsave(&rfkill->lock, flags); | ||
301 | if (err) { | ||
302 | /* | ||
303 | * Failed -- reset status to _prev, this may be different | ||
304 | * from what we set _PREV to earlier in this function | ||
305 | * if rfkill_set_sw_state was invoked. | ||
306 | */ | ||
307 | if (rfkill->state & RFKILL_BLOCK_SW_PREV) | ||
308 | rfkill->state |= RFKILL_BLOCK_SW; | ||
309 | else | ||
310 | rfkill->state &= ~RFKILL_BLOCK_SW; | ||
311 | } | ||
312 | rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; | ||
313 | rfkill->state &= ~RFKILL_BLOCK_SW_PREV; | ||
314 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
315 | |||
316 | rfkill_led_trigger_event(rfkill); | ||
317 | rfkill_event(rfkill); | ||
318 | } | ||
319 | |||
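rfkill_set_block() is the core-to-driver path; the driver's side is the mandatory ->set_block() op it invokes. A hedged sketch of what such an op might look like (the mydrv_* names are hypothetical):

	/* invoked by rfkill_set_block() with rfkill->data as first arg */
	static int mydrv_set_block(void *data, bool blocked)
	{
		struct mydrv *priv = data;	/* ops_data from rfkill_alloc() */

		return mydrv_radio_power(priv, !blocked);	/* 0 on success */
	}

	static const struct rfkill_ops mydrv_rfkill_ops = {
		.set_block = mydrv_set_block,
	};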
320 | #ifdef CONFIG_RFKILL_INPUT | ||
321 | static atomic_t rfkill_input_disabled = ATOMIC_INIT(0); | ||
322 | |||
323 | /** | ||
324 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
325 | * @type: type of interfaces to be affected | ||
326 | * @blocked: the new state | ||
327 | * | ||
328 | * This function sets the state of all switches of given type, | ||
329 | * unless a specific switch is claimed by userspace (in which case, | ||
330 | * that switch is left alone) or suspended. | ||
331 | * | ||
332 | * Caller must have acquired rfkill_global_mutex. | ||
333 | */ | ||
334 | static void __rfkill_switch_all(const enum rfkill_type type, bool blocked) | ||
335 | { | ||
336 | struct rfkill *rfkill; | ||
337 | |||
338 | rfkill_global_states[type].cur = blocked; | ||
339 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
340 | if (rfkill->type != type) | ||
341 | continue; | ||
342 | |||
343 | rfkill_set_block(rfkill, blocked); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | /** | ||
348 | * rfkill_switch_all - Toggle state of all switches of given type | ||
349 | * @type: type of interfaces to be affected | ||
350 | * @blocked: the new state | ||
351 | * | ||
352 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked). | ||
353 | * Please refer to __rfkill_switch_all() for details. | ||
354 | * | ||
355 | * Does nothing if the EPO lock is active. | ||
356 | */ | ||
357 | void rfkill_switch_all(enum rfkill_type type, bool blocked) | ||
358 | { | ||
359 | if (atomic_read(&rfkill_input_disabled)) | ||
360 | return; | ||
361 | |||
362 | mutex_lock(&rfkill_global_mutex); | ||
363 | |||
364 | if (!rfkill_epo_lock_active) | ||
365 | __rfkill_switch_all(type, blocked); | ||
366 | |||
367 | mutex_unlock(&rfkill_global_mutex); | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * rfkill_epo - emergency power off all transmitters | ||
372 | * | ||
373 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | ||
374 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. | ||
375 | * | ||
376 | * The global state before the EPO is saved and can be restored later | ||
377 | * using rfkill_restore_states(). | ||
378 | */ | ||
379 | void rfkill_epo(void) | ||
380 | { | ||
381 | struct rfkill *rfkill; | ||
382 | int i; | ||
383 | |||
384 | if (atomic_read(&rfkill_input_disabled)) | ||
385 | return; | ||
386 | |||
387 | mutex_lock(&rfkill_global_mutex); | ||
388 | |||
389 | rfkill_epo_lock_active = true; | ||
390 | list_for_each_entry(rfkill, &rfkill_list, node) | ||
391 | rfkill_set_block(rfkill, true); | ||
392 | |||
393 | for (i = 0; i < NUM_RFKILL_TYPES; i++) { | ||
394 | rfkill_global_states[i].sav = rfkill_global_states[i].cur; | ||
395 | rfkill_global_states[i].cur = true; | ||
396 | } | ||
397 | |||
398 | mutex_unlock(&rfkill_global_mutex); | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * rfkill_restore_states - restore global states | ||
403 | * | ||
404 | * Restore (and sync switches to) the global state from the | ||
405 | * states saved in rfkill_global_states. This can undo the effects of | ||
406 | * a call to rfkill_epo(). | ||
407 | */ | ||
408 | void rfkill_restore_states(void) | ||
409 | { | ||
410 | int i; | ||
411 | |||
412 | if (atomic_read(&rfkill_input_disabled)) | ||
413 | return; | ||
414 | |||
415 | mutex_lock(&rfkill_global_mutex); | ||
416 | |||
417 | rfkill_epo_lock_active = false; | ||
418 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
419 | __rfkill_switch_all(i, rfkill_global_states[i].sav); | ||
420 | mutex_unlock(&rfkill_global_mutex); | ||
421 | } | ||
422 | |||
423 | /** | ||
424 | * rfkill_remove_epo_lock - unlock state changes | ||
425 | * | ||
426 | * Used by rfkill-input to manually unlock state changes when | ||
427 | * the EPO switch is deactivated. | ||
428 | */ | ||
429 | void rfkill_remove_epo_lock(void) | ||
430 | { | ||
431 | if (atomic_read(&rfkill_input_disabled)) | ||
432 | return; | ||
433 | |||
434 | mutex_lock(&rfkill_global_mutex); | ||
435 | rfkill_epo_lock_active = false; | ||
436 | mutex_unlock(&rfkill_global_mutex); | ||
437 | } | ||
438 | |||
439 | /** | ||
440 | * rfkill_is_epo_lock_active - returns true when EPO is active | ||
441 | * | ||
442 | * Returns 0 (false) if there is NOT an active EPO condition, | ||
443 | * and 1 (true) if there is an active EPO condition, which | ||
444 | * locks all radios in one of the BLOCKED states. | ||
445 | * | ||
446 | * Can be called in atomic context. | ||
447 | */ | ||
448 | bool rfkill_is_epo_lock_active(void) | ||
449 | { | ||
450 | return rfkill_epo_lock_active; | ||
451 | } | ||
452 | |||
453 | /** | ||
454 | * rfkill_get_global_sw_state - returns global state for a type | ||
455 | * @type: the type to get the global state of | ||
456 | * | ||
457 | * Returns the current global state for a given wireless | ||
458 | * device type. | ||
459 | */ | ||
460 | bool rfkill_get_global_sw_state(const enum rfkill_type type) | ||
461 | { | ||
462 | return rfkill_global_states[type].cur; | ||
463 | } | ||
464 | #endif | ||
465 | |||
466 | |||
467 | bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) | ||
468 | { | ||
469 | bool ret, change; | ||
470 | |||
471 | ret = __rfkill_set_hw_state(rfkill, blocked, &change); | ||
472 | |||
473 | if (!rfkill->registered) | ||
474 | return ret; | ||
475 | |||
476 | if (change) | ||
477 | schedule_work(&rfkill->uevent_work); | ||
478 | |||
479 | return ret; | ||
480 | } | ||
481 | EXPORT_SYMBOL(rfkill_set_hw_state); | ||
482 | |||
483 | static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
484 | { | ||
485 | u32 bit = RFKILL_BLOCK_SW; | ||
486 | |||
487 | /* if in an ops->set_block right now, use the other bit */ | ||
488 | if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) | ||
489 | bit = RFKILL_BLOCK_SW_PREV; | ||
490 | |||
491 | if (blocked) | ||
492 | rfkill->state |= bit; | ||
493 | else | ||
494 | rfkill->state &= ~bit; | ||
495 | } | ||
496 | |||
497 | bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) | ||
498 | { | ||
499 | unsigned long flags; | ||
500 | bool prev, hwblock; | ||
501 | |||
502 | BUG_ON(!rfkill); | ||
503 | |||
504 | spin_lock_irqsave(&rfkill->lock, flags); | ||
505 | prev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
506 | __rfkill_set_sw_state(rfkill, blocked); | ||
507 | hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
508 | blocked = blocked || hwblock; | ||
509 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
510 | |||
511 | if (!rfkill->registered) { | ||
512 | rfkill->persistent = true; | ||
513 | } else { | ||
514 | if (prev != blocked && !hwblock) | ||
515 | schedule_work(&rfkill->uevent_work); | ||
516 | |||
517 | rfkill_led_trigger_event(rfkill); | ||
518 | } | ||
519 | |||
520 | return blocked; | ||
521 | } | ||
522 | EXPORT_SYMBOL(rfkill_set_sw_state); | ||
523 | |||
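Both setters are meant to be called from driver context: rfkill_set_hw_state() when a hardware kill line changes, rfkill_set_sw_state() when firmware reports a software block; each returns the resulting effective block state. A hedged sketch (the mydrv_* names are hypothetical):

	static void mydrv_status_change(struct mydrv *priv)
	{
		bool hw_killed = mydrv_read_kill_pin(priv);

		/* true means the radio must stay off (HW or SW block) */
		if (rfkill_set_hw_state(priv->rfkill, hw_killed))
			mydrv_radio_power(priv, false);
	}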
524 | void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) | ||
525 | { | ||
526 | unsigned long flags; | ||
527 | bool swprev, hwprev; | ||
528 | |||
529 | BUG_ON(!rfkill); | ||
530 | |||
531 | spin_lock_irqsave(&rfkill->lock, flags); | ||
532 | |||
533 | /* | ||
534 | * No need to care about prev/setblock ... this is for uevent only | ||
535 | * and that will get triggered by rfkill_set_block anyway. | ||
536 | */ | ||
537 | swprev = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
538 | hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); | ||
539 | __rfkill_set_sw_state(rfkill, sw); | ||
540 | |||
541 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
542 | |||
543 | if (!rfkill->registered) { | ||
544 | rfkill->persistent = true; | ||
545 | } else { | ||
546 | if (swprev != sw || hwprev != hw) | ||
547 | schedule_work(&rfkill->uevent_work); | ||
548 | |||
549 | rfkill_led_trigger_event(rfkill); | ||
550 | } | ||
551 | } | ||
552 | EXPORT_SYMBOL(rfkill_set_states); | ||
553 | |||
554 | static ssize_t rfkill_name_show(struct device *dev, | ||
555 | struct device_attribute *attr, | ||
556 | char *buf) | ||
557 | { | ||
558 | struct rfkill *rfkill = to_rfkill(dev); | ||
559 | |||
560 | return sprintf(buf, "%s\n", rfkill->name); | ||
561 | } | ||
562 | |||
563 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
564 | { | ||
565 | switch (type) { | ||
566 | case RFKILL_TYPE_WLAN: | ||
567 | return "wlan"; | ||
568 | case RFKILL_TYPE_BLUETOOTH: | ||
569 | return "bluetooth"; | ||
570 | case RFKILL_TYPE_UWB: | ||
571 | return "ultrawideband"; | ||
572 | case RFKILL_TYPE_WIMAX: | ||
573 | return "wimax"; | ||
574 | case RFKILL_TYPE_WWAN: | ||
575 | return "wwan"; | ||
576 | default: | ||
577 | BUG(); | ||
578 | } | ||
579 | |||
580 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1); | ||
581 | } | ||
582 | |||
583 | static ssize_t rfkill_type_show(struct device *dev, | ||
584 | struct device_attribute *attr, | ||
585 | char *buf) | ||
586 | { | ||
587 | struct rfkill *rfkill = to_rfkill(dev); | ||
588 | |||
589 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
590 | } | ||
591 | |||
592 | static ssize_t rfkill_idx_show(struct device *dev, | ||
593 | struct device_attribute *attr, | ||
594 | char *buf) | ||
595 | { | ||
596 | struct rfkill *rfkill = to_rfkill(dev); | ||
597 | |||
598 | return sprintf(buf, "%d\n", rfkill->idx); | ||
599 | } | ||
600 | |||
601 | static u8 user_state_from_blocked(unsigned long state) | ||
602 | { | ||
603 | if (state & RFKILL_BLOCK_HW) | ||
604 | return RFKILL_USER_STATE_HARD_BLOCKED; | ||
605 | if (state & RFKILL_BLOCK_SW) | ||
606 | return RFKILL_USER_STATE_SOFT_BLOCKED; | ||
607 | |||
608 | return RFKILL_USER_STATE_UNBLOCKED; | ||
609 | } | ||
610 | |||
611 | static ssize_t rfkill_state_show(struct device *dev, | ||
612 | struct device_attribute *attr, | ||
613 | char *buf) | ||
614 | { | ||
615 | struct rfkill *rfkill = to_rfkill(dev); | ||
616 | unsigned long flags; | ||
617 | u32 state; | ||
618 | |||
619 | spin_lock_irqsave(&rfkill->lock, flags); | ||
620 | state = rfkill->state; | ||
621 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
622 | |||
623 | return sprintf(buf, "%d\n", user_state_from_blocked(state)); | ||
624 | } | ||
625 | |||
626 | static ssize_t rfkill_state_store(struct device *dev, | ||
627 | struct device_attribute *attr, | ||
628 | const char *buf, size_t count) | ||
629 | { | ||
630 | /* | ||
631 | * The intention was that userspace can only take control over | ||
632 | * a given device when/if rfkill-input doesn't control it due | ||
633 | * to user_claim. Since user_claim is currently unsupported, | ||
634 | * we never support changing the state from userspace -- this | ||
635 | * can be implemented again later. | ||
636 | */ | ||
637 | |||
638 | return -EPERM; | ||
639 | } | ||
640 | |||
641 | static ssize_t rfkill_claim_show(struct device *dev, | ||
642 | struct device_attribute *attr, | ||
643 | char *buf) | ||
644 | { | ||
645 | return sprintf(buf, "%d\n", 0); | ||
646 | } | ||
647 | |||
648 | static ssize_t rfkill_claim_store(struct device *dev, | ||
649 | struct device_attribute *attr, | ||
650 | const char *buf, size_t count) | ||
651 | { | ||
652 | return -EOPNOTSUPP; | ||
653 | } | ||
654 | |||
655 | static struct device_attribute rfkill_dev_attrs[] = { | ||
656 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
657 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
658 | __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), | ||
659 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
660 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
661 | __ATTR_NULL | ||
662 | }; | ||
663 | |||
664 | static void rfkill_release(struct device *dev) | ||
665 | { | ||
666 | struct rfkill *rfkill = to_rfkill(dev); | ||
667 | |||
668 | kfree(rfkill); | ||
669 | } | ||
670 | |||
671 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
672 | { | ||
673 | struct rfkill *rfkill = to_rfkill(dev); | ||
674 | unsigned long flags; | ||
675 | u32 state; | ||
676 | int error; | ||
677 | |||
678 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
679 | if (error) | ||
680 | return error; | ||
681 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
682 | rfkill_get_type_str(rfkill->type)); | ||
683 | if (error) | ||
684 | return error; | ||
685 | spin_lock_irqsave(&rfkill->lock, flags); | ||
686 | state = rfkill->state; | ||
687 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
688 | error = add_uevent_var(env, "RFKILL_STATE=%d", | ||
689 | user_state_from_blocked(state)); | ||
690 | return error; | ||
691 | } | ||
692 | |||
693 | void rfkill_pause_polling(struct rfkill *rfkill) | ||
694 | { | ||
695 | BUG_ON(!rfkill); | ||
696 | |||
697 | if (!rfkill->ops->poll) | ||
698 | return; | ||
699 | |||
700 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
701 | } | ||
702 | EXPORT_SYMBOL(rfkill_pause_polling); | ||
703 | |||
704 | void rfkill_resume_polling(struct rfkill *rfkill) | ||
705 | { | ||
706 | BUG_ON(!rfkill); | ||
707 | |||
708 | if (!rfkill->ops->poll) | ||
709 | return; | ||
710 | |||
711 | schedule_work(&rfkill->poll_work.work); | ||
712 | } | ||
713 | EXPORT_SYMBOL(rfkill_resume_polling); | ||
714 | |||
715 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
716 | { | ||
717 | struct rfkill *rfkill = to_rfkill(dev); | ||
718 | |||
719 | rfkill_pause_polling(rfkill); | ||
720 | |||
721 | rfkill->suspended = true; | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int rfkill_resume(struct device *dev) | ||
727 | { | ||
728 | struct rfkill *rfkill = to_rfkill(dev); | ||
729 | bool cur; | ||
730 | |||
731 | cur = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
732 | rfkill_set_block(rfkill, cur); | ||
733 | |||
734 | rfkill->suspended = false; | ||
735 | |||
736 | rfkill_resume_polling(rfkill); | ||
737 | |||
738 | return 0; | ||
739 | } | ||
740 | |||
741 | static struct class rfkill_class = { | ||
742 | .name = "rfkill", | ||
743 | .dev_release = rfkill_release, | ||
744 | .dev_attrs = rfkill_dev_attrs, | ||
745 | .dev_uevent = rfkill_dev_uevent, | ||
746 | .suspend = rfkill_suspend, | ||
747 | .resume = rfkill_resume, | ||
748 | }; | ||
749 | |||
750 | bool rfkill_blocked(struct rfkill *rfkill) | ||
751 | { | ||
752 | unsigned long flags; | ||
753 | u32 state; | ||
754 | |||
755 | spin_lock_irqsave(&rfkill->lock, flags); | ||
756 | state = rfkill->state; | ||
757 | spin_unlock_irqrestore(&rfkill->lock, flags); | ||
758 | |||
759 | return !!(state & RFKILL_BLOCK_ANY); | ||
760 | } | ||
761 | EXPORT_SYMBOL(rfkill_blocked); | ||
762 | |||
763 | |||
764 | struct rfkill * __must_check rfkill_alloc(const char *name, | ||
765 | struct device *parent, | ||
766 | const enum rfkill_type type, | ||
767 | const struct rfkill_ops *ops, | ||
768 | void *ops_data) | ||
769 | { | ||
770 | struct rfkill *rfkill; | ||
771 | struct device *dev; | ||
772 | |||
773 | if (WARN_ON(!ops)) | ||
774 | return NULL; | ||
775 | |||
776 | if (WARN_ON(!ops->set_block)) | ||
777 | return NULL; | ||
778 | |||
779 | if (WARN_ON(!name)) | ||
780 | return NULL; | ||
781 | |||
782 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | ||
783 | return NULL; | ||
784 | |||
785 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | ||
786 | if (!rfkill) | ||
787 | return NULL; | ||
788 | |||
789 | spin_lock_init(&rfkill->lock); | ||
790 | INIT_LIST_HEAD(&rfkill->node); | ||
791 | rfkill->type = type; | ||
792 | rfkill->name = name; | ||
793 | rfkill->ops = ops; | ||
794 | rfkill->data = ops_data; | ||
795 | |||
796 | dev = &rfkill->dev; | ||
797 | dev->class = &rfkill_class; | ||
798 | dev->parent = parent; | ||
799 | device_initialize(dev); | ||
800 | |||
801 | return rfkill; | ||
802 | } | ||
803 | EXPORT_SYMBOL(rfkill_alloc); | ||
804 | |||
805 | static void rfkill_poll(struct work_struct *work) | ||
806 | { | ||
807 | struct rfkill *rfkill; | ||
808 | |||
809 | rfkill = container_of(work, struct rfkill, poll_work.work); | ||
810 | |||
811 | /* | ||
812 | * Poll hardware state -- driver will use one of the | ||
813 | * rfkill_set{,_hw,_sw}_state functions and use its | ||
814 | * return value to update the current status. | ||
815 | */ | ||
816 | rfkill->ops->poll(rfkill, rfkill->data); | ||
817 | |||
818 | schedule_delayed_work(&rfkill->poll_work, | ||
819 | round_jiffies_relative(POLL_INTERVAL)); | ||
820 | } | ||
821 | |||
822 | static void rfkill_uevent_work(struct work_struct *work) | ||
823 | { | ||
824 | struct rfkill *rfkill; | ||
825 | |||
826 | rfkill = container_of(work, struct rfkill, uevent_work); | ||
827 | |||
828 | mutex_lock(&rfkill_global_mutex); | ||
829 | rfkill_event(rfkill); | ||
830 | mutex_unlock(&rfkill_global_mutex); | ||
831 | } | ||
832 | |||
833 | static void rfkill_sync_work(struct work_struct *work) | ||
834 | { | ||
835 | struct rfkill *rfkill; | ||
836 | bool cur; | ||
837 | |||
838 | rfkill = container_of(work, struct rfkill, sync_work); | ||
839 | |||
840 | mutex_lock(&rfkill_global_mutex); | ||
841 | cur = rfkill_global_states[rfkill->type].cur; | ||
842 | rfkill_set_block(rfkill, cur); | ||
843 | mutex_unlock(&rfkill_global_mutex); | ||
844 | } | ||
845 | |||
846 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
847 | { | ||
848 | static unsigned long rfkill_no; | ||
849 | struct device *dev = &rfkill->dev; | ||
850 | int error; | ||
851 | |||
852 | BUG_ON(!rfkill); | ||
853 | |||
854 | mutex_lock(&rfkill_global_mutex); | ||
855 | |||
856 | if (rfkill->registered) { | ||
857 | error = -EALREADY; | ||
858 | goto unlock; | ||
859 | } | ||
860 | |||
861 | rfkill->idx = rfkill_no; | ||
862 | dev_set_name(dev, "rfkill%lu", rfkill_no); | ||
863 | rfkill_no++; | ||
864 | |||
865 | list_add_tail(&rfkill->node, &rfkill_list); | ||
866 | |||
867 | error = device_add(dev); | ||
868 | if (error) | ||
869 | goto remove; | ||
870 | |||
871 | error = rfkill_led_trigger_register(rfkill); | ||
872 | if (error) | ||
873 | goto devdel; | ||
874 | |||
875 | rfkill->registered = true; | ||
876 | |||
877 | INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); | ||
878 | INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); | ||
879 | INIT_WORK(&rfkill->sync_work, rfkill_sync_work); | ||
880 | |||
881 | if (rfkill->ops->poll) | ||
882 | schedule_delayed_work(&rfkill->poll_work, | ||
883 | round_jiffies_relative(POLL_INTERVAL)); | ||
884 | |||
885 | if (!rfkill->persistent || rfkill_epo_lock_active) { | ||
886 | schedule_work(&rfkill->sync_work); | ||
887 | } else { | ||
888 | #ifdef CONFIG_RFKILL_INPUT | ||
889 | bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); | ||
890 | |||
891 | if (!atomic_read(&rfkill_input_disabled)) | ||
892 | __rfkill_switch_all(rfkill->type, soft_blocked); | ||
893 | #endif | ||
894 | } | ||
895 | |||
896 | rfkill_send_events(rfkill, RFKILL_OP_ADD); | ||
897 | |||
898 | mutex_unlock(&rfkill_global_mutex); | ||
899 | return 0; | ||
900 | |||
901 | devdel: | ||
902 | device_del(&rfkill->dev); | ||
903 | remove: | ||
904 | list_del_init(&rfkill->node); | ||
905 | unlock: | ||
906 | mutex_unlock(&rfkill_global_mutex); | ||
907 | return error; | ||
908 | } | ||
909 | EXPORT_SYMBOL(rfkill_register); | ||
910 | |||
911 | void rfkill_unregister(struct rfkill *rfkill) | ||
912 | { | ||
913 | BUG_ON(!rfkill); | ||
914 | |||
915 | if (rfkill->ops->poll) | ||
916 | cancel_delayed_work_sync(&rfkill->poll_work); | ||
917 | |||
918 | cancel_work_sync(&rfkill->uevent_work); | ||
919 | cancel_work_sync(&rfkill->sync_work); | ||
920 | |||
921 | rfkill->registered = false; | ||
922 | |||
923 | device_del(&rfkill->dev); | ||
924 | |||
925 | mutex_lock(&rfkill_global_mutex); | ||
926 | rfkill_send_events(rfkill, RFKILL_OP_DEL); | ||
927 | list_del_init(&rfkill->node); | ||
928 | mutex_unlock(&rfkill_global_mutex); | ||
929 | |||
930 | rfkill_led_trigger_unregister(rfkill); | ||
931 | } | ||
932 | EXPORT_SYMBOL(rfkill_unregister); | ||
933 | |||
934 | void rfkill_destroy(struct rfkill *rfkill) | ||
935 | { | ||
936 | if (rfkill) | ||
937 | put_device(&rfkill->dev); | ||
938 | } | ||
939 | EXPORT_SYMBOL(rfkill_destroy); | ||
940 | |||
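Taken together, rfkill_alloc/register/unregister/destroy give drivers the whole lifecycle. A hedged probe/remove pairing (priv, pdev and the ops name are hypothetical):

	/* probe */
	priv->rfkill = rfkill_alloc("mydrv-wlan", &pdev->dev,
				    RFKILL_TYPE_WLAN,
				    &mydrv_rfkill_ops, priv);
	if (!priv->rfkill)
		return -ENOMEM;

	err = rfkill_register(priv->rfkill);
	if (err) {
		rfkill_destroy(priv->rfkill);	/* drops the dev reference */
		return err;
	}

	/* remove */
	rfkill_unregister(priv->rfkill);
	rfkill_destroy(priv->rfkill);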
941 | static int rfkill_fop_open(struct inode *inode, struct file *file) | ||
942 | { | ||
943 | struct rfkill_data *data; | ||
944 | struct rfkill *rfkill; | ||
945 | struct rfkill_int_event *ev, *tmp; | ||
946 | |||
947 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
948 | if (!data) | ||
949 | return -ENOMEM; | ||
950 | |||
951 | INIT_LIST_HEAD(&data->events); | ||
952 | mutex_init(&data->mtx); | ||
953 | init_waitqueue_head(&data->read_wait); | ||
954 | |||
955 | mutex_lock(&rfkill_global_mutex); | ||
956 | mutex_lock(&data->mtx); | ||
957 | /* | ||
958 | * start getting events from elsewhere but hold mtx to get | ||
959 | * startup events added first | ||
960 | */ | ||
961 | list_add(&data->list, &rfkill_fds); | ||
962 | |||
963 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
964 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | ||
965 | if (!ev) | ||
966 | goto free; | ||
967 | rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); | ||
968 | list_add_tail(&ev->list, &data->events); | ||
969 | } | ||
970 | mutex_unlock(&data->mtx); | ||
971 | mutex_unlock(&rfkill_global_mutex); | ||
972 | |||
973 | file->private_data = data; | ||
974 | |||
975 | return nonseekable_open(inode, file); | ||
976 | |||
977 | free: | ||
978 | mutex_unlock(&data->mtx); | ||
979 | mutex_unlock(&rfkill_global_mutex); | ||
980 | mutex_destroy(&data->mtx); | ||
981 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
982 | kfree(ev); | ||
983 | kfree(data); | ||
984 | return -ENOMEM; | ||
985 | } | ||
986 | |||
987 | static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) | ||
988 | { | ||
989 | struct rfkill_data *data = file->private_data; | ||
990 | unsigned int res = POLLOUT | POLLWRNORM; | ||
991 | |||
992 | poll_wait(file, &data->read_wait, wait); | ||
993 | |||
994 | mutex_lock(&data->mtx); | ||
995 | if (!list_empty(&data->events)) | ||
996 | res = POLLIN | POLLRDNORM; | ||
997 | mutex_unlock(&data->mtx); | ||
998 | |||
999 | return res; | ||
1000 | } | ||
1001 | |||
1002 | static bool rfkill_readable(struct rfkill_data *data) | ||
1003 | { | ||
1004 | bool r; | ||
1005 | |||
1006 | mutex_lock(&data->mtx); | ||
1007 | r = !list_empty(&data->events); | ||
1008 | mutex_unlock(&data->mtx); | ||
1009 | |||
1010 | return r; | ||
1011 | } | ||
1012 | |||
1013 | static ssize_t rfkill_fop_read(struct file *file, char __user *buf, | ||
1014 | size_t count, loff_t *pos) | ||
1015 | { | ||
1016 | struct rfkill_data *data = file->private_data; | ||
1017 | struct rfkill_int_event *ev; | ||
1018 | unsigned long sz; | ||
1019 | int ret; | ||
1020 | |||
1021 | mutex_lock(&data->mtx); | ||
1022 | |||
1023 | while (list_empty(&data->events)) { | ||
1024 | if (file->f_flags & O_NONBLOCK) { | ||
1025 | ret = -EAGAIN; | ||
1026 | goto out; | ||
1027 | } | ||
1028 | mutex_unlock(&data->mtx); | ||
1029 | ret = wait_event_interruptible(data->read_wait, | ||
1030 | rfkill_readable(data)); | ||
1031 | mutex_lock(&data->mtx); | ||
1032 | |||
1033 | if (ret) | ||
1034 | goto out; | ||
1035 | } | ||
1036 | |||
1037 | ev = list_first_entry(&data->events, struct rfkill_int_event, | ||
1038 | list); | ||
1039 | |||
1040 | sz = min_t(unsigned long, sizeof(ev->ev), count); | ||
1041 | ret = sz; | ||
1042 | if (copy_to_user(buf, &ev->ev, sz)) | ||
1043 | ret = -EFAULT; | ||
1044 | |||
1045 | list_del(&ev->list); | ||
1046 | kfree(ev); | ||
1047 | out: | ||
1048 | mutex_unlock(&data->mtx); | ||
1049 | return ret; | ||
1050 | } | ||
1051 | |||
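The read path hands userspace one struct rfkill_event per read(), and rfkill_fop_open() above pre-queues an RFKILL_OP_ADD event for every registered switch, so a fresh reader first sees the current population. A hedged userspace sketch of an event monitor (error handling trimmed; assumes the struct rfkill_event layout from linux/rfkill.h as introduced by this series):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/rfkill.h>

    int main(void)
    {
            struct rfkill_event ev;
            int fd = open("/dev/rfkill", O_RDONLY);  /* blocking reads */

            if (fd < 0)
                    return 1;

            /* first reads deliver one RFKILL_OP_ADD per existing switch */
            while (read(fd, &ev, sizeof(ev)) > 0)
                    printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
                           ev.idx, ev.type, ev.op, ev.soft, ev.hard);

            close(fd);
            return 0;
    }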
1052 | static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, | ||
1053 | size_t count, loff_t *pos) | ||
1054 | { | ||
1055 | struct rfkill *rfkill; | ||
1056 | struct rfkill_event ev; | ||
1057 | |||
1058 | /* we don't need the 'hard' variable but accept it */ | ||
1059 | if (count < sizeof(ev) - 1) | ||
1060 | return -EINVAL; | ||
1061 | |||
1062 | if (copy_from_user(&ev, buf, sizeof(ev) - 1)) | ||
1063 | return -EFAULT; | ||
1064 | |||
1065 | if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1066 | return -EINVAL; | ||
1067 | |||
1068 | if (ev.type >= NUM_RFKILL_TYPES) | ||
1069 | return -EINVAL; | ||
1070 | |||
1071 | mutex_lock(&rfkill_global_mutex); | ||
1072 | |||
1073 | if (ev.op == RFKILL_OP_CHANGE_ALL) { | ||
1074 | if (ev.type == RFKILL_TYPE_ALL) { | ||
1075 | enum rfkill_type i; | ||
1076 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1077 | rfkill_global_states[i].cur = ev.soft; | ||
1078 | } else { | ||
1079 | rfkill_global_states[ev.type].cur = ev.soft; | ||
1080 | } | ||
1081 | } | ||
1082 | |||
1083 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
1084 | if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) | ||
1085 | continue; | ||
1086 | |||
1087 | if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) | ||
1088 | continue; | ||
1089 | |||
1090 | rfkill_set_block(rfkill, ev.soft); | ||
1091 | } | ||
1092 | mutex_unlock(&rfkill_global_mutex); | ||
1093 | |||
1094 | return count; | ||
1095 | } | ||
1096 | |||
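The write handler is the other half of the /dev/rfkill ABI: userspace can soft-block (or unblock) radios, and the count < sizeof(ev) - 1 check deliberately accepts events that omit the trailing 'hard' byte. A minimal sketch that soft-blocks every radio type via RFKILL_OP_CHANGE_ALL:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/rfkill.h>

    int block_all_radios(void)
    {
            struct rfkill_event ev;
            int fd = open("/dev/rfkill", O_WRONLY);

            if (fd < 0)
                    return -1;

            memset(&ev, 0, sizeof(ev));
            ev.op = RFKILL_OP_CHANGE_ALL;   /* also updates the global defaults */
            ev.type = RFKILL_TYPE_ALL;      /* match every switch type */
            ev.soft = 1;                    /* request soft block */

            if (write(fd, &ev, sizeof(ev)) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }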
1097 | static int rfkill_fop_release(struct inode *inode, struct file *file) | ||
1098 | { | ||
1099 | struct rfkill_data *data = file->private_data; | ||
1100 | struct rfkill_int_event *ev, *tmp; | ||
1101 | |||
1102 | mutex_lock(&rfkill_global_mutex); | ||
1103 | list_del(&data->list); | ||
1104 | mutex_unlock(&rfkill_global_mutex); | ||
1105 | |||
1106 | mutex_destroy(&data->mtx); | ||
1107 | list_for_each_entry_safe(ev, tmp, &data->events, list) | ||
1108 | kfree(ev); | ||
1109 | |||
1110 | #ifdef CONFIG_RFKILL_INPUT | ||
1111 | if (data->input_handler) | ||
1112 | if (atomic_dec_return(&rfkill_input_disabled) == 0) | ||
1113 | printk(KERN_DEBUG "rfkill: input handler enabled\n"); | ||
1114 | #endif | ||
1115 | |||
1116 | kfree(data); | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
1121 | #ifdef CONFIG_RFKILL_INPUT | ||
1122 | static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, | ||
1123 | unsigned long arg) | ||
1124 | { | ||
1125 | struct rfkill_data *data = file->private_data; | ||
1126 | |||
1127 | if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) | ||
1128 | return -ENOSYS; | ||
1129 | |||
1130 | if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT) | ||
1131 | return -ENOSYS; | ||
1132 | |||
1133 | mutex_lock(&data->mtx); | ||
1134 | |||
1135 | if (!data->input_handler) { | ||
1136 | if (atomic_inc_return(&rfkill_input_disabled) == 1) | ||
1137 | printk(KERN_DEBUG "rfkill: input handler disabled\n"); | ||
1138 | data->input_handler = true; | ||
1139 | } | ||
1140 | |||
1141 | mutex_unlock(&data->mtx); | ||
1142 | |||
1143 | return 0; | ||
1144 | } | ||
1145 | #endif | ||
1146 | |||
1147 | static const struct file_operations rfkill_fops = { | ||
1148 | .open = rfkill_fop_open, | ||
1149 | .read = rfkill_fop_read, | ||
1150 | .write = rfkill_fop_write, | ||
1151 | .poll = rfkill_fop_poll, | ||
1152 | .release = rfkill_fop_release, | ||
1153 | #ifdef CONFIG_RFKILL_INPUT | ||
1154 | .unlocked_ioctl = rfkill_fop_ioctl, | ||
1155 | .compat_ioctl = rfkill_fop_ioctl, | ||
1156 | #endif | ||
1157 | }; | ||
1158 | |||
1159 | static struct miscdevice rfkill_miscdev = { | ||
1160 | .name = "rfkill", | ||
1161 | .fops = &rfkill_fops, | ||
1162 | .minor = MISC_DYNAMIC_MINOR, | ||
1163 | }; | ||
1164 | |||
1165 | static int __init rfkill_init(void) | ||
1166 | { | ||
1167 | int error; | ||
1168 | int i; | ||
1169 | |||
1170 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
1171 | rfkill_global_states[i].cur = !rfkill_default_state; | ||
1172 | |||
1173 | error = class_register(&rfkill_class); | ||
1174 | if (error) | ||
1175 | goto out; | ||
1176 | |||
1177 | error = misc_register(&rfkill_miscdev); | ||
1178 | if (error) { | ||
1179 | class_unregister(&rfkill_class); | ||
1180 | goto out; | ||
1181 | } | ||
1182 | |||
1183 | #ifdef CONFIG_RFKILL_INPUT | ||
1184 | error = rfkill_handler_init(); | ||
1185 | if (error) { | ||
1186 | misc_deregister(&rfkill_miscdev); | ||
1187 | class_unregister(&rfkill_class); | ||
1188 | goto out; | ||
1189 | } | ||
1190 | #endif | ||
1191 | |||
1192 | out: | ||
1193 | return error; | ||
1194 | } | ||
1195 | subsys_initcall(rfkill_init); | ||
1196 | |||
1197 | static void __exit rfkill_exit(void) | ||
1198 | { | ||
1199 | #ifdef CONFIG_RFKILL_INPUT | ||
1200 | rfkill_handler_exit(); | ||
1201 | #endif | ||
1202 | misc_deregister(&rfkill_miscdev); | ||
1203 | class_unregister(&rfkill_class); | ||
1204 | } | ||
1205 | module_exit(rfkill_exit); | ||
diff --git a/net/rfkill/input.c b/net/rfkill/input.c new file mode 100644 index 000000000000..a7295ad5f9cb --- /dev/null +++ b/net/rfkill/input.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * Input layer to RF Kill interface connector | ||
3 | * | ||
4 | * Copyright (c) 2007 Dmitry Torokhov | ||
5 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | * | ||
11 | * If you ever run into a situation in which you have a SW_ type rfkill | ||
12 | * input device, then you can revive code that was removed in the patch | ||
13 | * "rfkill-input: remove unused code". | ||
14 | */ | ||
15 | |||
16 | #include <linux/input.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/workqueue.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/rfkill.h> | ||
21 | #include <linux/sched.h> | ||
22 | |||
23 | #include "rfkill.h" | ||
24 | |||
25 | enum rfkill_input_master_mode { | ||
26 | RFKILL_INPUT_MASTER_UNLOCK = 0, | ||
27 | RFKILL_INPUT_MASTER_RESTORE = 1, | ||
28 | RFKILL_INPUT_MASTER_UNBLOCKALL = 2, | ||
29 | NUM_RFKILL_INPUT_MASTER_MODES | ||
30 | }; | ||
31 | |||
32 | /* Delay (in ms) between consecutive switch ops */ | ||
33 | #define RFKILL_OPS_DELAY 200 | ||
34 | |||
35 | static enum rfkill_input_master_mode rfkill_master_switch_mode = | ||
36 | RFKILL_INPUT_MASTER_UNBLOCKALL; | ||
37 | module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); | ||
38 | MODULE_PARM_DESC(master_switch_mode, | ||
39 | "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all"); | ||
40 | |||
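RFKILL itself is tristate, so this parameter can be given either at module load or, for a built-in kernel, on the command line; e.g. (hypothetical value, 1 = restore as documented above):

    modprobe rfkill master_switch_mode=1
    # or, when built in: rfkill.master_switch_mode=1 on the kernel command line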
41 | static spinlock_t rfkill_op_lock; | ||
42 | static bool rfkill_op_pending; | ||
43 | static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; | ||
44 | static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)]; | ||
45 | |||
46 | enum rfkill_sched_op { | ||
47 | RFKILL_GLOBAL_OP_EPO = 0, | ||
48 | RFKILL_GLOBAL_OP_RESTORE, | ||
49 | RFKILL_GLOBAL_OP_UNLOCK, | ||
50 | RFKILL_GLOBAL_OP_UNBLOCK, | ||
51 | }; | ||
52 | |||
53 | static enum rfkill_sched_op rfkill_master_switch_op; | ||
54 | static enum rfkill_sched_op rfkill_op; | ||
55 | |||
56 | static void __rfkill_handle_global_op(enum rfkill_sched_op op) | ||
57 | { | ||
58 | unsigned int i; | ||
59 | |||
60 | switch (op) { | ||
61 | case RFKILL_GLOBAL_OP_EPO: | ||
62 | rfkill_epo(); | ||
63 | break; | ||
64 | case RFKILL_GLOBAL_OP_RESTORE: | ||
65 | rfkill_restore_states(); | ||
66 | break; | ||
67 | case RFKILL_GLOBAL_OP_UNLOCK: | ||
68 | rfkill_remove_epo_lock(); | ||
69 | break; | ||
70 | case RFKILL_GLOBAL_OP_UNBLOCK: | ||
71 | rfkill_remove_epo_lock(); | ||
72 | for (i = 0; i < NUM_RFKILL_TYPES; i++) | ||
73 | rfkill_switch_all(i, false); | ||
74 | break; | ||
75 | default: | ||
76 | /* memory corruption or bug, fail safely */ | ||
77 | rfkill_epo(); | ||
78 | WARN(1, "Unknown requested operation %d! " | ||
79 | "rfkill Emergency Power Off activated\n", | ||
80 | op); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
85 | const bool complement) | ||
86 | { | ||
87 | bool blocked; | ||
88 | |||
89 | blocked = rfkill_get_global_sw_state(type); | ||
90 | if (complement) | ||
91 | blocked = !blocked; | ||
92 | |||
93 | rfkill_switch_all(type, blocked); | ||
94 | } | ||
95 | |||
96 | static void rfkill_op_handler(struct work_struct *work) | ||
97 | { | ||
98 | unsigned int i; | ||
99 | bool c; | ||
100 | |||
101 | spin_lock_irq(&rfkill_op_lock); | ||
102 | do { | ||
103 | if (rfkill_op_pending) { | ||
104 | enum rfkill_sched_op op = rfkill_op; | ||
105 | rfkill_op_pending = false; | ||
106 | memset(rfkill_sw_pending, 0, | ||
107 | sizeof(rfkill_sw_pending)); | ||
108 | spin_unlock_irq(&rfkill_op_lock); | ||
109 | |||
110 | __rfkill_handle_global_op(op); | ||
111 | |||
112 | spin_lock_irq(&rfkill_op_lock); | ||
113 | |||
114 | /* | ||
115 | * handle global ops first -- during unlocked period | ||
116 | * we might have gotten a new global op. | ||
117 | */ | ||
118 | if (rfkill_op_pending) | ||
119 | continue; | ||
120 | } | ||
121 | |||
122 | if (rfkill_is_epo_lock_active()) | ||
123 | continue; | ||
124 | |||
125 | for (i = 0; i < NUM_RFKILL_TYPES; i++) { | ||
126 | if (__test_and_clear_bit(i, rfkill_sw_pending)) { | ||
127 | c = __test_and_clear_bit(i, rfkill_sw_state); | ||
128 | spin_unlock_irq(&rfkill_op_lock); | ||
129 | |||
130 | __rfkill_handle_normal_op(i, c); | ||
131 | |||
132 | spin_lock_irq(&rfkill_op_lock); | ||
133 | } | ||
134 | } | ||
135 | } while (rfkill_op_pending); | ||
136 | spin_unlock_irq(&rfkill_op_lock); | ||
137 | } | ||
138 | |||
139 | static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler); | ||
140 | static unsigned long rfkill_last_scheduled; | ||
141 | |||
142 | static unsigned long rfkill_ratelimit(const unsigned long last) | ||
143 | { | ||
144 | const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); | ||
145 | return (time_after(jiffies, last + delay)) ? 0 : delay; | ||
146 | } | ||
147 | |||
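Note the limiter returns either zero or the whole RFKILL_OPS_DELAY, never the remainder. A hypothetical user-space rendering of the same arithmetic, assuming 1 jiffy == 1 ms for readability:

    /* illustration only: mirrors rfkill_ratelimit() with 1 ms jiffies */
    static unsigned long ratelimit(unsigned long now, unsigned long last)
    {
            const unsigned long delay = 200;        /* RFKILL_OPS_DELAY */
            return (now > last + delay) ? 0 : delay;
    }
    /* ratelimit(250, 200) == 200: only 50 ms elapsed, wait a full 200 ms  */
    /* ratelimit(401, 200) == 0:   window has passed, schedule immediately */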
148 | static void rfkill_schedule_ratelimited(void) | ||
149 | { | ||
150 | if (delayed_work_pending(&rfkill_op_work)) | ||
151 | return; | ||
152 | schedule_delayed_work(&rfkill_op_work, | ||
153 | rfkill_ratelimit(rfkill_last_scheduled)); | ||
154 | rfkill_last_scheduled = jiffies; | ||
155 | } | ||
156 | |||
157 | static void rfkill_schedule_global_op(enum rfkill_sched_op op) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | |||
161 | spin_lock_irqsave(&rfkill_op_lock, flags); | ||
162 | rfkill_op = op; | ||
163 | rfkill_op_pending = true; | ||
164 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | ||
165 | /* bypass the limiter for EPO */ | ||
166 | cancel_delayed_work(&rfkill_op_work); | ||
167 | schedule_delayed_work(&rfkill_op_work, 0); | ||
168 | rfkill_last_scheduled = jiffies; | ||
169 | } else | ||
170 | rfkill_schedule_ratelimited(); | ||
171 | spin_unlock_irqrestore(&rfkill_op_lock, flags); | ||
172 | } | ||
173 | |||
174 | static void rfkill_schedule_toggle(enum rfkill_type type) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | |||
178 | if (rfkill_is_epo_lock_active()) | ||
179 | return; | ||
180 | |||
181 | spin_lock_irqsave(&rfkill_op_lock, flags); | ||
182 | if (!rfkill_op_pending) { | ||
183 | __set_bit(type, rfkill_sw_pending); | ||
184 | __change_bit(type, rfkill_sw_state); | ||
185 | rfkill_schedule_ratelimited(); | ||
186 | } | ||
187 | spin_unlock_irqrestore(&rfkill_op_lock, flags); | ||
188 | } | ||
189 | |||
190 | static void rfkill_schedule_evsw_rfkillall(int state) | ||
191 | { | ||
192 | if (state) | ||
193 | rfkill_schedule_global_op(rfkill_master_switch_op); | ||
194 | else | ||
195 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
196 | } | ||
197 | |||
198 | static void rfkill_event(struct input_handle *handle, unsigned int type, | ||
199 | unsigned int code, int data) | ||
200 | { | ||
201 | if (type == EV_KEY && data == 1) { | ||
202 | switch (code) { | ||
203 | case KEY_WLAN: | ||
204 | rfkill_schedule_toggle(RFKILL_TYPE_WLAN); | ||
205 | break; | ||
206 | case KEY_BLUETOOTH: | ||
207 | rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH); | ||
208 | break; | ||
209 | case KEY_UWB: | ||
210 | rfkill_schedule_toggle(RFKILL_TYPE_UWB); | ||
211 | break; | ||
212 | case KEY_WIMAX: | ||
213 | rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); | ||
214 | break; | ||
215 | } | ||
216 | } else if (type == EV_SW && code == SW_RFKILL_ALL) | ||
217 | rfkill_schedule_evsw_rfkillall(data); | ||
218 | } | ||
219 | |||
220 | static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, | ||
221 | const struct input_device_id *id) | ||
222 | { | ||
223 | struct input_handle *handle; | ||
224 | int error; | ||
225 | |||
226 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
227 | if (!handle) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | handle->dev = dev; | ||
231 | handle->handler = handler; | ||
232 | handle->name = "rfkill"; | ||
233 | |||
234 | /* causes rfkill_start() to be called */ | ||
235 | error = input_register_handle(handle); | ||
236 | if (error) | ||
237 | goto err_free_handle; | ||
238 | |||
239 | error = input_open_device(handle); | ||
240 | if (error) | ||
241 | goto err_unregister_handle; | ||
242 | |||
243 | return 0; | ||
244 | |||
245 | err_unregister_handle: | ||
246 | input_unregister_handle(handle); | ||
247 | err_free_handle: | ||
248 | kfree(handle); | ||
249 | return error; | ||
250 | } | ||
251 | |||
252 | static void rfkill_start(struct input_handle *handle) | ||
253 | { | ||
254 | /* | ||
255 | * Take event_lock to guard against configuration changes, we | ||
256 | * should be able to deal with concurrency with rfkill_event() | ||
257 | * just fine (which event_lock will also avoid). | ||
258 | */ | ||
259 | spin_lock_irq(&handle->dev->event_lock); | ||
260 | |||
261 | if (test_bit(EV_SW, handle->dev->evbit) && | ||
262 | test_bit(SW_RFKILL_ALL, handle->dev->swbit)) | ||
263 | rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, | ||
264 | handle->dev->sw)); | ||
265 | |||
266 | spin_unlock_irq(&handle->dev->event_lock); | ||
267 | } | ||
268 | |||
269 | static void rfkill_disconnect(struct input_handle *handle) | ||
270 | { | ||
271 | input_close_device(handle); | ||
272 | input_unregister_handle(handle); | ||
273 | kfree(handle); | ||
274 | } | ||
275 | |||
276 | static const struct input_device_id rfkill_ids[] = { | ||
277 | { | ||
278 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
279 | .evbit = { BIT_MASK(EV_KEY) }, | ||
280 | .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, | ||
281 | }, | ||
282 | { | ||
283 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
284 | .evbit = { BIT_MASK(EV_KEY) }, | ||
285 | .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, | ||
286 | }, | ||
287 | { | ||
288 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
289 | .evbit = { BIT_MASK(EV_KEY) }, | ||
290 | .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, | ||
291 | }, | ||
292 | { | ||
293 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
294 | .evbit = { BIT_MASK(EV_KEY) }, | ||
295 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | ||
296 | }, | ||
297 | { | ||
298 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
299 | .evbit = { BIT(EV_SW) }, | ||
300 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
301 | }, | ||
302 | { } | ||
303 | }; | ||
304 | |||
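This id table binds the handler to any input device that advertises one of the rfkill hotkeys or the SW_RFKILL_ALL switch. As a hedged sketch, a platform driver only has to declare the capability for its hotkey device to be picked up here (the demo name is hypothetical):

    #include <linux/input.h>

    static int demo_register_hotkeys(void)
    {
            struct input_dev *dev = input_allocate_device();

            if (!dev)
                    return -ENOMEM;

            dev->name = "demo-hotkeys";
            /* advertising EV_KEY/KEY_WLAN matches the first entry above */
            input_set_capability(dev, EV_KEY, KEY_WLAN);

            /* registration makes the input core call rfkill_connect() */
            return input_register_device(dev);
    }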
305 | static struct input_handler rfkill_handler = { | ||
306 | .name = "rfkill", | ||
307 | .event = rfkill_event, | ||
308 | .connect = rfkill_connect, | ||
309 | .start = rfkill_start, | ||
310 | .disconnect = rfkill_disconnect, | ||
311 | .id_table = rfkill_ids, | ||
312 | }; | ||
313 | |||
314 | int __init rfkill_handler_init(void) | ||
315 | { | ||
316 | switch (rfkill_master_switch_mode) { | ||
317 | case RFKILL_INPUT_MASTER_UNBLOCKALL: | ||
318 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK; | ||
319 | break; | ||
320 | case RFKILL_INPUT_MASTER_RESTORE: | ||
321 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE; | ||
322 | break; | ||
323 | case RFKILL_INPUT_MASTER_UNLOCK: | ||
324 | rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK; | ||
325 | break; | ||
326 | default: | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | |||
330 | spin_lock_init(&rfkill_op_lock); | ||
331 | |||
332 | /* Avoid delay at first schedule */ | ||
333 | rfkill_last_scheduled = | ||
334 | jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; | ||
335 | return input_register_handler(&rfkill_handler); | ||
336 | } | ||
337 | |||
338 | void __exit rfkill_handler_exit(void) | ||
339 | { | ||
340 | input_unregister_handler(&rfkill_handler); | ||
341 | cancel_delayed_work_sync(&rfkill_op_work); | ||
342 | } | ||
diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c deleted file mode 100644 index 60a34f3b5f65..000000000000 --- a/net/rfkill/rfkill-input.c +++ /dev/null | |||
@@ -1,390 +0,0 @@ | |||
1 | /* | ||
2 | * Input layer to RF Kill interface connector | ||
3 | * | ||
4 | * Copyright (c) 2007 Dmitry Torokhov | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/input.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/rfkill.h> | ||
19 | #include <linux/sched.h> | ||
20 | |||
21 | #include "rfkill-input.h" | ||
22 | |||
23 | MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); | ||
24 | MODULE_DESCRIPTION("Input layer to RF switch connector"); | ||
25 | MODULE_LICENSE("GPL"); | ||
26 | |||
27 | enum rfkill_input_master_mode { | ||
28 | RFKILL_INPUT_MASTER_DONOTHING = 0, | ||
29 | RFKILL_INPUT_MASTER_RESTORE = 1, | ||
30 | RFKILL_INPUT_MASTER_UNBLOCKALL = 2, | ||
31 | RFKILL_INPUT_MASTER_MAX, /* marker */ | ||
32 | }; | ||
33 | |||
34 | /* Delay (in ms) between consecutive switch ops */ | ||
35 | #define RFKILL_OPS_DELAY 200 | ||
36 | |||
37 | static enum rfkill_input_master_mode rfkill_master_switch_mode = | ||
38 | RFKILL_INPUT_MASTER_UNBLOCKALL; | ||
39 | module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0); | ||
40 | MODULE_PARM_DESC(master_switch_mode, | ||
41 | "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all"); | ||
42 | |||
43 | enum rfkill_global_sched_op { | ||
44 | RFKILL_GLOBAL_OP_EPO = 0, | ||
45 | RFKILL_GLOBAL_OP_RESTORE, | ||
46 | RFKILL_GLOBAL_OP_UNLOCK, | ||
47 | RFKILL_GLOBAL_OP_UNBLOCK, | ||
48 | }; | ||
49 | |||
50 | struct rfkill_task { | ||
51 | struct delayed_work dwork; | ||
52 | |||
53 | /* ensures that task is serialized */ | ||
54 | struct mutex mutex; | ||
55 | |||
56 | /* protects everything below */ | ||
57 | spinlock_t lock; | ||
58 | |||
59 | /* pending regular switch operations (1=pending) */ | ||
60 | unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
61 | |||
62 | /* should the state be complemented (1=yes) */ | ||
63 | unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
64 | |||
65 | bool global_op_pending; | ||
66 | enum rfkill_global_sched_op op; | ||
67 | |||
68 | /* last time it was scheduled */ | ||
69 | unsigned long last_scheduled; | ||
70 | }; | ||
71 | |||
72 | static void __rfkill_handle_global_op(enum rfkill_global_sched_op op) | ||
73 | { | ||
74 | unsigned int i; | ||
75 | |||
76 | switch (op) { | ||
77 | case RFKILL_GLOBAL_OP_EPO: | ||
78 | rfkill_epo(); | ||
79 | break; | ||
80 | case RFKILL_GLOBAL_OP_RESTORE: | ||
81 | rfkill_restore_states(); | ||
82 | break; | ||
83 | case RFKILL_GLOBAL_OP_UNLOCK: | ||
84 | rfkill_remove_epo_lock(); | ||
85 | break; | ||
86 | case RFKILL_GLOBAL_OP_UNBLOCK: | ||
87 | rfkill_remove_epo_lock(); | ||
88 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
89 | rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED); | ||
90 | break; | ||
91 | default: | ||
92 | /* memory corruption or bug, fail safely */ | ||
93 | rfkill_epo(); | ||
94 | WARN(1, "Unknown requested operation %d! " | ||
95 | "rfkill Emergency Power Off activated\n", | ||
96 | op); | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static void __rfkill_handle_normal_op(const enum rfkill_type type, | ||
101 | const bool c) | ||
102 | { | ||
103 | enum rfkill_state state; | ||
104 | |||
105 | state = rfkill_get_global_state(type); | ||
106 | if (c) | ||
107 | state = rfkill_state_complement(state); | ||
108 | |||
109 | rfkill_switch_all(type, state); | ||
110 | } | ||
111 | |||
112 | static void rfkill_task_handler(struct work_struct *work) | ||
113 | { | ||
114 | struct rfkill_task *task = container_of(work, | ||
115 | struct rfkill_task, dwork.work); | ||
116 | bool doit = true; | ||
117 | |||
118 | mutex_lock(&task->mutex); | ||
119 | |||
120 | spin_lock_irq(&task->lock); | ||
121 | while (doit) { | ||
122 | if (task->global_op_pending) { | ||
123 | enum rfkill_global_sched_op op = task->op; | ||
124 | task->global_op_pending = false; | ||
125 | memset(task->sw_pending, 0, sizeof(task->sw_pending)); | ||
126 | spin_unlock_irq(&task->lock); | ||
127 | |||
128 | __rfkill_handle_global_op(op); | ||
129 | |||
130 | /* make sure we do at least one pass with | ||
131 | * !task->global_op_pending */ | ||
132 | spin_lock_irq(&task->lock); | ||
133 | continue; | ||
134 | } else if (!rfkill_is_epo_lock_active()) { | ||
135 | unsigned int i = 0; | ||
136 | |||
137 | while (!task->global_op_pending && | ||
138 | i < RFKILL_TYPE_MAX) { | ||
139 | if (test_and_clear_bit(i, task->sw_pending)) { | ||
140 | bool c; | ||
141 | c = test_and_clear_bit(i, | ||
142 | task->sw_togglestate); | ||
143 | spin_unlock_irq(&task->lock); | ||
144 | |||
145 | __rfkill_handle_normal_op(i, c); | ||
146 | |||
147 | spin_lock_irq(&task->lock); | ||
148 | } | ||
149 | i++; | ||
150 | } | ||
151 | } | ||
152 | doit = task->global_op_pending; | ||
153 | } | ||
154 | spin_unlock_irq(&task->lock); | ||
155 | |||
156 | mutex_unlock(&task->mutex); | ||
157 | } | ||
158 | |||
159 | static struct rfkill_task rfkill_task = { | ||
160 | .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork, | ||
161 | rfkill_task_handler), | ||
162 | .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex), | ||
163 | .lock = __SPIN_LOCK_UNLOCKED(rfkill_task.lock), | ||
164 | }; | ||
165 | |||
166 | static unsigned long rfkill_ratelimit(const unsigned long last) | ||
167 | { | ||
168 | const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); | ||
169 | return (time_after(jiffies, last + delay)) ? 0 : delay; | ||
170 | } | ||
171 | |||
172 | static void rfkill_schedule_ratelimited(void) | ||
173 | { | ||
174 | if (!delayed_work_pending(&rfkill_task.dwork)) { | ||
175 | schedule_delayed_work(&rfkill_task.dwork, | ||
176 | rfkill_ratelimit(rfkill_task.last_scheduled)); | ||
177 | rfkill_task.last_scheduled = jiffies; | ||
178 | } | ||
179 | } | ||
180 | |||
181 | static void rfkill_schedule_global_op(enum rfkill_global_sched_op op) | ||
182 | { | ||
183 | unsigned long flags; | ||
184 | |||
185 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
186 | rfkill_task.op = op; | ||
187 | rfkill_task.global_op_pending = true; | ||
188 | if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { | ||
189 | /* bypass the limiter for EPO */ | ||
190 | cancel_delayed_work(&rfkill_task.dwork); | ||
191 | schedule_delayed_work(&rfkill_task.dwork, 0); | ||
192 | rfkill_task.last_scheduled = jiffies; | ||
193 | } else | ||
194 | rfkill_schedule_ratelimited(); | ||
195 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
196 | } | ||
197 | |||
198 | static void rfkill_schedule_toggle(enum rfkill_type type) | ||
199 | { | ||
200 | unsigned long flags; | ||
201 | |||
202 | if (rfkill_is_epo_lock_active()) | ||
203 | return; | ||
204 | |||
205 | spin_lock_irqsave(&rfkill_task.lock, flags); | ||
206 | if (!rfkill_task.global_op_pending) { | ||
207 | set_bit(type, rfkill_task.sw_pending); | ||
208 | change_bit(type, rfkill_task.sw_togglestate); | ||
209 | rfkill_schedule_ratelimited(); | ||
210 | } | ||
211 | spin_unlock_irqrestore(&rfkill_task.lock, flags); | ||
212 | } | ||
213 | |||
214 | static void rfkill_schedule_evsw_rfkillall(int state) | ||
215 | { | ||
216 | if (state) { | ||
217 | switch (rfkill_master_switch_mode) { | ||
218 | case RFKILL_INPUT_MASTER_UNBLOCKALL: | ||
219 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK); | ||
220 | break; | ||
221 | case RFKILL_INPUT_MASTER_RESTORE: | ||
222 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE); | ||
223 | break; | ||
224 | case RFKILL_INPUT_MASTER_DONOTHING: | ||
225 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK); | ||
226 | break; | ||
227 | default: | ||
228 | /* memory corruption or driver bug! fail safely */ | ||
229 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
230 | WARN(1, "Unknown rfkill_master_switch_mode (%d), " | ||
231 | "driver bug or memory corruption detected!\n", | ||
232 | rfkill_master_switch_mode); | ||
233 | break; | ||
234 | } | ||
235 | } else | ||
236 | rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO); | ||
237 | } | ||
238 | |||
239 | static void rfkill_event(struct input_handle *handle, unsigned int type, | ||
240 | unsigned int code, int data) | ||
241 | { | ||
242 | if (type == EV_KEY && data == 1) { | ||
243 | enum rfkill_type t; | ||
244 | |||
245 | switch (code) { | ||
246 | case KEY_WLAN: | ||
247 | t = RFKILL_TYPE_WLAN; | ||
248 | break; | ||
249 | case KEY_BLUETOOTH: | ||
250 | t = RFKILL_TYPE_BLUETOOTH; | ||
251 | break; | ||
252 | case KEY_UWB: | ||
253 | t = RFKILL_TYPE_UWB; | ||
254 | break; | ||
255 | case KEY_WIMAX: | ||
256 | t = RFKILL_TYPE_WIMAX; | ||
257 | break; | ||
258 | default: | ||
259 | return; | ||
260 | } | ||
261 | rfkill_schedule_toggle(t); | ||
262 | return; | ||
263 | } else if (type == EV_SW) { | ||
264 | switch (code) { | ||
265 | case SW_RFKILL_ALL: | ||
266 | rfkill_schedule_evsw_rfkillall(data); | ||
267 | return; | ||
268 | default: | ||
269 | return; | ||
270 | } | ||
271 | } | ||
272 | } | ||
273 | |||
274 | static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, | ||
275 | const struct input_device_id *id) | ||
276 | { | ||
277 | struct input_handle *handle; | ||
278 | int error; | ||
279 | |||
280 | handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); | ||
281 | if (!handle) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | handle->dev = dev; | ||
285 | handle->handler = handler; | ||
286 | handle->name = "rfkill"; | ||
287 | |||
288 | /* causes rfkill_start() to be called */ | ||
289 | error = input_register_handle(handle); | ||
290 | if (error) | ||
291 | goto err_free_handle; | ||
292 | |||
293 | error = input_open_device(handle); | ||
294 | if (error) | ||
295 | goto err_unregister_handle; | ||
296 | |||
297 | return 0; | ||
298 | |||
299 | err_unregister_handle: | ||
300 | input_unregister_handle(handle); | ||
301 | err_free_handle: | ||
302 | kfree(handle); | ||
303 | return error; | ||
304 | } | ||
305 | |||
306 | static void rfkill_start(struct input_handle *handle) | ||
307 | { | ||
308 | /* Take event_lock to guard against configuration changes, we | ||
309 | * should be able to deal with concurrency with rfkill_event() | ||
310 | * just fine (which event_lock will also avoid). */ | ||
311 | spin_lock_irq(&handle->dev->event_lock); | ||
312 | |||
313 | if (test_bit(EV_SW, handle->dev->evbit)) { | ||
314 | if (test_bit(SW_RFKILL_ALL, handle->dev->swbit)) | ||
315 | rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, | ||
316 | handle->dev->sw)); | ||
317 | /* add resync for further EV_SW events here */ | ||
318 | } | ||
319 | |||
320 | spin_unlock_irq(&handle->dev->event_lock); | ||
321 | } | ||
322 | |||
323 | static void rfkill_disconnect(struct input_handle *handle) | ||
324 | { | ||
325 | input_close_device(handle); | ||
326 | input_unregister_handle(handle); | ||
327 | kfree(handle); | ||
328 | } | ||
329 | |||
330 | static const struct input_device_id rfkill_ids[] = { | ||
331 | { | ||
332 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
333 | .evbit = { BIT_MASK(EV_KEY) }, | ||
334 | .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) }, | ||
335 | }, | ||
336 | { | ||
337 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
338 | .evbit = { BIT_MASK(EV_KEY) }, | ||
339 | .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) }, | ||
340 | }, | ||
341 | { | ||
342 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
343 | .evbit = { BIT_MASK(EV_KEY) }, | ||
344 | .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) }, | ||
345 | }, | ||
346 | { | ||
347 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
348 | .evbit = { BIT_MASK(EV_KEY) }, | ||
349 | .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, | ||
350 | }, | ||
351 | { | ||
352 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, | ||
353 | .evbit = { BIT(EV_SW) }, | ||
354 | .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, | ||
355 | }, | ||
356 | { } | ||
357 | }; | ||
358 | |||
359 | static struct input_handler rfkill_handler = { | ||
360 | .event = rfkill_event, | ||
361 | .connect = rfkill_connect, | ||
362 | .disconnect = rfkill_disconnect, | ||
363 | .start = rfkill_start, | ||
364 | .name = "rfkill", | ||
365 | .id_table = rfkill_ids, | ||
366 | }; | ||
367 | |||
368 | static int __init rfkill_handler_init(void) | ||
369 | { | ||
370 | if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX) | ||
371 | return -EINVAL; | ||
372 | |||
373 | /* | ||
374 | * The penalty for not doing this is a possible RFKILL_OPS_DELAY delay | ||
375 | * at the first use. Acceptable, but if we can avoid it, why not? | ||
376 | */ | ||
377 | rfkill_task.last_scheduled = | ||
378 | jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1; | ||
379 | return input_register_handler(&rfkill_handler); | ||
380 | } | ||
381 | |||
382 | static void __exit rfkill_handler_exit(void) | ||
383 | { | ||
384 | input_unregister_handler(&rfkill_handler); | ||
385 | cancel_delayed_work_sync(&rfkill_task.dwork); | ||
386 | rfkill_remove_epo_lock(); | ||
387 | } | ||
388 | |||
389 | module_init(rfkill_handler_init); | ||
390 | module_exit(rfkill_handler_exit); | ||
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c deleted file mode 100644 index 4f5a83183c95..000000000000 --- a/net/rfkill/rfkill.c +++ /dev/null | |||
@@ -1,855 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 - 2007 Ivo van Doorn | ||
3 | * Copyright (C) 2007 Dmitry Torokhov | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the | ||
17 | * Free Software Foundation, Inc., | ||
18 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/workqueue.h> | ||
25 | #include <linux/capability.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/rfkill.h> | ||
29 | |||
30 | /* Get declaration of rfkill_switch_all() to shut up sparse. */ | ||
31 | #include "rfkill-input.h" | ||
32 | |||
33 | |||
34 | MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>"); | ||
35 | MODULE_VERSION("1.0"); | ||
36 | MODULE_DESCRIPTION("RF switch support"); | ||
37 | MODULE_LICENSE("GPL"); | ||
38 | |||
39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | ||
40 | static DEFINE_MUTEX(rfkill_global_mutex); | ||
41 | |||
42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; | ||
43 | module_param_named(default_state, rfkill_default_state, uint, 0444); | ||
44 | MODULE_PARM_DESC(default_state, | ||
45 | "Default initial state for all radio types, 0 = radio off"); | ||
46 | |||
47 | struct rfkill_gsw_state { | ||
48 | enum rfkill_state current_state; | ||
49 | enum rfkill_state default_state; | ||
50 | }; | ||
51 | |||
52 | static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX]; | ||
53 | static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
54 | static bool rfkill_epo_lock_active; | ||
55 | |||
56 | |||
57 | #ifdef CONFIG_RFKILL_LEDS | ||
58 | static void rfkill_led_trigger(struct rfkill *rfkill, | ||
59 | enum rfkill_state state) | ||
60 | { | ||
61 | struct led_trigger *led = &rfkill->led_trigger; | ||
62 | |||
63 | if (!led->name) | ||
64 | return; | ||
65 | if (state != RFKILL_STATE_UNBLOCKED) | ||
66 | led_trigger_event(led, LED_OFF); | ||
67 | else | ||
68 | led_trigger_event(led, LED_FULL); | ||
69 | } | ||
70 | |||
71 | static void rfkill_led_trigger_activate(struct led_classdev *led) | ||
72 | { | ||
73 | struct rfkill *rfkill = container_of(led->trigger, | ||
74 | struct rfkill, led_trigger); | ||
75 | |||
76 | rfkill_led_trigger(rfkill, rfkill->state); | ||
77 | } | ||
78 | #else | ||
79 | static inline void rfkill_led_trigger(struct rfkill *rfkill, | ||
80 | enum rfkill_state state) | ||
81 | { | ||
82 | } | ||
83 | #endif /* CONFIG_RFKILL_LEDS */ | ||
84 | |||
85 | static void rfkill_uevent(struct rfkill *rfkill) | ||
86 | { | ||
87 | kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); | ||
88 | } | ||
89 | |||
90 | static void update_rfkill_state(struct rfkill *rfkill) | ||
91 | { | ||
92 | enum rfkill_state newstate, oldstate; | ||
93 | |||
94 | if (rfkill->get_state) { | ||
95 | mutex_lock(&rfkill->mutex); | ||
96 | if (!rfkill->get_state(rfkill->data, &newstate)) { | ||
97 | oldstate = rfkill->state; | ||
98 | rfkill->state = newstate; | ||
99 | if (oldstate != newstate) | ||
100 | rfkill_uevent(rfkill); | ||
101 | } | ||
102 | mutex_unlock(&rfkill->mutex); | ||
103 | } | ||
104 | rfkill_led_trigger(rfkill, rfkill->state); | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * rfkill_toggle_radio - wrapper for toggle_radio hook | ||
109 | * @rfkill: the rfkill struct to use | ||
110 | * @force: calls toggle_radio even if cache says it is not needed, | ||
111 | * and also makes sure notifications of the state will be | ||
112 | * sent even if it didn't change | ||
113 | * @state: the new state to call toggle_radio() with | ||
114 | * | ||
115 | * Calls rfkill->toggle_radio, enforcing the API for toggle_radio | ||
116 | * calls and handling all the red tape such as issuing notifications | ||
117 | * if the call is successful. | ||
118 | * | ||
119 | * Suspended devices are not touched at all, and -EAGAIN is returned. | ||
120 | * | ||
121 | * Note that the @force parameter cannot override a (possibly cached) | ||
122 | * state of RFKILL_STATE_HARD_BLOCKED. Any device making use of | ||
123 | * RFKILL_STATE_HARD_BLOCKED implements either get_state() or | ||
124 | * rfkill_force_state(), so the cache either is bypassed or valid. | ||
125 | * | ||
126 | * Note that we do call toggle_radio for RFKILL_STATE_SOFT_BLOCKED | ||
127 | * even if the radio is in RFKILL_STATE_HARD_BLOCKED state, so as to | ||
128 | * give the driver a hint that it should double-BLOCK the transmitter. | ||
129 | * | ||
130 | * Caller must have acquired rfkill->mutex. | ||
131 | */ | ||
132 | static int rfkill_toggle_radio(struct rfkill *rfkill, | ||
133 | enum rfkill_state state, | ||
134 | int force) | ||
135 | { | ||
136 | int retval = 0; | ||
137 | enum rfkill_state oldstate, newstate; | ||
138 | |||
139 | if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) | ||
140 | return -EBUSY; | ||
141 | |||
142 | oldstate = rfkill->state; | ||
143 | |||
144 | if (rfkill->get_state && !force && | ||
145 | !rfkill->get_state(rfkill->data, &newstate)) { | ||
146 | rfkill->state = newstate; | ||
147 | } | ||
148 | |||
149 | switch (state) { | ||
150 | case RFKILL_STATE_HARD_BLOCKED: | ||
151 | /* typically happens when refreshing hardware state, | ||
152 | * such as on resume */ | ||
153 | state = RFKILL_STATE_SOFT_BLOCKED; | ||
154 | break; | ||
155 | case RFKILL_STATE_UNBLOCKED: | ||
156 | /* force can't override this, only rfkill_force_state() can */ | ||
157 | if (rfkill->state == RFKILL_STATE_HARD_BLOCKED) | ||
158 | return -EPERM; | ||
159 | break; | ||
160 | case RFKILL_STATE_SOFT_BLOCKED: | ||
161 | /* nothing to do, we want to give drivers the hint to double | ||
162 | * BLOCK even a transmitter that is already in state | ||
163 | * RFKILL_STATE_HARD_BLOCKED */ | ||
164 | break; | ||
165 | default: | ||
166 | WARN(1, KERN_WARNING | ||
167 | "rfkill: illegal state %d passed as parameter " | ||
168 | "to rfkill_toggle_radio\n", state); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | |||
172 | if (force || state != rfkill->state) { | ||
173 | retval = rfkill->toggle_radio(rfkill->data, state); | ||
174 | /* never allow a HARD->SOFT downgrade! */ | ||
175 | if (!retval && rfkill->state != RFKILL_STATE_HARD_BLOCKED) | ||
176 | rfkill->state = state; | ||
177 | } | ||
178 | |||
179 | if (force || rfkill->state != oldstate) | ||
180 | rfkill_uevent(rfkill); | ||
181 | |||
182 | rfkill_led_trigger(rfkill, rfkill->state); | ||
183 | return retval; | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * __rfkill_switch_all - Toggle state of all switches of given type | ||
188 | * @type: type of interfaces to be affected | ||
189 | * @state: the new state | ||
190 | * | ||
191 | * This function toggles the state of all switches of given type, | ||
192 | * unless a specific switch is claimed by userspace (in which case, | ||
193 | * that switch is left alone) or suspended. | ||
194 | * | ||
195 | * Caller must have acquired rfkill_global_mutex. | ||
196 | */ | ||
197 | static void __rfkill_switch_all(const enum rfkill_type type, | ||
198 | const enum rfkill_state state) | ||
199 | { | ||
200 | struct rfkill *rfkill; | ||
201 | |||
202 | if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX), | ||
203 | KERN_WARNING | ||
204 | "rfkill: illegal state %d or type %d " | ||
205 | "passed as parameter to __rfkill_switch_all\n", | ||
206 | state, type)) | ||
207 | return; | ||
208 | |||
209 | rfkill_global_states[type].current_state = state; | ||
210 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
211 | if (rfkill->type == type) { | ||
212 | mutex_lock(&rfkill->mutex); | ||
213 | rfkill_toggle_radio(rfkill, state, 0); | ||
214 | mutex_unlock(&rfkill->mutex); | ||
215 | rfkill_led_trigger(rfkill, rfkill->state); | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * rfkill_switch_all - Toggle state of all switches of given type | ||
222 | * @type: type of interfaces to be affected | ||
223 | * @state: the new state | ||
224 | * | ||
225 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
226 | * Please refer to __rfkill_switch_all() for details. | ||
227 | * | ||
228 | * Does nothing if the EPO lock is active. | ||
229 | */ | ||
230 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | ||
231 | { | ||
232 | mutex_lock(&rfkill_global_mutex); | ||
233 | if (!rfkill_epo_lock_active) | ||
234 | __rfkill_switch_all(type, state); | ||
235 | mutex_unlock(&rfkill_global_mutex); | ||
236 | } | ||
237 | EXPORT_SYMBOL(rfkill_switch_all); | ||
238 | |||
239 | /** | ||
240 | * rfkill_epo - emergency power off all transmitters | ||
241 | * | ||
242 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | ||
243 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. | ||
244 | * | ||
245 | * The global state before the EPO is saved and can be restored later | ||
246 | * using rfkill_restore_states(). | ||
247 | */ | ||
248 | void rfkill_epo(void) | ||
249 | { | ||
250 | struct rfkill *rfkill; | ||
251 | int i; | ||
252 | |||
253 | mutex_lock(&rfkill_global_mutex); | ||
254 | |||
255 | rfkill_epo_lock_active = true; | ||
256 | list_for_each_entry(rfkill, &rfkill_list, node) { | ||
257 | mutex_lock(&rfkill->mutex); | ||
258 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
259 | mutex_unlock(&rfkill->mutex); | ||
260 | } | ||
261 | for (i = 0; i < RFKILL_TYPE_MAX; i++) { | ||
262 | rfkill_global_states[i].default_state = | ||
263 | rfkill_global_states[i].current_state; | ||
264 | rfkill_global_states[i].current_state = | ||
265 | RFKILL_STATE_SOFT_BLOCKED; | ||
266 | } | ||
267 | mutex_unlock(&rfkill_global_mutex); | ||
268 | rfkill_led_trigger(rfkill, rfkill->state); | ||
269 | } | ||
270 | EXPORT_SYMBOL_GPL(rfkill_epo); | ||
271 | |||
272 | /** | ||
273 | * rfkill_restore_states - restore global states | ||
274 | * | ||
275 | * Restore (and sync switches to) the global state from the | ||
276 | * default states saved in rfkill_global_states. This can undo the effects of | ||
277 | * a call to rfkill_epo(). | ||
278 | */ | ||
279 | void rfkill_restore_states(void) | ||
280 | { | ||
281 | int i; | ||
282 | |||
283 | mutex_lock(&rfkill_global_mutex); | ||
284 | |||
285 | rfkill_epo_lock_active = false; | ||
286 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
287 | __rfkill_switch_all(i, rfkill_global_states[i].default_state); | ||
288 | mutex_unlock(&rfkill_global_mutex); | ||
289 | } | ||
290 | EXPORT_SYMBOL_GPL(rfkill_restore_states); | ||
291 | |||
292 | /** | ||
293 | * rfkill_remove_epo_lock - unlock state changes | ||
294 | * | ||
295 | * Used by rfkill-input to manually unlock state changes when | ||
296 | * the EPO switch is deactivated. | ||
297 | */ | ||
298 | void rfkill_remove_epo_lock(void) | ||
299 | { | ||
300 | mutex_lock(&rfkill_global_mutex); | ||
301 | rfkill_epo_lock_active = false; | ||
302 | mutex_unlock(&rfkill_global_mutex); | ||
303 | } | ||
304 | EXPORT_SYMBOL_GPL(rfkill_remove_epo_lock); | ||
305 | |||
306 | /** | ||
307 | * rfkill_is_epo_lock_active - returns true if EPO is active | ||
308 | * | ||
309 | * Returns 0 (false) if there is NOT an active EPO condition, | ||
310 | * and 1 (true) if there is an active EPO condition, which | ||
311 | * locks all radios in one of the BLOCKED states. | ||
312 | * | ||
313 | * Can be called in atomic context. | ||
314 | */ | ||
315 | bool rfkill_is_epo_lock_active(void) | ||
316 | { | ||
317 | return rfkill_epo_lock_active; | ||
318 | } | ||
319 | EXPORT_SYMBOL_GPL(rfkill_is_epo_lock_active); | ||
320 | |||
321 | /** | ||
322 | * rfkill_get_global_state - returns global state for a type | ||
323 | * @type: the type to get the global state of | ||
324 | * | ||
325 | * Returns the current global state for a given wireless | ||
326 | * device type. | ||
327 | */ | ||
328 | enum rfkill_state rfkill_get_global_state(const enum rfkill_type type) | ||
329 | { | ||
330 | return rfkill_global_states[type].current_state; | ||
331 | } | ||
332 | EXPORT_SYMBOL_GPL(rfkill_get_global_state); | ||
333 | |||
334 | /** | ||
335 | * rfkill_force_state - Force the internal rfkill radio state | ||
336 | * @rfkill: pointer to the rfkill class to modify. | ||
337 | * @state: the current radio state the class should be forced to. | ||
338 | * | ||
339 | * This function updates the internal state of the radio cached | ||
340 | * by the rfkill class. It should be used when the driver gets | ||
341 | * a notification by the firmware/hardware of the current *real* | ||
342 | * state of the radio rfkill switch. | ||
343 | * | ||
344 | * Devices which are subject to external changes on their rfkill | ||
345 | * state (such as those caused by a hardware rfkill line) MUST | ||
346 | * have their driver arrange to call rfkill_force_state() as soon | ||
347 | * as possible after such a change. | ||
348 | * | ||
349 | * This function may not be called from an atomic context. | ||
350 | */ | ||
351 | int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state) | ||
352 | { | ||
353 | enum rfkill_state oldstate; | ||
354 | |||
355 | BUG_ON(!rfkill); | ||
356 | if (WARN((state >= RFKILL_STATE_MAX), | ||
357 | KERN_WARNING | ||
358 | "rfkill: illegal state %d passed as parameter " | ||
359 | "to rfkill_force_state\n", state)) | ||
360 | return -EINVAL; | ||
361 | |||
362 | mutex_lock(&rfkill->mutex); | ||
363 | |||
364 | oldstate = rfkill->state; | ||
365 | rfkill->state = state; | ||
366 | |||
367 | if (state != oldstate) | ||
368 | rfkill_uevent(rfkill); | ||
369 | |||
370 | mutex_unlock(&rfkill->mutex); | ||
371 | rfkill_led_trigger(rfkill, rfkill->state); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | EXPORT_SYMBOL(rfkill_force_state); | ||
376 | |||
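In the old API removed here, a driver with a hardware kill line was expected to funnel every firmware/hardware notification through rfkill_force_state(). A hedged sketch of that arrangement — the demo_* names are hypothetical, and the work item exists because the comment above forbids atomic context:

    static struct rfkill *demo_rfkill;      /* hypothetical driver state */
    static bool demo_hw_killed;

    static void demo_rfkill_work(struct work_struct *work)
    {
            rfkill_force_state(demo_rfkill,
                               demo_hw_killed ? RFKILL_STATE_HARD_BLOCKED
                                              : RFKILL_STATE_UNBLOCKED);
    }
    static DECLARE_WORK(demo_work, demo_rfkill_work);

    /* called from the driver's (atomic) hardware-switch interrupt */
    static void demo_hw_switch_isr(bool killed)
    {
            demo_hw_killed = killed;
            schedule_work(&demo_work);      /* rfkill_force_state() may sleep */
    }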
377 | static ssize_t rfkill_name_show(struct device *dev, | ||
378 | struct device_attribute *attr, | ||
379 | char *buf) | ||
380 | { | ||
381 | struct rfkill *rfkill = to_rfkill(dev); | ||
382 | |||
383 | return sprintf(buf, "%s\n", rfkill->name); | ||
384 | } | ||
385 | |||
386 | static const char *rfkill_get_type_str(enum rfkill_type type) | ||
387 | { | ||
388 | switch (type) { | ||
389 | case RFKILL_TYPE_WLAN: | ||
390 | return "wlan"; | ||
391 | case RFKILL_TYPE_BLUETOOTH: | ||
392 | return "bluetooth"; | ||
393 | case RFKILL_TYPE_UWB: | ||
394 | return "ultrawideband"; | ||
395 | case RFKILL_TYPE_WIMAX: | ||
396 | return "wimax"; | ||
397 | case RFKILL_TYPE_WWAN: | ||
398 | return "wwan"; | ||
399 | default: | ||
400 | BUG(); | ||
401 | } | ||
402 | } | ||
403 | |||
404 | static ssize_t rfkill_type_show(struct device *dev, | ||
405 | struct device_attribute *attr, | ||
406 | char *buf) | ||
407 | { | ||
408 | struct rfkill *rfkill = to_rfkill(dev); | ||
409 | |||
410 | return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); | ||
411 | } | ||
412 | |||
413 | static ssize_t rfkill_state_show(struct device *dev, | ||
414 | struct device_attribute *attr, | ||
415 | char *buf) | ||
416 | { | ||
417 | struct rfkill *rfkill = to_rfkill(dev); | ||
418 | |||
419 | update_rfkill_state(rfkill); | ||
420 | return sprintf(buf, "%d\n", rfkill->state); | ||
421 | } | ||
422 | |||
423 | static ssize_t rfkill_state_store(struct device *dev, | ||
424 | struct device_attribute *attr, | ||
425 | const char *buf, size_t count) | ||
426 | { | ||
427 | struct rfkill *rfkill = to_rfkill(dev); | ||
428 | unsigned long state; | ||
429 | int error; | ||
430 | |||
431 | if (!capable(CAP_NET_ADMIN)) | ||
432 | return -EPERM; | ||
433 | |||
434 | error = strict_strtoul(buf, 0, &state); | ||
435 | if (error) | ||
436 | return error; | ||
437 | |||
438 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
439 | if (state != RFKILL_STATE_UNBLOCKED && | ||
440 | state != RFKILL_STATE_SOFT_BLOCKED) | ||
441 | return -EINVAL; | ||
442 | |||
443 | error = mutex_lock_killable(&rfkill->mutex); | ||
444 | if (error) | ||
445 | return error; | ||
446 | |||
447 | if (!rfkill_epo_lock_active) | ||
448 | error = rfkill_toggle_radio(rfkill, state, 0); | ||
449 | else | ||
450 | error = -EPERM; | ||
451 | |||
452 | mutex_unlock(&rfkill->mutex); | ||
453 | |||
454 | return error ? error : count; | ||
455 | } | ||
456 | |||
457 | static ssize_t rfkill_claim_show(struct device *dev, | ||
458 | struct device_attribute *attr, | ||
459 | char *buf) | ||
460 | { | ||
461 | return sprintf(buf, "%d\n", 0); | ||
462 | } | ||
463 | |||
464 | static ssize_t rfkill_claim_store(struct device *dev, | ||
465 | struct device_attribute *attr, | ||
466 | const char *buf, size_t count) | ||
467 | { | ||
468 | return -EOPNOTSUPP; | ||
469 | } | ||
470 | |||
471 | static struct device_attribute rfkill_dev_attrs[] = { | ||
472 | __ATTR(name, S_IRUGO, rfkill_name_show, NULL), | ||
473 | __ATTR(type, S_IRUGO, rfkill_type_show, NULL), | ||
474 | __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), | ||
475 | __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), | ||
476 | __ATTR_NULL | ||
477 | }; | ||
478 | |||
479 | static void rfkill_release(struct device *dev) | ||
480 | { | ||
481 | struct rfkill *rfkill = to_rfkill(dev); | ||
482 | |||
483 | kfree(rfkill); | ||
484 | module_put(THIS_MODULE); | ||
485 | } | ||
486 | |||
487 | #ifdef CONFIG_PM | ||
488 | static int rfkill_suspend(struct device *dev, pm_message_t state) | ||
489 | { | ||
490 | struct rfkill *rfkill = to_rfkill(dev); | ||
491 | |||
492 | /* mark class device as suspended */ | ||
493 | if (dev->power.power_state.event != state.event) | ||
494 | dev->power.power_state = state; | ||
495 | |||
496 | /* store state for the resume handler */ | ||
497 | rfkill->state_for_resume = rfkill->state; | ||
498 | |||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static int rfkill_resume(struct device *dev) | ||
503 | { | ||
504 | struct rfkill *rfkill = to_rfkill(dev); | ||
505 | enum rfkill_state newstate; | ||
506 | |||
507 | if (dev->power.power_state.event != PM_EVENT_ON) { | ||
508 | mutex_lock(&rfkill->mutex); | ||
509 | |||
510 | dev->power.power_state.event = PM_EVENT_ON; | ||
511 | |||
512 | /* | ||
513 | * rfkill->state could have been modified before we got | ||
514 | * called, and won't be updated by rfkill_toggle_radio() | ||
515 | * in force mode. Sync it FIRST. | ||
516 | */ | ||
517 | if (rfkill->get_state && | ||
518 | !rfkill->get_state(rfkill->data, &newstate)) | ||
519 | rfkill->state = newstate; | ||
520 | |||
521 | /* | ||
522 | * If we are under EPO, kick transmitter offline, | ||
523 | * otherwise restore to pre-suspend state. | ||
524 | * | ||
525 | * Issue a notification in any case | ||
526 | */ | ||
527 | rfkill_toggle_radio(rfkill, | ||
528 | rfkill_epo_lock_active ? | ||
529 | RFKILL_STATE_SOFT_BLOCKED : | ||
530 | rfkill->state_for_resume, | ||
531 | 1); | ||
532 | |||
533 | mutex_unlock(&rfkill->mutex); | ||
534 | rfkill_led_trigger(rfkill, rfkill->state); | ||
535 | } | ||
536 | |||
537 | return 0; | ||
538 | } | ||
539 | #else | ||
540 | #define rfkill_suspend NULL | ||
541 | #define rfkill_resume NULL | ||
542 | #endif | ||
543 | |||
544 | static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
545 | { | ||
546 | struct rfkill *rfkill = to_rfkill(dev); | ||
547 | int error; | ||
548 | |||
549 | error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); | ||
550 | if (error) | ||
551 | return error; | ||
552 | error = add_uevent_var(env, "RFKILL_TYPE=%s", | ||
553 | rfkill_get_type_str(rfkill->type)); | ||
554 | if (error) | ||
555 | return error; | ||
556 | error = add_uevent_var(env, "RFKILL_STATE=%d", rfkill->state); | ||
557 | return error; | ||
558 | } | ||
559 | |||
560 | static struct class rfkill_class = { | ||
561 | .name = "rfkill", | ||
562 | .dev_release = rfkill_release, | ||
563 | .dev_attrs = rfkill_dev_attrs, | ||
564 | .suspend = rfkill_suspend, | ||
565 | .resume = rfkill_resume, | ||
566 | .dev_uevent = rfkill_dev_uevent, | ||
567 | }; | ||
568 | |||
569 | static int rfkill_check_duplicity(const struct rfkill *rfkill) | ||
570 | { | ||
571 | struct rfkill *p; | ||
572 | unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
573 | |||
574 | memset(seen, 0, sizeof(seen)); | ||
575 | |||
576 | list_for_each_entry(p, &rfkill_list, node) { | ||
577 | if (WARN((p == rfkill), KERN_WARNING | ||
578 | "rfkill: illegal attempt to register " | ||
579 | "an already registered rfkill struct\n")) | ||
580 | return -EEXIST; | ||
581 | set_bit(p->type, seen); | ||
582 | } | ||
583 | |||
584 | /* 0: first switch of its kind */ | ||
585 | return (test_bit(rfkill->type, seen)) ? 1 : 0; | ||
586 | } | ||
587 | |||
588 | static int rfkill_add_switch(struct rfkill *rfkill) | ||
589 | { | ||
590 | int error; | ||
591 | |||
592 | mutex_lock(&rfkill_global_mutex); | ||
593 | |||
594 | error = rfkill_check_duplicity(rfkill); | ||
595 | if (error < 0) | ||
596 | goto unlock_out; | ||
597 | |||
598 | if (!error) { | ||
599 | /* lock default after first use */ | ||
600 | set_bit(rfkill->type, rfkill_states_lockdflt); | ||
601 | rfkill_global_states[rfkill->type].current_state = | ||
602 | rfkill_global_states[rfkill->type].default_state; | ||
603 | } | ||
604 | |||
605 | rfkill_toggle_radio(rfkill, | ||
606 | rfkill_global_states[rfkill->type].current_state, | ||
607 | 0); | ||
608 | |||
609 | list_add_tail(&rfkill->node, &rfkill_list); | ||
610 | |||
611 | error = 0; | ||
612 | unlock_out: | ||
613 | mutex_unlock(&rfkill_global_mutex); | ||
614 | |||
615 | return error; | ||
616 | } | ||
617 | |||
618 | static void rfkill_remove_switch(struct rfkill *rfkill) | ||
619 | { | ||
620 | mutex_lock(&rfkill_global_mutex); | ||
621 | list_del_init(&rfkill->node); | ||
622 | mutex_unlock(&rfkill_global_mutex); | ||
623 | |||
624 | mutex_lock(&rfkill->mutex); | ||
625 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | ||
626 | mutex_unlock(&rfkill->mutex); | ||
627 | } | ||
628 | |||
629 | /** | ||
630 | * rfkill_allocate - allocate memory for rfkill structure. | ||
631 | * @parent: device that has rf switch on it | ||
632 | * @type: type of the switch (RFKILL_TYPE_*) | ||
633 | * | ||
634 | * This function should be called by the network driver when it needs | ||
635 | * rfkill structure. Once the structure is allocated the driver should | ||
636 | * finish its initialization by setting the name, private data and the | ||
637 | * toggle_radio method, and then register it with rfkill_register(). | ||
638 | * | ||
639 | * NOTE: If registration fails, the structure should be freed by calling | ||
640 | * rfkill_free() otherwise rfkill_unregister() should be used. | ||
641 | */ | ||
642 | struct rfkill * __must_check rfkill_allocate(struct device *parent, | ||
643 | enum rfkill_type type) | ||
644 | { | ||
645 | struct rfkill *rfkill; | ||
646 | struct device *dev; | ||
647 | |||
648 | if (WARN((type >= RFKILL_TYPE_MAX), | ||
649 | KERN_WARNING | ||
650 | "rfkill: illegal type %d passed as parameter " | ||
651 | "to rfkill_allocate\n", type)) | ||
652 | return NULL; | ||
653 | |||
654 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); | ||
655 | if (!rfkill) | ||
656 | return NULL; | ||
657 | |||
658 | mutex_init(&rfkill->mutex); | ||
659 | INIT_LIST_HEAD(&rfkill->node); | ||
660 | rfkill->type = type; | ||
661 | |||
662 | dev = &rfkill->dev; | ||
663 | dev->class = &rfkill_class; | ||
664 | dev->parent = parent; | ||
665 | device_initialize(dev); | ||
666 | |||
667 | __module_get(THIS_MODULE); | ||
668 | |||
669 | return rfkill; | ||
670 | } | ||
671 | EXPORT_SYMBOL(rfkill_allocate); | ||
672 | |||
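The flow this comment describes, as a hedged sketch against the old API (demo_* names are hypothetical): allocate, fill in name, data and toggle_radio, then register, calling rfkill_free() only if registration fails:

    static int demo_toggle_radio(void *data, enum rfkill_state state)
    {
            /* hypothetical hook: program the transmitter on/off */
            return 0;
    }

    static struct rfkill *demo_setup(struct device *parent)
    {
            struct rfkill *rfkill = rfkill_allocate(parent, RFKILL_TYPE_WLAN);

            if (!rfkill)
                    return NULL;

            rfkill->name = "demo-wlan";
            rfkill->data = NULL;                    /* driver private data */
            rfkill->state = RFKILL_STATE_UNBLOCKED;
            rfkill->toggle_radio = demo_toggle_radio;

            if (rfkill_register(rfkill)) {
                    rfkill_free(rfkill);            /* failed: free, never unregister */
                    return NULL;
            }
            return rfkill;
    }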
673 | /** | ||
674 | * rfkill_free - Mark rfkill structure for deletion | ||
675 | * @rfkill: rfkill structure to be destroyed | ||
676 | * | ||
677 | * Decrements reference count of the rfkill structure so it is destroyed. | ||
678 | * Note that rfkill_free() should _not_ be called after rfkill_unregister(). | ||
679 | */ | ||
680 | void rfkill_free(struct rfkill *rfkill) | ||
681 | { | ||
682 | if (rfkill) | ||
683 | put_device(&rfkill->dev); | ||
684 | } | ||
685 | EXPORT_SYMBOL(rfkill_free); | ||
686 | |||
687 | static void rfkill_led_trigger_register(struct rfkill *rfkill) | ||
688 | { | ||
689 | #ifdef CONFIG_RFKILL_LEDS | ||
690 | int error; | ||
691 | |||
692 | if (!rfkill->led_trigger.name) | ||
693 | rfkill->led_trigger.name = dev_name(&rfkill->dev); | ||
694 | if (!rfkill->led_trigger.activate) | ||
695 | rfkill->led_trigger.activate = rfkill_led_trigger_activate; | ||
696 | error = led_trigger_register(&rfkill->led_trigger); | ||
697 | if (error) | ||
698 | rfkill->led_trigger.name = NULL; | ||
699 | #endif /* CONFIG_RFKILL_LEDS */ | ||
700 | } | ||
701 | |||
702 | static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | ||
703 | { | ||
704 | #ifdef CONFIG_RFKILL_LEDS | ||
705 | if (rfkill->led_trigger.name) { | ||
706 | led_trigger_unregister(&rfkill->led_trigger); | ||
707 | rfkill->led_trigger.name = NULL; | ||
708 | } | ||
709 | #endif | ||
710 | } | ||
711 | |||
712 | /** | ||
713 | * rfkill_register - Register a rfkill structure. | ||
714 | * @rfkill: rfkill structure to be registered | ||
715 | * | ||
716 | * This function should be called by the network driver when the rfkill | ||
717 | * structure needs to be registered. Immediately upon registration the | ||
718 | * switch driver should be able to service calls to toggle_radio. | ||
719 | */ | ||
720 | int __must_check rfkill_register(struct rfkill *rfkill) | ||
721 | { | ||
722 | static atomic_t rfkill_no = ATOMIC_INIT(0); | ||
723 | struct device *dev = &rfkill->dev; | ||
724 | int error; | ||
725 | |||
726 | if (WARN((!rfkill || !rfkill->toggle_radio || | ||
727 | rfkill->type >= RFKILL_TYPE_MAX || | ||
728 | rfkill->state >= RFKILL_STATE_MAX), | ||
729 | KERN_WARNING | ||
730 | "rfkill: attempt to register a " | ||
731 | "badly initialized rfkill struct\n")) | ||
732 | return -EINVAL; | ||
733 | |||
734 | dev_set_name(dev, "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1); | ||
735 | |||
736 | rfkill_led_trigger_register(rfkill); | ||
737 | |||
738 | error = rfkill_add_switch(rfkill); | ||
739 | if (error) { | ||
740 | rfkill_led_trigger_unregister(rfkill); | ||
741 | return error; | ||
742 | } | ||
743 | |||
744 | error = device_add(dev); | ||
745 | if (error) { | ||
746 | rfkill_remove_switch(rfkill); | ||
747 | rfkill_led_trigger_unregister(rfkill); | ||
748 | return error; | ||
749 | } | ||
750 | |||
751 | return 0; | ||
752 | } | ||
753 | EXPORT_SYMBOL(rfkill_register); | ||
754 | |||
755 | /** | ||
756 | * rfkill_unregister - Unregister a rfkill structure. | ||
757 | * @rfkill: rfkill structure to be unregistered | ||
758 | * | ||
759 | * This function should be called by the network driver during device | ||
760 | * teardown to destroy the rfkill structure. Note that rfkill_free() should | ||
761 | * _not_ be called after rfkill_unregister(). | ||
762 | */ | ||
763 | void rfkill_unregister(struct rfkill *rfkill) | ||
764 | { | ||
765 | BUG_ON(!rfkill); | ||
766 | device_del(&rfkill->dev); | ||
767 | rfkill_remove_switch(rfkill); | ||
768 | rfkill_led_trigger_unregister(rfkill); | ||
769 | put_device(&rfkill->dev); | ||
770 | } | ||
771 | EXPORT_SYMBOL(rfkill_unregister); | ||
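
Taken together, rfkill_allocate(), rfkill_register(), rfkill_unregister() and rfkill_free() define the driver-side lifecycle of this API. A minimal sketch of a driver using it (my_toggle_radio() and my_priv are made-up stand-ins for the driver's own callback and data):

static int my_toggle_radio(void *data, enum rfkill_state state)
{
	/* program the hardware on/off for data (== my_priv) here */
	return 0;
}

static struct rfkill *my_rfkill_setup(struct device *parent, void *my_priv)
{
	struct rfkill *rf = rfkill_allocate(parent, RFKILL_TYPE_WLAN);

	if (!rf)
		return NULL;
	rf->name = "my-wlan";
	rf->data = my_priv;
	rf->toggle_radio = my_toggle_radio;
	if (rfkill_register(rf)) {
		rfkill_free(rf);	/* only valid while unregistered */
		return NULL;
	}
	return rf;	/* on teardown: rfkill_unregister(rf), not rfkill_free() */
}
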
772 | |||
773 | /** | ||
774 | * rfkill_set_default - set initial value for a switch type | ||
775 | * @type: the type of switch to set the default state of | ||
776 | * @state: the new default state for that group of switches | ||
777 | * | ||
778 | * Sets the initial state rfkill should use for a given type. | ||
779 | * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED | ||
780 | * and RFKILL_STATE_UNBLOCKED. | ||
781 | * | ||
782 | * This function is meant to be used by platform drivers for platforms | ||
783 | * that can save switch state across power down/reboot. | ||
784 | * | ||
785 | * The default state for each switch type can be changed exactly once. | ||
786 | * After a switch of that type is registered, the default state cannot | ||
787 | * be changed anymore. This guards against multiple drivers in the | ||
788 | * same platform trying to set the initial switch default state, which | ||
789 | * is not allowed. | ||
790 | * | ||
791 | * Returns -EPERM if the default state has already been set once or is | ||
792 | * in use; drivers likely want to either ignore the error or at most | ||
793 | * printk(KERN_NOTICE) about it. | ||
794 | * | ||
795 | * Returns 0 if the new default state was set, or an error if it | ||
796 | * could not be set. | ||
797 | */ | ||
798 | int rfkill_set_default(enum rfkill_type type, enum rfkill_state state) | ||
799 | { | ||
800 | int error; | ||
801 | |||
802 | if (WARN((type >= RFKILL_TYPE_MAX || | ||
803 | (state != RFKILL_STATE_SOFT_BLOCKED && | ||
804 | state != RFKILL_STATE_UNBLOCKED)), | ||
805 | KERN_WARNING | ||
806 | "rfkill: illegal state %d or type %d passed as " | ||
807 | "parameter to rfkill_set_default\n", state, type)) | ||
808 | return -EINVAL; | ||
809 | |||
810 | mutex_lock(&rfkill_global_mutex); | ||
811 | |||
812 | if (!test_and_set_bit(type, rfkill_states_lockdflt)) { | ||
813 | rfkill_global_states[type].default_state = state; | ||
814 | rfkill_global_states[type].current_state = state; | ||
815 | error = 0; | ||
816 | } else | ||
817 | error = -EPERM; | ||
818 | |||
819 | mutex_unlock(&rfkill_global_mutex); | ||
820 | return error; | ||
821 | } | ||
822 | EXPORT_SYMBOL_GPL(rfkill_set_default); | ||
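
A platform driver that saves radio state across reboots would call this once at probe time, before any switch of that type registers. A sketch, with the saved-state lookup left hypothetical:

static void my_platform_restore_wlan_default(void)
{
	/* my_read_saved_state() is hypothetical platform code; it must
	 * return RFKILL_STATE_SOFT_BLOCKED or RFKILL_STATE_UNBLOCKED */
	enum rfkill_state saved = my_read_saved_state();
	int err = rfkill_set_default(RFKILL_TYPE_WLAN, saved);

	if (err == -EPERM)
		printk(KERN_NOTICE "wlan default already set, ignoring\n");
}
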
823 | |||
824 | /* | ||
825 | * Rfkill module initialization/deinitialization. | ||
826 | */ | ||
827 | static int __init rfkill_init(void) | ||
828 | { | ||
829 | int error; | ||
830 | int i; | ||
831 | |||
832 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | ||
833 | if (rfkill_default_state != RFKILL_STATE_SOFT_BLOCKED && | ||
834 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) | ||
835 | return -EINVAL; | ||
836 | |||
837 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
838 | rfkill_global_states[i].default_state = rfkill_default_state; | ||
839 | |||
840 | error = class_register(&rfkill_class); | ||
841 | if (error) { | ||
842 | printk(KERN_ERR "rfkill: unable to register rfkill class\n"); | ||
843 | return error; | ||
844 | } | ||
845 | |||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | static void __exit rfkill_exit(void) | ||
850 | { | ||
851 | class_unregister(&rfkill_class); | ||
852 | } | ||
853 | |||
854 | subsys_initcall(rfkill_init); | ||
855 | module_exit(rfkill_exit); | ||
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill.h index fe8df6b5b935..d1117cb6e4de 100644 --- a/net/rfkill/rfkill-input.h +++ b/net/rfkill/rfkill.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Ivo van Doorn | 2 | * Copyright (C) 2007 Ivo van Doorn |
3 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
3 | */ | 4 | */ |
4 | 5 | ||
5 | /* | 6 | /* |
@@ -11,11 +12,16 @@ | |||
11 | #ifndef __RFKILL_INPUT_H | 12 | #ifndef __RFKILL_INPUT_H |
12 | #define __RFKILL_INPUT_H | 13 | #define __RFKILL_INPUT_H |
13 | 14 | ||
14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); | 15 | /* core code */ |
16 | void rfkill_switch_all(const enum rfkill_type type, bool blocked); | ||
15 | void rfkill_epo(void); | 17 | void rfkill_epo(void); |
16 | void rfkill_restore_states(void); | 18 | void rfkill_restore_states(void); |
17 | void rfkill_remove_epo_lock(void); | 19 | void rfkill_remove_epo_lock(void); |
18 | bool rfkill_is_epo_lock_active(void); | 20 | bool rfkill_is_epo_lock_active(void); |
19 | enum rfkill_state rfkill_get_global_state(const enum rfkill_type type); | 21 | bool rfkill_get_global_sw_state(const enum rfkill_type type); |
22 | |||
23 | /* input handler */ | ||
24 | int rfkill_handler_init(void); | ||
25 | void rfkill_handler_exit(void); | ||
20 | 26 | ||
21 | #endif /* __RFKILL_INPUT_H */ | 27 | #endif /* __RFKILL_INPUT_H */ |
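
With this header change the core-internal API takes a plain "blocked" boolean instead of an enum rfkill_state; a caller such as the input handler now reads, roughly (sketch):

	/* soft-block all WLAN switches, then release them again */
	rfkill_switch_all(RFKILL_TYPE_WLAN, true);
	rfkill_switch_all(RFKILL_TYPE_WLAN, false);
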
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 0759f32e9dca..09cdcdfe7e91 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -135,6 +135,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
135 | unsigned long cl; | 135 | unsigned long cl; |
136 | unsigned long fh; | 136 | unsigned long fh; |
137 | int err; | 137 | int err; |
138 | int tp_created = 0; | ||
138 | 139 | ||
139 | if (net != &init_net) | 140 | if (net != &init_net) |
140 | return -EINVAL; | 141 | return -EINVAL; |
@@ -266,10 +267,7 @@ replay: | |||
266 | goto errout; | 267 | goto errout; |
267 | } | 268 | } |
268 | 269 | ||
269 | spin_lock_bh(root_lock); | 270 | tp_created = 1; |
270 | tp->next = *back; | ||
271 | *back = tp; | ||
272 | spin_unlock_bh(root_lock); | ||
273 | 271 | ||
274 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) | 272 | } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) |
275 | goto errout; | 273 | goto errout; |
@@ -296,8 +294,11 @@ replay: | |||
296 | switch (n->nlmsg_type) { | 294 | switch (n->nlmsg_type) { |
297 | case RTM_NEWTFILTER: | 295 | case RTM_NEWTFILTER: |
298 | err = -EEXIST; | 296 | err = -EEXIST; |
299 | if (n->nlmsg_flags & NLM_F_EXCL) | 297 | if (n->nlmsg_flags & NLM_F_EXCL) { |
298 | if (tp_created) | ||
299 | tcf_destroy(tp); | ||
300 | goto errout; | 300 | goto errout; |
301 | } | ||
301 | break; | 302 | break; |
302 | case RTM_DELTFILTER: | 303 | case RTM_DELTFILTER: |
303 | err = tp->ops->delete(tp, fh); | 304 | err = tp->ops->delete(tp, fh); |
@@ -314,8 +315,18 @@ replay: | |||
314 | } | 315 | } |
315 | 316 | ||
316 | err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); | 317 | err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); |
317 | if (err == 0) | 318 | if (err == 0) { |
319 | if (tp_created) { | ||
320 | spin_lock_bh(root_lock); | ||
321 | tp->next = *back; | ||
322 | *back = tp; | ||
323 | spin_unlock_bh(root_lock); | ||
324 | } | ||
318 | tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); | 325 | tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); |
326 | } else { | ||
327 | if (tp_created) | ||
328 | tcf_destroy(tp); | ||
329 | } | ||
319 | 330 | ||
320 | errout: | 331 | errout: |
321 | if (cl) | 332 | if (cl) |
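
The tp_created flag implements a publish-last pattern: the freshly built classifier is linked into the chain (under root_lock) only after ->change() succeeds, and is destroyed on every failure path, so concurrent readers never see a half-initialized tcf_proto. Reduced to its shape (a sketch of the logic above, not new code paths):

	tp_created = 1;			/* built, not yet visible */
	err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
	if (err == 0) {
		if (tp_created) {
			spin_lock_bh(root_lock);
			tp->next = *back;	/* publish under the lock */
			*back = tp;
			spin_unlock_bh(root_lock);
		}
	} else if (tp_created) {
		tcf_destroy(tp);	/* never published, safe to free */
	}
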
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 1ab4542e61e0..0f815cc6a3db 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -98,8 +98,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
98 | struct tcf_result *res) | 98 | struct tcf_result *res) |
99 | { | 99 | { |
100 | struct cls_cgroup_head *head = tp->root; | 100 | struct cls_cgroup_head *head = tp->root; |
101 | struct cgroup_cls_state *cs; | 101 | u32 classid; |
102 | int ret = 0; | ||
103 | 102 | ||
104 | /* | 103 | /* |
105 | * Due to the nature of the classifier it is required to ignore all | 104 | * Due to the nature of the classifier it is required to ignore all |
@@ -115,17 +114,18 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
115 | return -1; | 114 | return -1; |
116 | 115 | ||
117 | rcu_read_lock(); | 116 | rcu_read_lock(); |
118 | cs = task_cls_state(current); | 117 | classid = task_cls_state(current)->classid; |
119 | if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { | ||
120 | res->classid = cs->classid; | ||
121 | res->class = 0; | ||
122 | ret = tcf_exts_exec(skb, &head->exts, res); | ||
123 | } else | ||
124 | ret = -1; | ||
125 | |||
126 | rcu_read_unlock(); | 118 | rcu_read_unlock(); |
127 | 119 | ||
128 | return ret; | 120 | if (!classid) |
121 | return -1; | ||
122 | |||
123 | if (!tcf_em_tree_match(skb, &head->ematches, NULL)) | ||
124 | return -1; | ||
125 | |||
126 | res->classid = classid; | ||
127 | res->class = 0; | ||
128 | return tcf_exts_exec(skb, &head->exts, res); | ||
129 | } | 129 | } |
130 | 130 | ||
131 | static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) | 131 | static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) |
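
The rewrite also shrinks the RCU read-side critical section to a single field copy: the classid is snapshotted under rcu_read_lock() and all the ematch/extension work runs after the unlock. As a general shape (obj_lookup() is a stand-in for any RCU-protected lookup such as task_cls_state()):

	u32 val;

	rcu_read_lock();
	val = obj_lookup()->field;	/* copy while the object is pinned */
	rcu_read_unlock();		/* the pointer must not be used past here */

	if (!val)
		return -1;
	/* the expensive matching work proceeds with no RCU lock held */
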
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 0ef4e3065bcd..9402a7fd3785 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -84,7 +84,7 @@ static u32 flow_get_dst(const struct sk_buff *skb) | |||
84 | case htons(ETH_P_IPV6): | 84 | case htons(ETH_P_IPV6): |
85 | return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); | 85 | return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); |
86 | default: | 86 | default: |
87 | return addr_fold(skb->dst) ^ (__force u16)skb->protocol; | 87 | return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; |
88 | } | 88 | } |
89 | } | 89 | } |
90 | 90 | ||
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb) | |||
163 | break; | 163 | break; |
164 | } | 164 | } |
165 | default: | 165 | default: |
166 | res = addr_fold(skb->dst) ^ (__force u16)skb->protocol; | 166 | res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; |
167 | } | 167 | } |
168 | 168 | ||
169 | return res; | 169 | return res; |
@@ -251,8 +251,8 @@ fallback: | |||
251 | static u32 flow_get_rtclassid(const struct sk_buff *skb) | 251 | static u32 flow_get_rtclassid(const struct sk_buff *skb) |
252 | { | 252 | { |
253 | #ifdef CONFIG_NET_CLS_ROUTE | 253 | #ifdef CONFIG_NET_CLS_ROUTE |
254 | if (skb->dst) | 254 | if (skb_dst(skb)) |
255 | return skb->dst->tclassid; | 255 | return skb_dst(skb)->tclassid; |
256 | #endif | 256 | #endif |
257 | return 0; | 257 | return 0; |
258 | } | 258 | } |
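
These hunks belong to a tree-wide conversion: direct skb->dst and skb->rtable dereferences become skb_dst()/skb_rtable() helper calls so the field's representation can change behind the accessor. The helpers are roughly the following (a sketch; the exact field name and storage type in this kernel may differ):

static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->_skb_dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_dst = (unsigned long)dst;
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}
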
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index bdf1f4172eef..dd872d5383ef 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c | |||
@@ -137,7 +137,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
137 | u32 id, h; | 137 | u32 id, h; |
138 | int iif, dont_cache = 0; | 138 | int iif, dont_cache = 0; |
139 | 139 | ||
140 | if ((dst = skb->dst) == NULL) | 140 | if ((dst = skb_dst(skb)) == NULL) |
141 | goto failure; | 141 | goto failure; |
142 | 142 | ||
143 | id = dst->tclassid; | 143 | id = dst->tclassid; |
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index fad596bf32d7..266151ae85a3 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
@@ -246,11 +246,11 @@ META_COLLECTOR(int_tcindex) | |||
246 | 246 | ||
247 | META_COLLECTOR(int_rtclassid) | 247 | META_COLLECTOR(int_rtclassid) |
248 | { | 248 | { |
249 | if (unlikely(skb->dst == NULL)) | 249 | if (unlikely(skb_dst(skb) == NULL)) |
250 | *err = -1; | 250 | *err = -1; |
251 | else | 251 | else |
252 | #ifdef CONFIG_NET_CLS_ROUTE | 252 | #ifdef CONFIG_NET_CLS_ROUTE |
253 | dst->value = skb->dst->tclassid; | 253 | dst->value = skb_dst(skb)->tclassid; |
254 | #else | 254 | #else |
255 | dst->value = 0; | 255 | dst->value = 0; |
256 | #endif | 256 | #endif |
@@ -258,10 +258,10 @@ META_COLLECTOR(int_rtclassid) | |||
258 | 258 | ||
259 | META_COLLECTOR(int_rtiif) | 259 | META_COLLECTOR(int_rtiif) |
260 | { | 260 | { |
261 | if (unlikely(skb->rtable == NULL)) | 261 | if (unlikely(skb_rtable(skb) == NULL)) |
262 | *err = -1; | 262 | *err = -1; |
263 | else | 263 | else |
264 | dst->value = skb->rtable->fl.iif; | 264 | dst->value = skb_rtable(skb)->fl.iif; |
265 | } | 265 | } |
266 | 266 | ||
267 | /************************************************************************** | 267 | /************************************************************************** |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 5022f9c1f34b..362c2811b2df 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -372,7 +372,7 @@ cftree_update(struct hfsc_class *cl) | |||
372 | * ism: (psched_us/byte) << ISM_SHIFT | 372 | * ism: (psched_us/byte) << ISM_SHIFT |
373 | * dx: psched_us | 373 | * dx: psched_us |
374 | * | 374 | * |
375 | * The clock source resolution with ktime is 1.024us. | 375 | * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us. |
376 | * | 376 | * |
377 | * sm and ism are scaled in order to keep effective digits. | 377 | * sm and ism are scaled in order to keep effective digits. |
378 | * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective | 378 | * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective |
@@ -383,9 +383,11 @@ cftree_update(struct hfsc_class *cl) | |||
383 | * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 | 383 | * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 |
384 | * | 384 | * |
385 | * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 | 385 | * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 |
386 | * | ||
387 | * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18. | ||
386 | */ | 388 | */ |
387 | #define SM_SHIFT 20 | 389 | #define SM_SHIFT (30 - PSCHED_SHIFT) |
388 | #define ISM_SHIFT 18 | 390 | #define ISM_SHIFT (8 + PSCHED_SHIFT) |
389 | 391 | ||
390 | #define SM_MASK ((1ULL << SM_SHIFT) - 1) | 392 | #define SM_MASK ((1ULL << SM_SHIFT) - 1) |
391 | #define ISM_MASK ((1ULL << ISM_SHIFT) - 1) | 393 | #define ISM_MASK ((1ULL << ISM_SHIFT) - 1) |
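
Note that (30 - PSCHED_SHIFT) + (8 + PSCHED_SHIFT) = 38 for any PSCHED_SHIFT, the same total as the old 20 + 18, so the combined sm*ism scale is preserved. A compile-time check of the derivation (a standalone sketch, not part of the patch, assuming PSCHED_SHIFT is 10):

#define PSCHED_SHIFT 10			/* per the comment above */
#define SM_SHIFT  (30 - PSCHED_SHIFT)	/* 20, as hard-coded before */
#define ISM_SHIFT (8 + PSCHED_SHIFT)	/* 18, as hard-coded before */

_Static_assert(SM_SHIFT == 20, "sm scaling changed");
_Static_assert(ISM_SHIFT == 18, "ism scaling changed");
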
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 33133d27b539..8706920a6d45 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -149,7 +149,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
149 | break; | 149 | break; |
150 | } | 150 | } |
151 | default: | 151 | default: |
152 | h = (unsigned long)skb->dst ^ skb->protocol; | 152 | h = (unsigned long)skb_dst(skb) ^ skb->protocol; |
153 | h2 = (unsigned long)skb->sk; | 153 | h2 = (unsigned long)skb->sk; |
154 | } | 154 | } |
155 | 155 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index a886496bdc3a..cb1cb1e76b9a 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -222,7 +222,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
222 | { | 222 | { |
223 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); | 223 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); |
224 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); | 224 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); |
225 | struct neighbour *mn = skb->dst->neighbour; | 225 | struct neighbour *mn = skb_dst(skb)->neighbour; |
226 | struct neighbour *n = q->ncache; | 226 | struct neighbour *n = q->ncache; |
227 | 227 | ||
228 | if (mn->tbl == NULL) | 228 | if (mn->tbl == NULL) |
@@ -262,8 +262,8 @@ static inline int teql_resolve(struct sk_buff *skb, | |||
262 | return -ENODEV; | 262 | return -ENODEV; |
263 | 263 | ||
264 | if (dev->header_ops == NULL || | 264 | if (dev->header_ops == NULL || |
265 | skb->dst == NULL || | 265 | skb_dst(skb) == NULL || |
266 | skb->dst->neighbour == NULL) | 266 | skb_dst(skb)->neighbour == NULL) |
267 | return 0; | 267 | return 0; |
268 | return __teql_resolve(skb, skb_res, dev); | 268 | return __teql_resolve(skb, skb_res, dev); |
269 | } | 269 | } |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index f4b23043b610..525864bf4f07 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -293,7 +293,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
293 | * told otherwise. | 293 | * told otherwise. |
294 | */ | 294 | */ |
295 | asoc->peer.ipv4_address = 1; | 295 | asoc->peer.ipv4_address = 1; |
296 | asoc->peer.ipv6_address = 1; | 296 | if (asoc->base.sk->sk_family == PF_INET6) |
297 | asoc->peer.ipv6_address = 1; | ||
297 | INIT_LIST_HEAD(&asoc->asocs); | 298 | INIT_LIST_HEAD(&asoc->asocs); |
298 | 299 | ||
299 | asoc->autoclose = sp->autoclose; | 300 | asoc->autoclose = sp->autoclose; |
@@ -566,6 +567,21 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, | |||
566 | if (asoc->init_last_sent_to == peer) | 567 | if (asoc->init_last_sent_to == peer) |
567 | asoc->init_last_sent_to = NULL; | 568 | asoc->init_last_sent_to = NULL; |
568 | 569 | ||
570 | /* If we remove the transport a SHUTDOWN was last sent to, set it | ||
571 | * to NULL. Combined with the update of the retran path above, this | ||
572 | * will cause the next SHUTDOWN to be sent to the next available | ||
573 | * transport, maintaining the cycle. | ||
574 | */ | ||
575 | if (asoc->shutdown_last_sent_to == peer) | ||
576 | asoc->shutdown_last_sent_to = NULL; | ||
577 | |||
578 | /* If we remove the transport an ASCONF was last sent to, set it to | ||
579 | * NULL. | ||
580 | */ | ||
581 | if (asoc->addip_last_asconf && | ||
582 | asoc->addip_last_asconf->transport == peer) | ||
583 | asoc->addip_last_asconf->transport = NULL; | ||
584 | |||
569 | asoc->peer.transport_count--; | 585 | asoc->peer.transport_count--; |
570 | 586 | ||
571 | sctp_transport_free(peer); | 587 | sctp_transport_free(peer); |
@@ -1268,49 +1284,21 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc) | |||
1268 | ntohs(t->ipaddr.v4.sin_port)); | 1284 | ntohs(t->ipaddr.v4.sin_port)); |
1269 | } | 1285 | } |
1270 | 1286 | ||
1271 | /* Choose the transport for sending an INIT packet. */ | 1287 | /* Choose the transport for sending a retransmit packet. */ |
1272 | struct sctp_transport *sctp_assoc_choose_init_transport( | 1288 | struct sctp_transport *sctp_assoc_choose_alter_transport( |
1273 | struct sctp_association *asoc) | 1289 | struct sctp_association *asoc, struct sctp_transport *last_sent_to) |
1274 | { | ||
1275 | struct sctp_transport *t; | ||
1276 | |||
1277 | /* Use the retran path. If the last INIT was sent over the | ||
1278 | * retran path, update the retran path and use it. | ||
1279 | */ | ||
1280 | if (!asoc->init_last_sent_to) { | ||
1281 | t = asoc->peer.active_path; | ||
1282 | } else { | ||
1283 | if (asoc->init_last_sent_to == asoc->peer.retran_path) | ||
1284 | sctp_assoc_update_retran_path(asoc); | ||
1285 | t = asoc->peer.retran_path; | ||
1286 | } | ||
1287 | |||
1288 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" | ||
1289 | " %p addr: ", | ||
1290 | " port: %d\n", | ||
1291 | asoc, | ||
1292 | (&t->ipaddr), | ||
1293 | ntohs(t->ipaddr.v4.sin_port)); | ||
1294 | |||
1295 | return t; | ||
1296 | } | ||
1297 | |||
1298 | /* Choose the transport for sending a SHUTDOWN packet. */ | ||
1299 | struct sctp_transport *sctp_assoc_choose_shutdown_transport( | ||
1300 | struct sctp_association *asoc) | ||
1301 | { | 1290 | { |
1302 | /* If this is the first time SHUTDOWN is sent, use the active path, | 1291 | /* If this is the first time a packet is sent, use the active path, |
1303 | * else use the retran path. If the last SHUTDOWN was sent over the | 1292 | * else use the retran path. If the last packet was sent over the |
1304 | * retran path, update the retran path and use it. | 1293 | * retran path, update the retran path and use it. |
1305 | */ | 1294 | */ |
1306 | if (!asoc->shutdown_last_sent_to) | 1295 | if (!last_sent_to) |
1307 | return asoc->peer.active_path; | 1296 | return asoc->peer.active_path; |
1308 | else { | 1297 | else { |
1309 | if (asoc->shutdown_last_sent_to == asoc->peer.retran_path) | 1298 | if (last_sent_to == asoc->peer.retran_path) |
1310 | sctp_assoc_update_retran_path(asoc); | 1299 | sctp_assoc_update_retran_path(asoc); |
1311 | return asoc->peer.retran_path; | 1300 | return asoc->peer.retran_path; |
1312 | } | 1301 | } |
1313 | |||
1314 | } | 1302 | } |
1315 | 1303 | ||
1316 | /* Update the association's pmtu and frag_point by going through all the | 1304 | /* Update the association's pmtu and frag_point by going through all the |
@@ -1482,6 +1470,10 @@ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) | |||
1482 | { | 1470 | { |
1483 | int assoc_id; | 1471 | int assoc_id; |
1484 | int error = 0; | 1472 | int error = 0; |
1473 | |||
1474 | /* If the id is already assigned, keep it. */ | ||
1475 | if (asoc->assoc_id) | ||
1476 | return error; | ||
1485 | retry: | 1477 | retry: |
1486 | if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) | 1478 | if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) |
1487 | return -ENOMEM; | 1479 | return -ENOMEM; |
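
The two special-purpose helpers collapse into one, parameterized by whichever last-sent-to pointer applies. Callers now follow a single pattern (a sketch mirroring the sm_sideeffect.c hunks further down):

	/* INIT retransmission */
	t = sctp_assoc_choose_alter_transport(asoc, asoc->init_last_sent_to);
	asoc->init_last_sent_to = t;

	/* SHUTDOWN retransmission */
	t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
	asoc->shutdown_last_sent_to = t;
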
diff --git a/net/sctp/input.c b/net/sctp/input.c index d2e98803ffe3..c0c973e67add 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -81,13 +81,13 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); | |||
81 | /* Calculate the SCTP checksum of an SCTP packet. */ | 81 | /* Calculate the SCTP checksum of an SCTP packet. */ |
82 | static inline int sctp_rcv_checksum(struct sk_buff *skb) | 82 | static inline int sctp_rcv_checksum(struct sk_buff *skb) |
83 | { | 83 | { |
84 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | ||
85 | struct sctphdr *sh = sctp_hdr(skb); | 84 | struct sctphdr *sh = sctp_hdr(skb); |
86 | __le32 cmp = sh->checksum; | 85 | __le32 cmp = sh->checksum; |
86 | struct sk_buff *list; | ||
87 | __le32 val; | 87 | __le32 val; |
88 | __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); | 88 | __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); |
89 | 89 | ||
90 | for (; list; list = list->next) | 90 | skb_walk_frags(skb, list) |
91 | tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), | 91 | tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), |
92 | tmp); | 92 | tmp); |
93 | 93 | ||
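
skb_walk_frags() hides the frag_list walk behind one macro; it is equivalent to the open-coded loops it replaces throughout this series, roughly:

#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
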
diff --git a/net/sctp/output.c b/net/sctp/output.c index f0c91df59d4e..b76411444515 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -405,10 +405,10 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
405 | sctp_assoc_sync_pmtu(asoc); | 405 | sctp_assoc_sync_pmtu(asoc); |
406 | } | 406 | } |
407 | } | 407 | } |
408 | nskb->dst = dst_clone(tp->dst); | 408 | dst = dst_clone(tp->dst); |
409 | if (!nskb->dst) | 409 | skb_dst_set(nskb, dst); |
410 | if (!dst) | ||
410 | goto no_route; | 411 | goto no_route; |
411 | dst = nskb->dst; | ||
412 | 412 | ||
413 | /* Build the SCTP header. */ | 413 | /* Build the SCTP header. */ |
414 | sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); | 414 | sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8eb3e61cb701..79cbd47f4df7 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -393,7 +393,7 @@ static int sctp_v4_addr_valid(union sctp_addr *addr, | |||
393 | return 0; | 393 | return 0; |
394 | 394 | ||
395 | /* Is this a broadcast address? */ | 395 | /* Is this a broadcast address? */ |
396 | if (skb && skb->rtable->rt_flags & RTCF_BROADCAST) | 396 | if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST) |
397 | return 0; | 397 | return 0; |
398 | 398 | ||
399 | return 1; | 399 | return 1; |
@@ -572,7 +572,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk, | |||
572 | /* What interface did this skb arrive on? */ | 572 | /* What interface did this skb arrive on? */ |
573 | static int sctp_v4_skb_iif(const struct sk_buff *skb) | 573 | static int sctp_v4_skb_iif(const struct sk_buff *skb) |
574 | { | 574 | { |
575 | return skb->rtable->rt_iif; | 575 | return skb_rtable(skb)->rt_iif; |
576 | } | 576 | } |
577 | 577 | ||
578 | /* Was this packet marked by Explicit Congestion Notification? */ | 578 | /* Was this packet marked by Explicit Congestion Notification? */ |
@@ -848,8 +848,8 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
848 | 848 | ||
849 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", | 849 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", |
850 | __func__, skb, skb->len, | 850 | __func__, skb, skb->len, |
851 | &skb->rtable->rt_src, | 851 | &skb_rtable(skb)->rt_src, |
852 | &skb->rtable->rt_dst); | 852 | &skb_rtable(skb)->rt_dst); |
853 | 853 | ||
854 | inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? | 854 | inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? |
855 | IP_PMTUDISC_DO : IP_PMTUDISC_DONT; | 855 | IP_PMTUDISC_DO : IP_PMTUDISC_DONT; |
@@ -1370,6 +1370,8 @@ SCTP_STATIC __exit void sctp_exit(void) | |||
1370 | sctp_proc_exit(); | 1370 | sctp_proc_exit(); |
1371 | cleanup_sctp_mibs(); | 1371 | cleanup_sctp_mibs(); |
1372 | 1372 | ||
1373 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
1374 | |||
1373 | kmem_cache_destroy(sctp_chunk_cachep); | 1375 | kmem_cache_destroy(sctp_chunk_cachep); |
1374 | kmem_cache_destroy(sctp_bucket_cachep); | 1376 | kmem_cache_destroy(sctp_bucket_cachep); |
1375 | } | 1377 | } |
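
This rcu_barrier() (like the one added to auth_gss below) follows the standard teardown rule: a module that frees objects via call_rcu() must wait for all pending callbacks to run before destroying the caches they touch or unloading the callback code. The generic shape (a sketch; unregister_everything() and some_cache are placeholders):

static void __exit some_module_exit(void)
{
	unregister_everything();	/* stop new call_rcu() submissions */
	rcu_barrier();			/* wait for completion of call_rcu()'s */
	kmem_cache_destroy(some_cache);	/* now no callback can touch it */
}
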
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 6851ee94e974..61cc6075b0df 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2864,19 +2864,19 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2864 | switch (addr_param->v4.param_hdr.type) { | 2864 | switch (addr_param->v4.param_hdr.type) { |
2865 | case SCTP_PARAM_IPV6_ADDRESS: | 2865 | case SCTP_PARAM_IPV6_ADDRESS: |
2866 | if (!asoc->peer.ipv6_address) | 2866 | if (!asoc->peer.ipv6_address) |
2867 | return SCTP_ERROR_INV_PARAM; | 2867 | return SCTP_ERROR_DNS_FAILED; |
2868 | break; | 2868 | break; |
2869 | case SCTP_PARAM_IPV4_ADDRESS: | 2869 | case SCTP_PARAM_IPV4_ADDRESS: |
2870 | if (!asoc->peer.ipv4_address) | 2870 | if (!asoc->peer.ipv4_address) |
2871 | return SCTP_ERROR_INV_PARAM; | 2871 | return SCTP_ERROR_DNS_FAILED; |
2872 | break; | 2872 | break; |
2873 | default: | 2873 | default: |
2874 | return SCTP_ERROR_INV_PARAM; | 2874 | return SCTP_ERROR_DNS_FAILED; |
2875 | } | 2875 | } |
2876 | 2876 | ||
2877 | af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); | 2877 | af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); |
2878 | if (unlikely(!af)) | 2878 | if (unlikely(!af)) |
2879 | return SCTP_ERROR_INV_PARAM; | 2879 | return SCTP_ERROR_DNS_FAILED; |
2880 | 2880 | ||
2881 | af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); | 2881 | af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); |
2882 | 2882 | ||
@@ -2886,7 +2886,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2886 | * make sure we check for that) | 2886 | * make sure we check for that) |
2887 | */ | 2887 | */ |
2888 | if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) | 2888 | if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) |
2889 | return SCTP_ERROR_INV_PARAM; | 2889 | return SCTP_ERROR_DNS_FAILED; |
2890 | 2890 | ||
2891 | switch (asconf_param->param_hdr.type) { | 2891 | switch (asconf_param->param_hdr.type) { |
2892 | case SCTP_PARAM_ADD_IP: | 2892 | case SCTP_PARAM_ADD_IP: |
@@ -2954,12 +2954,12 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2954 | 2954 | ||
2955 | peer = sctp_assoc_lookup_paddr(asoc, &addr); | 2955 | peer = sctp_assoc_lookup_paddr(asoc, &addr); |
2956 | if (!peer) | 2956 | if (!peer) |
2957 | return SCTP_ERROR_INV_PARAM; | 2957 | return SCTP_ERROR_DNS_FAILED; |
2958 | 2958 | ||
2959 | sctp_assoc_set_primary(asoc, peer); | 2959 | sctp_assoc_set_primary(asoc, peer); |
2960 | break; | 2960 | break; |
2961 | default: | 2961 | default: |
2962 | return SCTP_ERROR_INV_PARAM; | 2962 | return SCTP_ERROR_UNKNOWN_PARAM; |
2963 | break; | 2963 | break; |
2964 | } | 2964 | } |
2965 | 2965 | ||
@@ -3273,7 +3273,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
3273 | retval = 1; | 3273 | retval = 1; |
3274 | break; | 3274 | break; |
3275 | 3275 | ||
3276 | case SCTP_ERROR_INV_PARAM: | 3276 | case SCTP_ERROR_UNKNOWN_PARAM: |
3277 | /* Disable sending this type of asconf parameter in | 3277 | /* Disable sending this type of asconf parameter in |
3278 | * future. | 3278 | * future. |
3279 | */ | 3279 | */ |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index e2020eb2c8ca..86426aac1600 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -686,7 +686,8 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, | |||
686 | { | 686 | { |
687 | struct sctp_transport *t; | 687 | struct sctp_transport *t; |
688 | 688 | ||
689 | t = sctp_assoc_choose_shutdown_transport(asoc); | 689 | t = sctp_assoc_choose_alter_transport(asoc, |
690 | asoc->shutdown_last_sent_to); | ||
690 | asoc->shutdown_last_sent_to = t; | 691 | asoc->shutdown_last_sent_to = t; |
691 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | 692 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; |
692 | chunk->transport = t; | 693 | chunk->transport = t; |
@@ -777,7 +778,7 @@ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, | |||
777 | { | 778 | { |
778 | struct sctp_transport *t; | 779 | struct sctp_transport *t; |
779 | 780 | ||
780 | t = asoc->peer.active_path; | 781 | t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); |
781 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; | 782 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; |
782 | chunk->transport = t; | 783 | chunk->transport = t; |
783 | } | 784 | } |
@@ -1379,7 +1380,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1379 | 1380 | ||
1380 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: | 1381 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: |
1381 | chunk = cmd->obj.ptr; | 1382 | chunk = cmd->obj.ptr; |
1382 | t = sctp_assoc_choose_init_transport(asoc); | 1383 | t = sctp_assoc_choose_alter_transport(asoc, |
1384 | asoc->init_last_sent_to); | ||
1383 | asoc->init_last_sent_to = t; | 1385 | asoc->init_last_sent_to = t; |
1384 | chunk->transport = t; | 1386 | chunk->transport = t; |
1385 | t->init_sent_count++; | 1387 | t->init_sent_count++; |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 55a61aa69662..7288192f7df5 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -5432,9 +5432,13 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, | |||
5432 | if (!reply) | 5432 | if (!reply) |
5433 | goto nomem; | 5433 | goto nomem; |
5434 | 5434 | ||
5435 | /* Do some failure management (Section 8.2). */ | 5435 | /* Do some failure management (Section 8.2). |
5436 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, | 5436 | * If we remove the transport a SHUTDOWN was last sent to, don't |
5437 | SCTP_TRANSPORT(asoc->shutdown_last_sent_to)); | 5437 | * do failure management. |
5438 | */ | ||
5439 | if (asoc->shutdown_last_sent_to) | ||
5440 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, | ||
5441 | SCTP_TRANSPORT(asoc->shutdown_last_sent_to)); | ||
5438 | 5442 | ||
5439 | /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for | 5443 | /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for |
5440 | * the T2-shutdown timer. | 5444 | * the T2-shutdown timer. |
@@ -5471,7 +5475,9 @@ sctp_disposition_t sctp_sf_t4_timer_expire( | |||
5471 | * detection on the appropriate destination address as defined in | 5475 | * detection on the appropriate destination address as defined in |
5472 | * RFC2960 [5] section 8.1 and 8.2. | 5476 | * RFC2960 [5] section 8.1 and 8.2. |
5473 | */ | 5477 | */ |
5474 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); | 5478 | if (transport) |
5479 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, | ||
5480 | SCTP_TRANSPORT(transport)); | ||
5475 | 5481 | ||
5476 | /* Reconfig T4 timer and transport. */ | 5482 | /* Reconfig T4 timer and transport. */ |
5477 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); | 5483 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); |
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 5c8186d88c61..6d9b3aafcc5d 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -698,7 +698,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { | |||
698 | TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ | 698 | TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ |
699 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 699 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
700 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ | 700 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
701 | } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ | 701 | } /* TYPE_SCTP_PRIMITIVE_ASCONF */ |
702 | 702 | ||
703 | /* The primary index for this table is the primitive type. | 703 | /* The primary index for this table is the primitive type. |
704 | * The secondary index for this table is the state. | 704 | * The secondary index for this table is the state. |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 5fb3a8c9792e..0f01e5d8a24f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1100,6 +1100,15 @@ static int __sctp_connect(struct sock* sk, | |||
1100 | goto out_free; | 1100 | goto out_free; |
1101 | } | 1101 | } |
1102 | 1102 | ||
1103 | /* In case the user of sctp_connectx() wants an association | ||
1104 | * id back, assign one now. | ||
1105 | */ | ||
1106 | if (assoc_id) { | ||
1107 | err = sctp_assoc_set_id(asoc, GFP_KERNEL); | ||
1108 | if (err < 0) | ||
1109 | goto out_free; | ||
1110 | } | ||
1111 | |||
1103 | err = sctp_primitive_ASSOCIATE(asoc, NULL); | 1112 | err = sctp_primitive_ASSOCIATE(asoc, NULL); |
1104 | if (err < 0) { | 1113 | if (err < 0) { |
1105 | goto out_free; | 1114 | goto out_free; |
@@ -1120,7 +1129,7 @@ static int __sctp_connect(struct sock* sk, | |||
1120 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); | 1129 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); |
1121 | 1130 | ||
1122 | err = sctp_wait_for_connect(asoc, &timeo); | 1131 | err = sctp_wait_for_connect(asoc, &timeo); |
1123 | if (!err && assoc_id) | 1132 | if ((err == 0 || err == -EINPROGRESS) && assoc_id) |
1124 | *assoc_id = asoc->assoc_id; | 1133 | *assoc_id = asoc->assoc_id; |
1125 | 1134 | ||
1126 | /* Don't free association on exit. */ | 1135 | /* Don't free association on exit. */ |
@@ -1264,6 +1273,34 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, | |||
1264 | return assoc_id; | 1273 | return assoc_id; |
1265 | } | 1274 | } |
1266 | 1275 | ||
1276 | /* | ||
1277 | * New (hopefully final) interface for the API. The option buffer is used | ||
1278 | * both for the returned association id and the addresses. | ||
1279 | */ | ||
1280 | SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, | ||
1281 | char __user *optval, | ||
1282 | int __user *optlen) | ||
1283 | { | ||
1284 | sctp_assoc_t assoc_id = 0; | ||
1285 | int err = 0; | ||
1286 | |||
1287 | if (len < sizeof(assoc_id)) | ||
1288 | return -EINVAL; | ||
1289 | |||
1290 | err = __sctp_setsockopt_connectx(sk, | ||
1291 | (struct sockaddr __user *)(optval + sizeof(assoc_id)), | ||
1292 | len - sizeof(assoc_id), &assoc_id); | ||
1293 | |||
1294 | if (err == 0 || err == -EINPROGRESS) { | ||
1295 | if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) | ||
1296 | return -EFAULT; | ||
1297 | if (put_user(sizeof(assoc_id), optlen)) | ||
1298 | return -EFAULT; | ||
1299 | } | ||
1300 | |||
1301 | return err; | ||
1302 | } | ||
1303 | |||
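
From user space the new option is driven through getsockopt() even though it connects: the caller leaves room for the association id at the front of the buffer, packs the addresses behind it, and reads the id back on success or EINPROGRESS. A hedged sketch (fd and peer_addr are assumed to exist; SCTP_SOCKOPT_CONNECTX3 comes from the sctp user headers):

	char buf[sizeof(sctp_assoc_t) + sizeof(peer_addr)];
	socklen_t len = sizeof(buf);
	sctp_assoc_t assoc_id = 0;

	memcpy(buf + sizeof(sctp_assoc_t), &peer_addr, sizeof(peer_addr));
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
		       buf, &len) == 0 || errno == EINPROGRESS)
		memcpy(&assoc_id, buf, sizeof(assoc_id));	/* id written back */
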
1267 | /* API 3.1.4 close() - UDP Style Syntax | 1304 | /* API 3.1.4 close() - UDP Style Syntax |
1268 | * Applications use close() to perform graceful shutdown (as described in | 1305 | * Applications use close() to perform graceful shutdown (as described in |
1269 | * Section 10.1 of [SCTP]) on ALL the associations currently represented | 1306 | * Section 10.1 of [SCTP]) on ALL the associations currently represented |
@@ -1844,7 +1881,7 @@ static int sctp_skb_pull(struct sk_buff *skb, int len) | |||
1844 | len -= skb_len; | 1881 | len -= skb_len; |
1845 | __skb_pull(skb, skb_len); | 1882 | __skb_pull(skb, skb_len); |
1846 | 1883 | ||
1847 | for (list = skb_shinfo(skb)->frag_list; list; list = list->next) { | 1884 | skb_walk_frags(skb, list) { |
1848 | rlen = sctp_skb_pull(list, len); | 1885 | rlen = sctp_skb_pull(list, len); |
1849 | skb->len -= (len-rlen); | 1886 | skb->len -= (len-rlen); |
1850 | skb->data_len -= (len-rlen); | 1887 | skb->data_len -= (len-rlen); |
@@ -5578,6 +5615,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | |||
5578 | retval = sctp_getsockopt_local_addrs(sk, len, optval, | 5615 | retval = sctp_getsockopt_local_addrs(sk, len, optval, |
5579 | optlen); | 5616 | optlen); |
5580 | break; | 5617 | break; |
5618 | case SCTP_SOCKOPT_CONNECTX3: | ||
5619 | retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); | ||
5620 | break; | ||
5581 | case SCTP_DEFAULT_SEND_PARAM: | 5621 | case SCTP_DEFAULT_SEND_PARAM: |
5582 | retval = sctp_getsockopt_default_send_param(sk, len, | 5622 | retval = sctp_getsockopt_default_send_param(sk, len, |
5583 | optval, optlen); | 5623 | optval, optlen); |
@@ -6620,7 +6660,7 @@ static void sctp_sock_rfree_frag(struct sk_buff *skb) | |||
6620 | goto done; | 6660 | goto done; |
6621 | 6661 | ||
6622 | /* Don't forget the fragments. */ | 6662 | /* Don't forget the fragments. */ |
6623 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) | 6663 | skb_walk_frags(skb, frag) |
6624 | sctp_sock_rfree_frag(frag); | 6664 | sctp_sock_rfree_frag(frag); |
6625 | 6665 | ||
6626 | done: | 6666 | done: |
@@ -6635,7 +6675,7 @@ static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) | |||
6635 | goto done; | 6675 | goto done; |
6636 | 6676 | ||
6637 | /* Don't forget the fragments. */ | 6677 | /* Don't forget the fragments. */ |
6638 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) | 6678 | skb_walk_frags(skb, frag) |
6639 | sctp_skb_set_owner_r_frag(frag, sk); | 6679 | sctp_skb_set_owner_r_frag(frag, sk); |
6640 | 6680 | ||
6641 | done: | 6681 | done: |
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index f58e994e6852..63eabbc71298 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -49,8 +49,8 @@ static int zero = 0; | |||
49 | static int one = 1; | 49 | static int one = 1; |
50 | static int timer_max = 86400000; /* ms in one day */ | 50 | static int timer_max = 86400000; /* ms in one day */ |
51 | static int int_max = INT_MAX; | 51 | static int int_max = INT_MAX; |
52 | static long sack_timer_min = 1; | 52 | static int sack_timer_min = 1; |
53 | static long sack_timer_max = 500; | 53 | static int sack_timer_max = 500; |
54 | 54 | ||
55 | extern int sysctl_sctp_mem[3]; | 55 | extern int sysctl_sctp_mem[3]; |
56 | extern int sysctl_sctp_rmem[3]; | 56 | extern int sysctl_sctp_rmem[3]; |
@@ -223,7 +223,7 @@ static ctl_table sctp_table[] = { | |||
223 | .ctl_name = NET_SCTP_SACK_TIMEOUT, | 223 | .ctl_name = NET_SCTP_SACK_TIMEOUT, |
224 | .procname = "sack_timeout", | 224 | .procname = "sack_timeout", |
225 | .data = &sctp_sack_timeout, | 225 | .data = &sctp_sack_timeout, |
226 | .maxlen = sizeof(long), | 226 | .maxlen = sizeof(int), |
227 | .mode = 0644, | 227 | .mode = 0644, |
228 | .proc_handler = proc_dointvec_minmax, | 228 | .proc_handler = proc_dointvec_minmax, |
229 | .strategy = sysctl_intvec, | 229 | .strategy = sysctl_intvec, |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 5f186ca550d7..8b3560fd876d 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -976,9 +976,8 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | |||
976 | * In general, the skb passed from IP can have only 1 level of | 976 | * In general, the skb passed from IP can have only 1 level of |
977 | * fragments. But we allow multiple levels of fragments. | 977 | * fragments. But we allow multiple levels of fragments. |
978 | */ | 978 | */ |
979 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | 979 | skb_walk_frags(skb, frag) |
980 | sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); | 980 | sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); |
981 | } | ||
982 | } | 981 | } |
983 | 982 | ||
984 | /* Do accounting for bytes just read by user and release the references to | 983 | /* Do accounting for bytes just read by user and release the references to |
@@ -1003,7 +1002,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1003 | goto done; | 1002 | goto done; |
1004 | 1003 | ||
1005 | /* Don't forget the fragments. */ | 1004 | /* Don't forget the fragments. */ |
1006 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | 1005 | skb_walk_frags(skb, frag) { |
1007 | /* NOTE: skb_shinfos are recursive. Although IP returns | 1006 | /* NOTE: skb_shinfos are recursive. Although IP returns |
1008 | * skb's with only 1 level of fragments, SCTP reassembly can | 1007 | * skb's with only 1 level of fragments, SCTP reassembly can |
1009 | * increase the levels. | 1008 | * increase the levels. |
@@ -1026,7 +1025,7 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) | |||
1026 | goto done; | 1025 | goto done; |
1027 | 1026 | ||
1028 | /* Don't forget the fragments. */ | 1027 | /* Don't forget the fragments. */ |
1029 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | 1028 | skb_walk_frags(skb, frag) { |
1030 | /* NOTE: skb_shinfos are recursive. Although IP returns | 1029 | /* NOTE: skb_shinfos are recursive. Although IP returns |
1031 | * skb's with only 1 level of fragments, SCTP reassembly can | 1030 | * skb's with only 1 level of fragments, SCTP reassembly can |
1032 | * increase the levels. | 1031 | * increase the levels. |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index e630b38a6047..66d458fc6920 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -1548,6 +1548,7 @@ static void __exit exit_rpcsec_gss(void) | |||
1548 | { | 1548 | { |
1549 | gss_svc_shutdown(); | 1549 | gss_svc_shutdown(); |
1550 | rpcauth_unregister(&authgss_ops); | 1550 | rpcauth_unregister(&authgss_ops); |
1551 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
1551 | } | 1552 | } |
1552 | 1553 | ||
1553 | MODULE_LICENSE("GPL"); | 1554 | MODULE_LICENSE("GPL"); |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index e18596146013..6c2d61586551 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -918,7 +918,7 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
918 | UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); | 918 | UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS); |
919 | 919 | ||
920 | /* Something worked... */ | 920 | /* Something worked... */ |
921 | dst_confirm(skb->dst); | 921 | dst_confirm(skb_dst(skb)); |
922 | 922 | ||
923 | xprt_adjust_cwnd(task, copied); | 923 | xprt_adjust_cwnd(task, copied); |
924 | xprt_update_rtt(task); | 924 | xprt_update_rtt(task); |
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig index 1b46747a5f5a..e4d97ab476d5 100644 --- a/net/wimax/Kconfig +++ b/net/wimax/Kconfig | |||
@@ -1,23 +1,10 @@ | |||
1 | # | 1 | # |
2 | # WiMAX LAN device configuration | 2 | # WiMAX LAN device configuration |
3 | # | 3 | # |
4 | # Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a | ||
5 | # module if WIMAX is to be linked in. The WiMAX code is done in such a | ||
6 | # way that it doesn't require an explicit dependency on RFKILL in | ||
7 | # case an embedded system wants to rip it out. | ||
8 | # | ||
9 | # As well, enablement of the RFKILL code means we need the INPUT layer | ||
10 | # support to inject events coming from hw rfkill switches. That | ||
11 | # dependency could be killed if input.h provided appropriate means to | ||
12 | # work when input is disabled. | ||
13 | |||
14 | comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled" | ||
15 | depends on INPUT = n && RFKILL != n | ||
16 | 4 | ||
17 | menuconfig WIMAX | 5 | menuconfig WIMAX |
18 | tristate "WiMAX Wireless Broadband support" | 6 | tristate "WiMAX Wireless Broadband support" |
19 | depends on (y && RFKILL != m) || m | 7 | depends on RFKILL || !RFKILL |
20 | depends on (INPUT && RFKILL != n) || RFKILL = n | ||
21 | help | 8 | help |
22 | 9 | ||
23 | Select to configure support for devices that provide | 10 | Select to configure support for devices that provide |
diff --git a/net/wimax/Makefile b/net/wimax/Makefile index 5b80b941c2c9..8f1510d0cc2b 100644 --- a/net/wimax/Makefile +++ b/net/wimax/Makefile | |||
@@ -6,6 +6,7 @@ wimax-y := \ | |||
6 | op-msg.o \ | 6 | op-msg.o \ |
7 | op-reset.o \ | 7 | op-reset.o \ |
8 | op-rfkill.o \ | 8 | op-rfkill.o \ |
9 | op-state-get.o \ | ||
9 | stack.o | 10 | stack.o |
10 | 11 | ||
11 | wimax-$(CONFIG_DEBUG_FS) += debugfs.o | 12 | wimax-$(CONFIG_DEBUG_FS) += debugfs.o |
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h index 1c29123a3aa9..0975adba6b71 100644 --- a/net/wimax/debug-levels.h +++ b/net/wimax/debug-levels.h | |||
@@ -36,6 +36,7 @@ enum d_module { | |||
36 | D_SUBMODULE_DECLARE(op_msg), | 36 | D_SUBMODULE_DECLARE(op_msg), |
37 | D_SUBMODULE_DECLARE(op_reset), | 37 | D_SUBMODULE_DECLARE(op_reset), |
38 | D_SUBMODULE_DECLARE(op_rfkill), | 38 | D_SUBMODULE_DECLARE(op_rfkill), |
39 | D_SUBMODULE_DECLARE(op_state_get), | ||
39 | D_SUBMODULE_DECLARE(stack), | 40 | D_SUBMODULE_DECLARE(stack), |
40 | }; | 41 | }; |
41 | 42 | ||
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c index 94d216a46407..6c9bedb7431e 100644 --- a/net/wimax/debugfs.c +++ b/net/wimax/debugfs.c | |||
@@ -61,6 +61,7 @@ int wimax_debugfs_add(struct wimax_dev *wimax_dev) | |||
61 | __debugfs_register("wimax_dl_", op_msg, dentry); | 61 | __debugfs_register("wimax_dl_", op_msg, dentry); |
62 | __debugfs_register("wimax_dl_", op_reset, dentry); | 62 | __debugfs_register("wimax_dl_", op_reset, dentry); |
63 | __debugfs_register("wimax_dl_", op_rfkill, dentry); | 63 | __debugfs_register("wimax_dl_", op_rfkill, dentry); |
64 | __debugfs_register("wimax_dl_", op_state_get, dentry); | ||
64 | __debugfs_register("wimax_dl_", stack, dentry); | 65 | __debugfs_register("wimax_dl_", stack, dentry); |
65 | result = 0; | 66 | result = 0; |
66 | out: | 67 | out: |
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 9ad4d893a566..d631a17186bc 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c | |||
@@ -108,6 +108,12 @@ | |||
108 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as | 108 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as |
109 | * wimax_msg_send() depends on skb->data being placed at the | 109 | * wimax_msg_send() depends on skb->data being placed at the |
110 | * beginning of the user message. | 110 | * beginning of the user message. |
111 | * | ||
112 | * Unlike other WiMAX stack calls, this call can be used way early, | ||
113 | * even before wimax_dev_add() is called, as long as the | ||
114 | * wimax_dev->net_dev pointer is set to point to a proper | ||
115 | * net_dev. This is so that drivers can use it early in case they need | ||
116 | * to send stuff around or communicate with user space. | ||
111 | */ | 117 | */ |
112 | struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, | 118 | struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, |
113 | const char *pipe_name, | 119 | const char *pipe_name, |
@@ -115,7 +121,7 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, | |||
115 | gfp_t gfp_flags) | 121 | gfp_t gfp_flags) |
116 | { | 122 | { |
117 | int result; | 123 | int result; |
118 | struct device *dev = wimax_dev->net_dev->dev.parent; | 124 | struct device *dev = wimax_dev_to_dev(wimax_dev); |
119 | size_t msg_size; | 125 | size_t msg_size; |
120 | void *genl_msg; | 126 | void *genl_msg; |
121 | struct sk_buff *skb; | 127 | struct sk_buff *skb; |
@@ -161,7 +167,6 @@ error_genlmsg_put: | |||
161 | error_new: | 167 | error_new: |
162 | nlmsg_free(skb); | 168 | nlmsg_free(skb); |
163 | return ERR_PTR(result); | 169 | return ERR_PTR(result); |
164 | |||
165 | } | 170 | } |
166 | EXPORT_SYMBOL_GPL(wimax_msg_alloc); | 171 | EXPORT_SYMBOL_GPL(wimax_msg_alloc); |
167 | 172 | ||
@@ -256,10 +261,16 @@ EXPORT_SYMBOL_GPL(wimax_msg_len); | |||
256 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as | 261 | * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as |
257 | * wimax_msg_send() depends on skb->data being placed at the | 262 | * wimax_msg_send() depends on skb->data being placed at the |
258 | * beginning of the user message. | 263 | * beginning of the user message. |
264 | * | ||
265 | * Unlike other WiMAX stack calls, this call can be used way early, | ||
266 | * even before wimax_dev_add() is called, as long as the | ||
267 | * wimax_dev->net_dev pointer is set to point to a proper | ||
268 | * net_dev. This is so that drivers can use it early in case they need | ||
269 | * to send stuff around or communicate with user space. | ||
259 | */ | 270 | */ |
260 | int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) | 271 | int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb) |
261 | { | 272 | { |
262 | struct device *dev = wimax_dev->net_dev->dev.parent; | 273 | struct device *dev = wimax_dev_to_dev(wimax_dev); |
263 | void *msg = skb->data; | 274 | void *msg = skb->data; |
264 | size_t size = skb->len; | 275 | size_t size = skb->len; |
265 | might_sleep(); | 276 | might_sleep(); |
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index a3616e2ccb8a..bb102e4aa3e9 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c | |||
@@ -29,8 +29,8 @@ | |||
29 | * A non-polled generic rfkill device is embedded into the WiMAX | 29 | * A non-polled generic rfkill device is embedded into the WiMAX |
30 | * subsystem's representation of a device. | 30 | * subsystem's representation of a device. |
31 | * | 31 | * |
32 | * FIXME: Need polled support? use a timer or add the implementation | 32 | * FIXME: Need polled support? Let drivers provide a poll routine |
33 | * to the stack. | 33 | * and hand it to rfkill ops then? |
34 | * | 34 | * |
35 | * All device drivers have to do is after wimax_dev_init(), call | 35 | * All device drivers have to do is after wimax_dev_init(), call |
36 | * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update | 36 | * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update |
@@ -43,7 +43,7 @@ | |||
43 | * wimax_rfkill() Kernel calling wimax_rfkill() | 43 | * wimax_rfkill() Kernel calling wimax_rfkill() |
44 | * __wimax_rf_toggle_radio() | 44 | * __wimax_rf_toggle_radio() |
45 | * | 45 | * |
46 | * wimax_rfkill_toggle_radio() RF-Kill subsystem calling | 46 | * wimax_rfkill_set_radio_block() RF-Kill subsystem calling |
47 | * __wimax_rf_toggle_radio() | 47 | * __wimax_rf_toggle_radio() |
48 | * | 48 | * |
49 | * __wimax_rf_toggle_radio() | 49 | * __wimax_rf_toggle_radio() |
@@ -65,15 +65,11 @@ | |||
65 | #include <linux/wimax.h> | 65 | #include <linux/wimax.h> |
66 | #include <linux/security.h> | 66 | #include <linux/security.h> |
67 | #include <linux/rfkill.h> | 67 | #include <linux/rfkill.h> |
68 | #include <linux/input.h> | ||
69 | #include "wimax-internal.h" | 68 | #include "wimax-internal.h" |
70 | 69 | ||
71 | #define D_SUBMODULE op_rfkill | 70 | #define D_SUBMODULE op_rfkill |
72 | #include "debug-levels.h" | 71 | #include "debug-levels.h" |
73 | 72 | ||
74 | #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) | ||
75 | |||
76 | |||
77 | /** | 73 | /** |
78 | * wimax_report_rfkill_hw - Reports changes in the hardware RF switch | 74 | * wimax_report_rfkill_hw - Reports changes in the hardware RF switch |
79 | * | 75 | * |
@@ -99,7 +95,6 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | |||
99 | int result; | 95 | int result; |
100 | struct device *dev = wimax_dev_to_dev(wimax_dev); | 96 | struct device *dev = wimax_dev_to_dev(wimax_dev); |
101 | enum wimax_st wimax_state; | 97 | enum wimax_st wimax_state; |
102 | enum rfkill_state rfkill_state; | ||
103 | 98 | ||
104 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | 99 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); |
105 | BUG_ON(state == WIMAX_RF_QUERY); | 100 | BUG_ON(state == WIMAX_RF_QUERY); |
@@ -112,16 +107,15 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | |||
112 | 107 | ||
113 | if (state != wimax_dev->rf_hw) { | 108 | if (state != wimax_dev->rf_hw) { |
114 | wimax_dev->rf_hw = state; | 109 | wimax_dev->rf_hw = state; |
115 | rfkill_state = state == WIMAX_RF_ON ? | ||
116 | RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED; | ||
117 | if (wimax_dev->rf_hw == WIMAX_RF_ON | 110 | if (wimax_dev->rf_hw == WIMAX_RF_ON |
118 | && wimax_dev->rf_sw == WIMAX_RF_ON) | 111 | && wimax_dev->rf_sw == WIMAX_RF_ON) |
119 | wimax_state = WIMAX_ST_READY; | 112 | wimax_state = WIMAX_ST_READY; |
120 | else | 113 | else |
121 | wimax_state = WIMAX_ST_RADIO_OFF; | 114 | wimax_state = WIMAX_ST_RADIO_OFF; |
115 | |||
116 | rfkill_set_hw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF); | ||
117 | |||
122 | __wimax_state_change(wimax_dev, wimax_state); | 118 | __wimax_state_change(wimax_dev, wimax_state); |
123 | input_report_key(wimax_dev->rfkill_input, KEY_WIMAX, | ||
124 | rfkill_state); | ||
125 | } | 119 | } |
126 | error_not_ready: | 120 | error_not_ready: |
127 | mutex_unlock(&wimax_dev->mutex); | 121 | mutex_unlock(&wimax_dev->mutex); |
@@ -174,6 +168,7 @@ void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev, | |||
174 | else | 168 | else |
175 | wimax_state = WIMAX_ST_RADIO_OFF; | 169 | wimax_state = WIMAX_ST_RADIO_OFF; |
176 | __wimax_state_change(wimax_dev, wimax_state); | 170 | __wimax_state_change(wimax_dev, wimax_state); |
171 | rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF); | ||
177 | } | 172 | } |
178 | error_not_ready: | 173 | error_not_ready: |
179 | mutex_unlock(&wimax_dev->mutex); | 174 | mutex_unlock(&wimax_dev->mutex); |
@@ -249,36 +244,31 @@ out_no_change: | |||
249 | * | 244 | * |
250 | * NOTE: This call will block until the operation is completed. | 245 | * NOTE: This call will block until the operation is completed. |
251 | */ | 246 | */ |
252 | static | 247 | static int wimax_rfkill_set_radio_block(void *data, bool blocked) |
253 | int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state) | ||
254 | { | 248 | { |
255 | int result; | 249 | int result; |
256 | struct wimax_dev *wimax_dev = data; | 250 | struct wimax_dev *wimax_dev = data; |
257 | struct device *dev = wimax_dev_to_dev(wimax_dev); | 251 | struct device *dev = wimax_dev_to_dev(wimax_dev); |
258 | enum wimax_rf_state rf_state; | 252 | enum wimax_rf_state rf_state; |
259 | 253 | ||
260 | d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state); | 254 | d_fnstart(3, dev, "(wimax_dev %p blocked %u)\n", wimax_dev, blocked); |
261 | switch (state) { | 255 | rf_state = WIMAX_RF_ON; |
262 | case RFKILL_STATE_SOFT_BLOCKED: | 256 | if (blocked) |
263 | rf_state = WIMAX_RF_OFF; | 257 | rf_state = WIMAX_RF_OFF; |
264 | break; | ||
265 | case RFKILL_STATE_UNBLOCKED: | ||
266 | rf_state = WIMAX_RF_ON; | ||
267 | break; | ||
268 | default: | ||
269 | BUG(); | ||
270 | } | ||
271 | mutex_lock(&wimax_dev->mutex); | 258 | mutex_lock(&wimax_dev->mutex); |
272 | if (wimax_dev->state <= __WIMAX_ST_QUIESCING) | 259 | if (wimax_dev->state <= __WIMAX_ST_QUIESCING) |
273 | result = 0; /* just pretend it didn't happen */ | 260 | result = 0; |
274 | else | 261 | else |
275 | result = __wimax_rf_toggle_radio(wimax_dev, rf_state); | 262 | result = __wimax_rf_toggle_radio(wimax_dev, rf_state); |
276 | mutex_unlock(&wimax_dev->mutex); | 263 | mutex_unlock(&wimax_dev->mutex); |
277 | d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n", | 264 | d_fnend(3, dev, "(wimax_dev %p blocked %u) = %d\n", |
278 | wimax_dev, state, result); | 265 | wimax_dev, blocked, result); |
279 | return result; | 266 | return result; |
280 | } | 267 | } |
281 | 268 | ||
269 | static const struct rfkill_ops wimax_rfkill_ops = { | ||
270 | .set_block = wimax_rfkill_set_radio_block, | ||
271 | }; | ||
282 | 272 | ||
283 | /** | 273 | /** |
284 | * wimax_rfkill - Set the software RF switch state for a WiMAX device | 274 | * wimax_rfkill - Set the software RF switch state for a WiMAX device |
@@ -322,6 +312,7 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state) | |||
322 | result = __wimax_rf_toggle_radio(wimax_dev, state); | 312 | result = __wimax_rf_toggle_radio(wimax_dev, state); |
323 | if (result < 0) | 313 | if (result < 0) |
324 | goto error; | 314 | goto error; |
315 | rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF); | ||
325 | break; | 316 | break; |
326 | case WIMAX_RF_QUERY: | 317 | case WIMAX_RF_QUERY: |
327 | break; | 318 | break; |
@@ -349,40 +340,20 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev) | |||
349 | { | 340 | { |
350 | int result; | 341 | int result; |
351 | struct rfkill *rfkill; | 342 | struct rfkill *rfkill; |
352 | struct input_dev *input_dev; | ||
353 | struct device *dev = wimax_dev_to_dev(wimax_dev); | 343 | struct device *dev = wimax_dev_to_dev(wimax_dev); |
354 | 344 | ||
355 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | 345 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); |
356 | /* Initialize RF Kill */ | 346 | /* Initialize RF Kill */ |
357 | result = -ENOMEM; | 347 | result = -ENOMEM; |
358 | rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX); | 348 | rfkill = rfkill_alloc(wimax_dev->name, dev, RFKILL_TYPE_WIMAX, |
349 | &wimax_rfkill_ops, wimax_dev); | ||
359 | if (rfkill == NULL) | 350 | if (rfkill == NULL) |
360 | goto error_rfkill_allocate; | 351 | goto error_rfkill_allocate; |
352 | |||
353 | d_printf(1, dev, "rfkill %p\n", rfkill); | ||
354 | |||
361 | wimax_dev->rfkill = rfkill; | 355 | wimax_dev->rfkill = rfkill; |
362 | 356 | ||
363 | rfkill->name = wimax_dev->name; | ||
364 | rfkill->state = RFKILL_STATE_UNBLOCKED; | ||
365 | rfkill->data = wimax_dev; | ||
366 | rfkill->toggle_radio = wimax_rfkill_toggle_radio; | ||
367 | |||
368 | /* Initialize the input device for the hw key */ | ||
369 | input_dev = input_allocate_device(); | ||
370 | if (input_dev == NULL) | ||
371 | goto error_input_allocate; | ||
372 | wimax_dev->rfkill_input = input_dev; | ||
373 | d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev); | ||
374 | |||
375 | input_dev->name = wimax_dev->name; | ||
376 | /* FIXME: get a real device bus ID and stuff? do we care? */ | ||
377 | input_dev->id.bustype = BUS_HOST; | ||
378 | input_dev->id.vendor = 0xffff; | ||
379 | input_dev->evbit[0] = BIT(EV_KEY); | ||
380 | set_bit(KEY_WIMAX, input_dev->keybit); | ||
381 | |||
382 | /* Register both */ | ||
383 | result = input_register_device(wimax_dev->rfkill_input); | ||
384 | if (result < 0) | ||
385 | goto error_input_register; | ||
386 | result = rfkill_register(wimax_dev->rfkill); | 357 | result = rfkill_register(wimax_dev->rfkill); |
387 | if (result < 0) | 358 | if (result < 0) |
388 | goto error_rfkill_register; | 359 | goto error_rfkill_register; |
@@ -394,17 +365,8 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev) | |||
394 | d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev); | 365 | d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev); |
395 | return 0; | 366 | return 0; |
396 | 367 | ||
397 | /* if rfkill_register() succeeds, can't use rfkill_free() any | ||
398 | * more, only rfkill_unregister() [it owns the refcount]; with | ||
399 | * the input device we have the same issue--hence the if. */ | ||
400 | error_rfkill_register: | 368 | error_rfkill_register: |
401 | input_unregister_device(wimax_dev->rfkill_input); | 369 | rfkill_destroy(wimax_dev->rfkill); |
402 | wimax_dev->rfkill_input = NULL; | ||
403 | error_input_register: | ||
404 | if (wimax_dev->rfkill_input) | ||
405 | input_free_device(wimax_dev->rfkill_input); | ||
406 | error_input_allocate: | ||
407 | rfkill_free(wimax_dev->rfkill); | ||
408 | error_rfkill_allocate: | 370 | error_rfkill_allocate: |
409 | d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); | 371 | d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); |
410 | return result; | 372 | return result; |
@@ -423,45 +385,12 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev) | |||
423 | { | 385 | { |
424 | struct device *dev = wimax_dev_to_dev(wimax_dev); | 386 | struct device *dev = wimax_dev_to_dev(wimax_dev); |
425 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); | 387 | d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev); |
426 | rfkill_unregister(wimax_dev->rfkill); /* frees */ | 388 | rfkill_unregister(wimax_dev->rfkill); |
427 | input_unregister_device(wimax_dev->rfkill_input); | 389 | rfkill_destroy(wimax_dev->rfkill); |
428 | d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev); | 390 | d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev); |
429 | } | 391 | } |
430 | 392 | ||
431 | 393 | ||
432 | #else /* #ifdef CONFIG_RFKILL */ | ||
433 | |||
434 | void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev, | ||
435 | enum wimax_rf_state state) | ||
436 | { | ||
437 | } | ||
438 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw); | ||
439 | |||
440 | void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev, | ||
441 | enum wimax_rf_state state) | ||
442 | { | ||
443 | } | ||
444 | EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw); | ||
445 | |||
446 | int wimax_rfkill(struct wimax_dev *wimax_dev, | ||
447 | enum wimax_rf_state state) | ||
448 | { | ||
449 | return WIMAX_RF_ON << 1 | WIMAX_RF_ON; | ||
450 | } | ||
451 | EXPORT_SYMBOL_GPL(wimax_rfkill); | ||
452 | |||
453 | int wimax_rfkill_add(struct wimax_dev *wimax_dev) | ||
454 | { | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | void wimax_rfkill_rm(struct wimax_dev *wimax_dev) | ||
459 | { | ||
460 | } | ||
461 | |||
462 | #endif /* #ifdef CONFIG_RFKILL */ | ||
463 | |||
464 | |||
465 | /* | 394 | /* |
466 | * Exporting to user space over generic netlink | 395 | * Exporting to user space over generic netlink |
467 | * | 396 | * |
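
The op-rfkill.c hunks above track the rfkill core rewrite: rfkill_allocate() plus a hand-rolled input device for KEY_WIMAX gives way to rfkill_alloc(), which takes a struct rfkill_ops whose set_block() callback receives a plain bool; input events and the blocked/unblocked bookkeeping now live in the rfkill core. A minimal sketch of the new registration pattern, as a hypothetical driver would use it (my_dev, my_radio_power() and my_rfkill_setup() are illustrative stand-ins, not part of the patch):

    #include <linux/device.h>
    #include <linux/rfkill.h>

    struct my_dev {
            struct rfkill *rfkill;
    };

    /* Hypothetical hardware hook: power the radio on or off. */
    static int my_radio_power(struct my_dev *my, bool on)
    {
            return 0;
    }

    /* rfkill core calls this on soft-block changes; blocked == true
     * means the radio must go off. */
    static int my_set_block(void *data, bool blocked)
    {
            return my_radio_power(data, !blocked);
    }

    static const struct rfkill_ops my_rfkill_ops = {
            .set_block = my_set_block,
    };

    static int my_rfkill_setup(struct my_dev *my, struct device *dev)
    {
            int err;

            my->rfkill = rfkill_alloc("my_dev", dev, RFKILL_TYPE_WIMAX,
                                      &my_rfkill_ops, my);
            if (!my->rfkill)
                    return -ENOMEM;

            err = rfkill_register(my->rfkill);
            if (err) {
                    /* Allocated but never registered: rfkill_destroy()
                     * is the matching teardown, as in wimax_rfkill_add(). */
                    rfkill_destroy(my->rfkill);
                    return err;
            }
            return 0;
    }

Note the simpler error path: under the old API, rfkill_free() was only legal before rfkill_register() succeeded, which is exactly what the removed comment was warning about.
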
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c new file mode 100644 index 000000000000..a76b8fcb056d --- /dev/null +++ b/net/wimax/op-state-get.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Linux WiMAX | ||
3 | * Implement and export a method for getting a WiMAX device's current state | ||
4 | * | ||
5 | * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> | ||
6 | * | ||
7 | * Based on previous WiMAX core work by: | ||
8 | * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com> | ||
9 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License version | ||
13 | * 2 as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
23 | * 02110-1301, USA. | ||
24 | */ | ||
25 | |||
26 | #include <net/wimax.h> | ||
27 | #include <net/genetlink.h> | ||
28 | #include <linux/wimax.h> | ||
29 | #include <linux/security.h> | ||
30 | #include "wimax-internal.h" | ||
31 | |||
32 | #define D_SUBMODULE op_state_get | ||
33 | #include "debug-levels.h" | ||
34 | |||
35 | |||
36 | static const | ||
37 | struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = { | ||
38 | [WIMAX_GNL_STGET_IFIDX] = { | ||
39 | .type = NLA_U32, | ||
40 | }, | ||
41 | }; | ||
42 | |||
43 | |||
44 | /* | ||
45 | * Exporting to user space over generic netlink | ||
46 | * | ||
47 | * Parse the state get command from user space, return a combination | ||
48 | * value that describes the current state. | ||
49 | * | ||
50 | * No reply attributes; the state travels in the return value. | ||
51 | */ | ||
52 | static | ||
53 | int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info) | ||
54 | { | ||
55 | int result, ifindex; | ||
56 | struct wimax_dev *wimax_dev; | ||
57 | struct device *dev; | ||
58 | |||
59 | d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); | ||
60 | result = -ENODEV; | ||
61 | if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) { | ||
62 | printk(KERN_ERR "WIMAX_GNL_OP_STATE_GET: can't find IFIDX " | ||
63 | "attribute\n"); | ||
64 | goto error_no_wimax_dev; | ||
65 | } | ||
66 | ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]); | ||
67 | wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); | ||
68 | if (wimax_dev == NULL) | ||
69 | goto error_no_wimax_dev; | ||
70 | dev = wimax_dev_to_dev(wimax_dev); | ||
71 | /* Execute the operation and send the result back to user space */ | ||
72 | result = wimax_state_get(wimax_dev); | ||
73 | dev_put(wimax_dev->net_dev); | ||
74 | error_no_wimax_dev: | ||
75 | d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result); | ||
76 | return result; | ||
77 | } | ||
78 | |||
79 | |||
80 | struct genl_ops wimax_gnl_state_get = { | ||
81 | .cmd = WIMAX_GNL_OP_STATE_GET, | ||
82 | .flags = GENL_ADMIN_PERM, | ||
83 | .policy = wimax_gnl_state_get_policy, | ||
84 | .doit = wimax_gnl_doit_state_get, | ||
85 | .dumpit = NULL, | ||
86 | }; | ||
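
Note that the doit above hands wimax_state_get()'s result straight back as the handler's return value, so userspace reads the device state out of the netlink ACK rather than out of a reply attribute. A sketch of the accessor it relies on, assuming the usual wimax_dev->mutex discipline (the locking detail is an assumption; the contract is simply "return the current state"):

    /* Sketch: serialize against state changes and return the state. */
    enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
    {
            enum wimax_st state;

            mutex_lock(&wimax_dev->mutex);
            state = wimax_dev->state;
            mutex_unlock(&wimax_dev->mutex);
            return state;
    }
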
diff --git a/net/wimax/stack.c b/net/wimax/stack.c index 933e1422b09f..79fb7d7c640f 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c | |||
@@ -402,13 +402,15 @@ EXPORT_SYMBOL_GPL(wimax_dev_init); | |||
402 | extern struct genl_ops | 402 | extern struct genl_ops |
403 | wimax_gnl_msg_from_user, | 403 | wimax_gnl_msg_from_user, |
404 | wimax_gnl_reset, | 404 | wimax_gnl_reset, |
405 | wimax_gnl_rfkill; | 405 | wimax_gnl_rfkill, |
406 | wimax_gnl_state_get; | ||
406 | 407 | ||
407 | static | 408 | static |
408 | struct genl_ops *wimax_gnl_ops[] = { | 409 | struct genl_ops *wimax_gnl_ops[] = { |
409 | &wimax_gnl_msg_from_user, | 410 | &wimax_gnl_msg_from_user, |
410 | &wimax_gnl_reset, | 411 | &wimax_gnl_reset, |
411 | &wimax_gnl_rfkill, | 412 | &wimax_gnl_rfkill, |
413 | &wimax_gnl_state_get, | ||
412 | }; | 414 | }; |
413 | 415 | ||
414 | 416 | ||
@@ -533,6 +535,7 @@ struct d_level D_LEVEL[] = { | |||
533 | D_SUBMODULE_DEFINE(op_msg), | 535 | D_SUBMODULE_DEFINE(op_msg), |
534 | D_SUBMODULE_DEFINE(op_reset), | 536 | D_SUBMODULE_DEFINE(op_reset), |
535 | D_SUBMODULE_DEFINE(op_rfkill), | 537 | D_SUBMODULE_DEFINE(op_rfkill), |
538 | D_SUBMODULE_DEFINE(op_state_get), | ||
536 | D_SUBMODULE_DEFINE(stack), | 539 | D_SUBMODULE_DEFINE(stack), |
537 | }; | 540 | }; |
538 | size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); | 541 | size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); |
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 45005497c634..4428dd5e911d 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config CFG80211 | 1 | config CFG80211 |
2 | tristate "Improved wireless configuration API" | 2 | tristate "Improved wireless configuration API" |
3 | depends on RFKILL || !RFKILL | ||
3 | 4 | ||
4 | config CFG80211_REG_DEBUG | 5 | config CFG80211_REG_DEBUG |
5 | bool "cfg80211 regulatory debugging" | 6 | bool "cfg80211 regulatory debugging" |
diff --git a/net/wireless/core.c b/net/wireless/core.c index a5dbea1da476..d5850292b3df 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/notifier.h> | 13 | #include <linux/notifier.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/rtnetlink.h> | ||
15 | #include <net/genetlink.h> | 16 | #include <net/genetlink.h> |
16 | #include <net/cfg80211.h> | 17 | #include <net/cfg80211.h> |
17 | #include "nl80211.h" | 18 | #include "nl80211.h" |
@@ -227,6 +228,41 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | |||
227 | return 0; | 228 | return 0; |
228 | } | 229 | } |
229 | 230 | ||
231 | static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | ||
232 | { | ||
233 | struct cfg80211_registered_device *drv = data; | ||
234 | |||
235 | drv->ops->rfkill_poll(&drv->wiphy); | ||
236 | } | ||
237 | |||
238 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | ||
239 | { | ||
240 | struct cfg80211_registered_device *drv = data; | ||
241 | struct wireless_dev *wdev; | ||
242 | |||
243 | if (!blocked) | ||
244 | return 0; | ||
245 | |||
246 | rtnl_lock(); | ||
247 | mutex_lock(&drv->devlist_mtx); | ||
248 | |||
249 | list_for_each_entry(wdev, &drv->netdev_list, list) | ||
250 | dev_close(wdev->netdev); | ||
251 | |||
252 | mutex_unlock(&drv->devlist_mtx); | ||
253 | rtnl_unlock(); | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | static void cfg80211_rfkill_sync_work(struct work_struct *work) | ||
259 | { | ||
260 | struct cfg80211_registered_device *drv; | ||
261 | |||
262 | drv = container_of(work, struct cfg80211_registered_device, rfkill_sync); | ||
263 | cfg80211_rfkill_set_block(drv, rfkill_blocked(drv->rfkill)); | ||
264 | } | ||
265 | |||
230 | /* exported functions */ | 266 | /* exported functions */ |
231 | 267 | ||
232 | struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | 268 | struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) |
@@ -274,6 +310,18 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
274 | drv->wiphy.dev.class = &ieee80211_class; | 310 | drv->wiphy.dev.class = &ieee80211_class; |
275 | drv->wiphy.dev.platform_data = drv; | 311 | drv->wiphy.dev.platform_data = drv; |
276 | 312 | ||
313 | drv->rfkill_ops.set_block = cfg80211_rfkill_set_block; | ||
314 | drv->rfkill = rfkill_alloc(dev_name(&drv->wiphy.dev), | ||
315 | &drv->wiphy.dev, RFKILL_TYPE_WLAN, | ||
316 | &drv->rfkill_ops, drv); | ||
317 | |||
318 | if (!drv->rfkill) { | ||
319 | kfree(drv); | ||
320 | return NULL; | ||
321 | } | ||
322 | |||
323 | INIT_WORK(&drv->rfkill_sync, cfg80211_rfkill_sync_work); | ||
324 | |||
277 | /* | 325 | /* |
278 | * Initialize wiphy parameters to IEEE 802.11 MIB default values. | 326 | * Initialize wiphy parameters to IEEE 802.11 MIB default values. |
279 | * Fragmentation and RTS threshold are disabled by default with the | 327 | * Fragmentation and RTS threshold are disabled by default with the |
@@ -347,17 +395,23 @@ int wiphy_register(struct wiphy *wiphy) | |||
347 | /* check and set up bitrates */ | 395 | /* check and set up bitrates */ |
348 | ieee80211_set_bitrate_flags(wiphy); | 396 | ieee80211_set_bitrate_flags(wiphy); |
349 | 397 | ||
398 | res = device_add(&drv->wiphy.dev); | ||
399 | if (res) | ||
400 | return res; | ||
401 | |||
402 | res = rfkill_register(drv->rfkill); | ||
403 | if (res) | ||
404 | goto out_rm_dev; | ||
405 | |||
350 | mutex_lock(&cfg80211_mutex); | 406 | mutex_lock(&cfg80211_mutex); |
351 | 407 | ||
352 | /* set up regulatory info */ | 408 | /* set up regulatory info */ |
353 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); | 409 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); |
354 | 410 | ||
355 | res = device_add(&drv->wiphy.dev); | ||
356 | if (res) | ||
357 | goto out_unlock; | ||
358 | |||
359 | list_add(&drv->list, &cfg80211_drv_list); | 411 | list_add(&drv->list, &cfg80211_drv_list); |
360 | 412 | ||
413 | mutex_unlock(&cfg80211_mutex); | ||
414 | |||
361 | /* add to debugfs */ | 415 | /* add to debugfs */ |
362 | drv->wiphy.debugfsdir = | 416 | drv->wiphy.debugfsdir = |
363 | debugfs_create_dir(wiphy_name(&drv->wiphy), | 417 | debugfs_create_dir(wiphy_name(&drv->wiphy), |
@@ -378,17 +432,39 @@ int wiphy_register(struct wiphy *wiphy) | |||
378 | 432 | ||
379 | cfg80211_debugfs_drv_add(drv); | 433 | cfg80211_debugfs_drv_add(drv); |
380 | 434 | ||
381 | res = 0; | 435 | return 0; |
382 | out_unlock: | 436 | |
383 | mutex_unlock(&cfg80211_mutex); | 437 | out_rm_dev: |
438 | device_del(&drv->wiphy.dev); | ||
384 | return res; | 439 | return res; |
385 | } | 440 | } |
386 | EXPORT_SYMBOL(wiphy_register); | 441 | EXPORT_SYMBOL(wiphy_register); |
387 | 442 | ||
443 | void wiphy_rfkill_start_polling(struct wiphy *wiphy) | ||
444 | { | ||
445 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | ||
446 | |||
447 | if (!drv->ops->rfkill_poll) | ||
448 | return; | ||
449 | drv->rfkill_ops.poll = cfg80211_rfkill_poll; | ||
450 | rfkill_resume_polling(drv->rfkill); | ||
451 | } | ||
452 | EXPORT_SYMBOL(wiphy_rfkill_start_polling); | ||
453 | |||
454 | void wiphy_rfkill_stop_polling(struct wiphy *wiphy) | ||
455 | { | ||
456 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | ||
457 | |||
458 | rfkill_pause_polling(drv->rfkill); | ||
459 | } | ||
460 | EXPORT_SYMBOL(wiphy_rfkill_stop_polling); | ||
461 | |||
388 | void wiphy_unregister(struct wiphy *wiphy) | 462 | void wiphy_unregister(struct wiphy *wiphy) |
389 | { | 463 | { |
390 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 464 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); |
391 | 465 | ||
466 | rfkill_unregister(drv->rfkill); | ||
467 | |||
392 | /* protect the device list */ | 468 | /* protect the device list */ |
393 | mutex_lock(&cfg80211_mutex); | 469 | mutex_lock(&cfg80211_mutex); |
394 | 470 | ||
@@ -425,6 +501,7 @@ EXPORT_SYMBOL(wiphy_unregister); | |||
425 | void cfg80211_dev_free(struct cfg80211_registered_device *drv) | 501 | void cfg80211_dev_free(struct cfg80211_registered_device *drv) |
426 | { | 502 | { |
427 | struct cfg80211_internal_bss *scan, *tmp; | 503 | struct cfg80211_internal_bss *scan, *tmp; |
504 | rfkill_destroy(drv->rfkill); | ||
428 | mutex_destroy(&drv->mtx); | 505 | mutex_destroy(&drv->mtx); |
429 | mutex_destroy(&drv->devlist_mtx); | 506 | mutex_destroy(&drv->devlist_mtx); |
430 | list_for_each_entry_safe(scan, tmp, &drv->bss_list, list) | 507 | list_for_each_entry_safe(scan, tmp, &drv->bss_list, list) |
@@ -438,6 +515,15 @@ void wiphy_free(struct wiphy *wiphy) | |||
438 | } | 515 | } |
439 | EXPORT_SYMBOL(wiphy_free); | 516 | EXPORT_SYMBOL(wiphy_free); |
440 | 517 | ||
518 | void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) | ||
519 | { | ||
520 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | ||
521 | |||
522 | if (rfkill_set_hw_state(drv->rfkill, blocked)) | ||
523 | schedule_work(&drv->rfkill_sync); | ||
524 | } | ||
525 | EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); | ||
526 | |||
441 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | 527 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, |
442 | unsigned long state, | 528 | unsigned long state, |
443 | void *ndev) | 529 | void *ndev) |
@@ -446,7 +532,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
446 | struct cfg80211_registered_device *rdev; | 532 | struct cfg80211_registered_device *rdev; |
447 | 533 | ||
448 | if (!dev->ieee80211_ptr) | 534 | if (!dev->ieee80211_ptr) |
449 | return 0; | 535 | return NOTIFY_DONE; |
450 | 536 | ||
451 | rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); | 537 | rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); |
452 | 538 | ||
@@ -492,9 +578,13 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
492 | } | 578 | } |
493 | mutex_unlock(&rdev->devlist_mtx); | 579 | mutex_unlock(&rdev->devlist_mtx); |
494 | break; | 580 | break; |
581 | case NETDEV_PRE_UP: | ||
582 | if (rfkill_blocked(rdev->rfkill)) | ||
583 | return notifier_from_errno(-ERFKILL); | ||
584 | break; | ||
495 | } | 585 | } |
496 | 586 | ||
497 | return 0; | 587 | return NOTIFY_DONE; |
498 | } | 588 | } |
499 | 589 | ||
500 | static struct notifier_block cfg80211_netdev_notifier = { | 590 | static struct notifier_block cfg80211_netdev_notifier = { |
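
For driver authors, the two exports added above are the whole cfg80211 rfkill contract: report the hardware kill switch with wiphy_rfkill_set_hw_state() (a transition to blocked schedules rfkill_sync, which dev_close()es every netdev on the wiphy), and opt into polling by implementing the rfkill_poll op and calling wiphy_rfkill_start_polling(). A hedged sketch; my_read_hw_switch() stands in for whatever register read reports the switch:

    #include <net/cfg80211.h>

    /* Hypothetical hardware query for the kill-switch line. */
    static bool my_read_hw_switch(void)
    {
            return false;
    }

    /* Wired up as the driver's cfg80211_ops->rfkill_poll hook. */
    static void my_rfkill_poll(struct wiphy *wiphy)
    {
            /* An unblocked -> blocked transition closes our netdevs. */
            wiphy_rfkill_set_hw_state(wiphy, my_read_hw_switch());
    }

The driver would then call wiphy_rfkill_start_polling() once the device is up and wiphy_rfkill_stop_polling() when it stops.
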
diff --git a/net/wireless/core.h b/net/wireless/core.h index ab512bcd8153..bfa340c7abb5 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/kref.h> | 11 | #include <linux/kref.h> |
12 | #include <linux/rbtree.h> | 12 | #include <linux/rbtree.h> |
13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
14 | #include <linux/rfkill.h> | ||
15 | #include <linux/workqueue.h> | ||
14 | #include <net/genetlink.h> | 16 | #include <net/genetlink.h> |
15 | #include <net/cfg80211.h> | 17 | #include <net/cfg80211.h> |
16 | #include "reg.h" | 18 | #include "reg.h" |
@@ -24,6 +26,11 @@ struct cfg80211_registered_device { | |||
24 | * any call is in progress */ | 26 | * any call is in progress */ |
25 | struct mutex mtx; | 27 | struct mutex mtx; |
26 | 28 | ||
29 | /* rfkill support */ | ||
30 | struct rfkill_ops rfkill_ops; | ||
31 | struct rfkill *rfkill; | ||
32 | struct work_struct rfkill_sync; | ||
33 | |||
27 | /* ISO / IEC 3166 alpha2 for which this device is receiving | 34 | /* ISO / IEC 3166 alpha2 for which this device is receiving |
28 | * country IEs on, this can help disregard country IEs from APs | 35 | * country IEs on, this can help disregard country IEs from APs |
29 | * on the same alpha2 quickly. The alpha2 may differ from | 36 | * on the same alpha2 quickly. The alpha2 may differ from |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 4b4d3c8a1aed..24168560ebae 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -1687,6 +1687,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
1687 | if (err) | 1687 | if (err) |
1688 | goto out_rtnl; | 1688 | goto out_rtnl; |
1689 | 1689 | ||
1690 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && | ||
1691 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { | ||
1692 | err = -EINVAL; | ||
1693 | goto out; | ||
1694 | } | ||
1695 | |||
1690 | err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); | 1696 | err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); |
1691 | if (err) | 1697 | if (err) |
1692 | goto out; | 1698 | goto out; |
@@ -1738,7 +1744,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1738 | nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); | 1744 | nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); |
1739 | params.listen_interval = | 1745 | params.listen_interval = |
1740 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); | 1746 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); |
1747 | |||
1741 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); | 1748 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); |
1749 | if (!params.aid || params.aid > IEEE80211_MAX_AID) | ||
1750 | return -EINVAL; | ||
1751 | |||
1742 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) | 1752 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) |
1743 | params.ht_capa = | 1753 | params.ht_capa = |
1744 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); | 1754 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); |
@@ -3559,11 +3569,43 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) | |||
3559 | genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); | 3569 | genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); |
3560 | } | 3570 | } |
3561 | 3571 | ||
3572 | static int nl80211_add_scan_req(struct sk_buff *msg, | ||
3573 | struct cfg80211_registered_device *rdev) | ||
3574 | { | ||
3575 | struct cfg80211_scan_request *req = rdev->scan_req; | ||
3576 | struct nlattr *nest; | ||
3577 | int i; | ||
3578 | |||
3579 | if (WARN_ON(!req)) | ||
3580 | return 0; | ||
3581 | |||
3582 | nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); | ||
3583 | if (!nest) | ||
3584 | goto nla_put_failure; | ||
3585 | for (i = 0; i < req->n_ssids; i++) | ||
3586 | NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); | ||
3587 | nla_nest_end(msg, nest); | ||
3588 | |||
3589 | nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); | ||
3590 | if (!nest) | ||
3591 | goto nla_put_failure; | ||
3592 | for (i = 0; i < req->n_channels; i++) | ||
3593 | NLA_PUT_U32(msg, i, req->channels[i]->center_freq); | ||
3594 | nla_nest_end(msg, nest); | ||
3595 | |||
3596 | if (req->ie) | ||
3597 | NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); | ||
3598 | |||
3599 | return 0; | ||
3600 | nla_put_failure: | ||
3601 | return -ENOBUFS; | ||
3602 | } | ||
3603 | |||
3562 | static int nl80211_send_scan_donemsg(struct sk_buff *msg, | 3604 | static int nl80211_send_scan_donemsg(struct sk_buff *msg, |
3563 | struct cfg80211_registered_device *rdev, | 3605 | struct cfg80211_registered_device *rdev, |
3564 | struct net_device *netdev, | 3606 | struct net_device *netdev, |
3565 | u32 pid, u32 seq, int flags, | 3607 | u32 pid, u32 seq, int flags, |
3566 | u32 cmd) | 3608 | u32 cmd) |
3567 | { | 3609 | { |
3568 | void *hdr; | 3610 | void *hdr; |
3569 | 3611 | ||
@@ -3574,7 +3616,8 @@ static int nl80211_send_scan_donemsg(struct sk_buff *msg, | |||
3574 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | 3616 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); |
3575 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | 3617 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); |
3576 | 3618 | ||
3577 | /* XXX: we should probably bounce back the request? */ | 3619 | /* ignore errors and send incomplete event anyway */ |
3620 | nl80211_add_scan_req(msg, rdev); | ||
3578 | 3621 | ||
3579 | return genlmsg_end(msg, hdr); | 3622 | return genlmsg_end(msg, hdr); |
3580 | 3623 | ||
@@ -3828,7 +3871,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, | |||
3828 | struct sk_buff *msg; | 3871 | struct sk_buff *msg; |
3829 | void *hdr; | 3872 | void *hdr; |
3830 | 3873 | ||
3831 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 3874 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); |
3832 | if (!msg) | 3875 | if (!msg) |
3833 | return; | 3876 | return; |
3834 | 3877 | ||
@@ -3852,7 +3895,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, | |||
3852 | return; | 3895 | return; |
3853 | } | 3896 | } |
3854 | 3897 | ||
3855 | genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); | 3898 | genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC); |
3856 | return; | 3899 | return; |
3857 | 3900 | ||
3858 | nla_put_failure: | 3901 | nla_put_failure: |
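
nl80211_add_scan_req() leans on the NLA_PUT*() convenience macros, which hide a goto to the local nla_put_failure label; that is why every message builder in this file ends with that label. Roughly as then defined in include/net/netlink.h (reproduced from memory, treat as a sketch):

    #define NLA_PUT(skb, attrtype, attrlen, data) \
            do { \
                    if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
                            goto nla_put_failure; \
            } while (0)

    #define NLA_PUT_TYPE(skb, type, attrtype, value) \
            do { \
                    type __tmp = value; \
                    NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
            } while (0)

    #define NLA_PUT_U32(skb, attrtype, value) \
            NLA_PUT_TYPE(skb, u32, attrtype, value)
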
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index f87ac1df2df5..5e14371cda70 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -2129,7 +2129,12 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2129 | * driver wanted to the wiphy to deal with conflicts | 2129 | * driver wanted to the wiphy to deal with conflicts |
2130 | */ | 2130 | */ |
2131 | 2131 | ||
2132 | BUG_ON(request_wiphy->regd); | 2132 | /* |
2133 | * Userspace could have sent two replies with only | ||
2134 | * one kernel request. | ||
2135 | */ | ||
2136 | if (request_wiphy->regd) | ||
2137 | return -EALREADY; | ||
2133 | 2138 | ||
2134 | r = reg_copy_regd(&request_wiphy->regd, rd); | 2139 | r = reg_copy_regd(&request_wiphy->regd, rd); |
2135 | if (r) | 2140 | if (r) |
@@ -2171,7 +2176,13 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2171 | * the country IE rd with what CRDA believes that country should have | 2176 | * the country IE rd with what CRDA believes that country should have |
2172 | */ | 2177 | */ |
2173 | 2178 | ||
2174 | BUG_ON(!country_ie_regdomain); | 2179 | /* |
2180 | * Userspace could have sent two replies with only | ||
2181 | * one kernel request. By the second reply we would have | ||
2182 | * already processed and consumed the country_ie_regdomain. | ||
2183 | */ | ||
2184 | if (!country_ie_regdomain) | ||
2185 | return -EALREADY; | ||
2175 | BUG_ON(rd == country_ie_regdomain); | 2186 | BUG_ON(rd == country_ie_regdomain); |
2176 | 2187 | ||
2177 | /* | 2188 | /* |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index df59440290e5..e95b638b919f 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -29,13 +29,14 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | |||
29 | goto out; | 29 | goto out; |
30 | 30 | ||
31 | WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); | 31 | WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); |
32 | wiphy_to_dev(request->wiphy)->scan_req = NULL; | ||
33 | 32 | ||
34 | if (aborted) | 33 | if (aborted) |
35 | nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev); | 34 | nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev); |
36 | else | 35 | else |
37 | nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); | 36 | nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); |
38 | 37 | ||
38 | wiphy_to_dev(request->wiphy)->scan_req = NULL; | ||
39 | |||
39 | #ifdef CONFIG_WIRELESS_EXT | 40 | #ifdef CONFIG_WIRELESS_EXT |
40 | if (!aborted) { | 41 | if (!aborted) { |
41 | memset(&wrqu, 0, sizeof(wrqu)); | 42 | memset(&wrqu, 0, sizeof(wrqu)); |
diff --git a/net/wireless/util.c b/net/wireless/util.c index d072bff463aa..25550692dda6 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -157,26 +157,25 @@ int cfg80211_validate_key_settings(struct key_params *params, int key_idx, | |||
157 | params->cipher != WLAN_CIPHER_SUITE_WEP104) | 157 | params->cipher != WLAN_CIPHER_SUITE_WEP104) |
158 | return -EINVAL; | 158 | return -EINVAL; |
159 | 159 | ||
160 | /* TODO: add definitions for the lengths to linux/ieee80211.h */ | ||
161 | switch (params->cipher) { | 160 | switch (params->cipher) { |
162 | case WLAN_CIPHER_SUITE_WEP40: | 161 | case WLAN_CIPHER_SUITE_WEP40: |
163 | if (params->key_len != 5) | 162 | if (params->key_len != WLAN_KEY_LEN_WEP40) |
164 | return -EINVAL; | 163 | return -EINVAL; |
165 | break; | 164 | break; |
166 | case WLAN_CIPHER_SUITE_TKIP: | 165 | case WLAN_CIPHER_SUITE_TKIP: |
167 | if (params->key_len != 32) | 166 | if (params->key_len != WLAN_KEY_LEN_TKIP) |
168 | return -EINVAL; | 167 | return -EINVAL; |
169 | break; | 168 | break; |
170 | case WLAN_CIPHER_SUITE_CCMP: | 169 | case WLAN_CIPHER_SUITE_CCMP: |
171 | if (params->key_len != 16) | 170 | if (params->key_len != WLAN_KEY_LEN_CCMP) |
172 | return -EINVAL; | 171 | return -EINVAL; |
173 | break; | 172 | break; |
174 | case WLAN_CIPHER_SUITE_WEP104: | 173 | case WLAN_CIPHER_SUITE_WEP104: |
175 | if (params->key_len != 13) | 174 | if (params->key_len != WLAN_KEY_LEN_WEP104) |
176 | return -EINVAL; | 175 | return -EINVAL; |
177 | break; | 176 | break; |
178 | case WLAN_CIPHER_SUITE_AES_CMAC: | 177 | case WLAN_CIPHER_SUITE_AES_CMAC: |
179 | if (params->key_len != 16) | 178 | if (params->key_len != WLAN_KEY_LEN_AES_CMAC) |
180 | return -EINVAL; | 179 | return -EINVAL; |
181 | break; | 180 | break; |
182 | default: | 181 | default: |
@@ -259,7 +258,7 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) | |||
259 | } | 258 | } |
260 | EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); | 259 | EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); |
261 | 260 | ||
262 | int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) | 261 | static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) |
263 | { | 262 | { |
264 | int ae = meshhdr->flags & MESH_FLAGS_AE; | 263 | int ae = meshhdr->flags & MESH_FLAGS_AE; |
265 | /* 7.1.3.5a.2 */ | 264 | /* 7.1.3.5a.2 */ |
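
The util.c hunk swaps the bare key-length literals for named constants; the values follow directly from the literals they replace, and the removed TODO comment says these definitions belong in linux/ieee80211.h. The enum form below is an assumption, the octet counts are not:

    /* Key lengths in octets, per the replaced literals above. */
    enum {
            WLAN_KEY_LEN_WEP40      = 5,
            WLAN_KEY_LEN_WEP104     = 13,
            WLAN_KEY_LEN_CCMP       = 16,
            WLAN_KEY_LEN_TKIP       = 32,
            WLAN_KEY_LEN_AES_CMAC   = 16,
    };
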
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 711e00a0c9b5..d030c5315672 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -744,3 +744,86 @@ int cfg80211_wext_giwencode(struct net_device *dev, | |||
744 | return err; | 744 | return err; |
745 | } | 745 | } |
746 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode); | 746 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode); |
747 | |||
748 | int cfg80211_wext_siwtxpower(struct net_device *dev, | ||
749 | struct iw_request_info *info, | ||
750 | union iwreq_data *data, char *extra) | ||
751 | { | ||
752 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
753 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
754 | enum tx_power_setting type; | ||
755 | int dbm = 0; | ||
756 | |||
757 | if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) | ||
758 | return -EINVAL; | ||
759 | if (data->txpower.flags & IW_TXPOW_RANGE) | ||
760 | return -EINVAL; | ||
761 | |||
762 | if (!rdev->ops->set_tx_power) | ||
763 | return -EOPNOTSUPP; | ||
764 | |||
765 | /* only change when not disabling */ | ||
766 | if (!data->txpower.disabled) { | ||
767 | rfkill_set_sw_state(rdev->rfkill, false); | ||
768 | |||
769 | if (data->txpower.fixed) { | ||
770 | /* | ||
771 | * wext doesn't support negative fixed values; a negative | ||
772 | * value means "automatic" in the branch below | ||
773 | */ | ||
774 | if (data->txpower.value < 0) | ||
775 | return -EINVAL; | ||
776 | dbm = data->txpower.value; | ||
777 | type = TX_POWER_FIXED; | ||
778 | /* TODO: do regulatory check! */ | ||
779 | } else { | ||
780 | /* | ||
781 | * Automatic power level setting, max being the value | ||
782 | * passed in from userland. | ||
783 | */ | ||
784 | if (data->txpower.value < 0) { | ||
785 | type = TX_POWER_AUTOMATIC; | ||
786 | } else { | ||
787 | dbm = data->txpower.value; | ||
788 | type = TX_POWER_LIMITED; | ||
789 | } | ||
790 | } | ||
791 | } else { | ||
792 | rfkill_set_sw_state(rdev->rfkill, true); | ||
793 | schedule_work(&rdev->rfkill_sync); | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | return rdev->ops->set_tx_power(wdev->wiphy, type, dbm); | ||
798 | } | ||
799 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); | ||
800 | |||
801 | int cfg80211_wext_giwtxpower(struct net_device *dev, | ||
802 | struct iw_request_info *info, | ||
803 | union iwreq_data *data, char *extra) | ||
804 | { | ||
805 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
806 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
807 | int err, val; | ||
808 | |||
809 | if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) | ||
810 | return -EINVAL; | ||
811 | if (data->txpower.flags & IW_TXPOW_RANGE) | ||
812 | return -EINVAL; | ||
813 | |||
814 | if (!rdev->ops->get_tx_power) | ||
815 | return -EOPNOTSUPP; | ||
816 | |||
817 | err = rdev->ops->get_tx_power(wdev->wiphy, &val); | ||
818 | if (err) | ||
819 | return err; | ||
820 | |||
821 | /* wext can't express "automatic", so report a fixed level */ | ||
822 | data->txpower.fixed = 1; | ||
823 | data->txpower.disabled = rfkill_blocked(rdev->rfkill); | ||
824 | data->txpower.value = val; | ||
825 | data->txpower.flags = IW_TXPOW_DBM; | ||
826 | |||
827 | return 0; | ||
828 | } | ||
829 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower); | ||
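
These helpers are meant to be dropped into a driver's wext handler table, so wext tx-power ioctls are answered from cfg80211 state (note how disabling tx power soft-blocks rfkill, and how giwtxpower reports rfkill_blocked() as "disabled"). A hypothetical wiring in the usual wext table style; my_wext_handlers is illustrative, not part of the patch:

    #include <net/cfg80211.h>
    #include <net/iw_handler.h>

    static const iw_handler my_wext_handlers[] = {
            [SIOCSIWTXPOW - SIOCIWFIRST] = (iw_handler) cfg80211_wext_siwtxpower,
            [SIOCGIWTXPOW - SIOCIWFIRST] = (iw_handler) cfg80211_wext_giwtxpower,
    };
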
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 96036cf2216d..d31ccb487730 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -696,8 +696,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
696 | { | 696 | { |
697 | int start = skb_headlen(skb); | 697 | int start = skb_headlen(skb); |
698 | int i, copy = start - offset; | 698 | int i, copy = start - offset; |
699 | int err; | 699 | struct sk_buff *frag_iter; |
700 | struct scatterlist sg; | 700 | struct scatterlist sg; |
701 | int err; | ||
701 | 702 | ||
702 | /* Checksum header. */ | 703 | /* Checksum header. */ |
703 | if (copy > 0) { | 704 | if (copy > 0) { |
@@ -742,28 +743,24 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
742 | start = end; | 743 | start = end; |
743 | } | 744 | } |
744 | 745 | ||
745 | if (skb_shinfo(skb)->frag_list) { | 746 | skb_walk_frags(skb, frag_iter) { |
746 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 747 | int end; |
747 | 748 | ||
748 | for (; list; list = list->next) { | 749 | WARN_ON(start > offset + len); |
749 | int end; | 750 | |
750 | 751 | end = start + frag_iter->len; | |
751 | WARN_ON(start > offset + len); | 752 | if ((copy = end - offset) > 0) { |
752 | 753 | if (copy > len) | |
753 | end = start + list->len; | 754 | copy = len; |
754 | if ((copy = end - offset) > 0) { | 755 | err = skb_icv_walk(frag_iter, desc, offset-start, |
755 | if (copy > len) | 756 | copy, icv_update); |
756 | copy = len; | 757 | if (unlikely(err)) |
757 | err = skb_icv_walk(list, desc, offset-start, | 758 | return err; |
758 | copy, icv_update); | 759 | if ((len -= copy) == 0) |
759 | if (unlikely(err)) | 760 | return 0; |
760 | return err; | 761 | offset += copy; |
761 | if ((len -= copy) == 0) | ||
762 | return 0; | ||
763 | offset += copy; | ||
764 | } | ||
765 | start = end; | ||
766 | } | 762 | } |
763 | start = end; | ||
767 | } | 764 | } |
768 | BUG_ON(len); | 765 | BUG_ON(len); |
769 | return 0; | 766 | return 0; |
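
skb_walk_frags(), which this hunk converts to, is a small iterator over an skb's frag_list chain; a sketch of its definition in include/linux/skbuff.h, equivalent to the open-coded walk it replaces:

    /* Walk the frag_list chain hanging off an skb. */
    #define skb_walk_frags(skb, iter) \
            for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
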
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index b4a13178fb40..e0009c17d809 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -251,8 +251,7 @@ resume: | |||
251 | nf_reset(skb); | 251 | nf_reset(skb); |
252 | 252 | ||
253 | if (decaps) { | 253 | if (decaps) { |
254 | dst_release(skb->dst); | 254 | skb_dst_drop(skb); |
255 | skb->dst = NULL; | ||
256 | netif_rx(skb); | 255 | netif_rx(skb); |
257 | return 0; | 256 | return 0; |
258 | } else { | 257 | } else { |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index c235597ba8dd..b9fe13138c07 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -22,7 +22,7 @@ static int xfrm_output2(struct sk_buff *skb); | |||
22 | 22 | ||
23 | static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) | 23 | static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) |
24 | { | 24 | { |
25 | struct dst_entry *dst = skb->dst; | 25 | struct dst_entry *dst = skb_dst(skb); |
26 | int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) | 26 | int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) |
27 | - skb_headroom(skb); | 27 | - skb_headroom(skb); |
28 | int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); | 28 | int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); |
@@ -39,7 +39,7 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) | |||
39 | 39 | ||
40 | static int xfrm_output_one(struct sk_buff *skb, int err) | 40 | static int xfrm_output_one(struct sk_buff *skb, int err) |
41 | { | 41 | { |
42 | struct dst_entry *dst = skb->dst; | 42 | struct dst_entry *dst = skb_dst(skb); |
43 | struct xfrm_state *x = dst->xfrm; | 43 | struct xfrm_state *x = dst->xfrm; |
44 | struct net *net = xs_net(x); | 44 | struct net *net = xs_net(x); |
45 | 45 | ||
@@ -94,12 +94,13 @@ resume: | |||
94 | goto error_nolock; | 94 | goto error_nolock; |
95 | } | 95 | } |
96 | 96 | ||
97 | if (!(skb->dst = dst_pop(dst))) { | 97 | dst = dst_pop(dst); |
98 | if (!dst) { | ||
98 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); | 99 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); |
99 | err = -EHOSTUNREACH; | 100 | err = -EHOSTUNREACH; |
100 | goto error_nolock; | 101 | goto error_nolock; |
101 | } | 102 | } |
102 | dst = skb->dst; | 103 | skb_dst_set(skb, dst); |
103 | x = dst->xfrm; | 104 | x = dst->xfrm; |
104 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 105 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
105 | 106 | ||
@@ -119,16 +120,16 @@ int xfrm_output_resume(struct sk_buff *skb, int err) | |||
119 | while (likely((err = xfrm_output_one(skb, err)) == 0)) { | 120 | while (likely((err = xfrm_output_one(skb, err)) == 0)) { |
120 | nf_reset(skb); | 121 | nf_reset(skb); |
121 | 122 | ||
122 | err = skb->dst->ops->local_out(skb); | 123 | err = skb_dst(skb)->ops->local_out(skb); |
123 | if (unlikely(err != 1)) | 124 | if (unlikely(err != 1)) |
124 | goto out; | 125 | goto out; |
125 | 126 | ||
126 | if (!skb->dst->xfrm) | 127 | if (!skb_dst(skb)->xfrm) |
127 | return dst_output(skb); | 128 | return dst_output(skb); |
128 | 129 | ||
129 | err = nf_hook(skb->dst->ops->family, | 130 | err = nf_hook(skb_dst(skb)->ops->family, |
130 | NF_INET_POST_ROUTING, skb, | 131 | NF_INET_POST_ROUTING, skb, |
131 | NULL, skb->dst->dev, xfrm_output2); | 132 | NULL, skb_dst(skb)->dev, xfrm_output2); |
132 | if (unlikely(err != 1)) | 133 | if (unlikely(err != 1)) |
133 | goto out; | 134 | goto out; |
134 | } | 135 | } |
@@ -179,7 +180,7 @@ static int xfrm_output_gso(struct sk_buff *skb) | |||
179 | 180 | ||
180 | int xfrm_output(struct sk_buff *skb) | 181 | int xfrm_output(struct sk_buff *skb) |
181 | { | 182 | { |
182 | struct net *net = dev_net(skb->dst->dev); | 183 | struct net *net = dev_net(skb_dst(skb)->dev); |
183 | int err; | 184 | int err; |
184 | 185 | ||
185 | if (skb_is_gso(skb)) | 186 | if (skb_is_gso(skb)) |
@@ -202,7 +203,7 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) | |||
202 | struct xfrm_mode *inner_mode; | 203 | struct xfrm_mode *inner_mode; |
203 | if (x->sel.family == AF_UNSPEC) | 204 | if (x->sel.family == AF_UNSPEC) |
204 | inner_mode = xfrm_ip2inner_mode(x, | 205 | inner_mode = xfrm_ip2inner_mode(x, |
205 | xfrm_af2proto(skb->dst->ops->family)); | 206 | xfrm_af2proto(skb_dst(skb)->ops->family)); |
206 | else | 207 | else |
207 | inner_mode = x->inner_mode; | 208 | inner_mode = x->inner_mode; |
208 | 209 | ||
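
The xfrm_input.c and xfrm_output.c hunks are part of a tree-wide conversion from raw skb->dst accesses to accessor functions, so the field's representation can later change without touching every caller. Under the assumption that the accessors are thin wrappers over the skb's dst member (the field name below is a guess), they read roughly as:

    static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
    {
            return skb->_skb_dst;           /* field name assumed */
    }

    static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
    {
            skb->_skb_dst = dst;
    }

    /* Release and clear, as the xfrm_input.c hunk now does in one call. */
    static inline void skb_dst_drop(struct sk_buff *skb)
    {
            dst_release(skb_dst(skb));
            skb_dst_set(skb, NULL);
    }
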
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 9c068ab3a834..cb81ca35b0d6 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2027,6 +2027,8 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2027 | { | 2027 | { |
2028 | struct net *net = dev_net(skb->dev); | 2028 | struct net *net = dev_net(skb->dev); |
2029 | struct flowi fl; | 2029 | struct flowi fl; |
2030 | struct dst_entry *dst; | ||
2031 | int res; | ||
2030 | 2032 | ||
2031 | if (xfrm_decode_session(skb, &fl, family) < 0) { | 2033 | if (xfrm_decode_session(skb, &fl, family) < 0) { |
2032 | /* XXX: we should have something like FWDHDRERROR here. */ | 2034 | /* XXX: we should have something like FWDHDRERROR here. */ |
@@ -2034,7 +2036,11 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2034 | return 0; | 2036 | return 0; |
2035 | } | 2037 | } |
2036 | 2038 | ||
2037 | return xfrm_lookup(net, &skb->dst, &fl, NULL, 0) == 0; | 2039 | dst = skb_dst(skb); |
2040 | |||
2041 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; | ||
2042 | skb_dst_set(skb, dst); | ||
2043 | return res; | ||
2038 | } | 2044 | } |
2039 | EXPORT_SYMBOL(__xfrm_route_forward); | 2045 | EXPORT_SYMBOL(__xfrm_route_forward); |
2040 | 2046 | ||