Diffstat (limited to 'net')
-rw-r--r--  net/ax25/af_ax25.c        |   4
-rw-r--r--  net/core/pktgen.c         | 160
-rw-r--r--  net/ipv4/ip_gre.c         |  13
-rw-r--r--  net/ipv4/ip_sockglue.c    |   3
-rw-r--r--  net/ipv4/ipip.c           |   8
-rw-r--r--  net/ipv6/ip6_tunnel.c     |   7
-rw-r--r--  net/ipv6/sit.c            |   8
-rw-r--r--  net/mac80211/scan.c       |   4
-rw-r--r--  net/netlink/af_netlink.c  |  19
-rw-r--r--  net/netlink/genetlink.c   |   4
-rw-r--r--  net/phonet/af_phonet.c    |   6
-rw-r--r--  net/phonet/socket.c       |  16
-rw-r--r--  net/wireless/wext-sme.c   |   2
13 files changed, 113 insertions, 141 deletions
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index da0f64f82b57..d6b1b054e294 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1781,8 +1781,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
 		ax25_info.n2count = ax25->n2count;
 		ax25_info.state = ax25->state;
-		ax25_info.rcv_q = sk_wmem_alloc_get(sk);
-		ax25_info.snd_q = sk_rmem_alloc_get(sk);
+		ax25_info.rcv_q = sk_rmem_alloc_get(sk);
+		ax25_info.snd_q = sk_wmem_alloc_get(sk);
 		ax25_info.vs = ax25->vs;
 		ax25_info.vr = ax25->vr;
 		ax25_info.va = ax25->va;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0bcecbf06581..4d11c28ca8ca 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -192,11 +192,10 @@
 #define F_QUEUE_MAP_CPU (1<<14)	/* queue map mirrors smp_processor_id() */
 
 /* Thread control flag bits */
-#define T_TERMINATE (1<<0)
-#define T_STOP      (1<<1)	/* Stop run */
-#define T_RUN       (1<<2)	/* Start run */
-#define T_REMDEVALL (1<<3)	/* Remove all devs */
-#define T_REMDEV    (1<<4)	/* Remove one dev */
+#define T_STOP      (1<<0)	/* Stop run */
+#define T_RUN       (1<<1)	/* Start run */
+#define T_REMDEVALL (1<<2)	/* Remove all devs */
+#define T_REMDEV    (1<<3)	/* Remove one dev */
 
 /* If lock -- can be removed after some work */
 #define if_lock(t)           spin_lock(&(t->if_lock));
@@ -2105,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
-	ktime_t start;
+	ktime_t start_time, end_time;
 	s32 remaining;
 	struct hrtimer_sleeper t;
 
@@ -2116,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	if (remaining <= 0)
 		return;
 
-	start = ktime_now();
+	start_time = ktime_now();
 	if (remaining < 100)
 		udelay(remaining);	/* really small just spin */
 	else {
@@ -2135,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 		} while (t.task && pkt_dev->running && !signal_pending(current));
 		__set_current_state(TASK_RUNNING);
 	}
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+	end_time = ktime_now();
+
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -3365,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 	mutex_unlock(&pktgen_thread_lock);
 }
 
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_now();
+	schedule();
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
 
-	if (need_resched())
-		schedule();
-	else
-		cpu_relax();
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+	ktime_t idle_start = ktime_now();
 
+	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+		if (signal_pending(current))
+			break;
+
+		if (need_resched())
+			pktgen_resched(pkt_dev);
+		else
+			cpu_relax();
+	}
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
 
-
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = pkt_dev->odev;
@@ -3387,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	u16 queue_map;
 	int ret;
 
-	if (pkt_dev->delay) {
-		spin(pkt_dev, pkt_dev->next_tx);
-
-		/* This is max DELAY, this has special meaning of
-		 * "never transmit"
-		 */
-		if (pkt_dev->delay == ULLONG_MAX) {
-			pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
-			return;
-		}
-	}
-
-	if (!pkt_dev->skb) {
-		set_cur_queue_map(pkt_dev);
-		queue_map = pkt_dev->cur_queue_map;
-	} else {
-		queue_map = skb_get_queue_mapping(pkt_dev->skb);
+	/* If device is offline, then don't send */
+	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+		pktgen_stop_device(pkt_dev);
+		return;
 	}
 
-	txq = netdev_get_tx_queue(odev, queue_map);
-	/* Did we saturate the queue already? */
-	if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
-		/* If device is down, then all queues are permnantly frozen */
-		if (netif_running(odev))
-			idle(pkt_dev);
-		else
-			pktgen_stop_device(pkt_dev);
+	/* This is max DELAY, this has special meaning of
+	 * "never transmit"
+	 */
+	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+		pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
 		return;
 	}
 
+	/* If no skb or clone count exhausted then get new one */
 	if (!pkt_dev->skb || (pkt_dev->last_ok &&
 			      ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
 		/* build a new pkt */
@@ -3435,54 +3432,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
 
-	/* fill_packet() might have changed the queue */
+	if (pkt_dev->delay && pkt_dev->last_ok)
+		spin(pkt_dev, pkt_dev->next_tx);
+
 	queue_map = skb_get_queue_mapping(pkt_dev->skb);
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
-		pkt_dev->last_ok = 0;
-	else {
-		atomic_inc(&(pkt_dev->skb->users));
+	atomic_inc(&(pkt_dev->skb->users));
 
-	retry_now:
-		ret = (*xmit)(pkt_dev->skb, odev);
-		switch (ret) {
-		case NETDEV_TX_OK:
-			txq_trans_update(txq);
-			pkt_dev->last_ok = 1;
-			pkt_dev->sofar++;
-			pkt_dev->seq_num++;
-			pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
-			break;
-		case NETDEV_TX_LOCKED:
-			cpu_relax();
-			goto retry_now;
-		default: /* Drivers are not supposed to return other values! */
-			if (net_ratelimit())
-				pr_info("pktgen: %s xmit error: %d\n",
-					odev->name, ret);
-			pkt_dev->errors++;
-			/* fallthru */
-		case NETDEV_TX_BUSY:
-			/* Retry it next time */
-			atomic_dec(&(pkt_dev->skb->users));
-			pkt_dev->last_ok = 0;
-		}
-
-		if (pkt_dev->delay)
-			pkt_dev->next_tx = ktime_add_ns(ktime_now(),
-							pkt_dev->delay);
+	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+		ret = NETDEV_TX_BUSY;
+	else
+		ret = (*xmit)(pkt_dev->skb, odev);
+
+	switch (ret) {
+	case NETDEV_TX_OK:
+		txq_trans_update(txq);
+		pkt_dev->last_ok = 1;
+		pkt_dev->sofar++;
+		pkt_dev->seq_num++;
+		pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+		break;
+	default: /* Drivers are not supposed to return other values! */
+		if (net_ratelimit())
+			pr_info("pktgen: %s xmit error: %d\n",
+				odev->name, ret);
+		pkt_dev->errors++;
+		/* fallthru */
+	case NETDEV_TX_LOCKED:
+	case NETDEV_TX_BUSY:
+		/* Retry it next time */
+		atomic_dec(&(pkt_dev->skb->users));
+		pkt_dev->last_ok = 0;
 	}
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
-		while (atomic_read(&(pkt_dev->skb->users)) != 1) {
-			if (signal_pending(current))
-				break;
-			idle(pkt_dev);
-		}
+		pktgen_wait_for_skb(pkt_dev);
 
 		/* Done with this */
 		pktgen_stop_device(pkt_dev);
@@ -3515,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
 	while (!kthread_should_stop()) {
 		pkt_dev = next_to_run(t);
 
-		if (!pkt_dev &&
-		    (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
-		    == 0) {
-			prepare_to_wait(&(t->queue), &wait,
-					TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ / 10);
-			finish_wait(&(t->queue), &wait);
+		if (unlikely(!pkt_dev && t->control == 0)) {
+			wait_event_interruptible_timeout(t->queue,
+							 t->control != 0,
+							 HZ/10);
+			continue;
 		}
 
 		__set_current_state(TASK_RUNNING);
 
-		if (pkt_dev)
+		if (likely(pkt_dev)) {
 			pktgen_xmit(pkt_dev);
 
+			if (need_resched())
+				pktgen_resched(pkt_dev);
+			else
+				cpu_relax();
+		}
+
 		if (t->control & T_STOP) {
 			pktgen_stop(t);
 			t->control &= ~(T_STOP);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d9645c94a067..41ada9904d31 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -66,10 +66,7 @@
    solution, but it supposes maintaing new variable in ALL
    skb, even if no tunneling is used.
 
-   Current solution: t->recursion lock breaks dead loops. It looks
-   like dev->tbusy flag, but I preferred new variable, because
-   the semantics is different. One day, when hard_start_xmit
-   will be multithreaded we will have to use skb->encapsulation.
+   Current solution: HARD_TX_LOCK lock breaks dead loops.
 
 
 
@@ -678,11 +675,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	__be32 dst;
 	int mtu;
 
-	if (tunnel->recursion++) {
-		stats->collisions++;
-		goto tx_error;
-	}
-
 	if (dev->type == ARPHRD_ETHER)
 		IPCB(skb)->flags = 0;
 
@@ -820,7 +812,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			ip_rt_put(rt);
 			stats->tx_dropped++;
 			dev_kfree_skb(skb);
-			tunnel->recursion--;
 			return NETDEV_TX_OK;
 		}
 		if (skb->sk)
@@ -888,7 +879,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	nf_reset(skb);
 
 	IPTUNNEL_XMIT();
-	tunnel->recursion--;
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -897,7 +887,6 @@ tx_error_icmp:
 tx_error:
 	stats->tx_errors++;
 	dev_kfree_skb(skb);
-	tunnel->recursion--;
 	return NETDEV_TX_OK;
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fc7993e9061f..5a0693576e82 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -611,6 +611,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		 * Check the arguments are allowable
 		 */
 
+		if (optlen < sizeof(struct in_addr))
+			goto e_inval;
+
 		err = -EFAULT;
 		if (optlen >= sizeof(struct ip_mreqn)) {
 			if (copy_from_user(&mreq, optval, sizeof(mreq)))
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 62548cb0923c..08ccd344de7a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -402,11 +402,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	__be32 dst = tiph->daddr;
 	int mtu;
 
-	if (tunnel->recursion++) {
-		stats->collisions++;
-		goto tx_error;
-	}
-
 	if (skb->protocol != htons(ETH_P_IP))
 		goto tx_error;
 
@@ -485,7 +480,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 			ip_rt_put(rt);
 			stats->tx_dropped++;
 			dev_kfree_skb(skb);
-			tunnel->recursion--;
 			return NETDEV_TX_OK;
 		}
 		if (skb->sk)
@@ -523,7 +517,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	nf_reset(skb);
 
 	IPTUNNEL_XMIT();
-	tunnel->recursion--;
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -531,7 +524,6 @@ tx_error_icmp:
 tx_error:
 	stats->tx_errors++;
 	dev_kfree_skb(skb);
-	tunnel->recursion--;
 	return NETDEV_TX_OK;
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7d25bbe32110..c595bbe1ed99 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1043,11 +1043,6 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_device_stats *stats = &t->dev->stats;
 	int ret;
 
-	if (t->recursion++) {
-		stats->collisions++;
-		goto tx_err;
-	}
-
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		ret = ip4ip6_tnl_xmit(skb, dev);
@@ -1062,14 +1057,12 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (ret < 0)
 		goto tx_err;
 
-	t->recursion--;
 	return NETDEV_TX_OK;
 
 tx_err:
 	stats->tx_errors++;
 	stats->tx_dropped++;
 	kfree_skb(skb);
-	t->recursion--;
 	return NETDEV_TX_OK;
 }
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 0ae4f6448187..fcb539628847 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -626,11 +626,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	struct in6_addr *addr6;
 	int addr_type;
 
-	if (tunnel->recursion++) {
-		stats->collisions++;
-		goto tx_error;
-	}
-
 	if (skb->protocol != htons(ETH_P_IPV6))
 		goto tx_error;
 
@@ -753,7 +748,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			ip_rt_put(rt);
 			stats->tx_dropped++;
 			dev_kfree_skb(skb);
-			tunnel->recursion--;
 			return NETDEV_TX_OK;
 		}
 		if (skb->sk)
@@ -794,7 +788,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	nf_reset(skb);
 
 	IPTUNNEL_XMIT();
-	tunnel->recursion--;
 	return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -802,7 +795,6 @@ tx_error_icmp:
 tx_error:
 	stats->tx_errors++;
 	dev_kfree_skb(skb);
-	tunnel->recursion--;
 	return NETDEV_TX_OK;
 }
 
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 039901109fa1..71e10cabf811 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -90,8 +90,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
 		bss->dtim_period = tim_ie->dtim_period;
 	}
 
-	/* set default value for buggy APs */
-	if (!elems->tim || bss->dtim_period == 0)
+	/* set default value for buggy AP/no TIM element */
+	if (bss->dtim_period == 0)
 		bss->dtim_period = 1;
 
 	bss->supp_rates_len = 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 55180b99562a..a4bafbf15097 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1609,6 +1609,16 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
 	return err;
 }
 
+void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
+
+	sk_for_each_bound(sk, node, &tbl->mc_list)
+		netlink_update_socket_mc(nlk_sk(sk), group, 0);
+}
+
 /**
  * netlink_clear_multicast_users - kick off multicast listeners
  *
@@ -1619,15 +1629,8 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
  */
 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
 {
-	struct sock *sk;
-	struct hlist_node *node;
-	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
-
 	netlink_table_grab();
-
-	sk_for_each_bound(sk, node, &tbl->mc_list)
-		netlink_update_socket_mc(nlk_sk(sk), group, 0);
-
+	__netlink_clear_multicast_users(ksk, group);
 	netlink_table_ungrab();
 }
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 566941e03363..44ff3f3810fa 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -220,10 +220,12 @@ static void __genl_unregister_mc_group(struct genl_family *family,
 	struct net *net;
 	BUG_ON(grp->family != family);
 
+	netlink_table_grab();
 	rcu_read_lock();
 	for_each_net_rcu(net)
-		netlink_clear_multicast_users(net->genl_sock, grp->id);
+		__netlink_clear_multicast_users(net->genl_sock, grp->id);
 	rcu_read_unlock();
+	netlink_table_ungrab();
 
 	clear_bit(grp->id, mc_groups);
 	list_del(&grp->list);
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index a662e62a99cf..f60c0c2aacba 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -168,6 +168,12 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev,
 		goto drop;
 	}
 
+	/* Broadcast sending is not implemented */
+	if (pn_addr(dst) == PNADDR_BROADCAST) {
+		err = -EOPNOTSUPP;
+		goto drop;
+	}
+
 	skb_reset_transport_header(skb);
 	WARN_ON(skb_headroom(skb) & 1);	/* HW assumes word alignment */
 	skb_push(skb, sizeof(struct phonethdr));
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 7a4ee397d2f7..07aa9f08d5fb 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -113,6 +113,8 @@ void pn_sock_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL(pn_sock_unhash);
 
+static DEFINE_MUTEX(port_mutex);
+
 static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
 {
 	struct sock *sk = sock->sk;
@@ -140,9 +142,11 @@ static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
 		err = -EINVAL; /* attempt to rebind */
 		goto out;
 	}
+	WARN_ON(sk_hashed(sk));
+	mutex_lock(&port_mutex);
 	err = sk->sk_prot->get_port(sk, pn_port(handle));
 	if (err)
-		goto out;
+		goto out_port;
 
 	/* get_port() sets the port, bind() sets the address if applicable */
 	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
@@ -150,6 +154,8 @@ static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
 
 	/* Enable RX on the socket */
 	sk->sk_prot->hash(sk);
+out_port:
+	mutex_unlock(&port_mutex);
 out:
 	release_sock(sk);
 	return err;
@@ -357,8 +363,6 @@ const struct proto_ops phonet_stream_ops = {
 };
 EXPORT_SYMBOL(phonet_stream_ops);
 
-static DEFINE_MUTEX(port_mutex);
-
 /* allocate port for a socket */
 int pn_sock_get_port(struct sock *sk, unsigned short sport)
 {
@@ -370,9 +374,7 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
 
 	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
 	try_sa.spn_family = AF_PHONET;
-
-	mutex_lock(&port_mutex);
-
+	WARN_ON(!mutex_is_locked(&port_mutex));
 	if (!sport) {
 		/* search free port */
 		int port, pmin, pmax;
@@ -401,8 +403,6 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
 		else
 			sock_put(tmpsk);
 	}
-	mutex_unlock(&port_mutex);
-
 	/* the port must be in use already */
 	return -EADDRINUSE;
 
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index d16cd9ea4d00..bf725275eb8d 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -26,11 +26,11 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
 
 	wdev->wext.connect.ie = wdev->wext.ie;
 	wdev->wext.connect.ie_len = wdev->wext.ie_len;
-	wdev->wext.connect.privacy = wdev->wext.default_key != -1;
 
 	if (wdev->wext.keys) {
 		wdev->wext.keys->def = wdev->wext.default_key;
 		wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
+		wdev->wext.connect.privacy = true;
 	}
 
 	if (!wdev->wext.connect.ssid_len)