Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hidp/core.c                        |  18
-rw-r--r--  net/core/dev.c                                   |   2
-rw-r--r--  net/core/netevent.c                              |   1
-rw-r--r--  net/core/netpoll.c                               |  26
-rw-r--r--  net/core/skbuff.c                                |   8
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_module.c  |  32
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c                       |  41
-rw-r--r--  net/ipv4/tcp.c                                   |   8
-rw-r--r--  net/ipv4/tcp_bic.c                               |   2
-rw-r--r--  net/ipv4/tcp_cubic.c                             |   2
-rw-r--r--  net/ipv4/tcp_illinois.c                          |   3
-rw-r--r--  net/ipv4/tcp_input.c                             |  18
-rw-r--r--  net/ipv4/tcp_ipv4.c                              |   3
-rw-r--r--  net/ipv4/tcp_lp.c                                |   3
-rw-r--r--  net/ipv4/tcp_vegas.c                             |   3
-rw-r--r--  net/ipv4/tcp_veno.c                              |   3
-rw-r--r--  net/ipv6/addrconf.c                              |  36
-rw-r--r--  net/ipv6/ndisc.c                                 |   2
-rw-r--r--  net/ipv6/tcp_ipv6.c                              |   2
-rw-r--r--  net/irda/irlap_event.c                           |  28
-rw-r--r--  net/irda/irlap_frame.c                           |   3
-rw-r--r--  net/mac80211/debugfs.c                           |   2
-rw-r--r--  net/mac80211/ieee80211_sta.c                     |  12
-rw-r--r--  net/netfilter/nf_conntrack_h323_asn1.c           |   4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c             |   3
-rw-r--r--  net/netfilter/nf_conntrack_sip.c                 |   3
-rw-r--r--  net/rxrpc/ar-connection.c                        |   2
-rw-r--r--  net/rxrpc/ar-output.c                            |   5
-rw-r--r--  net/sctp/associola.c                             |   4
-rw-r--r--  net/sctp/input.c                                 |  24
-rw-r--r--  net/sctp/ipv6.c                                  |   4
-rw-r--r--  net/sctp/socket.c                                | 163
-rw-r--r--  net/sctp/transport.c                             |  39
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c                |   2
-rw-r--r--  net/sunrpc/svc.c                                 |   2
-rw-r--r--  net/tipc/netlink.c                               |   2
-rw-r--r--  net/wireless/sysfs.c                             |   2
-rw-r--r--  net/xfrm/xfrm_state.c                            |   2
38 files changed, 339 insertions, 180 deletions
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index ceadfcf457c1..450eb0244bbf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -581,15 +581,6 @@ static int hidp_session(void *arg)
581 581
582 hidp_del_timer(session); 582 hidp_del_timer(session);
583 583
584 fput(session->intr_sock->file);
585
586 wait_event_timeout(*(ctrl_sk->sk_sleep),
587 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
588
589 fput(session->ctrl_sock->file);
590
591 __hidp_unlink_session(session);
592
593 if (session->input) { 584 if (session->input) {
594 input_unregister_device(session->input); 585 input_unregister_device(session->input);
595 session->input = NULL; 586 session->input = NULL;
@@ -601,6 +592,15 @@ static int hidp_session(void *arg)
601 hid_free_device(session->hid); 592 hid_free_device(session->hid);
602 } 593 }
603 594
595 fput(session->intr_sock->file);
596
597 wait_event_timeout(*(ctrl_sk->sk_sleep),
598 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
599
600 fput(session->ctrl_sock->file);
601
602 __hidp_unlink_session(session);
603
604 up_write(&hidp_session_sem); 604 up_write(&hidp_session_sem);
605 605
606 kfree(session); 606 kfree(session);
diff --git a/net/core/dev.c b/net/core/dev.c
index 26090621ea6b..ee051bb398a0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2009,6 +2009,7 @@ static void net_rx_action(struct softirq_action *h)
2009 } 2009 }
2010 } 2010 }
2011out: 2011out:
2012 local_irq_enable();
2012#ifdef CONFIG_NET_DMA 2013#ifdef CONFIG_NET_DMA
2013 /* 2014 /*
2014 * There may not be any more sk_buffs coming right now, so push 2015 * There may not be any more sk_buffs coming right now, so push
@@ -2022,7 +2023,6 @@ out:
2022 rcu_read_unlock(); 2023 rcu_read_unlock();
2023 } 2024 }
2024#endif 2025#endif
2025 local_irq_enable();
2026 return; 2026 return;
2027 2027
2028softnet_break: 2028softnet_break:
diff --git a/net/core/netevent.c b/net/core/netevent.c
index 35d02c38554e..95f81de87502 100644
--- a/net/core/netevent.c
+++ b/net/core/netevent.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <net/netevent.h>
18 19
19static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); 20static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
20 21
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 758dafe284c0..a0efdd7a6b37 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -250,22 +250,23 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
250 unsigned long flags; 250 unsigned long flags;
251 251
252 local_irq_save(flags); 252 local_irq_save(flags);
253 if (netif_tx_trylock(dev)) { 253 /* try until next clock tick */
254 /* try until next clock tick */ 254 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
255 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 255 tries > 0; --tries) {
256 tries > 0; --tries) { 256 if (netif_tx_trylock(dev)) {
257 if (!netif_queue_stopped(dev)) 257 if (!netif_queue_stopped(dev))
258 status = dev->hard_start_xmit(skb, dev); 258 status = dev->hard_start_xmit(skb, dev);
259 netif_tx_unlock(dev);
259 260
260 if (status == NETDEV_TX_OK) 261 if (status == NETDEV_TX_OK)
261 break; 262 break;
262 263
263 /* tickle device maybe there is some cleanup */
264 netpoll_poll(np);
265
266 udelay(USEC_PER_POLL);
267 } 264 }
268 netif_tx_unlock(dev); 265
266 /* tickle device maybe there is some cleanup */
267 netpoll_poll(np);
268
269 udelay(USEC_PER_POLL);
269 } 270 }
270 local_irq_restore(flags); 271 local_irq_restore(flags);
271 } 272 }
@@ -785,8 +786,13 @@ void netpoll_cleanup(struct netpoll *np)
785 skb_queue_purge(&npinfo->arp_tx); 786 skb_queue_purge(&npinfo->arp_tx);
786 skb_queue_purge(&npinfo->txq); 787 skb_queue_purge(&npinfo->txq);
787 cancel_rearming_delayed_work(&npinfo->tx_work); 788 cancel_rearming_delayed_work(&npinfo->tx_work);
788 flush_scheduled_work();
789 789
790 /* clean after last, unfinished work */
791 if (!skb_queue_empty(&npinfo->txq)) {
792 struct sk_buff *skb;
793 skb = __skb_dequeue(&npinfo->txq);
794 kfree_skb(skb);
795 }
790 kfree(npinfo); 796 kfree(npinfo);
791 } 797 }
792 } 798 }
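
The reworked loop above takes and drops the tx lock on every attempt instead of spinning with it held, so netpoll_poll() and the retry delay run unlocked and other CPUs can transmit in between; the cleanup hunk additionally frees any skb still sitting in txq after the delayed work is cancelled. A rough user-space analogue of the try-lock-inside-the-retry-loop shape (the pthread names here are illustrative, this is not the kernel code):

    /* User-space analogue of the retry loop above: hold the lock only for
     * one transmit attempt, and do the cleanup/delay work with it dropped.
     * Hypothetical names; compile with -lpthread. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_TRIES       10
    #define RETRY_DELAY_US  100

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

    static bool try_send_once(void)      /* stands in for hard_start_xmit() */
    {
            return false;                /* pretend the device queue is busy */
    }

    static void poll_for_cleanup(void)   /* stands in for netpoll_poll() */
    {
    }

    static bool send_with_retries(void)
    {
            int tries;

            for (tries = MAX_TRIES; tries > 0; --tries) {
                    /* the lock is held only around the single attempt */
                    if (pthread_mutex_trylock(&tx_lock) == 0) {
                            bool ok = try_send_once();

                            pthread_mutex_unlock(&tx_lock);
                            if (ok)
                                    return true;
                    }
                    /* auxiliary work and the delay run with the lock dropped */
                    poll_for_cleanup();
                    usleep(RETRY_DELAY_US);
            }
            return false;
    }

    int main(void)
    {
            printf("sent: %s\n", send_with_retries() ? "yes" : "no");
            return 0;
    }
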
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7c6a34e21eee..3943c3ad9145 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -434,8 +434,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
434 n->tc_verd = CLR_TC_MUNGED(n->tc_verd); 434 n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
435 C(iif); 435 C(iif);
436#endif 436#endif
437 skb_copy_secmark(n, skb);
438#endif 437#endif
438 skb_copy_secmark(n, skb);
439 C(truesize); 439 C(truesize);
440 atomic_set(&n->users, 1); 440 atomic_set(&n->users, 1);
441 C(head); 441 C(head);
@@ -1706,6 +1706,11 @@ next_skb:
1706 st->stepped_offset += frag->size; 1706 st->stepped_offset += frag->size;
1707 } 1707 }
1708 1708
1709 if (st->frag_data) {
1710 kunmap_skb_frag(st->frag_data);
1711 st->frag_data = NULL;
1712 }
1713
1709 if (st->cur_skb->next) { 1714 if (st->cur_skb->next) {
1710 st->cur_skb = st->cur_skb->next; 1715 st->cur_skb = st->cur_skb->next;
1711 st->frag_idx = 0; 1716 st->frag_idx = 0;
@@ -2206,7 +2211,6 @@ EXPORT_SYMBOL(pskb_copy);
2206EXPORT_SYMBOL(pskb_expand_head); 2211EXPORT_SYMBOL(pskb_expand_head);
2207EXPORT_SYMBOL(skb_checksum); 2212EXPORT_SYMBOL(skb_checksum);
2208EXPORT_SYMBOL(skb_clone); 2213EXPORT_SYMBOL(skb_clone);
2209EXPORT_SYMBOL(skb_clone_fraglist);
2210EXPORT_SYMBOL(skb_copy); 2214EXPORT_SYMBOL(skb_copy);
2211EXPORT_SYMBOL(skb_copy_and_csum_bits); 2215EXPORT_SYMBOL(skb_copy_and_csum_bits);
2212EXPORT_SYMBOL(skb_copy_and_csum_dev); 2216EXPORT_SYMBOL(skb_copy_and_csum_dev);
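
The skb_seq_read() hunk unmaps the currently kmapped fragment before the iterator walks onto the next skb in the frag list, so a mapping is never carried (and leaked) across skbs. For orientation, a hedged sketch of how a caller is expected to drive the sequential-read helpers; process_block() is a made-up consumer, and this is a sketch, not interface documentation:

    /* Hedged sketch of a caller of the skb sequential-read helpers; the
     * consumer callback process_block() is made up.  Kernel context only. */
    #include <linux/skbuff.h>
    #include <linux/types.h>

    static int process_block(const u8 *data, unsigned int len)
    {
            /* return 0 to stop the walk early */
            return 1;
    }

    static void walk_skb(struct sk_buff *skb, unsigned int from, unsigned int to)
    {
            struct skb_seq_state st;
            const u8 *data;
            unsigned int consumed = 0;
            unsigned int len;

            skb_prepare_seq_read(skb, from, to, &st);

            /* each call yields one linear chunk; 0 means the range is done */
            while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                    if (!process_block(data, len)) {
                            /* stopping early: release any fragment mapping
                             * the iterator still holds */
                            skb_abort_seq_read(&st);
                            return;
                    }
                    consumed += len;
            }
    }
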
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index c308756c2f9d..6398e6e67493 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -456,18 +456,13 @@ void
456ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac, 456ieee80211softmac_add_network_locked(struct ieee80211softmac_device *mac,
457 struct ieee80211softmac_network *add_net) 457 struct ieee80211softmac_network *add_net)
458{ 458{
459 struct list_head *list_ptr; 459 struct ieee80211softmac_network *softmac_net;
460 struct ieee80211softmac_network *softmac_net = NULL;
461 460
462 list_for_each(list_ptr, &mac->network_list) { 461 list_for_each_entry(softmac_net, &mac->network_list, list) {
463 softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list);
464 if(!memcmp(softmac_net->bssid, add_net->bssid, ETH_ALEN)) 462 if(!memcmp(softmac_net->bssid, add_net->bssid, ETH_ALEN))
465 break; 463 return;
466 else
467 softmac_net = NULL;
468 } 464 }
469 if(softmac_net == NULL) 465 list_add(&(add_net->list), &mac->network_list);
470 list_add(&(add_net->list), &mac->network_list);
471} 466}
472 467
473/* Add a network to the list, with locking */ 468/* Add a network to the list, with locking */
@@ -506,16 +501,13 @@ struct ieee80211softmac_network *
506ieee80211softmac_get_network_by_bssid_locked(struct ieee80211softmac_device *mac, 501ieee80211softmac_get_network_by_bssid_locked(struct ieee80211softmac_device *mac,
507 u8 *bssid) 502 u8 *bssid)
508{ 503{
509 struct list_head *list_ptr; 504 struct ieee80211softmac_network *softmac_net;
510 struct ieee80211softmac_network *softmac_net = NULL; 505
511 list_for_each(list_ptr, &mac->network_list) { 506 list_for_each_entry(softmac_net, &mac->network_list, list) {
512 softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list);
513 if(!memcmp(softmac_net->bssid, bssid, ETH_ALEN)) 507 if(!memcmp(softmac_net->bssid, bssid, ETH_ALEN))
514 break; 508 return softmac_net;
515 else
516 softmac_net = NULL;
517 } 509 }
518 return softmac_net; 510 return NULL;
519} 511}
520 512
521/* Get a network from the list by BSSID with locking */ 513/* Get a network from the list by BSSID with locking */
@@ -537,11 +529,9 @@ struct ieee80211softmac_network *
537ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac, 529ieee80211softmac_get_network_by_essid_locked(struct ieee80211softmac_device *mac,
538 struct ieee80211softmac_essid *essid) 530 struct ieee80211softmac_essid *essid)
539{ 531{
540 struct list_head *list_ptr; 532 struct ieee80211softmac_network *softmac_net;
541 struct ieee80211softmac_network *softmac_net = NULL;
542 533
543 list_for_each(list_ptr, &mac->network_list) { 534 list_for_each_entry(softmac_net, &mac->network_list, list) {
544 softmac_net = list_entry(list_ptr, struct ieee80211softmac_network, list);
545 if (softmac_net->essid.len == essid->len && 535 if (softmac_net->essid.len == essid->len &&
546 !memcmp(softmac_net->essid.data, essid->data, essid->len)) 536 !memcmp(softmac_net->essid.data, essid->data, essid->len))
547 return softmac_net; 537 return softmac_net;
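
All three softmac helpers above are the same conversion: an open-coded list_for_each() plus list_entry() plus NULL-sentinel dance becomes a plain list_for_each_entry() that yields the containing structure directly and can simply return from inside the loop. A minimal sketch of the idiom, with invented structure and field names:

    /* Minimal sketch of list_for_each_entry(); struct net_entry and its
     * fields are invented for illustration. */
    #include <linux/list.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct net_entry {
            u8 bssid[6];
            struct list_head list;          /* linkage the iterator follows */
    };

    static struct net_entry *find_by_bssid(struct list_head *head, const u8 *bssid)
    {
            struct net_entry *e;

            /* walks 'head' handing back the containing struct directly; no
             * separate struct list_head cursor or list_entry() call needed */
            list_for_each_entry(e, head, list) {
                    if (!memcmp(e->bssid, bssid, sizeof(e->bssid)))
                            return e;       /* can return from inside the loop */
            }
            return NULL;                    /* ran to completion: no match */
    }
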
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 7ea2d981a932..356f067484e3 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options {
67 struct ip_vs_seq out_seq; /* outgoing seq. struct */ 67 struct ip_vs_seq out_seq; /* outgoing seq. struct */
68}; 68};
69 69
70struct ip_vs_sync_thread_data {
71 struct completion *startup;
72 int state;
73};
74
70#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ) 75#define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
71#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) 76#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
72#define FULL_CONN_SIZE \ 77#define FULL_CONN_SIZE \
@@ -751,6 +756,7 @@ static int sync_thread(void *startup)
751 mm_segment_t oldmm; 756 mm_segment_t oldmm;
752 int state; 757 int state;
753 const char *name; 758 const char *name;
759 struct ip_vs_sync_thread_data *tinfo = startup;
754 760
755 /* increase the module use count */ 761 /* increase the module use count */
756 ip_vs_use_count_inc(); 762 ip_vs_use_count_inc();
@@ -789,7 +795,14 @@ static int sync_thread(void *startup)
789 add_wait_queue(&sync_wait, &wait); 795 add_wait_queue(&sync_wait, &wait);
790 796
791 set_sync_pid(state, current->pid); 797 set_sync_pid(state, current->pid);
792 complete((struct completion *)startup); 798 complete(tinfo->startup);
799
800 /*
801 * once we call the completion queue above, we should
802 * null out that reference, since its allocated on the
803 * stack of the creating kernel thread
804 */
805 tinfo->startup = NULL;
793 806
794 /* processing master/backup loop here */ 807 /* processing master/backup loop here */
795 if (state == IP_VS_STATE_MASTER) 808 if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@ static int sync_thread(void *startup)
801 remove_wait_queue(&sync_wait, &wait); 814 remove_wait_queue(&sync_wait, &wait);
802 815
803 /* thread exits */ 816 /* thread exits */
817
818 /*
819 * If we weren't explicitly stopped, then we
820 * exited in error, and should undo our state
821 */
822 if ((!stop_master_sync) && (!stop_backup_sync))
823 ip_vs_sync_state -= tinfo->state;
824
804 set_sync_pid(state, 0); 825 set_sync_pid(state, 0);
805 IP_VS_INFO("sync thread stopped!\n"); 826 IP_VS_INFO("sync thread stopped!\n");
806 827
@@ -812,6 +833,11 @@ static int sync_thread(void *startup)
812 set_stop_sync(state, 0); 833 set_stop_sync(state, 0);
813 wake_up(&stop_sync_wait); 834 wake_up(&stop_sync_wait);
814 835
836 /*
837 * we need to free the structure that was allocated
838 * for us in start_sync_thread
839 */
840 kfree(tinfo);
815 return 0; 841 return 0;
816} 842}
817 843
@@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
838{ 864{
839 DECLARE_COMPLETION_ONSTACK(startup); 865 DECLARE_COMPLETION_ONSTACK(startup);
840 pid_t pid; 866 pid_t pid;
867 struct ip_vs_sync_thread_data *tinfo;
841 868
842 if ((state == IP_VS_STATE_MASTER && sync_master_pid) || 869 if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
843 (state == IP_VS_STATE_BACKUP && sync_backup_pid)) 870 (state == IP_VS_STATE_BACKUP && sync_backup_pid))
844 return -EEXIST; 871 return -EEXIST;
845 872
873 /*
874 * Note that tinfo will be freed in sync_thread on exit
875 */
876 tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
877 if (!tinfo)
878 return -ENOMEM;
879
846 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); 880 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
847 IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", 881 IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
848 sizeof(struct ip_vs_sync_conn)); 882 sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
858 ip_vs_backup_syncid = syncid; 892 ip_vs_backup_syncid = syncid;
859 } 893 }
860 894
895 tinfo->state = state;
896 tinfo->startup = &startup;
897
861 repeat: 898 repeat:
862 if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) { 899 if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
863 IP_VS_ERR("could not create fork_sync_thread due to %d... " 900 IP_VS_ERR("could not create fork_sync_thread due to %d... "
864 "retrying.\n", pid); 901 "retrying.\n", pid);
865 msleep_interruptible(1000); 902 msleep_interruptible(1000);
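
The point of the ip_vs_sync change is lifetime: the startup completion lives on the stack of start_sync_thread(), so the spawned thread must drop its pointer to it as soon as it has signalled, and the rest of the per-thread state moves into a kmalloc()ed ip_vs_sync_thread_data that the thread itself frees on exit. A stripped-down sketch of the same ownership pattern (names are illustrative; kernel_thread() is used to mirror the original, a newer sketch would use kthread_run()):

    /* Ownership sketch: heap-allocated thread data, stack completion used
     * only for the startup handshake.  Names are illustrative. */
    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct thread_data {
            struct completion *startup;     /* points into the creator's stack */
            int state;
    };

    static int worker_thread(void *arg)
    {
            struct thread_data *tinfo = arg;

            complete(tinfo->startup);       /* creator may return after this ... */
            tinfo->startup = NULL;          /* ... so never touch it again */

            /* ... main loop driven by tinfo->state ... */

            kfree(tinfo);                   /* the thread owns and frees tinfo */
            return 0;
    }

    static int start_worker(int state)
    {
            DECLARE_COMPLETION_ONSTACK(startup);
            struct thread_data *tinfo;
            int pid;

            tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
            if (!tinfo)
                    return -ENOMEM;
            tinfo->state = state;
            tinfo->startup = &startup;

            pid = kernel_thread(worker_thread, tinfo, 0);
            if (pid < 0) {
                    kfree(tinfo);           /* thread never ran, free it here */
                    return pid;
            }
            wait_for_completion(&startup);  /* safe: the thread has nulled it */
            return 0;
    }
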
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cd3c7e95de9e..450f44bb2c8e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1064,7 +1064,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1064 break; 1064 break;
1065 } 1065 }
1066 used = recv_actor(desc, skb, offset, len); 1066 used = recv_actor(desc, skb, offset, len);
1067 if (used <= len) { 1067 if (used < 0) {
1068 if (!copied)
1069 copied = used;
1070 break;
1071 } else if (used <= len) {
1068 seq += used; 1072 seq += used;
1069 copied += used; 1073 copied += used;
1070 offset += used; 1074 offset += used;
@@ -1086,7 +1090,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1086 tcp_rcv_space_adjust(sk); 1090 tcp_rcv_space_adjust(sk);
1087 1091
1088 /* Clean up data we have read: This will do ACK frames. */ 1092 /* Clean up data we have read: This will do ACK frames. */
1089 if (copied) 1093 if (copied > 0)
1090 tcp_cleanup_rbuf(sk, copied); 1094 tcp_cleanup_rbuf(sk, copied);
1091 return copied; 1095 return copied;
1092} 1096}
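
tcp_read_sock() now treats a negative return from the recv_actor callback as an error: the error is handed back to the caller when nothing has been copied yet, the loop stops either way, and tcp_cleanup_rbuf() only runs for a strictly positive count. A hedged sketch of a recv_actor-style callback that produces such an error; consumer_ctx and consume_bytes() are made-up names, and setting desc->error is a convention used by some callers rather than something this patch requires:

    /* Hedged sketch of a recv_actor-style callback for tcp_read_sock(). */
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    struct consumer_ctx {
            int failed;                     /* pretend backend state */
    };

    static int consume_bytes(struct consumer_ctx *ctx, struct sk_buff *skb,
                             unsigned int offset, size_t len)
    {
            return ctx->failed ? -ENOSPC : (int)len;
    }

    /* matches the sk_read_actor_t shape tcp_read_sock() expects */
    static int my_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
                             unsigned int offset, size_t len)
    {
            struct consumer_ctx *ctx = desc->arg.data;
            int used = consume_bytes(ctx, skb, offset, len);

            if (used < 0) {
                    /* with the fix above, tcp_read_sock() hands this error
                     * back to its caller when nothing was consumed yet */
                    desc->error = used;
                    return used;
            }
            desc->count -= used;
            return used;
    }
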
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 281c9f913257..dd9ef65ad3ff 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -29,7 +29,7 @@ static int fast_convergence = 1;
29static int max_increment = 16; 29static int max_increment = 16;
30static int low_window = 14; 30static int low_window = 14;
31static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ 31static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
32static int initial_ssthresh = 100; 32static int initial_ssthresh;
33static int smooth_part = 20; 33static int smooth_part = 20;
34 34
35module_param(fast_convergence, int, 0644); 35module_param(fast_convergence, int, 0644);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 14224487b16b..ebfaac2f9f46 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -29,7 +29,7 @@
29static int fast_convergence __read_mostly = 1; 29static int fast_convergence __read_mostly = 1;
30static int max_increment __read_mostly = 16; 30static int max_increment __read_mostly = 16;
31static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */ 31static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
32static int initial_ssthresh __read_mostly = 100; 32static int initial_ssthresh __read_mostly;
33static int bic_scale __read_mostly = 41; 33static int bic_scale __read_mostly = 41;
34static int tcp_friendliness __read_mostly = 1; 34static int tcp_friendliness __read_mostly = 1;
35 35
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 4adc47c55351..b2b2256d3b84 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -90,6 +90,9 @@ static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
90 90
91 ca->acked = pkts_acked; 91 ca->acked = pkts_acked;
92 92
93 if (ktime_equal(last, net_invalid_timestamp()))
94 return;
95
93 rtt = ktime_to_us(net_timedelta(last)); 96 rtt = ktime_to_us(net_timedelta(last));
94 97
95 /* ignore bogus values, this prevents wraparound in alpha math */ 98 /* ignore bogus values, this prevents wraparound in alpha math */
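
net_invalid_timestamp() is simply ktime_set(0, 0), and the tcp_input.c hunks further down make tcp_clean_rtx_queue() pass it to pkts_acked() whenever the RTT sample is unusable, for example when retransmitted data was ACKed. Each delay-based congestion module therefore guards before converting the timestamp into an RTT, as illinois does here and tcp_lp, vegas and veno do below. A condensed sketch of that guard; rtt_sample() stands in for the module-specific bookkeeping:

    /* The guard used by the delay-based modules; rtt_sample() stands in
     * for the per-module RTT bookkeeping. */
    #include <linux/ktime.h>
    #include <linux/skbuff.h>       /* net_timedelta(), net_invalid_timestamp() */
    #include <net/sock.h>

    static void rtt_sample(struct sock *sk, u32 rtt_us)
    {
            /* ... update the module's RTT state ... */
    }

    static void example_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
    {
            u32 rtt_us;

            /* a (0,0) timestamp means "no valid RTT sample for this ACK" */
            if (ktime_equal(last, net_invalid_timestamp()))
                    return;

            rtt_us = ktime_to_us(net_timedelta(last));
            rtt_sample(sk, rtt_us);
    }
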
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 74683d81c3f1..69f9f1ef3ef6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -953,7 +953,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
953 int prior_fackets; 953 int prior_fackets;
954 u32 lost_retrans = 0; 954 u32 lost_retrans = 0;
955 int flag = 0; 955 int flag = 0;
956 int dup_sack = 0; 956 int found_dup_sack = 0;
957 int cached_fack_count; 957 int cached_fack_count;
958 int i; 958 int i;
959 int first_sack_index; 959 int first_sack_index;
@@ -964,20 +964,20 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
964 964
965 /* Check for D-SACK. */ 965 /* Check for D-SACK. */
966 if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) { 966 if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
967 dup_sack = 1; 967 found_dup_sack = 1;
968 tp->rx_opt.sack_ok |= 4; 968 tp->rx_opt.sack_ok |= 4;
969 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); 969 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
970 } else if (num_sacks > 1 && 970 } else if (num_sacks > 1 &&
971 !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) && 971 !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
972 !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) { 972 !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
973 dup_sack = 1; 973 found_dup_sack = 1;
974 tp->rx_opt.sack_ok |= 4; 974 tp->rx_opt.sack_ok |= 4;
975 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); 975 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
976 } 976 }
977 977
978 /* D-SACK for already forgotten data... 978 /* D-SACK for already forgotten data...
979 * Do dumb counting. */ 979 * Do dumb counting. */
980 if (dup_sack && 980 if (found_dup_sack &&
981 !after(ntohl(sp[0].end_seq), prior_snd_una) && 981 !after(ntohl(sp[0].end_seq), prior_snd_una) &&
982 after(ntohl(sp[0].end_seq), tp->undo_marker)) 982 after(ntohl(sp[0].end_seq), tp->undo_marker))
983 tp->undo_retrans--; 983 tp->undo_retrans--;
@@ -1058,6 +1058,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1058 __u32 start_seq = ntohl(sp->start_seq); 1058 __u32 start_seq = ntohl(sp->start_seq);
1059 __u32 end_seq = ntohl(sp->end_seq); 1059 __u32 end_seq = ntohl(sp->end_seq);
1060 int fack_count; 1060 int fack_count;
1061 int dup_sack = (found_dup_sack && (i == first_sack_index));
1061 1062
1062 skb = cached_skb; 1063 skb = cached_skb;
1063 fack_count = cached_fack_count; 1064 fack_count = cached_fack_count;
@@ -2037,7 +2038,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
2037{ 2038{
2038 struct tcp_sock *tp = tcp_sk(sk); 2039 struct tcp_sock *tp = tcp_sk(sk);
2039 2040
2040 tp->left_out = tp->sacked_out; 2041 tcp_sync_left_out(tp);
2041 2042
2042 if (tp->retrans_out == 0) 2043 if (tp->retrans_out == 0)
2043 tp->retrans_stamp = 0; 2044 tp->retrans_stamp = 0;
@@ -2409,7 +2410,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2409 int acked = 0; 2410 int acked = 0;
2410 int prior_packets = tp->packets_out; 2411 int prior_packets = tp->packets_out;
2411 __s32 seq_rtt = -1; 2412 __s32 seq_rtt = -1;
2412 ktime_t last_ackt = ktime_set(0,0); 2413 ktime_t last_ackt = net_invalid_timestamp();
2413 2414
2414 while ((skb = tcp_write_queue_head(sk)) && 2415 while ((skb = tcp_write_queue_head(sk)) &&
2415 skb != tcp_send_head(sk)) { 2416 skb != tcp_send_head(sk)) {
@@ -2487,6 +2488,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2487 tcp_ack_update_rtt(sk, acked, seq_rtt); 2488 tcp_ack_update_rtt(sk, acked, seq_rtt);
2488 tcp_ack_packets_out(sk); 2489 tcp_ack_packets_out(sk);
2489 2490
2491 /* Is the ACK triggering packet unambiguous? */
2492 if (acked & FLAG_RETRANS_DATA_ACKED)
2493 last_ackt = net_invalid_timestamp();
2494
2490 if (ca_ops->pkts_acked) 2495 if (ca_ops->pkts_acked)
2491 ca_ops->pkts_acked(sk, pkts_acked, last_ackt); 2496 ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
2492 } 2497 }
@@ -2932,6 +2937,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
2932 opt_rx->sack_ok) { 2937 opt_rx->sack_ok) {
2933 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 2938 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
2934 } 2939 }
2940 break;
2935#ifdef CONFIG_TCP_MD5SIG 2941#ifdef CONFIG_TCP_MD5SIG
2936 case TCPOPT_MD5SIG: 2942 case TCPOPT_MD5SIG:
2937 /* 2943 /*
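
Besides the found_dup_sack renaming and the net_invalid_timestamp() handling, the tcp_parse_options() hunk adds a break that was missing after the SACK option case, which otherwise fell straight through into the TCPOPT_MD5SIG case on MD5SIG-enabled kernels. A self-contained illustration of that class of bug, nothing TCP-specific about it:

    /* Fallthrough demo: without the break, handling option SACK also runs
     * the MD5SIG handling.  Plain C, compile and run anywhere. */
    #include <stdio.h>

    enum { OPT_SACK = 5, OPT_MD5SIG = 19 };

    static void parse_option(int opcode, int with_break)
    {
            switch (opcode) {
            case OPT_SACK:
                    printf("  handled SACK option\n");
                    if (with_break)
                            break;          /* the fix adds this break */
                    /* falls through */
            case OPT_MD5SIG:
                    printf("  handled MD5SIG option\n");
                    break;
            }
    }

    int main(void)
    {
            printf("buggy (no break), opcode OPT_SACK:\n");
            parse_option(OPT_SACK, 0);      /* prints both lines */

            printf("fixed (with break), opcode OPT_SACK:\n");
            parse_option(OPT_SACK, 1);      /* prints only the SACK line */
            return 0;
    }
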
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 97e294e82679..354721d67f69 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -878,6 +878,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
878 kfree(newkey); 878 kfree(newkey);
879 return -ENOMEM; 879 return -ENOMEM;
880 } 880 }
881 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
881 } 882 }
882 if (tcp_alloc_md5sig_pool() == NULL) { 883 if (tcp_alloc_md5sig_pool() == NULL) {
883 kfree(newkey); 884 kfree(newkey);
@@ -1007,7 +1008,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1007 return -EINVAL; 1008 return -EINVAL;
1008 1009
1009 tp->md5sig_info = p; 1010 tp->md5sig_info = p;
1010 1011 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1011 } 1012 }
1012 1013
1013 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 1014 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 43294ad9f63e..e49836ce012e 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -266,7 +266,8 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
266 struct tcp_sock *tp = tcp_sk(sk); 266 struct tcp_sock *tp = tcp_sk(sk);
267 struct lp *lp = inet_csk_ca(sk); 267 struct lp *lp = inet_csk_ca(sk);
268 268
269 tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last))); 269 if (!ktime_equal(last, net_invalid_timestamp()))
270 tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last)));
270 271
271 /* calc inference */ 272 /* calc inference */
272 if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) 273 if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 73e19cf7df21..e218a51ceced 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -117,6 +117,9 @@ void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
117 struct vegas *vegas = inet_csk_ca(sk); 117 struct vegas *vegas = inet_csk_ca(sk);
118 u32 vrtt; 118 u32 vrtt;
119 119
120 if (ktime_equal(last, net_invalid_timestamp()))
121 return;
122
120 /* Never allow zero rtt or baseRTT */ 123 /* Never allow zero rtt or baseRTT */
121 vrtt = ktime_to_us(net_timedelta(last)) + 1; 124 vrtt = ktime_to_us(net_timedelta(last)) + 1;
122 125
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 9edb340f2f95..ec854cc5fad5 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -74,6 +74,9 @@ static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
74 struct veno *veno = inet_csk_ca(sk); 74 struct veno *veno = inet_csk_ca(sk);
75 u32 vrtt; 75 u32 vrtt;
76 76
77 if (ktime_equal(last, net_invalid_timestamp()))
78 return;
79
77 /* Never allow zero rtt or baseRTT */ 80 /* Never allow zero rtt or baseRTT */
78 vrtt = ktime_to_us(net_timedelta(last)) + 1; 81 vrtt = ktime_to_us(net_timedelta(last)) + 1;
79 82
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5a5f8bd4597a..79b79f3de24c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2154,6 +2154,15 @@ static void addrconf_dev_config(struct net_device *dev)
2154 2154
2155 ASSERT_RTNL(); 2155 ASSERT_RTNL();
2156 2156
2157 if ((dev->type != ARPHRD_ETHER) &&
2158 (dev->type != ARPHRD_FDDI) &&
2159 (dev->type != ARPHRD_IEEE802_TR) &&
2160 (dev->type != ARPHRD_ARCNET) &&
2161 (dev->type != ARPHRD_INFINIBAND)) {
2162 /* Alas, we support only Ethernet autoconfiguration. */
2163 return;
2164 }
2165
2157 idev = addrconf_add_dev(dev); 2166 idev = addrconf_add_dev(dev);
2158 if (idev == NULL) 2167 if (idev == NULL)
2159 return; 2168 return;
@@ -2241,36 +2250,16 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2241 ip6_tnl_add_linklocal(idev); 2250 ip6_tnl_add_linklocal(idev);
2242} 2251}
2243 2252
2244static int ipv6_hwtype(struct net_device *dev)
2245{
2246 if ((dev->type == ARPHRD_ETHER) ||
2247 (dev->type == ARPHRD_LOOPBACK) ||
2248 (dev->type == ARPHRD_SIT) ||
2249 (dev->type == ARPHRD_TUNNEL6) ||
2250 (dev->type == ARPHRD_FDDI) ||
2251 (dev->type == ARPHRD_IEEE802_TR) ||
2252 (dev->type == ARPHRD_ARCNET) ||
2253 (dev->type == ARPHRD_INFINIBAND))
2254 return 1;
2255
2256 return 0;
2257}
2258
2259static int addrconf_notify(struct notifier_block *this, unsigned long event, 2253static int addrconf_notify(struct notifier_block *this, unsigned long event,
2260 void * data) 2254 void * data)
2261{ 2255{
2262 struct net_device *dev = (struct net_device *) data; 2256 struct net_device *dev = (struct net_device *) data;
2263 struct inet6_dev *idev; 2257 struct inet6_dev *idev = __in6_dev_get(dev);
2264 int run_pending = 0; 2258 int run_pending = 0;
2265 2259
2266 if (!ipv6_hwtype(dev))
2267 return NOTIFY_OK;
2268
2269 idev = __in6_dev_get(dev);
2270
2271 switch(event) { 2260 switch(event) {
2272 case NETDEV_REGISTER: 2261 case NETDEV_REGISTER:
2273 if (!idev) { 2262 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
2274 idev = ipv6_add_dev(dev); 2263 idev = ipv6_add_dev(dev);
2275 if (!idev) 2264 if (!idev)
2276 printk(KERN_WARNING "IPv6: add_dev failed for %s\n", 2265 printk(KERN_WARNING "IPv6: add_dev failed for %s\n",
@@ -2279,6 +2268,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2279 break; 2268 break;
2280 case NETDEV_UP: 2269 case NETDEV_UP:
2281 case NETDEV_CHANGE: 2270 case NETDEV_CHANGE:
2271 if (dev->flags & IFF_SLAVE)
2272 break;
2273
2282 if (event == NETDEV_UP) { 2274 if (event == NETDEV_UP) {
2283 if (!netif_carrier_ok(dev)) { 2275 if (!netif_carrier_ok(dev)) {
2284 /* device is not ready yet. */ 2276 /* device is not ready yet. */
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8b36451bada..0358e6066a4e 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1062,7 +1062,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1062 pref = ra_msg->icmph.icmp6_router_pref; 1062 pref = ra_msg->icmph.icmp6_router_pref;
1063 /* 10b is handled as if it were 00b (medium) */ 1063 /* 10b is handled as if it were 00b (medium) */
1064 if (pref == ICMPV6_ROUTER_PREF_INVALID || 1064 if (pref == ICMPV6_ROUTER_PREF_INVALID ||
1065 in6_dev->cnf.accept_ra_rtr_pref) 1065 !in6_dev->cnf.accept_ra_rtr_pref)
1066 pref = ICMPV6_ROUTER_PREF_MEDIUM; 1066 pref = ICMPV6_ROUTER_PREF_MEDIUM;
1067#endif 1067#endif
1068 1068
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f06a51ad4fd..193d9d60bb7a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -590,6 +590,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
590 kfree(newkey); 590 kfree(newkey);
591 return -ENOMEM; 591 return -ENOMEM;
592 } 592 }
593 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
593 } 594 }
594 tcp_alloc_md5sig_pool(); 595 tcp_alloc_md5sig_pool();
595 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { 596 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
@@ -724,6 +725,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
724 return -ENOMEM; 725 return -ENOMEM;
725 726
726 tp->md5sig_info = p; 727 tp->md5sig_info = p;
728 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
727 } 729 }
728 730
729 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 731 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 0b02073ffdf3..a8b8873aa263 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -317,23 +317,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
317} 317}
318 318
319/* 319/*
320 * Function irlap_next_state (self, state)
321 *
322 * Switches state and provides debug information
323 *
324 */
325static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
326{
327 /*
328 if (!self || self->magic != LAP_MAGIC)
329 return;
330
331 IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
332 */
333 self->state = state;
334}
335
336/*
337 * Function irlap_state_ndm (event, skb, frame) 320 * Function irlap_state_ndm (event, skb, frame)
338 * 321 *
339 * NDM (Normal Disconnected Mode) state 322 * NDM (Normal Disconnected Mode) state
@@ -1086,7 +1069,6 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
1086 } else { 1069 } else {
1087 /* Final packet of window */ 1070 /* Final packet of window */
1088 irlap_send_data_primary_poll(self, skb); 1071 irlap_send_data_primary_poll(self, skb);
1089 irlap_next_state(self, LAP_NRM_P);
1090 1072
1091 /* 1073 /*
1092 * Make sure state machine does not try to send 1074 * Make sure state machine does not try to send
@@ -1436,14 +1418,14 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1436 */ 1418 */
1437 self->remote_busy = FALSE; 1419 self->remote_busy = FALSE;
1438 1420
1421 /* Stop final timer */
1422 del_timer(&self->final_timer);
1423
1439 /* 1424 /*
1440 * Nr as expected? 1425 * Nr as expected?
1441 */ 1426 */
1442 ret = irlap_validate_nr_received(self, info->nr); 1427 ret = irlap_validate_nr_received(self, info->nr);
1443 if (ret == NR_EXPECTED) { 1428 if (ret == NR_EXPECTED) {
1444 /* Stop final timer */
1445 del_timer(&self->final_timer);
1446
1447 /* Update Nr received */ 1429 /* Update Nr received */
1448 irlap_update_nr_received(self, info->nr); 1430 irlap_update_nr_received(self, info->nr);
1449 1431
@@ -1475,14 +1457,12 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
1475 1457
1476 /* Resend rejected frames */ 1458 /* Resend rejected frames */
1477 irlap_resend_rejected_frames(self, CMD_FRAME); 1459 irlap_resend_rejected_frames(self, CMD_FRAME);
1478 1460 irlap_start_final_timer(self, self->final_timeout * 2);
1479 /* Final timer ??? Jean II */
1480 1461
1481 irlap_next_state(self, LAP_NRM_P); 1462 irlap_next_state(self, LAP_NRM_P);
1482 } else if (ret == NR_INVALID) { 1463 } else if (ret == NR_INVALID) {
1483 IRDA_DEBUG(1, "%s(), Received RR with " 1464 IRDA_DEBUG(1, "%s(), Received RR with "
1484 "invalid nr !\n", __FUNCTION__); 1465 "invalid nr !\n", __FUNCTION__);
1485 del_timer(&self->final_timer);
1486 1466
1487 irlap_next_state(self, LAP_RESET_WAIT); 1467 irlap_next_state(self, LAP_RESET_WAIT);
1488 1468
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 3c5a68e36414..3013c49ab975 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -798,16 +798,19 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
798 self->vs = (self->vs + 1) % 8; 798 self->vs = (self->vs + 1) % 8;
799 self->ack_required = FALSE; 799 self->ack_required = FALSE;
800 800
801 irlap_next_state(self, LAP_NRM_P);
801 irlap_send_i_frame(self, tx_skb, CMD_FRAME); 802 irlap_send_i_frame(self, tx_skb, CMD_FRAME);
802 } else { 803 } else {
803 IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__); 804 IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__);
804 805
805 if (self->ack_required) { 806 if (self->ack_required) {
806 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); 807 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
808 irlap_next_state(self, LAP_NRM_P);
807 irlap_send_rr_frame(self, CMD_FRAME); 809 irlap_send_rr_frame(self, CMD_FRAME);
808 self->ack_required = FALSE; 810 self->ack_required = FALSE;
809 } else { 811 } else {
810 skb->data[1] |= PF_BIT; 812 skb->data[1] |= PF_BIT;
813 irlap_next_state(self, LAP_NRM_P);
811 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); 814 irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
812 } 815 }
813 } 816 }
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index bb6c0feb2d48..476c8486f789 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -112,7 +112,7 @@ DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x",
112 local->wep_iv & 0xffffff); 112 local->wep_iv & 0xffffff);
113DEBUGFS_READONLY_FILE(tx_power_reduction, 20, "%d.%d dBm", 113DEBUGFS_READONLY_FILE(tx_power_reduction, 20, "%d.%d dBm",
114 local->hw.conf.tx_power_reduction / 10, 114 local->hw.conf.tx_power_reduction / 10,
115 local->hw.conf.tx_power_reduction & 10); 115 local->hw.conf.tx_power_reduction % 10);
116DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", 116DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
117 local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>"); 117 local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>");
118 118
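
The debugfs fix is a one-character operator swap: tx_power_reduction & 10 masks bits 1 and 3 of the value, while tx_power_reduction % 10 is the decimal fraction digit that the "%d.%d dBm" format actually needs. A standalone check:

    /* tx_power_reduction is stored in tenths of a dBm; "%d.%d dBm" needs
     * value/10 and value%10, not a bitwise AND.  Standalone demo. */
    #include <stdio.h>

    int main(void)
    {
            int tenths = 26;        /* i.e. 2.6 dBm */

            printf("wrong: %d.%d dBm\n", tenths / 10, tenths & 10); /* 2.10 dBm */
            printf("right: %d.%d dBm\n", tenths / 10, tenths % 10); /* 2.6 dBm */
            return 0;
    }
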
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index 9f30ae4c2ab3..91b545c144c1 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -2592,11 +2592,17 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
2592 2592
2593 read_lock(&local->sub_if_lock); 2593 read_lock(&local->sub_if_lock);
2594 list_for_each_entry(sdata, &local->sub_if_list, list) { 2594 list_for_each_entry(sdata, &local->sub_if_list, list) {
2595
2596 /* No need to wake the master device. */
2597 if (sdata->dev == local->mdev)
2598 continue;
2599
2595 if (sdata->type == IEEE80211_IF_TYPE_STA) { 2600 if (sdata->type == IEEE80211_IF_TYPE_STA) {
2596 if (sdata->u.sta.associated) 2601 if (sdata->u.sta.associated)
2597 ieee80211_send_nullfunc(local, sdata, 0); 2602 ieee80211_send_nullfunc(local, sdata, 0);
2598 ieee80211_sta_timer((unsigned long)sdata); 2603 ieee80211_sta_timer((unsigned long)sdata);
2599 } 2604 }
2605
2600 netif_wake_queue(sdata->dev); 2606 netif_wake_queue(sdata->dev);
2601 } 2607 }
2602 read_unlock(&local->sub_if_lock); 2608 read_unlock(&local->sub_if_lock);
@@ -2738,6 +2744,12 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
2738 2744
2739 read_lock(&local->sub_if_lock); 2745 read_lock(&local->sub_if_lock);
2740 list_for_each_entry(sdata, &local->sub_if_list, list) { 2746 list_for_each_entry(sdata, &local->sub_if_list, list) {
2747
2748 /* Don't stop the master interface, otherwise we can't transmit
2749 * probes! */
2750 if (sdata->dev == local->mdev)
2751 continue;
2752
2741 netif_stop_queue(sdata->dev); 2753 netif_stop_queue(sdata->dev);
2742 if (sdata->type == IEEE80211_IF_TYPE_STA && 2754 if (sdata->type == IEEE80211_IF_TYPE_STA &&
2743 sdata->u.sta.associated) 2755 sdata->u.sta.associated)
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index f6fad713d484..6b7eaa019d4c 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -518,7 +518,7 @@ int decode_seq(bitstr_t * bs, field_t * f, char *base, int level)
518 CHECK_BOUND(bs, 2); 518 CHECK_BOUND(bs, 2);
519 len = get_len(bs); 519 len = get_len(bs);
520 CHECK_BOUND(bs, len); 520 CHECK_BOUND(bs, len);
521 if (!base) { 521 if (!base || !(son->attr & DECODE)) {
522 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 522 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
523 " ", son->name); 523 " ", son->name);
524 bs->cur += len; 524 bs->cur += len;
@@ -704,6 +704,8 @@ int decode_choice(bitstr_t * bs, field_t * f, char *base, int level)
704 } else { 704 } else {
705 ext = 0; 705 ext = 0;
706 type = get_bits(bs, f->sz); 706 type = get_bits(bs, f->sz);
707 if (type >= f->lb)
708 return H323_ERROR_RANGE;
707 } 709 }
708 710
709 /* Write Type */ 711 /* Write Type */
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3f73327794ab..d0fe3d769828 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -869,8 +869,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
869 return 0; 869 return 0;
870 870
871 if (help->helper) 871 if (help->helper)
872 /* we had a helper before ... */ 872 return -EBUSY;
873 nf_ct_remove_expectations(ct);
874 873
875 /* need to zero data of old helper */ 874 /* need to zero data of old helper */
876 memset(&help->help, 0, sizeof(help->help)); 875 memset(&help->help, 0, sizeof(help->help));
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 7aaa8c91b293..1b5c6c1055f7 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -442,6 +442,9 @@ static int sip_help(struct sk_buff **pskb,
442 442
443 /* RTP info only in some SDP pkts */ 443 /* RTP info only in some SDP pkts */
444 if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 && 444 if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 &&
445 memcmp(dptr, "UPDATE", sizeof("UPDATE") - 1) != 0 &&
446 memcmp(dptr, "SIP/2.0 180", sizeof("SIP/2.0 180") - 1) != 0 &&
447 memcmp(dptr, "SIP/2.0 183", sizeof("SIP/2.0 183") - 1) != 0 &&
445 memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) { 448 memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) {
446 goto out; 449 goto out;
447 } 450 }
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 43cb3e051ece..482750efc235 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -211,7 +211,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
211 conn->header_size = sizeof(struct rxrpc_header); 211 conn->header_size = sizeof(struct rxrpc_header);
212 } 212 }
213 213
214 _leave(" = %p{%d}", conn, conn->debug_id); 214 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
215 return conn; 215 return conn;
216} 216}
217 217
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 591c4422205e..cc9102c5b588 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -640,6 +640,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
640 goto efault; 640 goto efault;
641 sp->remain -= copy; 641 sp->remain -= copy;
642 skb->mark += copy; 642 skb->mark += copy;
643 copied += copy;
643 644
644 len -= copy; 645 len -= copy;
645 segment -= copy; 646 segment -= copy;
@@ -709,6 +710,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
709 710
710 } while (segment > 0); 711 } while (segment > 0);
711 712
713success:
714 ret = copied;
712out: 715out:
713 call->tx_pending = skb; 716 call->tx_pending = skb;
714 _leave(" = %d", ret); 717 _leave(" = %d", ret);
@@ -725,7 +728,7 @@ call_aborted:
725 728
726maybe_error: 729maybe_error:
727 if (copied) 730 if (copied)
728 ret = copied; 731 goto success;
729 goto out; 732 goto out;
730 733
731efault: 734efault:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index df94e3cdfba3..498edb0cd4e5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1231,6 +1231,10 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1231 /* Get the lowest pmtu of all the transports. */ 1231 /* Get the lowest pmtu of all the transports. */
1232 list_for_each(pos, &asoc->peer.transport_addr_list) { 1232 list_for_each(pos, &asoc->peer.transport_addr_list) {
1233 t = list_entry(pos, struct sctp_transport, transports); 1233 t = list_entry(pos, struct sctp_transport, transports);
1234 if (t->pmtu_pending && t->dst) {
1235 sctp_transport_update_pmtu(t, dst_mtu(t->dst));
1236 t->pmtu_pending = 0;
1237 }
1234 if (!pmtu || (t->pathmtu < pmtu)) 1238 if (!pmtu || (t->pathmtu < pmtu))
1235 pmtu = t->pathmtu; 1239 pmtu = t->pathmtu;
1236 } 1240 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 885109fb3dda..d57ff7f3c576 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -367,24 +367,18 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
367void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, 367void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
368 struct sctp_transport *t, __u32 pmtu) 368 struct sctp_transport *t, __u32 pmtu)
369{ 369{
370 if (sock_owned_by_user(sk) || !t || (t->pathmtu == pmtu)) 370 if (!t || (t->pathmtu == pmtu))
371 return; 371 return;
372 372
373 if (sock_owned_by_user(sk)) {
374 asoc->pmtu_pending = 1;
375 t->pmtu_pending = 1;
376 return;
377 }
378
373 if (t->param_flags & SPP_PMTUD_ENABLE) { 379 if (t->param_flags & SPP_PMTUD_ENABLE) {
374 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 380 /* Update transports view of the MTU */
375 printk(KERN_WARNING "%s: Reported pmtu %d too low, " 381 sctp_transport_update_pmtu(t, pmtu);
376 "using default minimum of %d\n",
377 __FUNCTION__, pmtu,
378 SCTP_DEFAULT_MINSEGMENT);
379 /* Use default minimum segment size and disable
380 * pmtu discovery on this transport.
381 */
382 t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
383 t->param_flags = (t->param_flags & ~SPP_PMTUD) |
384 SPP_PMTUD_DISABLE;
385 } else {
386 t->pathmtu = pmtu;
387 }
388 382
389 /* Update association pmtu. */ 383 /* Update association pmtu. */
390 sctp_assoc_sync_pmtu(asoc); 384 sctp_assoc_sync_pmtu(asoc);
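
sctp_icmp_frag_needed() can fire from softirq context while a process holds the socket lock, so rather than updating the transport directly it now just marks pmtu_pending on the transport and association; the deferred update is applied later under the lock, from sctp_sendmsg() (sctp/socket.c hunk below) and from sctp_assoc_sync_pmtu() (sctp/associola.c above). A condensed sketch of the flag-now, apply-under-the-lock-later pattern, with invented names:

    /* Flag-now, apply-later sketch; my_transport and friends are invented,
     * not SCTP symbols. */
    #include <linux/types.h>
    #include <net/sock.h>

    struct my_transport {
            u32 pathmtu;
            u32 pending_pmtu;
            int pmtu_pending;
    };

    static void my_apply_pmtu(struct my_transport *t, u32 pmtu)
    {
            t->pathmtu = pmtu;
            t->pmtu_pending = 0;
    }

    /* runs from the ICMP error handler (softirq context) */
    static void my_icmp_frag_needed(struct sock *sk, struct my_transport *t,
                                    u32 pmtu)
    {
            if (sock_owned_by_user(sk)) {
                    /* a process owns the socket lock: just record the request */
                    t->pending_pmtu = pmtu;
                    t->pmtu_pending = 1;
                    return;
            }
            my_apply_pmtu(t, pmtu);
    }

    /* runs from sendmsg() and similar paths, with the socket lock held */
    static void my_flush_pending_pmtu(struct my_transport *t)
    {
            if (t->pmtu_pending)
                    my_apply_pmtu(t, t->pending_pmtu);
    }
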
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 84cd53635fe8..2c29394fd92e 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -844,6 +844,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
844 dev = dev_get_by_index(addr->v6.sin6_scope_id); 844 dev = dev_get_by_index(addr->v6.sin6_scope_id);
845 if (!dev) 845 if (!dev)
846 return 0; 846 return 0;
847 if (!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) {
848 dev_put(dev);
849 return 0;
850 }
847 dev_put(dev); 851 dev_put(dev);
848 } 852 }
849 af = opt->pf->af; 853 af = opt->pf->af;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4dcdabf56473..b1917f68723c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -333,12 +333,19 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
333 if (!sp->pf->bind_verify(sp, addr)) 333 if (!sp->pf->bind_verify(sp, addr))
334 return -EADDRNOTAVAIL; 334 return -EADDRNOTAVAIL;
335 335
336 /* We must either be unbound, or bind to the same port. */ 336 /* We must either be unbound, or bind to the same port.
337 if (bp->port && (snum != bp->port)) { 337 * It's OK to allow 0 ports if we are already bound.
338 SCTP_DEBUG_PRINTK("sctp_do_bind:" 338 * We'll just inhert an already bound port in this case
339 */
340 if (bp->port) {
341 if (!snum)
342 snum = bp->port;
343 else if (snum != bp->port) {
344 SCTP_DEBUG_PRINTK("sctp_do_bind:"
339 " New port %d does not match existing port " 345 " New port %d does not match existing port "
340 "%d.\n", snum, bp->port); 346 "%d.\n", snum, bp->port);
341 return -EINVAL; 347 return -EINVAL;
348 }
342 } 349 }
343 350
344 if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 351 if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
@@ -973,6 +980,7 @@ static int __sctp_connect(struct sock* sk,
973 union sctp_addr *sa_addr; 980 union sctp_addr *sa_addr;
974 void *addr_buf; 981 void *addr_buf;
975 unsigned short port; 982 unsigned short port;
983 unsigned int f_flags = 0;
976 984
977 sp = sctp_sk(sk); 985 sp = sctp_sk(sk);
978 ep = sp->ep; 986 ep = sp->ep;
@@ -1099,7 +1107,14 @@ static int __sctp_connect(struct sock* sk,
1099 af->to_sk_daddr(&to, sk); 1107 af->to_sk_daddr(&to, sk);
1100 sk->sk_err = 0; 1108 sk->sk_err = 0;
1101 1109
1102 timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); 1110 /* in-kernel sockets don't generally have a file allocated to them
1111 * if all they do is call sock_create_kern().
1112 */
1113 if (sk->sk_socket->file)
1114 f_flags = sk->sk_socket->file->f_flags;
1115
1116 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1117
1103 err = sctp_wait_for_connect(asoc, &timeo); 1118 err = sctp_wait_for_connect(asoc, &timeo);
1104 1119
1105 /* Don't free association on exit. */ 1120 /* Don't free association on exit. */
@@ -1655,6 +1670,9 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1655 goto out_free; 1670 goto out_free;
1656 } 1671 }
1657 1672
1673 if (asoc->pmtu_pending)
1674 sctp_assoc_pending_pmtu(asoc);
1675
1658 /* If fragmentation is disabled and the message length exceeds the 1676 /* If fragmentation is disabled and the message length exceeds the
1659 * association fragmentation point, return EMSGSIZE. The I-D 1677 * association fragmentation point, return EMSGSIZE. The I-D
1660 * does not specify what this error is, but this looks like 1678 * does not specify what this error is, but this looks like
@@ -3365,12 +3383,13 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
3365 sctp_assoc_t associd; 3383 sctp_assoc_t associd;
3366 int retval = 0; 3384 int retval = 0;
3367 3385
3368 if (len != sizeof(status)) { 3386 if (len < sizeof(status)) {
3369 retval = -EINVAL; 3387 retval = -EINVAL;
3370 goto out; 3388 goto out;
3371 } 3389 }
3372 3390
3373 if (copy_from_user(&status, optval, sizeof(status))) { 3391 len = sizeof(status);
3392 if (copy_from_user(&status, optval, len)) {
3374 retval = -EFAULT; 3393 retval = -EFAULT;
3375 goto out; 3394 goto out;
3376 } 3395 }
@@ -3442,12 +3461,13 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
3442 struct sctp_transport *transport; 3461 struct sctp_transport *transport;
3443 int retval = 0; 3462 int retval = 0;
3444 3463
3445 if (len != sizeof(pinfo)) { 3464 if (len < sizeof(pinfo)) {
3446 retval = -EINVAL; 3465 retval = -EINVAL;
3447 goto out; 3466 goto out;
3448 } 3467 }
3449 3468
3450 if (copy_from_user(&pinfo, optval, sizeof(pinfo))) { 3469 len = sizeof(pinfo);
3470 if (copy_from_user(&pinfo, optval, len)) {
3451 retval = -EFAULT; 3471 retval = -EFAULT;
3452 goto out; 3472 goto out;
3453 } 3473 }
@@ -3513,8 +3533,11 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
3513static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, 3533static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
3514 int __user *optlen) 3534 int __user *optlen)
3515{ 3535{
3516 if (len != sizeof(struct sctp_event_subscribe)) 3536 if (len < sizeof(struct sctp_event_subscribe))
3517 return -EINVAL; 3537 return -EINVAL;
3538 len = sizeof(struct sctp_event_subscribe);
3539 if (put_user(len, optlen))
3540 return -EFAULT;
3518 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) 3541 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
3519 return -EFAULT; 3542 return -EFAULT;
3520 return 0; 3543 return 0;
@@ -3536,9 +3559,12 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
3536 /* Applicable to UDP-style socket only */ 3559 /* Applicable to UDP-style socket only */
3537 if (sctp_style(sk, TCP)) 3560 if (sctp_style(sk, TCP))
3538 return -EOPNOTSUPP; 3561 return -EOPNOTSUPP;
3539 if (len != sizeof(int)) 3562 if (len < sizeof(int))
3540 return -EINVAL; 3563 return -EINVAL;
3541 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len)) 3564 len = sizeof(int);
3565 if (put_user(len, optlen))
3566 return -EFAULT;
3567 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
3542 return -EFAULT; 3568 return -EFAULT;
3543 return 0; 3569 return 0;
3544} 3570}
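
The recurring change across these sctp_getsockopt_*() helpers is one pattern: accept any buffer at least as large as the option structure (len < sizeof(...) rather than len != sizeof(...)), clamp len back to the structure size, report the clamped length through put_user(len, optlen), and only then copy that many bytes. A generic sketch of that shape for a hypothetical option; my_opt_info and fill_opt_info() are not SCTP symbols:

    /* Generic getsockopt length handling mirroring the pattern above;
     * my_opt_info and fill_opt_info() are made up. */
    #include <linux/errno.h>
    #include <linux/uaccess.h>
    #include <net/sock.h>

    struct my_opt_info {
            int a;
            int b;
    };

    static void fill_opt_info(struct sock *sk, struct my_opt_info *info)
    {
            /* ... gather the option data ... */
    }

    static int my_getsockopt(struct sock *sk, int len,
                             char __user *optval, int __user *optlen)
    {
            struct my_opt_info info;

            /* too small is an error; a larger buffer is fine */
            if (len < sizeof(info))
                    return -EINVAL;
            len = sizeof(info);             /* clamp to what we will write */

            fill_opt_info(sk, &info);

            /* tell userspace how much was actually written ... */
            if (put_user(len, optlen))
                    return -EFAULT;
            /* ... and copy exactly that much */
            if (copy_to_user(optval, &info, len))
                    return -EFAULT;
            return 0;
    }
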
@@ -3550,6 +3576,7 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
3550 struct sock *sk = asoc->base.sk; 3576 struct sock *sk = asoc->base.sk;
3551 struct socket *sock; 3577 struct socket *sock;
3552 struct inet_sock *inetsk; 3578 struct inet_sock *inetsk;
3579 struct sctp_af *af;
3553 int err = 0; 3580 int err = 0;
3554 3581
3555 /* An association cannot be branched off from an already peeled-off 3582 /* An association cannot be branched off from an already peeled-off
@@ -3571,8 +3598,9 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
3571 /* Make peeled-off sockets more like 1-1 accepted sockets. 3598 /* Make peeled-off sockets more like 1-1 accepted sockets.
3572 * Set the daddr and initialize id to something more random 3599 * Set the daddr and initialize id to something more random
3573 */ 3600 */
3601 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
3602 af->to_sk_daddr(&asoc->peer.primary_addr, sk);
3574 inetsk = inet_sk(sock->sk); 3603 inetsk = inet_sk(sock->sk);
3575 inetsk->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
3576 inetsk->id = asoc->next_tsn ^ jiffies; 3604 inetsk->id = asoc->next_tsn ^ jiffies;
3577 3605
3578 *sockp = sock; 3606 *sockp = sock;
@@ -3587,8 +3615,9 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
3587 int retval = 0; 3615 int retval = 0;
3588 struct sctp_association *asoc; 3616 struct sctp_association *asoc;
3589 3617
3590 if (len != sizeof(sctp_peeloff_arg_t)) 3618 if (len < sizeof(sctp_peeloff_arg_t))
3591 return -EINVAL; 3619 return -EINVAL;
3620 len = sizeof(sctp_peeloff_arg_t);
3592 if (copy_from_user(&peeloff, optval, len)) 3621 if (copy_from_user(&peeloff, optval, len))
3593 return -EFAULT; 3622 return -EFAULT;
3594 3623
@@ -3616,6 +3645,8 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
3616 3645
3617 /* Return the fd mapped to the new socket. */ 3646 /* Return the fd mapped to the new socket. */
3618 peeloff.sd = retval; 3647 peeloff.sd = retval;
3648 if (put_user(len, optlen))
3649 return -EFAULT;
3619 if (copy_to_user(optval, &peeloff, len)) 3650 if (copy_to_user(optval, &peeloff, len))
3620 retval = -EFAULT; 3651 retval = -EFAULT;
3621 3652
@@ -3724,9 +3755,9 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
3724 struct sctp_association *asoc = NULL; 3755 struct sctp_association *asoc = NULL;
3725 struct sctp_sock *sp = sctp_sk(sk); 3756 struct sctp_sock *sp = sctp_sk(sk);
3726 3757
3727 if (len != sizeof(struct sctp_paddrparams)) 3758 if (len < sizeof(struct sctp_paddrparams))
3728 return -EINVAL; 3759 return -EINVAL;
3729 3760 len = sizeof(struct sctp_paddrparams);
3730 if (copy_from_user(&params, optval, len)) 3761 if (copy_from_user(&params, optval, len))
3731 return -EFAULT; 3762 return -EFAULT;
3732 3763
@@ -3825,9 +3856,11 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len,
3825 struct sctp_association *asoc = NULL; 3856 struct sctp_association *asoc = NULL;
3826 struct sctp_sock *sp = sctp_sk(sk); 3857 struct sctp_sock *sp = sctp_sk(sk);
3827 3858
3828 if (len != sizeof(struct sctp_assoc_value)) 3859 if (len < sizeof(struct sctp_assoc_value))
3829 return - EINVAL; 3860 return - EINVAL;
3830 3861
3862 len = sizeof(struct sctp_assoc_value);
3863
3831 if (copy_from_user(&params, optval, len)) 3864 if (copy_from_user(&params, optval, len))
3832 return -EFAULT; 3865 return -EFAULT;
3833 3866
@@ -3876,8 +3909,11 @@ static int sctp_getsockopt_delayed_ack_time(struct sock *sk, int len,
3876 */ 3909 */
3877static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) 3910static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
3878{ 3911{
3879 if (len != sizeof(struct sctp_initmsg)) 3912 if (len < sizeof(struct sctp_initmsg))
3880 return -EINVAL; 3913 return -EINVAL;
3914 len = sizeof(struct sctp_initmsg);
3915 if (put_user(len, optlen))
3916 return -EFAULT;
3881 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) 3917 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
3882 return -EFAULT; 3918 return -EFAULT;
3883 return 0; 3919 return 0;
@@ -3892,7 +3928,7 @@ static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
3892 struct list_head *pos; 3928 struct list_head *pos;
3893 int cnt = 0; 3929 int cnt = 0;
3894 3930
3895 if (len != sizeof(sctp_assoc_t)) 3931 if (len < sizeof(sctp_assoc_t))
3896 return -EINVAL; 3932 return -EINVAL;
3897 3933
3898 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) 3934 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -3928,10 +3964,12 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
3928 struct sctp_sock *sp = sctp_sk(sk); 3964 struct sctp_sock *sp = sctp_sk(sk);
3929 int addrlen; 3965 int addrlen;
3930 3966
3931 if (len != sizeof(struct sctp_getaddrs_old)) 3967 if (len < sizeof(struct sctp_getaddrs_old))
3932 return -EINVAL; 3968 return -EINVAL;
3933 3969
3934 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old))) 3970 len = sizeof(struct sctp_getaddrs_old);
3971
3972 if (copy_from_user(&getaddrs, optval, len))
3935 return -EFAULT; 3973 return -EFAULT;
3936 3974
3937 if (getaddrs.addr_num <= 0) return -EINVAL; 3975 if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -3954,7 +3992,9 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
3954 if (cnt >= getaddrs.addr_num) break; 3992 if (cnt >= getaddrs.addr_num) break;
3955 } 3993 }
3956 getaddrs.addr_num = cnt; 3994 getaddrs.addr_num = cnt;
3957 if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) 3995 if (put_user(len, optlen))
3996 return -EFAULT;
3997 if (copy_to_user(optval, &getaddrs, len))
3958 return -EFAULT; 3998 return -EFAULT;
3959 3999
3960 return 0; 4000 return 0;
@@ -3987,8 +4027,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
3987 return -EINVAL; 4027 return -EINVAL;
3988 4028
3989 to = optval + offsetof(struct sctp_getaddrs,addrs); 4029 to = optval + offsetof(struct sctp_getaddrs,addrs);
3990 space_left = len - sizeof(struct sctp_getaddrs) - 4030 space_left = len - offsetof(struct sctp_getaddrs,addrs);
3991 offsetof(struct sctp_getaddrs,addrs);
3992 4031
3993 list_for_each(pos, &asoc->peer.transport_addr_list) { 4032 list_for_each(pos, &asoc->peer.transport_addr_list) {
3994 from = list_entry(pos, struct sctp_transport, transports); 4033 from = list_entry(pos, struct sctp_transport, transports);
@@ -4025,7 +4064,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
4025 rwlock_t *addr_lock; 4064 rwlock_t *addr_lock;
4026 int cnt = 0; 4065 int cnt = 0;
4027 4066
4028 if (len != sizeof(sctp_assoc_t)) 4067 if (len < sizeof(sctp_assoc_t))
4029 return -EINVAL; 4068 return -EINVAL;
4030 4069
4031 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) 4070 if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -4139,7 +4178,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4139 to += addrlen; 4178 to += addrlen;
4140 cnt ++; 4179 cnt ++;
4141 space_left -= addrlen; 4180 space_left -= addrlen;
4142 bytes_copied += addrlen; 4181 *bytes_copied += addrlen;
4143 } 4182 }
4144 4183
4145 return cnt; 4184 return cnt;
@@ -4167,10 +4206,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4167 void *buf; 4206 void *buf;
4168 int bytes_copied = 0; 4207 int bytes_copied = 0;
4169 4208
4170 if (len != sizeof(struct sctp_getaddrs_old)) 4209 if (len < sizeof(struct sctp_getaddrs_old))
4171 return -EINVAL; 4210 return -EINVAL;
4172 4211
4173 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old))) 4212 len = sizeof(struct sctp_getaddrs_old);
4213 if (copy_from_user(&getaddrs, optval, len))
4174 return -EFAULT; 4214 return -EFAULT;
4175 4215
4176 if (getaddrs.addr_num <= 0) return -EINVAL; 4216 if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -4242,7 +4282,7 @@ copy_getaddrs:
4242 4282
4243 /* copy the leading structure back to user */ 4283 /* copy the leading structure back to user */
4244 getaddrs.addr_num = cnt; 4284 getaddrs.addr_num = cnt;
4245 if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) 4285 if (copy_to_user(optval, &getaddrs, len))
4246 err = -EFAULT; 4286 err = -EFAULT;
4247 4287
4248error: 4288error:
@@ -4270,7 +4310,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4270 void *addrs; 4310 void *addrs;
4271 void *buf; 4311 void *buf;
4272 4312
4273 if (len <= sizeof(struct sctp_getaddrs)) 4313 if (len < sizeof(struct sctp_getaddrs))
4274 return -EINVAL; 4314 return -EINVAL;
4275 4315
4276 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) 4316 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
@@ -4294,8 +4334,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4294 } 4334 }
4295 4335
4296 to = optval + offsetof(struct sctp_getaddrs,addrs); 4336 to = optval + offsetof(struct sctp_getaddrs,addrs);
4297 space_left = len - sizeof(struct sctp_getaddrs) - 4337 space_left = len - offsetof(struct sctp_getaddrs,addrs);
4298 offsetof(struct sctp_getaddrs,addrs); 4338
4299 addrs = kmalloc(space_left, GFP_KERNEL); 4339 addrs = kmalloc(space_left, GFP_KERNEL);
4300 if (!addrs) 4340 if (!addrs)
4301 return -ENOMEM; 4341 return -ENOMEM;
@@ -4343,11 +4383,12 @@ copy_getaddrs:
4343 err = -EFAULT; 4383 err = -EFAULT;
4344 goto error; 4384 goto error;
4345 } 4385 }
4346 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4386 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
4347 return -EFAULT; 4387 err = -EFAULT;
4388 goto error;
4389 }
4348 if (put_user(bytes_copied, optlen)) 4390 if (put_user(bytes_copied, optlen))
4349 return -EFAULT; 4391 err = -EFAULT;
4350
4351error: 4392error:
4352 kfree(addrs); 4393 kfree(addrs);
4353 return err; 4394 return err;
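
In sctp_getsockopt_local_addrs() above, the two put_user() failures used to return -EFAULT directly, leaking the kmalloc()ed addrs buffer; they now set err and jump to the error: label so the buffer is always freed. A minimal sketch of that single-exit cleanup idiom, with an illustrative helper name:

#include <linux/errno.h>
#include <linux/slab.h>         /* kmalloc, kfree, GFP_KERNEL */
#include <linux/uaccess.h>

/* Illustrative helper: one exit point frees the buffer on every path. */
static int copy_addrs_to_user_sketch(char __user *optval, int __user *optlen,
                                     int space_left)
{
        void *addrs;
        int err = 0;

        addrs = kmalloc(space_left, GFP_KERNEL);
        if (!addrs)
                return -ENOMEM;

        /* ... gather the addresses into addrs ... */

        if (copy_to_user(optval, addrs, space_left)) {
                err = -EFAULT;
                goto error;     /* a bare return here would leak addrs */
        }
        if (put_user(space_left, optlen)) {
                err = -EFAULT;
                goto error;
        }

error:
        kfree(addrs);
        return err;
}
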
@@ -4366,10 +4407,12 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
4366 struct sctp_association *asoc; 4407 struct sctp_association *asoc;
4367 struct sctp_sock *sp = sctp_sk(sk); 4408 struct sctp_sock *sp = sctp_sk(sk);
4368 4409
4369 if (len != sizeof(struct sctp_prim)) 4410 if (len < sizeof(struct sctp_prim))
4370 return -EINVAL; 4411 return -EINVAL;
4371 4412
4372 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) 4413 len = sizeof(struct sctp_prim);
4414
4415 if (copy_from_user(&prim, optval, len))
4373 return -EFAULT; 4416 return -EFAULT;
4374 4417
4375 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); 4418 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
@@ -4385,7 +4428,9 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
4385 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, 4428 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
4386 (union sctp_addr *)&prim.ssp_addr); 4429 (union sctp_addr *)&prim.ssp_addr);
4387 4430
4388 if (copy_to_user(optval, &prim, sizeof(struct sctp_prim))) 4431 if (put_user(len, optlen))
4432 return -EFAULT;
4433 if (copy_to_user(optval, &prim, len))
4389 return -EFAULT; 4434 return -EFAULT;
4390 4435
4391 return 0; 4436 return 0;
@@ -4402,10 +4447,15 @@ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
4402{ 4447{
4403 struct sctp_setadaptation adaptation; 4448 struct sctp_setadaptation adaptation;
4404 4449
4405 if (len != sizeof(struct sctp_setadaptation)) 4450 if (len < sizeof(struct sctp_setadaptation))
4406 return -EINVAL; 4451 return -EINVAL;
4407 4452
4453 len = sizeof(struct sctp_setadaptation);
4454
4408 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; 4455 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
4456
4457 if (put_user(len, optlen))
4458 return -EFAULT;
4409 if (copy_to_user(optval, &adaptation, len)) 4459 if (copy_to_user(optval, &adaptation, len))
4410 return -EFAULT; 4460 return -EFAULT;
4411 4461
@@ -4439,9 +4489,12 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
4439 struct sctp_association *asoc; 4489 struct sctp_association *asoc;
4440 struct sctp_sock *sp = sctp_sk(sk); 4490 struct sctp_sock *sp = sctp_sk(sk);
4441 4491
4442 if (len != sizeof(struct sctp_sndrcvinfo)) 4492 if (len < sizeof(struct sctp_sndrcvinfo))
4443 return -EINVAL; 4493 return -EINVAL;
4444 if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo))) 4494
4495 len = sizeof(struct sctp_sndrcvinfo);
4496
4497 if (copy_from_user(&info, optval, len))
4445 return -EFAULT; 4498 return -EFAULT;
4446 4499
4447 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); 4500 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
@@ -4462,7 +4515,9 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
4462 info.sinfo_timetolive = sp->default_timetolive; 4515 info.sinfo_timetolive = sp->default_timetolive;
4463 } 4516 }
4464 4517
4465 if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo))) 4518 if (put_user(len, optlen))
4519 return -EFAULT;
4520 if (copy_to_user(optval, &info, len))
4466 return -EFAULT; 4521 return -EFAULT;
4467 4522
4468 return 0; 4523 return 0;
@@ -4513,10 +4568,12 @@ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
4513 struct sctp_rtoinfo rtoinfo; 4568 struct sctp_rtoinfo rtoinfo;
4514 struct sctp_association *asoc; 4569 struct sctp_association *asoc;
4515 4570
4516 if (len != sizeof (struct sctp_rtoinfo)) 4571 if (len < sizeof (struct sctp_rtoinfo))
4517 return -EINVAL; 4572 return -EINVAL;
4518 4573
4519 if (copy_from_user(&rtoinfo, optval, sizeof (struct sctp_rtoinfo))) 4574 len = sizeof(struct sctp_rtoinfo);
4575
4576 if (copy_from_user(&rtoinfo, optval, len))
4520 return -EFAULT; 4577 return -EFAULT;
4521 4578
4522 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); 4579 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
@@ -4568,11 +4625,12 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len,
4568 struct list_head *pos; 4625 struct list_head *pos;
4569 int cnt = 0; 4626 int cnt = 0;
4570 4627
4571 if (len != sizeof (struct sctp_assocparams)) 4628 if (len < sizeof (struct sctp_assocparams))
4572 return -EINVAL; 4629 return -EINVAL;
4573 4630
4574 if (copy_from_user(&assocparams, optval, 4631 len = sizeof(struct sctp_assocparams);
4575 sizeof (struct sctp_assocparams))) 4632
4633 if (copy_from_user(&assocparams, optval, len))
4576 return -EFAULT; 4634 return -EFAULT;
4577 4635
4578 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); 4636 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
@@ -4658,9 +4716,11 @@ static int sctp_getsockopt_context(struct sock *sk, int len,
4658 struct sctp_sock *sp; 4716 struct sctp_sock *sp;
4659 struct sctp_association *asoc; 4717 struct sctp_association *asoc;
4660 4718
4661 if (len != sizeof(struct sctp_assoc_value)) 4719 if (len < sizeof(struct sctp_assoc_value))
4662 return -EINVAL; 4720 return -EINVAL;
4663 4721
4722 len = sizeof(struct sctp_assoc_value);
4723
4664 if (copy_from_user(&params, optval, len)) 4724 if (copy_from_user(&params, optval, len))
4665 return -EFAULT; 4725 return -EFAULT;
4666 4726
@@ -6071,8 +6131,11 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6071 * queued to the backlog. This prevents a potential race between 6131 * queued to the backlog. This prevents a potential race between
6072 * backlog processing on the old socket and new-packet processing 6132 * backlog processing on the old socket and new-packet processing
6073 * on the new socket. 6133 * on the new socket.
6134 *
6135 * The caller has just allocated newsk so we can guarantee that other
6136 * paths won't try to lock it and then oldsk.
6074 */ 6137 */
6075 sctp_lock_sock(newsk); 6138 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
6076 sctp_assoc_migrate(assoc, newsk); 6139 sctp_assoc_migrate(assoc, newsk);
6077 6140
6078 /* If the association on the newsk is already closed before accept() 6141 /* If the association on the newsk is already closed before accept()
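
Replacing sctp_lock_sock(newsk) with lock_sock_nested(newsk, SINGLE_DEPTH_NESTING) matters because the caller already holds the lock on oldsk; taking a second lock of the same lock class would otherwise trip lockdep, and the new comment records why the nesting is safe (newsk was just allocated, so no other path can hold it). A hedged sketch of the annotation outside SCTP, with an illustrative helper name:

#include <linux/lockdep.h>      /* SINGLE_DEPTH_NESTING */
#include <net/sock.h>

/* The caller owns oldsk; newsk has just been created, so no other path can
 * hold newsk while waiting for oldsk.  The nesting annotation tells lockdep
 * that this second lock of the same class is intentional, not a deadlock.
 */
static void migrate_between_socks_sketch(struct sock *oldsk, struct sock *newsk)
{
        lock_sock(oldsk);                               /* outer lock */
        lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);  /* inner, annotated */

        /* ... move per-association state from oldsk to newsk ... */

        release_sock(newsk);
        release_sock(oldsk);
}
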
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 961df275d5b9..5f467c914f80 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -241,6 +241,45 @@ void sctp_transport_pmtu(struct sctp_transport *transport)
241 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 241 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
242} 242}
243 243
244/* this is a complete rip-off from __sk_dst_check
245 * the cookie is always 0 since this is how it's used in the
246 * pmtu code
247 */
248static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
249{
250 struct dst_entry *dst = t->dst;
251
252 if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
253 dst_release(t->dst);
254 t->dst = NULL;
255 return NULL;
256 }
257
258 return dst;
259}
260
261void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
262{
263 struct dst_entry *dst;
264
265 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
266 printk(KERN_WARNING "%s: Reported pmtu %d too low, "
267 "using default minimum of %d\n",
268 __FUNCTION__, pmtu,
269 SCTP_DEFAULT_MINSEGMENT);
270 /* Use default minimum segment size and disable
271 * pmtu discovery on this transport.
272 */
273 t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
274 } else {
275 t->pathmtu = pmtu;
276 }
277
278 dst = sctp_transport_dst_check(t);
279 if (dst)
280 dst->ops->update_pmtu(dst, pmtu);
281}
282
244/* Caches the dst entry and source address for a transport's destination 283/* Caches the dst entry and source address for a transport's destination
245 * address. 284 * address.
246 */ 285 */
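
sctp_transport_dst_check() above is, as its comment says, modelled on __sk_dst_check(): when the cached dst_entry is marked obsolete and its ->check() callback can no longer revalidate it, the reference is released and NULL is returned so the caller knows to look the route up again, and sctp_transport_update_pmtu() only pushes the new PMTU into a route that survived that check. A reduced sketch of the check-or-drop shape with a made-up cache structure, not the real dst API:

#include <stddef.h>

/* Illustrative cache entry; the real struct dst_entry is far richer and the
 * real code goes through dst_release() and dst->ops->check().
 */
struct cached_route {
        int obsolete;                           /* set when routing has changed */
        struct cached_route *(*check)(struct cached_route *rt);
        int refcnt;
};

/* If the cached route is obsolete and can no longer be revalidated, drop the
 * reference and clear the slot so the caller performs a fresh lookup.
 */
static struct cached_route *route_check_sketch(struct cached_route **slot)
{
        struct cached_route *rt = *slot;

        if (rt && rt->obsolete && rt->check(rt) == NULL) {
                rt->refcnt--;                   /* stands in for dst_release() */
                *slot = NULL;
                return NULL;
        }
        return rt;
}
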
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 099a983797da..c094583386fd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -853,7 +853,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
853 u32 priv_len, maj_stat; 853 u32 priv_len, maj_stat;
854 int pad, saved_len, remaining_len, offset; 854 int pad, saved_len, remaining_len, offset;
855 855
856 rqstp->rq_sendfile_ok = 0; 856 rqstp->rq_splice_ok = 0;
857 857
858 priv_len = svc_getnl(&buf->head[0]); 858 priv_len = svc_getnl(&buf->head[0]);
859 if (rqstp->rq_deferred) { 859 if (rqstp->rq_deferred) {
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e673ef993904..55ea6df069de 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -814,7 +814,7 @@ svc_process(struct svc_rqst *rqstp)
814 rqstp->rq_res.tail[0].iov_base = NULL; 814 rqstp->rq_res.tail[0].iov_base = NULL;
815 rqstp->rq_res.tail[0].iov_len = 0; 815 rqstp->rq_res.tail[0].iov_len = 0;
816 /* Will be turned off only in gss privacy case: */ 816 /* Will be turned off only in gss privacy case: */
817 rqstp->rq_sendfile_ok = 1; 817 rqstp->rq_splice_ok = 1;
818 /* tcp needs a space for the record length... */ 818 /* tcp needs a space for the record length... */
819 if (rqstp->rq_prot == IPPROTO_TCP) 819 if (rqstp->rq_prot == IPPROTO_TCP)
820 svc_putnl(resv, 0); 820 svc_putnl(resv, 0);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 4cdafa2d1d4d..6a7f7b4c2595 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -60,7 +60,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
60 rep_nlh = nlmsg_hdr(rep_buf); 60 rep_nlh = nlmsg_hdr(rep_buf);
61 memcpy(rep_nlh, req_nlh, hdr_space); 61 memcpy(rep_nlh, req_nlh, hdr_space);
62 rep_nlh->nlmsg_len = rep_buf->len; 62 rep_nlh->nlmsg_len = rep_buf->len;
63 genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid); 63 genlmsg_unicast(rep_buf, NETLINK_CB(skb).pid);
64 } 64 }
65 65
66 return 0; 66 return 0;
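
The TIPC change sends the reply to the port id the netlink core recorded for the sending socket (NETLINK_CB(skb).pid) rather than to the nlmsg_pid field of the request header, which userspace may legitimately leave as zero. A hedged sketch of a handler replying the same way; the function name is illustrative, and the two-argument genlmsg_unicast() matches the kernel version being patched here:

#include <linux/netlink.h>
#include <net/genetlink.h>

/* Unicast the reply to the pid the netlink core recorded for the sender of
 * req_skb, not to whatever the sender wrote into its own message header.
 */
static int reply_to_sender_sketch(struct sk_buff *req_skb,
                                  struct sk_buff *reply_skb)
{
        return genlmsg_unicast(reply_skb, NETLINK_CB(req_skb).pid);
}
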
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 3ebae1442963..88aaacd9f822 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -33,7 +33,7 @@ static ssize_t _show_permaddr(struct device *dev,
33 struct device_attribute *attr, 33 struct device_attribute *attr,
34 char *buf) 34 char *buf)
35{ 35{
36 char *addr = dev_to_rdev(dev)->wiphy.perm_addr; 36 unsigned char *addr = dev_to_rdev(dev)->wiphy.perm_addr;
37 37
38 return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", 38 return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
39 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 39 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
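
The perm_addr change avoids sign extension: with a plain char (signed on most architectures this code runs on), a byte above 0x7f is promoted to a negative int before the %.2x conversion and commonly prints as ffffffxx. A small userspace demonstration of the promotion:

#include <stdio.h>

int main(void)
{
        char signed_byte = (char)0xab;          /* plain char is typically signed */
        unsigned char unsigned_byte = 0xab;

        /* The signed char is sign-extended when promoted to int... */
        printf("signed:   %.2x\n", signed_byte);        /* commonly ffffffab */
        /* ...while the unsigned char promotes to a small positive int. */
        printf("unsigned: %.2x\n", unsigned_byte);      /* ab */
        return 0;
}
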
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 85f3f43a6cca..dfacb9c2a6e3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1729,7 +1729,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1729 x->type && x->type->get_mtu) 1729 x->type && x->type->get_mtu)
1730 res = x->type->get_mtu(x, mtu); 1730 res = x->type->get_mtu(x, mtu);
1731 else 1731 else
1732 res = mtu; 1732 res = mtu - x->props.header_len;
1733 spin_unlock_bh(&x->lock); 1733 spin_unlock_bh(&x->lock);
1734 return res; 1734 return res;
1735} 1735}
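
The xfrm fallback now subtracts the transform's fixed header length, so a state without a get_mtu() helper reports the payload space actually available rather than the raw link MTU. As a worked example, sketched below with an illustrative 32-byte overhead rather than a real ESP figure, a 1500-byte link MTU now yields 1468.

#include <stdio.h>

/* Without a get_mtu() helper, usable MTU = link MTU - fixed header length. */
int main(void)
{
        int link_mtu = 1500;
        int header_len = 32;    /* illustrative transform overhead */

        printf("payload mtu = %d\n", link_mtu - header_len);    /* 1468 */
        return 0;
}
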