author	Linus Torvalds <torvalds@linux-foundation.org>	2013-03-05 21:42:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-03-05 21:42:29 -0500
commit	9da060d0ed571bbff434c4a1ef3e48db99a37ee0 (patch)
tree	084194657e0bae0eaec74c0049da97af4352dde0 /net
parent	e3b59518c10e08eeb06215abf06f50e8f83b51dc (diff)
parent	aab2b4bf224ef8358d262f95b568b8ad0cecf0a0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A moderately sized pile of fixes, some specifically for merge window
  introduced regressions although others are for longer standing items
  and have been queued up for -stable.

  I'm kind of tired of all the RDS protocol bugs over the years, to be
  honest, it's way out of proportion to the number of people who
  actually use it.

   1) Fix missing range initialization in netfilter IPSET, from Jozsef
      Kadlecsik.

   2) ieee80211_local->tim_lock needs to use BH disabling, from
      Johannes Berg.

   3) Fix DMA syncing in SFC driver, from Ben Hutchings.

   4) Fix regression in BOND device MAC address setting, from Jiri
      Pirko.

   5) Missing usb_free_urb in ISDN Hisax driver, from Marina Makienko.

   6) Fix UDP checksumming in bnx2x driver for 57710 and 57711 chips,
      fix from Dmitry Kravkov.

   7) Missing cfgspace_lock initialization in BCMA driver.

   8) Validate parameter size for SCTP assoc stats getsockopt(), from
      Guenter Roeck.

   9) Fix SCTP association hangs, from Lee A Roberts.

  10) Fix jumbo frame handling in r8169, from Francois Romieu.

  11) Fix phy_device memory leak, from Petr Malat.

  12) Omit trailing FCS from frames received in BGMAC driver, from
      Hauke Mehrtens.

  13) Missing socket refcount release in L2TP, from Guillaume Nault.

  14) sctp_endpoint_init should respect passed in gfp_t, rather than
      use GFP_KERNEL unconditionally.  From Dan Carpenter.

  15) Add ASIX AX88179 USB driver, from Freddy Xin.

  16) Remove MAINTAINERS entries for drivers deleted during the merge
      window, from Cesar Eduardo Barros.

  17) RDS protocol can try to allocate huge amounts of memory, check
      that the user's request length makes sense, from Cong Wang.

  18) SCTP should use the provided KMALLOC_MAX_SIZE instead of its
      own, bogus, definition.  From Cong Wang.

  19) Fix deadlocks in FEC driver by moving TX reclaim into NAPI poll,
      from Frank Li.  Also, fix a build error introduced in the merge
      window.

  20) Fix bogus purging of default routes in ipv6, from Lorenzo
      Colitti.

  21) Don't double count RTT measurements when we leave the TCP
      receive fast path, from Neal Cardwell."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
  tcp: fix double-counted receiver RTT when leaving receiver fast path
  CAIF: fix sparse warning for caif_usb
  rds: simplify a warning message
  net: fec: fix build error in no MXC platform
  net: ipv6: Don't purge default router if accept_ra=2
  net: fec: put tx to napi poll function to fix dead lock
  sctp: use KMALLOC_MAX_SIZE instead of its own MAX_KMALLOC_SIZE
  rds: limit the size allocated by rds_message_alloc()
  MAINTAINERS: remove eexpress
  MAINTAINERS: remove drivers/net/wan/cycx*
  MAINTAINERS: remove 3c505
  caif_dev: fix sparse warnings for caif_flow_cb
  ax88179_178a: ASIX AX88179_178A USB 3.0/2.0 to gigabit ethernet adapter driver
  sctp: use the passed in gfp flags instead GFP_KERNEL
  ipv[4|6]: correct dropwatch false positive in local_deliver_finish
  l2tp: Restore socket refcount when sendmsg succeeds
  net/phy: micrel: Disable asymmetric pause for KSZ9021
  bgmac: omit the fcs
  phy: Fix phy_device_free memory leak
  bnx2x: Fix KR2 work-around condition
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/caif/caif_dev.c	2
-rw-r--r--	net/caif/caif_usb.c	4
-rw-r--r--	net/ipv4/ip_input.c	6
-rw-r--r--	net/ipv4/tcp_input.c	6
-rw-r--r--	net/ipv6/ip6_input.c	6
-rw-r--r--	net/ipv6/route.c	3
-rw-r--r--	net/irda/iriap.c	7
-rw-r--r--	net/l2tp/l2tp_ppp.c	1
-rw-r--r--	net/mac80211/cfg.c	12
-rw-r--r--	net/mac80211/iface.c	2
-rw-r--r--	net/mac80211/tx.c	77
-rw-r--r--	net/netfilter/ipset/ip_set_core.c	3
-rw-r--r--	net/rds/message.c	8
-rw-r--r--	net/sctp/endpointola.c	2
-rw-r--r--	net/sctp/socket.c	6
-rw-r--r--	net/sctp/ssnmap.c	8
-rw-r--r--	net/sctp/tsnmap.c	13
-rw-r--r--	net/sctp/ulpqueue.c	87
-rw-r--r--	net/wireless/nl80211.c	61
19 files changed, 175 insertions, 139 deletions
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 1ae1d9cb278d..21760f008974 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	return NULL;
 }
 
-void caif_flow_cb(struct sk_buff *skb)
+static void caif_flow_cb(struct sk_buff *skb)
 {
 	struct caif_device_entry *caifd;
 	void (*dtor)(struct sk_buff *skb) = NULL;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 3ebc8cbc91ff..ef8ebaa993cf 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	layr->up->ctrlcmd(layr->up, ctrl, layr->id);
 }
 
-struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
-			      u8 braddr[ETH_ALEN])
+static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+				     u8 braddr[ETH_ALEN])
 {
 	struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 					icmp_send(skb, ICMP_DEST_UNREACH,
 						  ICMP_PROT_UNREACH, 0);
 				}
-			} else
+				kfree_skb(skb);
+			} else {
 				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-			kfree_skb(skb);
+				consume_skb(skb);
+			}
 		}
 	}
  out:
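The ipv4/ipv6 hunks above are the dropwatch fix: a packet that was actually delivered must be released with consume_skb() rather than kfree_skb(), because kfree_skb() feeds the drop-monitor path and so registers a false drop. A minimal userspace sketch of the pattern, with free() wrappers standing in for the two kernel helpers (all names below are hypothetical, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int drops_reported;

/* Stand-in for kfree_skb(): disposal that drop monitors count. */
static void drop_pkt(void *pkt)
{
	drops_reported++;
	free(pkt);
}

/* Stand-in for consume_skb(): normal, uncounted disposal. */
static void consume_pkt(void *pkt)
{
	free(pkt);
}

static void local_deliver(void *pkt, bool handler_found)
{
	if (handler_found)
		consume_pkt(pkt);	/* delivered: not a drop */
	else
		drop_pkt(pkt);		/* no protocol handler: a real drop */
}

int main(void)
{
	local_deliver(malloc(64), true);
	local_deliver(malloc(64), false);
	/* Prints 1: only the undelivered packet is reported as a drop. */
	printf("drops reported: %d\n", drops_reported);
	return 0;
}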
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..0d9bdacce99f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (tcp_checksum_complete_user(sk, skb))
 				goto csum_error;
 
+			if ((int)skb->truesize > sk->sk_forward_alloc)
+				goto step5;
+
 			/* Predicted packet is in window by definition.
 			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 			 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			tcp_rcv_rtt_measure_ts(sk, skb);
 
-			if ((int)skb->truesize > sk->sk_forward_alloc)
-				goto step5;
-
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
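These two hunks just move the sk_forward_alloc bailout above tcp_rcv_rtt_measure_ts(): previously a packet could have its RTT sampled on the fast path and then sampled again after falling through to step5, skewing the receiver RTT estimate. A toy model of why the ordering matters (counters and function names are illustrative only, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

static int rtt_samples;

static void measure_rtt(void)
{
	rtt_samples++;
}

static void slow_path(void)
{
	measure_rtt();	/* the slow path takes its own RTT sample */
}

/* Old order: sample first, then discover we must fall back. */
static void rcv_old(bool must_bail)
{
	measure_rtt();
	if (must_bail)
		slow_path();
}

/* New order (as in the patch): bail out before sampling. */
static void rcv_new(bool must_bail)
{
	if (must_bail) {
		slow_path();
		return;
	}
	measure_rtt();
}

int main(void)
{
	rtt_samples = 0;
	rcv_old(true);
	printf("old order: %d samples for one packet\n", rtt_samples); /* 2 */

	rtt_samples = 0;
	rcv_new(true);
	printf("new order: %d samples for one packet\n", rtt_samples); /* 1 */
	return 0;
}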
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b10414e619e..b1876e52091e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -241,9 +241,11 @@ resubmit:
 				icmpv6_send(skb, ICMPV6_PARAMPROB,
 					    ICMPV6_UNK_NEXTHDR, nhoff);
 			}
-		} else
+			kfree_skb(skb);
+		} else {
 			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-		kfree_skb(skb);
+			consume_skb(skb);
+		}
 	}
 	rcu_read_unlock();
 	return 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 928266569689..e5fe0041adfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
 restart:
 	read_lock_bh(&table->tb6_lock);
 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
 			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 			ip6_del_rt(rt);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index e71e85ba2bf1..29340a9a6fb9 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
 /*	case CS_ISO_8859_9: */
 /*	case CS_UNICODE: */
 	default:
-		IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
-			   __func__, ias_charset_types[charset]);
+		IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
+			   __func__, charset,
+			   charset < ARRAY_SIZE(ias_charset_types) ?
+			   ias_charset_types[charset] :
+			   "(unknown)");
 
 		/* Aborting, close connection! */
 		iriap_disconnect_request(self);
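The iriap.c change is a bounds check on an index received off the wire before it is used to index a fixed name table. A self-contained sketch of the same guard (the table contents here are invented):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Invented stand-in for ias_charset_types[]. */
static const char *const charset_names[] = {
	"ASCII", "ISO-8859-1", "UNICODE",
};

static void report_charset(unsigned int charset)
{
	/* Print the raw value, and only index the table when in range. */
	printf("charset [%u] %s, not supported\n", charset,
	       charset < ARRAY_SIZE(charset_names) ?
	       charset_names[charset] : "(unknown)");
}

int main(void)
{
	report_charset(1);	/* in range: prints the table entry */
	report_charset(42);	/* attacker-chosen: no out-of-bounds read */
	return 0;
}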
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 3f4e3afc191a..6a53371dba1f 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	l2tp_xmit_skb(session, skb, session->hdr_len);
 
 	sock_put(ps->tunnel_sock);
+	sock_put(sk);
 
 	return error;
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 09d96a8f6c2c..808f5fcd1ced 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3285,13 +3285,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
 					 struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	int ret = -ENODATA;
 
 	rcu_read_lock();
-	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-	if (chanctx_conf) {
-		*chandef = chanctx_conf->def;
+	if (local->use_chanctx) {
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (chanctx_conf) {
+			*chandef = chanctx_conf->def;
+			ret = 0;
+		}
+	} else if (local->open_count == local->monitors) {
+		*chandef = local->monitor_chandef;
 		ret = 0;
 	}
 	rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 2c059e54e885..640afab304d7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
 
 	lockdep_assert_held(&local->mtx);
 
-	active = !list_empty(&local->chanctx_list);
+	active = !list_empty(&local->chanctx_list) || local->monitors;
 
 	if (!local->ops->remain_on_channel) {
 		list_for_each_entry(roc, &local->roc_list, list) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index de8548bf0a7f..ce78d1149f1d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		if (local->queue_stop_reasons[q] ||
 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
 			if (unlikely(info->flags &
-				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
-				     local->queue_stop_reasons[q] &
-				     ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
-				/*
-				 * Drop off-channel frames if queues are stopped
-				 * for any reason other than off-channel
-				 * operation. Never queue them.
-				 */
-				spin_unlock_irqrestore(
-					&local->queue_stop_reason_lock, flags);
-				ieee80211_purge_tx_queue(&local->hw, skbs);
-				return true;
+				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
+				if (local->queue_stop_reasons[q] &
+				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
+					/*
+					 * Drop off-channel frames if queues
+					 * are stopped for any reason other
+					 * than off-channel operation. Never
+					 * queue them.
+					 */
+					spin_unlock_irqrestore(
+						&local->queue_stop_reason_lock,
+						flags);
+					ieee80211_purge_tx_queue(&local->hw,
+								 skbs);
+					return true;
+				}
+			} else {
+
+				/*
+				 * Since queue is stopped, queue up frames for
+				 * later transmission from the tx-pending
+				 * tasklet when the queue is woken again.
+				 */
+				if (txpending)
+					skb_queue_splice_init(skbs,
+							      &local->pending[q]);
+				else
+					skb_queue_splice_tail_init(skbs,
+								   &local->pending[q]);
+
+				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+						       flags);
+				return false;
 			}
-
-			/*
-			 * Since queue is stopped, queue up frames for later
-			 * transmission from the tx-pending tasklet when the
-			 * queue is woken again.
-			 */
-			if (txpending)
-				skb_queue_splice_init(skbs, &local->pending[q]);
-			else
-				skb_queue_splice_tail_init(skbs,
-							   &local->pending[q]);
-
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-			return false;
 		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	}
 
 	if (!is_multicast_ether_addr(skb->data)) {
+		struct sta_info *next_hop;
+		bool mpp_lookup = true;
+
 		mpath = mesh_path_lookup(sdata, skb->data);
-		if (!mpath)
+		if (mpath) {
+			mpp_lookup = false;
+			next_hop = rcu_dereference(mpath->next_hop);
+			if (!next_hop ||
+			    !(mpath->flags & (MESH_PATH_ACTIVE |
+					      MESH_PATH_RESOLVING)))
+				mpp_lookup = true;
+		}
+
+		if (mpp_lookup)
 			mppath = mpp_path_lookup(sdata, skb->data);
+
+		if (mppath && mpath)
+			mesh_path_del(mpath->sdata, mpath->dst);
 	}
 
 	/*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
 	if (local->tim_in_locked_section) {
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
 	} else {
-		spin_lock(&local->tim_lock);
+		spin_lock_bh(&local->tim_lock);
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
-		spin_unlock(&local->tim_lock);
+		spin_unlock_bh(&local->tim_lock);
 	}
 
 	return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f82b2e606cfd..1ba9dbc0e107 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
 	if (ret == -EAGAIN)
 		ret = 1;
 
-	return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+	return (ret < 0 && ret != -ENOTEMPTY) ? ret :
+		ret > 0 ? 0 : -IPSET_ERR_EXIST;
 }
 
 /* Get headed data of a set */
diff --git a/net/rds/message.c b/net/rds/message.c
index f0a4658f3273..aba232f9f308 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
 void rds_message_put(struct rds_message *rm)
 {
 	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-	if (atomic_read(&rm->m_refcount) == 0) {
-printk(KERN_CRIT "danger refcount zero on %p\n", rm);
-WARN_ON(1);
-	}
+	WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
 	if (atomic_dec_and_test(&rm->m_refcount)) {
 		BUG_ON(!list_empty(&rm->m_sock_item));
 		BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
 	struct rds_message *rm;
 
+	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
+		return NULL;
+
 	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
 	if (!rm)
 		goto out;
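The new check in rds_message_alloc() compares extra_len against KMALLOC_MAX_SIZE minus the header size, so the sum passed to kzalloc() can neither wrap nor exceed the allocator limit. A portable sketch of the guard, with MAX_ALLOC as a made-up stand-in for KMALLOC_MAX_SIZE:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ALLOC (1UL << 22)	/* stand-in for KMALLOC_MAX_SIZE */

struct message {
	size_t len;
	unsigned char payload[];	/* extra_len bytes follow the header */
};

static struct message *message_alloc(size_t extra_len)
{
	/* Subtract on the constant side: the test itself cannot wrap,
	 * and it rejects any extra_len that would overflow the sum. */
	if (extra_len > MAX_ALLOC - sizeof(struct message))
		return NULL;

	return calloc(1, sizeof(struct message) + extra_len);
}

int main(void)
{
	/* A huge request now fails cleanly instead of wrapping into a
	 * tiny allocation followed by out-of-bounds writes. */
	printf("huge:  %p\n", (void *)message_alloc((size_t)-1));
	printf("small: %p\n", (void *)message_alloc(128));
	return 0;
}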
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2b3ef03c6098..12ed45dbe75d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 	/* SCTP-AUTH extensions*/
 	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
-	null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+	null_key = sctp_auth_shkey_create(0, gfp);
 	if (!null_key)
 		goto nomem;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c99458df3f3f..b9070736b8d9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
+	/* Allow the struct to grow and fill in as much as possible */
+	len = min_t(size_t, len, sizeof(sas));
+
 	if (copy_from_user(&sas, optval, len))
 		return -EFAULT;
 
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	/* Mark beginning of a new observation period */
 	asoc->stats.max_obs_rto = asoc->rto_min;
 
-	/* Allow the struct to grow and fill in as much as possible */
-	len = min_t(size_t, len, sizeof(sas));
-
 	if (put_user(len, optlen))
 		return -EFAULT;
 
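Moving the min_t() clamp ahead of copy_from_user() is the substance of this fix: the kernel must never copy more than sizeof(sas) into its stack struct, whatever length userspace claims, while still letting older binaries pass a shorter struct. A userspace sketch of clamp-before-copy (struct layout and all names are invented):

#include <stdio.h>
#include <string.h>

/* Invented stand-in for struct sctp_assoc_stats. */
struct assoc_stats {
	int id;
	long packets;
	long gap_acks;	/* newer field; old binaries pass a shorter len */
};

static int get_assoc_stats(const void *optval, size_t len)
{
	struct assoc_stats sas;

	if (len < sizeof(int))
		return -1;	/* too short to carry even the id */

	/* Clamp BEFORE copying: never read more than sizeof(sas). */
	if (len > sizeof(sas))
		len = sizeof(sas);
	memcpy(&sas, optval, len);	/* stands in for copy_from_user() */

	printf("copied %zu bytes for assoc %d\n", len, sas.id);
	return 0;
}

int main(void)
{
	struct assoc_stats s = { .id = 7 };

	/* Userspace claims an oversized buffer: the clamp keeps the
	 * copy within both the source and the destination struct. */
	return get_assoc_stats(&s, sizeof(s) + 100);
}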
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 442ad4ed6315..825ea94415b3 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -41,8 +41,6 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-#define MAX_KMALLOC_SIZE	131072
-
 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
 					    __u16 out);
 
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	int size;
 
 	size = sctp_ssnmap_size(in, out);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		retval = kmalloc(size, gfp);
 	else
 		retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	return retval;
 
 fail_map:
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(retval);
 	else
 		free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
 	int size;
 
 	size = sctp_ssnmap_size(map->in.len, map->out.len);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(map);
 	else
 		free_pages((unsigned long)map, get_order(size));
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c31..396c45174e5b 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
 				     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
 
 /* Initialize a block of memory as a tsnmap. */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
 
 	gap = tsn - map->base_tsn;
 
-	if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+	if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
 		return -ENOMEM;
 
 	if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 	return ngaps;
 }
 
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
 	unsigned long *new;
 	unsigned long inc;
 	u16 len;
 
-	if (gap >= SCTP_TSN_MAP_SIZE)
+	if (size > SCTP_TSN_MAP_SIZE)
 		return 0;
 
-	inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+	inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
 	len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
 
 	new = kzalloc(len>>3, GFP_ATOMIC);
 	if (!new)
 		return 0;
 
-	bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+	bitmap_copy(new, map->tsn_map,
+		    map->max_tsn_seen - map->cumulative_tsn_ack_point);
 	kfree(map->tsn_map);
 	map->tsn_map = new;
 	map->len = len;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP. */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 				       struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 35545ccc30fd..e652d05ff712 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -554,16 +554,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
 	if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
 	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
 		goto nla_put_failure;
-	if (chan->flags & IEEE80211_CHAN_RADAR) {
-		u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
-		if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
-			goto nla_put_failure;
-		if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
-				chan->dfs_state))
-			goto nla_put_failure;
-		if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
-			goto nla_put_failure;
-	}
+	if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+		goto nla_put_failure;
 	if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
 	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
 		goto nla_put_failure;
@@ -900,9 +893,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
 		    nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
 				c->max_interfaces))
 			goto nla_put_failure;
-		if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
-				c->radar_detect_widths))
-			goto nla_put_failure;
 
 		nla_nest_end(msg, nl_combi);
 	}
@@ -914,48 +904,6 @@ nla_put_failure:
 	return -ENOBUFS;
 }
 
-#ifdef CONFIG_PM
-static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
-					struct sk_buff *msg)
-{
-	const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
-	struct nlattr *nl_tcp;
-
-	if (!tcp)
-		return 0;
-
-	nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
-	if (!nl_tcp)
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
-			tcp->data_payload_max))
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
-			tcp->data_payload_max))
-		return -ENOBUFS;
-
-	if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
-		return -ENOBUFS;
-
-	if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
-				sizeof(*tcp->tok), tcp->tok))
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
-			tcp->data_interval_max))
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
-			tcp->wake_payload_max))
-		return -ENOBUFS;
-
-	nla_nest_end(msg, nl_tcp);
-	return 0;
-}
-#endif
-
 static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 			      struct cfg80211_registered_device *dev)
 {
@@ -1330,9 +1278,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
 			goto nla_put_failure;
 		}
 
-		if (nl80211_send_wowlan_tcp_caps(dev, msg))
-			goto nla_put_failure;
-
 		nla_nest_end(msg, nl_wowlan);
 	}
 #endif