Diffstat (limited to 'net/mac80211/tx.c')
 net/mac80211/tx.c | 190 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 126 insertions(+), 64 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index eaa4118de988..8834cc93c716 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -317,12 +317,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 	if (!atomic_read(&tx->sdata->bss->num_sta_ps))
 		return TX_CONTINUE;
 
-	/* buffered in hardware */
-	if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) {
-		info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
 
+	/* device releases frame after DTIM beacon */
+	if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
 		return TX_CONTINUE;
-	}
 
 	/* buffered in mac80211 */
 	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
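
With this ordering, IEEE80211_TX_CTL_SEND_AFTER_DTIM is set on the frame whether it ends up buffered by mac80211 or handed to the device. A minimal sketch of how a driver-side tx path might honor the flag; the driver name and queueing helpers are hypothetical, only the mac80211 flag and tx-info accessor are real:

#include <net/mac80211.h>

/* Hypothetical driver hook: decide where a buffered multicast frame goes. */
static void mydrv_queue_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
		/* hold the frame until the next DTIM beacon has gone out */
		mydrv_enqueue_after_dtim(hw, skb);	/* hypothetical */
	else
		mydrv_enqueue_normal(hw, skb);		/* hypothetical */
}
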
@@ -367,15 +366,16 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	u32 staflags;
 
-	if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
-		     || ieee80211_is_auth(hdr->frame_control)
-		     || ieee80211_is_assoc_resp(hdr->frame_control)
-		     || ieee80211_is_reassoc_resp(hdr->frame_control)))
+	if (unlikely(!sta ||
+		     ieee80211_is_probe_resp(hdr->frame_control) ||
+		     ieee80211_is_auth(hdr->frame_control) ||
+		     ieee80211_is_assoc_resp(hdr->frame_control) ||
+		     ieee80211_is_reassoc_resp(hdr->frame_control)))
 		return TX_CONTINUE;
 
 	staflags = get_sta_flags(sta);
 
-	if (unlikely((staflags & WLAN_STA_PS) &&
+	if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
 		     !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
 		printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
@@ -398,8 +398,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 		} else
 			tx->local->total_ps_buffered++;
 
-		/* Queue frame to be sent after STA sends an PS Poll frame */
-		if (skb_queue_empty(&sta->ps_tx_buf))
+		/*
+		 * Queue frame to be sent after STA wakes up/polls,
+		 * but don't set the TIM bit if the driver is blocking
+		 * wakeup or poll response transmissions anyway.
+		 */
+		if (skb_queue_empty(&sta->ps_tx_buf) &&
+		    !(staflags & WLAN_STA_PS_DRIVER))
 			sta_info_set_tim_bit(sta);
 
 		info->control.jiffies = jiffies;
@@ -409,7 +414,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 		return TX_QUEUED;
 	}
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-	else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
+	else if (unlikely(staflags & WLAN_STA_PS_STA)) {
 		printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
 		       "set -> send frame\n", tx->dev->name,
 		       sta->sta.addr);
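
The single WLAN_STA_PS flag is split into WLAN_STA_PS_STA (the station itself entered power save) and WLAN_STA_PS_DRIVER (the driver is blocking delivery), and the TIM bit is left clear while a driver block is active. A hedged sketch of the driver side of that contract, assuming the ieee80211_sta_block_awake() helper that accompanies the new flag; the mydrv_* names are hypothetical:

#include <net/mac80211.h>

/*
 * Hypothetical driver: keep mac80211 from waking/delivering to this
 * station until firmware has flushed its queues, then unblock.
 */
static void mydrv_sta_going_to_sleep(struct ieee80211_hw *hw,
				     struct ieee80211_sta *sta)
{
	/* while blocked, buffered frames do not set the TIM bit */
	ieee80211_sta_block_awake(hw, sta, true);
	mydrv_flush_sta_queues(hw, sta);	/* hypothetical */
}

static void mydrv_sta_flush_done(struct ieee80211_hw *hw,
				 struct ieee80211_sta *sta)
{
	/* lift the block; mac80211 may now set the TIM bit and deliver */
	ieee80211_sta_block_awake(hw, sta, false);
}
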
@@ -1047,7 +1052,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
 	hdr = (struct ieee80211_hdr *) skb->data;
 
-	tx->sta = sta_info_get(local, hdr->addr1);
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		tx->sta = rcu_dereference(sdata->u.vlan.sta);
+	if (!tx->sta)
+		tx->sta = sta_info_get(local, hdr->addr1);
 
 	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
 	    (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1201,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	struct sk_buff *skb = tx->skb;
 	ieee80211_tx_result res = TX_DROP;
 
 #define CALL_TXH(txh) \
-	res = txh(tx);		\
-	if (res != TX_CONTINUE)	\
-		goto txh_done;
-
-	CALL_TXH(ieee80211_tx_h_check_assoc)
-	CALL_TXH(ieee80211_tx_h_ps_buf)
-	CALL_TXH(ieee80211_tx_h_select_key)
-	CALL_TXH(ieee80211_tx_h_michael_mic_add)
-	CALL_TXH(ieee80211_tx_h_rate_ctrl)
-	CALL_TXH(ieee80211_tx_h_misc)
-	CALL_TXH(ieee80211_tx_h_sequence)
-	CALL_TXH(ieee80211_tx_h_fragment)
+	do {				\
+		res = txh(tx);		\
+		if (res != TX_CONTINUE)	\
+			goto txh_done;	\
+	} while (0)
+
+	CALL_TXH(ieee80211_tx_h_check_assoc);
+	CALL_TXH(ieee80211_tx_h_ps_buf);
+	CALL_TXH(ieee80211_tx_h_select_key);
+	CALL_TXH(ieee80211_tx_h_michael_mic_add);
+	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+		CALL_TXH(ieee80211_tx_h_rate_ctrl);
+	CALL_TXH(ieee80211_tx_h_misc);
+	CALL_TXH(ieee80211_tx_h_sequence);
+	CALL_TXH(ieee80211_tx_h_fragment);
 	/* handlers after fragment must be aware of tx info fragmentation! */
-	CALL_TXH(ieee80211_tx_h_stats)
-	CALL_TXH(ieee80211_tx_h_encrypt)
-	CALL_TXH(ieee80211_tx_h_calculate_duration)
+	CALL_TXH(ieee80211_tx_h_stats);
+	CALL_TXH(ieee80211_tx_h_encrypt);
+	CALL_TXH(ieee80211_tx_h_calculate_duration);
 #undef CALL_TXH
 
  txh_done:
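
Wrapping CALL_TXH() in do { ... } while (0) turns the macro into a single statement, so the invocations can take a trailing semicolon and can safely sit under an un-braced if (as the rate-control call now does). A standalone illustration of why the bare multi-statement form is fragile; this is a generic example, not mac80211 code:

#include <stdio.h>

/* Bare form: expands to two statements, so only the first is guarded. */
#define REPORT_BAD(x)	\
	printf("value %d\n", (x));	\
	printf("  (flagged)\n")

/* do { } while (0) form: behaves as one statement, semicolon required. */
#define REPORT_GOOD(x)				\
	do {					\
		printf("value %d\n", (x));	\
		printf("  (flagged)\n");	\
	} while (0)

int main(void)
{
	int v = 3;

	if (v < 0)
		REPORT_BAD(v);	/* "(flagged)" still prints: bug */

	if (v < 0)
		REPORT_GOOD(v);	/* nothing prints, as intended */

	return 0;
}
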
@@ -1387,6 +1398,30 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
 	return 0;
 }
 
+static bool need_dynamic_ps(struct ieee80211_local *local)
+{
+	/* driver doesn't support power save */
+	if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+		return false;
+
+	/* hardware does dynamic power save */
+	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+		return false;
+
+	/* dynamic power save disabled */
+	if (local->hw.conf.dynamic_ps_timeout <= 0)
+		return false;
+
+	/* we are scanning, don't enable power save */
+	if (local->scanning)
+		return false;
+
+	if (!local->ps_sdata)
+		return false;
+
+	return true;
+}
+
 static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 			   struct sk_buff *skb)
 {
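
need_dynamic_ps() centralizes the checks that previously sat inline in ieee80211_xmit(): mac80211 only arms its own dynamic-PS timer when the driver advertises power-save support but does not implement the dynamic timeout in hardware. A sketch of the driver side, assuming a hypothetical driver's probe code; the two capability flags are the real mac80211 ones tested above:

#include <net/mac80211.h>

/* Hypothetical probe-time setup for a device whose firmware handles the
 * dynamic-PS timeout itself; mac80211 then skips its own timer. */
static void mydrv_setup_hw_flags(struct ieee80211_hw *hw)
{
	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
}

/* A device that only supports static PS would set just
 * IEEE80211_HW_SUPPORTS_PS and rely on mac80211's timer instead. */
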
@@ -1397,11 +1432,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 	int headroom;
 	bool may_encrypt;
 
-	dev_hold(sdata->dev);
-
-	if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
-	    local->hw.conf.dynamic_ps_timeout > 0 &&
-	    !(local->scanning) && local->ps_sdata) {
+	if (need_dynamic_ps(local)) {
 		if (local->hw.conf.flags & IEEE80211_CONF_PS) {
 			ieee80211_stop_queues_by_reason(&local->hw,
 				IEEE80211_QUEUE_STOP_REASON_PS);
@@ -1413,7 +1444,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 				  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 	}
 
-	info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+	rcu_read_lock();
 
 	if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
 		int hdrlen;
@@ -1437,7 +1468,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 			 * support we will need a different mechanism.
 			 */
 
-			rcu_read_lock();
 			list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
 						list) {
 				if (!netif_running(tmp_sdata->dev))
@@ -1446,13 +1476,10 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 					continue;
 				if (compare_ether_addr(tmp_sdata->dev->dev_addr,
 						       hdr->addr2) == 0) {
-					dev_hold(tmp_sdata->dev);
-					dev_put(sdata->dev);
 					sdata = tmp_sdata;
 					break;
 				}
 			}
-			rcu_read_unlock();
 		}
 	}
 
@@ -1466,7 +1493,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 
 	if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
 		dev_kfree_skb(skb);
-		dev_put(sdata->dev);
+		rcu_read_unlock();
 		return;
 	}
 
@@ -1477,13 +1504,13 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 	    !is_multicast_ether_addr(hdr->addr1))
 		if (mesh_nexthop_lookup(skb, sdata)) {
 			/* skb queued: don't free */
-			dev_put(sdata->dev);
+			rcu_read_unlock();
 			return;
 		}
 
 	ieee80211_select_queue(local, skb);
 	ieee80211_tx(sdata, skb, false);
-	dev_put(sdata->dev);
+	rcu_read_unlock();
 }
 
 netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1547,6 +1574,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
 
 	memset(info, 0, sizeof(*info));
 
+	info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+
 	/* pass the radiotap header up to xmit */
 	ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 	return NETDEV_TX_OK;
@@ -1585,7 +1614,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	const u8 *encaps_data;
 	int encaps_len, skip_header_bytes;
 	int nh_pos, h_pos;
-	struct sta_info *sta;
+	struct sta_info *sta = NULL;
 	u32 sta_flags = 0;
 
 	if (unlikely(skb->len < ETH_HLEN)) {
@@ -1602,8 +1631,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
 
 	switch (sdata->vif.type) {
-	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
+		rcu_read_lock();
+		sta = rcu_dereference(sdata->u.vlan.sta);
+		if (sta) {
+			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+			/* RA TA DA SA */
+			memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
+			memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+			memcpy(hdr.addr3, skb->data, ETH_ALEN);
+			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+			hdrlen = 30;
+			sta_flags = get_sta_flags(sta);
+		}
+		rcu_read_unlock();
+		if (sta)
+			break;
+		/* fall through */
+	case NL80211_IFTYPE_AP:
 		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
 		/* DA BSSID SA */
 		memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1639,21 +1684,25 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			/* packet from other interface */
 			struct mesh_path *mppath;
 			int is_mesh_mcast = 1;
-			char *mesh_da;
+			const u8 *mesh_da;
 
 			rcu_read_lock();
 			if (is_multicast_ether_addr(skb->data))
 				/* DA TA mSA AE:SA */
 				mesh_da = skb->data;
 			else {
+				static const u8 bcast[ETH_ALEN] =
+					{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
 				mppath = mpp_path_lookup(skb->data, sdata);
 				if (mppath) {
 					/* RA TA mDA mSA AE:DA SA */
 					mesh_da = mppath->mpp;
 					is_mesh_mcast = 0;
-				} else
+				} else {
 					/* DA TA mSA AE:SA */
-					mesh_da = dev->broadcast;
+					mesh_da = bcast;
+				}
 			}
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					mesh_da, dev->dev_addr);
@@ -1677,12 +1726,21 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 		break;
 #endif
 	case NL80211_IFTYPE_STATION:
-		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
-		/* BSSID SA DA */
 		memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
-		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
-		memcpy(hdr.addr3, skb->data, ETH_ALEN);
-		hdrlen = 24;
+		if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
+			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+			/* RA TA DA SA */
+			memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+			memcpy(hdr.addr3, skb->data, ETH_ALEN);
+			memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+			hdrlen = 30;
+		} else {
+			fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+			/* BSSID SA DA */
+			memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+			memcpy(hdr.addr3, skb->data, ETH_ALEN);
+			hdrlen = 24;
+		}
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		/* DA SA BSSID */
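
Both the AP_VLAN case and the managed use_4addr case above build a 30-byte 4-address header with ToDS and FromDS set, carrying RA/TA/DA/SA; EAPOL (ETH_P_PAE) frames are deliberately left 3-address so the key handshake works before the 4-addr link is usable. A small standalone program tabulating the standard 802.11 data-frame address mappings the code follows:

#include <stdio.h>

/* 802.11 data-frame address fields as a function of ToDS/FromDS.
 * The use_4addr/AP_VLAN paths above are the to_ds=1, from_ds=1 row. */
int main(void)
{
	static const struct {
		int to_ds, from_ds;
		const char *a1, *a2, *a3, *a4;
	} map[] = {
		{ 0, 0, "DA (RA)",    "SA (TA)",    "BSSID", "-"  },
		{ 1, 0, "BSSID (RA)", "SA (TA)",    "DA",    "-"  },
		{ 0, 1, "DA (RA)",    "BSSID (TA)", "SA",    "-"  },
		{ 1, 1, "RA",         "TA",         "DA",    "SA" },
	};

	printf("ToDS FromDS  addr1        addr2        addr3   addr4\n");
	for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("  %d     %d     %-12s %-12s %-7s %s\n",
		       map[i].to_ds, map[i].from_ds,
		       map[i].a1, map[i].a2, map[i].a3, map[i].a4);
	return 0;
}
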
@@ -1907,12 +1965,10 @@ void ieee80211_tx_pending(unsigned long data)
 			}
 
 			sdata = vif_to_sdata(info->control.vif);
-			dev_hold(sdata->dev);
 			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
 					       flags);
 
 			txok = ieee80211_tx_pending_skb(local, skb);
-			dev_put(sdata->dev);
 			if (!txok)
 				__skb_queue_head(&local->pending[i], skb);
 			spin_lock_irqsave(&local->queue_stop_reason_lock,
@@ -1990,8 +2046,9 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
 	}
 }
 
-struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
-				     struct ieee80211_vif *vif)
+struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
+					 struct ieee80211_vif *vif,
+					 u16 *tim_offset, u16 *tim_length)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct sk_buff *skb = NULL;
@@ -2008,6 +2065,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 
 	sdata = vif_to_sdata(vif);
 
+	if (tim_offset)
+		*tim_offset = 0;
+	if (tim_length)
+		*tim_length = 0;
+
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		ap = &sdata->u.ap;
 		beacon = rcu_dereference(ap->beacon);
@@ -2043,6 +2105,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 			spin_unlock_irqrestore(&local->sta_lock, flags);
 		}
 
+		if (tim_offset)
+			*tim_offset = beacon->head_len;
+		if (tim_length)
+			*tim_length = skb->len - beacon->head_len;
+
 		if (beacon->tail)
 			memcpy(skb_put(skb, beacon->tail_len),
 			       beacon->tail, beacon->tail_len);
@@ -2080,7 +2147,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 			cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
 		memset(mgmt->da, 0xff, ETH_ALEN);
 		memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
-		/* BSSID is left zeroed, wildcard value */
+		memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
 		mgmt->u.beacon.beacon_int =
 			cpu_to_le16(sdata->vif.bss_conf.beacon_int);
 		mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2119,7 +2186,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 	rcu_read_unlock();
 	return skb;
 }
-EXPORT_SYMBOL(ieee80211_beacon_get);
+EXPORT_SYMBOL(ieee80211_beacon_get_tim);
 
 void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		       const void *frame, size_t frame_len,
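
ieee80211_beacon_get() becomes ieee80211_beacon_get_tim(), which additionally reports where the TIM element sits inside the returned beacon so a driver or its firmware can patch it in place. Presumably the old name remains available as a thin wrapper that passes NULL for both out-parameters. A hedged sketch of a driver consuming the new call; the mydrv_* names are hypothetical:

#include <net/mac80211.h>

/* Hypothetical beacon refresh: upload the template and tell the
 * firmware where the TIM IE lives so it can update it per DTIM. */
static int mydrv_update_beacon(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	u16 tim_offset = 0, tim_length = 0;
	struct sk_buff *beacon;

	beacon = ieee80211_beacon_get_tim(hw, vif, &tim_offset, &tim_length);
	if (!beacon)
		return -ENOMEM;

	/* hypothetical firmware upload taking the TIM window */
	mydrv_fw_set_beacon(hw, beacon->data, beacon->len,
			    tim_offset, tim_length);

	dev_kfree_skb(beacon);
	return 0;
}
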
@@ -2214,17 +2281,12 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_get_buffered_bc);
 
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
-		      int encrypt)
+void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
 {
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	skb_set_mac_header(skb, 0);
 	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, 0);
 
-	if (!encrypt)
-		info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
-
 	/*
 	 * The other path calling ieee80211_xmit is from the tasklet,
 	 * and while we can handle concurrent transmissions locking
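
ieee80211_tx_skb() loses its encrypt argument; callers that used to pass 0 are now expected to set IEEE80211_TX_INTFL_DONT_ENCRYPT on the frame's tx info themselves before handing it off. A sketch of the caller-side conversion; the wrapper function is illustrative, the flag, headers and helpers are the real ones from the removed code:

#include <net/mac80211.h>
#include "ieee80211_i.h"	/* mac80211-internal: sub_if_data, ieee80211_tx_skb() */

/* Illustrative caller: what used to be ieee80211_tx_skb(sdata, skb, 0)
 * becomes an explicit flag plus the two-argument call. */
static void send_unencrypted(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	ieee80211_tx_skb(sdata, skb);
}
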