Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--  net/mac80211/tx.c | 1057
1 file changed, 525 insertions, 532 deletions
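Most of the churn below comes from one conversion: per-frame TX control state moves out of the separate struct ieee80211_tx_control that used to be passed alongside each skb and into struct ieee80211_tx_info, which lives in skb->cb and is fetched with IEEE80211_SKB_CB(); as a consequence the driver tx op is called with only the hardware pointer and the skb. A minimal sketch of the new pattern, assuming only the accessor and flag names visible in the hunks below (the helper function and its name are illustrative, not code from this tree):

#include <net/mac80211.h>

/* Illustrative only: mark one frame "do not encrypt" the new way. */
static void example_mark_no_encrypt(struct sk_buff *skb)
{
	/* TX control data now travels inside the skb itself */
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;

	/* a driver later reads the same ieee80211_tx_info from skb->cb
	 * in its ->tx() handler, which now takes just (hw, skb) */
}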
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c80d5899f279..9bd9faac3c3c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -52,9 +52,8 @@ static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdat
52 | static void ieee80211_dump_frame(const char *ifname, const char *title, | 52 | static void ieee80211_dump_frame(const char *ifname, const char *title, |
53 | const struct sk_buff *skb) | 53 | const struct sk_buff *skb) |
54 | { | 54 | { |
55 | const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 55 | const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
56 | u16 fc; | 56 | unsigned int hdrlen; |
57 | int hdrlen; | ||
58 | DECLARE_MAC_BUF(mac); | 57 | DECLARE_MAC_BUF(mac); |
59 | 58 | ||
60 | printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); | 59 | printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); |
@@ -63,13 +62,12 @@ static void ieee80211_dump_frame(const char *ifname, const char *title,
63 | return; | 62 | return; |
64 | } | 63 | } |
65 | 64 | ||
66 | fc = le16_to_cpu(hdr->frame_control); | 65 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
67 | hdrlen = ieee80211_get_hdrlen(fc); | ||
68 | if (hdrlen > skb->len) | 66 | if (hdrlen > skb->len) |
69 | hdrlen = skb->len; | 67 | hdrlen = skb->len; |
70 | if (hdrlen >= 4) | 68 | if (hdrlen >= 4) |
71 | printk(" FC=0x%04x DUR=0x%04x", | 69 | printk(" FC=0x%04x DUR=0x%04x", |
72 | fc, le16_to_cpu(hdr->duration_id)); | 70 | le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id)); |
73 | if (hdrlen >= 10) | 71 | if (hdrlen >= 10) |
74 | printk(" A1=%s", print_mac(mac, hdr->addr1)); | 72 | printk(" A1=%s", print_mac(mac, hdr->addr1)); |
75 | if (hdrlen >= 16) | 73 | if (hdrlen >= 16) |
@@ -87,15 +85,16 @@ static inline void ieee80211_dump_frame(const char *ifname, const char *title,
87 | } | 85 | } |
88 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ | 86 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ |
89 | 87 | ||
90 | static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | 88 | static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, |
91 | int next_frag_len) | 89 | int next_frag_len) |
92 | { | 90 | { |
93 | int rate, mrate, erp, dur, i; | 91 | int rate, mrate, erp, dur, i; |
94 | struct ieee80211_rate *txrate = tx->rate; | 92 | struct ieee80211_rate *txrate; |
95 | struct ieee80211_local *local = tx->local; | 93 | struct ieee80211_local *local = tx->local; |
96 | struct ieee80211_supported_band *sband; | 94 | struct ieee80211_supported_band *sband; |
97 | 95 | ||
98 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 96 | sband = local->hw.wiphy->bands[tx->channel->band]; |
97 | txrate = &sband->bitrates[tx->rate_idx]; | ||
99 | 98 | ||
100 | erp = 0; | 99 | erp = 0; |
101 | if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 100 | if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) |
@@ -139,7 +138,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
139 | 138 | ||
140 | /* data/mgmt */ | 139 | /* data/mgmt */ |
141 | if (0 /* FIX: data/mgmt during CFP */) | 140 | if (0 /* FIX: data/mgmt during CFP */) |
142 | return 32768; | 141 | return cpu_to_le16(32768); |
143 | 142 | ||
144 | if (group_addr) /* Group address as the destination - no ACK */ | 143 | if (group_addr) /* Group address as the destination - no ACK */ |
145 | return 0; | 144 | return 0; |
@@ -209,19 +208,7 @@ static u16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
209 | tx->sdata->bss_conf.use_short_preamble); | 208 | tx->sdata->bss_conf.use_short_preamble); |
210 | } | 209 | } |
211 | 210 | ||
212 | return dur; | 211 | return cpu_to_le16(dur); |
213 | } | ||
214 | |||
215 | static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local, | ||
216 | int queue) | ||
217 | { | ||
218 | return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); | ||
219 | } | ||
220 | |||
221 | static inline int __ieee80211_queue_pending(const struct ieee80211_local *local, | ||
222 | int queue) | ||
223 | { | ||
224 | return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]); | ||
225 | } | 212 | } |
226 | 213 | ||
227 | static int inline is_ieee80211_device(struct net_device *dev, | 214 | static int inline is_ieee80211_device(struct net_device *dev, |
@@ -233,16 +220,16 @@ static int inline is_ieee80211_device(struct net_device *dev,
233 | 220 | ||
234 | /* tx handlers */ | 221 | /* tx handlers */ |
235 | 222 | ||
236 | static ieee80211_tx_result | 223 | static ieee80211_tx_result debug_noinline |
237 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | 224 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) |
238 | { | 225 | { |
239 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 226 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
240 | struct sk_buff *skb = tx->skb; | 227 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
241 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
242 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 228 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
229 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
243 | u32 sta_flags; | 230 | u32 sta_flags; |
244 | 231 | ||
245 | if (unlikely(tx->flags & IEEE80211_TX_INJECTED)) | 232 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) |
246 | return TX_CONTINUE; | 233 | return TX_CONTINUE; |
247 | 234 | ||
248 | if (unlikely(tx->local->sta_sw_scanning) && | 235 | if (unlikely(tx->local->sta_sw_scanning) && |
@@ -256,7 +243,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
256 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) | 243 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) |
257 | return TX_CONTINUE; | 244 | return TX_CONTINUE; |
258 | 245 | ||
259 | sta_flags = tx->sta ? tx->sta->flags : 0; | 246 | sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; |
260 | 247 | ||
261 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { | 248 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { |
262 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && | 249 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && |
@@ -287,12 +274,12 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
287 | return TX_CONTINUE; | 274 | return TX_CONTINUE; |
288 | } | 275 | } |
289 | 276 | ||
290 | static ieee80211_tx_result | 277 | static ieee80211_tx_result debug_noinline |
291 | ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) | 278 | ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) |
292 | { | 279 | { |
293 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 280 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
294 | 281 | ||
295 | if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) | 282 | if (ieee80211_hdrlen(hdr->frame_control) >= 24) |
296 | ieee80211_include_sequence(tx->sdata, hdr); | 283 | ieee80211_include_sequence(tx->sdata, hdr); |
297 | 284 | ||
298 | return TX_CONTINUE; | 285 | return TX_CONTINUE; |
@@ -340,13 +327,17 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
340 | rcu_read_unlock(); | 327 | rcu_read_unlock(); |
341 | 328 | ||
342 | local->total_ps_buffered = total; | 329 | local->total_ps_buffered = total; |
330 | #ifdef MAC80211_VERBOSE_PS_DEBUG | ||
343 | printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", | 331 | printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", |
344 | wiphy_name(local->hw.wiphy), purged); | 332 | wiphy_name(local->hw.wiphy), purged); |
333 | #endif | ||
345 | } | 334 | } |
346 | 335 | ||
347 | static ieee80211_tx_result | 336 | static ieee80211_tx_result |
348 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | 337 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) |
349 | { | 338 | { |
339 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
340 | |||
350 | /* | 341 | /* |
351 | * broadcast/multicast frame | 342 | * broadcast/multicast frame |
352 | * | 343 | * |
@@ -369,11 +360,13 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
369 | purge_old_ps_buffers(tx->local); | 360 | purge_old_ps_buffers(tx->local); |
370 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= | 361 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= |
371 | AP_MAX_BC_BUFFER) { | 362 | AP_MAX_BC_BUFFER) { |
363 | #ifdef MAC80211_VERBOSE_PS_DEBUG | ||
372 | if (net_ratelimit()) { | 364 | if (net_ratelimit()) { |
373 | printk(KERN_DEBUG "%s: BC TX buffer full - " | 365 | printk(KERN_DEBUG "%s: BC TX buffer full - " |
374 | "dropping the oldest frame\n", | 366 | "dropping the oldest frame\n", |
375 | tx->dev->name); | 367 | tx->dev->name); |
376 | } | 368 | } |
369 | #endif | ||
377 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); | 370 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); |
378 | } else | 371 | } else |
379 | tx->local->total_ps_buffered++; | 372 | tx->local->total_ps_buffered++; |
@@ -382,7 +375,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
382 | } | 375 | } |
383 | 376 | ||
384 | /* buffered in hardware */ | 377 | /* buffered in hardware */ |
385 | tx->control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; | 378 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; |
386 | 379 | ||
387 | return TX_CONTINUE; | 380 | return TX_CONTINUE; |
388 | } | 381 | } |
@@ -391,6 +384,8 @@ static ieee80211_tx_result
391 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | 384 | ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) |
392 | { | 385 | { |
393 | struct sta_info *sta = tx->sta; | 386 | struct sta_info *sta = tx->sta; |
387 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
388 | u32 staflags; | ||
394 | DECLARE_MAC_BUF(mac); | 389 | DECLARE_MAC_BUF(mac); |
395 | 390 | ||
396 | if (unlikely(!sta || | 391 | if (unlikely(!sta || |
@@ -398,9 +393,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
398 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) | 393 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) |
399 | return TX_CONTINUE; | 394 | return TX_CONTINUE; |
400 | 395 | ||
401 | if (unlikely((sta->flags & WLAN_STA_PS) && | 396 | staflags = get_sta_flags(sta); |
402 | !(sta->flags & WLAN_STA_PSPOLL))) { | 397 | |
403 | struct ieee80211_tx_packet_data *pkt_data; | 398 | if (unlikely((staflags & WLAN_STA_PS) && |
399 | !(staflags & WLAN_STA_PSPOLL))) { | ||
404 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 400 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
405 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " | 401 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " |
406 | "before %d)\n", | 402 | "before %d)\n", |
@@ -411,11 +407,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
411 | purge_old_ps_buffers(tx->local); | 407 | purge_old_ps_buffers(tx->local); |
412 | if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { | 408 | if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { |
413 | struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); | 409 | struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); |
410 | #ifdef MAC80211_VERBOSE_PS_DEBUG | ||
414 | if (net_ratelimit()) { | 411 | if (net_ratelimit()) { |
415 | printk(KERN_DEBUG "%s: STA %s TX " | 412 | printk(KERN_DEBUG "%s: STA %s TX " |
416 | "buffer full - dropping oldest frame\n", | 413 | "buffer full - dropping oldest frame\n", |
417 | tx->dev->name, print_mac(mac, sta->addr)); | 414 | tx->dev->name, print_mac(mac, sta->addr)); |
418 | } | 415 | } |
416 | #endif | ||
419 | dev_kfree_skb(old); | 417 | dev_kfree_skb(old); |
420 | } else | 418 | } else |
421 | tx->local->total_ps_buffered++; | 419 | tx->local->total_ps_buffered++; |
@@ -424,24 +422,23 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
424 | if (skb_queue_empty(&sta->ps_tx_buf)) | 422 | if (skb_queue_empty(&sta->ps_tx_buf)) |
425 | sta_info_set_tim_bit(sta); | 423 | sta_info_set_tim_bit(sta); |
426 | 424 | ||
427 | pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; | 425 | info->control.jiffies = jiffies; |
428 | pkt_data->jiffies = jiffies; | ||
429 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); | 426 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); |
430 | return TX_QUEUED; | 427 | return TX_QUEUED; |
431 | } | 428 | } |
432 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 429 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
433 | else if (unlikely(sta->flags & WLAN_STA_PS)) { | 430 | else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { |
434 | printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " | 431 | printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " |
435 | "set -> send frame\n", tx->dev->name, | 432 | "set -> send frame\n", tx->dev->name, |
436 | print_mac(mac, sta->addr)); | 433 | print_mac(mac, sta->addr)); |
437 | } | 434 | } |
438 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 435 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
439 | sta->flags &= ~WLAN_STA_PSPOLL; | 436 | clear_sta_flags(sta, WLAN_STA_PSPOLL); |
440 | 437 | ||
441 | return TX_CONTINUE; | 438 | return TX_CONTINUE; |
442 | } | 439 | } |
443 | 440 | ||
444 | static ieee80211_tx_result | 441 | static ieee80211_tx_result debug_noinline |
445 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) | 442 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) |
446 | { | 443 | { |
447 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) | 444 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) |
@@ -453,21 +450,22 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
453 | return ieee80211_tx_h_multicast_ps_buf(tx); | 450 | return ieee80211_tx_h_multicast_ps_buf(tx); |
454 | } | 451 | } |
455 | 452 | ||
456 | static ieee80211_tx_result | 453 | static ieee80211_tx_result debug_noinline |
457 | ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | 454 | ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) |
458 | { | 455 | { |
459 | struct ieee80211_key *key; | 456 | struct ieee80211_key *key; |
457 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
460 | u16 fc = tx->fc; | 458 | u16 fc = tx->fc; |
461 | 459 | ||
462 | if (unlikely(tx->control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) | 460 | if (unlikely(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) |
463 | tx->key = NULL; | 461 | tx->key = NULL; |
464 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) | 462 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) |
465 | tx->key = key; | 463 | tx->key = key; |
466 | else if ((key = rcu_dereference(tx->sdata->default_key))) | 464 | else if ((key = rcu_dereference(tx->sdata->default_key))) |
467 | tx->key = key; | 465 | tx->key = key; |
468 | else if (tx->sdata->drop_unencrypted && | 466 | else if (tx->sdata->drop_unencrypted && |
469 | !(tx->control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && | 467 | !(info->flags & IEEE80211_TX_CTL_EAPOL_FRAME) && |
470 | !(tx->flags & IEEE80211_TX_INJECTED)) { | 468 | !(info->flags & IEEE80211_TX_CTL_INJECTED)) { |
471 | I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); | 469 | I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); |
472 | return TX_DROP; | 470 | return TX_DROP; |
473 | } else | 471 | } else |
@@ -496,15 +494,154 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
496 | } | 494 | } |
497 | 495 | ||
498 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 496 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
499 | tx->control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 497 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
500 | 498 | ||
501 | return TX_CONTINUE; | 499 | return TX_CONTINUE; |
502 | } | 500 | } |
503 | 501 | ||
504 | static ieee80211_tx_result | 502 | static ieee80211_tx_result debug_noinline |
503 | ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | ||
504 | { | ||
505 | struct rate_selection rsel; | ||
506 | struct ieee80211_supported_band *sband; | ||
507 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
508 | |||
509 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | ||
510 | |||
511 | if (likely(tx->rate_idx < 0)) { | ||
512 | rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); | ||
513 | tx->rate_idx = rsel.rate_idx; | ||
514 | if (unlikely(rsel.probe_idx >= 0)) { | ||
515 | info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; | ||
516 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
517 | info->control.alt_retry_rate_idx = tx->rate_idx; | ||
518 | tx->rate_idx = rsel.probe_idx; | ||
519 | } else | ||
520 | info->control.alt_retry_rate_idx = -1; | ||
521 | |||
522 | if (unlikely(tx->rate_idx < 0)) | ||
523 | return TX_DROP; | ||
524 | } else | ||
525 | info->control.alt_retry_rate_idx = -1; | ||
526 | |||
527 | if (tx->sdata->bss_conf.use_cts_prot && | ||
528 | (tx->flags & IEEE80211_TX_FRAGMENTED) && (rsel.nonerp_idx >= 0)) { | ||
529 | tx->last_frag_rate_idx = tx->rate_idx; | ||
530 | if (rsel.probe_idx >= 0) | ||
531 | tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG; | ||
532 | else | ||
533 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
534 | tx->rate_idx = rsel.nonerp_idx; | ||
535 | info->tx_rate_idx = rsel.nonerp_idx; | ||
536 | info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; | ||
537 | } else { | ||
538 | tx->last_frag_rate_idx = tx->rate_idx; | ||
539 | info->tx_rate_idx = tx->rate_idx; | ||
540 | } | ||
541 | info->tx_rate_idx = tx->rate_idx; | ||
542 | |||
543 | return TX_CONTINUE; | ||
544 | } | ||
545 | |||
546 | static ieee80211_tx_result debug_noinline | ||
547 | ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | ||
548 | { | ||
549 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
550 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
551 | struct ieee80211_supported_band *sband; | ||
552 | |||
553 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | ||
554 | |||
555 | if (tx->sta) | ||
556 | info->control.aid = tx->sta->aid; | ||
557 | |||
558 | if (!info->control.retry_limit) { | ||
559 | if (!is_multicast_ether_addr(hdr->addr1)) { | ||
560 | int len = min_t(int, tx->skb->len + FCS_LEN, | ||
561 | tx->local->fragmentation_threshold); | ||
562 | if (len > tx->local->rts_threshold | ||
563 | && tx->local->rts_threshold < | ||
564 | IEEE80211_MAX_RTS_THRESHOLD) { | ||
565 | info->flags |= IEEE80211_TX_CTL_USE_RTS_CTS; | ||
566 | info->flags |= | ||
567 | IEEE80211_TX_CTL_LONG_RETRY_LIMIT; | ||
568 | info->control.retry_limit = | ||
569 | tx->local->long_retry_limit; | ||
570 | } else { | ||
571 | info->control.retry_limit = | ||
572 | tx->local->short_retry_limit; | ||
573 | } | ||
574 | } else { | ||
575 | info->control.retry_limit = 1; | ||
576 | } | ||
577 | } | ||
578 | |||
579 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { | ||
580 | /* Do not use multiple retry rates when sending fragmented | ||
581 | * frames. | ||
582 | * TODO: The last fragment could still use multiple retry | ||
583 | * rates. */ | ||
584 | info->control.alt_retry_rate_idx = -1; | ||
585 | } | ||
586 | |||
587 | /* Use CTS protection for unicast frames sent using extended rates if | ||
588 | * there are associated non-ERP stations and RTS/CTS is not configured | ||
589 | * for the frame. */ | ||
590 | if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) && | ||
591 | (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_ERP_G) && | ||
592 | (tx->flags & IEEE80211_TX_UNICAST) && | ||
593 | tx->sdata->bss_conf.use_cts_prot && | ||
594 | !(info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)) | ||
595 | info->flags |= IEEE80211_TX_CTL_USE_CTS_PROTECT; | ||
596 | |||
597 | /* Transmit data frames using short preambles if the driver supports | ||
598 | * short preambles at the selected rate and short preambles are | ||
599 | * available on the network at the current point in time. */ | ||
600 | if (ieee80211_is_data(hdr->frame_control) && | ||
601 | (sband->bitrates[tx->rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) && | ||
602 | tx->sdata->bss_conf.use_short_preamble && | ||
603 | (!tx->sta || test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))) { | ||
604 | info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE; | ||
605 | } | ||
606 | |||
607 | if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) || | ||
608 | (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) { | ||
609 | struct ieee80211_rate *rate; | ||
610 | s8 baserate = -1; | ||
611 | int idx; | ||
612 | |||
613 | /* Do not use multiple retry rates when using RTS/CTS */ | ||
614 | info->control.alt_retry_rate_idx = -1; | ||
615 | |||
616 | /* Use min(data rate, max base rate) as CTS/RTS rate */ | ||
617 | rate = &sband->bitrates[tx->rate_idx]; | ||
618 | |||
619 | for (idx = 0; idx < sband->n_bitrates; idx++) { | ||
620 | if (sband->bitrates[idx].bitrate > rate->bitrate) | ||
621 | continue; | ||
622 | if (tx->sdata->basic_rates & BIT(idx) && | ||
623 | (baserate < 0 || | ||
624 | (sband->bitrates[baserate].bitrate | ||
625 | < sband->bitrates[idx].bitrate))) | ||
626 | baserate = idx; | ||
627 | } | ||
628 | |||
629 | if (baserate >= 0) | ||
630 | info->control.rts_cts_rate_idx = baserate; | ||
631 | else | ||
632 | info->control.rts_cts_rate_idx = 0; | ||
633 | } | ||
634 | |||
635 | if (tx->sta) | ||
636 | info->control.aid = tx->sta->aid; | ||
637 | |||
638 | return TX_CONTINUE; | ||
639 | } | ||
640 | |||
641 | static ieee80211_tx_result debug_noinline | ||
505 | ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | 642 | ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) |
506 | { | 643 | { |
507 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | 644 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
508 | size_t hdrlen, per_fragm, num_fragm, payload_len, left; | 645 | size_t hdrlen, per_fragm, num_fragm, payload_len, left; |
509 | struct sk_buff **frags, *first, *frag; | 646 | struct sk_buff **frags, *first, *frag; |
510 | int i; | 647 | int i; |
@@ -515,9 +652,19 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
515 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) | 652 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) |
516 | return TX_CONTINUE; | 653 | return TX_CONTINUE; |
517 | 654 | ||
655 | /* | ||
656 | * Warn when submitting a fragmented A-MPDU frame and drop it. | ||
657 | * This scenario is handled in __ieee80211_tx_prepare but extra | ||
658 | * caution taken here as fragmented ampdu may cause Tx stop. | ||
659 | */ | ||
660 | if (WARN_ON(tx->flags & IEEE80211_TX_CTL_AMPDU || | ||
661 | skb_get_queue_mapping(tx->skb) >= | ||
662 | ieee80211_num_regular_queues(&tx->local->hw))) | ||
663 | return TX_DROP; | ||
664 | |||
518 | first = tx->skb; | 665 | first = tx->skb; |
519 | 666 | ||
520 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 667 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
521 | payload_len = first->len - hdrlen; | 668 | payload_len = first->len - hdrlen; |
522 | per_fragm = frag_threshold - hdrlen - FCS_LEN; | 669 | per_fragm = frag_threshold - hdrlen - FCS_LEN; |
523 | num_fragm = DIV_ROUND_UP(payload_len, per_fragm); | 670 | num_fragm = DIV_ROUND_UP(payload_len, per_fragm); |
@@ -558,6 +705,8 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
558 | fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); | 705 | fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); |
559 | copylen = left > per_fragm ? per_fragm : left; | 706 | copylen = left > per_fragm ? per_fragm : left; |
560 | memcpy(skb_put(frag, copylen), pos, copylen); | 707 | memcpy(skb_put(frag, copylen), pos, copylen); |
708 | memcpy(frag->cb, first->cb, sizeof(frag->cb)); | ||
709 | skb_copy_queue_mapping(frag, first); | ||
561 | 710 | ||
562 | pos += copylen; | 711 | pos += copylen; |
563 | left -= copylen; | 712 | left -= copylen; |
@@ -570,7 +719,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
570 | return TX_CONTINUE; | 719 | return TX_CONTINUE; |
571 | 720 | ||
572 | fail: | 721 | fail: |
573 | printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); | ||
574 | if (frags) { | 722 | if (frags) { |
575 | for (i = 0; i < num_fragm - 1; i++) | 723 | for (i = 0; i < num_fragm - 1; i++) |
576 | if (frags[i]) | 724 | if (frags[i]) |
@@ -581,7 +729,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
581 | return TX_DROP; | 729 | return TX_DROP; |
582 | } | 730 | } |
583 | 731 | ||
584 | static ieee80211_tx_result | 732 | static ieee80211_tx_result debug_noinline |
585 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) | 733 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) |
586 | { | 734 | { |
587 | if (!tx->key) | 735 | if (!tx->key) |
@@ -601,236 +749,57 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
601 | return TX_DROP; | 749 | return TX_DROP; |
602 | } | 750 | } |
603 | 751 | ||
604 | static ieee80211_tx_result | 752 | static ieee80211_tx_result debug_noinline |
605 | ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | 753 | ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) |
606 | { | 754 | { |
607 | struct rate_selection rsel; | 755 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
608 | struct ieee80211_supported_band *sband; | 756 | int next_len, i; |
609 | 757 | int group_addr = is_multicast_ether_addr(hdr->addr1); | |
610 | sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; | ||
611 | |||
612 | if (likely(!tx->rate)) { | ||
613 | rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); | ||
614 | tx->rate = rsel.rate; | ||
615 | if (unlikely(rsel.probe)) { | ||
616 | tx->control->flags |= | ||
617 | IEEE80211_TXCTL_RATE_CTRL_PROBE; | ||
618 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
619 | tx->control->alt_retry_rate = tx->rate; | ||
620 | tx->rate = rsel.probe; | ||
621 | } else | ||
622 | tx->control->alt_retry_rate = NULL; | ||
623 | |||
624 | if (!tx->rate) | ||
625 | return TX_DROP; | ||
626 | } else | ||
627 | tx->control->alt_retry_rate = NULL; | ||
628 | 758 | ||
629 | if (tx->sdata->bss_conf.use_cts_prot && | 759 | if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) { |
630 | (tx->flags & IEEE80211_TX_FRAGMENTED) && rsel.nonerp) { | 760 | hdr->duration_id = ieee80211_duration(tx, group_addr, 0); |
631 | tx->last_frag_rate = tx->rate; | 761 | return TX_CONTINUE; |
632 | if (rsel.probe) | ||
633 | tx->flags &= ~IEEE80211_TX_PROBE_LAST_FRAG; | ||
634 | else | ||
635 | tx->flags |= IEEE80211_TX_PROBE_LAST_FRAG; | ||
636 | tx->rate = rsel.nonerp; | ||
637 | tx->control->tx_rate = rsel.nonerp; | ||
638 | tx->control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; | ||
639 | } else { | ||
640 | tx->last_frag_rate = tx->rate; | ||
641 | tx->control->tx_rate = tx->rate; | ||
642 | } | 762 | } |
643 | tx->control->tx_rate = tx->rate; | ||
644 | 763 | ||
645 | return TX_CONTINUE; | 764 | hdr->duration_id = ieee80211_duration(tx, group_addr, |
646 | } | 765 | tx->extra_frag[0]->len); |
647 | |||
648 | static ieee80211_tx_result | ||
649 | ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | ||
650 | { | ||
651 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; | ||
652 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
653 | u16 dur; | ||
654 | struct ieee80211_tx_control *control = tx->control; | ||
655 | 766 | ||
656 | if (!control->retry_limit) { | 767 | for (i = 0; i < tx->num_extra_frag; i++) { |
657 | if (!is_multicast_ether_addr(hdr->addr1)) { | 768 | if (i + 1 < tx->num_extra_frag) { |
658 | if (tx->skb->len + FCS_LEN > tx->local->rts_threshold | 769 | next_len = tx->extra_frag[i + 1]->len; |
659 | && tx->local->rts_threshold < | ||
660 | IEEE80211_MAX_RTS_THRESHOLD) { | ||
661 | control->flags |= | ||
662 | IEEE80211_TXCTL_USE_RTS_CTS; | ||
663 | control->flags |= | ||
664 | IEEE80211_TXCTL_LONG_RETRY_LIMIT; | ||
665 | control->retry_limit = | ||
666 | tx->local->long_retry_limit; | ||
667 | } else { | ||
668 | control->retry_limit = | ||
669 | tx->local->short_retry_limit; | ||
670 | } | ||
671 | } else { | 770 | } else { |
672 | control->retry_limit = 1; | 771 | next_len = 0; |
772 | tx->rate_idx = tx->last_frag_rate_idx; | ||
673 | } | 773 | } |
674 | } | ||
675 | 774 | ||
676 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { | 775 | hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data; |
677 | /* Do not use multiple retry rates when sending fragmented | 776 | hdr->duration_id = ieee80211_duration(tx, 0, next_len); |
678 | * frames. | ||
679 | * TODO: The last fragment could still use multiple retry | ||
680 | * rates. */ | ||
681 | control->alt_retry_rate = NULL; | ||
682 | } | ||
683 | |||
684 | /* Use CTS protection for unicast frames sent using extended rates if | ||
685 | * there are associated non-ERP stations and RTS/CTS is not configured | ||
686 | * for the frame. */ | ||
687 | if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) && | ||
688 | (tx->rate->flags & IEEE80211_RATE_ERP_G) && | ||
689 | (tx->flags & IEEE80211_TX_UNICAST) && | ||
690 | tx->sdata->bss_conf.use_cts_prot && | ||
691 | !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) | ||
692 | control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; | ||
693 | |||
694 | /* Transmit data frames using short preambles if the driver supports | ||
695 | * short preambles at the selected rate and short preambles are | ||
696 | * available on the network at the current point in time. */ | ||
697 | if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && | ||
698 | (tx->rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && | ||
699 | tx->sdata->bss_conf.use_short_preamble && | ||
700 | (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { | ||
701 | tx->control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; | ||
702 | } | ||
703 | |||
704 | /* Setup duration field for the first fragment of the frame. Duration | ||
705 | * for remaining fragments will be updated when they are being sent | ||
706 | * to low-level driver in ieee80211_tx(). */ | ||
707 | dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), | ||
708 | (tx->flags & IEEE80211_TX_FRAGMENTED) ? | ||
709 | tx->extra_frag[0]->len : 0); | ||
710 | hdr->duration_id = cpu_to_le16(dur); | ||
711 | |||
712 | if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || | ||
713 | (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { | ||
714 | struct ieee80211_supported_band *sband; | ||
715 | struct ieee80211_rate *rate, *baserate; | ||
716 | int idx; | ||
717 | |||
718 | sband = tx->local->hw.wiphy->bands[ | ||
719 | tx->local->hw.conf.channel->band]; | ||
720 | |||
721 | /* Do not use multiple retry rates when using RTS/CTS */ | ||
722 | control->alt_retry_rate = NULL; | ||
723 | |||
724 | /* Use min(data rate, max base rate) as CTS/RTS rate */ | ||
725 | rate = tx->rate; | ||
726 | baserate = NULL; | ||
727 | |||
728 | for (idx = 0; idx < sband->n_bitrates; idx++) { | ||
729 | if (sband->bitrates[idx].bitrate > rate->bitrate) | ||
730 | continue; | ||
731 | if (tx->sdata->basic_rates & BIT(idx) && | ||
732 | (!baserate || | ||
733 | (baserate->bitrate < sband->bitrates[idx].bitrate))) | ||
734 | baserate = &sband->bitrates[idx]; | ||
735 | } | ||
736 | |||
737 | if (baserate) | ||
738 | control->rts_cts_rate = baserate; | ||
739 | else | ||
740 | control->rts_cts_rate = &sband->bitrates[0]; | ||
741 | } | ||
742 | |||
743 | if (tx->sta) { | ||
744 | control->aid = tx->sta->aid; | ||
745 | tx->sta->tx_packets++; | ||
746 | tx->sta->tx_fragments++; | ||
747 | tx->sta->tx_bytes += tx->skb->len; | ||
748 | if (tx->extra_frag) { | ||
749 | int i; | ||
750 | tx->sta->tx_fragments += tx->num_extra_frag; | ||
751 | for (i = 0; i < tx->num_extra_frag; i++) { | ||
752 | tx->sta->tx_bytes += | ||
753 | tx->extra_frag[i]->len; | ||
754 | } | ||
755 | } | ||
756 | } | 777 | } |
757 | 778 | ||
758 | return TX_CONTINUE; | 779 | return TX_CONTINUE; |
759 | } | 780 | } |
760 | 781 | ||
761 | static ieee80211_tx_result | 782 | static ieee80211_tx_result debug_noinline |
762 | ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx) | 783 | ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) |
763 | { | 784 | { |
764 | struct ieee80211_local *local = tx->local; | 785 | int i; |
765 | struct sk_buff *skb = tx->skb; | ||
766 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
767 | u32 load = 0, hdrtime; | ||
768 | struct ieee80211_rate *rate = tx->rate; | ||
769 | |||
770 | /* TODO: this could be part of tx_status handling, so that the number | ||
771 | * of retries would be known; TX rate should in that case be stored | ||
772 | * somewhere with the packet */ | ||
773 | |||
774 | /* Estimate total channel use caused by this frame */ | ||
775 | |||
776 | /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, | ||
777 | * 1 usec = 1/8 * (1080 / 10) = 13.5 */ | ||
778 | |||
779 | if (tx->channel->band == IEEE80211_BAND_5GHZ || | ||
780 | (tx->channel->band == IEEE80211_BAND_2GHZ && | ||
781 | rate->flags & IEEE80211_RATE_ERP_G)) | ||
782 | hdrtime = CHAN_UTIL_HDR_SHORT; | ||
783 | else | ||
784 | hdrtime = CHAN_UTIL_HDR_LONG; | ||
785 | |||
786 | load = hdrtime; | ||
787 | if (!is_multicast_ether_addr(hdr->addr1)) | ||
788 | load += hdrtime; | ||
789 | |||
790 | if (tx->control->flags & IEEE80211_TXCTL_USE_RTS_CTS) | ||
791 | load += 2 * hdrtime; | ||
792 | else if (tx->control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) | ||
793 | load += hdrtime; | ||
794 | 786 | ||
795 | /* TODO: optimise again */ | 787 | if (!tx->sta) |
796 | load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate; | 788 | return TX_CONTINUE; |
797 | 789 | ||
790 | tx->sta->tx_packets++; | ||
791 | tx->sta->tx_fragments++; | ||
792 | tx->sta->tx_bytes += tx->skb->len; | ||
798 | if (tx->extra_frag) { | 793 | if (tx->extra_frag) { |
799 | int i; | 794 | tx->sta->tx_fragments += tx->num_extra_frag; |
800 | for (i = 0; i < tx->num_extra_frag; i++) { | 795 | for (i = 0; i < tx->num_extra_frag; i++) |
801 | load += 2 * hdrtime; | 796 | tx->sta->tx_bytes += tx->extra_frag[i]->len; |
802 | load += tx->extra_frag[i]->len * | ||
803 | tx->rate->bitrate; | ||
804 | } | ||
805 | } | 797 | } |
806 | 798 | ||
807 | /* Divide channel_use by 8 to avoid wrapping around the counter */ | ||
808 | load >>= CHAN_UTIL_SHIFT; | ||
809 | local->channel_use_raw += load; | ||
810 | if (tx->sta) | ||
811 | tx->sta->channel_use_raw += load; | ||
812 | tx->sdata->channel_use_raw += load; | ||
813 | |||
814 | return TX_CONTINUE; | 799 | return TX_CONTINUE; |
815 | } | 800 | } |
816 | 801 | ||
817 | 802 | ||
818 | typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_tx_data *); | ||
819 | static ieee80211_tx_handler ieee80211_tx_handlers[] = | ||
820 | { | ||
821 | ieee80211_tx_h_check_assoc, | ||
822 | ieee80211_tx_h_sequence, | ||
823 | ieee80211_tx_h_ps_buf, | ||
824 | ieee80211_tx_h_select_key, | ||
825 | ieee80211_tx_h_michael_mic_add, | ||
826 | ieee80211_tx_h_fragment, | ||
827 | ieee80211_tx_h_encrypt, | ||
828 | ieee80211_tx_h_rate_ctrl, | ||
829 | ieee80211_tx_h_misc, | ||
830 | ieee80211_tx_h_load_stats, | ||
831 | NULL | ||
832 | }; | ||
833 | |||
834 | /* actual transmit path */ | 803 | /* actual transmit path */ |
835 | 804 | ||
836 | /* | 805 | /* |
@@ -854,12 +823,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
854 | (struct ieee80211_radiotap_header *) skb->data; | 823 | (struct ieee80211_radiotap_header *) skb->data; |
855 | struct ieee80211_supported_band *sband; | 824 | struct ieee80211_supported_band *sband; |
856 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); | 825 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); |
857 | struct ieee80211_tx_control *control = tx->control; | 826 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
858 | 827 | ||
859 | sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band]; | 828 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
860 | 829 | ||
861 | control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 830 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
862 | tx->flags |= IEEE80211_TX_INJECTED; | 831 | info->flags |= IEEE80211_TX_CTL_INJECTED; |
863 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; | 832 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
864 | 833 | ||
865 | /* | 834 | /* |
@@ -896,7 +865,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
896 | r = &sband->bitrates[i]; | 865 | r = &sband->bitrates[i]; |
897 | 866 | ||
898 | if (r->bitrate == target_rate) { | 867 | if (r->bitrate == target_rate) { |
899 | tx->rate = r; | 868 | tx->rate_idx = i; |
900 | break; | 869 | break; |
901 | } | 870 | } |
902 | } | 871 | } |
@@ -907,7 +876,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
907 | * radiotap uses 0 for 1st ant, mac80211 is 1 for | 876 | * radiotap uses 0 for 1st ant, mac80211 is 1 for |
908 | * 1st ant | 877 | * 1st ant |
909 | */ | 878 | */ |
910 | control->antenna_sel_tx = (*iterator.this_arg) + 1; | 879 | info->antenna_sel_tx = (*iterator.this_arg) + 1; |
911 | break; | 880 | break; |
912 | 881 | ||
913 | #if 0 | 882 | #if 0 |
@@ -931,8 +900,8 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
931 | skb_trim(skb, skb->len - FCS_LEN); | 900 | skb_trim(skb, skb->len - FCS_LEN); |
932 | } | 901 | } |
933 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) | 902 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) |
934 | control->flags &= | 903 | info->flags &= |
935 | ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 904 | ~IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
936 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) | 905 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) |
937 | tx->flags |= IEEE80211_TX_FRAGMENTED; | 906 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
938 | break; | 907 | break; |
@@ -967,12 +936,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
967 | static ieee80211_tx_result | 936 | static ieee80211_tx_result |
968 | __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | 937 | __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, |
969 | struct sk_buff *skb, | 938 | struct sk_buff *skb, |
970 | struct net_device *dev, | 939 | struct net_device *dev) |
971 | struct ieee80211_tx_control *control) | ||
972 | { | 940 | { |
973 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 941 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
974 | struct ieee80211_hdr *hdr; | 942 | struct ieee80211_hdr *hdr; |
975 | struct ieee80211_sub_if_data *sdata; | 943 | struct ieee80211_sub_if_data *sdata; |
944 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
976 | 945 | ||
977 | int hdrlen; | 946 | int hdrlen; |
978 | 947 | ||
@@ -981,7 +950,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
981 | tx->dev = dev; /* use original interface */ | 950 | tx->dev = dev; /* use original interface */ |
982 | tx->local = local; | 951 | tx->local = local; |
983 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 952 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
984 | tx->control = control; | 953 | tx->channel = local->hw.conf.channel; |
954 | tx->rate_idx = -1; | ||
955 | tx->last_frag_rate_idx = -1; | ||
985 | /* | 956 | /* |
986 | * Set this flag (used below to indicate "automatic fragmentation"), | 957 | * Set this flag (used below to indicate "automatic fragmentation"), |
987 | * it will be cleared/left by radiotap as desired. | 958 | * it will be cleared/left by radiotap as desired. |
@@ -1008,34 +979,33 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1008 | 979 | ||
1009 | if (is_multicast_ether_addr(hdr->addr1)) { | 980 | if (is_multicast_ether_addr(hdr->addr1)) { |
1010 | tx->flags &= ~IEEE80211_TX_UNICAST; | 981 | tx->flags &= ~IEEE80211_TX_UNICAST; |
1011 | control->flags |= IEEE80211_TXCTL_NO_ACK; | 982 | info->flags |= IEEE80211_TX_CTL_NO_ACK; |
1012 | } else { | 983 | } else { |
1013 | tx->flags |= IEEE80211_TX_UNICAST; | 984 | tx->flags |= IEEE80211_TX_UNICAST; |
1014 | control->flags &= ~IEEE80211_TXCTL_NO_ACK; | 985 | info->flags &= ~IEEE80211_TX_CTL_NO_ACK; |
1015 | } | 986 | } |
1016 | 987 | ||
1017 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { | 988 | if (tx->flags & IEEE80211_TX_FRAGMENTED) { |
1018 | if ((tx->flags & IEEE80211_TX_UNICAST) && | 989 | if ((tx->flags & IEEE80211_TX_UNICAST) && |
1019 | skb->len + FCS_LEN > local->fragmentation_threshold && | 990 | skb->len + FCS_LEN > local->fragmentation_threshold && |
1020 | !local->ops->set_frag_threshold) | 991 | !local->ops->set_frag_threshold && |
992 | !(info->flags & IEEE80211_TX_CTL_AMPDU)) | ||
1021 | tx->flags |= IEEE80211_TX_FRAGMENTED; | 993 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
1022 | else | 994 | else |
1023 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; | 995 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
1024 | } | 996 | } |
1025 | 997 | ||
1026 | if (!tx->sta) | 998 | if (!tx->sta) |
1027 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; | 999 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1028 | else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) { | 1000 | else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) |
1029 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; | 1001 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1030 | tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT; | ||
1031 | } | ||
1032 | 1002 | ||
1033 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 1003 | hdrlen = ieee80211_get_hdrlen(tx->fc); |
1034 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { | 1004 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { |
1035 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; | 1005 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; |
1036 | tx->ethertype = (pos[0] << 8) | pos[1]; | 1006 | tx->ethertype = (pos[0] << 8) | pos[1]; |
1037 | } | 1007 | } |
1038 | control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; | 1008 | info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; |
1039 | 1009 | ||
1040 | return TX_CONTINUE; | 1010 | return TX_CONTINUE; |
1041 | } | 1011 | } |
@@ -1045,14 +1015,12 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1045 | */ | 1015 | */ |
1046 | static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | 1016 | static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, |
1047 | struct sk_buff *skb, | 1017 | struct sk_buff *skb, |
1048 | struct net_device *mdev, | 1018 | struct net_device *mdev) |
1049 | struct ieee80211_tx_control *control) | ||
1050 | { | 1019 | { |
1051 | struct ieee80211_tx_packet_data *pkt_data; | 1020 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1052 | struct net_device *dev; | 1021 | struct net_device *dev; |
1053 | 1022 | ||
1054 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | 1023 | dev = dev_get_by_index(&init_net, info->control.ifindex); |
1055 | dev = dev_get_by_index(&init_net, pkt_data->ifindex); | ||
1056 | if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { | 1024 | if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { |
1057 | dev_put(dev); | 1025 | dev_put(dev); |
1058 | dev = NULL; | 1026 | dev = NULL; |
@@ -1060,7 +1028,7 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1060 | if (unlikely(!dev)) | 1028 | if (unlikely(!dev)) |
1061 | return -ENODEV; | 1029 | return -ENODEV; |
1062 | /* initialises tx with control */ | 1030 | /* initialises tx with control */ |
1063 | __ieee80211_tx_prepare(tx, skb, dev, control); | 1031 | __ieee80211_tx_prepare(tx, skb, dev); |
1064 | dev_put(dev); | 1032 | dev_put(dev); |
1065 | return 0; | 1033 | return 0; |
1066 | } | 1034 | } |
@@ -1068,50 +1036,49 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1068 | static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | 1036 | static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, |
1069 | struct ieee80211_tx_data *tx) | 1037 | struct ieee80211_tx_data *tx) |
1070 | { | 1038 | { |
1071 | struct ieee80211_tx_control *control = tx->control; | 1039 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1072 | int ret, i; | 1040 | int ret, i; |
1073 | 1041 | ||
1074 | if (!ieee80211_qdisc_installed(local->mdev) && | 1042 | if (netif_subqueue_stopped(local->mdev, skb)) |
1075 | __ieee80211_queue_stopped(local, 0)) { | ||
1076 | netif_stop_queue(local->mdev); | ||
1077 | return IEEE80211_TX_AGAIN; | 1043 | return IEEE80211_TX_AGAIN; |
1078 | } | 1044 | |
1079 | if (skb) { | 1045 | if (skb) { |
1080 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | 1046 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), |
1081 | "TX to low-level driver", skb); | 1047 | "TX to low-level driver", skb); |
1082 | ret = local->ops->tx(local_to_hw(local), skb, control); | 1048 | ret = local->ops->tx(local_to_hw(local), skb); |
1083 | if (ret) | 1049 | if (ret) |
1084 | return IEEE80211_TX_AGAIN; | 1050 | return IEEE80211_TX_AGAIN; |
1085 | local->mdev->trans_start = jiffies; | 1051 | local->mdev->trans_start = jiffies; |
1086 | ieee80211_led_tx(local, 1); | 1052 | ieee80211_led_tx(local, 1); |
1087 | } | 1053 | } |
1088 | if (tx->extra_frag) { | 1054 | if (tx->extra_frag) { |
1089 | control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | | ||
1090 | IEEE80211_TXCTL_USE_CTS_PROTECT | | ||
1091 | IEEE80211_TXCTL_CLEAR_PS_FILT | | ||
1092 | IEEE80211_TXCTL_FIRST_FRAGMENT); | ||
1093 | for (i = 0; i < tx->num_extra_frag; i++) { | 1055 | for (i = 0; i < tx->num_extra_frag; i++) { |
1094 | if (!tx->extra_frag[i]) | 1056 | if (!tx->extra_frag[i]) |
1095 | continue; | 1057 | continue; |
1096 | if (__ieee80211_queue_stopped(local, control->queue)) | 1058 | info = IEEE80211_SKB_CB(tx->extra_frag[i]); |
1059 | info->flags &= ~(IEEE80211_TX_CTL_USE_RTS_CTS | | ||
1060 | IEEE80211_TX_CTL_USE_CTS_PROTECT | | ||
1061 | IEEE80211_TX_CTL_CLEAR_PS_FILT | | ||
1062 | IEEE80211_TX_CTL_FIRST_FRAGMENT); | ||
1063 | if (netif_subqueue_stopped(local->mdev, | ||
1064 | tx->extra_frag[i])) | ||
1097 | return IEEE80211_TX_FRAG_AGAIN; | 1065 | return IEEE80211_TX_FRAG_AGAIN; |
1098 | if (i == tx->num_extra_frag) { | 1066 | if (i == tx->num_extra_frag) { |
1099 | control->tx_rate = tx->last_frag_rate; | 1067 | info->tx_rate_idx = tx->last_frag_rate_idx; |
1100 | 1068 | ||
1101 | if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) | 1069 | if (tx->flags & IEEE80211_TX_PROBE_LAST_FRAG) |
1102 | control->flags |= | 1070 | info->flags |= |
1103 | IEEE80211_TXCTL_RATE_CTRL_PROBE; | 1071 | IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
1104 | else | 1072 | else |
1105 | control->flags &= | 1073 | info->flags &= |
1106 | ~IEEE80211_TXCTL_RATE_CTRL_PROBE; | 1074 | ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
1107 | } | 1075 | } |
1108 | 1076 | ||
1109 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | 1077 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), |
1110 | "TX to low-level driver", | 1078 | "TX to low-level driver", |
1111 | tx->extra_frag[i]); | 1079 | tx->extra_frag[i]); |
1112 | ret = local->ops->tx(local_to_hw(local), | 1080 | ret = local->ops->tx(local_to_hw(local), |
1113 | tx->extra_frag[i], | 1081 | tx->extra_frag[i]); |
1114 | control); | ||
1115 | if (ret) | 1082 | if (ret) |
1116 | return IEEE80211_TX_FRAG_AGAIN; | 1083 | return IEEE80211_TX_FRAG_AGAIN; |
1117 | local->mdev->trans_start = jiffies; | 1084 | local->mdev->trans_start = jiffies; |
@@ -1124,17 +1091,65 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1124 | return IEEE80211_TX_OK; | 1091 | return IEEE80211_TX_OK; |
1125 | } | 1092 | } |
1126 | 1093 | ||
1127 | static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | 1094 | /* |
1128 | struct ieee80211_tx_control *control) | 1095 | * Invoke TX handlers, return 0 on success and non-zero if the |
1096 | * frame was dropped or queued. | ||
1097 | */ | ||
1098 | static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | ||
1099 | { | ||
1100 | struct sk_buff *skb = tx->skb; | ||
1101 | ieee80211_tx_result res = TX_DROP; | ||
1102 | int i; | ||
1103 | |||
1104 | #define CALL_TXH(txh) \ | ||
1105 | res = txh(tx); \ | ||
1106 | if (res != TX_CONTINUE) \ | ||
1107 | goto txh_done; | ||
1108 | |||
1109 | CALL_TXH(ieee80211_tx_h_check_assoc) | ||
1110 | CALL_TXH(ieee80211_tx_h_sequence) | ||
1111 | CALL_TXH(ieee80211_tx_h_ps_buf) | ||
1112 | CALL_TXH(ieee80211_tx_h_select_key) | ||
1113 | CALL_TXH(ieee80211_tx_h_michael_mic_add) | ||
1114 | CALL_TXH(ieee80211_tx_h_rate_ctrl) | ||
1115 | CALL_TXH(ieee80211_tx_h_misc) | ||
1116 | CALL_TXH(ieee80211_tx_h_fragment) | ||
1117 | /* handlers after fragment must be aware of tx info fragmentation! */ | ||
1118 | CALL_TXH(ieee80211_tx_h_encrypt) | ||
1119 | CALL_TXH(ieee80211_tx_h_calculate_duration) | ||
1120 | CALL_TXH(ieee80211_tx_h_stats) | ||
1121 | #undef CALL_TXH | ||
1122 | |||
1123 | txh_done: | ||
1124 | if (unlikely(res == TX_DROP)) { | ||
1125 | I802_DEBUG_INC(tx->local->tx_handlers_drop); | ||
1126 | dev_kfree_skb(skb); | ||
1127 | for (i = 0; i < tx->num_extra_frag; i++) | ||
1128 | if (tx->extra_frag[i]) | ||
1129 | dev_kfree_skb(tx->extra_frag[i]); | ||
1130 | kfree(tx->extra_frag); | ||
1131 | return -1; | ||
1132 | } else if (unlikely(res == TX_QUEUED)) { | ||
1133 | I802_DEBUG_INC(tx->local->tx_handlers_queued); | ||
1134 | return -1; | ||
1135 | } | ||
1136 | |||
1137 | return 0; | ||
1138 | } | ||
1139 | |||
1140 | static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) | ||
1129 | { | 1141 | { |
1130 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1142 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1131 | struct sta_info *sta; | 1143 | struct sta_info *sta; |
1132 | ieee80211_tx_handler *handler; | ||
1133 | struct ieee80211_tx_data tx; | 1144 | struct ieee80211_tx_data tx; |
1134 | ieee80211_tx_result res = TX_DROP, res_prepare; | 1145 | ieee80211_tx_result res_prepare; |
1135 | int ret, i, retries = 0; | 1146 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1147 | int ret, i; | ||
1148 | u16 queue; | ||
1149 | |||
1150 | queue = skb_get_queue_mapping(skb); | ||
1136 | 1151 | ||
1137 | WARN_ON(__ieee80211_queue_pending(local, control->queue)); | 1152 | WARN_ON(test_bit(queue, local->queues_pending)); |
1138 | 1153 | ||
1139 | if (unlikely(skb->len < 10)) { | 1154 | if (unlikely(skb->len < 10)) { |
1140 | dev_kfree_skb(skb); | 1155 | dev_kfree_skb(skb); |
@@ -1144,7 +1159,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1144 | rcu_read_lock(); | 1159 | rcu_read_lock(); |
1145 | 1160 | ||
1146 | /* initialises tx */ | 1161 | /* initialises tx */ |
1147 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); | 1162 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); |
1148 | 1163 | ||
1149 | if (res_prepare == TX_DROP) { | 1164 | if (res_prepare == TX_DROP) { |
1150 | dev_kfree_skb(skb); | 1165 | dev_kfree_skb(skb); |
@@ -1154,86 +1169,53 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1154 | 1169 | ||
1155 | sta = tx.sta; | 1170 | sta = tx.sta; |
1156 | tx.channel = local->hw.conf.channel; | 1171 | tx.channel = local->hw.conf.channel; |
1172 | info->band = tx.channel->band; | ||
1157 | 1173 | ||
1158 | for (handler = ieee80211_tx_handlers; *handler != NULL; | 1174 | if (invoke_tx_handlers(&tx)) |
1159 | handler++) { | 1175 | goto out; |
1160 | res = (*handler)(&tx); | ||
1161 | if (res != TX_CONTINUE) | ||
1162 | break; | ||
1163 | } | ||
1164 | |||
1165 | skb = tx.skb; /* handlers are allowed to change skb */ | ||
1166 | |||
1167 | if (unlikely(res == TX_DROP)) { | ||
1168 | I802_DEBUG_INC(local->tx_handlers_drop); | ||
1169 | goto drop; | ||
1170 | } | ||
1171 | |||
1172 | if (unlikely(res == TX_QUEUED)) { | ||
1173 | I802_DEBUG_INC(local->tx_handlers_queued); | ||
1174 | rcu_read_unlock(); | ||
1175 | return 0; | ||
1176 | } | ||
1177 | |||
1178 | if (tx.extra_frag) { | ||
1179 | for (i = 0; i < tx.num_extra_frag; i++) { | ||
1180 | int next_len, dur; | ||
1181 | struct ieee80211_hdr *hdr = | ||
1182 | (struct ieee80211_hdr *) | ||
1183 | tx.extra_frag[i]->data; | ||
1184 | |||
1185 | if (i + 1 < tx.num_extra_frag) { | ||
1186 | next_len = tx.extra_frag[i + 1]->len; | ||
1187 | } else { | ||
1188 | next_len = 0; | ||
1189 | tx.rate = tx.last_frag_rate; | ||
1190 | } | ||
1191 | dur = ieee80211_duration(&tx, 0, next_len); | ||
1192 | hdr->duration_id = cpu_to_le16(dur); | ||
1193 | } | ||
1194 | } | ||
1195 | 1176 | ||
1196 | retry: | 1177 | retry: |
1197 | ret = __ieee80211_tx(local, skb, &tx); | 1178 | ret = __ieee80211_tx(local, skb, &tx); |
1198 | if (ret) { | 1179 | if (ret) { |
1199 | struct ieee80211_tx_stored_packet *store = | 1180 | struct ieee80211_tx_stored_packet *store; |
1200 | &local->pending_packet[control->queue]; | 1181 | |
1182 | /* | ||
1183 | * Since there are no fragmented frames on A-MPDU | ||
1184 | * queues, there's no reason for a driver to reject | ||
1185 | * a frame there, warn and drop it. | ||
1186 | */ | ||
1187 | if (WARN_ON(queue >= ieee80211_num_regular_queues(&local->hw))) | ||
1188 | goto drop; | ||
1189 | |||
1190 | store = &local->pending_packet[queue]; | ||
1201 | 1191 | ||
1202 | if (ret == IEEE80211_TX_FRAG_AGAIN) | 1192 | if (ret == IEEE80211_TX_FRAG_AGAIN) |
1203 | skb = NULL; | 1193 | skb = NULL; |
1204 | set_bit(IEEE80211_LINK_STATE_PENDING, | 1194 | set_bit(queue, local->queues_pending); |
1205 | &local->state[control->queue]); | ||
1206 | smp_mb(); | 1195 | smp_mb(); |
1207 | /* When the driver gets out of buffers during sending of | 1196 | /* |
1208 | * fragments and calls ieee80211_stop_queue, there is | 1197 | * When the driver gets out of buffers during sending of |
1209 | * a small window between IEEE80211_LINK_STATE_XOFF and | 1198 | * fragments and calls ieee80211_stop_queue, the netif |
1210 | * IEEE80211_LINK_STATE_PENDING flags are set. If a buffer | 1199 | * subqueue is stopped. There is, however, a small window |
1200 | * in which the PENDING bit is not yet set. If a buffer | ||
1211 | * gets available in that window (i.e. driver calls | 1201 | * gets available in that window (i.e. driver calls |
1212 | * ieee80211_wake_queue), we would end up with ieee80211_tx | 1202 | * ieee80211_wake_queue), we would end up with ieee80211_tx |
1213 | * called with IEEE80211_LINK_STATE_PENDING. Prevent this by | 1203 | * called with the PENDING bit still set. Prevent this by |
1214 | * continuing transmitting here when that situation is | 1204 | * continuing transmitting here when that situation is |
1215 | * possible to have happened. */ | 1205 | * possible to have happened. |
1216 | if (!__ieee80211_queue_stopped(local, control->queue)) { | 1206 | */ |
1217 | clear_bit(IEEE80211_LINK_STATE_PENDING, | 1207 | if (!__netif_subqueue_stopped(local->mdev, queue)) { |
1218 | &local->state[control->queue]); | 1208 | clear_bit(queue, local->queues_pending); |
1219 | retries++; | ||
1220 | /* | ||
1221 | * Driver bug, it's rejecting packets but | ||
1222 | * not stopping queues. | ||
1223 | */ | ||
1224 | if (WARN_ON_ONCE(retries > 5)) | ||
1225 | goto drop; | ||
1226 | goto retry; | 1209 | goto retry; |
1227 | } | 1210 | } |
1228 | memcpy(&store->control, control, | ||
1229 | sizeof(struct ieee80211_tx_control)); | ||
1230 | store->skb = skb; | 1211 | store->skb = skb; |
1231 | store->extra_frag = tx.extra_frag; | 1212 | store->extra_frag = tx.extra_frag; |
1232 | store->num_extra_frag = tx.num_extra_frag; | 1213 | store->num_extra_frag = tx.num_extra_frag; |
1233 | store->last_frag_rate = tx.last_frag_rate; | 1214 | store->last_frag_rate_idx = tx.last_frag_rate_idx; |
1234 | store->last_frag_rate_ctrl_probe = | 1215 | store->last_frag_rate_ctrl_probe = |
1235 | !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); | 1216 | !!(tx.flags & IEEE80211_TX_PROBE_LAST_FRAG); |
1236 | } | 1217 | } |
1218 | out: | ||
1237 | rcu_read_unlock(); | 1219 | rcu_read_unlock(); |
1238 | return 0; | 1220 | return 0; |
1239 | 1221 | ||
@@ -1250,24 +1232,57 @@ retry:
1250 | 1232 | ||
1251 | /* device xmit handlers */ | 1233 | /* device xmit handlers */ |
1252 | 1234 | ||
1235 | static int ieee80211_skb_resize(struct ieee80211_local *local, | ||
1236 | struct sk_buff *skb, | ||
1237 | int head_need, bool may_encrypt) | ||
1238 | { | ||
1239 | int tail_need = 0; | ||
1240 | |||
1241 | /* | ||
1242 | * This could be optimised, devices that do full hardware | ||
1243 | * crypto (including TKIP MMIC) need no tailroom... But we | ||
1244 | * have no drivers for such devices currently. | ||
1245 | */ | ||
1246 | if (may_encrypt) { | ||
1247 | tail_need = IEEE80211_ENCRYPT_TAILROOM; | ||
1248 | tail_need -= skb_tailroom(skb); | ||
1249 | tail_need = max_t(int, tail_need, 0); | ||
1250 | } | ||
1251 | |||
1252 | if (head_need || tail_need) { | ||
1253 | /* Sorry. Can't account for this any more */ | ||
1254 | skb_orphan(skb); | ||
1255 | } | ||
1256 | |||
1257 | if (skb_header_cloned(skb)) | ||
1258 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | ||
1259 | else | ||
1260 | I802_DEBUG_INC(local->tx_expand_skb_head); | ||
1261 | |||
1262 | if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { | ||
1263 | printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n", | ||
1264 | wiphy_name(local->hw.wiphy)); | ||
1265 | return -ENOMEM; | ||
1266 | } | ||
1267 | |||
1268 | /* update truesize too */ | ||
1269 | skb->truesize += head_need + tail_need; | ||
1270 | |||
1271 | return 0; | ||
1272 | } | ||
1273 | |||
1253 | int ieee80211_master_start_xmit(struct sk_buff *skb, | 1274 | int ieee80211_master_start_xmit(struct sk_buff *skb, |
1254 | struct net_device *dev) | 1275 | struct net_device *dev) |
1255 | { | 1276 | { |
1256 | struct ieee80211_tx_control control; | 1277 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1257 | struct ieee80211_tx_packet_data *pkt_data; | ||
1258 | struct net_device *odev = NULL; | 1278 | struct net_device *odev = NULL; |
1259 | struct ieee80211_sub_if_data *osdata; | 1279 | struct ieee80211_sub_if_data *osdata; |
1260 | int headroom; | 1280 | int headroom; |
1281 | bool may_encrypt; | ||
1261 | int ret; | 1282 | int ret; |
1262 | 1283 | ||
1263 | /* | 1284 | if (info->control.ifindex) |
1264 | * copy control out of the skb so other people can use skb->cb | 1285 | odev = dev_get_by_index(&init_net, info->control.ifindex); |
1265 | */ | ||
1266 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | ||
1267 | memset(&control, 0, sizeof(struct ieee80211_tx_control)); | ||
1268 | |||
1269 | if (pkt_data->ifindex) | ||
1270 | odev = dev_get_by_index(&init_net, pkt_data->ifindex); | ||
1271 | if (unlikely(odev && !is_ieee80211_device(odev, dev))) { | 1286 | if (unlikely(odev && !is_ieee80211_device(odev, dev))) { |
1272 | dev_put(odev); | 1287 | dev_put(odev); |
1273 | odev = NULL; | 1288 | odev = NULL; |
@@ -1280,32 +1295,25 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, | |||
1280 | dev_kfree_skb(skb); | 1295 | dev_kfree_skb(skb); |
1281 | return 0; | 1296 | return 0; |
1282 | } | 1297 | } |
1298 | |||
1283 | osdata = IEEE80211_DEV_TO_SUB_IF(odev); | 1299 | osdata = IEEE80211_DEV_TO_SUB_IF(odev); |
1284 | 1300 | ||
1285 | headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; | 1301 | may_encrypt = !(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT); |
1286 | if (skb_headroom(skb) < headroom) { | 1302 | |
1287 | if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { | 1303 | headroom = osdata->local->tx_headroom; |
1288 | dev_kfree_skb(skb); | 1304 | if (may_encrypt) |
1289 | dev_put(odev); | 1305 | headroom += IEEE80211_ENCRYPT_HEADROOM; |
1290 | return 0; | 1306 | headroom -= skb_headroom(skb); |
1291 | } | 1307 | headroom = max_t(int, 0, headroom); |
1308 | |||
1309 | if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { | ||
1310 | dev_kfree_skb(skb); | ||
1311 | dev_put(odev); | ||
1312 | return 0; | ||
1292 | } | 1313 | } |
1293 | 1314 | ||
1294 | control.vif = &osdata->vif; | 1315 | info->control.vif = &osdata->vif; |
1295 | control.type = osdata->vif.type; | 1316 | ret = ieee80211_tx(odev, skb); |
1296 | if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS) | ||
1297 | control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS; | ||
1298 | if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT) | ||
1299 | control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | ||
1300 | if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) | ||
1301 | control.flags |= IEEE80211_TXCTL_REQUEUE; | ||
1302 | if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME) | ||
1303 | control.flags |= IEEE80211_TXCTL_EAPOL_FRAME; | ||
1304 | if (pkt_data->flags & IEEE80211_TXPD_AMPDU) | ||
1305 | control.flags |= IEEE80211_TXCTL_AMPDU; | ||
1306 | control.queue = pkt_data->queue; | ||
1307 | |||
1308 | ret = ieee80211_tx(odev, skb, &control); | ||
1309 | dev_put(odev); | 1317 | dev_put(odev); |
1310 | 1318 | ||
1311 | return ret; | 1319 | return ret; |
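The ieee80211_tx_control copy disappears because per-packet state now lives directly in skb->cb as struct ieee80211_tx_info. IEEE80211_SKB_CB() is, roughly, just a typed view of that control buffer; a minimal sketch of what the helper is assumed to do:

    static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
    {
            return (struct ieee80211_tx_info *)skb->cb;
    }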
@@ -1315,7 +1323,7 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1315 | struct net_device *dev) | 1323 | struct net_device *dev) |
1316 | { | 1324 | { |
1317 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1325 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1318 | struct ieee80211_tx_packet_data *pkt_data; | 1326 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1319 | struct ieee80211_radiotap_header *prthdr = | 1327 | struct ieee80211_radiotap_header *prthdr = |
1320 | (struct ieee80211_radiotap_header *)skb->data; | 1328 | (struct ieee80211_radiotap_header *)skb->data; |
1321 | u16 len_rthdr; | 1329 | u16 len_rthdr; |
@@ -1337,12 +1345,12 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1337 | 1345 | ||
1338 | skb->dev = local->mdev; | 1346 | skb->dev = local->mdev; |
1339 | 1347 | ||
1340 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | ||
1341 | memset(pkt_data, 0, sizeof(*pkt_data)); | ||
1342 | /* needed because we set skb device to master */ | 1348 | /* needed because we set skb device to master */ |
1343 | pkt_data->ifindex = dev->ifindex; | 1349 | info->control.ifindex = dev->ifindex; |
1344 | 1350 | ||
1345 | pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; | 1351 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; |
1352 | /* Interfaces should always request a status report */ | ||
1353 | info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
1346 | 1354 | ||
1347 | /* | 1355 | /* |
1348 | * fix up the pointers accounting for the radiotap | 1356 | * fix up the pointers accounting for the radiotap |
@@ -1386,10 +1394,11 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1386 | struct net_device *dev) | 1394 | struct net_device *dev) |
1387 | { | 1395 | { |
1388 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1396 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1389 | struct ieee80211_tx_packet_data *pkt_data; | 1397 | struct ieee80211_tx_info *info; |
1390 | struct ieee80211_sub_if_data *sdata; | 1398 | struct ieee80211_sub_if_data *sdata; |
1391 | int ret = 1, head_need; | 1399 | int ret = 1, head_need; |
1392 | u16 ethertype, hdrlen, meshhdrlen = 0, fc; | 1400 | u16 ethertype, hdrlen, meshhdrlen = 0; |
1401 | __le16 fc; | ||
1393 | struct ieee80211_hdr hdr; | 1402 | struct ieee80211_hdr hdr; |
1394 | struct ieee80211s_hdr mesh_hdr; | 1403 | struct ieee80211s_hdr mesh_hdr; |
1395 | const u8 *encaps_data; | 1404 | const u8 *encaps_data; |
@@ -1400,8 +1409,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1400 | 1409 | ||
1401 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1410 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1402 | if (unlikely(skb->len < ETH_HLEN)) { | 1411 | if (unlikely(skb->len < ETH_HLEN)) { |
1403 | printk(KERN_DEBUG "%s: short skb (len=%d)\n", | ||
1404 | dev->name, skb->len); | ||
1405 | ret = 0; | 1412 | ret = 0; |
1406 | goto fail; | 1413 | goto fail; |
1407 | } | 1414 | } |
@@ -1412,12 +1419,12 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1412 | /* convert Ethernet header to proper 802.11 header (based on | 1419 | /* convert Ethernet header to proper 802.11 header (based on |
1413 | * operation mode) */ | 1420 | * operation mode) */ |
1414 | ethertype = (skb->data[12] << 8) | skb->data[13]; | 1421 | ethertype = (skb->data[12] << 8) | skb->data[13]; |
1415 | fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; | 1422 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); |
1416 | 1423 | ||
1417 | switch (sdata->vif.type) { | 1424 | switch (sdata->vif.type) { |
1418 | case IEEE80211_IF_TYPE_AP: | 1425 | case IEEE80211_IF_TYPE_AP: |
1419 | case IEEE80211_IF_TYPE_VLAN: | 1426 | case IEEE80211_IF_TYPE_VLAN: |
1420 | fc |= IEEE80211_FCTL_FROMDS; | 1427 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); |
1421 | /* DA BSSID SA */ | 1428 | /* DA BSSID SA */ |
1422 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1429 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
1423 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | 1430 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); |
@@ -1425,7 +1432,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1425 | hdrlen = 24; | 1432 | hdrlen = 24; |
1426 | break; | 1433 | break; |
1427 | case IEEE80211_IF_TYPE_WDS: | 1434 | case IEEE80211_IF_TYPE_WDS: |
1428 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | 1435 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1429 | /* RA TA DA SA */ | 1436 | /* RA TA DA SA */ |
1430 | memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); | 1437 | memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); |
1431 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | 1438 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); |
@@ -1435,7 +1442,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1435 | break; | 1442 | break; |
1436 | #ifdef CONFIG_MAC80211_MESH | 1443 | #ifdef CONFIG_MAC80211_MESH |
1437 | case IEEE80211_IF_TYPE_MESH_POINT: | 1444 | case IEEE80211_IF_TYPE_MESH_POINT: |
1438 | fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; | 1445 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1439 | /* RA TA DA SA */ | 1446 | /* RA TA DA SA */ |
1440 | if (is_multicast_ether_addr(skb->data)) | 1447 | if (is_multicast_ether_addr(skb->data)) |
1441 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1448 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
@@ -1465,7 +1472,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1465 | break; | 1472 | break; |
1466 | #endif | 1473 | #endif |
1467 | case IEEE80211_IF_TYPE_STA: | 1474 | case IEEE80211_IF_TYPE_STA: |
1468 | fc |= IEEE80211_FCTL_TODS; | 1475 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); |
1469 | /* BSSID SA DA */ | 1476 | /* BSSID SA DA */ |
1470 | memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); | 1477 | memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); |
1471 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 1478 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); |
@@ -1493,13 +1500,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1493 | rcu_read_lock(); | 1500 | rcu_read_lock(); |
1494 | sta = sta_info_get(local, hdr.addr1); | 1501 | sta = sta_info_get(local, hdr.addr1); |
1495 | if (sta) | 1502 | if (sta) |
1496 | sta_flags = sta->flags; | 1503 | sta_flags = get_sta_flags(sta); |
1497 | rcu_read_unlock(); | 1504 | rcu_read_unlock(); |
1498 | } | 1505 | } |
1499 | 1506 | ||
1500 | /* receiver is QoS enabled, use a QoS type frame */ | 1507 | /* receiver and we are QoS enabled, use a QoS type frame */ |
1501 | if (sta_flags & WLAN_STA_WME) { | 1508 | if (sta_flags & WLAN_STA_WME && |
1502 | fc |= IEEE80211_STYPE_QOS_DATA; | 1509 | ieee80211_num_regular_queues(&local->hw) >= 4) { |
1510 | fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | ||
1503 | hdrlen += 2; | 1511 | hdrlen += 2; |
1504 | } | 1512 | } |
1505 | 1513 | ||
@@ -1527,7 +1535,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1527 | goto fail; | 1535 | goto fail; |
1528 | } | 1536 | } |
1529 | 1537 | ||
1530 | hdr.frame_control = cpu_to_le16(fc); | 1538 | hdr.frame_control = fc; |
1531 | hdr.duration_id = 0; | 1539 | hdr.duration_id = 0; |
1532 | hdr.seq_ctrl = 0; | 1540 | hdr.seq_ctrl = 0; |
1533 | 1541 | ||
@@ -1562,32 +1570,26 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1562 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and | 1570 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and |
1563 | * alloc_skb() (net/core/skbuff.c) | 1571 | * alloc_skb() (net/core/skbuff.c) |
1564 | */ | 1572 | */ |
1565 | head_need = hdrlen + encaps_len + meshhdrlen + local->tx_headroom; | 1573 | head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); |
1566 | head_need -= skb_headroom(skb); | ||
1567 | 1574 | ||
1568 | /* We are going to modify skb data, so make a copy of it if happens to | 1575 | /* |
1569 | * be cloned. This could happen, e.g., with Linux bridge code passing | 1576 | * So we need to modify the skb header and hence need a copy of |
1570 | * us broadcast frames. */ | 1577 | * that. The head_need variable above doesn't, so far, include |
1578 | * the needed header space that we don't need right away. If we | ||
1579 | * can, then we don't reallocate right now but only after the | ||
1580 | * frame arrives at the master device (if it does...) | ||
1581 | * | ||
1582 | * If we cannot, however, then we will reallocate to include all | ||
1583 | * the ever needed space. Also, if we need to reallocate it anyway, | ||
1584 | * make it big enough for everything we may ever need. | ||
1585 | */ | ||
1571 | 1586 | ||
1572 | if (head_need > 0 || skb_cloned(skb)) { | 1587 | if (head_need > 0 || skb_cloned(skb)) { |
1573 | #if 0 | 1588 | head_need += IEEE80211_ENCRYPT_HEADROOM; |
1574 | printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " | 1589 | head_need += local->tx_headroom; |
1575 | "of headroom\n", dev->name, head_need); | 1590 | head_need = max_t(int, 0, head_need); |
1576 | #endif | 1591 | if (ieee80211_skb_resize(local, skb, head_need, true)) |
1577 | |||
1578 | if (skb_cloned(skb)) | ||
1579 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | ||
1580 | else | ||
1581 | I802_DEBUG_INC(local->tx_expand_skb_head); | ||
1582 | /* Since we have to reallocate the buffer, make sure that there | ||
1583 | * is enough room for possible WEP IV/ICV and TKIP (8 bytes | ||
1584 | * before payload and 12 after). */ | ||
1585 | if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8), | ||
1586 | 12, GFP_ATOMIC)) { | ||
1587 | printk(KERN_DEBUG "%s: failed to reallocate TX buffer" | ||
1588 | "\n", dev->name); | ||
1589 | goto fail; | 1592 | goto fail; |
1590 | } | ||
1591 | } | 1593 | } |
1592 | 1594 | ||
1593 | if (encaps_data) { | 1595 | if (encaps_data) { |
@@ -1602,7 +1604,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1602 | h_pos += meshhdrlen; | 1604 | h_pos += meshhdrlen; |
1603 | } | 1605 | } |
1604 | 1606 | ||
1605 | if (fc & IEEE80211_STYPE_QOS_DATA) { | 1607 | if (ieee80211_is_data_qos(fc)) { |
1606 | __le16 *qos_control; | 1608 | __le16 *qos_control; |
1607 | 1609 | ||
1608 | qos_control = (__le16*) skb_push(skb, 2); | 1610 | qos_control = (__le16*) skb_push(skb, 2); |
@@ -1618,11 +1620,14 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1618 | nh_pos += hdrlen; | 1620 | nh_pos += hdrlen; |
1619 | h_pos += hdrlen; | 1621 | h_pos += hdrlen; |
1620 | 1622 | ||
1621 | pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; | 1623 | info = IEEE80211_SKB_CB(skb); |
1622 | memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); | 1624 | memset(info, 0, sizeof(*info)); |
1623 | pkt_data->ifindex = dev->ifindex; | 1625 | info->control.ifindex = dev->ifindex; |
1624 | if (ethertype == ETH_P_PAE) | 1626 | if (ethertype == ETH_P_PAE) |
1625 | pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME; | 1627 | info->flags |= IEEE80211_TX_CTL_EAPOL_FRAME; |
1628 | |||
1629 | /* Interfaces should always request a status report */ | ||
1630 | info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
1626 | 1631 | ||
1627 | skb->dev = local->mdev; | 1632 | skb->dev = local->mdev; |
1628 | dev->stats.tx_packets++; | 1633 | dev->stats.tx_packets++; |
@@ -1647,46 +1652,55 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1647 | return ret; | 1652 | return ret; |
1648 | } | 1653 | } |
1649 | 1654 | ||
1650 | /* helper functions for pending packets for when queues are stopped */ | ||
1651 | 1655 | ||
1656 | /* | ||
1657 | * ieee80211_clear_tx_pending may not be called in a context where | ||
1658 | * it is possible that packets could come in again. | ||
1659 | */ | ||
1652 | void ieee80211_clear_tx_pending(struct ieee80211_local *local) | 1660 | void ieee80211_clear_tx_pending(struct ieee80211_local *local) |
1653 | { | 1661 | { |
1654 | int i, j; | 1662 | int i, j; |
1655 | struct ieee80211_tx_stored_packet *store; | 1663 | struct ieee80211_tx_stored_packet *store; |
1656 | 1664 | ||
1657 | for (i = 0; i < local->hw.queues; i++) { | 1665 | for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { |
1658 | if (!__ieee80211_queue_pending(local, i)) | 1666 | if (!test_bit(i, local->queues_pending)) |
1659 | continue; | 1667 | continue; |
1660 | store = &local->pending_packet[i]; | 1668 | store = &local->pending_packet[i]; |
1661 | kfree_skb(store->skb); | 1669 | kfree_skb(store->skb); |
1662 | for (j = 0; j < store->num_extra_frag; j++) | 1670 | for (j = 0; j < store->num_extra_frag; j++) |
1663 | kfree_skb(store->extra_frag[j]); | 1671 | kfree_skb(store->extra_frag[j]); |
1664 | kfree(store->extra_frag); | 1672 | kfree(store->extra_frag); |
1665 | clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); | 1673 | clear_bit(i, local->queues_pending); |
1666 | } | 1674 | } |
1667 | } | 1675 | } |
1668 | 1676 | ||
1677 | /* | ||
1678 | * Transmit all pending packets. Called from tasklet, locks master device | ||
1679 | * TX lock so that no new packets can come in. | ||
1680 | */ | ||
1669 | void ieee80211_tx_pending(unsigned long data) | 1681 | void ieee80211_tx_pending(unsigned long data) |
1670 | { | 1682 | { |
1671 | struct ieee80211_local *local = (struct ieee80211_local *)data; | 1683 | struct ieee80211_local *local = (struct ieee80211_local *)data; |
1672 | struct net_device *dev = local->mdev; | 1684 | struct net_device *dev = local->mdev; |
1673 | struct ieee80211_tx_stored_packet *store; | 1685 | struct ieee80211_tx_stored_packet *store; |
1674 | struct ieee80211_tx_data tx; | 1686 | struct ieee80211_tx_data tx; |
1675 | int i, ret, reschedule = 0; | 1687 | int i, ret; |
1676 | 1688 | ||
1677 | netif_tx_lock_bh(dev); | 1689 | netif_tx_lock_bh(dev); |
1678 | for (i = 0; i < local->hw.queues; i++) { | 1690 | for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { |
1679 | if (__ieee80211_queue_stopped(local, i)) | 1691 | /* Check that this queue is ok */ |
1692 | if (__netif_subqueue_stopped(local->mdev, i)) | ||
1680 | continue; | 1693 | continue; |
1681 | if (!__ieee80211_queue_pending(local, i)) { | 1694 | |
1682 | reschedule = 1; | 1695 | if (!test_bit(i, local->queues_pending)) { |
1696 | ieee80211_wake_queue(&local->hw, i); | ||
1683 | continue; | 1697 | continue; |
1684 | } | 1698 | } |
1699 | |||
1685 | store = &local->pending_packet[i]; | 1700 | store = &local->pending_packet[i]; |
1686 | tx.control = &store->control; | ||
1687 | tx.extra_frag = store->extra_frag; | 1701 | tx.extra_frag = store->extra_frag; |
1688 | tx.num_extra_frag = store->num_extra_frag; | 1702 | tx.num_extra_frag = store->num_extra_frag; |
1689 | tx.last_frag_rate = store->last_frag_rate; | 1703 | tx.last_frag_rate_idx = store->last_frag_rate_idx; |
1690 | tx.flags = 0; | 1704 | tx.flags = 0; |
1691 | if (store->last_frag_rate_ctrl_probe) | 1705 | if (store->last_frag_rate_ctrl_probe) |
1692 | tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; | 1706 | tx.flags |= IEEE80211_TX_PROBE_LAST_FRAG; |
@@ -1695,19 +1709,11 @@ void ieee80211_tx_pending(unsigned long data) | |||
1695 | if (ret == IEEE80211_TX_FRAG_AGAIN) | 1709 | if (ret == IEEE80211_TX_FRAG_AGAIN) |
1696 | store->skb = NULL; | 1710 | store->skb = NULL; |
1697 | } else { | 1711 | } else { |
1698 | clear_bit(IEEE80211_LINK_STATE_PENDING, | 1712 | clear_bit(i, local->queues_pending); |
1699 | &local->state[i]); | 1713 | ieee80211_wake_queue(&local->hw, i); |
1700 | reschedule = 1; | ||
1701 | } | 1714 | } |
1702 | } | 1715 | } |
1703 | netif_tx_unlock_bh(dev); | 1716 | netif_tx_unlock_bh(dev); |
1704 | if (reschedule) { | ||
1705 | if (!ieee80211_qdisc_installed(dev)) { | ||
1706 | if (!__ieee80211_queue_stopped(local, 0)) | ||
1707 | netif_wake_queue(dev); | ||
1708 | } else | ||
1709 | netif_schedule(dev); | ||
1710 | } | ||
1711 | } | 1717 | } |
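Both helpers above iterate the regular queues and treat local->queues_pending as an ordinary kernel bitmap, one bit per queue. A sketch of the assumed declaration and a typical test; the member layout and the helper name are assumptions for illustration, not quoted from ieee80211_i.h:

    /* assumed member of struct ieee80211_local */
    DECLARE_BITMAP(queues_pending, IEEE80211_MAX_QUEUES);

    /* queue i holds a parked frame waiting for ieee80211_tx_pending() */
    if (test_bit(i, local->queues_pending))
            flush_pending_queue(local, i); /* hypothetical helper */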
1712 | 1718 | ||
1713 | /* functions for drivers to get certain frames */ | 1719 | /* functions for drivers to get certain frames */ |
@@ -1776,11 +1782,11 @@ static void ieee80211_beacon_add_tim(struct ieee80211_local *local, | |||
1776 | } | 1782 | } |
1777 | 1783 | ||
1778 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | 1784 | struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, |
1779 | struct ieee80211_vif *vif, | 1785 | struct ieee80211_vif *vif) |
1780 | struct ieee80211_tx_control *control) | ||
1781 | { | 1786 | { |
1782 | struct ieee80211_local *local = hw_to_local(hw); | 1787 | struct ieee80211_local *local = hw_to_local(hw); |
1783 | struct sk_buff *skb; | 1788 | struct sk_buff *skb; |
1789 | struct ieee80211_tx_info *info; | ||
1784 | struct net_device *bdev; | 1790 | struct net_device *bdev; |
1785 | struct ieee80211_sub_if_data *sdata = NULL; | 1791 | struct ieee80211_sub_if_data *sdata = NULL; |
1786 | struct ieee80211_if_ap *ap = NULL; | 1792 | struct ieee80211_if_ap *ap = NULL; |
@@ -1790,9 +1796,10 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1790 | struct ieee80211_mgmt *mgmt; | 1796 | struct ieee80211_mgmt *mgmt; |
1791 | int *num_beacons; | 1797 | int *num_beacons; |
1792 | bool err = true; | 1798 | bool err = true; |
1799 | enum ieee80211_band band = local->hw.conf.channel->band; | ||
1793 | u8 *pos; | 1800 | u8 *pos; |
1794 | 1801 | ||
1795 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 1802 | sband = local->hw.wiphy->bands[band]; |
1796 | 1803 | ||
1797 | rcu_read_lock(); | 1804 | rcu_read_lock(); |
1798 | 1805 | ||
@@ -1855,8 +1862,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1855 | mgmt = (struct ieee80211_mgmt *) | 1862 | mgmt = (struct ieee80211_mgmt *) |
1856 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | 1863 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); |
1857 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | 1864 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); |
1858 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 1865 | mgmt->frame_control = |
1859 | IEEE80211_STYPE_BEACON); | 1866 | cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); |
1860 | memset(mgmt->da, 0xff, ETH_ALEN); | 1867 | memset(mgmt->da, 0xff, ETH_ALEN); |
1861 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 1868 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
1862 | /* BSSID is left zeroed, wildcard value */ | 1869 | /* BSSID is left zeroed, wildcard value */ |
@@ -1885,30 +1892,32 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1885 | goto out; | 1892 | goto out; |
1886 | } | 1893 | } |
1887 | 1894 | ||
1888 | if (control) { | 1895 | info = IEEE80211_SKB_CB(skb); |
1889 | rate_control_get_rate(local->mdev, sband, skb, &rsel); | ||
1890 | if (!rsel.rate) { | ||
1891 | if (net_ratelimit()) { | ||
1892 | printk(KERN_DEBUG "%s: ieee80211_beacon_get: " | ||
1893 | "no rate found\n", | ||
1894 | wiphy_name(local->hw.wiphy)); | ||
1895 | } | ||
1896 | dev_kfree_skb(skb); | ||
1897 | skb = NULL; | ||
1898 | goto out; | ||
1899 | } | ||
1900 | 1896 | ||
1901 | control->vif = vif; | 1897 | info->band = band; |
1902 | control->tx_rate = rsel.rate; | 1898 | rate_control_get_rate(local->mdev, sband, skb, &rsel); |
1903 | if (sdata->bss_conf.use_short_preamble && | 1899 | |
1904 | rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) | 1900 | if (unlikely(rsel.rate_idx < 0)) { |
1905 | control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; | 1901 | if (net_ratelimit()) { |
1906 | control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; | 1902 | printk(KERN_DEBUG "%s: ieee80211_beacon_get: " |
1907 | control->flags |= IEEE80211_TXCTL_NO_ACK; | 1903 | "no rate found\n", |
1908 | control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; | 1904 | wiphy_name(local->hw.wiphy)); |
1909 | control->retry_limit = 1; | 1905 | } |
1910 | control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; | 1906 | dev_kfree_skb(skb); |
1907 | skb = NULL; | ||
1908 | goto out; | ||
1911 | } | 1909 | } |
1910 | |||
1911 | info->control.vif = vif; | ||
1912 | info->tx_rate_idx = rsel.rate_idx; | ||
1913 | if (sdata->bss_conf.use_short_preamble && | ||
1914 | sband->bitrates[rsel.rate_idx].flags & IEEE80211_RATE_SHORT_PREAMBLE) | ||
1915 | info->flags |= IEEE80211_TX_CTL_SHORT_PREAMBLE; | ||
1916 | info->antenna_sel_tx = local->hw.conf.antenna_sel_tx; | ||
1917 | info->flags |= IEEE80211_TX_CTL_NO_ACK; | ||
1918 | info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; | ||
1919 | info->control.retry_limit = 1; | ||
1920 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | ||
1912 | (*num_beacons)++; | 1921 | (*num_beacons)++; |
1913 | out: | 1922 | out: |
1914 | rcu_read_unlock(); | 1923 | rcu_read_unlock(); |
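With the ieee80211_tx_control argument removed, a driver fetches the beacon and then reads the chosen rate and flags back out of the skb's control buffer. A hedged usage sketch; drv_tx_beacon stands in for the driver's own transmit path:

    struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);

    if (beacon) {
            struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);

            /* rate index, NO_ACK and DO_NOT_ENCRYPT flags were set above */
            drv_tx_beacon(hw, beacon, info->tx_rate_idx, info->flags);
    }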
@@ -1918,14 +1927,13 @@ EXPORT_SYMBOL(ieee80211_beacon_get); | |||
1918 | 1927 | ||
1919 | void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 1928 | void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
1920 | const void *frame, size_t frame_len, | 1929 | const void *frame, size_t frame_len, |
1921 | const struct ieee80211_tx_control *frame_txctl, | 1930 | const struct ieee80211_tx_info *frame_txctl, |
1922 | struct ieee80211_rts *rts) | 1931 | struct ieee80211_rts *rts) |
1923 | { | 1932 | { |
1924 | const struct ieee80211_hdr *hdr = frame; | 1933 | const struct ieee80211_hdr *hdr = frame; |
1925 | u16 fctl; | ||
1926 | 1934 | ||
1927 | fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS; | 1935 | rts->frame_control = |
1928 | rts->frame_control = cpu_to_le16(fctl); | 1936 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); |
1929 | rts->duration = ieee80211_rts_duration(hw, vif, frame_len, | 1937 | rts->duration = ieee80211_rts_duration(hw, vif, frame_len, |
1930 | frame_txctl); | 1938 | frame_txctl); |
1931 | memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); | 1939 | memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); |
@@ -1935,14 +1943,13 @@ EXPORT_SYMBOL(ieee80211_rts_get); | |||
1935 | 1943 | ||
1936 | void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 1944 | void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
1937 | const void *frame, size_t frame_len, | 1945 | const void *frame, size_t frame_len, |
1938 | const struct ieee80211_tx_control *frame_txctl, | 1946 | const struct ieee80211_tx_info *frame_txctl, |
1939 | struct ieee80211_cts *cts) | 1947 | struct ieee80211_cts *cts) |
1940 | { | 1948 | { |
1941 | const struct ieee80211_hdr *hdr = frame; | 1949 | const struct ieee80211_hdr *hdr = frame; |
1942 | u16 fctl; | ||
1943 | 1950 | ||
1944 | fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS; | 1951 | cts->frame_control = |
1945 | cts->frame_control = cpu_to_le16(fctl); | 1952 | cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); |
1946 | cts->duration = ieee80211_ctstoself_duration(hw, vif, | 1953 | cts->duration = ieee80211_ctstoself_duration(hw, vif, |
1947 | frame_len, frame_txctl); | 1954 | frame_len, frame_txctl); |
1948 | memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); | 1955 | memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); |
@@ -1951,23 +1958,21 @@ EXPORT_SYMBOL(ieee80211_ctstoself_get); | |||
1951 | 1958 | ||
1952 | struct sk_buff * | 1959 | struct sk_buff * |
1953 | ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | 1960 | ieee80211_get_buffered_bc(struct ieee80211_hw *hw, |
1954 | struct ieee80211_vif *vif, | 1961 | struct ieee80211_vif *vif) |
1955 | struct ieee80211_tx_control *control) | ||
1956 | { | 1962 | { |
1957 | struct ieee80211_local *local = hw_to_local(hw); | 1963 | struct ieee80211_local *local = hw_to_local(hw); |
1958 | struct sk_buff *skb; | 1964 | struct sk_buff *skb = NULL; |
1959 | struct sta_info *sta; | 1965 | struct sta_info *sta; |
1960 | ieee80211_tx_handler *handler; | ||
1961 | struct ieee80211_tx_data tx; | 1966 | struct ieee80211_tx_data tx; |
1962 | ieee80211_tx_result res = TX_DROP; | ||
1963 | struct net_device *bdev; | 1967 | struct net_device *bdev; |
1964 | struct ieee80211_sub_if_data *sdata; | 1968 | struct ieee80211_sub_if_data *sdata; |
1965 | struct ieee80211_if_ap *bss = NULL; | 1969 | struct ieee80211_if_ap *bss = NULL; |
1966 | struct beacon_data *beacon; | 1970 | struct beacon_data *beacon; |
1971 | struct ieee80211_tx_info *info; | ||
1967 | 1972 | ||
1968 | sdata = vif_to_sdata(vif); | 1973 | sdata = vif_to_sdata(vif); |
1969 | bdev = sdata->dev; | 1974 | bdev = sdata->dev; |
1970 | 1975 | bss = &sdata->u.ap; | |
1971 | 1976 | ||
1972 | if (!bss) | 1977 | if (!bss) |
1973 | return NULL; | 1978 | return NULL; |
@@ -1975,19 +1980,16 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
1975 | rcu_read_lock(); | 1980 | rcu_read_lock(); |
1976 | beacon = rcu_dereference(bss->beacon); | 1981 | beacon = rcu_dereference(bss->beacon); |
1977 | 1982 | ||
1978 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || | 1983 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head) |
1979 | !beacon->head) { | 1984 | goto out; |
1980 | rcu_read_unlock(); | ||
1981 | return NULL; | ||
1982 | } | ||
1983 | 1985 | ||
1984 | if (bss->dtim_count != 0) | 1986 | if (bss->dtim_count != 0) |
1985 | return NULL; /* send buffered bc/mc only after DTIM beacon */ | 1987 | goto out; /* send buffered bc/mc only after DTIM beacon */ |
1986 | memset(control, 0, sizeof(*control)); | 1988 | |
1987 | while (1) { | 1989 | while (1) { |
1988 | skb = skb_dequeue(&bss->ps_bc_buf); | 1990 | skb = skb_dequeue(&bss->ps_bc_buf); |
1989 | if (!skb) | 1991 | if (!skb) |
1990 | return NULL; | 1992 | goto out; |
1991 | local->total_ps_buffered--; | 1993 | local->total_ps_buffered--; |
1992 | 1994 | ||
1993 | if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { | 1995 | if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { |
@@ -2000,30 +2002,21 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2000 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 2002 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
2001 | } | 2003 | } |
2002 | 2004 | ||
2003 | if (!ieee80211_tx_prepare(&tx, skb, local->mdev, control)) | 2005 | if (!ieee80211_tx_prepare(&tx, skb, local->mdev)) |
2004 | break; | 2006 | break; |
2005 | dev_kfree_skb_any(skb); | 2007 | dev_kfree_skb_any(skb); |
2006 | } | 2008 | } |
2009 | |||
2010 | info = IEEE80211_SKB_CB(skb); | ||
2011 | |||
2007 | sta = tx.sta; | 2012 | sta = tx.sta; |
2008 | tx.flags |= IEEE80211_TX_PS_BUFFERED; | 2013 | tx.flags |= IEEE80211_TX_PS_BUFFERED; |
2009 | tx.channel = local->hw.conf.channel; | 2014 | tx.channel = local->hw.conf.channel; |
2015 | info->band = tx.channel->band; | ||
2010 | 2016 | ||
2011 | for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) { | 2017 | if (invoke_tx_handlers(&tx)) |
2012 | res = (*handler)(&tx); | ||
2013 | if (res == TX_DROP || res == TX_QUEUED) | ||
2014 | break; | ||
2015 | } | ||
2016 | skb = tx.skb; /* handlers are allowed to change skb */ | ||
2017 | |||
2018 | if (res == TX_DROP) { | ||
2019 | I802_DEBUG_INC(local->tx_handlers_drop); | ||
2020 | dev_kfree_skb(skb); | ||
2021 | skb = NULL; | ||
2022 | } else if (res == TX_QUEUED) { | ||
2023 | I802_DEBUG_INC(local->tx_handlers_queued); | ||
2024 | skb = NULL; | 2018 | skb = NULL; |
2025 | } | 2019 | out: |
2026 | |||
2027 | rcu_read_unlock(); | 2020 | rcu_read_unlock(); |
2028 | 2021 | ||
2029 | return skb; | 2022 | return skb; |
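An AP driver is expected to drain buffered broadcast/multicast traffic right after the DTIM beacon by calling this helper until it returns NULL; a minimal sketch under that assumption, with drv_queue_bc_frame as a hypothetical driver hook:

    struct sk_buff *skb;

    while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL)
            drv_queue_bc_frame(hw, skb); /* hand off to the after-DTIM queue */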