aboutsummaryrefslogtreecommitdiffstats
path: root/net/mac80211
diff options
context:
space:
mode:
authorJohannes Berg <johannes@sipsolutions.net>2008-05-15 06:55:28 -0400
committerJohn W. Linville <linville@tuxdriver.com>2008-05-21 21:48:10 -0400
commite24549485f859be6518929bb1c9c0257d79f033d (patch)
treeae4d0e72eae8aee3439029318794f450ecb3fbfc /net/mac80211
parent2e92e6f2c50b4baf85cca968f0e6f1b5c0df7d39 (diff)
mac80211: reorder some transmit handlers
The next patch will require that transmit handlers that are after fragmentation are aware of the fact that the control info is also fragmented. To make that easier, this patch moves a number of transmit handlers before fragmentation. Signed-off-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211')
-rw-r--r--net/mac80211/tx.c234
1 file changed, 118 insertions, 116 deletions
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 99c3860bc0e2..666158f02a89 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -506,106 +506,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
506} 506}
507 507
508static ieee80211_tx_result 508static ieee80211_tx_result
509ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
510{
511 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
512 size_t hdrlen, per_fragm, num_fragm, payload_len, left;
513 struct sk_buff **frags, *first, *frag;
514 int i;
515 u16 seq;
516 u8 *pos;
517 int frag_threshold = tx->local->fragmentation_threshold;
518
519 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
520 return TX_CONTINUE;
521
522 first = tx->skb;
523
524 hdrlen = ieee80211_get_hdrlen(tx->fc);
525 payload_len = first->len - hdrlen;
526 per_fragm = frag_threshold - hdrlen - FCS_LEN;
527 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
528
529 frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
530 if (!frags)
531 goto fail;
532
533 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
534 seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
535 pos = first->data + hdrlen + per_fragm;
536 left = payload_len - per_fragm;
537 for (i = 0; i < num_fragm - 1; i++) {
538 struct ieee80211_hdr *fhdr;
539 size_t copylen;
540
541 if (left <= 0)
542 goto fail;
543
544 /* reserve enough extra head and tail room for possible
545 * encryption */
546 frag = frags[i] =
547 dev_alloc_skb(tx->local->tx_headroom +
548 frag_threshold +
549 IEEE80211_ENCRYPT_HEADROOM +
550 IEEE80211_ENCRYPT_TAILROOM);
551 if (!frag)
552 goto fail;
553 /* Make sure that all fragments use the same priority so
554 * that they end up using the same TX queue */
555 frag->priority = first->priority;
556 skb_reserve(frag, tx->local->tx_headroom +
557 IEEE80211_ENCRYPT_HEADROOM);
558 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
559 memcpy(fhdr, first->data, hdrlen);
560 if (i == num_fragm - 2)
561 fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
562 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
563 copylen = left > per_fragm ? per_fragm : left;
564 memcpy(skb_put(frag, copylen), pos, copylen);
565
566 pos += copylen;
567 left -= copylen;
568 }
569 skb_trim(first, hdrlen + per_fragm);
570
571 tx->num_extra_frag = num_fragm - 1;
572 tx->extra_frag = frags;
573
574 return TX_CONTINUE;
575
576 fail:
577 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
578 if (frags) {
579 for (i = 0; i < num_fragm - 1; i++)
580 if (frags[i])
581 dev_kfree_skb(frags[i]);
582 kfree(frags);
583 }
584 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
585 return TX_DROP;
586}
587
588static ieee80211_tx_result
589ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
590{
591 if (!tx->key)
592 return TX_CONTINUE;
593
594 switch (tx->key->conf.alg) {
595 case ALG_WEP:
596 return ieee80211_crypto_wep_encrypt(tx);
597 case ALG_TKIP:
598 return ieee80211_crypto_tkip_encrypt(tx);
599 case ALG_CCMP:
600 return ieee80211_crypto_ccmp_encrypt(tx);
601 }
602
603 /* not reached */
604 WARN_ON(1);
605 return TX_DROP;
606}
607
608static ieee80211_tx_result
609ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) 509ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
610{ 510{
611 struct rate_selection rsel; 511 struct rate_selection rsel;
@@ -747,26 +647,114 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
747 control->rts_cts_rate_idx = 0; 647 control->rts_cts_rate_idx = 0;
748 } 648 }
749 649
750 if (tx->sta) { 650 if (tx->sta)
751 control->aid = tx->sta->aid; 651 control->aid = tx->sta->aid;
752 tx->sta->tx_packets++; 652
753 tx->sta->tx_fragments++; 653 return TX_CONTINUE;
754 tx->sta->tx_bytes += tx->skb->len; 654}
755 if (tx->extra_frag) { 655
756 int i; 656static ieee80211_tx_result
757 tx->sta->tx_fragments += tx->num_extra_frag; 657ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
758 for (i = 0; i < tx->num_extra_frag; i++) { 658{
759 tx->sta->tx_bytes += 659 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
760 tx->extra_frag[i]->len; 660 size_t hdrlen, per_fragm, num_fragm, payload_len, left;
761 } 661 struct sk_buff **frags, *first, *frag;
762 } 662 int i;
663 u16 seq;
664 u8 *pos;
665 int frag_threshold = tx->local->fragmentation_threshold;
666
667 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
668 return TX_CONTINUE;
669
670 first = tx->skb;
671
672 hdrlen = ieee80211_get_hdrlen(tx->fc);
673 payload_len = first->len - hdrlen;
674 per_fragm = frag_threshold - hdrlen - FCS_LEN;
675 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
676
677 frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
678 if (!frags)
679 goto fail;
680
681 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
682 seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
683 pos = first->data + hdrlen + per_fragm;
684 left = payload_len - per_fragm;
685 for (i = 0; i < num_fragm - 1; i++) {
686 struct ieee80211_hdr *fhdr;
687 size_t copylen;
688
689 if (left <= 0)
690 goto fail;
691
692 /* reserve enough extra head and tail room for possible
693 * encryption */
694 frag = frags[i] =
695 dev_alloc_skb(tx->local->tx_headroom +
696 frag_threshold +
697 IEEE80211_ENCRYPT_HEADROOM +
698 IEEE80211_ENCRYPT_TAILROOM);
699 if (!frag)
700 goto fail;
701 /* Make sure that all fragments use the same priority so
702 * that they end up using the same TX queue */
703 frag->priority = first->priority;
704 skb_reserve(frag, tx->local->tx_headroom +
705 IEEE80211_ENCRYPT_HEADROOM);
706 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
707 memcpy(fhdr, first->data, hdrlen);
708 if (i == num_fragm - 2)
709 fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS);
710 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
711 copylen = left > per_fragm ? per_fragm : left;
712 memcpy(skb_put(frag, copylen), pos, copylen);
713
714 pos += copylen;
715 left -= copylen;
763 } 716 }
717 skb_trim(first, hdrlen + per_fragm);
718
719 tx->num_extra_frag = num_fragm - 1;
720 tx->extra_frag = frags;
764 721
765 return TX_CONTINUE; 722 return TX_CONTINUE;
723
724 fail:
725 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
726 if (frags) {
727 for (i = 0; i < num_fragm - 1; i++)
728 if (frags[i])
729 dev_kfree_skb(frags[i]);
730 kfree(frags);
731 }
732 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
733 return TX_DROP;
766} 734}
767 735
768static ieee80211_tx_result 736static ieee80211_tx_result
769ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx) 737ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
738{
739 if (!tx->key)
740 return TX_CONTINUE;
741
742 switch (tx->key->conf.alg) {
743 case ALG_WEP:
744 return ieee80211_crypto_wep_encrypt(tx);
745 case ALG_TKIP:
746 return ieee80211_crypto_tkip_encrypt(tx);
747 case ALG_CCMP:
748 return ieee80211_crypto_ccmp_encrypt(tx);
749 }
750
751 /* not reached */
752 WARN_ON(1);
753 return TX_DROP;
754}
755
756static ieee80211_tx_result
757ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
770{ 758{
771 struct ieee80211_local *local = tx->local; 759 struct ieee80211_local *local = tx->local;
772 struct sk_buff *skb = tx->skb; 760 struct sk_buff *skb = tx->skb;
@@ -822,6 +810,20 @@ ieee80211_tx_h_load_stats(struct ieee80211_tx_data *tx)
822 tx->sta->channel_use_raw += load; 810 tx->sta->channel_use_raw += load;
823 tx->sdata->channel_use_raw += load; 811 tx->sdata->channel_use_raw += load;
824 812
813 if (tx->sta) {
814 tx->sta->tx_packets++;
815 tx->sta->tx_fragments++;
816 tx->sta->tx_bytes += tx->skb->len;
817 if (tx->extra_frag) {
818 int i;
819 tx->sta->tx_fragments += tx->num_extra_frag;
820 for (i = 0; i < tx->num_extra_frag; i++) {
821 tx->sta->tx_bytes +=
822 tx->extra_frag[i]->len;
823 }
824 }
825 }
826
825 return TX_CONTINUE; 827 return TX_CONTINUE;
826} 828}
827 829
@@ -834,11 +836,11 @@ static ieee80211_tx_handler ieee80211_tx_handlers[] =
834 ieee80211_tx_h_ps_buf, 836 ieee80211_tx_h_ps_buf,
835 ieee80211_tx_h_select_key, 837 ieee80211_tx_h_select_key,
836 ieee80211_tx_h_michael_mic_add, 838 ieee80211_tx_h_michael_mic_add,
837 ieee80211_tx_h_fragment,
838 ieee80211_tx_h_encrypt,
839 ieee80211_tx_h_rate_ctrl, 839 ieee80211_tx_h_rate_ctrl,
840 ieee80211_tx_h_misc, 840 ieee80211_tx_h_misc,
841 ieee80211_tx_h_load_stats, 841 ieee80211_tx_h_fragment,
842 ieee80211_tx_h_encrypt,
843 ieee80211_tx_h_stats,
842 NULL 844 NULL
843}; 845};
844 846