author		Felix Fietkau <nbd@nbd.name>	2019-08-20 05:54:49 -0400
committer	Johannes Berg <johannes.berg@intel.com>	2019-08-21 05:10:13 -0400
commit		48cb39522a9d4d4680865e40a88f975a1cee6abc (patch)
tree		619f9198241125039f315670e6728aa985f6eecb /net/mac80211
parent		21f7981b4bd904871c6bbd67333cf0f69ff7c06a (diff)
mac80211: minstrel_ht: improve rate probing for devices with static fallback
On some devices that only support static rate fallback tables, sending rate
control probing packets can be really expensive. Probing lower rates can
already hurt throughput quite a bit. What hurts even more is the fact that
on mt76x0/mt76x2, single probing packets can only be forced by directing
packets at a different internal hardware queue, which causes some heavy
reordering and extra latency.
The reordering issue is mainly problematic while pushing lots of packets to
a particular station. If there is little activity, the overhead of probing
is negligible.

The static fallback behavior is designed to pretty much only handle rate
control algorithms that use only a very limited set of rates on which the
algorithm switches up/down based on packet error rate.

In order to better support that kind of hardware, this patch implements a
different approach to rate probing where it switches to a slightly higher
rate, waits for tx status feedback, then updates the stats and switches back
to the new max throughput rate. This only triggers above a packet rate of
100 per stats interval (~50ms).
For that kind of probing, the code has to reduce the set of probing rates a
lot more compared to single packet probing, so it uses only one packet per
MCS group which is either slightly faster, or as close as possible to the
max throughput rate.
This allows switching between similar rates with different numbers of
streams. The algorithm assumes that the hardware will work its way lower
within an MCS group in case of retransmissions, so that lower rates don't
have to be probed by the high packets per second rate probing code.
To further reduce the search space, it also does not probe rates with lower
channel bandwidth than the max throughput rate.

At the moment, these changes will only affect mt76x0/mt76x2.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20190820095449.45255-4-nbd@nbd.name
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
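The new probing flow is easiest to see as a small state machine driven by tx
status feedback. The sketch below is a standalone model of that flow, not
kernel code: the IDLE/ACTIVE/PENDING states, the switch-back once status for
a non-probe rate arrives, and the 100-packet threshold mirror the patch,
while the struct and function names are invented for the illustration (the
real code also requires hw->max_rates == 1 and the sample_switch knob to be
enabled).

/* Simplified model of the sample-switch state machine (illustration only). */
#include <stdbool.h>
#include <stdio.h>

enum sample_mode { SAMPLE_IDLE, SAMPLE_ACTIVE, SAMPLE_PENDING };

struct sta_model {
	enum sample_mode mode;
	int probe_rate;
	int max_tp_rate;
};

/* Stats interval: switch to a probe rate only if traffic exceeds the threshold. */
static void stats_interval(struct sta_model *s, int pkts_in_interval,
			   int candidate_rate)
{
	s->mode = SAMPLE_IDLE;
	if (pkts_in_interval < 100)	/* SAMPLE_SWITCH_THR in the patch */
		return;
	s->probe_rate = candidate_rate;
	s->mode = SAMPLE_ACTIVE;	/* rate table now points at probe_rate */
}

/* Per-tx-status handling: advance the state machine. */
static void tx_status(struct sta_model *s, int reported_rate)
{
	bool sample_status = (reported_rate == s->probe_rate);

	switch (s->mode) {
	case SAMPLE_IDLE:
		break;
	case SAMPLE_ACTIVE:
		if (sample_status)
			s->mode = SAMPLE_PENDING; /* feedback seen, switch back to max tp rate */
		break;
	case SAMPLE_PENDING:
		if (!sample_status)
			s->mode = SAMPLE_IDLE;	  /* status for max tp rate again, probe done */
		break;
	}
}

int main(void)
{
	struct sta_model s = { SAMPLE_IDLE, -1, 7 };

	stats_interval(&s, 150, 8);	/* busy interval: probe rate index 8 */
	tx_status(&s, 8);		/* status for the probe rate */
	tx_status(&s, 7);		/* status for the max tp rate */
	printf("mode=%d (0 == idle)\n", s.mode);
	return 0;
}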
Diffstat (limited to 'net/mac80211')
-rw-r--r--	net/mac80211/rc80211_minstrel.h		1
-rw-r--r--	net/mac80211/rc80211_minstrel_ht.c	240
-rw-r--r--	net/mac80211/rc80211_minstrel_ht.h	12
3 files changed, 225 insertions, 28 deletions
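As a quick numeric illustration of the selection rule in
minstrel_ht_find_probe_rates() below: "6.25% faster" is implemented as
max_dur -= max_dur / 16, and the probability check uses MINSTREL_FRAC fixed
point. The following is a standalone sketch under those assumptions; the
FRAC/qualifies names and the duration values are invented for the example,
only the thresholds come from the patch.

/* Standalone illustration of the probe-rate duration threshold. */
#include <stdbool.h>
#include <stdio.h>

#define FRAC_BITS 12				/* same idea as minstrel's fixed-point scale */
#define FRAC(val, div) (((val) << FRAC_BITS) / (div))

static bool qualifies(unsigned int cand_dur, unsigned int tp_dur,
		      unsigned int prob_ewma)
{
	bool faster_rate = prob_ewma > FRAC(75, 100);
	unsigned int max_dur = tp_dur;

	if (faster_rate)
		max_dur -= max_dur / 16;	/* require ~6.25% less airtime */

	return cand_dur <= max_dur;
}

int main(void)
{
	/* max tp rate takes 1000 time units per packet, 80% delivery probability */
	printf("960 units: %d\n", qualifies(960, 1000, FRAC(80, 100))); /* 0: not 6.25% faster */
	printf("930 units: %d\n", qualifies(930, 1000, FRAC(80, 100))); /* 1: qualifies */
	return 0;
}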
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 3c96a853adbd..51d8b2c846e7 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -95,6 +95,7 @@ struct minstrel_sta_info {
 struct minstrel_priv {
 	struct ieee80211_hw *hw;
 	bool has_mrr;
+	u32 sample_switch;
 	unsigned int cw_min;
 	unsigned int cw_max;
 	unsigned int max_retry;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index c5868a1de306..a01168514840 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -18,6 +18,8 @@
 #define AVG_AMPDU_SIZE	16
 #define AVG_PKT_SIZE	1200
 
+#define SAMPLE_SWITCH_THR	100
+
 /* Number of bits for an average sized packet */
 #define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)
 
@@ -58,6 +60,7 @@
 	[GROUP_IDX(_streams, _sgi, _ht40)] = {	\
 		.streams = _streams,	\
 		.shift = _s,	\
+		.bw = _ht40,	\
 		.flags =	\
 			IEEE80211_TX_RC_MCS |	\
 			(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |	\
@@ -94,6 +97,7 @@
 	[VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
 		.streams = _streams,	\
 		.shift = _s,	\
+		.bw = _bw,	\
 		.flags =	\
 			IEEE80211_TX_RC_VHT_MCS |	\
 			(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |	\
@@ -526,6 +530,133 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
 	}
 }
 
+static inline int
+minstrel_get_duration(int index)
+{
+	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+	unsigned int duration = group->duration[index % MCS_GROUP_RATES];
+	return duration << group->shift;
+}
+
+static bool
+minstrel_ht_probe_group(struct minstrel_ht_sta *mi, const struct mcs_group *tp_group,
+			int tp_idx, const struct mcs_group *group)
+{
+	if (group->bw < tp_group->bw)
+		return false;
+
+	if (group->streams == tp_group->streams)
+		return true;
+
+	if (tp_idx < 4 && group->streams == tp_group->streams - 1)
+		return true;
+
+	return group->streams == tp_group->streams + 1;
+}
+
+static void
+minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rates,
+			     bool faster_rate)
+{
+	const struct mcs_group *group, *tp_group;
+	int i, g, max_dur;
+	int tp_idx;
+
+	tp_group = &minstrel_mcs_groups[mi->max_tp_rate[0] / MCS_GROUP_RATES];
+	tp_idx = mi->max_tp_rate[0] % MCS_GROUP_RATES;
+
+	max_dur = minstrel_get_duration(mi->max_tp_rate[0]);
+	if (faster_rate)
+		max_dur -= max_dur / 16;
+
+	for (g = 0; g < MINSTREL_GROUPS_NB; g++) {
+		u16 supported = mi->supported[g];
+
+		if (!supported)
+			continue;
+
+		group = &minstrel_mcs_groups[g];
+		if (!minstrel_ht_probe_group(mi, tp_group, tp_idx, group))
+			continue;
+
+		for (i = 0; supported; supported >>= 1, i++) {
+			int idx;
+
+			if (!(supported & 1))
+				continue;
+
+			if ((group->duration[i] << group->shift) > max_dur)
+				continue;
+
+			idx = g * MCS_GROUP_RATES + i;
+			if (idx == mi->max_tp_rate[0])
+				continue;
+
+			rates[(*n_rates)++] = idx;
+			break;
+		}
+	}
+}
+
+static void
+minstrel_ht_rate_sample_switch(struct minstrel_priv *mp,
+			       struct minstrel_ht_sta *mi)
+{
+	struct minstrel_rate_stats *mrs;
+	u16 rates[MINSTREL_GROUPS_NB];
+	int n_rates = 0;
+	int probe_rate = 0;
+	bool faster_rate;
+	int i;
+	u8 random;
+
+	/*
+	 * Use rate switching instead of probing packets for devices with
+	 * little control over retry fallback behavior
+	 */
+	if (mp->hw->max_rates > 1)
+		return;
+
+	/*
+	 * If the current EWMA prob is >75%, look for a rate that's 6.25%
+	 * faster than the max tp rate.
+	 * If that fails, look again for a rate that is at least as fast
+	 */
+	mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
+	faster_rate = mrs->prob_ewma > MINSTREL_FRAC(75, 100);
+	minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
+	if (!n_rates && faster_rate)
+		minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
+
+	/* If no suitable rate was found, try to pick the next one in the group */
+	if (!n_rates) {
+		int g_idx = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+		u16 supported = mi->supported[g_idx];
+
+		supported >>= mi->max_tp_rate[0] % MCS_GROUP_RATES;
+		for (i = 0; supported; i++) {
+			if (!(supported & 1))
+				continue;
+
+			probe_rate = mi->max_tp_rate[0] + i;
+			goto out;
+		}
+
+		return;
+	}
+
+	i = 0;
+	if (n_rates > 1) {
+		random = prandom_u32();
+		i = random % n_rates;
+	}
+	probe_rate = rates[i];
+
+out:
+	mi->sample_rate = probe_rate;
+	mi->sample_mode = MINSTREL_SAMPLE_ACTIVE;
+}
+
 /*
  * Update rate statistics and select new primary rates
  *
@@ -536,7 +667,8 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
  * higher throughput rates, even if the probablity is a bit lower
  */
 static void
-minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+			 bool sample)
 {
 	struct minstrel_mcs_group_data *mg;
 	struct minstrel_rate_stats *mrs;
@@ -544,6 +676,18 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
 	u16 tmp_cck_tp_rate[MAX_THR_RATES], index;
 
+	mi->sample_mode = MINSTREL_SAMPLE_IDLE;
+
+	if (sample) {
+		mi->total_packets_cur = mi->total_packets -
+					mi->total_packets_last;
+		mi->total_packets_last = mi->total_packets;
+	}
+	if (!mp->sample_switch)
+		sample = false;
+	if (mi->total_packets_cur < SAMPLE_SWITCH_THR && mp->sample_switch != 1)
+		sample = false;
+
 	if (mi->ampdu_packets > 0) {
 		if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
 			mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
@@ -630,12 +774,16 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	/* try to sample all available rates during each interval */
 	mi->sample_count *= 8;
 
+	if (sample)
+		minstrel_ht_rate_sample_switch(mp, mi);
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 	/* use fixed index if set */
 	if (mp->fixed_rate_idx != -1) {
 		for (i = 0; i < 4; i++)
 			mi->max_tp_rate[i] = mp->fixed_rate_idx;
 		mi->max_prob_rate = mp->fixed_rate_idx;
+		mi->sample_mode = MINSTREL_SAMPLE_IDLE;
 	}
 #endif
 
@@ -739,15 +887,17 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
 	struct minstrel_ht_sta_priv *msp = priv_sta;
 	struct minstrel_ht_sta *mi = &msp->ht;
 	struct ieee80211_tx_rate *ar = info->status.rates;
-	struct minstrel_rate_stats *rate, *rate2;
+	struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
 	struct minstrel_priv *mp = priv;
 	bool last, update = false;
+	bool sample_status = false;
 	int i;
 
 	if (!msp->is_ht)
 		return mac80211_minstrel.tx_status_ext(priv, sband,
 						       &msp->legacy, st);
 
+
 	/* This packet was aggregated but doesn't carry status info */
 	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
 	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
@@ -773,12 +923,17 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
 	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
 		mi->sample_packets += info->status.ampdu_len;
 
+	if (mi->sample_mode != MINSTREL_SAMPLE_IDLE)
+		rate_sample = minstrel_get_ratestats(mi, mi->sample_rate);
+
 	last = !minstrel_ht_txstat_valid(mp, &ar[0]);
 	for (i = 0; !last; i++) {
 		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
 		       !minstrel_ht_txstat_valid(mp, &ar[i + 1]);
 
 		rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
+		if (rate == rate_sample)
+			sample_status = true;
 
 		if (last)
 			rate->success += info->status.ampdu_ack_len;
@@ -786,44 +941,60 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
 		rate->attempts += ar[i].count * info->status.ampdu_len;
 	}
 
-	/*
-	 * check for sudden death of spatial multiplexing,
-	 * downgrade to a lower number of streams if necessary.
-	 */
-	rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
-	if (rate->attempts > 30 &&
-	    MINSTREL_FRAC(rate->success, rate->attempts) <
-	    MINSTREL_FRAC(20, 100)) {
-		minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
-		update = true;
-	}
+	switch (mi->sample_mode) {
+	case MINSTREL_SAMPLE_IDLE:
+		break;
+
+	case MINSTREL_SAMPLE_ACTIVE:
+		if (!sample_status)
+			break;
+
+		mi->sample_mode = MINSTREL_SAMPLE_PENDING;
+		update = true;
+		break;
+
+	case MINSTREL_SAMPLE_PENDING:
+		if (sample_status)
+			break;
 
-	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
-	if (rate2->attempts > 30 &&
-	    MINSTREL_FRAC(rate2->success, rate2->attempts) <
-	    MINSTREL_FRAC(20, 100)) {
-		minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
 		update = true;
+		minstrel_ht_update_stats(mp, mi, false);
+		break;
+	}
+
+
+	if (mp->hw->max_rates > 1) {
+		/*
+		 * check for sudden death of spatial multiplexing,
+		 * downgrade to a lower number of streams if necessary.
+		 */
+		rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
+		if (rate->attempts > 30 &&
+		    MINSTREL_FRAC(rate->success, rate->attempts) <
+		    MINSTREL_FRAC(20, 100)) {
+			minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
+			update = true;
+		}
+
+		rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
+		if (rate2->attempts > 30 &&
+		    MINSTREL_FRAC(rate2->success, rate2->attempts) <
+		    MINSTREL_FRAC(20, 100)) {
+			minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
+			update = true;
+		}
 	}
 
 	if (time_after(jiffies, mi->last_stats_update +
 		       (mp->update_interval / 2 * HZ) / 1000)) {
 		update = true;
-		minstrel_ht_update_stats(mp, mi);
+		minstrel_ht_update_stats(mp, mi, true);
 	}
 
 	if (update)
 		minstrel_ht_update_rates(mp, mi);
 }
 
-static inline int
-minstrel_get_duration(int index)
-{
-	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
-	unsigned int duration = group->duration[index % MCS_GROUP_RATES];
-	return duration << group->shift;
-}
-
 static void
 minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 			 int index)
@@ -988,14 +1159,18 @@ static void
 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 {
 	struct ieee80211_sta_rates *rates;
+	u16 first_rate = mi->max_tp_rate[0];
 	int i = 0;
 
+	if (mi->sample_mode == MINSTREL_SAMPLE_ACTIVE)
+		first_rate = mi->sample_rate;
+
 	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
 	if (!rates)
 		return;
 
 	/* Start with max_tp_rate[0] */
-	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
+	minstrel_ht_set_rate(mp, mi, rates, i++, first_rate);
 
 	if (mp->hw->max_rates >= 3) {
 		/* At least 3 tx rates supported, use max_tp_rate[1] next */
@@ -1020,6 +1195,11 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	int tp_rate1, tp_rate2;
 	int sample_idx = 0;
 
+	if (mp->hw->max_rates == 1 && mp->sample_switch &&
+	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
+	     mp->sample_switch == 1))
+		return -1;
+
 	if (mi->sample_wait > 0) {
 		mi->sample_wait--;
 		return -1;
@@ -1341,7 +1521,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
 	mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4;
 
 	/* create an initial rate table with the lowest supported rates */
-	minstrel_ht_update_stats(mp, mi);
+	minstrel_ht_update_stats(mp, mi, true);
 	minstrel_ht_update_rates(mp, mi);
 
 	return;
@@ -1459,6 +1639,8 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 	if (!mp)
 		return NULL;
 
+	mp->sample_switch = -1;
+
 	/* contention window settings
 	 * Just an approximation. Using the per-queue values would complicate
 	 * the calculations and is probably unnecessary */
@@ -1490,6 +1672,8 @@ minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 	mp->fixed_rate_idx = (u32) -1;
 	debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
 			   &mp->fixed_rate_idx);
+	debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
+			   &mp->sample_switch);
 #endif
 
 	minstrel_ht_init_cck_rates(mp);
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 80296268c778..f938701e7ab7 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -33,6 +33,7 @@ struct mcs_group {
 	u16 flags;
 	u8 streams;
 	u8 shift;
+	u8 bw;
 	u16 duration[MCS_GROUP_RATES];
 };
 
@@ -50,6 +51,12 @@ struct minstrel_mcs_group_data {
 	struct minstrel_rate_stats rates[MCS_GROUP_RATES];
 };
 
+enum minstrel_sample_mode {
+	MINSTREL_SAMPLE_IDLE,
+	MINSTREL_SAMPLE_ACTIVE,
+	MINSTREL_SAMPLE_PENDING,
+};
+
 struct minstrel_ht_sta {
 	struct ieee80211_sta *sta;
 
@@ -71,6 +78,8 @@ struct minstrel_ht_sta {
 	unsigned int overhead;
 	unsigned int overhead_rtscts;
 
+	unsigned int total_packets_last;
+	unsigned int total_packets_cur;
 	unsigned int total_packets;
 	unsigned int sample_packets;
 
@@ -82,6 +91,9 @@ struct minstrel_ht_sta {
 	u8 sample_count;
 	u8 sample_slow;
 
+	enum minstrel_sample_mode sample_mode;
+	u16 sample_rate;
+
 	/* current MCS group to be sampled */
 	u8 sample_group;
 
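The sample-switch behaviour can be tuned at runtime through the debugfs knob
created in minstrel_ht_alloc() above: 0 disables it, 1 enables it regardless
of traffic level, and the default of -1 (read back as 4294967295) enables it
only above SAMPLE_SWITCH_THR packets per stats interval; the mechanism itself
only applies to hardware reporting max_rates == 1. A minimal userspace
sketch, assuming debugfs is mounted at /sys/kernel/debug and the wireless
device is phy0 (adjust the path for your system):

/* Illustration only: write a value to the sample_switch debugfs knob. */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/rc/sample_switch";
	const char *val = argc > 1 ? argv[1] : "1";	/* default: force-enable */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);	/* needs root and a driver using minstrel_ht */
		return 1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f) ? 1 : 0;
}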