author | Thomas Huehn <thomas@net.t-labs.tu-berlin.de> | 2014-09-09 17:22:14 -0400
---|---|---
committer | Johannes Berg <johannes.berg@intel.com> | 2014-09-11 06:10:14 -0400
commit | 5935839ad73583781b8bbe8d91412f6826e218a4 (patch)
tree | e28cbd441b882cecf9e16b575fe684eae9197d2a /net/mac80211/rc80211_minstrel_ht.c
parent | ca12c0c83334a84581bb01daaedf1009deb09204 (diff)
mac80211: improve minstrel_ht rate sorting by throughput & probability
This patch improves the way minstrel_ht sorts rates according to throughput
and success probability. The 3 for-loops across the entire rate and MCS group
set in minstrel_ht_update_stats(), which were used to determine the fastest,
second fastest and most robust rate, are reduced to 2 for-loops.
The list of rates sorted by throughput is extended to the best four rates,
as they are needed for the upcoming joint rate and power control. The sorting
is done via the new function minstrel_ht_sort_best_tp_rates(). The annotation
of those 4 best throughput rates in the debugfs file rc-stats is changed to
"A,B,C,D", where A is the fastest rate and D the 4th fastest.
Signed-off-by: Thomas Huehn <thomas@net.t-labs.tu-berlin.de>
Tested-by: Stefan Venz <ikstream86@gmail.com>
Acked-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/rc80211_minstrel_ht.c')
-rw-r--r-- | net/mac80211/rc80211_minstrel_ht.c | 303 |
1 file changed, 201 insertions(+), 102 deletions(-)
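
Before the diff itself, a minimal standalone sketch of the insertion step that the new minstrel_ht_sort_best_tp_rates() performs: each candidate rate index is slotted into a list of the MAX_THR_RATES best rates, ordered by estimated throughput with success probability as the tie-breaker. This is not the kernel code; the flat stats[] table, the helper name sort_best_tp_rates() and the sample numbers are illustrative assumptions, whereas the real function looks rates up via mi->groups[group].rates[idx].

```c
#include <stdio.h>
#include <string.h>

#define MAX_THR_RATES 4

struct rate_stats {
	unsigned int cur_tp;		/* estimated throughput */
	unsigned int probability;	/* scaled success probability */
};

/* Hypothetical flat table standing in for mi->groups[].rates[] */
static const struct rate_stats stats[] = {
	{ 100, 900 }, { 300, 700 }, { 300, 800 }, { 250, 950 }, { 50, 990 },
};

/* Insert @index into @tp_list, keeping it sorted by throughput, then probability */
static void sort_best_tp_rates(unsigned char index, unsigned char *tp_list)
{
	int j;

	/* find the first list entry the candidate beats */
	for (j = 0; j < MAX_THR_RATES; j++) {
		unsigned char cur = tp_list[j];

		if (stats[index].cur_tp > stats[cur].cur_tp ||
		    (stats[index].cur_tp == stats[cur].cur_tp &&
		     stats[index].probability > stats[cur].probability))
			break;
	}
	if (j == MAX_THR_RATES)
		return;

	/* shift the weaker tail down by one and slot the candidate in */
	memmove(&tp_list[j + 1], &tp_list[j],
		sizeof(*tp_list) * (MAX_THR_RATES - (j + 1)));
	tp_list[j] = index;
}

int main(void)
{
	unsigned char tp_list[MAX_THR_RATES] = { 0, 0, 0, 0 };
	unsigned char i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		sort_best_tp_rates(i, tp_list);

	/* prints "2 1 3 0": throughput descending, probability breaks the tie */
	for (i = 0; i < MAX_THR_RATES; i++)
		printf("%u ", tp_list[i]);
	printf("\n");
	return 0;
}
```

Note that the strict greater-than comparison means a candidate that ties on both throughput and probability does not displace an existing entry, matching the behaviour of the patched function.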
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 85c1e74b7714..df90ce2db00c 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -135,7 +135,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi); | |||
135 | static int | 135 | static int |
136 | minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) | 136 | minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) |
137 | { | 137 | { |
138 | return GROUP_IDX((rate->idx / 8) + 1, | 138 | return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, |
139 | !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), | 139 | !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), |
140 | !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); | 140 | !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); |
141 | } | 141 | } |
@@ -233,12 +233,151 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | /* | 235 | /* |
236 | * Find & sort topmost throughput rates | ||
237 | * | ||
238 | * If multiple rates provide equal throughput the sorting is based on their | ||
239 | * current success probability. Higher success probability is preferred among | ||
240 | * MCS groups, CCK rates do not provide aggregation and are therefore at last. | ||
241 | */ | ||
242 | static void | ||
243 | minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index, | ||
244 | u8 *tp_list) | ||
245 | { | ||
246 | int cur_group, cur_idx, cur_thr, cur_prob; | ||
247 | int tmp_group, tmp_idx, tmp_thr, tmp_prob; | ||
248 | int j = MAX_THR_RATES; | ||
249 | |||
250 | cur_group = index / MCS_GROUP_RATES; | ||
251 | cur_idx = index % MCS_GROUP_RATES; | ||
252 | cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; | ||
253 | cur_prob = mi->groups[cur_group].rates[cur_idx].probability; | ||
254 | |||
255 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; | ||
256 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; | ||
257 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
258 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | ||
259 | |||
260 | while (j > 0 && (cur_thr > tmp_thr || | ||
261 | (cur_thr == tmp_thr && cur_prob > tmp_prob))) { | ||
262 | j--; | ||
263 | tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; | ||
264 | tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; | ||
265 | tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
266 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | ||
267 | } | ||
268 | |||
269 | if (j < MAX_THR_RATES - 1) { | ||
270 | memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * | ||
271 | (MAX_THR_RATES - (j + 1)))); | ||
272 | } | ||
273 | if (j < MAX_THR_RATES) | ||
274 | tp_list[j] = index; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Find and set the topmost probability rate per sta and per group | ||
279 | */ | ||
280 | static void | ||
281 | minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index) | ||
282 | { | ||
283 | struct minstrel_mcs_group_data *mg; | ||
284 | struct minstrel_rate_stats *mr; | ||
285 | int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group; | ||
286 | |||
287 | mg = &mi->groups[index / MCS_GROUP_RATES]; | ||
288 | mr = &mg->rates[index % MCS_GROUP_RATES]; | ||
289 | |||
290 | tmp_group = mi->max_prob_rate / MCS_GROUP_RATES; | ||
291 | tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES; | ||
292 | tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
293 | tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; | ||
294 | |||
295 | /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from | ||
296 | * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */ | ||
297 | max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES; | ||
298 | if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) && | ||
299 | (max_tp_group != MINSTREL_CCK_GROUP)) | ||
300 | return; | ||
301 | |||
302 | if (mr->probability > MINSTREL_FRAC(75, 100)) { | ||
303 | if (mr->cur_tp > tmp_tp) | ||
304 | mi->max_prob_rate = index; | ||
305 | if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp) | ||
306 | mg->max_group_prob_rate = index; | ||
307 | } else { | ||
308 | if (mr->probability > tmp_prob) | ||
309 | mi->max_prob_rate = index; | ||
310 | if (mr->probability > mg->rates[mg->max_group_prob_rate].probability) | ||
311 | mg->max_group_prob_rate = index; | ||
312 | } | ||
313 | } | ||
314 | |||
315 | |||
316 | /* | ||
317 | * Assign new rate set per sta and use CCK rates only if the fastest | ||
318 | * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted | ||
319 | * rate sets where MCS and CCK rates are mixed, because CCK rates can | ||
320 | * not use aggregation. | ||
321 | */ | ||
322 | static void | ||
323 | minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi, | ||
324 | u8 tmp_mcs_tp_rate[MAX_THR_RATES], | ||
325 | u8 tmp_cck_tp_rate[MAX_THR_RATES]) | ||
326 | { | ||
327 | unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp; | ||
328 | int i; | ||
329 | |||
330 | tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES; | ||
331 | tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES; | ||
332 | tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
333 | |||
334 | tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES; | ||
335 | tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES; | ||
336 | tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp; | ||
337 | |||
338 | if (tmp_cck_tp > tmp_mcs_tp) { | ||
339 | for(i = 0; i < MAX_THR_RATES; i++) { | ||
340 | minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i], | ||
341 | tmp_mcs_tp_rate); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Try to increase robustness of max_prob rate by decrease number of | ||
349 | * streams if possible. | ||
350 | */ | ||
351 | static inline void | ||
352 | minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) | ||
353 | { | ||
354 | struct minstrel_mcs_group_data *mg; | ||
355 | struct minstrel_rate_stats *mr; | ||
356 | int tmp_max_streams, group; | ||
357 | int tmp_tp = 0; | ||
358 | |||
359 | tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / | ||
360 | MCS_GROUP_RATES].streams; | ||
361 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { | ||
362 | mg = &mi->groups[group]; | ||
363 | if (!mg->supported || group == MINSTREL_CCK_GROUP) | ||
364 | continue; | ||
365 | mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate); | ||
366 | if (tmp_tp < mr->cur_tp && | ||
367 | (minstrel_mcs_groups[group].streams < tmp_max_streams)) { | ||
368 | mi->max_prob_rate = mg->max_group_prob_rate; | ||
369 | tmp_tp = mr->cur_tp; | ||
370 | } | ||
371 | } | ||
372 | } | ||
373 | |||
374 | /* | ||
236 | * Update rate statistics and select new primary rates | 375 | * Update rate statistics and select new primary rates |
237 | * | 376 | * |
238 | * Rules for rate selection: | 377 | * Rules for rate selection: |
239 | * - max_prob_rate must use only one stream, as a tradeoff between delivery | 378 | * - max_prob_rate must use only one stream, as a tradeoff between delivery |
240 | * probability and throughput during strong fluctuations | 379 | * probability and throughput during strong fluctuations |
241 | * - as long as the max prob rate has a probability of more than 3/4, pick | 380 | * - as long as the max prob rate has a probability of more than 75%, pick |
242 | * higher throughput rates, even if the probablity is a bit lower | 381 | * higher throughput rates, even if the probablity is a bit lower |
243 | */ | 382 | */ |
244 | static void | 383 | static void |
@@ -246,9 +385,9 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
246 | { | 385 | { |
247 | struct minstrel_mcs_group_data *mg; | 386 | struct minstrel_mcs_group_data *mg; |
248 | struct minstrel_rate_stats *mr; | 387 | struct minstrel_rate_stats *mr; |
249 | int cur_prob, cur_prob_tp, cur_tp, cur_tp2; | 388 | int group, i, j; |
250 | int group, i, index; | 389 | u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; |
251 | bool mi_rates_valid = false; | 390 | u8 tmp_cck_tp_rate[MAX_THR_RATES], index; |
252 | 391 | ||
253 | if (mi->ampdu_packets > 0) { | 392 | if (mi->ampdu_packets > 0) { |
254 | mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, | 393 | mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, |
@@ -260,13 +399,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
260 | mi->sample_slow = 0; | 399 | mi->sample_slow = 0; |
261 | mi->sample_count = 0; | 400 | mi->sample_count = 0; |
262 | 401 | ||
263 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { | 402 | /* Initialize global rate indexes */ |
264 | bool mg_rates_valid = false; | 403 | for(j = 0; j < MAX_THR_RATES; j++){ |
404 | tmp_mcs_tp_rate[j] = 0; | ||
405 | tmp_cck_tp_rate[j] = 0; | ||
406 | } | ||
265 | 407 | ||
266 | cur_prob = 0; | 408 | /* Find best rate sets within all MCS groups*/ |
267 | cur_prob_tp = 0; | 409 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { |
268 | cur_tp = 0; | ||
269 | cur_tp2 = 0; | ||
270 | 410 | ||
271 | mg = &mi->groups[group]; | 411 | mg = &mi->groups[group]; |
272 | if (!mg->supported) | 412 | if (!mg->supported) |
@@ -274,24 +414,16 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
274 | 414 | ||
275 | mi->sample_count++; | 415 | mi->sample_count++; |
276 | 416 | ||
417 | /* (re)Initialize group rate indexes */ | ||
418 | for(j = 0; j < MAX_THR_RATES; j++) | ||
419 | tmp_group_tp_rate[j] = group; | ||
420 | |||
277 | for (i = 0; i < MCS_GROUP_RATES; i++) { | 421 | for (i = 0; i < MCS_GROUP_RATES; i++) { |
278 | if (!(mg->supported & BIT(i))) | 422 | if (!(mg->supported & BIT(i))) |
279 | continue; | 423 | continue; |
280 | 424 | ||
281 | index = MCS_GROUP_RATES * group + i; | 425 | index = MCS_GROUP_RATES * group + i; |
282 | 426 | ||
283 | /* initialize rates selections starting indexes */ | ||
284 | if (!mg_rates_valid) { | ||
285 | mg->max_tp_rate = mg->max_tp_rate2 = | ||
286 | mg->max_prob_rate = i; | ||
287 | if (!mi_rates_valid) { | ||
288 | mi->max_tp_rate = mi->max_tp_rate2 = | ||
289 | mi->max_prob_rate = index; | ||
290 | mi_rates_valid = true; | ||
291 | } | ||
292 | mg_rates_valid = true; | ||
293 | } | ||
294 | |||
295 | mr = &mg->rates[i]; | 427 | mr = &mg->rates[i]; |
296 | mr->retry_updated = false; | 428 | mr->retry_updated = false; |
297 | minstrel_calc_rate_ewma(mr); | 429 | minstrel_calc_rate_ewma(mr); |
@@ -300,82 +432,47 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
300 | if (!mr->cur_tp) | 432 | if (!mr->cur_tp) |
301 | continue; | 433 | continue; |
302 | 434 | ||
303 | if ((mr->cur_tp > cur_prob_tp && mr->probability > | 435 | /* Find max throughput rate set */ |
304 | MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { | 436 | if (group != MINSTREL_CCK_GROUP) { |
305 | mg->max_prob_rate = index; | 437 | minstrel_ht_sort_best_tp_rates(mi, index, |
306 | cur_prob = mr->probability; | 438 | tmp_mcs_tp_rate); |
307 | cur_prob_tp = mr->cur_tp; | 439 | } else if (group == MINSTREL_CCK_GROUP) { |
308 | } | 440 | minstrel_ht_sort_best_tp_rates(mi, index, |
309 | 441 | tmp_cck_tp_rate); | |
310 | if (mr->cur_tp > cur_tp) { | ||
311 | swap(index, mg->max_tp_rate); | ||
312 | cur_tp = mr->cur_tp; | ||
313 | mr = minstrel_get_ratestats(mi, index); | ||
314 | } | ||
315 | |||
316 | if (index >= mg->max_tp_rate) | ||
317 | continue; | ||
318 | |||
319 | if (mr->cur_tp > cur_tp2) { | ||
320 | mg->max_tp_rate2 = index; | ||
321 | cur_tp2 = mr->cur_tp; | ||
322 | } | 442 | } |
323 | } | ||
324 | } | ||
325 | 443 | ||
326 | /* try to sample all available rates during each interval */ | 444 | /* Find max throughput rate set within a group */ |
327 | mi->sample_count *= 8; | 445 | minstrel_ht_sort_best_tp_rates(mi, index, |
446 | tmp_group_tp_rate); | ||
328 | 447 | ||
329 | cur_prob = 0; | 448 | /* Find max probability rate per group and global */ |
330 | cur_prob_tp = 0; | 449 | minstrel_ht_set_best_prob_rate(mi, index); |
331 | cur_tp = 0; | ||
332 | cur_tp2 = 0; | ||
333 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { | ||
334 | mg = &mi->groups[group]; | ||
335 | if (!mg->supported) | ||
336 | continue; | ||
337 | |||
338 | mr = minstrel_get_ratestats(mi, mg->max_tp_rate); | ||
339 | if (cur_tp < mr->cur_tp) { | ||
340 | mi->max_tp_rate2 = mi->max_tp_rate; | ||
341 | cur_tp2 = cur_tp; | ||
342 | mi->max_tp_rate = mg->max_tp_rate; | ||
343 | cur_tp = mr->cur_tp; | ||
344 | mi->max_prob_streams = minstrel_mcs_groups[group].streams - 1; | ||
345 | } | 450 | } |
346 | 451 | ||
347 | mr = minstrel_get_ratestats(mi, mg->max_tp_rate2); | 452 | memcpy(mg->max_group_tp_rate, tmp_group_tp_rate, |
348 | if (cur_tp2 < mr->cur_tp) { | 453 | sizeof(mg->max_group_tp_rate)); |
349 | mi->max_tp_rate2 = mg->max_tp_rate2; | ||
350 | cur_tp2 = mr->cur_tp; | ||
351 | } | ||
352 | } | 454 | } |
353 | 455 | ||
354 | if (mi->max_prob_streams < 1) | 456 | /* Assign new rate set per sta */ |
355 | mi->max_prob_streams = 1; | 457 | minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate); |
458 | memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate)); | ||
356 | 459 | ||
357 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { | 460 | /* Try to increase robustness of max_prob_rate*/ |
358 | mg = &mi->groups[group]; | 461 | minstrel_ht_prob_rate_reduce_streams(mi); |
359 | if (!mg->supported) | 462 | |
360 | continue; | 463 | /* try to sample all available rates during each interval */ |
361 | mr = minstrel_get_ratestats(mi, mg->max_prob_rate); | 464 | mi->sample_count *= 8; |
362 | if (cur_prob_tp < mr->cur_tp && | ||
363 | minstrel_mcs_groups[group].streams <= mi->max_prob_streams) { | ||
364 | mi->max_prob_rate = mg->max_prob_rate; | ||
365 | cur_prob = mr->cur_prob; | ||
366 | cur_prob_tp = mr->cur_tp; | ||
367 | } | ||
368 | } | ||
369 | 465 | ||
370 | #ifdef CONFIG_MAC80211_DEBUGFS | 466 | #ifdef CONFIG_MAC80211_DEBUGFS |
371 | /* use fixed index if set */ | 467 | /* use fixed index if set */ |
372 | if (mp->fixed_rate_idx != -1) { | 468 | if (mp->fixed_rate_idx != -1) { |
373 | mi->max_tp_rate = mp->fixed_rate_idx; | 469 | for (i = 0; i < 4; i++) |
374 | mi->max_tp_rate2 = mp->fixed_rate_idx; | 470 | mi->max_tp_rate[i] = mp->fixed_rate_idx; |
375 | mi->max_prob_rate = mp->fixed_rate_idx; | 471 | mi->max_prob_rate = mp->fixed_rate_idx; |
376 | } | 472 | } |
377 | #endif | 473 | #endif |
378 | 474 | ||
475 | /* Reset update timer */ | ||
379 | mi->stats_update = jiffies; | 476 | mi->stats_update = jiffies; |
380 | } | 477 | } |
381 | 478 | ||
@@ -420,8 +517,7 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi) | |||
420 | } | 517 | } |
421 | 518 | ||
422 | static void | 519 | static void |
423 | minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, | 520 | minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary) |
424 | bool primary) | ||
425 | { | 521 | { |
426 | int group, orig_group; | 522 | int group, orig_group; |
427 | 523 | ||
@@ -437,9 +533,9 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, | |||
437 | continue; | 533 | continue; |
438 | 534 | ||
439 | if (primary) | 535 | if (primary) |
440 | *idx = mi->groups[group].max_tp_rate; | 536 | *idx = mi->groups[group].max_group_tp_rate[0]; |
441 | else | 537 | else |
442 | *idx = mi->groups[group].max_tp_rate2; | 538 | *idx = mi->groups[group].max_group_tp_rate[1]; |
443 | break; | 539 | break; |
444 | } | 540 | } |
445 | } | 541 | } |
@@ -524,19 +620,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, | |||
524 | * check for sudden death of spatial multiplexing, | 620 | * check for sudden death of spatial multiplexing, |
525 | * downgrade to a lower number of streams if necessary. | 621 | * downgrade to a lower number of streams if necessary. |
526 | */ | 622 | */ |
527 | rate = minstrel_get_ratestats(mi, mi->max_tp_rate); | 623 | rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); |
528 | if (rate->attempts > 30 && | 624 | if (rate->attempts > 30 && |
529 | MINSTREL_FRAC(rate->success, rate->attempts) < | 625 | MINSTREL_FRAC(rate->success, rate->attempts) < |
530 | MINSTREL_FRAC(20, 100)) { | 626 | MINSTREL_FRAC(20, 100)) { |
531 | minstrel_downgrade_rate(mi, &mi->max_tp_rate, true); | 627 | minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); |
532 | update = true; | 628 | update = true; |
533 | } | 629 | } |
534 | 630 | ||
535 | rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2); | 631 | rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); |
536 | if (rate2->attempts > 30 && | 632 | if (rate2->attempts > 30 && |
537 | MINSTREL_FRAC(rate2->success, rate2->attempts) < | 633 | MINSTREL_FRAC(rate2->success, rate2->attempts) < |
538 | MINSTREL_FRAC(20, 100)) { | 634 | MINSTREL_FRAC(20, 100)) { |
539 | minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false); | 635 | minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); |
540 | update = true; | 636 | update = true; |
541 | } | 637 | } |
542 | 638 | ||
@@ -661,12 +757,12 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
661 | if (!rates) | 757 | if (!rates) |
662 | return; | 758 | return; |
663 | 759 | ||
664 | /* Start with max_tp_rate */ | 760 | /* Start with max_tp_rate[0] */ |
665 | minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate); | 761 | minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]); |
666 | 762 | ||
667 | if (mp->hw->max_rates >= 3) { | 763 | if (mp->hw->max_rates >= 3) { |
668 | /* At least 3 tx rates supported, use max_tp_rate2 next */ | 764 | /* At least 3 tx rates supported, use max_tp_rate[1] next */ |
669 | minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate2); | 765 | minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]); |
670 | } | 766 | } |
671 | 767 | ||
672 | if (mp->hw->max_rates >= 2) { | 768 | if (mp->hw->max_rates >= 2) { |
@@ -691,7 +787,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
691 | { | 787 | { |
692 | struct minstrel_rate_stats *mr; | 788 | struct minstrel_rate_stats *mr; |
693 | struct minstrel_mcs_group_data *mg; | 789 | struct minstrel_mcs_group_data *mg; |
694 | unsigned int sample_dur, sample_group; | 790 | unsigned int sample_dur, sample_group, cur_max_tp_streams; |
695 | int sample_idx = 0; | 791 | int sample_idx = 0; |
696 | 792 | ||
697 | if (mi->sample_wait > 0) { | 793 | if (mi->sample_wait > 0) { |
@@ -718,8 +814,8 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
718 | * to the frame. Hence, don't use sampling for the currently | 814 | * to the frame. Hence, don't use sampling for the currently |
719 | * used rates. | 815 | * used rates. |
720 | */ | 816 | */ |
721 | if (sample_idx == mi->max_tp_rate || | 817 | if (sample_idx == mi->max_tp_rate[0] || |
722 | sample_idx == mi->max_tp_rate2 || | 818 | sample_idx == mi->max_tp_rate[1] || |
723 | sample_idx == mi->max_prob_rate) | 819 | sample_idx == mi->max_prob_rate) |
724 | return -1; | 820 | return -1; |
725 | 821 | ||
@@ -734,9 +830,12 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | |||
734 | * Make sure that lower rates get sampled only occasionally, | 830 | * Make sure that lower rates get sampled only occasionally, |
735 | * if the link is working perfectly. | 831 | * if the link is working perfectly. |
736 | */ | 832 | */ |
833 | |||
834 | cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] / | ||
835 | MCS_GROUP_RATES].streams; | ||
737 | sample_dur = minstrel_get_duration(sample_idx); | 836 | sample_dur = minstrel_get_duration(sample_idx); |
738 | if (sample_dur >= minstrel_get_duration(mi->max_tp_rate2) && | 837 | if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) && |
739 | (mi->max_prob_streams < | 838 | (cur_max_tp_streams - 1 < |
740 | minstrel_mcs_groups[sample_group].streams || | 839 | minstrel_mcs_groups[sample_group].streams || |
741 | sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { | 840 | sample_dur >= minstrel_get_duration(mi->max_prob_rate))) { |
742 | if (mr->sample_skipped < 20) | 841 | if (mr->sample_skipped < 20) |
@@ -1041,8 +1140,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta) | |||
1041 | if (!msp->is_ht) | 1140 | if (!msp->is_ht) |
1042 | return mac80211_minstrel.get_expected_throughput(priv_sta); | 1141 | return mac80211_minstrel.get_expected_throughput(priv_sta); |
1043 | 1142 | ||
1044 | i = mi->max_tp_rate / MCS_GROUP_RATES; | 1143 | i = mi->max_tp_rate[0] / MCS_GROUP_RATES; |
1045 | j = mi->max_tp_rate % MCS_GROUP_RATES; | 1144 | j = mi->max_tp_rate[0] % MCS_GROUP_RATES; |
1046 | 1145 | ||
1047 | /* convert cur_tp from pkt per second in kbps */ | 1146 | /* convert cur_tp from pkt per second in kbps */ |
1048 | return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024; | 1147 | return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024; |
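
As a closing note on the last hunk: the expected-throughput helper converts cur_tp from packets per second to kbps. A hedged arithmetic sketch, assuming AVG_PKT_SIZE is 1200 bytes (minstrel's average packet size, defined outside this diff):

```c
#include <stdio.h>

#define AVG_PKT_SIZE 1200	/* assumption: average packet size in bytes */

/* Same arithmetic as the return statement in minstrel_ht_get_expected_throughput() */
static unsigned int pkt_per_sec_to_kbps(unsigned int cur_tp)
{
	return cur_tp * AVG_PKT_SIZE * 8 / 1024;
}

int main(void)
{
	/* e.g. 1000 pkt/s * 1200 B * 8 bit / 1024 = 9375 kbps */
	printf("%u kbps\n", pkt_per_sec_to_kbps(1000));
	return 0;
}
```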