path: root/net/mac80211/rc80211_minstrel.c
Diffstat (limited to 'net/mac80211/rc80211_minstrel.c')
-rw-r--r--   net/mac80211/rc80211_minstrel.c   204
1 file changed, 114 insertions(+), 90 deletions(-)
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index eea45a2c7c35..1c36c9b4fa4a 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -55,7 +55,6 @@
 #include "rate.h"
 #include "rc80211_minstrel.h"
 
-#define SAMPLE_COLUMNS	10
 #define SAMPLE_TBL(_mi, _idx, _col) \
 	_mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col]
 
@@ -70,16 +69,31 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
 	return i;
 }
 
+/* find & sort topmost throughput rates */
+static inline void
+minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
+{
+	int j = MAX_THR_RATES;
+
+	while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp)
+		j--;
+	if (j < MAX_THR_RATES - 1)
+		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
+	if (j < MAX_THR_RATES)
+		tp_list[j] = i;
+}
+
 static void
 minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 {
-	u32 max_tp = 0, index_max_tp = 0, index_max_tp2 = 0;
-	u32 max_prob = 0, index_max_prob = 0;
+	u8 tmp_tp_rate[MAX_THR_RATES];
+	u8 tmp_prob_rate = 0;
 	u32 usecs;
-	u32 p;
 	int i;
 
-	mi->stats_update = jiffies;
+	for (i=0; i < MAX_THR_RATES; i++)
+		tmp_tp_rate[i] = 0;
+
 	for (i = 0; i < mi->n_rates; i++) {
 		struct minstrel_rate *mr = &mi->r[i];
 
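The new minstrel_sort_best_tp_rates() keeps tp_list ordered by descending cur_tp and inserts rate index i at its proper position. A minimal standalone sketch of that insertion step, using a plain cur_tp[] array in place of mi->r[] and assuming MAX_THR_RATES is 4 (the constant lives in rc80211_minstrel.h, which is not part of this diff):

#include <stdio.h>
#include <string.h>

#define MAX_THR_RATES 4			/* assumed; defined in rc80211_minstrel.h */

/* stands in for mi->r[i].cur_tp */
static const unsigned int cur_tp[] = { 120, 540, 300, 610, 90, 450 };

/* same insertion logic as minstrel_sort_best_tp_rates(), on plain arrays */
static void sort_best_tp_rates(int i, unsigned char *tp_list)
{
	int j = MAX_THR_RATES;

	while (j > 0 && cur_tp[i] > cur_tp[tp_list[j - 1]])
		j--;
	if (j < MAX_THR_RATES - 1)
		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
	if (j < MAX_THR_RATES)
		tp_list[j] = i;
}

int main(void)
{
	unsigned char tp_list[MAX_THR_RATES] = { 0 };
	int i;

	for (i = 0; i < 6; i++)
		sort_best_tp_rates(i, tp_list);

	/* prints the four best rate indexes, best first: 3 1 5 2 */
	for (i = 0; i < MAX_THR_RATES; i++)
		printf("%d ", tp_list[i]);
	printf("\n");
	return 0;
}

Because tmp_tp_rate[] starts out filled with 0, rate index 0 occupies every slot until rates with higher cur_tp displace it.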
@@ -87,27 +101,32 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 		if (!usecs)
 			usecs = 1000000;
 
-		/* To avoid rounding issues, probabilities scale from 0 (0%)
-		 * to 18000 (100%) */
-		if (mr->attempts) {
-			p = (mr->success * 18000) / mr->attempts;
+		if (unlikely(mr->attempts > 0)) {
+			mr->sample_skipped = 0;
+			mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
 			mr->succ_hist += mr->success;
 			mr->att_hist += mr->attempts;
-			mr->cur_prob = p;
-			p = ((p * (100 - mp->ewma_level)) + (mr->probability *
-				mp->ewma_level)) / 100;
-			mr->probability = p;
-			mr->cur_tp = p * (1000000 / usecs);
-		}
+			mr->probability = minstrel_ewma(mr->probability,
+							mr->cur_prob,
+							EWMA_LEVEL);
+		} else
+			mr->sample_skipped++;
 
 		mr->last_success = mr->success;
 		mr->last_attempts = mr->attempts;
 		mr->success = 0;
 		mr->attempts = 0;
 
+		/* Update throughput per rate, reset thr. below 10% success */
+		if (mr->probability < MINSTREL_FRAC(10, 100))
+			mr->cur_tp = 0;
+		else
+			mr->cur_tp = mr->probability * (1000000 / usecs);
+
 		/* Sample less often below the 10% chance of success.
 		 * Sample less often above the 95% chance of success. */
-		if ((mr->probability > 17100) || (mr->probability < 1800)) {
+		if (mr->probability > MINSTREL_FRAC(95, 100) ||
+				mr->probability < MINSTREL_FRAC(10, 100)) {
 			mr->adjusted_retry_count = mr->retry_count >> 1;
 			if (mr->adjusted_retry_count > 2)
 				mr->adjusted_retry_count = 2;
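The open-coded 0..18000 probability scale and mp->ewma_level are replaced by the MINSTREL_FRAC()/minstrel_ewma() helpers and the EWMA_LEVEL constant from rc80211_minstrel.h, none of which appear in this diff. A small sketch of the arithmetic under assumed definitions (16-bit fixed-point scale, 75% weight on the old average):

#include <stdio.h>

/* assumed helpers, modelled on rc80211_minstrel.h (not part of this diff) */
#define MINSTREL_SCALE	16
#define MINSTREL_FRAC(val, div)	(((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val)	((val) >> MINSTREL_SCALE)
#define EWMA_LEVEL	75	/* weight of the old average, in percent */

/* exponentially weighted moving average, as used for mr->probability */
static int minstrel_ewma(int old_avg, int cur, int weight)
{
	return (cur * (100 - weight) + old_avg * weight) / 100;
}

int main(void)
{
	int cur_prob = MINSTREL_FRAC(18, 20);		/* 18 of 20 frames ok: 90% */
	int probability = MINSTREL_FRAC(70, 100);	/* previous average: 70% */

	probability = minstrel_ewma(probability, cur_prob, EWMA_LEVEL);

	/* 0.25 * 90% + 0.75 * 70% = 75% (prints 74 due to truncation) */
	printf("cur_prob %d%%, probability %d%%\n",
	       MINSTREL_TRUNC(cur_prob * 100),
	       MINSTREL_TRUNC(probability * 100));
	return 0;
}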
@@ -118,35 +137,30 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 		}
 		if (!mr->adjusted_retry_count)
 			mr->adjusted_retry_count = 2;
-	}
 
-	for (i = 0; i < mi->n_rates; i++) {
-		struct minstrel_rate *mr = &mi->r[i];
-		if (max_tp < mr->cur_tp) {
-			index_max_tp = i;
-			max_tp = mr->cur_tp;
-		}
-		if (max_prob < mr->probability) {
-			index_max_prob = i;
-			max_prob = mr->probability;
+		minstrel_sort_best_tp_rates(mi, i, tmp_tp_rate);
+
+		/* To determine the most robust rate (max_prob_rate) used at
+		 * 3rd mmr stage we distinct between two cases:
+		 * (1) if any success probabilitiy >= 95%, out of those rates
+		 * choose the maximum throughput rate as max_prob_rate
+		 * (2) if all success probabilities < 95%, the rate with
+		 * highest success probability is choosen as max_prob_rate */
+		if (mr->probability >= MINSTREL_FRAC(95,100)) {
+			if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
+				tmp_prob_rate = i;
+		} else {
+			if (mr->probability >= mi->r[tmp_prob_rate].probability)
+				tmp_prob_rate = i;
 		}
 	}
 
-	max_tp = 0;
-	for (i = 0; i < mi->n_rates; i++) {
-		struct minstrel_rate *mr = &mi->r[i];
-
-		if (i == index_max_tp)
-			continue;
+	/* Assign the new rate set */
+	memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
+	mi->max_prob_rate = tmp_prob_rate;
 
-		if (max_tp < mr->cur_tp) {
-			index_max_tp2 = i;
-			max_tp = mr->cur_tp;
-		}
-	}
-	mi->max_tp_rate = index_max_tp;
-	mi->max_tp_rate2 = index_max_tp2;
-	mi->max_prob_rate = index_max_prob;
+	/* Reset update timer */
+	mi->stats_update = jiffies;
 }
 
 static void
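Each rate's cur_tp is its smoothed success probability (fixed point) multiplied by the number of perfect transmissions that fit into one second, so both the tp_list sort and the max_prob_rate tie-break above compare like-for-like scaled throughputs. A worked sketch with the same assumed helpers as before:

#include <stdio.h>

/* same assumed fixed-point helpers as in the previous sketch */
#define MINSTREL_SCALE	16
#define MINSTREL_FRAC(val, div)	(((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val)	((val) >> MINSTREL_SCALE)

int main(void)
{
	unsigned int usecs = 1500;	/* perfect tx time of one frame, in us */
	unsigned int probability = MINSTREL_FRAC(85, 100);	/* 85% success */
	unsigned int cur_tp;

	/* rates below 10% success are treated as having no throughput */
	if (probability < MINSTREL_FRAC(10, 100))
		cur_tp = 0;
	else
		cur_tp = probability * (1000000 / usecs);

	/* roughly 0.85 * 666 = 566 successfully delivered frames per second */
	printf("cur_tp = %u frames/s\n", MINSTREL_TRUNC(cur_tp));
	return 0;
}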
@@ -207,10 +221,10 @@ static int
 minstrel_get_next_sample(struct minstrel_sta_info *mi)
 {
 	unsigned int sample_ndx;
-	sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
-	mi->sample_idx++;
-	if ((int) mi->sample_idx > (mi->n_rates - 2)) {
-		mi->sample_idx = 0;
+	sample_ndx = SAMPLE_TBL(mi, mi->sample_row, mi->sample_column);
+	mi->sample_row++;
+	if ((int) mi->sample_row >= mi->n_rates) {
+		mi->sample_row = 0;
 		mi->sample_column++;
 		if (mi->sample_column >= SAMPLE_COLUMNS)
 			mi->sample_column = 0;
@@ -228,31 +242,37 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 	struct minstrel_priv *mp = priv;
 	struct ieee80211_tx_rate *ar = info->control.rates;
 	unsigned int ndx, sample_ndx = 0;
-	bool mrr;
-	bool sample_slower = false;
-	bool sample = false;
+	bool mrr_capable;
+	bool indirect_rate_sampling = false;
+	bool rate_sampling = false;
 	int i, delta;
 	int mrr_ndx[3];
-	int sample_rate;
+	int sampling_ratio;
 
+	/* management/no-ack frames do not use rate control */
 	if (rate_control_send_low(sta, priv_sta, txrc))
 		return;
 
-	mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
-
-	ndx = mi->max_tp_rate;
-
-	if (mrr)
-		sample_rate = mp->lookaround_rate_mrr;
+	/* check multi-rate-retry capabilities & adjust lookaround_rate */
+	mrr_capable = mp->has_mrr &&
+		      !txrc->rts &&
+		      !txrc->bss_conf->use_cts_prot;
+	if (mrr_capable)
+		sampling_ratio = mp->lookaround_rate_mrr;
 	else
-		sample_rate = mp->lookaround_rate;
+		sampling_ratio = mp->lookaround_rate;
+
+	/* init rateindex [ndx] with max throughput rate */
+	ndx = mi->max_tp_rate[0];
 
+	/* increase sum packet counter */
 	mi->packet_count++;
-	delta = (mi->packet_count * sample_rate / 100) -
+
+	delta = (mi->packet_count * sampling_ratio / 100) -
 		(mi->sample_count + mi->sample_deferred / 2);
 
 	/* delta > 0: sampling required */
-	if ((delta > 0) && (mrr || !mi->prev_sample)) {
+	if ((delta > 0) && (mrr_capable || !mi->prev_sample)) {
 		struct minstrel_rate *msr;
 		if (mi->packet_count >= 10000) {
 			mi->sample_deferred = 0;
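With the renamed sampling_ratio, the per-packet budget check stays the same: sample when the target share of lookaround frames (10% with MRR, 5% without, per the minstrel_alloc() defaults visible in the last hunk) exceeds the samples already taken plus half of the deferred ones. A toy calculation:

#include <stdio.h>

int main(void)
{
	int packet_count = 200;		/* frames handed to minstrel so far */
	int sample_count = 17;		/* sampling attempts already made */
	int sample_deferred = 4;	/* samples pushed to the 2nd mrr stage */
	int mrr_capable = 1;

	/* defaults set in minstrel_alloc(): 10 with MRR, 5 without */
	int sampling_ratio = mrr_capable ? 10 : 5;

	/* delta > 0 means we are behind the lookaround budget */
	int delta = (packet_count * sampling_ratio / 100) -
		    (sample_count + sample_deferred / 2);

	printf("budget %d, spent %d, delta %d -> %s\n",
	       packet_count * sampling_ratio / 100,
	       sample_count + sample_deferred / 2,
	       delta, delta > 0 ? "sample a rate" : "no sampling");
	return 0;
}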
@@ -271,21 +291,28 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 			mi->sample_count += (delta - mi->n_rates * 2);
 		}
 
+		/* get next random rate sample */
 		sample_ndx = minstrel_get_next_sample(mi);
 		msr = &mi->r[sample_ndx];
-		sample = true;
-		sample_slower = mrr && (msr->perfect_tx_time >
-			mi->r[ndx].perfect_tx_time);
-
-		if (!sample_slower) {
+		rate_sampling = true;
+
+		/* Decide if direct ( 1st mrr stage) or indirect (2nd mrr stage)
+		 * rate sampling method should be used.
+		 * Respect such rates that are not sampled for 20 interations.
+		 */
+		if (mrr_capable &&
+		    msr->perfect_tx_time > mi->r[ndx].perfect_tx_time &&
+		    msr->sample_skipped < 20)
+			indirect_rate_sampling = true;
+
+		if (!indirect_rate_sampling) {
 			if (msr->sample_limit != 0) {
 				ndx = sample_ndx;
 				mi->sample_count++;
 				if (msr->sample_limit > 0)
 					msr->sample_limit--;
-			} else {
-				sample = false;
-			}
+			} else
+				rate_sampling = false;
 		} else {
 			/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
 			 * packets that have the sampling rate deferred to the
@@ -297,34 +324,39 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 			mi->sample_deferred++;
 		}
 	}
-	mi->prev_sample = sample;
+	mi->prev_sample = rate_sampling;
 
 	/* If we're not using MRR and the sampling rate already
 	 * has a probability of >95%, we shouldn't be attempting
 	 * to use it, as this only wastes precious airtime */
-	if (!mrr && sample && (mi->r[ndx].probability > 17100))
-		ndx = mi->max_tp_rate;
+	if (!mrr_capable && rate_sampling &&
+	    (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
+		ndx = mi->max_tp_rate[0];
 
+	/* mrr setup for 1st stage */
 	ar[0].idx = mi->r[ndx].rix;
 	ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info);
 
-	if (!mrr) {
-		if (!sample)
+	/* non mrr setup for 2nd stage */
+	if (!mrr_capable) {
+		if (!rate_sampling)
 			ar[0].count = mp->max_retry;
 		ar[1].idx = mi->lowest_rix;
 		ar[1].count = mp->max_retry;
 		return;
 	}
 
-	/* MRR setup */
-	if (sample) {
-		if (sample_slower)
+	/* mrr setup for 2nd stage */
+	if (rate_sampling) {
+		if (indirect_rate_sampling)
 			mrr_ndx[0] = sample_ndx;
 		else
-			mrr_ndx[0] = mi->max_tp_rate;
+			mrr_ndx[0] = mi->max_tp_rate[0];
 	} else {
-		mrr_ndx[0] = mi->max_tp_rate2;
+		mrr_ndx[0] = mi->max_tp_rate[1];
 	}
+
+	/* mrr setup for 3rd & 4th stage */
 	mrr_ndx[1] = mi->max_prob_rate;
 	mrr_ndx[2] = 0;
 	for (i = 1; i < 4; i++) {
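With the array-valued max_tp_rate, the four MRR stages end up as: the chosen (sampling or best-throughput) rate, then mrr_ndx[0] (best or second-best throughput, or a deferred sample), then the most robust rate, then rate index 0. A compact illustration with hypothetical index values, not taken from a real capture:

#include <stdio.h>

int main(void)
{
	/* hypothetical rate indexes standing in for the mi->... fields */
	int max_tp_rate[2] = { 11, 9 };	/* mi->max_tp_rate[0] and [1] */
	int max_prob_rate = 4;		/* most robust rate */
	int ndx = max_tp_rate[0];	/* 1st stage: no sampling this frame */
	int mrr_ndx[3];
	int i;

	/* chain as assembled at the end of minstrel_get_rate() */
	mrr_ndx[0] = max_tp_rate[1];	/* 2nd stage: second best throughput */
	mrr_ndx[1] = max_prob_rate;	/* 3rd stage: highest success probability */
	mrr_ndx[2] = 0;			/* 4th stage: lowest rate */

	printf("retry chain: %d", ndx);
	for (i = 0; i < 3; i++)
		printf(" -> %d", mrr_ndx[i]);
	printf("\n");
	return 0;
}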
@@ -351,26 +383,21 @@ static void
 init_sample_table(struct minstrel_sta_info *mi)
 {
 	unsigned int i, col, new_idx;
-	unsigned int n_srates = mi->n_rates - 1;
 	u8 rnd[8];
 
 	mi->sample_column = 0;
-	mi->sample_idx = 0;
-	memset(mi->sample_table, 0, SAMPLE_COLUMNS * mi->n_rates);
+	mi->sample_row = 0;
+	memset(mi->sample_table, 0xff, SAMPLE_COLUMNS * mi->n_rates);
 
 	for (col = 0; col < SAMPLE_COLUMNS; col++) {
-		for (i = 0; i < n_srates; i++) {
+		for (i = 0; i < mi->n_rates; i++) {
 			get_random_bytes(rnd, sizeof(rnd));
-			new_idx = (i + rnd[i & 7]) % n_srates;
+			new_idx = (i + rnd[i & 7]) % mi->n_rates;
 
-			while (SAMPLE_TBL(mi, new_idx, col) != 0)
-				new_idx = (new_idx + 1) % n_srates;
+			while (SAMPLE_TBL(mi, new_idx, col) != 0xff)
+				new_idx = (new_idx + 1) % mi->n_rates;
 
-			/* Don't sample the slowest rate (i.e. slowest base
-			 * rate). We must presume that the slowest rate works
-			 * fine, or else other management frames will also be
-			 * failing and the link will break */
-			SAMPLE_TBL(mi, new_idx, col) = i + 1;
+			SAMPLE_TBL(mi, new_idx, col) = i;
 		}
 	}
 }
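init_sample_table() now fills every column with a random permutation of all rate indexes, using 0xff as the empty-slot marker so that index 0 (the lowest rate, previously never sampled) can be stored too. A userspace sketch of the same fill, with rand() standing in for get_random_bytes() and SAMPLE_COLUMNS assumed to keep the value of 10 removed from this file:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SAMPLE_COLUMNS	10	/* assumed to keep the removed value */

/* same flattening as the kernel's SAMPLE_TBL() macro */
#define SAMPLE_TBL(tbl, idx, col)	(tbl)[(idx) * SAMPLE_COLUMNS + (col)]

int main(void)
{
	unsigned int n_rates = 4, i, col, new_idx;
	unsigned char *sample_table = malloc(SAMPLE_COLUMNS * n_rates);

	/* 0xff marks an empty slot, so rate index 0 can now be stored too */
	memset(sample_table, 0xff, SAMPLE_COLUMNS * n_rates);

	for (col = 0; col < SAMPLE_COLUMNS; col++) {
		for (i = 0; i < n_rates; i++) {
			/* rand() stands in for get_random_bytes() */
			new_idx = (i + (unsigned int)rand()) % n_rates;

			while (SAMPLE_TBL(sample_table, new_idx, col) != 0xff)
				new_idx = (new_idx + 1) % n_rates;

			SAMPLE_TBL(sample_table, new_idx, col) = i;
		}
	}

	/* each printed line is one column: a random permutation of 0..3 */
	for (col = 0; col < SAMPLE_COLUMNS; col++) {
		for (i = 0; i < n_rates; i++)
			printf("%d ", SAMPLE_TBL(sample_table, i, col));
		printf("\n");
	}
	free(sample_table);
	return 0;
}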
@@ -542,9 +569,6 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 	mp->lookaround_rate = 5;
 	mp->lookaround_rate_mrr = 10;
 
-	/* moving average weight for EWMA */
-	mp->ewma_level = 75;
-
 	/* maximum time that the hw is allowed to stay in one MRR segment */
 	mp->segment_size = 6000;
 