Diffstat (limited to 'drivers/net/wireless/ath/ath5k/qcu.c')
-rw-r--r--  drivers/net/wireless/ath/ath5k/qcu.c | 777
1 files changed, 449 insertions, 328 deletions
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 4186ff4c6e9c..b18c5021aac3 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -25,6 +25,68 @@ Queue Control Unit, DFS Control Unit Functions
 #include "debug.h"
 #include "base.h"
 
+
+/******************\
+* Helper functions *
+\******************/
+
+/*
+ * Get number of pending frames
+ * for a specific queue [5211+]
+ */
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+{
+	u32 pending;
+	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+	/* Return if queue is declared inactive */
+	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+		return false;
+
+	/* XXX: How about AR5K_CFG_TXCNT ? */
+	if (ah->ah_version == AR5K_AR5210)
+		return false;
+
+	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
+	pending &= AR5K_QCU_STS_FRMPENDCNT;
+
+	/* It's possible to have no frames pending even if TXE
+	 * is set. To indicate that q has not stopped return
+	 * true */
+	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+		return true;
+
+	return pending;
+}
+
+/*
+ * Set a transmit queue inactive
+ */
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
+		return;
+
+	/* This queue will be skipped in further operations */
+	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
+	/*For SIMR setup*/
+	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
+}
+
+/*
+ * Make sure cw is a power of 2 minus 1 and smaller than 1024
+ */
+static u16 ath5k_cw_validate(u16 cw_req)
+{
+	u32 cw = 1;
+	cw_req = min(cw_req, (u16)1023);
+
+	while (cw < cw_req)
+		cw = (cw << 1) | 1;
+
+	return cw;
+}
+
 /*
  * Get properties for a transmit queue
  */
@@ -39,21 +101,41 @@ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
  * Set properties for a transmit queue
  */
 int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
-				const struct ath5k_txq_info *queue_info)
+				const struct ath5k_txq_info *qinfo)
 {
+	struct ath5k_txq_info *qi;
+
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
-	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+	qi = &ah->ah_txq[queue];
+
+	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
 		return -EIO;
 
-	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
+	/* copy and validate values */
+	qi->tqi_type = qinfo->tqi_type;
+	qi->tqi_subtype = qinfo->tqi_subtype;
+	qi->tqi_flags = qinfo->tqi_flags;
+	/*
+	 * According to the docs: Although the AIFS field is 8 bit wide,
+	 * the maximum supported value is 0xFC. Setting it higher than that
+	 * will cause the DCU to hang.
+	 */
+	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
+	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
+	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
+	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
+	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
+	qi->tqi_burst_time = qinfo->tqi_burst_time;
+	qi->tqi_ready_time = qinfo->tqi_ready_time;
 
 	/*XXX: Is this supported on 5210 ?*/
-	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
-	    ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
-	    (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
-	    queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
-		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
+	/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
+	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
+	    ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
+	    (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
+	    qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
+		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
 
 	return 0;
 }
@@ -70,8 +152,8 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
 	/*
 	 * Get queue by type
 	 */
-	/*5210 only has 2 queues*/
-	if (ah->ah_version == AR5K_AR5210) {
+	/* 5210 only has 2 queues */
+	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
 		switch (queue_type) {
 		case AR5K_TX_QUEUE_DATA:
 			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
@@ -138,392 +220,431 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
138 return queue; 220 return queue;
139} 221}
140 222
141/*
142 * Get number of pending frames
143 * for a specific queue [5211+]
144 */
145u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
146{
147 u32 pending;
148 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
149
150 /* Return if queue is declared inactive */
151 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
152 return false;
153
154 /* XXX: How about AR5K_CFG_TXCNT ? */
155 if (ah->ah_version == AR5K_AR5210)
156 return false;
157
158 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
159 pending &= AR5K_QCU_STS_FRMPENDCNT;
160
161 /* It's possible to have no frames pending even if TXE
162 * is set. To indicate that q has not stopped return
163 * true */
164 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
165 return true;
166 223
167 return pending; 224/*******************************\
168} 225* Single QCU/DCU initialization *
226\*******************************/
169 227
170/* 228/*
171 * Set a transmit queue inactive 229 * Set tx retry limits on DCU
172 */ 230 */
173void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) 231void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
232 unsigned int queue)
174{ 233{
175 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) 234 /* Single data queue on AR5210 */
176 return; 235 if (ah->ah_version == AR5K_AR5210) {
236 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
177 237
178 /* This queue will be skipped in further operations */ 238 if (queue > 0)
179 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; 239 return;
180 /*For SIMR setup*/ 240
181 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); 241 ath5k_hw_reg_write(ah,
242 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
243 | AR5K_REG_SM(ah->ah_retry_long,
244 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
245 | AR5K_REG_SM(ah->ah_retry_short,
246 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
247 | AR5K_REG_SM(ah->ah_retry_long,
248 AR5K_NODCU_RETRY_LMT_LG_RETRY)
249 | AR5K_REG_SM(ah->ah_retry_short,
250 AR5K_NODCU_RETRY_LMT_SH_RETRY),
251 AR5K_NODCU_RETRY_LMT);
252 /* DCU on AR5211+ */
253 } else {
254 ath5k_hw_reg_write(ah,
255 AR5K_REG_SM(ah->ah_retry_long,
256 AR5K_DCU_RETRY_LMT_RTS)
257 | AR5K_REG_SM(ah->ah_retry_long,
258 AR5K_DCU_RETRY_LMT_STA_RTS)
259 | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
260 AR5K_DCU_RETRY_LMT_STA_DATA),
261 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
262 }
182} 263}
183 264
184/* 265/**
185 * Set DFS properties for a transmit queue on DCU 266 * ath5k_hw_reset_tx_queue - Initialize a single hw queue
267 *
268 * @ah The &struct ath5k_hw
269 * @queue The hw queue number
270 *
271 * Set DFS properties for the given transmit queue on DCU
272 * and configures all queue-specific parameters.
186 */ 273 */
187int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) 274int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
188{ 275{
189 u32 cw_min, cw_max, retry_lg, retry_sh;
190 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; 276 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
191 277
192 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 278 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
193 279
194 tq = &ah->ah_txq[queue]; 280 tq = &ah->ah_txq[queue];
195 281
196 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE) 282 /* Skip if queue inactive or if we are on AR5210
283 * that doesn't have QCU/DCU */
284 if ((ah->ah_version == AR5K_AR5210) ||
285 (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
197 return 0; 286 return 0;
198 287
199 if (ah->ah_version == AR5K_AR5210) {
200 /* Only handle data queues, others will be ignored */
201 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
202 return 0;
203
204 /* Set Slot time */
205 ath5k_hw_reg_write(ah, ah->ah_turbo ?
206 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
207 AR5K_SLOT_TIME);
208 /* Set ACK_CTS timeout */
209 ath5k_hw_reg_write(ah, ah->ah_turbo ?
210 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
211 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
212 /* Set Transmit Latency */
213 ath5k_hw_reg_write(ah, ah->ah_turbo ?
214 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
215 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
216
217 /* Set IFS0 */
218 if (ah->ah_turbo) {
219 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
220 (ah->ah_aifs + tq->tqi_aifs) *
221 AR5K_INIT_SLOT_TIME_TURBO) <<
222 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
223 AR5K_IFS0);
224 } else {
225 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
226 (ah->ah_aifs + tq->tqi_aifs) *
227 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
228 AR5K_INIT_SIFS, AR5K_IFS0);
229 }
230
231 /* Set IFS1 */
232 ath5k_hw_reg_write(ah, ah->ah_turbo ?
233 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
234 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
235 /* Set AR5K_PHY_SETTLING */
236 ath5k_hw_reg_write(ah, ah->ah_turbo ?
237 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
238 | 0x38 :
239 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
240 | 0x1C,
241 AR5K_PHY_SETTLING);
242 /* Set Frame Control Register */
243 ath5k_hw_reg_write(ah, ah->ah_turbo ?
244 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
245 AR5K_PHY_TURBO_SHORT | 0x2020) :
246 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
247 AR5K_PHY_FRAME_CTL_5210);
248 }
249
250 /* 288 /*
251 * Calculate cwmin/max by channel mode 289 * Set contention window (cw_min/cw_max)
290 * and arbitrated interframe space (aifs)...
252 */ 291 */
253 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN; 292 ath5k_hw_reg_write(ah,
254 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX; 293 AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
255 ah->ah_aifs = AR5K_TUNE_AIFS; 294 AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
256 /*XR is only supported on 5212*/ 295 AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
257 if (IS_CHAN_XR(ah->ah_current_channel) && 296 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
258 ah->ah_version == AR5K_AR5212) {
259 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
260 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
261 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
262 /*B mode is not supported on 5210*/
263 } else if (IS_CHAN_B(ah->ah_current_channel) &&
264 ah->ah_version != AR5K_AR5210) {
265 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
266 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
267 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
268 }
269 297
270 cw_min = 1; 298 /*
271 while (cw_min < ah->ah_cw_min) 299 * Set tx retry limits for this queue
272 cw_min = (cw_min << 1) | 1; 300 */
301 ath5k_hw_set_tx_retry_limits(ah, queue);
273 302
274 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
275 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
276 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
277 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
278 303
279 /* 304 /*
280 * Calculate and set retry limits 305 * Set misc registers
281 */ 306 */
282 if (ah->ah_software_retry) {
283 /* XXX Need to test this */
284 retry_lg = ah->ah_limit_tx_retries;
285 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
286 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
287 } else {
288 retry_lg = AR5K_INIT_LG_RETRY;
289 retry_sh = AR5K_INIT_SH_RETRY;
290 }
291 307
292 /*No QCU/DCU [5210]*/ 308 /* Enable DCU to wait for next fragment from QCU */
293 if (ah->ah_version == AR5K_AR5210) { 309 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
294 ath5k_hw_reg_write(ah, 310 AR5K_DCU_MISC_FRAG_WAIT);
295 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
296 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
297 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
298 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
299 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
300 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
301 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
302 AR5K_NODCU_RETRY_LMT);
303 } else {
304 /*QCU/DCU [5211+]*/
305 ath5k_hw_reg_write(ah,
306 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
307 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
308 AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
309 AR5K_DCU_RETRY_LMT_SSH_RETRY) |
310 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
311 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
312 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
313 311
314 /*===Rest is also for QCU/DCU only [5211+]===*/ 312 /* On Maui and Spirit use the global seqnum on DCU */
313 if (ah->ah_mac_version < AR5K_SREV_AR5211)
314 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
315 AR5K_DCU_MISC_SEQNUM_CTL);
316
317 /* Constant bit rate period */
318 if (tq->tqi_cbr_period) {
319 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
320 AR5K_QCU_CBRCFG_INTVAL) |
321 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
322 AR5K_QCU_CBRCFG_ORN_THRES),
323 AR5K_QUEUE_CBRCFG(queue));
315 324
316 /*
317 * Set initial content window (cw_min/cw_max)
318 * and arbitrated interframe space (aifs)...
319 */
320 ath5k_hw_reg_write(ah,
321 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
322 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
323 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
324 AR5K_DCU_LCL_IFS_AIFS),
325 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
326
327 /*
328 * Set misc registers
329 */
330 /* Enable DCU early termination for this queue */
331 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 325 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
332 AR5K_QCU_MISC_DCU_EARLY); 326 AR5K_QCU_MISC_FRSHED_CBR);
333 327
334 /* Enable DCU to wait for next fragment from QCU */ 328 if (tq->tqi_cbr_overflow_limit)
335 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
336 AR5K_DCU_MISC_FRAG_WAIT);
337
338 /* On Maui and Spirit use the global seqnum on DCU */
339 if (ah->ah_mac_version < AR5K_SREV_AR5211)
340 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
341 AR5K_DCU_MISC_SEQNUM_CTL);
342
343 if (tq->tqi_cbr_period) {
344 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
345 AR5K_QCU_CBRCFG_INTVAL) |
346 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
347 AR5K_QCU_CBRCFG_ORN_THRES),
348 AR5K_QUEUE_CBRCFG(queue));
349 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 329 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
350 AR5K_QCU_MISC_FRSHED_CBR);
351 if (tq->tqi_cbr_overflow_limit)
352 AR5K_REG_ENABLE_BITS(ah,
353 AR5K_QUEUE_MISC(queue),
354 AR5K_QCU_MISC_CBR_THRES_ENABLE); 330 AR5K_QCU_MISC_CBR_THRES_ENABLE);
355 } 331 }
356 332
357 if (tq->tqi_ready_time && 333 /* Ready time interval */
358 (tq->tqi_type != AR5K_TX_QUEUE_CAB)) 334 if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
359 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time, 335 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
360 AR5K_QCU_RDYTIMECFG_INTVAL) | 336 AR5K_QCU_RDYTIMECFG_INTVAL) |
361 AR5K_QCU_RDYTIMECFG_ENABLE, 337 AR5K_QCU_RDYTIMECFG_ENABLE,
362 AR5K_QUEUE_RDYTIMECFG(queue)); 338 AR5K_QUEUE_RDYTIMECFG(queue));
363 339
364 if (tq->tqi_burst_time) { 340 if (tq->tqi_burst_time) {
365 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time, 341 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
366 AR5K_DCU_CHAN_TIME_DUR) | 342 AR5K_DCU_CHAN_TIME_DUR) |
367 AR5K_DCU_CHAN_TIME_ENABLE, 343 AR5K_DCU_CHAN_TIME_ENABLE,
368 AR5K_QUEUE_DFS_CHANNEL_TIME(queue)); 344 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
369 345
370 if (tq->tqi_flags 346 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
371 & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE) 347 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
372 AR5K_REG_ENABLE_BITS(ah,
373 AR5K_QUEUE_MISC(queue),
374 AR5K_QCU_MISC_RDY_VEOL_POLICY); 348 AR5K_QCU_MISC_RDY_VEOL_POLICY);
375 } 349 }
376 350
377 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE) 351 /* Enable/disable Post frame backoff */
378 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS, 352 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
379 AR5K_QUEUE_DFS_MISC(queue)); 353 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
354 AR5K_QUEUE_DFS_MISC(queue));
380 355
381 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) 356 /* Enable/disable fragmentation burst backoff */
382 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG, 357 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
383 AR5K_QUEUE_DFS_MISC(queue)); 358 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
359 AR5K_QUEUE_DFS_MISC(queue));
384 360
385 /* 361 /*
386 * Set registers by queue type 362 * Set registers by queue type
387 */ 363 */
388 switch (tq->tqi_type) { 364 switch (tq->tqi_type) {
389 case AR5K_TX_QUEUE_BEACON: 365 case AR5K_TX_QUEUE_BEACON:
390 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 366 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
391 AR5K_QCU_MISC_FRSHED_DBA_GT | 367 AR5K_QCU_MISC_FRSHED_DBA_GT |
392 AR5K_QCU_MISC_CBREXP_BCN_DIS | 368 AR5K_QCU_MISC_CBREXP_BCN_DIS |
393 AR5K_QCU_MISC_BCN_ENABLE); 369 AR5K_QCU_MISC_BCN_ENABLE);
394 370
395 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 371 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
396 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << 372 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
397 AR5K_DCU_MISC_ARBLOCK_CTL_S) | 373 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
398 AR5K_DCU_MISC_ARBLOCK_IGNORE | 374 AR5K_DCU_MISC_ARBLOCK_IGNORE |
399 AR5K_DCU_MISC_POST_FR_BKOFF_DIS | 375 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
400 AR5K_DCU_MISC_BCN_ENABLE); 376 AR5K_DCU_MISC_BCN_ENABLE);
401 break; 377 break;
402 378
403 case AR5K_TX_QUEUE_CAB: 379 case AR5K_TX_QUEUE_CAB:
404 /* XXX: use BCN_SENT_GT, if we can figure out how */ 380 /* XXX: use BCN_SENT_GT, if we can figure out how */
405 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 381 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
406 AR5K_QCU_MISC_FRSHED_DBA_GT | 382 AR5K_QCU_MISC_FRSHED_DBA_GT |
407 AR5K_QCU_MISC_CBREXP_DIS | 383 AR5K_QCU_MISC_CBREXP_DIS |
408 AR5K_QCU_MISC_CBREXP_BCN_DIS); 384 AR5K_QCU_MISC_CBREXP_BCN_DIS);
409 385
410 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time - 386 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
411 (AR5K_TUNE_SW_BEACON_RESP - 387 (AR5K_TUNE_SW_BEACON_RESP -
412 AR5K_TUNE_DMA_BEACON_RESP) - 388 AR5K_TUNE_DMA_BEACON_RESP) -
413 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) | 389 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
414 AR5K_QCU_RDYTIMECFG_ENABLE, 390 AR5K_QCU_RDYTIMECFG_ENABLE,
415 AR5K_QUEUE_RDYTIMECFG(queue)); 391 AR5K_QUEUE_RDYTIMECFG(queue));
416 392
417 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 393 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
418 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << 394 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
419 AR5K_DCU_MISC_ARBLOCK_CTL_S)); 395 AR5K_DCU_MISC_ARBLOCK_CTL_S));
420 break; 396 break;
421 397
422 case AR5K_TX_QUEUE_UAPSD: 398 case AR5K_TX_QUEUE_UAPSD:
423 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 399 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
424 AR5K_QCU_MISC_CBREXP_DIS); 400 AR5K_QCU_MISC_CBREXP_DIS);
425 break; 401 break;
426 402
427 case AR5K_TX_QUEUE_DATA: 403 case AR5K_TX_QUEUE_DATA:
428 default: 404 default:
429 break; 405 break;
430 }
431
432 /* TODO: Handle frame compression */
433
434 /*
435 * Enable interrupts for this tx queue
436 * in the secondary interrupt mask registers
437 */
438 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
439 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
440
441 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
442 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
443
444 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
445 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
446
447 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
448 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
449
450 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
451 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
452
453 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
454 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
455
456 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
457 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
458
459 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
460 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
461
462 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
463 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
464
465 /* Update secondary interrupt mask registers */
466
467 /* Filter out inactive queues */
468 ah->ah_txq_imr_txok &= ah->ah_txq_status;
469 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
470 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
471 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
472 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
473 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
474 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
475 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
476 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
477
478 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
479 AR5K_SIMR0_QCU_TXOK) |
480 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
481 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
482 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
483 AR5K_SIMR1_QCU_TXERR) |
484 AR5K_REG_SM(ah->ah_txq_imr_txeol,
485 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
486 /* Update simr2 but don't overwrite rest simr2 settings */
487 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
488 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
489 AR5K_REG_SM(ah->ah_txq_imr_txurn,
490 AR5K_SIMR2_QCU_TXURN));
491 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
492 AR5K_SIMR3_QCBRORN) |
493 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
494 AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
495 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
496 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
497 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
498 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
499 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
500 /* No queue has TXNOFRM enabled, disable the interrupt
501 * by setting AR5K_TXNOFRM to zero */
502 if (ah->ah_txq_imr_nofrm == 0)
503 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
504
505 /* Set QCU mask for this DCU to save power */
506 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
507 } 406 }
508 407
408 /* TODO: Handle frame compression */
409
410 /*
411 * Enable interrupts for this tx queue
412 * in the secondary interrupt mask registers
413 */
414 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
415 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
416
417 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
418 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
419
420 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
421 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
422
423 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
424 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
425
426 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
427 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
428
429 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
430 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
431
432 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
433 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
434
435 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
436 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
437
438 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
439 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
440
441 /* Update secondary interrupt mask registers */
442
443 /* Filter out inactive queues */
444 ah->ah_txq_imr_txok &= ah->ah_txq_status;
445 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
446 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
447 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
448 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
449 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
450 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
451 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
452 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
453
454 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
455 AR5K_SIMR0_QCU_TXOK) |
456 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
457 AR5K_SIMR0_QCU_TXDESC),
458 AR5K_SIMR0);
459
460 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
461 AR5K_SIMR1_QCU_TXERR) |
462 AR5K_REG_SM(ah->ah_txq_imr_txeol,
463 AR5K_SIMR1_QCU_TXEOL),
464 AR5K_SIMR1);
465
466 /* Update SIMR2 but don't overwrite rest simr2 settings */
467 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
468 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
469 AR5K_REG_SM(ah->ah_txq_imr_txurn,
470 AR5K_SIMR2_QCU_TXURN));
471
472 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
473 AR5K_SIMR3_QCBRORN) |
474 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
475 AR5K_SIMR3_QCBRURN),
476 AR5K_SIMR3);
477
478 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
479 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
480
481 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
482 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
483 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
484
485 /* No queue has TXNOFRM enabled, disable the interrupt
486 * by setting AR5K_TXNOFRM to zero */
487 if (ah->ah_txq_imr_nofrm == 0)
488 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
489
490 /* Set QCU mask for this DCU to save power */
491 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
492
509 return 0; 493 return 0;
510} 494}
511 495
512/* 496
513 * Set slot time on DCU 497/**************************\
498* Global QCU/DCU functions *
499\**************************/
500
501/**
502 * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
503 *
504 * @ah The &struct ath5k_hw
505 * @slot_time Slot time in us
506 *
507 * Sets the global IFS intervals on DCU (also works on AR5210) for
508 * the given slot time and the current bwmode.
514 */ 509 */
515int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 510int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
516{ 511{
512 struct ieee80211_channel *channel = ah->ah_current_channel;
513 struct ath5k_softc *sc = ah->ah_sc;
514 struct ieee80211_rate *rate;
515 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
517 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); 516 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
518 517
519 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) 518 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
520 return -EINVAL; 519 return -EINVAL;
521 520
522 if (ah->ah_version == AR5K_AR5210) 521 sifs = ath5k_hw_get_default_sifs(ah);
523 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME); 522 sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
523
524 /* EIFS
525 * Txtime of ack at lowest rate + SIFS + DIFS
526 * (DIFS = SIFS + 2 * Slot time)
527 *
528 * Note: HAL has some predefined values for EIFS
529 * Turbo: (37 + 2 * 6)
530 * Default: (74 + 2 * 9)
531 * Half: (149 + 2 * 13)
532 * Quarter: (298 + 2 * 21)
533 *
534 * (74 + 2 * 6) for AR5210 default and turbo !
535 *
536 * According to the formula we have
537 * ack_tx_time = 25 for turbo and
538 * ack_tx_time = 42.5 * clock multiplier
539 * for default/half/quarter.
540 *
541 * This can't be right, 42 is what we would get
542 * from ath5k_hw_get_frame_dur_for_bwmode or
543 * ieee80211_generic_frame_duration for zero frame
544 * length and without SIFS !
545 *
546 * Also we have different lowest rate for 802.11a
547 */
548 if (channel->hw_value & CHANNEL_5GHZ)
549 rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
524 else 550 else
525 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT); 551 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
552
553 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
554
555 /* ack_tx_time includes an SIFS already */
556 eifs = ack_tx_time + sifs + 2 * slot_time;
557 eifs_clock = ath5k_hw_htoclock(ah, eifs);
558
559 /* Set IFS settings on AR5210 */
560 if (ah->ah_version == AR5K_AR5210) {
561 u32 pifs, pifs_clock, difs, difs_clock;
562
563 /* Set slot time */
564 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
565
566 /* Set EIFS */
567 eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
568
569 /* PIFS = Slot time + SIFS */
570 pifs = slot_time + sifs;
571 pifs_clock = ath5k_hw_htoclock(ah, pifs);
572 pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
573
574 /* DIFS = SIFS + 2 * Slot time */
575 difs = sifs + 2 * slot_time;
576 difs_clock = ath5k_hw_htoclock(ah, difs);
577
578 /* Set SIFS/DIFS */
579 ath5k_hw_reg_write(ah, (difs_clock <<
580 AR5K_IFS0_DIFS_S) | sifs_clock,
581 AR5K_IFS0);
582
583 /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
584 ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
585 (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
586 AR5K_IFS1);
587
588 return 0;
589 }
590
591 /* Set IFS slot time */
592 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
593
594 /* Set EIFS interval */
595 ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
596
597 /* Set SIFS interval in usecs */
598 AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
599 AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
600 sifs);
601
602 /* Set SIFS interval in clock cycles */
603 ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
526 604
527 return 0; 605 return 0;
528} 606}
529 607
608
609int ath5k_hw_init_queues(struct ath5k_hw *ah)
610{
611 int i, ret;
612
613 /* TODO: HW Compression support for data queues */
614 /* TODO: Burst prefetch for data queues */
615
616 /*
617 * Reset queues and start beacon timers at the end of the reset routine
618 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
619 * Note: If we want we can assign multiple qcus on one dcu.
620 */
621 if (ah->ah_version != AR5K_AR5210)
622 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
623 ret = ath5k_hw_reset_tx_queue(ah, i);
624 if (ret) {
625 ATH5K_ERR(ah->ah_sc,
626 "failed to reset TX queue #%d\n", i);
627 return ret;
628 }
629 }
630 else
631 /* No QCU/DCU on AR5210, just set tx
632 * retry limits. We set IFS parameters
633 * on ath5k_hw_set_ifs_intervals */
634 ath5k_hw_set_tx_retry_limits(ah, 0);
635
636 /* Set the turbo flag when operating on 40MHz */
637 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
638 AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
639 AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
640
641 /* If we didn't set IFS timings through
642 * ath5k_hw_set_coverage_class make sure
643 * we set them here */
644 if (!ah->ah_coverage_class) {
645 unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
646 ath5k_hw_set_ifs_intervals(ah, slot_time);
647 }
648
649 return 0;
650}
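
For reference, the following is a small, self-contained C sketch (not part of the patch) of two calculations the new code relies on: the contention-window rounding done by ath5k_cw_validate(), which rounds a requested value up to a power of two minus one and caps it at 1023, and the EIFS arithmetic described in the comment inside ath5k_hw_set_ifs_intervals(), where EIFS = ACK tx time + SIFS + DIFS and DIFS = SIFS + 2 * slot time. The timing numbers used below (16 us SIFS, 9 us slot, 24 us ACK airtime) are illustrative assumptions, not values read from the driver or the hardware.

#include <stdio.h>
#include <stdint.h>

/* Same rounding rule as ath5k_cw_validate(): clamp to 1023 and
 * round up to the next (power of two - 1), e.g. 22 -> 31. */
static uint16_t cw_validate(uint16_t cw_req)
{
	uint32_t cw = 1;

	if (cw_req > 1023)
		cw_req = 1023;
	while (cw < cw_req)
		cw = (cw << 1) | 1;
	return cw;
}

int main(void)
{
	/* Illustrative 802.11a-style timing, assumed for the example only */
	unsigned int sifs = 16;		/* us */
	unsigned int slot_time = 9;	/* us */
	unsigned int ack_tx_time = 24;	/* us, assumed lowest-rate ACK airtime */

	/* DIFS = SIFS + 2 * slot time; EIFS = ACK tx time + SIFS + DIFS */
	unsigned int difs = sifs + 2 * slot_time;
	unsigned int eifs = ack_tx_time + sifs + difs;

	printf("cw_validate(22) = %u\n", cw_validate(22));	/* prints 31 */
	printf("DIFS = %u us, EIFS = %u us\n", difs, eifs);	/* 34 and 74 */
	return 0;
}

With these assumed inputs the program prints cw_validate(22) = 31 and DIFS = 34 us, EIFS = 74 us; the driver itself computes the ACK transmit time from the lowest rate of the current band instead of using a fixed constant.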