Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 665
1 file changed, 493 insertions, 172 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a3401d301058..ab8c16f8bcaf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -1,5 +1,6 @@
1 | /* | 1 | /* |
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | 2 | Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> |
3 | Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> | 4 | Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> |
4 | <http://rt2x00.serialmonkey.com> | 5 | <http://rt2x00.serialmonkey.com> |
5 | 6 | ||
@@ -32,9 +33,9 @@
32 | #include "rt2x00.h" | 33 | #include "rt2x00.h" |
33 | #include "rt2x00lib.h" | 34 | #include "rt2x00lib.h" |
34 | 35 | ||
35 | struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, | 36 | struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry) |
36 | struct queue_entry *entry) | ||
37 | { | 37 | { |
38 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; | ||
38 | struct sk_buff *skb; | 39 | struct sk_buff *skb; |
39 | struct skb_frame_desc *skbdesc; | 40 | struct skb_frame_desc *skbdesc; |
40 | unsigned int frame_size; | 41 | unsigned int frame_size; |
@@ -59,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
59 | * at least 8 bytes bytes available in headroom for IV/EIV | 60 | * at least 8 bytes bytes available in headroom for IV/EIV |
60 | * and 8 bytes for ICV data as tailroon. | 61 | * and 8 bytes for ICV data as tailroon. |
61 | */ | 62 | */ |
62 | if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { | 63 | if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) { |
63 | head_size += 8; | 64 | head_size += 8; |
64 | tail_size += 8; | 65 | tail_size += 8; |
65 | } | 66 | } |
@@ -85,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
85 | memset(skbdesc, 0, sizeof(*skbdesc)); | 86 | memset(skbdesc, 0, sizeof(*skbdesc)); |
86 | skbdesc->entry = entry; | 87 | skbdesc->entry = entry; |
87 | 88 | ||
88 | if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) { | 89 | if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) { |
89 | skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, | 90 | skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, |
90 | skb->data, | 91 | skb->data, |
91 | skb->len, | 92 | skb->len, |
@@ -96,41 +97,42 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
96 | return skb; | 97 | return skb; |
97 | } | 98 | } |
98 | 99 | ||
99 | void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) | 100 | void rt2x00queue_map_txskb(struct queue_entry *entry) |
100 | { | 101 | { |
101 | struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); | 102 | struct device *dev = entry->queue->rt2x00dev->dev; |
103 | struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); | ||
102 | 104 | ||
103 | skbdesc->skb_dma = | 105 | skbdesc->skb_dma = |
104 | dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE); | 106 | dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE); |
105 | skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; | 107 | skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; |
106 | } | 108 | } |
107 | EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); | 109 | EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); |
108 | 110 | ||
109 | void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) | 111 | void rt2x00queue_unmap_skb(struct queue_entry *entry) |
110 | { | 112 | { |
111 | struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); | 113 | struct device *dev = entry->queue->rt2x00dev->dev; |
114 | struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); | ||
112 | 115 | ||
113 | if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { | 116 | if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { |
114 | dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, | 117 | dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, |
115 | DMA_FROM_DEVICE); | 118 | DMA_FROM_DEVICE); |
116 | skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; | 119 | skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; |
117 | } | 120 | } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { |
118 | 121 | dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, | |
119 | if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { | ||
120 | dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, | ||
121 | DMA_TO_DEVICE); | 122 | DMA_TO_DEVICE); |
122 | skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; | 123 | skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; |
123 | } | 124 | } |
124 | } | 125 | } |
125 | EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); | 126 | EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); |
126 | 127 | ||
127 | void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) | 128 | void rt2x00queue_free_skb(struct queue_entry *entry) |
128 | { | 129 | { |
129 | if (!skb) | 130 | if (!entry->skb) |
130 | return; | 131 | return; |
131 | 132 | ||
132 | rt2x00queue_unmap_skb(rt2x00dev, skb); | 133 | rt2x00queue_unmap_skb(entry); |
133 | dev_kfree_skb_any(skb); | 134 | dev_kfree_skb_any(entry->skb); |
135 | entry->skb = NULL; | ||
134 | } | 136 | } |
135 | 137 | ||
136 | void rt2x00queue_align_frame(struct sk_buff *skb) | 138 | void rt2x00queue_align_frame(struct sk_buff *skb) |
@@ -146,19 +148,6 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
146 | skb_trim(skb, frame_length); | 148 | skb_trim(skb, frame_length); |
147 | } | 149 | } |
148 | 150 | ||
149 | void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length) | ||
150 | { | ||
151 | unsigned int frame_length = skb->len; | ||
152 | unsigned int align = ALIGN_SIZE(skb, header_length); | ||
153 | |||
154 | if (!align) | ||
155 | return; | ||
156 | |||
157 | skb_push(skb, align); | ||
158 | memmove(skb->data, skb->data + align, frame_length); | ||
159 | skb_trim(skb, frame_length); | ||
160 | } | ||
161 | |||
162 | void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) | 151 | void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) |
163 | { | 152 | { |
164 | unsigned int payload_length = skb->len - header_length; | 153 | unsigned int payload_length = skb->len - header_length; |
@@ -197,7 +186,12 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
197 | 186 | ||
198 | void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) | 187 | void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) |
199 | { | 188 | { |
200 | unsigned int l2pad = L2PAD_SIZE(header_length); | 189 | /* |
190 | * L2 padding is only present if the skb contains more than just the | ||
191 | * IEEE 802.11 header. | ||
192 | */ | ||
193 | unsigned int l2pad = (skb->len > header_length) ? | ||
194 | L2PAD_SIZE(header_length) : 0; | ||
201 | 195 | ||
202 | if (!l2pad) | 196 | if (!l2pad) |
203 | return; | 197 | return; |
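The hunk above makes rt2x00queue_remove_l2pad() skip frames that carry nothing behind the IEEE 802.11 header. For reference, a minimal standalone sketch of the padding arithmetic follows; it assumes L2PAD_SIZE() is the usual align-to-4-bytes helper from rt2x00.h, which is not shown in this diff.

        /*
         * Illustrative sketch only, not part of the patch: L2 padding is the
         * number of bytes needed after the 802.11 header so that the payload
         * starts on a 4-byte boundary.  The macro below mirrors what rt2x00.h
         * is assumed to provide as L2PAD_SIZE().
         */
        #include <stdio.h>

        #define L2PAD_SIZE(hdrlen)      (-(hdrlen) & 3)

        int main(void)
        {
                unsigned int hdrlen;

                for (hdrlen = 24; hdrlen <= 30; hdrlen += 2)
                        printf("header %2u bytes -> %u byte(s) of L2 padding\n",
                               hdrlen, L2PAD_SIZE(hdrlen));
                return 0;
        }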
@@ -214,14 +208,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
214 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); | 208 | struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); |
215 | unsigned long irqflags; | 209 | unsigned long irqflags; |
216 | 210 | ||
217 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) || | 211 | if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) |
218 | unlikely(!tx_info->control.vif)) | 212 | return; |
213 | |||
214 | __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); | ||
215 | |||
216 | if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags)) | ||
219 | return; | 217 | return; |
220 | 218 | ||
221 | /* | 219 | /* |
222 | * Hardware should insert sequence counter. | 220 | * The hardware is not able to insert a sequence number. Assign a |
223 | * FIXME: We insert a software sequence counter first for | 221 | * software generated one here. |
224 | * hardware that doesn't support hardware sequence counting. | ||
225 | * | 222 | * |
226 | * This is wrong because beacons are not getting sequence | 223 | * This is wrong because beacons are not getting sequence |
227 | * numbers assigned properly. | 224 | * numbers assigned properly. |
@@ -239,7 +236,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
239 | 236 | ||
240 | spin_unlock_irqrestore(&intf->seqlock, irqflags); | 237 | spin_unlock_irqrestore(&intf->seqlock, irqflags); |
241 | 238 | ||
242 | __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); | ||
243 | } | 239 | } |
244 | 240 | ||
245 | static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, | 241 | static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, |
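The two hunks above move the software sequence number handling behind the new REQUIRE_SW_SEQNO capability check. As background (not part of the patch), the 802.11 Sequence Control field keeps the fragment number in its low 4 bits and the 12-bit sequence number in the upper bits, which is why a software counter advances in steps of 0x10; a standalone sketch:

        /*
         * Illustrative sketch only: a 12-bit sequence counter kept in the
         * upper bits of a 16-bit Sequence Control value, fragment number 0.
         */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint16_t seqno = 0;
                int i;

                for (i = 0; i < 5; i++) {
                        uint16_t seq_ctrl = seqno;      /* fragment number = 0 */
                        printf("frame %d: seq_ctrl=0x%04x (sequence %u)\n",
                               i, seq_ctrl, seq_ctrl >> 4);
                        seqno = (seqno + 0x10) & 0xfff0;
                }
                return 0;
        }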
@@ -253,6 +249,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
253 | unsigned int duration; | 249 | unsigned int duration; |
254 | unsigned int residual; | 250 | unsigned int residual; |
255 | 251 | ||
252 | /* | ||
253 | * Determine with what IFS priority this frame should be send. | ||
254 | * Set ifs to IFS_SIFS when the this is not the first fragment, | ||
255 | * or this fragment came after RTS/CTS. | ||
256 | */ | ||
257 | if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) | ||
258 | txdesc->u.plcp.ifs = IFS_BACKOFF; | ||
259 | else | ||
260 | txdesc->u.plcp.ifs = IFS_SIFS; | ||
261 | |||
256 | /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ | 262 | /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ |
257 | data_length = entry->skb->len + 4; | 263 | data_length = entry->skb->len + 4; |
258 | data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); | 264 | data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); |
@@ -261,12 +267,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
261 | * PLCP setup | 267 | * PLCP setup |
262 | * Length calculation depends on OFDM/CCK rate. | 268 | * Length calculation depends on OFDM/CCK rate. |
263 | */ | 269 | */ |
264 | txdesc->signal = hwrate->plcp; | 270 | txdesc->u.plcp.signal = hwrate->plcp; |
265 | txdesc->service = 0x04; | 271 | txdesc->u.plcp.service = 0x04; |
266 | 272 | ||
267 | if (hwrate->flags & DEV_RATE_OFDM) { | 273 | if (hwrate->flags & DEV_RATE_OFDM) { |
268 | txdesc->length_high = (data_length >> 6) & 0x3f; | 274 | txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f; |
269 | txdesc->length_low = data_length & 0x3f; | 275 | txdesc->u.plcp.length_low = data_length & 0x3f; |
270 | } else { | 276 | } else { |
271 | /* | 277 | /* |
272 | * Convert length to microseconds. | 278 | * Convert length to microseconds. |
@@ -281,42 +287,113 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
281 | * Check if we need to set the Length Extension | 287 | * Check if we need to set the Length Extension |
282 | */ | 288 | */ |
283 | if (hwrate->bitrate == 110 && residual <= 30) | 289 | if (hwrate->bitrate == 110 && residual <= 30) |
284 | txdesc->service |= 0x80; | 290 | txdesc->u.plcp.service |= 0x80; |
285 | } | 291 | } |
286 | 292 | ||
287 | txdesc->length_high = (duration >> 8) & 0xff; | 293 | txdesc->u.plcp.length_high = (duration >> 8) & 0xff; |
288 | txdesc->length_low = duration & 0xff; | 294 | txdesc->u.plcp.length_low = duration & 0xff; |
289 | 295 | ||
290 | /* | 296 | /* |
291 | * When preamble is enabled we should set the | 297 | * When preamble is enabled we should set the |
292 | * preamble bit for the signal. | 298 | * preamble bit for the signal. |
293 | */ | 299 | */ |
294 | if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) | 300 | if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) |
295 | txdesc->signal |= 0x08; | 301 | txdesc->u.plcp.signal |= 0x08; |
296 | } | 302 | } |
297 | } | 303 | } |
298 | 304 | ||
305 | static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry, | ||
306 | struct txentry_desc *txdesc, | ||
307 | const struct rt2x00_rate *hwrate) | ||
308 | { | ||
309 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); | ||
310 | struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; | ||
311 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; | ||
312 | |||
313 | if (tx_info->control.sta) | ||
314 | txdesc->u.ht.mpdu_density = | ||
315 | tx_info->control.sta->ht_cap.ampdu_density; | ||
316 | |||
317 | txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ | ||
318 | |||
319 | /* | ||
320 | * Only one STBC stream is supported for now. | ||
321 | */ | ||
322 | if (tx_info->flags & IEEE80211_TX_CTL_STBC) | ||
323 | txdesc->u.ht.stbc = 1; | ||
324 | |||
325 | /* | ||
326 | * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the | ||
327 | * mcs rate to be used | ||
328 | */ | ||
329 | if (txrate->flags & IEEE80211_TX_RC_MCS) { | ||
330 | txdesc->u.ht.mcs = txrate->idx; | ||
331 | |||
332 | /* | ||
333 | * MIMO PS should be set to 1 for STA's using dynamic SM PS | ||
334 | * when using more then one tx stream (>MCS7). | ||
335 | */ | ||
336 | if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && | ||
337 | ((tx_info->control.sta->ht_cap.cap & | ||
338 | IEEE80211_HT_CAP_SM_PS) >> | ||
339 | IEEE80211_HT_CAP_SM_PS_SHIFT) == | ||
340 | WLAN_HT_CAP_SM_PS_DYNAMIC) | ||
341 | __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); | ||
342 | } else { | ||
343 | txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs); | ||
344 | if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) | ||
345 | txdesc->u.ht.mcs |= 0x08; | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * This frame is eligible for an AMPDU, however, don't aggregate | ||
350 | * frames that are intended to probe a specific tx rate. | ||
351 | */ | ||
352 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && | ||
353 | !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) | ||
354 | __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); | ||
355 | |||
356 | /* | ||
357 | * Set 40Mhz mode if necessary (for legacy rates this will | ||
358 | * duplicate the frame to both channels). | ||
359 | */ | ||
360 | if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH || | ||
361 | txrate->flags & IEEE80211_TX_RC_DUP_DATA) | ||
362 | __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); | ||
363 | if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) | ||
364 | __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); | ||
365 | |||
366 | /* | ||
367 | * Determine IFS values | ||
368 | * - Use TXOP_BACKOFF for management frames except beacons | ||
369 | * - Use TXOP_SIFS for fragment bursts | ||
370 | * - Use TXOP_HTTXOP for everything else | ||
371 | * | ||
372 | * Note: rt2800 devices won't use CTS protection (if used) | ||
373 | * for frames not transmitted with TXOP_HTTXOP | ||
374 | */ | ||
375 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
376 | !ieee80211_is_beacon(hdr->frame_control)) | ||
377 | txdesc->u.ht.txop = TXOP_BACKOFF; | ||
378 | else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) | ||
379 | txdesc->u.ht.txop = TXOP_SIFS; | ||
380 | else | ||
381 | txdesc->u.ht.txop = TXOP_HTTXOP; | ||
382 | } | ||
383 | |||
299 | static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, | 384 | static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, |
300 | struct txentry_desc *txdesc) | 385 | struct txentry_desc *txdesc) |
301 | { | 386 | { |
302 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; | 387 | struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; |
303 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); | 388 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); |
304 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; | 389 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; |
305 | struct ieee80211_rate *rate = | 390 | struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; |
306 | ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); | 391 | struct ieee80211_rate *rate; |
307 | const struct rt2x00_rate *hwrate; | 392 | const struct rt2x00_rate *hwrate = NULL; |
308 | 393 | ||
309 | memset(txdesc, 0, sizeof(*txdesc)); | 394 | memset(txdesc, 0, sizeof(*txdesc)); |
310 | 395 | ||
311 | /* | 396 | /* |
312 | * Initialize information from queue | ||
313 | */ | ||
314 | txdesc->queue = entry->queue->qid; | ||
315 | txdesc->cw_min = entry->queue->cw_min; | ||
316 | txdesc->cw_max = entry->queue->cw_max; | ||
317 | txdesc->aifs = entry->queue->aifs; | ||
318 | |||
319 | /* | ||
320 | * Header and frame information. | 397 | * Header and frame information. |
321 | */ | 398 | */ |
322 | txdesc->length = entry->skb->len; | 399 | txdesc->length = entry->skb->len; |
@@ -366,42 +443,42 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, | |||
366 | 443 | ||
367 | /* | 444 | /* |
368 | * Beacons and probe responses require the tsf timestamp | 445 | * Beacons and probe responses require the tsf timestamp |
369 | * to be inserted into the frame, except for a frame that has been injected | 446 | * to be inserted into the frame. |
370 | * through a monitor interface. This latter is needed for testing a | ||
371 | * monitor interface. | ||
372 | */ | 447 | */ |
373 | if ((ieee80211_is_beacon(hdr->frame_control) || | 448 | if (ieee80211_is_beacon(hdr->frame_control) || |
374 | ieee80211_is_probe_resp(hdr->frame_control)) && | 449 | ieee80211_is_probe_resp(hdr->frame_control)) |
375 | (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED))) | ||
376 | __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); | 450 | __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); |
377 | 451 | ||
378 | /* | ||
379 | * Determine with what IFS priority this frame should be send. | ||
380 | * Set ifs to IFS_SIFS when the this is not the first fragment, | ||
381 | * or this fragment came after RTS/CTS. | ||
382 | */ | ||
383 | if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && | 452 | if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && |
384 | !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) { | 453 | !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) |
385 | __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); | 454 | __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); |
386 | txdesc->ifs = IFS_BACKOFF; | ||
387 | } else | ||
388 | txdesc->ifs = IFS_SIFS; | ||
389 | 455 | ||
390 | /* | 456 | /* |
391 | * Determine rate modulation. | 457 | * Determine rate modulation. |
392 | */ | 458 | */ |
393 | hwrate = rt2x00_get_rate(rate->hw_value); | 459 | if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) |
394 | txdesc->rate_mode = RATE_MODE_CCK; | 460 | txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; |
395 | if (hwrate->flags & DEV_RATE_OFDM) | 461 | else if (txrate->flags & IEEE80211_TX_RC_MCS) |
396 | txdesc->rate_mode = RATE_MODE_OFDM; | 462 | txdesc->rate_mode = RATE_MODE_HT_MIX; |
463 | else { | ||
464 | rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); | ||
465 | hwrate = rt2x00_get_rate(rate->hw_value); | ||
466 | if (hwrate->flags & DEV_RATE_OFDM) | ||
467 | txdesc->rate_mode = RATE_MODE_OFDM; | ||
468 | else | ||
469 | txdesc->rate_mode = RATE_MODE_CCK; | ||
470 | } | ||
397 | 471 | ||
398 | /* | 472 | /* |
399 | * Apply TX descriptor handling by components | 473 | * Apply TX descriptor handling by components |
400 | */ | 474 | */ |
401 | rt2x00crypto_create_tx_descriptor(entry, txdesc); | 475 | rt2x00crypto_create_tx_descriptor(entry, txdesc); |
402 | rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); | ||
403 | rt2x00queue_create_tx_descriptor_seq(entry, txdesc); | 476 | rt2x00queue_create_tx_descriptor_seq(entry, txdesc); |
404 | rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); | 477 | |
478 | if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) | ||
479 | rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate); | ||
480 | else | ||
481 | rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); | ||
405 | } | 482 | } |
406 | 483 | ||
407 | static int rt2x00queue_write_tx_data(struct queue_entry *entry, | 484 | static int rt2x00queue_write_tx_data(struct queue_entry *entry, |
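In rt2x00queue_create_tx_descriptor_plcp() above, the OFDM frame length is stored as two 6-bit PLCP fields. A standalone sketch of that split (not part of the patch) follows:

        /*
         * Illustrative sketch only: the OFDM branch stores the 12-bit frame
         * length as a 6-bit high part and a 6-bit low part, exactly as it is
         * written to txdesc->u.plcp.length_high/length_low.
         */
        #include <stdio.h>

        int main(void)
        {
                unsigned int data_length = 1534;        /* frame + FCS + crypto overhead */
                unsigned int length_high = (data_length >> 6) & 0x3f;
                unsigned int length_low  = data_length & 0x3f;

                printf("length_high=0x%02x length_low=0x%02x -> %u bytes\n",
                       length_high, length_low, (length_high << 6) | length_low);
                return 0;
        }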
@@ -438,8 +515,8 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
438 | /* | 515 | /* |
439 | * Map the skb to DMA. | 516 | * Map the skb to DMA. |
440 | */ | 517 | */ |
441 | if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) | 518 | if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) |
442 | rt2x00queue_map_txskb(rt2x00dev, entry->skb); | 519 | rt2x00queue_map_txskb(entry); |
443 | 520 | ||
444 | return 0; | 521 | return 0; |
445 | } | 522 | } |
@@ -448,23 +525,19 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
448 | struct txentry_desc *txdesc) | 525 | struct txentry_desc *txdesc) |
449 | { | 526 | { |
450 | struct data_queue *queue = entry->queue; | 527 | struct data_queue *queue = entry->queue; |
451 | struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; | ||
452 | 528 | ||
453 | rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); | 529 | queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); |
454 | 530 | ||
455 | /* | 531 | /* |
456 | * All processing on the frame has been completed, this means | 532 | * All processing on the frame has been completed, this means |
457 | * it is now ready to be dumped to userspace through debugfs. | 533 | * it is now ready to be dumped to userspace through debugfs. |
458 | */ | 534 | */ |
459 | rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); | 535 | rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); |
460 | } | 536 | } |
461 | 537 | ||
462 | static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, | 538 | static void rt2x00queue_kick_tx_queue(struct data_queue *queue, |
463 | struct txentry_desc *txdesc) | 539 | struct txentry_desc *txdesc) |
464 | { | 540 | { |
465 | struct data_queue *queue = entry->queue; | ||
466 | struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; | ||
467 | |||
468 | /* | 541 | /* |
469 | * Check if we need to kick the queue, there are however a few rules | 542 | * Check if we need to kick the queue, there are however a few rules |
470 | * 1) Don't kick unless this is the last in frame in a burst. | 543 | * 1) Don't kick unless this is the last in frame in a burst. |
@@ -476,7 +549,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
476 | */ | 549 | */ |
477 | if (rt2x00queue_threshold(queue) || | 550 | if (rt2x00queue_threshold(queue) || |
478 | !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) | 551 | !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) |
479 | rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); | 552 | queue->rt2x00dev->ops->lib->kick_queue(queue); |
480 | } | 553 | } |
481 | 554 | ||
482 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, | 555 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, |
@@ -488,10 +561,14 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
488 | struct skb_frame_desc *skbdesc; | 561 | struct skb_frame_desc *skbdesc; |
489 | u8 rate_idx, rate_flags; | 562 | u8 rate_idx, rate_flags; |
490 | 563 | ||
491 | if (unlikely(rt2x00queue_full(queue))) | 564 | if (unlikely(rt2x00queue_full(queue))) { |
565 | ERROR(queue->rt2x00dev, | ||
566 | "Dropping frame due to full tx queue %d.\n", queue->qid); | ||
492 | return -ENOBUFS; | 567 | return -ENOBUFS; |
568 | } | ||
493 | 569 | ||
494 | if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) { | 570 | if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, |
571 | &entry->flags))) { | ||
495 | ERROR(queue->rt2x00dev, | 572 | ERROR(queue->rt2x00dev, |
496 | "Arrived at non-free entry in the non-full queue %d.\n" | 573 | "Arrived at non-free entry in the non-full queue %d.\n" |
497 | "Please file bug report to %s.\n", | 574 | "Please file bug report to %s.\n", |
@@ -531,23 +608,23 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
531 | */ | 608 | */ |
532 | if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && | 609 | if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && |
533 | !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { | 610 | !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { |
534 | if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags)) | 611 | if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags)) |
535 | rt2x00crypto_tx_copy_iv(skb, &txdesc); | 612 | rt2x00crypto_tx_copy_iv(skb, &txdesc); |
536 | else | 613 | else |
537 | rt2x00crypto_tx_remove_iv(skb, &txdesc); | 614 | rt2x00crypto_tx_remove_iv(skb, &txdesc); |
538 | } | 615 | } |
539 | 616 | ||
540 | /* | 617 | /* |
541 | * When DMA allocation is required we should guarentee to the | 618 | * When DMA allocation is required we should guarantee to the |
542 | * driver that the DMA is aligned to a 4-byte boundary. | 619 | * driver that the DMA is aligned to a 4-byte boundary. |
543 | * However some drivers require L2 padding to pad the payload | 620 | * However some drivers require L2 padding to pad the payload |
544 | * rather then the header. This could be a requirement for | 621 | * rather then the header. This could be a requirement for |
545 | * PCI and USB devices, while header alignment only is valid | 622 | * PCI and USB devices, while header alignment only is valid |
546 | * for PCI devices. | 623 | * for PCI devices. |
547 | */ | 624 | */ |
548 | if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags)) | 625 | if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags)) |
549 | rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length); | 626 | rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length); |
550 | else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) | 627 | else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags)) |
551 | rt2x00queue_align_frame(entry->skb); | 628 | rt2x00queue_align_frame(entry->skb); |
552 | 629 | ||
553 | /* | 630 | /* |
@@ -563,20 +640,17 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
563 | 640 | ||
564 | set_bit(ENTRY_DATA_PENDING, &entry->flags); | 641 | set_bit(ENTRY_DATA_PENDING, &entry->flags); |
565 | 642 | ||
566 | rt2x00queue_index_inc(queue, Q_INDEX); | 643 | rt2x00queue_index_inc(entry, Q_INDEX); |
567 | rt2x00queue_write_tx_descriptor(entry, &txdesc); | 644 | rt2x00queue_write_tx_descriptor(entry, &txdesc); |
568 | rt2x00queue_kick_tx_queue(entry, &txdesc); | 645 | rt2x00queue_kick_tx_queue(queue, &txdesc); |
569 | 646 | ||
570 | return 0; | 647 | return 0; |
571 | } | 648 | } |
572 | 649 | ||
573 | int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, | 650 | int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, |
574 | struct ieee80211_vif *vif, | 651 | struct ieee80211_vif *vif) |
575 | const bool enable_beacon) | ||
576 | { | 652 | { |
577 | struct rt2x00_intf *intf = vif_to_intf(vif); | 653 | struct rt2x00_intf *intf = vif_to_intf(vif); |
578 | struct skb_frame_desc *skbdesc; | ||
579 | struct txentry_desc txdesc; | ||
580 | 654 | ||
581 | if (unlikely(!intf->beacon)) | 655 | if (unlikely(!intf->beacon)) |
582 | return -ENOBUFS; | 656 | return -ENOBUFS; |
@@ -586,20 +660,38 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
586 | /* | 660 | /* |
587 | * Clean up the beacon skb. | 661 | * Clean up the beacon skb. |
588 | */ | 662 | */ |
589 | rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb); | 663 | rt2x00queue_free_skb(intf->beacon); |
590 | intf->beacon->skb = NULL; | ||
591 | 664 | ||
592 | if (!enable_beacon) { | 665 | /* |
593 | rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON); | 666 | * Clear beacon (single bssid devices don't need to clear the beacon |
594 | mutex_unlock(&intf->beacon_skb_mutex); | 667 | * since the beacon queue will get stopped anyway). |
595 | return 0; | 668 | */ |
596 | } | 669 | if (rt2x00dev->ops->lib->clear_beacon) |
670 | rt2x00dev->ops->lib->clear_beacon(intf->beacon); | ||
671 | |||
672 | mutex_unlock(&intf->beacon_skb_mutex); | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev, | ||
678 | struct ieee80211_vif *vif) | ||
679 | { | ||
680 | struct rt2x00_intf *intf = vif_to_intf(vif); | ||
681 | struct skb_frame_desc *skbdesc; | ||
682 | struct txentry_desc txdesc; | ||
683 | |||
684 | if (unlikely(!intf->beacon)) | ||
685 | return -ENOBUFS; | ||
686 | |||
687 | /* | ||
688 | * Clean up the beacon skb. | ||
689 | */ | ||
690 | rt2x00queue_free_skb(intf->beacon); | ||
597 | 691 | ||
598 | intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); | 692 | intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); |
599 | if (!intf->beacon->skb) { | 693 | if (!intf->beacon->skb) |
600 | mutex_unlock(&intf->beacon_skb_mutex); | ||
601 | return -ENOMEM; | 694 | return -ENOMEM; |
602 | } | ||
603 | 695 | ||
604 | /* | 696 | /* |
605 | * Copy all TX descriptor information into txdesc, | 697 | * Copy all TX descriptor information into txdesc, |
@@ -616,37 +708,81 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
616 | skbdesc->entry = intf->beacon; | 708 | skbdesc->entry = intf->beacon; |
617 | 709 | ||
618 | /* | 710 | /* |
619 | * Send beacon to hardware and enable beacon genaration.. | 711 | * Send beacon to hardware. |
620 | */ | 712 | */ |
621 | rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); | 713 | rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); |
622 | 714 | ||
715 | return 0; | ||
716 | |||
717 | } | ||
718 | |||
719 | int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, | ||
720 | struct ieee80211_vif *vif) | ||
721 | { | ||
722 | struct rt2x00_intf *intf = vif_to_intf(vif); | ||
723 | int ret; | ||
724 | |||
725 | mutex_lock(&intf->beacon_skb_mutex); | ||
726 | ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif); | ||
623 | mutex_unlock(&intf->beacon_skb_mutex); | 727 | mutex_unlock(&intf->beacon_skb_mutex); |
624 | 728 | ||
625 | return 0; | 729 | return ret; |
626 | } | 730 | } |
627 | 731 | ||
628 | struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, | 732 | bool rt2x00queue_for_each_entry(struct data_queue *queue, |
629 | const enum data_queue_qid queue) | 733 | enum queue_index start, |
734 | enum queue_index end, | ||
735 | void *data, | ||
736 | bool (*fn)(struct queue_entry *entry, | ||
737 | void *data)) | ||
630 | { | 738 | { |
631 | int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); | 739 | unsigned long irqflags; |
740 | unsigned int index_start; | ||
741 | unsigned int index_end; | ||
742 | unsigned int i; | ||
632 | 743 | ||
633 | if (queue == QID_RX) | 744 | if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { |
634 | return rt2x00dev->rx; | 745 | ERROR(queue->rt2x00dev, |
746 | "Entry requested from invalid index range (%d - %d)\n", | ||
747 | start, end); | ||
748 | return true; | ||
749 | } | ||
635 | 750 | ||
636 | if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) | 751 | /* |
637 | return &rt2x00dev->tx[queue]; | 752 | * Only protect the range we are going to loop over, |
753 | * if during our loop a extra entry is set to pending | ||
754 | * it should not be kicked during this run, since it | ||
755 | * is part of another TX operation. | ||
756 | */ | ||
757 | spin_lock_irqsave(&queue->index_lock, irqflags); | ||
758 | index_start = queue->index[start]; | ||
759 | index_end = queue->index[end]; | ||
760 | spin_unlock_irqrestore(&queue->index_lock, irqflags); | ||
638 | 761 | ||
639 | if (!rt2x00dev->bcn) | 762 | /* |
640 | return NULL; | 763 | * Start from the TX done pointer, this guarantees that we will |
764 | * send out all frames in the correct order. | ||
765 | */ | ||
766 | if (index_start < index_end) { | ||
767 | for (i = index_start; i < index_end; i++) { | ||
768 | if (fn(&queue->entries[i], data)) | ||
769 | return true; | ||
770 | } | ||
771 | } else { | ||
772 | for (i = index_start; i < queue->limit; i++) { | ||
773 | if (fn(&queue->entries[i], data)) | ||
774 | return true; | ||
775 | } | ||
641 | 776 | ||
642 | if (queue == QID_BEACON) | 777 | for (i = 0; i < index_end; i++) { |
643 | return &rt2x00dev->bcn[0]; | 778 | if (fn(&queue->entries[i], data)) |
644 | else if (queue == QID_ATIM && atim) | 779 | return true; |
645 | return &rt2x00dev->bcn[1]; | 780 | } |
781 | } | ||
646 | 782 | ||
647 | return NULL; | 783 | return false; |
648 | } | 784 | } |
649 | EXPORT_SYMBOL_GPL(rt2x00queue_get_queue); | 785 | EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); |
650 | 786 | ||
651 | struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, | 787 | struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, |
652 | enum queue_index index) | 788 | enum queue_index index) |
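rt2x00queue_for_each_entry(), added above, walks a snapshot of the [start, end) index range and has to handle the case where that range wraps around the end of the ring. A standalone sketch of just that iteration pattern (not part of the patch; queue internals reduced to plain integers) is:

        /*
         * Illustrative sketch only: the wrap-around walk used by
         * rt2x00queue_for_each_entry().  When start < end the range is
         * contiguous; otherwise it runs to the end of the ring and continues
         * from slot 0.  Returning true from the callback aborts the walk.
         */
        #include <stdbool.h>
        #include <stdio.h>

        #define QUEUE_LIMIT 8

        static bool visit(unsigned int slot, void *data)
        {
                printf("visiting slot %u\n", slot);
                return false;
        }

        static bool walk_range(unsigned int start, unsigned int end,
                               bool (*fn)(unsigned int, void *), void *data)
        {
                unsigned int i;

                if (start < end) {
                        for (i = start; i < end; i++)
                                if (fn(i, data))
                                        return true;
                } else {
                        for (i = start; i < QUEUE_LIMIT; i++)
                                if (fn(i, data))
                                        return true;
                        for (i = 0; i < end; i++)
                                if (fn(i, data))
                                        return true;
                }
                return false;
        }

        int main(void)
        {
                /* e.g. Q_INDEX_DONE snapshot = 6, Q_INDEX snapshot = 2: wraps. */
                walk_range(6, 2, visit, NULL);
                return 0;
        }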
@@ -660,18 +796,19 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
660 | return NULL; | 796 | return NULL; |
661 | } | 797 | } |
662 | 798 | ||
663 | spin_lock_irqsave(&queue->lock, irqflags); | 799 | spin_lock_irqsave(&queue->index_lock, irqflags); |
664 | 800 | ||
665 | entry = &queue->entries[queue->index[index]]; | 801 | entry = &queue->entries[queue->index[index]]; |
666 | 802 | ||
667 | spin_unlock_irqrestore(&queue->lock, irqflags); | 803 | spin_unlock_irqrestore(&queue->index_lock, irqflags); |
668 | 804 | ||
669 | return entry; | 805 | return entry; |
670 | } | 806 | } |
671 | EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); | 807 | EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); |
672 | 808 | ||
673 | void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index) | 809 | void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) |
674 | { | 810 | { |
811 | struct data_queue *queue = entry->queue; | ||
675 | unsigned long irqflags; | 812 | unsigned long irqflags; |
676 | 813 | ||
677 | if (unlikely(index >= Q_INDEX_MAX)) { | 814 | if (unlikely(index >= Q_INDEX_MAX)) { |
@@ -680,45 +817,233 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
680 | return; | 817 | return; |
681 | } | 818 | } |
682 | 819 | ||
683 | spin_lock_irqsave(&queue->lock, irqflags); | 820 | spin_lock_irqsave(&queue->index_lock, irqflags); |
684 | 821 | ||
685 | queue->index[index]++; | 822 | queue->index[index]++; |
686 | if (queue->index[index] >= queue->limit) | 823 | if (queue->index[index] >= queue->limit) |
687 | queue->index[index] = 0; | 824 | queue->index[index] = 0; |
688 | 825 | ||
826 | entry->last_action = jiffies; | ||
827 | |||
689 | if (index == Q_INDEX) { | 828 | if (index == Q_INDEX) { |
690 | queue->length++; | 829 | queue->length++; |
691 | queue->last_index = jiffies; | ||
692 | } else if (index == Q_INDEX_DONE) { | 830 | } else if (index == Q_INDEX_DONE) { |
693 | queue->length--; | 831 | queue->length--; |
694 | queue->count++; | 832 | queue->count++; |
695 | queue->last_index_done = jiffies; | ||
696 | } | 833 | } |
697 | 834 | ||
698 | spin_unlock_irqrestore(&queue->lock, irqflags); | 835 | spin_unlock_irqrestore(&queue->index_lock, irqflags); |
699 | } | 836 | } |
700 | 837 | ||
701 | static void rt2x00queue_reset(struct data_queue *queue) | 838 | void rt2x00queue_pause_queue(struct data_queue *queue) |
702 | { | 839 | { |
703 | unsigned long irqflags; | 840 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || |
841 | !test_bit(QUEUE_STARTED, &queue->flags) || | ||
842 | test_and_set_bit(QUEUE_PAUSED, &queue->flags)) | ||
843 | return; | ||
704 | 844 | ||
705 | spin_lock_irqsave(&queue->lock, irqflags); | 845 | switch (queue->qid) { |
846 | case QID_AC_VO: | ||
847 | case QID_AC_VI: | ||
848 | case QID_AC_BE: | ||
849 | case QID_AC_BK: | ||
850 | /* | ||
851 | * For TX queues, we have to disable the queue | ||
852 | * inside mac80211. | ||
853 | */ | ||
854 | ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); | ||
855 | break; | ||
856 | default: | ||
857 | break; | ||
858 | } | ||
859 | } | ||
860 | EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); | ||
706 | 861 | ||
707 | queue->count = 0; | 862 | void rt2x00queue_unpause_queue(struct data_queue *queue) |
708 | queue->length = 0; | 863 | { |
709 | queue->last_index = jiffies; | 864 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || |
710 | queue->last_index_done = jiffies; | 865 | !test_bit(QUEUE_STARTED, &queue->flags) || |
711 | memset(queue->index, 0, sizeof(queue->index)); | 866 | !test_and_clear_bit(QUEUE_PAUSED, &queue->flags)) |
867 | return; | ||
712 | 868 | ||
713 | spin_unlock_irqrestore(&queue->lock, irqflags); | 869 | switch (queue->qid) { |
870 | case QID_AC_VO: | ||
871 | case QID_AC_VI: | ||
872 | case QID_AC_BE: | ||
873 | case QID_AC_BK: | ||
874 | /* | ||
875 | * For TX queues, we have to enable the queue | ||
876 | * inside mac80211. | ||
877 | */ | ||
878 | ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); | ||
879 | break; | ||
880 | case QID_RX: | ||
881 | /* | ||
882 | * For RX we need to kick the queue now in order to | ||
883 | * receive frames. | ||
884 | */ | ||
885 | queue->rt2x00dev->ops->lib->kick_queue(queue); | ||
886 | default: | ||
887 | break; | ||
888 | } | ||
889 | } | ||
890 | EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue); | ||
891 | |||
892 | void rt2x00queue_start_queue(struct data_queue *queue) | ||
893 | { | ||
894 | mutex_lock(&queue->status_lock); | ||
895 | |||
896 | if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || | ||
897 | test_and_set_bit(QUEUE_STARTED, &queue->flags)) { | ||
898 | mutex_unlock(&queue->status_lock); | ||
899 | return; | ||
900 | } | ||
901 | |||
902 | set_bit(QUEUE_PAUSED, &queue->flags); | ||
903 | |||
904 | queue->rt2x00dev->ops->lib->start_queue(queue); | ||
905 | |||
906 | rt2x00queue_unpause_queue(queue); | ||
907 | |||
908 | mutex_unlock(&queue->status_lock); | ||
714 | } | 909 | } |
910 | EXPORT_SYMBOL_GPL(rt2x00queue_start_queue); | ||
911 | |||
912 | void rt2x00queue_stop_queue(struct data_queue *queue) | ||
913 | { | ||
914 | mutex_lock(&queue->status_lock); | ||
915 | |||
916 | if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) { | ||
917 | mutex_unlock(&queue->status_lock); | ||
918 | return; | ||
919 | } | ||
920 | |||
921 | rt2x00queue_pause_queue(queue); | ||
922 | |||
923 | queue->rt2x00dev->ops->lib->stop_queue(queue); | ||
924 | |||
925 | mutex_unlock(&queue->status_lock); | ||
926 | } | ||
927 | EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue); | ||
928 | |||
929 | void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) | ||
930 | { | ||
931 | bool started; | ||
932 | bool tx_queue = | ||
933 | (queue->qid == QID_AC_VO) || | ||
934 | (queue->qid == QID_AC_VI) || | ||
935 | (queue->qid == QID_AC_BE) || | ||
936 | (queue->qid == QID_AC_BK); | ||
937 | |||
938 | mutex_lock(&queue->status_lock); | ||
939 | |||
940 | /* | ||
941 | * If the queue has been started, we must stop it temporarily | ||
942 | * to prevent any new frames to be queued on the device. If | ||
943 | * we are not dropping the pending frames, the queue must | ||
944 | * only be stopped in the software and not the hardware, | ||
945 | * otherwise the queue will never become empty on its own. | ||
946 | */ | ||
947 | started = test_bit(QUEUE_STARTED, &queue->flags); | ||
948 | if (started) { | ||
949 | /* | ||
950 | * Pause the queue | ||
951 | */ | ||
952 | rt2x00queue_pause_queue(queue); | ||
953 | |||
954 | /* | ||
955 | * If we are not supposed to drop any pending | ||
956 | * frames, this means we must force a start (=kick) | ||
957 | * to the queue to make sure the hardware will | ||
958 | * start transmitting. | ||
959 | */ | ||
960 | if (!drop && tx_queue) | ||
961 | queue->rt2x00dev->ops->lib->kick_queue(queue); | ||
962 | } | ||
963 | |||
964 | /* | ||
965 | * Check if driver supports flushing, if that is the case we can | ||
966 | * defer the flushing to the driver. Otherwise we must use the | ||
967 | * alternative which just waits for the queue to become empty. | ||
968 | */ | ||
969 | if (likely(queue->rt2x00dev->ops->lib->flush_queue)) | ||
970 | queue->rt2x00dev->ops->lib->flush_queue(queue, drop); | ||
971 | |||
972 | /* | ||
973 | * The queue flush has failed... | ||
974 | */ | ||
975 | if (unlikely(!rt2x00queue_empty(queue))) | ||
976 | WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid); | ||
977 | |||
978 | /* | ||
979 | * Restore the queue to the previous status | ||
980 | */ | ||
981 | if (started) | ||
982 | rt2x00queue_unpause_queue(queue); | ||
983 | |||
984 | mutex_unlock(&queue->status_lock); | ||
985 | } | ||
986 | EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue); | ||
987 | |||
988 | void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev) | ||
989 | { | ||
990 | struct data_queue *queue; | ||
991 | |||
992 | /* | ||
993 | * rt2x00queue_start_queue will call ieee80211_wake_queue | ||
994 | * for each queue after is has been properly initialized. | ||
995 | */ | ||
996 | tx_queue_for_each(rt2x00dev, queue) | ||
997 | rt2x00queue_start_queue(queue); | ||
998 | |||
999 | rt2x00queue_start_queue(rt2x00dev->rx); | ||
1000 | } | ||
1001 | EXPORT_SYMBOL_GPL(rt2x00queue_start_queues); | ||
715 | 1002 | ||
716 | void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) | 1003 | void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) |
717 | { | 1004 | { |
718 | struct data_queue *queue; | 1005 | struct data_queue *queue; |
719 | 1006 | ||
720 | txall_queue_for_each(rt2x00dev, queue) | 1007 | /* |
721 | rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid); | 1008 | * rt2x00queue_stop_queue will call ieee80211_stop_queue |
1009 | * as well, but we are completely shutting doing everything | ||
1010 | * now, so it is much safer to stop all TX queues at once, | ||
1011 | * and use rt2x00queue_stop_queue for cleaning up. | ||
1012 | */ | ||
1013 | ieee80211_stop_queues(rt2x00dev->hw); | ||
1014 | |||
1015 | tx_queue_for_each(rt2x00dev, queue) | ||
1016 | rt2x00queue_stop_queue(queue); | ||
1017 | |||
1018 | rt2x00queue_stop_queue(rt2x00dev->rx); | ||
1019 | } | ||
1020 | EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues); | ||
1021 | |||
1022 | void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop) | ||
1023 | { | ||
1024 | struct data_queue *queue; | ||
1025 | |||
1026 | tx_queue_for_each(rt2x00dev, queue) | ||
1027 | rt2x00queue_flush_queue(queue, drop); | ||
1028 | |||
1029 | rt2x00queue_flush_queue(rt2x00dev->rx, drop); | ||
1030 | } | ||
1031 | EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues); | ||
1032 | |||
1033 | static void rt2x00queue_reset(struct data_queue *queue) | ||
1034 | { | ||
1035 | unsigned long irqflags; | ||
1036 | unsigned int i; | ||
1037 | |||
1038 | spin_lock_irqsave(&queue->index_lock, irqflags); | ||
1039 | |||
1040 | queue->count = 0; | ||
1041 | queue->length = 0; | ||
1042 | |||
1043 | for (i = 0; i < Q_INDEX_MAX; i++) | ||
1044 | queue->index[i] = 0; | ||
1045 | |||
1046 | spin_unlock_irqrestore(&queue->index_lock, irqflags); | ||
722 | } | 1047 | } |
723 | 1048 | ||
724 | void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) | 1049 | void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) |
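The queue start/stop/pause helpers introduced above are deliberately idempotent: starting an already started queue or pausing an already paused one is a no-op. A standalone sketch of the intended state transitions (not part of the patch; the kernel code uses atomic bitops under queue->status_lock) is:

        /*
         * Illustrative sketch only: the QUEUE_STARTED/QUEUE_PAUSED flag logic
         * reduced to plain booleans to show the intended transitions.
         */
        #include <stdbool.h>
        #include <stdio.h>

        struct queue_state {
                bool started;   /* QUEUE_STARTED */
                bool paused;    /* QUEUE_PAUSED  */
        };

        static void pause_queue(struct queue_state *q)
        {
                if (!q->started || q->paused)
                        return;         /* not running, or already paused */
                q->paused = true;
                puts("stop handing frames to the hardware");
        }

        static void unpause_queue(struct queue_state *q)
        {
                if (!q->started || !q->paused)
                        return;
                q->paused = false;
                puts("resume handing frames to the hardware");
        }

        static void start_queue(struct queue_state *q)
        {
                if (q->started)
                        return;         /* starting twice is a no-op */
                q->started = true;
                q->paused = true;       /* start paused, then unpause */
                unpause_queue(q);
        }

        static void stop_queue(struct queue_state *q)
        {
                if (!q->started)
                        return;
                pause_queue(q);
                q->started = false;
        }

        int main(void)
        {
                struct queue_state q = { false, false };

                start_queue(&q);
                pause_queue(&q);
                unpause_queue(&q);
                stop_queue(&q);
                return 0;
        }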
@@ -729,11 +1054,8 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
729 | queue_for_each(rt2x00dev, queue) { | 1054 | queue_for_each(rt2x00dev, queue) { |
730 | rt2x00queue_reset(queue); | 1055 | rt2x00queue_reset(queue); |
731 | 1056 | ||
732 | for (i = 0; i < queue->limit; i++) { | 1057 | for (i = 0; i < queue->limit; i++) |
733 | queue->entries[i].flags = 0; | ||
734 | |||
735 | rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); | 1058 | rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); |
736 | } | ||
737 | } | 1059 | } |
738 | } | 1060 | } |
739 | 1061 | ||
@@ -755,13 +1077,13 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
755 | * Allocate all queue entries. | 1077 | * Allocate all queue entries. |
756 | */ | 1078 | */ |
757 | entry_size = sizeof(*entries) + qdesc->priv_size; | 1079 | entry_size = sizeof(*entries) + qdesc->priv_size; |
758 | entries = kzalloc(queue->limit * entry_size, GFP_KERNEL); | 1080 | entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); |
759 | if (!entries) | 1081 | if (!entries) |
760 | return -ENOMEM; | 1082 | return -ENOMEM; |
761 | 1083 | ||
762 | #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ | 1084 | #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ |
763 | ( ((char *)(__base)) + ((__limit) * (__esize)) + \ | 1085 | (((char *)(__base)) + ((__limit) * (__esize)) + \ |
764 | ((__index) * (__psize)) ) | 1086 | ((__index) * (__psize))) |
765 | 1087 | ||
766 | for (i = 0; i < queue->limit; i++) { | 1088 | for (i = 0; i < queue->limit; i++) { |
767 | entries[i].flags = 0; | 1089 | entries[i].flags = 0; |
@@ -780,8 +1102,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
780 | return 0; | 1102 | return 0; |
781 | } | 1103 | } |
782 | 1104 | ||
783 | static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev, | 1105 | static void rt2x00queue_free_skbs(struct data_queue *queue) |
784 | struct data_queue *queue) | ||
785 | { | 1106 | { |
786 | unsigned int i; | 1107 | unsigned int i; |
787 | 1108 | ||
@@ -789,19 +1110,17 @@ static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
789 | return; | 1110 | return; |
790 | 1111 | ||
791 | for (i = 0; i < queue->limit; i++) { | 1112 | for (i = 0; i < queue->limit; i++) { |
792 | if (queue->entries[i].skb) | 1113 | rt2x00queue_free_skb(&queue->entries[i]); |
793 | rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb); | ||
794 | } | 1114 | } |
795 | } | 1115 | } |
796 | 1116 | ||
797 | static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev, | 1117 | static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) |
798 | struct data_queue *queue) | ||
799 | { | 1118 | { |
800 | unsigned int i; | 1119 | unsigned int i; |
801 | struct sk_buff *skb; | 1120 | struct sk_buff *skb; |
802 | 1121 | ||
803 | for (i = 0; i < queue->limit; i++) { | 1122 | for (i = 0; i < queue->limit; i++) { |
804 | skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]); | 1123 | skb = rt2x00queue_alloc_rxskb(&queue->entries[i]); |
805 | if (!skb) | 1124 | if (!skb) |
806 | return -ENOMEM; | 1125 | return -ENOMEM; |
807 | queue->entries[i].skb = skb; | 1126 | queue->entries[i].skb = skb; |
@@ -829,14 +1148,14 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
829 | if (status) | 1148 | if (status) |
830 | goto exit; | 1149 | goto exit; |
831 | 1150 | ||
832 | if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { | 1151 | if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { |
833 | status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1], | 1152 | status = rt2x00queue_alloc_entries(rt2x00dev->atim, |
834 | rt2x00dev->ops->atim); | 1153 | rt2x00dev->ops->atim); |
835 | if (status) | 1154 | if (status) |
836 | goto exit; | 1155 | goto exit; |
837 | } | 1156 | } |
838 | 1157 | ||
839 | status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx); | 1158 | status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx); |
840 | if (status) | 1159 | if (status) |
841 | goto exit; | 1160 | goto exit; |
842 | 1161 | ||
@@ -854,7 +1173,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
854 | { | 1173 | { |
855 | struct data_queue *queue; | 1174 | struct data_queue *queue; |
856 | 1175 | ||
857 | rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx); | 1176 | rt2x00queue_free_skbs(rt2x00dev->rx); |
858 | 1177 | ||
859 | queue_for_each(rt2x00dev, queue) { | 1178 | queue_for_each(rt2x00dev, queue) { |
860 | kfree(queue->entries); | 1179 | kfree(queue->entries); |
@@ -865,7 +1184,8 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
865 | static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, | 1184 | static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, |
866 | struct data_queue *queue, enum data_queue_qid qid) | 1185 | struct data_queue *queue, enum data_queue_qid qid) |
867 | { | 1186 | { |
868 | spin_lock_init(&queue->lock); | 1187 | mutex_init(&queue->status_lock); |
1188 | spin_lock_init(&queue->index_lock); | ||
869 | 1189 | ||
870 | queue->rt2x00dev = rt2x00dev; | 1190 | queue->rt2x00dev = rt2x00dev; |
871 | queue->qid = qid; | 1191 | queue->qid = qid; |
@@ -880,7 +1200,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
880 | struct data_queue *queue; | 1200 | struct data_queue *queue; |
881 | enum data_queue_qid qid; | 1201 | enum data_queue_qid qid; |
882 | unsigned int req_atim = | 1202 | unsigned int req_atim = |
883 | !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); | 1203 | !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); |
884 | 1204 | ||
885 | /* | 1205 | /* |
886 | * We need the following queues: | 1206 | * We need the following queues: |
@@ -891,7 +1211,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
891 | */ | 1211 | */ |
892 | rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; | 1212 | rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; |
893 | 1213 | ||
894 | queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); | 1214 | queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); |
895 | if (!queue) { | 1215 | if (!queue) { |
896 | ERROR(rt2x00dev, "Queue allocation failed.\n"); | 1216 | ERROR(rt2x00dev, "Queue allocation failed.\n"); |
897 | return -ENOMEM; | 1217 | return -ENOMEM; |
@@ -903,11 +1223,12 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
903 | rt2x00dev->rx = queue; | 1223 | rt2x00dev->rx = queue; |
904 | rt2x00dev->tx = &queue[1]; | 1224 | rt2x00dev->tx = &queue[1]; |
905 | rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; | 1225 | rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; |
1226 | rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; | ||
906 | 1227 | ||
907 | /* | 1228 | /* |
908 | * Initialize queue parameters. | 1229 | * Initialize queue parameters. |
909 | * RX: qid = QID_RX | 1230 | * RX: qid = QID_RX |
910 | * TX: qid = QID_AC_BE + index | 1231 | * TX: qid = QID_AC_VO + index |
911 | * TX: cw_min: 2^5 = 32. | 1232 | * TX: cw_min: 2^5 = 32. |
912 | * TX: cw_max: 2^10 = 1024. | 1233 | * TX: cw_max: 2^10 = 1024. |
913 | * BCN: qid = QID_BEACON | 1234 | * BCN: qid = QID_BEACON |
@@ -915,13 +1236,13 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
915 | */ | 1236 | */ |
916 | rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX); | 1237 | rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX); |
917 | 1238 | ||
918 | qid = QID_AC_BE; | 1239 | qid = QID_AC_VO; |
919 | tx_queue_for_each(rt2x00dev, queue) | 1240 | tx_queue_for_each(rt2x00dev, queue) |
920 | rt2x00queue_init(rt2x00dev, queue, qid++); | 1241 | rt2x00queue_init(rt2x00dev, queue, qid++); |
921 | 1242 | ||
922 | rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON); | 1243 | rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON); |
923 | if (req_atim) | 1244 | if (req_atim) |
924 | rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM); | 1245 | rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM); |
925 | 1246 | ||
926 | return 0; | 1247 | return 0; |
927 | } | 1248 | } |
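rt2x00queue_allocate() above places all queues in one array: RX first, then the TX queues, the beacon queue, and optionally the ATIM queue. A short sketch of that index arithmetic (not part of the patch; four TX queues assumed, as in typical rt2x00 devices) is:

        /*
         * Illustrative sketch only: the queue array layout used by
         * rt2x00queue_allocate(), with rx/tx/bcn/atim pointing into one
         * contiguous allocation.
         */
        #include <stdio.h>

        int main(void)
        {
                unsigned int tx_queues = 4;     /* QID_AC_VO .. QID_AC_BK (assumed) */
                unsigned int req_atim = 1;      /* 1 if the device needs an ATIM queue */
                unsigned int data_queues = 2 + tx_queues + req_atim;

                printf("total queues : %u\n", data_queues);
                printf("rx   -> index 0\n");
                printf("tx   -> index 1 .. %u\n", tx_queues);
                printf("bcn  -> index %u\n", 1 + tx_queues);
                if (req_atim)
                        printf("atim -> index %u\n", 2 + tx_queues);
                return 0;
        }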