Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
 drivers/net/wireless/rt2x00/rt2x00queue.c | 94 ++++++++++-----------------
 1 file changed, 31 insertions(+), 63 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 451d410ecdae..eaec6bd93ed5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -55,14 +55,12 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * For IV/EIV/ICV assembly we must make sure there is
 	 * at least 8 bytes available in headroom for IV/EIV
-	 * and 4 bytes for ICV data as tailroom.
+	 * and 8 bytes for ICV data as tailroom.
 	 */
-#ifdef CONFIG_RT2X00_LIB_CRYPTO
 	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
 		head_size += 8;
-		tail_size += 4;
+		tail_size += 8;
 	}
-#endif /* CONFIG_RT2X00_LIB_CRYPTO */
 
 	/*
 	 * Allocate skbuffer.
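The head/tail sizing feeds the allocation a few lines below this hunk. A minimal sketch of the usual pattern, assuming frame_size is the queue's fixed data size (dev_alloc_skb(), skb_reserve() and skb_put() are the standard skb primitives):

        struct sk_buff *skb;

        skb = dev_alloc_skb(frame_size + head_size + tail_size);
        if (!skb)
                return NULL;

        /* Park the data pointer past the headroom so the hardware can
         * prepend IV/EIV; the tailroom beyond skb_put() takes the ICV. */
        skb_reserve(skb, head_size);
        skb_put(skb, frame_size);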
@@ -174,7 +172,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 	txdesc->cw_max = entry->queue->cw_max;
 	txdesc->aifs = entry->queue->aifs;
 
-	/* Data length + CRC + IV/EIV/ICV/MMIC (when using encryption) */
+	/* Data length + CRC */
 	data_length = entry->skb->len + 4;
 
 	/*
@@ -183,34 +181,17 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
 		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 
-#ifdef CONFIG_RT2X00_LIB_CRYPTO
 	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
 	    !entry->skb->do_not_encrypt) {
-		struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
-
-		__set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
-
-		txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
-
-		if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
-			__set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
-
-		txdesc->key_idx = hw_key->hw_key_idx;
-		txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
+		/* Apply crypto specific descriptor information */
+		rt2x00crypto_create_tx_descriptor(entry, txdesc);
 
 		/*
 		 * Extend frame length to include all encryption overhead
 		 * that will be added by the hardware.
 		 */
 		data_length += rt2x00crypto_tx_overhead(tx_info);
-
-		if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
-			__set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
-
-		if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
-			__set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
 	}
-#endif /* CONFIG_RT2X00_LIB_CRYPTO */
 
 	/*
 	 * Check if this is a RTS/CTS frame
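The removed key-programming logic is not lost; it moves behind rt2x00crypto_create_tx_descriptor(), which lives in the crypto library object that is only compiled with CONFIG_RT2X00_LIB_CRYPTO (presumably with an empty inline stub otherwise), making the #ifdef here redundant. Reassembled from the deleted lines, the helper plausibly reads:

        void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
                                               struct txentry_desc *txdesc)
        {
                struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
                struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;

                __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);

                txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);

                if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
                        __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);

                txdesc->key_idx = hw_key->hw_key_idx;
                txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);

                /* Ask the hardware to generate the IV/MMIC only when
                 * mac80211 was not asked to do it in software. */
                if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
                        __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);

                if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
                        __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
        }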
@@ -230,8 +211,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 	/*
 	 * Determine retry information.
 	 */
-	txdesc->retry_limit = tx_info->control.retry_limit;
-	if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
+	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
+	if (txdesc->retry_limit >= rt2x00dev->long_retry)
 		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
 
 	/*
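For context: mac80211's multi-rate retry API removed control.retry_limit and IEEE80211_TX_CTL_LONG_RETRY_LIMIT; rates[0].count now holds the total number of transmission attempts at the first rate, so the hardware retry limit is one less than that. A worked example of the conversion (the long_retry value of 4 is illustrative only):

        /* Example: rate control asks for 4 attempts at rates[0],
         * i.e. 1 initial transmission + 3 hardware retries. */
        txdesc->retry_limit = tx_info->control.rates[0].count - 1; /* = 3 */

        /* With e.g. rt2x00dev->long_retry == 4 this frame stays in
         * short-retry mode; a count of 6 would flip it to long-retry. */
        if (txdesc->retry_limit >= rt2x00dev->long_retry)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);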
@@ -312,8 +293,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 	/*
 	 * Convert length to microseconds.
 	 */
-	residual = get_duration_res(data_length, hwrate->bitrate);
-	duration = get_duration(data_length, hwrate->bitrate);
+	residual = GET_DURATION_RES(data_length, hwrate->bitrate);
+	duration = GET_DURATION(data_length, hwrate->bitrate);
 
 	if (residual != 0) {
 		duration++;
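GET_DURATION/GET_DURATION_RES are presumably macro versions of the old inline helpers, hoisted into a shared header. Assuming hwrate->bitrate is stored in units of 100 kbit/s, as is common for hardware rate tables, the definitions would be:

        /* duration in usec: (bytes * 8 bits * 10) / rate-in-100kbit/s */
        #define GET_DURATION(__size, __rate) \
                (((__size) * 8 * 10) / (__rate))
        #define GET_DURATION_RES(__size, __rate) \
                (((__size) * 8 * 10) % (__rate))

The residual != 0 branch that follows then simply rounds the integer division up to a whole microsecond.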
@@ -371,13 +352,15 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 
 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
 {
+	struct ieee80211_tx_info *tx_info;
 	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
 	struct txentry_desc txdesc;
 	struct skb_frame_desc *skbdesc;
 	unsigned int iv_len = 0;
+	u8 rate_idx, rate_flags;
 
 	if (unlikely(rt2x00queue_full(queue)))
-		return -EINVAL;
+		return -ENOBUFS;
 
 	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
 		ERROR(queue->rt2x00dev,
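Switching from -EINVAL to -ENOBUFS (and to -EIO further down) makes the return value distinguish "queue full" from genuine write failures, while the caller still reports success to mac80211 and drops the frame. A hypothetical caller sketch, assuming names like qid for the mapped mac80211 queue index:

        if (unlikely(rt2x00queue_write_tx_frame(queue, skb))) {
                /* The frame is dropped here; mac80211 still sees
                 * NETDEV_TX_OK because skb ownership passed to the
                 * driver the moment ->tx() was called. */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Throttle mac80211 before the queue actually overflows */
        if (rt2x00queue_full(queue))
                ieee80211_stop_queue(rt2x00dev->hw, qid);

        return NETDEV_TX_OK;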
@@ -399,13 +382,18 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
 		iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len;
 
 	/*
-	 * All information is retreived from the skb->cb array,
+	 * All information is retrieved from the skb->cb array,
 	 * now we should claim ownership of the driver part of that
-	 * array.
+	 * array, preserving the bitrate index and flags.
 	 */
-	skbdesc = get_skb_frame_desc(entry->skb);
+	tx_info = IEEE80211_SKB_CB(skb);
+	rate_idx = tx_info->control.rates[0].idx;
+	rate_flags = tx_info->control.rates[0].flags;
+	skbdesc = get_skb_frame_desc(skb);
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
+	skbdesc->tx_rate_idx = rate_idx;
+	skbdesc->tx_rate_flags = rate_flags;
 
 	/*
 	 * When hardware encryption is supported, and this frame
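Saving rates[0].idx and rates[0].flags before the memset() matters because skb_frame_desc occupies the driver area of skb->cb, and in ieee80211_tx_info that area shares a union with the control block holding those rates. A sketch of the accessor under that assumption:

        static inline struct skb_frame_desc *get_skb_frame_desc(struct sk_buff *skb)
        {
                /* The driver descriptor must fit in the cb[] driver area;
                 * clearing it clobbers tx_info->control, hence the rate
                 * index/flags are extracted first above. */
                BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
                             sizeof(IEEE80211_SKB_CB(skb)->driver_data));
                return (struct skb_frame_desc *)IEEE80211_SKB_CB(skb)->driver_data;
        }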
@@ -414,19 +402,21 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-		rt2x00crypto_tx_remove_iv(skb, iv_len);
+		if (test_bit(CONFIG_CRYPTO_COPY_IV, &queue->rt2x00dev->flags))
+			rt2x00crypto_tx_copy_iv(skb, iv_len);
+		else
+			rt2x00crypto_tx_remove_iv(skb, iv_len);
 	}
 
 	/*
 	 * It could be possible that the queue was corrupted and this
-	 * call failed. Just drop the frame, we cannot rollback and pass
-	 * the frame to mac80211 because the skb->cb has now been tainted.
+	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
+	 * this frame will simply be dropped.
 	 */
 	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
 		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
-		dev_kfree_skb_any(entry->skb);
 		entry->skb = NULL;
-		return 0;
+		return -EIO;
 	}
 
 	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
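CONFIG_CRYPTO_COPY_IV apparently distinguishes hardware that expects the mac80211-generated IV to remain in the frame from hardware that needs it stripped before DMA. A sketch of the two helpers under that assumption; the skbdesc->iv stash is a hypothetical field for preserving the IV for TX done handling:

        void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len)
        {
                struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
                unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);

                /* Record the IV (it follows the 802.11 header), but
                 * leave the frame data itself untouched. */
                memcpy(skbdesc->iv, skb->data + header_length, iv_len);
        }

        void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
        {
                struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
                unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);

                memcpy(skbdesc->iv, skb->data + header_length, iv_len);

                /* Close the gap left by the IV: shift the 802.11
                 * header forward and trim the front of the skb. */
                memmove(skb->data + iv_len, skb->data, header_length);
                skb_pull(skb, iv_len);
        }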
@@ -556,7 +546,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 		queue->length++;
 	} else if (index == Q_INDEX_DONE) {
 		queue->length--;
-		queue->count ++;
+		queue->count++;
 	}
 
 	spin_unlock_irqrestore(&queue->lock, irqflags);
@@ -575,40 +565,18 @@ static void rt2x00queue_reset(struct data_queue *queue)
 	spin_unlock_irqrestore(&queue->lock, irqflags);
 }
 
-void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
-{
-	struct data_queue *queue = rt2x00dev->rx;
-	unsigned int i;
-
-	rt2x00queue_reset(queue);
-
-	if (!rt2x00dev->ops->lib->init_rxentry)
-		return;
-
-	for (i = 0; i < queue->limit; i++) {
-		queue->entries[i].flags = 0;
-
-		rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
-						  &queue->entries[i]);
-	}
-}
-
-void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
+void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue;
 	unsigned int i;
 
-	txall_queue_for_each(rt2x00dev, queue) {
+	queue_for_each(rt2x00dev, queue) {
 		rt2x00queue_reset(queue);
 
-		if (!rt2x00dev->ops->lib->init_txentry)
-			continue;
-
 		for (i = 0; i < queue->limit; i++) {
 			queue->entries[i].flags = 0;
 
-			rt2x00dev->ops->lib->init_txentry(rt2x00dev,
-							  &queue->entries[i]);
+			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
 		}
 	}
 }
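queue_for_each widens the iteration from TX-only (txall_queue_for_each) to every queue the device owns, which is what lets the RX and TX initializers collapse into one function; calling clear_entry unconditionally also implies every rt2x00 driver now provides that callback in place of the separate init_rxentry/init_txentry hooks. A sketch of the iterator, assuming all data_queue structures sit in one contiguous array of rt2x00dev->data_queues entries starting at rt2x00dev->rx (the count field name is an assumption):

        #define queue_end(__dev) \
                (&(__dev)->rx[(__dev)->data_queues])

        /* Walk RX, TX, beacon and ATIM queues alike */
        #define queue_for_each(__dev, __entry) \
                for ((__entry) = (__dev)->rx; \
                     (__entry) < queue_end(__dev); \
                     (__entry)++)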