Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
 drivers/net/wireless/rt2x00/rt2x00queue.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
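Every hunk below makes the same mechanical substitution: an open-coded test_bit() on rt2x00dev->cap_flags becomes a call to the rt2x00_has_cap_flag() helper. For context, a minimal sketch of what that helper presumably looks like in rt2x00.h; the definition itself is not part of this diff, so the exact signature and enum name are assumptions:

static inline bool rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
				       enum rt2x00_capability_flags cap_flag)
{
	/* Same test_bit() the call sites used to open-code. */
	return test_bit(cap_flag, &rt2x00dev->cap_flags);
}

Assuming the helper returns bool, callers no longer need the !! double negation, which is why the final hunk in rt2x00queue_allocate() drops it.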
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 66ff36447b94..68b620b2462f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -85,7 +85,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
-	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
 		dma_addr_t skb_dma;
 
 		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
@@ -198,7 +198,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 
 	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 
-	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
+	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
 		 * seqno on retransmited data (non-QOS) frames. To workaround
@@ -484,7 +484,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
 	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
 
-	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
 		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
 						    sta, hwrate);
 	else
@@ -526,7 +526,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 	/*
 	 * Map the skb to DMA.
 	 */
-	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
 	    rt2x00queue_map_txskb(entry))
 		return -ENOMEM;
 
@@ -646,7 +646,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -660,9 +660,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 * PCI and USB devices, while header alignment only is valid
 	 * for PCI devices.
 	 */
-	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
 		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
-	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
 		rt2x00queue_align_frame(skb);
 
 	/*
@@ -1178,7 +1178,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
 	if (status)
 		goto exit;
 
-	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
 		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
 		if (status)
 			goto exit;
@@ -1234,7 +1234,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	struct data_queue *queue;
 	enum data_queue_qid qid;
 	unsigned int req_atim =
-	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
+	    rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
 
 	/*
 	 * We need the following queues: