aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/rt2x00/rt2x00queue.c
diff options
context:
space:
mode:
authorIvo van Doorn <IvDoorn@gmail.com>2011-04-18 09:27:06 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-04-19 15:39:11 -0400
commit7dab73b37f5e8885cb73efd25e73861f9b4f0246 (patch)
tree3c09412e1ec0b02eaf193879aed12db0f9874f7c /drivers/net/wireless/rt2x00/rt2x00queue.c
parent62fe778412b36791b7897cfa139342906fbbf07b (diff)
rt2x00: Split rt2x00dev->flags
The number of flags defined for the rt2x00dev->flags field, has been growing over the years. Currently we are approaching the maximum number of bits which are available in the field. A secondary problem, is that one part of the field are initialized only during boot, because the driver requirements are initialized or device requirements are loaded from the EEPROM. In both cases, the flags are fixed and will not change during device operation. The other flags are the device state, and will change frequently. So far this resulted in the fact that for some flags, the atomic bit accessors are used, while for the others the non-atomic variants are used. By splitting the flags up into a "flags" and "cap_flags" we can put all flags which are fixed inside "cap_flags". This field can then be read non-atomically. In the "flags" field we keep the device state, which is going to be read atomically. This adds more room for more flags in the future, and sanitizes the field access methods. Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com> Acked-by: Helmut Schaa <helmut.schaa@googlemail.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c20
1 files changed, 10 insertions, 10 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9fc4a1ec4b43..d03eef28f036 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -60,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
60 * at least 8 bytes available in headroom for IV/EIV 60 * at least 8 bytes available in headroom for IV/EIV
61 * and 8 bytes for ICV data as tailroom. 61 * and 8 bytes for ICV data as tailroom.
62 */ 62 */
63 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 63 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
64 head_size += 8; 64 head_size += 8;
65 tail_size += 8; 65 tail_size += 8;
66 } 66 }
@@ -86,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
86 memset(skbdesc, 0, sizeof(*skbdesc)); 86 memset(skbdesc, 0, sizeof(*skbdesc));
87 skbdesc->entry = entry; 87 skbdesc->entry = entry;
88 88
89 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) { 89 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
90 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, 90 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
91 skb->data, 91 skb->data,
92 skb->len, 92 skb->len,
@@ -213,7 +213,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
213 213
214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
215 215
216 if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags)) 216 if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
217 return; 217 return;
218 218
219 /* 219 /*
@@ -396,7 +396,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
396 rt2x00crypto_create_tx_descriptor(entry, txdesc); 396 rt2x00crypto_create_tx_descriptor(entry, txdesc);
397 rt2x00queue_create_tx_descriptor_seq(entry, txdesc); 397 rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
398 398
399 if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags)) 399 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
400 rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); 400 rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
401 else 401 else
402 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 402 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
@@ -436,7 +436,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
436 /* 436 /*
437 * Map the skb to DMA. 437 * Map the skb to DMA.
438 */ 438 */
439 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) 439 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
440 rt2x00queue_map_txskb(entry); 440 rt2x00queue_map_txskb(entry);
441 441
442 return 0; 442 return 0;
@@ -529,7 +529,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
529 */ 529 */
530 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && 530 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
531 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { 531 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
532 if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags)) 532 if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
533 rt2x00crypto_tx_copy_iv(skb, &txdesc); 533 rt2x00crypto_tx_copy_iv(skb, &txdesc);
534 else 534 else
535 rt2x00crypto_tx_remove_iv(skb, &txdesc); 535 rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -543,9 +543,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
543 * PCI and USB devices, while header alignment only is valid 543 * PCI and USB devices, while header alignment only is valid
544 * for PCI devices. 544 * for PCI devices.
545 */ 545 */
546 if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags)) 546 if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
547 rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length); 547 rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
548 else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) 548 else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
549 rt2x00queue_align_frame(entry->skb); 549 rt2x00queue_align_frame(entry->skb);
550 550
551 /* 551 /*
@@ -1069,7 +1069,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1069 if (status) 1069 if (status)
1070 goto exit; 1070 goto exit;
1071 1071
1072 if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { 1072 if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
1073 status = rt2x00queue_alloc_entries(rt2x00dev->atim, 1073 status = rt2x00queue_alloc_entries(rt2x00dev->atim,
1074 rt2x00dev->ops->atim); 1074 rt2x00dev->ops->atim);
1075 if (status) 1075 if (status)
@@ -1121,7 +1121,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1121 struct data_queue *queue; 1121 struct data_queue *queue;
1122 enum data_queue_qid qid; 1122 enum data_queue_qid qid;
1123 unsigned int req_atim = 1123 unsigned int req_atim =
1124 !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1124 !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1125 1125
1126 /* 1126 /*
1127 * We need the following queues: 1127 * We need the following queues: