Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00pci.c')

 drivers/net/wireless/rt2x00/rt2x00pci.c | 126 +++----------------
 1 file changed, 19 insertions(+), 107 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 8d6ad18d3890..adf2876ed8ab 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -60,12 +60,8 @@ int rt2x00pci_write_tx_data(struct queue_entry *entry)
 	 * Fill in skb descriptor
 	 */
 	skbdesc = get_skb_frame_desc(entry->skb);
-	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->desc = entry_priv->desc;
 	skbdesc->desc_len = entry->queue->desc_size;
-	skbdesc->entry = entry;
-
-	memcpy(entry_priv->data, entry->skb->data, entry->skb->len);
 
 	return 0;
 }
@@ -80,7 +76,6 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 	struct queue_entry *entry;
 	struct queue_entry_priv_pci *entry_priv;
 	struct skb_frame_desc *skbdesc;
-	struct rxdone_entry_desc rxdesc;
 	u32 word;
 
 	while (1) {
@@ -91,110 +86,27 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
 			break;
 
-		memset(&rxdesc, 0, sizeof(rxdesc));
-		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
-
 		/*
-		 * Allocate the sk_buffer and copy all data into it.
-		 */
-		entry->skb = rt2x00queue_alloc_rxskb(queue);
-		if (!entry->skb)
-			return;
-
-		memcpy(entry->skb->data, entry_priv->data, rxdesc.size);
-		skb_trim(entry->skb, rxdesc.size);
-
-		/*
-		 * Fill in skb descriptor
+		 * Fill in desc fields of the skb descriptor
 		 */
 		skbdesc = get_skb_frame_desc(entry->skb);
-		memset(skbdesc, 0, sizeof(*skbdesc));
 		skbdesc->desc = entry_priv->desc;
-		skbdesc->desc_len = queue->desc_size;
-		skbdesc->entry = entry;
+		skbdesc->desc_len = entry->queue->desc_size;
 
 		/*
 		 * Send the frame to rt2x00lib for further processing.
 		 */
-		rt2x00lib_rxdone(entry, &rxdesc);
-
-		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
-			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
-			rt2x00_desc_write(entry_priv->desc, 0, word);
-		}
-
-		rt2x00queue_index_inc(queue, Q_INDEX);
+		rt2x00lib_rxdone(rt2x00dev, entry);
 	}
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
 
-void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
-		      struct txdone_entry_desc *txdesc)
-{
-	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
-	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
-	u32 word;
-
-	rt2x00lib_txdone(entry, txdesc);
-
-	/*
-	 * Make this entry available for reuse.
-	 */
-	entry->flags = 0;
-
-	rt2x00_desc_read(entry_priv->desc, 0, &word);
-	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
-	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
-	rt2x00_desc_write(entry_priv->desc, 0, word);
-
-	__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
-	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
-
-	/*
-	 * If the data queue was below the threshold before the txdone
-	 * handler we must make sure the packet queue in the mac80211 stack
-	 * is reenabled when the txdone handler has finished.
-	 */
-	if (!rt2x00queue_threshold(entry->queue))
-		ieee80211_wake_queue(rt2x00dev->hw, qid);
-
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
-
 /*
  * Device initialization handlers.
  */
-#define desc_size(__queue) \
-({ \
-	((__queue)->limit * (__queue)->desc_size);\
-})
-
-#define data_size(__queue) \
-({ \
-	((__queue)->limit * (__queue)->data_size);\
-})
-
-#define dma_size(__queue) \
-({ \
-	data_size(__queue) + desc_size(__queue);\
-})
-
-#define desc_offset(__queue, __base, __i) \
-({ \
-	(__base) + data_size(__queue) + \
-	    ((__i) * (__queue)->desc_size); \
-})
-
-#define data_offset(__queue, __base, __i) \
-({ \
-	(__base) + \
-	    ((__i) * (__queue)->data_size); \
-})
-
 static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
 				     struct data_queue *queue)
 {
-	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
 	struct queue_entry_priv_pci *entry_priv;
 	void *addr;
 	dma_addr_t dma;
@@ -203,21 +115,21 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Allocate DMA memory for descriptor and buffer.
 	 */
-	addr = pci_alloc_consistent(pci_dev, dma_size(queue), &dma);
+	addr = dma_alloc_coherent(rt2x00dev->dev,
+				  queue->limit * queue->desc_size,
+				  &dma, GFP_KERNEL | GFP_DMA);
 	if (!addr)
 		return -ENOMEM;
 
-	memset(addr, 0, dma_size(queue));
+	memset(addr, 0, queue->limit * queue->desc_size);
 
 	/*
 	 * Initialize all queue entries to contain valid addresses.
 	 */
 	for (i = 0; i < queue->limit; i++) {
 		entry_priv = queue->entries[i].priv_data;
-		entry_priv->desc = desc_offset(queue, addr, i);
-		entry_priv->desc_dma = desc_offset(queue, dma, i);
-		entry_priv->data = data_offset(queue, addr, i);
-		entry_priv->data_dma = data_offset(queue, dma, i);
+		entry_priv->desc = addr + i * queue->desc_size;
+		entry_priv->desc_dma = dma + i * queue->desc_size;
 	}
 
 	return 0;
@@ -226,19 +138,19 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
 static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
 				     struct data_queue *queue)
 {
-	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
 	struct queue_entry_priv_pci *entry_priv =
 	    queue->entries[0].priv_data;
 
-	if (entry_priv->data)
-		pci_free_consistent(pci_dev, dma_size(queue),
-				    entry_priv->data, entry_priv->data_dma);
-	entry_priv->data = NULL;
+	if (entry_priv->desc)
+		dma_free_coherent(rt2x00dev->dev,
+				  queue->limit * queue->desc_size,
+				  entry_priv->desc, entry_priv->desc_dma);
+	entry_priv->desc = NULL;
 }
 
 int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
 {
-	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);
 	struct data_queue *queue;
 	int status;
 
@@ -279,7 +191,7 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
 	/*
 	 * Free irq line.
 	 */
-	free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);
+	free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev);
 
 	/*
 	 * Free DMA
@@ -308,7 +220,7 @@ static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
 
 static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
 {
-	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+	struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);
 
 	rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
 				      pci_resource_len(pci_dev, 0));
@@ -357,7 +269,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	if (pci_set_mwi(pci_dev))
 		ERROR_PROBE("MWI not available.\n");
 
-	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
+	if (dma_set_mask(&pci_dev->dev, DMA_32BIT_MASK)) {
 		ERROR_PROBE("PCI DMA not supported.\n");
 		retval = -EIO;
 		goto exit_disable_device;
@@ -373,7 +285,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	pci_set_drvdata(pci_dev, hw);
 
 	rt2x00dev = hw->priv;
-	rt2x00dev->dev = pci_dev;
+	rt2x00dev->dev = &pci_dev->dev;
 	rt2x00dev->ops = ops;
 	rt2x00dev->hw = hw;
 
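
Note: the core of this change is replacing the desc_size()/data_size()/dma_size()
offset macros and the pci_* DMA wrappers with one dma_alloc_coherent() region per
queue, sliced into per-entry descriptors by plain pointer arithmetic. The sketch
below restates that pattern outside the driver; alloc_queue_descs(),
free_queue_descs() and struct entry_priv_sketch are hypothetical stand-ins for
rt2x00's struct data_queue and struct queue_entry_priv_pci, not code from this
patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Hypothetical stand-in for struct queue_entry_priv_pci. */
struct entry_priv_sketch {
	void *desc;		/* CPU-side pointer to this entry's descriptor */
	dma_addr_t desc_dma;	/* bus address programmed into the device */
};

/*
 * One coherent DMA block per queue: 'limit' descriptors of 'desc_size'
 * bytes each, addressed by pointer arithmetic instead of offset macros.
 */
static int alloc_queue_descs(struct device *dev,
			     struct entry_priv_sketch *entries,
			     unsigned int limit, unsigned int desc_size)
{
	dma_addr_t dma;
	void *addr;
	unsigned int i;

	addr = dma_alloc_coherent(dev, limit * desc_size, &dma,
				  GFP_KERNEL | GFP_DMA);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, limit * desc_size);

	for (i = 0; i < limit; i++) {
		entries[i].desc = addr + i * desc_size;
		entries[i].desc_dma = dma + i * desc_size;
	}
	return 0;
}

/* Matching teardown: free through the first entry, as the patch does. */
static void free_queue_descs(struct device *dev,
			     struct entry_priv_sketch *entries,
			     unsigned int limit, unsigned int desc_size)
{
	if (entries[0].desc)
		dma_free_coherent(dev, limit * desc_size,
				  entries[0].desc, entries[0].desc_dma);
	entries[0].desc = NULL;
}

Because rt2x00dev->dev now holds a generic struct device * rather than the
pci_dev, the allocation path above is bus-agnostic; to_pci_dev() is only needed
at the few spots that still want the PCI view (the irq number, the BAR ioremap
and pci_set_mwi()).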