Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00pci.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c  287
1 file changed, 147 insertions, 140 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 804a9980055d..7867ec64bd2c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2007 rt2x00 SourceForge Project
+	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -32,64 +32,21 @@
 #include "rt2x00pci.h"
 
 /*
- * Beacon handlers.
- */
-int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
-			    struct ieee80211_tx_control *control)
-{
-	struct rt2x00_dev *rt2x00dev = hw->priv;
-	struct skb_desc *desc;
-	struct data_ring *ring;
-	struct data_entry *entry;
-
-	/*
-	 * Just in case mac80211 doesn't set this correctly,
-	 * but we need this queue set for the descriptor
-	 * initialization.
-	 */
-	control->queue = IEEE80211_TX_QUEUE_BEACON;
-	ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
-	entry = rt2x00_get_data_entry(ring);
-
-	/*
-	 * Fill in skb descriptor
-	 */
-	desc = get_skb_desc(skb);
-	desc->desc_len = ring->desc_size;
-	desc->data_len = skb->len;
-	desc->desc = entry->priv;
-	desc->data = skb->data;
-	desc->ring = ring;
-	desc->entry = entry;
-
-	memcpy(entry->data_addr, skb->data, skb->len);
-	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
-
-	/*
-	 * Enable beacon generation.
-	 */
-	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);
-
-/*
  * TX data handlers.
  */
 int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
-			    struct data_ring *ring, struct sk_buff *skb,
+			    struct data_queue *queue, struct sk_buff *skb,
 			    struct ieee80211_tx_control *control)
 {
-	struct data_entry *entry = rt2x00_get_data_entry(ring);
-	__le32 *txd = entry->priv;
-	struct skb_desc *desc;
+	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
+	struct skb_frame_desc *skbdesc;
 	u32 word;
 
-	if (rt2x00_ring_full(ring))
+	if (rt2x00queue_full(queue))
 		return -EINVAL;
 
-	rt2x00_desc_read(txd, 0, &word);
+	rt2x00_desc_read(priv_tx->desc, 0, &word);
 
 	if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
 	    rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
@@ -103,18 +60,18 @@ int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Fill in skb descriptor
 	 */
-	desc = get_skb_desc(skb);
-	desc->desc_len = ring->desc_size;
-	desc->data_len = skb->len;
-	desc->desc = entry->priv;
-	desc->data = skb->data;
-	desc->ring = ring;
-	desc->entry = entry;
-
-	memcpy(entry->data_addr, skb->data, skb->len);
+	skbdesc = get_skb_frame_desc(skb);
+	skbdesc->data = skb->data;
+	skbdesc->data_len = skb->len;
+	skbdesc->desc = priv_tx->desc;
+	skbdesc->desc_len = queue->desc_size;
+	skbdesc->entry = entry;
+
+	memcpy(&priv_tx->control, control, sizeof(priv_tx->control));
+	memcpy(priv_tx->data, skb->data, skb->len);
 	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
 
-	rt2x00_ring_index_inc(ring);
+	rt2x00queue_index_inc(queue, Q_INDEX);
 
 	return 0;
 }
@@ -125,29 +82,28 @@ EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
  */
 void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring = rt2x00dev->rx;
-	struct data_entry *entry;
-	struct sk_buff *skb;
+	struct data_queue *queue = rt2x00dev->rx;
+	struct queue_entry *entry;
+	struct queue_entry_priv_pci_rx *priv_rx;
 	struct ieee80211_hdr *hdr;
-	struct skb_desc *skbdesc;
-	struct rxdata_entry_desc desc;
+	struct skb_frame_desc *skbdesc;
+	struct rxdone_entry_desc rxdesc;
 	int header_size;
-	__le32 *rxd;
 	int align;
 	u32 word;
 
 	while (1) {
-		entry = rt2x00_get_data_entry(ring);
-		rxd = entry->priv;
-		rt2x00_desc_read(rxd, 0, &word);
+		entry = rt2x00queue_get_entry(queue, Q_INDEX);
+		priv_rx = entry->priv_data;
+		rt2x00_desc_read(priv_rx->desc, 0, &word);
 
 		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
 			break;
 
-		memset(&desc, 0, sizeof(desc));
-		rt2x00dev->ops->lib->fill_rxdone(entry, &desc);
+		memset(&rxdesc, 0, sizeof(rxdesc));
+		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
 
-		hdr = (struct ieee80211_hdr *)entry->data_addr;
+		hdr = (struct ieee80211_hdr *)priv_rx->data;
 		header_size =
 		    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
 
@@ -161,66 +117,68 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 		 * Allocate the sk_buffer, initialize it and copy
 		 * all data into it.
 		 */
-		skb = dev_alloc_skb(desc.size + align);
-		if (!skb)
+		entry->skb = dev_alloc_skb(rxdesc.size + align);
+		if (!entry->skb)
 			return;
 
-		skb_reserve(skb, align);
-		memcpy(skb_put(skb, desc.size), entry->data_addr, desc.size);
+		skb_reserve(entry->skb, align);
+		memcpy(skb_put(entry->skb, rxdesc.size),
+		       priv_rx->data, rxdesc.size);
 
 		/*
 		 * Fill in skb descriptor
 		 */
-		skbdesc = get_skb_desc(skb);
-		skbdesc->desc_len = entry->ring->desc_size;
-		skbdesc->data_len = skb->len;
-		skbdesc->desc = entry->priv;
-		skbdesc->data = skb->data;
-		skbdesc->ring = ring;
+		skbdesc = get_skb_frame_desc(entry->skb);
+		memset(skbdesc, 0, sizeof(*skbdesc));
+		skbdesc->data = entry->skb->data;
+		skbdesc->data_len = entry->skb->len;
+		skbdesc->desc = priv_rx->desc;
+		skbdesc->desc_len = queue->desc_size;
 		skbdesc->entry = entry;
 
 		/*
 		 * Send the frame to rt2x00lib for further processing.
 		 */
-		rt2x00lib_rxdone(entry, skb, &desc);
+		rt2x00lib_rxdone(entry, &rxdesc);
 
-		if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
+		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
 			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
-			rt2x00_desc_write(rxd, 0, word);
+			rt2x00_desc_write(priv_rx->desc, 0, word);
 		}
 
-		rt2x00_ring_index_inc(ring);
+		rt2x00queue_index_inc(queue, Q_INDEX);
 	}
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
 
-void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry,
-		      const int tx_status, const int retry)
+void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
+		      struct txdone_entry_desc *txdesc)
 {
+	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
 	u32 word;
 
-	rt2x00lib_txdone(entry, tx_status, retry);
+	txdesc->control = &priv_tx->control;
+	rt2x00lib_txdone(entry, txdesc);
 
 	/*
 	 * Make this entry available for reuse.
 	 */
 	entry->flags = 0;
 
-	rt2x00_desc_read(entry->priv, 0, &word);
+	rt2x00_desc_read(priv_tx->desc, 0, &word);
 	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
 	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
-	rt2x00_desc_write(entry->priv, 0, word);
+	rt2x00_desc_write(priv_tx->desc, 0, word);
 
-	rt2x00_ring_index_done_inc(entry->ring);
+	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
 
 	/*
-	 * If the data ring was full before the txdone handler
+	 * If the data queue was full before the txdone handler
 	 * we must make sure the packet queue in the mac80211 stack
 	 * is reenabled when the txdone handler has finished.
 	 */
-	if (!rt2x00_ring_full(entry->ring))
-		ieee80211_wake_queue(rt2x00dev->hw,
-				     entry->tx_status.control.queue);
+	if (!rt2x00queue_full(entry->queue))
+		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
 
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
@@ -228,73 +186,122 @@ EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
 /*
  * Device initialization handlers.
  */
-#define priv_offset(__ring, __i) \
+#define desc_size(__queue) \
 ({ \
-	ring->data_addr + (i * ring->desc_size); \
+	((__queue)->limit * (__queue)->desc_size);\
+})
+
+#define data_size(__queue) \
+({ \
+	((__queue)->limit * (__queue)->data_size);\
 })
 
-#define data_addr_offset(__ring, __i) \
+#define dma_size(__queue) \
 ({ \
-	(__ring)->data_addr + \
-	((__ring)->stats.limit * (__ring)->desc_size) + \
-	((__i) * (__ring)->data_size); \
+	data_size(__queue) + desc_size(__queue);\
 })
 
-#define data_dma_offset(__ring, __i) \
+#define desc_offset(__queue, __base, __i) \
 ({ \
-	(__ring)->data_dma + \
-	((__ring)->stats.limit * (__ring)->desc_size) + \
-	((__i) * (__ring)->data_size); \
+	(__base) + data_size(__queue) + \
+	((__i) * (__queue)->desc_size); \
 })
 
-static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev,
-			       struct data_ring *ring)
+#define data_offset(__queue, __base, __i) \
+({ \
+	(__base) + \
+	((__i) * (__queue)->data_size); \
+})
+
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+				     struct data_queue *queue)
 {
+	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+	struct queue_entry_priv_pci_rx *priv_rx;
+	struct queue_entry_priv_pci_tx *priv_tx;
+	void *addr;
+	dma_addr_t dma;
+	void *desc_addr;
+	dma_addr_t desc_dma;
+	void *data_addr;
+	dma_addr_t data_dma;
 	unsigned int i;
 
 	/*
 	 * Allocate DMA memory for descriptor and buffer.
 	 */
-	ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev),
-					       rt2x00_get_ring_size(ring),
-					       &ring->data_dma);
-	if (!ring->data_addr)
+	addr = pci_alloc_consistent(pci_dev, dma_size(queue), &dma);
+	if (!addr)
 		return -ENOMEM;
 
+	memset(addr, 0, dma_size(queue));
+
 	/*
-	 * Initialize all ring entries to contain valid
-	 * addresses.
+	 * Initialize all queue entries to contain valid addresses.
 	 */
-	for (i = 0; i < ring->stats.limit; i++) {
-		ring->entry[i].priv = priv_offset(ring, i);
-		ring->entry[i].data_addr = data_addr_offset(ring, i);
-		ring->entry[i].data_dma = data_dma_offset(ring, i);
+	for (i = 0; i < queue->limit; i++) {
+		desc_addr = desc_offset(queue, addr, i);
+		desc_dma = desc_offset(queue, dma, i);
+		data_addr = data_offset(queue, addr, i);
+		data_dma = data_offset(queue, dma, i);
+
+		if (queue->qid == QID_RX) {
+			priv_rx = queue->entries[i].priv_data;
+			priv_rx->desc = desc_addr;
+			priv_rx->desc_dma = desc_dma;
+			priv_rx->data = data_addr;
+			priv_rx->data_dma = data_dma;
+		} else {
+			priv_tx = queue->entries[i].priv_data;
+			priv_tx->desc = desc_addr;
+			priv_tx->desc_dma = desc_dma;
+			priv_tx->data = data_addr;
+			priv_tx->data_dma = data_dma;
+		}
 	}
 
 	return 0;
 }
 
-static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev,
-			       struct data_ring *ring)
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+				     struct data_queue *queue)
 {
-	if (ring->data_addr)
-		pci_free_consistent(rt2x00dev_pci(rt2x00dev),
-				    rt2x00_get_ring_size(ring),
-				    ring->data_addr, ring->data_dma);
-	ring->data_addr = NULL;
+	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+	struct queue_entry_priv_pci_rx *priv_rx;
+	struct queue_entry_priv_pci_tx *priv_tx;
+	void *data_addr;
+	dma_addr_t data_dma;
+
+	if (queue->qid == QID_RX) {
+		priv_rx = queue->entries[0].priv_data;
+		data_addr = priv_rx->data;
+		data_dma = priv_rx->data_dma;
+
+		priv_rx->data = NULL;
+	} else {
+		priv_tx = queue->entries[0].priv_data;
+		data_addr = priv_tx->data;
+		data_dma = priv_tx->data_dma;
+
+		priv_tx->data = NULL;
+	}
+
+	if (data_addr)
+		pci_free_consistent(pci_dev, dma_size(queue),
+				    data_addr, data_dma);
 }
 
 int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
 {
 	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
-	struct data_ring *ring;
+	struct data_queue *queue;
 	int status;
 
 	/*
 	 * Allocate DMA
 	 */
-	ring_for_each(rt2x00dev, ring) {
-		status = rt2x00pci_alloc_dma(rt2x00dev, ring);
+	queue_for_each(rt2x00dev, queue) {
+		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
 		if (status)
 			goto exit;
 	}
@@ -321,7 +328,7 @@ EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
 
 void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring;
+	struct data_queue *queue;
 
 	/*
 	 * Free irq line.
@@ -331,8 +338,8 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
 	/*
 	 * Free DMA
 	 */
-	ring_for_each(rt2x00dev, ring)
-		rt2x00pci_free_dma(rt2x00dev, ring);
+	queue_for_each(rt2x00dev, queue)
+		rt2x00pci_free_queue_dma(rt2x00dev, queue);
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
 
@@ -347,9 +354,9 @@ static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
 	kfree(rt2x00dev->eeprom);
 	rt2x00dev->eeprom = NULL;
 
-	if (rt2x00dev->csr_addr) {
-		iounmap(rt2x00dev->csr_addr);
-		rt2x00dev->csr_addr = NULL;
+	if (rt2x00dev->csr.base) {
+		iounmap(rt2x00dev->csr.base);
+		rt2x00dev->csr.base = NULL;
 	}
 }
 
@@ -357,9 +364,9 @@ static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
 {
 	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
 
-	rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
-				      pci_resource_len(pci_dev, 0));
-	if (!rt2x00dev->csr_addr)
+	rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
+				      pci_resource_len(pci_dev, 0));
+	if (!rt2x00dev->csr.base)
 		goto exit;
 
 	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
@@ -530,5 +537,5 @@ EXPORT_SYMBOL_GPL(rt2x00pci_resume);
  */
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 library");
+MODULE_DESCRIPTION("rt2x00 pci library");
 MODULE_LICENSE("GPL");
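
Note on the new layout macros: the patch replaces the old per-ring offset helpers with desc_size()/data_size()/dma_size() and desc_offset()/data_offset(), so the single pci_alloc_consistent() area now holds all per-entry data buffers first, followed by all descriptors, and each entry's pointers are computed from the shared base. The standalone sketch below only illustrates that offset arithmetic; it is not kernel code, struct fake_queue and its sizes are made up for the example, and calloc() stands in for pci_alloc_consistent(). The same base + offset computation is applied twice in the real patch, once for the CPU virtual address (addr) and once for the DMA bus address (dma).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct data_queue: only the fields the
 * layout macros use. The sizes chosen in main() are arbitrary. */
struct fake_queue {
	unsigned int limit;      /* number of entries in the queue */
	unsigned int data_size;  /* per-entry frame buffer size */
	unsigned int desc_size;  /* per-entry descriptor size */
};

/* Same arithmetic as desc_size()/data_size()/dma_size() in the patch:
 * the shared area is laid out as [ all data buffers ][ all descriptors ]. */
static size_t q_data_size(const struct fake_queue *q) { return (size_t)q->limit * q->data_size; }
static size_t q_desc_size(const struct fake_queue *q) { return (size_t)q->limit * q->desc_size; }
static size_t q_dma_size(const struct fake_queue *q)  { return q_data_size(q) + q_desc_size(q); }

/* data_offset(): entry i's frame buffer starts i * data_size into the area. */
static void *data_off(const struct fake_queue *q, void *base, unsigned int i)
{
	return (char *)base + (size_t)i * q->data_size;
}

/* desc_offset(): descriptors follow the whole data region. */
static void *desc_off(const struct fake_queue *q, void *base, unsigned int i)
{
	return (char *)base + q_data_size(q) + (size_t)i * q->desc_size;
}

int main(void)
{
	struct fake_queue q = { .limit = 4, .data_size = 2432, .desc_size = 48 };
	/* calloc() stands in for pci_alloc_consistent() + the memset()
	 * the patch adds after allocation. */
	void *base = calloc(1, q_dma_size(&q));
	unsigned int i;

	if (!base)
		return 1;

	for (i = 0; i < q.limit; i++)
		printf("entry %u: data at +%6zu, desc at +%6zu\n", i,
		       (size_t)((char *)data_off(&q, base, i) - (char *)base),
		       (size_t)((char *)desc_off(&q, base, i) - (char *)base));

	free(base);
	return 0;
}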