path: root/drivers/net/wireless/rt2x00/rt2x00pci.c
author     Ivo van Doorn <IvDoorn@gmail.com>  2008-02-05 16:42:23 -0500
committer  John W. Linville <linville@tuxdriver.com>  2008-02-29 15:19:27 -0500
commit     181d6902b6bad978d157e69479c95cc0ff213a76 (patch)
tree       7a90b8a949a50bc8db6b7b5b2d76d5671fb9a89e /drivers/net/wireless/rt2x00/rt2x00pci.c
parent     811aa9cad1bd927999888ab56ed9592519d2fef6 (diff)
rt2x00: Queue handling overhaul
This introduces a big queue handling overhaul, which also renames "ring" to "queue". Queue handling is moved into rt2x00queue.c and its matching header, and kerneldoc is used to improve the rt2x00 library documentation.

Access to the queues is now protected by a spinlock, to prevent race conditions that could corrupt the indexing system of the queue.

Each queue entry allocates x bytes for driver/device specific data; this cleans up the queue structure significantly and improves code readability.

rt2500usb no longer needs 2 entries in the beacon queue to correctly send out the guardian byte; this is now handled in the entry specific structure.

rt61 and rt73 now use the correct descriptor size for beacon frames: since this data is written into the registers, not the entire TXD descriptor is used, but instead a subset of it named TXINFO.

Finally, this also fixes numerous other bugs related to incorrect beacon handling or beacon related code.

Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
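For orientation, the sketch below illustrates the per-entry private data and spinlock-protected index scheme described above. It is a simplified illustration only: the real definitions live in rt2x00queue.h, and the field names and the queue_index_inc() helper here are assumptions, not the exact in-tree code.

/*
 * Rough sketch of the queue/entry layout introduced by this patch
 * (simplified; see rt2x00queue.h for the actual definitions).
 */
#include <linux/spinlock.h>
#include <linux/skbuff.h>

struct data_queue;

enum queue_index {
	Q_INDEX,	/* next entry to hand to the hardware */
	Q_INDEX_DONE,	/* next entry expected back from the hardware */
	Q_INDEX_MAX,
};

struct queue_entry {
	struct data_queue *queue;
	struct sk_buff *skb;
	unsigned int entry_idx;
	unsigned long flags;
	void *priv_data;	/* x bytes of driver/device specific data */
};

struct data_queue {
	spinlock_t lock;	/* protects the index array below */
	unsigned int limit;
	unsigned short index[Q_INDEX_MAX];
	struct queue_entry *entries;
};

/*
 * Advancing an index happens under the queue lock, so concurrent tx and
 * txdone paths cannot corrupt the queue bookkeeping.
 */
static void queue_index_inc(struct data_queue *queue, enum queue_index idx)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);
	queue->index[idx]++;
	if (queue->index[idx] >= queue->limit)
		queue->index[idx] = 0;
	spin_unlock_irqrestore(&queue->lock, irqflags);
}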
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00pci.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 223
1 file changed, 118 insertions(+), 105 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0f2590bc31ec..63cfe33e95da 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -38,9 +38,10 @@ int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 			    struct ieee80211_tx_control *control)
 {
 	struct rt2x00_dev *rt2x00dev = hw->priv;
-	struct skb_desc *desc;
-	struct data_ring *ring;
-	struct data_entry *entry;
+	struct queue_entry_priv_pci_tx *priv_tx;
+	struct skb_frame_desc *skbdesc;
+	struct data_queue *queue;
+	struct queue_entry *entry;
 
 	/*
 	 * Just in case mac80211 doesn't set this correctly,
@@ -48,21 +49,22 @@ int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 	 * initialization.
 	 */
 	control->queue = IEEE80211_TX_QUEUE_BEACON;
-	ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
-	entry = rt2x00_get_data_entry(ring);
+	queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
+	entry = rt2x00queue_get_entry(queue, Q_INDEX);
+	priv_tx = entry->priv_data;
 
 	/*
 	 * Fill in skb descriptor
 	 */
-	desc = get_skb_desc(skb);
-	desc->desc_len = ring->desc_size;
-	desc->data_len = skb->len;
-	desc->desc = entry->priv;
-	desc->data = skb->data;
-	desc->ring = ring;
-	desc->entry = entry;
+	skbdesc = get_skb_frame_desc(skb);
+	memset(skbdesc, 0, sizeof(*skbdesc));
+	skbdesc->data = skb->data;
+	skbdesc->data_len = queue->data_size;
+	skbdesc->desc = priv_tx->desc;
+	skbdesc->desc_len = queue->desc_size;
+	skbdesc->entry = entry;
 
-	memcpy(entry->data_addr, skb->data, skb->len);
+	memcpy(priv_tx->data, skb->data, skb->len);
 	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
 
 	/*
@@ -78,18 +80,18 @@ EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);
  * TX data handlers.
  */
 int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
-			    struct data_ring *ring, struct sk_buff *skb,
+			    struct data_queue *queue, struct sk_buff *skb,
 			    struct ieee80211_tx_control *control)
 {
-	struct data_entry *entry = rt2x00_get_data_entry(ring);
-	__le32 *txd = entry->priv;
-	struct skb_desc *desc;
+	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
+	struct skb_frame_desc *skbdesc;
 	u32 word;
 
-	if (rt2x00_ring_full(ring))
+	if (rt2x00queue_full(queue))
 		return -EINVAL;
 
-	rt2x00_desc_read(txd, 0, &word);
+	rt2x00_desc_read(priv_tx->desc, 0, &word);
 
 	if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
 	    rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
@@ -103,18 +105,18 @@ int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
 	/*
 	 * Fill in skb descriptor
 	 */
-	desc = get_skb_desc(skb);
-	desc->desc_len = ring->desc_size;
-	desc->data_len = skb->len;
-	desc->desc = entry->priv;
-	desc->data = skb->data;
-	desc->ring = ring;
-	desc->entry = entry;
+	skbdesc = get_skb_frame_desc(skb);
+	memset(skbdesc, 0, sizeof(*skbdesc));
+	skbdesc->data = skb->data;
+	skbdesc->data_len = queue->data_size;
+	skbdesc->desc = priv_tx->desc;
+	skbdesc->desc_len = queue->desc_size;
+	skbdesc->entry = entry;
 
-	memcpy(entry->data_addr, skb->data, skb->len);
+	memcpy(priv_tx->data, skb->data, skb->len);
 	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
 
-	rt2x00_ring_index_inc(ring);
+	rt2x00queue_index_inc(queue, Q_INDEX);
 
 	return 0;
 }
@@ -125,29 +127,28 @@ EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
  */
 void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring = rt2x00dev->rx;
-	struct data_entry *entry;
-	struct sk_buff *skb;
+	struct data_queue *queue = rt2x00dev->rx;
+	struct queue_entry *entry;
+	struct queue_entry_priv_pci_rx *priv_rx;
 	struct ieee80211_hdr *hdr;
-	struct skb_desc *skbdesc;
-	struct rxdata_entry_desc desc;
+	struct skb_frame_desc *skbdesc;
+	struct rxdone_entry_desc rxdesc;
 	int header_size;
-	__le32 *rxd;
 	int align;
 	u32 word;
 
 	while (1) {
-		entry = rt2x00_get_data_entry(ring);
-		rxd = entry->priv;
-		rt2x00_desc_read(rxd, 0, &word);
+		entry = rt2x00queue_get_entry(queue, Q_INDEX);
+		priv_rx = entry->priv_data;
+		rt2x00_desc_read(priv_rx->desc, 0, &word);
 
 		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
 			break;
 
-		memset(&desc, 0, sizeof(desc));
-		rt2x00dev->ops->lib->fill_rxdone(entry, &desc);
+		memset(&rxdesc, 0, sizeof(rxdesc));
+		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
 
-		hdr = (struct ieee80211_hdr *)entry->data_addr;
+		hdr = (struct ieee80211_hdr *)priv_rx->data;
 		header_size =
 		    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
 
@@ -161,66 +162,68 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
 		 * Allocate the sk_buffer, initialize it and copy
 		 * all data into it.
 		 */
-		skb = dev_alloc_skb(desc.size + align);
-		if (!skb)
+		entry->skb = dev_alloc_skb(rxdesc.size + align);
+		if (!entry->skb)
 			return;
 
-		skb_reserve(skb, align);
-		memcpy(skb_put(skb, desc.size), entry->data_addr, desc.size);
+		skb_reserve(entry->skb, align);
+		memcpy(skb_put(entry->skb, rxdesc.size),
+		       priv_rx->data, rxdesc.size);
 
 		/*
 		 * Fill in skb descriptor
 		 */
-		skbdesc = get_skb_desc(skb);
-		skbdesc->desc_len = entry->ring->desc_size;
-		skbdesc->data_len = skb->len;
-		skbdesc->desc = entry->priv;
-		skbdesc->data = skb->data;
-		skbdesc->ring = ring;
+		skbdesc = get_skb_frame_desc(entry->skb);
+		memset(skbdesc, 0, sizeof(*skbdesc));
+		skbdesc->data = entry->skb->data;
+		skbdesc->data_len = queue->data_size;
+		skbdesc->desc = priv_rx->desc;
+		skbdesc->desc_len = queue->desc_size;
 		skbdesc->entry = entry;
 
 		/*
 		 * Send the frame to rt2x00lib for further processing.
 		 */
-		rt2x00lib_rxdone(entry, skb, &desc);
+		rt2x00lib_rxdone(entry, &rxdesc);
 
-		if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
+		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
 			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
-			rt2x00_desc_write(rxd, 0, word);
+			rt2x00_desc_write(priv_rx->desc, 0, word);
 		}
 
-		rt2x00_ring_index_inc(ring);
+		rt2x00queue_index_inc(queue, Q_INDEX);
 	}
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
 
-void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry,
-		      const int tx_status, const int retry)
+void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
+		      struct txdone_entry_desc *txdesc)
 {
+	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
 	u32 word;
 
-	rt2x00lib_txdone(entry, tx_status, retry);
+	txdesc->control = &priv_tx->control;
+	rt2x00lib_txdone(entry, txdesc);
 
 	/*
 	 * Make this entry available for reuse.
 	 */
 	entry->flags = 0;
 
-	rt2x00_desc_read(entry->priv, 0, &word);
+	rt2x00_desc_read(priv_tx->desc, 0, &word);
 	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
 	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
-	rt2x00_desc_write(entry->priv, 0, word);
+	rt2x00_desc_write(priv_tx->desc, 0, word);
 
-	rt2x00_ring_index_done_inc(entry->ring);
+	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
 
 	/*
-	 * If the data ring was full before the txdone handler
+	 * If the data queue was full before the txdone handler
 	 * we must make sure the packet queue in the mac80211 stack
 	 * is reenabled when the txdone handler has finished.
 	 */
-	if (!rt2x00_ring_full(entry->ring))
-		ieee80211_wake_queue(rt2x00dev->hw,
-				     entry->tx_status.control.queue);
+	if (!rt2x00queue_full(entry->queue))
+		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
 
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
@@ -228,73 +231,83 @@ EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
 /*
  * Device initialization handlers.
  */
-#define priv_offset(__ring, __i)				\
+#define dma_size(__queue)					\
 ({								\
-	ring->data_addr + (i * ring->desc_size);		\
+	(__queue)->limit *					\
+	    ((__queue)->desc_size + (__queue)->data_size);	\
 })
 
-#define data_addr_offset(__ring, __i)				\
+#define priv_offset(__queue, __base, __i)			\
 ({								\
-	(__ring)->data_addr +					\
-	    ((__ring)->stats.limit * (__ring)->desc_size) +	\
-	    ((__i) * (__ring)->data_size);			\
+	(__base) + ((__i) * (__queue)->desc_size);		\
 })
 
-#define data_dma_offset(__ring, __i)				\
+#define data_addr_offset(__queue, __base, __i)			\
 ({								\
-	(__ring)->data_dma +					\
-	    ((__ring)->stats.limit * (__ring)->desc_size) +	\
-	    ((__i) * (__ring)->data_size);			\
+	(__base) +						\
+	    ((__queue)->limit * (__queue)->desc_size) +		\
+	    ((__i) * (__queue)->data_size);			\
 })
 
-static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev,
-			       struct data_ring *ring)
+#define data_dma_offset(__queue, __base, __i)			\
+({								\
+	(__base) +						\
+	    ((__queue)->limit * (__queue)->desc_size) +		\
+	    ((__i) * (__queue)->data_size);			\
+})
+
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+				     struct data_queue *queue)
 {
+	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+	struct queue_entry_priv_pci_tx *priv_tx;
+	void *data_addr;
+	dma_addr_t data_dma;
 	unsigned int i;
 
 	/*
 	 * Allocate DMA memory for descriptor and buffer.
 	 */
-	ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev),
-					       rt2x00_get_ring_size(ring),
-					       &ring->data_dma);
-	if (!ring->data_addr)
+	data_addr = pci_alloc_consistent(pci_dev, dma_size(queue), &data_dma);
+	if (!data_addr)
 		return -ENOMEM;
 
 	/*
-	 * Initialize all ring entries to contain valid
-	 * addresses.
+	 * Initialize all queue entries to contain valid addresses.
 	 */
-	for (i = 0; i < ring->stats.limit; i++) {
-		ring->entry[i].priv = priv_offset(ring, i);
-		ring->entry[i].data_addr = data_addr_offset(ring, i);
-		ring->entry[i].data_dma = data_dma_offset(ring, i);
+	for (i = 0; i < queue->limit; i++) {
+		priv_tx = queue->entries[i].priv_data;
+		priv_tx->desc = priv_offset(queue, data_addr, i);
+		priv_tx->data = data_addr_offset(queue, data_addr, i);
+		priv_tx->dma = data_dma_offset(queue, data_dma, i);
 	}
 
 	return 0;
 }
 
-static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev,
-			       struct data_ring *ring)
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+				     struct data_queue *queue)
 {
-	if (ring->data_addr)
-		pci_free_consistent(rt2x00dev_pci(rt2x00dev),
-				    rt2x00_get_ring_size(ring),
-				    ring->data_addr, ring->data_dma);
-	ring->data_addr = NULL;
+	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
+	struct queue_entry_priv_pci_tx *priv_tx = queue->entries[0].priv_data;
+
+	if (priv_tx->data)
+		pci_free_consistent(pci_dev, dma_size(queue),
+				    priv_tx->data, priv_tx->dma);
+	priv_tx->data = NULL;
 }
 
 int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
 {
 	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
-	struct data_ring *ring;
+	struct data_queue *queue;
 	int status;
 
 	/*
 	 * Allocate DMA
 	 */
-	ring_for_each(rt2x00dev, ring) {
-		status = rt2x00pci_alloc_dma(rt2x00dev, ring);
+	queue_for_each(rt2x00dev, queue) {
+		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
 		if (status)
 			goto exit;
 	}
@@ -321,7 +334,7 @@ EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
 
 void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
 {
-	struct data_ring *ring;
+	struct data_queue *queue;
 
 	/*
 	 * Free irq line.
@@ -331,8 +344,8 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
 	/*
 	 * Free DMA
 	 */
-	ring_for_each(rt2x00dev, ring)
-		rt2x00pci_free_dma(rt2x00dev, ring);
+	queue_for_each(rt2x00dev, queue)
+		rt2x00pci_free_queue_dma(rt2x00dev, queue);
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
 
@@ -530,5 +543,5 @@ EXPORT_SYMBOL_GPL(rt2x00pci_resume);
  */
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 library");
+MODULE_DESCRIPTION("rt2x00 pci library");
 MODULE_LICENSE("GPL");