Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00usb.c')
 -rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 361
 1 file changed, 131 insertions(+), 230 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index e5ceae805b57..83862e7f7aec 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -40,7 +40,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
 			     void *buffer, const u16 buffer_length,
 			     const int timeout)
 {
-	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
+	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
 	int status;
 	unsigned int i;
 	unsigned int pipe =
@@ -129,17 +129,12 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
 {
 	struct queue_entry *entry = (struct queue_entry *)urb->context;
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
 	struct txdone_entry_desc txdesc;
-	__le32 *txd = (__le32 *)entry->skb->data;
-	u32 word;
 
 	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
-	    !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+	    !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
 
-	rt2x00_desc_read(txd, 0, &word);
-
 	/*
 	 * Remove the descriptor data from the buffer.
 	 */
@@ -147,128 +142,116 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
 
 	/*
 	 * Obtain the status about this packet.
+	 * Note that when the status is 0 it does not mean the
+	 * frame was sent out correctly. It only means the frame
+	 * was successfully pushed to the hardware; we have no
+	 * way to determine the transmission status right now.
+	 * (Only indirectly, by looking at the failed TX counters
+	 * in the register).
 	 */
-	txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
+	if (!urb->status)
+		__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
+	else
+		__set_bit(TXDONE_FAILURE, &txdesc.flags);
 	txdesc.retry = 0;
-	txdesc.control = &priv_tx->control;
 
 	rt2x00lib_txdone(entry, &txdesc);
-
-	/*
-	 * Make this entry available for reuse.
-	 */
-	entry->flags = 0;
-	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
-
-	/*
-	 * If the data queue was full before the txdone handler
-	 * we must make sure the packet queue in the mac80211 stack
-	 * is reenabled when the txdone handler has finished.
-	 */
-	if (!rt2x00queue_full(entry->queue))
-		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
 }
 
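A note on the status mapping just introduced: a URB that completes with status 0 only proves the frame reached the hardware, which is why the handler sets TXDONE_UNKNOWN rather than a success flag. A standalone sketch of that decision (the flag bit values and the struct layout here are illustrative stand-ins, not the driver's real definitions):

#include <stdio.h>

/* Illustrative stand-ins for the driver's txdone flag bits. */
#define TXDONE_UNKNOWN	0
#define TXDONE_FAILURE	1

struct txdone_entry_desc {
	unsigned long flags;
	int retry;
};

/* Mirror of the decision in rt2x00usb_interrupt_txdone(): a zero URB
 * status means "pushed to hardware", not "transmitted successfully". */
static void fill_txdone(struct txdone_entry_desc *txdesc, int urb_status)
{
	txdesc->flags |= 1UL << (urb_status ? TXDONE_FAILURE : TXDONE_UNKNOWN);
	txdesc->retry = 0;
}

int main(void)
{
	struct txdone_entry_desc txdesc = { 0, 0 };

	fill_txdone(&txdesc, 0);	/* completed URB: outcome unknown */
	printf("flags = %#lx\n", txdesc.flags);
	return 0;
}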
-int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
-			    struct data_queue *queue, struct sk_buff *skb,
-			    struct ieee80211_tx_control *control)
+int rt2x00usb_write_tx_data(struct queue_entry *entry)
 {
-	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
-	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
-	struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
+	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
 	struct skb_frame_desc *skbdesc;
 	u32 length;
 
-	if (rt2x00queue_full(queue))
-		return -EINVAL;
-
-	if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
-		ERROR(rt2x00dev,
-		      "Arrived at non-free entry in the non-full queue %d.\n"
-		      "Please file bug report to %s.\n",
-		      control->queue, DRV_PROJECT);
-		return -EINVAL;
-	}
-
 	/*
 	 * Add the descriptor in front of the skb.
 	 */
-	skb_push(skb, queue->desc_size);
-	memset(skb->data, 0, queue->desc_size);
+	skb_push(entry->skb, entry->queue->desc_size);
+	memset(entry->skb->data, 0, entry->queue->desc_size);
 
 	/*
 	 * Fill in skb descriptor
 	 */
-	skbdesc = get_skb_frame_desc(skb);
-	skbdesc->data = skb->data + queue->desc_size;
-	skbdesc->data_len = skb->len - queue->desc_size;
-	skbdesc->desc = skb->data;
-	skbdesc->desc_len = queue->desc_size;
-	skbdesc->entry = entry;
-
-	memcpy(&priv_tx->control, control, sizeof(priv_tx->control));
-	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
+	skbdesc = get_skb_frame_desc(entry->skb);
+	skbdesc->desc = entry->skb->data;
+	skbdesc->desc_len = entry->queue->desc_size;
 
 	/*
 	 * USB devices cannot blindly pass the skb->len as the
 	 * length of the data to usb_fill_bulk_urb. Pass the skb
 	 * to the driver to determine what the length should be.
 	 */
-	length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, skb);
-
-	/*
-	 * Initialize URB and send the frame to the device.
-	 */
-	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
-	usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
-			  skb->data, length, rt2x00usb_interrupt_txdone, entry);
-	usb_submit_urb(priv_tx->urb, GFP_ATOMIC);
+	length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, entry->skb);
 
-	rt2x00queue_index_inc(queue, Q_INDEX);
+	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
+			  usb_sndbulkpipe(usb_dev, 1),
+			  entry->skb->data, length,
+			  rt2x00usb_interrupt_txdone, entry);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
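The reworked rt2x00usb_write_tx_data() keeps the buffer layout its comments describe: desc_size bytes are pushed in front of the frame, zeroed, and skbdesc->desc records where the descriptor starts. A standalone model of that layout using a plain buffer (the size and names are made up for illustration):

#include <stdio.h>
#include <string.h>

#define DESC_SIZE 16	/* illustrative TX descriptor size */

int main(void)
{
	const char payload[] = "frame payload";
	unsigned char buf[DESC_SIZE + sizeof(payload)];

	/* skb_push() + memset() equivalent: reserve and zero the
	 * descriptor area in front of the frame data. */
	memset(buf, 0, DESC_SIZE);
	memcpy(buf + DESC_SIZE, payload, sizeof(payload));

	/* What skbdesc->desc / skbdesc->desc_len would describe. */
	printf("desc at offset 0, %d bytes; payload: \"%s\"\n",
	       DESC_SIZE, (char *)(buf + DESC_SIZE));
	return 0;
}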
 
-/*
- * RX data handlers.
- */
-static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
+static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
 {
-	struct sk_buff *skb;
-	unsigned int frame_size;
+	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
+
+	if (__test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
+		usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+}
+
+void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
+			     const enum data_queue_qid qid)
+{
+	struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
+	unsigned long irqflags;
+	unsigned int index;
+	unsigned int index_done;
+	unsigned int i;
 
 	/*
-	 * As alignment we use 2 and not NET_IP_ALIGN because we need
-	 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
-	 * can be 0 on some hardware). We use these 2 bytes for frame
-	 * alignment later, we assume that the chance that
-	 * header_size % 4 == 2 is bigger then header_size % 2 == 0
-	 * and thus optimize alignment by reserving the 2 bytes in
-	 * advance.
+	 * Only protect the range we are going to loop over;
+	 * if during our loop an extra entry is set to pending
+	 * it should not be kicked during this run, since it
+	 * is part of another TX operation.
 	 */
-	frame_size = queue->data_size + queue->desc_size;
-	skb = dev_alloc_skb(queue->desc_size + frame_size + 2);
-	if (!skb)
-		return NULL;
-
-	skb_reserve(skb, queue->desc_size + 2);
-	skb_put(skb, frame_size);
+	spin_lock_irqsave(&queue->lock, irqflags);
+	index = queue->index[Q_INDEX];
+	index_done = queue->index[Q_INDEX_DONE];
+	spin_unlock_irqrestore(&queue->lock, irqflags);
 
-	return skb;
+	/*
+	 * Start from the TX done pointer; this guarantees that we will
+	 * send out all frames in the correct order.
+	 */
+	if (index_done < index) {
+		for (i = index_done; i < index; i++)
+			rt2x00usb_kick_tx_entry(&queue->entries[i]);
+	} else {
+		for (i = index_done; i < queue->limit; i++)
+			rt2x00usb_kick_tx_entry(&queue->entries[i]);
+
+		for (i = 0; i < index; i++)
+			rt2x00usb_kick_tx_entry(&queue->entries[i]);
+	}
 }
+EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
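The split loop in rt2x00usb_kick_tx_queue() walks the ring from the done index toward the current index, wrapping at queue->limit; when the two indices are equal the else branch walks the entire ring, which corresponds to a completely full queue. A standalone sketch of the traversal order (ring size and indices are invented):

#include <stdio.h>

#define QUEUE_LIMIT 8	/* illustrative ring size */

static void kick_entry(unsigned int i)
{
	printf(" %u", i);
}

/* Same two-loop shape as rt2x00usb_kick_tx_queue() above. */
static void kick_range(unsigned int index_done, unsigned int index)
{
	unsigned int i;

	printf("done=%u cur=%u:", index_done, index);
	if (index_done < index) {
		for (i = index_done; i < index; i++)
			kick_entry(i);
	} else {
		for (i = index_done; i < QUEUE_LIMIT; i++)
			kick_entry(i);
		for (i = 0; i < index; i++)
			kick_entry(i);
	}
	printf("\n");
}

int main(void)
{
	kick_range(2, 5);	/* no wrap: 2 3 4 */
	kick_range(6, 2);	/* wraps: 6 7 0 1 */
	return 0;
}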
 
+/*
+ * RX data handlers.
+ */
 static void rt2x00usb_interrupt_rxdone(struct urb *urb)
 {
 	struct queue_entry *entry = (struct queue_entry *)urb->context;
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-	struct sk_buff *skb;
-	struct skb_frame_desc *skbdesc;
-	struct rxdone_entry_desc rxdesc;
-	int header_size;
+	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+	u8 rxd[32];
 
 	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
-	    !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+	    !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
 		return;
 
 	/*
@@ -276,61 +259,22 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
 	 * to be actually valid, or if the urb is signaling
 	 * a problem.
 	 */
-	if (urb->actual_length < entry->queue->desc_size || urb->status)
-		goto skip_entry;
-
-	/*
-	 * Fill in skb descriptor
-	 */
-	skbdesc = get_skb_frame_desc(entry->skb);
-	memset(skbdesc, 0, sizeof(*skbdesc));
-	skbdesc->entry = entry;
-
-	memset(&rxdesc, 0, sizeof(rxdesc));
-	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
-
-	/*
-	 * The data behind the ieee80211 header must be
-	 * aligned on a 4 byte boundary.
-	 */
-	header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
-	if (header_size % 4 == 0) {
-		skb_push(entry->skb, 2);
-		memmove(entry->skb->data, entry->skb->data + 2,
-			entry->skb->len - 2);
-		skbdesc->data = entry->skb->data;
-		skb_trim(entry->skb, entry->skb->len - 2);
+	if (urb->actual_length < entry->queue->desc_size || urb->status) {
+		__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+		usb_submit_urb(urb, GFP_ATOMIC);
+		return;
 	}
 
 	/*
-	 * Allocate a new sk buffer to replace the current one.
-	 * If allocation fails, we should drop the current frame
-	 * so we can recycle the existing sk buffer for the new frame.
+	 * Fill in desc fields of the skb descriptor
 	 */
-	skb = rt2x00usb_alloc_rxskb(entry->queue);
-	if (!skb)
-		goto skip_entry;
+	skbdesc->desc = rxd;
+	skbdesc->desc_len = entry->queue->desc_size;
 
 	/*
 	 * Send the frame to rt2x00lib for further processing.
 	 */
-	rt2x00lib_rxdone(entry, &rxdesc);
-
-	/*
-	 * Replace current entry's skb with the newly allocated one,
-	 * and reinitialize the urb.
-	 */
-	entry->skb = skb;
-	urb->transfer_buffer = entry->skb->data;
-	urb->transfer_buffer_length = entry->skb->len;
-
-skip_entry:
-	if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
-		__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
-		usb_submit_urb(urb, GFP_ATOMIC);
-	}
-
-	rt2x00queue_index_inc(entry->queue, Q_INDEX);
 }
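The error path above is now the whole recovery strategy: a short or failed transfer is never processed, the buffer is simply handed back to the device by resubmitting the URB from its own completion handler. A standalone model of that decision (the fake urb struct and threshold are stand-ins; in the kernel the resubmit is the usb_submit_urb(urb, GFP_ATOMIC) call shown in the diff):

#include <stdio.h>

#define DESC_SIZE 16	/* illustrative minimum useful length */

/* Stand-in for the fields of struct urb that the decision reads. */
struct fake_urb {
	int status;			/* 0 on success */
	unsigned int actual_length;	/* bytes actually transferred */
};

/* Mirror of the RX completion logic: unusable transfers are
 * resubmitted immediately; good ones are processed. */
static void rx_complete(struct fake_urb *urb)
{
	if (urb->actual_length < DESC_SIZE || urb->status) {
		printf("resubmit (status=%d, len=%u)\n",
		       urb->status, urb->actual_length);
		return;
	}
	printf("process %u bytes\n", urb->actual_length);
}

int main(void)
{
	struct fake_urb bad = { -71, 0 };	/* e.g. -EPROTO */
	struct fake_urb ok = { 0, 64 };

	rx_complete(&bad);
	rx_complete(&ok);
	return 0;
}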
 
 /*
@@ -338,27 +282,21 @@ skip_entry:
  */
 void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
 {
-	struct queue_entry_priv_usb_rx *priv_rx;
-	struct queue_entry_priv_usb_tx *priv_tx;
-	struct queue_entry_priv_usb_bcn *priv_bcn;
+	struct queue_entry_priv_usb *entry_priv;
+	struct queue_entry_priv_usb_bcn *bcn_priv;
 	struct data_queue *queue;
 	unsigned int i;
 
-	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
+	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
 				    REGISTER_TIMEOUT);
 
 	/*
 	 * Cancel all queues.
 	 */
-	for (i = 0; i < rt2x00dev->rx->limit; i++) {
-		priv_rx = rt2x00dev->rx->entries[i].priv_data;
-		usb_kill_urb(priv_rx->urb);
-	}
-
-	tx_queue_for_each(rt2x00dev, queue) {
+	queue_for_each(rt2x00dev, queue) {
 		for (i = 0; i < queue->limit; i++) {
-			priv_tx = queue->entries[i].priv_data;
-			usb_kill_urb(priv_tx->urb);
+			entry_priv = queue->entries[i].priv_data;
+			usb_kill_urb(entry_priv->urb);
 		}
 	}
 
@@ -369,19 +307,9 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
 		return;
 
 	for (i = 0; i < rt2x00dev->bcn->limit; i++) {
-		priv_bcn = rt2x00dev->bcn->entries[i].priv_data;
-		usb_kill_urb(priv_bcn->urb);
-
-		if (priv_bcn->guardian_urb)
-			usb_kill_urb(priv_bcn->guardian_urb);
-	}
-
-	if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
-		return;
-
-	for (i = 0; i < rt2x00dev->bcn[1].limit; i++) {
-		priv_tx = rt2x00dev->bcn[1].entries[i].priv_data;
-		usb_kill_urb(priv_tx->urb);
+		bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
+		if (bcn_priv->guardian_urb)
+			usb_kill_urb(bcn_priv->guardian_urb);
 	}
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
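With queue_for_each, the shutdown path treats RX, TX, and beacon queues uniformly instead of through three hand-written loops. A standalone sketch of what such an all-queues iterator macro can look like (the driver defines its own in its queue header; this array-based version is an assumption for illustration only):

#include <stdio.h>

struct data_queue {
	const char *name;
};

struct fake_dev {
	struct data_queue queues[3];
	unsigned int nqueues;
};

/* Illustrative stand-in for the driver's queue_for_each() macro. */
#define queue_for_each(dev, q) \
	for ((q) = &(dev)->queues[0]; \
	     (q) < &(dev)->queues[(dev)->nqueues]; (q)++)

int main(void)
{
	struct fake_dev dev = { { { "RX" }, { "TX" }, { "BEACON" } }, 3 };
	struct data_queue *queue;

	queue_for_each(&dev, queue)
		printf("kill URBs for the %s queue\n", queue->name);
	return 0;
}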
@@ -392,16 +320,16 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
392void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev, 320void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
393 struct queue_entry *entry) 321 struct queue_entry *entry)
394{ 322{
395 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 323 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
396 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data; 324 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
397 325
398 usb_fill_bulk_urb(priv_rx->urb, usb_dev, 326 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
399 usb_rcvbulkpipe(usb_dev, 1), 327 usb_rcvbulkpipe(usb_dev, 1),
400 entry->skb->data, entry->skb->len, 328 entry->skb->data, entry->skb->len,
401 rt2x00usb_interrupt_rxdone, entry); 329 rt2x00usb_interrupt_rxdone, entry);
402 330
403 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 331 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
404 usb_submit_urb(priv_rx->urb, GFP_ATOMIC); 332 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
405} 333}
406EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 334EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
407 335
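The RX re-arm above is the canonical bulk-in pattern: point the URB at the skb's buffer, mark the entry as owned by the device, submit with GFP_ATOMIC. A condensed kernel-style sketch using the same USB core calls (a fragment for a kernel build, not a standalone program; the helper name and parameters are invented):

#include <linux/usb.h>
#include <linux/skbuff.h>

/* Sketch: arm one bulk-in URB on endpoint 1 (the endpoint used in the
 * code above) for a receive buffer. 'complete_fn' and 'context' stand
 * in for the driver's completion handler and queue entry. */
static int arm_rx_urb(struct usb_device *usb_dev, struct urb *urb,
		      struct sk_buff *skb,
		      usb_complete_t complete_fn, void *context)
{
	usb_fill_bulk_urb(urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, 1),
			  skb->data, skb->len, complete_fn, context);
	return usb_submit_urb(urb, GFP_ATOMIC);
}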
@@ -415,38 +343,31 @@ EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
 static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
 			       struct data_queue *queue)
 {
-	struct queue_entry_priv_usb_rx *priv_rx;
-	struct queue_entry_priv_usb_tx *priv_tx;
-	struct queue_entry_priv_usb_bcn *priv_bcn;
-	struct urb *urb;
-	unsigned int guardian =
-	    test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
+	struct queue_entry_priv_usb *entry_priv;
+	struct queue_entry_priv_usb_bcn *bcn_priv;
 	unsigned int i;
 
+	for (i = 0; i < queue->limit; i++) {
+		entry_priv = queue->entries[i].priv_data;
+		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!entry_priv->urb)
+			return -ENOMEM;
+	}
+
 	/*
-	 * Allocate the URB's
+	 * If this is not the beacon queue or
+	 * no guardian byte was required for the beacon,
+	 * then we are done.
 	 */
+	if (rt2x00dev->bcn != queue ||
+	    !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
+		return 0;
+
 	for (i = 0; i < queue->limit; i++) {
-		urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!urb)
+		bcn_priv = queue->entries[i].priv_data;
+		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!bcn_priv->guardian_urb)
 			return -ENOMEM;
-
-		if (queue->qid == QID_RX) {
-			priv_rx = queue->entries[i].priv_data;
-			priv_rx->urb = urb;
-		} else if (queue->qid == QID_MGMT && guardian) {
-			priv_bcn = queue->entries[i].priv_data;
-			priv_bcn->urb = urb;
-
-			urb = usb_alloc_urb(0, GFP_KERNEL);
-			if (!urb)
-				return -ENOMEM;
-
-			priv_bcn->guardian_urb = urb;
-		} else {
-			priv_tx = queue->entries[i].priv_data;
-			priv_tx->urb = urb;
-		}
 	}
 
 	return 0;
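Allocation is now two flat passes: a data URB for every entry, then guardian URBs only when this is the beacon queue and the hardware requires the guardian byte. On failure the function returns -ENOMEM without unwinding, which appears to rely on the companion free routine (rt2x00usb_free_urb(), next hunk) to kill and free whatever was allocated. A standalone model of that allocate-then-augment shape (the structures and flags are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct fake_urb { int id; };

struct entry_priv {
	struct fake_urb *urb;		/* every entry gets one */
	struct fake_urb *guardian_urb;	/* beacon entries only */
};

/* Pass 1: data URBs for all entries. Pass 2: guardian URBs, only for
 * the beacon queue when a guardian byte is required. Partial failures
 * are left for the free path to unwind, as in the driver. */
static int alloc_urbs(struct entry_priv *entries, unsigned int limit,
		      int is_beacon_queue, int needs_guardian)
{
	unsigned int i;

	for (i = 0; i < limit; i++) {
		entries[i].urb = calloc(1, sizeof(*entries[i].urb));
		if (!entries[i].urb)
			return -1;
	}

	if (!is_beacon_queue || !needs_guardian)
		return 0;

	for (i = 0; i < limit; i++) {
		entries[i].guardian_urb = calloc(1, sizeof(*entries[i].guardian_urb));
		if (!entries[i].guardian_urb)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct entry_priv entries[4] = {0};

	printf("beacon alloc: %d\n", alloc_urbs(entries, 4, 1, 1));
	return 0;
}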
@@ -455,47 +376,39 @@ static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
455static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, 376static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
456 struct data_queue *queue) 377 struct data_queue *queue)
457{ 378{
458 struct queue_entry_priv_usb_rx *priv_rx; 379 struct queue_entry_priv_usb *entry_priv;
459 struct queue_entry_priv_usb_tx *priv_tx; 380 struct queue_entry_priv_usb_bcn *bcn_priv;
460 struct queue_entry_priv_usb_bcn *priv_bcn;
461 struct urb *urb;
462 unsigned int guardian =
463 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
464 unsigned int i; 381 unsigned int i;
465 382
466 if (!queue->entries) 383 if (!queue->entries)
467 return; 384 return;
468 385
469 for (i = 0; i < queue->limit; i++) { 386 for (i = 0; i < queue->limit; i++) {
470 if (queue->qid == QID_RX) { 387 entry_priv = queue->entries[i].priv_data;
471 priv_rx = queue->entries[i].priv_data; 388 usb_kill_urb(entry_priv->urb);
472 urb = priv_rx->urb; 389 usb_free_urb(entry_priv->urb);
473 } else if (queue->qid == QID_MGMT && guardian) { 390 }
474 priv_bcn = queue->entries[i].priv_data;
475
476 usb_kill_urb(priv_bcn->guardian_urb);
477 usb_free_urb(priv_bcn->guardian_urb);
478
479 urb = priv_bcn->urb;
480 } else {
481 priv_tx = queue->entries[i].priv_data;
482 urb = priv_tx->urb;
483 }
484 391
485 usb_kill_urb(urb); 392 /*
486 usb_free_urb(urb); 393 * If this is not the beacon queue or
487 if (queue->entries[i].skb) 394 * no guardian byte was required for the beacon,
488 kfree_skb(queue->entries[i].skb); 395 * then we are done.
396 */
397 if (rt2x00dev->bcn != queue ||
398 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
399 return;
400
401 for (i = 0; i < queue->limit; i++) {
402 bcn_priv = queue->entries[i].priv_data;
403 usb_kill_urb(bcn_priv->guardian_urb);
404 usb_free_urb(bcn_priv->guardian_urb);
489 } 405 }
490} 406}
491 407
492int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) 408int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
493{ 409{
494 struct data_queue *queue; 410 struct data_queue *queue;
495 struct sk_buff *skb; 411 int status;
496 unsigned int entry_size;
497 unsigned int i;
498 int uninitialized_var(status);
499 412
500 /* 413 /*
501 * Allocate DMA 414 * Allocate DMA
@@ -506,18 +419,6 @@ int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
 		goto exit;
 	}
 
-	/*
-	 * For the RX queue, skb's should be allocated.
-	 */
-	entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
-	for (i = 0; i < rt2x00dev->rx->limit; i++) {
-		skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
-		if (!skb)
-			goto exit;
-
-		rt2x00dev->rx->entries[i].skb = skb;
-	}
-
 	return 0;
 
 exit:
@@ -596,7 +497,7 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
596 usb_set_intfdata(usb_intf, hw); 497 usb_set_intfdata(usb_intf, hw);
597 498
598 rt2x00dev = hw->priv; 499 rt2x00dev = hw->priv;
599 rt2x00dev->dev = usb_intf; 500 rt2x00dev->dev = &usb_intf->dev;
600 rt2x00dev->ops = ops; 501 rt2x00dev->ops = ops;
601 rt2x00dev->hw = hw; 502 rt2x00dev->hw = hw;
602 mutex_init(&rt2x00dev->usb_cache_mutex); 503 mutex_init(&rt2x00dev->usb_cache_mutex);
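One quiet change runs through the whole patch: rt2x00dev->dev now stores the interface's generic struct device rather than the usb_interface, and call sites recover the USB device through a helper. A hedged guess at the shape of such a helper, built only from USB core accessors that do exist (the driver's actual to_usb_device_intf() lives in its USB header and may differ):

#include <linux/usb.h>

/* Plausible shape for a device-to-usb_device helper: walk from the
 * stored struct device back to its usb_interface, then to the
 * usb_device that owns it. Illustrative, not the driver's code. */
static inline struct usb_device *example_usb_dev(struct device *d)
{
	return interface_to_usbdev(to_usb_interface(d));
}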