aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/rt2x00/rt2x00usb.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00usb.c')
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c282
1 files changed, 161 insertions, 121 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 9ac14598e2a0..1a9937d5aff6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -195,7 +195,8 @@ static void rt2x00usb_work_txdone(struct work_struct *work)
195 while (!rt2x00queue_empty(queue)) { 195 while (!rt2x00queue_empty(queue)) {
196 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 196 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
197 197
198 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 198 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
199 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
199 break; 200 break;
200 201
201 rt2x00usb_work_txdone_entry(entry); 202 rt2x00usb_work_txdone_entry(entry);
@@ -235,8 +236,10 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
235 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 236 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
236 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 237 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
237 u32 length; 238 u32 length;
239 int status;
238 240
239 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) 241 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
242 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
240 return; 243 return;
241 244
242 /* 245 /*
@@ -251,106 +254,15 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
251 entry->skb->data, length, 254 entry->skb->data, length,
252 rt2x00usb_interrupt_txdone, entry); 255 rt2x00usb_interrupt_txdone, entry);
253 256
254 if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) { 257 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
258 if (status) {
259 if (status == -ENODEV)
260 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
255 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 261 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
256 rt2x00lib_dmadone(entry); 262 rt2x00lib_dmadone(entry);
257 } 263 }
258} 264}
259 265
260void rt2x00usb_kick_tx_queue(struct data_queue *queue)
261{
262 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
263 rt2x00usb_kick_tx_entry);
264}
265EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
266
267static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
268{
269 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
270 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
271 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
272
273 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
274 return;
275
276 usb_kill_urb(entry_priv->urb);
277
278 /*
279 * Kill guardian urb (if required by driver).
280 */
281 if ((entry->queue->qid == QID_BEACON) &&
282 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
283 usb_kill_urb(bcn_priv->guardian_urb);
284}
285
286void rt2x00usb_kill_tx_queue(struct data_queue *queue)
287{
288 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
289 rt2x00usb_kill_tx_entry);
290}
291EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
292
293static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
294{
295 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
296 unsigned short threshold = queue->threshold;
297
298 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
299 " invoke forced forced reset", queue->qid);
300
301 /*
302 * Temporarily disable the TX queue, this will force mac80211
303 * to use the other queues until this queue has been restored.
304 *
305 * Set the queue threshold to the queue limit. This prevents the
306 * queue from being enabled during the txdone handler.
307 */
308 queue->threshold = queue->limit;
309 ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
310
311 /*
312 * Kill all entries in the queue, afterwards we need to
313 * wait a bit for all URBs to be cancelled.
314 */
315 rt2x00usb_kill_tx_queue(queue);
316
317 /*
318	 * In case that a driver has overridden the txdone_work
319 * function, we invoke the TX done through there.
320 */
321 rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
322
323 /*
324 * The queue has been reset, and mac80211 is allowed to use the
325 * queue again.
326 */
327 queue->threshold = threshold;
328 ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
329}
330
331static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
332{
333 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
334 " invoke forced tx handler", queue->qid);
335
336 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
337}
338
339void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
340{
341 struct data_queue *queue;
342
343 tx_queue_for_each(rt2x00dev, queue) {
344 if (!rt2x00queue_empty(queue)) {
345 if (rt2x00queue_dma_timeout(queue))
346 rt2x00usb_watchdog_tx_dma(queue);
347 if (rt2x00queue_status_timeout(queue))
348 rt2x00usb_watchdog_tx_status(queue);
349 }
350 }
351}
352EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
353
354/* 266/*
355 * RX data handlers. 267 * RX data handlers.
356 */ 268 */
@@ -365,7 +277,8 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
365 while (!rt2x00queue_empty(rt2x00dev->rx)) { 277 while (!rt2x00queue_empty(rt2x00dev->rx)) {
366 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); 278 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
367 279
368 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 280 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
281 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
369 break; 282 break;
370 283
371 /* 284 /*
@@ -410,6 +323,154 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
410 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work); 323 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
411} 324}
412 325
326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
327{
328 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
329 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
330 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
331 int status;
332
333 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
334 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
335 return;
336
337 rt2x00lib_dmastart(entry);
338
339 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
340 usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
341 entry->skb->data, entry->skb->len,
342 rt2x00usb_interrupt_rxdone, entry);
343
344 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
345 if (status) {
346 if (status == -ENODEV)
347 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
348 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
349 rt2x00lib_dmadone(entry);
350 }
351}
352
353void rt2x00usb_kick_queue(struct data_queue *queue)
354{
355 switch (queue->qid) {
356 case QID_AC_VO:
357 case QID_AC_VI:
358 case QID_AC_BE:
359 case QID_AC_BK:
360 if (!rt2x00queue_empty(queue))
361 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
362 rt2x00usb_kick_tx_entry);
363 break;
364 case QID_RX:
365 if (!rt2x00queue_full(queue))
366 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
367 rt2x00usb_kick_rx_entry);
368 break;
369 default:
370 break;
371 }
372}
373EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
374
375static void rt2x00usb_flush_entry(struct queue_entry *entry)
376{
377 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
378 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
379 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
380
381 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
382 return;
383
384 usb_kill_urb(entry_priv->urb);
385
386 /*
387 * Kill guardian urb (if required by driver).
388 */
389 if ((entry->queue->qid == QID_BEACON) &&
390 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
391 usb_kill_urb(bcn_priv->guardian_urb);
392}
393
394void rt2x00usb_flush_queue(struct data_queue *queue)
395{
396 struct work_struct *completion;
397 unsigned int i;
398
399 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
400 rt2x00usb_flush_entry);
401
402 /*
403 * Obtain the queue completion handler
404 */
405 switch (queue->qid) {
406 case QID_AC_VO:
407 case QID_AC_VI:
408 case QID_AC_BE:
409 case QID_AC_BK:
410 completion = &queue->rt2x00dev->txdone_work;
411 break;
412 case QID_RX:
413 completion = &queue->rt2x00dev->rxdone_work;
414 break;
415 default:
416 return;
417 }
418
419 for (i = 0; i < 20; i++) {
420 /*
421 * Check if the driver is already done, otherwise we
422 * have to sleep a little while to give the driver/hw
423	 * the opportunity to complete interrupt process itself.
424 */
425 if (rt2x00queue_empty(queue))
426 break;
427
428 /*
429 * Schedule the completion handler manually, when this
430 * worker function runs, it should cleanup the queue.
431 */
432 ieee80211_queue_work(queue->rt2x00dev->hw, completion);
433
434 /*
435 * Wait for a little while to give the driver
436	 * the opportunity to recover itself.
437 */
438 msleep(10);
439 }
440}
441EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
442
443static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
444{
445 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
446	 " invoke forced reset\n", queue->qid);
447
448 rt2x00queue_flush_queue(queue, true);
449}
450
451static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
452{
453 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
454 " invoke forced tx handler\n", queue->qid);
455
456 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
457}
458
459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
460{
461 struct data_queue *queue;
462
463 tx_queue_for_each(rt2x00dev, queue) {
464 if (!rt2x00queue_empty(queue)) {
465 if (rt2x00queue_dma_timeout(queue))
466 rt2x00usb_watchdog_tx_dma(queue);
467 if (rt2x00queue_status_timeout(queue))
468 rt2x00usb_watchdog_tx_status(queue);
469 }
470 }
471}
472EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
473
413/* 474/*
414 * Radio handlers 475 * Radio handlers
415 */ 476 */
@@ -417,12 +478,6 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
417{ 478{
418 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0, 479 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
419 REGISTER_TIMEOUT); 480 REGISTER_TIMEOUT);
420
421 /*
422 * The USB version of kill_tx_queue also works
423 * on the RX queue.
424 */
425 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
426} 481}
427EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 482EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
428 483
@@ -431,25 +486,10 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
431 */ 486 */
432void rt2x00usb_clear_entry(struct queue_entry *entry) 487void rt2x00usb_clear_entry(struct queue_entry *entry)
433{ 488{
434 struct usb_device *usb_dev =
435 to_usb_device_intf(entry->queue->rt2x00dev->dev);
436 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
437 int pipe;
438
439 entry->flags = 0; 489 entry->flags = 0;
440 490
441 if (entry->queue->qid == QID_RX) { 491 if (entry->queue->qid == QID_RX)
442 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint); 492 rt2x00usb_kick_rx_entry(entry);
443 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
444 entry->skb->data, entry->skb->len,
445 rt2x00usb_interrupt_rxdone, entry);
446
447 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
448 if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) {
449 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
450 rt2x00lib_dmadone(entry);
451 }
452 }
453} 493}
454EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 494EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
455 495