Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00usb.c')
 drivers/net/wireless/rt2x00/rt2x00usb.c | 199 +++++++++++++++----------------
 1 file changed, 98 insertions(+), 101 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f76014f732c..4c5ae3d4562 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -208,11 +208,15 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
         struct queue_entry *entry = (struct queue_entry *)urb->context;
         struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 
-        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
-            !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+        if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                 return;
 
         /*
+         * Report the frame as DMA done
+         */
+        rt2x00lib_dmadone(entry);
+
+        /*
          * Check if the frame was correctly uploaded
          */
         if (urb->status)
@@ -222,112 +226,84 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
          * Schedule the delayed work for reading the TX status
          * from the device.
          */
-        ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+        if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+            test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+                ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
 }
 
-static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
+static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
 {
         struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
         struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
         struct queue_entry_priv_usb *entry_priv = entry->priv_data;
         u32 length;
 
-        if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) {
-                /*
-                 * USB devices cannot blindly pass the skb->len as the
-                 * length of the data to usb_fill_bulk_urb. Pass the skb
-                 * to the driver to determine what the length should be.
-                 */
-                length = rt2x00dev->ops->lib->get_tx_data_len(entry);
-
-                usb_fill_bulk_urb(entry_priv->urb, usb_dev,
-                                  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
-                                  entry->skb->data, length,
-                                  rt2x00usb_interrupt_txdone, entry);
-
-                usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
-        }
-}
-
-void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
-                             const enum data_queue_qid qid)
-{
-        struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
-        unsigned long irqflags;
-        unsigned int index;
-        unsigned int index_done;
-        unsigned int i;
-
-        /*
-         * Only protect the range we are going to loop over,
-         * if during our loop a extra entry is set to pending
-         * it should not be kicked during this run, since it
-         * is part of another TX operation.
-         */
-        spin_lock_irqsave(&queue->lock, irqflags);
-        index = queue->index[Q_INDEX];
-        index_done = queue->index[Q_INDEX_DONE];
-        spin_unlock_irqrestore(&queue->lock, irqflags);
-
-        /*
-         * Start from the TX done pointer, this guarentees that we will
-         * send out all frames in the correct order.
-         */
-        if (index_done < index) {
-                for (i = index_done; i < index; i++)
-                        rt2x00usb_kick_tx_entry(&queue->entries[i]);
-        } else {
-                for (i = index_done; i < queue->limit; i++)
-                        rt2x00usb_kick_tx_entry(&queue->entries[i]);
-
-                for (i = 0; i < index; i++)
-                        rt2x00usb_kick_tx_entry(&queue->entries[i]);
-        }
+        if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
+                return;
+
+        /*
+         * USB devices cannot blindly pass the skb->len as the
+         * length of the data to usb_fill_bulk_urb. Pass the skb
+         * to the driver to determine what the length should be.
+         */
+        length = rt2x00dev->ops->lib->get_tx_data_len(entry);
+
+        usb_fill_bulk_urb(entry_priv->urb, usb_dev,
+                          usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
+                          entry->skb->data, length,
+                          rt2x00usb_interrupt_txdone, entry);
+
+        usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+}
+
+void rt2x00usb_kick_tx_queue(struct data_queue *queue)
+{
+        rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+                                   rt2x00usb_kick_tx_entry);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
 
-void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
-                             const enum data_queue_qid qid)
+static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
 {
-        struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
-        struct queue_entry_priv_usb *entry_priv;
-        struct queue_entry_priv_usb_bcn *bcn_priv;
-        unsigned int i;
-        bool kill_guard;
-
-        /*
-         * When killing the beacon queue, we must also kill
-         * the beacon guard byte.
-         */
-        kill_guard =
-            (qid == QID_BEACON) &&
-            (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags));
-
-        /*
-         * Cancel all entries.
-         */
-        for (i = 0; i < queue->limit; i++) {
-                entry_priv = queue->entries[i].priv_data;
-                usb_kill_urb(entry_priv->urb);
-
-                /*
-                 * Kill guardian urb (if required by driver).
-                 */
-                if (kill_guard) {
-                        bcn_priv = queue->entries[i].priv_data;
-                        usb_kill_urb(bcn_priv->guardian_urb);
-                }
-        }
+        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
+        struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
+
+        if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+                return;
+
+        usb_kill_urb(entry_priv->urb);
+
+        /*
+         * Kill guardian urb (if required by driver).
+         */
+        if ((entry->queue->qid == QID_BEACON) &&
+            (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
+                usb_kill_urb(bcn_priv->guardian_urb);
+
+        /*
+         * We need a short delay here to wait for
+         * the URB to be canceled
+         */
+        do {
+                udelay(100);
+        } while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
+}
+
+void rt2x00usb_kill_tx_queue(struct data_queue *queue)
+{
+        rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+                                   rt2x00usb_kill_tx_entry);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
 
-static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
+static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
 {
-        struct queue_entry *entry;
-        struct queue_entry_priv_usb *entry_priv;
+        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
         unsigned short threshold = queue->threshold;
 
-        WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid);
+        WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
                " invoke forced forced reset", queue->qid);
 
         /*
          * Temporarily disable the TX queue, this will force mac80211
@@ -337,28 +313,33 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
          * queue from being enabled during the txdone handler.
          */
         queue->threshold = queue->limit;
-        ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+        ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
 
         /*
-         * Reset all currently uploaded TX frames.
+         * Kill all entries in the queue, afterwards we need to
+         * wait a bit for all URBs to be cancelled.
          */
-        while (!rt2x00queue_empty(queue)) {
-                entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-                entry_priv = entry->priv_data;
-                usb_kill_urb(entry_priv->urb);
-
-                /*
-                 * We need a short delay here to wait for
-                 * the URB to be canceled
-                 */
-                do {
-                        udelay(100);
-                } while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
-
-                /*
-                 * Invoke the TX done handler
-                 */
-                rt2x00usb_work_txdone_entry(entry);
+        rt2x00usb_kill_tx_queue(queue);
+
+        /*
+         * In case that a driver has overriden the txdone_work
+         * function, we invoke the TX done through there.
+         */
+        rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
+
+        /*
+         * Security measure: if the driver did override the
+         * txdone_work function, and the hardware did arrive
+         * in a state which causes it to malfunction, it is
+         * possible that the driver couldn't handle the txdone
+         * event correctly. So after giving the driver the
+         * chance to cleanup, we now force a cleanup of any
+         * leftovers.
+         */
+        if (!rt2x00queue_empty(queue)) {
+                WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
+                        " status handling failed, invoke hard reset", queue->qid);
+                rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
         }
 
         /*
@@ -366,7 +347,15 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
          * queue again.
          */
         queue->threshold = threshold;
-        ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+        ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
 }
 
+static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
+{
+        WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
+                " invoke forced tx handler", queue->qid);
+
+        ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
+}
+
 void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -374,8 +363,10 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
         struct data_queue *queue;
 
         tx_queue_for_each(rt2x00dev, queue) {
+                if (rt2x00queue_dma_timeout(queue))
+                        rt2x00usb_watchdog_tx_dma(queue);
                 if (rt2x00queue_timeout(queue))
-                        rt2x00usb_watchdog_reset_tx(queue);
+                        rt2x00usb_watchdog_tx_status(queue);
         }
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
@@ -416,11 +407,15 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
         struct queue_entry *entry = (struct queue_entry *)urb->context;
         struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 
-        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
-            !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+        if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                 return;
 
         /*
+         * Report the frame as DMA done
+         */
+        rt2x00lib_dmadone(entry);
+
+        /*
          * Check if the received data is simply too small
          * to be actually valid, or if the urb is signaling
          * a problem.
@@ -432,7 +427,9 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
          * Schedule the delayed work for reading the RX status
          * from the device.
          */
-        ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+        if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+            test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+                ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
 }
 
 /*
@@ -447,7 +444,7 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
          * The USB version of kill_tx_queue also works
          * on the RX queue.
          */
-        rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX);
+        rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
 