author		Ivo van Doorn <ivdoorn@gmail.com>	2010-08-23 13:54:21 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-08-25 14:34:54 -0400
commit		5eb7efe8a4807d98a277280e1317e5094eedfb6b (patch)
tree		124a1a599ff718cfd84bb441202fe251f65bda19 /drivers/net/wireless/rt2x00/rt2x00usb.c
parent		ee1e755f84dfd5d482076c642fac830aafdc482b (diff)
rt2x00: Move direct access to queue->entries to rt2x00queue.c
All access to queue->entries through the Q_INDEX/Q_INDEX_DONE
variables must be done using spinlock protection. It is best
to manage this completely from rt2x00queue.c.
For looping through all entries in the queue, the function
rt2x00queue_for_each_entry is added, which walks over an index
range in a safe manner.
This also fixes rt2x00usb, which walked the entries list from
0 to the queue length to kill each entry (killing entries must be
done from Q_INDEX_DONE to Q_INDEX so that TX status reporting
occurs in the correct order).
Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
Acked-by: Gertjan van Wingerde <gwingerde@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00usb.c')
-rw-r--r--	drivers/net/wireless/rt2x00/rt2x00usb.c	75
1 file changed, 20 insertions(+), 55 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1d2eb461329f..6cc7aa418d87 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -225,7 +225,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
 	ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
 }
 
-static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
+static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -252,69 +252,34 @@ static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
 
 void rt2x00usb_kick_tx_queue(struct data_queue *queue)
 {
-	unsigned long irqflags;
-	unsigned int index;
-	unsigned int index_done;
-	unsigned int i;
-
-	/*
-	 * Only protect the range we are going to loop over,
-	 * if during our loop a extra entry is set to pending
-	 * it should not be kicked during this run, since it
-	 * is part of another TX operation.
-	 */
-	spin_lock_irqsave(&queue->lock, irqflags);
-	index = queue->index[Q_INDEX];
-	index_done = queue->index[Q_INDEX_DONE];
-	spin_unlock_irqrestore(&queue->lock, irqflags);
-
-	/*
-	 * Start from the TX done pointer, this guarentees that we will
-	 * send out all frames in the correct order.
-	 */
-	if (index_done < index) {
-		for (i = index_done; i < index; i++)
-			rt2x00usb_kick_tx_entry(&queue->entries[i]);
-	} else {
-		for (i = index_done; i < queue->limit; i++)
-			rt2x00usb_kick_tx_entry(&queue->entries[i]);
-
-		for (i = 0; i < index; i++)
-			rt2x00usb_kick_tx_entry(&queue->entries[i]);
-	}
+	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+				   rt2x00usb_kick_tx_entry);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
 
-void rt2x00usb_kill_tx_queue(struct data_queue *queue)
+static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
 {
-	struct queue_entry_priv_usb *entry_priv;
-	struct queue_entry_priv_usb_bcn *bcn_priv;
-	unsigned int i;
-	bool kill_guard;
+	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
+	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
 
-	/*
-	 * When killing the beacon queue, we must also kill
-	 * the beacon guard byte.
-	 */
-	kill_guard =
-	    (queue->qid == QID_BEACON) &&
-	    (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &queue->rt2x00dev->flags));
+	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+		return;
+
+	usb_kill_urb(entry_priv->urb);
 
 	/*
-	 * Cancel all entries.
+	 * Kill guardian urb (if required by driver).
 	 */
-	for (i = 0; i < queue->limit; i++) {
-		entry_priv = queue->entries[i].priv_data;
-		usb_kill_urb(entry_priv->urb);
+	if ((entry->queue->qid == QID_BEACON) &&
+	    (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
+		usb_kill_urb(bcn_priv->guardian_urb);
+}
 
-		/*
-		 * Kill guardian urb (if required by driver).
-		 */
-		if (kill_guard) {
-			bcn_priv = queue->entries[i].priv_data;
-			usb_kill_urb(bcn_priv->guardian_urb);
-		}
-	}
+void rt2x00usb_kill_tx_queue(struct data_queue *queue)
+{
+	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+				   rt2x00usb_kill_tx_entry);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
 
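
The rt2x00queue_for_each_entry() helper referenced in the commit message is added to rt2x00queue.c, which is outside this diffstat. The following is only a sketch of its likely shape, generalized from the open-coded loop removed from rt2x00usb_kick_tx_queue() above; the exact code in rt2x00queue.c (argument validation, local naming) may differ. It snapshots both indices under queue->lock, then walks the ring from the 'start' index towards the 'end' index, wrapping at queue->limit.

void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	/*
	 * Only the index snapshot is taken under the lock; entries that
	 * become pending after this point belong to a later TX operation
	 * and are not touched by this walk.
	 */
	spin_lock_irqsave(&queue->lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->lock, irqflags);

	/*
	 * Walk from 'start' towards 'end', wrapping around queue->limit,
	 * so callers that pass Q_INDEX_DONE/Q_INDEX visit entries in
	 * completion order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++)
			fn(&queue->entries[i]);
	} else {
		for (i = index_start; i < queue->limit; i++)
			fn(&queue->entries[i]);

		for (i = 0; i < index_end; i++)
			fn(&queue->entries[i]);
	}
}

Because both rt2x00usb_kick_tx_queue() and rt2x00usb_kill_tx_queue() now pass Q_INDEX_DONE as the start and Q_INDEX as the end, URBs are kicked and killed in completion order, which is what keeps TX status reporting ordered as described in the commit message; per-entry state checks (such as the ENTRY_OWNER_DEVICE_DATA test) stay in the per-entry callbacks.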