Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.h')
-rw-r--r--	drivers/net/wireless/rt2x00/rt2x00queue.h	167
1 file changed, 107 insertions(+), 60 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 191e7775a9c0..167d45873dca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -1,5 +1,5 @@
 /*
-	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
 	<http://rt2x00.serialmonkey.com>
 
 	This program is free software; you can redistribute it and/or modify
@@ -43,28 +43,12 @@
 #define AGGREGATION_SIZE	3840
 
 /**
- * DOC: Number of entries per queue
- *
- * Under normal load without fragmentation, 12 entries are sufficient
- * without the queue being filled up to the maximum. When using fragmentation
- * and the queue threshold code, we need to add some additional margins to
- * make sure the queue will never (or only under extreme load) fill up
- * completely.
- * Since we don't use preallocated DMA, having a large number of queue entries
- * will have minimal impact on the memory requirements for the queue.
- */
-#define RX_ENTRIES	24
-#define TX_ENTRIES	24
-#define BEACON_ENTRIES	1
-#define ATIM_ENTRIES	8
-
-/**
  * enum data_queue_qid: Queue identification
  *
+ * @QID_AC_VO: AC VO queue
+ * @QID_AC_VI: AC VI queue
  * @QID_AC_BE: AC BE queue
  * @QID_AC_BK: AC BK queue
- * @QID_AC_VI: AC VI queue
- * @QID_AC_VO: AC VO queue
  * @QID_HCCA: HCCA queue
  * @QID_MGMT: MGMT queue (prio queue)
  * @QID_RX: RX queue
@@ -73,10 +57,10 @@
  * @QID_ATIM: Atim queue (value unspeficied, don't send it to device)
  */
 enum data_queue_qid {
-	QID_AC_BE = 0,
-	QID_AC_BK = 1,
-	QID_AC_VI = 2,
-	QID_AC_VO = 3,
+	QID_AC_VO = 0,
+	QID_AC_VI = 1,
+	QID_AC_BE = 2,
+	QID_AC_BK = 3,
 	QID_HCCA = 4,
 	QID_MGMT = 13,
 	QID_RX = 14,
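
The reordering above lines the AC queue identifiers up with mac80211's queue numbering (0 = VO, 1 = VI, 2 = BE, 3 = BK). A minimal sketch of what that buys, assuming that numbering; the helper below is hypothetical and not part of this patch:

/* With QID_AC_VO..QID_AC_BK now equal to 0..3, a mac80211 queue index
 * maps to a data_queue_qid without a lookup table. */
static inline enum data_queue_qid qid_from_mac80211_queue(unsigned int queue)
{
	return (enum data_queue_qid)queue;	/* 0=VO, 1=VI, 2=BE, 3=BK */
}
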
@@ -233,6 +217,7 @@ enum txdone_entry_desc_flags {
 	TXDONE_FALLBACK,
 	TXDONE_FAILURE,
 	TXDONE_EXCESSIVE_RETRY,
+	TXDONE_AMPDU,
 };
 
 /**
@@ -268,6 +253,7 @@ struct txdone_entry_desc {
  * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
  * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
  * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
+ * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
  */
 enum txentry_desc_flags {
 	ENTRY_TXD_RTS_FRAME,
@@ -286,6 +272,7 @@ enum txentry_desc_flags {
 	ENTRY_TXD_HT_AMPDU,
 	ENTRY_TXD_HT_BW_40,
 	ENTRY_TXD_HT_SHORT_GI,
+	ENTRY_TXD_HT_MIMO_PS,
 };
 
 /**
@@ -294,7 +281,6 @@ enum txentry_desc_flags {
  * Summary of information for the frame descriptor before sending a TX frame.
  *
  * @flags: Descriptor flags (See &enum queue_entry_flags).
- * @queue: Queue identification (See &enum data_queue_qid).
  * @length: Length of the entire frame.
  * @header_length: Length of 802.11 header.
  * @length_high: PLCP length high word.
@@ -307,11 +293,8 @@ enum txentry_desc_flags {
  * @rate_mode: Rate mode (See @enum rate_modulation).
  * @mpdu_density: MDPU density.
  * @retry_limit: Max number of retries.
- * @aifs: AIFS value.
  * @ifs: IFS value.
  * @txop: IFS value for 11n capable chips.
- * @cw_min: cwmin value.
- * @cw_max: cwmax value.
  * @cipher: Cipher type used for encryption.
  * @key_idx: Key index used for encryption.
  * @iv_offset: Position where IV should be inserted by hardware.
@@ -320,28 +303,30 @@ enum txentry_desc_flags {
 struct txentry_desc {
 	unsigned long flags;
 
-	enum data_queue_qid queue;
-
 	u16 length;
 	u16 header_length;
 
-	u16 length_high;
-	u16 length_low;
-	u16 signal;
-	u16 service;
-
-	u16 mcs;
-	u16 stbc;
-	u16 ba_size;
-	u16 rate_mode;
-	u16 mpdu_density;
+	union {
+		struct {
+			u16 length_high;
+			u16 length_low;
+			u16 signal;
+			u16 service;
+			enum ifs ifs;
+		} plcp;
+
+		struct {
+			u16 mcs;
+			u8 stbc;
+			u8 ba_size;
+			u8 mpdu_density;
+			enum txop txop;
+		} ht;
+	} u;
+
+	enum rate_modulation rate_mode;
 
 	short retry_limit;
-	short aifs;
-	short ifs;
-	short txop;
-	short cw_min;
-	short cw_max;
 
 	enum cipher cipher;
 	u16 key_idx;
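
With the PLCP and HT fields folded into a union, a frame is described either by its legacy PLCP words or by its HT parameters, selected by @rate_mode. A hedged sketch of filling the descriptor; RATE_MODE_CCK, RATE_MODE_HT_MIX, IFS_SIFS and TXOP_HTTXOP are assumed to come from the enums referenced elsewhere in this header, and the values are placeholders:

/* Illustrative only: fill exactly one side of the union. */
static void txdesc_fill_rate(struct txentry_desc *txdesc, bool use_ht)
{
	if (use_ht) {
		txdesc->rate_mode = RATE_MODE_HT_MIX;	/* assumed constant */
		txdesc->u.ht.mcs = 7;			/* example MCS */
		txdesc->u.ht.ba_size = 7;
		txdesc->u.ht.mpdu_density = 4;
		txdesc->u.ht.txop = TXOP_HTTXOP;	/* assumed constant */
	} else {
		txdesc->rate_mode = RATE_MODE_CCK;	/* assumed constant */
		txdesc->u.plcp.signal = 0x0a;		/* example PLCP SIGNAL */
		txdesc->u.plcp.service = 0x04;		/* example PLCP SERVICE */
		txdesc->u.plcp.ifs = IFS_SIFS;		/* assumed constant */
	}
}
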
@@ -358,32 +343,38 @@ struct txentry_desc {
  * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
  *	transfer (either TX or RX depending on the queue). The entry should
  *	only be touched after the device has signaled it is done with it.
- * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
- *	encryption or decryption. The entry should only be touched after
- *	the device has signaled it is done with it.
  * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
  *	for the signal to start sending.
+ * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
+ *	while transferring the data to the hardware. No TX status report will
+ *	be expected from the hardware.
+ * @ENTRY_DATA_STATUS_PENDING: The entry has been send to the device and
+ *	returned. It is now waiting for the status reporting before the
+ *	entry can be reused again.
  */
 enum queue_entry_flags {
 	ENTRY_BCN_ASSIGNED,
 	ENTRY_OWNER_DEVICE_DATA,
-	ENTRY_OWNER_DEVICE_CRYPTO,
 	ENTRY_DATA_PENDING,
+	ENTRY_DATA_IO_FAILED,
+	ENTRY_DATA_STATUS_PENDING,
 };
 
 /**
  * struct queue_entry: Entry inside the &struct data_queue
  *
  * @flags: Entry flags, see &enum queue_entry_flags.
+ * @last_action: Timestamp of last change.
  * @queue: The data queue (&struct data_queue) to which this entry belongs.
  * @skb: The buffer which is currently being transmitted (for TX queue),
- *	or used to directly recieve data in (for RX queue).
+ *	or used to directly receive data in (for RX queue).
  * @entry_idx: The entry index number.
  * @priv_data: Private data belonging to this queue entry. The pointer
  *	points to data specific to a particular driver and queue type.
  */
 struct queue_entry {
 	unsigned long flags;
+	unsigned long last_action;
 
 	struct data_queue *queue;
 
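
The new @last_action timestamp together with ENTRY_DATA_IO_FAILED and ENTRY_DATA_STATUS_PENDING turns the flags into a small, timestamped lifecycle: pending, owned by the device, then waiting for the TX status report. A minimal sketch of one such transition, purely illustrative and not lifted from the driver:

/* Hand a pending entry to the hardware and timestamp the step so the
 * timeout helpers further down can notice when it gets stuck. */
static void entry_hand_to_device(struct queue_entry *entry)
{
	clear_bit(ENTRY_DATA_PENDING, &entry->flags);
	set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	entry->last_action = jiffies;
}
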
@@ -399,29 +390,49 @@ struct queue_entry {
  *
  * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
  *	owned by the hardware then the queue is considered to be full.
+ * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
+ *	transferred to the hardware.
  * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
  *	the hardware and for which we need to run the txdone handler. If this
  *	entry is not owned by the hardware the queue is considered to be empty.
- * @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decription
- *	will be completed by the hardware next.
  * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
  *	of the index array.
  */
 enum queue_index {
 	Q_INDEX,
+	Q_INDEX_DMA_DONE,
 	Q_INDEX_DONE,
-	Q_INDEX_CRYPTO,
 	Q_INDEX_MAX,
 };
 
 /**
+ * enum data_queue_flags: Status flags for data queues
+ *
+ * @QUEUE_STARTED: The queue has been started. Fox RX queues this means the
+ *	device might be DMA'ing skbuffers. TX queues will accept skbuffers to
+ *	be transmitted and beacon queues will start beaconing the configured
+ *	beacons.
+ * @QUEUE_PAUSED: The queue has been started but is currently paused.
+ *	When this bit is set, the queue has been stopped in mac80211,
+ *	preventing new frames to be enqueued. However, a few frames
+ *	might still appear shortly after the pausing...
+ */
+enum data_queue_flags {
+	QUEUE_STARTED,
+	QUEUE_PAUSED,
+};
+
+/**
  * struct data_queue: Data queue
  *
  * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
  * @entries: Base address of the &struct queue_entry which are
  *	part of this queue.
  * @qid: The queue identification, see &enum data_queue_qid.
- * @lock: Spinlock to protect index handling. Whenever @index, @index_done or
+ * @flags: Entry flags, see &enum queue_entry_flags.
+ * @status_lock: The mutex for protecting the start/stop/flush
+ *	handling on this queue.
+ * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
 *	@index_crypt needs to be changed this lock should be grabbed to prevent
 *	index corruption due to concurrency.
 * @count: Number of frames handled in the queue.
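
QUEUE_PAUSED documents a state that mac80211 already knows about: the queue is stopped upstream so no new frames are handed down, while frames already in flight drain normally. A sketch of what pausing a TX queue could look like, assuming the usual queue->rt2x00dev->hw back-pointer and mac80211's ieee80211_stop_queue(); this is not the driver's literal implementation:

/* Illustrative pause for one of the four AC TX queues: remember the
 * state and stop the matching mac80211 queue (after the qid reorder
 * above, the AC qid doubles as the mac80211 queue index). */
static void queue_pause(struct data_queue *queue)
{
	if (test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;		/* already paused */
	ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
}
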
@@ -444,10 +455,11 @@ struct data_queue {
 	struct queue_entry *entries;
 
 	enum data_queue_qid qid;
+	unsigned long flags;
+
+	struct mutex status_lock;
+	spinlock_t index_lock;
 
-	spinlock_t lock;
-	unsigned long last_index;
-	unsigned long last_index_done;
 	unsigned int count;
 	unsigned short limit;
 	unsigned short threshold;
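
The old single @lock is split: @status_lock serializes start/stop/flush handling while @index_lock only guards the index bookkeeping. A hedged sketch of an index update under the new spinlock; the index[] array is implied by the Q_INDEX_MAX kerneldoc above but is not visible in this hunk:

/* Illustrative index increment, protected by the new index_lock. */
static void queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->index_lock, irqflags);
	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
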
@@ -565,6 +577,28 @@ struct data_queue_desc {
 	queue_loop(__entry, (__dev)->tx, queue_end(__dev))
 
 /**
+ * rt2x00queue_for_each_entry - Loop through all entries in the queue
+ * @queue: Pointer to @data_queue
+ * @start: &enum queue_index Pointer to start index
+ * @end: &enum queue_index Pointer to end index
+ * @data: Data to pass to the callback function
+ * @fn: The function to call for each &struct queue_entry
+ *
+ * This will walk through all entries in the queue, in chronological
+ * order. This means it will start at the current @start pointer
+ * and will walk through the queue until it reaches the @end pointer.
+ *
+ * If fn returns true for an entry rt2x00queue_for_each_entry will stop
+ * processing and return true as well.
+ */
+bool rt2x00queue_for_each_entry(struct data_queue *queue,
+				enum queue_index start,
+				enum queue_index end,
+				void *data,
+				bool (*fn)(struct queue_entry *entry,
+					   void *data));
+
+/**
  * rt2x00queue_empty - Check if the queue is empty.
  * @queue: Queue to check if empty.
  */
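
A usage sketch for the new iterator; the callback and both helper names are made up for illustration, but the call matches the declaration above:

/* Count the entries between Q_INDEX_DONE and Q_INDEX that the hardware
 * still owns. Returning false keeps the walk going. */
static bool count_device_owned(struct queue_entry *entry, void *data)
{
	unsigned int *count = data;

	if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		(*count)++;
	return false;
}

static unsigned int queue_count_device_owned(struct data_queue *queue)
{
	unsigned int count = 0;

	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   &count, count_device_owned);
	return count;
}
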
@@ -601,12 +635,25 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
 }
 
 /**
- * rt2x00queue_timeout - Check if a timeout occured for this queue
- * @queue: Queue to check.
+ * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
+ * @entry: Queue entry to check.
+ */
+static inline int rt2x00queue_status_timeout(struct queue_entry *entry)
+{
+	if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+		return false;
+	return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
+}
+
+/**
+ * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
+ * @entry: Queue entry to check.
  */
-static inline int rt2x00queue_timeout(struct data_queue *queue)
+static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
 {
-	return time_after(queue->last_index, queue->last_index_done + (HZ / 10));
+	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+		return false;
+	return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
 }
 
 /**
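
Both helpers key off the per-entry @last_action timestamp and a single flag, so a watchdog only has to combine them. A small illustrative check, not taken from the driver:

/* An entry looks stuck if either its DMA transfer or its TX status
 * report has been outstanding for more than the 100 ms window above. */
static inline bool entry_looks_stuck(struct queue_entry *entry)
{
	return rt2x00queue_dma_timeout(entry) ||
	       rt2x00queue_status_timeout(entry);
}
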