diff options
Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r-- | drivers/net/wireless/rt2x00/rt2x00queue.c | 82 |
1 file changed, 65 insertions, 17 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index a3401d301058..eede99939db9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | 2 | Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> |
3 | Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> | 4 | Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> |
4 | <http://rt2x00.serialmonkey.com> | 5 | <http://rt2x00.serialmonkey.com> |
5 | 6 | ||
@@ -311,7 +312,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, | |||
311 | /* | 312 | /* |
312 | * Initialize information from queue | 313 | * Initialize information from queue |
313 | */ | 314 | */ |
314 | txdesc->queue = entry->queue->qid; | 315 | txdesc->qid = entry->queue->qid; |
315 | txdesc->cw_min = entry->queue->cw_min; | 316 | txdesc->cw_min = entry->queue->cw_min; |
316 | txdesc->cw_max = entry->queue->cw_max; | 317 | txdesc->cw_max = entry->queue->cw_max; |
317 | txdesc->aifs = entry->queue->aifs; | 318 | txdesc->aifs = entry->queue->aifs; |
@@ -448,15 +449,14 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, | |||
448 | struct txentry_desc *txdesc) | 449 | struct txentry_desc *txdesc) |
449 | { | 450 | { |
450 | struct data_queue *queue = entry->queue; | 451 | struct data_queue *queue = entry->queue; |
451 | struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; | ||
452 | 452 | ||
453 | rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); | 453 | queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * All processing on the frame has been completed, this means | 456 | * All processing on the frame has been completed, this means |
457 | * it is now ready to be dumped to userspace through debugfs. | 457 | * it is now ready to be dumped to userspace through debugfs. |
458 | */ | 458 | */ |
459 | rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); | 459 | rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); |
460 | } | 460 | } |
461 | 461 | ||
462 | static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, | 462 | static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, |
@@ -476,7 +476,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, | |||
476 | */ | 476 | */ |
477 | if (rt2x00queue_threshold(queue) || | 477 | if (rt2x00queue_threshold(queue) || |
478 | !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) | 478 | !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) |
479 | rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); | 479 | rt2x00dev->ops->lib->kick_tx_queue(queue); |
480 | } | 480 | } |
481 | 481 | ||
482 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, | 482 | int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, |
@@ -590,7 +590,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, | |||
590 | intf->beacon->skb = NULL; | 590 | intf->beacon->skb = NULL; |
591 | 591 | ||
592 | if (!enable_beacon) { | 592 | if (!enable_beacon) { |
593 | rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON); | 593 | rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue); |
594 | mutex_unlock(&intf->beacon_skb_mutex); | 594 | mutex_unlock(&intf->beacon_skb_mutex); |
595 | return 0; | 595 | return 0; |
596 | } | 596 | } |
@@ -625,6 +625,51 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, | |||
625 | return 0; | 625 | return 0; |
626 | } | 626 | } |
627 | 627 | ||
628 | void rt2x00queue_for_each_entry(struct data_queue *queue, | ||
629 | enum queue_index start, | ||
630 | enum queue_index end, | ||
631 | void (*fn)(struct queue_entry *entry)) | ||
632 | { | ||
633 | unsigned long irqflags; | ||
634 | unsigned int index_start; | ||
635 | unsigned int index_end; | ||
636 | unsigned int i; | ||
637 | |||
638 | if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { | ||
639 | ERROR(queue->rt2x00dev, | ||
640 | "Entry requested from invalid index range (%d - %d)\n", | ||
641 | start, end); | ||
642 | return; | ||
643 | } | ||
644 | |||
645 | /* | ||
646 | * Only protect the range we are going to loop over, | ||
647 | * if during our loop an extra entry is set to pending | ||
648 | * it should not be kicked during this run, since it | ||
649 | * is part of another TX operation. | ||
650 | */ | ||
651 | spin_lock_irqsave(&queue->lock, irqflags); | ||
652 | index_start = queue->index[start]; | ||
653 | index_end = queue->index[end]; | ||
654 | spin_unlock_irqrestore(&queue->lock, irqflags); | ||
655 | |||
656 | /* | ||
657 | * Start from the TX done pointer, this guarantees that we will | ||
658 | * send out all frames in the correct order. | ||
659 | */ | ||
660 | if (index_start < index_end) { | ||
661 | for (i = index_start; i < index_end; i++) | ||
662 | fn(&queue->entries[i]); | ||
663 | } else { | ||
664 | for (i = index_start; i < queue->limit; i++) | ||
665 | fn(&queue->entries[i]); | ||
666 | |||
667 | for (i = 0; i < index_end; i++) | ||
668 | fn(&queue->entries[i]); | ||
669 | } | ||
670 | } | ||
671 | EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); | ||
672 | |||
628 | struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, | 673 | struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, |
629 | const enum data_queue_qid queue) | 674 | const enum data_queue_qid queue) |
630 | { | 675 | { |
@@ -686,13 +731,13 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index) | |||
686 | if (queue->index[index] >= queue->limit) | 731 | if (queue->index[index] >= queue->limit) |
687 | queue->index[index] = 0; | 732 | queue->index[index] = 0; |
688 | 733 | ||
734 | queue->last_action[index] = jiffies; | ||
735 | |||
689 | if (index == Q_INDEX) { | 736 | if (index == Q_INDEX) { |
690 | queue->length++; | 737 | queue->length++; |
691 | queue->last_index = jiffies; | ||
692 | } else if (index == Q_INDEX_DONE) { | 738 | } else if (index == Q_INDEX_DONE) { |
693 | queue->length--; | 739 | queue->length--; |
694 | queue->count++; | 740 | queue->count++; |
695 | queue->last_index_done = jiffies; | ||
696 | } | 741 | } |
697 | 742 | ||
698 | spin_unlock_irqrestore(&queue->lock, irqflags); | 743 | spin_unlock_irqrestore(&queue->lock, irqflags); |
@@ -701,14 +746,17 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index) | |||
701 | static void rt2x00queue_reset(struct data_queue *queue) | 746 | static void rt2x00queue_reset(struct data_queue *queue) |
702 | { | 747 | { |
703 | unsigned long irqflags; | 748 | unsigned long irqflags; |
749 | unsigned int i; | ||
704 | 750 | ||
705 | spin_lock_irqsave(&queue->lock, irqflags); | 751 | spin_lock_irqsave(&queue->lock, irqflags); |
706 | 752 | ||
707 | queue->count = 0; | 753 | queue->count = 0; |
708 | queue->length = 0; | 754 | queue->length = 0; |
709 | queue->last_index = jiffies; | 755 | |
710 | queue->last_index_done = jiffies; | 756 | for (i = 0; i < Q_INDEX_MAX; i++) { |
711 | memset(queue->index, 0, sizeof(queue->index)); | 757 | queue->index[i] = 0; |
758 | queue->last_action[i] = jiffies; | ||
759 | } | ||
712 | 760 | ||
713 | spin_unlock_irqrestore(&queue->lock, irqflags); | 761 | spin_unlock_irqrestore(&queue->lock, irqflags); |
714 | } | 762 | } |
@@ -718,7 +766,7 @@ void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev) | |||
718 | struct data_queue *queue; | 766 | struct data_queue *queue; |
719 | 767 | ||
720 | txall_queue_for_each(rt2x00dev, queue) | 768 | txall_queue_for_each(rt2x00dev, queue) |
721 | rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid); | 769 | rt2x00dev->ops->lib->kill_tx_queue(queue); |
722 | } | 770 | } |
723 | 771 | ||
724 | void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) | 772 | void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) |
@@ -730,9 +778,9 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) | |||
730 | rt2x00queue_reset(queue); | 778 | rt2x00queue_reset(queue); |
731 | 779 | ||
732 | for (i = 0; i < queue->limit; i++) { | 780 | for (i = 0; i < queue->limit; i++) { |
733 | queue->entries[i].flags = 0; | ||
734 | |||
735 | rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); | 781 | rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); |
782 | if (queue->qid == QID_RX) | ||
783 | rt2x00queue_index_inc(queue, Q_INDEX); | ||
736 | } | 784 | } |
737 | } | 785 | } |
738 | } | 786 | } |
@@ -755,7 +803,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue, | |||
755 | * Allocate all queue entries. | 803 | * Allocate all queue entries. |
756 | */ | 804 | */ |
757 | entry_size = sizeof(*entries) + qdesc->priv_size; | 805 | entry_size = sizeof(*entries) + qdesc->priv_size; |
758 | entries = kzalloc(queue->limit * entry_size, GFP_KERNEL); | 806 | entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); |
759 | if (!entries) | 807 | if (!entries) |
760 | return -ENOMEM; | 808 | return -ENOMEM; |
761 | 809 | ||
@@ -891,7 +939,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) | |||
891 | */ | 939 | */ |
892 | rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; | 940 | rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; |
893 | 941 | ||
894 | queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); | 942 | queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); |
895 | if (!queue) { | 943 | if (!queue) { |
896 | ERROR(rt2x00dev, "Queue allocation failed.\n"); | 944 | ERROR(rt2x00dev, "Queue allocation failed.\n"); |
897 | return -ENOMEM; | 945 | return -ENOMEM; |