diff options
Diffstat (limited to 'drivers/scsi/fcoe/libfcoe.c')
-rw-r--r-- | drivers/scsi/fcoe/libfcoe.c | 45 |
1 files changed, 21 insertions, 24 deletions
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 7265e0937995..14dd8a0402b6 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -49,6 +49,7 @@ | |||
49 | static int debug_fcoe; | 49 | static int debug_fcoe; |
50 | 50 | ||
51 | #define FCOE_MAX_QUEUE_DEPTH 256 | 51 | #define FCOE_MAX_QUEUE_DEPTH 256 |
52 | #define FCOE_LOW_QUEUE_DEPTH 32 | ||
52 | 53 | ||
53 | /* destination address mode */ | 54 | /* destination address mode */ |
54 | #define FCOE_GW_ADDR_MODE 0x00 | 55 | #define FCOE_GW_ADDR_MODE 0x00 |
@@ -723,21 +724,12 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) | |||
723 | */ | 724 | */ |
724 | void fcoe_watchdog(ulong vp) | 725 | void fcoe_watchdog(ulong vp) |
725 | { | 726 | { |
726 | struct fc_lport *lp; | ||
727 | struct fcoe_softc *fc; | 727 | struct fcoe_softc *fc; |
728 | int qfilled = 0; | ||
729 | 728 | ||
730 | read_lock(&fcoe_hostlist_lock); | 729 | read_lock(&fcoe_hostlist_lock); |
731 | list_for_each_entry(fc, &fcoe_hostlist, list) { | 730 | list_for_each_entry(fc, &fcoe_hostlist, list) { |
732 | lp = fc->lp; | 731 | if (fc->lp) |
733 | if (lp) { | 732 | fcoe_check_wait_queue(fc->lp); |
734 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | ||
735 | qfilled = 1; | ||
736 | if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) { | ||
737 | if (qfilled) | ||
738 | lp->qfull = 0; | ||
739 | } | ||
740 | } | ||
741 | } | 733 | } |
742 | read_unlock(&fcoe_hostlist_lock); | 734 | read_unlock(&fcoe_hostlist_lock); |
743 | 735 | ||
@@ -753,8 +745,8 @@ void fcoe_watchdog(ulong vp) | |||
753 | * | 745 | * |
754 | * This empties the wait_queue, dequeues the head of the wait_queue queue | 746 | * This empties the wait_queue, dequeues the head of the wait_queue queue |
755 | * and calls fcoe_start_io() for each packet, if all skb have been | 747 | * and calls fcoe_start_io() for each packet, if all skb have been |
756 | * transmitted, return 0 if an error occurs, then restore wait_queue and | 748 | * transmitted, return qlen or -1 if an error occurs, then restore |
757 | * try again later. | 749 | * wait_queue and try again later. |
758 | * | 750 | * |
759 | * The wait_queue is used when the skb transmit fails. skb will go | 751 | * The wait_queue is used when the skb transmit fails. skb will go |
760 | * in the wait_queue which will be emptied by the time function OR | 752 | * in the wait_queue which will be emptied by the time function OR |
@@ -764,33 +756,38 @@ void fcoe_watchdog(ulong vp) | |||
764 | */ | 756 | */ |
765 | static int fcoe_check_wait_queue(struct fc_lport *lp) | 757 | static int fcoe_check_wait_queue(struct fc_lport *lp) |
766 | { | 758 | { |
767 | int rc; | ||
768 | struct sk_buff *skb; | 759 | struct sk_buff *skb; |
769 | struct fcoe_softc *fc; | 760 | struct fcoe_softc *fc; |
761 | int rc = -1; | ||
770 | 762 | ||
771 | fc = lport_priv(lp); | 763 | fc = lport_priv(lp); |
772 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 764 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
773 | 765 | ||
774 | /* | 766 | if (fc->fcoe_pending_queue_active) |
775 | * if interface pending queue full then set qfull in lport. | 767 | goto out; |
776 | */ | 768 | fc->fcoe_pending_queue_active = 1; |
777 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | ||
778 | lp->qfull = 1; | ||
779 | if (fc->fcoe_pending_queue.qlen) { | 769 | if (fc->fcoe_pending_queue.qlen) { |
780 | while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { | 770 | while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { |
781 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | 771 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); |
782 | rc = fcoe_start_io(skb); | 772 | rc = fcoe_start_io(skb); |
783 | if (rc) { | 773 | if (rc) |
784 | fcoe_insert_wait_queue_head(lp, skb); | 774 | fcoe_insert_wait_queue_head(lp, skb); |
785 | return rc; | ||
786 | } | ||
787 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 775 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
776 | if (rc) | ||
777 | break; | ||
788 | } | 778 | } |
789 | if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH) | 779 | /* |
780 | * if interface pending queue is below FCOE_LOW_QUEUE_DEPTH | ||
781 | * then clear qfull flag. | ||
782 | */ | ||
783 | if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) | ||
790 | lp->qfull = 0; | 784 | lp->qfull = 0; |
791 | } | 785 | } |
786 | fc->fcoe_pending_queue_active = 0; | ||
787 | rc = fc->fcoe_pending_queue.qlen; | ||
788 | out: | ||
792 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | 789 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); |
793 | return fc->fcoe_pending_queue.qlen; | 790 | return rc; |
794 | } | 791 | } |
795 | 792 | ||
796 | /** | 793 | /** |