diff options
author | Vasu Dev <vasu.dev@intel.com> | 2009-02-27 13:56:27 -0500 |
---|---|---|
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2009-03-10 10:09:21 -0400 |
commit | c826a3145736e3baabebccfd0aecfbb6dae059f2 (patch) | |
tree | 4c156c85a8eec039973b369cff052206da128cbe | |
parent | e904158159e9812d06646767b7c81846dc3b05e6 (diff) |
[SCSI] fcoe: Out-of-order tx frames were causing several check-condition SCSI status
frames, followed by these errors in the log.
[sdp] Result: hostbyte=DID_OK driverbyte=DRIVER_SENSE,SUGGEST_OK
[sdp] Sense Key : Aborted Command [current]
[sdp] Add. Sense: Data phase error
This was causing some test apps to exit due to write failure under heavy
load.
This was due to a race around adding and removing tx frame skbs in
fcoe_pending_queue. Chris Leech helped me find that the brief unlocking
period when pulling an skb from fcoe_pending_queue in various contexts
(fcoe_watchdog and fcoe_xmit), and then adding the skb back into
fcoe_pending_queue upon a failed fcoe_start_io, could change the skb/tx frame
order in fcoe_pending_queue. Thanks Chris.
This patch allows only a single context to pull skbs from fcoe_pending_queue
at any time, preventing the ordering issue/race described above by use of a
fcoe_pending_queue_active flag.
This patch also simplifies fcoe_watchdog with a modified fcoe_check_wait_queue,
using FCOE_LOW_QUEUE_DEPTH instead of the several conditionals previously used
to clear and set lp->qfull.
I think FCOE_MAX_QUEUE_DEPTH together with FCOE_LOW_QUEUE_DEPTH will work better
for setting and clearing lp->qfull, and these could be fine-tuned for performance.
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
-rw-r--r-- | drivers/scsi/fcoe/fcoe_sw.c | 1 | ||||
-rw-r--r-- | drivers/scsi/fcoe/libfcoe.c | 45 | ||||
-rw-r--r-- | include/scsi/libfcoe.h | 1 |
3 files changed, 23 insertions, 24 deletions
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c index 37d359db1648..da210eba1941 100644 --- a/drivers/scsi/fcoe/fcoe_sw.c +++ b/drivers/scsi/fcoe/fcoe_sw.c | |||
@@ -188,6 +188,7 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
188 | 188 | ||
189 | 189 | ||
190 | skb_queue_head_init(&fc->fcoe_pending_queue); | 190 | skb_queue_head_init(&fc->fcoe_pending_queue); |
191 | fc->fcoe_pending_queue_active = 0; | ||
191 | 192 | ||
192 | /* setup Source Mac Address */ | 193 | /* setup Source Mac Address */ |
193 | memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr, | 194 | memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr, |
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 7265e0937995..14dd8a0402b6 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -49,6 +49,7 @@ | |||
49 | static int debug_fcoe; | 49 | static int debug_fcoe; |
50 | 50 | ||
51 | #define FCOE_MAX_QUEUE_DEPTH 256 | 51 | #define FCOE_MAX_QUEUE_DEPTH 256 |
52 | #define FCOE_LOW_QUEUE_DEPTH 32 | ||
52 | 53 | ||
53 | /* destination address mode */ | 54 | /* destination address mode */ |
54 | #define FCOE_GW_ADDR_MODE 0x00 | 55 | #define FCOE_GW_ADDR_MODE 0x00 |
@@ -723,21 +724,12 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) | |||
723 | */ | 724 | */ |
724 | void fcoe_watchdog(ulong vp) | 725 | void fcoe_watchdog(ulong vp) |
725 | { | 726 | { |
726 | struct fc_lport *lp; | ||
727 | struct fcoe_softc *fc; | 727 | struct fcoe_softc *fc; |
728 | int qfilled = 0; | ||
729 | 728 | ||
730 | read_lock(&fcoe_hostlist_lock); | 729 | read_lock(&fcoe_hostlist_lock); |
731 | list_for_each_entry(fc, &fcoe_hostlist, list) { | 730 | list_for_each_entry(fc, &fcoe_hostlist, list) { |
732 | lp = fc->lp; | 731 | if (fc->lp) |
733 | if (lp) { | 732 | fcoe_check_wait_queue(fc->lp); |
734 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | ||
735 | qfilled = 1; | ||
736 | if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) { | ||
737 | if (qfilled) | ||
738 | lp->qfull = 0; | ||
739 | } | ||
740 | } | ||
741 | } | 733 | } |
742 | read_unlock(&fcoe_hostlist_lock); | 734 | read_unlock(&fcoe_hostlist_lock); |
743 | 735 | ||
@@ -753,8 +745,8 @@ void fcoe_watchdog(ulong vp) | |||
753 | * | 745 | * |
754 | * This empties the wait_queue, dequeue the head of the wait_queue queue | 746 | * This empties the wait_queue, dequeue the head of the wait_queue queue |
755 | * and calls fcoe_start_io() for each packet, if all skb have been | 747 | * and calls fcoe_start_io() for each packet, if all skb have been |
756 | * transmitted, return 0 if a error occurs, then restore wait_queue and | 748 | * transmitted, return qlen or -1 if a error occurs, then restore |
757 | * try again later. | 749 | * wait_queue and try again later. |
758 | * | 750 | * |
759 | * The wait_queue is used when the skb transmit fails. skb will go | 751 | * The wait_queue is used when the skb transmit fails. skb will go |
760 | * in the wait_queue which will be emptied by the time function OR | 752 | * in the wait_queue which will be emptied by the time function OR |
@@ -764,33 +756,38 @@ void fcoe_watchdog(ulong vp) | |||
764 | */ | 756 | */ |
765 | static int fcoe_check_wait_queue(struct fc_lport *lp) | 757 | static int fcoe_check_wait_queue(struct fc_lport *lp) |
766 | { | 758 | { |
767 | int rc; | ||
768 | struct sk_buff *skb; | 759 | struct sk_buff *skb; |
769 | struct fcoe_softc *fc; | 760 | struct fcoe_softc *fc; |
761 | int rc = -1; | ||
770 | 762 | ||
771 | fc = lport_priv(lp); | 763 | fc = lport_priv(lp); |
772 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 764 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
773 | 765 | ||
774 | /* | 766 | if (fc->fcoe_pending_queue_active) |
775 | * if interface pending queue full then set qfull in lport. | 767 | goto out; |
776 | */ | 768 | fc->fcoe_pending_queue_active = 1; |
777 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | ||
778 | lp->qfull = 1; | ||
779 | if (fc->fcoe_pending_queue.qlen) { | 769 | if (fc->fcoe_pending_queue.qlen) { |
780 | while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { | 770 | while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { |
781 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | 771 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); |
782 | rc = fcoe_start_io(skb); | 772 | rc = fcoe_start_io(skb); |
783 | if (rc) { | 773 | if (rc) |
784 | fcoe_insert_wait_queue_head(lp, skb); | 774 | fcoe_insert_wait_queue_head(lp, skb); |
785 | return rc; | ||
786 | } | ||
787 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 775 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
776 | if (rc) | ||
777 | break; | ||
788 | } | 778 | } |
789 | if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH) | 779 | /* |
780 | * if interface pending queue is below FCOE_LOW_QUEUE_DEPTH | ||
781 | * then clear qfull flag. | ||
782 | */ | ||
783 | if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) | ||
790 | lp->qfull = 0; | 784 | lp->qfull = 0; |
791 | } | 785 | } |
786 | fc->fcoe_pending_queue_active = 0; | ||
787 | rc = fc->fcoe_pending_queue.qlen; | ||
788 | out: | ||
792 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | 789 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); |
793 | return fc->fcoe_pending_queue.qlen; | 790 | return rc; |
794 | } | 791 | } |
795 | 792 | ||
796 | /** | 793 | /** |
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index f43d3833a7a8..941818f29f59 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h | |||
@@ -46,6 +46,7 @@ struct fcoe_softc { | |||
46 | struct net_device *phys_dev; /* device with ethtool_ops */ | 46 | struct net_device *phys_dev; /* device with ethtool_ops */ |
47 | struct packet_type fcoe_packet_type; | 47 | struct packet_type fcoe_packet_type; |
48 | struct sk_buff_head fcoe_pending_queue; | 48 | struct sk_buff_head fcoe_pending_queue; |
49 | u8 fcoe_pending_queue_active; | ||
49 | 50 | ||
50 | u8 dest_addr[ETH_ALEN]; | 51 | u8 dest_addr[ETH_ALEN]; |
51 | u8 ctl_src_addr[ETH_ALEN]; | 52 | u8 ctl_src_addr[ETH_ALEN]; |