author		Vasu Dev <vasu.dev@intel.com>	2009-02-27 13:54:57 -0500
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2009-03-06 16:37:49 -0500
commit		bc0e17f691085315ae9303eb5b0883fe16dfe6b1 (patch)
tree		53138e80aec810604d4eca4626e4f8df65376ccc /drivers/scsi
parent		a7e84f2b83f17f8f11da34ccef3ba5a862dc0182 (diff)
[SCSI] libfc, fcoe: fixed locking issues with lport->lp_mutex around lport->link_status
fcoe_xmit could call fc_pause when the pending skb queue length grew larger
than FCOE_MAX_QUEUE_DEPTH; fc_pause then tried to grab lport->lp_mutex to
change lport->link_status, and that had these issues:

1. fcoe_xmit is called with bh disabled, so sleeping on lport->lp_mutex in
that context caused "BUG: scheduling while atomic".

2. fc_linkup and fc_linkdown call the lport_enter functions with
lport->lp_mutex held, and these enter functions in turn call fcoe_xmit to
send lport-related FC frames, e.g. fc_linkup => fc_lport_enter_flogi to send
a FLOGI request. Grabbing the same lport->lp_mutex again in fc_pause from
fcoe_xmit would then deadlock.
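
To make the second issue concrete, the pre-patch call chain is sketched below
as an illustrative comment (simplified, not verbatim kernel source):

/*
 * fc_linkup(lport)                   takes lport->lp_mutex
 *   fc_lport_enter_flogi(lport)      lp_mutex still held
 *     fcoe_xmit(lp, fp)              pending queue exceeds FCOE_MAX_QUEUE_DEPTH
 *       fc_pause(lp)                 mutex_lock(&lport->lp_mutex) again
 *                                    => recursive acquisition: deadlock (issue 2).
 *                                    Independently, whenever fcoe_xmit runs with
 *                                    bh disabled, sleeping in mutex_lock() is
 *                                    what produced "BUG: scheduling while
 *                                    atomic" (issue 1).
 */
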
The lport->lp_mutex was used to set FC_PAUSE in the fcoe_xmit path, but the
FC_PAUSE bit was not used anywhere besides being set and cleared in
lport->link_status. A separate qfull field in fc_lport is used instead, which
removes the need for lport->lp_mutex when tracking the pending-queue-full
condition and thereby avoids the two locking issues described above.
Also added a check of lp->qfull in fc_fcp_lport_queue_ready so that
SCSI_MLQUEUE_HOST_BUSY is triggered while lp->qfull is set, preventing
scsi-ml from sending more commands while the queue is full.
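
fc_queuecommand itself is not touched by this diff; the sketch below only
illustrates how the updated fc_fcp_lport_queue_ready() check is meant to be
consumed (the wrapper name and surrounding logic are assumptions, not code
from this patch):

/* Illustrative caller, not part of this patch. */
static int example_queuecommand(struct scsi_cmnd *sc_cmd, struct fc_lport *lp)
{
	/*
	 * While the link is down, the lport is not READY, or the fcoe
	 * pending queue is full (lp->qfull set from fcoe_xmit), push back
	 * on scsi-ml so the command is retried later.
	 */
	if (!fc_fcp_lport_queue_ready(lp))
		return SCSI_MLQUEUE_HOST_BUSY;

	/* ... otherwise build the FCP exchange and send the command ... */
	return 0;
}
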
This patch eliminates FC_LINK_UP and FC_PAUSE and uses dedicated link_up and
qfull fields in fc_lport instead, which simplifies all the related
conditional code.
Also removed the fc_pause and fc_unpause functions; fcoe now uses the newly
added lport->qfull directly.
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/fcoe/fcoe_sw.c	 6
-rw-r--r--	drivers/scsi/fcoe/libfcoe.c	41
-rw-r--r--	drivers/scsi/libfc/fc_fcp.c	 4
-rw-r--r--	drivers/scsi/libfc/fc_lport.c	36
4 files changed, 28 insertions, 59 deletions
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
index dc4cd5e25760..cf83675a0fb9 100644
--- a/drivers/scsi/fcoe/fcoe_sw.c
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -116,7 +116,8 @@ static int fcoe_sw_lport_config(struct fc_lport *lp)
 {
 	int i = 0;
 
-	lp->link_status = 0;
+	lp->link_up = 0;
+	lp->qfull = 0;
 	lp->max_retry_count = 3;
 	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
 	lp->r_a_tov = 2 * 2 * 1000;
@@ -181,9 +182,8 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
 	if (fc_set_mfs(lp, mfs))
 		return -EINVAL;
 
-	lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
 	if (!fcoe_link_ok(lp))
-		lp->link_status |= FC_LINK_UP;
+		lp->link_up = 1;
 
 	/* offload features support */
 	if (fc->real_dev->features & NETIF_F_SG)
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index e419f486cdb3..296071043f55 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -504,7 +504,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	if (rc) {
 		fcoe_insert_wait_queue(lp, skb);
 		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-			fc_pause(lp);
+			lp->qfull = 1;
 	}
 
 	return 0;
@@ -718,7 +718,7 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
  * fcoe_watchdog - fcoe timer callback
  * @vp:
  *
- * This checks the pending queue length for fcoe and put fcoe to be paused state
+ * This checks the pending queue length for fcoe and set lport qfull
  * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
  * fcoe_hostlist.
  *
@@ -728,17 +728,17 @@ void fcoe_watchdog(ulong vp)
 {
 	struct fc_lport *lp;
 	struct fcoe_softc *fc;
-	int paused = 0;
+	int qfilled = 0;
 
 	read_lock(&fcoe_hostlist_lock);
 	list_for_each_entry(fc, &fcoe_hostlist, list) {
 		lp = fc->lp;
 		if (lp) {
 			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-				paused = 1;
+				qfilled = 1;
 			if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
-				if (paused)
-					fc_unpause(lp);
+				if (qfilled)
+					lp->qfull = 0;
 			}
 		}
 	}
@@ -767,8 +767,7 @@ void fcoe_watchdog(ulong vp)
  **/
 static int fcoe_check_wait_queue(struct fc_lport *lp)
 {
-	int rc, unpause = 0;
-	int paused = 0;
+	int rc;
 	struct sk_buff *skb;
 	struct fcoe_softc *fc;
 
@@ -776,10 +775,10 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
 
 	/*
-	 * is this interface paused?
+	 * if interface pending queue full then set qfull in lport.
 	 */
 	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-		paused = 1;
+		lp->qfull = 1;
 	if (fc->fcoe_pending_queue.qlen) {
 		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
 			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
@@ -791,11 +790,9 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 			spin_lock_bh(&fc->fcoe_pending_queue.lock);
 		}
 		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
-			unpause = 1;
+			lp->qfull = 0;
 	}
 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-	if ((unpause) && (paused))
-		fc_unpause(lp);
 	return fc->fcoe_pending_queue.qlen;
 }
 
@@ -873,7 +870,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	struct net_device *real_dev = ptr;
 	struct fcoe_softc *fc;
 	struct fcoe_dev_stats *stats;
-	u16 new_status;
+	u32 new_link_up;
 	u32 mfs;
 	int rc = NOTIFY_OK;
 
@@ -890,17 +887,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		goto out;
 	}
 
-	new_status = lp->link_status;
+	new_link_up = lp->link_up;
 	switch (event) {
 	case NETDEV_DOWN:
 	case NETDEV_GOING_DOWN:
-		new_status &= ~FC_LINK_UP;
+		new_link_up = 0;
 		break;
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
+		new_link_up = !fcoe_link_ok(lp);
 		break;
 	case NETDEV_CHANGEMTU:
 		mfs = fc->real_dev->mtu -
@@ -908,17 +903,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 			sizeof(struct fcoe_crc_eof));
 		if (mfs >= FC_MIN_MAX_FRAME)
 			fc_set_mfs(lp, mfs);
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
+		new_link_up = !fcoe_link_ok(lp);
 		break;
 	case NETDEV_REGISTER:
 		break;
 	default:
 		FC_DBG("unknown event %ld call", event);
 	}
-	if (lp->link_status != new_status) {
-		if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lp->link_up != new_link_up) {
+		if (new_link_up)
 			fc_linkup(lp);
 		else {
 			stats = lp->dev_stats[smp_processor_id()];
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 404e63ff46b8..f440aaca39c2 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1621,7 +1621,7 @@ out:
 static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
 {
 	/* lock ? */
-	return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
+	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
 }
 
 /**
@@ -1890,7 +1890,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	lp = shost_priv(sc_cmd->device->host);
 	if (lp->state != LPORT_ST_READY)
 		return rc;
-	else if (!(lp->link_status & FC_LINK_UP))
+	else if (!lp->link_up)
 		return rc;
 
 	spin_lock_irqsave(lp->host->host_lock, flags);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5db223ce3b25..a6ab692f5f51 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -250,7 +250,7 @@ void fc_get_host_port_state(struct Scsi_Host *shost)
 {
 	struct fc_lport *lp = shost_priv(shost);
 
-	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lp->link_up)
 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
 	else
 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
@@ -577,8 +577,8 @@ void fc_linkup(struct fc_lport *lport)
 		       fc_host_port_id(lport->host));
 
 	mutex_lock(&lport->lp_mutex);
-	if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
-		lport->link_status |= FC_LINK_UP;
+	if (!lport->link_up) {
+		lport->link_up = 1;
 
 		if (lport->state == LPORT_ST_RESET)
 			fc_lport_enter_flogi(lport);
@@ -597,8 +597,8 @@ void fc_linkdown(struct fc_lport *lport)
 	FC_DEBUG_LPORT("Link is down for port (%6x)\n",
 		       fc_host_port_id(lport->host));
 
-	if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
-		lport->link_status &= ~(FC_LINK_UP);
+	if (lport->link_up) {
+		lport->link_up = 0;
 		fc_lport_enter_reset(lport);
 		lport->tt.fcp_cleanup(lport);
 	}
@@ -607,30 +607,6 @@ void fc_linkdown(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_linkdown);
 
 /**
- * fc_pause - Pause the flow of frames
- * @lport: The lport to be paused
- */
-void fc_pause(struct fc_lport *lport)
-{
-	mutex_lock(&lport->lp_mutex);
-	lport->link_status |= FC_PAUSE;
-	mutex_unlock(&lport->lp_mutex);
-}
-EXPORT_SYMBOL(fc_pause);
-
-/**
- * fc_unpause - Unpause the flow of frames
- * @lport: The lport to be unpaused
- */
-void fc_unpause(struct fc_lport *lport)
-{
-	mutex_lock(&lport->lp_mutex);
-	lport->link_status &= ~(FC_PAUSE);
-	mutex_unlock(&lport->lp_mutex);
-}
-EXPORT_SYMBOL(fc_unpause);
-
-/**
  * fc_fabric_logoff - Logout of the fabric
  * @lport: fc_lport pointer to logoff the fabric
  *
@@ -977,7 +953,7 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
 	fc_host_fabric_name(lport->host) = 0;
 	fc_host_port_id(lport->host) = 0;
 
-	if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lport->link_up)
 		fc_lport_enter_flogi(lport);
 }
 
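
Taken together, the hunks above replace the FC_PAUSE/fc_pause() machinery with
a plain qfull flag. A consolidated view of its lifecycle, summarized from the
changes in this patch (no new code is being proposed here):

/*
 * qfull lifecycle after this patch:
 *
 *   set:    fcoe_xmit() and fcoe_check_wait_queue() set lp->qfull = 1 when
 *           fc->fcoe_pending_queue.qlen exceeds FCOE_MAX_QUEUE_DEPTH
 *   clear:  fcoe_watchdog() and fcoe_check_wait_queue() set lp->qfull = 0
 *           once the pending queue drains below FCOE_MAX_QUEUE_DEPTH
 *   read:   fc_fcp_lport_queue_ready() returns false while lp->qfull is set,
 *           so scsi-ml is pushed back with SCSI_MLQUEUE_HOST_BUSY
 *
 * The flag is written from the fcoe transmit and timer paths and read from
 * the fcp path, so lport->lp_mutex is no longer needed around it.
 */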