author     Vasu Dev <vasu.dev@intel.com>                            2009-02-27 13:54:57 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2009-03-06 16:37:49 -0500
commit     bc0e17f691085315ae9303eb5b0883fe16dfe6b1
tree       53138e80aec810604d4eca4626e4f8df65376ccc  /drivers/scsi/fcoe
parent     a7e84f2b83f17f8f11da34ccef3ba5a862dc0182
[SCSI] libfc, fcoe: fixed locking issues with lport->lp_mutex around lport->link_status
fcoe_xmit() could call fc_pause() when the pending skb queue length exceeded
FCOE_MAX_QUEUE_DEPTH, and fc_pause() then tried to grab lport->lp_mutex to
change lport->link_status. That had two problems:
1. fcoe_xmit() gets called with bottom halves disabled, so grabbing
lport->lp_mutex there caused "BUG: scheduling while atomic".
2. fc_linkup() and fc_linkdown() call the lport enter functions with
lport->lp_mutex held, and those enter functions in turn call fcoe_xmit() to
send lport-related FC frames, e.g. fc_linkup => fc_lport_enter_flogi to send
a FLOGI request. Grabbing the same lport->lp_mutex again in fc_pause() from
fcoe_xmit() would therefore deadlock, as sketched below.
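To make issue 2 concrete, here is a minimal userspace analogue of the old call
chain (pthread stand-ins with invented helper bodies, not the kernel code). An
error-checking mutex is used so the self-deadlock is reported as EDEADLK
instead of hanging the way the kernel mutex would:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t lp_mutex;        /* plays the role of lport->lp_mutex */

    static void fc_pause(void)              /* old path: flag the port as paused */
    {
            int err = pthread_mutex_lock(&lp_mutex);        /* second acquisition */

            if (err)
                    printf("fc_pause: %s\n", strerror(err)); /* EDEADLK reported */
            else
                    pthread_mutex_unlock(&lp_mutex);
    }

    static void fcoe_xmit(void)             /* pending queue "full" -> pause */
    {
            fc_pause();
    }

    static void fc_linkup(void)             /* old path: runs with lp_mutex held */
    {
            pthread_mutex_lock(&lp_mutex);
            fcoe_xmit();                    /* e.g. via fc_lport_enter_flogi() */
            pthread_mutex_unlock(&lp_mutex);
    }

    int main(void)
    {
            pthread_mutexattr_t attr;

            pthread_mutexattr_init(&attr);
            pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
            pthread_mutex_init(&lp_mutex, &attr);
            fc_linkup();
            return 0;
    }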
lport->lp_mutex was taken in the fcoe_xmit() path only to set FC_PAUSE, and
the FC_PAUSE bit was never used anywhere beyond being set and cleared in
lport->link_status. A separate qfull field in fc_lport now tracks the
pending-queue-full condition instead, removing the need for lport->lp_mutex
there and avoiding both locking issues described above.
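The reason the mutex can go away entirely: FC_PAUSE lived in the shared
link_status word, so pausing was a read-modify-write that had to be serialized
against the link-state updates, while qfull is a word of its own that the
transmit path can store to directly. A rough sketch of that difference, using
stand-in structs rather than the real struct fc_lport and illustrative bit
values:

    #include <stdint.h>
    #include <stdio.h>

    #define FC_LINK_UP 0x01         /* illustrative values; the real bits are removed */
    #define FC_PAUSE   0x02

    struct old_lport { uint16_t link_status; };             /* stand-in only */
    struct new_lport { uint32_t link_up; uint32_t qfull; }; /* stand-in only */

    /* Old: FC_PAUSE shares link_status with FC_LINK_UP, so setting it is a
     * load-OR-store on a word that fc_linkup()/fc_linkdown() also modify,
     * hence the lp_mutex. */
    static void pause_old(struct old_lport *lp)
    {
            lp->link_status |= FC_PAUSE;
    }

    /* New: the queue-full condition has a dedicated field, so fcoe_xmit() can
     * flag it with a single store: no lock, no interaction with link state. */
    static void mark_qfull(struct new_lport *lp)
    {
            lp->qfull = 1;
    }

    int main(void)
    {
            struct old_lport o = { .link_status = FC_LINK_UP };
            struct new_lport n = { .link_up = 1, .qfull = 0 };

            pause_old(&o);
            mark_qfull(&n);
            printf("old link_status=0x%x  new qfull=%u\n",
                   (unsigned)o.link_status, (unsigned)n.qfull);
            return 0;
    }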
Also added a check of lp->qfull in fc_fcp_lport_queue_ready() so that
SCSI_MLQUEUE_HOST_BUSY is returned while lp->qfull is set, preventing the
SCSI midlayer from sending more commands to a congested lport.
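The fc_fcp.c side of that check is not part of the diff below (which is
limited to drivers/scsi/fcoe), so the following is only a simplified model of
the intended behaviour, with a made-up miniature lport struct and a stub
queuecommand around the one test that matters:

    #include <stdbool.h>
    #include <stdio.h>

    #define SCSI_MLQUEUE_HOST_BUSY 0x1055   /* same value as include/scsi/scsi.h */

    struct mini_lport {                     /* stand-in, not struct fc_lport */
            bool link_up;
            bool qfull;
    };

    /* Mirrors the intent of fc_fcp_lport_queue_ready(): the lport is ready
     * only when the link is up and the fcoe pending queue is not full. */
    static bool lport_queue_ready(const struct mini_lport *lp)
    {
            return lp->link_up && !lp->qfull;
    }

    static int queuecommand(const struct mini_lport *lp)
    {
            if (!lport_queue_ready(lp))
                    return SCSI_MLQUEUE_HOST_BUSY;  /* midlayer retries later */
            return 0;                               /* issue the command */
    }

    int main(void)
    {
            struct mini_lport lp = { .link_up = true, .qfull = true };

            printf("qfull set:   0x%x\n", (unsigned)queuecommand(&lp));
            lp.qfull = false;
            printf("qfull clear: 0x%x\n", (unsigned)queuecommand(&lp));
            return 0;
    }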
This patch eliminates FC_LINK_UP and FC_PAUSE altogether and uses dedicated
link_up and qfull fields in fc_lport instead, which simplifies all the
related conditional code.
It also removes the fc_pause and fc_unpause functions; fcoe now uses the
newly added lport->qfull directly.
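The shape of the conditional cleanup, as a sketch (stand-in variables and an
illustrative bit value; compare the fcoe_device_notification() hunks below):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FC_LINK_UP 0x01         /* illustrative value; the real flag is gone */

    int main(void)
    {
            bool link_ok = true;    /* stands in for !fcoe_link_ok(lp) */

            /* Old: clear the bit, maybe set it again, then test it with a mask. */
            uint16_t link_status = 0;
            link_status &= ~FC_LINK_UP;
            if (link_ok)
                    link_status |= FC_LINK_UP;
            printf("old: up=%d\n", (link_status & FC_LINK_UP) == FC_LINK_UP);

            /* New: one assignment and a plain boolean test. */
            uint32_t link_up = link_ok;     /* new_link_up = !fcoe_link_ok(lp) */
            printf("new: up=%u\n", (unsigned)link_up);
            return 0;
    }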
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/fcoe')
-rw-r--r--  drivers/scsi/fcoe/fcoe_sw.c   6
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c  41
2 files changed, 20 insertions, 27 deletions
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
index dc4cd5e25760..cf83675a0fb9 100644
--- a/drivers/scsi/fcoe/fcoe_sw.c
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -116,7 +116,8 @@ static int fcoe_sw_lport_config(struct fc_lport *lp)
 {
         int i = 0;
 
-        lp->link_status = 0;
+        lp->link_up = 0;
+        lp->qfull = 0;
         lp->max_retry_count = 3;
         lp->e_d_tov = 2 * 1000;        /* FC-FS default */
         lp->r_a_tov = 2 * 2 * 1000;
@@ -181,9 +182,8 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
         if (fc_set_mfs(lp, mfs))
                 return -EINVAL;
 
-        lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
         if (!fcoe_link_ok(lp))
-                lp->link_status |= FC_LINK_UP;
+                lp->link_up = 1;
 
         /* offload features support */
         if (fc->real_dev->features & NETIF_F_SG)
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index e419f486cdb3..296071043f55 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -504,7 +504,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
         if (rc) {
                 fcoe_insert_wait_queue(lp, skb);
                 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-                        fc_pause(lp);
+                        lp->qfull = 1;
         }
 
         return 0;
@@ -718,7 +718,7 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
  * fcoe_watchdog - fcoe timer callback
  * @vp:
  *
- * This checks the pending queue length for fcoe and put fcoe to be paused state
+ * This checks the pending queue length for fcoe and set lport qfull
  * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
  * fcoe_hostlist.
  *
@@ -728,17 +728,17 @@ void fcoe_watchdog(ulong vp)
 {
         struct fc_lport *lp;
         struct fcoe_softc *fc;
-        int paused = 0;
+        int qfilled = 0;
 
         read_lock(&fcoe_hostlist_lock);
         list_for_each_entry(fc, &fcoe_hostlist, list) {
                 lp = fc->lp;
                 if (lp) {
                         if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-                                paused = 1;
+                                qfilled = 1;
                         if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
-                                if (paused)
-                                        fc_unpause(lp);
+                                if (qfilled)
+                                        lp->qfull = 0;
                         }
                 }
         }
@@ -767,8 +767,7 @@ void fcoe_watchdog(ulong vp)
  **/
 static int fcoe_check_wait_queue(struct fc_lport *lp)
 {
-        int rc, unpause = 0;
-        int paused = 0;
+        int rc;
         struct sk_buff *skb;
         struct fcoe_softc *fc;
 
@@ -776,10 +775,10 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
         spin_lock_bh(&fc->fcoe_pending_queue.lock);
 
         /*
-         * is this interface paused?
+         * if interface pending queue full then set qfull in lport.
          */
         if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-                paused = 1;
+                lp->qfull = 1;
         if (fc->fcoe_pending_queue.qlen) {
                 while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
                         spin_unlock_bh(&fc->fcoe_pending_queue.lock);
@@ -791,11 +790,9 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
                         spin_lock_bh(&fc->fcoe_pending_queue.lock);
                 }
                 if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
-                        unpause = 1;
+                        lp->qfull = 0;
         }
         spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-        if ((unpause) && (paused))
-                fc_unpause(lp);
         return fc->fcoe_pending_queue.qlen;
 }
 
@@ -873,7 +870,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
         struct net_device *real_dev = ptr;
         struct fcoe_softc *fc;
         struct fcoe_dev_stats *stats;
-        u16 new_status;
+        u32 new_link_up;
         u32 mfs;
         int rc = NOTIFY_OK;
 
@@ -890,17 +887,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
                 goto out;
         }
 
-        new_status = lp->link_status;
+        new_link_up = lp->link_up;
         switch (event) {
         case NETDEV_DOWN:
         case NETDEV_GOING_DOWN:
-                new_status &= ~FC_LINK_UP;
+                new_link_up = 0;
                 break;
         case NETDEV_UP:
         case NETDEV_CHANGE:
-                new_status &= ~FC_LINK_UP;
-                if (!fcoe_link_ok(lp))
-                        new_status |= FC_LINK_UP;
+                new_link_up = !fcoe_link_ok(lp);
                 break;
         case NETDEV_CHANGEMTU:
                 mfs = fc->real_dev->mtu -
@@ -908,17 +903,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
                         sizeof(struct fcoe_crc_eof));
                 if (mfs >= FC_MIN_MAX_FRAME)
                         fc_set_mfs(lp, mfs);
-                new_status &= ~FC_LINK_UP;
-                if (!fcoe_link_ok(lp))
-                        new_status |= FC_LINK_UP;
+                new_link_up = !fcoe_link_ok(lp);
                 break;
         case NETDEV_REGISTER:
                 break;
         default:
                 FC_DBG("unknown event %ld call", event);
         }
-        if (lp->link_status != new_status) {
-                if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+        if (lp->link_up != new_link_up) {
+                if (new_link_up)
                         fc_linkup(lp);
                 else {
                         stats = lp->dev_stats[smp_processor_id()];