Diffstat (limited to 'drivers/scsi/fcoe/libfcoe.c')
-rw-r--r--	drivers/scsi/fcoe/libfcoe.c	41
1 file changed, 17 insertions(+), 24 deletions(-)
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index e419f486cdb3..296071043f55 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -504,7 +504,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	if (rc) {
 		fcoe_insert_wait_queue(lp, skb);
 		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-			fc_pause(lp);
+			lp->qfull = 1;
 	}
 
 	return 0;
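
As a side note, here is a minimal user-space model of the transmit-side flow control this hunk introduces: once the deferred-frame backlog exceeds FCOE_MAX_QUEUE_DEPTH, the lport is flagged qfull instead of being paused via fc_pause(). The struct layouts, the queue counter, and the threshold value below are illustrative stand-ins, not the kernel's real fc_lport/fcoe_softc definitions.

/*
 * Simplified model of the new transmit-side flow control.  The types
 * below are illustrative stand-ins, not the kernel structures.
 */
#include <stdio.h>

#define FCOE_MAX_QUEUE_DEPTH 256	/* threshold value assumed for the sketch */

struct fc_lport {
	int qfull;			/* the flag this commit introduces */
};

struct fcoe_softc {
	struct fc_lport *lp;
	unsigned int pending_qlen;	/* stands in for fcoe_pending_queue.qlen */
};

/* Mirrors the tail of fcoe_xmit() after the change: a frame that could not
 * be sent is queued, and once the backlog exceeds the threshold the lport
 * is flagged full rather than paused. */
static void fcoe_defer_frame(struct fcoe_softc *fc)
{
	fc->pending_qlen++;		/* stands in for fcoe_insert_wait_queue() */
	if (fc->pending_qlen > FCOE_MAX_QUEUE_DEPTH)
		fc->lp->qfull = 1;	/* was: fc_pause(lp) */
}

int main(void)
{
	struct fc_lport lp = { .qfull = 0 };
	struct fcoe_softc fc = { .lp = &lp, .pending_qlen = FCOE_MAX_QUEUE_DEPTH };

	fcoe_defer_frame(&fc);			/* pushes the backlog past the limit */
	printf("qfull = %d\n", lp.qfull);	/* prints: qfull = 1 */
	return 0;
}
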
@@ -718,7 +718,7 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
  * fcoe_watchdog - fcoe timer callback
  * @vp:
  *
- * This checks the pending queue length for fcoe and put fcoe to be paused state
+ * This checks the pending queue length for fcoe and set lport qfull
  * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
  * fcoe_hostlist.
  *
@@ -728,17 +728,17 @@ void fcoe_watchdog(ulong vp)
 {
 	struct fc_lport *lp;
 	struct fcoe_softc *fc;
-	int paused = 0;
+	int qfilled = 0;
 
 	read_lock(&fcoe_hostlist_lock);
 	list_for_each_entry(fc, &fcoe_hostlist, list) {
 		lp = fc->lp;
 		if (lp) {
 			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-				paused = 1;
+				qfilled = 1;
 			if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
-				if (paused)
-					fc_unpause(lp);
+				if (qfilled)
+					lp->qfull = 0;
 			}
 		}
 	}
@@ -767,8 +767,7 @@ void fcoe_watchdog(ulong vp)
  **/
 static int fcoe_check_wait_queue(struct fc_lport *lp)
 {
-	int rc, unpause = 0;
-	int paused = 0;
+	int rc;
 	struct sk_buff *skb;
 	struct fcoe_softc *fc;
 
@@ -776,10 +775,10 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
 
 	/*
-	 * is this interface paused?
+	 * if interface pending queue full then set qfull in lport.
 	 */
 	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-		paused = 1;
+		lp->qfull = 1;
 	if (fc->fcoe_pending_queue.qlen) {
 		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
 			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
@@ -791,11 +790,9 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 			spin_lock_bh(&fc->fcoe_pending_queue.lock);
 		}
 		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
-			unpause = 1;
+			lp->qfull = 0;
 	}
 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-	if ((unpause) && (paused))
-		fc_unpause(lp);
 	return fc->fcoe_pending_queue.qlen;
 }
 
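For the drain side, here is a similarly simplified sketch of fcoe_check_wait_queue() after this change: qfull is set while the backlog is above the threshold and cleared once it drops below it, replacing the old paused/unpause bookkeeping and the fc_unpause() call. Locking, the sk_buff queue, and the partial-drain handling on transmit failure are deliberately omitted; the types and threshold are the same stand-ins as in the earlier sketch.

/*
 * Simplified model of fcoe_check_wait_queue() after the change: mark the
 * lport full while the backlog exceeds FCOE_MAX_QUEUE_DEPTH, drain it,
 * and clear qfull once it falls below the threshold.  The drain is
 * assumed to succeed for every frame.
 */
#include <stdio.h>

#define FCOE_MAX_QUEUE_DEPTH 256	/* threshold value assumed for the sketch */

struct fc_lport {
	int qfull;
};

struct fcoe_softc {
	struct fc_lport *lp;
	unsigned int pending_qlen;
};

static unsigned int fcoe_drain_pending(struct fcoe_softc *fc)
{
	struct fc_lport *lp = fc->lp;

	if (fc->pending_qlen > FCOE_MAX_QUEUE_DEPTH)
		lp->qfull = 1;		/* was: paused = 1 */

	while (fc->pending_qlen)	/* stands in for the __skb_dequeue()/ */
		fc->pending_qlen--;	/* fcoe_start_io() loop               */

	if (fc->pending_qlen < FCOE_MAX_QUEUE_DEPTH)
		lp->qfull = 0;		/* was: unpause = 1, then fc_unpause(lp) */

	return fc->pending_qlen;
}

int main(void)
{
	struct fc_lport lp = { .qfull = 1 };
	struct fcoe_softc fc = { .lp = &lp, .pending_qlen = 300 };
	unsigned int qlen = fcoe_drain_pending(&fc);

	printf("qlen = %u, qfull = %d\n", qlen, lp.qfull);
	return 0;
}
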
@@ -873,7 +870,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 	struct net_device *real_dev = ptr;
 	struct fcoe_softc *fc;
 	struct fcoe_dev_stats *stats;
-	u16 new_status;
+	u32 new_link_up;
 	u32 mfs;
 	int rc = NOTIFY_OK;
 
@@ -890,17 +887,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 		goto out;
 	}
 
-	new_status = lp->link_status;
+	new_link_up = lp->link_up;
 	switch (event) {
 	case NETDEV_DOWN:
 	case NETDEV_GOING_DOWN:
-		new_status &= ~FC_LINK_UP;
+		new_link_up = 0;
 		break;
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
+		new_link_up = !fcoe_link_ok(lp);
 		break;
 	case NETDEV_CHANGEMTU:
 		mfs = fc->real_dev->mtu -
@@ -908,17 +903,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
 			 sizeof(struct fcoe_crc_eof));
 		if (mfs >= FC_MIN_MAX_FRAME)
 			fc_set_mfs(lp, mfs);
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
+		new_link_up = !fcoe_link_ok(lp);
 		break;
 	case NETDEV_REGISTER:
 		break;
 	default:
 		FC_DBG("unknown event %ld call", event);
 	}
-	if (lp->link_status != new_status) {
-		if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lp->link_up != new_link_up) {
+		if (new_link_up)
 			fc_linkup(lp);
 		else {
 			stats = lp->dev_stats[smp_processor_id()];
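
The notifier hunks drop the FC_LINK_UP bit manipulation on lp->link_status in favour of a boolean lp->link_up. Below is a compilable sketch of the resulting event handling; the event codes, the fcoe_link_ok() stub, the fc_linkup()/fc_linkdown() bodies, and the link-down branch (whose tail is cut off above) are assumptions made for illustration only.

/*
 * Simplified model of the link handling in fcoe_device_notification()
 * after the change: the netdev event is reduced to a boolean, and
 * fc_linkup()/fc_linkdown() run only when that boolean actually changes.
 * The event codes and helper bodies are local stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum netdev_event { NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE };

struct fc_lport {
	bool link_up;			/* replaces the old link_status bitmask */
};

/* Convention taken from the diff: fcoe_link_ok() returns 0 when the
 * underlying interface is usable.  Stubbed here as "always usable". */
static int fcoe_link_ok(const struct fc_lport *lp)
{
	(void)lp;
	return 0;
}

static void fc_linkup(struct fc_lport *lp)
{
	lp->link_up = true;
	puts("link up");
}

static void fc_linkdown(struct fc_lport *lp)
{
	lp->link_up = false;
	puts("link down");
}

static void fcoe_handle_event(struct fc_lport *lp, enum netdev_event event)
{
	bool new_link_up = lp->link_up;	/* was: u16 new_status */

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = false;	/* was: new_status &= ~FC_LINK_UP */
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	}

	if (lp->link_up != new_link_up) {	/* act only on a real transition */
		if (new_link_up)
			fc_linkup(lp);
		else
			fc_linkdown(lp);	/* assumed from the truncated else branch */
	}
}

int main(void)
{
	struct fc_lport lp = { .link_up = false };

	fcoe_handle_event(&lp, NETDEV_UP);	/* first transition: link up */
	fcoe_handle_event(&lp, NETDEV_DOWN);	/* second transition: link down */
	return 0;
}
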