path: root/drivers/scsi
author	Vasu Dev <vasu.dev@intel.com>	2009-05-06 13:52:34 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2009-06-08 14:29:13 -0400
commit	4bb6b5153313269b4b328f4f5ddc558c45c50713 (patch)
tree	a75bdbf690d9f91b7486db40515b61302a44e12b /drivers/scsi
parent	30121d14f503dac056ee7f68d99eb5d548899b59 (diff)
[SCSI] fcoe: reduces lock cost when adding a new skb to fcoe_pending_queue
Currently fcoe_pending_queue.lock is taken twice for every new skb added to this queue whenever at least one packet is already pending there, which is not uncommon once skbs start getting queued after an fcoe_start_io => dev_queue_xmit failure.

This patch moves most of the fcoe_pending_queue logic into the fcoe_check_wait_queue function; the new logic grabs fcoe_pending_queue.lock only once to add a new skb instead of twice as before. The call flow around fcoe_check_wait_queue in fcoe_xmit also ends up a bit simplified, since the modified fcoe_check_wait_queue takes care of both adding and removing pending skbs in one function.

Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
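The locking win is easiest to see in isolation. Below is a minimal kernel-style C sketch of the pattern this patch introduces, reduced to the enqueue/depth-check path (the real fcoe_check_wait_queue also drains the queue via fcoe_start_io under the same lock); example_port, example_xmit, example_start_io, and example_check_wait_queue are hypothetical stand-ins for illustration, not the driver's actual types:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Simplified stand-in for struct fcoe_softc; illustrative only. */
	struct example_port {
		struct sk_buff_head pending;	/* like fcoe_pending_queue */
		int qfull;			/* like lp->qfull */
		u32 max_depth;			/* like FCOE_MAX_QUEUE_DEPTH */
	};

	/* Hypothetical transmit attempt; returns nonzero on failure.
	 * (The real fcoe_start_io() clones the skb before dev_queue_xmit(),
	 * so the original skb can safely be requeued on failure.)
	 */
	static int example_start_io(struct sk_buff *skb)
	{
		return 0;	/* pretend the transmit succeeded */
	}

	/* One lock acquisition covers both appending the new skb (if any)
	 * and re-checking the queue depth -- the consolidation this patch
	 * performs inside fcoe_check_wait_queue().
	 */
	static void example_check_wait_queue(struct example_port *p,
					     struct sk_buff *skb)
	{
		spin_lock_bh(&p->pending.lock);
		if (skb)
			__skb_queue_tail(&p->pending, skb);
		if (p->pending.qlen > p->max_depth)
			p->qfull = 1;
		spin_unlock_bh(&p->pending.lock);
	}

	/* Caller-side shape after the patch: queue-or-send, with at most
	 * one lock round trip instead of two on the backlogged path.
	 */
	static void example_xmit(struct example_port *p, struct sk_buff *skb)
	{
		if (skb_queue_len(&p->pending))	/* already backlogged */
			example_check_wait_queue(p, skb);
		else if (example_start_io(skb))	/* transmit failed */
			example_check_wait_queue(p, skb);
	}

A timer-driven caller (fcoe_watchdog in the diff below) passes NULL to drain and re-check the depth without appending anything, while the transmit path hands over the failed skb; both paths pay for the spinlock only once.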
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/fcoe/fcoe.c	37
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 6acb7778f55..30eba75a5cd 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -71,7 +71,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
 static int fcoe_hostlist_remove(const struct fc_lport *);
 
-static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
 static void fcoe_dev_cleanup(void);
@@ -989,7 +989,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
  */
 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 {
-	int wlen, rc = 0;
+	int wlen;
 	u32 crc;
 	struct ethhdr *eh;
 	struct fcoe_crc_eof *cp;
@@ -1108,18 +1108,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 	/* send down to lld */
 	fr_dev(fp) = lp;
 	if (fc->fcoe_pending_queue.qlen)
-		rc = fcoe_check_wait_queue(lp);
-
-	if (rc == 0)
-		rc = fcoe_start_io(skb);
-
-	if (rc) {
-		spin_lock_bh(&fc->fcoe_pending_queue.lock);
-		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
-		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-			lp->qfull = 1;
-	}
+		fcoe_check_wait_queue(lp, skb);
+	else if (fcoe_start_io(skb))
+		fcoe_check_wait_queue(lp, skb);
 
 	return 0;
 }
@@ -1285,7 +1276,7 @@ void fcoe_watchdog(ulong vp)
 	read_lock(&fcoe_hostlist_lock);
 	list_for_each_entry(fc, &fcoe_hostlist, list) {
 		if (fc->ctlr.lp)
-			fcoe_check_wait_queue(fc->ctlr.lp);
+			fcoe_check_wait_queue(fc->ctlr.lp, NULL);
 	}
 	read_unlock(&fcoe_hostlist_lock);
 
@@ -1306,16 +1297,17 @@ void fcoe_watchdog(ulong vp)
  * The wait_queue is used when the skb transmit fails. skb will go
  * in the wait_queue which will be emptied by the timer function or
  * by the next skb transmit.
- *
- * Returns: 0 for success
  */
-static int fcoe_check_wait_queue(struct fc_lport *lp)
+static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
 {
 	struct fcoe_softc *fc = lport_priv(lp);
-	struct sk_buff *skb;
-	int rc = -1;
+	int rc;
 
 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+	if (skb)
+		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
+
 	if (fc->fcoe_pending_queue_active)
 		goto out;
 	fc->fcoe_pending_queue_active = 1;
@@ -1342,10 +1334,11 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
 		lp->qfull = 0;
 	fc->fcoe_pending_queue_active = 0;
-	rc = fc->fcoe_pending_queue.qlen;
 out:
+	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+		lp->qfull = 1;
 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-	return rc;
+	return;
 }
 
 /**