about summary refs log tree commit diff stats
path: root/drivers/scsi/libfc
diff options
context:
space:
mode:
author    Vasu Dev <vasu.dev@intel.com>  2009-11-03 14:48:06 -0500
committer James Bottomley <James.Bottomley@suse.de>  2009-12-04 13:01:10 -0500
commit    84c3e1ad08d4be018a95e7a9964bf3dbc8cf8857 (patch)
tree      1dcd106bcf8e64afce588f7914a082a21f45e941 /drivers/scsi/libfc
parent    c46be11a683acc1ccf86883ea906f171b90ff29a (diff)
[SCSI] libfc: adds can_queue ramp up
Adds last_can_queue_ramp_down_time and updates it on every ramp down. If last_can_queue_ramp_down_time is non-zero, then ramp up is attempted on any IO completion in the added fc_fcp_can_queue_ramp_up. Reset last_can_queue_ramp_down_time to zero once can_queue is ramped up to the added max_can_queue limit; this avoids any further ramp-up attempts on subsequent IO completions. Ramp down and ramp up are each skipped within FC_CAN_QUEUE_PERIOD to avoid overly frequent changes to can_queue; this required also keeping track of the ramp-up time, in last_can_queue_ramp_up_time. Adds code to ramp down can_queue if lp->qfull is set; with the added ramp-up code, can_queue will be increased again after FC_CAN_QUEUE_PERIOD, so it is safe in this case to do the ramp down without an fsp, and thrashing is avoided. This required a locking change in fc_fcp_can_queue_ramp_down so that it can be called with the Scsi_Host lock held. Removes si->throttled and the fsp state flag FC_SRB_NOMEM, which are not needed with the added ramp-up code. Signed-off-by: Vasu Dev <vasu.dev@intel.com> Signed-off-by: Robert Love <robert.w.love@intel.com> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/libfc')
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 78
1 file changed, 59 insertions(+), 19 deletions(-)
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index ac5c148d0182..4bfab4f0ccb3 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -52,7 +52,6 @@ struct kmem_cache *scsi_pkt_cachep;
52#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */ 52#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
53#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */ 53#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
54#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */ 54#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
55#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
56 55
57#define FC_SRB_READ (1 << 1) 56#define FC_SRB_READ (1 << 1)
58#define FC_SRB_WRITE (1 << 0) 57#define FC_SRB_WRITE (1 << 0)
@@ -71,12 +70,16 @@ struct kmem_cache *scsi_pkt_cachep;
71 * struct fc_fcp_internal - FCP layer internal data 70 * struct fc_fcp_internal - FCP layer internal data
72 * @scsi_pkt_pool: Memory pool to draw FCP packets from 71 * @scsi_pkt_pool: Memory pool to draw FCP packets from
73 * @scsi_pkt_queue: Current FCP packets 72 * @scsi_pkt_queue: Current FCP packets
74 * @throttled: The FCP packet queue is throttled 73 * @last_can_queue_ramp_down_time: ramp down time
74 * @last_can_queue_ramp_up_time: ramp up time
75 * @max_can_queue: max can_queue size
75 */ 76 */
76struct fc_fcp_internal { 77struct fc_fcp_internal {
77 mempool_t *scsi_pkt_pool; 78 mempool_t *scsi_pkt_pool;
78 struct list_head scsi_pkt_queue; 79 struct list_head scsi_pkt_queue;
79 u8 throttled; 80 unsigned long last_can_queue_ramp_down_time;
81 unsigned long last_can_queue_ramp_up_time;
82 int max_can_queue;
80}; 83};
81 84
82#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv) 85#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -124,6 +127,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
124#define FC_SCSI_TM_TOV (10 * HZ) 127#define FC_SCSI_TM_TOV (10 * HZ)
125#define FC_SCSI_REC_TOV (2 * HZ) 128#define FC_SCSI_REC_TOV (2 * HZ)
126#define FC_HOST_RESET_TIMEOUT (30 * HZ) 129#define FC_HOST_RESET_TIMEOUT (30 * HZ)
130#define FC_CAN_QUEUE_PERIOD (60 * HZ)
127 131
128#define FC_MAX_ERROR_CNT 5 132#define FC_MAX_ERROR_CNT 5
129#define FC_MAX_RECOV_RETRY 3 133#define FC_MAX_RECOV_RETRY 3
@@ -327,6 +331,38 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
327} 331}
328 332
329/** 333/**
334 * fc_fcp_can_queue_ramp_up() - increases can_queue
335 * @lport: lport to ramp up can_queue
336 *
337 * Locking notes: Called with Scsi_Host lock held
338 */
339static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
340{
341 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
342 int can_queue;
343
344 if (si->last_can_queue_ramp_up_time &&
345 (time_before(jiffies, si->last_can_queue_ramp_up_time +
346 FC_CAN_QUEUE_PERIOD)))
347 return;
348
349 if (time_before(jiffies, si->last_can_queue_ramp_down_time +
350 FC_CAN_QUEUE_PERIOD))
351 return;
352
353 si->last_can_queue_ramp_up_time = jiffies;
354
355 can_queue = lport->host->can_queue << 1;
356 if (can_queue >= si->max_can_queue) {
357 can_queue = si->max_can_queue;
358 si->last_can_queue_ramp_down_time = 0;
359 }
360 lport->host->can_queue = can_queue;
361 shost_printk(KERN_ERR, lport->host, "libfc: increased "
362 "can_queue to %d.\n", can_queue);
363}
364
365/**
330 * fc_fcp_can_queue_ramp_down() - reduces can_queue 366 * fc_fcp_can_queue_ramp_down() - reduces can_queue
331 * @lport: lport to reduce can_queue 367 * @lport: lport to reduce can_queue
332 * 368 *
@@ -335,17 +371,20 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
335 * commands complete or timeout, then try again with a reduced 371 * commands complete or timeout, then try again with a reduced
336 * can_queue. Eventually we will hit the point where we run 372 * can_queue. Eventually we will hit the point where we run
337 * on all reserved structs. 373 * on all reserved structs.
374 *
375 * Locking notes: Called with Scsi_Host lock held
338 */ 376 */
339static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 377static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
340{ 378{
341 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 379 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
342 unsigned long flags;
343 int can_queue; 380 int can_queue;
344 381
345 spin_lock_irqsave(lport->host->host_lock, flags); 382 if (si->last_can_queue_ramp_down_time &&
346 if (si->throttled) 383 (time_before(jiffies, si->last_can_queue_ramp_down_time +
347 goto done; 384 FC_CAN_QUEUE_PERIOD)))
348 si->throttled = 1; 385 return;
386
387 si->last_can_queue_ramp_down_time = jiffies;
349 388
350 can_queue = lport->host->can_queue; 389 can_queue = lport->host->can_queue;
351 can_queue >>= 1; 390 can_queue >>= 1;
@@ -354,8 +393,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
354 lport->host->can_queue = can_queue; 393 lport->host->can_queue = can_queue;
355 shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" 394 shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
356 "Reducing can_queue to %d.\n", can_queue); 395 "Reducing can_queue to %d.\n", can_queue);
357done:
358 spin_unlock_irqrestore(lport->host->host_lock, flags);
359} 396}
360 397
361/* 398/*
@@ -370,10 +407,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
370 size_t len) 407 size_t len)
371{ 408{
372 struct fc_frame *fp; 409 struct fc_frame *fp;
410 unsigned long flags;
373 411
374 fp = fc_frame_alloc(lport, len); 412 fp = fc_frame_alloc(lport, len);
375 if (!fp) 413 if (!fp) {
414 spin_lock_irqsave(lport->host->host_lock, flags);
376 fc_fcp_can_queue_ramp_down(lport); 415 fc_fcp_can_queue_ramp_down(lport);
416 spin_unlock_irqrestore(lport->host->host_lock, flags);
417 }
377 return fp; 418 return fp;
378} 419}
379 420
@@ -720,8 +761,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
720 (size_t) ntohl(dd->ft_burst_len)); 761 (size_t) ntohl(dd->ft_burst_len));
721 if (!rc) 762 if (!rc)
722 seq->rec_data = fsp->xfer_len; 763 seq->rec_data = fsp->xfer_len;
723 else if (rc == -ENOMEM)
724 fsp->state |= FC_SRB_NOMEM;
725 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) { 764 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
726 /* 765 /*
727 * received a DATA frame 766 * received a DATA frame
@@ -1734,6 +1773,8 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1734 rpriv = rport->dd_data; 1773 rpriv = rport->dd_data;
1735 1774
1736 if (!fc_fcp_lport_queue_ready(lport)) { 1775 if (!fc_fcp_lport_queue_ready(lport)) {
1776 if (lport->qfull)
1777 fc_fcp_can_queue_ramp_down(lport);
1737 rc = SCSI_MLQUEUE_HOST_BUSY; 1778 rc = SCSI_MLQUEUE_HOST_BUSY;
1738 goto out; 1779 goto out;
1739 } 1780 }
@@ -1830,13 +1871,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1830 } 1871 }
1831 1872
1832 /* 1873 /*
1833 * if a command timed out while we had to try and throttle IO 1874 * if can_queue ramp down is done then try can_queue ramp up
1834 * and it is now getting cleaned up, then we are about to 1875 * since commands are completing now.
1835 * try again so clear the throttled flag incase we get more
1836 * time outs.
1837 */ 1876 */
1838 if (si->throttled && fsp->state & FC_SRB_NOMEM) 1877 if (si->last_can_queue_ramp_down_time)
1839 si->throttled = 0; 1878 fc_fcp_can_queue_ramp_up(lport);
1840 1879
1841 sc_cmd = fsp->cmd; 1880 sc_cmd = fsp->cmd;
1842 fsp->cmd = NULL; 1881 fsp->cmd = NULL;
@@ -2176,6 +2215,7 @@ int fc_fcp_init(struct fc_lport *lport)
2176 if (!si) 2215 if (!si)
2177 return -ENOMEM; 2216 return -ENOMEM;
2178 lport->scsi_priv = si; 2217 lport->scsi_priv = si;
2218 si->max_can_queue = lport->host->can_queue;
2179 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2219 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2180 2220
2181 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2221 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);