author	Douglas Gilbert <dgilbert@interlog.com>	2018-01-10 16:57:31 -0500
committer	Martin K. Petersen <martin.petersen@oracle.com>	2018-01-17 01:00:05 -0500
commit	10bde980ac18da7859591f3a30ddac881f83a2cf (patch)
tree	e799b486574ce176b7238012543d1d30d6b59fd7 /drivers/scsi
parent	dbc1ebe7b0fd43f7d74ba0e87b411eb48c9fdeb2 (diff)
scsi: scsi_debug: delay stress fix
Introduce a state enum into sdebug_defer objects to indicate which, if
any, defer method has been used with the associated command. Also add
2 bools to indicate which of the defer methods has been initialized.
Those objects are re-used but the initialization only needs to be done
once.

This simplifies command cancellation handling. Now the delay associated
with a deferred response of a command cannot be changed (once started)
by changing the delay (and ndelay) parameters in sysfs. Command aborts
and driver shutdown are still honoured immediately when received.

[mkp: applied by hand]

Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
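In outline, cancellation now keys off per-command state instead of the global delay parameters. A minimal sketch of the pattern, condensed from the diff below (the explanatory comments are added here for illustration and are not taken verbatim from the driver source):

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;		/* armed when a jiffies/ns delay is in effect */
	struct execute_work ew;		/* used when jdelay < 0 (work queue deferral) */
	int sqa_idx;			/* index of sdebug_queue array */
	int qc_idx;			/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;
	bool init_hrt;			/* hrtimer_init() already done for this object */
	bool init_wq;			/* INIT_WORK() already done for this object */
	enum sdeb_defer_type defer_t;	/* which defer method is currently in use */
};

/* Cancellation consults the recorded state rather than sdebug_jdelay/ndelay,
 * so changing those parameters via sysfs cannot strand an in-flight command. */
static void stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{
	if (!sd_dp)
		return;
	if (defer_t == SDEB_DEFER_HRT)
		hrtimer_cancel(&sd_dp->hrt);
	else if (defer_t == SDEB_DEFER_WQ)
		cancel_work_sync(&sd_dp->ew.work);
}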
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/scsi_debug.c	72
1 file changed, 46 insertions(+), 26 deletions(-)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b8f83c4e2c81..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -267,12 +267,18 @@ struct sdebug_host_info {
 #define to_sdebug_host(d)	\
 	container_of(d, struct sdebug_host_info, dev)
 
+enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
+		      SDEB_DEFER_WQ = 2};
+
 struct sdebug_defer {
 	struct hrtimer hrt;
 	struct execute_work ew;
 	int sqa_idx;	/* index of sdebug_queue array */
 	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
 	int issuing_cpu;
+	bool init_hrt;
+	bool init_wq;
+	enum sdeb_defer_type defer_t;
 };
 
 struct sdebug_queued_cmd {
@@ -3748,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 	struct scsi_cmnd *scp;
 	struct sdebug_dev_info *devip;
 
+	sd_dp->defer_t = SDEB_DEFER_NONE;
 	qc_idx = sd_dp->qc_idx;
 	sqp = sdebug_q_arr + sd_dp->sqa_idx;
 	if (sdebug_statistics) {
@@ -3932,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
 	}
 }
 
-static void stop_qc_helper(struct sdebug_defer *sd_dp)
+static void stop_qc_helper(struct sdebug_defer *sd_dp,
+			   enum sdeb_defer_type defer_t)
 {
 	if (!sd_dp)
 		return;
-	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+	if (defer_t == SDEB_DEFER_HRT)
 		hrtimer_cancel(&sd_dp->hrt);
-	else if (sdebug_jdelay < 0)
+	else if (defer_t == SDEB_DEFER_WQ)
 		cancel_work_sync(&sd_dp->ew.work);
 }
 
@@ -3948,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 {
 	unsigned long iflags;
 	int j, k, qmax, r_qmax;
+	enum sdeb_defer_type l_defer_t;
 	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct sdebug_dev_info *devip;
@@ -3971,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 				atomic_dec(&devip->num_in_q);
 			sqcp->a_cmnd = NULL;
 			sd_dp = sqcp->sd_dp;
+			if (sd_dp) {
+				l_defer_t = sd_dp->defer_t;
+				sd_dp->defer_t = SDEB_DEFER_NONE;
+			} else
+				l_defer_t = SDEB_DEFER_NONE;
 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-			stop_qc_helper(sd_dp);
+			stop_qc_helper(sd_dp, l_defer_t);
 			clear_bit(k, sqp->in_use_bm);
 			return true;
 		}
@@ -3987,6 +4001,7 @@ static void stop_all_queued(void)
 {
 	unsigned long iflags;
 	int j, k;
+	enum sdeb_defer_type l_defer_t;
 	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct sdebug_dev_info *devip;
@@ -4005,8 +4020,13 @@ static void stop_all_queued(void)
 				atomic_dec(&devip->num_in_q);
 			sqcp->a_cmnd = NULL;
 			sd_dp = sqcp->sd_dp;
+			if (sd_dp) {
+				l_defer_t = sd_dp->defer_t;
+				sd_dp->defer_t = SDEB_DEFER_NONE;
+			} else
+				l_defer_t = SDEB_DEFER_NONE;
 			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-			stop_qc_helper(sd_dp);
+			stop_qc_helper(sd_dp, l_defer_t);
 			clear_bit(k, sqp->in_use_bm);
 			spin_lock_irqsave(&sqp->qc_lock, iflags);
 		}
@@ -4258,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
  *	SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
  */
 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
-			 int scsi_result, int delta_jiff)
+			 int scsi_result, int delta_jiff, int ndelay)
 {
 	unsigned long iflags;
 	int k, num_in_q, qdepth, inject;
@@ -4336,17 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
 		setup_inject(sqp, sqcp);
-	if (delta_jiff > 0 || sdebug_ndelay > 0) {
+	if (sd_dp == NULL) {
+		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+		if (sd_dp == NULL)
+			return SCSI_MLQUEUE_HOST_BUSY;
+	}
+	if (delta_jiff > 0 || ndelay > 0) {
 		ktime_t kt;
 
 		if (delta_jiff > 0) {
 			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
 		} else
-			kt = sdebug_ndelay;
-		if (NULL == sd_dp) {
-			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
-			if (NULL == sd_dp)
-				return SCSI_MLQUEUE_HOST_BUSY;
+			kt = ndelay;
+		if (!sd_dp->init_hrt) {
+			sd_dp->init_hrt = true;
 			sqcp->sd_dp = sd_dp;
 			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
 				     HRTIMER_MODE_REL_PINNED);
@@ -4356,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		}
 		if (sdebug_statistics)
 			sd_dp->issuing_cpu = raw_smp_processor_id();
+		sd_dp->defer_t = SDEB_DEFER_HRT;
 		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 	} else {	/* jdelay < 0, use work queue */
-		if (NULL == sd_dp) {
-			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
-			if (NULL == sd_dp)
-				return SCSI_MLQUEUE_HOST_BUSY;
+		if (!sd_dp->init_wq) {
+			sd_dp->init_wq = true;
 			sqcp->sd_dp = sd_dp;
 			sd_dp->sqa_idx = sqp - sdebug_q_arr;
 			sd_dp->qc_idx = k;
@@ -4369,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		}
 		if (sdebug_statistics)
 			sd_dp->issuing_cpu = raw_smp_processor_id();
+		sd_dp->defer_t = SDEB_DEFER_WQ;
 		schedule_work(&sd_dp->ew.work);
 	}
 	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
@@ -4615,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
 		}
 	}
 	if (res > 0) {
-		/* make sure sdebug_defer instances get
-		 * re-allocated for new delay variant */
-		free_all_queued();
 		sdebug_jdelay = jdelay;
 		sdebug_ndelay = 0;
 	}
@@ -4658,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
 		}
 	}
 	if (res > 0) {
-		/* make sure sdebug_defer instances get
-		 * re-allocated for new delay variant */
-		free_all_queued();
 		sdebug_ndelay = ndelay;
 		sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
 				       : DEF_JDELAY;
@@ -5702,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
 		errsts = r_pfp(scp, devip);
 
 fini:
-	return schedule_resp(scp, devip, errsts,
-			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
+	if (F_DELAY_OVERR & flags)
+		return schedule_resp(scp, devip, errsts, 0, 0);
+	else
+		return schedule_resp(scp, devip, errsts, sdebug_jdelay,
+				     sdebug_ndelay);
 check_cond:
-	return schedule_resp(scp, devip, check_condition_result, 0);
+	return schedule_resp(scp, devip, check_condition_result, 0, 0);
 err_out:
-	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
+	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
 }
 
 static struct scsi_host_template sdebug_driver_template = {