author     Douglas Gilbert <dgilbert@interlog.com>          2016-04-25 12:16:32 -0400
committer  Martin K. Petersen <martin.petersen@oracle.com>  2016-04-29 19:27:27 -0400
commit     a10bc12af6347d2aa3a2ffbd5f8b7be260c12b85
tree       d3b1c696994d246c544bee88e0d9163d29242899  /drivers/scsi
parent     c2206098972e8ca464040897c95bdf5b2f45ac32
scsi_debug: replace tasklet with work queue
When a negative value was placed in the delay parameter, a tasklet was
scheduled to complete the command. Change the tasklet to a work item.
Previously a delay of -1 scheduled a high-priority tasklet; since the work
item path used here has no high-priority equivalent, treat -1 like the
other negative values of delay and schedule a regular work item.
Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
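For readers unfamiliar with the pattern, below is a minimal sketch of the deferral scheme this patch moves to: one context struct carrying both an hrtimer and an execute_work, with container_of() recovering the context in either callback. It is an illustration only, not code from the patch; the toy_* names are invented and error handling is omitted.

/*
 * Minimal sketch (illustrative, not from scsi_debug.c) of the deferral
 * pattern adopted by this patch. One context struct embeds both deferral
 * mechanisms; container_of() maps the callback argument back to it.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include <linux/printk.h>

struct toy_defer {
        struct hrtimer hrt;     /* used for positive (timed) delays */
        struct execute_work ew; /* used for negative delays */
        int qa_indx;            /* index back into a queued-command array */
};

/* Both deferral paths converge on one completion routine. */
static void toy_complete(struct toy_defer *td)
{
        pr_info("completing queued command at index %d\n", td->qa_indx);
}

static enum hrtimer_restart toy_hrt_complete(struct hrtimer *timer)
{
        toy_complete(container_of(timer, struct toy_defer, hrt));
        return HRTIMER_NORESTART;
}

static void toy_wq_complete(struct work_struct *work)
{
        /* work is embedded in execute_work, itself inside toy_defer */
        toy_complete(container_of(work, struct toy_defer, ew.work));
}

static void toy_schedule(struct toy_defer *td, long long delay_ns)
{
        if (delay_ns > 0) {
                hrtimer_init(&td->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                td->hrt.function = toy_hrt_complete;
                hrtimer_start(&td->hrt, ns_to_ktime(delay_ns),
                              HRTIMER_MODE_REL);
        } else {
                /* all negative delays, including -1, take the work item path */
                INIT_WORK(&td->ew.work, toy_wq_complete);
                schedule_work(&td->ew.work);
        }
}

Unlike a tasklet, a work item runs in process context, so the completion path may sleep, and cancel_work_sync() gives a reliable way to stop it; that is what stop_queued_cmnd() uses in the diff below.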
Diffstat (limited to 'drivers/scsi')
 drivers/scsi/scsi_debug.c | 230 ++++++++++++++++-----------------------
 1 file changed, 95 insertions(+), 135 deletions(-)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e2fbac3b32aa..40de305de1d5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -610,15 +610,15 @@ static LIST_HEAD(sdebug_host_list);
 static DEFINE_SPINLOCK(sdebug_host_list_lock);
 
 
-struct sdebug_hrtimer {		/* ... is derived from hrtimer */
-	struct hrtimer hrt;	/* must be first element */
+struct sdebug_defer {
+	struct hrtimer hrt;
+	struct execute_work ew;
 	int qa_indx;
 };
 
 struct sdebug_queued_cmd {
 	/* in_use flagged by a bit in queued_in_use_bm[] */
-	struct tasklet_struct *tletp;
-	struct sdebug_hrtimer *sd_hrtp;
+	struct sdebug_defer *sd_dp;
 	struct scsi_cmnd * a_cmnd;
 };
 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
@@ -3349,8 +3349,9 @@ resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	return resp_xdwriteread(scp, lba, num, devip);
 }
 
-/* When tasklet goes off this function is called. */
-static void sdebug_q_cmd_complete(unsigned long indx)
+/* Queued command completions converge here. */
+static void
+sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 {
 	int qa_indx;
 	int retiring = 0;
@@ -3360,7 +3361,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
 	struct sdebug_dev_info *devip;
 
 	atomic_inc(&sdebug_completions);
-	qa_indx = indx;
+	qa_indx = sd_dp->qa_indx;
 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
 		pr_err("wild qa_indx=%d\n", qa_indx);
 		return;
@@ -3411,64 +3412,21 @@ static void sdebug_q_cmd_complete(unsigned long indx)
 static enum hrtimer_restart
 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
 {
-	int qa_indx;
-	int retiring = 0;
-	unsigned long iflags;
-	struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
-	struct sdebug_queued_cmd *sqcp;
-	struct scsi_cmnd *scp;
-	struct sdebug_dev_info *devip;
-
-	atomic_inc(&sdebug_completions);
-	qa_indx = sd_hrtp->qa_indx;
-	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("wild qa_indx=%d\n", qa_indx);
-		goto the_end;
-	}
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	sqcp = &queued_arr[qa_indx];
-	scp = sqcp->a_cmnd;
-	if (NULL == scp) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("scp is NULL\n");
-		goto the_end;
-	}
-	devip = (struct sdebug_dev_info *)scp->device->hostdata;
-	if (devip)
-		atomic_dec(&devip->num_in_q);
-	else
-		pr_err("devip=NULL\n");
-	if (atomic_read(&retired_max_queue) > 0)
-		retiring = 1;
-
-	sqcp->a_cmnd = NULL;
-	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("Unexpected completion\n");
-		goto the_end;
-	}
-
-	if (unlikely(retiring)) {	/* user has reduced max_queue */
-		int k, retval;
-
-		retval = atomic_read(&retired_max_queue);
-		if (qa_indx >= retval) {
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
-			pr_err("index %d too large\n", retval);
-			goto the_end;
-		}
-		k = find_last_bit(queued_in_use_bm, retval);
-		if ((k < sdebug_max_queue) || (k == retval))
-			atomic_set(&retired_max_queue, 0);
-		else
-			atomic_set(&retired_max_queue, k + 1);
-	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	scp->scsi_done(scp); /* callback to mid level */
-the_end:
+	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
+						  hrt);
+	sdebug_q_cmd_complete(sd_dp);
 	return HRTIMER_NORESTART;
 }
 
+/* When work queue schedules work, it calls this function. */
+static void
+sdebug_q_cmd_wq_complete(struct work_struct *work)
+{
+	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
+						  ew.work);
+	sdebug_q_cmd_complete(sd_dp);
+}
+
 static struct sdebug_dev_info *
 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
 {
@@ -3567,13 +3525,15 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
 	}
 }
 
-/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
-static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
+/* If @cmnd found deletes its timer or work queue and returns true; else
+   returns false */
+static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 {
 	unsigned long iflags;
 	int k, qmax, r_qmax;
 	struct sdebug_queued_cmd *sqcp;
 	struct sdebug_dev_info *devip;
+	struct sdebug_defer *sd_dp;
 
 	spin_lock_irqsave(&queued_arr_lock, iflags);
 	qmax = sdebug_max_queue;
@@ -3583,64 +3543,63 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
 	for (k = 0; k < qmax; ++k) {
 		if (test_bit(k, queued_in_use_bm)) {
 			sqcp = &queued_arr[k];
-			if (cmnd == sqcp->a_cmnd) {
-				devip = (struct sdebug_dev_info *)
-					cmnd->device->hostdata;
-				if (devip)
-					atomic_dec(&devip->num_in_q);
-				sqcp->a_cmnd = NULL;
-				spin_unlock_irqrestore(&queued_arr_lock,
-						       iflags);
-				if ((sdebug_jdelay > 0) ||
-				    (sdebug_ndelay > 0)) {
-					if (sqcp->sd_hrtp)
-						hrtimer_cancel(
-							&sqcp->sd_hrtp->hrt);
-				} else if (sdebug_jdelay < 0) {
-					if (sqcp->tletp)
-						tasklet_kill(sqcp->tletp);
-				}
-				clear_bit(k, queued_in_use_bm);
-				return 1;
+			if (cmnd != sqcp->a_cmnd)
+				continue;
+			/* found command */
+			devip = (struct sdebug_dev_info *)
+				cmnd->device->hostdata;
+			if (devip)
+				atomic_dec(&devip->num_in_q);
+			sqcp->a_cmnd = NULL;
+			sd_dp = sqcp->sd_dp;
+			spin_unlock_irqrestore(&queued_arr_lock,
+					       iflags);
+			if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) {
+				if (sd_dp)
+					hrtimer_cancel(&sd_dp->hrt);
+			} else if (sdebug_jdelay < 0) {
+				if (sd_dp)
+					cancel_work_sync(&sd_dp->ew.work);
 			}
+			clear_bit(k, queued_in_use_bm);
+			return true;
 		}
 	}
 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	return 0;
+	return false;
 }
 
-/* Deletes (stops) timers or tasklets of all queued commands */
+/* Deletes (stops) timers or work queues of all queued commands */
 static void stop_all_queued(void)
 {
 	unsigned long iflags;
 	int k;
 	struct sdebug_queued_cmd *sqcp;
 	struct sdebug_dev_info *devip;
+	struct sdebug_defer *sd_dp;
 
 	spin_lock_irqsave(&queued_arr_lock, iflags);
 	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
 		if (test_bit(k, queued_in_use_bm)) {
 			sqcp = &queued_arr[k];
-			if (sqcp->a_cmnd) {
-				devip = (struct sdebug_dev_info *)
-					sqcp->a_cmnd->device->hostdata;
-				if (devip)
-					atomic_dec(&devip->num_in_q);
-				sqcp->a_cmnd = NULL;
-				spin_unlock_irqrestore(&queued_arr_lock,
-						       iflags);
-				if ((sdebug_jdelay > 0) ||
-				    (sdebug_ndelay > 0)) {
-					if (sqcp->sd_hrtp)
-						hrtimer_cancel(
-							&sqcp->sd_hrtp->hrt);
-				} else if (sdebug_jdelay < 0) {
-					if (sqcp->tletp)
-						tasklet_kill(sqcp->tletp);
-				}
-				clear_bit(k, queued_in_use_bm);
-				spin_lock_irqsave(&queued_arr_lock, iflags);
-			}
+			if (NULL == sqcp->a_cmnd)
+				continue;
+			devip = (struct sdebug_dev_info *)
+				sqcp->a_cmnd->device->hostdata;
+			if (devip)
+				atomic_dec(&devip->num_in_q);
+			sqcp->a_cmnd = NULL;
+			sd_dp = sqcp->sd_dp;
+			spin_unlock_irqrestore(&queued_arr_lock, iflags);
+			if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) {
+				if (sd_dp)
+					hrtimer_cancel(&sd_dp->hrt);
+			} else if (sdebug_jdelay < 0) {
+				if (sd_dp)
+					cancel_work_sync(&sd_dp->ew.work);
+			}
+			clear_bit(k, queued_in_use_bm);
+			spin_lock_irqsave(&queued_arr_lock, iflags);
 		}
 	}
 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
@@ -3649,30 +3608,27 @@ static void stop_all_queued(void)
 /* Free queued command memory on heap */
 static void free_all_queued(void)
 {
-	unsigned long iflags;
 	int k;
 	struct sdebug_queued_cmd *sqcp;
 
-	spin_lock_irqsave(&queued_arr_lock, iflags);
 	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
 		sqcp = &queued_arr[k];
-		kfree(sqcp->tletp);
-		sqcp->tletp = NULL;
-		kfree(sqcp->sd_hrtp);
-		sqcp->sd_hrtp = NULL;
+		kfree(sqcp->sd_dp);
+		sqcp->sd_dp = NULL;
 	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 }
 
 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
 {
+	bool ok;
+
 	++num_aborts;
 	if (SCpnt) {
-		if (SCpnt->device &&
-		    (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
-			sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
-				    __func__);
-		stop_queued_cmnd(SCpnt);
+		ok = stop_queued_cmnd(SCpnt);
+		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
+			sdev_printk(KERN_INFO, SCpnt->device,
+				    "%s: command%s found\n", __func__,
+				    ok ? "" : " not");
 	}
 	return SUCCESS;
 }
@@ -3846,6 +3802,7 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	int k, num_in_q, qdepth, inject;
 	struct sdebug_queued_cmd *sqcp = NULL;
 	struct scsi_device *sdp;
+	struct sdebug_defer *sd_dp;
 
 	/* this should never happen */
 	if (WARN_ON(!cmnd))
@@ -3912,8 +3869,8 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	sqcp->a_cmnd = cmnd;
 	cmnd->result = scsi_result;
 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
+	sd_dp = sqcp->sd_dp;
 	if ((delta_jiff > 0) || (sdebug_ndelay > 0)) {
-		struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
 		ktime_t kt;
 
 		if (delta_jiff > 0) {
@@ -3923,30 +3880,27 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
 		} else
 			kt = ktime_set(0, sdebug_ndelay);
-		if (NULL == sd_hp) {
-			sd_hp = kzalloc(sizeof(*sd_hp), GFP_ATOMIC);
-			if (NULL == sd_hp)
+		if (NULL == sd_dp) {
+			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+			if (NULL == sd_dp)
 				return SCSI_MLQUEUE_HOST_BUSY;
-			sqcp->sd_hrtp = sd_hp;
-			hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
+			sqcp->sd_dp = sd_dp;
+			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
 				     HRTIMER_MODE_REL);
-			sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
-			sd_hp->qa_indx = k;
+			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+			sd_dp->qa_indx = k;
 		}
-		hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
+		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL);
 	} else {	/* jdelay < 0 */
-		if (NULL == sqcp->tletp) {
-			sqcp->tletp = kzalloc(sizeof(*sqcp->tletp),
-					      GFP_ATOMIC);
-			if (NULL == sqcp->tletp)
+		if (NULL == sd_dp) {
+			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
+			if (NULL == sd_dp)
 				return SCSI_MLQUEUE_HOST_BUSY;
-			tasklet_init(sqcp->tletp,
-				     sdebug_q_cmd_complete, k);
+			sqcp->sd_dp = sd_dp;
+			sd_dp->qa_indx = k;
+			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
 		}
-		if (-1 == delta_jiff)
-			tasklet_hi_schedule(sqcp->tletp);
-		else
-			tasklet_schedule(sqcp->tletp);
+		schedule_work(&sd_dp->ew.work);
 	}
 	if ((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
 	    (scsi_result == device_qfull_result))
@@ -4149,6 +4103,9 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
 		if (k != sdebug_max_queue)
 			res = -EBUSY;	/* have queued commands */
 		else {
+			/* make sure sdebug_defer instances get
+			 * re-allocated for new delay variant */
+			free_all_queued();
 			sdebug_jdelay = jdelay;
 			sdebug_ndelay = 0;
 		}
@@ -4181,6 +4138,9 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
 		if (k != sdebug_max_queue)
 			res = -EBUSY;	/* have queued commands */
 		else {
+			/* make sure sdebug_defer instances get
+			 * re-allocated for new delay variant */
+			free_all_queued();
 			sdebug_ndelay = ndelay;
 			sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
 					       : DEF_JDELAY;