Diffstat (limited to 'drivers/scsi/scsi_debug.c')
-rw-r--r--	drivers/scsi/scsi_debug.c	| 158
1 file changed, 74 insertions(+), 84 deletions(-)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 30268bb2ddb6..dfcc45bb03b1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -25,6 +25,9 @@
  * module options to "modprobe scsi_debug num_tgts=2" [20021221]
  */
 
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
 #include <linux/module.h>
 
 #include <linux/kernel.h>
@@ -201,7 +204,6 @@ static const char *scsi_debug_version_date = "20141022";
 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
  * or "peripheral device" addressing (value 0) */
 #define SAM2_LUN_ADDRESS_METHOD 0
-#define SAM2_WLUN_REPORT_LUNS 0xc101
 
 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
  * (for response) at one time. Can be reduced by max_queue option. Command
@@ -698,7 +700,7 @@ static void sdebug_max_tgts_luns(void)
 		else
 			hpnt->max_id = scsi_debug_num_tgts;
 		/* scsi_debug_max_luns; */
-		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
+		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 	}
 	spin_unlock(&sdebug_host_list_lock);
 }
@@ -1288,7 +1290,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
 	if (! arr)
 		return DID_REQUEUE << 16;
-	have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
+	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
 	if (have_wlun)
 		pq_pdt = 0x1e;	/* present, wlun */
 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
@@ -1427,12 +1429,11 @@ static int resp_requests(struct scsi_cmnd * scp,
 	unsigned char * sbuff;
 	unsigned char *cmd = scp->cmnd;
 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
-	bool dsense, want_dsense;
+	bool dsense;
 	int len = 18;
 
 	memset(arr, 0, sizeof(arr));
 	dsense = !!(cmd[1] & 1);
-	want_dsense = dsense || scsi_debug_dsense;
 	sbuff = scp->sense_buffer;
 	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
 		if (dsense) {
@@ -2446,8 +2447,7 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
 	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
 
 	if (sdt->guard_tag != csum) {
-		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
-		       __func__,
+		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
 		       (unsigned long)sector,
 		       be16_to_cpu(sdt->guard_tag),
 		       be16_to_cpu(csum));
@@ -2455,14 +2455,14 @@
 	}
 	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
-		pr_err("%s: REF check failed on sector %lu\n",
-			__func__, (unsigned long)sector);
+		pr_err("REF check failed on sector %lu\n",
+			(unsigned long)sector);
 		return 0x03;
 	}
 	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
-		pr_err("%s: REF check failed on sector %lu\n",
-			__func__, (unsigned long)sector);
+		pr_err("REF check failed on sector %lu\n",
+			(unsigned long)sector);
 		return 0x03;
 	}
 	return 0;
@@ -2680,7 +2680,7 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	return 0;
 }
 
-void dump_sector(unsigned char *buf, int len)
+static void dump_sector(unsigned char *buf, int len)
 {
 	int i, j, n;
 
@@ -3365,8 +3365,8 @@ static int resp_report_luns(struct scsi_cmnd * scp,
 		one_lun[i].scsi_lun[1] = lun & 0xff;
 	}
 	if (want_wlun) {
-		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
-		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
+		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
+		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
 		i++;
 	}
 	alloc_len = (unsigned char *)(one_lun + i) - arr;
@@ -3449,7 +3449,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
 	atomic_inc(&sdebug_completions);
 	qa_indx = indx;
 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+		pr_err("wild qa_indx=%d\n", qa_indx);
 		return;
 	}
 	spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3457,21 +3457,21 @@ static void sdebug_q_cmd_complete(unsigned long indx)
 	scp = sqcp->a_cmnd;
 	if (NULL == scp) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: scp is NULL\n", __func__);
+		pr_err("scp is NULL\n");
 		return;
 	}
 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
 	if (devip)
 		atomic_dec(&devip->num_in_q);
 	else
-		pr_err("%s: devip=NULL\n", __func__);
+		pr_err("devip=NULL\n");
 	if (atomic_read(&retired_max_queue) > 0)
 		retiring = 1;
 
 	sqcp->a_cmnd = NULL;
 	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: Unexpected completion\n", __func__);
+		pr_err("Unexpected completion\n");
 		return;
 	}
 
@@ -3481,7 +3481,7 @@ static void sdebug_q_cmd_complete(unsigned long indx)
 	retval = atomic_read(&retired_max_queue);
 	if (qa_indx >= retval) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: index %d too large\n", __func__, retval);
+		pr_err("index %d too large\n", retval);
 		return;
 	}
 	k = find_last_bit(queued_in_use_bm, retval);
@@ -3509,7 +3509,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
 	atomic_inc(&sdebug_completions);
 	qa_indx = sd_hrtp->qa_indx;
 	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+		pr_err("wild qa_indx=%d\n", qa_indx);
 		goto the_end;
 	}
 	spin_lock_irqsave(&queued_arr_lock, iflags);
@@ -3517,21 +3517,21 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
 	scp = sqcp->a_cmnd;
 	if (NULL == scp) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: scp is NULL\n", __func__);
+		pr_err("scp is NULL\n");
 		goto the_end;
 	}
 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
 	if (devip)
 		atomic_dec(&devip->num_in_q);
 	else
-		pr_err("%s: devip=NULL\n", __func__);
+		pr_err("devip=NULL\n");
 	if (atomic_read(&retired_max_queue) > 0)
 		retiring = 1;
 
 	sqcp->a_cmnd = NULL;
 	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: Unexpected completion\n", __func__);
+		pr_err("Unexpected completion\n");
 		goto the_end;
 	}
 
@@ -3541,7 +3541,7 @@ sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
 	retval = atomic_read(&retired_max_queue);
 	if (qa_indx >= retval) {
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("%s: index %d too large\n", __func__, retval);
+		pr_err("index %d too large\n", retval);
 		goto the_end;
 	}
 	k = find_last_bit(queued_in_use_bm, retval);
@@ -3580,7 +3580,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
 		return devip;
 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
 	if (!sdbg_host) {
-		pr_err("%s: Host info NULL\n", __func__);
+		pr_err("Host info NULL\n");
 		return NULL;
 	}
 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
@@ -3596,8 +3596,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
 	if (!open_devip) { /* try and make a new one */
 		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
 		if (!open_devip) {
-			printk(KERN_ERR "%s: out of memory at line %d\n",
-			       __func__, __LINE__);
+			pr_err("out of memory at line %d\n", __LINE__);
 			return NULL;
 		}
 	}
@@ -3615,7 +3614,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 {
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
+		pr_info("slave_alloc <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
@@ -3626,7 +3625,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
 	struct sdebug_dev_info *devip;
 
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
+		pr_info("slave_configure <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
@@ -3646,7 +3645,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
 		(struct sdebug_dev_info *)sdp->hostdata;
 
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
+		pr_info("slave_destroy <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	if (devip) {
 		/* make this slot available for re-use */
@@ -3897,8 +3896,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
 		return;
 	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
 		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
-		pr_warn("%s: reducing partitions to %d\n", __func__,
-			SDEBUG_MAX_PARTS);
+		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
 	}
 	num_sectors = (int)sdebug_store_sectors;
 	sectors_per_part = (num_sectors - sdebug_sectors_per)
@@ -3942,14 +3940,20 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	unsigned long iflags;
 	int k, num_in_q, qdepth, inject;
 	struct sdebug_queued_cmd *sqcp = NULL;
-	struct scsi_device *sdp = cmnd->device;
+	struct scsi_device *sdp;
+
+	/* this should never happen */
+	if (WARN_ON(!cmnd))
+		return SCSI_MLQUEUE_HOST_BUSY;
 
-	if (NULL == cmnd || NULL == devip) {
-		pr_warn("%s: called with NULL cmnd or devip pointer\n",
-			__func__);
+	if (NULL == devip) {
+		pr_warn("called devip == NULL\n");
 		/* no particularly good error to report back */
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
+
+	sdp = cmnd->device;
+
 	if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
 			    __func__, scsi_result);
@@ -4383,8 +4387,7 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
 
 		fake_storep = vmalloc(sz);
 		if (NULL == fake_storep) {
-			pr_err("%s: out of memory, 9\n",
-			       __func__);
+			pr_err("out of memory, 9\n");
 			return -ENOMEM;
 		}
 		memset(fake_storep, 0, sz);
@@ -4784,8 +4787,7 @@ static int __init scsi_debug_init(void)
 	atomic_set(&retired_max_queue, 0);
 
 	if (scsi_debug_ndelay >= 1000000000) {
-		pr_warn("%s: ndelay must be less than 1 second, ignored\n",
-			__func__);
+		pr_warn("ndelay must be less than 1 second, ignored\n");
 		scsi_debug_ndelay = 0;
 	} else if (scsi_debug_ndelay > 0)
 		scsi_debug_delay = DELAY_OVERRIDDEN;
@@ -4797,8 +4799,7 @@ static int __init scsi_debug_init(void)
 	case 4096:
 		break;
 	default:
-		pr_err("%s: invalid sector_size %d\n", __func__,
-		       scsi_debug_sector_size);
+		pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
 		return -EINVAL;
 	}
 
@@ -4811,29 +4812,28 @@ static int __init scsi_debug_init(void)
 		break;
 
 	default:
-		pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
+		pr_err("dif must be 0, 1, 2 or 3\n");
 		return -EINVAL;
 	}
 
 	if (scsi_debug_guard > 1) {
-		pr_err("%s: guard must be 0 or 1\n", __func__);
+		pr_err("guard must be 0 or 1\n");
 		return -EINVAL;
 	}
 
 	if (scsi_debug_ato > 1) {
-		pr_err("%s: ato must be 0 or 1\n", __func__);
+		pr_err("ato must be 0 or 1\n");
 		return -EINVAL;
 	}
 
 	if (scsi_debug_physblk_exp > 15) {
-		pr_err("%s: invalid physblk_exp %u\n", __func__,
-		       scsi_debug_physblk_exp);
+		pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
 		return -EINVAL;
 	}
 
 	if (scsi_debug_lowest_aligned > 0x3fff) {
-		pr_err("%s: lowest_aligned too big: %u\n", __func__,
+		pr_err("lowest_aligned too big: %u\n",
 		       scsi_debug_lowest_aligned);
 		return -EINVAL;
 	}
 
@@ -4863,7 +4863,7 @@ static int __init scsi_debug_init(void)
 	if (0 == scsi_debug_fake_rw) {
 		fake_storep = vmalloc(sz);
 		if (NULL == fake_storep) {
-			pr_err("%s: out of memory, 1\n", __func__);
+			pr_err("out of memory, 1\n");
 			return -ENOMEM;
 		}
 		memset(fake_storep, 0, sz);
@@ -4877,11 +4877,10 @@ static int __init scsi_debug_init(void)
 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
 		dif_storep = vmalloc(dif_size);
 
-		pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
-		       dif_storep);
+		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
 
 		if (dif_storep == NULL) {
-			pr_err("%s: out of mem. (DIX)\n", __func__);
+			pr_err("out of mem. (DIX)\n");
 			ret = -ENOMEM;
 			goto free_vm;
 		}
@@ -4903,18 +4902,17 @@ static int __init scsi_debug_init(void)
 		if (scsi_debug_unmap_alignment &&
 		    scsi_debug_unmap_granularity <=
 		    scsi_debug_unmap_alignment) {
-			pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
-			       __func__);
+			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
 			return -EINVAL;
 		}
 
 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
 		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
-		pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
+		pr_info("%lu provisioning blocks\n", map_size);
 
 		if (map_storep == NULL) {
-			pr_err("%s: out of mem. (MAP)\n", __func__);
+			pr_err("out of mem. (MAP)\n");
 			ret = -ENOMEM;
 			goto free_vm;
 		}
@@ -4928,18 +4926,18 @@ static int __init scsi_debug_init(void)
 
 	pseudo_primary = root_device_register("pseudo_0");
 	if (IS_ERR(pseudo_primary)) {
-		pr_warn("%s: root_device_register() error\n", __func__);
+		pr_warn("root_device_register() error\n");
 		ret = PTR_ERR(pseudo_primary);
 		goto free_vm;
 	}
 	ret = bus_register(&pseudo_lld_bus);
 	if (ret < 0) {
-		pr_warn("%s: bus_register error: %d\n", __func__, ret);
+		pr_warn("bus_register error: %d\n", ret);
 		goto dev_unreg;
 	}
 	ret = driver_register(&sdebug_driverfs_driver);
 	if (ret < 0) {
-		pr_warn("%s: driver_register error: %d\n", __func__, ret);
+		pr_warn("driver_register error: %d\n", ret);
 		goto bus_unreg;
 	}
 
@@ -4948,16 +4946,14 @@ static int __init scsi_debug_init(void)
 
 	for (k = 0; k < host_to_add; k++) {
 		if (sdebug_add_adapter()) {
-			pr_err("%s: sdebug_add_adapter failed k=%d\n",
-			       __func__, k);
+			pr_err("sdebug_add_adapter failed k=%d\n", k);
 			break;
 		}
 	}
 
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
-		pr_info("%s: built %d host(s)\n", __func__,
-			scsi_debug_add_host);
-	}
+	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+		pr_info("built %d host(s)\n", scsi_debug_add_host);
+
 	return 0;
 
 bus_unreg:
@@ -4965,10 +4961,8 @@ bus_unreg:
 dev_unreg:
 	root_device_unregister(pseudo_primary);
 free_vm:
-	if (map_storep)
-		vfree(map_storep);
-	if (dif_storep)
-		vfree(dif_storep);
+	vfree(map_storep);
+	vfree(dif_storep);
 	vfree(fake_storep);
 
 	return ret;
@@ -4986,9 +4980,7 @@ static void __exit scsi_debug_exit(void)
 	bus_unregister(&pseudo_lld_bus);
 	root_device_unregister(pseudo_primary);
 
-	if (dif_storep)
-		vfree(dif_storep);
-
+	vfree(dif_storep);
 	vfree(fake_storep);
 }
 
@@ -5012,8 +5004,7 @@ static int sdebug_add_adapter(void)
 
 	sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
 	if (NULL == sdbg_host) {
-		printk(KERN_ERR "%s: out of memory at line %d\n",
-		       __func__, __LINE__);
+		pr_err("out of memory at line %d\n", __LINE__);
 		return -ENOMEM;
 	}
 
@@ -5023,8 +5014,7 @@ static int sdebug_add_adapter(void)
 	for (k = 0; k < devs_per_host; k++) {
 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
 		if (!sdbg_devinfo) {
-			printk(KERN_ERR "%s: out of memory at line %d\n",
-			       __func__, __LINE__);
+			pr_err("out of memory at line %d\n", __LINE__);
 			error = -ENOMEM;
 			goto clean;
 		}
@@ -5178,7 +5168,7 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
 		}
 		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
 	}
-	has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
+	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
 	if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
 		return schedule_resp(scp, NULL, errsts_no_connect, 0);
 
@@ -5338,7 +5328,7 @@ static int sdebug_driver_probe(struct device * dev)
 	sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
 	if (NULL == hpnt) {
-		pr_err("%s: scsi_host_alloc failed\n", __func__);
+		pr_err("scsi_host_alloc failed\n");
 		error = -ENODEV;
 		return error;
 	}
@@ -5349,7 +5339,8 @@ static int sdebug_driver_probe(struct device * dev)
 		hpnt->max_id = scsi_debug_num_tgts + 1;
 	else
 		hpnt->max_id = scsi_debug_num_tgts;
-	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */
+	/* = scsi_debug_max_luns; */
+	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 
 	host_prot = 0;
 
@@ -5381,7 +5372,7 @@ static int sdebug_driver_probe(struct device * dev)
 
 	scsi_host_set_prot(hpnt, host_prot);
 
-	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
+	pr_info("host protection%s%s%s%s%s%s%s\n",
 	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
 	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
 	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
@@ -5409,7 +5400,7 @@ static int sdebug_driver_probe(struct device * dev)
 
 	error = scsi_add_host(hpnt, &sdbg_host->dev);
 	if (error) {
-		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("scsi_add_host failed\n");
 		error = -ENODEV;
 		scsi_host_put(hpnt);
 	} else
@@ -5426,8 +5417,7 @@ static int sdebug_driver_remove(struct device * dev)
 	sdbg_host = to_sdebug_host(dev);
 
 	if (!sdbg_host) {
-		printk(KERN_ERR "%s: Unable to locate host info\n",
-		       __func__);
+		pr_err("Unable to locate host info\n");
 		return -ENODEV;
 	}
 