author	Martin K. Petersen <martin.petersen@oracle.com>	2009-10-15 14:45:27 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-12-04 13:00:16 -0500
commit	44d9269481bb43df445adf464b06ff031e67d7ea (patch)
tree	7630c57e9416e7939ce6a177aaa2ff60113adccf
parent	f57e4502cea471c69782d4790c71d8414ab49a9d (diff)
[SCSI] scsi_debug: Thin provisioning support

This version fixes 64-bit modulo on 32-bit as well as inadvertent map
updates when TP was disabled.

Implement support for thin provisioning in scsi_debug. No actual memory
de-allocation is taking place. The intent is to emulate a thinly
provisioned storage device, not to be one.

There are four new module options:

 - unmap_granularity specifies the granularity at which to track mapped
   blocks (specified in number of logical blocks). 2048 (1 MB) is a
   realistic value for disk arrays although some may have a finer
   granularity.

 - unmap_alignment specifies the first LBA which is naturally aligned on
   an unmap_granularity boundary.

 - unmap_max_desc specifies the maximum number of ranges that can be
   unmapped using one UNMAP command. If this is 0, only WRITE SAME is
   supported and UNMAP will cause a check condition.

 - unmap_max_blocks specifies the maximum number of blocks that can be
   unmapped using a single UNMAP command. Default is 0xffffffff.

These parameters are reported in the new and extended block limits VPD.

If unmap_granularity is specified the device is tagged as thin
provisioning capable in READ CAPACITY(16). A bitmap is allocated to
track whether blocks are mapped or not. A WRITE request will cause a
block to be mapped. So will WRITE SAME unless the UNMAP bit is set.

Blocks can be unmapped using either WRITE SAME or UNMAP. No accounting
is done to track partial blocks. This means that only whole blocks will
be marked free. This is how the array people tell me their firmwares
work.

GET LBA STATUS is also supported. This command reports whether a block
is mapped or not, and how long the adjoining mapped/unmapped extent is.

The block allocation bitmap can also be viewed from user space via:

	/sys/bus/pseudo/drivers/scsi_debug/map

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
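For orientation, the short userspace sketch below mirrors the granularity/alignment
arithmetic that map_region()/map_state() in the patch use to turn an LBA into a bit
index in the provisioning bitmap. It is an illustration only, not part of the patch:
the helper name lba_to_map_bit() is made up, and the granularity of 2048 blocks is
simply the example value mentioned above.

/* Standalone sketch (not in the patch): compute the provisioning-map bit
 * for an LBA the same way map_region()/map_state() do below, i.e.
 * "block = lba + (granularity - unmap_alignment); do_div(block, granularity)". */
#include <stdio.h>
#include <stdint.h>

static unsigned int unmap_granularity = 2048;	/* blocks tracked per map bit */
static unsigned int unmap_alignment;		/* first naturally aligned LBA (default 0) */

static uint64_t lba_to_map_bit(uint64_t lba)	/* hypothetical helper name */
{
	uint64_t alignment = unmap_granularity - unmap_alignment;

	return (lba + alignment) / unmap_granularity;
}

int main(void)
{
	uint64_t lbas[] = { 0, 2047, 2048, 1000000 };
	unsigned int i;

	for (i = 0; i < sizeof(lbas) / sizeof(lbas[0]); i++)
		printf("LBA %llu -> map bit %llu\n",
		       (unsigned long long)lbas[i],
		       (unsigned long long)lba_to_map_bit(lbas[i]));
	return 0;
}

A WRITE to any LBA in a granule sets that granule's bit; WRITE SAME with the UNMAP
bit or an UNMAP command clears bits only for granules that are wholly covered, which
matches the "only whole blocks are marked free" behaviour described above.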
-rw-r--r--	drivers/scsi/scsi_debug.c	338
1 file changed, 335 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c4103bef41b5..cb4bf16b4e66 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -44,6 +44,8 @@
 
 #include <net/checksum.h>
 
+#include <asm/unaligned.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -105,6 +107,10 @@ static const char * scsi_debug_version_date = "20070104";
 #define DEF_ATO 1
 #define DEF_PHYSBLK_EXP 0
 #define DEF_LOWEST_ALIGNED 0
+#define DEF_UNMAP_MAX_BLOCKS 0
+#define DEF_UNMAP_MAX_DESC 0
+#define DEF_UNMAP_GRANULARITY 0
+#define DEF_UNMAP_ALIGNMENT 0
 
 /* bit mask values for scsi_debug_opts */
 #define SCSI_DEBUG_OPT_NOISE 1
@@ -162,6 +168,10 @@ static int scsi_debug_guard = DEF_GUARD;
 static int scsi_debug_ato = DEF_ATO;
 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 
 static int scsi_debug_cmnd_count = 0;
 
@@ -223,7 +233,9 @@ static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
 
 static unsigned char * fake_storep;	/* ramdisk storage */
 static unsigned char *dif_storep;	/* protection info */
+static void *map_storep;		/* provisioning map */
 
+static unsigned long map_size;
 static int num_aborts = 0;
 static int num_dev_resets = 0;
 static int num_bus_resets = 0;
@@ -317,6 +329,7 @@ static void get_data_transfer_info(unsigned char *cmd,
 		       (u32)cmd[28] << 24;
 		break;
 
+	case WRITE_SAME_16:
 	case WRITE_16:
 	case READ_16:
 		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
@@ -335,6 +348,7 @@ static void get_data_transfer_info(unsigned char *cmd,
 		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
 		       (u32)cmd[6] << 24;
 		break;
+	case WRITE_SAME:
 	case WRITE_10:
 	case READ_10:
 	case XDWRITEREAD_10:
@@ -691,6 +705,29 @@ static int inquiry_evpd_b0(unsigned char * arr)
 		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
 		arr[7] = sdebug_store_sectors & 0xff;
 	}
+
+	if (scsi_debug_unmap_max_desc) {
+		unsigned int blocks;
+
+		if (scsi_debug_unmap_max_blocks)
+			blocks = scsi_debug_unmap_max_blocks;
+		else
+			blocks = 0xffffffff;
+
+		put_unaligned_be32(blocks, &arr[16]);
+		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
+	}
+
+	if (scsi_debug_unmap_alignment) {
+		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
+		arr[28] |= 0x80;	/* UGAVALID */
+	}
+
+	if (scsi_debug_unmap_granularity) {
+		put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
+		return 0x3c;	/* Mandatory page length for thin provisioning */
+	}
+
 	return sizeof(vpdb0_data);
 }
 
@@ -974,6 +1011,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
 	arr[11] = scsi_debug_sector_size & 0xff;
 	arr[13] = scsi_debug_physblk_exp & 0xf;
 	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+
+	if (scsi_debug_unmap_granularity)
+		arr[14] |= 0x80;	/* TPE */
+
 	arr[15] = scsi_debug_lowest_aligned & 0xff;
 
 	if (scsi_debug_dif) {
@@ -1887,6 +1928,70 @@ out:
 	return ret;
 }
 
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+	unsigned int granularity, alignment, mapped;
+	sector_t block, next, end;
+
+	granularity = scsi_debug_unmap_granularity;
+	alignment = granularity - scsi_debug_unmap_alignment;
+	block = lba + alignment;
+	do_div(block, granularity);
+
+	mapped = test_bit(block, map_storep);
+
+	if (mapped)
+		next = find_next_zero_bit(map_storep, map_size, block);
+	else
+		next = find_next_bit(map_storep, map_size, block);
+
+	end = next * granularity - scsi_debug_unmap_alignment;
+	*num = end - lba;
+
+	return mapped;
+}
+
+static void map_region(sector_t lba, unsigned int len)
+{
+	unsigned int granularity, alignment;
+	sector_t end = lba + len;
+
+	granularity = scsi_debug_unmap_granularity;
+	alignment = granularity - scsi_debug_unmap_alignment;
+
+	while (lba < end) {
+		sector_t block, rem;
+
+		block = lba + alignment;
+		rem = do_div(block, granularity);
+
+		set_bit(block, map_storep);
+
+		lba += granularity - rem;
+	}
+}
+
+static void unmap_region(sector_t lba, unsigned int len)
+{
+	unsigned int granularity, alignment;
+	sector_t end = lba + len;
+
+	granularity = scsi_debug_unmap_granularity;
+	alignment = granularity - scsi_debug_unmap_alignment;
+
+	while (lba < end) {
+		sector_t block, rem;
+
+		block = lba + alignment;
+		rem = do_div(block, granularity);
+
+		if (rem == 0 && lba + granularity <= end)
+			clear_bit(block, map_storep);
+
+		lba += granularity - rem;
+	}
+}
+
 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 		      unsigned int num, struct sdebug_dev_info *devip,
 		      u32 ei_lba)
@@ -1910,6 +2015,8 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 
 	write_lock_irqsave(&atomic_rw, iflags);
 	ret = do_device_access(SCpnt, devip, lba, num, 1);
+	if (scsi_debug_unmap_granularity)
+		map_region(lba, num);
 	write_unlock_irqrestore(&atomic_rw, iflags);
 	if (-1 == ret)
 		return (DID_ERROR << 16);
@@ -1917,9 +2024,143 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 	    (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
 		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
+
+	return 0;
+}
+
+static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
+			   unsigned int num, struct sdebug_dev_info *devip,
+			   u32 ei_lba, unsigned int unmap)
+{
+	unsigned long iflags;
+	unsigned long long i;
+	int ret;
+
+	ret = check_device_access_params(devip, lba, num);
+	if (ret)
+		return ret;
+
+	write_lock_irqsave(&atomic_rw, iflags);
+
+	if (unmap && scsi_debug_unmap_granularity) {
+		unmap_region(lba, num);
+		goto out;
+	}
+
+	/* Else fetch one logical block */
+	ret = fetch_to_dev_buffer(scmd,
+				  fake_storep + (lba * scsi_debug_sector_size),
+				  scsi_debug_sector_size);
+
+	if (-1 == ret) {
+		write_unlock_irqrestore(&atomic_rw, iflags);
+		return (DID_ERROR << 16);
+	} else if ((ret < (num * scsi_debug_sector_size)) &&
+		   (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
+		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
+
+	/* Copy first sector to remaining blocks */
+	for (i = 1 ; i < num ; i++)
+		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
+		       fake_storep + (lba * scsi_debug_sector_size),
+		       scsi_debug_sector_size);
+
+	if (scsi_debug_unmap_granularity)
+		map_region(lba, num);
+out:
+	write_unlock_irqrestore(&atomic_rw, iflags);
+
 	return 0;
 }
 
+struct unmap_block_desc {
+	__be64	lba;
+	__be32	blocks;
+	__be32	__reserved;
+};
+
+static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
+{
+	unsigned char *buf;
+	struct unmap_block_desc *desc;
+	unsigned int i, payload_len, descriptors;
+	int ret;
+
+	ret = check_readiness(scmd, 1, devip);
+	if (ret)
+		return ret;
+
+	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
+	BUG_ON(scsi_bufflen(scmd) != payload_len);
+
+	descriptors = (payload_len - 8) / 16;
+
+	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
+	if (!buf)
+		return check_condition_result;
+
+	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
+
+	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
+	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
+
+	desc = (void *)&buf[8];
+
+	for (i = 0 ; i < descriptors ; i++) {
+		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
+		unsigned int num = get_unaligned_be32(&desc[i].blocks);
+
+		ret = check_device_access_params(devip, lba, num);
+		if (ret)
+			goto out;
+
+		unmap_region(lba, num);
+	}
+
+	ret = 0;
+
+out:
+	kfree(buf);
+
+	return ret;
+}
+
+#define SDEBUG_GET_LBA_STATUS_LEN 32
+
+static int resp_get_lba_status(struct scsi_cmnd * scmd,
+			       struct sdebug_dev_info * devip)
+{
+	unsigned long long lba;
+	unsigned int alloc_len, mapped, num;
+	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
+	int ret;
+
+	ret = check_readiness(scmd, 1, devip);
+	if (ret)
+		return ret;
+
+	lba = get_unaligned_be64(&scmd->cmnd[2]);
+	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
+
+	if (alloc_len < 24)
+		return 0;
+
+	ret = check_device_access_params(devip, lba, 1);
+	if (ret)
+		return ret;
+
+	mapped = map_state(lba, &num);
+
+	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
+	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
+	put_unaligned_be64(lba, &arr[8]);	/* LBA */
+	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
+	arr[20] = !mapped;		/* mapped = 0, unmapped = 1 */
+
+	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
+}
+
 #define SDEBUG_RLUN_ARR_SZ 256
 
 static int resp_report_luns(struct scsi_cmnd * scp,
@@ -2430,6 +2671,10 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
+module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
+module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
+module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
+module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
 
 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2458,6 +2703,10 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)");
+MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
+MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
+MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
 
 static char sdebug_info[256];
 
@@ -2816,6 +3065,23 @@ static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
 }
 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
 
+static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
+{
+	ssize_t count;
+
+	if (scsi_debug_unmap_granularity == 0)
+		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
+				 sdebug_store_sectors);
+
+	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
+
+	buf[count++] = '\n';
+	buf[count++] = 0;
+
+	return count;
+}
+DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
+
 
 /* Note: The following function creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -2847,11 +3113,13 @@ static int do_create_driverfs_files(void)
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
+	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
 	return ret;
 }
 
 static void do_remove_driverfs_files(void)
 {
+	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
@@ -2989,6 +3257,36 @@ static int __init scsi_debug_init(void)
 		memset(dif_storep, 0xff, dif_size);
 	}
 
+	if (scsi_debug_unmap_granularity) {
+		unsigned int map_bytes;
+
+		if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+			printk(KERN_ERR
+			       "%s: ERR: unmap_granularity < unmap_alignment\n",
+			       __func__);
+			return -EINVAL;
+		}
+
+		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
+		map_bytes = map_size >> 3;
+		map_storep = vmalloc(map_bytes);
+
+		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
+		       map_size);
+
+		if (map_storep == NULL) {
+			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
+			ret = -ENOMEM;
+			goto free_vm;
+		}
+
+		memset(map_storep, 0x0, map_bytes);
+
+		/* Map first 1KB for partition table */
+		if (scsi_debug_num_parts)
+			map_region(0, 2);
+	}
+
 	ret = device_register(&pseudo_primary);
 	if (ret < 0) {
 		printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
@@ -3041,6 +3339,8 @@ bus_unreg:
 dev_unreg:
 	device_unregister(&pseudo_primary);
 free_vm:
+	if (map_storep)
+		vfree(map_storep);
 	if (dif_storep)
 		vfree(dif_storep);
 	vfree(fake_storep);
@@ -3167,6 +3467,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
 	int inj_dif = 0;
 	int inj_dix = 0;
 	int delay_override = 0;
+	int unmap = 0;
 
 	scsi_set_resid(SCpnt, 0);
 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
@@ -3272,13 +3573,21 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
 		errsts = resp_readcap(SCpnt, devip);
 		break;
 	case SERVICE_ACTION_IN:
-		if (SAI_READ_CAPACITY_16 != cmd[1]) {
+		if (cmd[1] == SAI_READ_CAPACITY_16)
+			errsts = resp_readcap16(SCpnt, devip);
+		else if (cmd[1] == SAI_GET_LBA_STATUS) {
+
+			if (scsi_debug_unmap_max_desc == 0) {
+				mk_sense_buffer(devip, ILLEGAL_REQUEST,
+						INVALID_COMMAND_OPCODE, 0);
+				errsts = check_condition_result;
+			} else
+				errsts = resp_get_lba_status(SCpnt, devip);
+		} else {
 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
 					INVALID_OPCODE, 0);
 			errsts = check_condition_result;
-			break;
 		}
-		errsts = resp_readcap16(SCpnt, devip);
 		break;
 	case MAINTENANCE_IN:
 		if (MI_REPORT_TARGET_PGS != cmd[1]) {
@@ -3378,6 +3687,29 @@ write:
 			errsts = illegal_condition_result;
 		}
 		break;
+	case WRITE_SAME_16:
+		if (cmd[1] & 0x8)
+			unmap = 1;
+		/* fall through */
+	case WRITE_SAME:
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
+		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
+		break;
+	case UNMAP:
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+
+		if (scsi_debug_unmap_max_desc == 0) {
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_COMMAND_OPCODE, 0);
+			errsts = check_condition_result;
+		} else
+			errsts = resp_unmap(SCpnt, devip);
+		break;
 	case MODE_SENSE:
 	case MODE_SENSE_10:
 		errsts = resp_mode_sense(SCpnt, target, devip);