Diffstat (limited to 'drivers/target/target_core_iblock.c')
 -rw-r--r--  drivers/target/target_core_iblock.c | 501
 1 file changed, 258 insertions(+), 243 deletions(-)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 57d7674c5013..b526d23dcd4f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -4,10 +4,7 @@
  * This file contains the Storage Engine <-> Linux BlockIO transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -50,9 +47,13 @@
 #define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE	128
 
-static struct se_subsystem_api iblock_template;
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct iblock_dev, dev);
+}
+
 
-static void iblock_bio_done(struct bio *, int);
+static struct se_subsystem_api iblock_template;
 
 /* iblock_attach_hba(): (Part of se_subsystem_api_t template)
  *
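The new IBLOCK_DEV() helper is the kernel's container_of() idiom: core code passes around the embedded struct se_device, and the backend recovers its enclosing struct iblock_dev by subtracting the member offset. A minimal userspace sketch of the same pattern, with the macro expanded by hand and hypothetical stand-in structs:

#include <stddef.h>
#include <stdio.h>

/* offsetof-based container_of, as the kernel macro expands (simplified) */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_device { int id; };                 /* stand-in for the embedded struct */
struct iblock_dev { char name[16]; struct se_device dev; };

static struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

int main(void)
{
	struct iblock_dev ib = { "ibd0", { 42 } };
	struct se_device *inner = &ib.dev;    /* what core code hands around */

	/* recover the outer object from a pointer to the embedded member */
	printf("%s %d\n", IBLOCK_DEV(inner)->name, inner->id);   /* ibd0 42 */
	return 0;
}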
@@ -70,7 +71,7 @@ static void iblock_detach_hba(struct se_hba *hba)
 {
 }
 
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct iblock_dev *ib_dev = NULL;
 
@@ -82,40 +83,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
 
 	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
 
-	return ib_dev;
+	return &ib_dev->dev;
 }
 
-static struct se_device *iblock_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int iblock_configure_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct block_device *bd = NULL;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	struct request_queue *q;
-	struct queue_limits *limits;
-	u32 dev_flags = 0;
+	struct block_device *bd = NULL;
 	fmode_t mode;
-	int ret = -EINVAL;
+	int ret = -ENOMEM;
 
-	if (!ib_dev) {
-		pr_err("Unable to locate struct iblock_dev parameter\n");
-		return ERR_PTR(ret);
+	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
 	}
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 
 	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
 	if (!ib_dev->ibd_bio_set) {
-		pr_err("IBLOCK: Unable to create bioset()\n");
-		return ERR_PTR(-ENOMEM);
+		pr_err("IBLOCK: Unable to create bioset\n");
+		goto out;
 	}
-	pr_debug("IBLOCK: Created bio_set()\n");
-	/*
-	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
-	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
-	 */
+
 	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
 			ib_dev->ibd_udev_path);
 
@@ -126,27 +115,15 @@ static struct se_device *iblock_create_virtdevice(
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
 	if (IS_ERR(bd)) {
 		ret = PTR_ERR(bd);
-		goto failed;
+		goto out_free_bioset;
 	}
-	/*
-	 * Setup the local scope queue_limits from struct request_queue->limits
-	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-	 */
-	q = bdev_get_queue(bd);
-	limits = &dev_limits.limits;
-	limits->logical_block_size = bdev_logical_block_size(bd);
-	limits->max_hw_sectors = UINT_MAX;
-	limits->max_sectors = UINT_MAX;
-	dev_limits.hw_queue_depth = q->nr_requests;
-	dev_limits.queue_depth = q->nr_requests;
-
 	ib_dev->ibd_bd = bd;
 
-	dev = transport_add_device_to_core_hba(hba,
-			&iblock_template, se_dev, dev_flags, ib_dev,
-			&dev_limits, "IBLOCK", IBLOCK_VERSION);
-	if (!dev)
-		goto failed;
+	q = bdev_get_queue(bd);
+
+	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+	dev->dev_attrib.hw_max_sectors = UINT_MAX;
+	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
 	/*
 	 * Check if the underlying struct block_device request_queue supports
@@ -154,38 +131,41 @@ static struct se_device *iblock_create_virtdevice(
 	 * in ATA and we need to set TPE=1
 	 */
 	if (blk_queue_discard(q)) {
-		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+		dev->dev_attrib.max_unmap_lba_count =
 			q->limits.max_discard_sectors;
+
 		/*
 		 * Currently hardcoded to 1 in Linux/SCSI code..
 		 */
-		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+		dev->dev_attrib.max_unmap_block_desc_count = 1;
+		dev->dev_attrib.unmap_granularity =
 			q->limits.discard_granularity >> 9;
-		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+		dev->dev_attrib.unmap_granularity_alignment =
 			q->limits.discard_alignment;
 
 		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
 	}
+	/*
+	 * Enable write same emulation for IBLOCK and use 0xFFFF as
+	 * the smaller WRITE_SAME(10) only has a two-byte block count.
+	 */
+	dev->dev_attrib.max_write_same_len = 0xFFFF;
 
 	if (blk_queue_nonrot(q))
-		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
-
-	return dev;
+		dev->dev_attrib.is_nonrot = 1;
+	return 0;
 
-failed:
-	if (ib_dev->ibd_bio_set) {
-		bioset_free(ib_dev->ibd_bio_set);
-		ib_dev->ibd_bio_set = NULL;
-	}
-	ib_dev->ibd_bd = NULL;
-	return ERR_PTR(ret);
+out_free_bioset:
+	bioset_free(ib_dev->ibd_bio_set);
+	ib_dev->ibd_bio_set = NULL;
+out:
+	return ret;
 }
 
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
 {
-	struct iblock_dev *ib_dev = p;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 
 	if (ib_dev->ibd_bd != NULL)
 		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
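The reshaped error path in iblock_configure_device() follows the usual kernel goto-unwind convention: each failure jumps to a label that undoes only what was already set up, in reverse order of acquisition. A self-contained sketch of the same shape, with hypothetical malloc'd resources standing in for the bioset and block device:

#include <stdio.h>
#include <stdlib.h>

struct ctx { void *bioset; void *bdev; };

/* Acquire two resources; on failure, release only what was already taken. */
static int configure(struct ctx *c)
{
	int ret = -1;

	c->bioset = malloc(128);		/* stands in for bioset_create() */
	if (!c->bioset)
		goto out;

	c->bdev = malloc(64);			/* stands in for blkdev_get_by_path() */
	if (!c->bdev)
		goto out_free_bioset;

	return 0;				/* success: caller owns both */

out_free_bioset:
	free(c->bioset);
	c->bioset = NULL;
out:
	return ret;
}

int main(void)
{
	struct ctx c = { 0, 0 };
	printf("configure: %d\n", configure(&c));
	free(c.bdev);
	free(c.bioset);
	return 0;
}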
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		bdev_logical_block_size(bd)) - 1);
 	u32 block_size = bdev_logical_block_size(bd);
 
-	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+	if (block_size == dev->dev_attrib.block_size)
 		return blocks_long;
 
 	switch (block_size) {
 	case 4096:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 2048:
 			blocks_long <<= 1;
 			break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 2048:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 1;
 			break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 1024:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 2;
 			break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 		}
 		break;
 	case 512:
-		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		switch (dev->dev_attrib.block_size) {
 		case 4096:
 			blocks_long >>= 3;
 			break;
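This switch ladder rescales the reported last-LBA value when the block size advertised to the initiator differs from the device's logical block size; since both are powers of two between 512 and 4096, each step is a single shift. A hedged userspace equivalent of the whole ladder (note it shifts the already-decremented count, exactly as the helper above does):

#include <stdio.h>

/* Rescale a last-LBA count from one power-of-two block size to another,
 * as the 4096/2048/1024/512 switch ladder does with explicit shifts. */
static unsigned long long rescale_blocks(unsigned long long blocks_long,
					 unsigned int from_bs, unsigned int to_bs)
{
	while (from_bs > to_bs) {	/* e.g. 4096 -> 512: three doublings */
		blocks_long <<= 1;
		from_bs >>= 1;
	}
	while (from_bs < to_bs) {	/* e.g. 512 -> 4096: three halvings */
		blocks_long >>= 1;
		from_bs <<= 1;
	}
	return blocks_long;
}

int main(void)
{
	/* last 4k LBA of a 1 GiB disk is 262143; prints 2097144 */
	printf("%llu\n", rescale_blocks(262143ULL, 4096, 512));
	return 0;
}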
@@ -273,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
+
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
+	 */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+		err = -EIO;
+
+	if (err != 0) {
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+	}
+
+	bio_put(bio);
+
+	iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	struct bio *bio;
+
+	/*
+	 * Only allocate as many vector entries as the bio code allows us to,
+	 * we'll loop later on until we have handled the whole request.
+	 */
+	if (sg_num > BIO_MAX_PAGES)
+		sg_num = BIO_MAX_PAGES;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
+		return NULL;
+	}
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = cmd;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
+
+	return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
 static void iblock_end_io_flush(struct bio *bio, int err)
 {
 	struct se_cmd *cmd = bio->bi_private;
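The ibr->pending counter implements a fan-out/fan-in completion: it is seeded at 1 by the submitter, incremented once per extra bio, and the command completes only when the final decrement reaches zero, with any per-bio error latched in ib_bio_err_cnt. Roughly the same pattern in portable C11 atomics, with threads standing in for bio completions (hypothetical names throughout):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int pending = 1;     /* seeded at 1 by the submitter */
static atomic_int err_cnt = 0;     /* error latched across all "bios" */

static void complete_if_last(void)
{
	if (atomic_fetch_sub(&pending, 1) != 1)
		return;            /* other completions still outstanding */
	printf("status: %s\n",
	       atomic_load(&err_cnt) ? "CHECK_CONDITION" : "GOOD");
}

static int bio_done(void *arg)
{
	if (arg)                   /* pretend this "bio" failed */
		atomic_fetch_add(&err_cnt, 1);
	complete_if_last();
	return 0;
}

int main(void)
{
	thrd_t t[2];
	for (int i = 0; i < 2; i++) {
		atomic_fetch_add(&pending, 1);   /* one reference per extra bio */
		thrd_create(&t[i], bio_done, NULL);
	}
	for (int i = 0; i < 2; i++)
		thrd_join(t[i], NULL);
	complete_if_last();        /* drop the submitter's initial reference */
	return 0;
}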
@@ -281,13 +342,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
 		pr_err("IBLOCK: cache flush failed: %d\n", err);
 
 	if (cmd) {
-		if (err) {
-			cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		if (err)
 			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-		} else {
+		else
 			target_complete_cmd(cmd, SAM_STAT_GOOD);
-		}
 	}
 
 	bio_put(bio);
@@ -297,9 +355,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
  * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
  * always flush the whole cache.
  */
-static int iblock_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
 {
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	struct bio *bio;
 
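The IMMED bit of SYNCHRONIZE CACHE lives in bit 1 of CDB byte 1; when set, the target may return status before the flush actually finishes. Extracting it is a plain mask, as a trivial standalone check shows (hypothetical CDB contents):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* SYNCHRONIZE CACHE(10): opcode 0x35, IMMED is bit 1 of byte 1 */
	uint8_t cdb[10] = { 0x35, 0x02 };   /* hypothetical CDB with IMMED set */
	int immed = cdb[1] & 0x2;

	printf("immed=%d\n", !!immed);      /* 1: complete before flush is done */
	return 0;
}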
@@ -319,25 +378,27 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
 	return 0;
 }
 
-static int iblock_execute_unmap(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct iblock_dev *ibd = dev->dev_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	unsigned char *buf, *ptr = NULL;
 	sector_t lba;
 	int size;
 	u32 range;
-	int ret = 0;
-	int dl, bd_dl;
+	sense_reason_t ret = 0;
+	int dl, bd_dl, err;
 
 	if (cmd->data_length < 8) {
 		pr_warn("UNMAP parameter list length %u too small\n",
 			cmd->data_length);
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		return -EINVAL;
+		return TCM_INVALID_PARAMETER_LIST;
 	}
 
 	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	dl = get_unaligned_be16(&buf[0]);
 	bd_dl = get_unaligned_be16(&buf[2]);
@@ -349,9 +410,8 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 	else
 		size = bd_dl;
 
-	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		ret = -EINVAL;
+	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+		ret = TCM_INVALID_PARAMETER_LIST;
 		goto err;
 	}
 
@@ -366,23 +426,22 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
 		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
 			 (unsigned long long)lba, range);
 
-		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
-			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-			ret = -EINVAL;
+		if (range > dev->dev_attrib.max_unmap_lba_count) {
+			ret = TCM_INVALID_PARAMETER_LIST;
 			goto err;
 		}
 
 		if (lba + range > dev->transport->get_blocks(dev) + 1) {
-			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
-			ret = -EINVAL;
+			ret = TCM_ADDRESS_OUT_OF_RANGE;
 			goto err;
 		}
 
-		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
-					   GFP_KERNEL, 0);
-		if (ret < 0) {
-			pr_err("blkdev_issue_discard() failed: %d\n",
-					ret);
+		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
+					   GFP_KERNEL, 0);
+		if (err < 0) {
+			pr_err("blkdev_issue_discard() failed: %d\n",
+					err);
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			goto err;
 		}
 
@@ -397,23 +456,86 @@ err:
 	return ret;
 }
 
-static int iblock_execute_write_same(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
-	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
-	int ret;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	int rc;
 
-	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
-				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
-				   0);
-	if (ret < 0) {
-		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
-		return ret;
+	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
+			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+	if (rc < 0) {
+		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 
 	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr;
+	struct scatterlist *sg;
+	struct bio *bio;
+	struct bio_list list;
+	sector_t block_lba = cmd->t_task_lba;
+	sector_t sectors = spc_get_write_same_sectors(cmd);
+
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n", cmd->t_data_nents, sg->length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	bio = iblock_get_bio(cmd, block_lba, 1);
+	if (!bio)
+		goto fail_free_ibr;
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+
+	atomic_set(&ibr->pending, 1);
+
+	while (sectors) {
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+
+			bio = iblock_get_bio(cmd, block_lba, 1);
+			if (!bio)
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
+			bio_list_add(&list, bio);
+		}
+
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sectors -= 1;
+	}
+
+	iblock_submit_bios(&list, WRITE);
+	return 0;
+
+fail_put_bios:
+	while ((bio = bio_list_pop(&list)))
+		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
+fail:
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
 enum {
 	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
 };
@@ -425,11 +547,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };
 
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
-					       struct se_subsystem_dev *se_dev,
-					       const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, token;
@@ -491,43 +612,26 @@ out:
 	return (!ret) ? count : ret;
 }
 
-static ssize_t iblock_check_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev)
-{
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
-	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-		pr_err("Missing udev_path= parameters for IBLOCK\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-	struct block_device *bd = ibd->ibd_bd;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 	char buf[BDEVNAME_SIZE];
 	ssize_t bl = 0;
 
 	if (bd)
 		bl += sprintf(b + bl, "iBlock device: %s",
 				bdevname(bd, buf));
-	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
 		bl += sprintf(b + bl, "  UDEV PATH: %s",
-				ibd->ibd_udev_path);
-	bl += sprintf(b + bl, "  readonly: %d\n", ibd->ibd_readonly);
+				ib_dev->ibd_udev_path);
+	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
 
 	bl += sprintf(b + bl, "        ");
 	if (bd) {
 		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
 			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-			"" : (bd->bd_holder == ibd) ?
+			"" : (bd->bd_holder == ib_dev) ?
 			"CLAIMED: IBLOCK" : "CLAIMED: OS");
 	} else {
 		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -536,61 +640,8 @@ static ssize_t iblock_show_configfs_dev_params(
 	return bl;
 }
 
-static void iblock_complete_cmd(struct se_cmd *cmd)
-{
-	struct iblock_req *ibr = cmd->priv;
-	u8 status;
-
-	if (!atomic_dec_and_test(&ibr->pending))
-		return;
-
-	if (atomic_read(&ibr->ib_bio_err_cnt))
-		status = SAM_STAT_CHECK_CONDITION;
-	else
-		status = SAM_STAT_GOOD;
-
-	target_complete_cmd(cmd, status);
-	kfree(ibr);
-}
-
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
-{
-	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	struct bio *bio;
-
-	/*
-	 * Only allocate as many vector entries as the bio code allows us to,
-	 * we'll loop later on until we have handled the whole request.
-	 */
-	if (sg_num > BIO_MAX_PAGES)
-		sg_num = BIO_MAX_PAGES;
-
-	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-	if (!bio) {
-		pr_err("Unable to allocate memory for bio\n");
-		return NULL;
-	}
-
-	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = cmd;
-	bio->bi_end_io = &iblock_bio_done;
-	bio->bi_sector = lba;
-	return bio;
-}
-
-static void iblock_submit_bios(struct bio_list *list, int rw)
-{
-	struct blk_plug plug;
-	struct bio *bio;
-
-	blk_start_plug(&plug);
-	while ((bio = bio_list_pop(list)))
-		submit_bio(rw, bio);
-	blk_finish_plug(&plug);
-}
-
-static int iblock_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
 	u32 sgl_nents = cmd->t_data_nents;
@@ -611,8 +662,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
 		 * Force data to disk if we pretend to not have a volatile
 		 * write cache, or the initiator set the Force Unit Access bit.
 		 */
-		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		if (dev->dev_attrib.emulate_write_cache == 0 ||
+		    (dev->dev_attrib.emulate_fua_write > 0 &&
 		     (cmd->se_cmd_flags & SCF_FUA)))
 			rw = WRITE_FUA;
 		else
@@ -625,19 +676,18 @@ static int iblock_execute_rw(struct se_cmd *cmd)
 	 * Convert the blocksize advertised to the initiator to the 512 byte
 	 * units unconditionally used by the Linux block layer.
 	 */
-	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+	if (dev->dev_attrib.block_size == 4096)
 		block_lba = (cmd->t_task_lba << 3);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+	else if (dev->dev_attrib.block_size == 2048)
 		block_lba = (cmd->t_task_lba << 2);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+	else if (dev->dev_attrib.block_size == 1024)
 		block_lba = (cmd->t_task_lba << 1);
-	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+	else if (dev->dev_attrib.block_size == 512)
 		block_lba = cmd->t_task_lba;
 	else {
 		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -ENOSYS;
+				" %u\n", dev->dev_attrib.block_size);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
 
 	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
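The block layer addresses bios in 512-byte sectors regardless of the block size advertised to the initiator, so the LBA must be scaled by the power-of-two ratio first; the if/else ladder above is a shift by log2(block_size / 512). A compact sketch of the same conversion (hypothetical function name):

#include <stdint.h>
#include <stdio.h>

/* Map an LBA in the advertised block size to 512-byte block-layer sectors.
 * Mirrors the 4096/2048/1024/512 ladder: shift by log2(block_size / 512). */
static int scsi_lba_to_sector(uint64_t lba, uint32_t block_size, uint64_t *out)
{
	switch (block_size) {
	case 4096: *out = lba << 3; return 0;
	case 2048: *out = lba << 2; return 0;
	case 1024: *out = lba << 1; return 0;
	case 512:  *out = lba;      return 0;
	default:   return -1;       /* unsupported, as in the driver */
	}
}

int main(void)
{
	uint64_t sector;
	if (scsi_lba_to_sector(100, 4096, &sector) == 0)
		printf("sector %llu\n", (unsigned long long)sector);  /* 800 */
	return 0;
}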
@@ -697,83 +747,48 @@ fail_put_bios:
 		bio_put(bio);
 fail_free_ibr:
 	kfree(ibr);
-	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 fail:
-	return -ENOMEM;
-}
-
-static u32 iblock_get_device_rev(struct se_device *dev)
-{
-	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 iblock_get_device_type(struct se_device *dev)
-{
-	return TYPE_DISK;
+	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
 static sector_t iblock_get_blocks(struct se_device *dev)
 {
-	struct iblock_dev *ibd = dev->dev_ptr;
-	struct block_device *bd = ibd->ibd_bd;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
 	struct request_queue *q = bdev_get_queue(bd);
 
 	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
-{
-	struct se_cmd *cmd = bio->bi_private;
-	struct iblock_req *ibr = cmd->priv;
-
-	/*
-	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-	 */
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-		err = -EIO;
-
-	if (err != 0) {
-		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-			" err: %d\n", bio, err);
-		/*
-		 * Bump the ib_bio_err_cnt and release bio.
-		 */
-		atomic_inc(&ibr->ib_bio_err_cnt);
-		smp_mb__after_atomic_inc();
-	}
-
-	bio_put(bio);
-
-	iblock_complete_cmd(cmd);
-}
-
-static struct spc_ops iblock_spc_ops = {
+static struct sbc_ops iblock_sbc_ops = {
 	.execute_rw		= iblock_execute_rw,
 	.execute_sync_cache	= iblock_execute_sync_cache,
 	.execute_write_same	= iblock_execute_write_same,
+	.execute_write_same_unmap = iblock_execute_write_same_unmap,
 	.execute_unmap		= iblock_execute_unmap,
 };
 
-static int iblock_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &iblock_spc_ops);
+	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 }
 
 static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
+	.inquiry_prod		= "IBLOCK",
+	.inquiry_rev		= IBLOCK_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
 	.attach_hba		= iblock_attach_hba,
 	.detach_hba		= iblock_detach_hba,
-	.allocate_virtdevice	= iblock_allocate_virtdevice,
-	.create_virtdevice	= iblock_create_virtdevice,
+	.alloc_device		= iblock_alloc_device,
+	.configure_device	= iblock_configure_device,
 	.free_device		= iblock_free_device,
 	.parse_cdb		= iblock_parse_cdb,
-	.check_configfs_dev_params = iblock_check_configfs_dev_params,
 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
-	.get_device_rev		= iblock_get_device_rev,
-	.get_device_type	= iblock_get_device_type,
+	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= iblock_get_blocks,
 };
 
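The sbc_ops table introduced by this commit is the usual C vtable idiom: sbc_parse_cdb() decodes the CDB once and dispatches each opcode through per-backend function pointers. A toy version of that dispatch, with a hypothetical two-op table:

#include <stdio.h>

/* Per-backend operation table, as struct sbc_ops provides for sbc_parse_cdb(). */
struct sbc_ops {
	int (*execute_rw)(const char *what);
	int (*execute_sync_cache)(const char *what);
};

static int iblock_rw(const char *what)    { printf("iblock rw: %s\n", what); return 0; }
static int iblock_flush(const char *what) { printf("iblock flush: %s\n", what); return 0; }

static const struct sbc_ops iblock_ops = {
	.execute_rw		= iblock_rw,
	.execute_sync_cache	= iblock_flush,
};

/* Generic code dispatches by opcode without knowing the backend. */
static int parse_cdb(unsigned char opcode, const struct sbc_ops *ops)
{
	switch (opcode) {
	case 0x28: return ops->execute_rw("READ_10");
	case 0x35: return ops->execute_sync_cache("SYNCHRONIZE_CACHE");
	default:   return -1;
	}
}

int main(void)
{
	parse_cdb(0x28, &iblock_ops);
	parse_cdb(0x35, &iblock_ops);
	return 0;
}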