Diffstat (limited to 'drivers/target/target_core_file.c')
 drivers/target/target_core_file.c | 279 ++++++++++++------------------------
 1 file changed, 98 insertions(+), 181 deletions(-)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0360383dfb94..b9c88497e8f0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -3,10 +3,7 @@
  *
  * This file contains the Storage Engine <-> FILEIO transport specific functions
  *
- * Copyright (c) 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2005-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -41,7 +38,10 @@
 
 #include "target_core_file.h"
 
-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+	return container_of(dev, struct fd_dev, dev);
+}
 
 /* fd_attach_hba(): (Part of se_subsystem_api_t template)
  *
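
The new FD_DEV() helper replaces the old se_device->dev_ptr indirection: struct fd_dev now embeds its struct se_device by value, so container_of() recovers the backend-private structure from the core's pointer. A minimal sketch of the pattern (the fd_dev layout shown is abbreviated, not the full definition from target_core_file.h):

	struct fd_dev {
		struct se_device dev;	/* embedded by value, not a pointer */
		struct file *fd_file;
		/* ... remaining FILEIO-private fields ... */
	};

	/* Any se_device allocated by fd_alloc_device() maps back to its fd_dev: */
	struct fd_dev *fd_dev = container_of(dev, struct fd_dev, dev);
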
@@ -82,7 +82,7 @@ static void fd_detach_hba(struct se_hba *hba)
 	hba->hba_ptr = NULL;
 }
 
-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct fd_dev *fd_dev;
 	struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +97,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
 
 	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
 
-	return fd_dev;
+	return &fd_dev->dev;
 }
 
-/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
+static int fd_configure_device(struct se_device *dev)
 {
-	struct se_device *dev;
-	struct se_dev_limits dev_limits;
-	struct queue_limits *limits;
-	struct fd_dev *fd_dev = p;
-	struct fd_host *fd_host = hba->hba_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
+	struct fd_host *fd_host = dev->se_hba->hba_ptr;
 	struct file *file;
 	struct inode *inode = NULL;
-	int dev_flags = 0, flags, ret = -EINVAL;
+	int flags, ret = -EINVAL;
 
-	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		pr_err("Missing fd_dev_name=\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
 	 * of pure timestamp updates.
 	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
 	/*
 	 * Optionally allow fd_buffered_io=1 to be enabled for people
 	 * who want use the fs buffer cache as an WriteCache mechanism.
@@ -154,22 +148,17 @@ static struct se_device *fd_create_virtdevice(
154 */ 148 */
155 inode = file->f_mapping->host; 149 inode = file->f_mapping->host;
156 if (S_ISBLK(inode->i_mode)) { 150 if (S_ISBLK(inode->i_mode)) {
157 struct request_queue *q; 151 struct request_queue *q = bdev_get_queue(inode->i_bdev);
158 unsigned long long dev_size; 152 unsigned long long dev_size;
159 /* 153
160 * Setup the local scope queue_limits from struct request_queue->limits 154 dev->dev_attrib.hw_block_size =
161 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 155 bdev_logical_block_size(inode->i_bdev);
162 */ 156 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
163 q = bdev_get_queue(inode->i_bdev); 157
164 limits = &dev_limits.limits;
165 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
166 limits->max_hw_sectors = queue_max_hw_sectors(q);
167 limits->max_sectors = queue_max_sectors(q);
168 /* 158 /*
169 * Determine the number of bytes from i_size_read() minus 159 * Determine the number of bytes from i_size_read() minus
170 * one (1) logical sector from underlying struct block_device 160 * one (1) logical sector from underlying struct block_device
171 */ 161 */
172 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
173 dev_size = (i_size_read(file->f_mapping->host) - 162 dev_size = (i_size_read(file->f_mapping->host) -
174 fd_dev->fd_block_size); 163 fd_dev->fd_block_size);
175 164
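When the backing file sits on a block device, the hardware limits now go straight from the underlying request queue into dev->dev_attrib, and the usable size is i_size_read() minus one logical sector so the highest LBA still addresses a complete block. A worked illustration (the numbers are hypothetical, not from the patch):

	/* 1 GiB block device with 512-byte logical sectors:
	 *   i_size_read() = 1073741824
	 *   fd_block_size = 512
	 *   dev_size      = 1073741824 - 512 = 1073741312
	 * so the last addressable LBA maps to a full sector.
	 */
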
@@ -185,26 +174,18 @@ static struct se_device *fd_create_virtdevice(
 			goto fail;
 		}
 
-		limits = &dev_limits.limits;
-		limits->logical_block_size = FD_BLOCKSIZE;
-		limits->max_hw_sectors = FD_MAX_SECTORS;
-		limits->max_sectors = FD_MAX_SECTORS;
-		fd_dev->fd_block_size = FD_BLOCKSIZE;
+		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
 	}
 
-	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
-	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
 
-	dev = transport_add_device_to_core_hba(hba, &fileio_template,
-				se_dev, dev_flags, fd_dev,
-				&dev_limits, "FILEIO", FD_VERSION);
-	if (!dev)
-		goto fail;
+	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
 		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
 			" with FDBD_HAS_BUFFERED_IO_WCE\n");
-		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+		dev->dev_attrib.emulate_write_cache = 1;
 	}
 
 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +195,18 @@ static struct se_device *fd_create_virtdevice(
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
 			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
 
-	return dev;
+	return 0;
 fail:
 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
 		fd_dev->fd_file = NULL;
 	}
-	return ERR_PTR(ret);
+	return ret;
 }
 
-/* fd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
 {
-	struct fd_dev *fd_dev = p;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 
 	if (fd_dev->fd_file) {
 		filp_close(fd_dev->fd_file, NULL);
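
With setup split into alloc_device()/configure_device(), fd_configure_device() reports failure as a plain negative errno rather than an ERR_PTR()-encoded pointer. A schematic of the caller side under this convention (the core's real sequencing lives in the target core; this is only an assumption-laden sketch):

	struct se_device *dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return -ENOMEM;
	/* ... set configfs parameters ... */
	ret = hba->transport->configure_device(dev);
	if (ret)	/* plain errno; no IS_ERR()/PTR_ERR() unwrapping */
		goto free_device;
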
@@ -239,17 +216,16 @@ static void fd_free_device(void *p)
 	kfree(fd_dev);
 }
 
-static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
-		u32 sgl_nents)
+static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, int is_write)
 {
 	struct se_device *se_dev = cmd->se_dev;
-	struct fd_dev *dev = se_dev->dev_ptr;
+	struct fd_dev *dev = FD_DEV(se_dev);
 	struct file *fd = dev->fd_file;
 	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (cmd->t_task_lba *
-		se_dev->se_sub_dev->se_dev_attrib.block_size);
+	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
 	int ret = 0, i;
 
 	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -260,81 +236,58 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
 
 	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
-		iov[i].iov_base = sg_virt(sg);
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
+	if (is_write)
+		ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+	else
+		ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
 	set_fs(old_fs);
 
+	for_each_sg(sgl, sg, sgl_nents, i)
+		kunmap(sg_page(sg));
+
 	kfree(iov);
-	/*
-	 * Return zeros and GOOD status even if the READ did not return
-	 * the expected virt_size for struct file w/o a backing struct
-	 * block_device.
-	 */
-	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+
+	if (is_write) {
 		if (ret < 0 || ret != cmd->data_length) {
-			pr_err("vfs_readv() returned %d,"
-				" expecting %d for S_ISBLK\n", ret,
-				(int)cmd->data_length);
+			pr_err("%s() write returned %d\n", __func__, ret);
 			return (ret < 0 ? ret : -EINVAL);
 		}
 	} else {
-		if (ret < 0) {
-			pr_err("vfs_readv() returned %d for non"
-				" S_ISBLK\n", ret);
-			return ret;
+		/*
+		 * Return zeros and GOOD status even if the READ did not return
+		 * the expected virt_size for struct file w/o a backing struct
+		 * block_device.
+		 */
+		if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+			if (ret < 0 || ret != cmd->data_length) {
+				pr_err("%s() returned %d, expecting %u for "
+						"S_ISBLK\n", __func__, ret,
+						cmd->data_length);
+				return (ret < 0 ? ret : -EINVAL);
+			}
+		} else {
+			if (ret < 0) {
+				pr_err("%s() returned %d for non S_ISBLK\n",
+						__func__, ret);
+				return ret;
+			}
 		}
 	}
-
-	return 1;
-}
-
-static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
-		u32 sgl_nents)
-{
-	struct se_device *se_dev = cmd->se_dev;
-	struct fd_dev *dev = se_dev->dev_ptr;
-	struct file *fd = dev->fd_file;
-	struct scatterlist *sg;
-	struct iovec *iov;
-	mm_segment_t old_fs;
-	loff_t pos = (cmd->t_task_lba *
-		se_dev->se_sub_dev->se_dev_attrib.block_size);
-	int ret, i = 0;
-
-	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
-	if (!iov) {
-		pr_err("Unable to allocate fd_do_writev iov[]\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(sgl, sg, sgl_nents, i) {
-		iov[i].iov_len = sg->length;
-		iov[i].iov_base = sg_virt(sg);
-	}
-
-	old_fs = get_fs();
-	set_fs(get_ds());
-	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
-	set_fs(old_fs);
-
-	kfree(iov);
-
-	if (ret < 0 || ret != cmd->data_length) {
-		pr_err("vfs_writev() returned %d\n", ret);
-		return (ret < 0 ? ret : -EINVAL);
-	}
 
 	return 1;
 }
 
-static int fd_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
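
The merged fd_do_rw() covers both directions and maps each scatterlist page with kmap() rather than sg_virt(), which is only valid for pages with a permanent kernel mapping; every kmap() is balanced by a kunmap() once the vectored I/O completes. The mapping discipline, condensed from the patch:

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len  = sg->length;
		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset; /* highmem-safe */
	}
	/* ... vfs_readv() or vfs_writev() under set_fs(get_ds()) ... */
	for_each_sg(sgl, sg, sgl_nents, i)
		kunmap(sg_page(sg));	/* must balance every kmap() above */
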
@@ -353,7 +306,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task_lba * dev->dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
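The flush range for SYNCHRONIZE_CACHE is computed in bytes from the starting LBA and the logical block size. A worked example with hypothetical values:

	/* block_size = 512, t_task_lba = 2048, data_length = 4096:
	 *   start = 2048 * 512     = 1048576
	 *   end   = 1048576 + 4096 = 1052672
	 * vfs_fsync_range(fd_dev->fd_file, start, end, 1) then flushes
	 * exactly that byte span rather than the whole file.
	 */
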
@@ -367,17 +320,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
 	if (immed)
 		return 0;
 
-	if (ret) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	if (ret)
 		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-	} else {
+	else
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
-	}
 
 	return 0;
 }
 
-static int fd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
 	u32 sgl_nents = cmd->t_data_nents;
@@ -390,30 +342,29 @@ static int fd_execute_rw(struct se_cmd *cmd)
 	 * physical memory addresses to struct iovec virtual memory.
 	 */
 	if (data_direction == DMA_FROM_DEVICE) {
-		ret = fd_do_readv(cmd, sgl, sgl_nents);
+		ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
 	} else {
-		ret = fd_do_writev(cmd, sgl, sgl_nents);
+		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
 		/*
 		 * Perform implict vfs_fsync_range() for fd_do_writev() ops
 		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
 		 * Allow this to happen independent of WCE=0 setting.
 		 */
 		if (ret > 0 &&
-		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		    dev->dev_attrib.emulate_fua_write > 0 &&
 		    (cmd->se_cmd_flags & SCF_FUA)) {
-			struct fd_dev *fd_dev = dev->dev_ptr;
+			struct fd_dev *fd_dev = FD_DEV(dev);
 			loff_t start = cmd->t_task_lba *
-				dev->se_sub_dev->se_dev_attrib.block_size;
+				dev->dev_attrib.block_size;
 			loff_t end = start + cmd->data_length;
 
 			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 		}
 	}
 
-	if (ret < 0) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return ret;
-	}
+	if (ret < 0)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
 	if (ret)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
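
fd_execute_rw() now reports failure by returning a sense_reason_t (here TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE) instead of stashing the reason in cmd->scsi_sense_reason and returning a negative value. Schematically, on the core side (a sketch of the convention, not the literal core code):

	sense_reason_t rc;

	rc = cmd->execute_cmd(cmd);	/* e.g. fd_execute_rw() */
	if (rc)
		transport_generic_request_failure(cmd, rc); /* builds sense data */
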
@@ -430,12 +381,10 @@ static match_table_t tokens = {
 	{Opt_err, NULL}
 };
 
-static ssize_t fd_set_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
 {
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	char *orig, *ptr, *arg_p, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	int ret = 0, arg, token;
@@ -502,24 +451,9 @@ out:
 	return (!ret) ? count : ret;
 }
 
-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
-	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
-		pr_err("Missing fd_dev_name=\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	char *b)
-{
-	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	ssize_t bl = 0;
 
 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -530,27 +464,9 @@ static ssize_t fd_show_configfs_dev_params(
 	return bl;
 }
 
-/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_rev(struct se_device *dev)
-{
-	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-/* fd_get_device_type(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_type(struct se_device *dev)
-{
-	return TYPE_DISK;
-}
-
 static sector_t fd_get_blocks(struct se_device *dev)
 {
-	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct fd_dev *fd_dev = FD_DEV(dev);
 	struct file *f = fd_dev->fd_file;
 	struct inode *i = f->f_mapping->host;
 	unsigned long long dev_size;
@@ -564,34 +480,35 @@ static sector_t fd_get_blocks(struct se_device *dev)
 	else
 		dev_size = fd_dev->fd_dev_size;
 
-	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+	return div_u64(dev_size, dev->dev_attrib.block_size);
 }
 
-static struct spc_ops fd_spc_ops = {
+static struct sbc_ops fd_sbc_ops = {
 	.execute_rw		= fd_execute_rw,
 	.execute_sync_cache	= fd_execute_sync_cache,
 };
 
-static int fd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &fd_spc_ops);
+	return sbc_parse_cdb(cmd, &fd_sbc_ops);
 }
 
 static struct se_subsystem_api fileio_template = {
 	.name			= "fileio",
+	.inquiry_prod		= "FILEIO",
+	.inquiry_rev		= FD_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
 	.attach_hba		= fd_attach_hba,
 	.detach_hba		= fd_detach_hba,
-	.allocate_virtdevice	= fd_allocate_virtdevice,
-	.create_virtdevice	= fd_create_virtdevice,
+	.alloc_device		= fd_alloc_device,
+	.configure_device	= fd_configure_device,
 	.free_device		= fd_free_device,
 	.parse_cdb		= fd_parse_cdb,
-	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
-	.get_device_rev		= fd_get_device_rev,
-	.get_device_type	= fd_get_device_type,
+	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= fd_get_blocks,
 };
 
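Taken together, the template now hands the INQUIRY identity strings to the core via .inquiry_prod/.inquiry_rev (previously passed through transport_add_device_to_core_hba()), reuses the generic sbc_get_device_type(), and drops check_configfs_dev_params() since the FBDF_HAS_PATH check moved into fd_configure_device(). How these ops line up with the configfs lifecycle (an illustrative mapping, assuming the standard target configfs layout):

	/*   mkdir /sys/kernel/config/target/core/fileio_0/disk01
	 *       -> .alloc_device (fd_alloc_device)
	 *   echo "fd_dev_name=/tmp/disk01.img,fd_dev_size=..." > .../disk01/control
	 *       -> .set_configfs_dev_params (fd_set_configfs_dev_params)
	 *   echo 1 > .../disk01/enable
	 *       -> .configure_device (fd_configure_device), which now also
	 *          enforces that fd_dev_name= was supplied (FBDF_HAS_PATH)
	 */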