path: root/drivers/target/target_core_file.c
Diffstat (limited to 'drivers/target/target_core_file.c')
 drivers/target/target_core_file.c | 131 ++++++++++++-----------------------
 1 file changed, 47 insertions(+), 84 deletions(-)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index f286955331a2..686dba189f8e 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,15 +133,10 @@ static struct se_device *fd_create_virtdevice(
 		ret = PTR_ERR(dev_p);
 		goto fail;
 	}
-#if 0
-	if (di->no_create_file)
-		flags = O_RDWR | O_LARGEFILE;
-	else
-		flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#else
+
+	/* O_DIRECT too? */
 	flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#endif
-/* flags |= O_DIRECT; */
+
 	/*
 	 * If fd_buffered_io=1 has not been set explicitly (the default),
 	 * use O_SYNC to force FILEIO writes to disk.
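A note on where flags goes from here: a few lines below this hunk (context git does not show), fd_create_virtdevice() ORs in O_SYNC unless fd_buffered_io=1 was given, then opens the backing file. A rough sketch of that elided code; the FDBD_USE_BUFFERED_IO flag name is assumed from this kernel's target_core_file.h, not shown in this diff:

	/* Sketch of the elided context below this hunk; the flag name
	 * FDBD_USE_BUFFERED_IO is an assumption, not part of this diff. */
	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
		flags |= O_SYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", dev_p);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;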
@@ -249,53 +244,33 @@ static void fd_free_device(void *p)
 	kfree(fd_dev);
 }
 
-static inline struct fd_request *FILE_REQ(struct se_task *task)
-{
-	return container_of(task, struct fd_request, fd_task);
-}
-
-
-static struct se_task *
-fd_alloc_task(unsigned char *cdb)
+static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents)
 {
-	struct fd_request *fd_req;
-
-	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
-	if (!fd_req) {
-		pr_err("Unable to allocate struct fd_request\n");
-		return NULL;
-	}
-
-	return &fd_req->fd_task;
-}
-
-static int fd_do_readv(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
-	struct scatterlist *sg = task->task_sg;
+	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (task->task_lba *
+	loff_t pos = (cmd->t_task_lba *
 		se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret = 0, i;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
 	if (!iov) {
 		pr_err("Unable to allocate fd_do_readv iov[]\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
 		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
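The for_each_sg() loop above is the heart of the conversion: each scatterlist segment becomes one struct iovec, so a single vfs_readv() fills every segment in order with no bounce buffer. The same scatter/gather pattern is visible from userspace via readv(2); a minimal self-contained demo (the file path and buffer sizes are arbitrary):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char a[8], b[8];			/* two "segments" */
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	int fd = open("/etc/hostname", O_RDONLY);	/* arbitrary file */
	ssize_t n;

	if (fd < 0)
		return 1;
	n = readv(fd, iov, 2);		/* fills a[] then b[], in order */
	printf("readv copied %zd bytes across 2 segments\n", n);
	close(fd);
	return 0;
}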
@@ -305,10 +280,10 @@ static int fd_do_readv(struct se_task *task)
 	 * block_device.
 	 */
 	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
-		if (ret < 0 || ret != task->task_size) {
+		if (ret < 0 || ret != cmd->data_length) {
 			pr_err("vfs_readv() returned %d,"
 				" expecting %d for S_ISBLK\n", ret,
-				(int)task->task_size);
+				(int)cmd->data_length);
 			return (ret < 0 ? ret : -EINVAL);
 		}
 	} else {
@@ -322,38 +297,38 @@ static int fd_do_readv(struct se_task *task)
 	return 1;
 }
 
-static int fd_do_writev(struct se_task *task)
+static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents)
 {
-	struct fd_request *req = FILE_REQ(task);
-	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
-	struct scatterlist *sg = task->task_sg;
+	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (task->task_lba *
+	loff_t pos = (cmd->t_task_lba *
 		se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret, i = 0;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
 	if (!iov) {
 		pr_err("Unable to allocate fd_do_writev iov[]\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
 		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
 
-	if (ret < 0 || ret != task->task_size) {
+	if (ret < 0 || ret != cmd->data_length) {
 		pr_err("vfs_writev() returned %d\n", ret);
 		return (ret < 0 ? ret : -EINVAL);
 	}
@@ -361,9 +336,8 @@ static int fd_do_writev(struct se_task *task)
 	return 1;
 }
 
-static void fd_emulate_sync_cache(struct se_task *task)
+static void fd_emulate_sync_cache(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
 	int immed = (cmd->t_task_cdb[1] & 0x2);
@@ -375,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	 * for this SYNCHRONIZE_CACHE op
 	 */
 	if (immed)
-		transport_complete_sync_cache(cmd, 1);
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 
 	/*
 	 * Determine if we will be flushing the entire device.
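Between this hunk and the next, git elides the range computation that comment refers to: LBA 0 with a zero data_length means "flush the whole device", otherwise the LBA and transfer length bound the sync. A sketch of that elided logic, reconstructed from the surrounding code of this era and not part of the diff itself:

	/* Sketch of elided context: compute the byte range to flush */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;	/* flush the entire device */
	} else {
		start = cmd->t_task_lba *
			dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);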
@@ -395,33 +369,37 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	if (ret != 0)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 
-	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
+	if (immed)
+		return;
+
+	if (ret) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+	} else {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+	}
 }
 
-/*
- * WRITE Force Unit Access (FUA) emulation on a per struct se_task
- * LBA range basis..
- */
-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+static void fd_emulate_write_fua(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
-	loff_t end = start + task->task_size;
+	loff_t start = cmd->t_task_lba *
+		dev->se_sub_dev->se_dev_attrib.block_size;
+	loff_t end = start + cmd->data_length;
 	int ret;
 
 	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-		task->task_lba, task->task_size);
+		cmd->t_task_lba, cmd->data_length);
 
 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 	if (ret != 0)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 }
 
-static int fd_do_task(struct se_task *task)
+static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	int ret = 0;
 
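With fd_execute_cmd() in place the backend never sees a task: the core hands over the command's own scatterlist and direction. A hypothetical sketch of the dispatch in the target core; the t_data_sg/t_data_nents field names are assumed from struct se_cmd of this era, and this call site is not part of the diff:

	/* Hypothetical call site in the target core */
	ret = cmd->se_dev->transport->execute_cmd(cmd, cmd->t_data_sg,
			cmd->t_data_nents, cmd->data_direction);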
@@ -429,10 +407,10 @@ static int fd_do_task(struct se_task *task)
 	 * Call vectorized fileio functions to map struct scatterlist
 	 * physical memory addresses to struct iovec virtual memory.
 	 */
-	if (task->task_data_direction == DMA_FROM_DEVICE) {
-		ret = fd_do_readv(task);
+	if (data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(cmd, sgl, sgl_nents);
 	} else {
-		ret = fd_do_writev(task);
+		ret = fd_do_writev(cmd, sgl, sgl_nents);
 
 		if (ret > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
@@ -443,7 +421,7 @@ static int fd_do_task(struct se_task *task)
 		 * and return some sense data to let the initiator
 		 * know the FUA WRITE cache sync failed..?
 		 */
-		fd_emulate_write_fua(cmd, task);
+		fd_emulate_write_fua(cmd);
 	}
 
 	}
@@ -452,24 +430,11 @@ static int fd_do_task(struct se_task *task)
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return ret;
 	}
-	if (ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (ret)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
 
-/* fd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_task(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-
-	kfree(req);
-}
-
 enum {
 	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
 };
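The Opt_* enum kept above pairs with a match_table_t defined a few lines below this hunk, which the patch leaves untouched so git does not show it; it amounts to roughly:

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};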
@@ -632,10 +597,8 @@ static struct se_subsystem_api fileio_template = {
 	.allocate_virtdevice	= fd_allocate_virtdevice,
 	.create_virtdevice	= fd_create_virtdevice,
 	.free_device		= fd_free_device,
-	.alloc_task		= fd_alloc_task,
-	.do_task		= fd_do_task,
+	.execute_cmd		= fd_execute_cmd,
 	.do_sync_cache		= fd_emulate_sync_cache,
-	.free_task		= fd_free_task,
 	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
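fileio_template is hooked into the core by the module init/exit boilerplate further down the file, also untouched by this patch; for reference it amounts to roughly:

static int __init fileio_module_init(void)
{
	/* register the FILEIO backend with the target core */
	return transport_subsystem_register(&fileio_template);
}

static void __exit fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}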