Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r--  drivers/target/target_core_user.c  272
1 file changed, 151 insertions(+), 121 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 07d2996d8c1f..c448ef421ce7 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
  * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2015 Arrikto, Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -19,16 +20,17 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/idr.h>
+#include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/parser.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
 #include <net/genetlink.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>

 #include <linux/target_core_user.h>

@@ -166,6 +168,11 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 	tcmu_cmd->tcmu_dev = udev;
 	tcmu_cmd->data_length = se_cmd->data_length;

+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
+	}
+
 	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

 	idr_preload(GFP_KERNEL);
@@ -224,9 +231,106 @@ static inline size_t head_to_end(size_t head, size_t size)

 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

+static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
+	struct scatterlist *data_sg, unsigned int data_nents,
+	struct iovec **iov, int *iov_cnt, bool copy_data)
+{
+	int i;
+	void *from, *to;
+	size_t copy_bytes;
+	struct scatterlist *sg;
+
+	for_each_sg(data_sg, sg, data_nents, i) {
+		copy_bytes = min_t(size_t, sg->length,
+				head_to_end(udev->data_head, udev->data_size));
+		from = kmap_atomic(sg_page(sg)) + sg->offset;
+		to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
+
+		if (copy_data) {
+			memcpy(to, from, copy_bytes);
+			tcmu_flush_dcache_range(to, copy_bytes);
+		}
+
+		/* Even iov_base is relative to mb_addr */
+		(*iov)->iov_len = copy_bytes;
+		(*iov)->iov_base = (void __user *) udev->data_off +
+					udev->data_head;
+		(*iov_cnt)++;
+		(*iov)++;
+
+		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
+
+		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
+		if (sg->length != copy_bytes) {
+			void *from_skip = from + copy_bytes;
+
+			copy_bytes = sg->length - copy_bytes;
+
+			(*iov)->iov_len = copy_bytes;
+			(*iov)->iov_base = (void __user *) udev->data_off +
+						udev->data_head;
+
+			if (copy_data) {
+				to = (void *) udev->mb_addr +
+					udev->data_off + udev->data_head;
+				memcpy(to, from_skip, copy_bytes);
+				tcmu_flush_dcache_range(to, copy_bytes);
+			}
+
+			(*iov_cnt)++;
+			(*iov)++;
+
+			UPDATE_HEAD(udev->data_head,
+				copy_bytes, udev->data_size);
+		}
+
+		kunmap_atomic(from - sg->offset);
+	}
+}
+
+static void gather_and_free_data_area(struct tcmu_dev *udev,
+	struct scatterlist *data_sg, unsigned int data_nents)
+{
+	int i;
+	void *from, *to;
+	size_t copy_bytes;
+	struct scatterlist *sg;
+
+	/* It'd be easier to look at entry's iovec again, but UAM */
+	for_each_sg(data_sg, sg, data_nents, i) {
+		copy_bytes = min_t(size_t, sg->length,
+				head_to_end(udev->data_tail, udev->data_size));
+
+		to = kmap_atomic(sg_page(sg)) + sg->offset;
+		WARN_ON(sg->length + sg->offset > PAGE_SIZE);
+		from = (void *) udev->mb_addr +
+			udev->data_off + udev->data_tail;
+		tcmu_flush_dcache_range(from, copy_bytes);
+		memcpy(to, from, copy_bytes);
+
+		UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
+
+		/* Uh oh, wrapped the data buffer for this sg's data */
+		if (sg->length != copy_bytes) {
+			void *to_skip = to + copy_bytes;
+
+			from = (void *) udev->mb_addr +
+				udev->data_off + udev->data_tail;
+			WARN_ON(udev->data_tail);
+			copy_bytes = sg->length - copy_bytes;
+			tcmu_flush_dcache_range(from, copy_bytes);
+			memcpy(to_skip, from, copy_bytes);
+
+			UPDATE_HEAD(udev->data_tail,
+				copy_bytes, udev->data_size);
+		}
+		kunmap_atomic(to - sg->offset);
+	}
+}
+
 /*
- * We can't queue a command until we have space available on the cmd ring *and* space
- * space avail on the data ring.
+ * We can't queue a command until we have space available on the cmd ring *and*
+ * space available on the data ring.
  *
  * Called with ring lock held.
  */
@@ -274,12 +378,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	size_t base_command_size, command_size;
 	struct tcmu_mailbox *mb;
 	struct tcmu_cmd_entry *entry;
-	int i;
-	struct scatterlist *sg;
 	struct iovec *iov;
-	int iov_cnt = 0;
+	int iov_cnt;
 	uint32_t cmd_head;
 	uint64_t cdb_off;
+	bool copy_to_data_area;

 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
 		return -EINVAL;
@@ -292,7 +395,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * b/c size == offsetof one-past-element.
 	 */
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-				req.iov[se_cmd->t_data_nents + 2]),
+				req.iov[se_cmd->t_bidi_data_nents +
+					se_cmd->t_data_nents + 2]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -360,53 +464,20 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * Fix up iovecs, and handle if allocation in data ring wrapped.
 	 */
 	iov = &entry->req.iov[0];
-	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
-		size_t copy_bytes = min((size_t)sg->length,
-				head_to_end(udev->data_head, udev->data_size));
-		void *from = kmap_atomic(sg_page(sg)) + sg->offset;
-		void *to = (void *) mb + udev->data_off + udev->data_head;
-
-		if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
-			memcpy(to, from, copy_bytes);
-			tcmu_flush_dcache_range(to, copy_bytes);
-		}
-
-		/* Even iov_base is relative to mb_addr */
-		iov->iov_len = copy_bytes;
-		iov->iov_base = (void __user *) udev->data_off +
-					udev->data_head;
-		iov_cnt++;
-		iov++;
-
-		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-
-		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
-		if (sg->length != copy_bytes) {
-			from += copy_bytes;
-			copy_bytes = sg->length - copy_bytes;
-
-			iov->iov_len = copy_bytes;
-			iov->iov_base = (void __user *) udev->data_off +
-						udev->data_head;
-
-			if (se_cmd->data_direction == DMA_TO_DEVICE) {
-				to = (void *) mb + udev->data_off + udev->data_head;
-				memcpy(to, from, copy_bytes);
-				tcmu_flush_dcache_range(to, copy_bytes);
-			}
-
-			iov_cnt++;
-			iov++;
-
-			UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-		}
-
-		kunmap_atomic(from);
-	}
+	iov_cnt = 0;
+	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
+		|| se_cmd->se_cmd_flags & SCF_BIDI);
+	alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
+		se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
 	entry->req.iov_cnt = iov_cnt;
-	entry->req.iov_bidi_cnt = 0;
 	entry->req.iov_dif_cnt = 0;

+	/* Handle BIDI commands */
+	iov_cnt = 0;
+	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+	entry->req.iov_bidi_cnt = iov_cnt;
+
 	/* All offsets relative to mb_addr, not start of entry! */
 	cdb_off = CMDR_OFF + cmd_head + base_command_size;
 	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -479,47 +550,22 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 			se_cmd->scsi_sense_length);

 		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
-	}
-	else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		struct scatterlist *sg;
-		int i;
-
-		/* It'd be easier to look at entry's iovec again, but UAM */
-		for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
-			size_t copy_bytes;
-			void *to;
-			void *from;
-
-			copy_bytes = min((size_t)sg->length,
-				head_to_end(udev->data_tail, udev->data_size));
-
-			to = kmap_atomic(sg_page(sg)) + sg->offset;
-			WARN_ON(sg->length + sg->offset > PAGE_SIZE);
-			from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
-			tcmu_flush_dcache_range(from, copy_bytes);
-			memcpy(to, from, copy_bytes);
-
-			UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-
-			/* Uh oh, wrapped the data buffer for this sg's data */
-			if (sg->length != copy_bytes) {
-				from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
-				WARN_ON(udev->data_tail);
-				to += copy_bytes;
-				copy_bytes = sg->length - copy_bytes;
-				tcmu_flush_dcache_range(from, copy_bytes);
-				memcpy(to, from, copy_bytes);
-
-				UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-			}
-
-			kunmap_atomic(to);
-		}
-
+	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		/* Discard data_out buffer */
+		UPDATE_HEAD(udev->data_tail,
+			(size_t)se_cmd->t_data_sg->length, udev->data_size);
+
+		/* Get Data-In buffer */
+		gather_and_free_data_area(udev,
+			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+		gather_and_free_data_area(udev,
+			se_cmd->t_data_sg, se_cmd->t_data_nents);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
 		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
-	} else {
-		pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
+	} else if (se_cmd->data_direction != DMA_NONE) {
+		pr_warn("TCMU: data direction was %d!\n",
+			se_cmd->data_direction);
 	}

 	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
@@ -908,6 +954,14 @@ static int tcmu_check_pending_cmd(int id, void *p, void *data)
 	return -EINVAL;
 }

+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
+	kfree(udev);
+}
+
 static void tcmu_free_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -933,8 +987,7 @@ static void tcmu_free_device(struct se_device *dev)
 		kfree(udev->uio_info.name);
 		kfree(udev->name);
 	}
-
-	kfree(udev);
+	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
 }

 enum {
@@ -1052,27 +1105,7 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 	return passthrough_parse_cdb(cmd, tcmu_pass_op);
 }

-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
-TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
-TB_DEV_ATTR_RO(tcmu, hw_block_size);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
-TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
-
-DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
-TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
-
-static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
-	&tcmu_dev_attrib_hw_pi_prot_type.attr,
-	&tcmu_dev_attrib_hw_block_size.attr,
-	&tcmu_dev_attrib_hw_max_sectors.attr,
-	&tcmu_dev_attrib_hw_queue_depth.attr,
-	NULL,
-};
-
-static struct se_subsystem_api tcmu_template = {
+static const struct target_backend_ops tcmu_ops = {
 	.name = "user",
 	.inquiry_prod = "USER",
 	.inquiry_rev = TCMU_VERSION,
@@ -1088,11 +1121,11 @@ static struct se_subsystem_api tcmu_template = {
 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
 	.get_device_type = sbc_get_device_type,
 	.get_blocks = tcmu_get_blocks,
+	.tb_dev_attrib_attrs = passthrough_attrib_attrs,
 };

 static int __init tcmu_module_init(void)
 {
-	struct target_backend_cits *tbc = &tcmu_template.tb_cits;
 	int ret;

 	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1115,10 +1148,7 @@ static int __init tcmu_module_init(void)
 		goto out_unreg_device;
 	}

-	target_core_setup_sub_cits(&tcmu_template);
-	tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
-
-	ret = transport_subsystem_register(&tcmu_template);
+	ret = transport_backend_register(&tcmu_ops);
 	if (ret)
 		goto out_unreg_genl;

@@ -1136,7 +1166,7 @@ out_free_cache:

 static void __exit tcmu_module_exit(void)
 {
-	transport_subsystem_release(&tcmu_template);
+	target_backend_unregister(&tcmu_ops);
 	genl_unregister_family(&tcmu_genl_family);
 	root_device_unregister(tcmu_root_device);
 	kmem_cache_destroy(tcmu_cmd_cache);