-rw-r--r--  drivers/target/iscsi/iscsi_target.c            |  65
-rw-r--r--  drivers/target/iscsi/iscsi_target.h            |   2
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c |   8
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c       |  25
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c        |  39
-rw-r--r--  drivers/target/target_core_configfs.c          |   4
-rw-r--r--  drivers/target/target_core_device.c            |  58
-rw-r--r--  drivers/target/target_core_fabric_configfs.c   |  12
-rw-r--r--  drivers/target/target_core_file.c              | 114
-rw-r--r--  drivers/target/target_core_iblock.c            |  46
-rw-r--r--  drivers/target/target_core_internal.h          |   3
-rw-r--r--  drivers/target/target_core_rd.c                |  18
-rw-r--r--  drivers/target/target_core_sbc.c               |   8
-rw-r--r--  drivers/target/target_core_spc.c               |  42
-rw-r--r--  drivers/target/target_core_tmr.c               |  12
-rw-r--r--  drivers/target/target_core_tpg.c               |  10
-rw-r--r--  drivers/target/target_core_transport.c         |  19
-rw-r--r--  drivers/vhost/tcm_vhost.c                      | 287
-rw-r--r--  drivers/vhost/tcm_vhost.h                      |   8
-rw-r--r--  include/target/target_core_backend.h           |   5
-rw-r--r--  include/target/target_core_base.h              |   7

21 files changed, 563 insertions(+), 229 deletions(-)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 339f97f7085b..23a98e658306 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -264,16 +264,50 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
 	return 0;
 }
 
-static struct iscsi_np *iscsit_get_np(
+bool iscsit_check_np_match(
 	struct __kernel_sockaddr_storage *sockaddr,
+	struct iscsi_np *np,
 	int network_transport)
 {
 	struct sockaddr_in *sock_in, *sock_in_e;
 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
-	struct iscsi_np *np;
-	int ip_match = 0;
+	bool ip_match = false;
 	u16 port;
 
+	if (sockaddr->ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)sockaddr;
+		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+		if (!memcmp(&sock_in6->sin6_addr.in6_u,
+			    &sock_in6_e->sin6_addr.in6_u,
+			    sizeof(struct in6_addr)))
+			ip_match = true;
+
+		port = ntohs(sock_in6->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)sockaddr;
+		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
+			ip_match = true;
+
+		port = ntohs(sock_in->sin_port);
+	}
+
+	if ((ip_match == true) && (np->np_port == port) &&
+	    (np->np_network_transport == network_transport))
+		return true;
+
+	return false;
+}
+
+static struct iscsi_np *iscsit_get_np(
+	struct __kernel_sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	bool match;
+
 	spin_lock_bh(&np_lock);
 	list_for_each_entry(np, &g_np_list, np_list) {
 		spin_lock(&np->np_thread_lock);
@@ -282,29 +316,8 @@ static struct iscsi_np *iscsit_get_np(
 			continue;
 		}
 
-		if (sockaddr->ss_family == AF_INET6) {
-			sock_in6 = (struct sockaddr_in6 *)sockaddr;
-			sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
-
-			if (!memcmp(&sock_in6->sin6_addr.in6_u,
-				    &sock_in6_e->sin6_addr.in6_u,
-				    sizeof(struct in6_addr)))
-				ip_match = 1;
-
-			port = ntohs(sock_in6->sin6_port);
-		} else {
-			sock_in = (struct sockaddr_in *)sockaddr;
-			sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
-
-			if (sock_in->sin_addr.s_addr ==
-			    sock_in_e->sin_addr.s_addr)
-				ip_match = 1;
-
-			port = ntohs(sock_in->sin_port);
-		}
-
-		if ((ip_match == 1) && (np->np_port == port) &&
-		    (np->np_network_transport == network_transport)) {
+		match = iscsit_check_np_match(sockaddr, np, network_transport);
+		if (match == true) {
 			/*
 			 * Increment the np_exports reference count now to
 			 * prevent iscsit_del_np() below from being called
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index f1e4f3155bac..b1a1e6350707 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -8,6 +8,8 @@ extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
 extern void iscsit_del_tiqn(struct iscsi_tiqn *);
 extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
 extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+				struct iscsi_np *, int);
 extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
 				char *, int);
 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index d89164287d00..ca2be406f141 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1095,11 +1095,11 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
 			SET_PSTATE_REPLY_OPTIONAL(param);
 		}
 	} else if (IS_TYPE_NUMBER(param)) {
-		char *tmpptr, buf[10];
+		char *tmpptr, buf[11];
 		u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
 		u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
 
-		memset(buf, 0, 10);
+		memset(buf, 0, sizeof(buf));
 
 		if (!strcmp(param->name, MAXCONNECTIONS) ||
 		    !strcmp(param->name, MAXBURSTLENGTH) ||
@@ -1503,8 +1503,8 @@ static int iscsi_enforce_integrity_rules(
 			FirstBurstLength = simple_strtoul(param->value,
 					&tmpptr, 0);
 			if (FirstBurstLength > MaxBurstLength) {
-				char tmpbuf[10];
-				memset(tmpbuf, 0, 10);
+				char tmpbuf[11];
+				memset(tmpbuf, 0, sizeof(tmpbuf));
 				sprintf(tmpbuf, "%u", MaxBurstLength);
 				if (iscsi_update_param_value(param, tmpbuf))
 					return -1;
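
Aside (not part of the patch): the buffer bump from 10 to 11 bytes above is easy to sanity-check. A u32 can reach 4294967295, i.e. ten decimal digits, so printing it with "%u" needs ten characters plus a terminating NUL. A minimal standalone check:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char buf[11];	/* 10 digits for UINT_MAX plus the trailing NUL */
	int n = snprintf(buf, sizeof(buf), "%u", UINT_MAX);

	/* prints: UINT_MAX = 4294967295 (10 digits) */
	printf("UINT_MAX = %s (%d digits)\n", buf, n);
	return 0;
}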
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 421d6947dc64..464b4206a51e 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -410,14 +410,16 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
 	struct iscsi_tiqn *tiqn = container_of(igrps,
 				struct iscsi_tiqn, tiqn_stat_grps);
 	struct iscsi_login_stats *lstat = &tiqn->login_stats;
-	unsigned char buf[8];
+	int ret;
 
 	spin_lock(&lstat->lock);
-	snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
-				"ipv6" : "ipv4");
+	if (lstat->last_intr_fail_ip_family == AF_INET6)
+		ret = snprintf(page, PAGE_SIZE, "ipv6\n");
+	else
+		ret = snprintf(page, PAGE_SIZE, "ipv4\n");
 	spin_unlock(&lstat->lock);
 
-	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+	return ret;
 }
 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
 
@@ -427,16 +429,19 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
 	struct iscsi_tiqn *tiqn = container_of(igrps,
 				struct iscsi_tiqn, tiqn_stat_grps);
 	struct iscsi_login_stats *lstat = &tiqn->login_stats;
-	unsigned char buf[32];
+	int ret;
 
 	spin_lock(&lstat->lock);
-	if (lstat->last_intr_fail_ip_family == AF_INET6)
-		snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
-	else
-		snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+	if (lstat->last_intr_fail_ip_family == AF_INET6) {
+		ret = snprintf(page, PAGE_SIZE, "[%s]\n",
+			       lstat->last_intr_fail_ip_addr);
+	} else {
+		ret = snprintf(page, PAGE_SIZE, "%s\n",
+			       lstat->last_intr_fail_ip_addr);
+	}
 	spin_unlock(&lstat->lock);
 
-	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+	return ret;
 }
 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
 
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index de9ea32b6104..ee8f8c66248d 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -422,6 +422,35 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
 	return NULL;
 }
 
+static bool iscsit_tpg_check_network_portal(
+	struct iscsi_tiqn *tiqn,
+	struct __kernel_sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	struct iscsi_np *np;
+	bool match = false;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+		spin_lock(&tpg->tpg_np_lock);
+		list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+			np = tpg_np->tpg_np;
+
+			match = iscsit_check_np_match(sockaddr, np,
+						network_transport);
+			if (match == true)
+				break;
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return match;
+}
+
 struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	struct iscsi_portal_group *tpg,
 	struct __kernel_sockaddr_storage *sockaddr,
@@ -432,6 +461,16 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
 	struct iscsi_np *np;
 	struct iscsi_tpg_np *tpg_np;
 
+	if (!tpg_np_parent) {
+		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+				network_transport) == true) {
+			pr_err("Network Portal: %s already exists on a"
+				" different TPG on %s\n", ip_str,
+				tpg->tpg_tiqn->tiqn);
+			return ERR_PTR(-EEXIST);
+		}
+	}
+
 	tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
 	if (!tpg_np) {
 		pr_err("Unable to allocate memory for"
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 4efb61b8d001..43b7ac6c5b1c 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -609,6 +609,9 @@ static struct target_core_dev_attrib_attribute \
 	__CONFIGFS_EATTR_RO(_name,					\
 	target_core_dev_show_attr_##_name);
 
+DEF_DEV_ATTRIB(emulate_model_alias);
+SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
+
 DEF_DEV_ATTRIB(emulate_dpo);
 SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
 
@@ -681,6 +684,7 @@ SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
 CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
 
 static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+	&target_core_dev_attrib_emulate_model_alias.attr,
 	&target_core_dev_attrib_emulate_dpo.attr,
 	&target_core_dev_attrib_emulate_fua_write.attr,
 	&target_core_dev_attrib_emulate_fua_read.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f2aa7543d20a..2e4d655471bc 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -713,6 +713,44 @@ int se_dev_set_max_write_same_len(
 	return 0;
 }
 
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+	const char *configname;
+
+	configname = config_item_name(&dev->dev_group.cg_item);
+	if (strlen(configname) >= 16) {
+		pr_warn("dev[%p]: Backstore name '%s' is too long for "
+			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
+			configname);
+	}
+	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
+}
+
+int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
+{
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to change model alias"
+			" while export_count is %d\n",
+			dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	if (flag != 0 && flag != 1) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (flag) {
+		dev_set_t10_wwn_model_alias(dev);
+	} else {
+		strncpy(&dev->t10_wwn.model[0],
+			dev->transport->inquiry_prod, 16);
+	}
+	dev->dev_attrib.emulate_model_alias = flag;
+
+	return 0;
+}
+
 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
 	if (flag != 0 && flag != 1) {
@@ -772,6 +810,12 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("emulate_write_cache not supported for pSCSI\n");
 		return -EINVAL;
 	}
+	if (dev->transport->get_write_cache) {
+		pr_warn("emulate_write_cache cannot be changed when underlying"
+			" HW reports WriteCacheEnabled, ignoring request\n");
+		return 0;
+	}
+
 	dev->dev_attrib.emulate_write_cache = flag;
 	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
 			dev, dev->dev_attrib.emulate_write_cache);
@@ -1182,24 +1226,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
 
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 	struct se_portal_group *tpg,
+	struct se_node_acl *nacl,
 	u32 mapped_lun,
-	char *initiatorname,
 	int *ret)
 {
 	struct se_lun_acl *lacl;
-	struct se_node_acl *nacl;
 
-	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
 		pr_err("%s InitiatorName exceeds maximum size.\n",
 			tpg->se_tpg_tfo->get_fabric_name());
 		*ret = -EOVERFLOW;
 		return NULL;
 	}
-	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if (!nacl) {
-		*ret = -EINVAL;
-		return NULL;
-	}
 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
 	if (!lacl) {
 		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
@@ -1210,7 +1248,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
 	INIT_LIST_HEAD(&lacl->lacl_list);
 	lacl->mapped_lun = mapped_lun;
 	lacl->se_lun_nacl = nacl;
-	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+		 nacl->initiatorname);
 
 	return lacl;
 }
@@ -1390,6 +1429,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->t10_alua.t10_dev = dev;
 
 	dev->dev_attrib.da_dev = dev;
+	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
 	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
 	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
 	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index c57bbbc7a7d1..04c775cb3e65 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
 		ret = -EINVAL;
 		goto out;
 	}
+	if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n", mapped_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+		ret = -EINVAL;
+		goto out;
+	}
 
-	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
-			config_item_name(acl_ci), &ret);
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+			mapped_lun, &ret);
 	if (!lacl) {
 		ret = -EINVAL;
 		goto out;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b9c88497e8f0..ca36a38eb274 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -190,6 +190,11 @@ static int fd_configure_device(struct se_device *dev)
 
 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
 	fd_dev->fd_queue_depth = dev->queue_depth;
+	/*
+	 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
+	 * based upon struct iovec limit for vfs_writev()
+	 */
+	dev->dev_attrib.max_write_same_len = 0x1000;
 
 	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
@@ -328,6 +333,114 @@ fd_execute_sync_cache(struct se_cmd *cmd)
 	return 0;
 }
 
+static unsigned char *
+fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
+		    unsigned int len)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	unsigned int block_size = se_dev->dev_attrib.block_size;
+	unsigned int i = 0, end;
+	unsigned char *buf, *p, *kmap_buf;
+
+	buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate fd_execute_write_same buf\n");
+		return NULL;
+	}
+
+	kmap_buf = kmap(sg_page(sg)) + sg->offset;
+	if (!kmap_buf) {
+		pr_err("kmap() failed in fd_setup_write_same\n");
+		kfree(buf);
+		return NULL;
+	}
+	/*
+	 * Fill local *buf to contain multiple WRITE_SAME blocks up to
+	 * min(len, PAGE_SIZE)
+	 */
+	p = buf;
+	end = min_t(unsigned int, len, PAGE_SIZE);
+
+	while (i < end) {
+		memcpy(p, kmap_buf, block_size);
+
+		i += block_size;
+		p += block_size;
+	}
+	kunmap(sg_page(sg));
+
+	return buf;
+}
+
+static sense_reason_t
+fd_execute_write_same(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(se_dev);
+	struct file *f = fd_dev->fd_file;
+	struct scatterlist *sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+	unsigned int len, len_tmp, iov_num;
+	int i, rc;
+	unsigned char *buf;
+
+	if (!nolb) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+			" block_size: %u\n", cmd->t_data_nents, sg->length,
+			cmd->se_dev->dev_attrib.block_size);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	len = len_tmp = nolb * se_dev->dev_attrib.block_size;
+	iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
+
+	buf = fd_setup_write_same_buf(cmd, sg, len);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	iov = vzalloc(sizeof(struct iovec) * iov_num);
+	if (!iov) {
+		pr_err("Unable to allocate fd_execute_write_same iovecs\n");
+		kfree(buf);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+	/*
+	 * Map the single fabric received scatterlist block now populated
+	 * in *buf into each iovec for I/O submission.
+	 */
+	for (i = 0; i < iov_num; i++) {
+		iov[i].iov_base = buf;
+		iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
+		len_tmp -= iov[i].iov_len;
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	rc = vfs_writev(f, &iov[0], iov_num, &pos);
+	set_fs(old_fs);
+
+	vfree(iov);
+	kfree(buf);
+
+	if (rc < 0 || rc != len) {
+		pr_err("vfs_writev() returned %d for write same\n", rc);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
+	return 0;
+}
+
 static sense_reason_t
 fd_execute_rw(struct se_cmd *cmd)
 {
@@ -486,6 +599,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
 static struct sbc_ops fd_sbc_ops = {
 	.execute_rw		= fd_execute_rw,
 	.execute_sync_cache	= fd_execute_sync_cache,
+	.execute_write_same	= fd_execute_write_same,
 };
 
 static sense_reason_t
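
Aside (not part of the patch): the new fd_execute_write_same fans a single pattern buffer out across iovecs, sized by DIV_ROUND_UP(len, PAGE_SIZE), each aliasing the same buffer. A standalone sketch of that arithmetic, with illustrative sizes (4 KiB pages, 512-byte blocks):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int block_size = 512, nolb = 20;
	unsigned int len = nolb * block_size;			/* 10240 bytes */
	unsigned int iov_num = DIV_ROUND_UP(len, PAGE_SIZE);	/* 3 iovecs */
	unsigned int len_tmp = len;

	for (unsigned int i = 0; i < iov_num; i++) {
		unsigned int iov_len = len_tmp < PAGE_SIZE ? len_tmp : PAGE_SIZE;
		/* every iovec would alias the same pattern buffer */
		printf("iov[%u].iov_len = %u\n", i, iov_len);	/* 4096, 4096, 2048 */
		len_tmp -= iov_len;
	}
	return 0;
}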
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b526d23dcd4f..c73f4a950e23 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,6 +154,7 @@ static int iblock_configure_device(struct se_device *dev)
 
 	if (blk_queue_nonrot(q))
 		dev->dev_attrib.is_nonrot = 1;
+
 	return 0;
 
 out_free_bioset:
@@ -390,10 +391,19 @@ iblock_execute_unmap(struct se_cmd *cmd)
 	sense_reason_t ret = 0;
 	int dl, bd_dl, err;
 
+	/* We never set ANC_SUP */
+	if (cmd->t_task_cdb[1])
+		return TCM_INVALID_CDB_FIELD;
+
+	if (cmd->data_length == 0) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
 	if (cmd->data_length < 8) {
 		pr_warn("UNMAP parameter list length %u too small\n",
 			cmd->data_length);
-		return TCM_INVALID_PARAMETER_LIST;
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
 	}
 
 	buf = transport_kmap_data_sg(cmd);
@@ -463,7 +473,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd)
 	int rc;
 
 	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
-			spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+			sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
 	if (rc < 0) {
 		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -481,7 +491,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
 	struct bio *bio;
 	struct bio_list list;
 	sector_t block_lba = cmd->t_task_lba;
-	sector_t sectors = spc_get_write_same_sectors(cmd);
+	sector_t sectors = sbc_get_write_same_sectors(cmd);
 
 	sg = &cmd->t_data_sg[0];
 
@@ -654,20 +664,24 @@ iblock_execute_rw(struct se_cmd *cmd)
 	u32 sg_num = sgl_nents;
 	sector_t block_lba;
 	unsigned bio_cnt;
-	int rw;
+	int rw = 0;
 	int i;
 
 	if (data_direction == DMA_TO_DEVICE) {
+		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
-		 * Force data to disk if we pretend to not have a volatile
-		 * write cache, or the initiator set the Force Unit Access bit.
+		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
-		if (dev->dev_attrib.emulate_write_cache == 0 ||
-		    (dev->dev_attrib.emulate_fua_write > 0 &&
-		     (cmd->se_cmd_flags & SCF_FUA)))
-			rw = WRITE_FUA;
-		else
+		if (q->flush_flags & REQ_FUA) {
+			if (cmd->se_cmd_flags & SCF_FUA)
+				rw = WRITE_FUA;
+			else if (!(q->flush_flags & REQ_FLUSH))
+				rw = WRITE_FUA;
+		} else {
 			rw = WRITE;
+		}
 	} else {
 		rw = READ;
 	}
@@ -774,6 +788,15 @@ iblock_parse_cdb(struct se_cmd *cmd)
 	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 }
 
+bool iblock_get_write_cache(struct se_device *dev)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bd = ib_dev->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return q->flush_flags & REQ_FLUSH;
+}
+
 static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
 	.inquiry_prod		= "IBLOCK",
@@ -790,6 +813,7 @@ static struct se_subsystem_api iblock_template = {
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
 	.get_device_type	= sbc_get_device_type,
 	.get_blocks		= iblock_get_blocks,
+	.get_write_cache	= iblock_get_write_cache,
 };
 
 static int __init iblock_module_init(void)
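
Aside (not part of the patch): the flush_flags checks above follow the block-layer convention of this era — a queue advertises REQ_FLUSH when it has a volatile write cache that needs flushing, and REQ_FUA when it honors per-request forced unit access. A hedged standalone sketch of the resulting WRITE vs. WRITE_FUA decision; the REQ_* bit values and WRITE constants are redefined here purely for illustration and do not match the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define REQ_FLUSH	(1u << 0)	/* illustration: volatile write cache present */
#define REQ_FUA		(1u << 1)	/* illustration: device honors FUA writes */

enum { WRITE = 0, WRITE_FUA = 1 };

static int pick_write_rw(unsigned int flush_flags, bool scf_fua)
{
	if (flush_flags & REQ_FUA) {
		if (scf_fua)
			return WRITE_FUA;	/* initiator set the FUA bit */
		if (!(flush_flags & REQ_FLUSH))
			return WRITE_FUA;	/* no volatile cache: write through */
	}
	return WRITE;	/* rely on the cache plus a later SYNCHRONIZE_CACHE */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_write_rw(REQ_FUA, true),			/* 1 */
	       pick_write_rw(REQ_FUA | REQ_FLUSH, false),	/* 0 */
	       pick_write_rw(REQ_FUA, false));			/* 1 */
	return 0;
}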
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 93e9c1f580b0..853bab60e362 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -25,6 +25,7 @@ int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
 int	se_dev_set_unmap_granularity(struct se_device *, u32);
 int	se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
 int	se_dev_set_max_write_same_len(struct se_device *, u32);
+int	se_dev_set_emulate_model_alias(struct se_device *, int);
 int	se_dev_set_emulate_dpo(struct se_device *, int);
 int	se_dev_set_emulate_fua_write(struct se_device *, int);
 int	se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -45,7 +46,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3
 int	core_dev_del_lun(struct se_portal_group *, u32);
 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
-		u32, char *, int *);
+		struct se_node_acl *, u32, int *);
 int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
 		struct se_lun_acl *, u32, u32);
 int	core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 0457de362e68..e0b3c379aa14 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -256,10 +256,12 @@ static void rd_free_device(struct se_device *dev)
 
 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 {
-	u32 i;
 	struct rd_dev_sg_table *sg_table;
+	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
 
-	for (i = 0; i < rd_dev->sg_table_count; i++) {
+	i = page / sg_per_table;
+	if (i < rd_dev->sg_table_count) {
 		sg_table = &rd_dev->sg_table_array[i];
 		if ((sg_table->page_start_offset <= page) &&
 		    (sg_table->page_end_offset >= page))
@@ -314,7 +316,19 @@ rd_execute_rw(struct se_cmd *cmd)
 	void *rd_addr;
 
 	sg_miter_next(&m);
+	if (!(u32)m.length) {
+		pr_debug("RD[%u]: invalid sgl %p len %zu\n",
+			 dev->rd_dev_id, m.addr, m.length);
+		sg_miter_stop(&m);
+		return TCM_INCORRECT_AMOUNT_OF_DATA;
+	}
 	len = min((u32)m.length, src_len);
+	if (len > rd_size) {
+		pr_debug("RD[%u]: size underrun page %d offset %d "
+			 "size %d\n", dev->rd_dev_id,
+			 rd_page, rd_offset, rd_size);
+		len = rd_size;
+	}
 	m.consumed = len;
 
 	rd_addr = sg_virt(rd_sg) + rd_offset;
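
Aside (not part of the patch): the loop-to-division change in rd_get_sg_table above works because the ramdisk builds its sg tables with a fixed number of entries per table (RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)), so the table holding a given page can be indexed directly. A standalone sketch of the arithmetic, with illustrative sizes (the real constants live in the rd backend headers):

#include <assert.h>
#include <stdio.h>

/* Illustrative numbers only: 65536-byte table allocations and
 * 32-byte scatterlist entries give 2048 pages per table. */
#define RD_MAX_ALLOCATION_SIZE	65536
#define SG_ENTRY_SIZE		32

int main(void)
{
	unsigned int sg_per_table = RD_MAX_ALLOCATION_SIZE / SG_ENTRY_SIZE;
	unsigned int page = 5000;
	unsigned int table = page / sg_per_table;	/* direct index, no scan */

	/* table 2 covers pages [4096, 6143], so page 5000 lands there */
	assert(table == 2);
	printf("page %u -> table %u\n", page, table);
	return 0;
}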
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a664c664a31a..290230de2c53 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -105,7 +105,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 	return 0;
 }
 
-sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
 {
 	u32 num_blocks;
 
@@ -126,7 +126,7 @@ sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
 	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
 		cmd->t_task_lba + 1;
 }
-EXPORT_SYMBOL(spc_get_write_same_sectors);
+EXPORT_SYMBOL(sbc_get_write_same_sectors);
 
 static sense_reason_t
 sbc_emulate_noop(struct se_cmd *cmd)
@@ -233,7 +233,7 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
-	unsigned int sectors = spc_get_write_same_sectors(cmd);
+	unsigned int sectors = sbc_get_write_same_sectors(cmd);
 
 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
 		pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -486,7 +486,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		 */
 		if (cmd->t_task_lba || sectors) {
 			if (sbc_check_valid_sectors(cmd) < 0)
-				return TCM_INVALID_CDB_FIELD;
+				return TCM_ADDRESS_OUT_OF_RANGE;
 		}
 		cmd->execute_cmd = ops->execute_sync_cache;
 		break;
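
Aside (not part of the patch): the renamed sbc_get_write_same_sectors implements the WRITE_SAME NoLB rule visible in the hunk above — a zero block count means "write the pattern through the last LBA of the device". A standalone sketch of that rule:

#include <stdio.h>

static unsigned long long write_same_sectors(unsigned int num_blocks,
		unsigned long long last_lba, unsigned long long lba)
{
	if (num_blocks != 0)
		return num_blocks;
	return last_lba - lba + 1;	/* through the final LBA */
}

int main(void)
{
	/* explicit 100-block request vs. "rest of the device" from LBA 2097000
	 * on a disk whose last LBA is 2097151: prints 100, then 152 */
	printf("%llu\n", write_same_sectors(100, 2097151, 4096));
	printf("%llu\n", write_same_sectors(0, 2097151, 2097000));
	return 0;
}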
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 2d88f087d961..4cb667d720a7 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -66,8 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 }
 
-static sense_reason_t
-spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
@@ -104,6 +104,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 
 	return 0;
 }
+EXPORT_SYMBOL(spc_emulate_inquiry_std);
 
 /* unit serial number */
 static sense_reason_t
@@ -160,7 +161,7 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
  */
-static sense_reason_t
+sense_reason_t
 spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -404,17 +405,33 @@ check_scsi_name:
 	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
 	return 0;
 }
+EXPORT_SYMBOL(spc_emulate_evpd_83);
+
+static bool
+spc_check_dev_wce(struct se_device *dev)
+{
+	bool wce = false;
+
+	if (dev->transport->get_write_cache)
+		wce = dev->transport->get_write_cache(dev);
+	else if (dev->dev_attrib.emulate_write_cache > 0)
+		wce = true;
+
+	return wce;
+}
 
 /* Extended INQUIRY Data VPD Page */
 static sense_reason_t
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
+	struct se_device *dev = cmd->se_dev;
+
 	buf[3] = 0x3c;
 	/* Set HEADSUP, ORDSUP, SIMPSUP */
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
+	if (spc_check_dev_wce(dev))
 		buf[6] = 0x01;
 	return 0;
 }
@@ -764,7 +781,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
 	if (pc == 1)
 		goto out;
 
-	if (dev->dev_attrib.emulate_write_cache > 0)
+	if (spc_check_dev_wce(dev))
 		p[2] = 0x04; /* Write Cache Enable */
 	p[12] = 0x20; /* Disabled Read Ahead */
 
@@ -876,7 +893,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 		spc_modesense_write_protect(&buf[length], type);
 
-	if ((dev->dev_attrib.emulate_write_cache > 0) &&
+	if ((spc_check_dev_wce(dev)) &&
 	    (dev->dev_attrib.emulate_fua_write > 0))
 		spc_modesense_dpofua(&buf[length], type);
 
@@ -983,6 +1000,14 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 	int ret = 0;
 	int i;
 
+	if (!cmd->data_length) {
+		target_complete_cmd(cmd, GOOD);
+		return 0;
+	}
+
+	if (cmd->data_length < off + 2)
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
+
 	buf = transport_kmap_data_sg(cmd);
 	if (!buf)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1007,6 +1032,11 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 		goto out;
 
 check_contents:
+	if (cmd->data_length < off + length) {
+		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+		goto out;
+	}
+
 	if (memcmp(buf + off, tbuf, length))
 		ret = TCM_INVALID_PARAMETER_LIST;
 
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index c6e0293ffdb0..d0b4dd95b91e 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -331,18 +331,6 @@ static void core_tmr_drain_state_list(
 
 		fe_count = atomic_read(&cmd->t_fe_count);
 
-		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-			pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
-				" cdb: %p, t_fe_count: %d dev: %p\n", cmd,
-				fe_count, dev);
-			cmd->transport_state |= CMD_T_ABORTED;
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
-			continue;
-		}
-		pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
-			" t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
 		cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5192ac0337f7..9169d6a5d7e4 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -111,16 +111,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 	struct se_node_acl *acl;
 
 	spin_lock_irq(&tpg->acl_node_lock);
-	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-		if (!strcmp(acl->initiatorname, initiatorname) &&
-		    !acl->dynamic_node_acl) {
-			spin_unlock_irq(&tpg->acl_node_lock);
-			return acl;
-		}
-	}
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	spin_unlock_irq(&tpg->acl_node_lock);
 
-	return NULL;
+	return acl;
 }
 
 /* core_tpg_add_node_to_devs():
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bd587b70661a..2030b608136d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -907,15 +907,18 @@ int transport_dump_vpd_ident(
 
 	switch (vpd->device_identifier_code_set) {
 	case 0x01: /* Binary */
-		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+		snprintf(buf, sizeof(buf),
+			"T10 VPD Binary Device Identifier: %s\n",
 			&vpd->device_identifier[0]);
 		break;
 	case 0x02: /* ASCII */
-		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+		snprintf(buf, sizeof(buf),
+			"T10 VPD ASCII Device Identifier: %s\n",
 			&vpd->device_identifier[0]);
 		break;
 	case 0x03: /* UTF-8 */
-		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+		snprintf(buf, sizeof(buf),
+			"T10 VPD UTF-8 Device Identifier: %s\n",
 			&vpd->device_identifier[0]);
 		break;
 	default:
@@ -1514,6 +1517,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	case TCM_UNSUPPORTED_SCSI_OPCODE:
 	case TCM_INVALID_CDB_FIELD:
 	case TCM_INVALID_PARAMETER_LIST:
+	case TCM_PARAMETER_LIST_LENGTH_ERROR:
 	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
 	case TCM_UNKNOWN_MODE_PAGE:
 	case TCM_WRITE_PROTECTED:
@@ -2674,6 +2678,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 		/* INVALID FIELD IN PARAMETER LIST */
 		buffer[SPC_ASC_KEY_OFFSET] = 0x26;
 		break;
+	case TCM_PARAMETER_LIST_LENGTH_ERROR:
+		/* CURRENT ERROR */
+		buffer[0] = 0x70;
+		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* PARAMETER LIST LENGTH ERROR */
+		buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
+		break;
 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
 		/* CURRENT ERROR */
 		buffer[0] = 0x70;
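
Aside (not part of the patch): the new TCM_PARAMETER_LIST_LENGTH_ERROR case builds standard fixed-format sense data — response code 0x70 (current error), sense key ILLEGAL REQUEST (0x5), ASC 0x1a (PARAMETER LIST LENGTH ERROR), ASCQ 0. A quick standalone sketch of the resulting bytes, using the fixed-format layout from the SCSI spec (byte 2 = sense key, byte 7 = additional sense length, byte 12 = ASC, which is what the SPC_*_OFFSET constants resolve to in this tree):

#include <stdio.h>

#define ILLEGAL_REQUEST	0x05

int main(void)
{
	unsigned char buffer[18] = { 0 };

	buffer[0] = 0x70;		/* CURRENT ERROR, fixed format */
	buffer[7] = 10;			/* additional sense length */
	buffer[2] = ILLEGAL_REQUEST;	/* sense key */
	buffer[12] = 0x1a;		/* ASC: PARAMETER LIST LENGTH ERROR */

	for (int i = 0; i < 18; i++)
		printf("%02x ", buffer[i]);
	printf("\n");	/* 70 00 05 00 00 00 00 0a 00 00 00 00 1a 00 00 00 00 00 */
	return 0;
}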
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 22321cf84fbe..9951297b2427 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -47,6 +47,8 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
 #include <linux/virtio_scsi.h>
+#include <linux/llist.h>
+#include <linux/bitmap.h>
 
 #include "vhost.c"
 #include "vhost.h"
@@ -58,14 +60,20 @@ enum {
 	VHOST_SCSI_VQ_IO = 2,
 };
 
+#define VHOST_SCSI_MAX_TARGET	256
+#define VHOST_SCSI_MAX_VQ	128
+
 struct vhost_scsi {
-	struct tcm_vhost_tpg *vs_tpg;	/* Protected by vhost_scsi->dev.mutex */
+	/* Protected by vhost_scsi->dev.mutex */
+	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+	bool vs_endpoint;
+
 	struct vhost_dev dev;
-	struct vhost_virtqueue vqs[3];
+	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
-	struct list_head vs_completion_list;  /* cmd completion queue */
-	spinlock_t vs_completion_lock;        /* protects s_completion_list */
+	struct llist_head vs_completion_list; /* cmd completion queue */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -77,6 +85,12 @@ static struct workqueue_struct *tcm_vhost_workqueue;
 static DEFINE_MUTEX(tcm_vhost_mutex);
 static LIST_HEAD(tcm_vhost_list);
 
+static int iov_num_pages(struct iovec *iov)
+{
+	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
+	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
 {
 	return 1;
@@ -301,9 +315,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 
-	spin_lock_bh(&vs->vs_completion_lock);
-	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
-	spin_unlock_bh(&vs->vs_completion_lock);
+	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
 
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
@@ -347,27 +359,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
-/* Dequeue a command from the completion list */
-static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
-	struct vhost_scsi *vs)
-{
-	struct tcm_vhost_cmd *tv_cmd = NULL;
-
-	spin_lock_bh(&vs->vs_completion_lock);
-	if (list_empty(&vs->vs_completion_list)) {
-		spin_unlock_bh(&vs->vs_completion_lock);
-		return NULL;
-	}
-
-	list_for_each_entry(tv_cmd, &vs->vs_completion_list,
-			tvc_completion_list) {
-		list_del(&tv_cmd->tvc_completion_list);
-		break;
-	}
-	spin_unlock_bh(&vs->vs_completion_lock);
-	return tv_cmd;
-}
-
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
@@ -377,12 +368,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_completion_work);
+	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
+	struct virtio_scsi_cmd_resp v_rsp;
 	struct tcm_vhost_cmd *tv_cmd;
+	struct llist_node *llnode;
+	struct se_cmd *se_cmd;
+	int ret, vq;
 
-	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
-		struct virtio_scsi_cmd_resp v_rsp;
-		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
-		int ret;
+	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
+	llnode = llist_del_all(&vs->vs_completion_list);
+	while (llnode) {
+		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+				tvc_completion_list);
+		llnode = llist_next(llnode);
+		se_cmd = &tv_cmd->tvc_se_cmd;
 
 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
@@ -395,15 +394,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
 		       v_rsp.sense_len);
 		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
-		if (likely(ret == 0))
-			vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
-		else
+		if (likely(ret == 0)) {
+			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
+			vq = tv_cmd->tvc_vq - vs->vqs;
+			__set_bit(vq, signal);
+		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
 		vhost_scsi_free_cmd(tv_cmd);
 	}
 
-	vhost_signal(&vs->dev, &vs->vqs[2]);
+	vq = -1;
+	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
+		< VHOST_SCSI_MAX_VQ)
+		vhost_signal(&vs->dev, &vs->vqs[vq]);
 }
 
 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
@@ -426,7 +430,6 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
 		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
 	tv_cmd->tvc_tag = v_req->tag;
 	tv_cmd->tvc_task_attr = v_req->task_attr;
 	tv_cmd->tvc_exp_data_len = exp_data_len;
@@ -442,40 +445,47 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
-	unsigned int sgl_count, void __user *ptr, size_t len, int write)
+	unsigned int sgl_count, struct iovec *iov, int write)
 {
+	unsigned int npages = 0, pages_nr, offset, nbytes;
 	struct scatterlist *sg = sgl;
-	unsigned int npages = 0;
-	int ret;
+	void __user *ptr = iov->iov_base;
+	size_t len = iov->iov_len;
+	struct page **pages;
+	int ret, i;
 
-	while (len > 0) {
-		struct page *page;
-		unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
-		unsigned int nbytes = min_t(unsigned int,
-				PAGE_SIZE - offset, len);
+	pages_nr = iov_num_pages(iov);
+	if (pages_nr > sgl_count)
+		return -ENOBUFS;
 
-		if (npages == sgl_count) {
-			ret = -ENOBUFS;
-			goto err;
-		}
+	pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
 
-		ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
-		BUG_ON(ret == 0); /* we should either get our page or fail */
-		if (ret < 0)
-			goto err;
+	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+	/* No pages were pinned */
+	if (ret < 0)
+		goto out;
+	/* Less pages pinned than wanted */
+	if (ret != pages_nr) {
+		for (i = 0; i < ret; i++)
+			put_page(pages[i]);
+		ret = -EFAULT;
+		goto out;
+	}
 
-		sg_set_page(sg, page, nbytes, offset);
+	while (len > 0) {
+		offset = (uintptr_t)ptr & ~PAGE_MASK;
+		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
+		sg_set_page(sg, pages[npages], nbytes, offset);
 		ptr += nbytes;
 		len -= nbytes;
 		sg++;
 		npages++;
 	}
-	return npages;
 
-err:
-	/* Put pages that we hold */
-	for (sg = sgl; sg != &sgl[npages]; sg++)
-		put_page(sg_page(sg));
+out:
+	kfree(pages);
 	return ret;
 }
 
@@ -491,11 +501,9 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 	 * Find out how long sglist needs to be
 	 */
 	sgl_count = 0;
-	for (i = 0; i < niov; i++) {
-		sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
-			     PAGE_SIZE - 1) >> PAGE_SHIFT) -
-			     ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
-	}
+	for (i = 0; i < niov; i++)
+		sgl_count += iov_num_pages(&iov[i]);
+
 	/* TODO overflow checking */
 
 	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
@@ -510,8 +518,7 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 
 	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
 	for (i = 0; i < niov; i++) {
-		ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
-					iov[i].iov_len, write);
+		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
 		if (ret < 0) {
 			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
@@ -563,19 +570,19 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	}
 }
 
-static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
+	struct vhost_virtqueue *vq)
 {
-	struct vhost_virtqueue *vq = &vs->vqs[2];
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tv_tpg;
 	struct tcm_vhost_cmd *tv_cmd;
 	u32 exp_data_len, data_first, data_num, data_direction;
 	unsigned out, in, i;
 	int head, ret;
+	u8 target;
 
 	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	tv_tpg = vs->vs_tpg;
-	if (unlikely(!tv_tpg))
+	if (unlikely(!vs->vs_endpoint))
 		return;
 
 	mutex_lock(&vq->mutex);
@@ -643,6 +650,28 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
 			break;
 		}
 
+		/* Extract the tpgt */
+		target = v_req.lun[1];
+		tv_tpg = vs->vs_tpg[target];
+
+		/* Target does not exist, fail the request */
+		if (unlikely(!tv_tpg)) {
+			struct virtio_scsi_cmd_resp __user *resp;
+			struct virtio_scsi_cmd_resp rsp;
+
+			memset(&rsp, 0, sizeof(rsp));
+			rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+			resp = vq->iov[out].iov_base;
+			ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+			if (!ret)
+				vhost_add_used_and_signal(&vs->dev,
+							  vq, head, 0);
+			else
+				pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+			continue;
+		}
+
 		exp_data_len = 0;
 		for (i = 0; i < data_num; i++)
 			exp_data_len += vq->iov[data_first + i].iov_len;
@@ -658,6 +687,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
 			": %d\n", tv_cmd, exp_data_len, data_direction);
 
 		tv_cmd->tvc_vhost = vs;
+		tv_cmd->tvc_vq = vq;
 
 		if (unlikely(vq->iov[out].iov_len !=
 		    sizeof(struct virtio_scsi_cmd_resp))) {
@@ -738,7 +768,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 						poll.work);
 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
 
-	vhost_scsi_handle_vq(vs);
+	vhost_scsi_handle_vq(vs, vq);
 }
 
 /*
@@ -751,7 +781,8 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
-	int index;
+	bool match = false;
+	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
@@ -762,7 +793,6 @@ static int vhost_scsi_set_endpoint(
762 return -EFAULT; 793 return -EFAULT;
763 } 794 }
764 } 795 }
765 mutex_unlock(&vs->dev.mutex);
766 796
767 mutex_lock(&tcm_vhost_mutex); 797 mutex_lock(&tcm_vhost_mutex);
768 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { 798 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
@@ -777,30 +807,33 @@ static int vhost_scsi_set_endpoint(
777 } 807 }
778 tv_tport = tv_tpg->tport; 808 tv_tport = tv_tpg->tport;
779 809
780 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && 810 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
781 (tv_tpg->tport_tpgt == t->vhost_tpgt)) { 811 if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
782 tv_tpg->tv_tpg_vhost_count++;
783 mutex_unlock(&tv_tpg->tv_tpg_mutex);
784 mutex_unlock(&tcm_vhost_mutex);
785
786 mutex_lock(&vs->dev.mutex);
787 if (vs->vs_tpg) {
788 mutex_unlock(&vs->dev.mutex);
789 mutex_lock(&tv_tpg->tv_tpg_mutex);
790 tv_tpg->tv_tpg_vhost_count--;
791 mutex_unlock(&tv_tpg->tv_tpg_mutex); 812 mutex_unlock(&tv_tpg->tv_tpg_mutex);
813 mutex_unlock(&tcm_vhost_mutex);
814 mutex_unlock(&vs->dev.mutex);
792 return -EEXIST; 815 return -EEXIST;
793 } 816 }
794 817 tv_tpg->tv_tpg_vhost_count++;
795 vs->vs_tpg = tv_tpg; 818 vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
796 smp_mb__after_atomic_inc(); 819 smp_mb__after_atomic_inc();
797 mutex_unlock(&vs->dev.mutex); 820 match = true;
798 return 0;
799 } 821 }
800 mutex_unlock(&tv_tpg->tv_tpg_mutex); 822 mutex_unlock(&tv_tpg->tv_tpg_mutex);
801 } 823 }
802 mutex_unlock(&tcm_vhost_mutex); 824 mutex_unlock(&tcm_vhost_mutex);
803 return -EINVAL; 825
826 if (match) {
827 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
828 sizeof(vs->vs_vhost_wwpn));
829 vs->vs_endpoint = true;
830 ret = 0;
831 } else {
832 ret = -EEXIST;
833 }
834
835 mutex_unlock(&vs->dev.mutex);
836 return ret;
804} 837}
805 838
806static int vhost_scsi_clear_endpoint( 839static int vhost_scsi_clear_endpoint(
@@ -809,7 +842,8 @@ static int vhost_scsi_clear_endpoint(
809{ 842{
810 struct tcm_vhost_tport *tv_tport; 843 struct tcm_vhost_tport *tv_tport;
811 struct tcm_vhost_tpg *tv_tpg; 844 struct tcm_vhost_tpg *tv_tpg;
812 int index, ret; 845 int index, ret, i;
846 u8 target;
813 847
814 mutex_lock(&vs->dev.mutex); 848 mutex_lock(&vs->dev.mutex);
815 /* Verify that ring has been setup correctly. */ 849 /* Verify that ring has been setup correctly. */
@@ -819,27 +853,32 @@ static int vhost_scsi_clear_endpoint(
819 goto err; 853 goto err;
820 } 854 }
821 } 855 }
856 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
857 target = i;
822 858
823 if (!vs->vs_tpg) { 859 tv_tpg = vs->vs_tpg[target];
824 ret = -ENODEV; 860 if (!tv_tpg)
825 goto err; 861 continue;
826 } 862
827 tv_tpg = vs->vs_tpg; 863 tv_tport = tv_tpg->tport;
828 tv_tport = tv_tpg->tport; 864 if (!tv_tport) {
829 865 ret = -ENODEV;
830 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || 866 goto err;
831 (tv_tpg->tport_tpgt != t->vhost_tpgt)) { 867 }
832 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" 868
833 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", 869 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
834 tv_tport->tport_name, tv_tpg->tport_tpgt, 870 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
835 t->vhost_wwpn, t->vhost_tpgt); 871 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
836 ret = -EINVAL; 872 tv_tport->tport_name, tv_tpg->tport_tpgt,
837 goto err; 873 t->vhost_wwpn, t->vhost_tpgt);
874 ret = -EINVAL;
875 goto err;
876 }
877 tv_tpg->tv_tpg_vhost_count--;
878 vs->vs_tpg[target] = NULL;
879 vs->vs_endpoint = false;
838 } 880 }
839 tv_tpg->tv_tpg_vhost_count--;
840 vs->vs_tpg = NULL;
841 mutex_unlock(&vs->dev.mutex); 881 mutex_unlock(&vs->dev.mutex);
842
843 return 0; 882 return 0;
844 883
845err: 884err:
@@ -850,20 +889,19 @@ err:
850static int vhost_scsi_open(struct inode *inode, struct file *f) 889static int vhost_scsi_open(struct inode *inode, struct file *f)
851{ 890{
852 struct vhost_scsi *s; 891 struct vhost_scsi *s;
853 int r; 892 int r, i;
854 893
855 s = kzalloc(sizeof(*s), GFP_KERNEL); 894 s = kzalloc(sizeof(*s), GFP_KERNEL);
856 if (!s) 895 if (!s)
857 return -ENOMEM; 896 return -ENOMEM;
858 897
859 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); 898 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
860 INIT_LIST_HEAD(&s->vs_completion_list);
861 spin_lock_init(&s->vs_completion_lock);
862 899
863 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; 900 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
864 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; 901 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
865 s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick; 902 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
866 r = vhost_dev_init(&s->dev, s->vqs, 3); 903 s->vqs[i].handle_kick = vhost_scsi_handle_kick;
904 r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
867 if (r < 0) { 905 if (r < 0) {
868 kfree(s); 906 kfree(s);
869 return r; 907 return r;
@@ -876,16 +914,12 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
876static int vhost_scsi_release(struct inode *inode, struct file *f) 914static int vhost_scsi_release(struct inode *inode, struct file *f)
877{ 915{
878 struct vhost_scsi *s = f->private_data; 916 struct vhost_scsi *s = f->private_data;
917 struct vhost_scsi_target t;
879 918
880 if (s->vs_tpg && s->vs_tpg->tport) { 919 mutex_lock(&s->dev.mutex);
881 struct vhost_scsi_target backend; 920 memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
882 921 mutex_unlock(&s->dev.mutex);
883 memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name, 922 vhost_scsi_clear_endpoint(s, &t);
884 sizeof(backend.vhost_wwpn));
885 backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
886 vhost_scsi_clear_endpoint(s, &backend);
887 }
888
889 vhost_dev_stop(&s->dev); 923 vhost_dev_stop(&s->dev);
890 vhost_dev_cleanup(&s->dev, false); 924 vhost_dev_cleanup(&s->dev, false);
891 kfree(s); 925 kfree(s);
@@ -899,9 +933,10 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
899 933
900static void vhost_scsi_flush(struct vhost_scsi *vs) 934static void vhost_scsi_flush(struct vhost_scsi *vs)
901{ 935{
902 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL); 936 int i;
903 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT); 937
904 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO); 938 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
939 vhost_scsi_flush_vq(vs, i);
905} 940}
906 941
907static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) 942static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
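vhost_scsi_handle_vq() now indexes the per-device vs->vs_tpg[] array with the target byte of the request's virtio-scsi LUN field (v_req.lun[1]), and fails unknown targets with VIRTIO_SCSI_S_BAD_TARGET instead of assuming a single endpoint. A small sketch of that single-level LUN decode, assuming the standard virtio-scsi encoding (byte 0 is 1, byte 1 is the target, bytes 2-3 carry the LUN with 0x4000 set); the helper names are illustrative, not from the patch:

/* Decode an 8-byte virtio-scsi LUN the way the new lookup consumes it. */
#include <stdint.h>
#include <stdio.h>

struct lun_addr {
	uint8_t  target;	/* indexes vs->vs_tpg[] in the driver */
	uint16_t lun;		/* 14-bit logical unit number */
};

static int decode_virtio_scsi_lun(const uint8_t lun[8], struct lun_addr *out)
{
	if (lun[0] != 1)	/* only single-level addressing handled */
		return -1;
	out->target = lun[1];	/* the byte read as v_req.lun[1] above */
	out->lun = ((lun[2] << 8) | lun[3]) & 0x3fff;
	return 0;
}

int main(void)
{
	const uint8_t raw[8] = { 1, 5, 0x40, 0x02, 0, 0, 0, 0 };
	struct lun_addr a;

	if (!decode_virtio_scsi_lun(raw, &a))
		printf("target %u, lun %u\n", a.target, a.lun);	/* 5, 2 */
	return 0;
}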
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 7e87c63ecbcd..1d2ae7a60e11 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -23,6 +23,8 @@ struct tcm_vhost_cmd {
23 struct virtio_scsi_cmd_resp __user *tvc_resp; 23 struct virtio_scsi_cmd_resp __user *tvc_resp;
24 /* Pointer to vhost_scsi for our device */ 24 /* Pointer to vhost_scsi for our device */
25 struct vhost_scsi *tvc_vhost; 25 struct vhost_scsi *tvc_vhost;
26 /* Pointer to vhost_virtqueue for the cmd */
27 struct vhost_virtqueue *tvc_vq;
26 /* Pointer to vhost nexus memory */ 28 /* Pointer to vhost nexus memory */
27 struct tcm_vhost_nexus *tvc_nexus; 29 struct tcm_vhost_nexus *tvc_nexus;
28 /* The TCM I/O descriptor that is accessed via container_of() */ 30 /* The TCM I/O descriptor that is accessed via container_of() */
@@ -34,7 +36,7 @@ struct tcm_vhost_cmd {
34 /* Sense buffer that will be mapped into outgoing status */ 36 /* Sense buffer that will be mapped into outgoing status */
35 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; 37 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
36 /* Completed commands list, serviced from vhost worker thread */ 38 /* Completed commands list, serviced from vhost worker thread */
37 struct list_head tvc_completion_list; 39 struct llist_node tvc_completion_list;
38}; 40};
39 41
40struct tcm_vhost_nexus { 42struct tcm_vhost_nexus {
@@ -93,9 +95,11 @@ struct tcm_vhost_tport {
93 * 95 *
94 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + 96 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
95 * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage 97 * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
 98 * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target.
 99 * All the targets under vhost_wwpn can be seen and used by guest.
96 */ 100 */
97 101
98#define VHOST_SCSI_ABI_VERSION 0 102#define VHOST_SCSI_ABI_VERSION 1
99 103
100struct vhost_scsi_target { 104struct vhost_scsi_target {
101 int abi_version; 105 int abi_version;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 507910992c59..b128c20770bc 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -35,6 +35,7 @@ struct se_subsystem_api {
35 u32 (*get_device_type)(struct se_device *); 35 u32 (*get_device_type)(struct se_device *);
36 sector_t (*get_blocks)(struct se_device *); 36 sector_t (*get_blocks)(struct se_device *);
37 unsigned char *(*get_sense_buffer)(struct se_cmd *); 37 unsigned char *(*get_sense_buffer)(struct se_cmd *);
38 bool (*get_write_cache)(struct se_device *);
38}; 39};
39 40
40struct sbc_ops { 41struct sbc_ops {
@@ -52,11 +53,13 @@ void target_complete_cmd(struct se_cmd *, u8);
52 53
53sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); 54sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
54sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); 55sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
55sector_t spc_get_write_same_sectors(struct se_cmd *cmd); 56sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
57sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
56 58
57sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops); 59sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
58u32 sbc_get_device_rev(struct se_device *dev); 60u32 sbc_get_device_rev(struct se_device *dev);
59u32 sbc_get_device_type(struct se_device *dev); 61u32 sbc_get_device_type(struct se_device *dev);
62sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
60 63
61void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); 64void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
62int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); 65int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
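se_subsystem_api gains an optional get_write_cache() method so a backend can report whether a volatile write cache is active. A hypothetical wiring is sketched below; struct foo_dev and FOO_WCE are invented for illustration, and only the callback signature (bool (*)(struct se_device *)) comes from the header change above:

/* Sketch: a backend implementing the new ->get_write_cache() hook. */
struct foo_dev {
	struct se_device dev;		/* se_device embedded backend-side */
	unsigned int flags;
#define FOO_WCE 0x01
};

static bool foo_get_write_cache(struct se_device *dev)
{
	struct foo_dev *fdev = container_of(dev, struct foo_dev, dev);

	return fdev->flags & FOO_WCE;	/* true => advertise WCE=1 */
}

static struct se_subsystem_api foo_template = {
	.name			= "foo",
	/* ...mandatory backend callbacks elided... */
	.get_write_cache	= foo_get_write_cache,
};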
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 663e34a5383f..c4af592f7057 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -44,7 +44,7 @@
44/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ 44/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
45#define TG_PT_GROUP_NAME_BUF 256 45#define TG_PT_GROUP_NAME_BUF 256
46/* Used to parse VPD into struct t10_vpd */ 46/* Used to parse VPD into struct t10_vpd */
47#define VPD_TMP_BUF_SIZE 128 47#define VPD_TMP_BUF_SIZE 254
48/* Used by transport_generic_cmd_sequencer() */ 48/* Used by transport_generic_cmd_sequencer() */
49#define READ_BLOCK_LEN 6 49#define READ_BLOCK_LEN 6
50#define READ_CAP_LEN 8 50#define READ_CAP_LEN 8
@@ -75,6 +75,8 @@
75#define DA_MAX_WRITE_SAME_LEN 0 75#define DA_MAX_WRITE_SAME_LEN 0
76/* Default max transfer length */ 76/* Default max transfer length */
77#define DA_FABRIC_MAX_SECTORS 8192 77#define DA_FABRIC_MAX_SECTORS 8192
78/* Use a model alias based on the configfs backend device name */
79#define DA_EMULATE_MODEL_ALIAS 0
78/* Emulation for Direct Page Out */ 80/* Emulation for Direct Page Out */
79#define DA_EMULATE_DPO 0 81#define DA_EMULATE_DPO 0
80/* Emulation for Forced Unit Access WRITEs */ 82/* Emulation for Forced Unit Access WRITEs */
@@ -193,6 +195,7 @@ enum tcm_sense_reason_table {
193 TCM_RESERVATION_CONFLICT = R(0x10), 195 TCM_RESERVATION_CONFLICT = R(0x10),
194 TCM_ADDRESS_OUT_OF_RANGE = R(0x11), 196 TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
195 TCM_OUT_OF_RESOURCES = R(0x12), 197 TCM_OUT_OF_RESOURCES = R(0x12),
198 TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13),
196#undef R 199#undef R
197}; 200};
198 201
@@ -211,7 +214,6 @@ enum tcm_tmreq_table {
211 TMR_LUN_RESET = 5, 214 TMR_LUN_RESET = 5,
212 TMR_TARGET_WARM_RESET = 6, 215 TMR_TARGET_WARM_RESET = 6,
213 TMR_TARGET_COLD_RESET = 7, 216 TMR_TARGET_COLD_RESET = 7,
214 TMR_FABRIC_TMR = 255,
215}; 217};
216 218
217/* fabric independent task management response values */ 219/* fabric independent task management response values */
@@ -592,6 +594,7 @@ struct se_dev_entry {
592}; 594};
593 595
594struct se_dev_attrib { 596struct se_dev_attrib {
597 int emulate_model_alias;
595 int emulate_dpo; 598 int emulate_dpo;
596 int emulate_fua_write; 599 int emulate_fua_write;
597 int emulate_fua_read; 600 int emulate_fua_read;
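The new emulate_model_alias attribute slots in ahead of the existing emulate_* knobs in se_dev_attrib, and the target_core_configfs.c change in the diffstat suggests it is exposed alongside its neighbours. A sketch of flipping it from userspace, assuming the usual configfs attrib path; the HBA and device names are placeholders:

/* Enable emulate_model_alias for one backstore via configfs. */
#include <stdio.h>

static int set_model_alias(const char *hba, const char *dev, int on)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/config/target/core/%s/%s/attrib/emulate_model_alias",
		 hba, dev);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", on);	/* 1: derive INQUIRY model from configfs name */
	return fclose(f);
}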