path: root/drivers/target
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/iscsi_target.c            |   1
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c   |   4
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c       |   2
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c      |  16
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c |  45
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c       | 274
-rw-r--r--  drivers/target/target_core_cdb.c               |  92
-rw-r--r--  drivers/target/target_core_device.c            |  48
-rw-r--r--  drivers/target/target_core_fabric_configfs.c   |   2
-rw-r--r--  drivers/target/target_core_pr.c                |   8
-rw-r--r--  drivers/target/target_core_rd.c                |  24
-rw-r--r--  drivers/target/target_core_tpg.c               |  64
-rw-r--r--  drivers/target/target_core_transport.c         | 215
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h                 |  12
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c                |  90
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c               |  13
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c                 |  62
17 files changed, 374 insertions(+), 598 deletions(-)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c24fb10de60b..6a4ea29c2f36 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(
 	case 0:
 		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
 			hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
-		return 0;
 	case ISCSI_FLAG_SNACK_TYPE_STATUS:
 		return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
 			hdr->begrun, hdr->runlength);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index f095e65b1ccf..f1643dbf6a92 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
 				ISCSI_TCP);
 	if (IS_ERR(tpg_np)) {
 		iscsit_put_tpg(tpg);
-		return ERR_PTR(PTR_ERR(tpg_np));
+		return ERR_CAST(tpg_np);
 	}
 	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
 
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
 
 	tiqn = iscsit_add_tiqn((unsigned char *)name);
 	if (IS_ERR(tiqn))
-		return ERR_PTR(PTR_ERR(tiqn));
+		return ERR_CAST(tiqn);
 	/*
 	 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
 	 */
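Editor's note: the two hunks above replace ERR_PTR(PTR_ERR(...)) with ERR_CAST(...). Both forms propagate an already-encoded error pointer, but ERR_CAST expresses the intent directly instead of decoding and re-encoding the value. A minimal userspace sketch of the idiom, using simplified stand-ins for the kernel's <linux/err.h> helpers (the struct names and add_tiqn() below are hypothetical, not the driver's API):

```c
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
/* Simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR()/ERR_CAST(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* ERR_CAST(): re-type an error pointer without decoding it first. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct tiqn { int id; };
struct wwn { int id; };

static struct tiqn the_tiqn = { .id = 1 };

static struct tiqn *add_tiqn(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : &the_tiqn;
}

static struct wwn *call_coreaddtiqn(int fail)
{
	struct tiqn *tiqn = add_tiqn(fail);

	if (IS_ERR(tiqn))
		return ERR_CAST(tiqn);	/* instead of ERR_PTR(PTR_ERR(tiqn)) */
	return NULL;			/* success path elided in this sketch */
}

int main(void)
{
	void *ret = call_coreaddtiqn(1);

	printf("IS_ERR=%d err=%ld\n", IS_ERR(ret), PTR_ERR(ret));
	return 0;
}
```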
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 980650792cf6..c4c68da3e500 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(
 	 */
 	list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
 			ooo_list) {
-		while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+		if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
 			continue;
 
 		list_add(&ooo_cmdsn->ooo_list,
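Editor's note: the while-to-if change matters because the loop body is just `continue`. With `while`, an entry whose cmdsn is lower than the new one is re-tested forever and the list walk never advances; with `if ... continue` the walk simply skips ahead to the insertion point. A small illustrative sketch of the same skip-then-insert pattern over a plain array (not the driver's list code):

```c
#include <stdio.h>

/*
 * Return the index before which new_val should be inserted to keep the
 * array ordered. "if (smaller) continue;" skips entries that stay in
 * front; a "while (smaller) continue;" here would spin on one element.
 */
static int insertion_point(const unsigned int *cmdsns, int n, unsigned int new_val)
{
	int i;

	for (i = 0; i < n; i++) {
		if (cmdsns[i] < new_val)
			continue;	/* keep scanning forward */
		return i;		/* first entry >= new_val */
	}
	return n;			/* append at the tail */
}

int main(void)
{
	unsigned int cmdsns[] = { 10, 11, 14, 15 };

	printf("insert 13 at index %d\n", insertion_point(cmdsns, 4, 13));
	return 0;
}
```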
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bcaf82f47037..daad362a93ce 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 				ISCSI_LOGIN_STATUS_TARGET_ERROR);
 			goto new_sess_out;
 		}
-#if 0
-		if (!iscsi_ntop6((const unsigned char *)
-			&sock_in6.sin6_addr.in6_u,
-			(char *)&conn->ipv6_login_ip[0],
-			IPV6_ADDRESS_SPACE)) {
-			pr_err("iscsi_ntop6() failed\n");
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_TARGET_ERROR);
-			goto new_sess_out;
-		}
-#else
-		pr_debug("Skipping iscsi_ntop6()\n");
-#endif
+		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+			&sock_in6.sin6_addr.in6_u);
+		conn->login_port = ntohs(sock_in6.sin6_port);
 	} else {
 		memset(&sock_in, 0, sizeof(struct sockaddr_in));
 
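Editor's note: the replacement relies on the kernel's `%pI6c` printk extension, which formats a struct in6_addr directly in the compressed RFC 5952 style, so the hand-rolled iscsi_ntop6() path can go. Outside the kernel the closest equivalent is inet_ntop(); a rough userspace analogue of what the new lines do (illustrative only, not the driver's code):

```c
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sockaddr_in6 sock_in6;
	char login_ip[INET6_ADDRSTRLEN];

	memset(&sock_in6, 0, sizeof(sock_in6));
	sock_in6.sin6_family = AF_INET6;
	sock_in6.sin6_port = htons(3260);	/* iSCSI default port, example value */
	inet_pton(AF_INET6, "fe80::1", &sock_in6.sin6_addr);

	/* Kernel code does: snprintf(buf, len, "%pI6c", &sin6_addr) */
	inet_ntop(AF_INET6, &sock_in6.sin6_addr, login_ip, sizeof(login_ip));
	printf("login_ip=%s login_port=%u\n", login_ip, ntohs(sock_in6.sin6_port));
	return 0;
}
```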
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 252e246cf51e..5b773160200f 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -545,13 +545,13 @@ int iscsi_copy_param_list(
 	struct iscsi_param_list *src_param_list,
 	int leading)
 {
-	struct iscsi_param *new_param = NULL, *param = NULL;
+	struct iscsi_param *param = NULL;
+	struct iscsi_param *new_param = NULL;
 	struct iscsi_param_list *param_list = NULL;
 
 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
 	if (!param_list) {
-		pr_err("Unable to allocate memory for"
-			" struct iscsi_param_list.\n");
+		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
 		goto err_out;
 	}
 	INIT_LIST_HEAD(&param_list->param_list);
@@ -567,8 +567,17 @@ int iscsi_copy_param_list(
 
 		new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
 		if (!new_param) {
-			pr_err("Unable to allocate memory for"
-				" struct iscsi_param.\n");
+			pr_err("Unable to allocate memory for struct iscsi_param.\n");
+			goto err_out;
+		}
+
+		new_param->name = kstrdup(param->name, GFP_KERNEL);
+		new_param->value = kstrdup(param->value, GFP_KERNEL);
+		if (!new_param->value || !new_param->name) {
+			kfree(new_param->value);
+			kfree(new_param->name);
+			kfree(new_param);
+			pr_err("Unable to allocate memory for parameter name/value.\n");
 			goto err_out;
 		}
 
@@ -580,32 +589,12 @@ int iscsi_copy_param_list(
 		new_param->use = param->use;
 		new_param->type_range = param->type_range;
 
-		new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
-		if (!new_param->name) {
-			pr_err("Unable to allocate memory for"
-				" parameter name.\n");
-			goto err_out;
-		}
-
-		new_param->value = kzalloc(strlen(param->value) + 1,
-				GFP_KERNEL);
-		if (!new_param->value) {
-			pr_err("Unable to allocate memory for"
-				" parameter value.\n");
-			goto err_out;
-		}
-
-		memcpy(new_param->name, param->name, strlen(param->name));
-		new_param->name[strlen(param->name)] = '\0';
-		memcpy(new_param->value, param->value, strlen(param->value));
-		new_param->value[strlen(param->value)] = '\0';
-
 		list_add_tail(&new_param->p_list, &param_list->param_list);
 	}
 
-	if (!list_empty(&param_list->param_list))
+	if (!list_empty(&param_list->param_list)) {
 		*dst_param_list = param_list;
-	else {
+	} else {
 		pr_err("No parameters allocated.\n");
 		goto err_out;
 	}
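Editor's note: the rewritten loop swaps the kzalloc-plus-memcpy-plus-NUL-terminate sequence for kstrdup(), and frees whatever was already allocated when either duplication fails. A compact userspace sketch of the same duplicate-or-unwind pattern using strdup(); the struct and values below are illustrative, not the driver's:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct param { char *name; char *value; };

/* Duplicate both strings, or free everything allocated so far and fail. */
static struct param *copy_param(const char *name, const char *value)
{
	struct param *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->name = strdup(name);	  /* kernel code uses kstrdup(..., GFP_KERNEL) */
	p->value = strdup(value);
	if (!p->name || !p->value) {
		free(p->value);	  /* free(NULL) is a no-op, just like kfree(NULL) */
		free(p->name);
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	struct param *p = copy_param("MaxRecvDataSegmentLength", "8192");

	if (p)
		printf("%s=%s\n", p->name, p->value);
	return 0;
}
```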
@@ -1441,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
 	u8 DataSequenceInOrder = 0;
 	u8 ErrorRecoveryLevel = 0, SessionType = 0;
 	u8 IFMarker = 0, OFMarker = 0;
-	u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+	u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
 	u32 FirstBurstLength = 0, MaxBurstLength = 0;
 	struct iscsi_param *param = NULL;
 
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a1acb0167902..f00137f377b2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 	if (!cmd->tmr_req) {
 		pr_err("Unable to allocate memory for"
 			" Task Management command!\n");
-		return NULL;
+		goto out;
 	}
 	/*
 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 	return cmd;
 out:
 	iscsit_release_cmd(cmd);
-	if (se_cmd)
-		transport_free_se_cmd(se_cmd);
 	return NULL;
 }
 
@@ -877,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
 }
 
 /*
880 * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
881 * array counts needed for sync and steering.
882 */
883static int iscsit_determine_sync_and_steering_counts(
884 struct iscsi_conn *conn,
885 struct iscsi_data_count *count)
886{
887 u32 length = count->data_length;
888 u32 marker, markint;
889
890 count->sync_and_steering = 1;
891
892 marker = (count->type == ISCSI_RX_DATA) ?
893 conn->of_marker : conn->if_marker;
894 markint = (count->type == ISCSI_RX_DATA) ?
895 (conn->conn_ops->OFMarkInt * 4) :
896 (conn->conn_ops->IFMarkInt * 4);
897 count->ss_iov_count = count->iov_count;
898
899 while (length > 0) {
900 if (length >= marker) {
901 count->ss_iov_count += 3;
902 count->ss_marker_count += 2;
903
904 length -= marker;
905 marker = markint;
906 } else
907 length = 0;
908 }
909
910 return 0;
911}
912
913/*
914 * Setup conn->if_marker and conn->of_marker values based upon 878 * Setup conn->if_marker and conn->of_marker values based upon
915 * the initial marker-less interval. (see iSCSI v19 A.2) 879 * the initial marker-less interval. (see iSCSI v19 A.2)
916 */ 880 */
@@ -1292,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
 	struct kvec iov;
 	u32 tx_hdr_size, data_len;
 	u32 offset = cmd->first_data_sg_off;
-	int tx_sent;
+	int tx_sent, iov_off;
 
 send_hdr:
 	tx_hdr_size = ISCSI_HDR_LEN;
@@ -1312,9 +1276,19 @@ send_hdr:
 	}
 
 	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
-	if (conn->conn_ops->DataDigest)
+	/*
+	 * Set iov_off used by padding and data digest tx_data() calls below
+	 * in order to determine proper offset into cmd->iov_data[]
+	 */
+	if (conn->conn_ops->DataDigest) {
 		data_len -= ISCSI_CRC_LEN;
-
+		if (cmd->padding)
+			iov_off = (cmd->iov_data_count - 2);
+		else
+			iov_off = (cmd->iov_data_count - 1);
+	} else {
+		iov_off = (cmd->iov_data_count - 1);
+	}
 	/*
 	 * Perform sendpage() for each page in the scatterlist
 	 */
@@ -1343,8 +1317,7 @@ send_pg:
 
 send_padding:
 	if (cmd->padding) {
-		struct kvec *iov_p =
-			&cmd->iov_data[cmd->iov_data_count-1];
+		struct kvec *iov_p = &cmd->iov_data[iov_off++];
 
 		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
 		if (cmd->padding != tx_sent) {
@@ -1358,8 +1331,7 @@ send_padding:
 
 send_datacrc:
 	if (conn->conn_ops->DataDigest) {
-		struct kvec *iov_d =
-			&cmd->iov_data[cmd->iov_data_count];
+		struct kvec *iov_d = &cmd->iov_data[iov_off];
 
 		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
 		if (ISCSI_CRC_LEN != tx_sent) {
@@ -1433,8 +1405,7 @@ static int iscsit_do_rx_data(
 	struct iscsi_data_count *count)
 {
 	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
-	u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
-	struct kvec iov[count->ss_iov_count], *iov_p;
+	struct kvec *iov_p;
 	struct msghdr msg;
 
 	if (!conn || !conn->sock || !conn->conn_ops)
@@ -1442,93 +1413,8 @@ static int iscsit_do_rx_data(
 
 	memset(&msg, 0, sizeof(struct msghdr));
 
-	if (count->sync_and_steering) {
-		int size = 0;
+	iov_p = count->iov;
+	iov_len = count->iov_count;
1447 u32 i, orig_iov_count = 0;
1448 u32 orig_iov_len = 0, orig_iov_loc = 0;
1449 u32 iov_count = 0, per_iov_bytes = 0;
1450 u32 *rx_marker, old_rx_marker = 0;
1451 struct kvec *iov_record;
1452
1453 memset(&rx_marker_val, 0,
1454 count->ss_marker_count * sizeof(u32));
1455 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1456
1457 iov_record = count->iov;
1458 orig_iov_count = count->iov_count;
1459 rx_marker = &conn->of_marker;
1460
1461 i = 0;
1462 size = data;
1463 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1464 while (size > 0) {
1465 pr_debug("rx_data: #1 orig_iov_len %u,"
1466 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1467 pr_debug("rx_data: #2 rx_marker %u, size"
1468 " %u\n", *rx_marker, size);
1469
1470 if (orig_iov_len >= *rx_marker) {
1471 iov[iov_count].iov_len = *rx_marker;
1472 iov[iov_count++].iov_base =
1473 (iov_record[orig_iov_loc].iov_base +
1474 per_iov_bytes);
1475
1476 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1477 iov[iov_count++].iov_base =
1478 &rx_marker_val[rx_marker_iov++];
1479 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1480 iov[iov_count++].iov_base =
1481 &rx_marker_val[rx_marker_iov++];
1482 old_rx_marker = *rx_marker;
1483
1484 /*
1485 * OFMarkInt is in 32-bit words.
1486 */
1487 *rx_marker = (conn->conn_ops->OFMarkInt * 4);
1488 size -= old_rx_marker;
1489 orig_iov_len -= old_rx_marker;
1490 per_iov_bytes += old_rx_marker;
1491
1492 pr_debug("rx_data: #3 new_rx_marker"
1493 " %u, size %u\n", *rx_marker, size);
1494 } else {
1495 iov[iov_count].iov_len = orig_iov_len;
1496 iov[iov_count++].iov_base =
1497 (iov_record[orig_iov_loc].iov_base +
1498 per_iov_bytes);
1499
1500 per_iov_bytes = 0;
1501 *rx_marker -= orig_iov_len;
1502 size -= orig_iov_len;
1503
1504 if (size)
1505 orig_iov_len =
1506 iov_record[++orig_iov_loc].iov_len;
1507
1508 pr_debug("rx_data: #4 new_rx_marker"
1509 " %u, size %u\n", *rx_marker, size);
1510 }
1511 }
1512 data += (rx_marker_iov * (MARKER_SIZE / 2));
1513
1514 iov_p = &iov[0];
1515 iov_len = iov_count;
1516
1517 if (iov_count > count->ss_iov_count) {
1518 pr_err("iov_count: %d, count->ss_iov_count:"
1519 " %d\n", iov_count, count->ss_iov_count);
1520 return -1;
1521 }
1522 if (rx_marker_iov > count->ss_marker_count) {
1523 pr_err("rx_marker_iov: %d, count->ss_marker"
1524 "_count: %d\n", rx_marker_iov,
1525 count->ss_marker_count);
1526 return -1;
1527 }
1528 } else {
1529 iov_p = count->iov;
1530 iov_len = count->iov_count;
1531 }
 
 	while (total_rx < data) {
 		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1543,16 +1429,6 @@ static int iscsit_do_rx_data(
 			rx_loop, total_rx, data);
 	}
 
-	if (count->sync_and_steering) {
-		int j;
-		for (j = 0; j < rx_marker_iov; j++) {
-			pr_debug("rx_data: #5 j: %d, offset: %d\n",
-				j, rx_marker_val[j]);
-			conn->of_marker_offset = rx_marker_val[j];
-		}
-		total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
-	}
-
 	return total_rx;
 }
 
@@ -1561,8 +1437,7 @@ static int iscsit_do_tx_data(
 	struct iscsi_data_count *count)
 {
 	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
-	u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
-	struct kvec iov[count->ss_iov_count], *iov_p;
+	struct kvec *iov_p;
 	struct msghdr msg;
 
 	if (!conn || !conn->sock || !conn->conn_ops)
@@ -1575,98 +1450,8 @@ static int iscsit_do_tx_data(
 
 	memset(&msg, 0, sizeof(struct msghdr));
 
-	if (count->sync_and_steering) {
-		int size = 0;
+	iov_p = count->iov;
+	iov_len = count->iov_count;
1580 u32 i, orig_iov_count = 0;
1581 u32 orig_iov_len = 0, orig_iov_loc = 0;
1582 u32 iov_count = 0, per_iov_bytes = 0;
1583 u32 *tx_marker, old_tx_marker = 0;
1584 struct kvec *iov_record;
1585
1586 memset(&tx_marker_val, 0,
1587 count->ss_marker_count * sizeof(u32));
1588 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1589
1590 iov_record = count->iov;
1591 orig_iov_count = count->iov_count;
1592 tx_marker = &conn->if_marker;
1593
1594 i = 0;
1595 size = data;
1596 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1597 while (size > 0) {
1598 pr_debug("tx_data: #1 orig_iov_len %u,"
1599 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1600 pr_debug("tx_data: #2 tx_marker %u, size"
1601 " %u\n", *tx_marker, size);
1602
1603 if (orig_iov_len >= *tx_marker) {
1604 iov[iov_count].iov_len = *tx_marker;
1605 iov[iov_count++].iov_base =
1606 (iov_record[orig_iov_loc].iov_base +
1607 per_iov_bytes);
1608
1609 tx_marker_val[tx_marker_iov] =
1610 (size - *tx_marker);
1611 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1612 iov[iov_count++].iov_base =
1613 &tx_marker_val[tx_marker_iov++];
1614 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1615 iov[iov_count++].iov_base =
1616 &tx_marker_val[tx_marker_iov++];
1617 old_tx_marker = *tx_marker;
1618
1619 /*
1620 * IFMarkInt is in 32-bit words.
1621 */
1622 *tx_marker = (conn->conn_ops->IFMarkInt * 4);
1623 size -= old_tx_marker;
1624 orig_iov_len -= old_tx_marker;
1625 per_iov_bytes += old_tx_marker;
1626
1627 pr_debug("tx_data: #3 new_tx_marker"
1628 " %u, size %u\n", *tx_marker, size);
1629 pr_debug("tx_data: #4 offset %u\n",
1630 tx_marker_val[tx_marker_iov-1]);
1631 } else {
1632 iov[iov_count].iov_len = orig_iov_len;
1633 iov[iov_count++].iov_base
1634 = (iov_record[orig_iov_loc].iov_base +
1635 per_iov_bytes);
1636
1637 per_iov_bytes = 0;
1638 *tx_marker -= orig_iov_len;
1639 size -= orig_iov_len;
1640
1641 if (size)
1642 orig_iov_len =
1643 iov_record[++orig_iov_loc].iov_len;
1644
1645 pr_debug("tx_data: #5 new_tx_marker"
1646 " %u, size %u\n", *tx_marker, size);
1647 }
1648 }
1649
1650 data += (tx_marker_iov * (MARKER_SIZE / 2));
1651
1652 iov_p = &iov[0];
1653 iov_len = iov_count;
1654
1655 if (iov_count > count->ss_iov_count) {
1656 pr_err("iov_count: %d, count->ss_iov_count:"
1657 " %d\n", iov_count, count->ss_iov_count);
1658 return -1;
1659 }
1660 if (tx_marker_iov > count->ss_marker_count) {
1661 pr_err("tx_marker_iov: %d, count->ss_marker"
1662 "_count: %d\n", tx_marker_iov,
1663 count->ss_marker_count);
1664 return -1;
1665 }
1666 } else {
1667 iov_p = count->iov;
1668 iov_len = count->iov_count;
1669 }
 
 	while (total_tx < data) {
 		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1681,9 +1466,6 @@ static int iscsit_do_tx_data(
 			tx_loop, total_tx, data);
 	}
 
-	if (count->sync_and_steering)
-		total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
-
 	return total_tx;
 }
 
@@ -1704,12 +1486,6 @@ int rx_data(
 	c.data_length = data;
 	c.type = ISCSI_RX_DATA;
 
-	if (conn->conn_ops->OFMarker &&
-	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-			return -1;
-	}
-
 	return iscsit_do_rx_data(conn, &c);
 }
 
@@ -1730,12 +1506,6 @@ int tx_data(
 	c.data_length = data;
 	c.type = ISCSI_TX_DATA;
 
-	if (conn->conn_ops->IFMarker &&
-	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
-		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
-			return -1;
-	}
-
 	return iscsit_do_tx_data(conn, &c);
 }
 
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8ae09a1bdf74..f04d4ef99dca 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,6 +24,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 
@@ -67,6 +68,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
+	struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
 	unsigned char *buf;
 
 	/*
@@ -81,9 +83,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 
 	buf = transport_kmap_first_data_page(cmd);
 
-	buf[0] = dev->transport->get_device_type(dev);
-	if (buf[0] == TYPE_TAPE)
-		buf[1] = 0x80;
+	if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
+		buf[0] = 0x3f; /* Not connected */
+	} else {
+		buf[0] = dev->transport->get_device_type(dev);
+		if (buf[0] == TYPE_TAPE)
+			buf[1] = 0x80;
+	}
 	buf[2] = dev->transport->get_device_rev(dev);
 
 	/*
@@ -149,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 	return 0;
 }
 
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+{
+	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+	unsigned char *buf = buf_off;
+	int cnt = 0, next = 1;
+	/*
+	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+	 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
+	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+	 * per device uniqueness.
+	 */
+	while (*p != '\0') {
+		if (cnt >= 13)
+			break;
+		if (!isxdigit(*p)) {
+			p++;
+			continue;
+		}
+		if (next != 0) {
+			buf[cnt++] |= hex_to_bin(*p++);
+			next = 0;
+		} else {
+			buf[cnt] = hex_to_bin(*p++) << 4;
+			next = 1;
+		}
+	}
+}
+
 /*
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
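Editor's note: the new helper walks the unit-serial string, keeps only hex digits, and packs them two per byte. Because the first output byte's high nibble is already occupied by earlier NAA designator fields, the first digit is OR'd into its low nibble, and later digits fill high nibble first. A standalone sketch of the same nibble-packing loop; the serial value and buffer layout below are illustrative assumptions, not values from the patch:

```c
#include <ctype.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's hex_to_bin() for a known hex digit. */
static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	return tolower((unsigned char)ch) - 'a' + 10;
}

/* Mirrors target_parse_naa_6h_vendor_specific(): skip non-hex characters,
 * pack at most 13 bytes, first digit into the low nibble of buf[0].
 */
static void parse_naa_6h_vendor_specific(unsigned char *buf, const char *p)
{
	int cnt = 0, next = 1;

	while (*p != '\0') {
		if (cnt >= 13)
			break;
		if (!isxdigit((unsigned char)*p)) {
			p++;			/* skip '-' and other separators */
			continue;
		}
		if (next != 0) {
			buf[cnt++] |= hex_to_bin(*p++);
			next = 0;
		} else {
			buf[cnt] = hex_to_bin(*p++) << 4;
			next = 1;
		}
	}
}

int main(void)
{
	/* Example serial in the vpd_unit_serial style (hypothetical value). */
	const char *serial = "f1e2d3c4-b5a6-9788-0011-223344556677";
	unsigned char buf[16] = { 0x60 };	/* high nibble already in use */
	int i;

	parse_naa_6h_vendor_specific(buf, serial);
	for (i = 0; i < 14; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}
```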
@@ -214,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	 * VENDOR_SPECIFIC_IDENTIFIER and
 	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
 	 */
-	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
-	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+	target_parse_naa_6h_vendor_specific(dev, &buf[off]);
 
 	len = 20;
 	off = (len + 4);
@@ -915,8 +951,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 		length += target_modesense_control(dev, &buf[offset+length]);
 		break;
 	default:
-		pr_err("Got Unknown Mode Page: 0x%02x\n",
-				cdb[2] & 0x3f);
+		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+		       cdb[2] & 0x3f, cdb[3]);
 		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
 	}
 	offset += length;
@@ -1072,8 +1108,6 @@ target_emulate_unmap(struct se_task *task)
 		size -= 16;
 	}
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
 err:
 	transport_kunmap_first_data_page(cmd);
 
@@ -1085,24 +1119,17 @@ err:
  * Note this is not used for TCM/pSCSI passthrough
  */
 static int
-target_emulate_write_same(struct se_task *task, int write_same32)
+target_emulate_write_same(struct se_task *task, u32 num_blocks)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	sector_t range;
 	sector_t lba = cmd->t_task_lba;
-	unsigned int num_blocks;
 	int ret;
 	/*
-	 * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict
-	 * range when non zero is supplied, otherwise calculate the remaining
-	 * range based on ->get_blocks() - starting LBA.
+	 * Use the explicit range when non zero is supplied, otherwise calculate
+	 * the remaining range based on ->get_blocks() - starting LBA.
 	 */
-	if (write_same32)
-		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
-	else
-		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
-
 	if (num_blocks != 0)
 		range = num_blocks;
 	else
@@ -1117,8 +1144,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 		return ret;
 	}
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
 	return 0;
 }
 
@@ -1165,13 +1190,23 @@ transport_emulate_control_cdb(struct se_task *task)
 		}
 		ret = target_emulate_unmap(task);
 		break;
+	case WRITE_SAME:
+		if (!dev->transport->do_discard) {
+			pr_err("WRITE_SAME emulation not supported"
+					" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_write_same(task,
+				get_unaligned_be16(&cmd->t_task_cdb[7]));
+		break;
 	case WRITE_SAME_16:
 		if (!dev->transport->do_discard) {
 			pr_err("WRITE_SAME_16 emulation not supported"
 					" for: %s\n", dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
-		ret = target_emulate_write_same(task, 0);
+		ret = target_emulate_write_same(task,
+				get_unaligned_be32(&cmd->t_task_cdb[10]));
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
@@ -1184,7 +1219,8 @@ transport_emulate_control_cdb(struct se_task *task)
 					dev->transport->name);
 				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 			}
-			ret = target_emulate_write_same(task, 1);
+			ret = target_emulate_write_same(task,
+					get_unaligned_be32(&cmd->t_task_cdb[28]));
 			break;
 		default:
 			pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
@@ -1219,8 +1255,14 @@ transport_emulate_control_cdb(struct se_task *task)
 
 	if (ret < 0)
 		return ret;
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	/*
+	 * Handle the successful completion here unless a caller
+	 * has explicitly requested an asynchronous completion.
+	 */
+	if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
 
 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
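Editor's note: after this hunk every emulated control CDB completes through the common tail of transport_emulate_control_cdb() unless the command carries SCF_EMULATE_CDB_ASYNC, in which case the backend completes the task later from its own context. A tiny sketch of that sync-versus-async completion split; the flag name follows the patch, the surrounding types are stand-ins:

```c
#include <stdio.h>

#define SCF_EMULATE_CDB_ASYNC	0x01	/* stand-in for the se_cmd flag */

struct task { int scsi_status; unsigned int cmd_flags; };

static void complete_task(struct task *t)
{
	t->scsi_status = 0;		/* GOOD */
	printf("completed synchronously\n");
}

/* Complete here only when nobody asked for an asynchronous completion. */
static int emulate_control_cdb(struct task *t)
{
	/* ... opcode-specific emulation would run here ... */
	if (!(t->cmd_flags & SCF_EMULATE_CDB_ASYNC))
		complete_task(t);
	else
		printf("backend will complete the task later\n");
	return 0;
}

int main(void)
{
	struct task sync_t = { .cmd_flags = 0 };
	struct task async_t = { .cmd_flags = SCF_EMULATE_CDB_ASYNC };

	emulate_control_cdb(&sync_t);
	emulate_control_cdb(&async_t);
	return 0;
}
```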
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b38b6c993e65..ca6e4a4df134 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 	struct se_dev_entry *deve;
 	u32 i;
 
-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 
 		spin_lock_irq(&nacl->device_list_lock);
 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 		}
 		spin_unlock_irq(&nacl->device_list_lock);
 
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);
 }
 
 static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
 	return ret;
 }
 
+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+	u32 tmp, aligned_max_sectors;
+	/*
+	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
+	 * transport_allocate_data_tasks() operation.
+	 */
+	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+	aligned_max_sectors = (tmp / block_size);
+	if (max_sectors != aligned_max_sectors) {
+		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+			" to %u\n", max_sectors, aligned_max_sectors);
+		return aligned_max_sectors;
+	}
+
+	return max_sectors;
+}
+
 void se_dev_set_default_attribs(
 	struct se_device *dev,
 	struct se_dev_limits *dev_limits)
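Editor's note: the new helper rounds the byte size max_sectors * block_size down to a PAGE_SIZE multiple and converts back to sectors, so the data-task allocation path always works with page-aligned I/O sizes. A quick userspace sketch of the same arithmetic, with PAGE_SIZE hard-coded to 4096 purely for the example:

```c
#include <stdio.h>

#define PAGE_SIZE	4096u
/* Same result as the kernel's rounddown() for these unsigned operands. */
#define rounddown(x, y)	((x) - ((x) % (y)))

static unsigned int dev_align_max_sectors(unsigned int max_sectors,
					  unsigned int block_size)
{
	unsigned int tmp, aligned;

	/* Largest PAGE_SIZE-aligned byte count not exceeding the request. */
	tmp = rounddown(max_sectors * block_size, PAGE_SIZE);
	aligned = tmp / block_size;
	if (max_sectors != aligned)
		printf("Rounding down aligned max_sectors from %u to %u\n",
		       max_sectors, aligned);
	return aligned;
}

int main(void)
{
	/* 1023 x 512-byte sectors = 523776 bytes -> 520192 bytes = 1016 sectors */
	printf("result: %u\n", dev_align_max_sectors(1023, 512));
	return 0;
}
```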
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
 	 * max_sectors is based on subsystem plugin dependent requirements.
 	 */
 	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+				limits->logical_block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 	/*
 	 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
 			return -EINVAL;
 		}
 	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	max_sectors = se_dev_align_max_sectors(max_sectors,
+				dev->se_sub_dev->se_dev_attrib.block_size);
 
 	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
 	 */
 	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 		struct se_node_acl *acl;
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-			if (acl->dynamic_node_acl) {
-				spin_unlock_bh(&tpg->acl_node_lock);
+			if (acl->dynamic_node_acl &&
+			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+				spin_unlock_irq(&tpg->acl_node_lock);
 				core_tpg_add_node_to_devs(acl, tpg);
-				spin_lock_bh(&tpg->acl_node_lock);
+				spin_lock_irq(&tpg->acl_node_lock);
 			}
 		}
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 	}
 
 	return lun_p;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index f1654694f4ea..55bbe0847a6d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl(
 
 	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
 	if (IS_ERR(se_nacl))
-		return ERR_PTR(PTR_ERR(se_nacl));
+		return ERR_CAST(se_nacl);
 
 	nacl_cg = &se_nacl->acl_group;
 	nacl_cg->default_groups = se_nacl->acl_default_groups;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 1c1b849cd4fb..7fd3a161f7cc 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
1598 * from the decoded fabric module specific TransportID 1598 * from the decoded fabric module specific TransportID
1599 * at *i_str. 1599 * at *i_str.
1600 */ 1600 */
1601 spin_lock_bh(&tmp_tpg->acl_node_lock); 1601 spin_lock_irq(&tmp_tpg->acl_node_lock);
1602 dest_node_acl = __core_tpg_get_initiator_node_acl( 1602 dest_node_acl = __core_tpg_get_initiator_node_acl(
1603 tmp_tpg, i_str); 1603 tmp_tpg, i_str);
1604 if (dest_node_acl) { 1604 if (dest_node_acl) {
1605 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1605 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1606 smp_mb__after_atomic_inc(); 1606 smp_mb__after_atomic_inc();
1607 } 1607 }
1608 spin_unlock_bh(&tmp_tpg->acl_node_lock); 1608 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1609 1609
1610 if (!dest_node_acl) { 1610 if (!dest_node_acl) {
1611 core_scsi3_tpg_undepend_item(tmp_tpg); 1611 core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3496,14 +3496,14 @@ after_iport_check:
3496 /* 3496 /*
3497 * Locate the destination struct se_node_acl from the received Transport ID 3497 * Locate the destination struct se_node_acl from the received Transport ID
3498 */ 3498 */
3499 spin_lock_bh(&dest_se_tpg->acl_node_lock); 3499 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3501 initiator_str); 3501 initiator_str);
3502 if (dest_node_acl) { 3502 if (dest_node_acl) {
3503 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3503 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3504 smp_mb__after_atomic_inc(); 3504 smp_mb__after_atomic_inc();
3505 } 3505 }
3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3506 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3507 3507
3508 if (!dest_node_acl) { 3508 if (!dest_node_acl) {
3509 pr_err("Unable to locate %s dest_node_acl for" 3509 pr_err("Unable to locate %s dest_node_acl for"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 3dd81d24d9a9..e567e129c697 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
390 length = req->rd_size; 390 length = req->rd_size;
391 391
392 dst = sg_virt(&sg_d[i++]) + dst_offset; 392 dst = sg_virt(&sg_d[i++]) + dst_offset;
393 if (!dst) 393 BUG_ON(!dst);
394 BUG();
395 394
396 src = sg_virt(&sg_s[j]) + src_offset; 395 src = sg_virt(&sg_s[j]) + src_offset;
397 if (!src) 396 BUG_ON(!src);
398 BUG();
399 397
400 dst_offset = 0; 398 dst_offset = 0;
401 src_offset = length; 399 src_offset = length;
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
415 length = req->rd_size; 413 length = req->rd_size;
416 414
417 dst = sg_virt(&sg_d[i]) + dst_offset; 415 dst = sg_virt(&sg_d[i]) + dst_offset;
418 if (!dst) 416 BUG_ON(!dst);
419 BUG();
420 417
421 if (sg_d[i].length == length) { 418 if (sg_d[i].length == length) {
422 i++; 419 i++;
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
425 dst_offset = length; 422 dst_offset = length;
426 423
427 src = sg_virt(&sg_s[j++]) + src_offset; 424 src = sg_virt(&sg_s[j++]) + src_offset;
428 if (!src) 425 BUG_ON(!src);
429 BUG();
430 426
431 src_offset = 0; 427 src_offset = 0;
432 page_end = 1; 428 page_end = 1;
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
510 length = req->rd_size; 506 length = req->rd_size;
511 507
512 src = sg_virt(&sg_s[i++]) + src_offset; 508 src = sg_virt(&sg_s[i++]) + src_offset;
513 if (!src) 509 BUG_ON(!src);
514 BUG();
515 510
516 dst = sg_virt(&sg_d[j]) + dst_offset; 511 dst = sg_virt(&sg_d[j]) + dst_offset;
517 if (!dst) 512 BUG_ON(!dst);
518 BUG();
519 513
520 src_offset = 0; 514 src_offset = 0;
521 dst_offset = length; 515 dst_offset = length;
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
535 length = req->rd_size; 529 length = req->rd_size;
536 530
537 src = sg_virt(&sg_s[i]) + src_offset; 531 src = sg_virt(&sg_s[i]) + src_offset;
538 if (!src) 532 BUG_ON(!src);
539 BUG();
540 533
541 if (sg_s[i].length == length) { 534 if (sg_s[i].length == length) {
542 i++; 535 i++;
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
545 src_offset = length; 538 src_offset = length;
546 539
547 dst = sg_virt(&sg_d[j++]) + dst_offset; 540 dst = sg_virt(&sg_d[j++]) + dst_offset;
548 if (!dst) 541 BUG_ON(!dst);
549 BUG();
550 542
551 dst_offset = 0; 543 dst_offset = 0;
552 page_end = 1; 544 page_end = 1;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4f1ba4c5ef11..162b736c7342 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
137{ 137{
138 struct se_node_acl *acl; 138 struct se_node_acl *acl;
139 139
140 spin_lock_bh(&tpg->acl_node_lock); 140 spin_lock_irq(&tpg->acl_node_lock);
141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
142 if (!strcmp(acl->initiatorname, initiatorname) && 142 if (!strcmp(acl->initiatorname, initiatorname) &&
143 !acl->dynamic_node_acl) { 143 !acl->dynamic_node_acl) {
144 spin_unlock_bh(&tpg->acl_node_lock); 144 spin_unlock_irq(&tpg->acl_node_lock);
145 return acl; 145 return acl;
146 } 146 }
147 } 147 }
148 spin_unlock_bh(&tpg->acl_node_lock); 148 spin_unlock_irq(&tpg->acl_node_lock);
149 149
150 return NULL; 150 return NULL;
151} 151}
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
 		return NULL;
 	}
+	/*
+	 * Here we only create demo-mode MappedLUNs from the active
+	 * TPG LUNs if the fabric is not explicitly asking for
+	 * tpg_check_demo_mode_login_only() == 1.
+	 */
+	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
+	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
+		do { ; } while (0);
+	else
+		core_tpg_add_node_to_devs(acl, tpg);
 
-	core_tpg_add_node_to_devs(acl, tpg);
-
-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
 	tpg->num_node_acls++;
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);
 
 	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
354{ 362{
355 struct se_node_acl *acl = NULL; 363 struct se_node_acl *acl = NULL;
356 364
357 spin_lock_bh(&tpg->acl_node_lock); 365 spin_lock_irq(&tpg->acl_node_lock);
358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 366 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
359 if (acl) { 367 if (acl) {
360 if (acl->dynamic_node_acl) { 368 if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 370 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 371 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 372 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
365 spin_unlock_bh(&tpg->acl_node_lock); 373 spin_unlock_irq(&tpg->acl_node_lock);
366 /* 374 /*
367 * Release the locally allocated struct se_node_acl 375 * Release the locally allocated struct se_node_acl
368 * because * core_tpg_add_initiator_node_acl() returned 376 * because * core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
378 " Node %s already exists for TPG %u, ignoring" 386 " Node %s already exists for TPG %u, ignoring"
379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 387 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 388 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
381 spin_unlock_bh(&tpg->acl_node_lock); 389 spin_unlock_irq(&tpg->acl_node_lock);
382 return ERR_PTR(-EEXIST); 390 return ERR_PTR(-EEXIST);
383 } 391 }
384 spin_unlock_bh(&tpg->acl_node_lock); 392 spin_unlock_irq(&tpg->acl_node_lock);
385 393
386 if (!se_nacl) { 394 if (!se_nacl) {
387 pr_err("struct se_node_acl pointer is NULL\n"); 395 pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
418 return ERR_PTR(-EINVAL); 426 return ERR_PTR(-EINVAL);
419 } 427 }
420 428
421 spin_lock_bh(&tpg->acl_node_lock); 429 spin_lock_irq(&tpg->acl_node_lock);
422 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 430 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
423 tpg->num_node_acls++; 431 tpg->num_node_acls++;
424 spin_unlock_bh(&tpg->acl_node_lock); 432 spin_unlock_irq(&tpg->acl_node_lock);
425 433
426done: 434done:
427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 435 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
445 struct se_session *sess, *sess_tmp; 453 struct se_session *sess, *sess_tmp;
446 int dynamic_acl = 0; 454 int dynamic_acl = 0;
447 455
448 spin_lock_bh(&tpg->acl_node_lock); 456 spin_lock_irq(&tpg->acl_node_lock);
449 if (acl->dynamic_node_acl) { 457 if (acl->dynamic_node_acl) {
450 acl->dynamic_node_acl = 0; 458 acl->dynamic_node_acl = 0;
451 dynamic_acl = 1; 459 dynamic_acl = 1;
452 } 460 }
453 list_del(&acl->acl_list); 461 list_del(&acl->acl_list);
454 tpg->num_node_acls--; 462 tpg->num_node_acls--;
455 spin_unlock_bh(&tpg->acl_node_lock); 463 spin_unlock_irq(&tpg->acl_node_lock);
456 464
457 spin_lock_bh(&tpg->session_lock); 465 spin_lock_bh(&tpg->session_lock);
458 list_for_each_entry_safe(sess, sess_tmp, 466 list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
503 struct se_node_acl *acl; 511 struct se_node_acl *acl;
504 int dynamic_acl = 0; 512 int dynamic_acl = 0;
505 513
506 spin_lock_bh(&tpg->acl_node_lock); 514 spin_lock_irq(&tpg->acl_node_lock);
507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 515 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
508 if (!acl) { 516 if (!acl) {
509 pr_err("Access Control List entry for %s Initiator" 517 pr_err("Access Control List entry for %s Initiator"
510 " Node %s does not exists for TPG %hu, ignoring" 518 " Node %s does not exists for TPG %hu, ignoring"
511 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 519 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 520 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
513 spin_unlock_bh(&tpg->acl_node_lock); 521 spin_unlock_irq(&tpg->acl_node_lock);
514 return -ENODEV; 522 return -ENODEV;
515 } 523 }
516 if (acl->dynamic_node_acl) { 524 if (acl->dynamic_node_acl) {
517 acl->dynamic_node_acl = 0; 525 acl->dynamic_node_acl = 0;
518 dynamic_acl = 1; 526 dynamic_acl = 1;
519 } 527 }
520 spin_unlock_bh(&tpg->acl_node_lock); 528 spin_unlock_irq(&tpg->acl_node_lock);
521 529
522 spin_lock_bh(&tpg->session_lock); 530 spin_lock_bh(&tpg->session_lock);
523 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 531 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
533 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 541 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
534 spin_unlock_bh(&tpg->session_lock); 542 spin_unlock_bh(&tpg->session_lock);
535 543
536 spin_lock_bh(&tpg->acl_node_lock); 544 spin_lock_irq(&tpg->acl_node_lock);
537 if (dynamic_acl) 545 if (dynamic_acl)
538 acl->dynamic_node_acl = 1; 546 acl->dynamic_node_acl = 1;
539 spin_unlock_bh(&tpg->acl_node_lock); 547 spin_unlock_irq(&tpg->acl_node_lock);
540 return -EEXIST; 548 return -EEXIST;
541 } 549 }
542 /* 550 /*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
571 if (init_sess) 579 if (init_sess)
572 tpg->se_tpg_tfo->close_session(init_sess); 580 tpg->se_tpg_tfo->close_session(init_sess);
573 581
574 spin_lock_bh(&tpg->acl_node_lock); 582 spin_lock_irq(&tpg->acl_node_lock);
575 if (dynamic_acl) 583 if (dynamic_acl)
576 acl->dynamic_node_acl = 1; 584 acl->dynamic_node_acl = 1;
577 spin_unlock_bh(&tpg->acl_node_lock); 585 spin_unlock_irq(&tpg->acl_node_lock);
578 return -EINVAL; 586 return -EINVAL;
579 } 587 }
580 spin_unlock_bh(&tpg->session_lock); 588 spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 598 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
591 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 599 tpg->se_tpg_tfo->tpg_get_tag(tpg));
592 600
593 spin_lock_bh(&tpg->acl_node_lock); 601 spin_lock_irq(&tpg->acl_node_lock);
594 if (dynamic_acl) 602 if (dynamic_acl)
595 acl->dynamic_node_acl = 1; 603 acl->dynamic_node_acl = 1;
596 spin_unlock_bh(&tpg->acl_node_lock); 604 spin_unlock_irq(&tpg->acl_node_lock);
597 605
598 return 0; 606 return 0;
599} 607}
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
717 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 725 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
718 * in transport_deregister_session(). 726 * in transport_deregister_session().
719 */ 727 */
720 spin_lock_bh(&se_tpg->acl_node_lock); 728 spin_lock_irq(&se_tpg->acl_node_lock);
721 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 729 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
722 acl_list) { 730 acl_list) {
723 list_del(&nacl->acl_list); 731 list_del(&nacl->acl_list);
724 se_tpg->num_node_acls--; 732 se_tpg->num_node_acls--;
725 spin_unlock_bh(&se_tpg->acl_node_lock); 733 spin_unlock_irq(&se_tpg->acl_node_lock);
726 734
727 core_tpg_wait_for_nacl_pr_ref(nacl); 735 core_tpg_wait_for_nacl_pr_ref(nacl);
728 core_free_device_list_for_node(nacl, se_tpg); 736 core_free_device_list_for_node(nacl, se_tpg);
729 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 737 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
730 738
731 spin_lock_bh(&se_tpg->acl_node_lock); 739 spin_lock_irq(&se_tpg->acl_node_lock);
732 } 740 }
733 spin_unlock_bh(&se_tpg->acl_node_lock); 741 spin_unlock_irq(&se_tpg->acl_node_lock);
734 742
735 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 743 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
736 core_tpg_release_virtual_lun0(se_tpg); 744 core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 89760329d5d0..a4b0a8d27f25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess)
389{ 389{
390 struct se_portal_group *se_tpg = se_sess->se_tpg; 390 struct se_portal_group *se_tpg = se_sess->se_tpg;
391 struct se_node_acl *se_nacl; 391 struct se_node_acl *se_nacl;
392 unsigned long flags;
392 393
393 if (!se_tpg) { 394 if (!se_tpg) {
394 transport_free_session(se_sess); 395 transport_free_session(se_sess);
395 return; 396 return;
396 } 397 }
397 398
398 spin_lock_bh(&se_tpg->session_lock); 399 spin_lock_irqsave(&se_tpg->session_lock, flags);
399 list_del(&se_sess->sess_list); 400 list_del(&se_sess->sess_list);
400 se_sess->se_tpg = NULL; 401 se_sess->se_tpg = NULL;
401 se_sess->fabric_sess_ptr = NULL; 402 se_sess->fabric_sess_ptr = NULL;
402 spin_unlock_bh(&se_tpg->session_lock); 403 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
403 404
404 /* 405 /*
405 * Determine if we need to do extra work for this initiator node's 406 * Determine if we need to do extra work for this initiator node's
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess)
407 */ 408 */
408 se_nacl = se_sess->se_node_acl; 409 se_nacl = se_sess->se_node_acl;
409 if (se_nacl) { 410 if (se_nacl) {
410 spin_lock_bh(&se_tpg->acl_node_lock); 411 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
411 if (se_nacl->dynamic_node_acl) { 412 if (se_nacl->dynamic_node_acl) {
412 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 413 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
413 se_tpg)) { 414 se_tpg)) {
414 list_del(&se_nacl->acl_list); 415 list_del(&se_nacl->acl_list);
415 se_tpg->num_node_acls--; 416 se_tpg->num_node_acls--;
416 spin_unlock_bh(&se_tpg->acl_node_lock); 417 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
417 418
418 core_tpg_wait_for_nacl_pr_ref(se_nacl); 419 core_tpg_wait_for_nacl_pr_ref(se_nacl);
419 core_free_device_list_for_node(se_nacl, se_tpg); 420 core_free_device_list_for_node(se_nacl, se_tpg);
420 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 421 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
421 se_nacl); 422 se_nacl);
422 spin_lock_bh(&se_tpg->acl_node_lock); 423 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
423 } 424 }
424 } 425 }
425 spin_unlock_bh(&se_tpg->acl_node_lock); 426 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
426 } 427 }
427 428
428 transport_free_session(se_sess); 429 transport_free_session(se_sess);
@@ -976,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
 {
 	struct se_device *dev = container_of(work, struct se_device,
 					qf_work_queue);
+	LIST_HEAD(qf_cmd_list);
 	struct se_cmd *cmd, *cmd_tmp;
 
 	spin_lock_irq(&dev->qf_cmd_lock);
-	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+	spin_unlock_irq(&dev->qf_cmd_lock);
 
+	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
 		list_del(&cmd->se_qf_node);
 		atomic_dec(&dev->dev_qf_count);
 		smp_mb__after_atomic_dec();
-		spin_unlock_irq(&dev->qf_cmd_lock);
 
 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -996,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
 		 * has been added to head of queue
 		 */
 		transport_add_cmd_to_queue(cmd, cmd->t_state);
-
-		spin_lock_irq(&dev->qf_cmd_lock);
 	}
-	spin_unlock_irq(&dev->qf_cmd_lock);
 }
 
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
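Editor's note: the reworked target_qf_do_work() takes the whole queue-full list off the device in one step with list_splice_init() while holding the lock, then processes the private list without qf_cmd_lock, instead of dropping and re-taking the lock around every entry. A small sketch of that drain-then-process pattern with a mutex and a simple singly linked list standing in for the spinlock and list_head (illustrative only, not the driver code):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qf_cmd { int id; struct qf_cmd *next; };

static pthread_mutex_t qf_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qf_cmd *qf_list;		/* shared queue-full list head */

static void qf_add(int id)
{
	struct qf_cmd *c = malloc(sizeof(*c));

	c->id = id;
	pthread_mutex_lock(&qf_lock);
	c->next = qf_list;
	qf_list = c;
	pthread_mutex_unlock(&qf_lock);
}

/* Drain the shared list in one locked step, then work on it unlocked
 * (the kernel version does the same with list_splice_init()).
 */
static void qf_do_work(void)
{
	struct qf_cmd *local, *c;

	pthread_mutex_lock(&qf_lock);
	local = qf_list;
	qf_list = NULL;
	pthread_mutex_unlock(&qf_lock);

	while ((c = local)) {
		local = c->next;
		printf("requeueing cmd %d\n", c->id);
		free(c);
	}
}

int main(void)
{
	qf_add(1);
	qf_add(2);
	qf_do_work();
	return 0;
}
```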
@@ -2053,8 +2053,14 @@ static void transport_generic_request_failure(
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		break;
 	}
-
-	if (!sc)
+	/*
+	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
+	 * make the call to transport_send_check_condition_and_sense()
+	 * directly. Otherwise expect the fabric to make the call to
+	 * transport_send_check_condition_and_sense() after handling
+	 * possible unsolicited write data payloads.
+	 */
+	if (!sc && !cmd->se_tfo->new_cmd_map)
 		transport_new_cmd_failure(cmd);
 	else {
 		ret = transport_send_check_condition_and_sense(cmd,
@@ -2847,12 +2853,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2847 " transport_dev_end_lba(): %llu\n", 2853 " transport_dev_end_lba(): %llu\n",
2848 cmd->t_task_lba, sectors, 2854 cmd->t_task_lba, sectors,
2849 transport_dev_end_lba(dev)); 2855 transport_dev_end_lba(dev));
2850 pr_err(" We should return CHECK_CONDITION" 2856 return -EINVAL;
2851 " but we don't yet\n");
2852 return 0;
2853 } 2857 }
2854 2858
2855 return sectors; 2859 return 0;
2860}
2861
2862static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2863{
2864 /*
2865 * Determine if the received WRITE_SAME is used to for direct
2866 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2867 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2868 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
2869 */
2870 int passthrough = (dev->transport->transport_type ==
2871 TRANSPORT_PLUGIN_PHBA_PDEV);
2872
2873 if (!passthrough) {
2874 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2875 pr_err("WRITE_SAME PBDATA and LBDATA"
2876 " bits not supported for Block Discard"
2877 " Emulation\n");
2878 return -ENOSYS;
2879 }
2880 /*
2881 * Currently for the emulated case we only accept
2882 * tpws with the UNMAP=1 bit set.
2883 */
2884 if (!(flags[0] & 0x08)) {
2885 pr_err("WRITE_SAME w/o UNMAP bit not"
2886 " supported for Block Discard Emulation\n");
2887 return -ENOSYS;
2888 }
2889 }
2890
2891 return 0;
2856} 2892}
2857 2893
2858/* transport_generic_cmd_sequencer(): 2894/* transport_generic_cmd_sequencer():
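The new target_check_write_same_discard() helper keys off three bits of the WRITE SAME flags byte: LBDATA (0x02), PBDATA (0x04) and UNMAP (0x08). When the backend emulates the discard (anything other than TCM/pSCSI passthrough), PBDATA/LBDATA are rejected and UNMAP=1 is required. A userspace-style sketch of the same flag check; only the bit values and return codes mirror the patch, the surrounding names are illustrative:

#include <errno.h>
#include <stdio.h>

#define WS_LBDATA 0x02
#define WS_PBDATA 0x04
#define WS_UNMAP  0x08

static int check_write_same_flags(unsigned char flags, int passthrough)
{
	if (passthrough)
		return 0;	/* pSCSI passthrough: let the device interpret the CDB */

	if (flags & (WS_PBDATA | WS_LBDATA)) {
		fprintf(stderr, "PBDATA/LBDATA not supported for discard emulation\n");
		return -ENOSYS;
	}
	if (!(flags & WS_UNMAP)) {
		fprintf(stderr, "WRITE SAME without UNMAP=1 not supported\n");
		return -ENOSYS;
	}
	return 0;
}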
@@ -3065,7 +3101,7 @@ static int transport_generic_cmd_sequencer(
3065 goto out_unsupported_cdb; 3101 goto out_unsupported_cdb;
3066 3102
3067 if (sectors) 3103 if (sectors)
3068 size = transport_get_size(sectors, cdb, cmd); 3104 size = transport_get_size(1, cdb, cmd);
3069 else { 3105 else {
3070 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 3106 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3071 " supported\n"); 3107 " supported\n");
@@ -3075,27 +3111,9 @@ static int transport_generic_cmd_sequencer(
3075 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3111 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3076 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3112 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3077 3113
3078 /* 3114 if (target_check_write_same_discard(&cdb[10], dev) < 0)
3079 * Skip the remaining assignments for TCM/PSCSI passthrough
3080 */
3081 if (passthrough)
3082 break;
3083
3084 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3085 pr_err("WRITE_SAME PBDATA and LBDATA"
3086 " bits not supported for Block Discard"
3087 " Emulation\n");
3088 goto out_invalid_cdb_field; 3115 goto out_invalid_cdb_field;
3089 } 3116
3090 /*
3091 * Currently for the emulated case we only accept
3092 * tpws with the UNMAP=1 bit set.
3093 */
3094 if (!(cdb[10] & 0x08)) {
3095 pr_err("WRITE_SAME w/o UNMAP bit not"
3096 " supported for Block Discard Emulation\n");
3097 goto out_invalid_cdb_field;
3098 }
3099 break; 3117 break;
3100 default: 3118 default:
3101 pr_err("VARIABLE_LENGTH_CMD service action" 3119 pr_err("VARIABLE_LENGTH_CMD service action"
@@ -3330,10 +3348,12 @@ static int transport_generic_cmd_sequencer(
3330 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; 3348 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3331 /* 3349 /*
3332 * Check to ensure that LBA + Range does not exceed past end of 3350 * Check to ensure that LBA + Range does not exceed past end of
3333 * device. 3351 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3334 */ 3352 */
3335 if (!transport_cmd_get_valid_sectors(cmd)) 3353 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3336 goto out_invalid_cdb_field; 3354 if (transport_cmd_get_valid_sectors(cmd) < 0)
3355 goto out_invalid_cdb_field;
3356 }
3337 break; 3357 break;
3338 case UNMAP: 3358 case UNMAP:
3339 size = get_unaligned_be16(&cdb[7]); 3359 size = get_unaligned_be16(&cdb[7]);
@@ -3345,40 +3365,38 @@ static int transport_generic_cmd_sequencer(
3345 goto out_unsupported_cdb; 3365 goto out_unsupported_cdb;
3346 3366
3347 if (sectors) 3367 if (sectors)
3348 size = transport_get_size(sectors, cdb, cmd); 3368 size = transport_get_size(1, cdb, cmd);
3349 else { 3369 else {
3350 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3370 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3351 goto out_invalid_cdb_field; 3371 goto out_invalid_cdb_field;
3352 } 3372 }
3353 3373
3354 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 3374 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3355 passthrough = (dev->transport->transport_type == 3375 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3356 TRANSPORT_PLUGIN_PHBA_PDEV); 3376
3357 /* 3377 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3358 * Determine if the received WRITE_SAME_16 is used to for direct 3378 goto out_invalid_cdb_field;
3359 * passthrough into Linux/SCSI with struct request via TCM/pSCSI 3379 break;
3360 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 3380 case WRITE_SAME:
3361 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and 3381 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3362 * TCM/FILEIO subsystem plugin backstores. 3382 if (sector_ret)
3363 */ 3383 goto out_unsupported_cdb;
3364 if (!passthrough) { 3384
3365 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3385 if (sectors)
3366 pr_err("WRITE_SAME PBDATA and LBDATA" 3386 size = transport_get_size(1, cdb, cmd);
3367 " bits not supported for Block Discard" 3387 else {
3368 " Emulation\n"); 3388 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3369 goto out_invalid_cdb_field; 3389 goto out_invalid_cdb_field;
3370 }
3371 /*
3372 * Currently for the emulated case we only accept
3373 * tpws with the UNMAP=1 bit set.
3374 */
3375 if (!(cdb[1] & 0x08)) {
3376 pr_err("WRITE_SAME w/o UNMAP bit not "
3377 " supported for Block Discard Emulation\n");
3378 goto out_invalid_cdb_field;
3379 }
3380 } 3390 }
3391
3392 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3381 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3393 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3394 /*
3395 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3396 * of byte 1 bit 3 UNMAP instead of original reserved field
3397 */
3398 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3399 goto out_invalid_cdb_field;
3382 break; 3400 break;
3383 case ALLOW_MEDIUM_REMOVAL: 3401 case ALLOW_MEDIUM_REMOVAL:
3384 case GPCMD_CLOSE_TRACK: 3402 case GPCMD_CLOSE_TRACK:
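The two cases above read the starting LBA and the flags byte from different CDB offsets: WRITE_SAME_16 carries a 64-bit LBA at bytes 2-9, WRITE_SAME(10) a 32-bit LBA at bytes 2-5, and both keep the UNMAP/PBDATA/LBDATA bits in byte 1 (the sbc3r26 reference in the new comment). A hedged decoding sketch using the same get_unaligned helpers; the struct and function names are illustrative:

#include <asm/unaligned.h>	/* get_unaligned_be32/be64 */

struct ws_req {
	unsigned long long lba;
	unsigned char flags;
};

static void decode_write_same_16(const unsigned char *cdb, struct ws_req *r)
{
	r->lba   = get_unaligned_be64(&cdb[2]);	/* bytes 2..9 */
	r->flags = cdb[1];			/* UNMAP=0x08, PBDATA=0x04, LBDATA=0x02 */
}

static void decode_write_same_10(const unsigned char *cdb, struct ws_req *r)
{
	r->lba   = get_unaligned_be32(&cdb[2]);	/* bytes 2..5 */
	r->flags = cdb[1];
}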
@@ -3873,9 +3891,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3873static int transport_new_cmd_obj(struct se_cmd *cmd) 3891static int transport_new_cmd_obj(struct se_cmd *cmd)
3874{ 3892{
3875 struct se_device *dev = cmd->se_dev; 3893 struct se_device *dev = cmd->se_dev;
3876 u32 task_cdbs; 3894 int set_counts = 1, rc, task_cdbs;
3877 u32 rc;
3878 int set_counts = 1;
3879 3895
3880 /* 3896 /*
3881 * Setup any BIDI READ tasks and memory from 3897 * Setup any BIDI READ tasks and memory from
@@ -3893,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3893 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3909 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3894 cmd->scsi_sense_reason = 3910 cmd->scsi_sense_reason =
3895 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3911 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3896 return PYX_TRANSPORT_LU_COMM_FAILURE; 3912 return -EINVAL;
3897 } 3913 }
3898 atomic_inc(&cmd->t_fe_count); 3914 atomic_inc(&cmd->t_fe_count);
3899 atomic_inc(&cmd->t_se_count); 3915 atomic_inc(&cmd->t_se_count);
@@ -3912,7 +3928,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3912 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3928 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3913 cmd->scsi_sense_reason = 3929 cmd->scsi_sense_reason =
3914 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3930 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3915 return PYX_TRANSPORT_LU_COMM_FAILURE; 3931 return -EINVAL;
3916 } 3932 }
3917 3933
3918 if (set_counts) { 3934 if (set_counts) {
@@ -4028,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4028 if (!task->task_sg) 4044 if (!task->task_sg)
4029 continue; 4045 continue;
4030 4046
4031 BUG_ON(!task->task_padded_sg);
4032
4033 if (!sg_first) { 4047 if (!sg_first) {
4034 sg_first = task->task_sg; 4048 sg_first = task->task_sg;
4035 chained_nents = task->task_sg_nents; 4049 chained_nents = task->task_sg_nents;
@@ -4037,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4037 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4051 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4038 chained_nents += task->task_sg_nents; 4052 chained_nents += task->task_sg_nents;
4039 } 4053 }
4054 /*
4055 * For the padded tasks, use the extra SGL vector allocated
4056 * in transport_allocate_data_tasks() for the sg_prev_nents
4057 * offset into sg_chain() above.. The last task of a
4058 * multi-task list, or a single task will not have
 4059 * task->task_padded_sg set..
4060 */
4061 if (task->task_padded_sg)
4062 sg_prev_nents = (task->task_sg_nents + 1);
4063 else
4064 sg_prev_nents = task->task_sg_nents;
4040 4065
4041 sg_prev = task->task_sg; 4066 sg_prev = task->task_sg;
4042 sg_prev_nents = task->task_sg_nents;
4043 } 4067 }
4044 /* 4068 /*
4045 * Setup the starting pointer and total t_tasks_sg_linked_no including 4069 * Setup the starting pointer and total t_tasks_sg_linked_no including
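transport_do_task_sg_chain() now only advances sg_prev_nents past the padding entry when task->task_padded_sg was set at allocation time; that reserved trailing slot is what sg_chain() turns into the link to the next task's table. A minimal sketch of chaining two scatterlist tables through such a reserved entry (kernel scatterlist API; the surrounding names are hypothetical):

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Allocate an SGL with one extra slot so it can later be chained. */
static struct scatterlist *alloc_chainable_sgl(unsigned int nents)
{
	struct scatterlist *sg;

	sg = kcalloc(nents + 1, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;
	sg_init_table(sg, nents + 1);
	return sg;
}

static void chain_sgls(struct scatterlist *prev, unsigned int prev_nents_padded,
		       struct scatterlist *next)
{
	/* The last (padded) entry of prev becomes a chain link pointing at next. */
	sg_chain(prev, prev_nents_padded, next);
}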
@@ -4091,7 +4115,7 @@ static int transport_allocate_data_tasks(
4091 4115
4092 cmd_sg = sgl; 4116 cmd_sg = sgl;
4093 for (i = 0; i < task_count; i++) { 4117 for (i = 0; i < task_count; i++) {
4094 unsigned int task_size; 4118 unsigned int task_size, task_sg_nents_padded;
4095 int count; 4119 int count;
4096 4120
4097 task = transport_generic_get_task(cmd, data_direction); 4121 task = transport_generic_get_task(cmd, data_direction);
@@ -4110,30 +4134,33 @@ static int transport_allocate_data_tasks(
4110 4134
4111 /* Update new cdb with updated lba/sectors */ 4135 /* Update new cdb with updated lba/sectors */
4112 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); 4136 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
4113 4137 /*
4138 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
4139 * in order to calculate the number per task SGL entries
4140 */
4141 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
4114 /* 4142 /*
4115 * Check if the fabric module driver is requesting that all 4143 * Check if the fabric module driver is requesting that all
4116 * struct se_task->task_sg[] be chained together.. If so, 4144 * struct se_task->task_sg[] be chained together.. If so,
4117 * then allocate an extra padding SG entry for linking and 4145 * then allocate an extra padding SG entry for linking and
4118 * marking the end of the chained SGL. 4146 * marking the end of the chained SGL for every task except
4119 * Possibly over-allocate task sgl size by using cmd sgl size. 4147 * the last one for (task_count > 1) operation, or skipping
4120 * It's so much easier and only a waste when task_count > 1. 4148 * the extra padding for the (task_count == 1) case.
4121 * That is extremely rare.
4122 */ 4149 */
4123 task->task_sg_nents = sgl_nents; 4150 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
4124 if (cmd->se_tfo->task_sg_chaining) { 4151 task_sg_nents_padded = (task->task_sg_nents + 1);
4125 task->task_sg_nents++;
4126 task->task_padded_sg = 1; 4152 task->task_padded_sg = 1;
4127 } 4153 } else
4154 task_sg_nents_padded = task->task_sg_nents;
4128 4155
4129 task->task_sg = kmalloc(sizeof(struct scatterlist) * 4156 task->task_sg = kmalloc(sizeof(struct scatterlist) *
4130 task->task_sg_nents, GFP_KERNEL); 4157 task_sg_nents_padded, GFP_KERNEL);
4131 if (!task->task_sg) { 4158 if (!task->task_sg) {
4132 cmd->se_dev->transport->free_task(task); 4159 cmd->se_dev->transport->free_task(task);
4133 return -ENOMEM; 4160 return -ENOMEM;
4134 } 4161 }
4135 4162
4136 sg_init_table(task->task_sg, task->task_sg_nents); 4163 sg_init_table(task->task_sg, task_sg_nents_padded);
4137 4164
4138 task_size = task->task_size; 4165 task_size = task->task_size;
4139 4166
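transport_allocate_data_tasks() now sizes each per-task SGL from the task length, assuming the command scatterlist is built from PAGE_SIZE chunks, and reserves the extra chaining slot for every task except the last when the fabric requests task_sg_chaining. A sketch of that sizing decision (only DIV_ROUND_UP and PAGE_SIZE are real kernel symbols here):

#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/mm.h>		/* PAGE_SIZE */
#include <linux/types.h>

static unsigned int task_sgl_entries(unsigned int task_size, bool chain_to_next)
{
	/* One entry per PAGE_SIZE chunk of the task's data ... */
	unsigned int nents = DIV_ROUND_UP(task_size, PAGE_SIZE);

	/* ... plus one reserved slot when this table must chain to a successor. */
	return chain_to_next ? nents + 1 : nents;
}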
@@ -4230,10 +4257,13 @@ static u32 transport_allocate_tasks(
4230 struct scatterlist *sgl, 4257 struct scatterlist *sgl,
4231 unsigned int sgl_nents) 4258 unsigned int sgl_nents)
4232{ 4259{
4233 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) 4260 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4261 if (transport_cmd_get_valid_sectors(cmd) < 0)
4262 return -EINVAL;
4263
4234 return transport_allocate_data_tasks(cmd, lba, data_direction, 4264 return transport_allocate_data_tasks(cmd, lba, data_direction,
4235 sgl, sgl_nents); 4265 sgl, sgl_nents);
4236 else 4266 } else
4237 return transport_allocate_control_task(cmd); 4267 return transport_allocate_control_task(cmd);
4238 4268
4239} 4269}
@@ -4726,6 +4756,13 @@ int transport_send_check_condition_and_sense(
4726 */ 4756 */
4727 switch (reason) { 4757 switch (reason) {
4728 case TCM_NON_EXISTENT_LUN: 4758 case TCM_NON_EXISTENT_LUN:
4759 /* CURRENT ERROR */
4760 buffer[offset] = 0x70;
4761 /* ILLEGAL REQUEST */
4762 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4763 /* LOGICAL UNIT NOT SUPPORTED */
4764 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4765 break;
4729 case TCM_UNSUPPORTED_SCSI_OPCODE: 4766 case TCM_UNSUPPORTED_SCSI_OPCODE:
4730 case TCM_SECTOR_COUNT_TOO_MANY: 4767 case TCM_SECTOR_COUNT_TOO_MANY:
4731 /* CURRENT ERROR */ 4768 /* CURRENT ERROR */
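The new TCM_NON_EXISTENT_LUN case builds fixed-format sense data: response code 0x70 (current error), sense key ILLEGAL REQUEST at byte 2, and additional sense code 0x25 at byte 12, which SPC defines as LOGICAL UNIT NOT SUPPORTED. A standalone sketch of the same layout; the macro names below are made up for illustration:

#include <string.h>
#include <stddef.h>

#define SENSE_RESPONSE_CODE_CURRENT	0x70
#define SENSE_KEY_ILLEGAL_REQUEST	0x05
#define ASC_LOGICAL_UNIT_NOT_SUPPORTED	0x25

/* Fixed-format sense data: byte 0 response code, byte 2 sense key,
 * byte 7 additional sense length, byte 12 ASC, byte 13 ASCQ. */
static void build_lun_not_supported_sense(unsigned char *sense, size_t len)
{
	memset(sense, 0, len);
	sense[0]  = SENSE_RESPONSE_CODE_CURRENT;
	sense[2]  = SENSE_KEY_ILLEGAL_REQUEST;
	sense[7]  = 0x0a;			/* additional sense length */
	sense[12] = ASC_LOGICAL_UNIT_NOT_SUPPORTED;
	sense[13] = 0x00;			/* ASCQ */
}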
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index bd4fe21a23b8..3749d8b4b423 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -98,8 +98,7 @@ struct ft_tpg {
98 struct list_head list; /* linkage in ft_lport_acl tpg_list */ 98 struct list_head list; /* linkage in ft_lport_acl tpg_list */
99 struct list_head lun_list; /* head of LUNs */ 99 struct list_head lun_list; /* head of LUNs */
100 struct se_portal_group se_tpg; 100 struct se_portal_group se_tpg;
101 struct task_struct *thread; /* processing thread */ 101 struct workqueue_struct *workqueue;
102 struct se_queue_obj qobj; /* queue for processing thread */
103}; 102};
104 103
105struct ft_lport_acl { 104struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
110 struct se_wwn fc_lport_wwn; 109 struct se_wwn fc_lport_wwn;
111}; 110};
112 111
113enum ft_cmd_state {
114 FC_CMD_ST_NEW = 0,
115 FC_CMD_ST_REJ
116};
117
118/* 112/*
119 * Commands 113 * Commands
120 */ 114 */
121struct ft_cmd { 115struct ft_cmd {
122 enum ft_cmd_state state;
123 u32 lun; /* LUN from request */ 116 u32 lun; /* LUN from request */
124 struct ft_sess *sess; /* session held for cmd */ 117 struct ft_sess *sess; /* session held for cmd */
125 struct fc_seq *seq; /* sequence in exchange mgr */ 118 struct fc_seq *seq; /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
127 struct fc_frame *req_frame; 120 struct fc_frame *req_frame;
128 unsigned char *cdb; /* pointer to CDB inside frame */ 121 unsigned char *cdb; /* pointer to CDB inside frame */
129 u32 write_data_len; /* data received on writes */ 122 u32 write_data_len; /* data received on writes */
130 struct se_queue_req se_req; 123 struct work_struct work;
131 /* Local sense buffer */ 124 /* Local sense buffer */
132 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; 125 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
133 u32 was_ddp_setup:1; /* Set only if ddp is setup */ 126 u32 was_ddp_setup:1; /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
177/* 170/*
178 * other internal functions. 171 * other internal functions.
179 */ 172 */
180int ft_thread(void *);
181void ft_recv_req(struct ft_sess *, struct fc_frame *); 173void ft_recv_req(struct ft_sess *, struct fc_frame *);
182struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 174struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
183struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); 175struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 5654dc22f7ae..80fbcde00cb6 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
62 int count; 62 int count;
63 63
64 se_cmd = &cmd->se_cmd; 64 se_cmd = &cmd->se_cmd;
65 pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 65 pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
66 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); 66 caller, cmd, cmd->sess, cmd->seq, se_cmd);
67 pr_debug("%s: cmd %p cdb %p\n", 67 pr_debug("%s: cmd %p cdb %p\n",
68 caller, cmd, cmd->cdb); 68 caller, cmd, cmd->cdb);
69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
91} 91}
92 92
93static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
94{
95 struct ft_tpg *tpg = sess->tport->tpg;
96 struct se_queue_obj *qobj = &tpg->qobj;
97 unsigned long flags;
98
99 qobj = &sess->tport->tpg->qobj;
100 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
101 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
102 atomic_inc(&qobj->queue_cnt);
103 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
104
105 wake_up_process(tpg->thread);
106}
107
108static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
109{
110 unsigned long flags;
111 struct se_queue_req *qr;
112
113 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
114 if (list_empty(&qobj->qobj_list)) {
115 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
116 return NULL;
117 }
118 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
119 list_del(&qr->qr_list);
120 atomic_dec(&qobj->queue_cnt);
121 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
122 return container_of(qr, struct ft_cmd, se_req);
123}
124
125static void ft_free_cmd(struct ft_cmd *cmd) 93static void ft_free_cmd(struct ft_cmd *cmd)
126{ 94{
127 struct fc_frame *fp; 95 struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
282 250
283int ft_get_cmd_state(struct se_cmd *se_cmd) 251int ft_get_cmd_state(struct se_cmd *se_cmd)
284{ 252{
285 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 253 return 0;
286
287 return cmd->state;
288} 254}
289 255
290int ft_is_state_remove(struct se_cmd *se_cmd) 256int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
505 return 0; 471 return 0;
506} 472}
507 473
474static void ft_send_work(struct work_struct *work);
475
508/* 476/*
509 * Handle incoming FCP command. 477 * Handle incoming FCP command.
510 */ 478 */
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
523 goto busy; 491 goto busy;
524 } 492 }
525 cmd->req_frame = fp; /* hold frame during cmd */ 493 cmd->req_frame = fp; /* hold frame during cmd */
526 ft_queue_cmd(sess, cmd); 494
495 INIT_WORK(&cmd->work, ft_send_work);
496 queue_work(sess->tport->tpg->workqueue, &cmd->work);
527 return; 497 return;
528 498
529busy: 499busy:
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
563/* 533/*
564 * Send new command to target. 534 * Send new command to target.
565 */ 535 */
566static void ft_send_cmd(struct ft_cmd *cmd) 536static void ft_send_work(struct work_struct *work)
567{ 537{
538 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
568 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 539 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
569 struct se_cmd *se_cmd; 540 struct se_cmd *se_cmd;
570 struct fcp_cmnd *fcp; 541 struct fcp_cmnd *fcp;
571 int data_dir; 542 int data_dir = 0;
572 u32 data_len; 543 u32 data_len;
573 int task_attr; 544 int task_attr;
574 int ret; 545 int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
675err: 646err:
676 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 647 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
677} 648}
678
679/*
680 * Handle request in the command thread.
681 */
682static void ft_exec_req(struct ft_cmd *cmd)
683{
684 pr_debug("cmd state %x\n", cmd->state);
685 switch (cmd->state) {
686 case FC_CMD_ST_NEW:
687 ft_send_cmd(cmd);
688 break;
689 default:
690 break;
691 }
692}
693
694/*
695 * Processing thread.
696 * Currently one thread per tpg.
697 */
698int ft_thread(void *arg)
699{
700 struct ft_tpg *tpg = arg;
701 struct se_queue_obj *qobj = &tpg->qobj;
702 struct ft_cmd *cmd;
703
704 while (!kthread_should_stop()) {
705 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
706 if (kthread_should_stop())
707 goto out;
708
709 cmd = ft_dequeue_cmd(qobj);
710 if (cmd)
711 ft_exec_req(cmd);
712 }
713
714out:
715 return 0;
716}
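The tfc_cmd.c changes drop the per-tpg kthread and se_queue_obj in favour of a work_struct embedded in each ft_cmd: ft_recv_cmd() does INIT_WORK() plus queue_work() on the tpg workqueue, and ft_send_work() recovers the command with container_of(). A minimal sketch of that dispatch pattern with hypothetical my_cmd/my_recv names:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_cmd {
	struct work_struct work;	/* embedded, one per command */
	/* ... per-command state ... */
};

static void my_handler(struct work_struct *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	/* process cmd in process context, then release it */
	kfree(cmd);
}

static int my_recv(struct workqueue_struct *wq)
{
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;
	INIT_WORK(&cmd->work, my_handler);
	queue_work(wq, &cmd->work);	/* my_handler() runs on the workqueue */
	return 0;
}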
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8781d1e423df..8fa39b74f22c 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
256 struct se_portal_group *se_tpg = &tpg->se_tpg; 256 struct se_portal_group *se_tpg = &tpg->se_tpg;
257 struct se_node_acl *se_acl; 257 struct se_node_acl *se_acl;
258 258
259 spin_lock_bh(&se_tpg->acl_node_lock); 259 spin_lock_irq(&se_tpg->acl_node_lock);
260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
262 pr_debug("acl %p port_name %llx\n", 262 pr_debug("acl %p port_name %llx\n",
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
270 break; 270 break;
271 } 271 }
272 } 272 }
273 spin_unlock_bh(&se_tpg->acl_node_lock); 273 spin_unlock_irq(&se_tpg->acl_node_lock);
274 return found; 274 return found;
275} 275}
276 276
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
327 tpg->index = index; 327 tpg->index = index;
328 tpg->lport_acl = lacl; 328 tpg->lport_acl = lacl;
329 INIT_LIST_HEAD(&tpg->lun_list); 329 INIT_LIST_HEAD(&tpg->lun_list);
330 transport_init_queue_obj(&tpg->qobj);
331 330
332 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 331 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
333 tpg, TRANSPORT_TPG_TYPE_NORMAL); 332 tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
336 return NULL; 335 return NULL;
337 } 336 }
338 337
339 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); 338 tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
340 if (IS_ERR(tpg->thread)) { 339 if (!tpg->workqueue) {
341 kfree(tpg); 340 kfree(tpg);
342 return NULL; 341 return NULL;
343 } 342 }
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
356 pr_debug("del tpg %s\n", 355 pr_debug("del tpg %s\n",
357 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 356 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
358 357
359 kthread_stop(tpg->thread); 358 destroy_workqueue(tpg->workqueue);
360 359
361 /* Wait for sessions to be freed thru RCU, for BUG_ON below */ 360 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
362 synchronize_rcu(); 361 synchronize_rcu();
@@ -655,9 +654,7 @@ static void __exit ft_exit(void)
655 synchronize_rcu(); 654 synchronize_rcu();
656} 655}
657 656
658#ifdef MODULE
659MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); 657MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
660MODULE_LICENSE("GPL"); 658MODULE_LICENSE("GPL");
661module_init(ft_init); 659module_init(ft_init);
662module_exit(ft_exit); 660module_exit(ft_exit);
663#endif /* MODULE */
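ft_add_tpg() now pairs alloc_workqueue("tcm_fc", 0, 1) with destroy_workqueue() in ft_del_tpg(); the third argument is max_active, and destroy_workqueue() flushes any still-queued work before freeing the queue. A sketch of that create/teardown pairing (struct and function names are illustrative):

#include <linux/workqueue.h>
#include <linux/errno.h>

struct my_tpg {
	struct workqueue_struct *workqueue;
};

static int my_tpg_init(struct my_tpg *tpg)
{
	/* flags == 0, max_active == 1, matching the tcm_fc conversion */
	tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
	if (!tpg->workqueue)
		return -ENOMEM;
	return 0;
}

static void my_tpg_exit(struct my_tpg *tpg)
{
	/* drains pending work items, then frees the workqueue */
	destroy_workqueue(tpg->workqueue);
}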
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index c37f4cd96452..d35ea5a3d56c 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219 if (cmd->was_ddp_setup) { 219 if (cmd->was_ddp_setup) {
220 BUG_ON(!ep); 220 BUG_ON(!ep);
221 BUG_ON(!lport); 221 BUG_ON(!lport);
222 } 222 /*
223 223 * Since DDP (Large Rx offload) was setup for this request,
224 /* 224 * payload is expected to be copied directly to user buffers.
225 * Doesn't expect payload if DDP is setup. Payload 225 */
226 * is expected to be copied directly to user buffers 226 buf = fc_frame_payload_get(fp, 1);
227 * due to DDP (Large Rx offload), 227 if (buf)
228 */ 228 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
229 buf = fc_frame_payload_get(fp, 1);
230 if (buf)
231 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
232 "cmd->sg_cnt 0x%x. DDP was setup" 229 "cmd->sg_cnt 0x%x. DDP was setup"
233 " hence not expected to receive frame with " 230 " hence not expected to receive frame with "
234 "payload, Frame will be dropped if " 231 "payload, Frame will be dropped if"
235 "'Sequence Initiative' bit in f_ctl is " 232 "'Sequence Initiative' bit in f_ctl is"
236 "not set\n", __func__, ep->xid, f_ctl, 233 "not set\n", __func__, ep->xid, f_ctl,
237 cmd->sg, cmd->sg_cnt); 234 cmd->sg, cmd->sg_cnt);
238 /* 235 /*
239 * Invalidate HW DDP context if it was setup for respective 236 * Invalidate HW DDP context if it was setup for respective
240 * command. Invalidation of HW DDP context is required in both 237 * command. Invalidation of HW DDP context is required in both
241 * situations (success and error). 238 * situations (success and error).
242 */ 239 */
243 ft_invl_hw_context(cmd); 240 ft_invl_hw_context(cmd);
244 241
245 /* 242 /*
246 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 243 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
247 * write data frame is received successfully where payload is 244 * write data frame is received successfully where payload is
248 * posted directly to user buffer and only the last frame's 245 * posted directly to user buffer and only the last frame's
249 * header is posted in receive queue. 246 * header is posted in receive queue.
250 * 247 *
251 * If "Sequence Initiative (TSI)" bit is not set, means error 248 * If "Sequence Initiative (TSI)" bit is not set, means error
252 * condition w.r.t. DDP, hence drop the packet and let explicit 249 * condition w.r.t. DDP, hence drop the packet and let explicit
253 * ABORTS from other end of exchange timer trigger the recovery. 250 * ABORTS from other end of exchange timer trigger the recovery.
254 */ 251 */
255 if (f_ctl & FC_FC_SEQ_INIT) 252 if (f_ctl & FC_FC_SEQ_INIT)
256 goto last_frame; 253 goto last_frame;
257 else 254 else
258 goto drop; 255 goto drop;
256 }
259 257
260 rel_off = ntohl(fh->fh_parm_offset); 258 rel_off = ntohl(fh->fh_parm_offset);
261 frame_len = fr_len(fp); 259 frame_len = fr_len(fp);