author	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-25 05:17:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-25 05:17:39 -0400
commit	7c1953ddb609f1c161bf4a11a5e4e4577e82e557 (patch)
tree	283244582f4fafd15a2ddf52971e0e5ff048af47 /drivers/target
parent	1bc67188c3843b8e16caaa8624beeb0e2823c1f8 (diff)
parent	b91bf5bf7fb0f35a8119a662e8e6b71ed950f443 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (62 commits)
target: Fix compile warning w/ missing module.h include
target: Remove legacy se_task->task_timer and associated logic
target: Fix incorrect transport_sent usage
target: re-use the command S/G list for single-task commands
target: Fix BIDI t_task_cdb handling in transport_generic_new_cmd
target: remove transport_allocate_tasks
target: merge transport_new_cmd_obj into transport_generic_new_cmd
target: remove the task_sg_bidi field se_task and pSCSI BIDI support
target: transport_subsystem_check_init cleanups
target: use a workqueue for I/O completions
target: remove unused TRANSPORT_ states
target: remove TRANSPORT_DEFERRED_CMD state
target: remove the TRANSPORT_REMOVE state
target: move depth_left manipulation out of transport_generic_request_failure
target: stop task timers earlier
target: remove TF_TIMER_STOP
target: factor some duplicate code for stopping a task
target: fix list walking in transport_free_dev_tasks
target: use transport_cmd_check_stop_to_fabric consistently
target: do not pass the queue object to transport_remove_cmd_from_queue
...
Diffstat (limited to 'drivers/target')
33 files changed, 901 insertions, 1956 deletions
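The change that touches the most call sites below is the transport_generic_free_cmd() signature: judging from the callers in this diff, the trailing argument is dropped and only the wait_for_tasks flag remains. A before/after sketch reconstructed from those call sites (the prototype change itself lives outside drivers/target):

	/* before this merge */
	transport_generic_free_cmd(&cmd->se_cmd, 1, 0);

	/* after: two arguments, the cmd and the wait_for_tasks flag */
	transport_generic_free_cmd(&cmd->se_cmd, 1);

core_tmr_alloc_req() moves the other way and gains an explicit gfp_t, so callers such as iscsit_allocate_se_cmd_for_tmr() and tcm_loop_device_reset() now pass GFP_KERNEL.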
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 1060c7b7f803..62e54053bcd8 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -6,7 +6,6 @@ target_core_mod-y := target_core_configfs.o \
 				   target_core_hba.o \
 				   target_core_pr.o \
 				   target_core_alua.o \
-				   target_core_scdb.o \
 				   target_core_tmr.o \
 				   target_core_tpg.o \
 				   target_core_transport.o \
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6a4ea29c2f36..4d01768fcd90 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
 	u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
 				cmd->se_cmd.t_data_nents;
 
-	iov_count += TRANSPORT_IOV_DATA_BUFFER;
+	iov_count += ISCSI_IOV_DATA_BUFFER;
 
 	cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
 	if (!cmd->iov_data) {
@@ -3538,16 +3538,8 @@ get_immediate:
 			spin_lock_bh(&conn->cmd_lock);
 			list_del(&cmd->i_list);
 			spin_unlock_bh(&conn->cmd_lock);
-			/*
-			 * Determine if a struct se_cmd is assoicated with
-			 * this struct iscsi_cmd.
-			 */
-			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
-			    !(cmd->tmr_req))
-				iscsit_release_cmd(cmd);
-			else
-				transport_generic_free_cmd(&cmd->se_cmd,
-						1, 0);
+
+			iscsit_free_cmd(cmd);
 			goto get_immediate;
 		case ISTATE_SEND_NOPIN_WANT_RESPONSE:
 			spin_unlock_bh(&cmd->istate_lock);
@@ -3940,7 +3932,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 {
 	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
 	struct iscsi_session *sess = conn->sess;
-	struct se_cmd *se_cmd;
 	/*
 	 * We expect this function to only ever be called from either RX or TX
 	 * thread context via iscsit_close_connection() once the other context
@@ -3948,35 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
 	 */
 	spin_lock_bh(&conn->cmd_lock);
 	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
-		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
 
-			list_del(&cmd->i_list);
-			spin_unlock_bh(&conn->cmd_lock);
-			iscsit_increment_maxcmdsn(cmd, sess);
-			se_cmd = &cmd->se_cmd;
-			/*
-			 * Special cases for active iSCSI TMR, and
-			 * transport_lookup_cmd_lun() failing from
-			 * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
-			 */
-			if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
-				se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
-			else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
-				transport_release_cmd(se_cmd);
-			else
-				iscsit_release_cmd(cmd);
-
-			spin_lock_bh(&conn->cmd_lock);
-			continue;
-		}
 		list_del(&cmd->i_list);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_increment_maxcmdsn(cmd, sess);
-		se_cmd = &cmd->se_cmd;
 
-		if (se_cmd->transport_wait_for_tasks)
-			se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+		iscsit_free_cmd(cmd);
 
 		spin_lock_bh(&conn->cmd_lock);
 	}
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 11fd74307811..beb39469e7f1 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,7 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
@@ -27,40 +28,11 @@
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
-{
-	unsigned char result = 0;
-	/*
-	 * MSB
-	 */
-	if ((val[0] >= 'a') && (val[0] <= 'f'))
-		result = ((val[0] - 'a' + 10) & 0xf) << 4;
-	else
-	if ((val[0] >= 'A') && (val[0] <= 'F'))
-		result = ((val[0] - 'A' + 10) & 0xf) << 4;
-	else /* digit */
-		result = ((val[0] - '0') & 0xf) << 4;
-	/*
-	 * LSB
-	 */
-	if ((val[1] >= 'a') && (val[1] <= 'f'))
-		result |= ((val[1] - 'a' + 10) & 0xf);
-	else
-	if ((val[1] >= 'A') && (val[1] <= 'F'))
-		result |= ((val[1] - 'A' + 10) & 0xf);
-	else /* digit */
-		result |= ((val[1] - '0') & 0xf);
-
-	return result;
-}
-
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-	int i, j = 0;
+	int j = DIV_ROUND_UP(len, 2);
 
-	for (i = 0; i < len; i += 2) {
-		dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
-	}
+	hex2bin(dst, src, j);
 
 	dst[j] = '\0';
 	return j;
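The rewrite leans on the kernel's hex2bin() helper, which packs two ASCII hex digits per output byte; for an even-length hex string of len characters that is DIV_ROUND_UP(len, 2) decoded bytes, matching what the removed chap_asciihex_to_binaryhex() loop produced. A minimal sketch (hypothetical local buffers, not from the patch):

	unsigned char bin[4];
	const char *hex = "deadbeef";		/* 8 hex chars from the initiator */
	int j = DIV_ROUND_UP(8, 2);		/* 4 decoded bytes */

	hex2bin(bin, hex, j);			/* bin = de ad be ef */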
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 470ed551eeb5..3723d90d5ae5 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -57,6 +57,9 @@
 #define TA_PROD_MODE_WRITE_PROTECT	0
 #define TA_CACHE_CORE_NPS		0
 
+
+#define ISCSI_IOV_DATA_BUFFER		5
+
 enum tpg_np_network_transport_table {
 	ISCSI_TCP			= 0,
 	ISCSI_SCTP_TCP			= 1,
@@ -425,7 +428,6 @@ struct iscsi_cmd {
 	/* Number of times struct iscsi_cmd is present in immediate queue */
 	atomic_t		immed_queue_count;
 	atomic_t		response_queue_count;
-	atomic_t		transport_sent;
 	spinlock_t		datain_lock;
 	spinlock_t		dataout_timeout_lock;
 	/* spinlock for protecting struct iscsi_cmd->i_state */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 91a4d170bda4..0b8404c30125 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -143,12 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
 		list_del(&cmd->i_list);
 		cmd->conn = NULL;
 		spin_unlock(&cr->conn_recovery_cmd_lock);
-		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
-		    !(cmd->se_cmd.transport_wait_for_tasks))
-			iscsit_release_cmd(cmd);
-		else
-			cmd->se_cmd.transport_wait_for_tasks(
-					&cmd->se_cmd, 1, 1);
+		iscsit_free_cmd(cmd);
 		spin_lock(&cr->conn_recovery_cmd_lock);
 	}
 	spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -170,12 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
 		list_del(&cmd->i_list);
 		cmd->conn = NULL;
 		spin_unlock(&cr->conn_recovery_cmd_lock);
-		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
-		    !(cmd->se_cmd.transport_wait_for_tasks))
-			iscsit_release_cmd(cmd);
-		else
-			cmd->se_cmd.transport_wait_for_tasks(
-					&cmd->se_cmd, 1, 1);
+		iscsit_free_cmd(cmd);
 		spin_lock(&cr->conn_recovery_cmd_lock);
 	}
 	spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -260,12 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
 		iscsit_remove_cmd_from_connection_recovery(cmd, sess);
 
 		spin_unlock(&cr->conn_recovery_cmd_lock);
-		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
-		    !(cmd->se_cmd.transport_wait_for_tasks))
-			iscsit_release_cmd(cmd);
-		else
-			cmd->se_cmd.transport_wait_for_tasks(
-					&cmd->se_cmd, 1, 0);
+		iscsit_free_cmd(cmd);
 		spin_lock(&cr->conn_recovery_cmd_lock);
 	}
 	spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -319,12 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
 		list_del(&cmd->i_list);
 
 		spin_unlock_bh(&conn->cmd_lock);
-		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
-		    !(cmd->se_cmd.transport_wait_for_tasks))
-			iscsit_release_cmd(cmd);
-		else
-			cmd->se_cmd.transport_wait_for_tasks(
-					&cmd->se_cmd, 1, 1);
+		iscsit_free_cmd(cmd);
 		spin_lock_bh(&conn->cmd_lock);
 	}
 	spin_unlock_bh(&conn->cmd_lock);
@@ -377,13 +357,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 
 			list_del(&cmd->i_list);
 			spin_unlock_bh(&conn->cmd_lock);
-
-			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
-			    !(cmd->se_cmd.transport_wait_for_tasks))
-				iscsit_release_cmd(cmd);
-			else
-				cmd->se_cmd.transport_wait_for_tasks(
-						&cmd->se_cmd, 1, 0);
+			iscsit_free_cmd(cmd);
 			spin_lock_bh(&conn->cmd_lock);
 			continue;
 		}
@@ -403,13 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 		    (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
 			list_del(&cmd->i_list);
 			spin_unlock_bh(&conn->cmd_lock);
-
-			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
-			    !(cmd->se_cmd.transport_wait_for_tasks))
-				iscsit_release_cmd(cmd);
-			else
-				cmd->se_cmd.transport_wait_for_tasks(
-						&cmd->se_cmd, 1, 1);
+			iscsit_free_cmd(cmd);
 			spin_lock_bh(&conn->cmd_lock);
 			continue;
 		}
@@ -434,10 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
 
 		iscsit_free_all_datain_reqs(cmd);
 
-		if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
-		     cmd->se_cmd.transport_wait_for_tasks)
-			cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
-					0, 0);
+		transport_wait_for_tasks(&cmd->se_cmd);
 		/*
 		 * Add the struct iscsi_cmd to the connection recovery cmd list
 		 */
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index db1fe1ec84df..490207eacde9 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(
 	 * so if we have received all DataOUT we can safety ignore Initiator.
 	 */
 	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
-		if (!atomic_read(&cmd->transport_sent)) {
+		if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
 			pr_debug("WRITE ITT: 0x%08x: t_state: %d"
 				" never sent to transport\n",
 				cmd->init_task_tag, cmd->se_cmd.t_state);
@@ -314,11 +314,11 @@ static int iscsit_task_reassign_complete_read(
 		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
 	}
 
-	if (!atomic_read(&cmd->transport_sent)) {
+	if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
 		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
 			" transport\n", cmd->init_task_tag,
 			cmd->se_cmd.t_state);
-		transport_generic_handle_cdb(se_cmd);
+		transport_handle_cdb_direct(se_cmd);
 		return 0;
 	}
 
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f00137f377b2..02348f727bd4 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 	}
 
 	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
-				(void *)cmd->tmr_req, tcm_function);
+				(void *)cmd->tmr_req, tcm_function,
+				GFP_KERNEL);
 	if (!se_cmd->se_tmr_req)
 		goto out;
 
@@ -839,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 	kmem_cache_free(lio_cmd_cache, cmd);
 }
 
+void iscsit_free_cmd(struct iscsi_cmd *cmd)
+{
+	/*
+	 * Determine if a struct se_cmd is assoicated with
+	 * this struct iscsi_cmd.
+	 */
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+	case ISCSI_OP_SCSI_TMFUNC:
+		transport_generic_free_cmd(&cmd->se_cmd, 1);
+		break;
+	default:
+		iscsit_release_cmd(cmd);
+		break;
+	}
+}
+
 int iscsit_check_session_usage_count(struct iscsi_session *sess)
 {
 	spin_lock_bh(&sess->session_usage_lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 2cd49d607bda..835bf7de0281 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
 extern void iscsit_inc_session_usage_count(struct iscsi_session *);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index aa2d67997235..b15d8cbf630b 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * Release the struct se_cmd, which will make a callback to release
 	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
 	 */
-	transport_generic_free_cmd(se_cmd, 0, 0);
+	transport_generic_free_cmd(se_cmd, 0);
 }
 
 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
@@ -290,6 +290,15 @@ static int tcm_loop_queuecommand(
 	 */
 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+	/*
+	 * Ensure that this tl_tpg reference from the incoming sc->device->id
+	 * has already been configured via tcm_loop_make_naa_tpg().
+	 */
+	if (!tl_tpg->tl_hba) {
+		set_host_byte(sc, DID_NO_CONNECT);
+		sc->scsi_done(sc);
+		return 0;
+	}
 	se_tpg = &tl_tpg->tl_se_tpg;
 	/*
 	 * Determine the SAM Task Attribute and allocate tl_cmd and
@@ -366,7 +375,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	 * Allocate the LUN_RESET TMR
 	 */
 	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
-				TMR_LUN_RESET);
+				TMR_LUN_RESET, GFP_KERNEL);
 	if (IS_ERR(se_cmd->se_tmr_req))
 		goto release;
 	/*
@@ -388,7 +397,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 		SUCCESS : FAILED;
 release:
 	if (se_cmd)
-		transport_generic_free_cmd(se_cmd, 1, 0);
+		transport_generic_free_cmd(se_cmd, 1);
 	else
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 	kfree(tl_tmr);
@@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg(
 	 */
 	core_tpg_deregister(se_tpg);
 
+	tl_tpg->tl_hba = NULL;
+	tl_tpg->tl_tpgt = 0;
+
 	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 98c98a3a0250..8f4447749c71 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -24,7 +24,6 @@
  *
  ******************************************************************************/
 
-#include <linux/version.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/configfs.h>
@@ -68,6 +67,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	unsigned char *buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
+	/*
+	 * Need at least 4 bytes of response data or else we can't
+	 * even fit the return data length.
+	 */
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
+			" too small\n", cmd->data_length);
+		return -EINVAL;
+	}
 
 	buf = transport_kmap_first_data_page(cmd);
 
@@ -75,6 +83,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
+		 * Check if the Target port group and Target port descriptor list
+		 * based on tg_pt_gp_members count will fit into the response payload.
+		 * Otherwise, bump rd_len to let the initiator know we have exceeded
+		 * the allocation length and the response is truncated.
+		 */
+		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+		     cmd->data_length) {
+			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+			continue;
+		}
+		/*
 		 * PREF: Preferred target port bit, determine if this
 		 * bit should be set for port group.
 		 */
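Each returned descriptor costs 8 bytes of header plus 4 bytes per member target port, which is exactly what the new bounds check accounts for. A worked example with hypothetical numbers:

	/* one group with two member ports, cmd->data_length = 16:
	 *   descriptor size = 8 + 2 * 4 = 16 bytes
	 *   off (4) + 16 = 20 > 16, so the descriptor is skipped but
	 *   rd_len still grows by 16; the reported RETURN DATA LENGTH
	 *   then exceeds the allocation length, telling the initiator
	 *   the response was truncated and a larger buffer is needed.
	 */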
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index f04d4ef99dca..38535eb13929 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,7 +24,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/ctype.h>
+#include <linux/module.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 
@@ -156,11 +156,12 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 }
 
 static void
-target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
 {
 	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
-	unsigned char *buf = buf_off;
-	int cnt = 0, next = 1;
+	int cnt;
+	bool next = true;
+
 	/*
 	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
 	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
@@ -169,19 +170,18 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_of
 	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
 	 * per device uniqeness.
 	 */
-	while (*p != '\0') {
-		if (cnt >= 13)
-			break;
-		if (!isxdigit(*p)) {
-			p++;
+	for (cnt = 0; *p && cnt < 13; p++) {
+		int val = hex_to_bin(*p);
+
+		if (val < 0)
 			continue;
-		}
-		if (next != 0) {
-			buf[cnt++] |= hex_to_bin(*p++);
-			next = 0;
+
+		if (next) {
+			next = false;
+			buf[cnt++] |= val;
 		} else {
-			buf[cnt] = hex_to_bin(*p++) << 4;
-			next = 1;
+			next = true;
+			buf[cnt] = val << 4;
 		}
 	}
 }
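hex_to_bin() returns the 0-15 value of a hex digit and a negative value for anything else, which is what lets the rewritten loop drop the isxdigit()/p++ bookkeeping and silently skip separators in the unit serial. A worked trace (hypothetical serial; assumes the caller pre-seeded the high nibble of buf[0], per the comment above):

	/* unit_serial = "aF-3":
	 *   'a' -> buf[0] |= 0x0a; cnt = 1    (low nibble of byte 3)
	 *   'F' -> buf[1]  = 0xf0;            (high nibble of next byte)
	 *   '-' -> hex_to_bin() < 0, skipped
	 *   '3' -> buf[1] |= 0x03; cnt = 2
	 * ...until 13 bytes are filled or the string ends.
	 */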
@@ -1266,3 +1266,52 @@ transport_emulate_control_cdb(struct se_task *task)
 
 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
+
+/*
+ * Write a CDB into @cdb that is based on the one the intiator sent us,
+ * but updated to only cover the sectors that the current task handles.
+ */
+void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
+
+	memcpy(cdb, cmd->t_task_cdb, cdb_len);
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		unsigned long long lba = task->task_lba;
+		u32 sectors = task->task_sectors;
+
+		switch (cdb_len) {
+		case 6:
+			/* 21-bit LBA and 8-bit sectors */
+			cdb[1] = (lba >> 16) & 0x1f;
+			cdb[2] = (lba >> 8) & 0xff;
+			cdb[3] = lba & 0xff;
+			cdb[4] = sectors & 0xff;
+			break;
+		case 10:
+			/* 32-bit LBA and 16-bit sectors */
+			put_unaligned_be32(lba, &cdb[2]);
+			put_unaligned_be16(sectors, &cdb[7]);
+			break;
+		case 12:
+			/* 32-bit LBA and 32-bit sectors */
+			put_unaligned_be32(lba, &cdb[2]);
+			put_unaligned_be32(sectors, &cdb[6]);
+			break;
+		case 16:
+			/* 64-bit LBA and 32-bit sectors */
+			put_unaligned_be64(lba, &cdb[2]);
+			put_unaligned_be32(sectors, &cdb[10]);
+			break;
+		case 32:
+			/* 64-bit LBA and 32-bit sectors, extended CDB */
+			put_unaligned_be64(lba, &cdb[12]);
+			put_unaligned_be32(sectors, &cdb[28]);
+			break;
+		default:
+			BUG();
+		}
+	}
+}
+EXPORT_SYMBOL(target_get_task_cdb);
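A usage sketch for the new export, with hypothetical task values: if the initiator sent a READ_10 covering lba 0 for 32 sectors and the core split it into 8-sector tasks, then for the task with task_lba = 8 and task_sectors = 8:

	unsigned char cdb[16];

	target_get_task_cdb(task, cdb);
	/* opcode, flags and control bytes copied from cmd->t_task_cdb;
	 * cdb[2..5] = be32(8)   (task_lba)
	 * cdb[7..8] = be16(8)   (task_sectors) */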
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index b2575d8568cc..e0c1e8a8dd4e 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -23,7 +23,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/init.h>
@@ -133,14 +132,6 @@ static struct config_group *target_core_register_fabric(
 	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
 		" %s\n", group, name);
 	/*
-	 * Ensure that TCM subsystem plugins are loaded at this point for
-	 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
-	 * LUN symlinks.
-	 */
-	if (transport_subsystem_check_init() < 0)
-		return ERR_PTR(-EINVAL);
-
-	/*
 	 * Below are some hardcoded request_module() calls to automatically
 	 * local fabric modules when the following is called:
 	 *
@@ -725,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth);
 DEF_DEV_ATTRIB(queue_depth);
 SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
 
-DEF_DEV_ATTRIB(task_timeout);
-SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
-
 DEF_DEV_ATTRIB(max_unmap_lba_count);
 SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
 
@@ -761,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
 	&target_core_dev_attrib_optimal_sectors.attr,
 	&target_core_dev_attrib_hw_queue_depth.attr,
 	&target_core_dev_attrib_queue_depth.attr,
-	&target_core_dev_attrib_task_timeout.attr,
 	&target_core_dev_attrib_max_unmap_lba_count.attr,
 	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
 	&target_core_dev_attrib_unmap_granularity.attr,
@@ -3080,8 +3067,7 @@ static struct config_group *target_core_call_addhbatotarget(
 	/*
 	 * Load up TCM subsystem plugins if they have not already been loaded.
 	 */
-	if (transport_subsystem_check_init() < 0)
-		return ERR_PTR(-EINVAL);
+	transport_subsystem_check_init();
 
 	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
 	if (IS_ERR(hba))
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ca6e4a4df134..f870c3bcfd82 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -914,21 +914,6 @@ void se_dev_set_default_attribs(
 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
 }
 
-int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
-{
-	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
-		pr_err("dev[%p]: Passed task_timeout: %u larger then"
-			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
-		return -EINVAL;
-	} else {
-		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
-		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
-			dev, task_timeout);
-	}
-
-	return 0;
-}
-
 int se_dev_set_max_unmap_lba_count(
 	struct se_device *dev,
 	u32 max_unmap_lba_count)
@@ -972,36 +957,24 @@
 
 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
-	if ((flag != 0) && (flag != 1)) {
+	if (flag != 0 && flag != 1) {
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
-	if (dev->transport->dpo_emulated == NULL) {
-		pr_err("dev->transport->dpo_emulated is NULL\n");
-		return -EINVAL;
-	}
-	if (dev->transport->dpo_emulated(dev) == 0) {
-		pr_err("dev->transport->dpo_emulated not supported\n");
-		return -EINVAL;
-	}
-	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
-	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
-		" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
-	return 0;
+
+	pr_err("dpo_emulated not supported\n");
+	return -EINVAL;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 {
-	if ((flag != 0) && (flag != 1)) {
+	if (flag != 0 && flag != 1) {
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
-	if (dev->transport->fua_write_emulated == NULL) {
-		pr_err("dev->transport->fua_write_emulated is NULL\n");
-		return -EINVAL;
-	}
-	if (dev->transport->fua_write_emulated(dev) == 0) {
-		pr_err("dev->transport->fua_write_emulated not supported\n");
+
+	if (dev->transport->fua_write_emulated == 0) {
+		pr_err("fua_write_emulated not supported\n");
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
@@ -1012,36 +985,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 
 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 {
-	if ((flag != 0) && (flag != 1)) {
+	if (flag != 0 && flag != 1) {
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
-	if (dev->transport->fua_read_emulated == NULL) {
-		pr_err("dev->transport->fua_read_emulated is NULL\n");
-		return -EINVAL;
-	}
-	if (dev->transport->fua_read_emulated(dev) == 0) {
-		pr_err("dev->transport->fua_read_emulated not supported\n");
-		return -EINVAL;
-	}
-	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
-	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
-		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
-	return 0;
+
+	pr_err("ua read emulated not supported\n");
+	return -EINVAL;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 {
-	if ((flag != 0) && (flag != 1)) {
+	if (flag != 0 && flag != 1) {
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
-	if (dev->transport->write_cache_emulated == NULL) {
-		pr_err("dev->transport->write_cache_emulated is NULL\n");
-		return -EINVAL;
-	}
-	if (dev->transport->write_cache_emulated(dev) == 0) {
-		pr_err("dev->transport->write_cache_emulated not supported\n");
+	if (dev->transport->write_cache_emulated == 0) {
+		pr_err("write_cache_emulated not supported\n");
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 55bbe0847a6d..09b6f8729f91 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -22,7 +22,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/init.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index bc1b33639b8d..19a0be9c6570 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -26,7 +26,6 @@
 *
 ******************************************************************************/
 
-#include <linux/version.h>
 #include <linux/string.h>
 #include <linux/parser.h>
 #include <linux/timer.h>
@@ -273,13 +272,14 @@
 static int fd_do_readv(struct se_task *task)
 {
 	struct fd_request *req = FILE_REQ(task);
-	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
 	struct scatterlist *sg = task->task_sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
 	loff_t pos = (task->task_lba *
-		task->se_dev->se_sub_dev->se_dev_attrib.block_size);
+		      se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret = 0, i;
 
 	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
@@ -325,13 +325,14 @@ static int fd_do_readv(struct se_task *task)
 static int fd_do_writev(struct se_task *task)
 {
 	struct fd_request *req = FILE_REQ(task);
-	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
 	struct scatterlist *sg = task->task_sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
 	loff_t pos = (task->task_lba *
-		task->se_dev->se_sub_dev->se_dev_attrib.block_size);
+		      se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret, i = 0;
 
 	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
@@ -399,33 +400,6 @@ static void fd_emulate_sync_cache(struct se_task *task)
 }
 
 /*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int fd_emulated_write_cache(struct se_device *dev)
-{
-	return 1;
-}
-
-static int fd_emulated_dpo(struct se_device *dev)
-{
-	return 0;
-}
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int fd_emulated_fua_write(struct se_device *dev)
-{
-	return 1;
-}
-
-static int fd_emulated_fua_read(struct se_device *dev)
-{
-	return 0;
-}
-
-/*
  * WRITE Force Unit Access (FUA) emulation on a per struct se_task
  * LBA range basis..
  */
@@ -608,17 +582,6 @@ static ssize_t fd_show_configfs_dev_params(
 	return bl;
 }
 
-/* fd_get_cdb(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static unsigned char *fd_get_cdb(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-
-	return req->fd_scsi_cdb;
-}
-
 /* fd_get_device_rev(): (Part of se_subsystem_api_t template)
  *
  *
@@ -650,15 +613,13 @@ static struct se_subsystem_api fileio_template = {
 	.name			= "fileio",
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.write_cache_emulated	= 1,
+	.fua_write_emulated	= 1,
 	.attach_hba		= fd_attach_hba,
 	.detach_hba		= fd_detach_hba,
 	.allocate_virtdevice	= fd_allocate_virtdevice,
 	.create_virtdevice	= fd_create_virtdevice,
 	.free_device		= fd_free_device,
-	.dpo_emulated		= fd_emulated_dpo,
-	.fua_write_emulated	= fd_emulated_fua_write,
-	.fua_read_emulated	= fd_emulated_fua_read,
-	.write_cache_emulated	= fd_emulated_write_cache,
 	.alloc_task		= fd_alloc_task,
 	.do_task		= fd_do_task,
 	.do_sync_cache		= fd_emulate_sync_cache,
@@ -666,7 +627,6 @@ static struct se_subsystem_api fileio_template = {
 	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
-	.get_cdb		= fd_get_cdb,
 	.get_device_rev		= fd_get_device_rev,
 	.get_device_type	= fd_get_device_type,
 	.get_blocks		= fd_get_blocks,
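With the dpo/fua/write-cache callbacks gone from struct se_subsystem_api, a backend advertises emulation support through plain flags in its template, which the se_dev_set_emulate_*() checks in target_core_device.c above now test directly. A minimal sketch for a hypothetical backend (names invented for illustration):

	static struct se_subsystem_api example_template = {
		.name			= "example",
		.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
		.write_cache_emulated	= 1,	/* tested by se_dev_set_emulate_write_cache() */
		.fua_write_emulated	= 1,	/* tested by se_dev_set_emulate_fua_write() */
		/* ...attach_hba, alloc_task, do_task, etc. as before... */
	};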
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index daebd710b893..59e6e73106c2 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,9 +14,7 @@
 
 struct fd_request {
 	struct se_task	fd_task;
-	/* SCSI CDB from iSCSI Command PDU */
-	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
-} ____cacheline_aligned;
+};
 
 #define FBDF_HAS_PATH		0x01
 #define FBDF_HAS_SIZE		0x02
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7e1234105442..41ad02b5fb87 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -27,7 +27,6 @@
 *
 ******************************************************************************/
 
-#include <linux/version.h>
 #include <linux/string.h>
 #include <linux/parser.h>
 #include <linux/timer.h>
@@ -314,104 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }
 
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (err)
+		pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+	if (cmd)
+		transport_complete_sync_cache(cmd, err == 0);
+	bio_put(bio);
+}
+
 /*
- * Emulate SYCHRONIZE_CACHE_*
+ * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
  */
 static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 	int immed = (cmd->t_task_cdb[1] & 0x2);
-	sector_t error_sector;
-	int ret;
+	struct bio *bio;
 
 	/*
 	 * If the Immediate bit is set, queue up the GOOD response
-	 * for this SYNCHRONIZE_CACHE op
+	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
 		transport_complete_sync_cache(cmd, 1);
 
-	/*
-	 * blkdev_issue_flush() does not support a specifying a range, so
-	 * we have to flush the entire cache.
-	 */
-	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
-	if (ret != 0) {
-		pr_err("IBLOCK: block_issue_flush() failed: %d "
			" error_sector: %llu\n", ret,
-			(unsigned long long)error_sector);
-	}
-
+	bio = bio_alloc(GFP_KERNEL, 0);
+	bio->bi_end_io = iblock_end_io_flush;
+	bio->bi_bdev = ib_dev->ibd_bd;
 	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
-}
-
-/*
- * Tell TCM Core that we are capable of WriteCache emulation for
- * an underlying struct se_device.
- */
-static int iblock_emulated_write_cache(struct se_device *dev)
-{
-	return 1;
-}
-
-static int iblock_emulated_dpo(struct se_device *dev)
-{
-	return 0;
-}
-
-/*
- * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
- * for TYPE_DISK.
- */
-static int iblock_emulated_fua_write(struct se_device *dev)
-{
-	return 1;
-}
-
-static int iblock_emulated_fua_read(struct se_device *dev)
-{
-	return 0;
-}
-
-static int iblock_do_task(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	struct iblock_req *req = IBLOCK_REQ(task);
-	struct bio *bio = req->ib_bio, *nbio = NULL;
-	struct blk_plug plug;
-	int rw;
-
-	if (task->task_data_direction == DMA_TO_DEVICE) {
-		/*
-		 * Force data to disk if we pretend to not have a volatile
-		 * write cache, or the initiator set the Force Unit Access bit.
-		 */
-		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		     task->task_se_cmd->t_tasks_fua))
-			rw = WRITE_FUA;
-		else
-			rw = WRITE;
-	} else {
-		rw = READ;
-	}
-
-	blk_start_plug(&plug);
-	while (bio) {
-		nbio = bio->bi_next;
-		bio->bi_next = NULL;
-		pr_debug("Calling submit_bio() task: %p bio: %p"
-			" bio->bi_sector: %llu\n", task, bio,
-			(unsigned long long)bio->bi_sector);
-
-		submit_bio(rw, bio);
-		bio = nbio;
-	}
-	blk_finish_plug(&plug);
-
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+		bio->bi_private = cmd;
+	submit_bio(WRITE_FLUSH, bio);
 }
 
 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
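Note the asymmetry around the Immediate bit in the rewritten flush path: with immed set, GOOD status is returned before the flush is even issued and bio->bi_private stays NULL, so iblock_end_io_flush() only drops the bio reference; otherwise the se_cmd rides along in bi_private and is completed from the end_io callback. Condensed from the hunk above:

	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* zero-segment flush bio */

	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;	/* completed in iblock_end_io_flush() */
	submit_bio(WRITE_FLUSH, bio);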
@@ -425,20 +362,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) | |||
425 | 362 | ||
426 | static void iblock_free_task(struct se_task *task) | 363 | static void iblock_free_task(struct se_task *task) |
427 | { | 364 | { |
428 | struct iblock_req *req = IBLOCK_REQ(task); | 365 | kfree(IBLOCK_REQ(task)); |
429 | struct bio *bio, *hbio = req->ib_bio; | ||
430 | /* | ||
431 | * We only release the bio(s) here if iblock_bio_done() has not called | ||
432 | * bio_put() -> iblock_bio_destructor(). | ||
433 | */ | ||
434 | while (hbio != NULL) { | ||
435 | bio = hbio; | ||
436 | hbio = hbio->bi_next; | ||
437 | bio->bi_next = NULL; | ||
438 | bio_put(bio); | ||
439 | } | ||
440 | |||
441 | kfree(req); | ||
442 | } | 366 | } |
443 | 367 | ||
444 | enum { | 368 | enum { |
@@ -552,25 +476,21 @@ static ssize_t iblock_show_configfs_dev_params( | |||
552 | static void iblock_bio_destructor(struct bio *bio) | 476 | static void iblock_bio_destructor(struct bio *bio) |
553 | { | 477 | { |
554 | struct se_task *task = bio->bi_private; | 478 | struct se_task *task = bio->bi_private; |
555 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; | 479 | struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; |
556 | 480 | ||
557 | bio_free(bio, ib_dev->ibd_bio_set); | 481 | bio_free(bio, ib_dev->ibd_bio_set); |
558 | } | 482 | } |
559 | 483 | ||
560 | static struct bio *iblock_get_bio( | 484 | static struct bio * |
561 | struct se_task *task, | 485 | iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) |
562 | struct iblock_req *ib_req, | ||
563 | struct iblock_dev *ib_dev, | ||
564 | int *ret, | ||
565 | sector_t lba, | ||
566 | u32 sg_num) | ||
567 | { | 486 | { |
487 | struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; | ||
488 | struct iblock_req *ib_req = IBLOCK_REQ(task); | ||
568 | struct bio *bio; | 489 | struct bio *bio; |
569 | 490 | ||
570 | bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); | 491 | bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); |
571 | if (!bio) { | 492 | if (!bio) { |
572 | pr_err("Unable to allocate memory for bio\n"); | 493 | pr_err("Unable to allocate memory for bio\n"); |
573 | *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
574 | return NULL; | 494 | return NULL; |
575 | } | 495 | } |
576 | 496 | ||
@@ -591,17 +511,33 @@ static struct bio *iblock_get_bio( | |||
591 | return bio; | 511 | return bio; |
592 | } | 512 | } |
593 | 513 | ||
594 | static int iblock_map_data_SG(struct se_task *task) | 514 | static int iblock_do_task(struct se_task *task) |
595 | { | 515 | { |
596 | struct se_cmd *cmd = task->task_se_cmd; | 516 | struct se_cmd *cmd = task->task_se_cmd; |
597 | struct se_device *dev = cmd->se_dev; | 517 | struct se_device *dev = cmd->se_dev; |
598 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; | 518 | struct bio *bio; |
599 | struct iblock_req *ib_req = IBLOCK_REQ(task); | 519 | struct bio_list list; |
600 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | ||
601 | struct scatterlist *sg; | 520 | struct scatterlist *sg; |
602 | int ret = 0; | ||
603 | u32 i, sg_num = task->task_sg_nents; | 521 | u32 i, sg_num = task->task_sg_nents; |
604 | sector_t block_lba; | 522 | sector_t block_lba; |
523 | struct blk_plug plug; | ||
524 | int rw; | ||
525 | |||
526 | if (task->task_data_direction == DMA_TO_DEVICE) { | ||
527 | /* | ||
528 | * Force data to disk if we pretend to not have a volatile | ||
529 | * write cache, or the initiator set the Force Unit Access bit. | ||
530 | */ | ||
531 | if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || | ||
532 | (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && | ||
533 | task->task_se_cmd->t_tasks_fua)) | ||
534 | rw = WRITE_FUA; | ||
535 | else | ||
536 | rw = WRITE; | ||
537 | } else { | ||
538 | rw = READ; | ||
539 | } | ||
540 | |||
605 | /* | 541 | /* |
606 | * Do starting conversion up from non 512-byte blocksize with | 542 | * Do starting conversion up from non 512-byte blocksize with |
607 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. | 543 | * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. |
@@ -620,68 +556,43 @@ static int iblock_map_data_SG(struct se_task *task) | |||
620 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 556 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
621 | } | 557 | } |
622 | 558 | ||
623 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); | 559 | bio = iblock_get_bio(task, block_lba, sg_num); |
624 | if (!bio) | 560 | if (!bio) |
625 | return ret; | 561 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
562 | |||
563 | bio_list_init(&list); | ||
564 | bio_list_add(&list, bio); | ||
626 | 565 | ||
627 | ib_req->ib_bio = bio; | ||
628 | hbio = tbio = bio; | ||
629 | /* | ||
630 | * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist | ||
631 | * from task->task_sg -> struct scatterlist memory. | ||
632 | */ | ||
633 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { | 566 | for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { |
634 | pr_debug("task: %p bio: %p Calling bio_add_page(): page:" | 567 | /* |
635 | " %p len: %u offset: %u\n", task, bio, sg_page(sg), | 568 | * XXX: if the length the device accepts is shorter than the |
636 | sg->length, sg->offset); | 569 | * length of the S/G list entry this will cause an |
637 | again: | 570 | * endless loop. Better hope no driver uses huge pages. |
638 | ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); | 571 | */ |
639 | if (ret != sg->length) { | 572 | while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) |
640 | 573 | != sg->length) { | |
641 | pr_debug("*** Set bio->bi_sector: %llu\n", | 574 | bio = iblock_get_bio(task, block_lba, sg_num); |
642 | (unsigned long long)bio->bi_sector); | ||
643 | pr_debug("** task->task_size: %u\n", | ||
644 | task->task_size); | ||
645 | pr_debug("*** bio->bi_max_vecs: %u\n", | ||
646 | bio->bi_max_vecs); | ||
647 | pr_debug("*** bio->bi_vcnt: %u\n", | ||
648 | bio->bi_vcnt); | ||
649 | |||
650 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, | ||
651 | block_lba, sg_num); | ||
652 | if (!bio) | 575 | if (!bio) |
653 | goto fail; | 576 | goto fail; |
654 | 577 | bio_list_add(&list, bio); | |
655 | tbio = tbio->bi_next = bio; | ||
656 | pr_debug("-----------------> Added +1 bio: %p to" | ||
657 | " list, Going to again\n", bio); | ||
658 | goto again; | ||
659 | } | 578 | } |
579 | |||
660 | /* Always in 512 byte units for Linux/Block */ | 580 | /* Always in 512 byte units for Linux/Block */ |
661 | block_lba += sg->length >> IBLOCK_LBA_SHIFT; | 581 | block_lba += sg->length >> IBLOCK_LBA_SHIFT; |
662 | sg_num--; | 582 | sg_num--; |
663 | pr_debug("task: %p bio-add_page() passed!, decremented" | ||
664 | " sg_num to %u\n", task, sg_num); | ||
665 | pr_debug("task: %p bio_add_page() passed!, increased lba" | ||
666 | " to %llu\n", task, (unsigned long long)block_lba); | ||
667 | pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" | ||
668 | " %u\n", task, bio->bi_vcnt); | ||
669 | } | 583 | } |
670 | 584 | ||
671 | return 0; | 585 | blk_start_plug(&plug); |
586 | while ((bio = bio_list_pop(&list))) | ||
587 | submit_bio(rw, bio); | ||
588 | blk_finish_plug(&plug); | ||
589 | |||
590 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
591 | |||
672 | fail: | 592 | fail: |
673 | while (hbio) { | 593 | while ((bio = bio_list_pop(&list))) |
674 | bio = hbio; | ||
675 | hbio = hbio->bi_next; | ||
676 | bio->bi_next = NULL; | ||
677 | bio_put(bio); | 594 | bio_put(bio); |
678 | } | 595 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
679 | return ret; | ||
680 | } | ||
681 | |||
682 | static unsigned char *iblock_get_cdb(struct se_task *task) | ||
683 | { | ||
684 | return IBLOCK_REQ(task)->ib_scsi_cdb; | ||
685 | } | 596 | } |
686 | 597 | ||
687 | static u32 iblock_get_device_rev(struct se_device *dev) | 598 | static u32 iblock_get_device_rev(struct se_device *dev) |
@@ -707,6 +618,7 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
707 | { | 618 | { |
708 | struct se_task *task = bio->bi_private; | 619 | struct se_task *task = bio->bi_private; |
709 | struct iblock_req *ibr = IBLOCK_REQ(task); | 620 | struct iblock_req *ibr = IBLOCK_REQ(task); |
621 | |||
710 | /* | 622 | /* |
711 | * Set -EIO if !BIO_UPTODATE and the passed is still err=0 | 623 | * Set -EIO if !BIO_UPTODATE and the passed is still err=0 |
712 | */ | 624 | */ |
@@ -721,50 +633,31 @@ static void iblock_bio_done(struct bio *bio, int err) | |||
721 | */ | 633 | */ |
722 | atomic_inc(&ibr->ib_bio_err_cnt); | 634 | atomic_inc(&ibr->ib_bio_err_cnt); |
723 | smp_mb__after_atomic_inc(); | 635 | smp_mb__after_atomic_inc(); |
724 | bio_put(bio); | ||
725 | /* | ||
726 | * Wait to complete the task until the last bio has completed. | ||
727 | */ | ||
728 | if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) | ||
729 | return; | ||
730 | |||
731 | ibr->ib_bio = NULL; | ||
732 | transport_complete_task(task, 0); | ||
733 | return; | ||
734 | } | 636 | } |
735 | pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", | 637 | |
736 | task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); | ||
737 | /* | ||
738 | * bio_put() will call iblock_bio_destructor() to release the bio back | ||
739 | * to ibr->ib_bio_set. | ||
740 | */ | ||
741 | bio_put(bio); | 638 | bio_put(bio); |
742 | /* | 639 | |
743 | * Wait to complete the task until the last bio has completed. | ||
744 | */ | ||
745 | if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) | 640 | if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) |
746 | return; | 641 | return; |
747 | /* | 642 | |
748 | * Return GOOD status for task if zero ib_bio_err_cnt exists. | 643 | pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", |
749 | */ | 644 | task, bio, task->task_lba, |
750 | ibr->ib_bio = NULL; | 645 | (unsigned long long)bio->bi_sector, err); |
751 | transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); | 646 | |
647 | transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); | ||
752 | } | 648 | } |
753 | 649 | ||
754 | static struct se_subsystem_api iblock_template = { | 650 | static struct se_subsystem_api iblock_template = { |
755 | .name = "iblock", | 651 | .name = "iblock", |
756 | .owner = THIS_MODULE, | 652 | .owner = THIS_MODULE, |
757 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | 653 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, |
758 | .map_data_SG = iblock_map_data_SG, | 654 | .write_cache_emulated = 1, |
655 | .fua_write_emulated = 1, | ||
759 | .attach_hba = iblock_attach_hba, | 656 | .attach_hba = iblock_attach_hba, |
760 | .detach_hba = iblock_detach_hba, | 657 | .detach_hba = iblock_detach_hba, |
761 | .allocate_virtdevice = iblock_allocate_virtdevice, | 658 | .allocate_virtdevice = iblock_allocate_virtdevice, |
762 | .create_virtdevice = iblock_create_virtdevice, | 659 | .create_virtdevice = iblock_create_virtdevice, |
763 | .free_device = iblock_free_device, | 660 | .free_device = iblock_free_device, |
764 | .dpo_emulated = iblock_emulated_dpo, | ||
765 | .fua_write_emulated = iblock_emulated_fua_write, | ||
766 | .fua_read_emulated = iblock_emulated_fua_read, | ||
767 | .write_cache_emulated = iblock_emulated_write_cache, | ||
768 | .alloc_task = iblock_alloc_task, | 661 | .alloc_task = iblock_alloc_task, |
769 | .do_task = iblock_do_task, | 662 | .do_task = iblock_do_task, |
770 | .do_discard = iblock_do_discard, | 663 | .do_discard = iblock_do_discard, |
@@ -773,7 +666,6 @@ static struct se_subsystem_api iblock_template = { | |||
773 | .check_configfs_dev_params = iblock_check_configfs_dev_params, | 666 | .check_configfs_dev_params = iblock_check_configfs_dev_params, |
774 | .set_configfs_dev_params = iblock_set_configfs_dev_params, | 667 | .set_configfs_dev_params = iblock_set_configfs_dev_params, |
775 | .show_configfs_dev_params = iblock_show_configfs_dev_params, | 668 | .show_configfs_dev_params = iblock_show_configfs_dev_params, |
776 | .get_cdb = iblock_get_cdb, | ||
777 | .get_device_rev = iblock_get_device_rev, | 669 | .get_device_rev = iblock_get_device_rev, |
778 | .get_device_type = iblock_get_device_type, | 670 | .get_device_type = iblock_get_device_type, |
779 | .get_blocks = iblock_get_blocks, | 671 | .get_blocks = iblock_get_blocks, |
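The new iblock_do_task() collects every bio for a task on a bio_list and only then submits the batch inside one blk_plug section, so the block layer can merge adjacent requests before they reach the device. A minimal kernel-context sketch of that pattern (submit_bio_list is a hypothetical wrapper name; this is not standalone-buildable outside the kernel tree):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Sketch: drain a pre-built bio_list under a single plug so request
 * submission is batched per-CPU and unplugged once at the end. */
static void submit_bio_list(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);		/* start batching on this CPU */
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);	/* 2011-era two-argument form */
	blk_finish_plug(&plug);		/* unplug: push the batch to the queue */
}

On the failure path the same bio_list is drained with bio_put() instead, as the hunk above shows, so partially built chains never leak.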
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index a121cd1b6575..5cf1860c10d0 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h | |||
@@ -8,10 +8,8 @@ | |||
8 | 8 | ||
9 | struct iblock_req { | 9 | struct iblock_req { |
10 | struct se_task ib_task; | 10 | struct se_task ib_task; |
11 | unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
12 | atomic_t ib_bio_cnt; | 11 | atomic_t ib_bio_cnt; |
13 | atomic_t ib_bio_err_cnt; | 12 | atomic_t ib_bio_err_cnt; |
14 | struct bio *ib_bio; | ||
15 | } ____cacheline_aligned; | 13 | } ____cacheline_aligned; |
16 | 14 | ||
17 | #define IBDF_HAS_UDEV_PATH 0x01 | 15 | #define IBDF_HAS_UDEV_PATH 0x01 |
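With the ib_bio chain head and the CDB copy gone, iblock_req is reduced to the two atomics that gate completion in iblock_bio_done() above. A sketch of that accounting (task_bio_complete is a hypothetical name; transport_complete_task() is the target-core callback used in the diff):

#include <linux/atomic.h>

/* Sketch: per-task bio accounting. Every finished bio decrements
 * bio_cnt; only the last one to finish reports task status, which
 * is GOOD iff no bio bumped err_cnt along the way. */
static void task_bio_complete(atomic_t *bio_cnt, atomic_t *err_cnt,
			      struct se_task *task)
{
	if (!atomic_dec_and_test(bio_cnt))
		return;			/* other bios still in flight */
	transport_complete_task(task, !atomic_read(err_cnt));
}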
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 7fd3a161f7cc..0c4f783f924c 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -25,7 +25,6 @@ | |||
25 | * | 25 | * |
26 | ******************************************************************************/ | 26 | ******************************************************************************/ |
27 | 27 | ||
28 | #include <linux/version.h> | ||
29 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
30 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
31 | #include <linux/list.h> | 30 | #include <linux/list.h> |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 2b7b0da9146d..dad671dee9e9 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -26,7 +26,6 @@ | |||
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/version.h> | ||
30 | #include <linux/string.h> | 29 | #include <linux/string.h> |
31 | #include <linux/parser.h> | 30 | #include <linux/parser.h> |
32 | #include <linux/timer.h> | 31 | #include <linux/timer.h> |
@@ -567,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice( | |||
567 | if (IS_ERR(sh)) { | 566 | if (IS_ERR(sh)) { |
568 | pr_err("pSCSI: Unable to locate" | 567 | pr_err("pSCSI: Unable to locate" |
569 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 568 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
570 | return (struct se_device *) sh; | 569 | return ERR_CAST(sh); |
571 | } | 570 | } |
572 | } | 571 | } |
573 | } else { | 572 | } else { |
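The ERR_CAST() conversion above is the idiomatic replacement for the raw pointer cast: it preserves the encoded errno while changing the pointee type, instead of hiding the ERR_PTR encoding behind a C cast. A small sketch with hypothetical types:

#include <linux/err.h>

struct widget;
struct gadget;

/* Sketch: propagate an error pointer through a different return type. */
static struct gadget *gadget_from_widget(struct widget *w)
{
	if (IS_ERR(w))
		return ERR_CAST(w);	/* same -errno value, new pointer type */
	return NULL;			/* real lookup elided in this sketch */
}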
@@ -677,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) | |||
677 | */ | 676 | */ |
678 | static int pscsi_transport_complete(struct se_task *task) | 677 | static int pscsi_transport_complete(struct se_task *task) |
679 | { | 678 | { |
680 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 679 | struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; |
681 | struct scsi_device *sd = pdv->pdv_sd; | 680 | struct scsi_device *sd = pdv->pdv_sd; |
682 | int result; | 681 | int result; |
683 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 682 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
@@ -777,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb) | |||
777 | return &pt->pscsi_task; | 776 | return &pt->pscsi_task; |
778 | } | 777 | } |
779 | 778 | ||
780 | static inline void pscsi_blk_init_request( | ||
781 | struct se_task *task, | ||
782 | struct pscsi_plugin_task *pt, | ||
783 | struct request *req, | ||
784 | int bidi_read) | ||
785 | { | ||
786 | /* | ||
787 | * Defined as "scsi command" in include/linux/blkdev.h. | ||
788 | */ | ||
789 | req->cmd_type = REQ_TYPE_BLOCK_PC; | ||
790 | /* | ||
791 | * For the extra BIDI-COMMAND READ struct request we do not | ||
792 | * need to setup the remaining structure members | ||
793 | */ | ||
794 | if (bidi_read) | ||
795 | return; | ||
796 | /* | ||
797 | * Setup the done function pointer for struct request, | ||
798 | * also set the end_io_data pointer to struct se_task. | ||
799 | */ | ||
800 | req->end_io = pscsi_req_done; | ||
801 | req->end_io_data = task; | ||
802 | /* | ||
803 | * Load the referenced struct se_task's SCSI CDB into | ||
804 | * include/linux/blkdev.h:struct request->cmd | ||
805 | */ | ||
806 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); | ||
807 | req->cmd = &pt->pscsi_cdb[0]; | ||
808 | /* | ||
809 | * Setup pointer for outgoing sense data. | ||
810 | */ | ||
811 | req->sense = &pt->pscsi_sense[0]; | ||
812 | req->sense_len = 0; | ||
813 | } | ||
814 | |||
815 | /* | ||
816 | * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB | ||
817 | */ | ||
818 | static int pscsi_blk_get_request(struct se_task *task) | ||
819 | { | ||
820 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
821 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
822 | |||
823 | pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, | ||
824 | (task->task_data_direction == DMA_TO_DEVICE), | ||
825 | GFP_KERNEL); | ||
826 | if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { | ||
827 | pr_err("PSCSI: blk_get_request() failed: %ld\n", | ||
828 | IS_ERR(pt->pscsi_req)); | ||
829 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
830 | } | ||
831 | /* | ||
832 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, | ||
833 | * and setup rq callback, CDB and sense. | ||
834 | */ | ||
835 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | ||
836 | return 0; | ||
837 | } | ||
838 | |||
839 | /* pscsi_do_task(): (Part of se_subsystem_api_t template) | ||
840 | * | ||
841 | * | ||
842 | */ | ||
843 | static int pscsi_do_task(struct se_task *task) | ||
844 | { | ||
845 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
846 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
847 | /* | ||
848 | * Set the struct request->timeout value based on peripheral | ||
849 | * device type from SCSI. | ||
850 | */ | ||
851 | if (pdv->pdv_sd->type == TYPE_DISK) | ||
852 | pt->pscsi_req->timeout = PS_TIMEOUT_DISK; | ||
853 | else | ||
854 | pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; | ||
855 | |||
856 | pt->pscsi_req->retries = PS_RETRY; | ||
857 | /* | ||
858 | * Queue the struct request into the struct scsi_device->request_queue. | ||
859 | * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd | ||
860 | * descriptor | ||
861 | */ | ||
862 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, | ||
863 | (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), | ||
864 | pscsi_req_done); | ||
865 | |||
866 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
867 | } | ||
868 | |||
869 | static void pscsi_free_task(struct se_task *task) | 779 | static void pscsi_free_task(struct se_task *task) |
870 | { | 780 | { |
871 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 781 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
@@ -1049,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num) | |||
1049 | return bio; | 959 | return bio; |
1050 | } | 960 | } |
1051 | 961 | ||
1052 | static int __pscsi_map_SG( | 962 | static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, |
1053 | struct se_task *task, | 963 | struct bio **hbio) |
1054 | struct scatterlist *task_sg, | ||
1055 | u32 task_sg_num, | ||
1056 | int bidi_read) | ||
1057 | { | 964 | { |
1058 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 965 | struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; |
1059 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 966 | u32 task_sg_num = task->task_sg_nents; |
1060 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | 967 | struct bio *bio = NULL, *tbio = NULL; |
1061 | struct page *page; | 968 | struct page *page; |
1062 | struct scatterlist *sg; | 969 | struct scatterlist *sg; |
1063 | u32 data_len = task->task_size, i, len, bytes, off; | 970 | u32 data_len = task->task_size, i, len, bytes, off; |
@@ -1066,19 +973,8 @@ static int __pscsi_map_SG( | |||
1066 | int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 973 | int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
1067 | int rw = (task->task_data_direction == DMA_TO_DEVICE); | 974 | int rw = (task->task_data_direction == DMA_TO_DEVICE); |
1068 | 975 | ||
1069 | if (!task->task_size) | 976 | *hbio = NULL; |
1070 | return 0; | 977 | |
1071 | /* | ||
1072 | * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup | ||
1073 | * the bio_vec maplist from task->task_sg -> | ||
1074 | * struct scatterlist memory. The struct se_task->task_sg[] currently needs | ||
1075 | * to be attached to struct bios for submission to Linux/SCSI using | ||
1076 | * struct request to struct scsi_device->request_queue. | ||
1077 | * | ||
1078 | * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI | ||
1079 | * is ported to upstream SCSI passthrough functionality that accepts | ||
1080 | * struct scatterlist->page_link or struct page as a parameter. | ||
1081 | */ | ||
1082 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); | 978 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); |
1083 | 979 | ||
1084 | for_each_sg(task_sg, sg, task_sg_num, i) { | 980 | for_each_sg(task_sg, sg, task_sg_num, i) { |
@@ -1115,8 +1011,8 @@ static int __pscsi_map_SG( | |||
1115 | * bios need to be added to complete a given | 1011 | * bios need to be added to complete a given |
1116 | * struct se_task | 1012 | * struct se_task |
1117 | */ | 1013 | */ |
1118 | if (!hbio) | 1014 | if (!*hbio) |
1119 | hbio = tbio = bio; | 1015 | *hbio = tbio = bio; |
1120 | else | 1016 | else |
1121 | tbio = tbio->bi_next = bio; | 1017 | tbio = tbio->bi_next = bio; |
1122 | } | 1018 | } |
@@ -1152,92 +1048,82 @@ static int __pscsi_map_SG( | |||
1152 | off = 0; | 1048 | off = 0; |
1153 | } | 1049 | } |
1154 | } | 1050 | } |
1155 | /* | ||
1156 | * Setup the primary pt->pscsi_req used for non-BIDI and BIDI-COMMAND | ||
1157 | * primary SCSI WRITE payload mapped for struct se_task->task_sg[] | ||
1158 | */ | ||
1159 | if (!bidi_read) { | ||
1160 | /* | ||
1161 | * Starting with v2.6.31, call blk_make_request() passing in *hbio to | ||
1162 | * allocate the pSCSI task a struct request. | ||
1163 | */ | ||
1164 | pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, | ||
1165 | hbio, GFP_KERNEL); | ||
1166 | if (!pt->pscsi_req) { | ||
1167 | pr_err("pSCSI: blk_make_request() failed\n"); | ||
1168 | goto fail; | ||
1169 | } | ||
1170 | /* | ||
1171 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, | ||
1172 | * and setup rq callback, CDB and sense. | ||
1173 | */ | ||
1174 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | ||
1175 | |||
1176 | return task->task_sg_nents; | ||
1177 | } | ||
1178 | /* | ||
1179 | * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND | ||
1180 | * SCSI READ payload mapped for struct se_task->task_sg_bidi[] | ||
1181 | */ | ||
1182 | pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, | ||
1183 | hbio, GFP_KERNEL); | ||
1184 | if (!pt->pscsi_req->next_rq) { | ||
1185 | pr_err("pSCSI: blk_make_request() failed for BIDI\n"); | ||
1186 | goto fail; | ||
1187 | } | ||
1188 | pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); | ||
1189 | 1051 | ||
1190 | return task->task_sg_nents; | 1052 | return task->task_sg_nents; |
1191 | fail: | 1053 | fail: |
1192 | while (hbio) { | 1054 | while (*hbio) { |
1193 | bio = hbio; | 1055 | bio = *hbio; |
1194 | hbio = hbio->bi_next; | 1056 | *hbio = (*hbio)->bi_next; |
1195 | bio->bi_next = NULL; | 1057 | bio->bi_next = NULL; |
1196 | bio_endio(bio, 0); | 1058 | bio_endio(bio, 0); /* XXX: should be error */ |
1197 | } | 1059 | } |
1198 | return ret; | 1060 | return ret; |
1199 | } | 1061 | } |
1200 | 1062 | ||
1201 | /* | 1063 | static int pscsi_do_task(struct se_task *task) |
1202 | * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. | ||
1203 | */ | ||
1204 | static int pscsi_map_SG(struct se_task *task) | ||
1205 | { | 1064 | { |
1065 | struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; | ||
1066 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1067 | struct request *req; | ||
1068 | struct bio *hbio; | ||
1206 | int ret; | 1069 | int ret; |
1207 | 1070 | ||
1208 | /* | 1071 | target_get_task_cdb(task, pt->pscsi_cdb); |
1209 | * Setup the main struct request for the task->task_sg[] payload | 1072 | |
1210 | */ | 1073 | if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { |
1074 | req = blk_get_request(pdv->pdv_sd->request_queue, | ||
1075 | (task->task_data_direction == DMA_TO_DEVICE), | ||
1076 | GFP_KERNEL); | ||
1077 | if (!req || IS_ERR(req)) { | ||
1078 | pr_err("PSCSI: blk_get_request() failed: %ld\n", | ||
1079 | req ? IS_ERR(req) : -ENOMEM); | ||
1080 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1081 | } | ||
1082 | } else { | ||
1083 | BUG_ON(!task->task_size); | ||
1211 | 1084 | ||
1212 | ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); | ||
1213 | if (ret >= 0 && task->task_sg_bidi) { | ||
1214 | /* | 1085 | /* |
1215 | * If present, set up the extra BIDI-COMMAND SCSI READ | 1086 | * Setup the main struct request for the task->task_sg[] payload |
1216 | * struct request and payload. | ||
1217 | */ | 1087 | */ |
1218 | ret = __pscsi_map_SG(task, task->task_sg_bidi, | 1088 | ret = pscsi_map_sg(task, task->task_sg, &hbio); |
1219 | task->task_sg_nents, 1); | 1089 | if (ret < 0) |
1090 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1091 | |||
1092 | req = blk_make_request(pdv->pdv_sd->request_queue, hbio, | ||
1093 | GFP_KERNEL); | ||
1094 | if (!req) { | ||
1095 | pr_err("pSCSI: blk_make_request() failed\n"); | ||
1096 | goto fail; | ||
1097 | } | ||
1220 | } | 1098 | } |
1221 | 1099 | ||
1222 | if (ret < 0) | 1100 | req->cmd_type = REQ_TYPE_BLOCK_PC; |
1223 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 1101 | req->end_io = pscsi_req_done; |
1224 | return 0; | 1102 | req->end_io_data = task; |
1225 | } | 1103 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); |
1104 | req->cmd = &pt->pscsi_cdb[0]; | ||
1105 | req->sense = &pt->pscsi_sense[0]; | ||
1106 | req->sense_len = 0; | ||
1107 | if (pdv->pdv_sd->type == TYPE_DISK) | ||
1108 | req->timeout = PS_TIMEOUT_DISK; | ||
1109 | else | ||
1110 | req->timeout = PS_TIMEOUT_OTHER; | ||
1111 | req->retries = PS_RETRY; | ||
1226 | 1112 | ||
1227 | static int pscsi_CDB_none(struct se_task *task) | 1113 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, |
1228 | { | 1114 | (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), |
1229 | return pscsi_blk_get_request(task); | 1115 | pscsi_req_done); |
1230 | } | ||
1231 | 1116 | ||
1232 | /* pscsi_get_cdb(): | 1117 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
1233 | * | ||
1234 | * | ||
1235 | */ | ||
1236 | static unsigned char *pscsi_get_cdb(struct se_task *task) | ||
1237 | { | ||
1238 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1239 | 1118 | ||
1240 | return pt->pscsi_cdb; | 1119 | fail: |
1120 | while (hbio) { | ||
1121 | struct bio *bio = hbio; | ||
1122 | hbio = hbio->bi_next; | ||
1123 | bio->bi_next = NULL; | ||
1124 | bio_endio(bio, 0); /* XXX: should be error */ | ||
1125 | } | ||
1126 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
1241 | } | 1127 | } |
1242 | 1128 | ||
1243 | /* pscsi_get_sense_buffer(): | 1129 | /* pscsi_get_sense_buffer(): |
@@ -1328,23 +1214,13 @@ static void pscsi_req_done(struct request *req, int uptodate) | |||
1328 | pt->pscsi_resid = req->resid_len; | 1214 | pt->pscsi_resid = req->resid_len; |
1329 | 1215 | ||
1330 | pscsi_process_SAM_status(task, pt); | 1216 | pscsi_process_SAM_status(task, pt); |
1331 | /* | ||
1332 | * Release BIDI-READ if present | ||
1333 | */ | ||
1334 | if (req->next_rq != NULL) | ||
1335 | __blk_put_request(req->q, req->next_rq); | ||
1336 | |||
1337 | __blk_put_request(req->q, req); | 1217 | __blk_put_request(req->q, req); |
1338 | pt->pscsi_req = NULL; | ||
1339 | } | 1218 | } |
1340 | 1219 | ||
1341 | static struct se_subsystem_api pscsi_template = { | 1220 | static struct se_subsystem_api pscsi_template = { |
1342 | .name = "pscsi", | 1221 | .name = "pscsi", |
1343 | .owner = THIS_MODULE, | 1222 | .owner = THIS_MODULE, |
1344 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, | 1223 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, |
1345 | .cdb_none = pscsi_CDB_none, | ||
1346 | .map_control_SG = pscsi_map_SG, | ||
1347 | .map_data_SG = pscsi_map_SG, | ||
1348 | .attach_hba = pscsi_attach_hba, | 1224 | .attach_hba = pscsi_attach_hba, |
1349 | .detach_hba = pscsi_detach_hba, | 1225 | .detach_hba = pscsi_detach_hba, |
1350 | .pmode_enable_hba = pscsi_pmode_enable_hba, | 1226 | .pmode_enable_hba = pscsi_pmode_enable_hba, |
@@ -1358,7 +1234,6 @@ static struct se_subsystem_api pscsi_template = { | |||
1358 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, | 1234 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, |
1359 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, | 1235 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, |
1360 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | 1236 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, |
1361 | .get_cdb = pscsi_get_cdb, | ||
1362 | .get_sense_buffer = pscsi_get_sense_buffer, | 1237 | .get_sense_buffer = pscsi_get_sense_buffer, |
1363 | .get_device_rev = pscsi_get_device_rev, | 1238 | .get_device_rev = pscsi_get_device_rev, |
1364 | .get_device_type = pscsi_get_device_type, | 1239 | .get_device_type = pscsi_get_device_type, |
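The consolidated pscsi_do_task() now owns the whole submission path: choose the request allocator by whether the command carries data, fill in the BLOCK_PC fields once, and fire the request asynchronously. A trimmed kernel-context sketch of that shape (submit_block_pc is a hypothetical helper; timeout/retry setup and the bio cleanup path are omitted for brevity):

#include <linux/blkdev.h>
#include <linux/err.h>
#include <scsi/scsi.h>

/* Sketch: one entry point for data and non-data SCSI passthrough. */
static int submit_block_pc(struct request_queue *q, unsigned char *cdb,
			   struct bio *hbio, void *priv, rq_end_io_fn *done)
{
	struct request *req;

	if (!hbio) {
		/* non-data command: an empty request is enough */
		req = blk_get_request(q, 0 /* READ */, GFP_KERNEL);
		if (!req)
			return -ENOMEM;
	} else {
		/* data command: wrap the bio chain built from the S/G list */
		req = blk_make_request(q, hbio, GFP_KERNEL);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI passthrough */
	req->cmd = cdb;
	req->cmd_len = scsi_command_size(cdb);
	req->end_io_data = priv;

	blk_execute_rq_nowait(q, NULL, req, 0, done);
	return 0;
}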
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index ebf4f1ae2c83..fdc17b6aefb3 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h | |||
@@ -27,7 +27,6 @@ struct pscsi_plugin_task { | |||
27 | int pscsi_direction; | 27 | int pscsi_direction; |
28 | int pscsi_result; | 28 | int pscsi_result; |
29 | u32 pscsi_resid; | 29 | u32 pscsi_resid; |
30 | struct request *pscsi_req; | ||
31 | unsigned char pscsi_cdb[0]; | 30 | unsigned char pscsi_cdb[0]; |
32 | } ____cacheline_aligned; | 31 | } ____cacheline_aligned; |
33 | 32 | ||
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index e567e129c697..5158d3846f19 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c | |||
@@ -27,7 +27,6 @@ | |||
27 | * | 27 | * |
28 | ******************************************************************************/ | 28 | ******************************************************************************/ |
29 | 29 | ||
30 | #include <linux/version.h> | ||
31 | #include <linux/string.h> | 30 | #include <linux/string.h> |
32 | #include <linux/parser.h> | 31 | #include <linux/parser.h> |
33 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
@@ -351,7 +350,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | |||
351 | static int rd_MEMCPY_read(struct rd_request *req) | 350 | static int rd_MEMCPY_read(struct rd_request *req) |
352 | { | 351 | { |
353 | struct se_task *task = &req->rd_task; | 352 | struct se_task *task = &req->rd_task; |
354 | struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; | 353 | struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; |
355 | struct rd_dev_sg_table *table; | 354 | struct rd_dev_sg_table *table; |
356 | struct scatterlist *sg_d, *sg_s; | 355 | struct scatterlist *sg_d, *sg_s; |
357 | void *dst, *src; | 356 | void *dst, *src; |
@@ -467,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req) | |||
467 | static int rd_MEMCPY_write(struct rd_request *req) | 466 | static int rd_MEMCPY_write(struct rd_request *req) |
468 | { | 467 | { |
469 | struct se_task *task = &req->rd_task; | 468 | struct se_task *task = &req->rd_task; |
470 | struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; | 469 | struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; |
471 | struct rd_dev_sg_table *table; | 470 | struct rd_dev_sg_table *table; |
472 | struct scatterlist *sg_d, *sg_s; | 471 | struct scatterlist *sg_d, *sg_s; |
473 | void *dst, *src; | 472 | void *dst, *src; |
@@ -582,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req) | |||
582 | */ | 581 | */ |
583 | static int rd_MEMCPY_do_task(struct se_task *task) | 582 | static int rd_MEMCPY_do_task(struct se_task *task) |
584 | { | 583 | { |
585 | struct se_device *dev = task->se_dev; | 584 | struct se_device *dev = task->task_se_cmd->se_dev; |
586 | struct rd_request *req = RD_REQ(task); | 585 | struct rd_request *req = RD_REQ(task); |
587 | unsigned long long lba; | 586 | unsigned long long lba; |
588 | int ret; | 587 | int ret; |
@@ -692,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params( | |||
692 | return bl; | 691 | return bl; |
693 | } | 692 | } |
694 | 693 | ||
695 | /* rd_get_cdb(): (Part of se_subsystem_api_t template) | ||
696 | * | ||
697 | * | ||
698 | */ | ||
699 | static unsigned char *rd_get_cdb(struct se_task *task) | ||
700 | { | ||
701 | struct rd_request *req = RD_REQ(task); | ||
702 | |||
703 | return req->rd_scsi_cdb; | ||
704 | } | ||
705 | |||
706 | static u32 rd_get_device_rev(struct se_device *dev) | 694 | static u32 rd_get_device_rev(struct se_device *dev) |
707 | { | 695 | { |
708 | return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ | 696 | return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ |
@@ -736,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = { | |||
736 | .check_configfs_dev_params = rd_check_configfs_dev_params, | 724 | .check_configfs_dev_params = rd_check_configfs_dev_params, |
737 | .set_configfs_dev_params = rd_set_configfs_dev_params, | 725 | .set_configfs_dev_params = rd_set_configfs_dev_params, |
738 | .show_configfs_dev_params = rd_show_configfs_dev_params, | 726 | .show_configfs_dev_params = rd_show_configfs_dev_params, |
739 | .get_cdb = rd_get_cdb, | ||
740 | .get_device_rev = rd_get_device_rev, | 727 | .get_device_rev = rd_get_device_rev, |
741 | .get_device_type = rd_get_device_type, | 728 | .get_device_type = rd_get_device_type, |
742 | .get_blocks = rd_get_blocks, | 729 | .get_blocks = rd_get_blocks, |
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 0d027732cd00..784e56a04100 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h | |||
@@ -22,8 +22,6 @@ void rd_module_exit(void); | |||
22 | struct rd_request { | 22 | struct rd_request { |
23 | struct se_task rd_task; | 23 | struct se_task rd_task; |
24 | 24 | ||
25 | /* SCSI CDB from iSCSI Command PDU */ | ||
26 | unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
27 | /* Offset from start of page */ | 25 | /* Offset from start of page */ |
28 | u32 rd_offset; | 26 | u32 rd_offset; |
29 | /* Starting page in Ramdisk for request */ | 27 | /* Starting page in Ramdisk for request */ |
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c deleted file mode 100644 index 72843441d4fa..000000000000 --- a/drivers/target/target_core_scdb.c +++ /dev/null | |||
@@ -1,105 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_scdb.c | ||
3 | * | ||
4 | * This file contains the generic target engine Split CDB related functions. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/net.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <scsi/scsi.h> | ||
32 | #include <asm/unaligned.h> | ||
33 | |||
34 | #include <target/target_core_base.h> | ||
35 | #include <target/target_core_transport.h> | ||
36 | |||
37 | #include "target_core_scdb.h" | ||
38 | |||
39 | /* split_cdb_XX_6(): | ||
40 | * | ||
41 | * 21-bit LBA w/ 8-bit SECTORS | ||
42 | */ | ||
43 | void split_cdb_XX_6( | ||
44 | unsigned long long lba, | ||
45 | u32 sectors, | ||
46 | unsigned char *cdb) | ||
47 | { | ||
48 | cdb[1] = (lba >> 16) & 0x1f; | ||
49 | cdb[2] = (lba >> 8) & 0xff; | ||
50 | cdb[3] = lba & 0xff; | ||
51 | cdb[4] = sectors & 0xff; | ||
52 | } | ||
53 | |||
54 | /* split_cdb_XX_10(): | ||
55 | * | ||
56 | * 32-bit LBA w/ 16-bit SECTORS | ||
57 | */ | ||
58 | void split_cdb_XX_10( | ||
59 | unsigned long long lba, | ||
60 | u32 sectors, | ||
61 | unsigned char *cdb) | ||
62 | { | ||
63 | put_unaligned_be32(lba, &cdb[2]); | ||
64 | put_unaligned_be16(sectors, &cdb[7]); | ||
65 | } | ||
66 | |||
67 | /* split_cdb_XX_12(): | ||
68 | * | ||
69 | * 32-bit LBA w/ 32-bit SECTORS | ||
70 | */ | ||
71 | void split_cdb_XX_12( | ||
72 | unsigned long long lba, | ||
73 | u32 sectors, | ||
74 | unsigned char *cdb) | ||
75 | { | ||
76 | put_unaligned_be32(lba, &cdb[2]); | ||
77 | put_unaligned_be32(sectors, &cdb[6]); | ||
78 | } | ||
79 | |||
80 | /* split_cdb_XX_16(): | ||
81 | * | ||
82 | * 64-bit LBA w/ 32-bit SECTORS | ||
83 | */ | ||
84 | void split_cdb_XX_16( | ||
85 | unsigned long long lba, | ||
86 | u32 sectors, | ||
87 | unsigned char *cdb) | ||
88 | { | ||
89 | put_unaligned_be64(lba, &cdb[2]); | ||
90 | put_unaligned_be32(sectors, &cdb[10]); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * split_cdb_XX_32(): | ||
95 | * | ||
96 | * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 | ||
97 | */ | ||
98 | void split_cdb_XX_32( | ||
99 | unsigned long long lba, | ||
100 | u32 sectors, | ||
101 | unsigned char *cdb) | ||
102 | { | ||
103 | put_unaligned_be64(lba, &cdb[12]); | ||
104 | put_unaligned_be32(sectors, &cdb[28]); | ||
105 | } | ||
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h deleted file mode 100644 index 48e9ccc9585e..000000000000 --- a/drivers/target/target_core_scdb.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef TARGET_CORE_SCDB_H | ||
2 | #define TARGET_CORE_SCDB_H | ||
3 | |||
4 | extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *); | ||
5 | extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); | ||
6 | extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); | ||
7 | extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); | ||
8 | extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); | ||
9 | |||
10 | #endif /* TARGET_CORE_SCDB_H */ | ||
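The deleted split_cdb_XX_* helpers existed only to re-encode an LBA and sector count into the CDB of each task split out of a larger command; with task splitting removed they have no callers left. For reference, a standalone userspace re-creation of the READ(10)/WRITE(10) encoding they performed (pack_cdb_10 is a hypothetical name; the kernel helpers used put_unaligned_be32/be16 for the same layout):

#include <stdint.h>
#include <stdio.h>

/* Pack a 32-bit LBA at bytes 2-5 and a 16-bit transfer length at
 * bytes 7-8 of a 10-byte CDB, big-endian, per SBC READ(10)/WRITE(10). */
static void pack_cdb_10(uint64_t lba, uint32_t sectors, uint8_t *cdb)
{
	cdb[2] = lba >> 24;
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[7] = sectors >> 8;
	cdb[8] = sectors;
}

int main(void)
{
	uint8_t cdb[10] = { 0x28 };		/* READ(10) opcode */

	pack_cdb_10(0x12345678, 256, cdb);	/* LBA, 256 sectors */
	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}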
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index a8d6e1dee938..874152aed94a 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/version.h> | ||
36 | #include <generated/utsrelease.h> | 35 | #include <generated/utsrelease.h> |
37 | #include <linux/utsname.h> | 36 | #include <linux/utsname.h> |
38 | #include <linux/proc_fs.h> | 37 | #include <linux/proc_fs.h> |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 27d4925e51c3..570b144a1edb 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -24,7 +24,6 @@ | |||
24 | * | 24 | * |
25 | ******************************************************************************/ | 25 | ******************************************************************************/ |
26 | 26 | ||
27 | #include <linux/version.h> | ||
28 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
29 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
30 | #include <linux/list.h> | 29 | #include <linux/list.h> |
@@ -44,12 +43,12 @@ | |||
44 | struct se_tmr_req *core_tmr_alloc_req( | 43 | struct se_tmr_req *core_tmr_alloc_req( |
45 | struct se_cmd *se_cmd, | 44 | struct se_cmd *se_cmd, |
46 | void *fabric_tmr_ptr, | 45 | void *fabric_tmr_ptr, |
47 | u8 function) | 46 | u8 function, |
47 | gfp_t gfp_flags) | ||
48 | { | 48 | { |
49 | struct se_tmr_req *tmr; | 49 | struct se_tmr_req *tmr; |
50 | 50 | ||
51 | tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? | 51 | tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags); |
52 | GFP_ATOMIC : GFP_KERNEL); | ||
53 | if (!tmr) { | 52 | if (!tmr) { |
54 | pr_err("Unable to allocate struct se_tmr_req\n"); | 53 | pr_err("Unable to allocate struct se_tmr_req\n"); |
55 | return ERR_PTR(-ENOMEM); | 54 | return ERR_PTR(-ENOMEM); |
@@ -67,15 +66,16 @@ void core_tmr_release_req( | |||
67 | struct se_tmr_req *tmr) | 66 | struct se_tmr_req *tmr) |
68 | { | 67 | { |
69 | struct se_device *dev = tmr->tmr_dev; | 68 | struct se_device *dev = tmr->tmr_dev; |
69 | unsigned long flags; | ||
70 | 70 | ||
71 | if (!dev) { | 71 | if (!dev) { |
72 | kmem_cache_free(se_tmr_req_cache, tmr); | 72 | kmem_cache_free(se_tmr_req_cache, tmr); |
73 | return; | 73 | return; |
74 | } | 74 | } |
75 | 75 | ||
76 | spin_lock_irq(&dev->se_tmr_lock); | 76 | spin_lock_irqsave(&dev->se_tmr_lock, flags); |
77 | list_del(&tmr->tmr_list); | 77 | list_del(&tmr->tmr_list); |
78 | spin_unlock_irq(&dev->se_tmr_lock); | 78 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); |
79 | 79 | ||
80 | kmem_cache_free(se_tmr_req_cache, tmr); | 80 | kmem_cache_free(se_tmr_req_cache, tmr); |
81 | } | 81 | } |
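The two hunks above make the same convention change: the caller states its allocation context through an explicit gfp_t instead of the callee guessing with in_interrupt(), and list teardown takes spin_lock_irqsave() because it can now be reached from any context, including with interrupts already disabled. A small sketch of the combined pattern (struct obj and its helpers are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_lock);
static LIST_HEAD(obj_list);

/* Caller picks GFP_KERNEL or GFP_ATOMIC based on its own context. */
static struct obj *obj_alloc(gfp_t gfp)
{
	struct obj *o = kzalloc(sizeof(*o), gfp);
	unsigned long flags;

	if (!o)
		return NULL;
	spin_lock_irqsave(&obj_lock, flags);
	list_add_tail(&o->node, &obj_list);
	spin_unlock_irqrestore(&obj_lock, flags);
	return o;
}

static void obj_release(struct obj *o)
{
	unsigned long flags;

	/* irqsave, not spin_lock_irq(): safe if the caller already
	 * runs with interrupts disabled. */
	spin_lock_irqsave(&obj_lock, flags);
	list_del(&o->node);
	spin_unlock_irqrestore(&obj_lock, flags);
	kfree(o);
}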
@@ -100,54 +100,20 @@ static void core_tmr_handle_tas_abort( | |||
100 | transport_cmd_finish_abort(cmd, 0); | 100 | transport_cmd_finish_abort(cmd, 0); |
101 | } | 101 | } |
102 | 102 | ||
103 | int core_tmr_lun_reset( | 103 | static void core_tmr_drain_tmr_list( |
104 | struct se_device *dev, | 104 | struct se_device *dev, |
105 | struct se_tmr_req *tmr, | 105 | struct se_tmr_req *tmr, |
106 | struct list_head *preempt_and_abort_list, | 106 | struct list_head *preempt_and_abort_list) |
107 | struct se_cmd *prout_cmd) | ||
108 | { | 107 | { |
109 | struct se_cmd *cmd, *tcmd; | 108 | LIST_HEAD(drain_tmr_list); |
110 | struct se_node_acl *tmr_nacl = NULL; | ||
111 | struct se_portal_group *tmr_tpg = NULL; | ||
112 | struct se_queue_obj *qobj = &dev->dev_queue_obj; | ||
113 | struct se_tmr_req *tmr_p, *tmr_pp; | 109 | struct se_tmr_req *tmr_p, *tmr_pp; |
114 | struct se_task *task, *task_tmp; | 110 | struct se_cmd *cmd; |
115 | unsigned long flags; | 111 | unsigned long flags; |
116 | int fe_count, tas; | ||
117 | /* | ||
118 | * TASK_ABORTED status bit, this is configurable via ConfigFS | ||
119 | * struct se_device attributes. spc4r17 section 7.4.6 Control mode page | ||
120 | * | ||
121 | * A task aborted status (TAS) bit set to zero specifies that aborted | ||
122 | * tasks shall be terminated by the device server without any response | ||
123 | * to the application client. A TAS bit set to one specifies that tasks | ||
124 | * aborted by the actions of an I_T nexus other than the I_T nexus on | ||
125 | * which the command was received shall be completed with TASK ABORTED | ||
126 | * status (see SAM-4). | ||
127 | */ | ||
128 | tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; | ||
129 | /* | ||
130 | * Determine if this se_tmr is coming from a $FABRIC_MOD | ||
131 | * or struct se_device passthrough.. | ||
132 | */ | ||
133 | if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { | ||
134 | tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; | ||
135 | tmr_tpg = tmr->task_cmd->se_sess->se_tpg; | ||
136 | if (tmr_nacl && tmr_tpg) { | ||
137 | pr_debug("LUN_RESET: TMR caller fabric: %s" | ||
138 | " initiator port %s\n", | ||
139 | tmr_tpg->se_tpg_tfo->get_fabric_name(), | ||
140 | tmr_nacl->initiatorname); | ||
141 | } | ||
142 | } | ||
143 | pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", | ||
144 | (preempt_and_abort_list) ? "Preempt" : "TMR", | ||
145 | dev->transport->name, tas); | ||
146 | /* | 112 | /* |
147 | * Release all pending and outgoing TMRs aside from the received | 113 | * Release all pending and outgoing TMRs aside from the received |
148 | * LUN_RESET tmr.. | 114 | * LUN_RESET tmr.. |
149 | */ | 115 | */ |
150 | spin_lock_irq(&dev->se_tmr_lock); | 116 | spin_lock_irqsave(&dev->se_tmr_lock, flags); |
151 | list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { | 117 | list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { |
152 | /* | 118 | /* |
153 | * Allow the received TMR to return with FUNCTION_COMPLETE. | 119 | * Allow the received TMR to return with FUNCTION_COMPLETE. |
@@ -169,29 +135,48 @@ int core_tmr_lun_reset( | |||
169 | (core_scsi3_check_cdb_abort_and_preempt( | 135 | (core_scsi3_check_cdb_abort_and_preempt( |
170 | preempt_and_abort_list, cmd) != 0)) | 136 | preempt_and_abort_list, cmd) != 0)) |
171 | continue; | 137 | continue; |
172 | spin_unlock_irq(&dev->se_tmr_lock); | ||
173 | 138 | ||
174 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 139 | spin_lock(&cmd->t_state_lock); |
175 | if (!atomic_read(&cmd->t_transport_active)) { | 140 | if (!atomic_read(&cmd->t_transport_active)) { |
176 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 141 | spin_unlock(&cmd->t_state_lock); |
177 | spin_lock_irq(&dev->se_tmr_lock); | ||
178 | continue; | 142 | continue; |
179 | } | 143 | } |
180 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { | 144 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { |
181 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 145 | spin_unlock(&cmd->t_state_lock); |
182 | spin_lock_irq(&dev->se_tmr_lock); | ||
183 | continue; | 146 | continue; |
184 | } | 147 | } |
148 | spin_unlock(&cmd->t_state_lock); | ||
149 | |||
150 | list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); | ||
151 | } | ||
152 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); | ||
153 | |||
154 | while (!list_empty(&drain_tmr_list)) { | ||
155 | tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); | ||
156 | list_del(&tmr->tmr_list); | ||
157 | cmd = tmr->task_cmd; | ||
158 | |||
185 | pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," | 159 | pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," |
186 | " Response: 0x%02x, t_state: %d\n", | 160 | " Response: 0x%02x, t_state: %d\n", |
187 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, | 161 | (preempt_and_abort_list) ? "Preempt" : "", tmr, |
188 | tmr_p->function, tmr_p->response, cmd->t_state); | 162 | tmr->function, tmr->response, cmd->t_state); |
189 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
190 | 163 | ||
191 | transport_cmd_finish_abort_tmr(cmd); | 164 | transport_cmd_finish_abort(cmd, 1); |
192 | spin_lock_irq(&dev->se_tmr_lock); | ||
193 | } | 165 | } |
194 | spin_unlock_irq(&dev->se_tmr_lock); | 166 | } |
167 | |||
168 | static void core_tmr_drain_task_list( | ||
169 | struct se_device *dev, | ||
170 | struct se_cmd *prout_cmd, | ||
171 | struct se_node_acl *tmr_nacl, | ||
172 | int tas, | ||
173 | struct list_head *preempt_and_abort_list) | ||
174 | { | ||
175 | LIST_HEAD(drain_task_list); | ||
176 | struct se_cmd *cmd; | ||
177 | struct se_task *task, *task_tmp; | ||
178 | unsigned long flags; | ||
179 | int fe_count; | ||
195 | /* | 180 | /* |
196 | * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. | 181 | * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. |
197 | * This is following sam4r17, section 5.6 Aborting commands, Table 38 | 182 | * This is following sam4r17, section 5.6 Aborting commands, Table 38 |
@@ -236,18 +221,28 @@ int core_tmr_lun_reset( | |||
236 | if (prout_cmd == cmd) | 221 | if (prout_cmd == cmd) |
237 | continue; | 222 | continue; |
238 | 223 | ||
239 | list_del(&task->t_state_list); | 224 | list_move_tail(&task->t_state_list, &drain_task_list); |
240 | atomic_set(&task->task_state_active, 0); | 225 | atomic_set(&task->task_state_active, 0); |
241 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 226 | /* |
227 | * Remove from task execute list before processing drain_task_list | ||
228 | */ | ||
229 | if (!list_empty(&task->t_execute_list)) | ||
230 | __transport_remove_task_from_execute_queue(task, dev); | ||
231 | } | ||
232 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
233 | |||
234 | while (!list_empty(&drain_task_list)) { | ||
235 | task = list_entry(drain_task_list.next, struct se_task, t_state_list); | ||
236 | list_del(&task->t_state_list); | ||
237 | cmd = task->task_se_cmd; | ||
242 | 238 | ||
243 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
244 | pr_debug("LUN_RESET: %s cmd: %p task: %p" | 239 | pr_debug("LUN_RESET: %s cmd: %p task: %p" |
245 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" | 240 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" |
246 | "def_t_state: %d/%d cdb: 0x%02x\n", | 241 | "cdb: 0x%02x\n", |
247 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, | 242 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, |
248 | cmd->se_tfo->get_task_tag(cmd), 0, | 243 | cmd->se_tfo->get_task_tag(cmd), 0, |
249 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | 244 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, |
250 | cmd->deferred_t_state, cmd->t_task_cdb[0]); | 245 | cmd->t_task_cdb[0]); |
251 | pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" | 246 | pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" |
252 | " t_task_cdbs: %d t_task_cdbs_left: %d" | 247 | " t_task_cdbs: %d t_task_cdbs_left: %d" |
253 | " t_task_cdbs_sent: %d -- t_transport_active: %d" | 248 | " t_task_cdbs_sent: %d -- t_transport_active: %d" |
@@ -260,35 +255,24 @@ int core_tmr_lun_reset( | |||
260 | atomic_read(&cmd->t_transport_stop), | 255 | atomic_read(&cmd->t_transport_stop), |
261 | atomic_read(&cmd->t_transport_sent)); | 256 | atomic_read(&cmd->t_transport_sent)); |
262 | 257 | ||
263 | if (atomic_read(&task->task_active)) { | 258 | /* |
264 | atomic_set(&task->task_stop, 1); | 259 | * If the command may be queued onto a workqueue cancel it now. |
265 | spin_unlock_irqrestore( | 260 | * |
266 | &cmd->t_state_lock, flags); | 261 | * This is equivalent to removal from the execute queue in the |
267 | 262 | * loop above, but we do it down here given that | |
268 | pr_debug("LUN_RESET: Waiting for task: %p to shutdown" | 263 | * cancel_work_sync may block. |
269 | " for dev: %p\n", task, dev); | 264 | */ |
270 | wait_for_completion(&task->task_stop_comp); | 265 | if (cmd->t_state == TRANSPORT_COMPLETE) |
271 | pr_debug("LUN_RESET Completed task: %p shutdown for" | 266 | cancel_work_sync(&cmd->work); |
272 | " dev: %p\n", task, dev); | 267 | |
273 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 268 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
274 | atomic_dec(&cmd->t_task_cdbs_left); | 269 | target_stop_task(task, &flags); |
275 | |||
276 | atomic_set(&task->task_active, 0); | ||
277 | atomic_set(&task->task_stop, 0); | ||
278 | } else { | ||
279 | if (atomic_read(&task->task_execute_queue) != 0) | ||
280 | transport_remove_task_from_execute_queue(task, dev); | ||
281 | } | ||
282 | __transport_stop_task_timer(task, &flags); | ||
283 | 270 | ||
284 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { | 271 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { |
285 | spin_unlock_irqrestore( | 272 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
286 | &cmd->t_state_lock, flags); | ||
287 | pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" | 273 | pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" |
288 | " t_task_cdbs_ex_left: %d\n", task, dev, | 274 | " t_task_cdbs_ex_left: %d\n", task, dev, |
289 | atomic_read(&cmd->t_task_cdbs_ex_left)); | 275 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
290 | |||
291 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
292 | continue; | 276 | continue; |
293 | } | 277 | } |
294 | fe_count = atomic_read(&cmd->t_fe_count); | 278 | fe_count = atomic_read(&cmd->t_fe_count); |
@@ -298,22 +282,31 @@ int core_tmr_lun_reset( | |||
298 | " task: %p, t_fe_count: %d dev: %p\n", task, | 282 | " task: %p, t_fe_count: %d dev: %p\n", task, |
299 | fe_count, dev); | 283 | fe_count, dev); |
300 | atomic_set(&cmd->t_transport_aborted, 1); | 284 | atomic_set(&cmd->t_transport_aborted, 1); |
301 | spin_unlock_irqrestore(&cmd->t_state_lock, | 285 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
302 | flags); | ||
303 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | ||
304 | 286 | ||
305 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 287 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
306 | continue; | 288 | continue; |
307 | } | 289 | } |
308 | pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," | 290 | pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," |
309 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); | 291 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); |
310 | atomic_set(&cmd->t_transport_aborted, 1); | 292 | atomic_set(&cmd->t_transport_aborted, 1); |
311 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 293 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
312 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | ||
313 | 294 | ||
314 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 295 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
315 | } | 296 | } |
316 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 297 | } |
298 | |||
299 | static void core_tmr_drain_cmd_list( | ||
300 | struct se_device *dev, | ||
301 | struct se_cmd *prout_cmd, | ||
302 | struct se_node_acl *tmr_nacl, | ||
303 | int tas, | ||
304 | struct list_head *preempt_and_abort_list) | ||
305 | { | ||
306 | LIST_HEAD(drain_cmd_list); | ||
307 | struct se_queue_obj *qobj = &dev->dev_queue_obj; | ||
308 | struct se_cmd *cmd, *tcmd; | ||
309 | unsigned long flags; | ||
317 | /* | 310 | /* |
318 | * Release all commands remaining in the struct se_device cmd queue. | 311 | * Release all commands remaining in the struct se_device cmd queue. |
319 | * | 312 | * |
@@ -337,11 +330,26 @@ int core_tmr_lun_reset( | |||
337 | */ | 330 | */ |
338 | if (prout_cmd == cmd) | 331 | if (prout_cmd == cmd) |
339 | continue; | 332 | continue; |
333 | /* | ||
334 | * Skip direct processing of TRANSPORT_FREE_CMD_INTR for | ||
335 | * HW target mode fabrics. | ||
336 | */ | ||
337 | spin_lock(&cmd->t_state_lock); | ||
338 | if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) { | ||
339 | spin_unlock(&cmd->t_state_lock); | ||
340 | continue; | ||
341 | } | ||
342 | spin_unlock(&cmd->t_state_lock); | ||
340 | 343 | ||
341 | atomic_dec(&cmd->t_transport_queue_active); | 344 | atomic_set(&cmd->t_transport_queue_active, 0); |
342 | atomic_dec(&qobj->queue_cnt); | 345 | atomic_dec(&qobj->queue_cnt); |
343 | list_del(&cmd->se_queue_node); | 346 | list_move_tail(&cmd->se_queue_node, &drain_cmd_list); |
344 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 347 | } |
348 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
349 | |||
350 | while (!list_empty(&drain_cmd_list)) { | ||
351 | cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); | ||
352 | list_del_init(&cmd->se_queue_node); | ||
345 | 353 | ||
346 | pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" | 354 | pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" |
347 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? | 355 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? |
@@ -354,9 +362,53 @@ int core_tmr_lun_reset( | |||
354 | 362 | ||
355 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, | 363 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, |
356 | atomic_read(&cmd->t_fe_count)); | 364 | atomic_read(&cmd->t_fe_count)); |
357 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
358 | } | 365 | } |
359 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 366 | } |
367 | |||
368 | int core_tmr_lun_reset( | ||
369 | struct se_device *dev, | ||
370 | struct se_tmr_req *tmr, | ||
371 | struct list_head *preempt_and_abort_list, | ||
372 | struct se_cmd *prout_cmd) | ||
373 | { | ||
374 | struct se_node_acl *tmr_nacl = NULL; | ||
375 | struct se_portal_group *tmr_tpg = NULL; | ||
376 | int tas; | ||
377 | /* | ||
378 | * TASK_ABORTED status bit, this is configurable via ConfigFS | ||
379 | * struct se_device attributes. spc4r17 section 7.4.6 Control mode page | ||
380 | * | ||
381 | * A task aborted status (TAS) bit set to zero specifies that aborted | ||
382 | * tasks shall be terminated by the device server without any response | ||
383 | * to the application client. A TAS bit set to one specifies that tasks | ||
384 | * aborted by the actions of an I_T nexus other than the I_T nexus on | ||
385 | * which the command was received shall be completed with TASK ABORTED | ||
386 | * status (see SAM-4). | ||
387 | */ | ||
388 | tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; | ||
389 | /* | ||
390 | * Determine if this se_tmr is coming from a $FABRIC_MOD | ||
391 | * or struct se_device passthrough.. | ||
392 | */ | ||
393 | if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { | ||
394 | tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; | ||
395 | tmr_tpg = tmr->task_cmd->se_sess->se_tpg; | ||
396 | if (tmr_nacl && tmr_tpg) { | ||
397 | pr_debug("LUN_RESET: TMR caller fabric: %s" | ||
398 | " initiator port %s\n", | ||
399 | tmr_tpg->se_tpg_tfo->get_fabric_name(), | ||
400 | tmr_nacl->initiatorname); | ||
401 | } | ||
402 | } | ||
403 | pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", | ||
404 | (preempt_and_abort_list) ? "Preempt" : "TMR", | ||
405 | dev->transport->name, tas); | ||
406 | |||
407 | core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); | ||
408 | core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, | ||
409 | preempt_and_abort_list); | ||
410 | core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, | ||
411 | preempt_and_abort_list); | ||
360 | /* | 412 | /* |
361 | * Clear any legacy SPC-2 reservation when called during | 413 | * Clear any legacy SPC-2 reservation when called during |
362 | * LOGICAL UNIT RESET | 414 | * LOGICAL UNIT RESET |
@@ -379,3 +431,4 @@ int core_tmr_lun_reset( | |||
379 | dev->transport->name); | 431 | dev->transport->name); |
380 | return 0; | 432 | return 0; |
381 | } | 433 | } |
434 | |||
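core_tmr_lun_reset() is now a thin driver over three drain helpers, each built on the same idiom: select victims under the lock by moving them onto a private list, drop the lock, then process them where blocking is safe. A compact sketch of that idiom (struct item and drain_dead_items are hypothetical names):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	bool dead;
};

/* Sketch: drain matching entries without dropping and retaking the
 * lock once per entry, and without holding it across blocking work. */
static void drain_dead_items(struct list_head *src, spinlock_t *lock)
{
	LIST_HEAD(drain);
	struct item *it, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(it, tmp, src, node)
		if (it->dead)
			list_move_tail(&it->node, &drain);
	spin_unlock_irqrestore(lock, flags);

	while (!list_empty(&drain)) {
		it = list_first_entry(&drain, struct item, node);
		list_del(&it->node);
		kfree(it);	/* lock not held: may block or reschedule here */
	}
}

This is why the task-list helper can call cancel_work_sync() safely: by the time entries are processed, no spinlock is held.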
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a4b0a8d27f25..d75255804481 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -26,7 +26,6 @@ | |||
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/version.h> | ||
30 | #include <linux/net.h> | 29 | #include <linux/net.h> |
31 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
32 | #include <linux/string.h> | 31 | #include <linux/string.h> |
@@ -55,11 +54,11 @@ | |||
55 | #include "target_core_alua.h" | 54 | #include "target_core_alua.h" |
56 | #include "target_core_hba.h" | 55 | #include "target_core_hba.h" |
57 | #include "target_core_pr.h" | 56 | #include "target_core_pr.h" |
58 | #include "target_core_scdb.h" | ||
59 | #include "target_core_ua.h" | 57 | #include "target_core_ua.h" |
60 | 58 | ||
61 | static int sub_api_initialized; | 59 | static int sub_api_initialized; |
62 | 60 | ||
61 | static struct workqueue_struct *target_completion_wq; | ||
63 | static struct kmem_cache *se_cmd_cache; | 62 | static struct kmem_cache *se_cmd_cache; |
64 | static struct kmem_cache *se_sess_cache; | 63 | static struct kmem_cache *se_sess_cache; |
65 | struct kmem_cache *se_tmr_req_cache; | 64 | struct kmem_cache *se_tmr_req_cache; |
@@ -70,30 +69,19 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache; | |||
70 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | 69 | struct kmem_cache *t10_alua_tg_pt_gp_cache; |
71 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | 70 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; |
72 | 71 | ||
73 | /* Used for transport_dev_get_map_*() */ | ||
74 | typedef int (*map_func_t)(struct se_task *, u32); | ||
75 | |||
76 | static int transport_generic_write_pending(struct se_cmd *); | 72 | static int transport_generic_write_pending(struct se_cmd *); |
77 | static int transport_processing_thread(void *param); | 73 | static int transport_processing_thread(void *param); |
78 | static int __transport_execute_tasks(struct se_device *dev); | 74 | static int __transport_execute_tasks(struct se_device *dev); |
79 | static void transport_complete_task_attr(struct se_cmd *cmd); | 75 | static void transport_complete_task_attr(struct se_cmd *cmd); |
80 | static int transport_complete_qf(struct se_cmd *cmd); | ||
81 | static void transport_handle_queue_full(struct se_cmd *cmd, | 76 | static void transport_handle_queue_full(struct se_cmd *cmd, |
82 | struct se_device *dev, int (*qf_callback)(struct se_cmd *)); | 77 | struct se_device *dev); |
83 | static void transport_direct_request_timeout(struct se_cmd *cmd); | ||
84 | static void transport_free_dev_tasks(struct se_cmd *cmd); | 78 | static void transport_free_dev_tasks(struct se_cmd *cmd); |
85 | static u32 transport_allocate_tasks(struct se_cmd *cmd, | ||
86 | unsigned long long starting_lba, | ||
87 | enum dma_data_direction data_direction, | ||
88 | struct scatterlist *sgl, unsigned int nents); | ||
89 | static int transport_generic_get_mem(struct se_cmd *cmd); | 79 | static int transport_generic_get_mem(struct se_cmd *cmd); |
90 | static int transport_generic_remove(struct se_cmd *cmd, | 80 | static void transport_put_cmd(struct se_cmd *cmd); |
91 | int session_reinstatement); | 81 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); |
92 | static void transport_release_fe_cmd(struct se_cmd *cmd); | ||
93 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | ||
94 | struct se_queue_obj *qobj); | ||
95 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 82 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
96 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | 83 | static void transport_generic_request_failure(struct se_cmd *, int, int); |
84 | static void target_complete_ok_work(struct work_struct *work); | ||
97 | 85 | ||
98 | int init_se_kmem_caches(void) | 86 | int init_se_kmem_caches(void) |
99 | { | 87 | { |
@@ -109,7 +97,7 @@ int init_se_kmem_caches(void) | |||
109 | if (!se_tmr_req_cache) { | 97 | if (!se_tmr_req_cache) { |
110 | pr_err("kmem_cache_create() for struct se_tmr_req" | 98 | pr_err("kmem_cache_create() for struct se_tmr_req" |
111 | " failed\n"); | 99 | " failed\n"); |
112 | goto out; | 100 | goto out_free_cmd_cache; |
113 | } | 101 | } |
114 | se_sess_cache = kmem_cache_create("se_sess_cache", | 102 | se_sess_cache = kmem_cache_create("se_sess_cache", |
115 | sizeof(struct se_session), __alignof__(struct se_session), | 103 | sizeof(struct se_session), __alignof__(struct se_session), |
@@ -117,14 +105,14 @@ int init_se_kmem_caches(void) | |||
117 | if (!se_sess_cache) { | 105 | if (!se_sess_cache) { |
118 | pr_err("kmem_cache_create() for struct se_session" | 106 | pr_err("kmem_cache_create() for struct se_session" |
119 | " failed\n"); | 107 | " failed\n"); |
120 | goto out; | 108 | goto out_free_tmr_req_cache; |
121 | } | 109 | } |
122 | se_ua_cache = kmem_cache_create("se_ua_cache", | 110 | se_ua_cache = kmem_cache_create("se_ua_cache", |
123 | sizeof(struct se_ua), __alignof__(struct se_ua), | 111 | sizeof(struct se_ua), __alignof__(struct se_ua), |
124 | 0, NULL); | 112 | 0, NULL); |
125 | if (!se_ua_cache) { | 113 | if (!se_ua_cache) { |
126 | pr_err("kmem_cache_create() for struct se_ua failed\n"); | 114 | pr_err("kmem_cache_create() for struct se_ua failed\n"); |
127 | goto out; | 115 | goto out_free_sess_cache; |
128 | } | 116 | } |
129 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", | 117 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
130 | sizeof(struct t10_pr_registration), | 118 | sizeof(struct t10_pr_registration), |
@@ -132,7 +120,7 @@ int init_se_kmem_caches(void) | |||
132 | if (!t10_pr_reg_cache) { | 120 | if (!t10_pr_reg_cache) { |
133 | pr_err("kmem_cache_create() for struct t10_pr_registration" | 121 | pr_err("kmem_cache_create() for struct t10_pr_registration" |
134 | " failed\n"); | 122 | " failed\n"); |
135 | goto out; | 123 | goto out_free_ua_cache; |
136 | } | 124 | } |
137 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | 125 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", |
138 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | 126 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), |
@@ -140,7 +128,7 @@ int init_se_kmem_caches(void) | |||
140 | if (!t10_alua_lu_gp_cache) { | 128 | if (!t10_alua_lu_gp_cache) { |
141 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" | 129 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" |
142 | " failed\n"); | 130 | " failed\n"); |
143 | goto out; | 131 | goto out_free_pr_reg_cache; |
144 | } | 132 | } |
145 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | 133 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", |
146 | sizeof(struct t10_alua_lu_gp_member), | 134 | sizeof(struct t10_alua_lu_gp_member), |
@@ -148,7 +136,7 @@ int init_se_kmem_caches(void) | |||
148 | if (!t10_alua_lu_gp_mem_cache) { | 136 | if (!t10_alua_lu_gp_mem_cache) { |
149 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" | 137 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" |
150 | "cache failed\n"); | 138 | "cache failed\n"); |
151 | goto out; | 139 | goto out_free_lu_gp_cache; |
152 | } | 140 | } |
153 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | 141 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", |
154 | sizeof(struct t10_alua_tg_pt_gp), | 142 | sizeof(struct t10_alua_tg_pt_gp), |
@@ -156,7 +144,7 @@ int init_se_kmem_caches(void) | |||
156 | if (!t10_alua_tg_pt_gp_cache) { | 144 | if (!t10_alua_tg_pt_gp_cache) { |
157 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | 145 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" |
158 | "cache failed\n"); | 146 | "cache failed\n"); |
159 | goto out; | 147 | goto out_free_lu_gp_mem_cache; |
160 | } | 148 | } |
161 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | 149 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( |
162 | "t10_alua_tg_pt_gp_mem_cache", | 150 | "t10_alua_tg_pt_gp_mem_cache", |
@@ -166,34 +154,41 @@ int init_se_kmem_caches(void) | |||
166 | if (!t10_alua_tg_pt_gp_mem_cache) { | 154 | if (!t10_alua_tg_pt_gp_mem_cache) { |
167 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | 155 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" |
168 | "mem_t failed\n"); | 156 | "mem_t failed\n"); |
169 | goto out; | 157 | goto out_free_tg_pt_gp_cache; |
170 | } | 158 | } |
171 | 159 | ||
160 | target_completion_wq = alloc_workqueue("target_completion", | ||
161 | WQ_MEM_RECLAIM, 0); | ||
162 | if (!target_completion_wq) | ||
163 | goto out_free_tg_pt_gp_mem_cache; | ||
164 | |||
172 | return 0; | 165 | return 0; |
166 | |||
167 | out_free_tg_pt_gp_mem_cache: | ||
168 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | ||
169 | out_free_tg_pt_gp_cache: | ||
170 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | ||
171 | out_free_lu_gp_mem_cache: | ||
172 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | ||
173 | out_free_lu_gp_cache: | ||
174 | kmem_cache_destroy(t10_alua_lu_gp_cache); | ||
175 | out_free_pr_reg_cache: | ||
176 | kmem_cache_destroy(t10_pr_reg_cache); | ||
177 | out_free_ua_cache: | ||
178 | kmem_cache_destroy(se_ua_cache); | ||
179 | out_free_sess_cache: | ||
180 | kmem_cache_destroy(se_sess_cache); | ||
181 | out_free_tmr_req_cache: | ||
182 | kmem_cache_destroy(se_tmr_req_cache); | ||
183 | out_free_cmd_cache: | ||
184 | kmem_cache_destroy(se_cmd_cache); | ||
173 | out: | 185 | out: |
174 | if (se_cmd_cache) | ||
175 | kmem_cache_destroy(se_cmd_cache); | ||
176 | if (se_tmr_req_cache) | ||
177 | kmem_cache_destroy(se_tmr_req_cache); | ||
178 | if (se_sess_cache) | ||
179 | kmem_cache_destroy(se_sess_cache); | ||
180 | if (se_ua_cache) | ||
181 | kmem_cache_destroy(se_ua_cache); | ||
182 | if (t10_pr_reg_cache) | ||
183 | kmem_cache_destroy(t10_pr_reg_cache); | ||
184 | if (t10_alua_lu_gp_cache) | ||
185 | kmem_cache_destroy(t10_alua_lu_gp_cache); | ||
186 | if (t10_alua_lu_gp_mem_cache) | ||
187 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | ||
188 | if (t10_alua_tg_pt_gp_cache) | ||
189 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | ||
190 | if (t10_alua_tg_pt_gp_mem_cache) | ||
191 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | ||
192 | return -ENOMEM; | 186 | return -ENOMEM; |
193 | } | 187 | } |
194 | 188 | ||
195 | void release_se_kmem_caches(void) | 189 | void release_se_kmem_caches(void) |
196 | { | 190 | { |
191 | destroy_workqueue(target_completion_wq); | ||
197 | kmem_cache_destroy(se_cmd_cache); | 192 | kmem_cache_destroy(se_cmd_cache); |
198 | kmem_cache_destroy(se_tmr_req_cache); | 193 | kmem_cache_destroy(se_tmr_req_cache); |
199 | kmem_cache_destroy(se_sess_cache); | 194 | kmem_cache_destroy(se_sess_cache); |
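The init_se_kmem_caches() conversion above is the standard goto-unwind ladder: rather than a single out: label that must NULL-check every cache pointer, each allocation failure jumps to a label that destroys exactly the caches already created, in reverse order. A compressed sketch of the pattern with two hypothetical caches:

#include <linux/slab.h>

static struct kmem_cache *cache_a, *cache_b;

static int example_init(void)
{
	cache_a = kmem_cache_create("cache_a", 64, 0, 0, NULL);
	if (!cache_a)
		return -ENOMEM;		/* nothing to unwind yet */

	cache_b = kmem_cache_create("cache_b", 128, 0, 0, NULL);
	if (!cache_b)
		goto out_free_a;	/* unwind only what succeeded */

	return 0;

out_free_a:
	kmem_cache_destroy(cache_a);
	return -ENOMEM;
}

Each later allocation simply gains a deeper label, so the unwind order can never drift out of sync with the allocation order.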
@@ -234,10 +229,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj) | |||
234 | } | 229 | } |
235 | EXPORT_SYMBOL(transport_init_queue_obj); | 230 | EXPORT_SYMBOL(transport_init_queue_obj); |
236 | 231 | ||
237 | static int transport_subsystem_reqmods(void) | 232 | void transport_subsystem_check_init(void) |
238 | { | 233 | { |
239 | int ret; | 234 | int ret; |
240 | 235 | ||
236 | if (sub_api_initialized) | ||
237 | return; | ||
238 | |||
241 | ret = request_module("target_core_iblock"); | 239 | ret = request_module("target_core_iblock"); |
242 | if (ret != 0) | 240 | if (ret != 0) |
243 | pr_err("Unable to load target_core_iblock\n"); | 241 | pr_err("Unable to load target_core_iblock\n"); |
@@ -254,24 +252,8 @@ static int transport_subsystem_reqmods(void) | |||
254 | if (ret != 0) | 252 | if (ret != 0) |
255 | pr_err("Unable to load target_core_stgt\n"); | 253 | pr_err("Unable to load target_core_stgt\n"); |
256 | 254 | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | int transport_subsystem_check_init(void) | ||
261 | { | ||
262 | int ret; | ||
263 | |||
264 | if (sub_api_initialized) | ||
265 | return 0; | ||
266 | /* | ||
267 | * Request the loading of known TCM subsystem plugins.. | ||
268 | */ | ||
269 | ret = transport_subsystem_reqmods(); | ||
270 | if (ret < 0) | ||
271 | return ret; | ||
272 | |||
273 | sub_api_initialized = 1; | 255 | sub_api_initialized = 1; |
274 | return 0; | 256 | return; |
275 | } | 257 | } |
276 | 258 | ||
277 | struct se_session *transport_init_session(void) | 259 | struct se_session *transport_init_session(void) |
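transport_subsystem_check_init() also absorbs the old transport_subsystem_reqmods() helper and turns void: request_module() failures were never fatal here (a backend can be built in, or simply unused), so the only state worth keeping is the run-once guard. The shape, with hypothetical names:

#include <linux/kernel.h>
#include <linux/kmod.h>

static int example_initialized;

void example_check_init(void)
{
	if (example_initialized)
		return;

	/* Best effort: a missing backend module is not an error here. */
	if (request_module("example_backend") != 0)
		pr_err("Unable to load example_backend\n");

	example_initialized = 1;
}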
@@ -438,16 +420,15 @@ EXPORT_SYMBOL(transport_deregister_session); | |||
438 | */ | 420 | */ |
439 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | 421 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) |
440 | { | 422 | { |
441 | struct se_device *dev; | 423 | struct se_device *dev = cmd->se_dev; |
442 | struct se_task *task; | 424 | struct se_task *task; |
443 | unsigned long flags; | 425 | unsigned long flags; |
444 | 426 | ||
445 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | 427 | if (!dev) |
446 | dev = task->se_dev; | 428 | return; |
447 | if (!dev) | ||
448 | continue; | ||
449 | 429 | ||
450 | if (atomic_read(&task->task_active)) | 430 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
431 | if (task->task_flags & TF_ACTIVE) | ||
451 | continue; | 432 | continue; |
452 | 433 | ||
453 | if (!atomic_read(&task->task_state_active)) | 434 | if (!atomic_read(&task->task_state_active)) |
@@ -489,8 +470,6 @@ static int transport_cmd_check_stop( | |||
489 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 470 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
490 | cmd->se_tfo->get_task_tag(cmd)); | 471 | cmd->se_tfo->get_task_tag(cmd)); |
491 | 472 | ||
492 | cmd->deferred_t_state = cmd->t_state; | ||
493 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | ||
494 | atomic_set(&cmd->t_transport_active, 0); | 473 | atomic_set(&cmd->t_transport_active, 0); |
495 | if (transport_off == 2) | 474 | if (transport_off == 2) |
496 | transport_all_task_dev_remove_state(cmd); | 475 | transport_all_task_dev_remove_state(cmd); |
@@ -508,8 +487,6 @@ static int transport_cmd_check_stop( | |||
508 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, | 487 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
509 | cmd->se_tfo->get_task_tag(cmd)); | 488 | cmd->se_tfo->get_task_tag(cmd)); |
510 | 489 | ||
511 | cmd->deferred_t_state = cmd->t_state; | ||
512 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | ||
513 | if (transport_off == 2) | 490 | if (transport_off == 2) |
514 | transport_all_task_dev_remove_state(cmd); | 491 | transport_all_task_dev_remove_state(cmd); |
515 | 492 | ||
@@ -594,35 +571,24 @@ check_lun: | |||
594 | 571 | ||
595 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 572 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
596 | { | 573 | { |
597 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); | 574 | if (!cmd->se_tmr_req) |
598 | transport_lun_remove_cmd(cmd); | 575 | transport_lun_remove_cmd(cmd); |
599 | 576 | ||
600 | if (transport_cmd_check_stop_to_fabric(cmd)) | 577 | if (transport_cmd_check_stop_to_fabric(cmd)) |
601 | return; | 578 | return; |
602 | if (remove) | 579 | if (remove) { |
603 | transport_generic_remove(cmd, 0); | 580 | transport_remove_cmd_from_queue(cmd); |
604 | } | 581 | transport_put_cmd(cmd); |
605 | 582 | } | |
606 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | ||
607 | { | ||
608 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); | ||
609 | |||
610 | if (transport_cmd_check_stop_to_fabric(cmd)) | ||
611 | return; | ||
612 | |||
613 | transport_generic_remove(cmd, 0); | ||
614 | } | 583 | } |
615 | 584 | ||
616 | static void transport_add_cmd_to_queue( | 585 | static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, |
617 | struct se_cmd *cmd, | 586 | bool at_head) |
618 | int t_state) | ||
619 | { | 587 | { |
620 | struct se_device *dev = cmd->se_dev; | 588 | struct se_device *dev = cmd->se_dev; |
621 | struct se_queue_obj *qobj = &dev->dev_queue_obj; | 589 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
622 | unsigned long flags; | 590 | unsigned long flags; |
623 | 591 | ||
624 | INIT_LIST_HEAD(&cmd->se_queue_node); | ||
625 | |||
626 | if (t_state) { | 592 | if (t_state) { |
627 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 593 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
628 | cmd->t_state = t_state; | 594 | cmd->t_state = t_state; |
@@ -631,15 +597,20 @@ static void transport_add_cmd_to_queue( | |||
631 | } | 597 | } |
632 | 598 | ||
633 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 599 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
634 | if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { | 600 | |
635 | cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; | 601 | /* If the cmd is already on the list, remove it before we add it */ |
602 | if (!list_empty(&cmd->se_queue_node)) | ||
603 | list_del(&cmd->se_queue_node); | ||
604 | else | ||
605 | atomic_inc(&qobj->queue_cnt); | ||
606 | |||
607 | if (at_head) | ||
636 | list_add(&cmd->se_queue_node, &qobj->qobj_list); | 608 | list_add(&cmd->se_queue_node, &qobj->qobj_list); |
637 | } else | 609 | else |
638 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); | 610 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); |
639 | atomic_inc(&cmd->t_transport_queue_active); | 611 | atomic_set(&cmd->t_transport_queue_active, 1); |
640 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 612 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
641 | 613 | ||
642 | atomic_inc(&qobj->queue_cnt); | ||
643 | wake_up_interruptible(&qobj->thread_wq); | 614 | wake_up_interruptible(&qobj->thread_wq); |
644 | } | 615 | } |
645 | 616 | ||
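transport_add_cmd_to_queue() may now be handed a command that is already queued: se_queue_node is initialized once in transport_init_se_cmd() (see the hunk further down) and every dequeue path uses list_del_init(), so list_empty() doubles as an O(1) "is it queued?" test. That one idiom replaces both the SCF_EMULATE_QUEUE_FULL flag (now the explicit at_head argument) and the O(n) scan the old transport_remove_cmd_from_queue() needed. A self-contained sketch with hypothetical queue/item types:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ex_queue {
	spinlock_t		lock;
	struct list_head	items;
	atomic_t		count;
};

struct ex_item {
	struct list_head	node;	/* INIT_LIST_HEAD() at creation */
};

static void ex_queue_add(struct ex_queue *q, struct ex_item *it, bool at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&it->node))
		list_del(&it->node);	/* already queued: just move it */
	else
		atomic_inc(&q->count);	/* genuinely new entry */

	if (at_head)
		list_add(&it->node, &q->items);
	else
		list_add_tail(&it->node, &q->items);
	spin_unlock_irqrestore(&q->lock, flags);
}

static void ex_queue_remove(struct ex_queue *q, struct ex_item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&it->node)) {
		list_del_init(&it->node);	/* node reads as empty again */
		atomic_dec(&q->count);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

The same pairing is what lets target_qf_do_work() below requeue a QUEUE_FULL command at the head of the list by passing at_head = true.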
@@ -656,19 +627,18 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj) | |||
656 | } | 627 | } |
657 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); | 628 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); |
658 | 629 | ||
659 | atomic_dec(&cmd->t_transport_queue_active); | 630 | atomic_set(&cmd->t_transport_queue_active, 0); |
660 | 631 | ||
661 | list_del(&cmd->se_queue_node); | 632 | list_del_init(&cmd->se_queue_node); |
662 | atomic_dec(&qobj->queue_cnt); | 633 | atomic_dec(&qobj->queue_cnt); |
663 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 634 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
664 | 635 | ||
665 | return cmd; | 636 | return cmd; |
666 | } | 637 | } |
667 | 638 | ||
668 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | 639 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd) |
669 | struct se_queue_obj *qobj) | ||
670 | { | 640 | { |
671 | struct se_cmd *t; | 641 | struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; |
672 | unsigned long flags; | 642 | unsigned long flags; |
673 | 643 | ||
674 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | 644 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); |
@@ -676,14 +646,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |||
676 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 646 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
677 | return; | 647 | return; |
678 | } | 648 | } |
679 | 649 | atomic_set(&cmd->t_transport_queue_active, 0); | |
680 | list_for_each_entry(t, &qobj->qobj_list, se_queue_node) | 650 | atomic_dec(&qobj->queue_cnt); |
681 | if (t == cmd) { | 651 | list_del_init(&cmd->se_queue_node); |
682 | atomic_dec(&cmd->t_transport_queue_active); | ||
683 | atomic_dec(&qobj->queue_cnt); | ||
684 | list_del(&cmd->se_queue_node); | ||
685 | break; | ||
686 | } | ||
687 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | 652 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
688 | 653 | ||
689 | if (atomic_read(&cmd->t_transport_queue_active)) { | 654 | if (atomic_read(&cmd->t_transport_queue_active)) { |
@@ -716,6 +681,13 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |||
716 | } | 681 | } |
717 | EXPORT_SYMBOL(transport_complete_sync_cache); | 682 | EXPORT_SYMBOL(transport_complete_sync_cache); |
718 | 683 | ||
684 | static void target_complete_failure_work(struct work_struct *work) | ||
685 | { | ||
686 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | ||
687 | |||
688 | transport_generic_request_failure(cmd, 1, 1); | ||
689 | } | ||
690 | |||
719 | /* transport_complete_task(): | 691 | /* transport_complete_task(): |
720 | * | 692 | * |
721 | * Called from interrupt and non interrupt context depending | 693 | * Called from interrupt and non interrupt context depending |
@@ -724,8 +696,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache); | |||
724 | void transport_complete_task(struct se_task *task, int success) | 696 | void transport_complete_task(struct se_task *task, int success) |
725 | { | 697 | { |
726 | struct se_cmd *cmd = task->task_se_cmd; | 698 | struct se_cmd *cmd = task->task_se_cmd; |
727 | struct se_device *dev = task->se_dev; | 699 | struct se_device *dev = cmd->se_dev; |
728 | int t_state; | ||
729 | unsigned long flags; | 700 | unsigned long flags; |
730 | #if 0 | 701 | #if 0 |
731 | pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, | 702 | pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, |
@@ -735,7 +706,7 @@ void transport_complete_task(struct se_task *task, int success) | |||
735 | atomic_inc(&dev->depth_left); | 706 | atomic_inc(&dev->depth_left); |
736 | 707 | ||
737 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 708 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
738 | atomic_set(&task->task_active, 0); | 709 | task->task_flags &= ~TF_ACTIVE; |
739 | 710 | ||
740 | /* | 711 | /* |
741 | * See if any sense data exists, if so set the TASK_SENSE flag. | 712 | * See if any sense data exists, if so set the TASK_SENSE flag. |
@@ -754,68 +725,39 @@ void transport_complete_task(struct se_task *task, int success) | |||
754 | * See if we are waiting for outstanding struct se_task | 725 | * See if we are waiting for outstanding struct se_task |
755 | * to complete for an exception condition | 726 | * to complete for an exception condition |
756 | */ | 727 | */ |
757 | if (atomic_read(&task->task_stop)) { | 728 | if (task->task_flags & TF_REQUEST_STOP) { |
758 | /* | ||
759 | * Decrement cmd->t_se_count if this task had | ||
760 | * previously thrown its timeout exception handler. | ||
761 | */ | ||
762 | if (atomic_read(&task->task_timeout)) { | ||
763 | atomic_dec(&cmd->t_se_count); | ||
764 | atomic_set(&task->task_timeout, 0); | ||
765 | } | ||
766 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 729 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
767 | |||
768 | complete(&task->task_stop_comp); | 730 | complete(&task->task_stop_comp); |
769 | return; | 731 | return; |
770 | } | 732 | } |
771 | /* | 733 | /* |
772 | * If the task's timeout handler has fired, use the t_task_cdbs_timeout | ||
773 | * left counter to determine when the struct se_cmd is ready to be queued to | ||
774 | * the processing thread. | ||
775 | */ | ||
776 | if (atomic_read(&task->task_timeout)) { | ||
777 | if (!atomic_dec_and_test( | ||
778 | &cmd->t_task_cdbs_timeout_left)) { | ||
779 | spin_unlock_irqrestore(&cmd->t_state_lock, | ||
780 | flags); | ||
781 | return; | ||
782 | } | ||
783 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | ||
784 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
785 | |||
786 | transport_add_cmd_to_queue(cmd, t_state); | ||
787 | return; | ||
788 | } | ||
789 | atomic_dec(&cmd->t_task_cdbs_timeout_left); | ||
790 | |||
791 | /* | ||
792 | * Decrement the outstanding t_task_cdbs_left count. The last | 734 | * Decrement the outstanding t_task_cdbs_left count. The last |
793 | * struct se_task from struct se_cmd will complete itself into the | 735 | * struct se_task from struct se_cmd will complete itself into the |
794 | * device queue depending upon int success. | 736 | * device queue depending upon int success. |
795 | */ | 737 | */ |
796 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { | 738 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
797 | if (!success) | ||
798 | cmd->t_tasks_failed = 1; | ||
799 | |||
800 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 739 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
801 | return; | 740 | return; |
802 | } | 741 | } |
803 | 742 | ||
804 | if (!success || cmd->t_tasks_failed) { | 743 | if (!success || cmd->t_tasks_failed) { |
805 | t_state = TRANSPORT_COMPLETE_FAILURE; | ||
806 | if (!task->task_error_status) { | 744 | if (!task->task_error_status) { |
807 | task->task_error_status = | 745 | task->task_error_status = |
808 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 746 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
809 | cmd->transport_error_status = | 747 | cmd->transport_error_status = |
810 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 748 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
811 | } | 749 | } |
750 | INIT_WORK(&cmd->work, target_complete_failure_work); | ||
812 | } else { | 751 | } else { |
813 | atomic_set(&cmd->t_transport_complete, 1); | 752 | atomic_set(&cmd->t_transport_complete, 1); |
814 | t_state = TRANSPORT_COMPLETE_OK; | 753 | INIT_WORK(&cmd->work, target_complete_ok_work); |
815 | } | 754 | } |
755 | |||
756 | cmd->t_state = TRANSPORT_COMPLETE; | ||
757 | atomic_set(&cmd->t_transport_active, 1); | ||
816 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 758 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
817 | 759 | ||
818 | transport_add_cmd_to_queue(cmd, t_state); | 760 | queue_work(target_completion_wq, &cmd->work); |
819 | } | 761 | } |
820 | EXPORT_SYMBOL(transport_complete_task); | 762 | EXPORT_SYMBOL(transport_complete_task); |
821 | 763 | ||
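transport_complete_task() no longer bounces completions through the device queue thread via TRANSPORT_COMPLETE_OK/FAILURE states; it selects the work handler at completion time and queues the command's embedded work_struct on the dedicated completion workqueue, so both the good and the error path run in process context. The bare pattern, with hypothetical names (the real handlers here are target_complete_ok_work() and target_complete_failure_work()):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ex_completion_wq;

struct ex_cmd {
	struct work_struct	work;
	/* ... per-command state ... */
};

static void ex_complete_ok(struct work_struct *work)
{
	struct ex_cmd *cmd = container_of(work, struct ex_cmd, work);

	pr_debug("good-path completion for %p, in process context\n", cmd);
}

static void ex_complete_failure(struct work_struct *work)
{
	struct ex_cmd *cmd = container_of(work, struct ex_cmd, work);

	pr_debug("error-path completion for %p\n", cmd);
}

/* Safe from interrupt context, like transport_complete_task() itself. */
static void ex_cmd_done(struct ex_cmd *cmd, bool success)
{
	INIT_WORK(&cmd->work, success ? ex_complete_ok : ex_complete_failure);
	queue_work(ex_completion_wq, &cmd->work);
}

The queue itself is created with WQ_MEM_RECLAIM (see the alloc_workqueue() call in the init hunk above), since completing outstanding I/O may be needed to make forward progress under memory pressure.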
@@ -902,14 +844,12 @@ static void __transport_add_task_to_execute_queue( | |||
902 | 844 | ||
903 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | 845 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) |
904 | { | 846 | { |
905 | struct se_device *dev; | 847 | struct se_device *dev = cmd->se_dev; |
906 | struct se_task *task; | 848 | struct se_task *task; |
907 | unsigned long flags; | 849 | unsigned long flags; |
908 | 850 | ||
909 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 851 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
910 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | 852 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
911 | dev = task->se_dev; | ||
912 | |||
913 | if (atomic_read(&task->task_state_active)) | 853 | if (atomic_read(&task->task_state_active)) |
914 | continue; | 854 | continue; |
915 | 855 | ||
@@ -934,38 +874,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |||
934 | 874 | ||
935 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 875 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
936 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | 876 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
937 | if (atomic_read(&task->task_execute_queue)) | 877 | if (!list_empty(&task->t_execute_list)) |
938 | continue; | 878 | continue; |
939 | /* | 879 | /* |
940 | * __transport_add_task_to_execute_queue() handles the | 880 | * __transport_add_task_to_execute_queue() handles the |
941 | * SAM Task Attribute emulation if enabled | 881 | * SAM Task Attribute emulation if enabled |
942 | */ | 882 | */ |
943 | __transport_add_task_to_execute_queue(task, task_prev, dev); | 883 | __transport_add_task_to_execute_queue(task, task_prev, dev); |
944 | atomic_set(&task->task_execute_queue, 1); | ||
945 | task_prev = task; | 884 | task_prev = task; |
946 | } | 885 | } |
947 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 886 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
948 | } | 887 | } |
949 | 888 | ||
950 | /* transport_remove_task_from_execute_queue(): | 889 | void __transport_remove_task_from_execute_queue(struct se_task *task, |
951 | * | 890 | struct se_device *dev) |
952 | * | 891 | { |
953 | */ | 892 | list_del_init(&task->t_execute_list); |
893 | atomic_dec(&dev->execute_tasks); | ||
894 | } | ||
895 | |||
954 | void transport_remove_task_from_execute_queue( | 896 | void transport_remove_task_from_execute_queue( |
955 | struct se_task *task, | 897 | struct se_task *task, |
956 | struct se_device *dev) | 898 | struct se_device *dev) |
957 | { | 899 | { |
958 | unsigned long flags; | 900 | unsigned long flags; |
959 | 901 | ||
960 | if (atomic_read(&task->task_execute_queue) == 0) { | 902 | if (WARN_ON(list_empty(&task->t_execute_list))) |
961 | dump_stack(); | ||
962 | return; | 903 | return; |
963 | } | ||
964 | 904 | ||
965 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 905 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
966 | list_del(&task->t_execute_list); | 906 | __transport_remove_task_from_execute_queue(task, dev); |
967 | atomic_set(&task->task_execute_queue, 0); | ||
968 | atomic_dec(&dev->execute_tasks); | ||
969 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 907 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
970 | } | 908 | } |
971 | 909 | ||
@@ -991,14 +929,11 @@ static void target_qf_do_work(struct work_struct *work) | |||
991 | 929 | ||
992 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" | 930 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
993 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, | 931 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
994 | (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : | 932 | (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : |
995 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" | 933 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" |
996 | : "UNKNOWN"); | 934 | : "UNKNOWN"); |
997 | /* | 935 | |
998 | * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd | 936 | transport_add_cmd_to_queue(cmd, cmd->t_state, true); |
999 | * has been added to head of queue | ||
1000 | */ | ||
1001 | transport_add_cmd_to_queue(cmd, cmd->t_state); | ||
1002 | } | 937 | } |
1003 | } | 938 | } |
1004 | 939 | ||
@@ -1053,41 +988,6 @@ void transport_dump_dev_state( | |||
1053 | *bl += sprintf(b + *bl, " "); | 988 | *bl += sprintf(b + *bl, " "); |
1054 | } | 989 | } |
1055 | 990 | ||
1056 | /* transport_release_all_cmds(): | ||
1057 | * | ||
1058 | * | ||
1059 | */ | ||
1060 | static void transport_release_all_cmds(struct se_device *dev) | ||
1061 | { | ||
1062 | struct se_cmd *cmd, *tcmd; | ||
1063 | int bug_out = 0, t_state; | ||
1064 | unsigned long flags; | ||
1065 | |||
1066 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); | ||
1067 | list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, | ||
1068 | se_queue_node) { | ||
1069 | t_state = cmd->t_state; | ||
1070 | list_del(&cmd->se_queue_node); | ||
1071 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, | ||
1072 | flags); | ||
1073 | |||
1074 | pr_err("Releasing ITT: 0x%08x, i_state: %u," | ||
1075 | " t_state: %u directly\n", | ||
1076 | cmd->se_tfo->get_task_tag(cmd), | ||
1077 | cmd->se_tfo->get_cmd_state(cmd), t_state); | ||
1078 | |||
1079 | transport_release_fe_cmd(cmd); | ||
1080 | bug_out = 1; | ||
1081 | |||
1082 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); | ||
1083 | } | ||
1084 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); | ||
1085 | #if 0 | ||
1086 | if (bug_out) | ||
1087 | BUG(); | ||
1088 | #endif | ||
1089 | } | ||
1090 | |||
1091 | void transport_dump_vpd_proto_id( | 991 | void transport_dump_vpd_proto_id( |
1092 | struct t10_vpd *vpd, | 992 | struct t10_vpd *vpd, |
1093 | unsigned char *p_buf, | 993 | unsigned char *p_buf, |
@@ -1573,7 +1473,6 @@ transport_generic_get_task(struct se_cmd *cmd, | |||
1573 | INIT_LIST_HEAD(&task->t_state_list); | 1473 | INIT_LIST_HEAD(&task->t_state_list); |
1574 | init_completion(&task->task_stop_comp); | 1474 | init_completion(&task->task_stop_comp); |
1575 | task->task_se_cmd = cmd; | 1475 | task->task_se_cmd = cmd; |
1576 | task->se_dev = dev; | ||
1577 | task->task_data_direction = data_direction; | 1476 | task->task_data_direction = data_direction; |
1578 | 1477 | ||
1579 | return task; | 1478 | return task; |
@@ -1598,6 +1497,7 @@ void transport_init_se_cmd( | |||
1598 | INIT_LIST_HEAD(&cmd->se_delayed_node); | 1497 | INIT_LIST_HEAD(&cmd->se_delayed_node); |
1599 | INIT_LIST_HEAD(&cmd->se_ordered_node); | 1498 | INIT_LIST_HEAD(&cmd->se_ordered_node); |
1600 | INIT_LIST_HEAD(&cmd->se_qf_node); | 1499 | INIT_LIST_HEAD(&cmd->se_qf_node); |
1500 | INIT_LIST_HEAD(&cmd->se_queue_node); | ||
1601 | 1501 | ||
1602 | INIT_LIST_HEAD(&cmd->t_task_list); | 1502 | INIT_LIST_HEAD(&cmd->t_task_list); |
1603 | init_completion(&cmd->transport_lun_fe_stop_comp); | 1503 | init_completion(&cmd->transport_lun_fe_stop_comp); |
@@ -1641,21 +1541,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |||
1641 | return 0; | 1541 | return 0; |
1642 | } | 1542 | } |
1643 | 1543 | ||
1644 | void transport_free_se_cmd( | ||
1645 | struct se_cmd *se_cmd) | ||
1646 | { | ||
1647 | if (se_cmd->se_tmr_req) | ||
1648 | core_tmr_release_req(se_cmd->se_tmr_req); | ||
1649 | /* | ||
1650 | * Check and free any extended CDB buffer that was allocated | ||
1651 | */ | ||
1652 | if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) | ||
1653 | kfree(se_cmd->t_task_cdb); | ||
1654 | } | ||
1655 | EXPORT_SYMBOL(transport_free_se_cmd); | ||
1656 | |||
1657 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | ||
1658 | |||
1659 | /* transport_generic_allocate_tasks(): | 1544 | /* transport_generic_allocate_tasks(): |
1660 | * | 1545 | * |
1661 | * Called from fabric RX Thread. | 1546 | * Called from fabric RX Thread. |
@@ -1667,12 +1552,6 @@ int transport_generic_allocate_tasks( | |||
1667 | int ret; | 1552 | int ret; |
1668 | 1553 | ||
1669 | transport_generic_prepare_cdb(cdb); | 1554 | transport_generic_prepare_cdb(cdb); |
1670 | |||
1671 | /* | ||
1672 | * This is needed for early exceptions. | ||
1673 | */ | ||
1674 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | ||
1675 | |||
1676 | /* | 1555 | /* |
1677 | * Ensure that the received CDB is less than the max (252 + 8) bytes | 1556 | * Ensure that the received CDB is less than the max (252 + 8) bytes |
1678 | * for VARIABLE_LENGTH_CMD | 1557 | * for VARIABLE_LENGTH_CMD |
@@ -1730,26 +1609,6 @@ int transport_generic_allocate_tasks( | |||
1730 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | 1609 | EXPORT_SYMBOL(transport_generic_allocate_tasks); |
1731 | 1610 | ||
1732 | /* | 1611 | /* |
1733 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | ||
1734 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status | ||
1735 | */ | ||
1736 | int transport_generic_handle_cdb( | ||
1737 | struct se_cmd *cmd) | ||
1738 | { | ||
1739 | if (!cmd->se_lun) { | ||
1740 | dump_stack(); | ||
1741 | pr_err("cmd->se_lun is NULL\n"); | ||
1742 | return -EINVAL; | ||
1743 | } | ||
1744 | |||
1745 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | ||
1746 | return 0; | ||
1747 | } | ||
1748 | EXPORT_SYMBOL(transport_generic_handle_cdb); | ||
1749 | |||
1750 | static void transport_generic_request_failure(struct se_cmd *, | ||
1751 | struct se_device *, int, int); | ||
1752 | /* | ||
1753 | * Used by fabric module frontends to queue tasks directly. | 1612 | * Used by fabric module frontends to queue tasks directly. |
1754 | * May only be used from process context | 1613 | * May only be used from process context |
1755 | */ | 1614 | */ |
@@ -1773,7 +1632,7 @@ int transport_handle_cdb_direct( | |||
1773 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following | 1632 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following |
1774 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | 1633 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() |
1775 | * in existing usage to ensure that outstanding descriptors are handled | 1634 | * in existing usage to ensure that outstanding descriptors are handled |
1776 | * correctly during shutdown via transport_generic_wait_for_tasks() | 1635 | * correctly during shutdown via transport_wait_for_tasks() |
1777 | * | 1636 | * |
1778 | * Also, we don't take cmd->t_state_lock here as we only expect | 1637 | * Also, we don't take cmd->t_state_lock here as we only expect |
1779 | * this to be called for initial descriptor submission. | 1638 | * this to be called for initial descriptor submission. |
@@ -1790,7 +1649,7 @@ int transport_handle_cdb_direct( | |||
1790 | return 0; | 1649 | return 0; |
1791 | else if (ret < 0) { | 1650 | else if (ret < 0) { |
1792 | cmd->transport_error_status = ret; | 1651 | cmd->transport_error_status = ret; |
1793 | transport_generic_request_failure(cmd, NULL, 0, | 1652 | transport_generic_request_failure(cmd, 0, |
1794 | (cmd->data_direction != DMA_TO_DEVICE)); | 1653 | (cmd->data_direction != DMA_TO_DEVICE)); |
1795 | } | 1654 | } |
1796 | return 0; | 1655 | return 0; |
@@ -1811,7 +1670,7 @@ int transport_generic_handle_cdb_map( | |||
1811 | return -EINVAL; | 1670 | return -EINVAL; |
1812 | } | 1671 | } |
1813 | 1672 | ||
1814 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | 1673 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); |
1815 | return 0; | 1674 | return 0; |
1816 | } | 1675 | } |
1817 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | 1676 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); |
@@ -1841,7 +1700,7 @@ int transport_generic_handle_data( | |||
1841 | if (transport_check_aborted_status(cmd, 1) != 0) | 1700 | if (transport_check_aborted_status(cmd, 1) != 0) |
1842 | return 0; | 1701 | return 0; |
1843 | 1702 | ||
1844 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | 1703 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); |
1845 | return 0; | 1704 | return 0; |
1846 | } | 1705 | } |
1847 | EXPORT_SYMBOL(transport_generic_handle_data); | 1706 | EXPORT_SYMBOL(transport_generic_handle_data); |
@@ -1853,12 +1712,7 @@ EXPORT_SYMBOL(transport_generic_handle_data); | |||
1853 | int transport_generic_handle_tmr( | 1712 | int transport_generic_handle_tmr( |
1854 | struct se_cmd *cmd) | 1713 | struct se_cmd *cmd) |
1855 | { | 1714 | { |
1856 | /* | 1715 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); |
1857 | * This is needed for early exceptions. | ||
1858 | */ | ||
1859 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | ||
1860 | |||
1861 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | ||
1862 | return 0; | 1716 | return 0; |
1863 | } | 1717 | } |
1864 | EXPORT_SYMBOL(transport_generic_handle_tmr); | 1718 | EXPORT_SYMBOL(transport_generic_handle_tmr); |
@@ -1866,10 +1720,36 @@ EXPORT_SYMBOL(transport_generic_handle_tmr); | |||
1866 | void transport_generic_free_cmd_intr( | 1720 | void transport_generic_free_cmd_intr( |
1867 | struct se_cmd *cmd) | 1721 | struct se_cmd *cmd) |
1868 | { | 1722 | { |
1869 | transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); | 1723 | transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false); |
1870 | } | 1724 | } |
1871 | EXPORT_SYMBOL(transport_generic_free_cmd_intr); | 1725 | EXPORT_SYMBOL(transport_generic_free_cmd_intr); |
1872 | 1726 | ||
1727 | /* | ||
1728 | * If the task is active, request it to be stopped and sleep until it | ||
1729 | * has completed. | ||
1730 | */ | ||
1731 | bool target_stop_task(struct se_task *task, unsigned long *flags) | ||
1732 | { | ||
1733 | struct se_cmd *cmd = task->task_se_cmd; | ||
1734 | bool was_active = false; | ||
1735 | |||
1736 | if (task->task_flags & TF_ACTIVE) { | ||
1737 | task->task_flags |= TF_REQUEST_STOP; | ||
1738 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | ||
1739 | |||
1740 | pr_debug("Task %p waiting to complete\n", task); | ||
1741 | wait_for_completion(&task->task_stop_comp); | ||
1742 | pr_debug("Task %p stopped successfully\n", task); | ||
1743 | |||
1744 | spin_lock_irqsave(&cmd->t_state_lock, *flags); | ||
1745 | atomic_dec(&cmd->t_task_cdbs_left); | ||
1746 | task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); | ||
1747 | was_active = true; | ||
1748 | } | ||
1749 | |||
1750 | return was_active; | ||
1751 | } | ||
1752 | |||
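target_stop_task() factors out the stop sequence that transport_stop_tasks_for_cmd() and the LUN-reset path previously duplicated: mark the request under the command's state lock, drop the lock (the IRQ flags travel by pointer, so the caller's spin_lock_irqsave() context survives the round trip), sleep on the task's completion, then retake the lock and clear the flags. A reduced sketch with hypothetical types:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define EX_TF_ACTIVE		(1 << 0)
#define EX_TF_REQUEST_STOP	(1 << 1)

struct ex_task {
	unsigned int		flags;
	struct completion	stop_done;	/* complete()d by the I/O path */
	spinlock_t		*state_lock;	/* the owning command's lock */
};

/* Caller holds *state_lock with IRQ state in *irqflags; may sleep. */
static bool ex_stop_task(struct ex_task *t, unsigned long *irqflags)
{
	if (!(t->flags & EX_TF_ACTIVE))
		return false;			/* nothing in flight */

	t->flags |= EX_TF_REQUEST_STOP;
	spin_unlock_irqrestore(t->state_lock, *irqflags);

	wait_for_completion(&t->stop_done);	/* must not sleep holding the lock */

	spin_lock_irqsave(t->state_lock, *irqflags);
	t->flags &= ~(EX_TF_ACTIVE | EX_TF_REQUEST_STOP);
	return true;
}

The other half of the handshake is in transport_complete_task() above: when TF_REQUEST_STOP is set it calls complete(&task->task_stop_comp) instead of finishing the command normally.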
1873 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | 1753 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
1874 | { | 1754 | { |
1875 | struct se_task *task, *task_tmp; | 1755 | struct se_task *task, *task_tmp; |
@@ -1885,51 +1765,26 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
1885 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 1765 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1886 | list_for_each_entry_safe(task, task_tmp, | 1766 | list_for_each_entry_safe(task, task_tmp, |
1887 | &cmd->t_task_list, t_list) { | 1767 | &cmd->t_task_list, t_list) { |
1888 | pr_debug("task_no[%d] - Processing task %p\n", | 1768 | pr_debug("Processing task %p\n", task); |
1889 | task->task_no, task); | ||
1890 | /* | 1769 | /* |
1891 | * If the struct se_task has not been sent and is not active, | 1770 | * If the struct se_task has not been sent and is not active, |
1892 | * remove the struct se_task from the execution queue. | 1771 | * remove the struct se_task from the execution queue. |
1893 | */ | 1772 | */ |
1894 | if (!atomic_read(&task->task_sent) && | 1773 | if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { |
1895 | !atomic_read(&task->task_active)) { | ||
1896 | spin_unlock_irqrestore(&cmd->t_state_lock, | 1774 | spin_unlock_irqrestore(&cmd->t_state_lock, |
1897 | flags); | 1775 | flags); |
1898 | transport_remove_task_from_execute_queue(task, | 1776 | transport_remove_task_from_execute_queue(task, |
1899 | task->se_dev); | 1777 | cmd->se_dev); |
1900 | 1778 | ||
1901 | pr_debug("task_no[%d] - Removed from execute queue\n", | 1779 | pr_debug("Task %p removed from execute queue\n", task); |
1902 | task->task_no); | ||
1903 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 1780 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1904 | continue; | 1781 | continue; |
1905 | } | 1782 | } |
1906 | 1783 | ||
1907 | /* | 1784 | if (!target_stop_task(task, &flags)) { |
1908 | * If the struct se_task is active, sleep until it is returned | 1785 | pr_debug("Task %p - did nothing\n", task); |
1909 | * from the plugin. | ||
1910 | */ | ||
1911 | if (atomic_read(&task->task_active)) { | ||
1912 | atomic_set(&task->task_stop, 1); | ||
1913 | spin_unlock_irqrestore(&cmd->t_state_lock, | ||
1914 | flags); | ||
1915 | |||
1916 | pr_debug("task_no[%d] - Waiting to complete\n", | ||
1917 | task->task_no); | ||
1918 | wait_for_completion(&task->task_stop_comp); | ||
1919 | pr_debug("task_no[%d] - Stopped successfully\n", | ||
1920 | task->task_no); | ||
1921 | |||
1922 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
1923 | atomic_dec(&cmd->t_task_cdbs_left); | ||
1924 | |||
1925 | atomic_set(&task->task_active, 0); | ||
1926 | atomic_set(&task->task_stop, 0); | ||
1927 | } else { | ||
1928 | pr_debug("task_no[%d] - Did nothing\n", task->task_no); | ||
1929 | ret++; | 1786 | ret++; |
1930 | } | 1787 | } |
1931 | |||
1932 | __transport_stop_task_timer(task, &flags); | ||
1933 | } | 1788 | } |
1934 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 1789 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1935 | 1790 | ||
@@ -1941,7 +1796,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | |||
1941 | */ | 1796 | */ |
1942 | static void transport_generic_request_failure( | 1797 | static void transport_generic_request_failure( |
1943 | struct se_cmd *cmd, | 1798 | struct se_cmd *cmd, |
1944 | struct se_device *dev, | ||
1945 | int complete, | 1799 | int complete, |
1946 | int sc) | 1800 | int sc) |
1947 | { | 1801 | { |
@@ -1950,10 +1804,9 @@ static void transport_generic_request_failure( | |||
1950 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | 1804 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
1951 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), | 1805 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
1952 | cmd->t_task_cdb[0]); | 1806 | cmd->t_task_cdb[0]); |
1953 | pr_debug("-----[ i_state: %d t_state/def_t_state:" | 1807 | pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", |
1954 | " %d/%d transport_error_status: %d\n", | ||
1955 | cmd->se_tfo->get_cmd_state(cmd), | 1808 | cmd->se_tfo->get_cmd_state(cmd), |
1956 | cmd->t_state, cmd->deferred_t_state, | 1809 | cmd->t_state, |
1957 | cmd->transport_error_status); | 1810 | cmd->transport_error_status); |
1958 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" | 1811 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
1959 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | 1812 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
@@ -1966,10 +1819,6 @@ static void transport_generic_request_failure( | |||
1966 | atomic_read(&cmd->t_transport_stop), | 1819 | atomic_read(&cmd->t_transport_stop), |
1967 | atomic_read(&cmd->t_transport_sent)); | 1820 | atomic_read(&cmd->t_transport_sent)); |
1968 | 1821 | ||
1969 | transport_stop_all_task_timers(cmd); | ||
1970 | |||
1971 | if (dev) | ||
1972 | atomic_inc(&dev->depth_left); | ||
1973 | /* | 1822 | /* |
1974 | * For SAM Task Attribute emulation for failed struct se_cmd | 1823 | * For SAM Task Attribute emulation for failed struct se_cmd |
1975 | */ | 1824 | */ |
@@ -1977,7 +1826,6 @@ static void transport_generic_request_failure( | |||
1977 | transport_complete_task_attr(cmd); | 1826 | transport_complete_task_attr(cmd); |
1978 | 1827 | ||
1979 | if (complete) { | 1828 | if (complete) { |
1980 | transport_direct_request_timeout(cmd); | ||
1981 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | 1829 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; |
1982 | } | 1830 | } |
1983 | 1831 | ||
@@ -2076,46 +1924,8 @@ check_stop: | |||
2076 | return; | 1924 | return; |
2077 | 1925 | ||
2078 | queue_full: | 1926 | queue_full: |
2079 | cmd->t_state = TRANSPORT_COMPLETE_OK; | 1927 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
2080 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | 1928 | transport_handle_queue_full(cmd, cmd->se_dev); |
2081 | } | ||
2082 | |||
2083 | static void transport_direct_request_timeout(struct se_cmd *cmd) | ||
2084 | { | ||
2085 | unsigned long flags; | ||
2086 | |||
2087 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
2088 | if (!atomic_read(&cmd->t_transport_timeout)) { | ||
2089 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2090 | return; | ||
2091 | } | ||
2092 | if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { | ||
2093 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2094 | return; | ||
2095 | } | ||
2096 | |||
2097 | atomic_sub(atomic_read(&cmd->t_transport_timeout), | ||
2098 | &cmd->t_se_count); | ||
2099 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2100 | } | ||
2101 | |||
2102 | static void transport_generic_request_timeout(struct se_cmd *cmd) | ||
2103 | { | ||
2104 | unsigned long flags; | ||
2105 | |||
2106 | /* | ||
2107 | * Reset cmd->t_se_count to allow transport_generic_remove() | ||
2108 | * to allow last call to free memory resources. | ||
2109 | */ | ||
2110 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
2111 | if (atomic_read(&cmd->t_transport_timeout) > 1) { | ||
2112 | int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); | ||
2113 | |||
2114 | atomic_sub(tmp, &cmd->t_se_count); | ||
2115 | } | ||
2116 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2117 | |||
2118 | transport_generic_remove(cmd, 0); | ||
2119 | } | 1929 | } |
2120 | 1930 | ||
2121 | static inline u32 transport_lba_21(unsigned char *cdb) | 1931 | static inline u32 transport_lba_21(unsigned char *cdb) |
@@ -2160,127 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |||
2160 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | 1970 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
2161 | } | 1971 | } |
2162 | 1972 | ||
2163 | /* | ||
2164 | * Called from interrupt context. | ||
2165 | */ | ||
2166 | static void transport_task_timeout_handler(unsigned long data) | ||
2167 | { | ||
2168 | struct se_task *task = (struct se_task *)data; | ||
2169 | struct se_cmd *cmd = task->task_se_cmd; | ||
2170 | unsigned long flags; | ||
2171 | |||
2172 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | ||
2173 | |||
2174 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
2175 | if (task->task_flags & TF_STOP) { | ||
2176 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2177 | return; | ||
2178 | } | ||
2179 | task->task_flags &= ~TF_RUNNING; | ||
2180 | |||
2181 | /* | ||
2182 | * Determine if transport_complete_task() has already been called. | ||
2183 | */ | ||
2184 | if (!atomic_read(&task->task_active)) { | ||
2185 | pr_debug("transport task: %p cmd: %p timeout task_active" | ||
2186 | " == 0\n", task, cmd); | ||
2187 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2188 | return; | ||
2189 | } | ||
2190 | |||
2191 | atomic_inc(&cmd->t_se_count); | ||
2192 | atomic_inc(&cmd->t_transport_timeout); | ||
2193 | cmd->t_tasks_failed = 1; | ||
2194 | |||
2195 | atomic_set(&task->task_timeout, 1); | ||
2196 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | ||
2197 | task->task_scsi_status = 1; | ||
2198 | |||
2199 | if (atomic_read(&task->task_stop)) { | ||
2200 | pr_debug("transport task: %p cmd: %p timeout task_stop" | ||
2201 | " == 1\n", task, cmd); | ||
2202 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2203 | complete(&task->task_stop_comp); | ||
2204 | return; | ||
2205 | } | ||
2206 | |||
2207 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { | ||
2208 | pr_debug("transport task: %p cmd: %p timeout non zero" | ||
2209 | " t_task_cdbs_left\n", task, cmd); | ||
2210 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2211 | return; | ||
2212 | } | ||
2213 | pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | ||
2214 | task, cmd); | ||
2215 | |||
2216 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | ||
2217 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2218 | |||
2219 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | ||
2220 | } | ||
2221 | |||
2222 | /* | ||
2223 | * Called with cmd->t_state_lock held. | ||
2224 | */ | ||
2225 | static void transport_start_task_timer(struct se_task *task) | ||
2226 | { | ||
2227 | struct se_device *dev = task->se_dev; | ||
2228 | int timeout; | ||
2229 | |||
2230 | if (task->task_flags & TF_RUNNING) | ||
2231 | return; | ||
2232 | /* | ||
2233 | * If the task_timeout is disabled, exit now. | ||
2234 | */ | ||
2235 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; | ||
2236 | if (!timeout) | ||
2237 | return; | ||
2238 | |||
2239 | init_timer(&task->task_timer); | ||
2240 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | ||
2241 | task->task_timer.data = (unsigned long) task; | ||
2242 | task->task_timer.function = transport_task_timeout_handler; | ||
2243 | |||
2244 | task->task_flags |= TF_RUNNING; | ||
2245 | add_timer(&task->task_timer); | ||
2246 | #if 0 | ||
2247 | pr_debug("Starting task timer for cmd: %p task: %p seconds:" | ||
2248 | " %d\n", task->task_se_cmd, task, timeout); | ||
2249 | #endif | ||
2250 | } | ||
2251 | |||
2252 | /* | ||
2253 | * Called with spin_lock_irq(&cmd->t_state_lock) held. | ||
2254 | */ | ||
2255 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | ||
2256 | { | ||
2257 | struct se_cmd *cmd = task->task_se_cmd; | ||
2258 | |||
2259 | if (!task->task_flags & TF_RUNNING) | ||
2260 | return; | ||
2261 | |||
2262 | task->task_flags |= TF_STOP; | ||
2263 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | ||
2264 | |||
2265 | del_timer_sync(&task->task_timer); | ||
2266 | |||
2267 | spin_lock_irqsave(&cmd->t_state_lock, *flags); | ||
2268 | task->task_flags &= ~TF_RUNNING; | ||
2269 | task->task_flags &= ~TF_STOP; | ||
2270 | } | ||
2271 | |||
2272 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | ||
2273 | { | ||
2274 | struct se_task *task = NULL, *task_tmp; | ||
2275 | unsigned long flags; | ||
2276 | |||
2277 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
2278 | list_for_each_entry_safe(task, task_tmp, | ||
2279 | &cmd->t_task_list, t_list) | ||
2280 | __transport_stop_task_timer(task, &flags); | ||
2281 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2282 | } | ||
2283 | |||
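The block deleted above was the legacy per-task watchdog timer, and its stop path shows why the lock juggling existed: del_timer_sync() must never be called while holding a lock the timer handler itself takes, so __transport_stop_task_timer() set TF_STOP under t_state_lock, dropped the lock for the synchronous delete, then retook it. (Note in passing the precedence slip in the deleted guard: !task->task_flags & TF_RUNNING negates the whole flags word before masking, so it never tested what it meant to.) The essential shape of that classic sequence, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/timer.h>

/* Sketch of the legacy stop sequence removed above. The caller holds
 * *lock (IRQ state in *irqflags); the timer handler takes the same
 * lock, so it must be dropped before synchronizing. */
static void ex_stop_watchdog(struct timer_list *timer, spinlock_t *lock,
			     unsigned long *irqflags)
{
	spin_unlock_irqrestore(lock, *irqflags);
	del_timer_sync(timer);		/* waits out a concurrently running handler */
	spin_lock_irqsave(lock, *irqflags);
}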
2284 | static inline int transport_tcq_window_closed(struct se_device *dev) | 1973 | static inline int transport_tcq_window_closed(struct se_device *dev) |
2285 | { | 1974 | { |
2286 | if (dev->dev_tcq_window_closed++ < | 1975 | if (dev->dev_tcq_window_closed++ < |
@@ -2385,7 +2074,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) | |||
2385 | 2074 | ||
2386 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | 2075 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2387 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | 2076 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; |
2388 | transport_generic_request_failure(cmd, NULL, 0, 1); | 2077 | transport_generic_request_failure(cmd, 0, 1); |
2389 | return 0; | 2078 | return 0; |
2390 | } | 2079 | } |
2391 | 2080 | ||
@@ -2448,9 +2137,7 @@ check_depth: | |||
2448 | } | 2137 | } |
2449 | task = list_first_entry(&dev->execute_task_list, | 2138 | task = list_first_entry(&dev->execute_task_list, |
2450 | struct se_task, t_execute_list); | 2139 | struct se_task, t_execute_list); |
2451 | list_del(&task->t_execute_list); | 2140 | __transport_remove_task_from_execute_queue(task, dev); |
2452 | atomic_set(&task->task_execute_queue, 0); | ||
2453 | atomic_dec(&dev->execute_tasks); | ||
2454 | spin_unlock_irq(&dev->execute_task_lock); | 2141 | spin_unlock_irq(&dev->execute_task_lock); |
2455 | 2142 | ||
2456 | atomic_dec(&dev->depth_left); | 2143 | atomic_dec(&dev->depth_left); |
@@ -2458,15 +2145,13 @@ check_depth: | |||
2458 | cmd = task->task_se_cmd; | 2145 | cmd = task->task_se_cmd; |
2459 | 2146 | ||
2460 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2147 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2461 | atomic_set(&task->task_active, 1); | 2148 | task->task_flags |= (TF_ACTIVE | TF_SENT); |
2462 | atomic_set(&task->task_sent, 1); | ||
2463 | atomic_inc(&cmd->t_task_cdbs_sent); | 2149 | atomic_inc(&cmd->t_task_cdbs_sent); |
2464 | 2150 | ||
2465 | if (atomic_read(&cmd->t_task_cdbs_sent) == | 2151 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2466 | cmd->t_task_list_num) | 2152 | cmd->t_task_list_num) |
2467 | atomic_set(&cmd->transport_sent, 1); | 2153 | atomic_set(&cmd->t_transport_sent, 1); |
2468 | 2154 | ||
2469 | transport_start_task_timer(task); | ||
2470 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2155 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2471 | /* | 2156 | /* |
2472 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | 2157 | * The struct se_cmd->transport_emulate_cdb() function pointer is used |
@@ -2477,10 +2162,13 @@ check_depth: | |||
2477 | error = cmd->transport_emulate_cdb(cmd); | 2162 | error = cmd->transport_emulate_cdb(cmd); |
2478 | if (error != 0) { | 2163 | if (error != 0) { |
2479 | cmd->transport_error_status = error; | 2164 | cmd->transport_error_status = error; |
2480 | atomic_set(&task->task_active, 0); | 2165 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2481 | atomic_set(&cmd->transport_sent, 0); | 2166 | task->task_flags &= ~TF_ACTIVE; |
2167 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2168 | atomic_set(&cmd->t_transport_sent, 0); | ||
2482 | transport_stop_tasks_for_cmd(cmd); | 2169 | transport_stop_tasks_for_cmd(cmd); |
2483 | transport_generic_request_failure(cmd, dev, 0, 1); | 2170 | atomic_inc(&dev->depth_left); |
2171 | transport_generic_request_failure(cmd, 0, 1); | ||
2484 | goto check_depth; | 2172 | goto check_depth; |
2485 | } | 2173 | } |
2486 | /* | 2174 | /* |
@@ -2513,10 +2201,13 @@ check_depth: | |||
2513 | 2201 | ||
2514 | if (error != 0) { | 2202 | if (error != 0) { |
2515 | cmd->transport_error_status = error; | 2203 | cmd->transport_error_status = error; |
2516 | atomic_set(&task->task_active, 0); | 2204 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2517 | atomic_set(&cmd->transport_sent, 0); | 2205 | task->task_flags &= ~TF_ACTIVE; |
2206 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
2207 | atomic_set(&cmd->t_transport_sent, 0); | ||
2518 | transport_stop_tasks_for_cmd(cmd); | 2208 | transport_stop_tasks_for_cmd(cmd); |
2519 | transport_generic_request_failure(cmd, dev, 0, 1); | 2209 | atomic_inc(&dev->depth_left); |
2210 | transport_generic_request_failure(cmd, 0, 1); | ||
2520 | } | 2211 | } |
2521 | } | 2212 | } |
2522 | 2213 | ||
@@ -2538,8 +2229,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) | |||
2538 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | 2229 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
2539 | } | 2230 | } |
2540 | 2231 | ||
2541 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | ||
2542 | |||
2543 | static inline u32 transport_get_sectors_6( | 2232 | static inline u32 transport_get_sectors_6( |
2544 | unsigned char *cdb, | 2233 | unsigned char *cdb, |
2545 | struct se_cmd *cmd, | 2234 | struct se_cmd *cmd, |
@@ -2752,13 +2441,16 @@ out: | |||
2752 | static int transport_get_sense_data(struct se_cmd *cmd) | 2441 | static int transport_get_sense_data(struct se_cmd *cmd) |
2753 | { | 2442 | { |
2754 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | 2443 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; |
2755 | struct se_device *dev; | 2444 | struct se_device *dev = cmd->se_dev; |
2756 | struct se_task *task = NULL, *task_tmp; | 2445 | struct se_task *task = NULL, *task_tmp; |
2757 | unsigned long flags; | 2446 | unsigned long flags; |
2758 | u32 offset = 0; | 2447 | u32 offset = 0; |
2759 | 2448 | ||
2760 | WARN_ON(!cmd->se_lun); | 2449 | WARN_ON(!cmd->se_lun); |
2761 | 2450 | ||
2451 | if (!dev) | ||
2452 | return 0; | ||
2453 | |||
2762 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2454 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2763 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 2455 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
2764 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2456 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
@@ -2767,14 +2459,9 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2767 | 2459 | ||
2768 | list_for_each_entry_safe(task, task_tmp, | 2460 | list_for_each_entry_safe(task, task_tmp, |
2769 | &cmd->t_task_list, t_list) { | 2461 | &cmd->t_task_list, t_list) { |
2770 | |||
2771 | if (!task->task_sense) | 2462 | if (!task->task_sense) |
2772 | continue; | 2463 | continue; |
2773 | 2464 | ||
2774 | dev = task->se_dev; | ||
2775 | if (!dev) | ||
2776 | continue; | ||
2777 | |||
2778 | if (!dev->transport->get_sense_buffer) { | 2465 | if (!dev->transport->get_sense_buffer) { |
2779 | pr_err("dev->transport->get_sense_buffer" | 2466 | pr_err("dev->transport->get_sense_buffer" |
2780 | " is NULL\n"); | 2467 | " is NULL\n"); |
@@ -2783,9 +2470,9 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2783 | 2470 | ||
2784 | sense_buffer = dev->transport->get_sense_buffer(task); | 2471 | sense_buffer = dev->transport->get_sense_buffer(task); |
2785 | if (!sense_buffer) { | 2472 | if (!sense_buffer) { |
2786 | pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" | 2473 | pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" |
2787 | " sense buffer for task with sense\n", | 2474 | " sense buffer for task with sense\n", |
2788 | cmd->se_tfo->get_task_tag(cmd), task->task_no); | 2475 | cmd->se_tfo->get_task_tag(cmd), task); |
2789 | continue; | 2476 | continue; |
2790 | } | 2477 | } |
2791 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2478 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
@@ -2814,7 +2501,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) | |||
2814 | static int | 2501 | static int |
2815 | transport_handle_reservation_conflict(struct se_cmd *cmd) | 2502 | transport_handle_reservation_conflict(struct se_cmd *cmd) |
2816 | { | 2503 | { |
2817 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | ||
2818 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2504 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2819 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | 2505 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; |
2820 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | 2506 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
@@ -2915,8 +2601,6 @@ static int transport_generic_cmd_sequencer( | |||
2915 | * Check for an existing UNIT ATTENTION condition | 2601 | * Check for an existing UNIT ATTENTION condition |
2916 | */ | 2602 | */ |
2917 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | 2603 | if (core_scsi3_ua_check(cmd, cdb) < 0) { |
2918 | cmd->transport_wait_for_tasks = | ||
2919 | &transport_nop_wait_for_tasks; | ||
2920 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2604 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2921 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | 2605 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; |
2922 | return -EINVAL; | 2606 | return -EINVAL; |
@@ -2926,7 +2610,6 @@ static int transport_generic_cmd_sequencer( | |||
2926 | */ | 2610 | */ |
2927 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); | 2611 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
2928 | if (ret != 0) { | 2612 | if (ret != 0) { |
2929 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | ||
2930 | /* | 2613 | /* |
2931 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | 2614 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
2932 | * The ALUA additional sense code qualifier (ASCQ) is determined | 2615 | * The ALUA additional sense code qualifier (ASCQ) is determined |
@@ -2965,7 +2648,6 @@ static int transport_generic_cmd_sequencer( | |||
2965 | if (sector_ret) | 2648 | if (sector_ret) |
2966 | goto out_unsupported_cdb; | 2649 | goto out_unsupported_cdb; |
2967 | size = transport_get_size(sectors, cdb, cmd); | 2650 | size = transport_get_size(sectors, cdb, cmd); |
2968 | cmd->transport_split_cdb = &split_cdb_XX_6; | ||
2969 | cmd->t_task_lba = transport_lba_21(cdb); | 2651 | cmd->t_task_lba = transport_lba_21(cdb); |
2970 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2652 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2971 | break; | 2653 | break; |
@@ -2974,7 +2656,6 @@ static int transport_generic_cmd_sequencer( | |||
2974 | if (sector_ret) | 2656 | if (sector_ret) |
2975 | goto out_unsupported_cdb; | 2657 | goto out_unsupported_cdb; |
2976 | size = transport_get_size(sectors, cdb, cmd); | 2658 | size = transport_get_size(sectors, cdb, cmd); |
2977 | cmd->transport_split_cdb = &split_cdb_XX_10; | ||
2978 | cmd->t_task_lba = transport_lba_32(cdb); | 2659 | cmd->t_task_lba = transport_lba_32(cdb); |
2979 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2660 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2980 | break; | 2661 | break; |
@@ -2983,7 +2664,6 @@ static int transport_generic_cmd_sequencer( | |||
2983 | if (sector_ret) | 2664 | if (sector_ret) |
2984 | goto out_unsupported_cdb; | 2665 | goto out_unsupported_cdb; |
2985 | size = transport_get_size(sectors, cdb, cmd); | 2666 | size = transport_get_size(sectors, cdb, cmd); |
2986 | cmd->transport_split_cdb = &split_cdb_XX_12; | ||
2987 | cmd->t_task_lba = transport_lba_32(cdb); | 2667 | cmd->t_task_lba = transport_lba_32(cdb); |
2988 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2668 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2989 | break; | 2669 | break; |
@@ -2992,7 +2672,6 @@ static int transport_generic_cmd_sequencer( | |||
2992 | if (sector_ret) | 2672 | if (sector_ret) |
2993 | goto out_unsupported_cdb; | 2673 | goto out_unsupported_cdb; |
2994 | size = transport_get_size(sectors, cdb, cmd); | 2674 | size = transport_get_size(sectors, cdb, cmd); |
2995 | cmd->transport_split_cdb = &split_cdb_XX_16; | ||
2996 | cmd->t_task_lba = transport_lba_64(cdb); | 2675 | cmd->t_task_lba = transport_lba_64(cdb); |
2997 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2676 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2998 | break; | 2677 | break; |
@@ -3001,7 +2680,6 @@ static int transport_generic_cmd_sequencer( | |||
3001 | if (sector_ret) | 2680 | if (sector_ret) |
3002 | goto out_unsupported_cdb; | 2681 | goto out_unsupported_cdb; |
3003 | size = transport_get_size(sectors, cdb, cmd); | 2682 | size = transport_get_size(sectors, cdb, cmd); |
3004 | cmd->transport_split_cdb = &split_cdb_XX_6; | ||
3005 | cmd->t_task_lba = transport_lba_21(cdb); | 2683 | cmd->t_task_lba = transport_lba_21(cdb); |
3006 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2684 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3007 | break; | 2685 | break; |
@@ -3010,7 +2688,6 @@ static int transport_generic_cmd_sequencer( | |||
3010 | if (sector_ret) | 2688 | if (sector_ret) |
3011 | goto out_unsupported_cdb; | 2689 | goto out_unsupported_cdb; |
3012 | size = transport_get_size(sectors, cdb, cmd); | 2690 | size = transport_get_size(sectors, cdb, cmd); |
3013 | cmd->transport_split_cdb = &split_cdb_XX_10; | ||
3014 | cmd->t_task_lba = transport_lba_32(cdb); | 2691 | cmd->t_task_lba = transport_lba_32(cdb); |
3015 | cmd->t_tasks_fua = (cdb[1] & 0x8); | 2692 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3016 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2693 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
@@ -3020,7 +2697,6 @@ static int transport_generic_cmd_sequencer( | |||
3020 | if (sector_ret) | 2697 | if (sector_ret) |
3021 | goto out_unsupported_cdb; | 2698 | goto out_unsupported_cdb; |
3022 | size = transport_get_size(sectors, cdb, cmd); | 2699 | size = transport_get_size(sectors, cdb, cmd); |
3023 | cmd->transport_split_cdb = &split_cdb_XX_12; | ||
3024 | cmd->t_task_lba = transport_lba_32(cdb); | 2700 | cmd->t_task_lba = transport_lba_32(cdb); |
3025 | cmd->t_tasks_fua = (cdb[1] & 0x8); | 2701 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3026 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2702 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
@@ -3030,7 +2706,6 @@ static int transport_generic_cmd_sequencer( | |||
3030 | if (sector_ret) | 2706 | if (sector_ret) |
3031 | goto out_unsupported_cdb; | 2707 | goto out_unsupported_cdb; |
3032 | size = transport_get_size(sectors, cdb, cmd); | 2708 | size = transport_get_size(sectors, cdb, cmd); |
3033 | cmd->transport_split_cdb = &split_cdb_XX_16; | ||
3034 | cmd->t_task_lba = transport_lba_64(cdb); | 2709 | cmd->t_task_lba = transport_lba_64(cdb); |
3035 | cmd->t_tasks_fua = (cdb[1] & 0x8); | 2710 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
3036 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2711 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
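The sequencer hunks above only drop the transport_split_cdb assignments; the LBA and transfer-length decode is unchanged. As a worked example of what transport_lba_32() and transport_get_sectors_10() extract, here is a self-contained decode of a READ_10 CDB (standard SBC field layout; the 512-byte block size is an assumed attribute value):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* READ_10: opcode 0x28, LBA 0x00001000, transfer length 8 blocks */
            uint8_t cdb[10] = { 0x28, 0, 0x00, 0x00, 0x10, 0x00, 0, 0x00, 0x08, 0 };

            /* bytes 2..5, big-endian: what transport_lba_32() returns */
            uint32_t lba = (uint32_t)cdb[2] << 24 | (uint32_t)cdb[3] << 16 |
                           (uint32_t)cdb[4] << 8  | cdb[5];
            /* bytes 7..8, big-endian: the 10-byte sector count */
            uint16_t sectors = (uint16_t)cdb[7] << 8 | cdb[8];

            /* size = sectors * block_size, as transport_get_size() computes */
            printf("lba=%u sectors=%u size=%u\n", lba, sectors, sectors * 512u);
            return 0;
    }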
@@ -3043,18 +2718,14 @@ static int transport_generic_cmd_sequencer( | |||
3043 | if (sector_ret) | 2718 | if (sector_ret) |
3044 | goto out_unsupported_cdb; | 2719 | goto out_unsupported_cdb; |
3045 | size = transport_get_size(sectors, cdb, cmd); | 2720 | size = transport_get_size(sectors, cdb, cmd); |
3046 | cmd->transport_split_cdb = &split_cdb_XX_10; | ||
3047 | cmd->t_task_lba = transport_lba_32(cdb); | 2721 | cmd->t_task_lba = transport_lba_32(cdb); |
3048 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2722 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3049 | passthrough = (dev->transport->transport_type == | 2723 | |
3050 | TRANSPORT_PLUGIN_PHBA_PDEV); | 2724 | if (dev->transport->transport_type == |
3051 | /* | 2725 | TRANSPORT_PLUGIN_PHBA_PDEV) |
3052 | * Skip the remaining assignments for TCM/PSCSI passthrough | 2726 | goto out_unsupported_cdb; |
3053 | */ | ||
3054 | if (passthrough) | ||
3055 | break; | ||
3056 | /* | 2727 | /* |
3057 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | 2728 | * Setup BIDI XOR callback to be run after I/O completion. |
3058 | */ | 2729 | */ |
3059 | cmd->transport_complete_callback = &transport_xor_callback; | 2730 | cmd->transport_complete_callback = &transport_xor_callback; |
3060 | cmd->t_tasks_fua = (cdb[1] & 0x8); | 2731 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
@@ -3078,19 +2749,14 @@ static int transport_generic_cmd_sequencer( | |||
3078 | * Use WRITE_32 and READ_32 opcodes for the emulated | 2749 | * Use WRITE_32 and READ_32 opcodes for the emulated |
3079 | * XDWRITE_READ_32 logic. | 2750 | * XDWRITE_READ_32 logic. |
3080 | */ | 2751 | */ |
3081 | cmd->transport_split_cdb = &split_cdb_XX_32; | ||
3082 | cmd->t_task_lba = transport_lba_64_ext(cdb); | 2752 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
3083 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 2753 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3084 | 2754 | ||
3085 | /* | ||
3086 | * Skip the remaining assignments for TCM/PSCSI passthrough | ||
3087 | */ | ||
3088 | if (passthrough) | 2755 | if (passthrough) |
3089 | break; | 2756 | goto out_unsupported_cdb; |
3090 | |||
3091 | /* | 2757 | /* |
3092 | * Setup BIDI XOR callback to be run during | 2758 | * Setup BIDI XOR callback to be run after I/O |
3093 | * transport_generic_complete_ok() | 2759 | * completion. |
3094 | */ | 2760 | */ |
3095 | cmd->transport_complete_callback = &transport_xor_callback; | 2761 | cmd->transport_complete_callback = &transport_xor_callback; |
3096 | cmd->t_tasks_fua = (cdb[10] & 0x8); | 2762 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
@@ -3430,7 +3096,6 @@ static int transport_generic_cmd_sequencer( | |||
3430 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" | 3096 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" |
3431 | " 0x%02x, sending CHECK_CONDITION.\n", | 3097 | " 0x%02x, sending CHECK_CONDITION.\n", |
3432 | cmd->se_tfo->get_fabric_name(), cdb[0]); | 3098 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
3433 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | ||
3434 | goto out_unsupported_cdb; | 3099 | goto out_unsupported_cdb; |
3435 | } | 3100 | } |
3436 | 3101 | ||
@@ -3488,8 +3153,7 @@ out_invalid_cdb_field: | |||
3488 | } | 3153 | } |
3489 | 3154 | ||
3490 | /* | 3155 | /* |
3491 | * Called from transport_generic_complete_ok() and | 3156 | * Called from I/O completion to determine which dormant/delayed |
3492 | * transport_generic_request_failure() to determine which dormant/delayed | ||
3493 | * and ordered cmds need to have their tasks added to the execution queue. | 3157 | * and ordered cmds need to have their tasks added to the execution queue. |
3494 | */ | 3158 | */ |
3495 | static void transport_complete_task_attr(struct se_cmd *cmd) | 3159 | static void transport_complete_task_attr(struct se_cmd *cmd) |
@@ -3557,12 +3221,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
3557 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); | 3221 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
3558 | } | 3222 | } |
3559 | 3223 | ||
3560 | static int transport_complete_qf(struct se_cmd *cmd) | 3224 | static void transport_complete_qf(struct se_cmd *cmd) |
3561 | { | 3225 | { |
3562 | int ret = 0; | 3226 | int ret = 0; |
3563 | 3227 | ||
3564 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) | 3228 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3565 | return cmd->se_tfo->queue_status(cmd); | 3229 | transport_complete_task_attr(cmd); |
3230 | |||
3231 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | ||
3232 | ret = cmd->se_tfo->queue_status(cmd); | ||
3233 | if (ret) | ||
3234 | goto out; | ||
3235 | } | ||
3566 | 3236 | ||
3567 | switch (cmd->data_direction) { | 3237 | switch (cmd->data_direction) { |
3568 | case DMA_FROM_DEVICE: | 3238 | case DMA_FROM_DEVICE: |
@@ -3572,7 +3242,7 @@ static int transport_complete_qf(struct se_cmd *cmd) | |||
3572 | if (cmd->t_bidi_data_sg) { | 3242 | if (cmd->t_bidi_data_sg) { |
3573 | ret = cmd->se_tfo->queue_data_in(cmd); | 3243 | ret = cmd->se_tfo->queue_data_in(cmd); |
3574 | if (ret < 0) | 3244 | if (ret < 0) |
3575 | return ret; | 3245 | break; |
3576 | } | 3246 | } |
3577 | /* Fall through for DMA_TO_DEVICE */ | 3247 | /* Fall through for DMA_TO_DEVICE */ |
3578 | case DMA_NONE: | 3248 | case DMA_NONE: |
@@ -3582,17 +3252,20 @@ static int transport_complete_qf(struct se_cmd *cmd) | |||
3582 | break; | 3252 | break; |
3583 | } | 3253 | } |
3584 | 3254 | ||
3585 | return ret; | 3255 | out: |
3256 | if (ret < 0) { | ||
3257 | transport_handle_queue_full(cmd, cmd->se_dev); | ||
3258 | return; | ||
3259 | } | ||
3260 | transport_lun_remove_cmd(cmd); | ||
3261 | transport_cmd_check_stop_to_fabric(cmd); | ||
3586 | } | 3262 | } |
3587 | 3263 | ||
3588 | static void transport_handle_queue_full( | 3264 | static void transport_handle_queue_full( |
3589 | struct se_cmd *cmd, | 3265 | struct se_cmd *cmd, |
3590 | struct se_device *dev, | 3266 | struct se_device *dev) |
3591 | int (*qf_callback)(struct se_cmd *)) | ||
3592 | { | 3267 | { |
3593 | spin_lock_irq(&dev->qf_cmd_lock); | 3268 | spin_lock_irq(&dev->qf_cmd_lock); |
3594 | cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; | ||
3595 | cmd->transport_qf_callback = qf_callback; | ||
3596 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | 3269 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); |
3597 | atomic_inc(&dev->dev_qf_count); | 3270 | atomic_inc(&dev->dev_qf_count); |
3598 | smp_mb__after_atomic_inc(); | 3271 | smp_mb__after_atomic_inc(); |
@@ -3601,9 +3274,11 @@ static void transport_handle_queue_full( | |||
3601 | schedule_work(&cmd->se_dev->qf_work_queue); | 3274 | schedule_work(&cmd->se_dev->qf_work_queue); |
3602 | } | 3275 | } |
3603 | 3276 | ||
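transport_handle_queue_full() now takes only the command and device: the retry type travels in cmd->t_state rather than in a transport_qf_callback pointer, and the command is parked on dev->qf_cmd_list until the qf work item runs. A rough user-space model of the park-and-retry flow (all names and return values illustrative):

    #include <stdio.h>

    #define QUEUE_FULL (-11)        /* stands in for -EAGAIN */

    static int fabric_ready;

    static int queue_data_in(int cmd)       /* models se_tfo->queue_data_in */
    {
            return fabric_ready ? 0 : QUEUE_FULL;
    }

    static void handle_queue_full(int cmd)
    {
            printf("cmd %d parked on qf_cmd_list, qf work scheduled\n", cmd);
    }

    static void complete_qf(int cmd)        /* models transport_complete_qf */
    {
            if (queue_data_in(cmd) < 0) {
                    handle_queue_full(cmd); /* park again, retry later */
                    return;
            }
            printf("cmd %d completed to fabric\n", cmd);
    }

    int main(void)
    {
            complete_qf(7);         /* fabric full: parked */
            fabric_ready = 1;
            complete_qf(7);         /* what the qf worker would do on retry */
            return 0;
    }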
3604 | static void transport_generic_complete_ok(struct se_cmd *cmd) | 3277 | static void target_complete_ok_work(struct work_struct *work) |
3605 | { | 3278 | { |
3279 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | ||
3606 | int reason = 0, ret; | 3280 | int reason = 0, ret; |
3281 | |||
3607 | /* | 3282 | /* |
3608 | * Check if we need to move delayed/dormant tasks from cmds on the | 3283 | * Check if we need to move delayed/dormant tasks from cmds on the |
3609 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | 3284 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task |
@@ -3618,14 +3293,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3618 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | 3293 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) |
3619 | schedule_work(&cmd->se_dev->qf_work_queue); | 3294 | schedule_work(&cmd->se_dev->qf_work_queue); |
3620 | 3295 | ||
3621 | if (cmd->transport_qf_callback) { | ||
3622 | ret = cmd->transport_qf_callback(cmd); | ||
3623 | if (ret < 0) | ||
3624 | goto queue_full; | ||
3625 | |||
3626 | cmd->transport_qf_callback = NULL; | ||
3627 | goto done; | ||
3628 | } | ||
3629 | /* | 3296 | /* |
3630 | * Check if we need to retrieve a sense buffer from | 3297 | * Check if we need to retrieve a sense buffer from |
3631 | * the struct se_cmd in question. | 3298 | * the struct se_cmd in question. |
@@ -3701,7 +3368,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) | |||
3701 | break; | 3368 | break; |
3702 | } | 3369 | } |
3703 | 3370 | ||
3704 | done: | ||
3705 | transport_lun_remove_cmd(cmd); | 3371 | transport_lun_remove_cmd(cmd); |
3706 | transport_cmd_check_stop_to_fabric(cmd); | 3372 | transport_cmd_check_stop_to_fabric(cmd); |
3707 | return; | 3373 | return; |
@@ -3709,34 +3375,35 @@ done: | |||
3709 | queue_full: | 3375 | queue_full: |
3710 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," | 3376 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," |
3711 | " data_direction: %d\n", cmd, cmd->data_direction); | 3377 | " data_direction: %d\n", cmd, cmd->data_direction); |
3712 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | 3378 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
3379 | transport_handle_queue_full(cmd, cmd->se_dev); | ||
3713 | } | 3380 | } |
3714 | 3381 | ||
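target_complete_ok_work() is the workqueue conversion: the completion now runs from a work item embedded in the se_cmd and recovered with container_of(). A minimal stand-alone demonstration of that embedding pattern (modelled types, not the kernel structs):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { void (*func)(struct work_struct *); };

    struct se_cmd_model {
            int tag;
            struct work_struct work;        /* embedded, like se_cmd->work */
    };

    static void complete_ok_work(struct work_struct *work)
    {
            /* recover the enclosing command from the work pointer */
            struct se_cmd_model *cmd =
                    container_of(work, struct se_cmd_model, work);

            printf("completing cmd tag %d\n", cmd->tag);
    }

    int main(void)
    {
            struct se_cmd_model cmd = { .tag = 42, .work = { complete_ok_work } };

            cmd.work.func(&cmd.work);       /* what schedule_work() would invoke */
            return 0;
    }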
3715 | static void transport_free_dev_tasks(struct se_cmd *cmd) | 3382 | static void transport_free_dev_tasks(struct se_cmd *cmd) |
3716 | { | 3383 | { |
3717 | struct se_task *task, *task_tmp; | 3384 | struct se_task *task, *task_tmp; |
3718 | unsigned long flags; | 3385 | unsigned long flags; |
3386 | LIST_HEAD(dispose_list); | ||
3719 | 3387 | ||
3720 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 3388 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3721 | list_for_each_entry_safe(task, task_tmp, | 3389 | list_for_each_entry_safe(task, task_tmp, |
3722 | &cmd->t_task_list, t_list) { | 3390 | &cmd->t_task_list, t_list) { |
3723 | if (atomic_read(&task->task_active)) | 3391 | if (!(task->task_flags & TF_ACTIVE)) |
3724 | continue; | 3392 | list_move_tail(&task->t_list, &dispose_list); |
3393 | } | ||
3394 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3395 | |||
3396 | while (!list_empty(&dispose_list)) { | ||
3397 | task = list_first_entry(&dispose_list, struct se_task, t_list); | ||
3725 | 3398 | ||
3726 | kfree(task->task_sg_bidi); | 3399 | if (task->task_sg != cmd->t_data_sg && |
3727 | kfree(task->task_sg); | 3400 | task->task_sg != cmd->t_bidi_data_sg) |
3401 | kfree(task->task_sg); | ||
3728 | 3402 | ||
3729 | list_del(&task->t_list); | 3403 | list_del(&task->t_list); |
3730 | 3404 | ||
3731 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3405 | cmd->se_dev->transport->free_task(task); |
3732 | if (task->se_dev) | ||
3733 | task->se_dev->transport->free_task(task); | ||
3734 | else | ||
3735 | pr_err("task[%u] - task->se_dev is NULL\n", | ||
3736 | task->task_no); | ||
3737 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
3738 | } | 3406 | } |
3739 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3740 | } | 3407 | } |
3741 | 3408 | ||
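The rewritten transport_free_dev_tasks() fixes the list walking by moving inactive tasks onto a private dispose_list while t_state_lock is held and only calling free_task() after the lock is dropped. A simplified user-space model of that collect-then-free pattern (it moves every node; the kernel version skips tasks still marked TF_ACTIVE):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *task_list;

    static void free_inactive_tasks(void)
    {
            struct node *dispose, *n;

            pthread_mutex_lock(&lock);
            dispose = task_list;            /* move everything to a private list */
            task_list = NULL;
            pthread_mutex_unlock(&lock);

            while ((n = dispose)) {         /* free with the lock dropped */
                    dispose = n->next;
                    free(n);
            }
    }

    int main(void)
    {
            task_list = calloc(1, sizeof(struct node));
            free_inactive_tasks();
            printf("done\n");
            return 0;
    }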
3742 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) | 3409 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
@@ -3764,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
3764 | cmd->t_bidi_data_nents = 0; | 3431 | cmd->t_bidi_data_nents = 0; |
3765 | } | 3432 | } |
3766 | 3433 | ||
3767 | static inline void transport_release_tasks(struct se_cmd *cmd) | 3434 | /** |
3768 | { | 3435 | * transport_put_cmd - release a reference to a command |
3769 | transport_free_dev_tasks(cmd); | 3436 | * @cmd: command to release |
3770 | } | 3437 | * |
3771 | 3438 | * This routine releases our reference to the command and frees it if possible. | |
3772 | static inline int transport_dec_and_check(struct se_cmd *cmd) | 3439 | */ |
3440 | static void transport_put_cmd(struct se_cmd *cmd) | ||
3773 | { | 3441 | { |
3774 | unsigned long flags; | 3442 | unsigned long flags; |
3443 | int free_tasks = 0; | ||
3775 | 3444 | ||
3776 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 3445 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3777 | if (atomic_read(&cmd->t_fe_count)) { | 3446 | if (atomic_read(&cmd->t_fe_count)) { |
3778 | if (!atomic_dec_and_test(&cmd->t_fe_count)) { | 3447 | if (!atomic_dec_and_test(&cmd->t_fe_count)) |
3779 | spin_unlock_irqrestore(&cmd->t_state_lock, | 3448 | goto out_busy; |
3780 | flags); | ||
3781 | return 1; | ||
3782 | } | ||
3783 | } | 3449 | } |
3784 | 3450 | ||
3785 | if (atomic_read(&cmd->t_se_count)) { | 3451 | if (atomic_read(&cmd->t_se_count)) { |
3786 | if (!atomic_dec_and_test(&cmd->t_se_count)) { | 3452 | if (!atomic_dec_and_test(&cmd->t_se_count)) |
3787 | spin_unlock_irqrestore(&cmd->t_state_lock, | 3453 | goto out_busy; |
3788 | flags); | ||
3789 | return 1; | ||
3790 | } | ||
3791 | } | 3454 | } |
3792 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3793 | 3455 | ||
3794 | return 0; | 3456 | if (atomic_read(&cmd->transport_dev_active)) { |
3795 | } | 3457 | atomic_set(&cmd->transport_dev_active, 0); |
3796 | 3458 | transport_all_task_dev_remove_state(cmd); | |
3797 | static void transport_release_fe_cmd(struct se_cmd *cmd) | 3459 | free_tasks = 1; |
3798 | { | ||
3799 | unsigned long flags; | ||
3800 | |||
3801 | if (transport_dec_and_check(cmd)) | ||
3802 | return; | ||
3803 | |||
3804 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
3805 | if (!atomic_read(&cmd->transport_dev_active)) { | ||
3806 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3807 | goto free_pages; | ||
3808 | } | ||
3809 | atomic_set(&cmd->transport_dev_active, 0); | ||
3810 | transport_all_task_dev_remove_state(cmd); | ||
3811 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3812 | |||
3813 | transport_release_tasks(cmd); | ||
3814 | free_pages: | ||
3815 | transport_free_pages(cmd); | ||
3816 | transport_free_se_cmd(cmd); | ||
3817 | cmd->se_tfo->release_cmd(cmd); | ||
3818 | } | ||
3819 | |||
3820 | static int | ||
3821 | transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) | ||
3822 | { | ||
3823 | unsigned long flags; | ||
3824 | |||
3825 | if (transport_dec_and_check(cmd)) { | ||
3826 | if (session_reinstatement) { | ||
3827 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
3828 | transport_all_task_dev_remove_state(cmd); | ||
3829 | spin_unlock_irqrestore(&cmd->t_state_lock, | ||
3830 | flags); | ||
3831 | } | ||
3832 | return 1; | ||
3833 | } | ||
3834 | |||
3835 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
3836 | if (!atomic_read(&cmd->transport_dev_active)) { | ||
3837 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3838 | goto free_pages; | ||
3839 | } | 3460 | } |
3840 | atomic_set(&cmd->transport_dev_active, 0); | ||
3841 | transport_all_task_dev_remove_state(cmd); | ||
3842 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3461 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3843 | 3462 | ||
3844 | transport_release_tasks(cmd); | 3463 | if (free_tasks != 0) |
3464 | transport_free_dev_tasks(cmd); | ||
3845 | 3465 | ||
3846 | free_pages: | ||
3847 | transport_free_pages(cmd); | 3466 | transport_free_pages(cmd); |
3848 | transport_release_cmd(cmd); | 3467 | transport_release_cmd(cmd); |
3849 | return 0; | 3468 | return; |
3469 | out_busy: | ||
3470 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3850 | } | 3471 | } |
3851 | 3472 | ||
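transport_put_cmd() folds transport_dec_and_check(), transport_release_fe_cmd() and transport_generic_remove() into one dec-and-test release path: whichever reference (t_fe_count or t_se_count) drops last performs the cleanup, earlier puts take the out_busy exit. A C11 sketch of the underlying pattern (counts and messages are made up):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int t_fe_count = 2;       /* two outstanding references */

    static void put_cmd(void)
    {
            /* atomic_fetch_sub returns the value before the decrement */
            if (atomic_fetch_sub(&t_fe_count, 1) - 1 > 0) {
                    printf("still busy\n"); /* like the out_busy label */
                    return;
            }
            printf("last put: free pages and release cmd\n");
    }

    int main(void)
    {
            put_cmd();      /* still busy */
            put_cmd();      /* last reference: cleanup runs */
            return 0;
    }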
3852 | /* | 3473 | /* |
@@ -3888,62 +3509,6 @@ int transport_generic_map_mem_to_cmd( | |||
3888 | } | 3509 | } |
3889 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | 3510 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); |
3890 | 3511 | ||
3891 | static int transport_new_cmd_obj(struct se_cmd *cmd) | ||
3892 | { | ||
3893 | struct se_device *dev = cmd->se_dev; | ||
3894 | int set_counts = 1, rc, task_cdbs; | ||
3895 | |||
3896 | /* | ||
3897 | * Setup any BIDI READ tasks and memory from | ||
3898 | * cmd->t_mem_bidi_list so the READ struct se_tasks | ||
3899 | * are queued first for the non pSCSI passthrough case. | ||
3900 | */ | ||
3901 | if (cmd->t_bidi_data_sg && | ||
3902 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | ||
3903 | rc = transport_allocate_tasks(cmd, | ||
3904 | cmd->t_task_lba, | ||
3905 | DMA_FROM_DEVICE, | ||
3906 | cmd->t_bidi_data_sg, | ||
3907 | cmd->t_bidi_data_nents); | ||
3908 | if (rc <= 0) { | ||
3909 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3910 | cmd->scsi_sense_reason = | ||
3911 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
3912 | return -EINVAL; | ||
3913 | } | ||
3914 | atomic_inc(&cmd->t_fe_count); | ||
3915 | atomic_inc(&cmd->t_se_count); | ||
3916 | set_counts = 0; | ||
3917 | } | ||
3918 | /* | ||
3919 | * Setup the tasks and memory from cmd->t_mem_list | ||
3920 | * Note for BIDI transfers this will contain the WRITE payload | ||
3921 | */ | ||
3922 | task_cdbs = transport_allocate_tasks(cmd, | ||
3923 | cmd->t_task_lba, | ||
3924 | cmd->data_direction, | ||
3925 | cmd->t_data_sg, | ||
3926 | cmd->t_data_nents); | ||
3927 | if (task_cdbs <= 0) { | ||
3928 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3929 | cmd->scsi_sense_reason = | ||
3930 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
3931 | return -EINVAL; | ||
3932 | } | ||
3933 | |||
3934 | if (set_counts) { | ||
3935 | atomic_inc(&cmd->t_fe_count); | ||
3936 | atomic_inc(&cmd->t_se_count); | ||
3937 | } | ||
3938 | |||
3939 | cmd->t_task_list_num = task_cdbs; | ||
3940 | |||
3941 | atomic_set(&cmd->t_task_cdbs_left, task_cdbs); | ||
3942 | atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); | ||
3943 | atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); | ||
3944 | return 0; | ||
3945 | } | ||
3946 | |||
3947 | void *transport_kmap_first_data_page(struct se_cmd *cmd) | 3512 | void *transport_kmap_first_data_page(struct se_cmd *cmd) |
3948 | { | 3513 | { |
3949 | struct scatterlist *sg = cmd->t_data_sg; | 3514 | struct scatterlist *sg = cmd->t_data_sg; |
@@ -4054,15 +3619,13 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) | |||
4054 | /* | 3619 | /* |
4055 | * For the padded tasks, use the extra SGL vector allocated | 3620 | * For the padded tasks, use the extra SGL vector allocated |
4056 | * in transport_allocate_data_tasks() for the sg_prev_nents | 3621 | * in transport_allocate_data_tasks() for the sg_prev_nents |
4057 | * offset into sg_chain() above.. The last task of a | 3622 | * offset into sg_chain() above. |
4058 | * multi-task list, or a single task will not have | 3623 | * |
4059 | * task->task_sg_padded set.. | 3624 | * We do not need the padding for the last task (or a single |
3625 | * task), but in that case we never use the sg_prev_nents | ||
3626 | * value below, which would be incorrect. | ||
4060 | */ | 3627 | */ |
4061 | if (task->task_padded_sg) | 3628 | sg_prev_nents = (task->task_sg_nents + 1); |
4062 | sg_prev_nents = (task->task_sg_nents + 1); | ||
4063 | else | ||
4064 | sg_prev_nents = task->task_sg_nents; | ||
4065 | |||
4066 | sg_prev = task->task_sg; | 3629 | sg_prev = task->task_sg; |
4067 | } | 3630 | } |
4068 | /* | 3631 | /* |
@@ -4092,30 +3655,60 @@ EXPORT_SYMBOL(transport_do_task_sg_chain); | |||
4092 | /* | 3655 | /* |
4093 | * Break up cmd into chunks transport can handle | 3656 | * Break up cmd into chunks transport can handle |
4094 | */ | 3657 | */ |
4095 | static int transport_allocate_data_tasks( | 3658 | static int |
4096 | struct se_cmd *cmd, | 3659 | transport_allocate_data_tasks(struct se_cmd *cmd, |
4097 | unsigned long long lba, | ||
4098 | enum dma_data_direction data_direction, | 3660 | enum dma_data_direction data_direction, |
4099 | struct scatterlist *sgl, | 3661 | struct scatterlist *cmd_sg, unsigned int sgl_nents) |
4100 | unsigned int sgl_nents) | ||
4101 | { | 3662 | { |
4102 | unsigned char *cdb = NULL; | ||
4103 | struct se_task *task; | ||
4104 | struct se_device *dev = cmd->se_dev; | 3663 | struct se_device *dev = cmd->se_dev; |
4105 | unsigned long flags; | 3664 | int task_count, i; |
4106 | int task_count, i, ret; | 3665 | unsigned long long lba; |
4107 | sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; | 3666 | sector_t sectors, dev_max_sectors; |
4108 | u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; | 3667 | u32 sector_size; |
4109 | struct scatterlist *sg; | 3668 | |
4110 | struct scatterlist *cmd_sg; | 3669 | if (transport_cmd_get_valid_sectors(cmd) < 0) |
3670 | return -EINVAL; | ||
3671 | |||
3672 | dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; | ||
3673 | sector_size = dev->se_sub_dev->se_dev_attrib.block_size; | ||
4111 | 3674 | ||
4112 | WARN_ON(cmd->data_length % sector_size); | 3675 | WARN_ON(cmd->data_length % sector_size); |
3676 | |||
3677 | lba = cmd->t_task_lba; | ||
4113 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); | 3678 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); |
4114 | task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); | 3679 | task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); |
4115 | 3680 | ||
4116 | cmd_sg = sgl; | 3681 | /* |
3682 | * If we need just a single task reuse the SG list in the command | ||
3683 | * and avoid a lot of work. | ||
3684 | */ | ||
3685 | if (task_count == 1) { | ||
3686 | struct se_task *task; | ||
3687 | unsigned long flags; | ||
3688 | |||
3689 | task = transport_generic_get_task(cmd, data_direction); | ||
3690 | if (!task) | ||
3691 | return -ENOMEM; | ||
3692 | |||
3693 | task->task_sg = cmd_sg; | ||
3694 | task->task_sg_nents = sgl_nents; | ||
3695 | |||
3696 | task->task_lba = lba; | ||
3697 | task->task_sectors = sectors; | ||
3698 | task->task_size = task->task_sectors * sector_size; | ||
3699 | |||
3700 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
3701 | list_add_tail(&task->t_list, &cmd->t_task_list); | ||
3702 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
3703 | |||
3704 | return task_count; | ||
3705 | } | ||
3706 | |||
4117 | for (i = 0; i < task_count; i++) { | 3707 | for (i = 0; i < task_count; i++) { |
3708 | struct se_task *task; | ||
4118 | unsigned int task_size, task_sg_nents_padded; | 3709 | unsigned int task_size, task_sg_nents_padded; |
3710 | struct scatterlist *sg; | ||
3711 | unsigned long flags; | ||
4119 | int count; | 3712 | int count; |
4120 | 3713 | ||
4121 | task = transport_generic_get_task(cmd, data_direction); | 3714 | task = transport_generic_get_task(cmd, data_direction); |
@@ -4126,14 +3719,6 @@ static int transport_allocate_data_tasks( | |||
4126 | task->task_sectors = min(sectors, dev_max_sectors); | 3719 | task->task_sectors = min(sectors, dev_max_sectors); |
4127 | task->task_size = task->task_sectors * sector_size; | 3720 | task->task_size = task->task_sectors * sector_size; |
4128 | 3721 | ||
4129 | cdb = dev->transport->get_cdb(task); | ||
4130 | BUG_ON(!cdb); | ||
4131 | |||
4132 | memcpy(cdb, cmd->t_task_cdb, | ||
4133 | scsi_command_size(cmd->t_task_cdb)); | ||
4134 | |||
4135 | /* Update new cdb with updated lba/sectors */ | ||
4136 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); | ||
4137 | /* | 3722 | /* |
4138 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | 3723 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks |
4139 | * in order to calculate the number per task SGL entries | 3724 | * in order to calculate the number per task SGL entries |
@@ -4149,7 +3734,6 @@ static int transport_allocate_data_tasks( | |||
4149 | */ | 3734 | */ |
4150 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { | 3735 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4151 | task_sg_nents_padded = (task->task_sg_nents + 1); | 3736 | task_sg_nents_padded = (task->task_sg_nents + 1); |
4152 | task->task_padded_sg = 1; | ||
4153 | } else | 3737 | } else |
4154 | task_sg_nents_padded = task->task_sg_nents; | 3738 | task_sg_nents_padded = task->task_sg_nents; |
4155 | 3739 | ||
@@ -4181,20 +3765,6 @@ static int transport_allocate_data_tasks( | |||
4181 | list_add_tail(&task->t_list, &cmd->t_task_list); | 3765 | list_add_tail(&task->t_list, &cmd->t_task_list); |
4182 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3766 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4183 | } | 3767 | } |
4184 | /* | ||
4185 | * Now perform the memory map of task->task_sg[] into backend | ||
4186 | * subsystem memory.. | ||
4187 | */ | ||
4188 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | ||
4189 | if (atomic_read(&task->task_sent)) | ||
4190 | continue; | ||
4191 | if (!dev->transport->map_data_SG) | ||
4192 | continue; | ||
4193 | |||
4194 | ret = dev->transport->map_data_SG(task); | ||
4195 | if (ret < 0) | ||
4196 | return 0; | ||
4197 | } | ||
4198 | 3768 | ||
4199 | return task_count; | 3769 | return task_count; |
4200 | } | 3770 | } |
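The fan-out arithmetic above is unchanged: a command becomes ceil(sectors / max_sectors) tasks, and the new task_count == 1 fast path reuses the command S/G list instead of allocating and splitting one per task. A worked example with assumed device attribute values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int block_size  = 512;     /* se_dev_attrib.block_size (assumed) */
            unsigned int max_sectors = 1024;    /* se_dev_attrib.max_sectors (assumed) */
            unsigned int data_length = 1048576; /* a 1 MiB WRITE */

            unsigned int sectors    = DIV_ROUND_UP(data_length, block_size);
            unsigned int task_count = DIV_ROUND_UP(sectors, max_sectors);

            /* 2048 sectors -> 2 tasks; a 64 KiB I/O would give task_count == 1
             * and take the new S/G-reuse fast path instead. */
            printf("sectors=%u task_count=%u\n", sectors, task_count);
            return 0;
    }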
@@ -4202,30 +3772,14 @@ static int transport_allocate_data_tasks( | |||
4202 | static int | 3772 | static int |
4203 | transport_allocate_control_task(struct se_cmd *cmd) | 3773 | transport_allocate_control_task(struct se_cmd *cmd) |
4204 | { | 3774 | { |
4205 | struct se_device *dev = cmd->se_dev; | ||
4206 | unsigned char *cdb; | ||
4207 | struct se_task *task; | 3775 | struct se_task *task; |
4208 | unsigned long flags; | 3776 | unsigned long flags; |
4209 | int ret = 0; | ||
4210 | 3777 | ||
4211 | task = transport_generic_get_task(cmd, cmd->data_direction); | 3778 | task = transport_generic_get_task(cmd, cmd->data_direction); |
4212 | if (!task) | 3779 | if (!task) |
4213 | return -ENOMEM; | 3780 | return -ENOMEM; |
4214 | 3781 | ||
4215 | cdb = dev->transport->get_cdb(task); | 3782 | task->task_sg = cmd->t_data_sg; |
4216 | BUG_ON(!cdb); | ||
4217 | memcpy(cdb, cmd->t_task_cdb, | ||
4218 | scsi_command_size(cmd->t_task_cdb)); | ||
4219 | |||
4220 | task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, | ||
4221 | GFP_KERNEL); | ||
4222 | if (!task->task_sg) { | ||
4223 | cmd->se_dev->transport->free_task(task); | ||
4224 | return -ENOMEM; | ||
4225 | } | ||
4226 | |||
4227 | memcpy(task->task_sg, cmd->t_data_sg, | ||
4228 | sizeof(struct scatterlist) * cmd->t_data_nents); | ||
4229 | task->task_size = cmd->data_length; | 3783 | task->task_size = cmd->data_length; |
4230 | task->task_sg_nents = cmd->t_data_nents; | 3784 | task->task_sg_nents = cmd->t_data_nents; |
4231 | 3785 | ||
@@ -4233,53 +3787,20 @@ transport_allocate_control_task(struct se_cmd *cmd) | |||
4233 | list_add_tail(&task->t_list, &cmd->t_task_list); | 3787 | list_add_tail(&task->t_list, &cmd->t_task_list); |
4234 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3788 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4235 | 3789 | ||
4236 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | ||
4237 | if (dev->transport->map_control_SG) | ||
4238 | ret = dev->transport->map_control_SG(task); | ||
4239 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | ||
4240 | if (dev->transport->cdb_none) | ||
4241 | ret = dev->transport->cdb_none(task); | ||
4242 | } else { | ||
4243 | pr_err("target: Unknown control cmd type!\n"); | ||
4244 | BUG(); | ||
4245 | } | ||
4246 | |||
4247 | /* Success! Return number of tasks allocated */ | 3790 | /* Success! Return number of tasks allocated */ |
4248 | if (ret == 0) | 3791 | return 1; |
4249 | return 1; | ||
4250 | return ret; | ||
4251 | } | ||
4252 | |||
4253 | static u32 transport_allocate_tasks( | ||
4254 | struct se_cmd *cmd, | ||
4255 | unsigned long long lba, | ||
4256 | enum dma_data_direction data_direction, | ||
4257 | struct scatterlist *sgl, | ||
4258 | unsigned int sgl_nents) | ||
4259 | { | ||
4260 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
4261 | if (transport_cmd_get_valid_sectors(cmd) < 0) | ||
4262 | return -EINVAL; | ||
4263 | |||
4264 | return transport_allocate_data_tasks(cmd, lba, data_direction, | ||
4265 | sgl, sgl_nents); | ||
4266 | } else | ||
4267 | return transport_allocate_control_task(cmd); | ||
4268 | |||
4269 | } | 3792 | } |
4270 | 3793 | ||
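transport_allocate_control_task() now points task->task_sg straight at cmd->t_data_sg instead of kmalloc()ing and memcpy()ing a copy; the matching ownership test sits in transport_free_dev_tasks() earlier in the diff, which only kfree()s S/G lists the task actually owns. A tiny model of alias-don't-copy plus the guarded free (illustrative only):

    #include <stdlib.h>

    struct sg { void *page; };

    int main(void)
    {
            struct sg *cmd_sg  = calloc(4, sizeof(*cmd_sg)); /* cmd->t_data_sg */
            struct sg *task_sg = cmd_sg;                     /* aliased, not copied */

            /* mirrors the check in transport_free_dev_tasks() */
            if (task_sg != cmd_sg)
                    free(task_sg);          /* never taken for aliased lists */

            free(cmd_sg);   /* command-level list freed by transport_free_pages() */
            return 0;
    }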
4271 | 3794 | /* | |
4272 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | 3795 | * Allocate any required resources to execute the command and place |
4273 | * | 3796 | * it on the execution queue if possible. For writes we might not have the |
4274 | * Allocate storage transport resources from a set of values predefined | 3797 | * payload yet, so notify the fabric via a call to ->write_pending instead. |
4275 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | ||
4276 | * Any non zero return here is treated as an "out of resource' op here. | ||
4277 | */ | 3798 | */ |
4278 | /* | ||
4279 | * Generate struct se_task(s) and/or their payloads for this CDB. | ||
4280 | */ | ||
4281 | int transport_generic_new_cmd(struct se_cmd *cmd) | 3799 | int transport_generic_new_cmd(struct se_cmd *cmd) |
4282 | { | 3800 | { |
3801 | struct se_device *dev = cmd->se_dev; | ||
3802 | int task_cdbs, task_cdbs_bidi = 0; | ||
3803 | int set_counts = 1; | ||
4283 | int ret = 0; | 3804 | int ret = 0; |
4284 | 3805 | ||
4285 | /* | 3806 | /* |
@@ -4293,16 +3814,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
4293 | if (ret < 0) | 3814 | if (ret < 0) |
4294 | return ret; | 3815 | return ret; |
4295 | } | 3816 | } |
3817 | |||
4296 | /* | 3818 | /* |
4297 | * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for | 3819 | * For BIDI command set up the read tasks first. |
4298 | * control or data CDB types, and perform the map to backend subsystem | ||
4299 | * code from SGL memory allocated here by transport_generic_get_mem(), or | ||
4300 | * via pre-existing SGL memory setup explictly by fabric module code with | ||
4301 | * transport_generic_map_mem_to_cmd(). | ||
4302 | */ | 3820 | */ |
4303 | ret = transport_new_cmd_obj(cmd); | 3821 | if (cmd->t_bidi_data_sg && |
4304 | if (ret < 0) | 3822 | dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
4305 | return ret; | 3823 | BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); |
3824 | |||
3825 | task_cdbs_bidi = transport_allocate_data_tasks(cmd, | ||
3826 | DMA_FROM_DEVICE, cmd->t_bidi_data_sg, | ||
3827 | cmd->t_bidi_data_nents); | ||
3828 | if (task_cdbs_bidi <= 0) | ||
3829 | goto out_fail; | ||
3830 | |||
3831 | atomic_inc(&cmd->t_fe_count); | ||
3832 | atomic_inc(&cmd->t_se_count); | ||
3833 | set_counts = 0; | ||
3834 | } | ||
3835 | |||
3836 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
3837 | task_cdbs = transport_allocate_data_tasks(cmd, | ||
3838 | cmd->data_direction, cmd->t_data_sg, | ||
3839 | cmd->t_data_nents); | ||
3840 | } else { | ||
3841 | task_cdbs = transport_allocate_control_task(cmd); | ||
3842 | } | ||
3843 | |||
3844 | if (task_cdbs <= 0) | ||
3845 | goto out_fail; | ||
3846 | |||
3847 | if (set_counts) { | ||
3848 | atomic_inc(&cmd->t_fe_count); | ||
3849 | atomic_inc(&cmd->t_se_count); | ||
3850 | } | ||
3851 | |||
3852 | cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); | ||
3853 | atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); | ||
3854 | atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); | ||
3855 | |||
4306 | /* | 3856 | /* |
4307 | * For WRITEs, let the fabric know its buffer is ready.. | 3857 | * For WRITEs, let the fabric know its buffer is ready.. |
4308 | * This WRITE struct se_cmd (and all of its associated struct se_task's) | 3858 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
@@ -4320,6 +3870,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd) | |||
4320 | */ | 3870 | */ |
4321 | transport_execute_tasks(cmd); | 3871 | transport_execute_tasks(cmd); |
4322 | return 0; | 3872 | return 0; |
3873 | |||
3874 | out_fail: | ||
3875 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3876 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
3877 | return -EINVAL; | ||
4323 | } | 3878 | } |
4324 | EXPORT_SYMBOL(transport_generic_new_cmd); | 3879 | EXPORT_SYMBOL(transport_generic_new_cmd); |
4325 | 3880 | ||
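With transport_new_cmd_obj() merged in, transport_generic_new_cmd() does the BIDI accounting inline: the DMA_FROM_DEVICE read tasks are allocated first, and both completion counters start at the combined task count. A trivial sketch of that bookkeeping with made-up task counts:

    #include <stdio.h>

    int main(void)
    {
            int task_cdbs_bidi = 2;  /* from the DMA_FROM_DEVICE allocation */
            int task_cdbs      = 2;  /* from the data-direction allocation */
            int t_task_list_num = task_cdbs + task_cdbs_bidi;

            /* both countdown counters begin at the full task count */
            int t_task_cdbs_left    = t_task_list_num;
            int t_task_cdbs_ex_left = t_task_list_num;

            printf("%d tasks, %d/%d left\n", t_task_list_num,
                   t_task_cdbs_left, t_task_cdbs_ex_left);
            return 0;
    }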
@@ -4333,15 +3888,15 @@ void transport_generic_process_write(struct se_cmd *cmd) | |||
4333 | } | 3888 | } |
4334 | EXPORT_SYMBOL(transport_generic_process_write); | 3889 | EXPORT_SYMBOL(transport_generic_process_write); |
4335 | 3890 | ||
4336 | static int transport_write_pending_qf(struct se_cmd *cmd) | 3891 | static void transport_write_pending_qf(struct se_cmd *cmd) |
4337 | { | 3892 | { |
4338 | return cmd->se_tfo->write_pending(cmd); | 3893 | if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) { |
3894 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", | ||
3895 | cmd); | ||
3896 | transport_handle_queue_full(cmd, cmd->se_dev); | ||
3897 | } | ||
4339 | } | 3898 | } |
4340 | 3899 | ||
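Because the qf callback pointer is gone, transport_write_pending_qf() and transport_complete_qf() are instead selected by the t_state value (TRANSPORT_COMPLETE_QF_WP or TRANSPORT_COMPLETE_QF_OK) stamped on the command before it is parked. A hedged sketch of such a state dispatch (enum values invented; the real dispatch happens in the device processing path):

    #include <stdio.h>

    enum { TRANSPORT_COMPLETE_QF_WP = 1, TRANSPORT_COMPLETE_QF_OK = 2 };

    static void write_pending_qf(void) { printf("retry ->write_pending()\n"); }
    static void complete_qf(void)      { printf("retry status/data-in\n"); }

    static void qf_retry(int t_state)   /* models the qf retry dispatch */
    {
            switch (t_state) {
            case TRANSPORT_COMPLETE_QF_WP:
                    write_pending_qf();
                    break;
            case TRANSPORT_COMPLETE_QF_OK:
                    complete_qf();
                    break;
            }
    }

    int main(void)
    {
            qf_retry(TRANSPORT_COMPLETE_QF_WP);
            qf_retry(TRANSPORT_COMPLETE_QF_OK);
            return 0;
    }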
4341 | /* transport_generic_write_pending(): | ||
4342 | * | ||
4343 | * | ||
4344 | */ | ||
4345 | static int transport_generic_write_pending(struct se_cmd *cmd) | 3900 | static int transport_generic_write_pending(struct se_cmd *cmd) |
4346 | { | 3901 | { |
4347 | unsigned long flags; | 3902 | unsigned long flags; |
@@ -4351,17 +3906,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
4351 | cmd->t_state = TRANSPORT_WRITE_PENDING; | 3906 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
4352 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3907 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4353 | 3908 | ||
4354 | if (cmd->transport_qf_callback) { | ||
4355 | ret = cmd->transport_qf_callback(cmd); | ||
4356 | if (ret == -EAGAIN) | ||
4357 | goto queue_full; | ||
4358 | else if (ret < 0) | ||
4359 | return ret; | ||
4360 | |||
4361 | cmd->transport_qf_callback = NULL; | ||
4362 | return 0; | ||
4363 | } | ||
4364 | |||
4365 | /* | 3909 | /* |
4366 | * Clear the se_cmd for WRITE_PENDING status in order to set | 3910 | * Clear the se_cmd for WRITE_PENDING status in order to set |
4367 | * cmd->t_transport_active=0 so that transport_generic_handle_data | 3911 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
@@ -4386,61 +3930,52 @@ static int transport_generic_write_pending(struct se_cmd *cmd) | |||
4386 | queue_full: | 3930 | queue_full: |
4387 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); | 3931 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); |
4388 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; | 3932 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; |
4389 | transport_handle_queue_full(cmd, cmd->se_dev, | 3933 | transport_handle_queue_full(cmd, cmd->se_dev); |
4390 | transport_write_pending_qf); | ||
4391 | return ret; | 3934 | return ret; |
4392 | } | 3935 | } |
4393 | 3936 | ||
3937 | /** | ||
3938 | * transport_release_cmd - free a command | ||
3939 | * @cmd: command to free | ||
3940 | * | ||
3941 | * This routine unconditionally frees a command, and reference counting | ||
3942 | * or list removal must be done by the caller. | ||
3943 | */ | ||
4394 | void transport_release_cmd(struct se_cmd *cmd) | 3944 | void transport_release_cmd(struct se_cmd *cmd) |
4395 | { | 3945 | { |
4396 | BUG_ON(!cmd->se_tfo); | 3946 | BUG_ON(!cmd->se_tfo); |
4397 | 3947 | ||
4398 | transport_free_se_cmd(cmd); | 3948 | if (cmd->se_tmr_req) |
3949 | core_tmr_release_req(cmd->se_tmr_req); | ||
3950 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | ||
3951 | kfree(cmd->t_task_cdb); | ||
4399 | cmd->se_tfo->release_cmd(cmd); | 3952 | cmd->se_tfo->release_cmd(cmd); |
4400 | } | 3953 | } |
4401 | EXPORT_SYMBOL(transport_release_cmd); | 3954 | EXPORT_SYMBOL(transport_release_cmd); |
4402 | 3955 | ||
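transport_release_cmd() now frees the TMR request and, when t_task_cdb no longer points at the embedded __t_task_cdb array, the heap-allocated CDB. A self-contained model of that inline-or-heap buffer idiom (field names mirror se_cmd; sizes are assumptions):

    #include <stdlib.h>
    #include <string.h>

    struct cmd {
            unsigned char __cdb[16];        /* like se_cmd->__t_task_cdb */
            unsigned char *cdb;             /* like se_cmd->t_task_cdb */
    };

    static int cmd_set_cdb(struct cmd *c, const unsigned char *cdb, size_t len)
    {
            /* small CDBs use the embedded array, large ones are allocated */
            c->cdb = len <= sizeof(c->__cdb) ? c->__cdb : malloc(len);
            if (!c->cdb)
                    return -1;
            memcpy(c->cdb, cdb, len);
            return 0;
    }

    static void cmd_release(struct cmd *c)
    {
            if (c->cdb != c->__cdb)         /* only free what we allocated */
                    free(c->cdb);
    }

    int main(void)
    {
            struct cmd c;
            unsigned char big[32] = { 0x7f };   /* a 32-byte variable-length CDB */

            if (cmd_set_cdb(&c, big, sizeof(big)) == 0)
                    cmd_release(&c);
            return 0;
    }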
4403 | /* transport_generic_free_cmd(): | 3956 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
4404 | * | ||
4405 | * Called from processing frontend to release storage engine resources | ||
4406 | */ | ||
4407 | void transport_generic_free_cmd( | ||
4408 | struct se_cmd *cmd, | ||
4409 | int wait_for_tasks, | ||
4410 | int session_reinstatement) | ||
4411 | { | 3957 | { |
4412 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) | 3958 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
3959 | if (wait_for_tasks && cmd->se_tmr_req) | ||
3960 | transport_wait_for_tasks(cmd); | ||
3961 | |||
4413 | transport_release_cmd(cmd); | 3962 | transport_release_cmd(cmd); |
4414 | else { | 3963 | } else { |
3964 | if (wait_for_tasks) | ||
3965 | transport_wait_for_tasks(cmd); | ||
3966 | |||
4415 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | 3967 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
4416 | 3968 | ||
4417 | if (cmd->se_lun) { | 3969 | if (cmd->se_lun) |
4418 | #if 0 | ||
4419 | pr_debug("cmd: %p ITT: 0x%08x contains" | ||
4420 | " cmd->se_lun\n", cmd, | ||
4421 | cmd->se_tfo->get_task_tag(cmd)); | ||
4422 | #endif | ||
4423 | transport_lun_remove_cmd(cmd); | 3970 | transport_lun_remove_cmd(cmd); |
4424 | } | ||
4425 | |||
4426 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | ||
4427 | cmd->transport_wait_for_tasks(cmd, 0, 0); | ||
4428 | 3971 | ||
4429 | transport_free_dev_tasks(cmd); | 3972 | transport_free_dev_tasks(cmd); |
4430 | 3973 | ||
4431 | transport_generic_remove(cmd, session_reinstatement); | 3974 | transport_put_cmd(cmd); |
4432 | } | 3975 | } |
4433 | } | 3976 | } |
4434 | EXPORT_SYMBOL(transport_generic_free_cmd); | 3977 | EXPORT_SYMBOL(transport_generic_free_cmd); |
4435 | 3978 | ||
4436 | static void transport_nop_wait_for_tasks( | ||
4437 | struct se_cmd *cmd, | ||
4438 | int remove_cmd, | ||
4439 | int session_reinstatement) | ||
4440 | { | ||
4441 | return; | ||
4442 | } | ||
4443 | |||
4444 | /* transport_lun_wait_for_tasks(): | 3979 | /* transport_lun_wait_for_tasks(): |
4445 | * | 3980 | * |
4446 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | 3981 | * Called from ConfigFS context to stop the passed struct se_cmd to allow |
@@ -4479,7 +4014,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |||
4479 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | 4014 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
4480 | cmd->se_tfo->get_task_tag(cmd)); | 4015 | cmd->se_tfo->get_task_tag(cmd)); |
4481 | } | 4016 | } |
4482 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); | 4017 | transport_remove_cmd_from_queue(cmd); |
4483 | 4018 | ||
4484 | return 0; | 4019 | return 0; |
4485 | } | 4020 | } |
@@ -4610,22 +4145,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) | |||
4610 | return 0; | 4145 | return 0; |
4611 | } | 4146 | } |
4612 | 4147 | ||
4613 | /* transport_generic_wait_for_tasks(): | 4148 | /** |
4149 | * transport_wait_for_tasks - wait for completion to occur | ||
4150 | * @cmd: command to wait on | ||
4614 | * | 4151 | * |
4615 | * Called from frontend or passthrough context to wait for storage engine | 4152 | * Called from frontend fabric context to wait for storage engine |
4616 | * to pause and/or release frontend generated struct se_cmd. | 4153 | * to pause and/or release frontend generated struct se_cmd. |
4617 | */ | 4154 | */ |
4618 | static void transport_generic_wait_for_tasks( | 4155 | void transport_wait_for_tasks(struct se_cmd *cmd) |
4619 | struct se_cmd *cmd, | ||
4620 | int remove_cmd, | ||
4621 | int session_reinstatement) | ||
4622 | { | 4156 | { |
4623 | unsigned long flags; | 4157 | unsigned long flags; |
4624 | 4158 | ||
4625 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | ||
4626 | return; | ||
4627 | |||
4628 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 4159 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4160 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { | ||
4161 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
4162 | return; | ||
4163 | } | ||
4164 | /* | ||
4165 | * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE | ||
4166 | * has been set in transport_set_supported_SAM_opcode(). | ||
4167 | */ | ||
4168 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { | ||
4169 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
4170 | return; | ||
4171 | } | ||
4629 | /* | 4172 | /* |
4630 | * If we are already stopped due to an external event (ie: LUN shutdown) | 4173 | * If we are already stopped due to an external event (ie: LUN shutdown) |
4631 | * sleep until the connection can have the passed struct se_cmd back. | 4174 | * sleep until the connection can have the passed struct se_cmd back. |
@@ -4665,16 +4208,17 @@ static void transport_generic_wait_for_tasks( | |||
4665 | atomic_set(&cmd->transport_lun_stop, 0); | 4208 | atomic_set(&cmd->transport_lun_stop, 0); |
4666 | } | 4209 | } |
4667 | if (!atomic_read(&cmd->t_transport_active) || | 4210 | if (!atomic_read(&cmd->t_transport_active) || |
4668 | atomic_read(&cmd->t_transport_aborted)) | 4211 | atomic_read(&cmd->t_transport_aborted)) { |
4669 | goto remove; | 4212 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4213 | return; | ||
4214 | } | ||
4670 | 4215 | ||
4671 | atomic_set(&cmd->t_transport_stop, 1); | 4216 | atomic_set(&cmd->t_transport_stop, 1); |
4672 | 4217 | ||
4673 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" | 4218 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
4674 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | 4219 | " i_state: %d, t_state: %d, t_transport_stop = TRUE\n", |
4675 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), | 4220 | cmd, cmd->se_tfo->get_task_tag(cmd), |
4676 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | 4221 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
4677 | cmd->deferred_t_state); | ||
4678 | 4222 | ||
4679 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 4223 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4680 | 4224 | ||
@@ -4689,13 +4233,10 @@ static void transport_generic_wait_for_tasks( | |||
4689 | pr_debug("wait_for_tasks: Stopped wait_for_completion(" | 4233 | pr_debug("wait_for_tasks: Stopped wait_for_completion(" |
4690 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", | 4234 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
4691 | cmd->se_tfo->get_task_tag(cmd)); | 4235 | cmd->se_tfo->get_task_tag(cmd)); |
4692 | remove: | ||
4693 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
4694 | if (!remove_cmd) | ||
4695 | return; | ||
4696 | 4236 | ||
4697 | transport_generic_free_cmd(cmd, 0, session_reinstatement); | 4237 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4698 | } | 4238 | } |
4239 | EXPORT_SYMBOL(transport_wait_for_tasks); | ||
4699 | 4240 | ||
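transport_wait_for_tasks() keeps the same handshake: the waiter sets t_transport_stop and sleeps on t_transport_stop_comp until the processing side completes it. A rough pthread analogue of that stop/complete pairing (user-space model, not the kernel completion API):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            int done;
    };

    static struct completion stop_comp = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    static void complete(struct completion *x)
    {
            pthread_mutex_lock(&x->lock);
            x->done = 1;
            pthread_cond_signal(&x->cond);
            pthread_mutex_unlock(&x->lock);
    }

    static void wait_for_completion(struct completion *x)
    {
            pthread_mutex_lock(&x->lock);
            while (!x->done)
                    pthread_cond_wait(&x->cond, &x->lock);
            pthread_mutex_unlock(&x->lock);
    }

    static void *processing_thread(void *arg)
    {
            complete(&stop_comp);   /* command has quiesced */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, processing_thread, NULL);
            wait_for_completion(&stop_comp);    /* the waiter side */
            pthread_join(t, NULL);
            printf("command stopped\n");
            return 0;
    }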
4700 | static int transport_get_sense_codes( | 4241 | static int transport_get_sense_codes( |
4701 | struct se_cmd *cmd, | 4242 | struct se_cmd *cmd, |
@@ -4920,6 +4461,15 @@ EXPORT_SYMBOL(transport_check_aborted_status); | |||
4920 | 4461 | ||
4921 | void transport_send_task_abort(struct se_cmd *cmd) | 4462 | void transport_send_task_abort(struct se_cmd *cmd) |
4922 | { | 4463 | { |
4464 | unsigned long flags; | ||
4465 | |||
4466 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
4467 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | ||
4468 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
4469 | return; | ||
4470 | } | ||
4471 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
4472 | |||
4923 | /* | 4473 | /* |
4924 | * If there are still expected incoming fabric WRITEs, we wait | 4474 | * If there are still expected incoming fabric WRITEs, we wait |
4925 | * until they have completed before sending a TASK_ABORTED | 4475 | * until they have completed before sending a TASK_ABORTED |
@@ -4984,184 +4534,10 @@ int transport_generic_do_tmr(struct se_cmd *cmd) | |||
4984 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | 4534 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; |
4985 | cmd->se_tfo->queue_tm_rsp(cmd); | 4535 | cmd->se_tfo->queue_tm_rsp(cmd); |
4986 | 4536 | ||
4987 | transport_cmd_check_stop(cmd, 2, 0); | 4537 | transport_cmd_check_stop_to_fabric(cmd); |
4988 | return 0; | 4538 | return 0; |
4989 | } | 4539 | } |
4990 | 4540 | ||
4991 | /* | ||
4992 | * Called with spin_lock_irq(&dev->execute_task_lock); held | ||
4993 | * | ||
4994 | */ | ||
4995 | static struct se_task * | ||
4996 | transport_get_task_from_state_list(struct se_device *dev) | ||
4997 | { | ||
4998 | struct se_task *task; | ||
4999 | |||
5000 | if (list_empty(&dev->state_task_list)) | ||
5001 | return NULL; | ||
5002 | |||
5003 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | ||
5004 | break; | ||
5005 | |||
5006 | list_del(&task->t_state_list); | ||
5007 | atomic_set(&task->task_state_active, 0); | ||
5008 | |||
5009 | return task; | ||
5010 | } | ||
5011 | |||
5012 | static void transport_processing_shutdown(struct se_device *dev) | ||
5013 | { | ||
5014 | struct se_cmd *cmd; | ||
5015 | struct se_task *task; | ||
5016 | unsigned long flags; | ||
5017 | /* | ||
5018 | * Empty the struct se_device's struct se_task state list. | ||
5019 | */ | ||
5020 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5021 | while ((task = transport_get_task_from_state_list(dev))) { | ||
5022 | if (!task->task_se_cmd) { | ||
5023 | pr_err("task->task_se_cmd is NULL!\n"); | ||
5024 | continue; | ||
5025 | } | ||
5026 | cmd = task->task_se_cmd; | ||
5027 | |||
5028 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
5029 | |||
5030 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
5031 | |||
5032 | pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," | ||
5033 | " i_state: %d, t_state/def_t_state:" | ||
5034 | " %d/%d cdb: 0x%02x\n", cmd, task, | ||
5035 | cmd->se_tfo->get_task_tag(cmd), | ||
5036 | cmd->se_tfo->get_cmd_state(cmd), | ||
5037 | cmd->t_state, cmd->deferred_t_state, | ||
5038 | cmd->t_task_cdb[0]); | ||
5039 | pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" | ||
5040 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | ||
5041 | " t_transport_stop: %d t_transport_sent: %d\n", | ||
5042 | cmd->se_tfo->get_task_tag(cmd), | ||
5043 | cmd->t_task_list_num, | ||
5044 | atomic_read(&cmd->t_task_cdbs_left), | ||
5045 | atomic_read(&cmd->t_task_cdbs_sent), | ||
5046 | atomic_read(&cmd->t_transport_active), | ||
5047 | atomic_read(&cmd->t_transport_stop), | ||
5048 | atomic_read(&cmd->t_transport_sent)); | ||
5049 | |||
5050 | if (atomic_read(&task->task_active)) { | ||
5051 | atomic_set(&task->task_stop, 1); | ||
5052 | spin_unlock_irqrestore( | ||
5053 | &cmd->t_state_lock, flags); | ||
5054 | |||
5055 | pr_debug("Waiting for task: %p to shutdown for dev:" | ||
5056 | " %p\n", task, dev); | ||
5057 | wait_for_completion(&task->task_stop_comp); | ||
5058 | pr_debug("Completed task: %p shutdown for dev: %p\n", | ||
5059 | task, dev); | ||
5060 | |||
5061 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
5062 | atomic_dec(&cmd->t_task_cdbs_left); | ||
5063 | |||
5064 | atomic_set(&task->task_active, 0); | ||
5065 | atomic_set(&task->task_stop, 0); | ||
5066 | } else { | ||
5067 | if (atomic_read(&task->task_execute_queue) != 0) | ||
5068 | transport_remove_task_from_execute_queue(task, dev); | ||
5069 | } | ||
5070 | __transport_stop_task_timer(task, &flags); | ||
5071 | |||
5072 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { | ||
5073 | spin_unlock_irqrestore( | ||
5074 | &cmd->t_state_lock, flags); | ||
5075 | |||
5076 | pr_debug("Skipping task: %p, dev: %p for" | ||
5077 | " t_task_cdbs_ex_left: %d\n", task, dev, | ||
5078 | atomic_read(&cmd->t_task_cdbs_ex_left)); | ||
5079 | |||
5080 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5081 | continue; | ||
5082 | } | ||
5083 | |||
5084 | if (atomic_read(&cmd->t_transport_active)) { | ||
5085 | pr_debug("got t_transport_active = 1 for task: %p, dev:" | ||
5086 | " %p\n", task, dev); | ||
5087 | |||
5088 | if (atomic_read(&cmd->t_fe_count)) { | ||
5089 | spin_unlock_irqrestore( | ||
5090 | &cmd->t_state_lock, flags); | ||
5091 | transport_send_check_condition_and_sense( | ||
5092 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | ||
5093 | 0); | ||
5094 | transport_remove_cmd_from_queue(cmd, | ||
5095 | &cmd->se_dev->dev_queue_obj); | ||
5096 | |||
5097 | transport_lun_remove_cmd(cmd); | ||
5098 | transport_cmd_check_stop(cmd, 1, 0); | ||
5099 | } else { | ||
5100 | spin_unlock_irqrestore( | ||
5101 | &cmd->t_state_lock, flags); | ||
5102 | |||
5103 | transport_remove_cmd_from_queue(cmd, | ||
5104 | &cmd->se_dev->dev_queue_obj); | ||
5105 | |||
5106 | transport_lun_remove_cmd(cmd); | ||
5107 | |||
5108 | if (transport_cmd_check_stop(cmd, 1, 0)) | ||
5109 | transport_generic_remove(cmd, 0); | ||
5110 | } | ||
5111 | |||
5112 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5113 | continue; | ||
5114 | } | ||
5115 | pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", | ||
5116 | task, dev); | ||
5117 | |||
5118 | if (atomic_read(&cmd->t_fe_count)) { | ||
5119 | spin_unlock_irqrestore( | ||
5120 | &cmd->t_state_lock, flags); | ||
5121 | transport_send_check_condition_and_sense(cmd, | ||
5122 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | ||
5123 | transport_remove_cmd_from_queue(cmd, | ||
5124 | &cmd->se_dev->dev_queue_obj); | ||
5125 | |||
5126 | transport_lun_remove_cmd(cmd); | ||
5127 | transport_cmd_check_stop(cmd, 1, 0); | ||
5128 | } else { | ||
5129 | spin_unlock_irqrestore( | ||
5130 | &cmd->t_state_lock, flags); | ||
5131 | |||
5132 | transport_remove_cmd_from_queue(cmd, | ||
5133 | &cmd->se_dev->dev_queue_obj); | ||
5134 | transport_lun_remove_cmd(cmd); | ||
5135 | |||
5136 | if (transport_cmd_check_stop(cmd, 1, 0)) | ||
5137 | transport_generic_remove(cmd, 0); | ||
5138 | } | ||
5139 | |||
5140 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5141 | } | ||
5142 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
5143 | /* | ||
5144 | * Empty the struct se_device's struct se_cmd list. | ||
5145 | */ | ||
5146 | while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { | ||
5147 | |||
5148 | pr_debug("From Device Queue: cmd: %p t_state: %d\n", | ||
5149 | cmd, cmd->t_state); | ||
5150 | |||
5151 | if (atomic_read(&cmd->t_fe_count)) { | ||
5152 | transport_send_check_condition_and_sense(cmd, | ||
5153 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | ||
5154 | |||
5155 | transport_lun_remove_cmd(cmd); | ||
5156 | transport_cmd_check_stop(cmd, 1, 0); | ||
5157 | } else { | ||
5158 | transport_lun_remove_cmd(cmd); | ||
5159 | if (transport_cmd_check_stop(cmd, 1, 0)) | ||
5160 | transport_generic_remove(cmd, 0); | ||
5161 | } | ||
5162 | } | ||
5163 | } | ||
5164 | |||
5165 | /* transport_processing_thread(): | 4541 | /* transport_processing_thread(): |
5166 | * | 4542 | * |
5167 | * | 4543 | * |
@@ -5181,14 +4557,6 @@ static int transport_processing_thread(void *param) | |||
5181 | if (ret < 0) | 4557 | if (ret < 0) |
5182 | goto out; | 4558 | goto out; |
5183 | 4559 | ||
5184 | spin_lock_irq(&dev->dev_status_lock); | ||
5185 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | ||
5186 | spin_unlock_irq(&dev->dev_status_lock); | ||
5187 | transport_processing_shutdown(dev); | ||
5188 | continue; | ||
5189 | } | ||
5190 | spin_unlock_irq(&dev->dev_status_lock); | ||
5191 | |||
5192 | get_cmd: | 4560 | get_cmd: |
5193 | __transport_execute_tasks(dev); | 4561 | __transport_execute_tasks(dev); |
5194 | 4562 | ||
@@ -5197,6 +4565,9 @@ get_cmd: | |||
5197 | continue; | 4565 | continue; |
5198 | 4566 | ||
5199 | switch (cmd->t_state) { | 4567 | switch (cmd->t_state) { |
4568 | case TRANSPORT_NEW_CMD: | ||
4569 | BUG(); | ||
4570 | break; | ||
5200 | case TRANSPORT_NEW_CMD_MAP: | 4571 | case TRANSPORT_NEW_CMD_MAP: |
5201 | if (!cmd->se_tfo->new_cmd_map) { | 4572 | if (!cmd->se_tfo->new_cmd_map) { |
5202 | pr_err("cmd->se_tfo->new_cmd_map is" | 4573 | pr_err("cmd->se_tfo->new_cmd_map is" |
@@ -5206,19 +4577,17 @@ get_cmd: | |||
5206 | ret = cmd->se_tfo->new_cmd_map(cmd); | 4577 | ret = cmd->se_tfo->new_cmd_map(cmd); |
5207 | if (ret < 0) { | 4578 | if (ret < 0) { |
5208 | cmd->transport_error_status = ret; | 4579 | cmd->transport_error_status = ret; |
5209 | transport_generic_request_failure(cmd, NULL, | 4580 | transport_generic_request_failure(cmd, |
5210 | 0, (cmd->data_direction != | 4581 | 0, (cmd->data_direction != |
5211 | DMA_TO_DEVICE)); | 4582 | DMA_TO_DEVICE)); |
5212 | break; | 4583 | break; |
5213 | } | 4584 | } |
5214 | /* Fall through */ | ||
5215 | case TRANSPORT_NEW_CMD: | ||
5216 | ret = transport_generic_new_cmd(cmd); | 4585 | ret = transport_generic_new_cmd(cmd); |
5217 | if (ret == -EAGAIN) | 4586 | if (ret == -EAGAIN) |
5218 | break; | 4587 | break; |
5219 | else if (ret < 0) { | 4588 | else if (ret < 0) { |
5220 | cmd->transport_error_status = ret; | 4589 | cmd->transport_error_status = ret; |
5221 | transport_generic_request_failure(cmd, NULL, | 4590 | transport_generic_request_failure(cmd, |
5222 | 0, (cmd->data_direction != | 4591 | 0, (cmd->data_direction != |
5223 | DMA_TO_DEVICE)); | 4592 | DMA_TO_DEVICE)); |
5224 | } | 4593 | } |
@@ -5226,33 +4595,22 @@ get_cmd: | |||
5226 | case TRANSPORT_PROCESS_WRITE: | 4595 | case TRANSPORT_PROCESS_WRITE: |
5227 | transport_generic_process_write(cmd); | 4596 | transport_generic_process_write(cmd); |
5228 | break; | 4597 | break; |
5229 | case TRANSPORT_COMPLETE_OK: | ||
5230 | transport_stop_all_task_timers(cmd); | ||
5231 | transport_generic_complete_ok(cmd); | ||
5232 | break; | ||
5233 | case TRANSPORT_REMOVE: | ||
5234 | transport_generic_remove(cmd, 0); | ||
5235 | break; | ||
5236 | case TRANSPORT_FREE_CMD_INTR: | 4598 | case TRANSPORT_FREE_CMD_INTR: |
5237 | transport_generic_free_cmd(cmd, 0, 0); | 4599 | transport_generic_free_cmd(cmd, 0); |
5238 | break; | 4600 | break; |
5239 | case TRANSPORT_PROCESS_TMR: | 4601 | case TRANSPORT_PROCESS_TMR: |
5240 | transport_generic_do_tmr(cmd); | 4602 | transport_generic_do_tmr(cmd); |
5241 | break; | 4603 | break; |
5242 | case TRANSPORT_COMPLETE_FAILURE: | ||
5243 | transport_generic_request_failure(cmd, NULL, 1, 1); | ||
5244 | break; | ||
5245 | case TRANSPORT_COMPLETE_TIMEOUT: | ||
5246 | transport_stop_all_task_timers(cmd); | ||
5247 | transport_generic_request_timeout(cmd); | ||
5248 | break; | ||
5249 | case TRANSPORT_COMPLETE_QF_WP: | 4604 | case TRANSPORT_COMPLETE_QF_WP: |
5250 | transport_generic_write_pending(cmd); | 4605 | transport_write_pending_qf(cmd); |
4606 | break; | ||
4607 | case TRANSPORT_COMPLETE_QF_OK: | ||
4608 | transport_complete_qf(cmd); | ||
5251 | break; | 4609 | break; |
5252 | default: | 4610 | default: |
5253 | pr_err("Unknown t_state: %d deferred_t_state:" | 4611 | pr_err("Unknown t_state: %d for ITT: 0x%08x " |
5254 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | 4612 | "i_state: %d on SE LUN: %u\n", |
5255 | " %u\n", cmd->t_state, cmd->deferred_t_state, | 4613 | cmd->t_state, |
5256 | cmd->se_tfo->get_task_tag(cmd), | 4614 | cmd->se_tfo->get_task_tag(cmd), |
5257 | cmd->se_tfo->get_cmd_state(cmd), | 4615 | cmd->se_tfo->get_cmd_state(cmd), |
5258 | cmd->se_lun->unpacked_lun); | 4616 | cmd->se_lun->unpacked_lun); |
@@ -5263,7 +4621,8 @@ get_cmd: | |||
5263 | } | 4621 | } |
5264 | 4622 | ||
5265 | out: | 4623 | out: |
5266 | transport_release_all_cmds(dev); | 4624 | WARN_ON(!list_empty(&dev->state_task_list)); |
4625 | WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); | ||
5267 | dev->process_thread = NULL; | 4626 | dev->process_thread = NULL; |
5268 | return 0; | 4627 | return 0; |
5269 | } | 4628 | } |
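The shrunken switch above is the visible effect of the series' central change: TRANSPORT_COMPLETE_OK, TRANSPORT_COMPLETE_FAILURE, TRANSPORT_COMPLETE_TIMEOUT and TRANSPORT_REMOVE no longer reach the processing thread because I/O completions now run from a workqueue ("target: use a workqueue for I/O completions" in the merge list). That is also what lets the per-device transport_processing_shutdown() drain be deleted; on exit the thread merely asserts, via the two WARN_ON() calls, that both lists are already empty. A minimal, self-contained sketch of the workqueue completion pattern; the demo_* names are illustrative, not the kernel's:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/workqueue.h>

	/* Hypothetical, pared-down command struct for illustration only. */
	struct demo_cmd {
		struct work_struct work;
		/* ... per-command fabric state ... */
	};

	static struct workqueue_struct *demo_wq;

	static void demo_complete_work(struct work_struct *work)
	{
		struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

		/* Runs in process context, so pushing status back to the
		 * fabric may block without tying up the completion path. */
		pr_info("completing cmd %p\n", cmd);
	}

	/* Called from the backend's (possibly atomic) completion context. */
	static void demo_complete(struct demo_cmd *cmd)
	{
		INIT_WORK(&cmd->work, demo_complete_work);
		queue_work(demo_wq, &cmd->work);
	}

	static int __init demo_init(void)
	{
		demo_wq = alloc_workqueue("demo_compl", WQ_MEM_RECLAIM, 0);
		return demo_wq ? 0 : -ENOMEM;
	}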
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 31e3c652527e..50a480db7a66 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -24,7 +24,6 @@ | |||
24 | * | 24 | * |
25 | ******************************************************************************/ | 25 | ******************************************************************************/ |
26 | 26 | ||
27 | #include <linux/version.h> | ||
28 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
29 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
30 | #include <scsi/scsi.h> | 29 | #include <scsi/scsi.h> |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 80fbcde00cb6..6195026cc7b0 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
22 | #include <linux/version.h> | ||
23 | #include <generated/utsrelease.h> | 22 | #include <generated/utsrelease.h> |
24 | #include <linux/utsname.h> | 23 | #include <linux/utsname.h> |
25 | #include <linux/init.h> | 24 | #include <linux/init.h> |
@@ -115,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd) | |||
115 | 114 | ||
116 | void ft_check_stop_free(struct se_cmd *se_cmd) | 115 | void ft_check_stop_free(struct se_cmd *se_cmd) |
117 | { | 116 | { |
118 | transport_generic_free_cmd(se_cmd, 0, 0); | 117 | transport_generic_free_cmd(se_cmd, 0); |
119 | } | 118 | } |
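transport_generic_free_cmd() loses its third argument throughout this file. The dropped flag is, as far as this series shows, the session-reinstatement flag that in-tree callers only ever passed as 0, leaving just the command and a wait_for_tasks flag. A hedged sketch of the two remaining call shapes:

	transport_generic_free_cmd(se_cmd, 0);	/* free without waiting */
	transport_generic_free_cmd(se_cmd, 1);	/* wait for outstanding tasks first */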
120 | 119 | ||
121 | /* | 120 | /* |
@@ -268,9 +267,8 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
268 | 267 | ||
269 | if (IS_ERR(fp)) { | 268 | if (IS_ERR(fp)) { |
270 | /* XXX need to find cmd if queued */ | 269 | /* XXX need to find cmd if queued */ |
271 | cmd->se_cmd.t_state = TRANSPORT_REMOVE; | ||
272 | cmd->seq = NULL; | 270 | cmd->seq = NULL; |
273 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); | 271 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
274 | return; | 272 | return; |
275 | } | 273 | } |
276 | 274 | ||
@@ -288,7 +286,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
288 | __func__, fh->fh_r_ctl); | 286 | __func__, fh->fh_r_ctl); |
289 | ft_invl_hw_context(cmd); | 287 | ft_invl_hw_context(cmd); |
290 | fc_frame_free(fp); | 288 | fc_frame_free(fp); |
291 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); | 289 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
292 | break; | 290 | break; |
293 | } | 291 | } |
294 | } | 292 | } |
@@ -397,7 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
397 | } | 395 | } |
398 | 396 | ||
399 | pr_debug("alloc tm cmd fn %d\n", tm_func); | 397 | pr_debug("alloc tm cmd fn %d\n", tm_func); |
400 | tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); | 398 | tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL); |
401 | if (!tmr) { | 399 | if (!tmr) { |
402 | pr_debug("alloc failed\n"); | 400 | pr_debug("alloc failed\n"); |
403 | ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); | 401 | ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); |
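core_tmr_alloc_req() now takes an explicit gfp_t. tcm_fc allocates its TMRs from a workqueue and may sleep, hence GFP_KERNEL here; the apparent point of the new parameter is to let fabrics that allocate from atomic context pass GFP_ATOMIC instead. A hedged sketch of both shapes; only the GFP_KERNEL form appears in this diff:

	/* process context, allocation may sleep */
	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);

	/* from a softirq/interrupt path a fabric would instead use */
	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_ATOMIC);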
@@ -421,7 +419,7 @@ static void ft_send_tm(struct ft_cmd *cmd) | |||
421 | sess = cmd->sess; | 419 | sess = cmd->sess; |
422 | transport_send_check_condition_and_sense(&cmd->se_cmd, | 420 | transport_send_check_condition_and_sense(&cmd->se_cmd, |
423 | cmd->se_cmd.scsi_sense_reason, 0); | 421 | cmd->se_cmd.scsi_sense_reason, 0); |
424 | transport_generic_free_cmd(&cmd->se_cmd, 0, 0); | 422 | transport_generic_free_cmd(&cmd->se_cmd, 0); |
425 | ft_sess_put(sess); | 423 | ft_sess_put(sess); |
426 | return; | 424 | return; |
427 | } | 425 | } |
@@ -628,7 +626,7 @@ static void ft_send_work(struct work_struct *work) | |||
628 | if (ret == -ENOMEM) { | 626 | if (ret == -ENOMEM) { |
629 | transport_send_check_condition_and_sense(se_cmd, | 627 | transport_send_check_condition_and_sense(se_cmd, |
630 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 628 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
631 | transport_generic_free_cmd(se_cmd, 0, 0); | 629 | transport_generic_free_cmd(se_cmd, 0); |
632 | return; | 630 | return; |
633 | } | 631 | } |
634 | if (ret == -EINVAL) { | 632 | if (ret == -EINVAL) { |
@@ -637,10 +635,10 @@ static void ft_send_work(struct work_struct *work) | |||
637 | else | 635 | else |
638 | transport_send_check_condition_and_sense(se_cmd, | 636 | transport_send_check_condition_and_sense(se_cmd, |
639 | se_cmd->scsi_sense_reason, 0); | 637 | se_cmd->scsi_sense_reason, 0); |
640 | transport_generic_free_cmd(se_cmd, 0, 0); | 638 | transport_generic_free_cmd(se_cmd, 0); |
641 | return; | 639 | return; |
642 | } | 640 | } |
643 | transport_generic_handle_cdb(se_cmd); | 641 | transport_handle_cdb_direct(se_cmd); |
644 | return; | 642 | return; |
645 | 643 | ||
646 | err: | 644 | err: |
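The last hunk above swaps transport_generic_handle_cdb() for transport_handle_cdb_direct(): ft_send_work() already runs from a workqueue, so the command can be executed synchronously in the caller's process context instead of being queued to the device processing thread and dispatched a second time. A hedged sketch of the distinction:

	/* Old path: defer the command to the per-device processing thread. */
	transport_generic_handle_cdb(se_cmd);

	/* New path: already in process context (workqueue), so the
	 * command is executed directly in this context. */
	transport_handle_cdb_direct(se_cmd);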
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8fa39b74f22c..5f770412ca40 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/moduleparam.h> | 25 | #include <linux/moduleparam.h> |
26 | #include <linux/version.h> | ||
27 | #include <generated/utsrelease.h> | 26 | #include <generated/utsrelease.h> |
28 | #include <linux/utsname.h> | 27 | #include <linux/utsname.h> |
29 | #include <linux/init.h> | 28 | #include <linux/init.h> |
@@ -32,6 +31,7 @@ | |||
32 | #include <linux/types.h> | 31 | #include <linux/types.h> |
33 | #include <linux/string.h> | 32 | #include <linux/string.h> |
34 | #include <linux/configfs.h> | 33 | #include <linux/configfs.h> |
34 | #include <linux/kernel.h> | ||
35 | #include <linux/ctype.h> | 35 | #include <linux/ctype.h> |
36 | #include <asm/unaligned.h> | 36 | #include <asm/unaligned.h> |
37 | #include <scsi/scsi.h> | 37 | #include <scsi/scsi.h> |
@@ -71,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) | |||
71 | { | 71 | { |
72 | const char *cp; | 72 | const char *cp; |
73 | char c; | 73 | char c; |
74 | u32 nibble; | ||
75 | u32 byte = 0; | 74 | u32 byte = 0; |
76 | u32 pos = 0; | 75 | u32 pos = 0; |
77 | u32 err; | 76 | u32 err; |
77 | int val; | ||
78 | 78 | ||
79 | *wwn = 0; | 79 | *wwn = 0; |
80 | for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { | 80 | for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { |
@@ -95,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) | |||
95 | return cp - name; | 95 | return cp - name; |
96 | } | 96 | } |
97 | err = 3; | 97 | err = 3; |
98 | if (isdigit(c)) | 98 | val = hex_to_bin(c); |
99 | nibble = c - '0'; | 99 | if (val < 0 || (strict && isupper(c))) |
100 | else if (isxdigit(c) && (islower(c) || !strict)) | ||
101 | nibble = tolower(c) - 'a' + 10; | ||
102 | else | ||
103 | goto fail; | 100 | goto fail; |
104 | *wwn = (*wwn << 4) | nibble; | 101 | *wwn = (*wwn << 4) | val; |
105 | } | 102 | } |
106 | err = 4; | 103 | err = 4; |
107 | fail: | 104 | fail: |
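ft_parse_wwn()'s open-coded nibble decoding collapses into hex_to_bin() (lib/hexdump.c), which returns the digit's value (0..15) or a negative value for a non-hex character; the extra isupper() test preserves the old strict-mode rule that hex letters must be lowercase. A minimal user-space sketch of the same parsing loop, with a stand-in hex_to_bin() and the position/colon bookkeeping of the real function simplified away:

	#include <ctype.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's hex_to_bin(): 0..15, or -1 if not hex. */
	static int hex_to_bin(char ch)
	{
		if (ch >= '0' && ch <= '9')
			return ch - '0';
		ch = tolower(ch);
		if (ch >= 'a' && ch <= 'f')
			return ch - 'a' + 10;
		return -1;
	}

	int main(void)
	{
		const char *name = "20:00:00:e0:8b:01:02:03";
		uint64_t wwn = 0;
		int val;

		for (const char *cp = name; *cp; cp++) {
			if (*cp == ':')		/* separators, as in ft_parse_wwn() */
				continue;
			val = hex_to_bin(*cp);
			if (val < 0)
				return 1;	/* reject non-hex input */
			wwn = (wwn << 4) | val;
		}
		printf("wwn = 0x%016llx\n", (unsigned long long)wwn);
		return 0;
	}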
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index d35ea5a3d56c..1369b1cb103d 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -28,7 +28,6 @@ | |||
28 | 28 | ||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/moduleparam.h> | 30 | #include <linux/moduleparam.h> |
31 | #include <linux/version.h> | ||
32 | #include <generated/utsrelease.h> | 31 | #include <generated/utsrelease.h> |
33 | #include <linux/utsname.h> | 32 | #include <linux/utsname.h> |
34 | #include <linux/init.h> | 33 | #include <linux/init.h> |
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index dbb5eaeee399..326921385aff 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
@@ -19,7 +19,6 @@ | |||
19 | 19 | ||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
22 | #include <linux/version.h> | ||
23 | #include <generated/utsrelease.h> | 22 | #include <generated/utsrelease.h> |
24 | #include <linux/utsname.h> | 23 | #include <linux/utsname.h> |
25 | #include <linux/init.h> | 24 | #include <linux/init.h> |