path: root/drivers/target/target_core_transport.c
author	Andy Grover <agrover@redhat.com>	2011-06-08 13:36:43 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-07-22 05:37:48 -0400
commit	6708bb27bb2703da238f21f516034263348af5be (patch)
tree	a23e1f9eab22933d773d6b6ad6263d6751379a00 /drivers/target/target_core_transport.c
parent	ec98f7825c6eaa4a9afb0eb518826efc8a2ed4a2 (diff)
target: Follow up core updates from AGrover and HCH (round 4)
This patch contains the squashed version of the fourth round of series
cleanups from Andy and Christoph, following the heavy lifting in the
preceding 'Eliminate usage of struct se_mem' and 'Make all control CDBs
scatter-gather' changes. This also includes a conversion of target core and
the v3.0 mainline fabric modules (loopback and tcm_fc) to use pr_debug and
the CONFIG_DYNAMIC_DEBUG infrastructure! These have been squashed into this
fourth and final round for v3.1.

target: Remove ifdeffed code in t_g_process_write
target: Remove direct ramdisk code
target: Rename task_sg_num to task_sg_nents
target: Remove custom debug macros for pr_debug. Use pr_err().
target: Remove custom debug macros in mainline fabrics
target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0
target: Remove transport do_se_mem_map callback
target: Further simplify transport_free_pages
target: Redo task allocation return value handling
target: Remove extra parentheses
target: change alloc_task call to take *cdb, not *cmd

(nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev)

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r--	drivers/target/target_core_transport.c | 690
1 file changed, 240 insertions(+), 450 deletions(-)
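A note on the pattern that dominates this diff: the per-file DEBUG_* macro
families below are deleted and their call sites become plain
pr_debug()/pr_err(), which are compiled in unconditionally and, with
CONFIG_DYNAMIC_DEBUG=y, can be toggled per file at runtime. A minimal
before/after sketch (the DEBUG_TS macros and the pr_debug() line are taken
from the hunks below; the echo command assumes debugfs is mounted at
/sys/kernel/debug):

	/* Before: messages exist only when the #define is uncommented at build time. */
	/* #define DEBUG_TASK_STOP */
	#ifdef DEBUG_TASK_STOP
	#define DEBUG_TS(x...) printk(KERN_INFO x)
	#else
	#define DEBUG_TS(x...)
	#endif

	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", cmd->se_tfo->get_task_tag(cmd));

	/* After: always built; enable at runtime with
	 *   echo 'file target_core_transport.c +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	pr_debug("ITT[0x%08x] - Stopping tasks\n", cmd->se_tfo->get_task_tag(cmd));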
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c743d94baf7..55b6588904a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,132 +58,6 @@
 #include "target_core_scdb.h"
 #include "target_core_ua.h"
 
-/* #define DEBUG_CDB_HANDLER */
-#ifdef DEBUG_CDB_HANDLER
-#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CDB_H(x...)
-#endif
-
-/* #define DEBUG_CMD_MAP */
-#ifdef DEBUG_CMD_MAP
-#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CMD_M(x...)
-#endif
-
-/* #define DEBUG_MEM_ALLOC */
-#ifdef DEBUG_MEM_ALLOC
-#define DEBUG_MEM(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM(x...)
-#endif
-
-/* #define DEBUG_MEM2_ALLOC */
-#ifdef DEBUG_MEM2_ALLOC
-#define DEBUG_MEM2(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM2(x...)
-#endif
-
-/* #define DEBUG_SG_CALC */
-#ifdef DEBUG_SG_CALC
-#define DEBUG_SC(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SC(x...)
-#endif
-
-/* #define DEBUG_SE_OBJ */
-#ifdef DEBUG_SE_OBJ
-#define DEBUG_SO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SO(x...)
-#endif
-
-/* #define DEBUG_CMD_VOL */
-#ifdef DEBUG_CMD_VOL
-#define DEBUG_VOL(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_VOL(x...)
-#endif
-
-/* #define DEBUG_CMD_STOP */
-#ifdef DEBUG_CMD_STOP
-#define DEBUG_CS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CS(x...)
-#endif
-
-/* #define DEBUG_PASSTHROUGH */
-#ifdef DEBUG_PASSTHROUGH
-#define DEBUG_PT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_PT(x...)
-#endif
-
-/* #define DEBUG_TASK_STOP */
-#ifdef DEBUG_TASK_STOP
-#define DEBUG_TS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TS(x...)
-#endif
-
-/* #define DEBUG_TRANSPORT_STOP */
-#ifdef DEBUG_TRANSPORT_STOP
-#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TRANSPORT_S(x...)
-#endif
-
-/* #define DEBUG_TASK_FAILURE */
-#ifdef DEBUG_TASK_FAILURE
-#define DEBUG_TF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TF(x...)
-#endif
-
-/* #define DEBUG_DEV_OFFLINE */
-#ifdef DEBUG_DEV_OFFLINE
-#define DEBUG_DO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_DO(x...)
-#endif
-
-/* #define DEBUG_TASK_STATE */
-#ifdef DEBUG_TASK_STATE
-#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TSTATE(x...)
-#endif
-
-/* #define DEBUG_STATUS_THR */
-#ifdef DEBUG_STATUS_THR
-#define DEBUG_ST(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_ST(x...)
-#endif
-
-/* #define DEBUG_TASK_TIMEOUT */
-#ifdef DEBUG_TASK_TIMEOUT
-#define DEBUG_TT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TT(x...)
-#endif
-
-/* #define DEBUG_GENERIC_REQUEST_FAILURE */
-#ifdef DEBUG_GENERIC_REQUEST_FAILURE
-#define DEBUG_GRF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_GRF(x...)
-#endif
-
-/* #define DEBUG_SAM_TASK_ATTRS */
-#ifdef DEBUG_SAM_TASK_ATTRS
-#define DEBUG_STA(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_STA(x...)
-#endif
-
 static int sub_api_initialized;
 
 static struct kmem_cache *se_cmd_cache;
@@ -225,62 +99,62 @@ int init_se_kmem_caches(void)
 {
 	se_cmd_cache = kmem_cache_create("se_cmd_cache",
 			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-	if (!(se_cmd_cache)) {
-		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+	if (!se_cmd_cache) {
+		pr_err("kmem_cache_create for struct se_cmd failed\n");
 		goto out;
 	}
 	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
 			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
 			0, NULL);
-	if (!(se_tmr_req_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+	if (!se_tmr_req_cache) {
+		pr_err("kmem_cache_create() for struct se_tmr_req"
 				" failed\n");
 		goto out;
 	}
 	se_sess_cache = kmem_cache_create("se_sess_cache",
 			sizeof(struct se_session), __alignof__(struct se_session),
 			0, NULL);
-	if (!(se_sess_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_session"
+	if (!se_sess_cache) {
+		pr_err("kmem_cache_create() for struct se_session"
 				" failed\n");
 		goto out;
 	}
 	se_ua_cache = kmem_cache_create("se_ua_cache",
 			sizeof(struct se_ua), __alignof__(struct se_ua),
 			0, NULL);
-	if (!(se_ua_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+	if (!se_ua_cache) {
+		pr_err("kmem_cache_create() for struct se_ua failed\n");
 		goto out;
 	}
 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
 			sizeof(struct t10_pr_registration),
 			__alignof__(struct t10_pr_registration), 0, NULL);
-	if (!(t10_pr_reg_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+	if (!t10_pr_reg_cache) {
+		pr_err("kmem_cache_create() for struct t10_pr_registration"
 				" failed\n");
 		goto out;
 	}
 	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
 			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
 			0, NULL);
-	if (!(t10_alua_lu_gp_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+	if (!t10_alua_lu_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
 				" failed\n");
 		goto out;
 	}
 	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
 			sizeof(struct t10_alua_lu_gp_member),
 			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
-	if (!(t10_alua_lu_gp_mem_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+	if (!t10_alua_lu_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
 				"cache failed\n");
 		goto out;
 	}
 	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
 			sizeof(struct t10_alua_tg_pt_gp),
 			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
-	if (!(t10_alua_tg_pt_gp_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+	if (!t10_alua_tg_pt_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
 				"cache failed\n");
 		goto out;
 	}
@@ -289,8 +163,8 @@ int init_se_kmem_caches(void)
 			sizeof(struct t10_alua_tg_pt_gp_member),
 			__alignof__(struct t10_alua_tg_pt_gp_member),
 			0, NULL);
-	if (!(t10_alua_tg_pt_gp_mem_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+	if (!t10_alua_tg_pt_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
 				"mem_t failed\n");
 		goto out;
 	}
@@ -366,19 +240,19 @@ static int transport_subsystem_reqmods(void)
 
 	ret = request_module("target_core_iblock");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_iblock\n");
+		pr_err("Unable to load target_core_iblock\n");
 
 	ret = request_module("target_core_file");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_file\n");
+		pr_err("Unable to load target_core_file\n");
 
 	ret = request_module("target_core_pscsi");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_pscsi\n");
+		pr_err("Unable to load target_core_pscsi\n");
 
 	ret = request_module("target_core_stgt");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_stgt\n");
+		pr_err("Unable to load target_core_stgt\n");
 
 	return 0;
 }
@@ -405,8 +279,8 @@ struct se_session *transport_init_session(void)
 	struct se_session *se_sess;
 
 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
-	if (!(se_sess)) {
-		printk(KERN_ERR "Unable to allocate struct se_session from"
+	if (!se_sess) {
+		pr_err("Unable to allocate struct se_session from"
 				" se_sess_cache\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -460,7 +334,7 @@ void __transport_register_session(
 	}
 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 
-	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
 		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
 }
 EXPORT_SYMBOL(__transport_register_session);
@@ -485,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
 	 */
 	se_nacl = se_sess->se_node_acl;
-	if ((se_nacl)) {
+	if (se_nacl) {
 		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 		list_del(&se_sess->sess_acl_list);
 		/*
@@ -516,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess)
 	struct se_portal_group *se_tpg = se_sess->se_tpg;
 	struct se_node_acl *se_nacl;
 
-	if (!(se_tpg)) {
+	if (!se_tpg) {
 		transport_free_session(se_sess);
 		return;
 	}
@@ -532,11 +406,11 @@ void transport_deregister_session(struct se_session *se_sess)
 	 * struct se_node_acl if it had been previously dynamically generated.
 	 */
 	se_nacl = se_sess->se_node_acl;
-	if ((se_nacl)) {
+	if (se_nacl) {
 		spin_lock_bh(&se_tpg->acl_node_lock);
 		if (se_nacl->dynamic_node_acl) {
-			if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
-					se_tpg))) {
+			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
+					se_tpg)) {
 				list_del(&se_nacl->acl_list);
 				se_tpg->num_node_acls--;
 				spin_unlock_bh(&se_tpg->acl_node_lock);
@@ -553,7 +427,7 @@ void transport_deregister_session(struct se_session *se_sess)
 
 	transport_free_session(se_sess);
 
-	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
 		se_tpg->se_tpg_tfo->get_fabric_name());
 }
 EXPORT_SYMBOL(transport_deregister_session);
@@ -569,19 +443,19 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		dev = task->se_dev;
-		if (!(dev))
+		if (!dev)
 			continue;
 
 		if (atomic_read(&task->task_active))
 			continue;
 
-		if (!(atomic_read(&task->task_state_active)))
+		if (!atomic_read(&task->task_state_active))
 			continue;
 
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
 		list_del(&task->t_state_list);
-		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
-			cmd->se_tfo->tfo_get_task_tag(cmd), dev, task);
+		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+			cmd->se_tfo->get_task_tag(cmd), dev, task);
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 		atomic_set(&task->task_state_active, 0);
@@ -610,7 +484,7 @@ static int transport_cmd_check_stop(
 	 * command for LUN shutdown purposes.
 	 */
 	if (atomic_read(&cmd->transport_lun_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)"
+		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
 			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -629,7 +503,7 @@ static int transport_cmd_check_stop(
 	 * this command for frontend exceptions.
 	 */
 	if (atomic_read(&cmd->t_transport_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) =="
+		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
 			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -695,7 +569,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 		return;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(atomic_read(&cmd->transport_dev_active))) {
+	if (!atomic_read(&cmd->transport_dev_active)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto check_lun;
 	}
@@ -710,7 +584,7 @@ check_lun:
 		list_del(&cmd->se_lun_node);
 		atomic_set(&cmd->transport_lun_active, 0);
 #if 0
-		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
 			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
 #endif
 	}
@@ -797,7 +671,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!(atomic_read(&cmd->t_transport_queue_active))) {
+	if (!atomic_read(&cmd->t_transport_queue_active)) {
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
@@ -812,7 +686,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	if (atomic_read(&cmd->t_transport_queue_active)) {
-		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
 			atomic_read(&cmd->t_transport_queue_active));
 	}
@@ -853,7 +727,7 @@ void transport_complete_task(struct se_task *task, int success)
 	int t_state;
 	unsigned long flags;
 #if 0
-	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
 			cmd->t_task_cdb[0], dev);
 #endif
 	if (dev)
@@ -899,8 +773,8 @@ void transport_complete_task(struct se_task *task, int success)
 	 * the processing thread.
 	 */
 	if (atomic_read(&task->task_timeout)) {
-		if (!(atomic_dec_and_test(
-				&cmd->t_task_cdbs_timeout_left))) {
+		if (!atomic_dec_and_test(
+				&cmd->t_task_cdbs_timeout_left)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 				flags);
 			return;
@@ -918,7 +792,7 @@ void transport_complete_task(struct se_task *task, int success)
 	 * struct se_task from struct se_cmd will complete itself into the
 	 * device queue depending upon int success.
 	 */
-	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
+	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
 		if (!success)
 			cmd->t_tasks_failed = 1;
 
@@ -976,9 +850,9 @@ static inline int transport_add_task_check_sam_attr(
 			&task_prev->t_execute_list :
 			&dev->execute_task_list);
 
-		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
 			" in execution queue\n",
-			T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+			task->task_se_cmd->t_task_cdb[0]);
 		return 1;
 	}
 	/*
@@ -1020,7 +894,7 @@ static void __transport_add_task_to_execute_queue(
 
 	atomic_set(&task->task_state_active, 1);
 
-	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
 		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
 		task, dev);
 }
@@ -1042,8 +916,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 		list_add_tail(&task->t_state_list, &dev->state_task_list);
 		atomic_set(&task->task_state_active, 1);
 
-		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
-			task->se_cmd->se_tfo->get_task_tag(
+		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+			task->task_se_cmd->se_tfo->get_task_tag(
 			task->task_se_cmd), task, dev);
 
 		spin_unlock(&dev->execute_task_lock);
@@ -1112,7 +986,7 @@ static void target_qf_do_work(struct work_struct *work)
 		smp_mb__after_atomic_dec();
 		spin_unlock_irq(&dev->qf_cmd_lock);
 
-		printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue"
+		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
 			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
@@ -1197,7 +1071,7 @@ static void transport_release_all_cmds(struct se_device *dev)
 		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
 				flags);
 
-		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+		pr_err("Releasing ITT: 0x%08x, i_state: %u,"
 			" t_state: %u directly\n",
 			cmd->se_tfo->get_task_tag(cmd),
 			cmd->se_tfo->get_cmd_state(cmd), t_state);
@@ -1264,7 +1138,7 @@ void transport_dump_vpd_proto_id(
 	if (p_buf)
 		strncpy(p_buf, buf, p_buf_len);
 	else
-		printk(KERN_INFO "%s", buf);
+		pr_debug("%s", buf);
 }
 
 void
@@ -1314,7 +1188,7 @@ int transport_dump_vpd_assoc(
 	if (p_buf)
 		strncpy(p_buf, buf, p_buf_len);
 	else
-		printk("%s", buf);
+		pr_debug("%s", buf);
 
 	return ret;
 }
@@ -1374,7 +1248,7 @@ int transport_dump_vpd_ident_type(
 			return -EINVAL;
 		strncpy(p_buf, buf, p_buf_len);
 	} else {
-		printk("%s", buf);
+		pr_debug("%s", buf);
 	}
 
 	return ret;
@@ -1425,7 +1299,7 @@ int transport_dump_vpd_ident(
 	if (p_buf)
 		strncpy(p_buf, buf, p_buf_len);
 	else
-		printk("%s", buf);
+		pr_debug("%s", buf);
 
 	return ret;
 }
@@ -1482,7 +1356,7 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
 	}
 
 	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
-	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
 		" device\n", dev->transport->name,
 		dev->transport->get_device_rev(dev));
 }
@@ -1494,32 +1368,32 @@ static void scsi_dump_inquiry(struct se_device *dev)
 	/*
 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
 	 */
-	printk(" Vendor: ");
+	pr_debug(" Vendor: ");
 	for (i = 0; i < 8; i++)
 		if (wwn->vendor[i] >= 0x20)
-			printk("%c", wwn->vendor[i]);
+			pr_debug("%c", wwn->vendor[i]);
 		else
-			printk(" ");
+			pr_debug(" ");
 
-	printk(" Model: ");
+	pr_debug(" Model: ");
 	for (i = 0; i < 16; i++)
 		if (wwn->model[i] >= 0x20)
-			printk("%c", wwn->model[i]);
+			pr_debug("%c", wwn->model[i]);
 		else
-			printk(" ");
+			pr_debug(" ");
 
-	printk(" Revision: ");
+	pr_debug(" Revision: ");
 	for (i = 0; i < 4; i++)
 		if (wwn->revision[i] >= 0x20)
-			printk("%c", wwn->revision[i]);
+			pr_debug("%c", wwn->revision[i]);
 		else
-			printk(" ");
+			pr_debug(" ");
 
-	printk("\n");
+	pr_debug("\n");
 
 	device_type = dev->transport->get_device_type(dev);
-	printk(" Type: %s ", scsi_device_type(device_type));
-	printk(" ANSI SCSI revision: %02x\n",
+	pr_debug(" Type: %s ", scsi_device_type(device_type));
+	pr_debug(" ANSI SCSI revision: %02x\n",
 		dev->transport->get_device_rev(dev));
 }
 
@@ -1537,8 +1411,8 @@ struct se_device *transport_add_device_to_core_hba(
 	struct se_device *dev;
 
 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
-	if (!(dev)) {
-		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+	if (!dev) {
+		pr_err("Unable to allocate memory for se_dev_t\n");
 		return NULL;
 	}
 
@@ -1608,7 +1482,7 @@ struct se_device *transport_add_device_to_core_hba(
 	dev->process_thread = kthread_run(transport_processing_thread, dev,
 				  "LIO_%s", dev->transport->name);
 	if (IS_ERR(dev->process_thread)) {
-		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+		pr_err("Unable to create kthread: LIO_%s\n",
 			dev->transport->name);
 		goto out;
 	}
@@ -1626,7 +1500,7 @@ struct se_device *transport_add_device_to_core_hba(
 	 */
 	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
 		if (!inquiry_prod || !inquiry_rev) {
-			printk(KERN_ERR "All non TCM/pSCSI plugins require"
+			pr_err("All non TCM/pSCSI plugins require"
 				" INQUIRY consts\n");
 			goto out;
 		}
@@ -1688,9 +1562,9 @@ transport_generic_get_task(struct se_cmd *cmd,
 	struct se_task *task;
 	struct se_device *dev = cmd->se_dev;
 
-	task = dev->transport->alloc_task(cmd);
+	task = dev->transport->alloc_task(cmd->t_task_cdb);
 	if (!task) {
-		printk(KERN_ERR "Unable to allocate struct se_task\n");
+		pr_err("Unable to allocate struct se_task\n");
 		return NULL;
 	}
 
@@ -1751,7 +1625,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
 		return 0;
 
 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
-		DEBUG_STA("SAM Task Attribute ACA"
+		pr_debug("SAM Task Attribute ACA"
 			" emulation is not supported\n");
 		return -EINVAL;
 	}
@@ -1761,9 +1635,9 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
 	 */
 	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
 	smp_mb__after_atomic_inc();
-	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
 		cmd->se_ordered_id, cmd->sam_task_attr,
-		TRANSPORT(cmd->se_dev)->name);
+		cmd->se_dev->transport->name);
 	return 0;
 }
 
@@ -1804,7 +1678,7 @@ int transport_generic_allocate_tasks(
 	 * for VARIABLE_LENGTH_CMD
 	 */
 	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
-		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+		pr_err("Received SCSI CDB with command_size: %d that"
 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
 		return -EINVAL;
@@ -1817,8 +1691,8 @@ int transport_generic_allocate_tasks(
 	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
 		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
 						GFP_KERNEL);
-		if (!(cmd->t_task_cdb)) {
-			printk(KERN_ERR "Unable to allocate cmd->t_task_cdb"
+		if (!cmd->t_task_cdb) {
+			pr_err("Unable to allocate cmd->t_task_cdb"
 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
 				scsi_command_size(cdb),
 				(unsigned long)sizeof(cmd->__t_task_cdb));
@@ -1864,7 +1738,7 @@ int transport_generic_handle_cdb(
 {
 	if (!cmd->se_lun) {
 		dump_stack();
-		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		pr_err("cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
 
@@ -1882,12 +1756,12 @@ int transport_handle_cdb_direct(
 {
 	if (!cmd->se_lun) {
 		dump_stack();
-		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		pr_err("cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
 	if (in_interrupt()) {
 		dump_stack();
-		printk(KERN_ERR "transport_generic_handle_cdb cannot be called"
+		pr_err("transport_generic_handle_cdb cannot be called"
 				" from interrupt context\n");
 		return -EINVAL;
 	}
@@ -1906,7 +1780,7 @@ int transport_generic_handle_cdb_map(
 {
 	if (!cmd->se_lun) {
 		dump_stack();
-		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		pr_err("cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
 
@@ -1975,7 +1849,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 	unsigned long flags;
 	int ret = 0;
 
-	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+	pr_debug("ITT[0x%08x] - Stopping tasks\n",
 		cmd->se_tfo->get_task_tag(cmd));
 
 	/*
@@ -1984,7 +1858,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
 				&cmd->t_task_list, t_list) {
-		DEBUG_TS("task_no[%d] - Processing task %p\n",
+		pr_debug("task_no[%d] - Processing task %p\n",
 				task->task_no, task);
 		/*
 		 * If the struct se_task has not been sent and is not active,
@@ -1997,7 +1871,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 			transport_remove_task_from_execute_queue(task,
 					task->se_dev);
 
-			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+			pr_debug("task_no[%d] - Removed from execute queue\n",
 				task->task_no);
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			continue;
@@ -2012,10 +1886,10 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 
-			DEBUG_TS("task_no[%d] - Waiting to complete\n",
+			pr_debug("task_no[%d] - Waiting to complete\n",
 				task->task_no);
 			wait_for_completion(&task->task_stop_comp);
-			DEBUG_TS("task_no[%d] - Stopped successfully\n",
+			pr_debug("task_no[%d] - Stopped successfully\n",
 				task->task_no);
 
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -2024,7 +1898,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
 		} else {
-			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+			pr_debug("task_no[%d] - Did nothing\n", task->task_no);
 			ret++;
 		}
 
@@ -2046,18 +1920,18 @@ static void transport_generic_request_failure(
 {
 	int ret = 0;
 
-	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->t_task_cdb[0]);
-	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+	pr_debug("-----[ i_state: %d t_state/def_t_state:"
 		" %d/%d transport_error_status: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
 		cmd->t_state, cmd->deferred_t_state,
 		cmd->transport_error_status);
-	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
 		" t_transport_active: %d t_transport_stop: %d"
-		" t_transport_sent: %d\n", cmd->t_task_cdbs,
+		" t_transport_sent: %d\n", cmd->t_task_list_num,
 		atomic_read(&cmd->t_task_cdbs_left),
 		atomic_read(&cmd->t_task_cdbs_sent),
 		atomic_read(&cmd->t_task_cdbs_ex_left),
@@ -2146,7 +2020,7 @@ static void transport_generic_request_failure(
 		 */
 		break;
 	default:
-		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
 			cmd->t_task_cdb[0],
 			cmd->transport_error_status);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2164,7 +2038,7 @@ static void transport_generic_request_failure(
 
 check_stop:
 	transport_lun_remove_cmd(cmd);
-	if (!(transport_cmd_check_stop_to_fabric(cmd)))
+	if (!transport_cmd_check_stop_to_fabric(cmd))
 		;
 	return;
 
@@ -2178,7 +2052,7 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_transport_timeout))) {
+	if (!atomic_read(&cmd->t_transport_timeout)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2262,7 +2136,7 @@ static void transport_task_timeout_handler(unsigned long data)
 	struct se_cmd *cmd = task->task_se_cmd;
 	unsigned long flags;
 
-	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (task->task_flags & TF_STOP) {
@@ -2274,8 +2148,8 @@ static void transport_task_timeout_handler(unsigned long data)
 	/*
 	 * Determine if transport_complete_task() has already been called.
 	 */
-	if (!(atomic_read(&task->task_active))) {
-		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+	if (!atomic_read(&task->task_active)) {
+		pr_debug("transport task: %p cmd: %p timeout task_active"
 			" == 0\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
@@ -2290,20 +2164,20 @@ static void transport_task_timeout_handler(unsigned long data)
 	task->task_scsi_status = 1;
 
 	if (atomic_read(&task->task_stop)) {
-		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+		pr_debug("transport task: %p cmd: %p timeout task_stop"
 			" == 1\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete(&task->task_stop_comp);
 		return;
 	}
 
-	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
-		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+		pr_debug("transport task: %p cmd: %p timeout non zero"
 			" t_task_cdbs_left\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
-	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
 		task, cmd);
 
 	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
@@ -2326,7 +2200,7 @@ static void transport_start_task_timer(struct se_task *task)
 	 * If the task_timeout is disabled, exit now.
 	 */
 	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
-	if (!(timeout))
+	if (!timeout)
 		return;
 
 	init_timer(&task->task_timer);
@@ -2337,7 +2211,7 @@ static void transport_start_task_timer(struct se_task *task)
 	task->task_flags |= TF_RUNNING;
 	add_timer(&task->task_timer);
 #if 0
-	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+	pr_debug("Starting task timer for cmd: %p task: %p seconds:"
 		" %d\n", task->task_se_cmd, task, timeout);
 #endif
 }
@@ -2349,7 +2223,7 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 
-	if (!(task->task_flags & TF_RUNNING))
+	if (!task->task_flags & TF_RUNNING)
 		return;
 
 	task->task_flags |= TF_STOP;
@@ -2404,9 +2278,9 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
 		atomic_inc(&cmd->se_dev->dev_hoq_count);
 		smp_mb__after_atomic_inc();
-		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+		pr_debug("Added HEAD_OF_QUEUE for CDB:"
 			" 0x%02x, se_ordered_id: %u\n",
-			cmd->_task_cdb[0],
+			cmd->t_task_cdb[0],
 			cmd->se_ordered_id);
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -2418,7 +2292,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
 		smp_mb__after_atomic_inc();
 
-		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
 			" list, se_ordered_id: %u\n",
 			cmd->t_task_cdb[0],
 			cmd->se_ordered_id);
@@ -2427,7 +2301,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 		 * no other older commands exist that need to be
 		 * completed first.
 		 */
-		if (!(atomic_read(&cmd->se_dev->simple_cmds)))
+		if (!atomic_read(&cmd->se_dev->simple_cmds))
 			return 1;
 	} else {
 		/*
@@ -2452,7 +2326,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 			&cmd->se_dev->delayed_cmd_list);
 		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
 
-		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
 			" delayed CMD list, se_ordered_id: %u\n",
 			cmd->t_task_cdb[0], cmd->sam_task_attr,
 			cmd->se_ordered_id);
@@ -2486,7 +2360,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 	 * Call transport_cmd_check_stop() to see if a fabric exception
 	 * has occurred that prevents execution.
 	 */
-	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
 		/*
 		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
 		 * attribute for the tasks of the received struct se_cmd CDB
@@ -2777,7 +2651,7 @@ static inline u32 transport_get_size(
 			return sectors;
 	}
 #if 0
-	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+	pr_debug("Returning block_size: %u, sectors: %u == %u for"
 		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
 		dev->se_sub_dev->se_dev_attrib.block_size * sectors,
 		dev->transport->name);
@@ -2832,8 +2706,8 @@ static void transport_xor_callback(struct se_cmd *cmd)
 	 * 5) transfer the resulting XOR data to the data-in buffer.
 	 */
 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
-	if (!(buf)) {
-		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+	if (!buf) {
+		pr_err("Unable to allocate xor_callback buf\n");
 		return;
 	}
 	/*
@@ -2893,18 +2767,18 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 			continue;
 
 		dev = task->se_dev;
-		if (!(dev))
+		if (!dev)
 			continue;
 
 		if (!dev->transport->get_sense_buffer) {
-			printk(KERN_ERR "dev->transport->get_sense_buffer"
+			pr_err("dev->transport->get_sense_buffer"
 					" is NULL\n");
 			continue;
 		}
 
 		sense_buffer = dev->transport->get_sense_buffer(task);
-		if (!(sense_buffer)) {
-			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+		if (!sense_buffer) {
+			pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
 				" sense buffer for task with sense\n",
 				cmd->se_tfo->get_task_tag(cmd), task->task_no);
 			continue;
@@ -2921,7 +2795,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 			cmd->scsi_sense_length =
 				(TRANSPORT_SENSE_BUFFER + offset);
 
-			printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+			pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
 				" and sense\n",
 				dev->se_hba->hba_id, dev->transport->name,
 				cmd->scsi_status);
@@ -2969,13 +2843,12 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
 
 	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
 
-	if ((cmd->t_task_lba + sectors) >
-	     transport_dev_end_lba(dev)) {
-		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+		pr_err("LBA: %llu Sectors: %u exceeds"
 			" transport_dev_end_lba(): %llu\n",
 			cmd->t_task_lba, sectors,
 			transport_dev_end_lba(dev));
-		printk(KERN_ERR " We should return CHECK_CONDITION"
+		pr_err(" We should return CHECK_CONDITION"
 			" but we don't yet\n");
 		return 0;
 	}
@@ -3026,7 +2899,7 @@ static int transport_generic_cmd_sequencer(
 	 */
 	if (ret > 0) {
 #if 0
-		printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+		pr_debug("[%s]: ALUA TG Port not available,"
 			" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
 			cmd->se_tfo->get_fabric_name(), alua_ascq);
 #endif
@@ -3192,10 +3065,13 @@ static int transport_generic_cmd_sequencer(
 		if (sector_ret)
 			goto out_unsupported_cdb;
 
-		if (sectors != 0)
+		if (sectors)
 			size = transport_get_size(sectors, cdb, cmd);
-		else
-			size = dev->se_sub_dev->se_dev_attrib.block_size;
+		else {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+				" supported\n");
+			goto out_invalid_cdb_field;
+		}
 
 		cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
@@ -3207,7 +3083,7 @@ static int transport_generic_cmd_sequencer(
 			break;
 
 		if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
-			printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+			pr_err("WRITE_SAME PBDATA and LBDATA"
 				" bits not supported for Block Discard"
 				" Emulation\n");
 			goto out_invalid_cdb_field;
@@ -3217,13 +3093,13 @@ static int transport_generic_cmd_sequencer(
 		 * tpws with the UNMAP=1 bit set.
 		 */
 		if (!(cdb[10] & 0x08)) {
-			printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+			pr_err("WRITE_SAME w/o UNMAP bit not"
 				" supported for Block Discard Emulation\n");
 			goto out_invalid_cdb_field;
 		}
 		break;
 	default:
-		printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+		pr_err("VARIABLE_LENGTH_CMD service action"
 			" 0x%04x not supported\n", service_action);
 		goto out_unsupported_cdb;
 	}
@@ -3469,10 +3345,12 @@ static int transport_generic_cmd_sequencer(
 		if (sector_ret)
 			goto out_unsupported_cdb;
 
-		if (sectors != 0)
+		if (sectors)
 			size = transport_get_size(sectors, cdb, cmd);
-		else
-			size = dev->se_sub_dev->se_dev_attrib.block_size;
+		else {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			goto out_invalid_cdb_field;
+		}
 
 		cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
 		passthrough = (dev->transport->transport_type ==
@@ -3484,9 +3362,9 @@ static int transport_generic_cmd_sequencer(
 		 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and
 		 * TCM/FILEIO subsystem plugin backstores.
 		 */
-		if (!(passthrough)) {
+		if (!passthrough) {
 			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
-				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+				pr_err("WRITE_SAME PBDATA and LBDATA"
 					" bits not supported for Block Discard"
 					" Emulation\n");
 				goto out_invalid_cdb_field;
@@ -3496,7 +3374,7 @@ static int transport_generic_cmd_sequencer(
 			 * tpws with the UNMAP=1 bit set.
 			 */
 			if (!(cdb[1] & 0x08)) {
-				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+				pr_err("WRITE_SAME w/o UNMAP bit not "
 					" supported for Block Discard Emulation\n");
 				goto out_invalid_cdb_field;
 			}
@@ -3532,7 +3410,7 @@ static int transport_generic_cmd_sequencer(
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		break;
 	default:
-		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
 			" 0x%02x, sending CHECK_CONDITION.\n",
 			cmd->se_tfo->get_fabric_name(), cdb[0]);
 		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
@@ -3540,7 +3418,7 @@ static int transport_generic_cmd_sequencer(
 	}
 
 	if (size != cmd->data_length) {
-		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 			cmd->data_length, size, cdb[0]);
@@ -3548,7 +3426,7 @@ static int transport_generic_cmd_sequencer(
 		cmd->cmd_spdtl = size;
 
 		if (cmd->data_direction == DMA_TO_DEVICE) {
-			printk(KERN_ERR "Rejecting underflow/overflow"
+			pr_err("Rejecting underflow/overflow"
 				" WRITE data\n");
 			goto out_invalid_cdb_field;
 		}
@@ -3556,8 +3434,8 @@ static int transport_generic_cmd_sequencer(
 		 * Reject READ_* or WRITE_* with overflow/underflow for
 		 * type SCF_SCSI_DATA_SG_IO_CDB.
 		 */
-		if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
-			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
+			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
 				" CDB on non 512-byte sector setup subsystem"
 				" plugin: %s\n", dev->transport->name);
 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
@@ -3607,14 +3485,14 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 		atomic_dec(&dev->simple_cmds);
 		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
-		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
 		atomic_dec(&dev->dev_hoq_count);
 		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
-		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+		pr_debug("Incremented dev_cur_ordered_id: %u for"
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -3625,7 +3503,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3625 spin_unlock(&dev->ordered_cmd_lock); 3503 spin_unlock(&dev->ordered_cmd_lock);
3626 3504
3627 dev->dev_cur_ordered_id++; 3505 dev->dev_cur_ordered_id++;
3628 DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" 3506 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3629 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 3507 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3630 } 3508 }
3631 /* 3509 /*
@@ -3640,10 +3518,10 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3640 list_del(&cmd_p->se_delayed_node); 3518 list_del(&cmd_p->se_delayed_node);
3641 spin_unlock(&dev->delayed_cmd_lock); 3519 spin_unlock(&dev->delayed_cmd_lock);
3642 3520
3643 DEBUG_STA("Calling add_tasks() for" 3521 pr_debug("Calling add_tasks() for"
3644 " cmd_p: 0x%02x Task Attr: 0x%02x" 3522 " cmd_p: 0x%02x Task Attr: 0x%02x"
3645 " Dormant -> Active, se_ordered_id: %u\n", 3523 " Dormant -> Active, se_ordered_id: %u\n",
3646 T_TASK(cmd_p)->t_task_cdb[0], 3524 cmd_p->t_task_cdb[0],
3647 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3525 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3648 3526
3649 transport_add_tasks_from_cmd(cmd_p); 3527 transport_add_tasks_from_cmd(cmd_p);
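
The DEBUG_STA() conversions in this hunk follow the commit's overall theme: pr_debug() compiles to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is set, and with dynamic debug each call site can be switched on at runtime. A minimal sketch, with an illustrative function name:

#include <linux/printk.h>

/* Sketch: with CONFIG_DYNAMIC_DEBUG, these sites are enabled via
 * the control file, e.g.:
 *   echo 'file target_core_transport.c +p' > \
 *        /sys/kernel/debug/dynamic_debug/control
 */
static void note_ordered_id(unsigned int ordered_id, unsigned int se_ordered_id)
{
        pr_debug("Incremented dev_cur_ordered_id: %u for cmd: %u\n",
                 ordered_id, se_ordered_id);
}
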
@@ -3812,7 +3690,7 @@ done:
3812 return; 3690 return;
3813 3691
3814queue_full: 3692queue_full:
3815 printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p," 3693 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3816 " data_direction: %d\n", cmd, cmd->data_direction); 3694 " data_direction: %d\n", cmd, cmd->data_direction);
3817 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); 3695 transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
3818} 3696}
@@ -3837,49 +3715,34 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
3837 if (task->se_dev) 3715 if (task->se_dev)
3838 task->se_dev->transport->free_task(task); 3716 task->se_dev->transport->free_task(task);
3839 else 3717 else
3840 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", 3718 pr_err("task[%u] - task->se_dev is NULL\n",
3841 task->task_no); 3719 task->task_no);
3842 spin_lock_irqsave(&cmd->t_state_lock, flags); 3720 spin_lock_irqsave(&cmd->t_state_lock, flags);
3843 } 3721 }
3844 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3845} 3723}
3846 3724
3847static inline void transport_free_pages(struct se_cmd *cmd) 3725static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3848{ 3726{
3849 struct scatterlist *sg; 3727 struct scatterlist *sg;
3850 int free_page = 1;
3851 int count; 3728 int count;
3852 3729
3853 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3730 for_each_sg(sgl, sg, nents, count)
3854 free_page = 0; 3731 __free_page(sg_page(sg));
3855 if (cmd->se_dev->transport->do_se_mem_map)
3856 free_page = 0;
3857 3732
3858 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) { 3733 kfree(sgl);
3859 /* 3734}
3860 * Only called if
3861 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3862 */
3863 if (free_page)
3864 __free_page(sg_page(sg));
3865 3735
3866 } 3736static inline void transport_free_pages(struct se_cmd *cmd)
3867 if (free_page) 3737{
3868 kfree(cmd->t_data_sg); 3738 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3739 return;
3740
3741 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3869 cmd->t_data_sg = NULL; 3742 cmd->t_data_sg = NULL;
3870 cmd->t_data_nents = 0; 3743 cmd->t_data_nents = 0;
3871 3744
3872 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 3745 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3873 /*
3874 * Only called if
3875 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
3876 */
3877 if (free_page)
3878 __free_page(sg_page(sg));
3879
3880 }
3881 if (free_page)
3882 kfree(cmd->t_bidi_data_sg);
3883 cmd->t_bidi_data_sg = NULL; 3746 cmd->t_bidi_data_sg = NULL;
3884 cmd->t_bidi_data_nents = 0; 3747 cmd->t_bidi_data_nents = 0;
3885} 3748}
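
A design note on the new transport_free_sgl() helper above: transport_free_pages() can call it unconditionally for the BIDI table because for_each_sg() iterates zero times when nents is zero and kfree(NULL) is a no-op. A standalone sketch of the same pattern:

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: releasing a page-backed scatterlist. Safe for an empty
 * table (sgl == NULL, nents == 0) since the loop body never runs
 * and kfree(NULL) does nothing. */
static void free_page_sgl(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __free_page(sg_page(sg));
        kfree(sgl);
}
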
@@ -3895,7 +3758,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
3895 3758
3896 spin_lock_irqsave(&cmd->t_state_lock, flags); 3759 spin_lock_irqsave(&cmd->t_state_lock, flags);
3897 if (atomic_read(&cmd->t_fe_count)) { 3760 if (atomic_read(&cmd->t_fe_count)) {
3898 if (!(atomic_dec_and_test(&cmd->t_fe_count))) { 3761 if (!atomic_dec_and_test(&cmd->t_fe_count)) {
3899 spin_unlock_irqrestore(&cmd->t_state_lock, 3762 spin_unlock_irqrestore(&cmd->t_state_lock,
3900 flags); 3763 flags);
3901 return 1; 3764 return 1;
@@ -3903,7 +3766,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
3903 } 3766 }
3904 3767
3905 if (atomic_read(&cmd->t_se_count)) { 3768 if (atomic_read(&cmd->t_se_count)) {
3906 if (!(atomic_dec_and_test(&cmd->t_se_count))) { 3769 if (!atomic_dec_and_test(&cmd->t_se_count)) {
3907 spin_unlock_irqrestore(&cmd->t_state_lock, 3770 spin_unlock_irqrestore(&cmd->t_state_lock,
3908 flags); 3771 flags);
3909 return 1; 3772 return 1;
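
Dropping the extra parentheses around atomic_dec_and_test() changes nothing semantically: the call decrements and tests in one atomic step, returning true only for the caller that takes the counter to zero. A minimal sketch of that last-reference pattern, with hypothetical names:

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch: the decrement and the zero test are one atomic operation,
 * so exactly one caller observes the true return. */
static bool put_ref(atomic_t *ref)
{
        if (!atomic_dec_and_test(ref))
                return false;    /* other references remain */
        /* last reference dropped: safe to tear down here */
        return true;
}
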
@@ -3922,7 +3785,7 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
3922 return; 3785 return;
3923 3786
3924 spin_lock_irqsave(&cmd->t_state_lock, flags); 3787 spin_lock_irqsave(&cmd->t_state_lock, flags);
3925 if (!(atomic_read(&cmd->transport_dev_active))) { 3788 if (!atomic_read(&cmd->transport_dev_active)) {
3926 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3789 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3927 goto free_pages; 3790 goto free_pages;
3928 } 3791 }
@@ -3953,7 +3816,7 @@ transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
3953 } 3816 }
3954 3817
3955 spin_lock_irqsave(&cmd->t_state_lock, flags); 3818 spin_lock_irqsave(&cmd->t_state_lock, flags);
3956 if (!(atomic_read(&cmd->transport_dev_active))) { 3819 if (!atomic_read(&cmd->transport_dev_active)) {
3957 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3820 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3958 goto free_pages; 3821 goto free_pages;
3959 } 3822 }
@@ -4027,7 +3890,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4027 DMA_FROM_DEVICE, 3890 DMA_FROM_DEVICE,
4028 cmd->t_bidi_data_sg, 3891 cmd->t_bidi_data_sg,
4029 cmd->t_bidi_data_nents); 3892 cmd->t_bidi_data_nents);
4030 if (!rc) { 3893 if (rc <= 0) {
4031 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3894 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4032 cmd->scsi_sense_reason = 3895 cmd->scsi_sense_reason =
4033 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3896 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -4046,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
4046 cmd->data_direction, 3909 cmd->data_direction,
4047 cmd->t_data_sg, 3910 cmd->t_data_sg,
4048 cmd->t_data_nents); 3911 cmd->t_data_nents);
4049 if (!task_cdbs) { 3912 if (task_cdbs <= 0) {
4050 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3913 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4051 cmd->scsi_sense_reason = 3914 cmd->scsi_sense_reason =
4052 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3915 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
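
The `!rc` and `!task_cdbs` tests become `<= 0` here because the task-allocation path now returns the number of tasks built on success and a negative errno on failure; zero is no longer a success value. A sketch of that "count or -errno" convention, hypothetical names only:

#include <linux/errno.h>

/* Sketch: success returns a positive count, failure a negative errno. */
static int alloc_units(int wanted)
{
        if (wanted <= 0)
                return -EINVAL;
        return wanted;
}

/* Caller side: anything <= 0 is a failure. */
static int use_units(void)
{
        int rc = alloc_units(4);

        if (rc <= 0)
                return rc < 0 ? rc : -EINVAL; /* treat 0 as failure too */
        return 0;
}
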
@@ -4094,12 +3957,6 @@ transport_generic_get_mem(struct se_cmd *cmd)
4094 struct page *page; 3957 struct page *page;
4095 int i = 0; 3958 int i = 0;
4096 3959
4097 /*
4098 * If the device uses memory mapping this is enough.
4099 */
4100 if (cmd->se_dev->transport->do_se_mem_map)
4101 return 0;
4102
4103 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3960 nents = DIV_ROUND_UP(length, PAGE_SIZE);
4104 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 3961 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
4105 if (!cmd->t_data_sg) 3962 if (!cmd->t_data_sg)
@@ -4176,14 +4033,14 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4176 4033
4177 if (!sg_first) { 4034 if (!sg_first) {
4178 sg_first = task->task_sg; 4035 sg_first = task->task_sg;
4179 chained_nents = task->task_sg_num; 4036 chained_nents = task->task_sg_nents;
4180 } else { 4037 } else {
4181 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4038 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4182 chained_nents += task->task_sg_num; 4039 chained_nents += task->task_sg_nents;
4183 } 4040 }
4184 4041
4185 sg_prev = task->task_sg; 4042 sg_prev = task->task_sg;
4186 sg_prev_nents = task->task_sg_num; 4043 sg_prev_nents = task->task_sg_nents;
4187 } 4044 }
4188 /* 4045 /*
4189 * Setup the starting pointer and total t_tasks_sg_linked_no including 4046 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4192,19 +4049,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4192 cmd->t_tasks_sg_chained = sg_first; 4049 cmd->t_tasks_sg_chained = sg_first;
4193 cmd->t_tasks_sg_chained_no = chained_nents; 4050 cmd->t_tasks_sg_chained_no = chained_nents;
4194 4051
4195 DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 4052 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
4196 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, 4053 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
4197 cmd->t_tasks_sg_chained_no); 4054 cmd->t_tasks_sg_chained_no);
4198 4055
4199 for_each_sg(cmd->t_tasks_sg_chained, sg, 4056 for_each_sg(cmd->t_tasks_sg_chained, sg,
4200 cmd->t_tasks_sg_chained_no, i) { 4057 cmd->t_tasks_sg_chained_no, i) {
4201 4058
4202 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", 4059 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
4203 i, sg, sg_page(sg), sg->length, sg->offset); 4060 i, sg, sg_page(sg), sg->length, sg->offset);
4204 if (sg_is_chain(sg)) 4061 if (sg_is_chain(sg))
4205 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); 4062 pr_debug("SG: %p sg_is_chain=1\n", sg);
4206 if (sg_is_last(sg)) 4063 if (sg_is_last(sg))
4207 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); 4064 pr_debug("SG: %p sg_is_last=1\n", sg);
4208 } 4065 }
4209} 4066}
4210EXPORT_SYMBOL(transport_do_task_sg_chain); 4067EXPORT_SYMBOL(transport_do_task_sg_chain);
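
The rename to task_sg_nents pairs with the padding logic in transport_allocate_data_tasks() further down: when the fabric supports sg chaining, each task's table is allocated with one spare entry (task_padded_sg), because sg_chain() converts the last slot of the source table into a link rather than a data entry. A minimal sketch of that call:

#include <linux/scatterlist.h>

/* Sketch: chaining two sg tables, as the loop above does per task.
 * first_nents must count the spare slot (cf. task_padded_sg);
 * sg_chain() turns that last slot into a link, not a data entry. */
static void chain_two_tables(struct scatterlist *first,
                             unsigned int first_nents,
                             struct scatterlist *second)
{
        sg_chain(first, first_nents, second);
}
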
@@ -4266,25 +4123,25 @@ static int transport_allocate_data_tasks(
4266 * It's so much easier and only a waste when task_count > 1. 4123 * It's so much easier and only a waste when task_count > 1.
4267 * That is extremely rare. 4124 * That is extremely rare.
4268 */ 4125 */
4269 task->task_sg_num = sgl_nents; 4126 task->task_sg_nents = sgl_nents;
4270 if (cmd->se_tfo->task_sg_chaining) { 4127 if (cmd->se_tfo->task_sg_chaining) {
4271 task->task_sg_num++; 4128 task->task_sg_nents++;
4272 task->task_padded_sg = 1; 4129 task->task_padded_sg = 1;
4273 } 4130 }
4274 4131
4275 task->task_sg = kmalloc(sizeof(struct scatterlist) * \ 4132 task->task_sg = kmalloc(sizeof(struct scatterlist) * \
4276 task->task_sg_num, GFP_KERNEL); 4133 task->task_sg_nents, GFP_KERNEL);
4277 if (!task->task_sg) { 4134 if (!task->task_sg) {
4278 cmd->se_dev->transport->free_task(task); 4135 cmd->se_dev->transport->free_task(task);
4279 return -ENOMEM; 4136 return -ENOMEM;
4280 } 4137 }
4281 4138
4282 sg_init_table(task->task_sg, task->task_sg_num); 4139 sg_init_table(task->task_sg, task->task_sg_nents);
4283 4140
4284 task_size = task->task_size; 4141 task_size = task->task_size;
4285 4142
4286 /* Build new sgl, only up to task_size */ 4143 /* Build new sgl, only up to task_size */
4287 for_each_sg(task->task_sg, sg, task->task_sg_num, count) { 4144 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
4288 if (cmd_sg->length > task_size) 4145 if (cmd_sg->length > task_size)
4289 break; 4146 break;
4290 4147
@@ -4311,6 +4168,7 @@ transport_allocate_control_task(struct se_cmd *cmd)
4311 unsigned char *cdb; 4168 unsigned char *cdb;
4312 struct se_task *task; 4169 struct se_task *task;
4313 unsigned long flags; 4170 unsigned long flags;
4171 int ret = 0;
4314 4172
4315 task = transport_generic_get_task(cmd, cmd->data_direction); 4173 task = transport_generic_get_task(cmd, cmd->data_direction);
4316 if (!task) 4174 if (!task)
@@ -4331,7 +4189,7 @@ transport_allocate_control_task(struct se_cmd *cmd)
4331 memcpy(task->task_sg, cmd->t_data_sg, 4189 memcpy(task->task_sg, cmd->t_data_sg,
4332 sizeof(struct scatterlist) * cmd->t_data_nents); 4190 sizeof(struct scatterlist) * cmd->t_data_nents);
4333 task->task_size = cmd->data_length; 4191 task->task_size = cmd->data_length;
4334 task->task_sg_num = cmd->t_data_nents; 4192 task->task_sg_nents = cmd->t_data_nents;
4335 4193
4336 spin_lock_irqsave(&cmd->t_state_lock, flags); 4194 spin_lock_irqsave(&cmd->t_state_lock, flags);
4337 list_add_tail(&task->t_list, &cmd->t_task_list); 4195 list_add_tail(&task->t_list, &cmd->t_task_list);
@@ -4339,16 +4197,19 @@ transport_allocate_control_task(struct se_cmd *cmd)
4339 4197
4340 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { 4198 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
4341 if (dev->transport->map_task_SG) 4199 if (dev->transport->map_task_SG)
4342 return dev->transport->map_task_SG(task); 4200 ret = dev->transport->map_task_SG(task);
4343 return 0;
4344 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { 4201 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
4345 if (dev->transport->cdb_none) 4202 if (dev->transport->cdb_none)
4346 return dev->transport->cdb_none(task); 4203 ret = dev->transport->cdb_none(task);
4347 return 0;
4348 } else { 4204 } else {
4205 pr_err("target: Unknown control cmd type!\n");
4349 BUG(); 4206 BUG();
4350 return -ENOMEM;
4351 } 4207 }
4208
4209 /* Success! Return number of tasks allocated */
4210 if (ret == 0)
4211 return 1;
4212 return ret;
4352} 4213}
4353 4214
4354static u32 transport_allocate_tasks( 4215static u32 transport_allocate_tasks(
@@ -4358,18 +4219,12 @@ static u32 transport_allocate_tasks(
4358 struct scatterlist *sgl, 4219 struct scatterlist *sgl,
4359 unsigned int sgl_nents) 4220 unsigned int sgl_nents)
4360{ 4221{
4361 int ret; 4222 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
4362
4363 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4364 return transport_allocate_data_tasks(cmd, lba, data_direction, 4223 return transport_allocate_data_tasks(cmd, lba, data_direction,
4365 sgl, sgl_nents); 4224 sgl, sgl_nents);
4366 } else { 4225 else
4367 ret = transport_allocate_control_task(cmd); 4226 return transport_allocate_control_task(cmd);
4368 if (ret < 0) 4227
4369 return ret;
4370 else
4371 return 1;
4372 }
4373} 4228}
4374 4229
4375 4230
@@ -4441,64 +4296,6 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
4441 */ 4296 */
4442void transport_generic_process_write(struct se_cmd *cmd) 4297void transport_generic_process_write(struct se_cmd *cmd)
4443{ 4298{
4444#if 0
4445 /*
4446 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
4447 * original EDTL
4448 */
4449 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
4450 if (!cmd->t_tasks_se_num) {
4451 unsigned char *dst, *buf =
4452 (unsigned char *)cmd->t_task_buf;
4453
4454 dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
4455 if (!(dst)) {
4456 printk(KERN_ERR "Unable to allocate memory for"
4457 " WRITE underflow\n");
4458 transport_generic_request_failure(cmd, NULL,
4459 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4460 return;
4461 }
4462 memcpy(dst, buf, cmd->cmd_spdtl);
4463
4464 kfree(cmd->t_task_buf);
4465 cmd->t_task_buf = dst;
4466 } else {
4467 struct scatterlist *sg =
4468 (struct scatterlist *sg)cmd->t_task_buf;
4469 struct scatterlist *orig_sg;
4470
4471 orig_sg = kzalloc(sizeof(struct scatterlist) *
4472 cmd->t_tasks_se_num,
4473 GFP_KERNEL))) {
4474 if (!(orig_sg)) {
4475 printk(KERN_ERR "Unable to allocate memory"
4476 " for WRITE underflow\n");
4477 transport_generic_request_failure(cmd, NULL,
4478 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4479 return;
4480 }
4481
4482 memcpy(orig_sg, cmd->t_task_buf,
4483 sizeof(struct scatterlist) *
4484 cmd->t_tasks_se_num);
4485
4486 cmd->data_length = cmd->cmd_spdtl;
4487 /*
4488 * FIXME, clear out original struct se_task and state
4489 * information.
4490 */
4491 if (transport_generic_new_cmd(cmd) < 0) {
4492 transport_generic_request_failure(cmd, NULL,
4493 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
4494 kfree(orig_sg);
4495 return;
4496 }
4497
4498 transport_memcpy_write_sg(cmd, orig_sg);
4499 }
4500 }
4501#endif
4502 transport_execute_tasks(cmd); 4299 transport_execute_tasks(cmd);
4503} 4300}
4504EXPORT_SYMBOL(transport_generic_process_write); 4301EXPORT_SYMBOL(transport_generic_process_write);
@@ -4554,7 +4351,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
4554 return PYX_TRANSPORT_WRITE_PENDING; 4351 return PYX_TRANSPORT_WRITE_PENDING;
4555 4352
4556queue_full: 4353queue_full:
4557 printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 4354 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
4558 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 4355 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
4559 transport_handle_queue_full(cmd, cmd->se_dev, 4356 transport_handle_queue_full(cmd, cmd->se_dev,
4560 transport_write_pending_qf); 4357 transport_write_pending_qf);
@@ -4586,7 +4383,7 @@ void transport_generic_free_cmd(
4586 4383
4587 if (cmd->se_lun) { 4384 if (cmd->se_lun) {
4588#if 0 4385#if 0
4589 printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" 4386 pr_debug("cmd: %p ITT: 0x%08x contains"
4590 " cmd->se_lun\n", cmd, 4387 " cmd->se_lun\n", cmd,
4591 cmd->se_tfo->get_task_tag(cmd)); 4388 cmd->se_tfo->get_task_tag(cmd));
4592#endif 4389#endif
@@ -4627,7 +4424,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4627 spin_lock_irqsave(&cmd->t_state_lock, flags); 4424 spin_lock_irqsave(&cmd->t_state_lock, flags);
4628 if (atomic_read(&cmd->t_transport_stop)) { 4425 if (atomic_read(&cmd->t_transport_stop)) {
4629 atomic_set(&cmd->transport_lun_stop, 0); 4426 atomic_set(&cmd->transport_lun_stop, 0);
4630 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" 4427 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4631 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); 4428 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4632 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4429 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4633 transport_cmd_check_stop(cmd, 1, 0); 4430 transport_cmd_check_stop(cmd, 1, 0);
@@ -4640,13 +4437,13 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4640 4437
4641 ret = transport_stop_tasks_for_cmd(cmd); 4438 ret = transport_stop_tasks_for_cmd(cmd);
4642 4439
4643 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" 4440 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4644 " %d\n", cmd, cmd->t_task_cdbs, ret); 4441 " %d\n", cmd, cmd->t_task_list_num, ret);
4645 if (!ret) { 4442 if (!ret) {
4646 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 4443 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4647 cmd->se_tfo->get_task_tag(cmd)); 4444 cmd->se_tfo->get_task_tag(cmd));
4648 wait_for_completion(&cmd->transport_lun_stop_comp); 4445 wait_for_completion(&cmd->transport_lun_stop_comp);
4649 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 4446 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4650 cmd->se_tfo->get_task_tag(cmd)); 4447 cmd->se_tfo->get_task_tag(cmd));
4651 } 4448 }
4652 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); 4449 transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
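
The stop path above is a plain completion handshake: the ConfigFS side blocks on transport_lun_stop_comp until whoever finishes the last task signals it. A sketch of the pairing, with hypothetical names:

#include <linux/completion.h>

/* Sketch: the waiter sleeps until the side that finishes the last
 * task calls complete(). */
static DECLARE_COMPLETION(stop_done);

static void waiter_side(void)
{
        wait_for_completion(&stop_done);  /* sleeps until signalled */
}

static void last_task_side(void)
{
        complete(&stop_done);             /* wakes one waiter */
}
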
@@ -4654,13 +4451,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4654 return 0; 4451 return 0;
4655} 4452}
4656 4453
4657/* #define DEBUG_CLEAR_LUN */
4658#ifdef DEBUG_CLEAR_LUN
4659#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
4660#else
4661#define DEBUG_CLEAR_L(x...)
4662#endif
4663
4664static void __transport_clear_lun_from_sessions(struct se_lun *lun) 4454static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4665{ 4455{
4666 struct se_cmd *cmd = NULL; 4456 struct se_cmd *cmd = NULL;
@@ -4682,7 +4472,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4682 * progress for the iscsi_cmd_t. 4472 * progress for the iscsi_cmd_t.
4683 */ 4473 */
4684 spin_lock(&cmd->t_state_lock); 4474 spin_lock(&cmd->t_state_lock);
4685 DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport" 4475 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4686 "_lun_stop for ITT: 0x%08x\n", 4476 "_lun_stop for ITT: 0x%08x\n",
4687 cmd->se_lun->unpacked_lun, 4477 cmd->se_lun->unpacked_lun,
4688 cmd->se_tfo->get_task_tag(cmd)); 4478 cmd->se_tfo->get_task_tag(cmd));
@@ -4691,8 +4481,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4691 4481
4692 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 4482 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4693 4483
4694 if (!(cmd->se_lun)) { 4484 if (!cmd->se_lun) {
4695 printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", 4485 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4696 cmd->se_tfo->get_task_tag(cmd), 4486 cmd->se_tfo->get_task_tag(cmd),
4697 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 4487 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4698 BUG(); 4488 BUG();
@@ -4701,7 +4491,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4701 * If the Storage engine still owns the iscsi_cmd_t, determine 4491 * If the Storage engine still owns the iscsi_cmd_t, determine
4702 * and/or stop its context. 4492 * and/or stop its context.
4703 */ 4493 */
4704 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" 4494 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4705 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, 4495 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4706 cmd->se_tfo->get_task_tag(cmd)); 4496 cmd->se_tfo->get_task_tag(cmd));
4707 4497
@@ -4710,13 +4500,13 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4710 continue; 4500 continue;
4711 } 4501 }
4712 4502
4713 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" 4503 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4714 "_wait_for_tasks(): SUCCESS\n", 4504 "_wait_for_tasks(): SUCCESS\n",
4715 cmd->se_lun->unpacked_lun, 4505 cmd->se_lun->unpacked_lun,
4716 cmd->se_tfo->get_task_tag(cmd)); 4506 cmd->se_tfo->get_task_tag(cmd));
4717 4507
4718 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4508 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4719 if (!(atomic_read(&cmd->transport_dev_active))) { 4509 if (!atomic_read(&cmd->transport_dev_active)) {
4720 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4510 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4721 goto check_cond; 4511 goto check_cond;
4722 } 4512 }
@@ -4741,7 +4531,7 @@ check_cond:
4741 */ 4531 */
4742 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4532 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4743 if (atomic_read(&cmd->transport_lun_fe_stop)) { 4533 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4744 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" 4534 pr_debug("SE_LUN[%d] - Detected FE stop for"
4745 " struct se_cmd: %p ITT: 0x%08x\n", 4535 " struct se_cmd: %p ITT: 0x%08x\n",
4746 lun->unpacked_lun, 4536 lun->unpacked_lun,
4747 cmd, cmd->se_tfo->get_task_tag(cmd)); 4537 cmd, cmd->se_tfo->get_task_tag(cmd));
@@ -4753,7 +4543,7 @@ check_cond:
4753 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4543 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4754 continue; 4544 continue;
4755 } 4545 }
4756 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 4546 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4757 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 4547 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4758 4548
4759 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4549 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
@@ -4779,7 +4569,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
4779 kt = kthread_run(transport_clear_lun_thread, lun, 4569 kt = kthread_run(transport_clear_lun_thread, lun,
4780 "tcm_cl_%u", lun->unpacked_lun); 4570 "tcm_cl_%u", lun->unpacked_lun);
4781 if (IS_ERR(kt)) { 4571 if (IS_ERR(kt)) {
4782 printk(KERN_ERR "Unable to start clear_lun thread\n"); 4572 pr_err("Unable to start clear_lun thread\n");
4783 return PTR_ERR(kt); 4573 return PTR_ERR(kt);
4784 } 4574 }
4785 wait_for_completion(&lun->lun_shutdown_comp); 4575 wait_for_completion(&lun->lun_shutdown_comp);
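
kthread_run() never returns NULL on failure; it returns an errno encoded with ERR_PTR(), which is why the code above tests IS_ERR() and propagates PTR_ERR(). A minimal sketch of the convention, with an illustrative thread name:

#include <linux/err.h>
#include <linux/kthread.h>

/* Sketch: on failure kthread_run() returns ERR_PTR(-errno). */
static int start_worker(int (*fn)(void *), void *arg)
{
        struct task_struct *kt = kthread_run(fn, arg, "example_worker");

        if (IS_ERR(kt))
                return PTR_ERR(kt);  /* decode the encoded errno */
        return 0;
}
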
@@ -4812,7 +4602,7 @@ static void transport_generic_wait_for_tasks(
4812 */ 4602 */
4813 if (atomic_read(&cmd->transport_lun_stop)) { 4603 if (atomic_read(&cmd->transport_lun_stop)) {
4814 4604
4815 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" 4605 pr_debug("wait_for_tasks: Stopping"
4816 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 4606 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
4817 "_stop_comp); for ITT: 0x%08x\n", 4607 "_stop_comp); for ITT: 0x%08x\n",
4818 cmd->se_tfo->get_task_tag(cmd)); 4608 cmd->se_tfo->get_task_tag(cmd));
@@ -4834,7 +4624,7 @@ static void transport_generic_wait_for_tasks(
4834 * struct se_cmd, now owns the structure and can be released through 4624 * struct se_cmd, now owns the structure and can be released through
4835 * normal means below. 4625 * normal means below.
4836 */ 4626 */
4837 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" 4627 pr_debug("wait_for_tasks: Stopped"
4838 " wait_for_completion(&cmd->t_tasktransport_lun_fe_" 4628 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4839 "stop_comp); for ITT: 0x%08x\n", 4629 "stop_comp); for ITT: 0x%08x\n",
4840 cmd->se_tfo->get_task_tag(cmd)); 4630 cmd->se_tfo->get_task_tag(cmd));
@@ -4847,7 +4637,7 @@ static void transport_generic_wait_for_tasks(
4847 4637
4848 atomic_set(&cmd->t_transport_stop, 1); 4638 atomic_set(&cmd->t_transport_stop, 1);
4849 4639
4850 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" 4640 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4851 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" 4641 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
4852 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), 4642 " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
4853 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 4643 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
@@ -4863,7 +4653,7 @@ static void transport_generic_wait_for_tasks(
4863 atomic_set(&cmd->t_transport_active, 0); 4653 atomic_set(&cmd->t_transport_active, 0);
4864 atomic_set(&cmd->t_transport_stop, 0); 4654 atomic_set(&cmd->t_transport_stop, 0);
4865 4655
4866 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" 4656 pr_debug("wait_for_tasks: Stopped wait_for_compltion("
4867 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 4657 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4868 cmd->se_tfo->get_task_tag(cmd)); 4658 cmd->se_tfo->get_task_tag(cmd));
4869remove: 4659remove:
@@ -5071,11 +4861,11 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
5071 int ret = 0; 4861 int ret = 0;
5072 4862
5073 if (atomic_read(&cmd->t_transport_aborted) != 0) { 4863 if (atomic_read(&cmd->t_transport_aborted) != 0) {
5074 if (!(send_status) || 4864 if (!send_status ||
5075 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 4865 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
5076 return 1; 4866 return 1;
5077#if 0 4867#if 0
5078 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" 4868 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
5079 " status for CDB: 0x%02x ITT: 0x%08x\n", 4869 " status for CDB: 0x%02x ITT: 0x%08x\n",
5080 cmd->t_task_cdb[0], 4870 cmd->t_task_cdb[0],
5081 cmd->se_tfo->get_task_tag(cmd)); 4871 cmd->se_tfo->get_task_tag(cmd));
@@ -5107,7 +4897,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
5107 } 4897 }
5108 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 4898 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5109#if 0 4899#if 0
5110 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 4900 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
5111 " ITT: 0x%08x\n", cmd->t_task_cdb[0], 4901 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
5112 cmd->se_tfo->get_task_tag(cmd)); 4902 cmd->se_tfo->get_task_tag(cmd));
5113#endif 4903#endif
@@ -5145,7 +4935,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
5145 tmr->response = TMR_FUNCTION_REJECTED; 4935 tmr->response = TMR_FUNCTION_REJECTED;
5146 break; 4936 break;
5147 default: 4937 default:
5148 printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", 4938 pr_err("Uknown TMR function: 0x%02x.\n",
5149 tmr->function); 4939 tmr->function);
5150 tmr->response = TMR_FUNCTION_REJECTED; 4940 tmr->response = TMR_FUNCTION_REJECTED;
5151 break; 4941 break;
@@ -5190,7 +4980,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5190 spin_lock_irqsave(&dev->execute_task_lock, flags); 4980 spin_lock_irqsave(&dev->execute_task_lock, flags);
5191 while ((task = transport_get_task_from_state_list(dev))) { 4981 while ((task = transport_get_task_from_state_list(dev))) {
5192 if (!task->task_se_cmd) { 4982 if (!task->task_se_cmd) {
5193 printk(KERN_ERR "task->task_se_cmd is NULL!\n"); 4983 pr_err("task->task_se_cmd is NULL!\n");
5194 continue; 4984 continue;
5195 } 4985 }
5196 cmd = task->task_se_cmd; 4986 cmd = task->task_se_cmd;
@@ -5199,18 +4989,18 @@ static void transport_processing_shutdown(struct se_device *dev)
5199 4989
5200 spin_lock_irqsave(&cmd->t_state_lock, flags); 4990 spin_lock_irqsave(&cmd->t_state_lock, flags);
5201 4991
5202 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," 4992 pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
5203 " i_state/def_i_state: %d/%d, t_state/def_t_state:" 4993 " i_state: %d, t_state/def_t_state:"
5204 " %d/%d cdb: 0x%02x\n", cmd, task, 4994 " %d/%d cdb: 0x%02x\n", cmd, task,
5205 cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, 4995 cmd->se_tfo->get_task_tag(cmd),
5206 cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, 4996 cmd->se_tfo->get_cmd_state(cmd),
5207 cmd->t_state, cmd->deferred_t_state, 4997 cmd->t_state, cmd->deferred_t_state,
5208 cmd->t_task_cdb[0]); 4998 cmd->t_task_cdb[0]);
5209 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" 4999 pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
5210 " %d t_task_cdbs_sent: %d -- t_transport_active: %d" 5000 " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5211 " t_transport_stop: %d t_transport_sent: %d\n", 5001 " t_transport_stop: %d t_transport_sent: %d\n",
5212 cmd->se_tfo->get_task_tag(cmd), 5002 cmd->se_tfo->get_task_tag(cmd),
5213 cmd->t_task_cdbs, 5003 cmd->t_task_list_num,
5214 atomic_read(&cmd->t_task_cdbs_left), 5004 atomic_read(&cmd->t_task_cdbs_left),
5215 atomic_read(&cmd->t_task_cdbs_sent), 5005 atomic_read(&cmd->t_task_cdbs_sent),
5216 atomic_read(&cmd->t_transport_active), 5006 atomic_read(&cmd->t_transport_active),
@@ -5222,10 +5012,10 @@ static void transport_processing_shutdown(struct se_device *dev)
5222 spin_unlock_irqrestore( 5012 spin_unlock_irqrestore(
5223 &cmd->t_state_lock, flags); 5013 &cmd->t_state_lock, flags);
5224 5014
5225 DEBUG_DO("Waiting for task: %p to shutdown for dev:" 5015 pr_debug("Waiting for task: %p to shutdown for dev:"
5226 " %p\n", task, dev); 5016 " %p\n", task, dev);
5227 wait_for_completion(&task->task_stop_comp); 5017 wait_for_completion(&task->task_stop_comp);
5228 DEBUG_DO("Completed task: %p shutdown for dev: %p\n", 5018 pr_debug("Completed task: %p shutdown for dev: %p\n",
5229 task, dev); 5019 task, dev);
5230 5020
5231 spin_lock_irqsave(&cmd->t_state_lock, flags); 5021 spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -5239,11 +5029,11 @@ static void transport_processing_shutdown(struct se_device *dev)
5239 } 5029 }
5240 __transport_stop_task_timer(task, &flags); 5030 __transport_stop_task_timer(task, &flags);
5241 5031
5242 if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { 5032 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
5243 spin_unlock_irqrestore( 5033 spin_unlock_irqrestore(
5244 &cmd->t_state_lock, flags); 5034 &cmd->t_state_lock, flags);
5245 5035
5246 DEBUG_DO("Skipping task: %p, dev: %p for" 5036 pr_debug("Skipping task: %p, dev: %p for"
5247 " t_task_cdbs_ex_left: %d\n", task, dev, 5037 " t_task_cdbs_ex_left: %d\n", task, dev,
5248 atomic_read(&cmd->t_task_cdbs_ex_left)); 5038 atomic_read(&cmd->t_task_cdbs_ex_left));
5249 5039
@@ -5252,7 +5042,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5252 } 5042 }
5253 5043
5254 if (atomic_read(&cmd->t_transport_active)) { 5044 if (atomic_read(&cmd->t_transport_active)) {
5255 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" 5045 pr_debug("got t_transport_active = 1 for task: %p, dev:"
5256 " %p\n", task, dev); 5046 " %p\n", task, dev);
5257 5047
5258 if (atomic_read(&cmd->t_fe_count)) { 5048 if (atomic_read(&cmd->t_fe_count)) {
@@ -5282,7 +5072,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5282 spin_lock_irqsave(&dev->execute_task_lock, flags); 5072 spin_lock_irqsave(&dev->execute_task_lock, flags);
5283 continue; 5073 continue;
5284 } 5074 }
5285 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", 5075 pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
5286 task, dev); 5076 task, dev);
5287 5077
5288 if (atomic_read(&cmd->t_fe_count)) { 5078 if (atomic_read(&cmd->t_fe_count)) {
@@ -5315,7 +5105,7 @@ static void transport_processing_shutdown(struct se_device *dev)
5315 */ 5105 */
5316 while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { 5106 while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
5317 5107
5318 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", 5108 pr_debug("From Device Queue: cmd: %p t_state: %d\n",
5319 cmd, cmd->t_state); 5109 cmd, cmd->t_state);
5320 5110
5321 if (atomic_read(&cmd->t_fe_count)) { 5111 if (atomic_read(&cmd->t_fe_count)) {
@@ -5368,8 +5158,8 @@ get_cmd:
5368 5158
5369 switch (cmd->t_state) { 5159 switch (cmd->t_state) {
5370 case TRANSPORT_NEW_CMD_MAP: 5160 case TRANSPORT_NEW_CMD_MAP:
5371 if (!(cmd->se_tfo->new_cmd_map)) { 5161 if (!cmd->se_tfo->new_cmd_map) {
5372 printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" 5162 pr_err("cmd->se_tfo->new_cmd_map is"
5373 " NULL for TRANSPORT_NEW_CMD_MAP\n"); 5163 " NULL for TRANSPORT_NEW_CMD_MAP\n");
5374 BUG(); 5164 BUG();
5375 } 5165 }
@@ -5420,7 +5210,7 @@ get_cmd:
5420 transport_generic_write_pending(cmd); 5210 transport_generic_write_pending(cmd);
5421 break; 5211 break;
5422 default: 5212 default:
5423 printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" 5213 pr_err("Unknown t_state: %d deferred_t_state:"
5424 " %d for ITT: 0x%08x i_state: %d on SE LUN:" 5214 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
5425 " %u\n", cmd->t_state, cmd->deferred_t_state, 5215 " %u\n", cmd->t_state, cmd->deferred_t_state,
5426 cmd->se_tfo->get_task_tag(cmd), 5216 cmd->se_tfo->get_task_tag(cmd),