Diffstat (limited to 'drivers/target/tcm_fc/tfc_cmd.c')
-rw-r--r--	drivers/target/tcm_fc/tfc_cmd.c	696
1 file changed, 696 insertions, 0 deletions
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 000000000000..49e51778f733
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,696 @@
/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

/*
 * Dump cmd state for debugging.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct se_mem *mem;
	struct se_transport_task *task;

	if (!(ft_debug_logging & FT_DEBUG_IO))
		return;

	se_cmd = &cmd->se_cmd;
	printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
	printk(KERN_INFO "%s: cmd %p cdb %p\n",
		caller, cmd, cmd->cdb);
	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

	task = T_TASK(se_cmd);
	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
	       caller, cmd, task, task->t_tasks_se_num,
	       task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
	if (task->t_mem_list)
		list_for_each_entry(mem, task->t_mem_list, se_list)
			printk(KERN_INFO "%s: cmd %p mem %p page %p "
			       "len 0x%x off 0x%x\n",
			       caller, cmd, mem,
			       mem->se_page, mem->se_len, mem->se_off);
	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		printk(KERN_INFO "%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			sp->id, ep->esb_stat);
	}
	print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
		       16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}

/*
 * Get the LUN from the FCP command's LUN field and look it up for this
 * command.  Only the peripheral-device (00b) and flat-space (01b) SAM
 * addressing methods, carried in the top two bits of the first LUN
 * byte, are supported.
 */
static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
{
	u64 lun;

	lun = lunp[1];
	switch (lunp[0] >> 6) {
	case 0:
		break;
	case 1:
		lun |= (lunp[0] & 0x3f) << 8;
		break;
	default:
		return -1;
	}
	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -1;
	cmd->lun = lun;
	return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
}

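/*
 * Queue a command for the per-tpg processing thread.
 * Called from the frame receive path; the real work is done later
 * by ft_thread().
 */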
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
	struct se_queue_obj *qobj;
	unsigned long flags;

	qobj = &sess->tport->tpg->qobj;
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
}

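/*
 * Dequeue the next queued command, or return NULL if the queue is empty.
 */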
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
{
	unsigned long flags;
	struct se_queue_req *qr;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	return container_of(qr, struct ft_cmd, se_req);
}

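/*
 * Free a command and its associated state: release the exchange
 * sequence, free the received frame, and drop the session reference
 * taken at lookup time.
 */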
static void ft_free_cmd(struct ft_cmd *cmd)
{
	struct fc_frame *fp;
	struct fc_lport *lport;

	if (!cmd)
		return;
	fp = cmd->req_frame;
	lport = fr_dev(fp);
	if (fr_seq(fp))
		lport->tt.seq_release(fr_seq(fp));
	fc_frame_free(fp);
	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
	kfree(cmd);
}

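/*
 * Fabric callback from the target core when the se_cmd is released.
 */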
void ft_release_cmd(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	ft_free_cmd(cmd);
}

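/*
 * Fabric callback at command completion; tcm_fc keeps no extra
 * reference, so the command is freed immediately.
 */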
void ft_check_stop_free(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0, 1, 0);
}

/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		/* XXX shouldn't just drop it - requeue and retry? */
		return 0;
	}
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}

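/*
 * Nonzero while write data is still outstanding from the initiator.
 */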
int ft_write_pending_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->write_data_len != se_cmd->data_length;
}

/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is the exchange responder, sending XFER_READY
		 * to the exchange initiator.
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * Map the se_mem list to a scatterlist so
				 * that DDP can be set up; the DDP setup
				 * function requires a scatterlist, while
				 * se_mem_list is internal to the TCM/LIO
				 * target.
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
				cmd->sg_cnt =
					T_TASK(se_cmd)->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
							   cmd->sg, cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}

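/*
 * Fabric callbacks reporting per-command identifiers and state;
 * the exchange RX_ID serves as the task tag.
 */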
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return fc_seq_exch(cmd->seq)->rxid;
}

int ft_get_cmd_state(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->state;
}

int ft_is_state_remove(struct se_cmd *se_cmd)
{
	return 0;	/* XXX TBD */
}

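/*
 * Fabric callback when command setup fails before the command is
 * handed to the target core.
 */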
void ft_new_cmd_failure(struct se_cmd *se_cmd)
{
	/* XXX TBD */
	printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
}

/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct ft_cmd *cmd = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/* XXX need to find cmd if queued */
		cmd->se_cmd.t_state = TRANSPORT_REMOVE;
		cmd->seq = NULL;
		transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
		return;
	}

	fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_SOL_DATA:	/* write data */
		ft_recv_write_data(cmd, fp);
		break;
	case FC_RCTL_DD_UNSOL_CTL:	/* command */
	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	default:
		printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
		       __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
		break;
	}
}

/*
 * Send a FCP response including SCSI status and optional FCP rsp_code.
 * The rsp_code is valid only when status is SAM_STAT_GOOD (zero).
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
				const struct fc_frame *rx_fp,
				u32 status, enum fcp_resp_rsp_codes code)
{
	struct fc_frame *fp;
	struct fc_seq *sp;
	const struct fc_frame_header *fh;
	size_t len;
	struct fcp_resp_with_ext *fcp;
	struct fcp_resp_rsp_info *info;

	fh = fc_frame_header_get(rx_fp);
	FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
	len = sizeof(*fcp);
	if (status == SAM_STAT_GOOD)
		len += sizeof(*info);
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return;
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = status;
	if (status == SAM_STAT_GOOD) {
		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
		info = (struct fcp_resp_rsp_info *)(fcp + 1);
		info->rsp_code = code;
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
	sp = fr_seq(fp);
	if (sp)
		lport->tt.seq_send(lport, sp, fp);
	else
		lport->tt.frame_send(lport, fp);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
{
	ft_send_resp_status(cmd->sess->tport->lport,
			    cmd->req_frame, SAM_STAT_GOOD, code);
	ft_free_cmd(cmd);
}

/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct se_tmr_req *tmr;
	struct fcp_cmnd *fcp;
	u8 tm_func;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
			ft_dump_cmd(cmd, __func__);
			transport_send_check_condition_and_sense(&cmd->se_cmd,
				cmd->se_cmd.scsi_sense_reason, 0);
			ft_sess_put(cmd->sess);
			return;
		}
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP-4 r01 indicates that setting a combination of
		 * tm_flags is invalid.
		 */
		FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
	if (!tmr) {
		FT_TM_DBG("alloc failed\n");
		ft_send_resp_code(cmd, FCP_TMF_FAILED);
		return;
	}
	cmd->se_cmd.se_tmr_req = tmr;
	transport_generic_handle_tmr(&cmd->se_cmd);
}

/*
 * Send status from completed task management request.
 */
int ft_queue_tm_resp(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
	enum fcp_resp_rsp_codes code;

	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		code = FCP_TMF_CMPL;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		code = FCP_TMF_INVALID_LUN;
		break;
	case TMR_FUNCTION_REJECTED:
		code = FCP_TMF_REJECTED;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
	case TMR_TASK_STILL_ALLEGIANT:
	case TMR_TASK_FAILOVER_NOT_SUPPORTED:
	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
	case TMR_FUNCTION_AUTHORIZATION_FAILED:
	default:
		code = FCP_TMF_FAILED;
		break;
	}
	FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
		  tmr->function, tmr->response, code);
	ft_send_resp_code(cmd, code);
	return 0;
}

/*
 * Handle incoming FCP command.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
	struct ft_cmd *cmd;
	struct fc_lport *lport = sess->tport->lport;

	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
	if (!cmd)
		goto busy;
	cmd->sess = sess;
	cmd->seq = lport->tt.seq_assign(lport, fp);
	if (!cmd->seq) {
		kfree(cmd);
		goto busy;
	}
	cmd->req_frame = fp;		/* hold frame during cmd */
	ft_queue_cmd(sess, cmd);
	return;

busy:
	FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
	fc_frame_free(fp);
	ft_sess_put(sess);		/* undo get from lookup */
}

/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_UNSOL_CMD:	/* command */
		ft_recv_cmd(sess, fp);
		break;
	case FC_RCTL_DD_SOL_DATA:	/* write data */
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
	default:
		printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
		       __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		ft_sess_put(sess);	/* undo get from lookup */
		break;
	}
}

/*
 * Send new command to target.
 */
static void ft_send_cmd(struct ft_cmd *cmd)
{
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct se_cmd *se_cmd;
	struct fcp_cmnd *fcp;
	int data_dir;
	u32 data_len;
	int task_attr;
	int ret;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;	/* not handling longer CDBs yet */

	if (fcp->fc_tm_flags) {
		task_attr = FCP_PTA_SIMPLE;
		data_dir = DMA_NONE;
		data_len = 0;
	} else {
		switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
		case 0:
			data_dir = DMA_NONE;
			break;
		case FCP_CFL_RDDATA:
			data_dir = DMA_FROM_DEVICE;
			break;
		case FCP_CFL_WRDATA:
			data_dir = DMA_TO_DEVICE;
			break;
		case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
			goto err;	/* TBD not supported by tcm_fc yet */
		}

		/* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
		task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
		data_len = ntohl(fcp->fc_dl);
		cmd->cdb = fcp->fc_cdb;
	}

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod
	 * infrastructure
	 */
	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
			      data_len, data_dir, task_attr,
			      &cmd->ft_sense_buffer[0]);
	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
	if (ret < 0) {
		ft_dump_cmd(cmd, __func__);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
			cmd->se_cmd.scsi_sense_reason, 0);
		return;
	}

	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

	FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
	ft_dump_cmd(cmd, __func__);

	if (ret == -1) {
		transport_send_check_condition_and_sense(se_cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0, 1, 0);
		return;
	}
	if (ret == -2) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			ft_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0, 1, 0);
		return;
	}
	transport_generic_handle_cdb(se_cmd);
	return;

err:
	ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
	return;
}

/*
 * Handle request in the command thread.
 */
static void ft_exec_req(struct ft_cmd *cmd)
{
	FT_IO_DBG("cmd state %x\n", cmd->state);
	switch (cmd->state) {
	case FC_CMD_ST_NEW:
		ft_send_cmd(cmd);
		break;
	default:
		break;
	}
}

/*
 * Processing thread.
 * Currently one thread per tpg.
 */
int ft_thread(void *arg)
{
	struct ft_tpg *tpg = arg;
	struct se_queue_obj *qobj = &tpg->qobj;
	struct ft_cmd *cmd;
	int ret;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(qobj->thread_wq,
			atomic_read(&qobj->queue_cnt) || kthread_should_stop());
		if (ret < 0 || kthread_should_stop())
			goto out;
		cmd = ft_dequeue_cmd(qobj);
		if (cmd)
			ft_exec_req(cmd);
	}

out:
	return 0;
}