path: root/drivers/target/tcm_fc
Diffstat (limited to 'drivers/target/tcm_fc')
-rw-r--r--drivers/target/tcm_fc/Kconfig5
-rw-r--r--drivers/target/tcm_fc/Makefile15
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h215
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c716
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c669
-rw-r--r--drivers/target/tcm_fc/tfc_io.c374
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c541
7 files changed, 2535 insertions, 0 deletions
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 000000000000..40caf458e89e
--- /dev/null
+++ b/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
1config TCM_FC
2 tristate "TCM_FC fabric Plugin"
3 depends on LIBFC
4 help
5 Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
new file mode 100644
index 000000000000..7a5c2b64cf65
--- /dev/null
+++ b/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,15 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
2 -I$(srctree)/drivers/scsi/ \
3 -I$(srctree)/include/scsi/ \
4 -I$(srctree)/drivers/target/tcm_fc/
5
6tcm_fc-y += tfc_cmd.o \
7 tfc_conf.o \
8 tfc_io.o \
9 tfc_sess.o
10
11obj-$(CONFIG_TCM_FC) += tcm_fc.o
12
13ifdef CONFIGFS_TCM_FC_DEBUG
14EXTRA_CFLAGS += -DTCM_FC_DEBUG
15endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 000000000000..7b82f1b7fef8
--- /dev/null
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __TCM_FC_H__
18#define __TCM_FC_H__
19
20#define FT_VERSION "0.3"
21
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25
26/*
27 * Debug options.
28 */
29#define FT_DEBUG_CONF 0x01 /* configuration messages */
30#define FT_DEBUG_SESS 0x02 /* session messages */
31#define FT_DEBUG_TM 0x04 /* TM operations */
32#define FT_DEBUG_IO 0x08 /* I/O commands */
33#define FT_DEBUG_DATA 0x10 /* Data transfer */
34
35extern unsigned int ft_debug_logging; /* debug options */
36
37#define FT_DEBUG(mask, fmt, args...) \
38 do { \
39 if (ft_debug_logging & (mask)) \
40 printk(KERN_INFO "tcm_fc: %s: " fmt, \
41 __func__, ##args); \
42 } while (0)
43
44#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
45#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
46#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
47#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
48#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
49
50struct ft_transport_id {
51 __u8 format;
52 __u8 __resvd1[7];
53 __u8 wwpn[8];
54 __u8 __resvd2[8];
55} __attribute__((__packed__));
56
57/*
58 * Session (remote port).
59 */
60struct ft_sess {
61 u32 port_id; /* for hash lookup use only */
62 u32 params;
63 u16 max_frame; /* maximum frame size */
64 u64 port_name; /* port name for transport ID */
65 struct ft_tport *tport;
66 struct se_session *se_sess;
67 struct hlist_node hash; /* linkage in ft_sess_hash table */
68 struct rcu_head rcu;
69 struct kref kref; /* ref for hash and outstanding I/Os */
70};
71
72/*
73 * Hash table of sessions per local port.
74 * Hash lookup by remote port FC_ID.
75 */
76#define FT_SESS_HASH_BITS 6
77#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
78
79/*
80 * Per local port data.
81 * This is created only after a TPG exists that allows target function
82 * for the local port. If the TPG exists, this is allocated when
83 * we're notified that the local port has been created, or when
84 * the first PRLI provider callback is received.
85 */
86struct ft_tport {
87 struct fc_lport *lport;
88 struct ft_tpg *tpg; /* NULL if TPG deleted before tport */
89 u32 sess_count; /* number of sessions in hash */
90 struct rcu_head rcu;
91 struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
92};
93
94/*
95 * Node ID and authentication.
96 */
97struct ft_node_auth {
98 u64 port_name;
99 u64 node_name;
100};
101
102/*
103 * Node ACL for FC remote port session.
104 */
105struct ft_node_acl {
106 struct ft_node_auth node_auth;
107 struct se_node_acl se_node_acl;
108};
109
110struct ft_lun {
111 u32 index;
112 char name[FT_LUN_NAMELEN];
113};
114
115/*
116 * Target portal group (local port).
117 */
118struct ft_tpg {
119 u32 index;
120 struct ft_lport_acl *lport_acl;
121 struct ft_tport *tport; /* active tport or NULL */
122 struct list_head list; /* linkage in ft_lport_acl tpg_list */
123 struct list_head lun_list; /* head of LUNs */
124 struct se_portal_group se_tpg;
125 struct task_struct *thread; /* processing thread */
126 struct se_queue_obj qobj; /* queue for processing thread */
127};
128
129struct ft_lport_acl {
130 u64 wwpn;
131 char name[FT_NAMELEN];
132 struct list_head list;
133 struct list_head tpg_list;
134 struct se_wwn fc_lport_wwn;
135};
136
137enum ft_cmd_state {
138 FC_CMD_ST_NEW = 0,
139 FC_CMD_ST_REJ
140};
141
142/*
143 * Commands
144 */
145struct ft_cmd {
146 enum ft_cmd_state state;
147 u32 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
151 struct fc_frame *req_frame;
152 unsigned char *cdb; /* pointer to CDB inside frame */
153 u32 write_data_len; /* data received on writes */
154 struct se_queue_req se_req;
155 /* Local sense buffer */
156 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
157 u32 was_ddp_setup:1; /* Set only if ddp is setup */
158 struct scatterlist *sg; /* Set only if DDP is setup */
159 u32 sg_cnt; /* No. of item in scatterlist */
160};
161
162extern struct list_head ft_lport_list;
163extern struct mutex ft_lport_lock;
164extern struct fc4_prov ft_prov;
165extern struct target_fabric_configfs *ft_configfs;
166
167/*
168 * Fabric methods.
169 */
170
171/*
172 * Session ops.
173 */
174void ft_sess_put(struct ft_sess *);
175int ft_sess_shutdown(struct se_session *);
176void ft_sess_close(struct se_session *);
177void ft_sess_stop(struct se_session *, int, int);
178int ft_sess_logged_in(struct se_session *);
179u32 ft_sess_get_index(struct se_session *);
180u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
181void ft_sess_set_erl0(struct se_session *);
182
183void ft_lport_add(struct fc_lport *, void *);
184void ft_lport_del(struct fc_lport *, void *);
185int ft_lport_notify(struct notifier_block *, unsigned long, void *);
186
187/*
188 * IO methods.
189 */
190void ft_check_stop_free(struct se_cmd *);
191void ft_release_cmd(struct se_cmd *);
192int ft_queue_status(struct se_cmd *);
193int ft_queue_data_in(struct se_cmd *);
194int ft_write_pending(struct se_cmd *);
195int ft_write_pending_status(struct se_cmd *);
196u32 ft_get_task_tag(struct se_cmd *);
197int ft_get_cmd_state(struct se_cmd *);
198void ft_new_cmd_failure(struct se_cmd *);
199int ft_queue_tm_resp(struct se_cmd *);
200int ft_is_state_remove(struct se_cmd *);
201
202/*
203 * other internal functions.
204 */
205int ft_thread(void *);
206void ft_recv_req(struct ft_sess *, struct fc_frame *);
207struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
208struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
209
210void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
211void ft_dump_cmd(struct ft_cmd *, const char *caller);
212
213ssize_t ft_format_wwn(char *, size_t, u64);
214
215#endif /* __TCM_FC_H__ */
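
The header above funnels all driver logging through a single run-time mask, ft_debug_logging, tested by FT_DEBUG(); tfc_conf.c further down registers that mask as the writable debug_logging module parameter, so it should also be adjustable at run time through /sys/module/tcm_fc/parameters/debug_logging. Below is a minimal, self-contained userspace sketch of the same pattern, with printf standing in for printk; the bit values are copied from the header, everything else is illustrative only.

#include <stdio.h>

/* Class bits copied from tcm_fc.h (only a subset is exercised here). */
#define FT_DEBUG_CONF	0x01	/* configuration messages */
#define FT_DEBUG_SESS	0x02	/* session messages */
#define FT_DEBUG_IO	0x08	/* I/O commands */

/* Run-time mask; the kernel module exposes it as 'debug_logging'. */
static unsigned int ft_debug_logging = FT_DEBUG_CONF | FT_DEBUG_IO;

/* printf stand-in for the kernel FT_DEBUG() macro: emit a message only
 * when its class bit is set in the run-time mask. */
#define FT_DEBUG(mask, fmt, args...) \
	do { \
		if (ft_debug_logging & (mask)) \
			printf("tcm_fc: %s: " fmt, __func__, ##args); \
	} while (0)

#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
#define FT_IO_DBG(fmt, args...)   FT_DEBUG(FT_DEBUG_IO, fmt, ##args)

int main(void)
{
	FT_CONF_DBG("printed - conf bit 0x%x is set\n", FT_DEBUG_CONF);
	FT_SESS_DBG("suppressed - sess bit is clear\n");
	FT_IO_DBG("printed - io bit 0x%x is set\n", FT_DEBUG_IO);
	return 0;
}
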
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 000000000000..b2a106729d49
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,716 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <asm/unaligned.h>
34#include <scsi/scsi.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_tcq.h>
39#include <scsi/libfc.h>
40#include <scsi/fc_encode.h>
41
42#include <target/target_core_base.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_device.h>
46#include <target/target_core_tpg.h>
47#include <target/target_core_configfs.h>
48#include <target/target_core_base.h>
49#include <target/target_core_tmr.h>
50#include <target/configfs_macros.h>
51
52#include "tcm_fc.h"
53
54/*
55 * Dump cmd state for debugging.
56 */
57void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
58{
59 struct fc_exch *ep;
60 struct fc_seq *sp;
61 struct se_cmd *se_cmd;
62 struct se_mem *mem;
63 struct se_transport_task *task;
64
65 if (!(ft_debug_logging & FT_DEBUG_IO))
66 return;
67
68 se_cmd = &cmd->se_cmd;
69 printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
70 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
71 printk(KERN_INFO "%s: cmd %p cdb %p\n",
72 caller, cmd, cmd->cdb);
73 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
74
75 task = T_TASK(se_cmd);
76 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
77 caller, cmd, task, task->t_tasks_se_num,
78 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
79 if (task->t_mem_list)
80 list_for_each_entry(mem, task->t_mem_list, se_list)
81 printk(KERN_INFO "%s: cmd %p mem %p page %p "
82 "len 0x%x off 0x%x\n",
83 caller, cmd, mem,
84 mem->se_page, mem->se_len, mem->se_off);
85 sp = cmd->seq;
86 if (sp) {
87 ep = fc_seq_exch(sp);
88 printk(KERN_INFO "%s: cmd %p sid %x did %x "
89 "ox_id %x rx_id %x seq_id %x e_stat %x\n",
90 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
91 sp->id, ep->esb_stat);
92 }
93 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
94 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
95}
96
97static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
98{
99 struct se_queue_obj *qobj;
100 unsigned long flags;
101
102 qobj = &sess->tport->tpg->qobj;
103 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
104 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
105 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
106 atomic_inc(&qobj->queue_cnt);
107 wake_up_interruptible(&qobj->thread_wq);
108}
109
110static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
111{
112 unsigned long flags;
113 struct se_queue_req *qr;
114
115 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
116 if (list_empty(&qobj->qobj_list)) {
117 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
118 return NULL;
119 }
120 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
121 list_del(&qr->qr_list);
122 atomic_dec(&qobj->queue_cnt);
123 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
124 return container_of(qr, struct ft_cmd, se_req);
125}
126
127static void ft_free_cmd(struct ft_cmd *cmd)
128{
129 struct fc_frame *fp;
130 struct fc_lport *lport;
131
132 if (!cmd)
133 return;
134 fp = cmd->req_frame;
135 lport = fr_dev(fp);
136 if (fr_seq(fp))
137 lport->tt.seq_release(fr_seq(fp));
138 fc_frame_free(fp);
139 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
140 kfree(cmd);
141}
142
143void ft_release_cmd(struct se_cmd *se_cmd)
144{
145 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
146
147 ft_free_cmd(cmd);
148}
149
150void ft_check_stop_free(struct se_cmd *se_cmd)
151{
152 transport_generic_free_cmd(se_cmd, 0, 1, 0);
153}
154
155/*
156 * Send response.
157 */
158int ft_queue_status(struct se_cmd *se_cmd)
159{
160 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
161 struct fc_frame *fp;
162 struct fcp_resp_with_ext *fcp;
163 struct fc_lport *lport;
164 struct fc_exch *ep;
165 size_t len;
166
167 ft_dump_cmd(cmd, __func__);
168 ep = fc_seq_exch(cmd->seq);
169 lport = ep->lp;
170 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
171 fp = fc_frame_alloc(lport, len);
172 if (!fp) {
173 /* XXX shouldn't just drop it - requeue and retry? */
174 return 0;
175 }
176 fcp = fc_frame_payload_get(fp, len);
177 memset(fcp, 0, len);
178 fcp->resp.fr_status = se_cmd->scsi_status;
179
180 len = se_cmd->scsi_sense_length;
181 if (len) {
182 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
183 fcp->ext.fr_sns_len = htonl(len);
184 memcpy((fcp + 1), se_cmd->sense_buffer, len);
185 }
186
187 /*
188 * Test underflow and overflow with one mask. Usually both are off.
189 * Bidirectional commands are not handled yet.
190 */
191 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
192 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
193 fcp->resp.fr_flags |= FCP_RESID_OVER;
194 else
195 fcp->resp.fr_flags |= FCP_RESID_UNDER;
196 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
197 }
198
199 /*
200 * Send response.
201 */
202 cmd->seq = lport->tt.seq_start_next(cmd->seq);
203 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
204 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
205
206 lport->tt.seq_send(lport, cmd->seq, fp);
207 lport->tt.exch_done(cmd->seq);
208 return 0;
209}
210
211int ft_write_pending_status(struct se_cmd *se_cmd)
212{
213 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
214
215 return cmd->write_data_len != se_cmd->data_length;
216}
217
218/*
219 * Send TX_RDY (transfer ready).
220 */
221int ft_write_pending(struct se_cmd *se_cmd)
222{
223 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
224 struct fc_frame *fp;
225 struct fcp_txrdy *txrdy;
226 struct fc_lport *lport;
227 struct fc_exch *ep;
228 struct fc_frame_header *fh;
229 u32 f_ctl;
230
231 ft_dump_cmd(cmd, __func__);
232
233 ep = fc_seq_exch(cmd->seq);
234 lport = ep->lp;
235 fp = fc_frame_alloc(lport, sizeof(*txrdy));
236 if (!fp)
237 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
238
239 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
240 memset(txrdy, 0, sizeof(*txrdy));
241 txrdy->ft_burst_len = htonl(se_cmd->data_length);
242
243 cmd->seq = lport->tt.seq_start_next(cmd->seq);
244 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
245 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
246
247 fh = fc_frame_header_get(fp);
248 f_ctl = ntoh24(fh->fh_f_ctl);
249
250 /* Only if it is 'Exchange Responder' */
251 if (f_ctl & FC_FC_EX_CTX) {
252 /* The target is the 'exchange responder' sending XFER_READY
253 * to the 'exchange initiator'.
254 */
255 if ((ep->xid <= lport->lro_xid) &&
256 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
257 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
258 /*
259 * Map the se_mem list to a scatterlist so that DDP
260 * can be set up; the DDP setup function requires a
261 * scatterlist, and the se_mem list is internal to
262 * the TCM/LIO target.
263 */
264 transport_do_task_sg_chain(se_cmd);
265 cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
266 cmd->sg_cnt =
267 T_TASK(se_cmd)->t_tasks_sg_chained_no;
268 }
269 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
270 cmd->sg, cmd->sg_cnt))
271 cmd->was_ddp_setup = 1;
272 }
273 }
274 lport->tt.seq_send(lport, cmd->seq, fp);
275 return 0;
276}
277
278u32 ft_get_task_tag(struct se_cmd *se_cmd)
279{
280 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
281
282 return fc_seq_exch(cmd->seq)->rxid;
283}
284
285int ft_get_cmd_state(struct se_cmd *se_cmd)
286{
287 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
288
289 return cmd->state;
290}
291
292int ft_is_state_remove(struct se_cmd *se_cmd)
293{
294 return 0; /* XXX TBD */
295}
296
297void ft_new_cmd_failure(struct se_cmd *se_cmd)
298{
299 /* XXX TBD */
300 printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
301}
302
303/*
304 * FC sequence response handler for follow-on sequences (data) and aborts.
305 */
306static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
307{
308 struct ft_cmd *cmd = arg;
309 struct fc_frame_header *fh;
310
311 if (IS_ERR(fp)) {
312 /* XXX need to find cmd if queued */
313 cmd->se_cmd.t_state = TRANSPORT_REMOVE;
314 cmd->seq = NULL;
315 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
316 return;
317 }
318
319 fh = fc_frame_header_get(fp);
320
321 switch (fh->fh_r_ctl) {
322 case FC_RCTL_DD_SOL_DATA: /* write data */
323 ft_recv_write_data(cmd, fp);
324 break;
325 case FC_RCTL_DD_UNSOL_CTL: /* command */
326 case FC_RCTL_DD_SOL_CTL: /* transfer ready */
327 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
328 default:
329 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
330 __func__, fh->fh_r_ctl);
331 fc_frame_free(fp);
332 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
333 break;
334 }
335}
336
337/*
338 * Send a FCP response including SCSI status and optional FCP rsp_code.
339 * status is SAM_STAT_GOOD (zero) iff code is valid.
340 * This is used in error cases, such as allocation failures.
341 */
342static void ft_send_resp_status(struct fc_lport *lport,
343 const struct fc_frame *rx_fp,
344 u32 status, enum fcp_resp_rsp_codes code)
345{
346 struct fc_frame *fp;
347 struct fc_seq *sp;
348 const struct fc_frame_header *fh;
349 size_t len;
350 struct fcp_resp_with_ext *fcp;
351 struct fcp_resp_rsp_info *info;
352
353 fh = fc_frame_header_get(rx_fp);
354 FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
355 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
356 len = sizeof(*fcp);
357 if (status == SAM_STAT_GOOD)
358 len += sizeof(*info);
359 fp = fc_frame_alloc(lport, len);
360 if (!fp)
361 return;
362 fcp = fc_frame_payload_get(fp, len);
363 memset(fcp, 0, len);
364 fcp->resp.fr_status = status;
365 if (status == SAM_STAT_GOOD) {
366 fcp->ext.fr_rsp_len = htonl(sizeof(*info));
367 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
368 info = (struct fcp_resp_rsp_info *)(fcp + 1);
369 info->rsp_code = code;
370 }
371
372 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
373 sp = fr_seq(fp);
374 if (sp)
375 lport->tt.seq_send(lport, sp, fp);
376 else
377 lport->tt.frame_send(lport, fp);
378}
379
380/*
381 * Send error or task management response.
382 * Always frees the cmd and associated state.
383 */
384static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
385{
386 ft_send_resp_status(cmd->sess->tport->lport,
387 cmd->req_frame, SAM_STAT_GOOD, code);
388 ft_free_cmd(cmd);
389}
390
391/*
392 * Handle Task Management Request.
393 */
394static void ft_send_tm(struct ft_cmd *cmd)
395{
396 struct se_tmr_req *tmr;
397 struct fcp_cmnd *fcp;
398 struct ft_sess *sess;
399 u8 tm_func;
400
401 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
402
403 switch (fcp->fc_tm_flags) {
404 case FCP_TMF_LUN_RESET:
405 tm_func = TMR_LUN_RESET;
406 break;
407 case FCP_TMF_TGT_RESET:
408 tm_func = TMR_TARGET_WARM_RESET;
409 break;
410 case FCP_TMF_CLR_TASK_SET:
411 tm_func = TMR_CLEAR_TASK_SET;
412 break;
413 case FCP_TMF_ABT_TASK_SET:
414 tm_func = TMR_ABORT_TASK_SET;
415 break;
416 case FCP_TMF_CLR_ACA:
417 tm_func = TMR_CLEAR_ACA;
418 break;
419 default:
420 /*
421 * FCP4r01 indicates having a combination of
422 * tm_flags set is invalid.
423 */
424 FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
425 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
426 return;
427 }
428
429 FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
430 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
431 if (!tmr) {
432 FT_TM_DBG("alloc failed\n");
433 ft_send_resp_code(cmd, FCP_TMF_FAILED);
434 return;
435 }
436 cmd->se_cmd.se_tmr_req = tmr;
437
438 switch (fcp->fc_tm_flags) {
439 case FCP_TMF_LUN_RESET:
440 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
441 if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
442 /*
443 * Clean up the newly allocated TMR request, since the
444 * TMR cannot be handled when the LUN lookup for the
445 * request fails.
446 */
447 FT_TM_DBG("Failed to get LUN for TMR func %d, "
448 "se_cmd %p, unpacked_lun %d\n",
449 tm_func, &cmd->se_cmd, cmd->lun);
450 ft_dump_cmd(cmd, __func__);
451 sess = cmd->sess;
452 transport_send_check_condition_and_sense(&cmd->se_cmd,
453 cmd->se_cmd.scsi_sense_reason, 0);
454 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
455 ft_sess_put(sess);
456 return;
457 }
458 break;
459 case FCP_TMF_TGT_RESET:
460 case FCP_TMF_CLR_TASK_SET:
461 case FCP_TMF_ABT_TASK_SET:
462 case FCP_TMF_CLR_ACA:
463 break;
464 default:
465 return;
466 }
467 transport_generic_handle_tmr(&cmd->se_cmd);
468}
469
470/*
471 * Send status from completed task management request.
472 */
473int ft_queue_tm_resp(struct se_cmd *se_cmd)
474{
475 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
476 struct se_tmr_req *tmr = se_cmd->se_tmr_req;
477 enum fcp_resp_rsp_codes code;
478
479 switch (tmr->response) {
480 case TMR_FUNCTION_COMPLETE:
481 code = FCP_TMF_CMPL;
482 break;
483 case TMR_LUN_DOES_NOT_EXIST:
484 code = FCP_TMF_INVALID_LUN;
485 break;
486 case TMR_FUNCTION_REJECTED:
487 code = FCP_TMF_REJECTED;
488 break;
489 case TMR_TASK_DOES_NOT_EXIST:
490 case TMR_TASK_STILL_ALLEGIANT:
491 case TMR_TASK_FAILOVER_NOT_SUPPORTED:
492 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
493 case TMR_FUNCTION_AUTHORIZATION_FAILED:
494 default:
495 code = FCP_TMF_FAILED;
496 break;
497 }
498 FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
499 tmr->function, tmr->response, code);
500 ft_send_resp_code(cmd, code);
501 return 0;
502}
503
504/*
505 * Handle incoming FCP command.
506 */
507static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
508{
509 struct ft_cmd *cmd;
510 struct fc_lport *lport = sess->tport->lport;
511
512 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
513 if (!cmd)
514 goto busy;
515 cmd->sess = sess;
516 cmd->seq = lport->tt.seq_assign(lport, fp);
517 if (!cmd->seq) {
518 kfree(cmd);
519 goto busy;
520 }
521 cmd->req_frame = fp; /* hold frame during cmd */
522 ft_queue_cmd(sess, cmd);
523 return;
524
525busy:
526 FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
527 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
528 fc_frame_free(fp);
529 ft_sess_put(sess); /* undo get from lookup */
530}
531
532
533/*
534 * Handle incoming FCP frame.
535 * Caller has verified that the frame is type FCP.
536 */
537void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
538{
539 struct fc_frame_header *fh = fc_frame_header_get(fp);
540
541 switch (fh->fh_r_ctl) {
542 case FC_RCTL_DD_UNSOL_CMD: /* command */
543 ft_recv_cmd(sess, fp);
544 break;
545 case FC_RCTL_DD_SOL_DATA: /* write data */
546 case FC_RCTL_DD_UNSOL_CTL:
547 case FC_RCTL_DD_SOL_CTL:
548 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
549 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
550 default:
551 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
552 __func__, fh->fh_r_ctl);
553 fc_frame_free(fp);
554 ft_sess_put(sess); /* undo get from lookup */
555 break;
556 }
557}
558
559/*
560 * Send new command to target.
561 */
562static void ft_send_cmd(struct ft_cmd *cmd)
563{
564 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
565 struct se_cmd *se_cmd;
566 struct fcp_cmnd *fcp;
567 int data_dir;
568 u32 data_len;
569 int task_attr;
570 int ret;
571
572 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
573 if (!fcp)
574 goto err;
575
576 if (fcp->fc_flags & FCP_CFL_LEN_MASK)
577 goto err; /* not handling longer CDBs yet */
578
579 if (fcp->fc_tm_flags) {
580 task_attr = FCP_PTA_SIMPLE;
581 data_dir = DMA_NONE;
582 data_len = 0;
583 } else {
584 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
585 case 0:
586 data_dir = DMA_NONE;
587 break;
588 case FCP_CFL_RDDATA:
589 data_dir = DMA_FROM_DEVICE;
590 break;
591 case FCP_CFL_WRDATA:
592 data_dir = DMA_TO_DEVICE;
593 break;
594 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
595 goto err; /* TBD not supported by tcm_fc yet */
596 }
597 /*
598 * Locate the SAM Task Attr from fc_pri_ta
599 */
600 switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
601 case FCP_PTA_HEADQ:
602 task_attr = MSG_HEAD_TAG;
603 break;
604 case FCP_PTA_ORDERED:
605 task_attr = MSG_ORDERED_TAG;
606 break;
607 case FCP_PTA_ACA:
608 task_attr = MSG_ACA_TAG;
609 break;
610 case FCP_PTA_SIMPLE: /* Fallthrough */
611 default:
612 task_attr = MSG_SIMPLE_TAG;
613 }
614
615
616 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
617 data_len = ntohl(fcp->fc_dl);
618 cmd->cdb = fcp->fc_cdb;
619 }
620
621 se_cmd = &cmd->se_cmd;
622 /*
623 * Initialize struct se_cmd descriptor from target_core_mod
624 * infrastructure
625 */
626 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
627 data_len, data_dir, task_attr,
628 &cmd->ft_sense_buffer[0]);
629 /*
630 * Check for FCP task management flags
631 */
632 if (fcp->fc_tm_flags) {
633 ft_send_tm(cmd);
634 return;
635 }
636
637 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
638
639 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
640 ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
641 if (ret < 0) {
642 ft_dump_cmd(cmd, __func__);
643 transport_send_check_condition_and_sense(&cmd->se_cmd,
644 cmd->se_cmd.scsi_sense_reason, 0);
645 return;
646 }
647
648 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
649
650 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
651 ft_dump_cmd(cmd, __func__);
652
653 if (ret == -1) {
654 transport_send_check_condition_and_sense(se_cmd,
655 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
656 transport_generic_free_cmd(se_cmd, 0, 1, 0);
657 return;
658 }
659 if (ret == -2) {
660 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
661 ft_queue_status(se_cmd);
662 else
663 transport_send_check_condition_and_sense(se_cmd,
664 se_cmd->scsi_sense_reason, 0);
665 transport_generic_free_cmd(se_cmd, 0, 1, 0);
666 return;
667 }
668 transport_generic_handle_cdb(se_cmd);
669 return;
670
671err:
672 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
673 return;
674}
675
676/*
677 * Handle request in the command thread.
678 */
679static void ft_exec_req(struct ft_cmd *cmd)
680{
681 FT_IO_DBG("cmd state %x\n", cmd->state);
682 switch (cmd->state) {
683 case FC_CMD_ST_NEW:
684 ft_send_cmd(cmd);
685 break;
686 default:
687 break;
688 }
689}
690
691/*
692 * Processing thread.
693 * Currently one thread per tpg.
694 */
695int ft_thread(void *arg)
696{
697 struct ft_tpg *tpg = arg;
698 struct se_queue_obj *qobj = &tpg->qobj;
699 struct ft_cmd *cmd;
700 int ret;
701
702 set_user_nice(current, -20);
703
704 while (!kthread_should_stop()) {
705 ret = wait_event_interruptible(qobj->thread_wq,
706 atomic_read(&qobj->queue_cnt) || kthread_should_stop());
707 if (ret < 0 || kthread_should_stop())
708 goto out;
709 cmd = ft_dequeue_cmd(qobj);
710 if (cmd)
711 ft_exec_req(cmd);
712 }
713
714out:
715 return 0;
716}
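
ft_recv_cmd() above defers each new command to the per-TPG processing thread: ft_queue_cmd() links the request onto the tpg's se_queue_obj under its lock and wakes ft_thread(), which dequeues one entry at a time and runs it through ft_exec_req()/ft_send_cmd(). The sketch below is a minimal userspace model of that handoff, with pthread primitives standing in for the kernel spinlock and wait queue; the queue_req()/dequeue_req() names are invented for illustration, and the single-element demo ignores the FIFO ordering that list_add_tail() preserves in the kernel code. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

static struct req *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_wake = PTHREAD_COND_INITIALIZER;

/* cf. ft_queue_cmd(): link the request onto the queue under the lock,
 * then wake the worker (wake_up_interruptible() in the kernel code). */
static void queue_req(struct req *r)
{
	pthread_mutex_lock(&queue_lock);
	r->next = queue_head;
	queue_head = r;
	pthread_mutex_unlock(&queue_lock);
	pthread_cond_signal(&queue_wake);
}

/* cf. ft_dequeue_cmd(), plus the wait done in ft_thread(). */
static struct req *dequeue_req(void)
{
	struct req *r;

	pthread_mutex_lock(&queue_lock);
	while (!queue_head)
		pthread_cond_wait(&queue_wake, &queue_lock);
	r = queue_head;
	queue_head = r->next;
	pthread_mutex_unlock(&queue_lock);
	return r;
}

/* cf. ft_thread()/ft_exec_req(): take one request and execute it. */
static void *worker(void *arg)
{
	struct req *r = dequeue_req();

	(void)arg;
	printf("executing request %d\n", r->id);
	free(r);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct req *r = malloc(sizeof(*r));

	if (!r)
		return 1;
	r->id = 1;
	pthread_create(&t, NULL, worker, NULL);
	queue_req(r);
	pthread_join(t, NULL);
	return 0;
}
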
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 000000000000..84e868c255dd
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,669 @@
1/*******************************************************************************
2 * Filename: tcm_fc.c
3 *
4 * This file contains the configfs implementation for TCM_fc fabric node.
5 * Based on tcm_loop_configfs.c
6 *
7 * Copyright (c) 2010 Cisco Systems, Inc.
8 * Copyright (c) 2009,2010 Rising Tide, Inc.
9 * Copyright (c) 2009,2010 Linux-iSCSI.org
10 *
11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/kthread.h>
32#include <linux/types.h>
33#include <linux/string.h>
34#include <linux/configfs.h>
35#include <linux/ctype.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_fabric_lib.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_configfs.h>
51#include <target/target_core_base.h>
52#include <target/configfs_macros.h>
53
54#include "tcm_fc.h"
55
56struct target_fabric_configfs *ft_configfs;
57
58LIST_HEAD(ft_lport_list);
59DEFINE_MUTEX(ft_lport_lock);
60
61unsigned int ft_debug_logging;
62module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
63MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
64
65/*
66 * Parse WWN.
67 * If strict, we require lower-case hex and colon separators to be sure
68 * the name is the same as what would be generated by ft_format_wwn()
69 * so the name and wwn are mapped one-to-one.
70 */
71static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
72{
73 const char *cp;
74 char c;
75 u32 nibble;
76 u32 byte = 0;
77 u32 pos = 0;
78 u32 err;
79
80 *wwn = 0;
81 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
82 c = *cp;
83 if (c == '\n' && cp[1] == '\0')
84 continue;
85 if (strict && pos++ == 2 && byte++ < 7) {
86 pos = 0;
87 if (c == ':')
88 continue;
89 err = 1;
90 goto fail;
91 }
92 if (c == '\0') {
93 err = 2;
94 if (strict && byte != 8)
95 goto fail;
96 return cp - name;
97 }
98 err = 3;
99 if (isdigit(c))
100 nibble = c - '0';
101 else if (isxdigit(c) && (islower(c) || !strict))
102 nibble = tolower(c) - 'a' + 10;
103 else
104 goto fail;
105 *wwn = (*wwn << 4) | nibble;
106 }
107 err = 4;
108fail:
109 FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
110 err, cp - name, pos, byte);
111 return -1;
112}
113
114ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
115{
116 u8 b[8];
117
118 put_unaligned_be64(wwn, b);
119 return snprintf(buf, len,
120 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
121 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
122}
123
124static ssize_t ft_wwn_show(void *arg, char *buf)
125{
126 u64 *wwn = arg;
127 ssize_t len;
128
129 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
130 buf[len++] = '\n';
131 return len;
132}
133
134static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
135{
136 ssize_t ret;
137 u64 wwn;
138
139 ret = ft_parse_wwn(buf, &wwn, 0);
140 if (ret > 0)
141 *(u64 *)arg = wwn;
142 return ret;
143}
144
145/*
146 * ACL auth ops.
147 */
148
149static ssize_t ft_nacl_show_port_name(
150 struct se_node_acl *se_nacl,
151 char *page)
152{
153 struct ft_node_acl *acl = container_of(se_nacl,
154 struct ft_node_acl, se_node_acl);
155
156 return ft_wwn_show(&acl->node_auth.port_name, page);
157}
158
159static ssize_t ft_nacl_store_port_name(
160 struct se_node_acl *se_nacl,
161 const char *page,
162 size_t count)
163{
164 struct ft_node_acl *acl = container_of(se_nacl,
165 struct ft_node_acl, se_node_acl);
166
167 return ft_wwn_store(&acl->node_auth.port_name, page, count);
168}
169
170TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
171
172static ssize_t ft_nacl_show_node_name(
173 struct se_node_acl *se_nacl,
174 char *page)
175{
176 struct ft_node_acl *acl = container_of(se_nacl,
177 struct ft_node_acl, se_node_acl);
178
179 return ft_wwn_show(&acl->node_auth.node_name, page);
180}
181
182static ssize_t ft_nacl_store_node_name(
183 struct se_node_acl *se_nacl,
184 const char *page,
185 size_t count)
186{
187 struct ft_node_acl *acl = container_of(se_nacl,
188 struct ft_node_acl, se_node_acl);
189
190 return ft_wwn_store(&acl->node_auth.node_name, page, count);
191}
192
193TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
194
195static struct configfs_attribute *ft_nacl_base_attrs[] = {
196 &ft_nacl_port_name.attr,
197 &ft_nacl_node_name.attr,
198 NULL,
199};
200
201/*
202 * ACL ops.
203 */
204
205/*
206 * Add ACL for an initiator. The ACL is named arbitrarily.
207 * The port_name and/or node_name are attributes.
208 */
209static struct se_node_acl *ft_add_acl(
210 struct se_portal_group *se_tpg,
211 struct config_group *group,
212 const char *name)
213{
214 struct ft_node_acl *acl;
215 struct ft_tpg *tpg;
216 u64 wwpn;
217 u32 q_depth;
218
219 FT_CONF_DBG("add acl %s\n", name);
220 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
221
222 if (ft_parse_wwn(name, &wwpn, 1) < 0)
223 return ERR_PTR(-EINVAL);
224
225 acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
226 if (!(acl))
227 return ERR_PTR(-ENOMEM);
228 acl->node_auth.port_name = wwpn;
229
230 q_depth = 32; /* XXX bogus default - get from tpg? */
231 return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
232 &acl->se_node_acl, name, q_depth);
233}
234
235static void ft_del_acl(struct se_node_acl *se_acl)
236{
237 struct se_portal_group *se_tpg = se_acl->se_tpg;
238 struct ft_tpg *tpg;
239 struct ft_node_acl *acl = container_of(se_acl,
240 struct ft_node_acl, se_node_acl);
241
242 FT_CONF_DBG("del acl %s\n",
243 config_item_name(&se_acl->acl_group.cg_item));
244
245 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
246 FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
247 acl, se_acl, tpg, &tpg->se_tpg);
248
249 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
250 kfree(acl);
251}
252
253struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
254{
255 struct ft_node_acl *found = NULL;
256 struct ft_node_acl *acl;
257 struct se_portal_group *se_tpg = &tpg->se_tpg;
258 struct se_node_acl *se_acl;
259
260 spin_lock_bh(&se_tpg->acl_node_lock);
261 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
262 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
263 FT_CONF_DBG("acl %p port_name %llx\n",
264 acl, (unsigned long long)acl->node_auth.port_name);
265 if (acl->node_auth.port_name == rdata->ids.port_name ||
266 acl->node_auth.node_name == rdata->ids.node_name) {
267 FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
268 (unsigned long long)rdata->ids.port_name);
269 found = acl;
270 /* XXX need to hold onto ACL */
271 break;
272 }
273 }
274 spin_unlock_bh(&se_tpg->acl_node_lock);
275 return found;
276}
277
278struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
279{
280 struct ft_node_acl *acl;
281
282 acl = kzalloc(sizeof(*acl), GFP_KERNEL);
283 if (!(acl)) {
284 printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
285 return NULL;
286 }
287 FT_CONF_DBG("acl %p\n", acl);
288 return &acl->se_node_acl;
289}
290
291static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
292 struct se_node_acl *se_acl)
293{
294 struct ft_node_acl *acl = container_of(se_acl,
295 struct ft_node_acl, se_node_acl);
296
297 FT_CONF_DBG("acl %p\n", acl);
298 kfree(acl);
299}
300
301/*
302 * local_port port_group (tpg) ops.
303 */
304static struct se_portal_group *ft_add_tpg(
305 struct se_wwn *wwn,
306 struct config_group *group,
307 const char *name)
308{
309 struct ft_lport_acl *lacl;
310 struct ft_tpg *tpg;
311 unsigned long index;
312 int ret;
313
314 FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
315
316 /*
317 * Name must be "tpgt_" followed by the index.
318 */
319 if (strstr(name, "tpgt_") != name)
320 return NULL;
321 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
322 return NULL;
323
324 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
325 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
326 if (!tpg)
327 return NULL;
328 tpg->index = index;
329 tpg->lport_acl = lacl;
330 INIT_LIST_HEAD(&tpg->lun_list);
331 transport_init_queue_obj(&tpg->qobj);
332
333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
334 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
335 if (ret < 0) {
336 kfree(tpg);
337 return NULL;
338 }
339
340 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
341 if (IS_ERR(tpg->thread)) {
342 kfree(tpg);
343 return NULL;
344 }
345
346 mutex_lock(&ft_lport_lock);
347 list_add_tail(&tpg->list, &lacl->tpg_list);
348 mutex_unlock(&ft_lport_lock);
349
350 return &tpg->se_tpg;
351}
352
353static void ft_del_tpg(struct se_portal_group *se_tpg)
354{
355 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
356
357 FT_CONF_DBG("del tpg %s\n",
358 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
359
360 kthread_stop(tpg->thread);
361
362 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
363 synchronize_rcu();
364
365 mutex_lock(&ft_lport_lock);
366 list_del(&tpg->list);
367 if (tpg->tport) {
368 tpg->tport->tpg = NULL;
369 tpg->tport = NULL;
370 }
371 mutex_unlock(&ft_lport_lock);
372
373 core_tpg_deregister(se_tpg);
374 kfree(tpg);
375}
376
377/*
378 * Verify that an lport is configured to use the tcm_fc module, and return
379 * the target port group that should be used.
380 *
381 * The caller holds ft_lport_lock.
382 */
383struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
384{
385 struct ft_lport_acl *lacl;
386 struct ft_tpg *tpg;
387
388 list_for_each_entry(lacl, &ft_lport_list, list) {
389 if (lacl->wwpn == lport->wwpn) {
390 list_for_each_entry(tpg, &lacl->tpg_list, list)
391 return tpg; /* XXX for now return first entry */
392 return NULL;
393 }
394 }
395 return NULL;
396}
397
398/*
399 * target config instance ops.
400 */
401
402/*
403 * Add lport to allowed config.
404 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
405 */
406static struct se_wwn *ft_add_lport(
407 struct target_fabric_configfs *tf,
408 struct config_group *group,
409 const char *name)
410{
411 struct ft_lport_acl *lacl;
412 struct ft_lport_acl *old_lacl;
413 u64 wwpn;
414
415 FT_CONF_DBG("add lport %s\n", name);
416 if (ft_parse_wwn(name, &wwpn, 1) < 0)
417 return NULL;
418 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
419 if (!lacl)
420 return NULL;
421 lacl->wwpn = wwpn;
422 INIT_LIST_HEAD(&lacl->tpg_list);
423
424 mutex_lock(&ft_lport_lock);
425 list_for_each_entry(old_lacl, &ft_lport_list, list) {
426 if (old_lacl->wwpn == wwpn) {
427 mutex_unlock(&ft_lport_lock);
428 kfree(lacl);
429 return NULL;
430 }
431 }
432 list_add_tail(&lacl->list, &ft_lport_list);
433 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
434 mutex_unlock(&ft_lport_lock);
435
436 return &lacl->fc_lport_wwn;
437}
438
439static void ft_del_lport(struct se_wwn *wwn)
440{
441 struct ft_lport_acl *lacl = container_of(wwn,
442 struct ft_lport_acl, fc_lport_wwn);
443
444 FT_CONF_DBG("del lport %s\n",
445 config_item_name(&wwn->wwn_group.cg_item));
446 mutex_lock(&ft_lport_lock);
447 list_del(&lacl->list);
448 mutex_unlock(&ft_lport_lock);
449
450 kfree(lacl);
451}
452
453static ssize_t ft_wwn_show_attr_version(
454 struct target_fabric_configfs *tf,
455 char *page)
456{
457 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
458 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
459}
460
461TF_WWN_ATTR_RO(ft, version);
462
463static struct configfs_attribute *ft_wwn_attrs[] = {
464 &ft_wwn_version.attr,
465 NULL,
466};
467
468static char *ft_get_fabric_name(void)
469{
470 return "fc";
471}
472
473static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
474{
475 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
476
477 return tpg->lport_acl->name;
478}
479
480static u16 ft_get_tag(struct se_portal_group *se_tpg)
481{
482 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
483
484 /*
485 * This tag is used when forming the SCSI Name identifier in the
486 * INQUIRY EVPD page 0x83 response to represent the SCSI Target Port.
487 */
488 return tpg->index;
489}
490
491static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
492{
493 return 1;
494}
495
496static int ft_check_false(struct se_portal_group *se_tpg)
497{
498 return 0;
499}
500
501static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
502{
503}
504
505static u16 ft_get_fabric_sense_len(void)
506{
507 return 0;
508}
509
510static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
511{
512 return 0;
513}
514
515static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
516{
517 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
518
519 return tpg->index;
520}
521
522static struct target_core_fabric_ops ft_fabric_ops = {
523 .get_fabric_name = ft_get_fabric_name,
524 .get_fabric_proto_ident = fc_get_fabric_proto_ident,
525 .tpg_get_wwn = ft_get_fabric_wwn,
526 .tpg_get_tag = ft_get_tag,
527 .tpg_get_default_depth = ft_get_default_depth,
528 .tpg_get_pr_transport_id = fc_get_pr_transport_id,
529 .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
530 .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
531 .tpg_check_demo_mode = ft_check_false,
532 .tpg_check_demo_mode_cache = ft_check_false,
533 .tpg_check_demo_mode_write_protect = ft_check_false,
534 .tpg_check_prod_mode_write_protect = ft_check_false,
535 .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
536 .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
537 .tpg_get_inst_index = ft_tpg_get_inst_index,
538 .check_stop_free = ft_check_stop_free,
539 .release_cmd_to_pool = ft_release_cmd,
540 .release_cmd_direct = ft_release_cmd,
541 .shutdown_session = ft_sess_shutdown,
542 .close_session = ft_sess_close,
543 .stop_session = ft_sess_stop,
544 .fall_back_to_erl0 = ft_sess_set_erl0,
545 .sess_logged_in = ft_sess_logged_in,
546 .sess_get_index = ft_sess_get_index,
547 .sess_get_initiator_sid = NULL,
548 .write_pending = ft_write_pending,
549 .write_pending_status = ft_write_pending_status,
550 .set_default_node_attributes = ft_set_default_node_attr,
551 .get_task_tag = ft_get_task_tag,
552 .get_cmd_state = ft_get_cmd_state,
553 .new_cmd_failure = ft_new_cmd_failure,
554 .queue_data_in = ft_queue_data_in,
555 .queue_status = ft_queue_status,
556 .queue_tm_rsp = ft_queue_tm_resp,
557 .get_fabric_sense_len = ft_get_fabric_sense_len,
558 .set_fabric_sense_len = ft_set_fabric_sense_len,
559 .is_state_remove = ft_is_state_remove,
560 /*
561 * Setup function pointers for generic logic in
562 * target_core_fabric_configfs.c
563 */
564 .fabric_make_wwn = &ft_add_lport,
565 .fabric_drop_wwn = &ft_del_lport,
566 .fabric_make_tpg = &ft_add_tpg,
567 .fabric_drop_tpg = &ft_del_tpg,
568 .fabric_post_link = NULL,
569 .fabric_pre_unlink = NULL,
570 .fabric_make_np = NULL,
571 .fabric_drop_np = NULL,
572 .fabric_make_nodeacl = &ft_add_acl,
573 .fabric_drop_nodeacl = &ft_del_acl,
574};
575
576int ft_register_configfs(void)
577{
578 struct target_fabric_configfs *fabric;
579 int ret;
580
581 /*
582 * Register the top level struct config_item_type with TCM core
583 */
584 fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
585 if (!fabric) {
586 printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
587 __func__);
588 return -1;
589 }
590 fabric->tf_ops = ft_fabric_ops;
591
592 /* Allowing support for task_sg_chaining */
593 fabric->tf_ops.task_sg_chaining = 1;
594
595 /*
596 * Setup default attribute lists for various fabric->tf_cit_tmpl
597 */
598 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
599 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
600 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
601 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
602 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
603 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
604 ft_nacl_base_attrs;
605 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
606 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
607 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
608 /*
609 * register the fabric for use within TCM
610 */
611 ret = target_fabric_configfs_register(fabric);
612 if (ret < 0) {
613 FT_CONF_DBG("target_fabric_configfs_register() for"
614 " FC Target failed!\n");
615 printk(KERN_INFO
616 "%s: target_fabric_configfs_register() failed!\n",
617 __func__);
618 target_fabric_configfs_free(fabric);
619 return -1;
620 }
621
622 /*
623 * Setup our local pointer to *fabric.
624 */
625 ft_configfs = fabric;
626 return 0;
627}
628
629void ft_deregister_configfs(void)
630{
631 if (!ft_configfs)
632 return;
633 target_fabric_configfs_deregister(ft_configfs);
634 ft_configfs = NULL;
635}
636
637static struct notifier_block ft_notifier = {
638 .notifier_call = ft_lport_notify
639};
640
641static int __init ft_init(void)
642{
643 if (ft_register_configfs())
644 return -1;
645 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
646 ft_deregister_configfs();
647 return -1;
648 }
649 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
650 fc_lport_iterate(ft_lport_add, NULL);
651 return 0;
652}
653
654static void __exit ft_exit(void)
655{
656 blocking_notifier_chain_unregister(&fc_lport_notifier_head,
657 &ft_notifier);
658 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
659 fc_lport_iterate(ft_lport_del, NULL);
660 ft_deregister_configfs();
661 synchronize_rcu();
662}
663
664#ifdef MODULE
665MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
666MODULE_LICENSE("GPL");
667module_init(ft_init);
668module_exit(ft_exit);
669#endif /* MODULE */
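
ft_add_lport() and ft_add_acl() above only accept names that ft_parse_wwn() parses in strict mode, i.e. exactly the lower-case, colon-separated spelling produced by ft_format_wwn(), so configfs directory names map one-to-one onto WWNs. The standalone sketch below reproduces that canonical formatting; it uses plain snprintf and an explicit byte loop, whereas the kernel code uses put_unaligned_be64() and a hand-rolled strict parser.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FT_NAMELEN 32	/* copied from tcm_fc.h: ASCII WWN length incl. pad */

/* Mirrors ft_format_wwn(): eight big-endian bytes rendered as lower-case
 * hex pairs separated by colons. */
static int format_wwn(char *buf, size_t len, uint64_t wwn)
{
	unsigned char b[8];
	int i;

	for (i = 0; i < 8; i++)		/* same byte order as put_unaligned_be64() */
		b[i] = wwn >> (56 - 8 * i);
	return snprintf(buf, len,
			"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
			b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}

int main(void)
{
	char name[FT_NAMELEN];
	uint64_t wwpn = 0x2000001122334455ULL;	/* arbitrary example value */

	format_wwn(name, sizeof(name), wwpn);
	printf("wwpn 0x%016" PRIx64 " -> %s\n", wwpn, name);
	return 0;
}

A WWPN formatted this way (for example 20:00:00:11:22:33:44:55) is the spelling the configfs code expects when the corresponding lport or ACL directory is created.
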
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 000000000000..8c4a24077d9d
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
5 *
6 * Copyright (c) 2007 Intel Corporation. All rights reserved.
7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
8 * Copyright (c) 2008 Mike Christie
9 * Copyright (c) 2009 Rising Tide, Inc.
10 * Copyright (c) 2009 Linux-iSCSI.org
11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27/* XXX TBD some includes may be extraneous */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/version.h>
32#include <generated/utsrelease.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/hash.h>
42#include <asm/unaligned.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
47#include <scsi/libfc.h>
48#include <scsi/fc_encode.h>
49
50#include <target/target_core_base.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_device.h>
54#include <target/target_core_tpg.h>
55#include <target/target_core_configfs.h>
56#include <target/target_core_base.h>
57#include <target/configfs_macros.h>
58
59#include "tcm_fc.h"
60
61/*
62 * Deliver read data back to initiator.
63 * XXX TBD handle resource problems later.
64 */
65int ft_queue_data_in(struct se_cmd *se_cmd)
66{
67 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
68 struct se_transport_task *task;
69 struct fc_frame *fp = NULL;
70 struct fc_exch *ep;
71 struct fc_lport *lport;
72 struct se_mem *mem;
73 size_t remaining;
74 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
75 u32 mem_off;
76 u32 fh_off = 0;
77 u32 frame_off = 0;
78 size_t frame_len = 0;
79 size_t mem_len;
80 size_t tlen;
81 size_t off_in_page;
82 struct page *page;
83 int use_sg;
84 int error;
85 void *page_addr;
86 void *from;
87 void *to = NULL;
88
89 ep = fc_seq_exch(cmd->seq);
90 lport = ep->lp;
91 cmd->seq = lport->tt.seq_start_next(cmd->seq);
92
93 task = T_TASK(se_cmd);
94 BUG_ON(!task);
95 remaining = se_cmd->data_length;
96
97 /*
98 * Setup to use first mem list entry if any.
99 */
100 if (task->t_tasks_se_num) {
101 mem = list_first_entry(task->t_mem_list,
102 struct se_mem, se_list);
103 mem_len = mem->se_len;
104 mem_off = mem->se_off;
105 page = mem->se_page;
106 } else {
107 mem = NULL;
108 mem_len = remaining;
109 mem_off = 0;
110 page = NULL;
111 }
112
113 /* no scatter/gather in skb for odd word length due to fc_seq_send() */
114 use_sg = !(remaining % 4);
115
116 while (remaining) {
117 if (!mem_len) {
118 BUG_ON(!mem);
119 mem = list_entry(mem->se_list.next,
120 struct se_mem, se_list);
121 mem_len = min((size_t)mem->se_len, remaining);
122 mem_off = mem->se_off;
123 page = mem->se_page;
124 }
125 if (!frame_len) {
126 /*
127 * If the lport has the Large Send Offload (LSO)
128 * capability, allow 'frame_len' to be as large as
129 * 'lso_max'; otherwise limit it to the session's max_frame.
130 */
131 frame_len = (lport->seq_offload) ? lport->lso_max :
132 cmd->sess->max_frame;
133 frame_len = min(frame_len, remaining);
134 fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
135 if (!fp)
136 return -ENOMEM;
137 to = fc_frame_payload_get(fp, 0);
138 fh_off = frame_off;
139 frame_off += frame_len;
140 /*
141 * Set the frame's max payload, which the base driver
142 * uses to tell the hardware the maximum frame size so
143 * that it can fragment appropriately based on the
144 * "gso_max_size" of the underlying netdev.
145 */
146 fr_max_payload(fp) = cmd->sess->max_frame;
147 }
148 tlen = min(mem_len, frame_len);
149
150 if (use_sg) {
151 if (!mem) {
152 BUG_ON(!task->t_task_buf);
153 page_addr = task->t_task_buf + mem_off;
154 /*
155 * In this case, offset is 'offset_in_page' of
156 * (t_task_buf + mem_off) instead of 'mem_off'.
157 */
158 off_in_page = offset_in_page(page_addr);
159 page = virt_to_page(page_addr);
160 tlen = min(tlen, PAGE_SIZE - off_in_page);
161 } else
162 off_in_page = mem_off;
163 BUG_ON(!page);
164 get_page(page);
165 skb_fill_page_desc(fp_skb(fp),
166 skb_shinfo(fp_skb(fp))->nr_frags,
167 page, off_in_page, tlen);
168 fr_len(fp) += tlen;
169 fp_skb(fp)->data_len += tlen;
170 fp_skb(fp)->truesize +=
171 PAGE_SIZE << compound_order(page);
172 } else if (mem) {
173 BUG_ON(!page);
174 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
175 KM_SOFTIRQ0);
176 page_addr = from;
177 from += mem_off & ~PAGE_MASK;
178 tlen = min(tlen, (size_t)(PAGE_SIZE -
179 (mem_off & ~PAGE_MASK)));
180 memcpy(to, from, tlen);
181 kunmap_atomic(page_addr, KM_SOFTIRQ0);
182 to += tlen;
183 } else {
184 from = task->t_task_buf + mem_off;
185 memcpy(to, from, tlen);
186 to += tlen;
187 }
188
189 mem_off += tlen;
190 mem_len -= tlen;
191 frame_len -= tlen;
192 remaining -= tlen;
193
194 if (frame_len &&
195 (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
196 continue;
197 if (!remaining)
198 f_ctl |= FC_FC_END_SEQ;
199 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
200 FC_TYPE_FCP, f_ctl, fh_off);
201 error = lport->tt.seq_send(lport, cmd->seq, fp);
202 if (error) {
203 /* XXX For now, initiator will retry */
204 if (printk_ratelimit())
205 printk(KERN_ERR "%s: Failed to send frame %p, "
206 "xid <0x%x>, remaining %zu, "
207 "lso_max <0x%x>\n",
208 __func__, fp, ep->xid,
209 remaining, lport->lso_max);
210 }
211 }
212 return ft_queue_status(se_cmd);
213}
214
215/*
216 * Receive write data frame.
217 */
218void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219{
220 struct se_cmd *se_cmd = &cmd->se_cmd;
221 struct fc_seq *seq = cmd->seq;
222 struct fc_exch *ep;
223 struct fc_lport *lport;
224 struct se_transport_task *task;
225 struct fc_frame_header *fh;
226 struct se_mem *mem;
227 u32 mem_off;
228 u32 rel_off;
229 size_t frame_len;
230 size_t mem_len;
231 size_t tlen;
232 struct page *page;
233 void *page_addr;
234 void *from;
235 void *to;
236 u32 f_ctl;
237 void *buf;
238
239 task = T_TASK(se_cmd);
240 BUG_ON(!task);
241
242 fh = fc_frame_header_get(fp);
243 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
244 goto drop;
245
246 /*
247 * No payload is expected here: with the DDP (large
248 * receive offload) feature the payload is placed
249 * directly into the user buffers, hence the BUG_ON
250 * if buf is non-NULL when DDP was set up.
251 */
252 buf = fc_frame_payload_get(fp, 1);
253 if (cmd->was_ddp_setup && buf) {
254 printk(KERN_INFO "%s: unexpected frame payload when DDP was set "
255 "up; the payload should have been placed directly "
256 "into the buffer rather than arriving via the "
257 "legacy receive queue\n", __func__);
258 BUG_ON(buf);
259 }
260
261 /*
262 * If DDP was set up for this ft_cmd, only the last frame should
263 * arrive here, and it should have the TSI bit set. A data frame
264 * that arrives here without TSI set indicates an error. In both
265 * cases release the DDP context (ddp_done); in the error case,
266 * also initiate error recovery by aborting the exchange.
267 */
268 ep = fc_seq_exch(seq);
269 if (cmd->was_ddp_setup) {
270 BUG_ON(!ep);
271 lport = ep->lp;
272 BUG_ON(!lport);
273 }
274 if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
275 f_ctl = ntoh24(fh->fh_f_ctl);
276 /*
277 * If the TSI bit is set in f_ctl, the last write data frame was
278 * received successfully: its payload was posted directly to the
279 * user buffer and only the last frame's header is posted to the
280 * legacy receive queue.
281 */
282 if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
283 cmd->write_data_len = lport->tt.ddp_done(lport,
284 ep->xid);
285 goto last_frame;
286 } else {
287 /*
288 * Updating write_data_len may be meaningless at this
289 * point, but record it anyway in case it is needed
290 * later for debugging or other purposes.
291 */
292 printk(KERN_ERR "%s: Received frame with TSI bit not"
293 " being SET, dropping the frame, "
294 "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
295 __func__, cmd->sg, cmd->sg_cnt);
296 cmd->write_data_len = lport->tt.ddp_done(lport,
297 ep->xid);
298 lport->tt.seq_exch_abort(cmd->seq, 0);
299 goto drop;
300 }
301 }
302
303 rel_off = ntohl(fh->fh_parm_offset);
304 frame_len = fr_len(fp);
305 if (frame_len <= sizeof(*fh))
306 goto drop;
307 frame_len -= sizeof(*fh);
308 from = fc_frame_payload_get(fp, 0);
309 if (rel_off >= se_cmd->data_length)
310 goto drop;
311 if (frame_len + rel_off > se_cmd->data_length)
312 frame_len = se_cmd->data_length - rel_off;
313
314 /*
315 * Setup to use first mem list entry if any.
316 */
317 if (task->t_tasks_se_num) {
318 mem = list_first_entry(task->t_mem_list,
319 struct se_mem, se_list);
320 mem_len = mem->se_len;
321 mem_off = mem->se_off;
322 page = mem->se_page;
323 } else {
324 mem = NULL;
325 page = NULL;
326 mem_off = 0;
327 mem_len = frame_len;
328 }
329
330 while (frame_len) {
331 if (!mem_len) {
332 BUG_ON(!mem);
333 mem = list_entry(mem->se_list.next,
334 struct se_mem, se_list);
335 mem_len = mem->se_len;
336 mem_off = mem->se_off;
337 page = mem->se_page;
338 }
339 if (rel_off >= mem_len) {
340 rel_off -= mem_len;
341 mem_len = 0;
342 continue;
343 }
344 mem_off += rel_off;
345 mem_len -= rel_off;
346 rel_off = 0;
347
348 tlen = min(mem_len, frame_len);
349
350 if (mem) {
351 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
352 KM_SOFTIRQ0);
353 page_addr = to;
354 to += mem_off & ~PAGE_MASK;
355 tlen = min(tlen, (size_t)(PAGE_SIZE -
356 (mem_off & ~PAGE_MASK)));
357 memcpy(to, from, tlen);
358 kunmap_atomic(page_addr, KM_SOFTIRQ0);
359 } else {
360 to = task->t_task_buf + mem_off;
361 memcpy(to, from, tlen);
362 }
363 from += tlen;
364 frame_len -= tlen;
365 mem_off += tlen;
366 mem_len -= tlen;
367 cmd->write_data_len += tlen;
368 }
369last_frame:
370 if (cmd->write_data_len == se_cmd->data_length)
371 transport_generic_handle_data(se_cmd);
372drop:
373 fc_frame_free(fp);
374}
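For reference, below is a minimal userspace sketch of the page-offset arithmetic used by the ft_recv_write_data() copy loop above (the mem_off >> PAGE_SHIFT and mem_off & ~PAGE_MASK split), assuming a 4 KiB page size; SKETCH_PAGE_SIZE, copy_span() and the surrounding main() are illustrative only and not part of the driver.

/*
 * Sketch: copy a payload into an array of page-sized buffers without
 * crossing a page boundary in any single memcpy(), mirroring how the
 * driver maps one page at a time with kmap_atomic().
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096UL	/* stands in for PAGE_SIZE */

/* Copy up to len bytes starting at byte offset mem_off; stop at the end
 * of the current page and return the number of bytes copied. */
static size_t copy_span(char *pages[], size_t mem_off, const char *from,
			size_t len)
{
	size_t page_index = mem_off / SKETCH_PAGE_SIZE;	/* mem_off >> PAGE_SHIFT */
	size_t in_page = mem_off % SKETCH_PAGE_SIZE;	/* mem_off & ~PAGE_MASK */
	size_t tlen = len;

	if (tlen > SKETCH_PAGE_SIZE - in_page)
		tlen = SKETCH_PAGE_SIZE - in_page;
	memcpy(pages[page_index] + in_page, from, tlen);
	return tlen;
}

int main(void)
{
	char *pages[2] = { calloc(1, SKETCH_PAGE_SIZE), calloc(1, SKETCH_PAGE_SIZE) };
	const char payload[8] = "ABCDEFG";
	size_t off = SKETCH_PAGE_SIZE - 4;	/* copy straddles the page boundary */
	size_t remaining = sizeof(payload);

	while (remaining) {	/* mirrors the while (frame_len) loop */
		size_t done = copy_span(pages, off,
					payload + (sizeof(payload) - remaining),
					remaining);
		off += done;
		remaining -= done;
	}
	printf("tail of page 0: %.4s, head of page 1: %s\n",
	       pages[0] + SKETCH_PAGE_SIZE - 4, pages[1]);
	free(pages[0]);
	free(pages[1]);
	return 0;
}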
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 000000000000..7491e21cc6ae
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <linux/rcupdate.h>
34#include <linux/rculist.h>
35#include <linux/kref.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_device.h>
47#include <target/target_core_tpg.h>
48#include <target/target_core_configfs.h>
49#include <target/target_core_base.h>
50#include <target/configfs_macros.h>
51
52#include <scsi/libfc.h>
53#include "tcm_fc.h"
54
55static void ft_sess_delete_all(struct ft_tport *);
56
57/*
58 * Lookup or allocate target local port.
59 * Caller holds ft_lport_lock.
60 */
61static struct ft_tport *ft_tport_create(struct fc_lport *lport)
62{
63 struct ft_tpg *tpg;
64 struct ft_tport *tport;
65 int i;
66
67 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
68 if (tport && tport->tpg)
69 return tport;
70
71 tpg = ft_lport_find_tpg(lport);
72 if (!tpg)
73 return NULL;
74
75 if (tport) {
76 tport->tpg = tpg;
77 return tport;
78 }
79
80 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
81 if (!tport)
82 return NULL;
83
84 tport->lport = lport;
85 tport->tpg = tpg;
86 tpg->tport = tport;
87 for (i = 0; i < FT_SESS_HASH_SIZE; i++)
88 INIT_HLIST_HEAD(&tport->hash[i]);
89
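	/*
	 * The tport is fully initialized (lport, tpg, hash buckets) before
	 * it is published with rcu_assign_pointer(), so RCU readers such
	 * as ft_sess_get() never observe a partially constructed tport.
	 */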
90 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
91 return tport;
92}
93
94/*
95 * Free tport via RCU.
96 */
97static void ft_tport_rcu_free(struct rcu_head *rcu)
98{
99 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
100
101 kfree(tport);
102}
103
104/*
105 * Delete a target local port.
106 * Caller holds ft_lport_lock.
107 */
108static void ft_tport_delete(struct ft_tport *tport)
109{
110 struct fc_lport *lport;
111 struct ft_tpg *tpg;
112
113 ft_sess_delete_all(tport);
114 lport = tport->lport;
115 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
116 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
117
118 tpg = tport->tpg;
119 if (tpg) {
120 tpg->tport = NULL;
121 tport->tpg = NULL;
122 }
123 call_rcu(&tport->rcu, ft_tport_rcu_free);
124}
125
126/*
127 * Add local port.
128 * Called through fc_lport_iterate().
129 */
130void ft_lport_add(struct fc_lport *lport, void *arg)
131{
132 mutex_lock(&ft_lport_lock);
133 ft_tport_create(lport);
134 mutex_unlock(&ft_lport_lock);
135}
136
137/*
138 * Delete local port.
139 * Called through fc_lport_iterate().
140 */
141void ft_lport_del(struct fc_lport *lport, void *arg)
142{
143 struct ft_tport *tport;
144
145 mutex_lock(&ft_lport_lock);
146 tport = lport->prov[FC_TYPE_FCP];
147 if (tport)
148 ft_tport_delete(tport);
149 mutex_unlock(&ft_lport_lock);
150}
151
152/*
153 * Notification of local port change from libfc.
154 * Create or delete local port and associated tport.
155 */
156int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
157{
158 struct fc_lport *lport = arg;
159
160 switch (event) {
161 case FC_LPORT_EV_ADD:
162 ft_lport_add(lport, NULL);
163 break;
164 case FC_LPORT_EV_DEL:
165 ft_lport_del(lport, NULL);
166 break;
167 }
168 return NOTIFY_DONE;
169}
170
171/*
172 * Hash function for FC_IDs.
173 */
174static u32 ft_sess_hash(u32 port_id)
175{
176 return hash_32(port_id, FT_SESS_HASH_BITS);
177}
178
179/*
180 * Find session in local port.
181 * Sessions and hash lists are RCU-protected.
182 * A reference is taken which must be eventually freed.
183 * A reference is taken which must eventually be released via ft_sess_put().
184static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
185{
186 struct ft_tport *tport;
187 struct hlist_head *head;
188 struct hlist_node *pos;
189 struct ft_sess *sess;
190
191 rcu_read_lock();
192 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
193 if (!tport)
194 goto out;
195
196 head = &tport->hash[ft_sess_hash(port_id)];
197 hlist_for_each_entry_rcu(sess, pos, head, hash) {
198 if (sess->port_id == port_id) {
199 kref_get(&sess->kref);
200 rcu_read_unlock();
201 FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
202 return sess;
203 }
204 }
205out:
206 rcu_read_unlock();
207 FT_SESS_DBG("port_id %x not found\n", port_id);
208 return NULL;
209}
210
211/*
212 * Allocate session and enter it in the hash for the local port.
213 * Caller holds ft_lport_lock.
214 */
215static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
216 struct ft_node_acl *acl)
217{
218 struct ft_sess *sess;
219 struct hlist_head *head;
220 struct hlist_node *pos;
221
222 head = &tport->hash[ft_sess_hash(port_id)];
223 hlist_for_each_entry_rcu(sess, pos, head, hash)
224 if (sess->port_id == port_id)
225 return sess;
226
227 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
228 if (!sess)
229 return NULL;
230
231 sess->se_sess = transport_init_session();
232 if (IS_ERR(sess->se_sess)) {
233 kfree(sess);
234 return NULL;
235 }
236 sess->se_sess->se_node_acl = &acl->se_node_acl;
237 sess->tport = tport;
238 sess->port_id = port_id;
239 kref_init(&sess->kref); /* ref for table entry */
240 hlist_add_head_rcu(&sess->hash, head);
241 tport->sess_count++;
242
243 FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
244
245 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
246 sess->se_sess, sess);
247 return sess;
248}
249
250/*
251 * Unhash the session.
252 * Caller holds ft_lport_lock.
253 */
254static void ft_sess_unhash(struct ft_sess *sess)
255{
256 struct ft_tport *tport = sess->tport;
257
258 hlist_del_rcu(&sess->hash);
259 BUG_ON(!tport->sess_count);
260 tport->sess_count--;
261 sess->port_id = -1;
262 sess->params = 0;
263}
264
265/*
266 * Delete session from hash.
267 * Caller holds ft_lport_lock.
268 */
269static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
270{
271 struct hlist_head *head;
272 struct hlist_node *pos;
273 struct ft_sess *sess;
274
275 head = &tport->hash[ft_sess_hash(port_id)];
276 hlist_for_each_entry_rcu(sess, pos, head, hash) {
277 if (sess->port_id == port_id) {
278 ft_sess_unhash(sess);
279 return sess;
280 }
281 }
282 return NULL;
283}
284
285/*
286 * Delete all sessions from tport.
287 * Caller holds ft_lport_lock.
288 */
289static void ft_sess_delete_all(struct ft_tport *tport)
290{
291 struct hlist_head *head;
292 struct hlist_node *pos;
293 struct ft_sess *sess;
294
295 for (head = tport->hash;
296 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
297 hlist_for_each_entry_rcu(sess, pos, head, hash) {
298 ft_sess_unhash(sess);
299 transport_deregister_session_configfs(sess->se_sess);
300 ft_sess_put(sess); /* release from table */
301 }
302 }
303}
304
305/*
306 * TCM ops for sessions.
307 */
308
309/*
310 * Determine whether the session may be shut down in the current context.
311 * Returns non-zero if the session should be shut down.
312 */
313int ft_sess_shutdown(struct se_session *se_sess)
314{
315 struct ft_sess *sess = se_sess->fabric_sess_ptr;
316
317 FT_SESS_DBG("port_id %x\n", sess->port_id);
318 return 1;
319}
320
321/*
322 * Remove session and send PRLO.
323 * This is called when the ACL is being deleted or queue depth is changing.
324 */
325void ft_sess_close(struct se_session *se_sess)
326{
327 struct ft_sess *sess = se_sess->fabric_sess_ptr;
328 struct fc_lport *lport;
329 u32 port_id;
330
331 mutex_lock(&ft_lport_lock);
332 lport = sess->tport->lport;
333 port_id = sess->port_id;
334 if (port_id == -1) {
335 mutex_unlock(&ft_lport_lock);
336 return;
337 }
338 FT_SESS_DBG("port_id %x\n", port_id);
339 ft_sess_unhash(sess);
340 mutex_unlock(&ft_lport_lock);
341 transport_deregister_session_configfs(se_sess);
342 ft_sess_put(sess);
343 /* XXX Send LOGO or PRLO */
344 synchronize_rcu(); /* let transport deregister happen */
345}
346
347void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
348{
349 struct ft_sess *sess = se_sess->fabric_sess_ptr;
350
351 FT_SESS_DBG("port_id %x\n", sess->port_id);
352}
353
354int ft_sess_logged_in(struct se_session *se_sess)
355{
356 struct ft_sess *sess = se_sess->fabric_sess_ptr;
357
358 return sess->port_id != -1;
359}
360
361u32 ft_sess_get_index(struct se_session *se_sess)
362{
363 struct ft_sess *sess = se_sess->fabric_sess_ptr;
364
365 return sess->port_id; /* XXX TBD probably not what is needed */
366}
367
368u32 ft_sess_get_port_name(struct se_session *se_sess,
369 unsigned char *buf, u32 len)
370{
371 struct ft_sess *sess = se_sess->fabric_sess_ptr;
372
373 return ft_format_wwn(buf, len, sess->port_name);
374}
375
376void ft_sess_set_erl0(struct se_session *se_sess)
377{
378 /* XXX TBD called when out of memory */
379}
380
381/*
382 * libfc ops involving sessions.
383 */
384
385static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
386 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
387{
388 struct ft_tport *tport;
389 struct ft_sess *sess;
390 struct ft_node_acl *acl;
391 u32 fcp_parm;
392
393 tport = ft_tport_create(rdata->local_port);
394 if (!tport)
395 return 0; /* not a target for this local port */
396
397 acl = ft_acl_get(tport->tpg, rdata);
398 if (!acl)
399 return 0;
400
401 if (!rspp)
402 goto fill;
403
404 if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
405 return FC_SPP_RESP_NO_PA;
406
407 /*
408 * If both target and initiator bits are off, the SPP is invalid.
409 */
410 fcp_parm = ntohl(rspp->spp_params);
411 if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
412 return FC_SPP_RESP_INVL;
413
414 /*
415 * Create session (image pair) only if requested by
416 * EST_IMG_PAIR flag and if the requestor is an initiator.
417 */
418 if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
419 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
420 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
421 return FC_SPP_RESP_CONF;
422 sess = ft_sess_create(tport, rdata->ids.port_id, acl);
423 if (!sess)
424 return FC_SPP_RESP_RES;
425 if (!sess->params)
426 rdata->prli_count++;
427 sess->params = fcp_parm;
428 sess->port_name = rdata->ids.port_name;
429 sess->max_frame = rdata->maxframe_size;
430
431 /* XXX TBD - clearing actions. unit attn, see 4.10 */
432 }
433
434 /*
435 * OR our service parameters in with those of any other provider (initiator).
436 * TBD XXX - indicate RETRY capability?
437 */
438fill:
439 fcp_parm = ntohl(spp->spp_params);
440 spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
441 return FC_SPP_RESP_ACK;
442}
443
444/**
445 * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
446 * @rdata: remote port private
447 * @spp_len: service parameter page length
448 * @rspp: received service parameter page (NULL for outgoing PRLI)
449 * @spp: response service parameter page
450 *
451 * Returns spp response code.
452 */
453static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
454 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
455{
456 int ret;
457
458 mutex_lock(&ft_lport_lock);
459 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
460 mutex_unlock(&ft_lport_lock);
461 FT_SESS_DBG("port_id %x flags %x ret %x\n",
462 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
463 return ret;
464}
465
466static void ft_sess_rcu_free(struct rcu_head *rcu)
467{
468 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
469
470 transport_deregister_session(sess->se_sess);
471 kfree(sess);
472}
473
474static void ft_sess_free(struct kref *kref)
475{
476 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
477
478 call_rcu(&sess->rcu, ft_sess_rcu_free);
479}
480
481void ft_sess_put(struct ft_sess *sess)
482{
483 int sess_held = atomic_read(&sess->kref.refcount);
484
485 BUG_ON(!sess_held);
486 kref_put(&sess->kref, ft_sess_free);
487}
488
489static void ft_prlo(struct fc_rport_priv *rdata)
490{
491 struct ft_sess *sess;
492 struct ft_tport *tport;
493
494 mutex_lock(&ft_lport_lock);
495 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
496 if (!tport) {
497 mutex_unlock(&ft_lport_lock);
498 return;
499 }
500 sess = ft_sess_delete(tport, rdata->ids.port_id);
501 if (!sess) {
502 mutex_unlock(&ft_lport_lock);
503 return;
504 }
505 mutex_unlock(&ft_lport_lock);
506 transport_deregister_session_configfs(sess->se_sess);
507 ft_sess_put(sess); /* release from table */
508 rdata->prli_count--;
509 /* XXX TBD - clearing actions. unit attn, see 4.10 */
510}
511
512/*
513 * Handle incoming FCP request.
514 * Caller has verified that the frame is type FCP.
515 */
516static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
517{
518 struct ft_sess *sess;
519 u32 sid = fc_frame_sid(fp);
520
521 FT_SESS_DBG("sid %x\n", sid);
522
523 sess = ft_sess_get(lport, sid);
524 if (!sess) {
525 FT_SESS_DBG("sid %x sess lookup failed\n", sid);
526 /* TBD XXX - if FCP_CMND, send PRLO */
527 fc_frame_free(fp);
528 return;
529 }
530 ft_recv_req(sess, fp); /* must do ft_sess_put() */
531}
532
533/*
534 * Provider ops for libfc.
535 */
536struct fc4_prov ft_prov = {
537 .prli = ft_prli,
538 .prlo = ft_prlo,
539 .recv = ft_recv,
540 .module = THIS_MODULE,
541};
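For reference, below is a minimal userspace sketch of the session reference-count lifetime implemented above by ft_sess_create(), ft_sess_get() and ft_sess_put(): the hash table holds one reference, each lookup takes another, and the final put frees the object. C11 atomics stand in for struct kref and the RCU-deferred free is omitted; all names (sketch_sess and friends) are illustrative only.

/* Sketch of the table-holds-one, lookup-takes-one refcount pattern. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_sess {
	atomic_int refcount;		/* stands in for struct kref */
	unsigned int port_id;
};

static struct sketch_sess *sketch_sess_create(unsigned int port_id)
{
	struct sketch_sess *sess = calloc(1, sizeof(*sess));

	if (!sess)
		return NULL;
	atomic_init(&sess->refcount, 1);	/* reference held by the hash table */
	sess->port_id = port_id;
	return sess;
}

static struct sketch_sess *sketch_sess_get(struct sketch_sess *sess)
{
	atomic_fetch_add(&sess->refcount, 1);	/* like kref_get() in ft_sess_get() */
	return sess;
}

static void sketch_sess_put(struct sketch_sess *sess)
{
	/* like kref_put(); the real release callback defers kfree() via RCU */
	if (atomic_fetch_sub(&sess->refcount, 1) == 1)
		free(sess);
}

int main(void)
{
	struct sketch_sess *sess = sketch_sess_create(0x010203);
	struct sketch_sess *found;

	if (!sess)
		return 1;
	found = sketch_sess_get(sess);	/* lookup reference */
	printf("found port_id %x\n", found->port_id);
	sketch_sess_put(found);		/* drop the lookup reference */
	sketch_sess_put(sess);		/* drop the table reference; frees */
	return 0;
}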