aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/tcm_fc/Kconfig5
-rw-r--r--drivers/target/tcm_fc/Makefile15
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h215
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c696
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c677
-rw-r--r--drivers/target/tcm_fc/tfc_io.c374
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c541
9 files changed, 2526 insertions, 0 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 9ef2dbbfa62b..5cb0f0ef6af0 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -30,5 +30,6 @@ config TCM_PSCSI
30 passthrough access to Linux/SCSI device 30 passthrough access to Linux/SCSI device
31 31
32source "drivers/target/loopback/Kconfig" 32source "drivers/target/loopback/Kconfig"
33source "drivers/target/tcm_fc/Kconfig"
33 34
34endif 35endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 1178bbfc68fe..21df808a992c 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,3 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
24 24
25# Fabric modules 25# Fabric modules
26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ 26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
27
28obj-$(CONFIG_TCM_FC) += tcm_fc/
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 000000000000..40caf458e89e
--- /dev/null
+++ b/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
1config TCM_FC
2 tristate "TCM_FC fabric Plugin"
3 depends on LIBFC
4 help
5 Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
new file mode 100644
index 000000000000..7a5c2b64cf65
--- /dev/null
+++ b/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,15 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
2 -I$(srctree)/drivers/scsi/ \
3 -I$(srctree)/include/scsi/ \
4 -I$(srctree)/drivers/target/tcm_fc/
5
6tcm_fc-y += tfc_cmd.o \
7 tfc_conf.o \
8 tfc_io.o \
9 tfc_sess.o
10
11obj-$(CONFIG_TCM_FC) += tcm_fc.o
12
13ifdef CONFIGFS_TCM_FC_DEBUG
14EXTRA_CFLAGS += -DTCM_FC_DEBUG
15endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 000000000000..defff32b7880
--- /dev/null
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __TCM_FC_H__
18#define __TCM_FC_H__
19
20#define FT_VERSION "0.3"
21
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25
26/*
27 * Debug options.
28 */
29#define FT_DEBUG_CONF 0x01 /* configuration messages */
30#define FT_DEBUG_SESS 0x02 /* session messages */
31#define FT_DEBUG_TM 0x04 /* TM operations */
32#define FT_DEBUG_IO 0x08 /* I/O commands */
33#define FT_DEBUG_DATA 0x10 /* Data transfer */
34
35extern unsigned int ft_debug_logging; /* debug options */
36
37#define FT_DEBUG(mask, fmt, args...) \
38 do { \
39 if (ft_debug_logging & (mask)) \
40 printk(KERN_INFO "tcm_fc: %s: " fmt, \
41 __func__, ##args); \
42 } while (0)
43
44#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
45#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
46#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
47#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
48#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
49
50struct ft_transport_id {
51 __u8 format;
52 __u8 __resvd1[7];
53 __u8 wwpn[8];
54 __u8 __resvd2[8];
55} __attribute__((__packed__));
56
57/*
58 * Session (remote port).
59 */
60struct ft_sess {
61 u32 port_id; /* for hash lookup use only */
62 u32 params;
63 u16 max_frame; /* maximum frame size */
64 u64 port_name; /* port name for transport ID */
65 struct ft_tport *tport;
66 struct se_session *se_sess;
67 struct hlist_node hash; /* linkage in ft_sess_hash table */
68 struct rcu_head rcu;
69 struct kref kref; /* ref for hash and outstanding I/Os */
70};
71
72/*
73 * Hash table of sessions per local port.
74 * Hash lookup by remote port FC_ID.
75 */
76#define FT_SESS_HASH_BITS 6
77#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
78
79/*
80 * Per local port data.
81 * This is created only after a TPG exists that allows target function
82 * for the local port. If the TPG exists, this is allocated when
83 * we're notified that the local port has been created, or when
84 * the first PRLI provider callback is received.
85 */
86struct ft_tport {
87 struct fc_lport *lport;
88 struct ft_tpg *tpg; /* NULL if TPG deleted before tport */
89 u32 sess_count; /* number of sessions in hash */
90 struct rcu_head rcu;
91 struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
92};
93
94/*
95 * Node ID and authentication.
96 */
97struct ft_node_auth {
98 u64 port_name;
99 u64 node_name;
100};
101
102/*
103 * Node ACL for FC remote port session.
104 */
105struct ft_node_acl {
106 struct ft_node_auth node_auth;
107 struct se_node_acl se_node_acl;
108};
109
110struct ft_lun {
111 u32 index;
112 char name[FT_LUN_NAMELEN];
113};
114
115/*
116 * Target portal group (local port).
117 */
118struct ft_tpg {
119 u32 index;
120 struct ft_lport_acl *lport_acl;
121 struct ft_tport *tport; /* active tport or NULL */
122 struct list_head list; /* linkage in ft_lport_acl tpg_list */
123 struct list_head lun_list; /* head of LUNs */
124 struct se_portal_group se_tpg;
125 struct task_struct *thread; /* processing thread */
126 struct se_queue_obj qobj; /* queue for processing thread */
127};
128
129struct ft_lport_acl {
130 u64 wwpn;
131 char name[FT_NAMELEN];
132 struct list_head list;
133 struct list_head tpg_list;
134 struct se_wwn fc_lport_wwn;
135};
136
137enum ft_cmd_state {
138 FC_CMD_ST_NEW = 0,
139 FC_CMD_ST_REJ
140};
141
142/*
143 * Commands
144 */
145struct ft_cmd {
146 enum ft_cmd_state state;
147 u16 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
151 struct fc_frame *req_frame;
152 unsigned char *cdb; /* pointer to CDB inside frame */
153 u32 write_data_len; /* data received on writes */
154 struct se_queue_req se_req;
155 /* Local sense buffer */
156 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
157 u32 was_ddp_setup:1; /* Set only if ddp is setup */
158 struct scatterlist *sg; /* Set only if DDP is setup */
159 u32 sg_cnt; /* No. of item in scatterlist */
160};
161
162extern struct list_head ft_lport_list;
163extern struct mutex ft_lport_lock;
164extern struct fc4_prov ft_prov;
165extern struct target_fabric_configfs *ft_configfs;
166
167/*
168 * Fabric methods.
169 */
170
171/*
172 * Session ops.
173 */
174void ft_sess_put(struct ft_sess *);
175int ft_sess_shutdown(struct se_session *);
176void ft_sess_close(struct se_session *);
177void ft_sess_stop(struct se_session *, int, int);
178int ft_sess_logged_in(struct se_session *);
179u32 ft_sess_get_index(struct se_session *);
180u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
181void ft_sess_set_erl0(struct se_session *);
182
183void ft_lport_add(struct fc_lport *, void *);
184void ft_lport_del(struct fc_lport *, void *);
185int ft_lport_notify(struct notifier_block *, unsigned long, void *);
186
187/*
188 * IO methods.
189 */
190void ft_check_stop_free(struct se_cmd *);
191void ft_release_cmd(struct se_cmd *);
192int ft_queue_status(struct se_cmd *);
193int ft_queue_data_in(struct se_cmd *);
194int ft_write_pending(struct se_cmd *);
195int ft_write_pending_status(struct se_cmd *);
196u32 ft_get_task_tag(struct se_cmd *);
197int ft_get_cmd_state(struct se_cmd *);
198void ft_new_cmd_failure(struct se_cmd *);
199int ft_queue_tm_resp(struct se_cmd *);
200int ft_is_state_remove(struct se_cmd *);
201
202/*
203 * other internal functions.
204 */
205int ft_thread(void *);
206void ft_recv_req(struct ft_sess *, struct fc_frame *);
207struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
208struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
209
210void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
211void ft_dump_cmd(struct ft_cmd *, const char *caller);
212
213ssize_t ft_format_wwn(char *, size_t, u64);
214
215#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 000000000000..49e51778f733
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,696 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <asm/unaligned.h>
34#include <scsi/scsi.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/libfc.h>
39#include <scsi/fc_encode.h>
40
41#include <target/target_core_base.h>
42#include <target/target_core_transport.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_configfs.h>
47#include <target/target_core_base.h>
48#include <target/target_core_tmr.h>
49#include <target/configfs_macros.h>
50
51#include "tcm_fc.h"
52
53/*
54 * Dump cmd state for debugging.
55 */
56void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
57{
58 struct fc_exch *ep;
59 struct fc_seq *sp;
60 struct se_cmd *se_cmd;
61 struct se_mem *mem;
62 struct se_transport_task *task;
63
64 if (!(ft_debug_logging & FT_DEBUG_IO))
65 return;
66
67 se_cmd = &cmd->se_cmd;
68 printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
69 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
70 printk(KERN_INFO "%s: cmd %p cdb %p\n",
71 caller, cmd, cmd->cdb);
72 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
73
74 task = T_TASK(se_cmd);
75 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
76 caller, cmd, task, task->t_tasks_se_num,
77 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
78 if (task->t_mem_list)
79 list_for_each_entry(mem, task->t_mem_list, se_list)
80 printk(KERN_INFO "%s: cmd %p mem %p page %p "
81 "len 0x%x off 0x%x\n",
82 caller, cmd, mem,
83 mem->se_page, mem->se_len, mem->se_off);
84 sp = cmd->seq;
85 if (sp) {
86 ep = fc_seq_exch(sp);
87 printk(KERN_INFO "%s: cmd %p sid %x did %x "
88 "ox_id %x rx_id %x seq_id %x e_stat %x\n",
89 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
90 sp->id, ep->esb_stat);
91 }
92 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
93 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
94}
95
96/*
97 * Get LUN from CDB.
98 */
99static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
100{
101 u64 lun;
102
103 lun = lunp[1];
104 switch (lunp[0] >> 6) {
105 case 0:
106 break;
107 case 1:
108 lun |= (lunp[0] & 0x3f) << 8;
109 break;
110 default:
111 return -1;
112 }
113 if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
114 return -1;
115 cmd->lun = lun;
116 return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
117}
118
119static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
120{
121 struct se_queue_obj *qobj;
122 unsigned long flags;
123
124 qobj = &sess->tport->tpg->qobj;
125 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
126 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
127 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
128 atomic_inc(&qobj->queue_cnt);
129 wake_up_interruptible(&qobj->thread_wq);
130}
131
132static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
133{
134 unsigned long flags;
135 struct se_queue_req *qr;
136
137 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
138 if (list_empty(&qobj->qobj_list)) {
139 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
140 return NULL;
141 }
142 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
143 list_del(&qr->qr_list);
144 atomic_dec(&qobj->queue_cnt);
145 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
146 return container_of(qr, struct ft_cmd, se_req);
147}
148
149static void ft_free_cmd(struct ft_cmd *cmd)
150{
151 struct fc_frame *fp;
152 struct fc_lport *lport;
153
154 if (!cmd)
155 return;
156 fp = cmd->req_frame;
157 lport = fr_dev(fp);
158 if (fr_seq(fp))
159 lport->tt.seq_release(fr_seq(fp));
160 fc_frame_free(fp);
161 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
162 kfree(cmd);
163}
164
165void ft_release_cmd(struct se_cmd *se_cmd)
166{
167 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
168
169 ft_free_cmd(cmd);
170}
171
172void ft_check_stop_free(struct se_cmd *se_cmd)
173{
174 transport_generic_free_cmd(se_cmd, 0, 1, 0);
175}
176
177/*
178 * Send response.
179 */
180int ft_queue_status(struct se_cmd *se_cmd)
181{
182 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
183 struct fc_frame *fp;
184 struct fcp_resp_with_ext *fcp;
185 struct fc_lport *lport;
186 struct fc_exch *ep;
187 size_t len;
188
189 ft_dump_cmd(cmd, __func__);
190 ep = fc_seq_exch(cmd->seq);
191 lport = ep->lp;
192 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
193 fp = fc_frame_alloc(lport, len);
194 if (!fp) {
195 /* XXX shouldn't just drop it - requeue and retry? */
196 return 0;
197 }
198 fcp = fc_frame_payload_get(fp, len);
199 memset(fcp, 0, len);
200 fcp->resp.fr_status = se_cmd->scsi_status;
201
202 len = se_cmd->scsi_sense_length;
203 if (len) {
204 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
205 fcp->ext.fr_sns_len = htonl(len);
206 memcpy((fcp + 1), se_cmd->sense_buffer, len);
207 }
208
209 /*
210 * Test underflow and overflow with one mask. Usually both are off.
211 * Bidirectional commands are not handled yet.
212 */
213 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
214 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
215 fcp->resp.fr_flags |= FCP_RESID_OVER;
216 else
217 fcp->resp.fr_flags |= FCP_RESID_UNDER;
218 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
219 }
220
221 /*
222 * Send response.
223 */
224 cmd->seq = lport->tt.seq_start_next(cmd->seq);
225 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
226 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
227
228 lport->tt.seq_send(lport, cmd->seq, fp);
229 lport->tt.exch_done(cmd->seq);
230 return 0;
231}
232
233int ft_write_pending_status(struct se_cmd *se_cmd)
234{
235 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
236
237 return cmd->write_data_len != se_cmd->data_length;
238}
239
240/*
241 * Send TX_RDY (transfer ready).
242 */
243int ft_write_pending(struct se_cmd *se_cmd)
244{
245 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
246 struct fc_frame *fp;
247 struct fcp_txrdy *txrdy;
248 struct fc_lport *lport;
249 struct fc_exch *ep;
250 struct fc_frame_header *fh;
251 u32 f_ctl;
252
253 ft_dump_cmd(cmd, __func__);
254
255 ep = fc_seq_exch(cmd->seq);
256 lport = ep->lp;
257 fp = fc_frame_alloc(lport, sizeof(*txrdy));
258 if (!fp)
259 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
260
261 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
262 memset(txrdy, 0, sizeof(*txrdy));
263 txrdy->ft_burst_len = htonl(se_cmd->data_length);
264
265 cmd->seq = lport->tt.seq_start_next(cmd->seq);
266 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
267 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
268
269 fh = fc_frame_header_get(fp);
270 f_ctl = ntoh24(fh->fh_f_ctl);
271
272 /* Only if it is 'Exchange Responder' */
273 if (f_ctl & FC_FC_EX_CTX) {
274 /* Target is 'exchange responder' and sending XFER_READY
275 * to 'exchange initiator (initiator)'
276 */
277 if ((ep->xid <= lport->lro_xid) &&
278 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
279 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
280 /*
281 * Map se_mem list to scatterlist, so that
282 * DDP can be setup. DDP setup function require
283 * scatterlist. se_mem_list is internal to
284 * TCM/LIO target
285 */
286 transport_do_task_sg_chain(se_cmd);
287 cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
288 cmd->sg_cnt =
289 T_TASK(se_cmd)->t_tasks_sg_chained_no;
290 }
291 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
292 cmd->sg, cmd->sg_cnt))
293 cmd->was_ddp_setup = 1;
294 }
295 }
296 lport->tt.seq_send(lport, cmd->seq, fp);
297 return 0;
298}
299
300u32 ft_get_task_tag(struct se_cmd *se_cmd)
301{
302 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
303
304 return fc_seq_exch(cmd->seq)->rxid;
305}
306
307int ft_get_cmd_state(struct se_cmd *se_cmd)
308{
309 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
310
311 return cmd->state;
312}
313
314int ft_is_state_remove(struct se_cmd *se_cmd)
315{
316 return 0; /* XXX TBD */
317}
318
319void ft_new_cmd_failure(struct se_cmd *se_cmd)
320{
321 /* XXX TBD */
322 printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
323}
324
325/*
326 * FC sequence response handler for follow-on sequences (data) and aborts.
327 */
328static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
329{
330 struct ft_cmd *cmd = arg;
331 struct fc_frame_header *fh;
332
333 if (IS_ERR(fp)) {
334 /* XXX need to find cmd if queued */
335 cmd->se_cmd.t_state = TRANSPORT_REMOVE;
336 cmd->seq = NULL;
337 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
338 return;
339 }
340
341 fh = fc_frame_header_get(fp);
342
343 switch (fh->fh_r_ctl) {
344 case FC_RCTL_DD_SOL_DATA: /* write data */
345 ft_recv_write_data(cmd, fp);
346 break;
347 case FC_RCTL_DD_UNSOL_CTL: /* command */
348 case FC_RCTL_DD_SOL_CTL: /* transfer ready */
349 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
350 default:
351 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
352 __func__, fh->fh_r_ctl);
353 fc_frame_free(fp);
354 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
355 break;
356 }
357}
358
359/*
360 * Send a FCP response including SCSI status and optional FCP rsp_code.
361 * status is SAM_STAT_GOOD (zero) iff code is valid.
362 * This is used in error cases, such as allocation failures.
363 */
364static void ft_send_resp_status(struct fc_lport *lport,
365 const struct fc_frame *rx_fp,
366 u32 status, enum fcp_resp_rsp_codes code)
367{
368 struct fc_frame *fp;
369 struct fc_seq *sp;
370 const struct fc_frame_header *fh;
371 size_t len;
372 struct fcp_resp_with_ext *fcp;
373 struct fcp_resp_rsp_info *info;
374
375 fh = fc_frame_header_get(rx_fp);
376 FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
377 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
378 len = sizeof(*fcp);
379 if (status == SAM_STAT_GOOD)
380 len += sizeof(*info);
381 fp = fc_frame_alloc(lport, len);
382 if (!fp)
383 return;
384 fcp = fc_frame_payload_get(fp, len);
385 memset(fcp, 0, len);
386 fcp->resp.fr_status = status;
387 if (status == SAM_STAT_GOOD) {
388 fcp->ext.fr_rsp_len = htonl(sizeof(*info));
389 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
390 info = (struct fcp_resp_rsp_info *)(fcp + 1);
391 info->rsp_code = code;
392 }
393
394 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
395 sp = fr_seq(fp);
396 if (sp)
397 lport->tt.seq_send(lport, sp, fp);
398 else
399 lport->tt.frame_send(lport, fp);
400}
401
402/*
403 * Send error or task management response.
404 * Always frees the cmd and associated state.
405 */
406static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
407{
408 ft_send_resp_status(cmd->sess->tport->lport,
409 cmd->req_frame, SAM_STAT_GOOD, code);
410 ft_free_cmd(cmd);
411}
412
413/*
414 * Handle Task Management Request.
415 */
416static void ft_send_tm(struct ft_cmd *cmd)
417{
418 struct se_tmr_req *tmr;
419 struct fcp_cmnd *fcp;
420 u8 tm_func;
421
422 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
423
424 switch (fcp->fc_tm_flags) {
425 case FCP_TMF_LUN_RESET:
426 tm_func = TMR_LUN_RESET;
427 if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
428 ft_dump_cmd(cmd, __func__);
429 transport_send_check_condition_and_sense(&cmd->se_cmd,
430 cmd->se_cmd.scsi_sense_reason, 0);
431 ft_sess_put(cmd->sess);
432 return;
433 }
434 break;
435 case FCP_TMF_TGT_RESET:
436 tm_func = TMR_TARGET_WARM_RESET;
437 break;
438 case FCP_TMF_CLR_TASK_SET:
439 tm_func = TMR_CLEAR_TASK_SET;
440 break;
441 case FCP_TMF_ABT_TASK_SET:
442 tm_func = TMR_ABORT_TASK_SET;
443 break;
444 case FCP_TMF_CLR_ACA:
445 tm_func = TMR_CLEAR_ACA;
446 break;
447 default:
448 /*
449 * FCP4r01 indicates having a combination of
450 * tm_flags set is invalid.
451 */
452 FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
453 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
454 return;
455 }
456
457 FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
458 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
459 if (!tmr) {
460 FT_TM_DBG("alloc failed\n");
461 ft_send_resp_code(cmd, FCP_TMF_FAILED);
462 return;
463 }
464 cmd->se_cmd.se_tmr_req = tmr;
465 transport_generic_handle_tmr(&cmd->se_cmd);
466}
467
468/*
469 * Send status from completed task management request.
470 */
471int ft_queue_tm_resp(struct se_cmd *se_cmd)
472{
473 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
474 struct se_tmr_req *tmr = se_cmd->se_tmr_req;
475 enum fcp_resp_rsp_codes code;
476
477 switch (tmr->response) {
478 case TMR_FUNCTION_COMPLETE:
479 code = FCP_TMF_CMPL;
480 break;
481 case TMR_LUN_DOES_NOT_EXIST:
482 code = FCP_TMF_INVALID_LUN;
483 break;
484 case TMR_FUNCTION_REJECTED:
485 code = FCP_TMF_REJECTED;
486 break;
487 case TMR_TASK_DOES_NOT_EXIST:
488 case TMR_TASK_STILL_ALLEGIANT:
489 case TMR_TASK_FAILOVER_NOT_SUPPORTED:
490 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
491 case TMR_FUNCTION_AUTHORIZATION_FAILED:
492 default:
493 code = FCP_TMF_FAILED;
494 break;
495 }
496 FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
497 tmr->function, tmr->response, code);
498 ft_send_resp_code(cmd, code);
499 return 0;
500}
501
502/*
503 * Handle incoming FCP command.
504 */
505static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
506{
507 struct ft_cmd *cmd;
508 struct fc_lport *lport = sess->tport->lport;
509
510 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
511 if (!cmd)
512 goto busy;
513 cmd->sess = sess;
514 cmd->seq = lport->tt.seq_assign(lport, fp);
515 if (!cmd->seq) {
516 kfree(cmd);
517 goto busy;
518 }
519 cmd->req_frame = fp; /* hold frame during cmd */
520 ft_queue_cmd(sess, cmd);
521 return;
522
523busy:
524 FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
525 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
526 fc_frame_free(fp);
527 ft_sess_put(sess); /* undo get from lookup */
528}
529
530
531/*
532 * Handle incoming FCP frame.
533 * Caller has verified that the frame is type FCP.
534 */
535void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
536{
537 struct fc_frame_header *fh = fc_frame_header_get(fp);
538
539 switch (fh->fh_r_ctl) {
540 case FC_RCTL_DD_UNSOL_CMD: /* command */
541 ft_recv_cmd(sess, fp);
542 break;
543 case FC_RCTL_DD_SOL_DATA: /* write data */
544 case FC_RCTL_DD_UNSOL_CTL:
545 case FC_RCTL_DD_SOL_CTL:
546 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
547 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
548 default:
549 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
550 __func__, fh->fh_r_ctl);
551 fc_frame_free(fp);
552 ft_sess_put(sess); /* undo get from lookup */
553 break;
554 }
555}
556
557/*
558 * Send new command to target.
559 */
560static void ft_send_cmd(struct ft_cmd *cmd)
561{
562 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
563 struct se_cmd *se_cmd;
564 struct fcp_cmnd *fcp;
565 int data_dir;
566 u32 data_len;
567 int task_attr;
568 int ret;
569
570 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
571 if (!fcp)
572 goto err;
573
574 if (fcp->fc_flags & FCP_CFL_LEN_MASK)
575 goto err; /* not handling longer CDBs yet */
576
577 if (fcp->fc_tm_flags) {
578 task_attr = FCP_PTA_SIMPLE;
579 data_dir = DMA_NONE;
580 data_len = 0;
581 } else {
582 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
583 case 0:
584 data_dir = DMA_NONE;
585 break;
586 case FCP_CFL_RDDATA:
587 data_dir = DMA_FROM_DEVICE;
588 break;
589 case FCP_CFL_WRDATA:
590 data_dir = DMA_TO_DEVICE;
591 break;
592 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
593 goto err; /* TBD not supported by tcm_fc yet */
594 }
595
596 /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */
597 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
598 data_len = ntohl(fcp->fc_dl);
599 cmd->cdb = fcp->fc_cdb;
600 }
601
602 se_cmd = &cmd->se_cmd;
603 /*
604 * Initialize struct se_cmd descriptor from target_core_mod
605 * infrastructure
606 */
607 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
608 data_len, data_dir, task_attr,
609 &cmd->ft_sense_buffer[0]);
610 /*
611 * Check for FCP task management flags
612 */
613 if (fcp->fc_tm_flags) {
614 ft_send_tm(cmd);
615 return;
616 }
617
618 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
619
620 ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
621 if (ret < 0) {
622 ft_dump_cmd(cmd, __func__);
623 transport_send_check_condition_and_sense(&cmd->se_cmd,
624 cmd->se_cmd.scsi_sense_reason, 0);
625 return;
626 }
627
628 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
629
630 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
631 ft_dump_cmd(cmd, __func__);
632
633 if (ret == -1) {
634 transport_send_check_condition_and_sense(se_cmd,
635 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
636 transport_generic_free_cmd(se_cmd, 0, 1, 0);
637 return;
638 }
639 if (ret == -2) {
640 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
641 ft_queue_status(se_cmd);
642 else
643 transport_send_check_condition_and_sense(se_cmd,
644 se_cmd->scsi_sense_reason, 0);
645 transport_generic_free_cmd(se_cmd, 0, 1, 0);
646 return;
647 }
648 transport_generic_handle_cdb(se_cmd);
649 return;
650
651err:
652 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
653 return;
654}
655
656/*
657 * Handle request in the command thread.
658 */
659static void ft_exec_req(struct ft_cmd *cmd)
660{
661 FT_IO_DBG("cmd state %x\n", cmd->state);
662 switch (cmd->state) {
663 case FC_CMD_ST_NEW:
664 ft_send_cmd(cmd);
665 break;
666 default:
667 break;
668 }
669}
670
671/*
672 * Processing thread.
673 * Currently one thread per tpg.
674 */
675int ft_thread(void *arg)
676{
677 struct ft_tpg *tpg = arg;
678 struct se_queue_obj *qobj = &tpg->qobj;
679 struct ft_cmd *cmd;
680 int ret;
681
682 set_user_nice(current, -20);
683
684 while (!kthread_should_stop()) {
685 ret = wait_event_interruptible(qobj->thread_wq,
686 atomic_read(&qobj->queue_cnt) || kthread_should_stop());
687 if (ret < 0 || kthread_should_stop())
688 goto out;
689 cmd = ft_dequeue_cmd(qobj);
690 if (cmd)
691 ft_exec_req(cmd);
692 }
693
694out:
695 return 0;
696}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 000000000000..fcdbbffe88cc
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,677 @@
1/*******************************************************************************
2 * Filename: tcm_fc.c
3 *
4 * This file contains the configfs implementation for TCM_fc fabric node.
5 * Based on tcm_loop_configfs.c
6 *
7 * Copyright (c) 2010 Cisco Systems, Inc.
8 * Copyright (c) 2009,2010 Rising Tide, Inc.
9 * Copyright (c) 2009,2010 Linux-iSCSI.org
10 *
11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/kthread.h>
32#include <linux/types.h>
33#include <linux/string.h>
34#include <linux/configfs.h>
35#include <linux/ctype.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_fabric_lib.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_configfs.h>
51#include <target/target_core_base.h>
52#include <target/configfs_macros.h>
53
54#include "tcm_fc.h"
55
56struct target_fabric_configfs *ft_configfs;
57
58LIST_HEAD(ft_lport_list);
59DEFINE_MUTEX(ft_lport_lock);
60
61unsigned int ft_debug_logging;
62module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
63MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
64
65/*
66 * Parse WWN.
67 * If strict, we require lower-case hex and colon separators to be sure
68 * the name is the same as what would be generated by ft_format_wwn()
69 * so the name and wwn are mapped one-to-one.
70 */
/*
 * Parse an ASCII WWN into a 64-bit value.
 *
 * @name:   NUL- (optionally newline-) terminated input string.
 * @wwn:    receives the parsed value on success.
 * @strict: when non-zero, require exactly the canonical form produced
 *          by ft_format_wwn(): lower-case hex, colon after every byte.
 *
 * Returns the number of characters consumed on success, -1 on error.
 * The err codes below are only for the debug trace.
 */
static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 nibble;
	u32 byte = 0;	/* completed byte count (strict mode only) */
	u32 pos = 0;	/* nibble position within the current byte */
	u32 err;

	*wwn = 0;
	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
		c = *cp;
		/* tolerate a trailing newline from configfs writes */
		if (c == '\n' && cp[1] == '\0')
			continue;
		/*
		 * In strict mode, every third character (after two hex
		 * digits) must be a colon, for the first 7 byte boundaries.
		 * Note both pos++ and byte++ side effects occur here.
		 */
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			/* strict form must contain exactly 8 bytes */
			if (strict && byte != 8)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c) && (islower(c) || !strict))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
	}
	err = 4;	/* input too long / unterminated */
fail:
	FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
		    err, cp - name, pos, byte);
	return -1;
}
113
114ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
115{
116 u8 b[8];
117
118 put_unaligned_be64(wwn, b);
119 return snprintf(buf, len,
120 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
121 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
122}
123
124static ssize_t ft_wwn_show(void *arg, char *buf)
125{
126 u64 *wwn = arg;
127 ssize_t len;
128
129 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
130 buf[len++] = '\n';
131 return len;
132}
133
134static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
135{
136 ssize_t ret;
137 u64 wwn;
138
139 ret = ft_parse_wwn(buf, &wwn, 0);
140 if (ret > 0)
141 *(u64 *)arg = wwn;
142 return ret;
143}
144
145/*
146 * ACL auth ops.
147 */
148
149static ssize_t ft_nacl_show_port_name(
150 struct se_node_acl *se_nacl,
151 char *page)
152{
153 struct ft_node_acl *acl = container_of(se_nacl,
154 struct ft_node_acl, se_node_acl);
155
156 return ft_wwn_show(&acl->node_auth.port_name, page);
157}
158
159static ssize_t ft_nacl_store_port_name(
160 struct se_node_acl *se_nacl,
161 const char *page,
162 size_t count)
163{
164 struct ft_node_acl *acl = container_of(se_nacl,
165 struct ft_node_acl, se_node_acl);
166
167 return ft_wwn_store(&acl->node_auth.port_name, page, count);
168}
169
170TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
171
172static ssize_t ft_nacl_show_node_name(
173 struct se_node_acl *se_nacl,
174 char *page)
175{
176 struct ft_node_acl *acl = container_of(se_nacl,
177 struct ft_node_acl, se_node_acl);
178
179 return ft_wwn_show(&acl->node_auth.node_name, page);
180}
181
182static ssize_t ft_nacl_store_node_name(
183 struct se_node_acl *se_nacl,
184 const char *page,
185 size_t count)
186{
187 struct ft_node_acl *acl = container_of(se_nacl,
188 struct ft_node_acl, se_node_acl);
189
190 return ft_wwn_store(&acl->node_auth.node_name, page, count);
191}
192
193TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
194
195static struct configfs_attribute *ft_nacl_base_attrs[] = {
196 &ft_nacl_port_name.attr,
197 &ft_nacl_node_name.attr,
198 NULL,
199};
200
201/*
202 * ACL ops.
203 */
204
205/*
206 * Add ACL for an initiator. The ACL is named arbitrarily.
207 * The port_name and/or node_name are attributes.
208 */
/*
 * Add an initiator ACL to the tpg.  The configfs directory name must be
 * the initiator's WWPN in strict (canonical) form; the port_name and/or
 * node_name can be adjusted afterwards via the attributes above.
 *
 * Returns the new se_node_acl or an ERR_PTR.
 */
static struct se_node_acl *ft_add_acl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct ft_node_acl *acl;
	struct ft_tpg *tpg;
	u64 wwpn;
	u32 q_depth;

	FT_CONF_DBG("add acl %s\n", name);
	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);

	/* name must be a strict-form WWPN so it round-trips ft_format_wwn() */
	if (ft_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
	if (!(acl))
		return ERR_PTR(-ENOMEM);
	acl->node_auth.port_name = wwpn;

	q_depth = 32;	/* XXX bogus default - get from tpg? */
	/*
	 * NOTE(review): if core_tpg_add_initiator_node_acl() fails, it is
	 * unclear from here whether the core frees 'acl' via the release
	 * callback or whether this leaks — verify against target core.
	 */
	return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
			&acl->se_node_acl, name, q_depth);
}
234
235static void ft_del_acl(struct se_node_acl *se_acl)
236{
237 struct se_portal_group *se_tpg = se_acl->se_tpg;
238 struct ft_tpg *tpg;
239 struct ft_node_acl *acl = container_of(se_acl,
240 struct ft_node_acl, se_node_acl);
241
242 FT_CONF_DBG("del acl %s\n",
243 config_item_name(&se_acl->acl_group.cg_item));
244
245 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
246 FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
247 acl, se_acl, tpg, &tpg->se_tpg);
248
249 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
250 kfree(acl);
251}
252
253struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
254{
255 struct ft_node_acl *found = NULL;
256 struct ft_node_acl *acl;
257 struct se_portal_group *se_tpg = &tpg->se_tpg;
258 struct se_node_acl *se_acl;
259
260 spin_lock_bh(&se_tpg->acl_node_lock);
261 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
262 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
263 FT_CONF_DBG("acl %p port_name %llx\n",
264 acl, (unsigned long long)acl->node_auth.port_name);
265 if (acl->node_auth.port_name == rdata->ids.port_name ||
266 acl->node_auth.node_name == rdata->ids.node_name) {
267 FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
268 (unsigned long long)rdata->ids.port_name);
269 found = acl;
270 /* XXX need to hold onto ACL */
271 break;
272 }
273 }
274 spin_unlock_bh(&se_tpg->acl_node_lock);
275 return found;
276}
277
278struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
279{
280 struct ft_node_acl *acl;
281
282 acl = kzalloc(sizeof(*acl), GFP_KERNEL);
283 if (!(acl)) {
284 printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
285 return NULL;
286 }
287 FT_CONF_DBG("acl %p\n", acl);
288 return &acl->se_node_acl;
289}
290
291static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
292 struct se_node_acl *se_acl)
293{
294 struct ft_node_acl *acl = container_of(se_acl,
295 struct ft_node_acl, se_node_acl);
296
297 FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
298 kfree(acl);
299}
300
301/*
302 * local_port port_group (tpg) ops.
303 */
304static struct se_portal_group *ft_add_tpg(
305 struct se_wwn *wwn,
306 struct config_group *group,
307 const char *name)
308{
309 struct ft_lport_acl *lacl;
310 struct ft_tpg *tpg;
311 unsigned long index;
312 int ret;
313
314 FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
315
316 /*
317 * Name must be "tpgt_" followed by the index.
318 */
319 if (strstr(name, "tpgt_") != name)
320 return NULL;
321 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
322 return NULL;
323
324 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
325 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
326 if (!tpg)
327 return NULL;
328 tpg->index = index;
329 tpg->lport_acl = lacl;
330 INIT_LIST_HEAD(&tpg->lun_list);
331 transport_init_queue_obj(&tpg->qobj);
332
333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
334 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
335 if (ret < 0) {
336 kfree(tpg);
337 return NULL;
338 }
339
340 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
341 if (IS_ERR(tpg->thread)) {
342 kfree(tpg);
343 return NULL;
344 }
345
346 mutex_lock(&ft_lport_lock);
347 list_add_tail(&tpg->list, &lacl->tpg_list);
348 mutex_unlock(&ft_lport_lock);
349
350 return &tpg->se_tpg;
351}
352
/*
 * Tear down a target portal group.
 * Ordering matters here: the processing thread is stopped before any
 * state is dismantled, and RCU readers of sessions are waited out
 * before the tport back-pointer is cleared under ft_lport_lock.
 */
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);

	FT_CONF_DBG("del tpg %s\n",
		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));

	/* stop the per-tpg command thread before tearing anything down */
	kthread_stop(tpg->thread);

	/* Wait for sessions to be freed thru RCU, for BUG_ON below */
	synchronize_rcu();

	mutex_lock(&ft_lport_lock);
	list_del(&tpg->list);
	if (tpg->tport) {
		/* break the tport <-> tpg linkage both ways */
		tpg->tport->tpg = NULL;
		tpg->tport = NULL;
	}
	mutex_unlock(&ft_lport_lock);

	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
376
377/*
378 * Verify that an lport is configured to use the tcm_fc module, and return
379 * the target port group that should be used.
380 *
381 * The caller holds ft_lport_lock.
382 */
383struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
384{
385 struct ft_lport_acl *lacl;
386 struct ft_tpg *tpg;
387
388 list_for_each_entry(lacl, &ft_lport_list, list) {
389 if (lacl->wwpn == lport->wwpn) {
390 list_for_each_entry(tpg, &lacl->tpg_list, list)
391 return tpg; /* XXX for now return first entry */
392 return NULL;
393 }
394 }
395 return NULL;
396}
397
398/*
399 * target config instance ops.
400 */
401
402/*
403 * Add lport to allowed config.
404 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
405 */
406static struct se_wwn *ft_add_lport(
407 struct target_fabric_configfs *tf,
408 struct config_group *group,
409 const char *name)
410{
411 struct ft_lport_acl *lacl;
412 struct ft_lport_acl *old_lacl;
413 u64 wwpn;
414
415 FT_CONF_DBG("add lport %s\n", name);
416 if (ft_parse_wwn(name, &wwpn, 1) < 0)
417 return NULL;
418 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
419 if (!lacl)
420 return NULL;
421 lacl->wwpn = wwpn;
422 INIT_LIST_HEAD(&lacl->tpg_list);
423
424 mutex_lock(&ft_lport_lock);
425 list_for_each_entry(old_lacl, &ft_lport_list, list) {
426 if (old_lacl->wwpn == wwpn) {
427 mutex_unlock(&ft_lport_lock);
428 kfree(lacl);
429 return NULL;
430 }
431 }
432 list_add_tail(&lacl->list, &ft_lport_list);
433 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
434 mutex_unlock(&ft_lport_lock);
435
436 return &lacl->fc_lport_wwn;
437}
438
439static void ft_del_lport(struct se_wwn *wwn)
440{
441 struct ft_lport_acl *lacl = container_of(wwn,
442 struct ft_lport_acl, fc_lport_wwn);
443
444 FT_CONF_DBG("del lport %s\n",
445 config_item_name(&wwn->wwn_group.cg_item));
446 mutex_lock(&ft_lport_lock);
447 list_del(&lacl->list);
448 mutex_unlock(&ft_lport_lock);
449
450 kfree(lacl);
451}
452
453static ssize_t ft_wwn_show_attr_version(
454 struct target_fabric_configfs *tf,
455 char *page)
456{
457 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
458 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
459}
460
461TF_WWN_ATTR_RO(ft, version);
462
463static struct configfs_attribute *ft_wwn_attrs[] = {
464 &ft_wwn_version.attr,
465 NULL,
466};
467
/* Fabric name used by the target core ("fc"). */
static char *ft_get_fabric_name(void)
{
	return "fc";
}
472
473static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
474{
475 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
476
477 return tpg->lport_acl->name;
478}
479
480static u16 ft_get_tag(struct se_portal_group *se_tpg)
481{
482 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
483
484 /*
485 * This tag is used when forming SCSI Name identifier in EVPD=1 0x83
486 * to represent the SCSI Target Port.
487 */
488 return tpg->index;
489}
490
491static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
492{
493 return 1;
494}
495
/* Shared "always false" answer for the demo-mode/write-protect checks. */
static int ft_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
500
/* No per-node attributes to set for this fabric. */
static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
{
}
504
505static u16 ft_get_fabric_sense_len(void)
506{
507 return 0;
508}
509
510static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
511{
512 return 0;
513}
514
515static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
516{
517 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
518
519 return tpg->index;
520}
521
/*
 * Pack a LUN index into the 8-byte SCSI LUN wire format.
 * Only single-level LUNs up to 255 are supported here (hence the
 * WARN_ON); the index lands in the second byte of the big-endian
 * LUN field.  NOTE(review): the caller expects the result
 * byte-swapped (see comment below) — verify against the core's usage.
 */
static u64 ft_pack_lun(unsigned int index)
{
	WARN_ON(index >= 256);
	/* Caller wants this byte-swapped */
	return cpu_to_le64((index & 0xff) << 8);
}
528
/* Fabric callback table handed to the target core at registration. */
static struct target_core_fabric_ops ft_fabric_ops = {
	/* identity / transport-ID helpers (FC generics from fabric_lib) */
	.get_fabric_name =		ft_get_fabric_name,
	.get_fabric_proto_ident =	fc_get_fabric_proto_ident,
	.tpg_get_wwn =			ft_get_fabric_wwn,
	.tpg_get_tag =			ft_get_tag,
	.tpg_get_default_depth =	ft_get_default_depth,
	.tpg_get_pr_transport_id =	fc_get_pr_transport_id,
	.tpg_get_pr_transport_id_len =	fc_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
	/* demo mode and write protect are all disabled */
	.tpg_check_demo_mode =		ft_check_false,
	.tpg_check_demo_mode_cache =	ft_check_false,
	.tpg_check_demo_mode_write_protect = ft_check_false,
	.tpg_check_prod_mode_write_protect = ft_check_false,
	.tpg_alloc_fabric_acl =		ft_tpg_alloc_fabric_acl,
	.tpg_release_fabric_acl =	ft_tpg_release_fabric_acl,
	.tpg_get_inst_index =		ft_tpg_get_inst_index,
	/* command lifecycle */
	.check_stop_free =		ft_check_stop_free,
	.release_cmd_to_pool =		ft_release_cmd,
	.release_cmd_direct =		ft_release_cmd,
	/* session lifecycle (tfc_sess.c) */
	.shutdown_session =		ft_sess_shutdown,
	.close_session =		ft_sess_close,
	.stop_session =			ft_sess_stop,
	.fall_back_to_erl0 =		ft_sess_set_erl0,
	.sess_logged_in =		ft_sess_logged_in,
	.sess_get_index =		ft_sess_get_index,
	.sess_get_initiator_sid =	NULL,
	/* data-path callbacks (tfc_cmd.c / tfc_io.c) */
	.write_pending =		ft_write_pending,
	.write_pending_status =		ft_write_pending_status,
	.set_default_node_attributes =	ft_set_default_node_attr,
	.get_task_tag =			ft_get_task_tag,
	.get_cmd_state =		ft_get_cmd_state,
	.new_cmd_failure =		ft_new_cmd_failure,
	.queue_data_in =		ft_queue_data_in,
	.queue_status =			ft_queue_status,
	.queue_tm_rsp =			ft_queue_tm_resp,
	.get_fabric_sense_len =		ft_get_fabric_sense_len,
	.set_fabric_sense_len =		ft_set_fabric_sense_len,
	.is_state_remove =		ft_is_state_remove,
	.pack_lun =			ft_pack_lun,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn =		&ft_add_lport,
	.fabric_drop_wwn =		&ft_del_lport,
	.fabric_make_tpg =		&ft_add_tpg,
	.fabric_drop_tpg =		&ft_del_tpg,
	.fabric_post_link =		NULL,
	.fabric_pre_unlink =		NULL,
	.fabric_make_np =		NULL,
	.fabric_drop_np =		NULL,
	.fabric_make_nodeacl =		&ft_add_acl,
	.fabric_drop_nodeacl =		&ft_del_acl,
};
583
584int ft_register_configfs(void)
585{
586 struct target_fabric_configfs *fabric;
587 int ret;
588
589 /*
590 * Register the top level struct config_item_type with TCM core
591 */
592 fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
593 if (!fabric) {
594 printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
595 __func__);
596 return -1;
597 }
598 fabric->tf_ops = ft_fabric_ops;
599
600 /* Allowing support for task_sg_chaining */
601 fabric->tf_ops.task_sg_chaining = 1;
602
603 /*
604 * Setup default attribute lists for various fabric->tf_cit_tmpl
605 */
606 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
607 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
608 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
609 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
610 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
611 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
612 ft_nacl_base_attrs;
613 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
614 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
615 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
616 /*
617 * register the fabric for use within TCM
618 */
619 ret = target_fabric_configfs_register(fabric);
620 if (ret < 0) {
621 FT_CONF_DBG("target_fabric_configfs_register() for"
622 " FC Target failed!\n");
623 printk(KERN_INFO
624 "%s: target_fabric_configfs_register() failed!\n",
625 __func__);
626 target_fabric_configfs_free(fabric);
627 return -1;
628 }
629
630 /*
631 * Setup our local pointer to *fabric.
632 */
633 ft_configfs = fabric;
634 return 0;
635}
636
637void ft_deregister_configfs(void)
638{
639 if (!ft_configfs)
640 return;
641 target_fabric_configfs_deregister(ft_configfs);
642 ft_configfs = NULL;
643}
644
/* Notifier for libfc lport creation/teardown events. */
static struct notifier_block ft_notifier = {
	.notifier_call = ft_lport_notify
};
648
649static int __init ft_init(void)
650{
651 if (ft_register_configfs())
652 return -1;
653 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
654 ft_deregister_configfs();
655 return -1;
656 }
657 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
658 fc_lport_iterate(ft_lport_add, NULL);
659 return 0;
660}
661
/*
 * Module exit: tear down in the reverse order of ft_init().
 * The notifier and FCP provider are removed first so no new
 * lports/sessions can arrive while we detach from existing ones.
 */
static void __exit ft_exit(void)
{
	blocking_notifier_chain_unregister(&fc_lport_notifier_head,
					   &ft_notifier);
	fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
	fc_lport_iterate(ft_lport_del, NULL);
	ft_deregister_configfs();
	/* wait out RCU readers before module text/data goes away */
	synchronize_rcu();
}
671
672#ifdef MODULE
673MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
674MODULE_LICENSE("GPL");
675module_init(ft_init);
676module_exit(ft_exit);
677#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 000000000000..4c3c0efbe13f
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
5 *
6 * Copyright (c) 2007 Intel Corporation. All rights reserved.
7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
8 * Copyright (c) 2008 Mike Christie
9 * Copyright (c) 2009 Rising Tide, Inc.
10 * Copyright (c) 2009 Linux-iSCSI.org
11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27/* XXX TBD some includes may be extraneous */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/version.h>
32#include <generated/utsrelease.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/hash.h>
42#include <asm/unaligned.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
47#include <scsi/libfc.h>
48#include <scsi/fc_encode.h>
49
50#include <target/target_core_base.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_device.h>
54#include <target/target_core_tpg.h>
55#include <target/target_core_configfs.h>
56#include <target/target_core_base.h>
57#include <target/configfs_macros.h>
58
59#include "tcm_fc.h"
60
61/*
62 * Deliver read data back to initiator.
63 * XXX TBD handle resource problems later.
64 */
/*
 * Deliver read data back to the initiator as one or more FC data frames.
 *
 * Walks the command's se_mem scatter list (or the linear t_task_buf when
 * there is no scatter list), packing pages into frames either by
 * attaching them as skb fragments (zero-copy, "use_sg" path) or by
 * memcpy into a linear frame.  Sends each frame as it fills, sets
 * END_SEQ on the last one, then queues the SCSI status.
 *
 * Returns 0 via ft_queue_status() on success, -ENOMEM if a frame
 * allocation fails mid-transfer.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_transport_task *task;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct se_mem *mem;
	size_t remaining;		/* bytes of read data still to send */
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off;			/* offset within current se_mem */
	u32 fh_off = 0;			/* relative offset put in frame hdr */
	u32 frame_off = 0;		/* running transfer offset */
	size_t frame_len = 0;		/* bytes left to pack in current frame */
	size_t mem_len;			/* bytes left in current se_mem */
	size_t tlen;			/* bytes moved this iteration */
	size_t off_in_page;
	struct page *page;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	/* start a new sequence for the solicited data phase */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	task = T_TASK(se_cmd);
	BUG_ON(!task);
	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry if any.
	 */
	if (task->t_tasks_se_num) {
		mem = list_first_entry(task->t_mem_list,
			 struct se_mem, se_list);
		mem_len = mem->se_len;
		mem_off = mem->se_off;
		page = mem->se_page;
	} else {
		/* linear buffer case: treat t_task_buf as one region */
		mem = NULL;
		mem_len = remaining;
		mem_off = 0;
		page = NULL;
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		if (!mem_len) {
			/* advance to the next scatter list element */
			BUG_ON(!mem);
			mem = list_entry(mem->se_list.next,
				struct se_mem, se_list);
			mem_len = min((size_t)mem->se_len, remaining);
			mem_off = mem->se_off;
			page = mem->se_page;
		}
		if (!frame_len) {
			/*
			 * If lport's has capability of Large Send Offload LSO)
			 * , then allow 'frame_len' to be as big as 'lso_max'
			 * if indicated transfer length is >= lport->lso_max
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Setup the frame's max payload which is used by base
			 * driver to indicate HW about max frame size, so that
			 * HW can do fragmentation appropriately based on
			 * "gso_max_size" of underline netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			if (!mem) {
				BUG_ON(!task->t_task_buf);
				page_addr = task->t_task_buf + mem_off;
				/*
				 * In this case, offset is 'offset_in_page' of
				 * (t_task_buf + mem_off) instead of 'mem_off'.
				 */
				off_in_page = offset_in_page(page_addr);
				page = virt_to_page(page_addr);
				tlen = min(tlen, PAGE_SIZE - off_in_page);
			} else
				off_in_page = mem_off;
			BUG_ON(!page);
			/* page ref is dropped when the skb is consumed */
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else if (mem) {
			/* copy path: map each page of the se_mem and memcpy */
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
					   KM_SOFTIRQ0);
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			to += tlen;
		} else {
			/* linear buffer, copy path */
			from = task->t_task_buf + mem_off;
			memcpy(to, from, tlen);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		/* keep filling until the frame is full or frags exhausted */
		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, cmd->seq, fp);
		if (error) {
			/* XXX For now, initiator will retry */
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Failed to send frame %p, "
						"xid <0x%x>, remaining <0x%x>, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
		}
	}
	return ft_queue_status(se_cmd);
}
214
215/*
216 * Receive write data frame.
217 */
/*
 * Receive one write data frame for a command.
 *
 * Two paths exist:
 *  - DDP path: the payload was placed directly into the user buffers by
 *    hardware; only headers arrive here.  The last frame (TSI bit set)
 *    completes the transfer; any payload-bearing frame is an error.
 *  - Copy path: the payload is copied from the frame into the command's
 *    se_mem scatter list (or linear t_task_buf) at the frame's relative
 *    offset.
 *
 * When all expected write data has arrived, hands the command back to
 * the transport.  Always consumes (frees) the frame.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct fc_seq *seq = cmd->seq;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct se_transport_task *task;
	struct fc_frame_header *fh;
	struct se_mem *mem;
	u32 mem_off;		/* offset within current se_mem */
	u32 rel_off;		/* frame's relative offset into the transfer */
	size_t frame_len;	/* payload bytes remaining in this frame */
	size_t mem_len;
	size_t tlen;
	struct page *page;
	void *page_addr;
	void *from;
	void *to;
	u32 f_ctl;
	void *buf;

	task = T_TASK(se_cmd);
	BUG_ON(!task);

	fh = fc_frame_header_get(fp);
	/* frames without a relative offset cannot be placed — drop */
	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
		goto drop;

	/*
	 * Doesn't expect even single byte of payload. Payload
	 * is expected to be copied directly to user buffers
	 * due to DDP (Large Rx offload) feature, hence
	 * BUG_ON if BUF is non-NULL
	 */
	buf = fc_frame_payload_get(fp, 1);
	if (cmd->was_ddp_setup && buf) {
		printk(KERN_INFO "%s: When DDP was setup, not expected to"
				 "receive frame with payload, Payload shall be"
				 "copied directly to buffer instead of coming "
				 "via. legacy receive queues\n", __func__);
		BUG_ON(buf);
	}

	/*
	 * If ft_cmd indicated 'ddp_setup', in that case only the last frame
	 * should come with 'TSI bit being set'. If 'TSI bit is not set and if
	 * data frame appears here, means error condition. In both the cases
	 * release the DDP context (ddp_put) and in error case, as well
	 * initiate error recovery mechanism.
	 */
	ep = fc_seq_exch(seq);
	/* lport is only needed (and only valid) on the DDP path */
	if (cmd->was_ddp_setup) {
		BUG_ON(!ep);
		lport = ep->lp;
		BUG_ON(!lport);
	}
	if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
		f_ctl = ntoh24(fh->fh_f_ctl);
		/*
		 * If TSI bit set in f_ctl, means last write data frame is
		 * received successfully where payload is posted directly
		 * to user buffer and only the last frame's header is posted
		 * in legacy receive queue
		 */
		if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
			cmd->write_data_len = lport->tt.ddp_done(lport,
								ep->xid);
			goto last_frame;
		} else {
			/*
			 * Updating the write_data_len may be meaningless at
			 * this point, but just in case if required in future
			 * for debugging or any other purpose
			 */
			printk(KERN_ERR "%s: Received frame with TSI bit not"
					" being SET, dropping the frame, "
					"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
					__func__, cmd->sg, cmd->sg_cnt);
			cmd->write_data_len = lport->tt.ddp_done(lport,
							      ep->xid);
			lport->tt.seq_exch_abort(cmd->seq, 0);
			goto drop;
		}
	}

	rel_off = ntohl(fh->fh_parm_offset);
	frame_len = fr_len(fp);
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
	/* clamp the payload to the command's declared data length */
	if (rel_off >= se_cmd->data_length)
		goto drop;
	if (frame_len + rel_off > se_cmd->data_length)
		frame_len = se_cmd->data_length - rel_off;

	/*
	 * Setup to use first mem list entry if any.
	 */
	if (task->t_tasks_se_num) {
		mem = list_first_entry(task->t_mem_list,
				       struct se_mem, se_list);
		mem_len = mem->se_len;
		mem_off = mem->se_off;
		page = mem->se_page;
	} else {
		mem = NULL;
		page = NULL;
		mem_off = 0;
		mem_len = frame_len;
	}

	while (frame_len) {
		if (!mem_len) {
			/* advance to the next scatter list element */
			BUG_ON(!mem);
			mem = list_entry(mem->se_list.next,
					 struct se_mem, se_list);
			mem_len = mem->se_len;
			mem_off = mem->se_off;
			page = mem->se_page;
		}
		/* skip whole elements that lie before rel_off */
		if (rel_off >= mem_len) {
			rel_off -= mem_len;
			mem_len = 0;
			continue;
		}
		mem_off += rel_off;
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min(mem_len, frame_len);

		if (mem) {
			/* copy into the mapped page, one page at a time */
			to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
					 KM_SOFTIRQ0);
			page_addr = to;
			to += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						  (mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
		} else {
			to = task->t_task_buf + mem_off;
			memcpy(to, from, tlen);
		}
		from += tlen;
		frame_len -= tlen;
		mem_off += tlen;
		mem_len -= tlen;
		cmd->write_data_len += tlen;
	}
last_frame:
	/* all expected data received: resume command processing */
	if (cmd->write_data_len == se_cmd->data_length)
		transport_generic_handle_data(se_cmd);
drop:
	fc_frame_free(fp);
}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 000000000000..a3bd57f2ea32
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <linux/rcupdate.h>
34#include <linux/rculist.h>
35#include <linux/kref.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_device.h>
47#include <target/target_core_tpg.h>
48#include <target/target_core_configfs.h>
49#include <target/target_core_base.h>
50#include <target/configfs_macros.h>
51
52#include <scsi/libfc.h>
53#include "tcm_fc.h"
54
55static void ft_sess_delete_all(struct ft_tport *);
56
/*
 * Lookup or allocate target local port.
 * Caller holds ft_lport_lock.
 * Returns the tport for this lport, or NULL if no target portal group
 * is configured or allocation fails.
 */
static struct ft_tport *ft_tport_create(struct fc_lport *lport)
{
	struct ft_tpg *tpg;
	struct ft_tport *tport;
	int i;

	/*
	 * NOTE(review): rcu_dereference() outside an RCU read-side section;
	 * the ft_lport_lock held by the caller appears to be what protects
	 * this pointer on the update side — confirm against locking design.
	 */
	tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
	if (tport && tport->tpg)
		return tport;

	tpg = ft_lport_find_tpg(lport);
	if (!tpg)
		return NULL;

	/* Reuse an existing tport that currently has no tpg association. */
	if (tport) {
		tport->tpg = tpg;
		return tport;
	}

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport)
		return NULL;

	tport->lport = lport;
	tport->tpg = tpg;
	tpg->tport = tport;
	for (i = 0; i < FT_SESS_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tport->hash[i]);

	/* Publish only after the tport is fully initialized. */
	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
	return tport;
}
93
94/*
95 * Free tport via RCU.
96 */
97static void ft_tport_rcu_free(struct rcu_head *rcu)
98{
99 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
100
101 kfree(tport);
102}
103
/*
 * Delete a target local port.
 * Caller holds ft_lport_lock.
 */
static void ft_tport_delete(struct ft_tport *tport)
{
	struct fc_lport *lport;
	struct ft_tpg *tpg;

	/* Tear down all sessions before unpublishing the tport. */
	ft_sess_delete_all(tport);
	lport = tport->lport;
	BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);

	/* Break the tpg <-> tport association in both directions. */
	tpg = tport->tpg;
	if (tpg) {
		tpg->tport = NULL;
		tport->tpg = NULL;
	}
	/* Defer the actual kfree until RCU readers have drained. */
	call_rcu(&tport->rcu, ft_tport_rcu_free);
}
125
/*
 * Add local port.
 * Called thru fc_lport_iterate(); the arg cookie is unused.
 * Creates (or reuses) the tport for this lport under ft_lport_lock.
 */
void ft_lport_add(struct fc_lport *lport, void *arg)
{
	mutex_lock(&ft_lport_lock);
	ft_tport_create(lport);
	mutex_unlock(&ft_lport_lock);
}
136
137/*
138 * Delete local port.
139 * Called thru fc_lport_iterate().
140 */
141void ft_lport_del(struct fc_lport *lport, void *arg)
142{
143 struct ft_tport *tport;
144
145 mutex_lock(&ft_lport_lock);
146 tport = lport->prov[FC_TYPE_FCP];
147 if (tport)
148 ft_tport_delete(tport);
149 mutex_unlock(&ft_lport_lock);
150}
151
152/*
153 * Notification of local port change from libfc.
154 * Create or delete local port and associated tport.
155 */
156int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
157{
158 struct fc_lport *lport = arg;
159
160 switch (event) {
161 case FC_LPORT_EV_ADD:
162 ft_lport_add(lport, NULL);
163 break;
164 case FC_LPORT_EV_DEL:
165 ft_lport_del(lport, NULL);
166 break;
167 }
168 return NOTIFY_DONE;
169}
170
/*
 * Hash function for FC_IDs.
 * Maps a 24-bit FC port ID into the FT_SESS_HASH_BITS-wide table index.
 */
static u32 ft_sess_hash(u32 port_id)
{
	return hash_32(port_id, FT_SESS_HASH_BITS);
}
178
/*
 * Find session in local port.
 * Sessions and hash lists are RCU-protected.
 * A reference is taken which must be eventually freed.
 * Returns the session with its kref elevated, or NULL if not found.
 */
static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
{
	struct ft_tport *tport;
	struct hlist_head *head;
	struct hlist_node *pos;
	struct ft_sess *sess;

	rcu_read_lock();
	tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
	if (!tport)
		goto out;

	head = &tport->hash[ft_sess_hash(port_id)];
	hlist_for_each_entry_rcu(sess, pos, head, hash) {
		if (sess->port_id == port_id) {
			/*
			 * Take the reference inside the RCU section so the
			 * session cannot be freed before we pin it.
			 */
			kref_get(&sess->kref);
			rcu_read_unlock();
			FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
			return sess;
		}
	}
out:
	rcu_read_unlock();
	FT_SESS_DBG("port_id %x not found\n", port_id);
	return NULL;
}
210
/*
 * Allocate session and enter it in the hash for the local port.
 * Caller holds ft_lport_lock.
 * Returns an existing session if one is already hashed for port_id;
 * returns NULL on allocation or transport-init failure.
 */
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
		struct ft_node_acl *acl)
{
	struct ft_sess *sess;
	struct hlist_head *head;
	struct hlist_node *pos;

	/* If a session already exists for this FC_ID, just reuse it. */
	head = &tport->hash[ft_sess_hash(port_id)];
	hlist_for_each_entry_rcu(sess, pos, head, hash)
		if (sess->port_id == port_id)
			return sess;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return NULL;

	sess->se_sess = transport_init_session();
	if (!sess->se_sess) {
		kfree(sess);
		return NULL;
	}
	sess->se_sess->se_node_acl = &acl->se_node_acl;
	sess->tport = tport;
	sess->port_id = port_id;
	kref_init(&sess->kref);	/* ref for table entry */
	hlist_add_head_rcu(&sess->hash, head);
	tport->sess_count++;

	FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);

	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
				   sess->se_sess, sess);
	return sess;
}
249
/*
 * Unhash the session.
 * Caller holds ft_lport_lock.
 * The session itself stays allocated; freeing happens via ft_sess_put()
 * and the RCU callback.
 */
static void ft_sess_unhash(struct ft_sess *sess)
{
	struct ft_tport *tport = sess->tport;

	hlist_del_rcu(&sess->hash);
	BUG_ON(!tport->sess_count);
	tport->sess_count--;
	sess->port_id = -1;	/* mark session as no longer logged in */
	sess->params = 0;
}
264
265/*
266 * Delete session from hash.
267 * Caller holds ft_lport_lock.
268 */
269static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
270{
271 struct hlist_head *head;
272 struct hlist_node *pos;
273 struct ft_sess *sess;
274
275 head = &tport->hash[ft_sess_hash(port_id)];
276 hlist_for_each_entry_rcu(sess, pos, head, hash) {
277 if (sess->port_id == port_id) {
278 ft_sess_unhash(sess);
279 return sess;
280 }
281 }
282 return NULL;
283}
284
/*
 * Delete all sessions from tport.
 * Caller holds ft_lport_lock.
 */
static void ft_sess_delete_all(struct ft_tport *tport)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct ft_sess *sess;

	for (head = tport->hash;
	     head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
		/*
		 * NOTE(review): entries are unhashed while walking with the
		 * _rcu iterator; hlist_del_rcu() keeps the removed node's
		 * forward link intact, so this looks safe under the lock —
		 * confirm a _safe iterator isn't required here.
		 */
		hlist_for_each_entry_rcu(sess, pos, head, hash) {
			ft_sess_unhash(sess);
			transport_deregister_session_configfs(sess->se_sess);
			ft_sess_put(sess);	/* release from table */
		}
	}
}
304
/*
 * TCM ops for sessions.
 */

/*
 * Determine whether session is allowed to be shutdown in the current context.
 * Returns non-zero if the session should be shutdown.
 * This fabric always permits shutdown.
 */
int ft_sess_shutdown(struct se_session *se_sess)
{
	struct ft_sess *sess = se_sess->fabric_sess_ptr;

	FT_SESS_DBG("port_id %x\n", sess->port_id);
	return 1;
}
320
321/*
322 * Remove session and send PRLO.
323 * This is called when the ACL is being deleted or queue depth is changing.
324 */
325void ft_sess_close(struct se_session *se_sess)
326{
327 struct ft_sess *sess = se_sess->fabric_sess_ptr;
328 struct fc_lport *lport;
329 u32 port_id;
330
331 mutex_lock(&ft_lport_lock);
332 lport = sess->tport->lport;
333 port_id = sess->port_id;
334 if (port_id == -1) {
335 mutex_lock(&ft_lport_lock);
336 return;
337 }
338 FT_SESS_DBG("port_id %x\n", port_id);
339 ft_sess_unhash(sess);
340 mutex_unlock(&ft_lport_lock);
341 transport_deregister_session_configfs(se_sess);
342 ft_sess_put(sess);
343 /* XXX Send LOGO or PRLO */
344 synchronize_rcu(); /* let transport deregister happen */
345}
346
/*
 * TCM callback to stop the session.
 * Currently a no-op beyond the debug trace; the sess_sleep and
 * conn_sleep arguments are unused.
 */
void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
{
	struct ft_sess *sess = se_sess->fabric_sess_ptr;

	FT_SESS_DBG("port_id %x\n", sess->port_id);
}
353
354int ft_sess_logged_in(struct se_session *se_sess)
355{
356 struct ft_sess *sess = se_sess->fabric_sess_ptr;
357
358 return sess->port_id != -1;
359}
360
/*
 * Return a session index for TCM.
 * Uses the FC port ID as the index.
 */
u32 ft_sess_get_index(struct se_session *se_sess)
{
	struct ft_sess *sess = se_sess->fabric_sess_ptr;

	return sess->port_id; /* XXX TBD probably not what is needed */
}
367
/*
 * Format the session's FC world-wide port name into buf (at most len bytes).
 * Returns whatever ft_format_wwn() returns.
 */
u32 ft_sess_get_port_name(struct se_session *se_sess,
			  unsigned char *buf, u32 len)
{
	struct ft_sess *sess = se_sess->fabric_sess_ptr;

	return ft_format_wwn(buf, len, sess->port_name);
}
375
/*
 * TCM callback to drop the session to error recovery level 0.
 * Intentionally empty for now.
 */
void ft_sess_set_erl0(struct se_session *se_sess)
{
	/* XXX TBD called when out of memory */
}
380
/*
 * libfc ops involving sessions.
 */

/*
 * Handle a PRLI service parameter page for this FCP target provider.
 * Caller holds ft_lport_lock.
 * @rdata: remote port private
 * @spp_len: service parameter page length (currently unused)
 * @rspp: received service parameter page (NULL for outgoing PRLI)
 * @spp: response service parameter page to fill in
 * Returns an FC_SPP_RESP_* code (0 means not a target for this lport).
 */
static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
			  const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
	struct ft_tport *tport;
	struct ft_sess *sess;
	struct ft_node_acl *acl;
	u32 fcp_parm;

	tport = ft_tport_create(rdata->local_port);
	if (!tport)
		return 0;	/* not a target for this local port */

	acl = ft_acl_get(tport->tpg, rdata);
	if (!acl)
		return 0;	/* initiator not permitted by the tpg's ACLs */

	if (!rspp)
		goto fill;	/* outgoing PRLI: only fill our parameters */

	if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
		return FC_SPP_RESP_NO_PA;

	/*
	 * If both target and initiator bits are off, the SPP is invalid.
	 */
	fcp_parm = ntohl(rspp->spp_params);
	if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
		return FC_SPP_RESP_INVL;

	/*
	 * Create session (image pair) only if requested by
	 * EST_IMG_PAIR flag and if the requestor is an initiator.
	 */
	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
		spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
		if (!(fcp_parm & FCP_SPPF_INIT_FCN))
			return FC_SPP_RESP_CONF;
		sess = ft_sess_create(tport, rdata->ids.port_id, acl);
		if (!sess)
			return FC_SPP_RESP_RES;
		/* Count this remote port's PRLI only on the first image pair. */
		if (!sess->params)
			rdata->prli_count++;
		sess->params = fcp_parm;
		sess->port_name = rdata->ids.port_name;
		sess->max_frame = rdata->maxframe_size;

		/* XXX TBD - clearing actions. unit attn, see 4.10 */
	}

	/*
	 * OR in our service parameters with other provider (initiator), if any.
	 * TBD XXX - indicate RETRY capability?
	 */
fill:
	fcp_parm = ntohl(spp->spp_params);
	spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
	return FC_SPP_RESP_ACK;
}
443
444/**
445 * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
446 * @rdata: remote port private
447 * @spp_len: service parameter page length
448 * @rspp: received service parameter page (NULL for outgoing PRLI)
449 * @spp: response service parameter page
450 *
451 * Returns spp response code.
452 */
453static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
454 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
455{
456 int ret;
457
458 mutex_lock(&ft_lport_lock);
459 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
460 mutex_unlock(&ft_lport_lock);
461 FT_SESS_DBG("port_id %x flags %x ret %x\n",
462 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
463 return ret;
464}
465
/*
 * RCU callback: deregister the transport session and free the ft_sess
 * once all RCU readers have finished with it.
 */
static void ft_sess_rcu_free(struct rcu_head *rcu)
{
	struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);

	transport_deregister_session(sess->se_sess);
	kfree(sess);
}
473
/*
 * kref release function: defer the real teardown through RCU so
 * lockless hash readers never touch a freed session.
 */
static void ft_sess_free(struct kref *kref)
{
	struct ft_sess *sess = container_of(kref, struct ft_sess, kref);

	call_rcu(&sess->rcu, ft_sess_rcu_free);
}
480
481void ft_sess_put(struct ft_sess *sess)
482{
483 int sess_held = atomic_read(&sess->kref.refcount);
484
485 BUG_ON(!sess_held);
486 kref_put(&sess->kref, ft_sess_free);
487}
488
/*
 * Handle PRLO (process logout) from the remote port.
 * Removes the matching session, if any, and drops the PRLI count.
 */
static void ft_prlo(struct fc_rport_priv *rdata)
{
	struct ft_sess *sess;
	struct ft_tport *tport;

	mutex_lock(&ft_lport_lock);
	/*
	 * NOTE(review): rcu_dereference() without rcu_read_lock();
	 * presumably ft_lport_lock protects the pointer here — confirm.
	 */
	tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
	if (!tport) {
		mutex_unlock(&ft_lport_lock);
		return;		/* not a target for this local port */
	}
	sess = ft_sess_delete(tport, rdata->ids.port_id);
	if (!sess) {
		mutex_unlock(&ft_lport_lock);
		return;		/* no session for this initiator */
	}
	mutex_unlock(&ft_lport_lock);
	transport_deregister_session_configfs(sess->se_sess);
	ft_sess_put(sess);	/* release from table */
	rdata->prli_count--;
	/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
511
512/*
513 * Handle incoming FCP request.
514 * Caller has verified that the frame is type FCP.
515 */
516static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
517{
518 struct ft_sess *sess;
519 u32 sid = fc_frame_sid(fp);
520
521 FT_SESS_DBG("sid %x\n", sid);
522
523 sess = ft_sess_get(lport, sid);
524 if (!sess) {
525 FT_SESS_DBG("sid %x sess lookup failed\n", sid);
526 /* TBD XXX - if FCP_CMND, send PRLO */
527 fc_frame_free(fp);
528 return;
529 }
530 ft_recv_req(sess, fp); /* must do ft_sess_put() */
531}
532
/*
 * Provider ops for libfc.
 * Presumably registered for FC_TYPE_FCP (the code above dereferences
 * lport->prov[FC_TYPE_FCP]) so libfc routes PRLI/PRLO and received
 * FCP frames to this module.
 */
struct fc4_prov ft_prov = {
	.prli = ft_prli,
	.prlo = ft_prlo,
	.recv = ft_recv,
	.module = THIS_MODULE,
};