path: root/drivers/target
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/Kconfig                             35
-rw-r--r--  drivers/target/Makefile                            28
-rw-r--r--  drivers/target/loopback/Kconfig                    11
-rw-r--r--  drivers/target/loopback/Makefile                    1
-rw-r--r--  drivers/target/loopback/tcm_loop.c               1569
-rw-r--r--  drivers/target/loopback/tcm_loop.h                 77
-rw-r--r--  drivers/target/target_core_alua.c                1991
-rw-r--r--  drivers/target/target_core_alua.h                 126
-rw-r--r--  drivers/target/target_core_cdb.c                 1137
-rw-r--r--  drivers/target/target_core_configfs.c            3323
-rw-r--r--  drivers/target/target_core_device.c              1644
-rw-r--r--  drivers/target/target_core_fabric_configfs.c     1233
-rw-r--r--  drivers/target/target_core_fabric_lib.c           451
-rw-r--r--  drivers/target/target_core_file.c                  703
-rw-r--r--  drivers/target/target_core_file.h                   50
-rw-r--r--  drivers/target/target_core_hba.c                   172
-rw-r--r--  drivers/target/target_core_hba.h                     7
-rw-r--r--  drivers/target/target_core_iblock.c                814
-rw-r--r--  drivers/target/target_core_iblock.h                 40
-rw-r--r--  drivers/target/target_core_pr.c                   4252
-rw-r--r--  drivers/target/target_core_pr.h                     67
-rw-r--r--  drivers/target/target_core_pscsi.c                1473
-rw-r--r--  drivers/target/target_core_pscsi.h                  65
-rw-r--r--  drivers/target/target_core_rd.c                   1091
-rw-r--r--  drivers/target/target_core_rd.h                     71
-rw-r--r--  drivers/target/target_core_scdb.c                  105
-rw-r--r--  drivers/target/target_core_scdb.h                   10
-rw-r--r--  drivers/target/target_core_stat.c                 1810
-rw-r--r--  drivers/target/target_core_stat.h                    8
-rw-r--r--  drivers/target/target_core_tmr.c                   416
-rw-r--r--  drivers/target/target_core_tpg.c                   838
-rw-r--r--  drivers/target/target_core_transport.c            6184
-rw-r--r--  drivers/target/target_core_ua.c                    332
-rw-r--r--  drivers/target/target_core_ua.h                     36
-rw-r--r--  drivers/target/tcm_fc/Kconfig                        5
-rw-r--r--  drivers/target/tcm_fc/Makefile                      15
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h                     215
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c                    716
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c                   669
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c                     374
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c                   541
41 files changed, 32705 insertions, 0 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 000000000000..5cb0f0ef6af0
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,35 @@
1
2menuconfig TARGET_CORE
3 tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
4 depends on SCSI && BLOCK
5 select CONFIGFS_FS
6 default n
7 help
8 Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
9 control path for target_core_mod. This includes built-in TCM RAMDISK
10 subsystem logic for virtual LUN 0 access
11
12if TARGET_CORE
13
14config TCM_IBLOCK
15 tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
16 help
17 Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
18 access to Linux/Block devices using BIO
19
20config TCM_FILEIO
21 tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
22 help
23 Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
24 access to Linux/VFS struct file or struct block_device
25
26config TCM_PSCSI
27 tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
28 help
29 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
30 passthrough access to Linux/SCSI device
31
32source "drivers/target/loopback/Kconfig"
33source "drivers/target/tcm_fc/Kconfig"
34
35endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 000000000000..21df808a992c
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,28 @@
1
2target_core_mod-y := target_core_configfs.o \
3 target_core_device.o \
4 target_core_fabric_configfs.o \
5 target_core_fabric_lib.o \
6 target_core_hba.o \
7 target_core_pr.o \
8 target_core_alua.o \
9 target_core_scdb.o \
10 target_core_tmr.o \
11 target_core_tpg.o \
12 target_core_transport.o \
13 target_core_cdb.o \
14 target_core_ua.o \
15 target_core_rd.o \
16 target_core_stat.o
17
18obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
19
20# Subsystem modules
21obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
22obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
23obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
24
25# Fabric modules
26obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
27
28obj-$(CONFIG_TCM_FC) += tcm_fc/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
new file mode 100644
index 000000000000..57dcbc2d711b
--- /dev/null
+++ b/drivers/target/loopback/Kconfig
@@ -0,0 +1,11 @@
1config LOOPBACK_TARGET
2	tristate "TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module"
3 help
4 Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
5 fabric loopback module.
6
7config LOOPBACK_TARGET_CDB_DEBUG
8 bool "TCM loopback fabric module CDB debug code"
9 depends on LOOPBACK_TARGET
10 help
11 Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile
new file mode 100644
index 000000000000..6abebdf95659
--- /dev/null
+++ b/drivers/target/loopback/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
new file mode 100644
index 000000000000..70c2e7fa6664
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.c
@@ -0,0 +1,1569 @@
1/*******************************************************************************
2 *
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports
5 *
6 * © Copyright 2011 RisingTide Systems LLC.
7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28#include <linux/configfs.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_cmnd.h>
34#include <scsi/scsi_tcq.h>
35
36#include <target/target_core_base.h>
37#include <target/target_core_transport.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_fabric_configfs.h>
40#include <target/target_core_fabric_lib.h>
41#include <target/target_core_configfs.h>
42#include <target/target_core_device.h>
43#include <target/target_core_tpg.h>
44#include <target/target_core_tmr.h>
45
46#include "tcm_loop.h"
47
48#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
49
50/* Local pointer to allocated TCM configfs fabric module */
51static struct target_fabric_configfs *tcm_loop_fabric_configfs;
52
53static struct kmem_cache *tcm_loop_cmd_cache;
54
55static int tcm_loop_hba_no_cnt;
56
57/*
58 * Allocate a tcm_loop cmd descriptor from target_core_mod code
59 *
60 * Can be called from interrupt context in tcm_loop_queuecommand() below
61 */
62static struct se_cmd *tcm_loop_allocate_core_cmd(
63 struct tcm_loop_hba *tl_hba,
64 struct se_portal_group *se_tpg,
65 struct scsi_cmnd *sc)
66{
67 struct se_cmd *se_cmd;
68 struct se_session *se_sess;
69 struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
70 struct tcm_loop_cmd *tl_cmd;
71 int sam_task_attr;
72
73 if (!tl_nexus) {
74 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
75 " does not exist\n");
76 set_host_byte(sc, DID_ERROR);
77 return NULL;
78 }
79 se_sess = tl_nexus->se_sess;
80
81 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
82 if (!tl_cmd) {
83 printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
84 set_host_byte(sc, DID_ERROR);
85 return NULL;
86 }
87 se_cmd = &tl_cmd->tl_se_cmd;
88 /*
89 * Save the pointer to struct scsi_cmnd *sc
90 */
91 tl_cmd->sc = sc;
92 /*
93 * Locate the SAM Task Attr from struct scsi_cmnd *
94 */
95 if (sc->device->tagged_supported) {
96 switch (sc->tag) {
97 case HEAD_OF_QUEUE_TAG:
98 sam_task_attr = MSG_HEAD_TAG;
99 break;
100 case ORDERED_QUEUE_TAG:
101 sam_task_attr = MSG_ORDERED_TAG;
102 break;
103 default:
104 sam_task_attr = MSG_SIMPLE_TAG;
105 break;
106 }
107 } else
108 sam_task_attr = MSG_SIMPLE_TAG;
109
110 /*
111 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
112 */
113 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
114 scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
115 &tl_cmd->tl_sense_buf[0]);
116
117 /*
118 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
119 */
120 if (scsi_bidi_cmnd(sc))
121 T_TASK(se_cmd)->t_tasks_bidi = 1;
122 /*
123 * Locate the struct se_lun pointer and attach it to struct se_cmd
124 */
125 if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
126 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
127 set_host_byte(sc, DID_NO_CONNECT);
128 return NULL;
129 }
130
131 transport_device_setup_cmd(se_cmd);
132 return se_cmd;
133}
134
135/*
136 * Called by struct target_core_fabric_ops->new_cmd_map()
137 *
138 * Always called in process context. A non-zero return value
139 * here tells the caller to handle an exception based on the return code.
140 */
141static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
142{
143 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
144 struct tcm_loop_cmd, tl_se_cmd);
145 struct scsi_cmnd *sc = tl_cmd->sc;
146 void *mem_ptr, *mem_bidi_ptr = NULL;
147 u32 sg_no_bidi = 0;
148 int ret;
149 /*
150 * Allocate the necessary tasks to complete the received CDB+data
151 */
152 ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
153 if (ret == -1) {
154 /* Out of Resources */
155 return PYX_TRANSPORT_LU_COMM_FAILURE;
156 } else if (ret == -2) {
157 /*
158 * Handle case for SAM_STAT_RESERVATION_CONFLICT
159 */
160 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
161 return PYX_TRANSPORT_RESERVATION_CONFLICT;
162 /*
163 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
164 * sense data.
165 */
166 return PYX_TRANSPORT_USE_SENSE_REASON;
167 }
168 /*
169 * Setup the struct scatterlist memory from the received
170 * struct scsi_cmnd.
171 */
172 if (scsi_sg_count(sc)) {
173 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
174 mem_ptr = (void *)scsi_sglist(sc);
175 /*
176 * For BIDI commands, pass in the extra READ buffer
177 * to transport_generic_map_mem_to_cmd() below..
178 */
179 if (T_TASK(se_cmd)->t_tasks_bidi) {
180 struct scsi_data_buffer *sdb = scsi_in(sc);
181
182 mem_bidi_ptr = (void *)sdb->table.sgl;
183 sg_no_bidi = sdb->table.nents;
184 }
185 } else {
186 /*
187 * Used for DMA_NONE
188 */
189 mem_ptr = NULL;
190 }
191 /*
192 * Map the SG memory into struct se_mem->page linked list using the same
193 * physical memory at sg->page_link.
194 */
195 ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
196 scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
197 if (ret < 0)
198 return PYX_TRANSPORT_LU_COMM_FAILURE;
199
200 return 0;
201}
202
203/*
204 * Called from struct target_core_fabric_ops->check_stop_free()
205 */
206static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
207{
208 /*
209 * Do not release struct se_cmd's containing a valid TMR
210 * pointer. These will be released directly in tcm_loop_device_reset()
211 * with transport_generic_free_cmd().
212 */
213 if (se_cmd->se_tmr_req)
214 return;
215 /*
216 * Release the struct se_cmd, which will make a callback to release
217 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
218 */
219 transport_generic_free_cmd(se_cmd, 0, 1, 0);
220}
221
222/*
223 * Called from struct target_core_fabric_ops->release_cmd_to_pool()
224 */
225static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
226{
227 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
228 struct tcm_loop_cmd, tl_se_cmd);
229
230 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
231}
232
233static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
234 char **start, off_t offset,
235 int length, int inout)
236{
237 return sprintf(buffer, "tcm_loop_proc_info()\n");
238}
239
240static int tcm_loop_driver_probe(struct device *);
241static int tcm_loop_driver_remove(struct device *);
242
243static int pseudo_lld_bus_match(struct device *dev,
244 struct device_driver *dev_driver)
245{
246 return 1;
247}
248
249static struct bus_type tcm_loop_lld_bus = {
250 .name = "tcm_loop_bus",
251 .match = pseudo_lld_bus_match,
252 .probe = tcm_loop_driver_probe,
253 .remove = tcm_loop_driver_remove,
254};
255
256static struct device_driver tcm_loop_driverfs = {
257 .name = "tcm_loop",
258 .bus = &tcm_loop_lld_bus,
259};
260/*
261 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
262 */
263struct device *tcm_loop_primary;
264
265/*
266 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
267 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
268 */
269static int tcm_loop_change_queue_depth(
270 struct scsi_device *sdev,
271 int depth,
272 int reason)
273{
274 switch (reason) {
275 case SCSI_QDEPTH_DEFAULT:
276 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
277 break;
278 case SCSI_QDEPTH_QFULL:
279 scsi_track_queue_full(sdev, depth);
280 break;
281 case SCSI_QDEPTH_RAMP_UP:
282 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
283 break;
284 default:
285 return -EOPNOTSUPP;
286 }
287 return sdev->queue_depth;
288}
289
290/*
291 * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
292 * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
293 */
294static int tcm_loop_queuecommand(
295 struct Scsi_Host *sh,
296 struct scsi_cmnd *sc)
297{
298 struct se_cmd *se_cmd;
299 struct se_portal_group *se_tpg;
300 struct tcm_loop_hba *tl_hba;
301 struct tcm_loop_tpg *tl_tpg;
302
303 TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
304 " scsi_buf_len: %u\n", sc->device->host->host_no,
305 sc->device->id, sc->device->channel, sc->device->lun,
306 sc->cmnd[0], scsi_bufflen(sc));
307 /*
308 * Locate the tcm_loop_hba_t pointer
309 */
310 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
311 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
312 se_tpg = &tl_tpg->tl_se_tpg;
313 /*
314 * Determine the SAM Task Attribute and allocate tl_cmd and
315 * tl_cmd->tl_se_cmd from TCM infrastructure
316 */
317 se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
318 if (!se_cmd) {
319 sc->scsi_done(sc);
320 return 0;
321 }
322 /*
323 * Queue up the newly allocated to be processed in TCM thread context.
324 */
325 transport_generic_handle_cdb_map(se_cmd);
326 return 0;
327}
328
329/*
330 * Called from SCSI EH process context to issue a LUN_RESET TMR
331 * to struct scsi_device
332 */
333static int tcm_loop_device_reset(struct scsi_cmnd *sc)
334{
335 struct se_cmd *se_cmd = NULL;
336 struct se_portal_group *se_tpg;
337 struct se_session *se_sess;
338 struct tcm_loop_cmd *tl_cmd = NULL;
339 struct tcm_loop_hba *tl_hba;
340 struct tcm_loop_nexus *tl_nexus;
341 struct tcm_loop_tmr *tl_tmr = NULL;
342 struct tcm_loop_tpg *tl_tpg;
343 int ret = FAILED;
344 /*
345 * Locate the tcm_loop_hba_t pointer
346 */
347 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
348 /*
349 * Locate the tl_nexus and se_sess pointers
350 */
351 tl_nexus = tl_hba->tl_nexus;
352 if (!tl_nexus) {
353 printk(KERN_ERR "Unable to perform device reset without"
354 " active I_T Nexus\n");
355 return FAILED;
356 }
357 se_sess = tl_nexus->se_sess;
358 /*
359 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
360 */
361 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
362 se_tpg = &tl_tpg->tl_se_tpg;
363
364 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
365 if (!tl_cmd) {
366 printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
367 return FAILED;
368 }
369
370 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
371 if (!tl_tmr) {
372 printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
373 goto release;
374 }
375 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
376
377 se_cmd = &tl_cmd->tl_se_cmd;
378 /*
379 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
380 */
381 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
382 DMA_NONE, MSG_SIMPLE_TAG,
383 &tl_cmd->tl_sense_buf[0]);
384 /*
385 * Allocate the LUN_RESET TMR
386 */
387 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
388 TMR_LUN_RESET);
389 if (IS_ERR(se_cmd->se_tmr_req))
390 goto release;
391 /*
392 * Locate the underlying TCM struct se_lun from sc->device->lun
393 */
394 if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
395 goto release;
396 /*
397 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
398 * to wake us up.
399 */
400 transport_generic_handle_tmr(se_cmd);
401 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
402 /*
403 * The TMR LUN_RESET has completed, check the response status and
404 * then release allocations.
405 */
406 ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
407 SUCCESS : FAILED;
408release:
409 if (se_cmd)
410 transport_generic_free_cmd(se_cmd, 1, 1, 0);
411 else
412 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
413 kfree(tl_tmr);
414 return ret;
415}
416
417static int tcm_loop_slave_alloc(struct scsi_device *sd)
418{
419 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
420 return 0;
421}
422
423static int tcm_loop_slave_configure(struct scsi_device *sd)
424{
425 return 0;
426}
427
428static struct scsi_host_template tcm_loop_driver_template = {
429 .proc_info = tcm_loop_proc_info,
430 .proc_name = "tcm_loopback",
431 .name = "TCM_Loopback",
432 .queuecommand = tcm_loop_queuecommand,
433 .change_queue_depth = tcm_loop_change_queue_depth,
434 .eh_device_reset_handler = tcm_loop_device_reset,
435 .can_queue = TL_SCSI_CAN_QUEUE,
436 .this_id = -1,
437 .sg_tablesize = TL_SCSI_SG_TABLESIZE,
438 .cmd_per_lun = TL_SCSI_CMD_PER_LUN,
439 .max_sectors = TL_SCSI_MAX_SECTORS,
440 .use_clustering = DISABLE_CLUSTERING,
441 .slave_alloc = tcm_loop_slave_alloc,
442 .slave_configure = tcm_loop_slave_configure,
443 .module = THIS_MODULE,
444};
445
446static int tcm_loop_driver_probe(struct device *dev)
447{
448 struct tcm_loop_hba *tl_hba;
449 struct Scsi_Host *sh;
450 int error;
451
452 tl_hba = to_tcm_loop_hba(dev);
453
454 sh = scsi_host_alloc(&tcm_loop_driver_template,
455 sizeof(struct tcm_loop_hba));
456 if (!sh) {
457 printk(KERN_ERR "Unable to allocate struct scsi_host\n");
458 return -ENODEV;
459 }
460 tl_hba->sh = sh;
461
462 /*
463 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
464 */
465 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
466 /*
467 * Setup single ID, Channel and LUN for now..
468 */
469 sh->max_id = 2;
470 sh->max_lun = 0;
471 sh->max_channel = 0;
472 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
473
474 error = scsi_add_host(sh, &tl_hba->dev);
475 if (error) {
476 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
477 scsi_host_put(sh);
478 return -ENODEV;
479 }
480 return 0;
481}
482
483static int tcm_loop_driver_remove(struct device *dev)
484{
485 struct tcm_loop_hba *tl_hba;
486 struct Scsi_Host *sh;
487
488 tl_hba = to_tcm_loop_hba(dev);
489 sh = tl_hba->sh;
490
491 scsi_remove_host(sh);
492 scsi_host_put(sh);
493 return 0;
494}
495
496static void tcm_loop_release_adapter(struct device *dev)
497{
498 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
499
500 kfree(tl_hba);
501}
502
503/*
504 * Called from tcm_loop_make_scsi_hba() below
505 */
506static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
507{
508 int ret;
509
510 tl_hba->dev.bus = &tcm_loop_lld_bus;
511 tl_hba->dev.parent = tcm_loop_primary;
512 tl_hba->dev.release = &tcm_loop_release_adapter;
513 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
514
515 ret = device_register(&tl_hba->dev);
516 if (ret) {
517 printk(KERN_ERR "device_register() failed for"
518 " tl_hba->dev: %d\n", ret);
519 return -ENODEV;
520 }
521
522 return 0;
523}
524
525/*
526 * Called from tcm_loop_fabric_init() below to load the emulated
527 * tcm_loop SCSI bus.
528 */
529static int tcm_loop_alloc_core_bus(void)
530{
531 int ret;
532
533 tcm_loop_primary = root_device_register("tcm_loop_0");
534 if (IS_ERR(tcm_loop_primary)) {
535 printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
536 return PTR_ERR(tcm_loop_primary);
537 }
538
539 ret = bus_register(&tcm_loop_lld_bus);
540 if (ret) {
541 printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
542 goto dev_unreg;
543 }
544
545 ret = driver_register(&tcm_loop_driverfs);
546 if (ret) {
547 printk(KERN_ERR "driver_register() failed for"
548 "tcm_loop_driverfs\n");
549 goto bus_unreg;
550 }
551
552 printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
553 return ret;
554
555bus_unreg:
556 bus_unregister(&tcm_loop_lld_bus);
557dev_unreg:
558 root_device_unregister(tcm_loop_primary);
559 return ret;
560}
561
562static void tcm_loop_release_core_bus(void)
563{
564 driver_unregister(&tcm_loop_driverfs);
565 bus_unregister(&tcm_loop_lld_bus);
566 root_device_unregister(tcm_loop_primary);
567
568 printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
569}
570
571static char *tcm_loop_get_fabric_name(void)
572{
573 return "loopback";
574}
575
576static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
577{
578 struct tcm_loop_tpg *tl_tpg =
579 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
580 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
581 /*
582 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
583 * time based on the protocol dependent prefix of the passed configfs group.
584 *
585 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
586 * ProtocolID using target_core_fabric_lib.c symbols.
587 */
588 switch (tl_hba->tl_proto_id) {
589 case SCSI_PROTOCOL_SAS:
590 return sas_get_fabric_proto_ident(se_tpg);
591 case SCSI_PROTOCOL_FCP:
592 return fc_get_fabric_proto_ident(se_tpg);
593 case SCSI_PROTOCOL_ISCSI:
594 return iscsi_get_fabric_proto_ident(se_tpg);
595 default:
596 printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
597 " SAS emulation\n", tl_hba->tl_proto_id);
598 break;
599 }
600
601 return sas_get_fabric_proto_ident(se_tpg);
602}
603
604static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
605{
606 struct tcm_loop_tpg *tl_tpg =
607 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
608 /*
609 * Return the passed NAA identifier for the SAS Target Port
610 */
611 return &tl_tpg->tl_hba->tl_wwn_address[0];
612}
613
614static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
615{
616 struct tcm_loop_tpg *tl_tpg =
617 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
618 /*
619 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
620 * to represent the SCSI Target Port.
621 */
622 return tl_tpg->tl_tpgt;
623}
624
625static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
626{
627 return 1;
628}
629
630static u32 tcm_loop_get_pr_transport_id(
631 struct se_portal_group *se_tpg,
632 struct se_node_acl *se_nacl,
633 struct t10_pr_registration *pr_reg,
634 int *format_code,
635 unsigned char *buf)
636{
637 struct tcm_loop_tpg *tl_tpg =
638 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
639 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
640
641 switch (tl_hba->tl_proto_id) {
642 case SCSI_PROTOCOL_SAS:
643 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
644 format_code, buf);
645 case SCSI_PROTOCOL_FCP:
646 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
647 format_code, buf);
648 case SCSI_PROTOCOL_ISCSI:
649 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
650 format_code, buf);
651 default:
652 printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
653 " SAS emulation\n", tl_hba->tl_proto_id);
654 break;
655 }
656
657 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
658 format_code, buf);
659}
660
661static u32 tcm_loop_get_pr_transport_id_len(
662 struct se_portal_group *se_tpg,
663 struct se_node_acl *se_nacl,
664 struct t10_pr_registration *pr_reg,
665 int *format_code)
666{
667 struct tcm_loop_tpg *tl_tpg =
668 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
669 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
670
671 switch (tl_hba->tl_proto_id) {
672 case SCSI_PROTOCOL_SAS:
673 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
674 format_code);
675 case SCSI_PROTOCOL_FCP:
676 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
677 format_code);
678 case SCSI_PROTOCOL_ISCSI:
679 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
680 format_code);
681 default:
682 printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
683 " SAS emulation\n", tl_hba->tl_proto_id);
684 break;
685 }
686
687 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
688 format_code);
689}
690
691/*
692 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
693 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
694 */
695static char *tcm_loop_parse_pr_out_transport_id(
696 struct se_portal_group *se_tpg,
697 const char *buf,
698 u32 *out_tid_len,
699 char **port_nexus_ptr)
700{
701 struct tcm_loop_tpg *tl_tpg =
702 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
703 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
704
705 switch (tl_hba->tl_proto_id) {
706 case SCSI_PROTOCOL_SAS:
707 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
708 port_nexus_ptr);
709 case SCSI_PROTOCOL_FCP:
710 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
711 port_nexus_ptr);
712 case SCSI_PROTOCOL_ISCSI:
713 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
714 port_nexus_ptr);
715 default:
716 printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
717 " SAS emulation\n", tl_hba->tl_proto_id);
718 break;
719 }
720
721 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
722 port_nexus_ptr);
723}
724
725/*
726 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
727 * based upon the incoming fabric dependent SCSI Initiator Port
728 */
729static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
730{
731 return 1;
732}
733
734static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
735{
736 return 0;
737}
738
739/*
740 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
741 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
742 */
743static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
744{
745 return 0;
746}
747
748/*
749 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
750 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
751 * It has been added here as a nop for target_fabric_tf_ops_check()
752 */
753static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
754{
755 return 0;
756}
757
758static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
759 struct se_portal_group *se_tpg)
760{
761 struct tcm_loop_nacl *tl_nacl;
762
763 tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
764 if (!tl_nacl) {
765 printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
766 return NULL;
767 }
768
769 return &tl_nacl->se_node_acl;
770}
771
772static void tcm_loop_tpg_release_fabric_acl(
773 struct se_portal_group *se_tpg,
774 struct se_node_acl *se_nacl)
775{
776 struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
777 struct tcm_loop_nacl, se_node_acl);
778
779 kfree(tl_nacl);
780}
781
782static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
783{
784 return 1;
785}
786
787static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
788{
789 /*
790 * Since TCM_loop is already passing struct scatterlist data from
791 * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
792 * to be handled here.
793 */
794 return;
795}
796
797static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
798{
799 /*
800 * Assume struct scsi_cmnd is not in remove state..
801 */
802 return 0;
803}
804
805static int tcm_loop_sess_logged_in(struct se_session *se_sess)
806{
807 /*
808 * Assume that TL Nexus is always active
809 */
810 return 1;
811}
812
813static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
814{
815 return 1;
816}
817
818static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
819{
820 return;
821}
822
823static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
824{
825 return 1;
826}
827
828static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
829{
830 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
831 struct tcm_loop_cmd, tl_se_cmd);
832
833 return tl_cmd->sc_cmd_state;
834}
835
836static int tcm_loop_shutdown_session(struct se_session *se_sess)
837{
838 return 0;
839}
840
841static void tcm_loop_close_session(struct se_session *se_sess)
842{
843 return;
844};
845
846static void tcm_loop_stop_session(
847 struct se_session *se_sess,
848 int sess_sleep,
849 int conn_sleep)
850{
851 return;
852}
853
854static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess)
855{
856 return;
857}
858
859static int tcm_loop_write_pending(struct se_cmd *se_cmd)
860{
861 /*
862 * Linux/SCSI has already sent down a struct scsi_cmnd with
863 * sc->sc_data_direction of DMA_TO_DEVICE and struct scatterlist array
864 * memory, and that memory has already been mapped into struct
865 * se_cmd->t_mem_list format with transport_generic_map_mem_to_cmd().
866 *
867 * We now tell TCM to add this WRITE CDB directly into the TCM storage
868 * object execution queue.
869 */
870 transport_generic_process_write(se_cmd);
871 return 0;
872}
873
874static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
875{
876 return 0;
877}
878
879static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
880{
881 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
882 struct tcm_loop_cmd, tl_se_cmd);
883 struct scsi_cmnd *sc = tl_cmd->sc;
884
885 TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
886 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
887
888 sc->result = SAM_STAT_GOOD;
889 set_host_byte(sc, DID_OK);
890 sc->scsi_done(sc);
891 return 0;
892}
893
894static int tcm_loop_queue_status(struct se_cmd *se_cmd)
895{
896 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
897 struct tcm_loop_cmd, tl_se_cmd);
898 struct scsi_cmnd *sc = tl_cmd->sc;
899
900 TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
901 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
902
903 if (se_cmd->sense_buffer &&
904 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
905 (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
906
907 memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
908 SCSI_SENSE_BUFFERSIZE);
909 sc->result = SAM_STAT_CHECK_CONDITION;
910 set_driver_byte(sc, DRIVER_SENSE);
911 } else
912 sc->result = se_cmd->scsi_status;
913
914 set_host_byte(sc, DID_OK);
915 sc->scsi_done(sc);
916 return 0;
917}
918
919static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
920{
921 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
922 struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
923 /*
924 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
925 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
926 */
927 atomic_set(&tl_tmr->tmr_complete, 1);
928 wake_up(&tl_tmr->tl_tmr_wait);
929 return 0;
930}
931
932static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
933{
934 return 0;
935}
936
937static u16 tcm_loop_get_fabric_sense_len(void)
938{
939 return 0;
940}
941
942static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
943{
944 switch (tl_hba->tl_proto_id) {
945 case SCSI_PROTOCOL_SAS:
946 return "SAS";
947 case SCSI_PROTOCOL_FCP:
948 return "FCP";
949 case SCSI_PROTOCOL_ISCSI:
950 return "iSCSI";
951 default:
952 break;
953 }
954
955 return "Unknown";
956}
957
958/* Start items for tcm_loop_port_cit */
959
960static int tcm_loop_port_link(
961 struct se_portal_group *se_tpg,
962 struct se_lun *lun)
963{
964 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
965 struct tcm_loop_tpg, tl_se_tpg);
966 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
967
968 atomic_inc(&tl_tpg->tl_tpg_port_count);
969 smp_mb__after_atomic_inc();
970 /*
971 * Add Linux/SCSI struct scsi_device by HCTL
972 */
973 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
974
975 printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
976 return 0;
977}
978
979static void tcm_loop_port_unlink(
980 struct se_portal_group *se_tpg,
981 struct se_lun *se_lun)
982{
983 struct scsi_device *sd;
984 struct tcm_loop_hba *tl_hba;
985 struct tcm_loop_tpg *tl_tpg;
986
987 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
988 tl_hba = tl_tpg->tl_hba;
989
990 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
991 se_lun->unpacked_lun);
992 if (!sd) {
993 printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
994 "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
995 return;
996 }
997 /*
998 * Remove Linux/SCSI struct scsi_device by HCTL
999 */
1000 scsi_remove_device(sd);
1001 scsi_device_put(sd);
1002
1003 atomic_dec(&tl_tpg->tl_tpg_port_count);
1004 smp_mb__after_atomic_dec();
1005
1006 printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
1007}
1008
1009/* End items for tcm_loop_port_cit */
1010
1011/* Start items for tcm_loop_nexus_cit */
1012
1013static int tcm_loop_make_nexus(
1014 struct tcm_loop_tpg *tl_tpg,
1015 const char *name)
1016{
1017 struct se_portal_group *se_tpg;
1018 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1019 struct tcm_loop_nexus *tl_nexus;
1020 int ret = -ENOMEM;
1021
1022 if (tl_tpg->tl_hba->tl_nexus) {
1023 printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
1024 return -EEXIST;
1025 }
1026 se_tpg = &tl_tpg->tl_se_tpg;
1027
1028 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
1029 if (!tl_nexus) {
1030 printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
1031 return -ENOMEM;
1032 }
1033 /*
1034 * Initialize the struct se_session pointer
1035 */
1036 tl_nexus->se_sess = transport_init_session();
1037 if (IS_ERR(tl_nexus->se_sess)) {
1038 ret = PTR_ERR(tl_nexus->se_sess);
1039 goto out;
1040 }
1041 /*
1042 * Since we are running in 'demo mode' this call will generate a
1043 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
1044 * Initiator port name of the passed configfs group 'name'.
1045 */
1046 tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1047 se_tpg, (unsigned char *)name);
1048 if (!tl_nexus->se_sess->se_node_acl) {
1049 transport_free_session(tl_nexus->se_sess);
1050 goto out;
1051 }
1052 /*
1053 * Now, register the SAS I_T Nexus as active with the call to
1054 * transport_register_session()
1055 */
1056 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
1057 tl_nexus->se_sess, (void *)tl_nexus);
1058 tl_tpg->tl_hba->tl_nexus = tl_nexus;
1059 printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
1060 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1061 name);
1062 return 0;
1063
1064out:
1065 kfree(tl_nexus);
1066 return ret;
1067}
1068
1069static int tcm_loop_drop_nexus(
1070 struct tcm_loop_tpg *tpg)
1071{
1072 struct se_session *se_sess;
1073 struct tcm_loop_nexus *tl_nexus;
1074 struct tcm_loop_hba *tl_hba = tpg->tl_hba;
1075
1076 tl_nexus = tpg->tl_hba->tl_nexus;
1077 if (!tl_nexus)
1078 return -ENODEV;
1079
1080 se_sess = tl_nexus->se_sess;
1081 if (!se_sess)
1082 return -ENODEV;
1083
1084 if (atomic_read(&tpg->tl_tpg_port_count)) {
1085 printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
1086 " active TPG port count: %d\n",
1087 atomic_read(&tpg->tl_tpg_port_count));
1088 return -EPERM;
1089 }
1090
1091 printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
1092 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1093 tl_nexus->se_sess->se_node_acl->initiatorname);
1094 /*
1095 * Release the SCSI I_T Nexus to the emulated SAS Target Port
1096 */
1097 transport_deregister_session(tl_nexus->se_sess);
1098 tpg->tl_hba->tl_nexus = NULL;
1099 kfree(tl_nexus);
1100 return 0;
1101}
1102
1103/* End items for tcm_loop_nexus_cit */
1104
1105static ssize_t tcm_loop_tpg_show_nexus(
1106 struct se_portal_group *se_tpg,
1107 char *page)
1108{
1109 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1110 struct tcm_loop_tpg, tl_se_tpg);
1111 struct tcm_loop_nexus *tl_nexus;
1112 ssize_t ret;
1113
1114 tl_nexus = tl_tpg->tl_hba->tl_nexus;
1115 if (!tl_nexus)
1116 return -ENODEV;
1117
1118 ret = snprintf(page, PAGE_SIZE, "%s\n",
1119 tl_nexus->se_sess->se_node_acl->initiatorname);
1120
1121 return ret;
1122}
1123
1124static ssize_t tcm_loop_tpg_store_nexus(
1125 struct se_portal_group *se_tpg,
1126 const char *page,
1127 size_t count)
1128{
1129 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1130 struct tcm_loop_tpg, tl_se_tpg);
1131 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1132 unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
1133 int ret;
1134 /*
1135 * Shutdown the active I_T nexus if 'NULL' is passed..
1136 */
1137 if (!strncmp(page, "NULL", 4)) {
1138 ret = tcm_loop_drop_nexus(tl_tpg);
1139 return (!ret) ? count : ret;
1140 }
1141 /*
1142 * Otherwise make sure the passed virtual Initiator port WWN matches
1143 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
1144 * tcm_loop_make_nexus()
1145 */
1146 if (strlen(page) >= TL_WWN_ADDR_LEN) {
1147 printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
1148 " max: %d\n", page, TL_WWN_ADDR_LEN);
1149 return -EINVAL;
1150 }
1151 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
1152
1153 ptr = strstr(i_port, "naa.");
1154 if (ptr) {
1155 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
1156 printk(KERN_ERR "Passed SAS Initiator Port %s does not"
1157 " match target port protoid: %s\n", i_port,
1158 tcm_loop_dump_proto_id(tl_hba));
1159 return -EINVAL;
1160 }
1161 port_ptr = &i_port[0];
1162 goto check_newline;
1163 }
1164 ptr = strstr(i_port, "fc.");
1165 if (ptr) {
1166 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
1167 printk(KERN_ERR "Passed FCP Initiator Port %s does not"
1168 " match target port protoid: %s\n", i_port,
1169 tcm_loop_dump_proto_id(tl_hba));
1170 return -EINVAL;
1171 }
1172 port_ptr = &i_port[3]; /* Skip over "fc." */
1173 goto check_newline;
1174 }
1175 ptr = strstr(i_port, "iqn.");
1176 if (ptr) {
1177 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
1178 printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
1179 " match target port protoid: %s\n", i_port,
1180 tcm_loop_dump_proto_id(tl_hba));
1181 return -EINVAL;
1182 }
1183 port_ptr = &i_port[0];
1184 goto check_newline;
1185 }
1186 printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
1187 " %s\n", i_port);
1188 return -EINVAL;
1189 /*
1190 * Clear any trailing newline for the NAA WWN
1191 */
1192check_newline:
1193 if (i_port[strlen(i_port)-1] == '\n')
1194 i_port[strlen(i_port)-1] = '\0';
1195
1196 ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
1197 if (ret < 0)
1198 return ret;
1199
1200 return count;
1201}
1202
1203TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
1204
1205static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1206 &tcm_loop_tpg_nexus.attr,
1207 NULL,
1208};
1209
1210/* Start items for tcm_loop_naa_cit */
1211
1212struct se_portal_group *tcm_loop_make_naa_tpg(
1213 struct se_wwn *wwn,
1214 struct config_group *group,
1215 const char *name)
1216{
1217 struct tcm_loop_hba *tl_hba = container_of(wwn,
1218 struct tcm_loop_hba, tl_hba_wwn);
1219 struct tcm_loop_tpg *tl_tpg;
1220 char *tpgt_str, *end_ptr;
1221 int ret;
1222 unsigned short int tpgt;
1223
1224 tpgt_str = strstr(name, "tpgt_");
1225 if (!tpgt_str) {
1226 printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
1227 " group\n");
1228 return ERR_PTR(-EINVAL);
1229 }
1230 tpgt_str += 5; /* Skip ahead of "tpgt_" */
1231 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1232
1233 if (tpgt > TL_TPGS_PER_HBA) {
1234 printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
1235 " %u\n", tpgt, TL_TPGS_PER_HBA);
1236 return ERR_PTR(-EINVAL);
1237 }
1238 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1239 tl_tpg->tl_hba = tl_hba;
1240 tl_tpg->tl_tpgt = tpgt;
1241 /*
1242 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
1243 */
1244 ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
1245 wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
1246 TRANSPORT_TPG_TYPE_NORMAL);
1247 if (ret < 0)
1248 return ERR_PTR(-ENOMEM);
1249
1250 printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
1251 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1252 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1253
1254 return &tl_tpg->tl_se_tpg;
1255}
1256
1257void tcm_loop_drop_naa_tpg(
1258 struct se_portal_group *se_tpg)
1259{
1260 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1261 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1262 struct tcm_loop_tpg, tl_se_tpg);
1263 struct tcm_loop_hba *tl_hba;
1264 unsigned short tpgt;
1265
1266 tl_hba = tl_tpg->tl_hba;
1267 tpgt = tl_tpg->tl_tpgt;
1268 /*
1269 * Release the I_T Nexus for the Virtual SAS link if present
1270 */
1271 tcm_loop_drop_nexus(tl_tpg);
1272 /*
1273 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
1274 */
1275 core_tpg_deregister(se_tpg);
1276
1277 printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
1278 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1279 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1280}
1281
1282/* End items for tcm_loop_naa_cit */
1283
1284/* Start items for tcm_loop_cit */
1285
1286struct se_wwn *tcm_loop_make_scsi_hba(
1287 struct target_fabric_configfs *tf,
1288 struct config_group *group,
1289 const char *name)
1290{
1291 struct tcm_loop_hba *tl_hba;
1292 struct Scsi_Host *sh;
1293 char *ptr;
1294 int ret, off = 0;
1295
1296 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1297 if (!tl_hba) {
1298 printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
1299 return ERR_PTR(-ENOMEM);
1300 }
1301 /*
1302 * Determine the emulated Protocol Identifier and Target Port Name
1303 * based on the incoming configfs directory name.
1304 */
1305 ptr = strstr(name, "naa.");
1306 if (ptr) {
1307 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1308 goto check_len;
1309 }
1310 ptr = strstr(name, "fc.");
1311 if (ptr) {
1312 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1313 off = 3; /* Skip over "fc." */
1314 goto check_len;
1315 }
1316 ptr = strstr(name, "iqn.");
1317 if (ptr) {
1318 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1319 goto check_len;
1320 }
1321
1322 printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
1323 " %s\n", name);
1324 return ERR_PTR(-EINVAL);
1325
1326check_len:
1327 if (strlen(name) >= TL_WWN_ADDR_LEN) {
1328 printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
1329 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
1330 TL_WWN_ADDR_LEN);
1331 kfree(tl_hba);
1332 return ERR_PTR(-EINVAL);
1333 }
1334 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1335
1336 /*
1337 * Call device_register(tl_hba->dev) to register the emulated
1338 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
1339 * device_register() callbacks in tcm_loop_driver_probe()
1340 */
1341 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1342 if (ret)
1343 goto out;
1344
1345 sh = tl_hba->sh;
1346 tcm_loop_hba_no_cnt++;
1347 printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
1348 " %s Address: %s at Linux/SCSI Host ID: %d\n",
1349 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1350
1351 return &tl_hba->tl_hba_wwn;
1352out:
1353 kfree(tl_hba);
1354 return ERR_PTR(ret);
1355}
1356
1357void tcm_loop_drop_scsi_hba(
1358 struct se_wwn *wwn)
1359{
1360 struct tcm_loop_hba *tl_hba = container_of(wwn,
1361 struct tcm_loop_hba, tl_hba_wwn);
1362 int host_no = tl_hba->sh->host_no;
1363 /*
1364 * Call device_unregister() on the original tl_hba->dev.
1365 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1366 * release *tl_hba;
1367 */
1368 device_unregister(&tl_hba->dev);
1369
1370 printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
1371 " SAS Address: %s at Linux/SCSI Host ID: %d\n",
1372 config_item_name(&wwn->wwn_group.cg_item), host_no);
1373}
1374
1375/* Start items for tcm_loop_cit */
1376static ssize_t tcm_loop_wwn_show_attr_version(
1377 struct target_fabric_configfs *tf,
1378 char *page)
1379{
1380 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1381}
1382
1383TF_WWN_ATTR_RO(tcm_loop, version);
1384
1385static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1386 &tcm_loop_wwn_version.attr,
1387 NULL,
1388};
1389
1390/* End items for tcm_loop_cit */
1391
1392static int tcm_loop_register_configfs(void)
1393{
1394 struct target_fabric_configfs *fabric;
1395 struct config_group *tf_cg;
1396 int ret;
1397 /*
1398 * Set the TCM Loop HBA counter to zero
1399 */
1400 tcm_loop_hba_no_cnt = 0;
1401 /*
1402 * Register the top level struct config_item_type with TCM core
1403 */
1404 fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
1405 if (!fabric) {
1406 printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
1407 return -1;
1408 }
1409 /*
1410 * Setup the fabric API of function pointers used by target_core_mod
1411 */
1412 fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
1413 fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
1414 fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
1415 fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
1416 fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
1417 fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
1418 fabric->tf_ops.tpg_get_pr_transport_id_len =
1419 &tcm_loop_get_pr_transport_id_len;
1420 fabric->tf_ops.tpg_parse_pr_out_transport_id =
1421 &tcm_loop_parse_pr_out_transport_id;
1422 fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
1423 fabric->tf_ops.tpg_check_demo_mode_cache =
1424 &tcm_loop_check_demo_mode_cache;
1425 fabric->tf_ops.tpg_check_demo_mode_write_protect =
1426 &tcm_loop_check_demo_mode_write_protect;
1427 fabric->tf_ops.tpg_check_prod_mode_write_protect =
1428 &tcm_loop_check_prod_mode_write_protect;
1429 /*
1430 * The TCM loopback fabric module runs in demo-mode to a local
1431 * virtual SCSI device, so fabric dependent initiator ACLs are
1432 * not required.
1433 */
1434 fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
1435 fabric->tf_ops.tpg_release_fabric_acl =
1436 &tcm_loop_tpg_release_fabric_acl;
1437 fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
1438 /*
1439 * Since tcm_loop is mapping physical memory from Linux/SCSI
1440 * struct scatterlist arrays for each struct scsi_cmnd I/O,
1441 * we do not need TCM to allocate an iovec array for
1442 * virtual memory address mappings
1443 */
1444 fabric->tf_ops.alloc_cmd_iovecs = NULL;
1445 /*
1446 * Used for setting up remaining TCM resources in process context
1447 */
1448 fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
1449 fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
1450 fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
1451 fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
1452 fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
1453 fabric->tf_ops.close_session = &tcm_loop_close_session;
1454 fabric->tf_ops.stop_session = &tcm_loop_stop_session;
1455 fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0;
1456 fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in;
1457 fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
1458 fabric->tf_ops.sess_get_initiator_sid = NULL;
1459 fabric->tf_ops.write_pending = &tcm_loop_write_pending;
1460 fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
1461 /*
1462 * Not used for TCM loopback
1463 */
1464 fabric->tf_ops.set_default_node_attributes =
1465 &tcm_loop_set_default_node_attributes;
1466 fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
1467 fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
1468 fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
1469 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
1470 fabric->tf_ops.queue_status = &tcm_loop_queue_status;
1471 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
1472 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
1473 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
1474 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
1475
1476 tf_cg = &fabric->tf_group;
1477 /*
1478 * Setup function pointers for generic logic in target_core_fabric_configfs.c
1479 */
1480 fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
1481 fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
1482 fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
1483 fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
1484 /*
1485 * fabric_post_link() and fabric_pre_unlink() are used for
1486 * registration and release of TCM Loop Virtual SCSI LUNs.
1487 */
1488 fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
1489 fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
1490 fabric->tf_ops.fabric_make_np = NULL;
1491 fabric->tf_ops.fabric_drop_np = NULL;
1492 /*
1493 * Setup default attribute lists for various fabric->tf_cit_tmpl
1494 */
1495 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
1496 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
1497 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1498 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1499 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1500 /*
1501 * Once fabric->tf_ops has been setup, now register the fabric for
1502 * use within TCM
1503 */
1504 ret = target_fabric_configfs_register(fabric);
1505 if (ret < 0) {
1506 printk(KERN_ERR "target_fabric_configfs_register() for"
1507 " TCM_Loop failed!\n");
1508 target_fabric_configfs_free(fabric);
1509 return -1;
1510 }
1511 /*
1512 * Setup our local pointer to *fabric.
1513 */
1514 tcm_loop_fabric_configfs = fabric;
1515 printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
1516 " tcm_loop_fabric_configfs\n");
1517 return 0;
1518}
1519
1520static void tcm_loop_deregister_configfs(void)
1521{
1522 if (!tcm_loop_fabric_configfs)
1523 return;
1524
1525 target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
1526 tcm_loop_fabric_configfs = NULL;
1527 printk(KERN_INFO "TCM_LOOP[0] - Cleared"
1528 " tcm_loop_fabric_configfs\n");
1529}
1530
1531static int __init tcm_loop_fabric_init(void)
1532{
1533 int ret;
1534
1535 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1536 sizeof(struct tcm_loop_cmd),
1537 __alignof__(struct tcm_loop_cmd),
1538 0, NULL);
1539 if (!tcm_loop_cmd_cache) {
1540 printk(KERN_ERR "kmem_cache_create() for"
1541 " tcm_loop_cmd_cache failed\n");
1542 return -ENOMEM;
1543 }
1544
1545 ret = tcm_loop_alloc_core_bus();
1546 if (ret)
1547 return ret;
1548
1549 ret = tcm_loop_register_configfs();
1550 if (ret) {
1551 tcm_loop_release_core_bus();
1552 return ret;
1553 }
1554
1555 return 0;
1556}
1557
1558static void __exit tcm_loop_fabric_exit(void)
1559{
1560 tcm_loop_deregister_configfs();
1561 tcm_loop_release_core_bus();
1562 kmem_cache_destroy(tcm_loop_cmd_cache);
1563}
1564
1565MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1566MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1567MODULE_LICENSE("GPL");
1568module_init(tcm_loop_fabric_init);
1569module_exit(tcm_loop_fabric_exit);
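
The LUN_RESET path above (tcm_loop_device_reset() together with tcm_loop_queue_tm_rsp()) hands a TMR off to TCM core, then sleeps on a wait_queue_head_t until the response callback sets tmr_complete and wakes the waiter. The following is a minimal userspace sketch of that handshake, using POSIX threads as stand-ins for the kernel's waitqueue and atomic primitives; the names and the simplified flag are illustrative only, not kernel APIs.

/* Build with: cc -pthread handshake.c */
#include <pthread.h>
#include <stdio.h>

struct tmr_handshake {
	pthread_mutex_t lock;
	pthread_cond_t  wait;		/* stands in for wait_queue_head_t */
	int             complete;	/* stands in for atomic_t tmr_complete */
};

static struct tmr_handshake hs = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
};

/* Responder side: analogous to tcm_loop_queue_tm_rsp() */
static void *tmr_responder(void *arg)
{
	pthread_mutex_lock(&hs.lock);
	hs.complete = 1;			/* atomic_set(&tl_tmr->tmr_complete, 1) */
	pthread_cond_signal(&hs.wait);		/* wake_up(&tl_tmr->tl_tmr_wait) */
	pthread_mutex_unlock(&hs.lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, tmr_responder, NULL);

	/* Waiter side: analogous to wait_event() in tcm_loop_device_reset() */
	pthread_mutex_lock(&hs.lock);
	while (!hs.complete)
		pthread_cond_wait(&hs.wait, &hs.lock);
	pthread_mutex_unlock(&hs.lock);

	pthread_join(t, NULL);
	printf("TMR response received\n");
	return 0;
}
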
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
new file mode 100644
index 000000000000..7e9f7ab45548
--- /dev/null
+++ b/drivers/target/loopback/tcm_loop.h
@@ -0,0 +1,77 @@
1#define TCM_LOOP_VERSION "v2.1-rc1"
2#define TL_WWN_ADDR_LEN 256
3#define TL_TPGS_PER_HBA 32
4/*
5 * Defaults for struct scsi_host_template tcm_loop_driver_template
6 *
7 * We use large can_queue and cmd_per_lun here and let TCM enforce
8 * the underlying se_device_t->queue_depth.
9 */
10#define TL_SCSI_CAN_QUEUE 1024
11#define TL_SCSI_CMD_PER_LUN 1024
12#define TL_SCSI_MAX_SECTORS 1024
13#define TL_SCSI_SG_TABLESIZE 256
14/*
15 * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
16 */
17#define TL_SCSI_MAX_CMD_LEN 32
18
19#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
20# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
21#else
22# define TL_CDB_DEBUG(x...)
23#endif
24
25struct tcm_loop_cmd {
26 /* State of Linux/SCSI CDB+Data descriptor */
27 u32 sc_cmd_state;
28 /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
29 struct scsi_cmnd *sc;
30 struct list_head *tl_cmd_list;
31 /* The TCM I/O descriptor that is accessed via container_of() */
32 struct se_cmd tl_se_cmd;
33 /* Sense buffer that will be mapped into outgoing status */
34 unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
35};
36
37struct tcm_loop_tmr {
38 atomic_t tmr_complete;
39 wait_queue_head_t tl_tmr_wait;
40};
41
42struct tcm_loop_nexus {
43 int it_nexus_active;
44 /*
45 * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
46 */
47 struct scsi_host *sh;
48 /*
49 * Pointer to TCM session for I_T Nexus
50 */
51 struct se_session *se_sess;
52};
53
54struct tcm_loop_nacl {
55 struct se_node_acl se_node_acl;
56};
57
58struct tcm_loop_tpg {
59 unsigned short tl_tpgt;
60 atomic_t tl_tpg_port_count;
61 struct se_portal_group tl_se_tpg;
62 struct tcm_loop_hba *tl_hba;
63};
64
65struct tcm_loop_hba {
66 u8 tl_proto_id;
67 unsigned char tl_wwn_address[TL_WWN_ADDR_LEN];
68 struct se_hba_s *se_hba;
69 struct se_lun *tl_hba_lun;
70 struct se_port *tl_hba_lun_sep;
71 struct se_device_s *se_dev_hba_ptr;
72 struct tcm_loop_nexus *tl_nexus;
73 struct device dev;
74 struct Scsi_Host *sh;
75 struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
76 struct se_wwn tl_hba_wwn;
77};
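
tcm_loop.h embeds struct se_cmd inside struct tcm_loop_cmd as the tl_se_cmd member, and tcm_loop.c recovers the outer fabric descriptor from the core's se_cmd pointer with container_of(), for example in tcm_loop_new_cmd_map() and tcm_loop_get_cmd_state(). Below is a self-contained userspace sketch of that embed-and-recover pattern; the struct layouts and the local container_of() definition are simplified stand-ins, not the real TCM definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd {				/* stand-in for the TCM core descriptor */
	int scsi_status;
};

struct tcm_loop_cmd {			/* stand-in for the fabric descriptor */
	int sc_cmd_state;
	struct se_cmd tl_se_cmd;	/* embedded core descriptor */
};

/*
 * Mirrors tcm_loop_get_cmd_state(): the core passes se_cmd, the fabric
 * recovers its own per-command state via container_of().
 */
static int get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

int main(void)
{
	struct tcm_loop_cmd cmd = { .sc_cmd_state = 42 };

	printf("sc_cmd_state=%d\n", get_cmd_state(&cmd.tl_se_cmd));
	return 0;
}
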
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 000000000000..47abb42d9c36
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
1/*******************************************************************************
2 * Filename: target_core_alua.c
3 *
4 * This file contains SPC-3 compliant asymmetric logical unit access (ALUA)
5 *
6 * Copyright (c) 2009-2010 Rising Tide Systems
7 * Copyright (c) 2009-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27#include <linux/version.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <linux/configfs.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_device.h>
36#include <target/target_core_transport.h>
37#include <target/target_core_fabric_ops.h>
38#include <target/target_core_configfs.h>
39
40#include "target_core_alua.h"
41#include "target_core_hba.h"
42#include "target_core_ua.h"
43
44static int core_alua_check_transition(int state, int *primary);
45static int core_alua_set_tg_pt_secondary_state(
46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47 struct se_port *port, int explict, int offline);
48
49/*
50 * REPORT_TARGET_PORT_GROUPS
51 *
52 * See spc4r17 section 6.27
53 */
54int core_emulate_report_target_port_groups(struct se_cmd *cmd)
55{
56 struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
57 struct se_port *port;
58 struct t10_alua_tg_pt_gp *tg_pt_gp;
59 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
60 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
61 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
62 Target port group descriptor */
63
64 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
65 list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
66 tg_pt_gp_list) {
67 /*
68 * PREF: Preferred target port bit, determine if this
69 * bit should be set for port group.
70 */
71 if (tg_pt_gp->tg_pt_gp_pref)
72 buf[off] = 0x80;
73 /*
74 * Set the ASYMMETRIC ACCESS State
75 */
76 buf[off++] |= (atomic_read(
77 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
78 /*
79 * Set supported ASYMMETRIC ACCESS State bits
80 */
81 buf[off] = 0x80; /* T_SUP */
82 buf[off] |= 0x40; /* O_SUP */
83 buf[off] |= 0x8; /* U_SUP */
84 buf[off] |= 0x4; /* S_SUP */
85 buf[off] |= 0x2; /* AN_SUP */
86 buf[off++] |= 0x1; /* AO_SUP */
87 /*
88 * TARGET PORT GROUP
89 */
90 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
91 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
92
93 off++; /* Skip over Reserved */
94 /*
95 * STATUS CODE
96 */
97 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
98 /*
99 * Vendor Specific field
100 */
101 buf[off++] = 0x00;
102 /*
103 * TARGET PORT COUNT
104 */
105 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
106 rd_len += 8;
107
108 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
109 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
110 tg_pt_gp_mem_list) {
111 port = tg_pt_gp_mem->tg_pt;
112 /*
113 * Start Target Port descriptor format
114 *
115			 * See spc4r17 section 6.27 Table 247
116 */
117 off += 2; /* Skip over Obsolete */
118 /*
119 * Set RELATIVE TARGET PORT IDENTIFIER
120 */
121 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
122 buf[off++] = (port->sep_rtpi & 0xff);
123 rd_len += 4;
124 }
125 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
126 }
127 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
128 /*
129 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
130 */
131 buf[0] = ((rd_len >> 24) & 0xff);
132 buf[1] = ((rd_len >> 16) & 0xff);
133 buf[2] = ((rd_len >> 8) & 0xff);
134 buf[3] = (rd_len & 0xff);
135
136 return 0;
137}
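For reference, the DataIN payload assembled above follows the REPORT TARGET PORT GROUPS layout from spc4r17 section 6.27: a four-byte header carrying the RETURN DATA LENGTH, followed by one eight-byte descriptor per target port group and one four-byte entry per member port. A minimal sketch of that layout as packed structures; the struct and field names are illustrative only and are not part of this code:

	struct rtpg_port_desc {
		u8	obsolete[2];
		__be16	relative_tgt_port_id;	/* RELATIVE TARGET PORT IDENTIFIER */
	} __packed;

	struct rtpg_group_desc {
		u8	pref_and_state;		/* bit 7: PREF, bits 3:0: asymmetric access state */
		u8	supported_states;	/* T_SUP, O_SUP, U_SUP, S_SUP, AN_SUP, AO_SUP */
		__be16	tg_pt_group_id;		/* TARGET PORT GROUP */
		u8	reserved;
		u8	status_code;		/* STATUS CODE */
		u8	vendor_specific;
		u8	target_port_count;	/* TARGET PORT COUNT */
		struct rtpg_port_desc ports[];	/* one entry per member port */
	} __packed;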
138
139/*
140 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
141 *
142 * See spc4r17 section 6.35
143 */
144int core_emulate_set_target_port_groups(struct se_cmd *cmd)
145{
146 struct se_device *dev = SE_DEV(cmd);
147 struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
148 struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
149 struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
150 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
151 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
152 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
153 unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
154 u32 len = 4; /* Skip over RESERVED area in header */
155 int alua_access_state, primary = 0, rc;
156 u16 tg_pt_id, rtpi;
157
158 if (!(l_port))
159 return PYX_TRANSPORT_LU_COMM_FAILURE;
160 /*
161	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
162 * for the local tg_pt_gp.
163 */
164 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
165 if (!(l_tg_pt_gp_mem)) {
166 printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
167 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
168 }
169 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
170 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
171 if (!(l_tg_pt_gp)) {
172 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
173 printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
174 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
175 }
176 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
177 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
178
179 if (!(rc)) {
180 printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
181 " while TPGS_EXPLICT_ALUA is disabled\n");
182 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
183 }
184
185 while (len < cmd->data_length) {
186 alua_access_state = (ptr[0] & 0x0f);
187 /*
188 * Check the received ALUA access state, and determine if
189 * the state is a primary or secondary target port asymmetric
190 * access state.
191 */
192 rc = core_alua_check_transition(alua_access_state, &primary);
193 if (rc != 0) {
194 /*
195 * If the SET TARGET PORT GROUPS attempts to establish
196 * an invalid combination of target port asymmetric
197 * access states or attempts to establish an
198 * unsupported target port asymmetric access state,
199 * then the command shall be terminated with CHECK
200 * CONDITION status, with the sense key set to ILLEGAL
201 * REQUEST, and the additional sense code set to INVALID
202 * FIELD IN PARAMETER LIST.
203 */
204 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
205 }
206 rc = -1;
207 /*
208 * If the ASYMMETRIC ACCESS STATE field (see table 267)
209 * specifies a primary target port asymmetric access state,
210 * then the TARGET PORT GROUP OR TARGET PORT field specifies
211 * a primary target port group for which the primary target
212 * port asymmetric access state shall be changed. If the
213 * ASYMMETRIC ACCESS STATE field specifies a secondary target
214 * port asymmetric access state, then the TARGET PORT GROUP OR
215 * TARGET PORT field specifies the relative target port
216 * identifier (see 3.1.120) of the target port for which the
217 * secondary target port asymmetric access state shall be
218 * changed.
219 */
220 if (primary) {
221			tg_pt_id = ((ptr[2] << 8) & 0xff00);
222 tg_pt_id |= (ptr[3] & 0xff);
223 /*
224 * Locate the matching target port group ID from
225 * the global tg_pt_gp list
226 */
227 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
228 list_for_each_entry(tg_pt_gp,
229 &T10_ALUA(su_dev)->tg_pt_gps_list,
230 tg_pt_gp_list) {
231 if (!(tg_pt_gp->tg_pt_gp_valid_id))
232 continue;
233
234 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
235 continue;
236
237 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
238 smp_mb__after_atomic_inc();
239 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
240
241 rc = core_alua_do_port_transition(tg_pt_gp,
242 dev, l_port, nacl,
243 alua_access_state, 1);
244
245 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
246 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
247 smp_mb__after_atomic_dec();
248 break;
249 }
250 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
251 /*
252			 * If no matching target port group ID can be located,
253 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
254 */
255 if (rc != 0)
256 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
257 } else {
258 /*
259			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
260			 * the Target Port in question for the incoming
261 * SET_TARGET_PORT_GROUPS op.
262 */
263			rtpi = ((ptr[2] << 8) & 0xff00);
264 rtpi |= (ptr[3] & 0xff);
265 /*
266			 * Locate the matching relative target port identifier
267 * for the struct se_device storage object.
268 */
269 spin_lock(&dev->se_port_lock);
270 list_for_each_entry(port, &dev->dev_sep_list,
271 sep_list) {
272 if (port->sep_rtpi != rtpi)
273 continue;
274
275 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
276 spin_unlock(&dev->se_port_lock);
277
278 rc = core_alua_set_tg_pt_secondary_state(
279 tg_pt_gp_mem, port, 1, 1);
280
281 spin_lock(&dev->se_port_lock);
282 break;
283 }
284 spin_unlock(&dev->se_port_lock);
285 /*
286			 * If no matching relative target port identifier can
287 * be located, throw an exception with ASCQ:
288 * INVALID_PARAMETER_LIST
289 */
290 if (rc != 0)
291 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
292 }
293
294 ptr += 4;
295 len += 4;
296 }
297
298 return 0;
299}
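The parameter data parsed above is the SET TARGET PORT GROUPS parameter list from spc4r17 section 6.35: a four-byte reserved header followed by four-byte descriptors, each carrying the requested state in the low nibble of byte 0 and, in bytes 2 and 3, either a TARGET PORT GROUP ID (for primary states) or a RELATIVE TARGET PORT IDENTIFIER (for the secondary OFFLINE state). A sketch of one descriptor, again with illustrative names:

	struct stpg_desc {
		u8	alua_access_state;	/* bits 3:0: requested asymmetric access state */
		u8	reserved;
		__be16	id;			/* TARGET PORT GROUP or RELATIVE TARGET PORT IDENTIFIER */
	} __packed;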
300
301static inline int core_alua_state_nonoptimized(
302 struct se_cmd *cmd,
303 unsigned char *cdb,
304 int nonop_delay_msecs,
305 u8 *alua_ascq)
306{
307 /*
308 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
309 * later to determine if processing of this cmd needs to be
310 * temporarily delayed for the Active/NonOptimized primary access state.
311 */
312 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
313 cmd->alua_nonop_delay = nonop_delay_msecs;
314 return 0;
315}
316
317static inline int core_alua_state_standby(
318 struct se_cmd *cmd,
319 unsigned char *cdb,
320 u8 *alua_ascq)
321{
322 /*
323 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
324 * spc4r17 section 5.9.2.4.4
325 */
326 switch (cdb[0]) {
327 case INQUIRY:
328 case LOG_SELECT:
329 case LOG_SENSE:
330 case MODE_SELECT:
331 case MODE_SENSE:
332 case REPORT_LUNS:
333 case RECEIVE_DIAGNOSTIC:
334 case SEND_DIAGNOSTIC:
335 case MAINTENANCE_IN:
336 switch (cdb[1]) {
337 case MI_REPORT_TARGET_PGS:
338 return 0;
339 default:
340 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
341 return 1;
342 }
343 case MAINTENANCE_OUT:
344 switch (cdb[1]) {
345 case MO_SET_TARGET_PGS:
346 return 0;
347 default:
348 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
349 return 1;
350 }
351 case REQUEST_SENSE:
352 case PERSISTENT_RESERVE_IN:
353 case PERSISTENT_RESERVE_OUT:
354 case READ_BUFFER:
355 case WRITE_BUFFER:
356 return 0;
357 default:
358 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
359 return 1;
360 }
361
362 return 0;
363}
364
365static inline int core_alua_state_unavailable(
366 struct se_cmd *cmd,
367 unsigned char *cdb,
368 u8 *alua_ascq)
369{
370 /*
371 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
372 * spc4r17 section 5.9.2.4.5
373 */
374 switch (cdb[0]) {
375 case INQUIRY:
376 case REPORT_LUNS:
377 case MAINTENANCE_IN:
378 switch (cdb[1]) {
379 case MI_REPORT_TARGET_PGS:
380 return 0;
381 default:
382 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
383 return 1;
384 }
385 case MAINTENANCE_OUT:
386 switch (cdb[1]) {
387 case MO_SET_TARGET_PGS:
388 return 0;
389 default:
390 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
391 return 1;
392 }
393 case REQUEST_SENSE:
394 case READ_BUFFER:
395 case WRITE_BUFFER:
396 return 0;
397 default:
398 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
399 return 1;
400 }
401
402 return 0;
403}
404
405static inline int core_alua_state_transition(
406 struct se_cmd *cmd,
407 unsigned char *cdb,
408 u8 *alua_ascq)
409{
410 /*
411	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
412 * spc4r17 section 5.9.2.5
413 */
414 switch (cdb[0]) {
415 case INQUIRY:
416 case REPORT_LUNS:
417 case MAINTENANCE_IN:
418 switch (cdb[1]) {
419 case MI_REPORT_TARGET_PGS:
420 return 0;
421 default:
422 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
423 return 1;
424 }
425 case REQUEST_SENSE:
426 case READ_BUFFER:
427 case WRITE_BUFFER:
428 return 0;
429 default:
430 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
431 return 1;
432 }
433
434 return 0;
435}
436
437/*
438 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
439 * in transport_cmd_sequencer(). This function is assigned to
440 * struct t10_alua *->state_check() in core_setup_alua()
441 */
442static int core_alua_state_check_nop(
443 struct se_cmd *cmd,
444 unsigned char *cdb,
445 u8 *alua_ascq)
446{
447 return 0;
448}
449
450/*
451 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
452 * This function is assigned to struct t10_alua *->state_check() in
453 * core_setup_alua()
454 *
455 * Also, this function can return three different return codes to
456 * signal transport_generic_cmd_sequencer()
457 *
458 * return 1: Is used to signal LUN not accessible, and check condition/not ready
459 * return 0: Used to signal success
460 * return -1: Used to signal failure, and invalid cdb field
461 */
462static int core_alua_state_check(
463 struct se_cmd *cmd,
464 unsigned char *cdb,
465 u8 *alua_ascq)
466{
467 struct se_lun *lun = SE_LUN(cmd);
468 struct se_port *port = lun->lun_sep;
469 struct t10_alua_tg_pt_gp *tg_pt_gp;
470 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
471 int out_alua_state, nonop_delay_msecs;
472
473 if (!(port))
474 return 0;
475 /*
476 * First, check for a struct se_port specific secondary ALUA target port
477 * access state: OFFLINE
478 */
479 if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
480 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
481 printk(KERN_INFO "ALUA: Got secondary offline status for local"
482 " target port\n");
483 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
484 return 1;
485 }
486 /*
487 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
488 * ALUA target port group, to obtain current ALUA access state.
489 * Otherwise look for the underlying struct se_device association with
490	 * an ALUA logical unit group.
491 */
492 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
493 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
494 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
495 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
496 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
497 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
498 /*
499 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
500 * statement so the compiler knows explicitly to check this case first.
501 * For the Optimized ALUA access state case, we want to process the
502 * incoming fabric cmd ASAP..
503 */
504 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
505 return 0;
506
507 switch (out_alua_state) {
508 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
509 return core_alua_state_nonoptimized(cmd, cdb,
510 nonop_delay_msecs, alua_ascq);
511 case ALUA_ACCESS_STATE_STANDBY:
512 return core_alua_state_standby(cmd, cdb, alua_ascq);
513 case ALUA_ACCESS_STATE_UNAVAILABLE:
514 return core_alua_state_unavailable(cmd, cdb, alua_ascq);
515 case ALUA_ACCESS_STATE_TRANSITION:
516 return core_alua_state_transition(cmd, cdb, alua_ascq);
517 /*
518 * OFFLINE is a secondary ALUA target port group access state, that is
519 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
520 */
521 case ALUA_ACCESS_STATE_OFFLINE:
522 default:
523 printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
524 out_alua_state);
525 return -1;
526 }
527
528 return 0;
529}
530
531/*
532 * Check implicit and explicit ALUA state change requests.
533 */
534static int core_alua_check_transition(int state, int *primary)
535{
536 switch (state) {
537 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
538 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
539 case ALUA_ACCESS_STATE_STANDBY:
540 case ALUA_ACCESS_STATE_UNAVAILABLE:
541 /*
542 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
543 * defined as primary target port asymmetric access states.
544 */
545 *primary = 1;
546 break;
547 case ALUA_ACCESS_STATE_OFFLINE:
548 /*
549 * OFFLINE state is defined as a secondary target port
550 * asymmetric access state.
551 */
552 *primary = 0;
553 break;
554 default:
555 printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
556 return -1;
557 }
558
559 return 0;
560}
561
562static char *core_alua_dump_state(int state)
563{
564 switch (state) {
565 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
566 return "Active/Optimized";
567 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
568 return "Active/NonOptimized";
569 case ALUA_ACCESS_STATE_STANDBY:
570 return "Standby";
571 case ALUA_ACCESS_STATE_UNAVAILABLE:
572 return "Unavailable";
573 case ALUA_ACCESS_STATE_OFFLINE:
574 return "Offline";
575 default:
576 return "Unknown";
577 }
578
579 return NULL;
580}
581
582char *core_alua_dump_status(int status)
583{
584 switch (status) {
585 case ALUA_STATUS_NONE:
586 return "None";
587 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
588 return "Altered by Explict STPG";
589 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
590 return "Altered by Implict ALUA";
591 default:
592 return "Unknown";
593 }
594
595 return NULL;
596}
597
598/*
599 * Used by fabric modules to determine when we need to delay processing
600 * for the Active/NonOptimized paths..
601 */
602int core_alua_check_nonop_delay(
603 struct se_cmd *cmd)
604{
605 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
606 return 0;
607 if (in_interrupt())
608 return 0;
609 /*
610 * The ALUA Active/NonOptimized access state delay can be disabled
611	 * via configfs with a value of zero
612 */
613 if (!(cmd->alua_nonop_delay))
614 return 0;
615 /*
616 * struct se_cmd->alua_nonop_delay gets set by a target port group
617 * defined interval in core_alua_state_nonoptimized()
618 */
619 msleep_interruptible(cmd->alua_nonop_delay);
620 return 0;
621}
622EXPORT_SYMBOL(core_alua_check_nonop_delay);
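As the comment above notes, this hook is meant to be called by fabric modules; a natural place is the fabric's response path, so the Active/NonOptimized delay is honored before data or status goes back on the wire. A minimal sketch, where the surrounding fabric callback is hypothetical:

	static int demo_fabric_queue_data_in(struct se_cmd *se_cmd)
	{
		/* Sleeps for se_cmd->alua_nonop_delay msecs when the path is Active/NonOptimized */
		core_alua_check_nonop_delay(se_cmd);

		/* ... hand the read payload back to the fabric transport here ... */
		return 0;
	}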
623
624/*
625 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
626 *
627 */
628static int core_alua_write_tpg_metadata(
629 const char *path,
630 unsigned char *md_buf,
631 u32 md_buf_len)
632{
633 mm_segment_t old_fs;
634 struct file *file;
635 struct iovec iov[1];
636 int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
637
638 memset(iov, 0, sizeof(struct iovec));
639
640 file = filp_open(path, flags, 0600);
641 if (IS_ERR(file) || !file || !file->f_dentry) {
642 printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
643 path);
644 return -ENODEV;
645 }
646
647 iov[0].iov_base = &md_buf[0];
648 iov[0].iov_len = md_buf_len;
649
650 old_fs = get_fs();
651 set_fs(get_ds());
652 ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
653 set_fs(old_fs);
654
655 if (ret < 0) {
656 printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
657 filp_close(file, NULL);
658 return -EIO;
659 }
660 filp_close(file, NULL);
661
662 return 0;
663}
664
665/*
666 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
667 */
668static int core_alua_update_tpg_primary_metadata(
669 struct t10_alua_tg_pt_gp *tg_pt_gp,
670 int primary_state,
671 unsigned char *md_buf)
672{
673 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
674 struct t10_wwn *wwn = &su_dev->t10_wwn;
675 char path[ALUA_METADATA_PATH_LEN];
676 int len;
677
678 memset(path, 0, ALUA_METADATA_PATH_LEN);
679
680 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
681 "tg_pt_gp_id=%hu\n"
682 "alua_access_state=0x%02x\n"
683 "alua_access_status=0x%02x\n",
684 tg_pt_gp->tg_pt_gp_id, primary_state,
685 tg_pt_gp->tg_pt_gp_alua_access_status);
686
687 snprintf(path, ALUA_METADATA_PATH_LEN,
688 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
689 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
690
691 return core_alua_write_tpg_metadata(path, md_buf, len);
692}
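Given the snprintf() format above, a primary metadata file written to /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name> contains key=value lines of the following form (the values shown are only illustrative):

	tg_pt_gp_id=1
	alua_access_state=0x01
	alua_access_status=0x01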
693
694static int core_alua_do_transition_tg_pt(
695 struct t10_alua_tg_pt_gp *tg_pt_gp,
696 struct se_port *l_port,
697 struct se_node_acl *nacl,
698 unsigned char *md_buf,
699 int new_state,
700 int explict)
701{
702 struct se_dev_entry *se_deve;
703 struct se_lun_acl *lacl;
704 struct se_port *port;
705 struct t10_alua_tg_pt_gp_member *mem;
706 int old_state = 0;
707 /*
708 * Save the old primary ALUA access state, and set the current state
709 * to ALUA_ACCESS_STATE_TRANSITION.
710 */
711 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
712 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
713 ALUA_ACCESS_STATE_TRANSITION);
714 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
715 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
716 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
717 /*
718 * Check for the optional ALUA primary state transition delay
719 */
720 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
721 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
722
723 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
724 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
725 tg_pt_gp_mem_list) {
726 port = mem->tg_pt;
727 /*
728 * After an implicit target port asymmetric access state
729 * change, a device server shall establish a unit attention
730 * condition for the initiator port associated with every I_T
731 * nexus with the additional sense code set to ASYMMETRIC
732		 * ACCESS STATE CHANGED.
733 *
734 * After an explicit target port asymmetric access state
735 * change, a device server shall establish a unit attention
736 * condition with the additional sense code set to ASYMMETRIC
737 * ACCESS STATE CHANGED for the initiator port associated with
738 * every I_T nexus other than the I_T nexus on which the SET
739		 * TARGET PORT GROUPS command was received.
740 */
741 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
742 smp_mb__after_atomic_inc();
743 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
744
745 spin_lock_bh(&port->sep_alua_lock);
746 list_for_each_entry(se_deve, &port->sep_alua_list,
747 alua_port_list) {
748 lacl = se_deve->se_lun_acl;
749 /*
750			 * se_deve->se_lun_acl pointer may be NULL for an
751			 * entry created without explicit Node+MappedLUN ACLs
752 */
753 if (!(lacl))
754 continue;
755
756 if (explict &&
757 (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
758 (l_port != NULL) && (l_port == port))
759 continue;
760
761 core_scsi3_ua_allocate(lacl->se_lun_nacl,
762 se_deve->mapped_lun, 0x2A,
763 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
764 }
765 spin_unlock_bh(&port->sep_alua_lock);
766
767 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
768 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
769 smp_mb__after_atomic_dec();
770 }
771 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
772 /*
773 * Update the ALUA metadata buf that has been allocated in
774 * core_alua_do_port_transition(), this metadata will be written
775 * to struct file.
776 *
777 * Note that there is the case where we do not want to update the
778 * metadata when the saved metadata is being parsed in userspace
779 * when setting the existing port access state and access status.
780 *
781 * Also note that the failure to write out the ALUA metadata to
782 * struct file does NOT affect the actual ALUA transition.
783 */
784 if (tg_pt_gp->tg_pt_gp_write_metadata) {
785 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
786 core_alua_update_tpg_primary_metadata(tg_pt_gp,
787 new_state, md_buf);
788 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
789 }
790 /*
791 * Set the current primary ALUA access state to the requested new state
792 */
793 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
794
795 printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
796 " from primary access state %s to %s\n", (explict) ? "explict" :
797 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
798 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
799 core_alua_dump_state(new_state));
800
801 return 0;
802}
803
804int core_alua_do_port_transition(
805 struct t10_alua_tg_pt_gp *l_tg_pt_gp,
806 struct se_device *l_dev,
807 struct se_port *l_port,
808 struct se_node_acl *l_nacl,
809 int new_state,
810 int explict)
811{
812 struct se_device *dev;
813 struct se_port *port;
814 struct se_subsystem_dev *su_dev;
815 struct se_node_acl *nacl;
816 struct t10_alua_lu_gp *lu_gp;
817 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
818 struct t10_alua_tg_pt_gp *tg_pt_gp;
819 unsigned char *md_buf;
820 int primary;
821
822 if (core_alua_check_transition(new_state, &primary) != 0)
823 return -EINVAL;
824
825 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
826 if (!(md_buf)) {
827 printk("Unable to allocate buf for ALUA metadata\n");
828 return -ENOMEM;
829 }
830
831 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
832 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
833 lu_gp = local_lu_gp_mem->lu_gp;
834 atomic_inc(&lu_gp->lu_gp_ref_cnt);
835 smp_mb__after_atomic_inc();
836 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
837 /*
838 * For storage objects that are members of the 'default_lu_gp',
839	 * we only do the transition on the passed *l_tg_pt_gp, and not
840	 * on all of the matching target port group IDs in default_lu_gp.
841 */
842 if (!(lu_gp->lu_gp_id)) {
843 /*
844 * core_alua_do_transition_tg_pt() will always return
845 * success.
846 */
847 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
848 md_buf, new_state, explict);
849 atomic_dec(&lu_gp->lu_gp_ref_cnt);
850 smp_mb__after_atomic_dec();
851 kfree(md_buf);
852 return 0;
853 }
854 /*
855 * For all other LU groups aside from 'default_lu_gp', walk all of
856 * the associated storage objects looking for a matching target port
857 * group ID from the local target port group.
858 */
859 spin_lock(&lu_gp->lu_gp_lock);
860 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
861 lu_gp_mem_list) {
862
863 dev = lu_gp_mem->lu_gp_mem_dev;
864 su_dev = dev->se_sub_dev;
865 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
866 smp_mb__after_atomic_inc();
867 spin_unlock(&lu_gp->lu_gp_lock);
868
869 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
870 list_for_each_entry(tg_pt_gp,
871 &T10_ALUA(su_dev)->tg_pt_gps_list,
872 tg_pt_gp_list) {
873
874 if (!(tg_pt_gp->tg_pt_gp_valid_id))
875 continue;
876 /*
877			 * If the target port asymmetric access state is changed
878			 * for any target port group accessible via a logical
879			 * unit within a LU group, then the asymmetric access
880			 * state for the same target port group accessible via
881			 * other logical units in that LU group will also
882			 * change.
883 */
884 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
885 continue;
886
887 if (l_tg_pt_gp == tg_pt_gp) {
888 port = l_port;
889 nacl = l_nacl;
890 } else {
891 port = NULL;
892 nacl = NULL;
893 }
894 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
895 smp_mb__after_atomic_inc();
896 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
897 /*
898 * core_alua_do_transition_tg_pt() will always return
899 * success.
900 */
901 core_alua_do_transition_tg_pt(tg_pt_gp, port,
902 nacl, md_buf, new_state, explict);
903
904 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
905 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
906 smp_mb__after_atomic_dec();
907 }
908 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
909
910 spin_lock(&lu_gp->lu_gp_lock);
911 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
912 smp_mb__after_atomic_dec();
913 }
914 spin_unlock(&lu_gp->lu_gp_lock);
915
916 printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
917 " Group IDs: %hu %s transition to primary state: %s\n",
918 config_item_name(&lu_gp->lu_gp_group.cg_item),
919 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
920 core_alua_dump_state(new_state));
921
922 atomic_dec(&lu_gp->lu_gp_ref_cnt);
923 smp_mb__after_atomic_dec();
924 kfree(md_buf);
925 return 0;
926}
927
928/*
929 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
930 */
931static int core_alua_update_tpg_secondary_metadata(
932 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
933 struct se_port *port,
934 unsigned char *md_buf,
935 u32 md_buf_len)
936{
937 struct se_portal_group *se_tpg = port->sep_tpg;
938 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
939 int len;
940
941 memset(path, 0, ALUA_METADATA_PATH_LEN);
942 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
943
944 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
945 TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
946
947 if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
948 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
949 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
950
951 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
952 "alua_tg_pt_status=0x%02x\n",
953 atomic_read(&port->sep_tg_pt_secondary_offline),
954 port->sep_tg_pt_secondary_stat);
955
956 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
957 TPG_TFO(se_tpg)->get_fabric_name(), wwn,
958 port->sep_lun->unpacked_lun);
959
960 return core_alua_write_tpg_metadata(path, md_buf, len);
961}
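The secondary (per-port) metadata follows the same pattern, written to /var/target/alua/<fabric_name>/<tpg_wwn>[+<tpgt>]/lun_<unpacked_lun> with contents of the form (again, illustrative values):

	alua_tg_pt_offline=1
	alua_tg_pt_status=0x02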
962
963static int core_alua_set_tg_pt_secondary_state(
964 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
965 struct se_port *port,
966 int explict,
967 int offline)
968{
969 struct t10_alua_tg_pt_gp *tg_pt_gp;
970 unsigned char *md_buf;
971 u32 md_buf_len;
972 int trans_delay_msecs;
973
974 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
975 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
976 if (!(tg_pt_gp)) {
977 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
978 printk(KERN_ERR "Unable to complete secondary state"
979 " transition\n");
980 return -1;
981 }
982 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
983 /*
984 * Set the secondary ALUA target port access state to OFFLINE
985 * or release the previously secondary state for struct se_port
986 */
987 if (offline)
988 atomic_set(&port->sep_tg_pt_secondary_offline, 1);
989 else
990 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
991
992 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
993 port->sep_tg_pt_secondary_stat = (explict) ?
994 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
995 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
996
997 printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
998 " to secondary access state: %s\n", (explict) ? "explict" :
999 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1000 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1001
1002 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1003 /*
1004 * Do the optional transition delay after we set the secondary
1005 * ALUA access state.
1006 */
1007 if (trans_delay_msecs != 0)
1008 msleep_interruptible(trans_delay_msecs);
1009 /*
1010 * See if we need to update the ALUA fabric port metadata for
1011 * secondary state and status
1012 */
1013 if (port->sep_tg_pt_secondary_write_md) {
1014 md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1015 if (!(md_buf)) {
1016 printk(KERN_ERR "Unable to allocate md_buf for"
1017 " secondary ALUA access metadata\n");
1018 return -1;
1019 }
1020 mutex_lock(&port->sep_tg_pt_md_mutex);
1021 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1022 md_buf, md_buf_len);
1023 mutex_unlock(&port->sep_tg_pt_md_mutex);
1024
1025 kfree(md_buf);
1026 }
1027
1028 return 0;
1029}
1030
1031struct t10_alua_lu_gp *
1032core_alua_allocate_lu_gp(const char *name, int def_group)
1033{
1034 struct t10_alua_lu_gp *lu_gp;
1035
1036 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1037 if (!(lu_gp)) {
1038 printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
1039 return ERR_PTR(-ENOMEM);
1040 }
1041 INIT_LIST_HEAD(&lu_gp->lu_gp_list);
1042 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1043 spin_lock_init(&lu_gp->lu_gp_lock);
1044 atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1045
1046 if (def_group) {
1047 lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
1048 lu_gp->lu_gp_valid_id = 1;
1049 se_global->alua_lu_gps_count++;
1050 }
1051
1052 return lu_gp;
1053}
1054
1055int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1056{
1057 struct t10_alua_lu_gp *lu_gp_tmp;
1058 u16 lu_gp_id_tmp;
1059 /*
1060 * The lu_gp->lu_gp_id may only be set once..
1061 */
1062 if (lu_gp->lu_gp_valid_id) {
1063 printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
1064 " ignoring request\n");
1065 return -1;
1066 }
1067
1068 spin_lock(&se_global->lu_gps_lock);
1069 if (se_global->alua_lu_gps_count == 0x0000ffff) {
1070 printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
1071 " 0x0000ffff reached\n");
1072 spin_unlock(&se_global->lu_gps_lock);
1073 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1074 return -1;
1075 }
1076again:
1077 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1078 se_global->alua_lu_gps_counter++;
1079
1080 list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
1081 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1082 if (!(lu_gp_id))
1083 goto again;
1084
1085 printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
1086 " already exists, ignoring request\n",
1087 lu_gp_id);
1088 spin_unlock(&se_global->lu_gps_lock);
1089 return -1;
1090 }
1091 }
1092
1093 lu_gp->lu_gp_id = lu_gp_id_tmp;
1094 lu_gp->lu_gp_valid_id = 1;
1095 list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
1096 se_global->alua_lu_gps_count++;
1097 spin_unlock(&se_global->lu_gps_lock);
1098
1099 return 0;
1100}
1101
1102static struct t10_alua_lu_gp_member *
1103core_alua_allocate_lu_gp_mem(struct se_device *dev)
1104{
1105 struct t10_alua_lu_gp_member *lu_gp_mem;
1106
1107 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1108 if (!(lu_gp_mem)) {
1109 printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
1110 return ERR_PTR(-ENOMEM);
1111 }
1112 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1113 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1114 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1115
1116 lu_gp_mem->lu_gp_mem_dev = dev;
1117 dev->dev_alua_lu_gp_mem = lu_gp_mem;
1118
1119 return lu_gp_mem;
1120}
1121
1122void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1123{
1124 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1125 /*
1126 * Once we have reached this point, config_item_put() has
1127 * already been called from target_core_alua_drop_lu_gp().
1128 *
1129 * Here, we remove the *lu_gp from the global list so that
1130 * no associations can be made while we are releasing
1131 * struct t10_alua_lu_gp.
1132 */
1133 spin_lock(&se_global->lu_gps_lock);
1134 atomic_set(&lu_gp->lu_gp_shutdown, 1);
1135 list_del(&lu_gp->lu_gp_list);
1136 se_global->alua_lu_gps_count--;
1137 spin_unlock(&se_global->lu_gps_lock);
1138 /*
1139 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1140 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1141 * released with core_alua_put_lu_gp_from_name()
1142 */
1143 while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1144 cpu_relax();
1145 /*
1146 * Release reference to struct t10_alua_lu_gp * from all associated
1147 * struct se_device.
1148 */
1149 spin_lock(&lu_gp->lu_gp_lock);
1150 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1151 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1152 if (lu_gp_mem->lu_gp_assoc) {
1153 list_del(&lu_gp_mem->lu_gp_mem_list);
1154 lu_gp->lu_gp_members--;
1155 lu_gp_mem->lu_gp_assoc = 0;
1156 }
1157 spin_unlock(&lu_gp->lu_gp_lock);
1158 /*
1159 *
1160 * lu_gp_mem is associated with a single
1161 * struct se_device->dev_alua_lu_gp_mem, and is released when
1162 * struct se_device is released via core_alua_free_lu_gp_mem().
1163 *
1164 * If the passed lu_gp does NOT match the default_lu_gp, assume
1165	 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1166 */
1167 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1168 if (lu_gp != se_global->default_lu_gp)
1169 __core_alua_attach_lu_gp_mem(lu_gp_mem,
1170 se_global->default_lu_gp);
1171 else
1172 lu_gp_mem->lu_gp = NULL;
1173 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1174
1175 spin_lock(&lu_gp->lu_gp_lock);
1176 }
1177 spin_unlock(&lu_gp->lu_gp_lock);
1178
1179 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1180}
1181
1182void core_alua_free_lu_gp_mem(struct se_device *dev)
1183{
1184 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1185 struct t10_alua *alua = T10_ALUA(su_dev);
1186 struct t10_alua_lu_gp *lu_gp;
1187 struct t10_alua_lu_gp_member *lu_gp_mem;
1188
1189 if (alua->alua_type != SPC3_ALUA_EMULATED)
1190 return;
1191
1192 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1193 if (!(lu_gp_mem))
1194 return;
1195
1196 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1197 cpu_relax();
1198
1199 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1200 lu_gp = lu_gp_mem->lu_gp;
1201 if ((lu_gp)) {
1202 spin_lock(&lu_gp->lu_gp_lock);
1203 if (lu_gp_mem->lu_gp_assoc) {
1204 list_del(&lu_gp_mem->lu_gp_mem_list);
1205 lu_gp->lu_gp_members--;
1206 lu_gp_mem->lu_gp_assoc = 0;
1207 }
1208 spin_unlock(&lu_gp->lu_gp_lock);
1209 lu_gp_mem->lu_gp = NULL;
1210 }
1211 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1212
1213 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1214}
1215
1216struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1217{
1218 struct t10_alua_lu_gp *lu_gp;
1219 struct config_item *ci;
1220
1221 spin_lock(&se_global->lu_gps_lock);
1222 list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
1223 if (!(lu_gp->lu_gp_valid_id))
1224 continue;
1225 ci = &lu_gp->lu_gp_group.cg_item;
1226 if (!(strcmp(config_item_name(ci), name))) {
1227 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1228 spin_unlock(&se_global->lu_gps_lock);
1229 return lu_gp;
1230 }
1231 }
1232 spin_unlock(&se_global->lu_gps_lock);
1233
1234 return NULL;
1235}
1236
1237void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1238{
1239 spin_lock(&se_global->lu_gps_lock);
1240 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1241 spin_unlock(&se_global->lu_gps_lock);
1242}
1243
1244/*
1245 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1246 */
1247void __core_alua_attach_lu_gp_mem(
1248 struct t10_alua_lu_gp_member *lu_gp_mem,
1249 struct t10_alua_lu_gp *lu_gp)
1250{
1251 spin_lock(&lu_gp->lu_gp_lock);
1252 lu_gp_mem->lu_gp = lu_gp;
1253 lu_gp_mem->lu_gp_assoc = 1;
1254 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1255 lu_gp->lu_gp_members++;
1256 spin_unlock(&lu_gp->lu_gp_lock);
1257}
1258
1259/*
1260 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1261 */
1262void __core_alua_drop_lu_gp_mem(
1263 struct t10_alua_lu_gp_member *lu_gp_mem,
1264 struct t10_alua_lu_gp *lu_gp)
1265{
1266 spin_lock(&lu_gp->lu_gp_lock);
1267 list_del(&lu_gp_mem->lu_gp_mem_list);
1268 lu_gp_mem->lu_gp = NULL;
1269 lu_gp_mem->lu_gp_assoc = 0;
1270 lu_gp->lu_gp_members--;
1271 spin_unlock(&lu_gp->lu_gp_lock);
1272}
1273
1274struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1275 struct se_subsystem_dev *su_dev,
1276 const char *name,
1277 int def_group)
1278{
1279 struct t10_alua_tg_pt_gp *tg_pt_gp;
1280
1281 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1282 if (!(tg_pt_gp)) {
1283 printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
1284 return NULL;
1285 }
1286 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1287 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1288 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1289 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1290 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1291 tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1292 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1293 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1294 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1295 /*
1296	 * Enable both explicit and implicit ALUA support by default
1297 */
1298 tg_pt_gp->tg_pt_gp_alua_access_type =
1299 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1300 /*
1301 * Set the default Active/NonOptimized Delay in milliseconds
1302 */
1303 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1304 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1305
1306 if (def_group) {
1307 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1308 tg_pt_gp->tg_pt_gp_id =
1309 T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
1310 tg_pt_gp->tg_pt_gp_valid_id = 1;
1311 T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
1312 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1313 &T10_ALUA(su_dev)->tg_pt_gps_list);
1314 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1315 }
1316
1317 return tg_pt_gp;
1318}
1319
1320int core_alua_set_tg_pt_gp_id(
1321 struct t10_alua_tg_pt_gp *tg_pt_gp,
1322 u16 tg_pt_gp_id)
1323{
1324 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1325 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1326 u16 tg_pt_gp_id_tmp;
1327 /*
1328 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1329 */
1330 if (tg_pt_gp->tg_pt_gp_valid_id) {
1331 printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
1332 " ignoring request\n");
1333 return -1;
1334 }
1335
1336 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1337 if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
1338 printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
1339 " 0x0000ffff reached\n");
1340 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1341 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1342 return -1;
1343 }
1344again:
1345 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1346 T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
1347
1348 list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
1349 tg_pt_gp_list) {
1350 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1351 if (!(tg_pt_gp_id))
1352 goto again;
1353
1354 printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
1355 " exists, ignoring request\n", tg_pt_gp_id);
1356 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1357 return -1;
1358 }
1359 }
1360
1361 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1362 tg_pt_gp->tg_pt_gp_valid_id = 1;
1363 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1364 &T10_ALUA(su_dev)->tg_pt_gps_list);
1365 T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
1366 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1367
1368 return 0;
1369}
1370
1371struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1372 struct se_port *port)
1373{
1374 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1375
1376 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1377 GFP_KERNEL);
1378 if (!(tg_pt_gp_mem)) {
1379 printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1380 return ERR_PTR(-ENOMEM);
1381 }
1382 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1383 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1384 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1385
1386 tg_pt_gp_mem->tg_pt = port;
1387 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1388 atomic_set(&port->sep_tg_pt_gp_active, 1);
1389
1390 return tg_pt_gp_mem;
1391}
1392
1393void core_alua_free_tg_pt_gp(
1394 struct t10_alua_tg_pt_gp *tg_pt_gp)
1395{
1396 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1397 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1398 /*
1399 * Once we have reached this point, config_item_put() has already
1400 * been called from target_core_alua_drop_tg_pt_gp().
1401 *
1402 * Here we remove *tg_pt_gp from the global list so that
1403	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1404 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1405 */
1406 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1407 list_del(&tg_pt_gp->tg_pt_gp_list);
1408 T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
1409 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1410 /*
1411 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1412 * core_alua_get_tg_pt_gp_by_name() in
1413 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1414 * to be released with core_alua_put_tg_pt_gp_from_name().
1415 */
1416 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1417 cpu_relax();
1418 /*
1419 * Release reference to struct t10_alua_tg_pt_gp from all associated
1420 * struct se_port.
1421 */
1422 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1423 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1424 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1425 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1426 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1427 tg_pt_gp->tg_pt_gp_members--;
1428 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1429 }
1430 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1431 /*
1432 * tg_pt_gp_mem is associated with a single
1433 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1434 * core_alua_free_tg_pt_gp_mem().
1435 *
1436 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1437	 * assume we want to re-associate a given tg_pt_gp_mem with
1438 * default_tg_pt_gp.
1439 */
1440 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1441 if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
1442 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1443 T10_ALUA(su_dev)->default_tg_pt_gp);
1444 } else
1445 tg_pt_gp_mem->tg_pt_gp = NULL;
1446 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1447
1448 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1449 }
1450 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1451
1452 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1453}
1454
1455void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1456{
1457 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1458 struct t10_alua *alua = T10_ALUA(su_dev);
1459 struct t10_alua_tg_pt_gp *tg_pt_gp;
1460 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1461
1462 if (alua->alua_type != SPC3_ALUA_EMULATED)
1463 return;
1464
1465 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1466 if (!(tg_pt_gp_mem))
1467 return;
1468
1469 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1470 cpu_relax();
1471
1472 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1473 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1474 if ((tg_pt_gp)) {
1475 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1476 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1477 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1478 tg_pt_gp->tg_pt_gp_members--;
1479 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1480 }
1481 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1482 tg_pt_gp_mem->tg_pt_gp = NULL;
1483 }
1484 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1485
1486 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1487}
1488
1489static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1490 struct se_subsystem_dev *su_dev,
1491 const char *name)
1492{
1493 struct t10_alua_tg_pt_gp *tg_pt_gp;
1494 struct config_item *ci;
1495
1496 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1497 list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
1498 tg_pt_gp_list) {
1499 if (!(tg_pt_gp->tg_pt_gp_valid_id))
1500 continue;
1501 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1502 if (!(strcmp(config_item_name(ci), name))) {
1503 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1504 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1505 return tg_pt_gp;
1506 }
1507 }
1508 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1509
1510 return NULL;
1511}
1512
1513static void core_alua_put_tg_pt_gp_from_name(
1514 struct t10_alua_tg_pt_gp *tg_pt_gp)
1515{
1516 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1517
1518 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1519 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1520 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1521}
1522
1523/*
1524 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1525 */
1526void __core_alua_attach_tg_pt_gp_mem(
1527 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1528 struct t10_alua_tg_pt_gp *tg_pt_gp)
1529{
1530 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1531 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1532 tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1533 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1534 &tg_pt_gp->tg_pt_gp_mem_list);
1535 tg_pt_gp->tg_pt_gp_members++;
1536 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1537}
1538
1539/*
1540 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1541 */
1542static void __core_alua_drop_tg_pt_gp_mem(
1543 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1544 struct t10_alua_tg_pt_gp *tg_pt_gp)
1545{
1546 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1547 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1548 tg_pt_gp_mem->tg_pt_gp = NULL;
1549 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1550 tg_pt_gp->tg_pt_gp_members--;
1551 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1552}
1553
1554ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1555{
1556 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1557 struct config_item *tg_pt_ci;
1558 struct t10_alua *alua = T10_ALUA(su_dev);
1559 struct t10_alua_tg_pt_gp *tg_pt_gp;
1560 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1561 ssize_t len = 0;
1562
1563 if (alua->alua_type != SPC3_ALUA_EMULATED)
1564 return len;
1565
1566 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1567 if (!(tg_pt_gp_mem))
1568 return len;
1569
1570 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1571 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1572 if ((tg_pt_gp)) {
1573 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1574 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1575 " %hu\nTG Port Primary Access State: %s\nTG Port "
1576 "Primary Access Status: %s\nTG Port Secondary Access"
1577 " State: %s\nTG Port Secondary Access Status: %s\n",
1578 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1579 core_alua_dump_state(atomic_read(
1580 &tg_pt_gp->tg_pt_gp_alua_access_state)),
1581 core_alua_dump_status(
1582 tg_pt_gp->tg_pt_gp_alua_access_status),
1583 (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1584 "Offline" : "None",
1585 core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1586 }
1587 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1588
1589 return len;
1590}
1591
1592ssize_t core_alua_store_tg_pt_gp_info(
1593 struct se_port *port,
1594 const char *page,
1595 size_t count)
1596{
1597 struct se_portal_group *tpg;
1598 struct se_lun *lun;
1599 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1600 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1601 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1602 unsigned char buf[TG_PT_GROUP_NAME_BUF];
1603 int move = 0;
1604
1605 tpg = port->sep_tpg;
1606 lun = port->sep_lun;
1607
1608 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
1609 printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
1610 " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
1611 TPG_TFO(tpg)->tpg_get_tag(tpg),
1612 config_item_name(&lun->lun_group.cg_item));
1613 return -EINVAL;
1614 }
1615
1616 if (count > TG_PT_GROUP_NAME_BUF) {
1617 printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
1618 return -EINVAL;
1619 }
1620 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1621 memcpy(buf, page, count);
1622 /*
1623 * Any ALUA target port group alias besides "NULL" means we will be
1624 * making a new group association.
1625 */
1626 if (strcmp(strstrip(buf), "NULL")) {
1627 /*
1628 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1629 * struct t10_alua_tg_pt_gp. This reference is released with
1630 * core_alua_put_tg_pt_gp_from_name() below.
1631 */
1632 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
1633 strstrip(buf));
1634 if (!(tg_pt_gp_new))
1635 return -ENODEV;
1636 }
1637 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1638 if (!(tg_pt_gp_mem)) {
1639 if (tg_pt_gp_new)
1640 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1641 printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
1642 return -EINVAL;
1643 }
1644
1645 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1646 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1647 if ((tg_pt_gp)) {
1648 /*
1649 * Clearing an existing tg_pt_gp association, and replacing
1650 * with the default_tg_pt_gp.
1651 */
1652 if (!(tg_pt_gp_new)) {
1653 printk(KERN_INFO "Target_Core_ConfigFS: Moving"
1654 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1655 " alua/%s, ID: %hu back to"
1656 " default_tg_pt_gp\n",
1657 TPG_TFO(tpg)->tpg_get_wwn(tpg),
1658 TPG_TFO(tpg)->tpg_get_tag(tpg),
1659 config_item_name(&lun->lun_group.cg_item),
1660 config_item_name(
1661 &tg_pt_gp->tg_pt_gp_group.cg_item),
1662 tg_pt_gp->tg_pt_gp_id);
1663
1664 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1665 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1666 T10_ALUA(su_dev)->default_tg_pt_gp);
1667 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1668
1669 return count;
1670 }
1671 /*
1672 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1673 */
1674 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1675 move = 1;
1676 }
1677 /*
1678 * Associate tg_pt_gp_mem with tg_pt_gp_new.
1679 */
1680 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1681 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1682 printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1683 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1684 "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
1685 TPG_TFO(tpg)->tpg_get_tag(tpg),
1686 config_item_name(&lun->lun_group.cg_item),
1687 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1688 tg_pt_gp_new->tg_pt_gp_id);
1689
1690 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1691 return count;
1692}
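In practice this store handler is driven from the fabric's per-LUN configfs attribute (wired up in target_core_fabric_configfs.c), so associating a LUN with a named target port group amounts to writing the group's alias into that attribute, e.g. echo my_tg_pt_gp > .../tpgt_1/lun/lun_0/alua_tg_pt_gp, and writing NULL moves the port back to default_tg_pt_gp; the exact configfs path depends on the fabric module, so treat this as an assumed layout.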
1693
1694ssize_t core_alua_show_access_type(
1695 struct t10_alua_tg_pt_gp *tg_pt_gp,
1696 char *page)
1697{
1698 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1699 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1700 return sprintf(page, "Implict and Explict\n");
1701 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1702 return sprintf(page, "Implict\n");
1703 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1704 return sprintf(page, "Explict\n");
1705 else
1706 return sprintf(page, "None\n");
1707}
1708
1709ssize_t core_alua_store_access_type(
1710 struct t10_alua_tg_pt_gp *tg_pt_gp,
1711 const char *page,
1712 size_t count)
1713{
1714 unsigned long tmp;
1715 int ret;
1716
1717 ret = strict_strtoul(page, 0, &tmp);
1718 if (ret < 0) {
1719 printk(KERN_ERR "Unable to extract alua_access_type\n");
1720 return -EINVAL;
1721 }
1722 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1723 printk(KERN_ERR "Illegal value for alua_access_type:"
1724 " %lu\n", tmp);
1725 return -EINVAL;
1726 }
1727 if (tmp == 3)
1728 tg_pt_gp->tg_pt_gp_alua_access_type =
1729 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1730 else if (tmp == 2)
1731 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1732 else if (tmp == 1)
1733 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1734 else
1735 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1736
1737 return count;
1738}
1739
1740ssize_t core_alua_show_nonop_delay_msecs(
1741 struct t10_alua_tg_pt_gp *tg_pt_gp,
1742 char *page)
1743{
1744 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
1745}
1746
1747ssize_t core_alua_store_nonop_delay_msecs(
1748 struct t10_alua_tg_pt_gp *tg_pt_gp,
1749 const char *page,
1750 size_t count)
1751{
1752 unsigned long tmp;
1753 int ret;
1754
1755 ret = strict_strtoul(page, 0, &tmp);
1756 if (ret < 0) {
1757 printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
1758 return -EINVAL;
1759 }
1760 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1761 printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
1762 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1763 ALUA_MAX_NONOP_DELAY_MSECS);
1764 return -EINVAL;
1765 }
1766 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1767
1768 return count;
1769}
1770
1771ssize_t core_alua_show_trans_delay_msecs(
1772 struct t10_alua_tg_pt_gp *tg_pt_gp,
1773 char *page)
1774{
1775 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1776}
1777
1778ssize_t core_alua_store_trans_delay_msecs(
1779 struct t10_alua_tg_pt_gp *tg_pt_gp,
1780 const char *page,
1781 size_t count)
1782{
1783 unsigned long tmp;
1784 int ret;
1785
1786 ret = strict_strtoul(page, 0, &tmp);
1787 if (ret < 0) {
1788 printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
1789 return -EINVAL;
1790 }
1791 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1792 printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
1793 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1794 ALUA_MAX_TRANS_DELAY_MSECS);
1795 return -EINVAL;
1796 }
1797 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1798
1799 return count;
1800}
1801
1802ssize_t core_alua_show_preferred_bit(
1803 struct t10_alua_tg_pt_gp *tg_pt_gp,
1804 char *page)
1805{
1806 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
1807}
1808
1809ssize_t core_alua_store_preferred_bit(
1810 struct t10_alua_tg_pt_gp *tg_pt_gp,
1811 const char *page,
1812 size_t count)
1813{
1814 unsigned long tmp;
1815 int ret;
1816
1817 ret = strict_strtoul(page, 0, &tmp);
1818 if (ret < 0) {
1819 printk(KERN_ERR "Unable to extract preferred ALUA value\n");
1820 return -EINVAL;
1821 }
1822 if ((tmp != 0) && (tmp != 1)) {
1823 printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
1824 return -EINVAL;
1825 }
1826 tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1827
1828 return count;
1829}
1830
1831ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1832{
1833 if (!(lun->lun_sep))
1834 return -ENODEV;
1835
1836 return sprintf(page, "%d\n",
1837 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1838}
1839
1840ssize_t core_alua_store_offline_bit(
1841 struct se_lun *lun,
1842 const char *page,
1843 size_t count)
1844{
1845 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1846 unsigned long tmp;
1847 int ret;
1848
1849 if (!(lun->lun_sep))
1850 return -ENODEV;
1851
1852 ret = strict_strtoul(page, 0, &tmp);
1853 if (ret < 0) {
1854 printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
1855 return -EINVAL;
1856 }
1857 if ((tmp != 0) && (tmp != 1)) {
1858 printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
1859 tmp);
1860 return -EINVAL;
1861 }
1862 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1863 if (!(tg_pt_gp_mem)) {
1864 printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
1865 return -EINVAL;
1866 }
1867
1868 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1869 lun->lun_sep, 0, (int)tmp);
1870 if (ret < 0)
1871 return -EINVAL;
1872
1873 return count;
1874}
1875
1876ssize_t core_alua_show_secondary_status(
1877 struct se_lun *lun,
1878 char *page)
1879{
1880 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1881}
1882
1883ssize_t core_alua_store_secondary_status(
1884 struct se_lun *lun,
1885 const char *page,
1886 size_t count)
1887{
1888 unsigned long tmp;
1889 int ret;
1890
1891 ret = strict_strtoul(page, 0, &tmp);
1892 if (ret < 0) {
1893 printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
1894 return -EINVAL;
1895 }
1896 if ((tmp != ALUA_STATUS_NONE) &&
1897 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1898 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1899 printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
1900 tmp);
1901 return -EINVAL;
1902 }
1903 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1904
1905 return count;
1906}
1907
1908ssize_t core_alua_show_secondary_write_metadata(
1909 struct se_lun *lun,
1910 char *page)
1911{
1912 return sprintf(page, "%d\n",
1913 lun->lun_sep->sep_tg_pt_secondary_write_md);
1914}
1915
1916ssize_t core_alua_store_secondary_write_metadata(
1917 struct se_lun *lun,
1918 const char *page,
1919 size_t count)
1920{
1921 unsigned long tmp;
1922 int ret;
1923
1924 ret = strict_strtoul(page, 0, &tmp);
1925 if (ret < 0) {
1926 printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
1927 return -EINVAL;
1928 }
1929 if ((tmp != 0) && (tmp != 1)) {
1930 printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
1931 " %lu\n", tmp);
1932 return -EINVAL;
1933 }
1934 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
1935
1936 return count;
1937}
1938
1939int core_setup_alua(struct se_device *dev, int force_pt)
1940{
1941 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1942 struct t10_alua *alua = T10_ALUA(su_dev);
1943 struct t10_alua_lu_gp_member *lu_gp_mem;
1944 /*
1945 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
1946 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
1947 * cause a problem because libata and some SATA RAID HBAs appear
1948 * under Linux/SCSI, but emulate SCSI logic themselves.
1949 */
1950 if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
1951 !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
1952 alua->alua_type = SPC_ALUA_PASSTHROUGH;
1953 alua->alua_state_check = &core_alua_state_check_nop;
1954 printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
1955 " emulation\n", TRANSPORT(dev)->name);
1956 return 0;
1957 }
1958 /*
1959 * If SPC-3 or above is reported by real or emulated struct se_device,
1960 * use emulated ALUA.
1961 */
1962 if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
1963 printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
1964 " device\n", TRANSPORT(dev)->name);
1965 /*
1966 * Associate this struct se_device with the default ALUA
1967 * LUN Group.
1968 */
1969 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
1970 if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
1971 return -1;
1972
1973 alua->alua_type = SPC3_ALUA_EMULATED;
1974 alua->alua_state_check = &core_alua_state_check;
1975 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1976 __core_alua_attach_lu_gp_mem(lu_gp_mem,
1977 se_global->default_lu_gp);
1978 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1979
1980 printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
1981 " core/alua/lu_gps/default_lu_gp\n",
1982 TRANSPORT(dev)->name);
1983 } else {
1984 alua->alua_type = SPC2_ALUA_DISABLED;
1985 alua->alua_state_check = &core_alua_state_check_nop;
1986 printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
1987 " device\n", TRANSPORT(dev)->name);
1988 }
1989
1990 return 0;
1991}
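The store handlers above are driven from configfs. As a rough illustration, a userspace tool might set the transition delay for a target port group like the hedged sketch below; the attribute path is a made-up example (the real one depends on the HBA, device, and tg_pt_gp names the administrator created), and the bounds check simply mirrors ALUA_MAX_TRANS_DELAY_MSECS from target_core_alua.h.

/* Hypothetical userspace sketch; the configfs path is an example only. */
#include <stdio.h>
#include <stdlib.h>

#define ALUA_MAX_TRANS_DELAY_MSECS 30000	/* mirrors target_core_alua.h */

int main(int argc, char **argv)
{
	const char *attr = "/sys/kernel/config/target/core/"
		"iblock_0/my_dev/alua/default_tg_pt_gp/trans_delay_msecs";
	unsigned long msecs;
	FILE *f;

	if (argc < 2)
		return 1;
	msecs = strtoul(argv[1], NULL, 0);
	if (msecs > ALUA_MAX_TRANS_DELAY_MSECS) {
		fprintf(stderr, "trans_delay_msecs %lu exceeds %d\n",
			msecs, ALUA_MAX_TRANS_DELAY_MSECS);
		return 1;
	}
	f = fopen(attr, "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%lu\n", msecs);	/* parsed kernel-side by strict_strtoul() */
	return fclose(f) ? 1 : 0;
}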
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 000000000000..c86f97a081ed
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
1#ifndef TARGET_CORE_ALUA_H
2#define TARGET_CORE_ALUA_H
3
4/*
5 * INQUIRY response data, TPGS Field
6 *
7 * from spc4r17 section 6.4.2 Table 135
8 */
9#define TPGS_NO_ALUA 0x00
10#define TPGS_IMPLICT_ALUA 0x10
11#define TPGS_EXPLICT_ALUA 0x20
12
13/*
14 * ASYMMETRIC ACCESS STATE field
15 *
16 * from spc4r17 section 6.27 Table 245
17 */
18#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
19#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
20#define ALUA_ACCESS_STATE_STANDBY 0x2
21#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
22#define ALUA_ACCESS_STATE_OFFLINE 0xe
23#define ALUA_ACCESS_STATE_TRANSITION 0xf
24
25/*
26 * REPORT_TARGET_PORT_GROUP STATUS CODE
27 *
28 * from spc4r17 section 6.27 Table 246
29 */
30#define ALUA_STATUS_NONE 0x00
31#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
32#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
33
34/*
35 * From spc4r17, Table D.1: ASC and ASCQ Assignment
36 */
37#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
38#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
39#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
40#define ASCQ_04H_ALUA_OFFLINE 0x12
41
42/*
43 * Used as the default for Active/NonOptimized delay (in milliseconds)
44 * This can also be changed via configfs on a per target port group basis.
45 */
46#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
47#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
48/*
49 * Used for implicit and explicit ALUA transitional delay, which is disabled
50 * by default, and is intended to be used for debugging client side ALUA code.
51 */
52#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
53#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
54/*
55 * Used by core_alua_update_tpg_primary_metadata() and
56 * core_alua_update_tpg_secondary_metadata()
57 */
58#define ALUA_METADATA_PATH_LEN 512
59/*
60 * Used by core_alua_update_tpg_secondary_metadata()
61 */
62#define ALUA_SECONDARY_METADATA_WWN_LEN 256
63
64extern struct kmem_cache *t10_alua_lu_gp_cache;
65extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
66extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
67extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
68
69extern int core_emulate_report_target_port_groups(struct se_cmd *);
70extern int core_emulate_set_target_port_groups(struct se_cmd *);
71extern int core_alua_check_nonop_delay(struct se_cmd *);
72extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
73 struct se_device *, struct se_port *,
74 struct se_node_acl *, int, int);
75extern char *core_alua_dump_status(int);
76extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
77extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
78extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
79extern void core_alua_free_lu_gp_mem(struct se_device *);
80extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
81extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
82extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
83 struct t10_alua_lu_gp *);
84extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
85 struct t10_alua_lu_gp *);
86extern void core_alua_drop_lu_gp_dev(struct se_device *);
87extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
88 struct se_subsystem_dev *, const char *, int);
89extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
90extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
91 struct se_port *);
92extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
93extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
94extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
95 struct t10_alua_tg_pt_gp *);
96extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
97extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
98 size_t);
99extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
100extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
101 const char *, size_t);
102extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
103 char *);
104extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
105 const char *, size_t);
106extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
107 char *);
108extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
109 const char *, size_t);
110extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
111 char *);
112extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
113 const char *, size_t);
114extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
115extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
116 size_t);
117extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
118extern ssize_t core_alua_store_secondary_status(struct se_lun *,
119 const char *, size_t);
120extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
121 char *);
122extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
123 const char *, size_t);
124extern int core_setup_alua(struct se_device *, int);
125
126#endif /* TARGET_CORE_ALUA_H */
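For context on the TPGS constants defined above: target_fill_alua_data() in target_core_cdb.c ORs the group's access type into byte 5 of the standard INQUIRY data, so these values end up in bits 5:4 of that byte. A small self-contained sketch (userspace C, constants copied from this header) of how an initiator-side tool might decode the field:

#include <stdio.h>

#define TPGS_NO_ALUA		0x00
#define TPGS_IMPLICT_ALUA	0x10
#define TPGS_EXPLICT_ALUA	0x20

static const char *tpgs_to_str(unsigned char inq_byte5)
{
	switch (inq_byte5 & 0x30) {
	case TPGS_NO_ALUA:
		return "ALUA not supported";
	case TPGS_IMPLICT_ALUA:
		return "implicit ALUA only";
	case TPGS_EXPLICT_ALUA:
		return "explicit ALUA only";
	default:
		return "implicit and explicit ALUA";	/* both bits set */
	}
}

int main(void)
{
	unsigned char byte5 = TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;

	printf("TPGS: %s\n", tpgs_to_str(byte5));
	return 0;
}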
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 000000000000..7f19c8b7b84c
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1137 @@
1/*
2 * CDB emulation for non-READ/WRITE commands.
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
5 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
6 * Copyright (c) 2007-2010 Rising Tide Systems
7 * Copyright (c) 2008-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#include <asm/unaligned.h>
27#include <scsi/scsi.h>
28
29#include <target/target_core_base.h>
30#include <target/target_core_transport.h>
31#include <target/target_core_fabric_ops.h>
32#include "target_core_ua.h"
33
34static void
35target_fill_alua_data(struct se_port *port, unsigned char *buf)
36{
37 struct t10_alua_tg_pt_gp *tg_pt_gp;
38 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
39
40 /*
41 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
42 */
43 buf[5] = 0x80;
44
45 /*
46	 * Set TPGS field for explicit and/or implicit ALUA access type
47	 * and operation.
48 *
49 * See spc4r17 section 6.4.2 Table 135
50 */
51 if (!port)
52 return;
53 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
54 if (!tg_pt_gp_mem)
55 return;
56
57 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
58 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
59 if (tg_pt_gp)
60 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
61 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
62}
63
64static int
65target_emulate_inquiry_std(struct se_cmd *cmd)
66{
67 struct se_lun *lun = SE_LUN(cmd);
68 struct se_device *dev = SE_DEV(cmd);
69 unsigned char *buf = cmd->t_task->t_task_buf;
70
71 /*
72 * Make sure we at least have 6 bytes of INQUIRY response
73 * payload going back for EVPD=0
74 */
75 if (cmd->data_length < 6) {
76 printk(KERN_ERR "SCSI Inquiry payload length: %u"
77 " too small for EVPD=0\n", cmd->data_length);
78 return -1;
79 }
80
81 buf[0] = dev->transport->get_device_type(dev);
82 if (buf[0] == TYPE_TAPE)
83 buf[1] = 0x80;
84 buf[2] = dev->transport->get_device_rev(dev);
85
86 /*
87 * Enable SCCS and TPGS fields for Emulated ALUA
88 */
89 if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
90 target_fill_alua_data(lun->lun_sep, buf);
91
92 if (cmd->data_length < 8) {
93 buf[4] = 1; /* Set additional length to 1 */
94 return 0;
95 }
96
97 buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
98
99 /*
100	 * Do not include vendor, product, revision info in INQUIRY
101	 * response payload for CDBs with a small allocation length.
102 */
103 if (cmd->data_length < 36) {
104 buf[4] = 3; /* Set additional length to 3 */
105 return 0;
106 }
107
108 snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
109 snprintf((unsigned char *)&buf[16], 16, "%s",
110 &DEV_T10_WWN(dev)->model[0]);
111 snprintf((unsigned char *)&buf[32], 4, "%s",
112 &DEV_T10_WWN(dev)->revision[0]);
113 buf[4] = 31; /* Set additional length to 31 */
114 return 0;
115}
116
117/* supported vital product data pages */
118static int
119target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
120{
121 buf[1] = 0x00;
122 if (cmd->data_length < 8)
123 return 0;
124
125 buf[4] = 0x0;
126 /*
127 * Only report the INQUIRY EVPD=1 pages after a valid NAA
128 * Registered Extended LUN WWN has been set via ConfigFS
129 * during device creation/restart.
130 */
131 if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
132 SDF_EMULATED_VPD_UNIT_SERIAL) {
133 buf[3] = 3;
134 buf[5] = 0x80;
135 buf[6] = 0x83;
136 buf[7] = 0x86;
137 }
138
139 return 0;
140}
141
142/* unit serial number */
143static int
144target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
145{
146 struct se_device *dev = SE_DEV(cmd);
147 u16 len = 0;
148
149 buf[1] = 0x80;
150 if (dev->se_sub_dev->su_dev_flags &
151 SDF_EMULATED_VPD_UNIT_SERIAL) {
152 u32 unit_serial_len;
153
154 unit_serial_len =
155 strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
156 unit_serial_len++; /* For NULL Terminator */
157
158 if (((len + 4) + unit_serial_len) > cmd->data_length) {
159 len += unit_serial_len;
160 buf[2] = ((len >> 8) & 0xff);
161 buf[3] = (len & 0xff);
162 return 0;
163 }
164 len += sprintf((unsigned char *)&buf[4], "%s",
165 &DEV_T10_WWN(dev)->unit_serial[0]);
166 len++; /* Extra Byte for NULL Terminator */
167 buf[3] = len;
168 }
169 return 0;
170}
171
172/*
173 * Device identification VPD, for a complete list of
174 * DESIGNATOR TYPEs see spc4r17 Table 459.
175 */
176static int
177target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
178{
179 struct se_device *dev = SE_DEV(cmd);
180 struct se_lun *lun = SE_LUN(cmd);
181 struct se_port *port = NULL;
182 struct se_portal_group *tpg = NULL;
183 struct t10_alua_lu_gp_member *lu_gp_mem;
184 struct t10_alua_tg_pt_gp *tg_pt_gp;
185 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
186 unsigned char binary, binary_new;
187 unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
188 u32 prod_len;
189 u32 unit_serial_len, off = 0;
190 int i;
191 u16 len = 0, id_len;
192
193 buf[1] = 0x83;
194 off = 4;
195
196 /*
197 * NAA IEEE Registered Extended Assigned designator format, see
198 * spc4r17 section 7.7.3.6.5
199 *
200 * We depend upon a target_core_mod/ConfigFS provided
201 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
202 * value in order to return the NAA id.
203 */
204 if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
205 goto check_t10_vend_desc;
206
207 if (off + 20 > cmd->data_length)
208 goto check_t10_vend_desc;
209
210 /* CODE SET == Binary */
211 buf[off++] = 0x1;
212
213	/* Set ASSOCIATION == addressed logical unit: 00b */
214 buf[off] = 0x00;
215
216 /* Identifier/Designator type == NAA identifier */
217 buf[off++] = 0x3;
218 off++;
219
220 /* Identifier/Designator length */
221 buf[off++] = 0x10;
222
223 /*
224 * Start NAA IEEE Registered Extended Identifier/Designator
225 */
226 buf[off++] = (0x6 << 4);
227
228 /*
229 * Use OpenFabrics IEEE Company ID: 00 14 05
230 */
231 buf[off++] = 0x01;
232 buf[off++] = 0x40;
233 buf[off] = (0x5 << 4);
234
235 /*
236 * Return ConfigFS Unit Serial Number information for
237 * VENDOR_SPECIFIC_IDENTIFIER and
238	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
239 */
240 binary = transport_asciihex_to_binaryhex(
241 &DEV_T10_WWN(dev)->unit_serial[0]);
242 buf[off++] |= (binary & 0xf0) >> 4;
243 for (i = 0; i < 24; i += 2) {
244 binary_new = transport_asciihex_to_binaryhex(
245 &DEV_T10_WWN(dev)->unit_serial[i+2]);
246 buf[off] = (binary & 0x0f) << 4;
247 buf[off++] |= (binary_new & 0xf0) >> 4;
248 binary = binary_new;
249 }
250 len = 20;
251 off = (len + 4);
252
253check_t10_vend_desc:
254 /*
255 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
256 */
257 id_len = 8; /* For Vendor field */
258 prod_len = 4; /* For VPD Header */
259 prod_len += 8; /* For Vendor field */
260 prod_len += strlen(prod);
261 prod_len++; /* For : */
262
263 if (dev->se_sub_dev->su_dev_flags &
264 SDF_EMULATED_VPD_UNIT_SERIAL) {
265 unit_serial_len =
266 strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
267 unit_serial_len++; /* For NULL Terminator */
268
269 if ((len + (id_len + 4) +
270 (prod_len + unit_serial_len)) >
271 cmd->data_length) {
272 len += (prod_len + unit_serial_len);
273 goto check_port;
274 }
275 id_len += sprintf((unsigned char *)&buf[off+12],
276 "%s:%s", prod,
277 &DEV_T10_WWN(dev)->unit_serial[0]);
278 }
279 buf[off] = 0x2; /* ASCII */
280 buf[off+1] = 0x1; /* T10 Vendor ID */
281 buf[off+2] = 0x0;
282 memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
283 /* Extra Byte for NULL Terminator */
284 id_len++;
285 /* Identifier Length */
286 buf[off+3] = id_len;
287 /* Header size for Designation descriptor */
288 len += (id_len + 4);
289 off += (id_len + 4);
290 /*
291 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
292 */
293check_port:
294 port = lun->lun_sep;
295 if (port) {
296 struct t10_alua_lu_gp *lu_gp;
297 u32 padding, scsi_name_len;
298 u16 lu_gp_id = 0;
299 u16 tg_pt_gp_id = 0;
300 u16 tpgt;
301
302 tpg = port->sep_tpg;
303 /*
304		 * Relative target port identifier, see spc4r17
305 * section 7.7.3.7
306 *
307 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
308 * section 7.5.1 Table 362
309 */
310 if (((len + 4) + 8) > cmd->data_length) {
311 len += 8;
312 goto check_tpgi;
313 }
314 buf[off] =
315 (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
316 buf[off++] |= 0x1; /* CODE SET == Binary */
317 buf[off] = 0x80; /* Set PIV=1 */
318		/* Set ASSOCIATION == target port: 01b */
319 buf[off] |= 0x10;
320		/* DESIGNATOR TYPE == Relative target port identifier */
321 buf[off++] |= 0x4;
322 off++; /* Skip over Reserved */
323 buf[off++] = 4; /* DESIGNATOR LENGTH */
324 /* Skip over Obsolete field in RTPI payload
325 * in Table 472 */
326 off += 2;
327 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
328 buf[off++] = (port->sep_rtpi & 0xff);
329 len += 8; /* Header size + Designation descriptor */
330 /*
331 * Target port group identifier, see spc4r17
332 * section 7.7.3.8
333 *
334 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
335 * section 7.5.1 Table 362
336 */
337check_tpgi:
338 if (T10_ALUA(dev->se_sub_dev)->alua_type !=
339 SPC3_ALUA_EMULATED)
340 goto check_scsi_name;
341
342 if (((len + 4) + 8) > cmd->data_length) {
343 len += 8;
344 goto check_lu_gp;
345 }
346 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
347 if (!tg_pt_gp_mem)
348 goto check_lu_gp;
349
350 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
351 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
352 if (!(tg_pt_gp)) {
353 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
354 goto check_lu_gp;
355 }
356 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
357 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
358
359 buf[off] =
360 (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
361 buf[off++] |= 0x1; /* CODE SET == Binary */
362 buf[off] = 0x80; /* Set PIV=1 */
363		/* Set ASSOCIATION == target port: 01b */
364 buf[off] |= 0x10;
365 /* DESIGNATOR TYPE == Target port group identifier */
366 buf[off++] |= 0x5;
367 off++; /* Skip over Reserved */
368 buf[off++] = 4; /* DESIGNATOR LENGTH */
369 off += 2; /* Skip over Reserved Field */
370 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
371 buf[off++] = (tg_pt_gp_id & 0xff);
372 len += 8; /* Header size + Designation descriptor */
373 /*
374 * Logical Unit Group identifier, see spc4r17
375 * section 7.7.3.8
376 */
377check_lu_gp:
378 if (((len + 4) + 8) > cmd->data_length) {
379 len += 8;
380 goto check_scsi_name;
381 }
382 lu_gp_mem = dev->dev_alua_lu_gp_mem;
383 if (!(lu_gp_mem))
384 goto check_scsi_name;
385
386 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
387 lu_gp = lu_gp_mem->lu_gp;
388 if (!(lu_gp)) {
389 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
390 goto check_scsi_name;
391 }
392 lu_gp_id = lu_gp->lu_gp_id;
393 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
394
395 buf[off++] |= 0x1; /* CODE SET == Binary */
396 /* DESIGNATOR TYPE == Logical Unit Group identifier */
397 buf[off++] |= 0x6;
398 off++; /* Skip over Reserved */
399 buf[off++] = 4; /* DESIGNATOR LENGTH */
400 off += 2; /* Skip over Reserved Field */
401 buf[off++] = ((lu_gp_id >> 8) & 0xff);
402 buf[off++] = (lu_gp_id & 0xff);
403 len += 8; /* Header size + Designation descriptor */
404 /*
405 * SCSI name string designator, see spc4r17
406 * section 7.7.3.11
407 *
408 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
409 * section 7.5.1 Table 362
410 */
411check_scsi_name:
412 scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
413 /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
414 scsi_name_len += 10;
415 /* Check for 4-byte padding */
416 padding = ((-scsi_name_len) & 3);
417 if (padding != 0)
418 scsi_name_len += padding;
419 /* Header size + Designation descriptor */
420 scsi_name_len += 4;
421
422 if (((len + 4) + scsi_name_len) > cmd->data_length) {
423 len += scsi_name_len;
424 goto set_len;
425 }
426 buf[off] =
427 (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
428 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
429 buf[off] = 0x80; /* Set PIV=1 */
430		/* Set ASSOCIATION == target port: 01b */
431 buf[off] |= 0x10;
432 /* DESIGNATOR TYPE == SCSI name string */
433 buf[off++] |= 0x8;
434 off += 2; /* Skip over Reserved and length */
435 /*
436		 * SCSI name string identifier containing $FABRIC_MOD
437 * dependent information. For LIO-Target and iSCSI
438		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
439 * UTF-8 encoding.
440 */
441 tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
442 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
443 TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
444 scsi_name_len += 1 /* Include NULL terminator */;
445 /*
446 * The null-terminated, null-padded (see 4.4.2) SCSI
447 * NAME STRING field contains a UTF-8 format string.
448 * The number of bytes in the SCSI NAME STRING field
449 * (i.e., the value in the DESIGNATOR LENGTH field)
450 * shall be no larger than 256 and shall be a multiple
451 * of four.
452 */
453 if (padding)
454 scsi_name_len += padding;
455
456 buf[off-1] = scsi_name_len;
457 off += scsi_name_len;
458 /* Header size + Designation descriptor */
459 len += (scsi_name_len + 4);
460 }
461set_len:
462 buf[2] = ((len >> 8) & 0xff);
463 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
464 return 0;
465}
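The NAA IEEE Registered Extended designator built above packs the ASCII-hex vpd_unit_serial two characters per byte, shifted by half a byte because the designator starts with the NAA type nibble and the OpenFabrics company ID. transport_asciihex_to_binaryhex() is defined elsewhere, so the sketch below uses a local hex_nibble() stand-in and shows only the basic two-hex-characters-to-one-byte packing, not the exact nibble offset:

#include <ctype.h>
#include <stdio.h>

static unsigned char hex_nibble(char c)
{
	if (isdigit((unsigned char)c))
		return c - '0';
	return (tolower((unsigned char)c) - 'a') + 10;
}

int main(void)
{
	const char serial[] = "89f0a1b2c3d4e5f60718293a";	/* example only */
	unsigned char packed[12];
	int i;

	/* Two ASCII hex characters become one binary byte. */
	for (i = 0; i < 12; i++)
		packed[i] = (hex_nibble(serial[i * 2]) << 4) |
			    hex_nibble(serial[i * 2 + 1]);

	for (i = 0; i < 12; i++)
		printf("%02x", packed[i]);
	printf("\n");
	return 0;
}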
466
467/* Extended INQUIRY Data VPD Page */
468static int
469target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
470{
471 if (cmd->data_length < 60)
472 return 0;
473
474 buf[1] = 0x86;
475 buf[2] = 0x3c;
476 /* Set HEADSUP, ORDSUP, SIMPSUP */
477 buf[5] = 0x07;
478
479 /* If WriteCache emulation is enabled, set V_SUP */
480 if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
481 buf[6] = 0x01;
482 return 0;
483}
484
485/* Block Limits VPD page */
486static int
487target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
488{
489 struct se_device *dev = SE_DEV(cmd);
490 int have_tp = 0;
491
492 /*
493 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
494	 * emulate_tpu=1 or emulate_tpws=1 we will expect a
495 * different page length for Thin Provisioning.
496 */
497 if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
498 have_tp = 1;
499
500 if (cmd->data_length < (0x10 + 4)) {
501 printk(KERN_INFO "Received data_length: %u"
502 " too small for EVPD 0xb0\n",
503 cmd->data_length);
504 return -1;
505 }
506
507 if (have_tp && cmd->data_length < (0x3c + 4)) {
508 printk(KERN_INFO "Received data_length: %u"
509 " too small for TPE=1 EVPD 0xb0\n",
510 cmd->data_length);
511 have_tp = 0;
512 }
513
514 buf[0] = dev->transport->get_device_type(dev);
515 buf[1] = 0xb0;
516 buf[3] = have_tp ? 0x3c : 0x10;
517
518 /*
519 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
520 */
521 put_unaligned_be16(1, &buf[6]);
522
523 /*
524 * Set MAXIMUM TRANSFER LENGTH
525 */
526 put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
527
528 /*
529 * Set OPTIMAL TRANSFER LENGTH
530 */
531 put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
532
533 /*
534	 * Exit now if we don't support TP or the initiator sent too
535	 * short a buffer.
536 */
537 if (!have_tp || cmd->data_length < (0x3c + 4))
538 return 0;
539
540 /*
541 * Set MAXIMUM UNMAP LBA COUNT
542 */
543 put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
544
545 /*
546 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
547 */
548 put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
549 &buf[24]);
550
551 /*
552 * Set OPTIMAL UNMAP GRANULARITY
553 */
554 put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
555
556 /*
557 * UNMAP GRANULARITY ALIGNMENT
558 */
559 put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
560 &buf[32]);
561 if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
562 buf[32] |= 0x80; /* Set the UGAVALID bit */
563
564 return 0;
565}
566
567/* Thin Provisioning VPD */
568static int
569target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
570{
571 struct se_device *dev = SE_DEV(cmd);
572
573 /*
574 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
575 *
576 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
577 * zero, then the page length shall be set to 0004h. If the DP bit
578 * is set to one, then the page length shall be set to the value
579 * defined in table 162.
580 */
581 buf[0] = dev->transport->get_device_type(dev);
582 buf[1] = 0xb2;
583
584 /*
585 * Set Hardcoded length mentioned above for DP=0
586 */
587 put_unaligned_be16(0x0004, &buf[2]);
588
589 /*
590 * The THRESHOLD EXPONENT field indicates the threshold set size in
591 * LBAs as a power of 2 (i.e., the threshold set size is equal to
592	 * 2^(threshold exponent)).
593 *
594 * Note that this is currently set to 0x00 as mkp says it will be
595 * changing again. We can enable this once it has settled in T10
596 * and is actually used by Linux/SCSI ML code.
597 */
598 buf[4] = 0x00;
599
600 /*
601 * A TPU bit set to one indicates that the device server supports
602 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
603 * that the device server does not support the UNMAP command.
604 */
605 if (DEV_ATTRIB(dev)->emulate_tpu != 0)
606 buf[5] = 0x80;
607
608 /*
609 * A TPWS bit set to one indicates that the device server supports
610 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
611 * A TPWS bit set to zero indicates that the device server does not
612 * support the use of the WRITE SAME (16) command to unmap LBAs.
613 */
614 if (DEV_ATTRIB(dev)->emulate_tpws != 0)
615 buf[5] |= 0x40;
616
617 return 0;
618}
619
620static int
621target_emulate_inquiry(struct se_cmd *cmd)
622{
623 struct se_device *dev = SE_DEV(cmd);
624 unsigned char *buf = cmd->t_task->t_task_buf;
625 unsigned char *cdb = cmd->t_task->t_task_cdb;
626
627 if (!(cdb[1] & 0x1))
628 return target_emulate_inquiry_std(cmd);
629
630 /*
631 * Make sure we at least have 4 bytes of INQUIRY response
632 * payload for 0x00 going back for EVPD=1. Note that 0x80
633 * and 0x83 will check for enough payload data length and
634 * jump to set_len: label when there is not enough inquiry EVPD
635 * payload length left for the next outgoing EVPD metadata
636 */
637 if (cmd->data_length < 4) {
638 printk(KERN_ERR "SCSI Inquiry payload length: %u"
639 " too small for EVPD=1\n", cmd->data_length);
640 return -1;
641 }
642 buf[0] = dev->transport->get_device_type(dev);
643
644 switch (cdb[2]) {
645 case 0x00:
646 return target_emulate_evpd_00(cmd, buf);
647 case 0x80:
648 return target_emulate_evpd_80(cmd, buf);
649 case 0x83:
650 return target_emulate_evpd_83(cmd, buf);
651 case 0x86:
652 return target_emulate_evpd_86(cmd, buf);
653 case 0xb0:
654 return target_emulate_evpd_b0(cmd, buf);
655 case 0xb2:
656 return target_emulate_evpd_b2(cmd, buf);
657 default:
658 printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
659 return -1;
660 }
661
662 return 0;
663}
664
665static int
666target_emulate_readcapacity(struct se_cmd *cmd)
667{
668 struct se_device *dev = SE_DEV(cmd);
669 unsigned char *buf = cmd->t_task->t_task_buf;
670 unsigned long long blocks_long = dev->transport->get_blocks(dev);
671 u32 blocks;
672
673 if (blocks_long >= 0x00000000ffffffff)
674 blocks = 0xffffffff;
675 else
676 blocks = (u32)blocks_long;
677
678 buf[0] = (blocks >> 24) & 0xff;
679 buf[1] = (blocks >> 16) & 0xff;
680 buf[2] = (blocks >> 8) & 0xff;
681 buf[3] = blocks & 0xff;
682 buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
683 buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
684 buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
685 buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
686 /*
687 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
688 */
689 if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
690 put_unaligned_be32(0xFFFFFFFF, &buf[0]);
691
692 return 0;
693}
694
695static int
696target_emulate_readcapacity_16(struct se_cmd *cmd)
697{
698 struct se_device *dev = SE_DEV(cmd);
699 unsigned char *buf = cmd->t_task->t_task_buf;
700 unsigned long long blocks = dev->transport->get_blocks(dev);
701
702 buf[0] = (blocks >> 56) & 0xff;
703 buf[1] = (blocks >> 48) & 0xff;
704 buf[2] = (blocks >> 40) & 0xff;
705 buf[3] = (blocks >> 32) & 0xff;
706 buf[4] = (blocks >> 24) & 0xff;
707 buf[5] = (blocks >> 16) & 0xff;
708 buf[6] = (blocks >> 8) & 0xff;
709 buf[7] = blocks & 0xff;
710 buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
711 buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
712 buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
713 buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
714 /*
715 * Set Thin Provisioning Enable bit following sbc3r22 in section
716 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
717 */
718 if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
719 buf[14] = 0x80;
720
721 return 0;
722}
723
724static int
725target_modesense_rwrecovery(unsigned char *p)
726{
727 p[0] = 0x01;
728 p[1] = 0x0a;
729
730 return 12;
731}
732
733static int
734target_modesense_control(struct se_device *dev, unsigned char *p)
735{
736 p[0] = 0x0a;
737 p[1] = 0x0a;
738 p[2] = 2;
739 /*
740 * From spc4r17, section 7.4.6 Control mode Page
741 *
742 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
743 *
744 * 00b: The logical unit shall clear any unit attention condition
745 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
746 * status and shall not establish a unit attention condition when a com-
747 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
748 * status.
749 *
750 * 10b: The logical unit shall not clear any unit attention condition
751 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
752 * status and shall not establish a unit attention condition when
753 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
754 * CONFLICT status.
755 *
756	 * 11b: The logical unit shall not clear any unit attention condition
757 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
758 * status and shall establish a unit attention condition for the
759 * initiator port associated with the I_T nexus on which the BUSY,
760 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
761 * Depending on the status, the additional sense code shall be set to
762 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
763 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
764 * command, a unit attention condition shall be established only once
765 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
766	 * of the number of commands completed with one of those status codes.
767 */
768 p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
769 (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
770 /*
771 * From spc4r17, section 7.4.6 Control mode Page
772 *
773 * Task Aborted Status (TAS) bit set to zero.
774 *
775 * A task aborted status (TAS) bit set to zero specifies that aborted
776 * tasks shall be terminated by the device server without any response
777 * to the application client. A TAS bit set to one specifies that tasks
778 * aborted by the actions of an I_T nexus other than the I_T nexus on
779 * which the command was received shall be completed with TASK ABORTED
780 * status (see SAM-4).
781 */
782 p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
783 p[8] = 0xff;
784 p[9] = 0xff;
785 p[11] = 30;
786
787 return 12;
788}
789
790static int
791target_modesense_caching(struct se_device *dev, unsigned char *p)
792{
793 p[0] = 0x08;
794 p[1] = 0x12;
795 if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
796 p[2] = 0x04; /* Write Cache Enable */
797 p[12] = 0x20; /* Disabled Read Ahead */
798
799 return 20;
800}
801
802static void
803target_modesense_write_protect(unsigned char *buf, int type)
804{
805 /*
806 * I believe that the WP bit (bit 7) in the mode header is the same for
807	 * all device types.
808 */
809 switch (type) {
810 case TYPE_DISK:
811 case TYPE_TAPE:
812 default:
813 buf[0] |= 0x80; /* WP bit */
814 break;
815 }
816}
817
818static void
819target_modesense_dpofua(unsigned char *buf, int type)
820{
821 switch (type) {
822 case TYPE_DISK:
823 buf[0] |= 0x10; /* DPOFUA bit */
824 break;
825 default:
826 break;
827 }
828}
829
830static int
831target_emulate_modesense(struct se_cmd *cmd, int ten)
832{
833 struct se_device *dev = SE_DEV(cmd);
834 char *cdb = cmd->t_task->t_task_cdb;
835 unsigned char *rbuf = cmd->t_task->t_task_buf;
836 int type = dev->transport->get_device_type(dev);
837 int offset = (ten) ? 8 : 4;
838 int length = 0;
839 unsigned char buf[SE_MODE_PAGE_BUF];
840
841 memset(buf, 0, SE_MODE_PAGE_BUF);
842
843 switch (cdb[2] & 0x3f) {
844 case 0x01:
845 length = target_modesense_rwrecovery(&buf[offset]);
846 break;
847 case 0x08:
848 length = target_modesense_caching(dev, &buf[offset]);
849 break;
850 case 0x0a:
851 length = target_modesense_control(dev, &buf[offset]);
852 break;
853 case 0x3f:
854 length = target_modesense_rwrecovery(&buf[offset]);
855 length += target_modesense_caching(dev, &buf[offset+length]);
856 length += target_modesense_control(dev, &buf[offset+length]);
857 break;
858 default:
859 printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
860 cdb[2] & 0x3f);
861 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
862 }
863 offset += length;
864
865 if (ten) {
866 offset -= 2;
867 buf[0] = (offset >> 8) & 0xff;
868 buf[1] = offset & 0xff;
869
870 if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
871 (cmd->se_deve &&
872 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
873 target_modesense_write_protect(&buf[3], type);
874
875 if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
876 (DEV_ATTRIB(dev)->emulate_fua_write > 0))
877 target_modesense_dpofua(&buf[3], type);
878
879 if ((offset + 2) > cmd->data_length)
880 offset = cmd->data_length;
881
882 } else {
883 offset -= 1;
884 buf[0] = offset & 0xff;
885
886 if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
887 (cmd->se_deve &&
888 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
889 target_modesense_write_protect(&buf[2], type);
890
891 if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
892 (DEV_ATTRIB(dev)->emulate_fua_write > 0))
893 target_modesense_dpofua(&buf[2], type);
894
895 if ((offset + 1) > cmd->data_length)
896 offset = cmd->data_length;
897 }
898 memcpy(rbuf, buf, offset);
899
900 return 0;
901}
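For reference, the ten/six split handled above comes down to the mode parameter header: MODE SENSE(6) uses a 4-byte header with a one-byte MODE DATA LENGTH, MODE SENSE(10) an 8-byte header with a two-byte big-endian length, and in both cases the length excludes its own field. A minimal sketch of just that header arithmetic:

#include <stdio.h>
#include <string.h>

static void fill_mode_header(unsigned char *buf, int ten, int payload_len)
{
	int hdr = ten ? 8 : 4;
	int mode_data_len = hdr + payload_len - (ten ? 2 : 1);

	memset(buf, 0, hdr);
	if (ten) {
		buf[0] = (mode_data_len >> 8) & 0xff;
		buf[1] = mode_data_len & 0xff;
	} else {
		buf[0] = mode_data_len & 0xff;
	}
}

int main(void)
{
	unsigned char buf6[255] = { 0 }, buf10[512] = { 0 };

	fill_mode_header(buf6, 0, 12);	/* e.g. one 12-byte mode page */
	fill_mode_header(buf10, 1, 12);
	printf("MODE SENSE(6) length byte: %u\n", buf6[0]);
	printf("MODE SENSE(10) length bytes: %u %u\n", buf10[0], buf10[1]);
	return 0;
}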
902
903static int
904target_emulate_request_sense(struct se_cmd *cmd)
905{
906 unsigned char *cdb = cmd->t_task->t_task_cdb;
907 unsigned char *buf = cmd->t_task->t_task_buf;
908 u8 ua_asc = 0, ua_ascq = 0;
909
910 if (cdb[1] & 0x01) {
911 printk(KERN_ERR "REQUEST_SENSE description emulation not"
912 " supported\n");
913 return PYX_TRANSPORT_INVALID_CDB_FIELD;
914 }
915 if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
916 /*
917 * CURRENT ERROR, UNIT ATTENTION
918 */
919 buf[0] = 0x70;
920 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
921 /*
922 * Make sure request data length is enough for additional
923 * sense data.
924 */
925 if (cmd->data_length <= 18) {
926 buf[7] = 0x00;
927 return 0;
928 }
929 /*
930 * The Additional Sense Code (ASC) from the UNIT ATTENTION
931 */
932 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
933 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
934 buf[7] = 0x0A;
935 } else {
936 /*
937 * CURRENT ERROR, NO SENSE
938 */
939 buf[0] = 0x70;
940 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
941 /*
942 * Make sure request data length is enough for additional
943 * sense data.
944 */
945 if (cmd->data_length <= 18) {
946 buf[7] = 0x00;
947 return 0;
948 }
949 /*
950 * NO ADDITIONAL SENSE INFORMATION
951 */
952 buf[SPC_ASC_KEY_OFFSET] = 0x00;
953 buf[7] = 0x0A;
954 }
955
956 return 0;
957}
958
959/*
960 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
961 * Note this is not used for TCM/pSCSI passthrough
962 */
963static int
964target_emulate_unmap(struct se_task *task)
965{
966 struct se_cmd *cmd = TASK_CMD(task);
967 struct se_device *dev = SE_DEV(cmd);
968 unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
969 unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
970 sector_t lba;
971 unsigned int size = cmd->data_length, range;
972 int ret, offset;
973 unsigned short dl, bd_dl;
974
975 /* First UNMAP block descriptor starts at 8 byte offset */
976 offset = 8;
977 size -= 8;
978 dl = get_unaligned_be16(&cdb[0]);
979 bd_dl = get_unaligned_be16(&cdb[2]);
980 ptr = &buf[offset];
981	printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u"
982 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
983
984 while (size) {
985 lba = get_unaligned_be64(&ptr[0]);
986 range = get_unaligned_be32(&ptr[8]);
987 printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
988 (unsigned long long)lba, range);
989
990 ret = dev->transport->do_discard(dev, lba, range);
991 if (ret < 0) {
992 printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
993 ret);
994 return -1;
995 }
996
997 ptr += 16;
998 size -= 16;
999 }
1000
1001 task->task_scsi_status = GOOD;
1002 transport_complete_task(task, 1);
1003 return 0;
1004}
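The parameter data walked above follows the SBC-3 UNMAP layout: an 8-byte header, then 16-byte block descriptors carrying an 8-byte LBA and a 4-byte block count each. A standalone sketch of that parsing, with hand-rolled big-endian helpers standing in for get_unaligned_be64()/get_unaligned_be32():

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	/* One descriptor: unmap 16 blocks starting at LBA 2048 (example data). */
	uint8_t param[8 + 16] = { 0 };
	const uint8_t *ptr = &param[8];
	size_t size = sizeof(param) - 8;

	param[1] = 22;		/* UNMAP DATA LENGTH (bytes after byte 1) */
	param[3] = 16;		/* UNMAP BLOCK DESCRIPTOR DATA LENGTH */
	param[8 + 6] = 0x08;	/* LBA 2048, big-endian */
	param[8 + 11] = 0x10;	/* NUMBER OF LOGICAL BLOCKS = 16 */

	while (size >= 16) {
		uint64_t lba = get_be64(&ptr[0]);
		uint32_t range = get_be32(&ptr[8]);

		printf("UNMAP lba=%llu range=%u\n",
		       (unsigned long long)lba, range);
		ptr += 16;
		size -= 16;
	}
	return 0;
}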
1005
1006/*
1007 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1008 * Note this is not used for TCM/pSCSI passthrough
1009 */
1010static int
1011target_emulate_write_same(struct se_task *task)
1012{
1013 struct se_cmd *cmd = TASK_CMD(task);
1014 struct se_device *dev = SE_DEV(cmd);
1015 sector_t lba = cmd->t_task->t_task_lba;
1016 unsigned int range;
1017 int ret;
1018
1019 range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
1020
1021 printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
1022 (unsigned long long)lba, range);
1023
1024 ret = dev->transport->do_discard(dev, lba, range);
1025 if (ret < 0) {
1026 printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
1027 return -1;
1028 }
1029
1030 task->task_scsi_status = GOOD;
1031 transport_complete_task(task, 1);
1032 return 0;
1033}
1034
1035int
1036transport_emulate_control_cdb(struct se_task *task)
1037{
1038 struct se_cmd *cmd = TASK_CMD(task);
1039 struct se_device *dev = SE_DEV(cmd);
1040 unsigned short service_action;
1041 int ret = 0;
1042
1043 switch (cmd->t_task->t_task_cdb[0]) {
1044 case INQUIRY:
1045 ret = target_emulate_inquiry(cmd);
1046 break;
1047 case READ_CAPACITY:
1048 ret = target_emulate_readcapacity(cmd);
1049 break;
1050 case MODE_SENSE:
1051 ret = target_emulate_modesense(cmd, 0);
1052 break;
1053 case MODE_SENSE_10:
1054 ret = target_emulate_modesense(cmd, 1);
1055 break;
1056 case SERVICE_ACTION_IN:
1057 switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
1058 case SAI_READ_CAPACITY_16:
1059 ret = target_emulate_readcapacity_16(cmd);
1060 break;
1061 default:
1062 printk(KERN_ERR "Unsupported SA: 0x%02x\n",
1063 cmd->t_task->t_task_cdb[1] & 0x1f);
1064 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1065 }
1066 break;
1067 case REQUEST_SENSE:
1068 ret = target_emulate_request_sense(cmd);
1069 break;
1070 case UNMAP:
1071 if (!dev->transport->do_discard) {
1072 printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
1073 dev->transport->name);
1074 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1075 }
1076 ret = target_emulate_unmap(task);
1077 break;
1078 case WRITE_SAME_16:
1079 if (!dev->transport->do_discard) {
1080 printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
1081 " for: %s\n", dev->transport->name);
1082 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1083 }
1084 ret = target_emulate_write_same(task);
1085 break;
1086 case VARIABLE_LENGTH_CMD:
1087 service_action =
1088 get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
1089 switch (service_action) {
1090 case WRITE_SAME_32:
1091 if (!dev->transport->do_discard) {
1092 printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
1093 " supported for: %s\n",
1094 dev->transport->name);
1095 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1096 }
1097 ret = target_emulate_write_same(task);
1098 break;
1099 default:
1100 printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
1101 " 0x%02x\n", service_action);
1102 break;
1103 }
1104 break;
1105 case SYNCHRONIZE_CACHE:
1106 case 0x91: /* SYNCHRONIZE_CACHE_16: */
1107 if (!dev->transport->do_sync_cache) {
1108 printk(KERN_ERR
1109 "SYNCHRONIZE_CACHE emulation not supported"
1110 " for: %s\n", dev->transport->name);
1111 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1112 }
1113 dev->transport->do_sync_cache(task);
1114 break;
1115 case ALLOW_MEDIUM_REMOVAL:
1116 case ERASE:
1117 case REZERO_UNIT:
1118 case SEEK_10:
1119 case SPACE:
1120 case START_STOP:
1121 case TEST_UNIT_READY:
1122 case VERIFY:
1123 case WRITE_FILEMARKS:
1124 break;
1125 default:
1126 printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
1127 cmd->t_task->t_task_cdb[0], dev->transport->name);
1128 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1129 }
1130
1131 if (ret < 0)
1132 return ret;
1133 task->task_scsi_status = GOOD;
1134 transport_complete_task(task, 1);
1135
1136 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
1137}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 000000000000..25c1f49a7d8b
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3323 @@
1/*******************************************************************************
2 * Filename: target_core_configfs.c
3 *
4 * This file contains ConfigFS logic for the Generic Target Engine project.
5 *
6 * Copyright (c) 2008-2011 Rising Tide Systems
7 * Copyright (c) 2008-2011 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * based on configfs Copyright (C) 2005 Oracle. All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/fs.h>
31#include <linux/namei.h>
32#include <linux/slab.h>
33#include <linux/types.h>
34#include <linux/delay.h>
35#include <linux/unistd.h>
36#include <linux/string.h>
37#include <linux/parser.h>
38#include <linux/syscalls.h>
39#include <linux/configfs.h>
40
41#include <target/target_core_base.h>
42#include <target/target_core_device.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_fabric_configfs.h>
46#include <target/target_core_configfs.h>
47#include <target/configfs_macros.h>
48
49#include "target_core_alua.h"
50#include "target_core_hba.h"
51#include "target_core_pr.h"
52#include "target_core_rd.h"
53#include "target_core_stat.h"
54
55static struct list_head g_tf_list;
56static struct mutex g_tf_lock;
57
58struct target_core_configfs_attribute {
59 struct configfs_attribute attr;
60 ssize_t (*show)(void *, char *);
61 ssize_t (*store)(void *, const char *, size_t);
62};
63
64static inline struct se_hba *
65item_to_hba(struct config_item *item)
66{
67 return container_of(to_config_group(item), struct se_hba, hba_group);
68}
69
70/*
71 * Attributes for /sys/kernel/config/target/
72 */
73static ssize_t target_core_attr_show(struct config_item *item,
74 struct configfs_attribute *attr,
75 char *page)
76{
77 return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
78 " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
79 utsname()->sysname, utsname()->machine);
80}
81
82static struct configfs_item_operations target_core_fabric_item_ops = {
83 .show_attribute = target_core_attr_show,
84};
85
86static struct configfs_attribute target_core_item_attr_version = {
87 .ca_owner = THIS_MODULE,
88 .ca_name = "version",
89 .ca_mode = S_IRUGO,
90};
91
92static struct target_fabric_configfs *target_core_get_fabric(
93 const char *name)
94{
95 struct target_fabric_configfs *tf;
96
97 if (!(name))
98 return NULL;
99
100 mutex_lock(&g_tf_lock);
101 list_for_each_entry(tf, &g_tf_list, tf_list) {
102 if (!(strcmp(tf->tf_name, name))) {
103 atomic_inc(&tf->tf_access_cnt);
104 mutex_unlock(&g_tf_lock);
105 return tf;
106 }
107 }
108 mutex_unlock(&g_tf_lock);
109
110 return NULL;
111}
112
113/*
114 * Called from struct target_core_group_ops->make_group()
115 */
116static struct config_group *target_core_register_fabric(
117 struct config_group *group,
118 const char *name)
119{
120 struct target_fabric_configfs *tf;
121 int ret;
122
123 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
124 " %s\n", group, name);
125 /*
126 * Ensure that TCM subsystem plugins are loaded at this point for
127 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
128 * LUN symlinks.
129 */
130 if (transport_subsystem_check_init() < 0)
131 return ERR_PTR(-EINVAL);
132
133 /*
134 * Below are some hardcoded request_module() calls to automatically
135	 * load fabric modules when the following is called:
136 *
137 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
138 *
139 * Note that this does not limit which TCM fabric module can be
140	 * registered, but simply provides auto loading logic for modules with
141 * mkdir(2) system calls with known TCM fabric modules.
142 */
143 if (!(strncmp(name, "iscsi", 5))) {
144 /*
145 * Automatically load the LIO Target fabric module when the
146 * following is called:
147 *
148 * mkdir -p $CONFIGFS/target/iscsi
149 */
150 ret = request_module("iscsi_target_mod");
151 if (ret < 0) {
152 printk(KERN_ERR "request_module() failed for"
153 " iscsi_target_mod.ko: %d\n", ret);
154 return ERR_PTR(-EINVAL);
155 }
156 } else if (!(strncmp(name, "loopback", 8))) {
157 /*
158 * Automatically load the tcm_loop fabric module when the
159 * following is called:
160 *
161 * mkdir -p $CONFIGFS/target/loopback
162 */
163 ret = request_module("tcm_loop");
164 if (ret < 0) {
165 printk(KERN_ERR "request_module() failed for"
166 " tcm_loop.ko: %d\n", ret);
167 return ERR_PTR(-EINVAL);
168 }
169 }
170
171 tf = target_core_get_fabric(name);
172 if (!(tf)) {
173 printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
174 name);
175 return ERR_PTR(-EINVAL);
176 }
177 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
178 " %s\n", tf->tf_name);
179 /*
180	 * On a successful target_core_get_fabric() lookup, the returned
181 * struct target_fabric_configfs *tf will contain a usage reference.
182 */
183 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
184 &TF_CIT_TMPL(tf)->tfc_wwn_cit);
185
186 tf->tf_group.default_groups = tf->tf_default_groups;
187 tf->tf_group.default_groups[0] = &tf->tf_disc_group;
188 tf->tf_group.default_groups[1] = NULL;
189
190 config_group_init_type_name(&tf->tf_group, name,
191 &TF_CIT_TMPL(tf)->tfc_wwn_cit);
192 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
193 &TF_CIT_TMPL(tf)->tfc_discovery_cit);
194
195 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
196 " %s\n", tf->tf_group.cg_item.ci_name);
197 /*
198 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
199 */
200 tf->tf_ops.tf_subsys = tf->tf_subsys;
201 tf->tf_fabric = &tf->tf_group.cg_item;
202 printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
203 " for %s\n", name);
204
205 return &tf->tf_group;
206}
207
208/*
209 * Called from struct target_core_group_ops->drop_item()
210 */
211static void target_core_deregister_fabric(
212 struct config_group *group,
213 struct config_item *item)
214{
215 struct target_fabric_configfs *tf = container_of(
216 to_config_group(item), struct target_fabric_configfs, tf_group);
217 struct config_group *tf_group;
218 struct config_item *df_item;
219 int i;
220
221 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
222 " tf list\n", config_item_name(item));
223
224 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
225 " %s\n", tf->tf_name);
226 atomic_dec(&tf->tf_access_cnt);
227
228 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
229 " tf->tf_fabric for %s\n", tf->tf_name);
230 tf->tf_fabric = NULL;
231
232 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
233 " %s\n", config_item_name(item));
234
235 tf_group = &tf->tf_group;
236 for (i = 0; tf_group->default_groups[i]; i++) {
237 df_item = &tf_group->default_groups[i]->cg_item;
238 tf_group->default_groups[i] = NULL;
239 config_item_put(df_item);
240 }
241 config_item_put(item);
242}
243
244static struct configfs_group_operations target_core_fabric_group_ops = {
245 .make_group = &target_core_register_fabric,
246 .drop_item = &target_core_deregister_fabric,
247};
248
249/*
250 * All item attributes appearing in /sys/kernel/target/ appear here.
251 */
252static struct configfs_attribute *target_core_fabric_item_attrs[] = {
253 &target_core_item_attr_version,
254 NULL,
255};
256
257/*
258 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
259 */
260static struct config_item_type target_core_fabrics_item = {
261 .ct_item_ops = &target_core_fabric_item_ops,
262 .ct_group_ops = &target_core_fabric_group_ops,
263 .ct_attrs = target_core_fabric_item_attrs,
264 .ct_owner = THIS_MODULE,
265};
266
267static struct configfs_subsystem target_core_fabrics = {
268 .su_group = {
269 .cg_item = {
270 .ci_namebuf = "target",
271 .ci_type = &target_core_fabrics_item,
272 },
273 },
274};
275
276static struct configfs_subsystem *target_core_subsystem[] = {
277 &target_core_fabrics,
278 NULL,
279};
280
281/*##############################################################################
282// Start functions called by external Target Fabrics Modules
283//############################################################################*/
284
285/*
286 * First function called by fabric modules to:
287 *
288 * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
289 * 2) Add struct target_fabric_configfs to g_tf_list
290 * 3) Return struct target_fabric_configfs to fabric module to be passed
291 * into target_fabric_configfs_register().
292 */
293struct target_fabric_configfs *target_fabric_configfs_init(
294 struct module *fabric_mod,
295 const char *name)
296{
297 struct target_fabric_configfs *tf;
298
299 if (!(fabric_mod)) {
300 printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
301 return NULL;
302 }
303 if (!(name)) {
304 printk(KERN_ERR "Unable to locate passed fabric name\n");
305 return NULL;
306 }
307 if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
308 printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
309 "_NAME_SIZE\n", name);
310 return NULL;
311 }
312
313 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
314 if (!(tf))
315 return NULL;
316
317 INIT_LIST_HEAD(&tf->tf_list);
318 atomic_set(&tf->tf_access_cnt, 0);
319 /*
320 * Setup the default generic struct config_item_type's (cits) in
321 * struct target_fabric_configfs->tf_cit_tmpl
322 */
323 tf->tf_module = fabric_mod;
324 target_fabric_setup_cits(tf);
325
326 tf->tf_subsys = target_core_subsystem[0];
327 snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
328
329 mutex_lock(&g_tf_lock);
330 list_add_tail(&tf->tf_list, &g_tf_list);
331 mutex_unlock(&g_tf_lock);
332
333 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
334 ">>>>>>>>>>>>>>\n");
335 printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
336 " %s\n", tf, tf->tf_name);
337 return tf;
338}
339EXPORT_SYMBOL(target_fabric_configfs_init);
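Taken together with target_fabric_configfs_register() and target_fabric_configfs_free() further down, the call sequence a fabric module is expected to follow looks roughly like the fragment below. This is a sketch only, not taken from any in-tree fabric: it assumes the module fills in its tf_ops callbacks between init and register, and omits everything fabric-specific.

static int example_fabric_register(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	fabric = target_fabric_configfs_init(THIS_MODULE, "example");
	if (!fabric)
		return -ENOMEM;
	/* ... fill in fabric->tf_ops callbacks here ... */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		target_fabric_configfs_free(fabric);
		return ret;
	}
	return 0;
}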
340
341/*
342 * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
343 */
344void target_fabric_configfs_free(
345 struct target_fabric_configfs *tf)
346{
347 mutex_lock(&g_tf_lock);
348 list_del(&tf->tf_list);
349 mutex_unlock(&g_tf_lock);
350
351 kfree(tf);
352}
353EXPORT_SYMBOL(target_fabric_configfs_free);
354
355/*
356 * Perform a sanity check of the passed tf->tf_ops before completing
357 * TCM fabric module registration.
358 */
359static int target_fabric_tf_ops_check(
360 struct target_fabric_configfs *tf)
361{
362 struct target_core_fabric_ops *tfo = &tf->tf_ops;
363
364 if (!(tfo->get_fabric_name)) {
365 printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
366 return -EINVAL;
367 }
368 if (!(tfo->get_fabric_proto_ident)) {
369 printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
370 return -EINVAL;
371 }
372 if (!(tfo->tpg_get_wwn)) {
373 printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
374 return -EINVAL;
375 }
376 if (!(tfo->tpg_get_tag)) {
377 printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
378 return -EINVAL;
379 }
380 if (!(tfo->tpg_get_default_depth)) {
381 printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
382 return -EINVAL;
383 }
384 if (!(tfo->tpg_get_pr_transport_id)) {
385 printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
386 return -EINVAL;
387 }
388 if (!(tfo->tpg_get_pr_transport_id_len)) {
389 printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
390 return -EINVAL;
391 }
392 if (!(tfo->tpg_check_demo_mode)) {
393 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
394 return -EINVAL;
395 }
396 if (!(tfo->tpg_check_demo_mode_cache)) {
397 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
398 return -EINVAL;
399 }
400 if (!(tfo->tpg_check_demo_mode_write_protect)) {
401 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
402 return -EINVAL;
403 }
404 if (!(tfo->tpg_check_prod_mode_write_protect)) {
405 printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
406 return -EINVAL;
407 }
408 if (!(tfo->tpg_alloc_fabric_acl)) {
409 printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
410 return -EINVAL;
411 }
412 if (!(tfo->tpg_release_fabric_acl)) {
413 printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
414 return -EINVAL;
415 }
416 if (!(tfo->tpg_get_inst_index)) {
417 printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
418 return -EINVAL;
419 }
420 if (!(tfo->release_cmd_to_pool)) {
421 printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
422 return -EINVAL;
423 }
424 if (!(tfo->release_cmd_direct)) {
425 printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
426 return -EINVAL;
427 }
428 if (!(tfo->shutdown_session)) {
429 printk(KERN_ERR "Missing tfo->shutdown_session()\n");
430 return -EINVAL;
431 }
432 if (!(tfo->close_session)) {
433 printk(KERN_ERR "Missing tfo->close_session()\n");
434 return -EINVAL;
435 }
436 if (!(tfo->stop_session)) {
437 printk(KERN_ERR "Missing tfo->stop_session()\n");
438 return -EINVAL;
439 }
440 if (!(tfo->fall_back_to_erl0)) {
441 printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
442 return -EINVAL;
443 }
444 if (!(tfo->sess_logged_in)) {
445 printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
446 return -EINVAL;
447 }
448 if (!(tfo->sess_get_index)) {
449 printk(KERN_ERR "Missing tfo->sess_get_index()\n");
450 return -EINVAL;
451 }
452 if (!(tfo->write_pending)) {
453 printk(KERN_ERR "Missing tfo->write_pending()\n");
454 return -EINVAL;
455 }
456 if (!(tfo->write_pending_status)) {
457 printk(KERN_ERR "Missing tfo->write_pending_status()\n");
458 return -EINVAL;
459 }
460 if (!(tfo->set_default_node_attributes)) {
461 printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
462 return -EINVAL;
463 }
464 if (!(tfo->get_task_tag)) {
465 printk(KERN_ERR "Missing tfo->get_task_tag()\n");
466 return -EINVAL;
467 }
468 if (!(tfo->get_cmd_state)) {
469 printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
470 return -EINVAL;
471 }
472 if (!(tfo->new_cmd_failure)) {
473 printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
474 return -EINVAL;
475 }
476 if (!(tfo->queue_data_in)) {
477 printk(KERN_ERR "Missing tfo->queue_data_in()\n");
478 return -EINVAL;
479 }
480 if (!(tfo->queue_status)) {
481 printk(KERN_ERR "Missing tfo->queue_status()\n");
482 return -EINVAL;
483 }
484 if (!(tfo->queue_tm_rsp)) {
485 printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
486 return -EINVAL;
487 }
488 if (!(tfo->set_fabric_sense_len)) {
489 printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
490 return -EINVAL;
491 }
492 if (!(tfo->get_fabric_sense_len)) {
493 printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
494 return -EINVAL;
495 }
496 if (!(tfo->is_state_remove)) {
497 printk(KERN_ERR "Missing tfo->is_state_remove()\n");
498 return -EINVAL;
499 }
500 /*
501	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
502 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
503 * target_core_fabric_configfs.c WWN+TPG group context code.
504 */
505 if (!(tfo->fabric_make_wwn)) {
506 printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
507 return -EINVAL;
508 }
509 if (!(tfo->fabric_drop_wwn)) {
510 printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
511 return -EINVAL;
512 }
513 if (!(tfo->fabric_make_tpg)) {
514 printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
515 return -EINVAL;
516 }
517 if (!(tfo->fabric_drop_tpg)) {
518 printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
519 return -EINVAL;
520 }
521
522 return 0;
523}
524
525/*
526 * Called 2nd from fabric module with returned parameter of
527 * struct target_fabric_configfs * from target_fabric_configfs_init().
528 *
529 * Upon a successful registration, the new fabric's struct config_item is
530 * returned. Also, a pointer to this struct is set in the passed
531 * struct target_fabric_configfs.
532 */
533int target_fabric_configfs_register(
534 struct target_fabric_configfs *tf)
535{
536 struct config_group *su_group;
537 int ret;
538
539 if (!(tf)) {
540 printk(KERN_ERR "Unable to locate target_fabric_configfs"
541 " pointer\n");
542 return -EINVAL;
543 }
544 if (!(tf->tf_subsys)) {
545		printk(KERN_ERR "Unable to locate struct configfs_subsystem"
546 " pointer\n");
547 return -EINVAL;
548 }
549 su_group = &tf->tf_subsys->su_group;
550 if (!(su_group)) {
551 printk(KERN_ERR "Unable to locate target struct config_group"
552 " pointer\n");
553 return -EINVAL;
554 }
555 ret = target_fabric_tf_ops_check(tf);
556 if (ret < 0)
557 return ret;
558
559 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
560 ">>>>>>>>>>\n");
561 return 0;
562}
563EXPORT_SYMBOL(target_fabric_configfs_register);
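
/*
 * Illustrative calling sequence from a fabric module (a sketch based on
 * the comments above; the fabric name below is an assumption):
 *
 *	tf = target_fabric_configfs_init(THIS_MODULE, "myfabric");
 *	... fill in the tf->tf_ops callbacks checked by
 *	    target_fabric_tf_ops_check() ...
 *	ret = target_fabric_configfs_register(tf);
 */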
564
565void target_fabric_configfs_deregister(
566 struct target_fabric_configfs *tf)
567{
568 struct config_group *su_group;
569 struct configfs_subsystem *su;
570
571 if (!(tf)) {
572 printk(KERN_ERR "Unable to locate passed target_fabric_"
573 "configfs\n");
574 return;
575 }
576 su = tf->tf_subsys;
577 if (!(su)) {
578 printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
579 " pointer\n");
580 return;
581 }
582 su_group = &tf->tf_subsys->su_group;
583 if (!(su_group)) {
584 printk(KERN_ERR "Unable to locate target struct config_group"
585 " pointer\n");
586 return;
587 }
588
589 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
590 ">>>>>>>>>>>>\n");
591 mutex_lock(&g_tf_lock);
592 if (atomic_read(&tf->tf_access_cnt)) {
593 mutex_unlock(&g_tf_lock);
594 printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
595 tf->tf_name);
596 BUG();
597 }
598 list_del(&tf->tf_list);
599 mutex_unlock(&g_tf_lock);
600
601 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
602 " %s\n", tf->tf_name);
603 tf->tf_module = NULL;
604 tf->tf_subsys = NULL;
605 kfree(tf);
606
607	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
608 ">>>>>\n");
609 return;
610}
611EXPORT_SYMBOL(target_fabric_configfs_deregister);
612
613/*##############################################################################
614// Stop functions called by external Target Fabrics Modules
615//############################################################################*/
616
617/* Start functions for struct config_item_type target_core_dev_attrib_cit */
618
619#define DEF_DEV_ATTRIB_SHOW(_name) \
620static ssize_t target_core_dev_show_attr_##_name( \
621 struct se_dev_attrib *da, \
622 char *page) \
623{ \
624 struct se_device *dev; \
625 struct se_subsystem_dev *se_dev = da->da_sub_dev; \
626 ssize_t rb; \
627 \
628 spin_lock(&se_dev->se_dev_lock); \
629 dev = se_dev->se_dev_ptr; \
630 if (!(dev)) { \
631 spin_unlock(&se_dev->se_dev_lock); \
632 return -ENODEV; \
633 } \
634 rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
635 spin_unlock(&se_dev->se_dev_lock); \
636 \
637 return rb; \
638}
639
640#define DEF_DEV_ATTRIB_STORE(_name) \
641static ssize_t target_core_dev_store_attr_##_name( \
642 struct se_dev_attrib *da, \
643 const char *page, \
644 size_t count) \
645{ \
646 struct se_device *dev; \
647 struct se_subsystem_dev *se_dev = da->da_sub_dev; \
648 unsigned long val; \
649 int ret; \
650 \
651 spin_lock(&se_dev->se_dev_lock); \
652 dev = se_dev->se_dev_ptr; \
653 if (!(dev)) { \
654 spin_unlock(&se_dev->se_dev_lock); \
655 return -ENODEV; \
656 } \
657 ret = strict_strtoul(page, 0, &val); \
658 if (ret < 0) { \
659 spin_unlock(&se_dev->se_dev_lock); \
660 printk(KERN_ERR "strict_strtoul() failed with" \
661 " ret: %d\n", ret); \
662 return -EINVAL; \
663 } \
664 ret = se_dev_set_##_name(dev, (u32)val); \
665 spin_unlock(&se_dev->se_dev_lock); \
666 \
667 return (!ret) ? count : -EINVAL; \
668}
669
670#define DEF_DEV_ATTRIB(_name) \
671DEF_DEV_ATTRIB_SHOW(_name); \
672DEF_DEV_ATTRIB_STORE(_name);
673
674#define DEF_DEV_ATTRIB_RO(_name) \
675DEF_DEV_ATTRIB_SHOW(_name);
676
677CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
678#define SE_DEV_ATTR(_name, _mode) \
679static struct target_core_dev_attrib_attribute \
680 target_core_dev_attrib_##_name = \
681 __CONFIGFS_EATTR(_name, _mode, \
682 target_core_dev_show_attr_##_name, \
683 target_core_dev_store_attr_##_name);
684
685#define SE_DEV_ATTR_RO(_name) \
686static struct target_core_dev_attrib_attribute \
687 target_core_dev_attrib_##_name = \
688 __CONFIGFS_EATTR_RO(_name, \
689 target_core_dev_show_attr_##_name);
690
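/*
 * Each DEF_DEV_ATTRIB()/SE_DEV_ATTR() pair below expands into a show
 * handler and (for read/write attributes) a store handler that calls
 * se_dev_set_<name>(), plus the matching struct configfs_attribute.
 * From userspace the attributes then appear as files in the storage
 * object's attrib/ group, e.g. (illustrative, following the
 * $TARGET/$HBA/$STORAGE_OBJECT convention used elsewhere in this file):
 *
 *	echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/attrib/emulate_write_cache
 *	cat $TARGET/$HBA/$STORAGE_OBJECT/attrib/block_size
 */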
691DEF_DEV_ATTRIB(emulate_dpo);
692SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
693
694DEF_DEV_ATTRIB(emulate_fua_write);
695SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
696
697DEF_DEV_ATTRIB(emulate_fua_read);
698SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
699
700DEF_DEV_ATTRIB(emulate_write_cache);
701SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
702
703DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
704SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
705
706DEF_DEV_ATTRIB(emulate_tas);
707SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
708
709DEF_DEV_ATTRIB(emulate_tpu);
710SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
711
712DEF_DEV_ATTRIB(emulate_tpws);
713SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
714
715DEF_DEV_ATTRIB(enforce_pr_isids);
716SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
717
718DEF_DEV_ATTRIB_RO(hw_block_size);
719SE_DEV_ATTR_RO(hw_block_size);
720
721DEF_DEV_ATTRIB(block_size);
722SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
723
724DEF_DEV_ATTRIB_RO(hw_max_sectors);
725SE_DEV_ATTR_RO(hw_max_sectors);
726
727DEF_DEV_ATTRIB(max_sectors);
728SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
729
730DEF_DEV_ATTRIB(optimal_sectors);
731SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
732
733DEF_DEV_ATTRIB_RO(hw_queue_depth);
734SE_DEV_ATTR_RO(hw_queue_depth);
735
736DEF_DEV_ATTRIB(queue_depth);
737SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
738
739DEF_DEV_ATTRIB(task_timeout);
740SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
741
742DEF_DEV_ATTRIB(max_unmap_lba_count);
743SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
744
745DEF_DEV_ATTRIB(max_unmap_block_desc_count);
746SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
747
748DEF_DEV_ATTRIB(unmap_granularity);
749SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
750
751DEF_DEV_ATTRIB(unmap_granularity_alignment);
752SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
753
754CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
755
756static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
757 &target_core_dev_attrib_emulate_dpo.attr,
758 &target_core_dev_attrib_emulate_fua_write.attr,
759 &target_core_dev_attrib_emulate_fua_read.attr,
760 &target_core_dev_attrib_emulate_write_cache.attr,
761 &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
762 &target_core_dev_attrib_emulate_tas.attr,
763 &target_core_dev_attrib_emulate_tpu.attr,
764 &target_core_dev_attrib_emulate_tpws.attr,
765 &target_core_dev_attrib_enforce_pr_isids.attr,
766 &target_core_dev_attrib_hw_block_size.attr,
767 &target_core_dev_attrib_block_size.attr,
768 &target_core_dev_attrib_hw_max_sectors.attr,
769 &target_core_dev_attrib_max_sectors.attr,
770 &target_core_dev_attrib_optimal_sectors.attr,
771 &target_core_dev_attrib_hw_queue_depth.attr,
772 &target_core_dev_attrib_queue_depth.attr,
773 &target_core_dev_attrib_task_timeout.attr,
774 &target_core_dev_attrib_max_unmap_lba_count.attr,
775 &target_core_dev_attrib_max_unmap_block_desc_count.attr,
776 &target_core_dev_attrib_unmap_granularity.attr,
777 &target_core_dev_attrib_unmap_granularity_alignment.attr,
778 NULL,
779};
780
781static struct configfs_item_operations target_core_dev_attrib_ops = {
782 .show_attribute = target_core_dev_attrib_attr_show,
783 .store_attribute = target_core_dev_attrib_attr_store,
784};
785
786static struct config_item_type target_core_dev_attrib_cit = {
787 .ct_item_ops = &target_core_dev_attrib_ops,
788 .ct_attrs = target_core_dev_attrib_attrs,
789 .ct_owner = THIS_MODULE,
790};
791
792/* End functions for struct config_item_type target_core_dev_attrib_cit */
793
794/* Start functions for struct config_item_type target_core_dev_wwn_cit */
795
796CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
797#define SE_DEV_WWN_ATTR(_name, _mode) \
798static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
799 __CONFIGFS_EATTR(_name, _mode, \
800 target_core_dev_wwn_show_attr_##_name, \
801 target_core_dev_wwn_store_attr_##_name);
802
803#define SE_DEV_WWN_ATTR_RO(_name) \
804static struct target_core_dev_wwn_attribute \
805		target_core_dev_wwn_##_name = \
806	__CONFIGFS_EATTR_RO(_name, \
807	target_core_dev_wwn_show_attr_##_name);
810
811/*
812 * VPD page 0x80 Unit serial
813 */
814static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
815 struct t10_wwn *t10_wwn,
816 char *page)
817{
818 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
819 struct se_device *dev;
820
821 dev = se_dev->se_dev_ptr;
822 if (!(dev))
823 return -ENODEV;
824
825 return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
826 &t10_wwn->unit_serial[0]);
827}
828
829static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
830 struct t10_wwn *t10_wwn,
831 const char *page,
832 size_t count)
833{
834 struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
835 struct se_device *dev;
836 unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
837
838 /*
839 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
840 * from the struct scsi_device level firmware, do not allow
841 * VPD Unit Serial to be emulated.
842 *
843 * Note this struct scsi_device could also be emulating VPD
844 * information from its drivers/scsi LLD. But for now we assume
845 * it is doing 'the right thing' wrt a world wide unique
846 * VPD Unit Serial Number that OS dependent multipath can depend on.
847 */
848 if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
849 printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
850 " Unit Serial, ignoring request\n");
851 return -EOPNOTSUPP;
852 }
853
854 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
855 printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
856 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
857 return -EOVERFLOW;
858 }
859 /*
860 * Check to see if any active $FABRIC_MOD exports exist. If they
861 * do exist, fail here as changing this information on the fly
862 * (underneath the initiator side OS dependent multipath code)
863 * could cause negative effects.
864 */
865 dev = su_dev->se_dev_ptr;
866 if ((dev)) {
867 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
868 printk(KERN_ERR "Unable to set VPD Unit Serial while"
869 " active %d $FABRIC_MOD exports exist\n",
870 atomic_read(&dev->dev_export_obj.obj_access_count));
871 return -EINVAL;
872 }
873 }
874 /*
875 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
876 *
877 * Also, strip any newline added from the userspace
878 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
879 */
880 memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
881 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
882 snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
883 "%s", strstrip(buf));
884 su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
885
886 printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
887 " %s\n", su_dev->t10_wwn.unit_serial);
888
889 return count;
890}
891
892SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
893
894/*
895 * VPD page 0x83 Protocol Identifier
896 */
897static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
898 struct t10_wwn *t10_wwn,
899 char *page)
900{
901 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
902 struct se_device *dev;
903 struct t10_vpd *vpd;
904 unsigned char buf[VPD_TMP_BUF_SIZE];
905 ssize_t len = 0;
906
907 dev = se_dev->se_dev_ptr;
908 if (!(dev))
909 return -ENODEV;
910
911 memset(buf, 0, VPD_TMP_BUF_SIZE);
912
913 spin_lock(&t10_wwn->t10_vpd_lock);
914 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
915 if (!(vpd->protocol_identifier_set))
916 continue;
917
918 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
919
920 if ((len + strlen(buf) >= PAGE_SIZE))
921 break;
922
923 len += sprintf(page+len, "%s", buf);
924 }
925 spin_unlock(&t10_wwn->t10_vpd_lock);
926
927 return len;
928}
929
930static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
931 struct t10_wwn *t10_wwn,
932 const char *page,
933 size_t count)
934{
935 return -ENOSYS;
936}
937
938SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
939
940/*
941 * Generic wrapper for dumping VPD identifiers by association.
942 */
943#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
944static ssize_t target_core_dev_wwn_show_attr_##_name( \
945 struct t10_wwn *t10_wwn, \
946 char *page) \
947{ \
948 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
949 struct se_device *dev; \
950 struct t10_vpd *vpd; \
951 unsigned char buf[VPD_TMP_BUF_SIZE]; \
952 ssize_t len = 0; \
953 \
954 dev = se_dev->se_dev_ptr; \
955 if (!(dev)) \
956 return -ENODEV; \
957 \
958 spin_lock(&t10_wwn->t10_vpd_lock); \
959 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
960 if (vpd->association != _assoc) \
961 continue; \
962 \
963 memset(buf, 0, VPD_TMP_BUF_SIZE); \
964 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
965 if ((len + strlen(buf) >= PAGE_SIZE)) \
966 break; \
967 len += sprintf(page+len, "%s", buf); \
968 \
969 memset(buf, 0, VPD_TMP_BUF_SIZE); \
970 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
971 if ((len + strlen(buf) >= PAGE_SIZE)) \
972 break; \
973 len += sprintf(page+len, "%s", buf); \
974 \
975 memset(buf, 0, VPD_TMP_BUF_SIZE); \
976 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
977 if ((len + strlen(buf) >= PAGE_SIZE)) \
978 break; \
979 len += sprintf(page+len, "%s", buf); \
980 } \
981 spin_unlock(&t10_wwn->t10_vpd_lock); \
982 \
983 return len; \
984}
985
986/*
987 * VPD page 0x83 Association: Logical Unit
988 */
989DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
990
991static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
992 struct t10_wwn *t10_wwn,
993 const char *page,
994 size_t count)
995{
996 return -ENOSYS;
997}
998
999SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
1000
1001/*
1002 * VPD page 0x83 Association: Target Port
1003 */
1004DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
1005
1006static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
1007 struct t10_wwn *t10_wwn,
1008 const char *page,
1009 size_t count)
1010{
1011 return -ENOSYS;
1012}
1013
1014SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
1015
1016/*
1017 * VPD page 0x83 Association: SCSI Target Device
1018 */
1019DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
1020
1021static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
1022 struct t10_wwn *t10_wwn,
1023 const char *page,
1024 size_t count)
1025{
1026 return -ENOSYS;
1027}
1028
1029SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
1030
1031CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
1032
1033static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
1034 &target_core_dev_wwn_vpd_unit_serial.attr,
1035 &target_core_dev_wwn_vpd_protocol_identifier.attr,
1036 &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
1037 &target_core_dev_wwn_vpd_assoc_target_port.attr,
1038 &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
1039 NULL,
1040};
1041
1042static struct configfs_item_operations target_core_dev_wwn_ops = {
1043 .show_attribute = target_core_dev_wwn_attr_show,
1044 .store_attribute = target_core_dev_wwn_attr_store,
1045};
1046
1047static struct config_item_type target_core_dev_wwn_cit = {
1048 .ct_item_ops = &target_core_dev_wwn_ops,
1049 .ct_attrs = target_core_dev_wwn_attrs,
1050 .ct_owner = THIS_MODULE,
1051};
1052
1053/* End functions for struct config_item_type target_core_dev_wwn_cit */
1054
1055/* Start functions for struct config_item_type target_core_dev_pr_cit */
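
/*
 * The attributes in this group expose the SPC-2/SPC-3 reservation state
 * of an enabled storage object under its pr/ group, e.g. (illustrative
 * paths, following the convention used elsewhere in this file):
 *
 *	cat $TARGET/$HBA/$STORAGE_OBJECT/pr/res_holder
 *	cat $TARGET/$HBA/$STORAGE_OBJECT/pr/res_pr_registered_i_pts
 */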
1056
1057CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
1058#define SE_DEV_PR_ATTR(_name, _mode) \
1059static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1060 __CONFIGFS_EATTR(_name, _mode, \
1061 target_core_dev_pr_show_attr_##_name, \
1062 target_core_dev_pr_store_attr_##_name);
1063
1064#define SE_DEV_PR_ATTR_RO(_name) \
1065static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1066 __CONFIGFS_EATTR_RO(_name, \
1067 target_core_dev_pr_show_attr_##_name);
1068
1069/*
1070 * res_holder
1071 */
1072static ssize_t target_core_dev_pr_show_spc3_res(
1073 struct se_device *dev,
1074 char *page,
1075 ssize_t *len)
1076{
1077 struct se_node_acl *se_nacl;
1078 struct t10_pr_registration *pr_reg;
1079 char i_buf[PR_REG_ISID_ID_LEN];
1080 int prf_isid;
1081
1082 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1083
1084 spin_lock(&dev->dev_reservation_lock);
1085 pr_reg = dev->dev_pr_res_holder;
1086 if (!(pr_reg)) {
1087 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
1088 spin_unlock(&dev->dev_reservation_lock);
1089 return *len;
1090 }
1091 se_nacl = pr_reg->pr_reg_nacl;
1092 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1093 PR_REG_ISID_ID_LEN);
1094
1095 *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
1096 TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
1097 se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
1098 spin_unlock(&dev->dev_reservation_lock);
1099
1100 return *len;
1101}
1102
1103static ssize_t target_core_dev_pr_show_spc2_res(
1104 struct se_device *dev,
1105 char *page,
1106 ssize_t *len)
1107{
1108 struct se_node_acl *se_nacl;
1109
1110 spin_lock(&dev->dev_reservation_lock);
1111 se_nacl = dev->dev_reserved_node_acl;
1112 if (!(se_nacl)) {
1113 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
1114 spin_unlock(&dev->dev_reservation_lock);
1115 return *len;
1116 }
1117 *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
1118 TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
1119 se_nacl->initiatorname);
1120 spin_unlock(&dev->dev_reservation_lock);
1121
1122 return *len;
1123}
1124
1125static ssize_t target_core_dev_pr_show_attr_res_holder(
1126 struct se_subsystem_dev *su_dev,
1127 char *page)
1128{
1129 ssize_t len = 0;
1130
1131 if (!(su_dev->se_dev_ptr))
1132 return -ENODEV;
1133
1134 switch (T10_RES(su_dev)->res_type) {
1135 case SPC3_PERSISTENT_RESERVATIONS:
1136 target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
1137 page, &len);
1138 break;
1139 case SPC2_RESERVATIONS:
1140 target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
1141 page, &len);
1142 break;
1143 case SPC_PASSTHROUGH:
1144 len += sprintf(page+len, "Passthrough\n");
1145 break;
1146 default:
1147 len += sprintf(page+len, "Unknown\n");
1148 break;
1149 }
1150
1151 return len;
1152}
1153
1154SE_DEV_PR_ATTR_RO(res_holder);
1155
1156/*
1157 * res_pr_all_tgt_pts
1158 */
1159static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1160 struct se_subsystem_dev *su_dev,
1161 char *page)
1162{
1163 struct se_device *dev;
1164 struct t10_pr_registration *pr_reg;
1165 ssize_t len = 0;
1166
1167 dev = su_dev->se_dev_ptr;
1168 if (!(dev))
1169 return -ENODEV;
1170
1171 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1172 return len;
1173
1174 spin_lock(&dev->dev_reservation_lock);
1175 pr_reg = dev->dev_pr_res_holder;
1176 if (!(pr_reg)) {
1177 len = sprintf(page, "No SPC-3 Reservation holder\n");
1178 spin_unlock(&dev->dev_reservation_lock);
1179 return len;
1180 }
1181 /*
1182	 * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
1183	 * Basic PERSISTENT RESERVE OUT parameter list, page 290
1184 */
1185 if (pr_reg->pr_reg_all_tg_pt)
1186 len = sprintf(page, "SPC-3 Reservation: All Target"
1187 " Ports registration\n");
1188 else
1189 len = sprintf(page, "SPC-3 Reservation: Single"
1190 " Target Port registration\n");
1191 spin_unlock(&dev->dev_reservation_lock);
1192
1193 return len;
1194}
1195
1196SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
1197
1198/*
1199 * res_pr_generation
1200 */
1201static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
1202 struct se_subsystem_dev *su_dev,
1203 char *page)
1204{
1205 if (!(su_dev->se_dev_ptr))
1206 return -ENODEV;
1207
1208 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1209 return 0;
1210
1211 return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
1212}
1213
1214SE_DEV_PR_ATTR_RO(res_pr_generation);
1215
1216/*
1217 * res_pr_holder_tg_port
1218 */
1219static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1220 struct se_subsystem_dev *su_dev,
1221 char *page)
1222{
1223 struct se_device *dev;
1224 struct se_node_acl *se_nacl;
1225 struct se_lun *lun;
1226 struct se_portal_group *se_tpg;
1227 struct t10_pr_registration *pr_reg;
1228 struct target_core_fabric_ops *tfo;
1229 ssize_t len = 0;
1230
1231 dev = su_dev->se_dev_ptr;
1232 if (!(dev))
1233 return -ENODEV;
1234
1235 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1236 return len;
1237
1238 spin_lock(&dev->dev_reservation_lock);
1239 pr_reg = dev->dev_pr_res_holder;
1240 if (!(pr_reg)) {
1241 len = sprintf(page, "No SPC-3 Reservation holder\n");
1242 spin_unlock(&dev->dev_reservation_lock);
1243 return len;
1244 }
1245 se_nacl = pr_reg->pr_reg_nacl;
1246 se_tpg = se_nacl->se_tpg;
1247 lun = pr_reg->pr_reg_tg_pt_lun;
1248 tfo = TPG_TFO(se_tpg);
1249
1250 len += sprintf(page+len, "SPC-3 Reservation: %s"
1251 " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
1252 tfo->tpg_get_wwn(se_tpg));
1253 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1254		" Identifier Tag: %hu %s Portal Group Tag: %hu"
1255 " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
1256 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
1257 tfo->get_fabric_name(), lun->unpacked_lun);
1258 spin_unlock(&dev->dev_reservation_lock);
1259
1260 return len;
1261}
1262
1263SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
1264
1265/*
1266 * res_pr_registered_i_pts
1267 */
1268static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1269 struct se_subsystem_dev *su_dev,
1270 char *page)
1271{
1272 struct target_core_fabric_ops *tfo;
1273 struct t10_pr_registration *pr_reg;
1274 unsigned char buf[384];
1275 char i_buf[PR_REG_ISID_ID_LEN];
1276 ssize_t len = 0;
1277 int reg_count = 0, prf_isid;
1278
1279 if (!(su_dev->se_dev_ptr))
1280 return -ENODEV;
1281
1282 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1283 return len;
1284
1285 len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1286
1287 spin_lock(&T10_RES(su_dev)->registration_lock);
1288 list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
1289 pr_reg_list) {
1290
1291 memset(buf, 0, 384);
1292 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1293 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1294 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1295 PR_REG_ISID_ID_LEN);
1296 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1297 tfo->get_fabric_name(),
1298 pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
1299 &i_buf[0] : "", pr_reg->pr_res_key,
1300 pr_reg->pr_res_generation);
1301
1302 if ((len + strlen(buf) >= PAGE_SIZE))
1303 break;
1304
1305 len += sprintf(page+len, "%s", buf);
1306 reg_count++;
1307 }
1308 spin_unlock(&T10_RES(su_dev)->registration_lock);
1309
1310 if (!(reg_count))
1311 len += sprintf(page+len, "None\n");
1312
1313 return len;
1314}
1315
1316SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
1317
1318/*
1319 * res_pr_type
1320 */
1321static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1322 struct se_subsystem_dev *su_dev,
1323 char *page)
1324{
1325 struct se_device *dev;
1326 struct t10_pr_registration *pr_reg;
1327 ssize_t len = 0;
1328
1329 dev = su_dev->se_dev_ptr;
1330 if (!(dev))
1331 return -ENODEV;
1332
1333 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1334 return len;
1335
1336 spin_lock(&dev->dev_reservation_lock);
1337 pr_reg = dev->dev_pr_res_holder;
1338 if (!(pr_reg)) {
1339 len = sprintf(page, "No SPC-3 Reservation holder\n");
1340 spin_unlock(&dev->dev_reservation_lock);
1341 return len;
1342 }
1343 len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1344 core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1345 spin_unlock(&dev->dev_reservation_lock);
1346
1347 return len;
1348}
1349
1350SE_DEV_PR_ATTR_RO(res_pr_type);
1351
1352/*
1353 * res_type
1354 */
1355static ssize_t target_core_dev_pr_show_attr_res_type(
1356 struct se_subsystem_dev *su_dev,
1357 char *page)
1358{
1359 ssize_t len = 0;
1360
1361 if (!(su_dev->se_dev_ptr))
1362 return -ENODEV;
1363
1364 switch (T10_RES(su_dev)->res_type) {
1365 case SPC3_PERSISTENT_RESERVATIONS:
1366 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1367 break;
1368 case SPC2_RESERVATIONS:
1369 len = sprintf(page, "SPC2_RESERVATIONS\n");
1370 break;
1371 case SPC_PASSTHROUGH:
1372 len = sprintf(page, "SPC_PASSTHROUGH\n");
1373 break;
1374 default:
1375 len = sprintf(page, "UNKNOWN\n");
1376 break;
1377 }
1378
1379 return len;
1380}
1381
1382SE_DEV_PR_ATTR_RO(res_type);
1383
1384/*
1385 * res_aptpl_active
1386 */
1387
1388static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
1389 struct se_subsystem_dev *su_dev,
1390 char *page)
1391{
1392 if (!(su_dev->se_dev_ptr))
1393 return -ENODEV;
1394
1395 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1396 return 0;
1397
1398 return sprintf(page, "APTPL Bit Status: %s\n",
1399 (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
1400}
1401
1402SE_DEV_PR_ATTR_RO(res_aptpl_active);
1403
1404/*
1405 * res_aptpl_metadata
1406 */
1407static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
1408 struct se_subsystem_dev *su_dev,
1409 char *page)
1410{
1411 if (!(su_dev->se_dev_ptr))
1412 return -ENODEV;
1413
1414 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1415 return 0;
1416
1417 return sprintf(page, "Ready to process PR APTPL metadata..\n");
1418}
1419
1420enum {
1421 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
1422 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
1423 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
1424 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
1425};
1426
1427static match_table_t tokens = {
1428 {Opt_initiator_fabric, "initiator_fabric=%s"},
1429 {Opt_initiator_node, "initiator_node=%s"},
1430 {Opt_initiator_sid, "initiator_sid=%s"},
1431 {Opt_sa_res_key, "sa_res_key=%s"},
1432 {Opt_res_holder, "res_holder=%d"},
1433 {Opt_res_type, "res_type=%d"},
1434 {Opt_res_scope, "res_scope=%d"},
1435 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
1436 {Opt_mapped_lun, "mapped_lun=%d"},
1437 {Opt_target_fabric, "target_fabric=%s"},
1438 {Opt_target_node, "target_node=%s"},
1439 {Opt_tpgt, "tpgt=%d"},
1440 {Opt_port_rtpi, "port_rtpi=%d"},
1441 {Opt_target_lun, "target_lun=%d"},
1442 {Opt_err, NULL}
1443};
1444
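/*
 * Illustrative res_aptpl_metadata write handled by the parser below (all
 * values are hypothetical, and the write is a single comma-separated line
 * in practice; it is wrapped here only for readability).  initiator_node=,
 * target_node= and a non-zero sa_res_key= are required:
 *
 *	echo "initiator_fabric=iSCSI,initiator_node=iqn.1993-08.org.debian:01:abc,
 *	      sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,
 *	      target_fabric=iSCSI,target_node=iqn.2003-01.org.example:tgt1,
 *	      tpgt=1,port_rtpi=1,target_lun=0" \
 *		> $TARGET/$HBA/$STORAGE_OBJECT/pr/res_aptpl_metadata
 */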
1445static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1446 struct se_subsystem_dev *su_dev,
1447 const char *page,
1448 size_t count)
1449{
1450 struct se_device *dev;
1451 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1452 unsigned char *t_fabric = NULL, *t_port = NULL;
1453 char *orig, *ptr, *arg_p, *opts;
1454 substring_t args[MAX_OPT_ARGS];
1455 unsigned long long tmp_ll;
1456 u64 sa_res_key = 0;
1457 u32 mapped_lun = 0, target_lun = 0;
1458 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
1459 u16 port_rpti = 0, tpgt = 0;
1460 u8 type = 0, scope;
1461
1462 dev = su_dev->se_dev_ptr;
1463 if (!(dev))
1464 return -ENODEV;
1465
1466 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1467 return 0;
1468
1469 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1470		printk(KERN_ERR "Unable to process APTPL metadata while"
1471 " active fabric exports exist\n");
1472 return -EINVAL;
1473 }
1474
1475 opts = kstrdup(page, GFP_KERNEL);
1476 if (!opts)
1477 return -ENOMEM;
1478
1479 orig = opts;
1480 while ((ptr = strsep(&opts, ",")) != NULL) {
1481 if (!*ptr)
1482 continue;
1483
1484 token = match_token(ptr, tokens, args);
1485 switch (token) {
1486 case Opt_initiator_fabric:
1487 i_fabric = match_strdup(&args[0]);
1488 if (!i_fabric) {
1489 ret = -ENOMEM;
1490 goto out;
1491 }
1492 break;
1493 case Opt_initiator_node:
1494 i_port = match_strdup(&args[0]);
1495 if (!i_port) {
1496 ret = -ENOMEM;
1497 goto out;
1498 }
1499 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
1500 printk(KERN_ERR "APTPL metadata initiator_node="
1501 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1502 PR_APTPL_MAX_IPORT_LEN);
1503 ret = -EINVAL;
1504 break;
1505 }
1506 break;
1507 case Opt_initiator_sid:
1508 isid = match_strdup(&args[0]);
1509 if (!isid) {
1510 ret = -ENOMEM;
1511 goto out;
1512 }
1513 if (strlen(isid) >= PR_REG_ISID_LEN) {
1514				printk(KERN_ERR "APTPL metadata initiator_sid"
1515 "= exceeds PR_REG_ISID_LEN: %d\n",
1516 PR_REG_ISID_LEN);
1517 ret = -EINVAL;
1518 break;
1519 }
1520 break;
1521 case Opt_sa_res_key:
1522 arg_p = match_strdup(&args[0]);
1523 if (!arg_p) {
1524 ret = -ENOMEM;
1525 goto out;
1526 }
1527 ret = strict_strtoull(arg_p, 0, &tmp_ll);
1528 if (ret < 0) {
1529 printk(KERN_ERR "strict_strtoull() failed for"
1530 " sa_res_key=\n");
1531 goto out;
1532 }
1533 sa_res_key = (u64)tmp_ll;
1534 break;
1535 /*
1536 * PR APTPL Metadata for Reservation
1537 */
1538 case Opt_res_holder:
1539 match_int(args, &arg);
1540 res_holder = arg;
1541 break;
1542 case Opt_res_type:
1543 match_int(args, &arg);
1544 type = (u8)arg;
1545 break;
1546 case Opt_res_scope:
1547 match_int(args, &arg);
1548 scope = (u8)arg;
1549 break;
1550 case Opt_res_all_tg_pt:
1551 match_int(args, &arg);
1552 all_tg_pt = (int)arg;
1553 break;
1554 case Opt_mapped_lun:
1555 match_int(args, &arg);
1556 mapped_lun = (u32)arg;
1557 break;
1558 /*
1559 * PR APTPL Metadata for Target Port
1560 */
1561 case Opt_target_fabric:
1562 t_fabric = match_strdup(&args[0]);
1563 if (!t_fabric) {
1564 ret = -ENOMEM;
1565 goto out;
1566 }
1567 break;
1568 case Opt_target_node:
1569 t_port = match_strdup(&args[0]);
1570 if (!t_port) {
1571 ret = -ENOMEM;
1572 goto out;
1573 }
1574 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
1575 printk(KERN_ERR "APTPL metadata target_node="
1576 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1577 PR_APTPL_MAX_TPORT_LEN);
1578 ret = -EINVAL;
1579 break;
1580 }
1581 break;
1582 case Opt_tpgt:
1583 match_int(args, &arg);
1584 tpgt = (u16)arg;
1585 break;
1586 case Opt_port_rtpi:
1587 match_int(args, &arg);
1588 port_rpti = (u16)arg;
1589 break;
1590 case Opt_target_lun:
1591 match_int(args, &arg);
1592 target_lun = (u32)arg;
1593 break;
1594 default:
1595 break;
1596 }
1597 }
1598
1599 if (!(i_port) || !(t_port) || !(sa_res_key)) {
1600 printk(KERN_ERR "Illegal parameters for APTPL registration\n");
1601 ret = -EINVAL;
1602 goto out;
1603 }
1604
1605 if (res_holder && !(type)) {
1606 printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
1607 " holder\n", type);
1608 ret = -EINVAL;
1609 goto out;
1610 }
1611
1612 ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
1613 i_port, isid, mapped_lun, t_port, tpgt, target_lun,
1614 res_holder, all_tg_pt, type);
1615out:
1616 kfree(i_fabric);
1617 kfree(i_port);
1618 kfree(isid);
1619 kfree(t_fabric);
1620 kfree(t_port);
1621 kfree(orig);
1622 return (ret == 0) ? count : ret;
1623}
1624
1625SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
1626
1627CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
1628
1629static struct configfs_attribute *target_core_dev_pr_attrs[] = {
1630 &target_core_dev_pr_res_holder.attr,
1631 &target_core_dev_pr_res_pr_all_tgt_pts.attr,
1632 &target_core_dev_pr_res_pr_generation.attr,
1633 &target_core_dev_pr_res_pr_holder_tg_port.attr,
1634 &target_core_dev_pr_res_pr_registered_i_pts.attr,
1635 &target_core_dev_pr_res_pr_type.attr,
1636 &target_core_dev_pr_res_type.attr,
1637 &target_core_dev_pr_res_aptpl_active.attr,
1638 &target_core_dev_pr_res_aptpl_metadata.attr,
1639 NULL,
1640};
1641
1642static struct configfs_item_operations target_core_dev_pr_ops = {
1643 .show_attribute = target_core_dev_pr_attr_show,
1644 .store_attribute = target_core_dev_pr_attr_store,
1645};
1646
1647static struct config_item_type target_core_dev_pr_cit = {
1648 .ct_item_ops = &target_core_dev_pr_ops,
1649 .ct_attrs = target_core_dev_pr_attrs,
1650 .ct_owner = THIS_MODULE,
1651};
1652
1653/* End functions for struct config_item_type target_core_dev_pr_cit */
1654
1655/* Start functions for struct config_item_type target_core_dev_cit */
1656
1657static ssize_t target_core_show_dev_info(void *p, char *page)
1658{
1659 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1660 struct se_hba *hba = se_dev->se_dev_hba;
1661 struct se_subsystem_api *t = hba->transport;
1662 int bl = 0;
1663 ssize_t read_bytes = 0;
1664
1665 if (!(se_dev->se_dev_ptr))
1666 return -ENODEV;
1667
1668 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
1669 read_bytes += bl;
1670 read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
1671 return read_bytes;
1672}
1673
1674static struct target_core_configfs_attribute target_core_attr_dev_info = {
1675 .attr = { .ca_owner = THIS_MODULE,
1676 .ca_name = "info",
1677 .ca_mode = S_IRUGO },
1678 .show = target_core_show_dev_info,
1679 .store = NULL,
1680};
1681
1682static ssize_t target_core_store_dev_control(
1683 void *p,
1684 const char *page,
1685 size_t count)
1686{
1687 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1688 struct se_hba *hba = se_dev->se_dev_hba;
1689 struct se_subsystem_api *t = hba->transport;
1690
1691 if (!(se_dev->se_dev_su_ptr)) {
1692		printk(KERN_ERR "Unable to locate struct se_subsystem_dev->se"
1693 "_dev_su_ptr\n");
1694 return -EINVAL;
1695 }
1696
1697 return t->set_configfs_dev_params(hba, se_dev, page, count);
1698}
1699
1700static struct target_core_configfs_attribute target_core_attr_dev_control = {
1701 .attr = { .ca_owner = THIS_MODULE,
1702 .ca_name = "control",
1703 .ca_mode = S_IWUSR },
1704 .show = NULL,
1705 .store = target_core_store_dev_control,
1706};
1707
1708static ssize_t target_core_show_dev_alias(void *p, char *page)
1709{
1710 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1711
1712 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
1713 return 0;
1714
1715 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
1716}
1717
1718static ssize_t target_core_store_dev_alias(
1719 void *p,
1720 const char *page,
1721 size_t count)
1722{
1723 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1724 struct se_hba *hba = se_dev->se_dev_hba;
1725 ssize_t read_bytes;
1726
1727 if (count > (SE_DEV_ALIAS_LEN-1)) {
1728 printk(KERN_ERR "alias count: %d exceeds"
1729 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
1730 SE_DEV_ALIAS_LEN-1);
1731 return -EINVAL;
1732 }
1733
1734 se_dev->su_dev_flags |= SDF_USING_ALIAS;
1735 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1736 "%s", page);
1737
1738 printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
1739 config_item_name(&hba->hba_group.cg_item),
1740 config_item_name(&se_dev->se_dev_group.cg_item),
1741 se_dev->se_dev_alias);
1742
1743 return read_bytes;
1744}
1745
1746static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1747 .attr = { .ca_owner = THIS_MODULE,
1748 .ca_name = "alias",
1749 .ca_mode = S_IRUGO | S_IWUSR },
1750 .show = target_core_show_dev_alias,
1751 .store = target_core_store_dev_alias,
1752};
1753
1754static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1755{
1756 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1757
1758 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
1759 return 0;
1760
1761 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
1762}
1763
1764static ssize_t target_core_store_dev_udev_path(
1765 void *p,
1766 const char *page,
1767 size_t count)
1768{
1769 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1770 struct se_hba *hba = se_dev->se_dev_hba;
1771 ssize_t read_bytes;
1772
1773 if (count > (SE_UDEV_PATH_LEN-1)) {
1774 printk(KERN_ERR "udev_path count: %d exceeds"
1775 " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
1776 SE_UDEV_PATH_LEN-1);
1777 return -EINVAL;
1778 }
1779
1780 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
1781 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1782 "%s", page);
1783
1784 printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1785 config_item_name(&hba->hba_group.cg_item),
1786 config_item_name(&se_dev->se_dev_group.cg_item),
1787 se_dev->se_dev_udev_path);
1788
1789 return read_bytes;
1790}
1791
1792static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
1793 .attr = { .ca_owner = THIS_MODULE,
1794 .ca_name = "udev_path",
1795 .ca_mode = S_IRUGO | S_IWUSR },
1796 .show = target_core_show_dev_udev_path,
1797 .store = target_core_store_dev_udev_path,
1798};
1799
1800static ssize_t target_core_store_dev_enable(
1801 void *p,
1802 const char *page,
1803 size_t count)
1804{
1805 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1806 struct se_device *dev;
1807 struct se_hba *hba = se_dev->se_dev_hba;
1808 struct se_subsystem_api *t = hba->transport;
1809 char *ptr;
1810
1811 ptr = strstr(page, "1");
1812 if (!(ptr)) {
1813 printk(KERN_ERR "For dev_enable ops, only valid value"
1814 " is \"1\"\n");
1815 return -EINVAL;
1816 }
1817 if ((se_dev->se_dev_ptr)) {
1818 printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
1819 " object\n");
1820 return -EEXIST;
1821 }
1822
1823 if (t->check_configfs_dev_params(hba, se_dev) < 0)
1824 return -EINVAL;
1825
1826 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1827 if (IS_ERR(dev))
1828 return PTR_ERR(dev);
1829 else if (!dev)
1830 return -EINVAL;
1831
1832 se_dev->se_dev_ptr = dev;
1833 printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1834 " %p\n", se_dev->se_dev_ptr);
1835
1836 return count;
1837}
1838
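/*
 * Illustrative storage object bring-up (the paths and the udev_path value
 * are assumptions; the control string is parsed by the subsystem plugin's
 * set_configfs_dev_params(), and the final write lands in
 * target_core_store_dev_enable() above):
 *
 *	mkdir -p $TARGET/$HBA/$STORAGE_OBJECT
 *	echo "udev_path=/dev/sdb" > $TARGET/$HBA/$STORAGE_OBJECT/control
 *	echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/enable
 */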
1839static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1840 .attr = { .ca_owner = THIS_MODULE,
1841 .ca_name = "enable",
1842 .ca_mode = S_IWUSR },
1843 .show = NULL,
1844 .store = target_core_store_dev_enable,
1845};
1846
1847static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1848{
1849 struct se_device *dev;
1850 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1851 struct config_item *lu_ci;
1852 struct t10_alua_lu_gp *lu_gp;
1853 struct t10_alua_lu_gp_member *lu_gp_mem;
1854 ssize_t len = 0;
1855
1856 dev = su_dev->se_dev_ptr;
1857 if (!(dev))
1858 return -ENODEV;
1859
1860 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
1861 return len;
1862
1863 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1864 if (!(lu_gp_mem)) {
1865 printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
1866 " pointer\n");
1867 return -EINVAL;
1868 }
1869
1870 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1871 lu_gp = lu_gp_mem->lu_gp;
1872 if ((lu_gp)) {
1873 lu_ci = &lu_gp->lu_gp_group.cg_item;
1874 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
1875 config_item_name(lu_ci), lu_gp->lu_gp_id);
1876 }
1877 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1878
1879 return len;
1880}
1881
1882static ssize_t target_core_store_alua_lu_gp(
1883 void *p,
1884 const char *page,
1885 size_t count)
1886{
1887 struct se_device *dev;
1888 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1889 struct se_hba *hba = su_dev->se_dev_hba;
1890 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1891 struct t10_alua_lu_gp_member *lu_gp_mem;
1892 unsigned char buf[LU_GROUP_NAME_BUF];
1893 int move = 0;
1894
1895 dev = su_dev->se_dev_ptr;
1896 if (!(dev))
1897 return -ENODEV;
1898
1899 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
1900 printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1901 config_item_name(&hba->hba_group.cg_item),
1902 config_item_name(&su_dev->se_dev_group.cg_item));
1903 return -EINVAL;
1904 }
1905 if (count > LU_GROUP_NAME_BUF) {
1906 printk(KERN_ERR "ALUA LU Group Alias too large!\n");
1907 return -EINVAL;
1908 }
1909 memset(buf, 0, LU_GROUP_NAME_BUF);
1910 memcpy(buf, page, count);
1911 /*
1912 * Any ALUA logical unit alias besides "NULL" means we will be
1913 * making a new group association.
1914 */
1915 if (strcmp(strstrip(buf), "NULL")) {
1916 /*
1917 * core_alua_get_lu_gp_by_name() will increment reference to
1918 * struct t10_alua_lu_gp. This reference is released with
1919		 * core_alua_put_lu_gp_from_name() below.
1920 */
1921 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
1922 if (!(lu_gp_new))
1923 return -ENODEV;
1924 }
1925 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1926 if (!(lu_gp_mem)) {
1927 if (lu_gp_new)
1928 core_alua_put_lu_gp_from_name(lu_gp_new);
1929 printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
1930 " pointer\n");
1931 return -EINVAL;
1932 }
1933
1934 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1935 lu_gp = lu_gp_mem->lu_gp;
1936 if ((lu_gp)) {
1937 /*
1938 * Clearing an existing lu_gp association, and replacing
1939 * with NULL
1940 */
1941 if (!(lu_gp_new)) {
1942 printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
1943 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1944 " %hu\n",
1945 config_item_name(&hba->hba_group.cg_item),
1946 config_item_name(&su_dev->se_dev_group.cg_item),
1947 config_item_name(&lu_gp->lu_gp_group.cg_item),
1948 lu_gp->lu_gp_id);
1949
1950 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1951 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1952
1953 return count;
1954 }
1955 /*
1956 * Removing existing association of lu_gp_mem with lu_gp
1957 */
1958 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1959 move = 1;
1960 }
1961 /*
1962 * Associate lu_gp_mem with lu_gp_new.
1963 */
1964 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
1965 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1966
1967 printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
1968 " core/alua/lu_gps/%s, ID: %hu\n",
1969 (move) ? "Moving" : "Adding",
1970 config_item_name(&hba->hba_group.cg_item),
1971 config_item_name(&su_dev->se_dev_group.cg_item),
1972 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1973 lu_gp_new->lu_gp_id);
1974
1975 core_alua_put_lu_gp_from_name(lu_gp_new);
1976 return count;
1977}
1978
1979static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
1980 .attr = { .ca_owner = THIS_MODULE,
1981 .ca_name = "alua_lu_gp",
1982 .ca_mode = S_IRUGO | S_IWUSR },
1983 .show = target_core_show_alua_lu_gp,
1984 .store = target_core_store_alua_lu_gp,
1985};
1986
1987static struct configfs_attribute *lio_core_dev_attrs[] = {
1988 &target_core_attr_dev_info.attr,
1989 &target_core_attr_dev_control.attr,
1990 &target_core_attr_dev_alias.attr,
1991 &target_core_attr_dev_udev_path.attr,
1992 &target_core_attr_dev_enable.attr,
1993 &target_core_attr_dev_alua_lu_gp.attr,
1994 NULL,
1995};
1996
1997static void target_core_dev_release(struct config_item *item)
1998{
1999 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
2000 struct se_subsystem_dev, se_dev_group);
2001 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
2002 struct se_subsystem_api *t = hba->transport;
2003 struct config_group *dev_cg = &se_dev->se_dev_group;
2004
2005 kfree(dev_cg->default_groups);
2006 /*
2007	 * This pointer will be set when the storage object is enabled with:
2008	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
2009 */
2010 if (se_dev->se_dev_ptr) {
2011 printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
2012 "virtual_device() for se_dev_ptr: %p\n",
2013 se_dev->se_dev_ptr);
2014
2015 se_free_virtual_device(se_dev->se_dev_ptr, hba);
2016 } else {
2017 /*
2018 * Release struct se_subsystem_dev->se_dev_su_ptr..
2019 */
2020 printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
2021 "device() for se_dev_su_ptr: %p\n",
2022 se_dev->se_dev_su_ptr);
2023
2024 t->free_device(se_dev->se_dev_su_ptr);
2025 }
2026
2027 printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
2028 "_dev_t: %p\n", se_dev);
2029 kfree(se_dev);
2030}
2031
2032static ssize_t target_core_dev_show(struct config_item *item,
2033 struct configfs_attribute *attr,
2034 char *page)
2035{
2036 struct se_subsystem_dev *se_dev = container_of(
2037 to_config_group(item), struct se_subsystem_dev,
2038 se_dev_group);
2039 struct target_core_configfs_attribute *tc_attr = container_of(
2040 attr, struct target_core_configfs_attribute, attr);
2041
2042 if (!(tc_attr->show))
2043 return -EINVAL;
2044
2045 return tc_attr->show((void *)se_dev, page);
2046}
2047
2048static ssize_t target_core_dev_store(struct config_item *item,
2049 struct configfs_attribute *attr,
2050 const char *page, size_t count)
2051{
2052 struct se_subsystem_dev *se_dev = container_of(
2053 to_config_group(item), struct se_subsystem_dev,
2054 se_dev_group);
2055 struct target_core_configfs_attribute *tc_attr = container_of(
2056 attr, struct target_core_configfs_attribute, attr);
2057
2058 if (!(tc_attr->store))
2059 return -EINVAL;
2060
2061 return tc_attr->store((void *)se_dev, page, count);
2062}
2063
2064static struct configfs_item_operations target_core_dev_item_ops = {
2065 .release = target_core_dev_release,
2066 .show_attribute = target_core_dev_show,
2067 .store_attribute = target_core_dev_store,
2068};
2069
2070static struct config_item_type target_core_dev_cit = {
2071 .ct_item_ops = &target_core_dev_item_ops,
2072 .ct_attrs = lio_core_dev_attrs,
2073 .ct_owner = THIS_MODULE,
2074};
2075
2076/* End functions for struct config_item_type target_core_dev_cit */
2077
2078/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2079
2080CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
2081#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
2082static struct target_core_alua_lu_gp_attribute \
2083 target_core_alua_lu_gp_##_name = \
2084 __CONFIGFS_EATTR(_name, _mode, \
2085 target_core_alua_lu_gp_show_attr_##_name, \
2086 target_core_alua_lu_gp_store_attr_##_name);
2087
2088#define SE_DEV_ALUA_LU_ATTR_RO(_name) \
2089static struct target_core_alua_lu_gp_attribute \
2090 target_core_alua_lu_gp_##_name = \
2091 __CONFIGFS_EATTR_RO(_name, \
2092 target_core_alua_lu_gp_show_attr_##_name);
2093
2094/*
2095 * lu_gp_id
2096 */
2097static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
2098 struct t10_alua_lu_gp *lu_gp,
2099 char *page)
2100{
2101 if (!(lu_gp->lu_gp_valid_id))
2102 return 0;
2103
2104 return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2105}
2106
2107static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
2108 struct t10_alua_lu_gp *lu_gp,
2109 const char *page,
2110 size_t count)
2111{
2112 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
2113 unsigned long lu_gp_id;
2114 int ret;
2115
2116 ret = strict_strtoul(page, 0, &lu_gp_id);
2117 if (ret < 0) {
2118 printk(KERN_ERR "strict_strtoul() returned %d for"
2119 " lu_gp_id\n", ret);
2120 return -EINVAL;
2121 }
2122 if (lu_gp_id > 0x0000ffff) {
2123 printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
2124 " 0x0000ffff\n", lu_gp_id);
2125 return -EINVAL;
2126 }
2127
2128 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
2129 if (ret < 0)
2130 return -EINVAL;
2131
2132 printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
2133 " Group: core/alua/lu_gps/%s to ID: %hu\n",
2134 config_item_name(&alua_lu_gp_cg->cg_item),
2135 lu_gp->lu_gp_id);
2136
2137 return count;
2138}
2139
2140SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
2141
2142/*
2143 * members
2144 */
2145static ssize_t target_core_alua_lu_gp_show_attr_members(
2146 struct t10_alua_lu_gp *lu_gp,
2147 char *page)
2148{
2149 struct se_device *dev;
2150 struct se_hba *hba;
2151 struct se_subsystem_dev *su_dev;
2152 struct t10_alua_lu_gp_member *lu_gp_mem;
2153 ssize_t len = 0, cur_len;
2154 unsigned char buf[LU_GROUP_NAME_BUF];
2155
2156 memset(buf, 0, LU_GROUP_NAME_BUF);
2157
2158 spin_lock(&lu_gp->lu_gp_lock);
2159 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2160 dev = lu_gp_mem->lu_gp_mem_dev;
2161 su_dev = dev->se_sub_dev;
2162 hba = su_dev->se_dev_hba;
2163
2164 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
2165 config_item_name(&hba->hba_group.cg_item),
2166 config_item_name(&su_dev->se_dev_group.cg_item));
2167 cur_len++; /* Extra byte for NULL terminator */
2168
2169 if ((cur_len + len) > PAGE_SIZE) {
2170 printk(KERN_WARNING "Ran out of lu_gp_show_attr"
2171 "_members buffer\n");
2172 break;
2173 }
2174 memcpy(page+len, buf, cur_len);
2175 len += cur_len;
2176 }
2177 spin_unlock(&lu_gp->lu_gp_lock);
2178
2179 return len;
2180}
2181
2182SE_DEV_ALUA_LU_ATTR_RO(members);
2183
2184CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
2185
2186static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2187 &target_core_alua_lu_gp_lu_gp_id.attr,
2188 &target_core_alua_lu_gp_members.attr,
2189 NULL,
2190};
2191
2192static void target_core_alua_lu_gp_release(struct config_item *item)
2193{
2194 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2195 struct t10_alua_lu_gp, lu_gp_group);
2196
2197 core_alua_free_lu_gp(lu_gp);
2198}
2199
2200static struct configfs_item_operations target_core_alua_lu_gp_ops = {
2201 .release = target_core_alua_lu_gp_release,
2202 .show_attribute = target_core_alua_lu_gp_attr_show,
2203 .store_attribute = target_core_alua_lu_gp_attr_store,
2204};
2205
2206static struct config_item_type target_core_alua_lu_gp_cit = {
2207 .ct_item_ops = &target_core_alua_lu_gp_ops,
2208 .ct_attrs = target_core_alua_lu_gp_attrs,
2209 .ct_owner = THIS_MODULE,
2210};
2211
2212/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2213
2214/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2215
2216static struct config_group *target_core_alua_create_lu_gp(
2217 struct config_group *group,
2218 const char *name)
2219{
2220 struct t10_alua_lu_gp *lu_gp;
2221 struct config_group *alua_lu_gp_cg = NULL;
2222 struct config_item *alua_lu_gp_ci = NULL;
2223
2224 lu_gp = core_alua_allocate_lu_gp(name, 0);
2225 if (IS_ERR(lu_gp))
2226 return NULL;
2227
2228 alua_lu_gp_cg = &lu_gp->lu_gp_group;
2229 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2230
2231 config_group_init_type_name(alua_lu_gp_cg, name,
2232 &target_core_alua_lu_gp_cit);
2233
2234 printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2235 " Group: core/alua/lu_gps/%s\n",
2236 config_item_name(alua_lu_gp_ci));
2237
2238 return alua_lu_gp_cg;
2239
2240}
2241
2242static void target_core_alua_drop_lu_gp(
2243 struct config_group *group,
2244 struct config_item *item)
2245{
2246 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2247 struct t10_alua_lu_gp, lu_gp_group);
2248
2249 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2250 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2251 config_item_name(item), lu_gp->lu_gp_id);
2252 /*
2253 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2254 * -> target_core_alua_lu_gp_release()
2255 */
2256 config_item_put(item);
2257}
2258
2259static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
2260 .make_group = &target_core_alua_create_lu_gp,
2261 .drop_item = &target_core_alua_drop_lu_gp,
2262};
2263
2264static struct config_item_type target_core_alua_lu_gps_cit = {
2265 .ct_item_ops = NULL,
2266 .ct_group_ops = &target_core_alua_lu_gps_group_ops,
2267 .ct_owner = THIS_MODULE,
2268};
2269
2270/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
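
/*
 * Illustrative ALUA logical unit group lifecycle tying the two cits above
 * to the per-device alua_lu_gp attribute (paths are shown relative to the
 * configfs target root as in the log messages; the group name is an
 * assumption, and writing "NULL" drops the association again):
 *
 *	mkdir core/alua/lu_gps/my_lu_gp
 *	echo 1 > core/alua/lu_gps/my_lu_gp/lu_gp_id
 *	echo my_lu_gp > core/$HBA/$STORAGE_OBJECT/alua_lu_gp
 *	cat core/alua/lu_gps/my_lu_gp/members
 */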
2271
2272/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2273
2274CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
2275#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
2276static struct target_core_alua_tg_pt_gp_attribute \
2277 target_core_alua_tg_pt_gp_##_name = \
2278 __CONFIGFS_EATTR(_name, _mode, \
2279 target_core_alua_tg_pt_gp_show_attr_##_name, \
2280 target_core_alua_tg_pt_gp_store_attr_##_name);
2281
2282#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
2283static struct target_core_alua_tg_pt_gp_attribute \
2284 target_core_alua_tg_pt_gp_##_name = \
2285 __CONFIGFS_EATTR_RO(_name, \
2286 target_core_alua_tg_pt_gp_show_attr_##_name);
2287
2288/*
2289 * alua_access_state
2290 */
2291static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
2292 struct t10_alua_tg_pt_gp *tg_pt_gp,
2293 char *page)
2294{
2295 return sprintf(page, "%d\n",
2296 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
2297}
2298
2299static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2300 struct t10_alua_tg_pt_gp *tg_pt_gp,
2301 const char *page,
2302 size_t count)
2303{
2304 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
2305 unsigned long tmp;
2306 int new_state, ret;
2307
2308 if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
2309		printk(KERN_ERR "Unable to do implicit ALUA on non valid"
2310 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2311 return -EINVAL;
2312 }
2313
2314 ret = strict_strtoul(page, 0, &tmp);
2315 if (ret < 0) {
2316		printk(KERN_ERR "Unable to extract new ALUA access state from"
2317 " %s\n", page);
2318 return -EINVAL;
2319 }
2320 new_state = (int)tmp;
2321
2322 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
2323		printk(KERN_ERR "Unable to process implicit configfs ALUA"
2324			" transition while TPGS_IMPLICT_ALUA is disabled\n");
2325 return -EINVAL;
2326 }
2327
2328 ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
2329 NULL, NULL, new_state, 0);
2330 return (!ret) ? count : -EINVAL;
2331}
2332
2333SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
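
/*
 * A minimal sketch of driving an implicit ALUA transition from userspace.
 * The exact path layout and the numeric state values (taken from the
 * SPC-4 primary target port asymmetric access states, e.g. 0 =
 * Active/Optimized, 2 = Standby) are assumptions:
 *
 *	echo 2 > $TARGET/$HBA/$STORAGE_OBJECT/alua/$TG_PT_GP/alua_access_state
 */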
2334
2335/*
2336 * alua_access_status
2337 */
2338static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
2339 struct t10_alua_tg_pt_gp *tg_pt_gp,
2340 char *page)
2341{
2342 return sprintf(page, "%s\n",
2343 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2344}
2345
2346static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2347 struct t10_alua_tg_pt_gp *tg_pt_gp,
2348 const char *page,
2349 size_t count)
2350{
2351 unsigned long tmp;
2352 int new_status, ret;
2353
2354 if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
2355		printk(KERN_ERR "Unable to set ALUA access status on non"
2356 " valid tg_pt_gp ID: %hu\n",
2357 tg_pt_gp->tg_pt_gp_valid_id);
2358 return -EINVAL;
2359 }
2360
2361 ret = strict_strtoul(page, 0, &tmp);
2362 if (ret < 0) {
2363 printk(KERN_ERR "Unable to extract new ALUA access status"
2364 " from %s\n", page);
2365 return -EINVAL;
2366 }
2367 new_status = (int)tmp;
2368
2369 if ((new_status != ALUA_STATUS_NONE) &&
2370 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
2371 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
2372 printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
2373 new_status);
2374 return -EINVAL;
2375 }
2376
2377 tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2378 return count;
2379}
2380
2381SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
2382
2383/*
2384 * alua_access_type
2385 */
2386static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
2387 struct t10_alua_tg_pt_gp *tg_pt_gp,
2388 char *page)
2389{
2390 return core_alua_show_access_type(tg_pt_gp, page);
2391}
2392
2393static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2394 struct t10_alua_tg_pt_gp *tg_pt_gp,
2395 const char *page,
2396 size_t count)
2397{
2398 return core_alua_store_access_type(tg_pt_gp, page, count);
2399}
2400
2401SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2402
2403/*
2404 * alua_write_metadata
2405 */
2406static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
2407 struct t10_alua_tg_pt_gp *tg_pt_gp,
2408 char *page)
2409{
2410 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
2411}
2412
2413static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2414 struct t10_alua_tg_pt_gp *tg_pt_gp,
2415 const char *page,
2416 size_t count)
2417{
2418 unsigned long tmp;
2419 int ret;
2420
2421 ret = strict_strtoul(page, 0, &tmp);
2422 if (ret < 0) {
2423 printk(KERN_ERR "Unable to extract alua_write_metadata\n");
2424 return -EINVAL;
2425 }
2426
2427 if ((tmp != 0) && (tmp != 1)) {
2428 printk(KERN_ERR "Illegal value for alua_write_metadata:"
2429 " %lu\n", tmp);
2430 return -EINVAL;
2431 }
2432 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2433
2434 return count;
2435}
2436
2437SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
2438
2439
2440
2441/*
2442 * nonop_delay_msecs
2443 */
2444static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
2445 struct t10_alua_tg_pt_gp *tg_pt_gp,
2446 char *page)
2447{
2448 return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
2449
2450}
2451
2452static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
2453 struct t10_alua_tg_pt_gp *tg_pt_gp,
2454 const char *page,
2455 size_t count)
2456{
2457 return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
2458}
2459
2460SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
2461
2462/*
2463 * trans_delay_msecs
2464 */
2465static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
2466 struct t10_alua_tg_pt_gp *tg_pt_gp,
2467 char *page)
2468{
2469 return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
2470}
2471
2472static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2473 struct t10_alua_tg_pt_gp *tg_pt_gp,
2474 const char *page,
2475 size_t count)
2476{
2477 return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
2478}
2479
2480SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2481
2482/*
2483 * preferred
2484 */
2485
2486static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
2487 struct t10_alua_tg_pt_gp *tg_pt_gp,
2488 char *page)
2489{
2490 return core_alua_show_preferred_bit(tg_pt_gp, page);
2491}
2492
2493static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
2494 struct t10_alua_tg_pt_gp *tg_pt_gp,
2495 const char *page,
2496 size_t count)
2497{
2498 return core_alua_store_preferred_bit(tg_pt_gp, page, count);
2499}
2500
2501SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
2502
2503/*
2504 * tg_pt_gp_id
2505 */
2506static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2507 struct t10_alua_tg_pt_gp *tg_pt_gp,
2508 char *page)
2509{
2510 if (!(tg_pt_gp->tg_pt_gp_valid_id))
2511 return 0;
2512
2513 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2514}
2515
2516static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2517 struct t10_alua_tg_pt_gp *tg_pt_gp,
2518 const char *page,
2519 size_t count)
2520{
2521 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2522 unsigned long tg_pt_gp_id;
2523 int ret;
2524
2525 ret = strict_strtoul(page, 0, &tg_pt_gp_id);
2526 if (ret < 0) {
2527 printk(KERN_ERR "strict_strtoul() returned %d for"
2528 " tg_pt_gp_id\n", ret);
2529 return -EINVAL;
2530 }
2531 if (tg_pt_gp_id > 0x0000ffff) {
2532 printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
2533 " 0x0000ffff\n", tg_pt_gp_id);
2534 return -EINVAL;
2535 }
2536
2537 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
2538 if (ret < 0)
2539 return -EINVAL;
2540
2541 printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
2542 "core/alua/tg_pt_gps/%s to ID: %hu\n",
2543 config_item_name(&alua_tg_pt_gp_cg->cg_item),
2544 tg_pt_gp->tg_pt_gp_id);
2545
2546 return count;
2547}
2548
2549SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
2550
2551/*
2552 * members
2553 */
2554static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
2555 struct t10_alua_tg_pt_gp *tg_pt_gp,
2556 char *page)
2557{
2558 struct se_port *port;
2559 struct se_portal_group *tpg;
2560 struct se_lun *lun;
2561 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2562 ssize_t len = 0, cur_len;
2563 unsigned char buf[TG_PT_GROUP_NAME_BUF];
2564
2565 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2566
2567 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
2568 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
2569 tg_pt_gp_mem_list) {
2570 port = tg_pt_gp_mem->tg_pt;
2571 tpg = port->sep_tpg;
2572 lun = port->sep_lun;
2573
2574 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
2575 "/%s\n", TPG_TFO(tpg)->get_fabric_name(),
2576 TPG_TFO(tpg)->tpg_get_wwn(tpg),
2577 TPG_TFO(tpg)->tpg_get_tag(tpg),
2578 config_item_name(&lun->lun_group.cg_item));
2579 cur_len++; /* Extra byte for NULL terminator */
2580
2581 if ((cur_len + len) > PAGE_SIZE) {
2582			printk(KERN_WARNING "Ran out of tg_pt_gp_show_attr"
2583 "_members buffer\n");
2584 break;
2585 }
2586 memcpy(page+len, buf, cur_len);
2587 len += cur_len;
2588 }
2589 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
2590
2591 return len;
2592}
2593
2594SE_DEV_ALUA_TG_PT_ATTR_RO(members);
2595
2596CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
2597 tg_pt_gp_group);
2598
2599static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2600 &target_core_alua_tg_pt_gp_alua_access_state.attr,
2601 &target_core_alua_tg_pt_gp_alua_access_status.attr,
2602 &target_core_alua_tg_pt_gp_alua_access_type.attr,
2603 &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2604 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2605 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
2606 &target_core_alua_tg_pt_gp_preferred.attr,
2607 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2608 &target_core_alua_tg_pt_gp_members.attr,
2609 NULL,
2610};
2611
2612static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2613{
2614 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2615 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2616
2617 core_alua_free_tg_pt_gp(tg_pt_gp);
2618}
2619
2620static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
2621 .release = target_core_alua_tg_pt_gp_release,
2622 .show_attribute = target_core_alua_tg_pt_gp_attr_show,
2623 .store_attribute = target_core_alua_tg_pt_gp_attr_store,
2624};
2625
2626static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2627 .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
2628 .ct_attrs = target_core_alua_tg_pt_gp_attrs,
2629 .ct_owner = THIS_MODULE,
2630};
2631
2632/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2633
2634/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2635
2636static struct config_group *target_core_alua_create_tg_pt_gp(
2637 struct config_group *group,
2638 const char *name)
2639{
2640 struct t10_alua *alua = container_of(group, struct t10_alua,
2641 alua_tg_pt_gps_group);
2642 struct t10_alua_tg_pt_gp *tg_pt_gp;
2643 struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
2644 struct config_group *alua_tg_pt_gp_cg = NULL;
2645 struct config_item *alua_tg_pt_gp_ci = NULL;
2646
2647 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
2648 if (!(tg_pt_gp))
2649 return NULL;
2650
2651 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2652 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2653
2654 config_group_init_type_name(alua_tg_pt_gp_cg, name,
2655 &target_core_alua_tg_pt_gp_cit);
2656
2657 printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
2658 " Group: alua/tg_pt_gps/%s\n",
2659 config_item_name(alua_tg_pt_gp_ci));
2660
2661 return alua_tg_pt_gp_cg;
2662}
2663
2664static void target_core_alua_drop_tg_pt_gp(
2665 struct config_group *group,
2666 struct config_item *item)
2667{
2668 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2669 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2670
2671 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
2672 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2673 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2674 /*
2675 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2676 * -> target_core_alua_tg_pt_gp_release().
2677 */
2678 config_item_put(item);
2679}
2680
2681static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2682 .make_group = &target_core_alua_create_tg_pt_gp,
2683 .drop_item = &target_core_alua_drop_tg_pt_gp,
2684};
2685
2686static struct config_item_type target_core_alua_tg_pt_gps_cit = {
2687 .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
2688 .ct_owner = THIS_MODULE,
2689};
2690
2691/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2692
2693/* Start functions for struct config_item_type target_core_alua_cit */
2694
2695/*
2696 * target_core_alua_cit is a ConfigFS group that lives under
2697 * /sys/kernel/config/target/core/alua. The default group core/alua/lu_gps
2698 * is attached to target_core_alua_cit in target_core_init_configfs() below;
2699 * per-device ALUA target port groups live under core/$HBA/$DEV/alua/.
2700 */
2701static struct config_item_type target_core_alua_cit = {
2702 .ct_item_ops = NULL,
2703 .ct_attrs = NULL,
2704 .ct_owner = THIS_MODULE,
2705};
2706
2707/* End functions for struct config_item_type target_core_alua_cit */
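/*
 * Illustrative sketch (editor's note) of the directories these groups end up
 * creating, based on target_core_init_configfs() and target_core_make_subdev()
 * later in this file:
 *
 *   /sys/kernel/config/target/core/
 *   |-- alua/
 *   |   `-- lu_gps/
 *   |       `-- default_lu_gp/
 *   `-- $HBA/
 *       `-- $DEV/
 *           |-- attrib/
 *           |-- pr/
 *           |-- wwn/
 *           |-- statistics/
 *           `-- alua/
 *               `-- default_tg_pt_gp/
 */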
2708
2709/* Start functions for struct config_item_type target_core_stat_cit */
2710
2711static struct config_group *target_core_stat_mkdir(
2712 struct config_group *group,
2713 const char *name)
2714{
2715 return ERR_PTR(-ENOSYS);
2716}
2717
2718static void target_core_stat_rmdir(
2719 struct config_group *group,
2720 struct config_item *item)
2721{
2722 return;
2723}
2724
2725static struct configfs_group_operations target_core_stat_group_ops = {
2726 .make_group = &target_core_stat_mkdir,
2727 .drop_item = &target_core_stat_rmdir,
2728};
2729
2730static struct config_item_type target_core_stat_cit = {
2731 .ct_group_ops = &target_core_stat_group_ops,
2732 .ct_owner = THIS_MODULE,
2733};
2734
2735/* End functions for struct config_item_type target_core_stat_cit */
2736
2737/* Start functions for struct config_item_type target_core_hba_cit */
2738
2739static struct config_group *target_core_make_subdev(
2740 struct config_group *group,
2741 const char *name)
2742{
2743 struct t10_alua_tg_pt_gp *tg_pt_gp;
2744 struct se_subsystem_dev *se_dev;
2745 struct se_subsystem_api *t;
2746 struct config_item *hba_ci = &group->cg_item;
2747 struct se_hba *hba = item_to_hba(hba_ci);
2748 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2749 struct config_group *dev_stat_grp = NULL;
2750 int errno = -ENOMEM, ret;
2751
2752 ret = mutex_lock_interruptible(&hba->hba_access_mutex);
2753 if (ret)
2754 return ERR_PTR(ret);
2755 /*
2756 * Locate the struct se_subsystem_api from parent's struct se_hba.
2757 */
2758 t = hba->transport;
2759
2760 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
2761 if (!se_dev) {
2762 printk(KERN_ERR "Unable to allocate memory for"
2763 " struct se_subsystem_dev\n");
2764 goto unlock;
2765 }
2766 INIT_LIST_HEAD(&se_dev->g_se_dev_list);
2767 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2768 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2769 INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
2770 INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
2771 spin_lock_init(&se_dev->t10_reservation.registration_lock);
2772 spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
2773 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
2774 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
2775 spin_lock_init(&se_dev->se_dev_lock);
2776 se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
2777 se_dev->t10_wwn.t10_sub_dev = se_dev;
2778 se_dev->t10_alua.t10_sub_dev = se_dev;
2779 se_dev->se_dev_attrib.da_sub_dev = se_dev;
2780
2781 se_dev->se_dev_hba = hba;
2782 dev_cg = &se_dev->se_dev_group;
2783
2784 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
2785 GFP_KERNEL);
2786 if (!(dev_cg->default_groups))
2787 goto out;
2788 /*
2789	 * Set se_dev_su_ptr from the void pointer returned by the
2790	 * struct se_subsystem_api ->allocate_virtdevice() call.
2791	 *
2792	 * se_dev->se_dev_ptr will be set after ->create_virtdevice()
2793	 * has been called successfully at the next level up in the
2794	 * configfs tree, for the device object's struct config_group.
2795 */
2796 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2797 if (!(se_dev->se_dev_su_ptr)) {
2798 printk(KERN_ERR "Unable to locate subsystem dependent pointer"
2799 " from allocate_virtdevice()\n");
2800 goto out;
2801 }
2802 spin_lock(&se_global->g_device_lock);
2803 list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
2804 spin_unlock(&se_global->g_device_lock);
2805
2806 config_group_init_type_name(&se_dev->se_dev_group, name,
2807 &target_core_dev_cit);
2808 config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
2809 &target_core_dev_attrib_cit);
2810 config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
2811 &target_core_dev_pr_cit);
2812 config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
2813 &target_core_dev_wwn_cit);
2814 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
2815 "alua", &target_core_alua_tg_pt_gps_cit);
2816 config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
2817 "statistics", &target_core_stat_cit);
2818
2819 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
2820 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
2821 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
2822 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
2823 dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
2824 dev_cg->default_groups[5] = NULL;
2825 /*
2826 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2827 */
2828 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
2829 if (!(tg_pt_gp))
2830 goto out;
2831
2832 tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
2833 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
2834 GFP_KERNEL);
2835 if (!(tg_pt_gp_cg->default_groups)) {
2836 printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
2837 "default_groups\n");
2838 goto out;
2839 }
2840
2841 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2842 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2843 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2844 tg_pt_gp_cg->default_groups[1] = NULL;
2845 T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
2846 /*
2847 * Add core/$HBA/$DEV/statistics/ default groups
2848 */
2849 dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
2850 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
2851 GFP_KERNEL);
2852 if (!dev_stat_grp->default_groups) {
2853 printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
2854 goto out;
2855 }
2856 target_stat_setup_dev_default_groups(se_dev);
2857
2858 printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2859 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2860
2861 mutex_unlock(&hba->hba_access_mutex);
2862 return &se_dev->se_dev_group;
2863out:
2864 if (T10_ALUA(se_dev)->default_tg_pt_gp) {
2865 core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
2866 T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
2867 }
2868 if (dev_stat_grp)
2869 kfree(dev_stat_grp->default_groups);
2870 if (tg_pt_gp_cg)
2871 kfree(tg_pt_gp_cg->default_groups);
2872 if (dev_cg)
2873 kfree(dev_cg->default_groups);
2874 if (se_dev->se_dev_su_ptr)
2875 t->free_device(se_dev->se_dev_su_ptr);
2876 kfree(se_dev);
2877unlock:
2878 mutex_unlock(&hba->hba_access_mutex);
2879 return ERR_PTR(errno);
2880}
2881
2882static void target_core_drop_subdev(
2883 struct config_group *group,
2884 struct config_item *item)
2885{
2886 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
2887 struct se_subsystem_dev, se_dev_group);
2888 struct se_hba *hba;
2889 struct se_subsystem_api *t;
2890 struct config_item *df_item;
2891 struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
2892 int i;
2893
2894 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
2895
2896 mutex_lock(&hba->hba_access_mutex);
2897 t = hba->transport;
2898
2899 spin_lock(&se_global->g_device_lock);
2900 list_del(&se_dev->g_se_dev_list);
2901 spin_unlock(&se_global->g_device_lock);
2902
2903 dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
2904 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2905 df_item = &dev_stat_grp->default_groups[i]->cg_item;
2906 dev_stat_grp->default_groups[i] = NULL;
2907 config_item_put(df_item);
2908 }
2909 kfree(dev_stat_grp->default_groups);
2910
2911 tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
2912 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
2913 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
2914 tg_pt_gp_cg->default_groups[i] = NULL;
2915 config_item_put(df_item);
2916 }
2917 kfree(tg_pt_gp_cg->default_groups);
2918 /*
2919	 * core_alua_free_tg_pt_gp() for the ->default_tg_pt_gp is called
2920	 * directly from target_core_alua_tg_pt_gp_release().
2921 */
2922 T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
2923
2924 dev_cg = &se_dev->se_dev_group;
2925 for (i = 0; dev_cg->default_groups[i]; i++) {
2926 df_item = &dev_cg->default_groups[i]->cg_item;
2927 dev_cg->default_groups[i] = NULL;
2928 config_item_put(df_item);
2929 }
2930 /*
2931 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
2932 * from target_core_dev_item_ops->release() ->target_core_dev_release().
2933 */
2934 config_item_put(item);
2935 mutex_unlock(&hba->hba_access_mutex);
2936}
2937
2938static struct configfs_group_operations target_core_hba_group_ops = {
2939 .make_group = target_core_make_subdev,
2940 .drop_item = target_core_drop_subdev,
2941};
2942
2943CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
2944#define SE_HBA_ATTR(_name, _mode) \
2945static struct target_core_hba_attribute \
2946 target_core_hba_##_name = \
2947 __CONFIGFS_EATTR(_name, _mode, \
2948 target_core_hba_show_attr_##_name, \
2949 target_core_hba_store_attr_##_name);
2950
2951#define SE_HBA_ATTR_RO(_name) \
2952static struct target_core_hba_attribute \
2953 target_core_hba_##_name = \
2954 __CONFIGFS_EATTR_RO(_name, \
2955 target_core_hba_show_attr_##_name);
2956
2957static ssize_t target_core_hba_show_attr_hba_info(
2958 struct se_hba *hba,
2959 char *page)
2960{
2961 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
2962 hba->hba_id, hba->transport->name,
2963 TARGET_CORE_CONFIGFS_VERSION);
2964}
2965
2966SE_HBA_ATTR_RO(hba_info);
2967
2968static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
2969 char *page)
2970{
2971 int hba_mode = 0;
2972
2973 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
2974 hba_mode = 1;
2975
2976 return sprintf(page, "%d\n", hba_mode);
2977}
2978
2979static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2980 const char *page, size_t count)
2981{
2982 struct se_subsystem_api *transport = hba->transport;
2983 unsigned long mode_flag;
2984 int ret;
2985
2986 if (transport->pmode_enable_hba == NULL)
2987 return -EINVAL;
2988
2989 ret = strict_strtoul(page, 0, &mode_flag);
2990 if (ret < 0) {
2991 printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
2992 return -EINVAL;
2993 }
2994
2995 spin_lock(&hba->device_lock);
2996 if (!(list_empty(&hba->hba_dev_list))) {
2997 printk(KERN_ERR "Unable to set hba_mode with active devices\n");
2998 spin_unlock(&hba->device_lock);
2999 return -EINVAL;
3000 }
3001 spin_unlock(&hba->device_lock);
3002
3003 ret = transport->pmode_enable_hba(hba, mode_flag);
3004 if (ret < 0)
3005 return -EINVAL;
3006 if (ret > 0)
3007 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
3008 else if (ret == 0)
3009 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
3010
3011 return count;
3012}
3013
3014SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
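/*
 * Illustrative usage (editor's sketch, not part of the original code): for a
 * subsystem plugin that implements ->pmode_enable_hba() (e.g. pSCSI),
 * passthrough HBA mode could be toggled from userspace roughly as:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/hba_mode
 *
 * The write fails with -EINVAL while devices are still attached to the HBA,
 * per the hba_dev_list check above.
 */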
3015
3016CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
3017
3018static void target_core_hba_release(struct config_item *item)
3019{
3020 struct se_hba *hba = container_of(to_config_group(item),
3021 struct se_hba, hba_group);
3022 core_delete_hba(hba);
3023}
3024
3025static struct configfs_attribute *target_core_hba_attrs[] = {
3026 &target_core_hba_hba_info.attr,
3027 &target_core_hba_hba_mode.attr,
3028 NULL,
3029};
3030
3031static struct configfs_item_operations target_core_hba_item_ops = {
3032 .release = target_core_hba_release,
3033 .show_attribute = target_core_hba_attr_show,
3034 .store_attribute = target_core_hba_attr_store,
3035};
3036
3037static struct config_item_type target_core_hba_cit = {
3038 .ct_item_ops = &target_core_hba_item_ops,
3039 .ct_group_ops = &target_core_hba_group_ops,
3040 .ct_attrs = target_core_hba_attrs,
3041 .ct_owner = THIS_MODULE,
3042};
3043
3044static struct config_group *target_core_call_addhbatotarget(
3045 struct config_group *group,
3046 const char *name)
3047{
3048 char *se_plugin_str, *str, *str2;
3049 struct se_hba *hba;
3050 char buf[TARGET_CORE_NAME_MAX_LEN];
3051 unsigned long plugin_dep_id = 0;
3052 int ret;
3053
3054 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3055 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3056 printk(KERN_ERR "Passed *name strlen(): %d exceeds"
3057 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3058 TARGET_CORE_NAME_MAX_LEN);
3059 return ERR_PTR(-ENAMETOOLONG);
3060 }
3061 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3062
3063 str = strstr(buf, "_");
3064 if (!(str)) {
3065 printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3066 return ERR_PTR(-EINVAL);
3067 }
3068 se_plugin_str = buf;
3069 /*
3070 * Special case for subsystem plugins that have "_" in their names.
3071	 * Namely rd_direct and rd_mcp.
3072 */
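	/*
	 * For example (illustrative): an HBA group name of "iblock_0" is split
	 * into se_plugin_str "iblock" with plugin_dep_id 0, while "rd_mcp_2"
	 * is split into "rd_mcp" with plugin_dep_id 2 by the second strstr()
	 * check below.
	 */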
3073 str2 = strstr(str+1, "_");
3074 if ((str2)) {
3075 *str2 = '\0'; /* Terminate for *se_plugin_str */
3076 str2++; /* Skip to start of plugin dependent ID */
3077 str = str2;
3078 } else {
3079 *str = '\0'; /* Terminate for *se_plugin_str */
3080 str++; /* Skip to start of plugin dependent ID */
3081 }
3082
3083 ret = strict_strtoul(str, 0, &plugin_dep_id);
3084 if (ret < 0) {
3085 printk(KERN_ERR "strict_strtoul() returned %d for"
3086 " plugin_dep_id\n", ret);
3087 return ERR_PTR(-EINVAL);
3088 }
3089 /*
3090 * Load up TCM subsystem plugins if they have not already been loaded.
3091 */
3092 if (transport_subsystem_check_init() < 0)
3093 return ERR_PTR(-EINVAL);
3094
3095 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
3096 if (IS_ERR(hba))
3097 return ERR_CAST(hba);
3098
3099 config_group_init_type_name(&hba->hba_group, name,
3100 &target_core_hba_cit);
3101
3102 return &hba->hba_group;
3103}
3104
3105static void target_core_call_delhbafromtarget(
3106 struct config_group *group,
3107 struct config_item *item)
3108{
3109 /*
3110 * core_delete_hba() is called from target_core_hba_item_ops->release()
3111 * -> target_core_hba_release()
3112 */
3113 config_item_put(item);
3114}
3115
3116static struct configfs_group_operations target_core_group_ops = {
3117 .make_group = target_core_call_addhbatotarget,
3118 .drop_item = target_core_call_delhbafromtarget,
3119};
3120
3121static struct config_item_type target_core_cit = {
3122 .ct_item_ops = NULL,
3123 .ct_group_ops = &target_core_group_ops,
3124 .ct_attrs = NULL,
3125 .ct_owner = THIS_MODULE,
3126};
3127
3128/* End functions for struct config_item_type target_core_hba_cit */
3129
3130static int __init target_core_init_configfs(void)
3131{
3132 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
3133 struct config_group *lu_gp_cg = NULL;
3134 struct configfs_subsystem *subsys;
3135 struct t10_alua_lu_gp *lu_gp;
3136 int ret;
3137
3138 printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
3139 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
3140 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3141
3142 subsys = target_core_subsystem[0];
3143 config_group_init(&subsys->su_group);
3144 mutex_init(&subsys->su_mutex);
3145
3146 INIT_LIST_HEAD(&g_tf_list);
3147 mutex_init(&g_tf_lock);
3148 init_scsi_index_table();
3149 ret = init_se_global();
3150 if (ret < 0)
3151 return -1;
3152 /*
3153 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
3154 * and ALUA Logical Unit Group and Target Port Group infrastructure.
3155 */
3156 target_cg = &subsys->su_group;
3157 target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3158 GFP_KERNEL);
3159 if (!(target_cg->default_groups)) {
3160 printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
3161 goto out_global;
3162 }
3163
3164 config_group_init_type_name(&se_global->target_core_hbagroup,
3165 "core", &target_core_cit);
3166 target_cg->default_groups[0] = &se_global->target_core_hbagroup;
3167 target_cg->default_groups[1] = NULL;
3168 /*
3169 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
3170 */
3171 hba_cg = &se_global->target_core_hbagroup;
3172 hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3173 GFP_KERNEL);
3174 if (!(hba_cg->default_groups)) {
3175 printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
3176 goto out_global;
3177 }
3178 config_group_init_type_name(&se_global->alua_group,
3179 "alua", &target_core_alua_cit);
3180 hba_cg->default_groups[0] = &se_global->alua_group;
3181 hba_cg->default_groups[1] = NULL;
3182 /*
3183 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
3184 * groups under /sys/kernel/config/target/core/alua/
3185 */
3186 alua_cg = &se_global->alua_group;
3187 alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3188 GFP_KERNEL);
3189 if (!(alua_cg->default_groups)) {
3190 printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
3191 goto out_global;
3192 }
3193
3194 config_group_init_type_name(&se_global->alua_lu_gps_group,
3195 "lu_gps", &target_core_alua_lu_gps_cit);
3196 alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
3197 alua_cg->default_groups[1] = NULL;
3198 /*
3199 * Add core/alua/lu_gps/default_lu_gp
3200 */
3201 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3202 if (IS_ERR(lu_gp))
3203 goto out_global;
3204
3205 lu_gp_cg = &se_global->alua_lu_gps_group;
3206 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3207 GFP_KERNEL);
3208 if (!(lu_gp_cg->default_groups)) {
3209 printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
3210 goto out_global;
3211 }
3212
3213 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
3214 &target_core_alua_lu_gp_cit);
3215 lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
3216 lu_gp_cg->default_groups[1] = NULL;
3217 se_global->default_lu_gp = lu_gp;
3218 /*
3219 * Register the target_core_mod subsystem with configfs.
3220 */
3221 ret = configfs_register_subsystem(subsys);
3222 if (ret < 0) {
3223 printk(KERN_ERR "Error %d while registering subsystem %s\n",
3224 ret, subsys->su_group.cg_item.ci_namebuf);
3225 goto out_global;
3226 }
3227 printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
3228 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
3229 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3230 /*
3231 * Register built-in RAMDISK subsystem logic for virtual LUN 0
3232 */
3233 ret = rd_module_init();
3234 if (ret < 0)
3235 goto out;
3236
3237 if (core_dev_setup_virtual_lun0() < 0)
3238 goto out;
3239
3240 return 0;
3241
3242out:
3243 configfs_unregister_subsystem(subsys);
3244 core_dev_release_virtual_lun0();
3245 rd_module_exit();
3246out_global:
3247 if (se_global->default_lu_gp) {
3248 core_alua_free_lu_gp(se_global->default_lu_gp);
3249 se_global->default_lu_gp = NULL;
3250 }
3251 if (lu_gp_cg)
3252 kfree(lu_gp_cg->default_groups);
3253 if (alua_cg)
3254 kfree(alua_cg->default_groups);
3255 if (hba_cg)
3256 kfree(hba_cg->default_groups);
3257 kfree(target_cg->default_groups);
3258 release_se_global();
3259 return -1;
3260}
3261
3262static void __exit target_core_exit_configfs(void)
3263{
3264 struct configfs_subsystem *subsys;
3265 struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
3266 struct config_item *item;
3267 int i;
3268
3269 se_global->in_shutdown = 1;
3270 subsys = target_core_subsystem[0];
3271
3272 lu_gp_cg = &se_global->alua_lu_gps_group;
3273 for (i = 0; lu_gp_cg->default_groups[i]; i++) {
3274 item = &lu_gp_cg->default_groups[i]->cg_item;
3275 lu_gp_cg->default_groups[i] = NULL;
3276 config_item_put(item);
3277 }
3278 kfree(lu_gp_cg->default_groups);
3279 lu_gp_cg->default_groups = NULL;
3280
3281 alua_cg = &se_global->alua_group;
3282 for (i = 0; alua_cg->default_groups[i]; i++) {
3283 item = &alua_cg->default_groups[i]->cg_item;
3284 alua_cg->default_groups[i] = NULL;
3285 config_item_put(item);
3286 }
3287 kfree(alua_cg->default_groups);
3288 alua_cg->default_groups = NULL;
3289
3290 hba_cg = &se_global->target_core_hbagroup;
3291 for (i = 0; hba_cg->default_groups[i]; i++) {
3292 item = &hba_cg->default_groups[i]->cg_item;
3293 hba_cg->default_groups[i] = NULL;
3294 config_item_put(item);
3295 }
3296 kfree(hba_cg->default_groups);
3297 hba_cg->default_groups = NULL;
3298 /*
3299 * We expect subsys->su_group.default_groups to be released
3300	 * by configfs subsystem provider logic.
3301 */
3302 configfs_unregister_subsystem(subsys);
3303 kfree(subsys->su_group.default_groups);
3304
3305 core_alua_free_lu_gp(se_global->default_lu_gp);
3306 se_global->default_lu_gp = NULL;
3307
3308 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
3309 " Infrastructure\n");
3310
3311 core_dev_release_virtual_lun0();
3312 rd_module_exit();
3313 release_se_global();
3314
3315 return;
3316}
3317
3318MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3319MODULE_AUTHOR("nab@Linux-iSCSI.org");
3320MODULE_LICENSE("GPL");
3321
3322module_init(target_core_init_configfs);
3323module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 000000000000..ba698ea62bb2
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1644 @@
1/*******************************************************************************
2 * Filename: target_core_device.c (based on iscsi_target_device.c)
3 *
4 * This file contains the iSCSI Virtual Device and Disk Transport
5 * agnostic functions.
6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 *
12 * Nicholas A. Bellinger <nab@kernel.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/net.h>
31#include <linux/string.h>
32#include <linux/delay.h>
33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/spinlock.h>
36#include <linux/kthread.h>
37#include <linux/in.h>
38#include <net/sock.h>
39#include <net/tcp.h>
40#include <scsi/scsi.h>
41#include <scsi/scsi_device.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_transport.h>
47#include <target/target_core_fabric_ops.h>
48
49#include "target_core_alua.h"
50#include "target_core_hba.h"
51#include "target_core_pr.h"
52#include "target_core_ua.h"
53
54static void se_dev_start(struct se_device *dev);
55static void se_dev_stop(struct se_device *dev);
56
57int transport_get_lun_for_cmd(
58 struct se_cmd *se_cmd,
59 unsigned char *cdb,
60 u32 unpacked_lun)
61{
62 struct se_dev_entry *deve;
63 struct se_lun *se_lun = NULL;
64 struct se_session *se_sess = SE_SESS(se_cmd);
65 unsigned long flags;
66 int read_only = 0;
67
68 spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
69 deve = se_cmd->se_deve =
70 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
71 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
72 if (se_cmd) {
73 deve->total_cmds++;
74 deve->total_bytes += se_cmd->data_length;
75
76 if (se_cmd->data_direction == DMA_TO_DEVICE) {
77 if (deve->lun_flags &
78 TRANSPORT_LUNFLAGS_READ_ONLY) {
79 read_only = 1;
80 goto out;
81 }
82 deve->write_bytes += se_cmd->data_length;
83 } else if (se_cmd->data_direction ==
84 DMA_FROM_DEVICE) {
85 deve->read_bytes += se_cmd->data_length;
86 }
87 }
88 deve->deve_cmds++;
89
90 se_lun = se_cmd->se_lun = deve->se_lun;
91 se_cmd->pr_res_key = deve->pr_res_key;
92 se_cmd->orig_fe_lun = unpacked_lun;
93 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
95 }
96out:
97 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
98
99 if (!se_lun) {
100 if (read_only) {
101 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
102 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
103 printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
104 " Access for 0x%08x\n",
105 CMD_TFO(se_cmd)->get_fabric_name(),
106 unpacked_lun);
107 return -1;
108 } else {
109 /*
110 * Use the se_portal_group->tpg_virt_lun0 to allow for
111 * REPORT_LUNS, et al to be returned when no active
112 * MappedLUN=0 exists for this Initiator Port.
113 */
114 if (unpacked_lun != 0) {
115 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
116 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
117 printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
118 " Access for 0x%08x\n",
119 CMD_TFO(se_cmd)->get_fabric_name(),
120 unpacked_lun);
121 return -1;
122 }
123 /*
124 * Force WRITE PROTECT for virtual LUN 0
125 */
126 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
127 (se_cmd->data_direction != DMA_NONE)) {
128 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
129 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
130 return -1;
131 }
132#if 0
133 printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
134 CMD_TFO(se_cmd)->get_fabric_name());
135#endif
136 se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
137 se_cmd->orig_fe_lun = 0;
138 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
139 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
140 }
141 }
142 /*
143 * Determine if the struct se_lun is online.
144 */
145/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
146 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
147 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
148 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
149 return -1;
150 }
151
152 {
153 struct se_device *dev = se_lun->lun_se_dev;
154 spin_lock_irq(&dev->stats_lock);
155 dev->num_cmds++;
156 if (se_cmd->data_direction == DMA_TO_DEVICE)
157 dev->write_bytes += se_cmd->data_length;
158 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
159 dev->read_bytes += se_cmd->data_length;
160 spin_unlock_irq(&dev->stats_lock);
161 }
162
163 /*
164	 * Add the struct se_cmd to the struct se_lun's cmd list. This list is used
165 * for tracking state of struct se_cmds during LUN shutdown events.
166 */
167 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
168 list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
169 atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
170#if 0
171 printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
172 CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
173#endif
174 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
175
176 return 0;
177}
178EXPORT_SYMBOL(transport_get_lun_for_cmd);
179
180int transport_get_lun_for_tmr(
181 struct se_cmd *se_cmd,
182 u32 unpacked_lun)
183{
184 struct se_device *dev = NULL;
185 struct se_dev_entry *deve;
186 struct se_lun *se_lun = NULL;
187 struct se_session *se_sess = SE_SESS(se_cmd);
188 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
189
190 spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
191 deve = se_cmd->se_deve =
192 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
193 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
194 se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
195 dev = se_lun->lun_se_dev;
196 se_cmd->pr_res_key = deve->pr_res_key;
197 se_cmd->orig_fe_lun = unpacked_lun;
198 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
199/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
200 }
201 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
202
203 if (!se_lun) {
204 printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
205 " Access for 0x%08x\n",
206 CMD_TFO(se_cmd)->get_fabric_name(),
207 unpacked_lun);
208 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
209 return -1;
210 }
211 /*
212 * Determine if the struct se_lun is online.
213 */
214/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
215 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
216 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
217 return -1;
218 }
219 se_tmr->tmr_dev = dev;
220
221 spin_lock(&dev->se_tmr_lock);
222 list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
223 spin_unlock(&dev->se_tmr_lock);
224
225 return 0;
226}
227EXPORT_SYMBOL(transport_get_lun_for_tmr);
228
229/*
230 * This function is called from core_scsi3_emulate_pro_register_and_move()
231 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
232 * when a matching rtpi is found.
233 */
234struct se_dev_entry *core_get_se_deve_from_rtpi(
235 struct se_node_acl *nacl,
236 u16 rtpi)
237{
238 struct se_dev_entry *deve;
239 struct se_lun *lun;
240 struct se_port *port;
241 struct se_portal_group *tpg = nacl->se_tpg;
242 u32 i;
243
244 spin_lock_irq(&nacl->device_list_lock);
245 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
246 deve = &nacl->device_list[i];
247
248 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
249 continue;
250
251 lun = deve->se_lun;
252 if (!(lun)) {
253			printk(KERN_ERR "%s device entry's LUN pointer is"
254 " NULL, but Initiator has access.\n",
255 TPG_TFO(tpg)->get_fabric_name());
256 continue;
257 }
258 port = lun->lun_sep;
259 if (!(port)) {
260			printk(KERN_ERR "%s device entry's port pointer is"
261 " NULL, but Initiator has access.\n",
262 TPG_TFO(tpg)->get_fabric_name());
263 continue;
264 }
265 if (port->sep_rtpi != rtpi)
266 continue;
267
268 atomic_inc(&deve->pr_ref_count);
269 smp_mb__after_atomic_inc();
270 spin_unlock_irq(&nacl->device_list_lock);
271
272 return deve;
273 }
274 spin_unlock_irq(&nacl->device_list_lock);
275
276 return NULL;
277}
278
279int core_free_device_list_for_node(
280 struct se_node_acl *nacl,
281 struct se_portal_group *tpg)
282{
283 struct se_dev_entry *deve;
284 struct se_lun *lun;
285 u32 i;
286
287 if (!nacl->device_list)
288 return 0;
289
290 spin_lock_irq(&nacl->device_list_lock);
291 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
292 deve = &nacl->device_list[i];
293
294 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
295 continue;
296
297 if (!deve->se_lun) {
298			printk(KERN_ERR "%s device entry's LUN pointer is"
299 " NULL, but Initiator has access.\n",
300 TPG_TFO(tpg)->get_fabric_name());
301 continue;
302 }
303 lun = deve->se_lun;
304
305 spin_unlock_irq(&nacl->device_list_lock);
306 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
307 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
308 spin_lock_irq(&nacl->device_list_lock);
309 }
310 spin_unlock_irq(&nacl->device_list_lock);
311
312 kfree(nacl->device_list);
313 nacl->device_list = NULL;
314
315 return 0;
316}
317
318void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
319{
320 struct se_dev_entry *deve;
321
322 spin_lock_irq(&se_nacl->device_list_lock);
323 deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
324 deve->deve_cmds--;
325 spin_unlock_irq(&se_nacl->device_list_lock);
326
327 return;
328}
329
330void core_update_device_list_access(
331 u32 mapped_lun,
332 u32 lun_access,
333 struct se_node_acl *nacl)
334{
335 struct se_dev_entry *deve;
336
337 spin_lock_irq(&nacl->device_list_lock);
338 deve = &nacl->device_list[mapped_lun];
339 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
340 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
341 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
342 } else {
343 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
344 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
345 }
346 spin_unlock_irq(&nacl->device_list_lock);
347
348 return;
349}
350
351/* core_update_device_list_for_node():
352 *
353 *
354 */
355int core_update_device_list_for_node(
356 struct se_lun *lun,
357 struct se_lun_acl *lun_acl,
358 u32 mapped_lun,
359 u32 lun_access,
360 struct se_node_acl *nacl,
361 struct se_portal_group *tpg,
362 int enable)
363{
364 struct se_port *port = lun->lun_sep;
365 struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
366 int trans = 0;
367 /*
368 * If the MappedLUN entry is being disabled, the entry in
369 * port->sep_alua_list must be removed now before clearing the
370 * struct se_dev_entry pointers below as logic in
371 * core_alua_do_transition_tg_pt() depends on these being present.
372 */
373 if (!(enable)) {
374 /*
375 * deve->se_lun_acl will be NULL for demo-mode created LUNs
376		 * that have not been explicitly converted to MappedLUNs ->
377		 * struct se_lun_acl, but we remove deve->alua_port_list from
378		 * port->sep_alua_list. This also means that active UAs and
379		 * NodeACL context specific PR metadata for demo-mode
380		 * MappedLUN *deve will be released below.
381 */
382 spin_lock_bh(&port->sep_alua_lock);
383 list_del(&deve->alua_port_list);
384 spin_unlock_bh(&port->sep_alua_lock);
385 }
386
387 spin_lock_irq(&nacl->device_list_lock);
388 if (enable) {
389 /*
390		 * Check if the call is handling a demo mode -> explicit LUN ACL
391		 * transition. This transition must be for the same struct se_lun
392		 * + mapped_lun that was set up in demo mode.
393 */
394 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
395 if (deve->se_lun_acl != NULL) {
396 printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
397					" already set for demo mode -> explicit"
398 " LUN ACL transition\n");
399 spin_unlock_irq(&nacl->device_list_lock);
400 return -1;
401 }
402 if (deve->se_lun != lun) {
403 printk(KERN_ERR "struct se_dev_entry->se_lun does"
404					" not match passed struct se_lun for demo mode"
405					" -> explicit LUN ACL transition\n");
406 spin_unlock_irq(&nacl->device_list_lock);
407 return -1;
408 }
409 deve->se_lun_acl = lun_acl;
410 trans = 1;
411 } else {
412 deve->se_lun = lun;
413 deve->se_lun_acl = lun_acl;
414 deve->mapped_lun = mapped_lun;
415 deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
416 }
417
418 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
419 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
420 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
421 } else {
422 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
423 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
424 }
425
426 if (trans) {
427 spin_unlock_irq(&nacl->device_list_lock);
428 return 0;
429 }
430 deve->creation_time = get_jiffies_64();
431 deve->attach_count++;
432 spin_unlock_irq(&nacl->device_list_lock);
433
434 spin_lock_bh(&port->sep_alua_lock);
435 list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
436 spin_unlock_bh(&port->sep_alua_lock);
437
438 return 0;
439 }
440 /*
441 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
442 * PR operation to complete.
443 */
444 spin_unlock_irq(&nacl->device_list_lock);
445 while (atomic_read(&deve->pr_ref_count) != 0)
446 cpu_relax();
447 spin_lock_irq(&nacl->device_list_lock);
448 /*
449 * Disable struct se_dev_entry LUN ACL mapping
450 */
451 core_scsi3_ua_release_all(deve);
452 deve->se_lun = NULL;
453 deve->se_lun_acl = NULL;
454 deve->lun_flags = 0;
455 deve->creation_time = 0;
456 deve->attach_count--;
457 spin_unlock_irq(&nacl->device_list_lock);
458
459 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
460 return 0;
461}
462
463/* core_clear_lun_from_tpg():
464 *
465 *
466 */
467void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
468{
469 struct se_node_acl *nacl;
470 struct se_dev_entry *deve;
471 u32 i;
472
473 spin_lock_bh(&tpg->acl_node_lock);
474 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
475 spin_unlock_bh(&tpg->acl_node_lock);
476
477 spin_lock_irq(&nacl->device_list_lock);
478 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
479 deve = &nacl->device_list[i];
480 if (lun != deve->se_lun)
481 continue;
482 spin_unlock_irq(&nacl->device_list_lock);
483
484 core_update_device_list_for_node(lun, NULL,
485 deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
486 nacl, tpg, 0);
487
488 spin_lock_irq(&nacl->device_list_lock);
489 }
490 spin_unlock_irq(&nacl->device_list_lock);
491
492 spin_lock_bh(&tpg->acl_node_lock);
493 }
494 spin_unlock_bh(&tpg->acl_node_lock);
495
496 return;
497}
498
499static struct se_port *core_alloc_port(struct se_device *dev)
500{
501 struct se_port *port, *port_tmp;
502
503 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
504 if (!(port)) {
505 printk(KERN_ERR "Unable to allocate struct se_port\n");
506 return NULL;
507 }
508 INIT_LIST_HEAD(&port->sep_alua_list);
509 INIT_LIST_HEAD(&port->sep_list);
510 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
511 spin_lock_init(&port->sep_alua_lock);
512 mutex_init(&port->sep_tg_pt_md_mutex);
513
514 spin_lock(&dev->se_port_lock);
515 if (dev->dev_port_count == 0x0000ffff) {
516 printk(KERN_WARNING "Reached dev->dev_port_count =="
517 " 0x0000ffff\n");
518 spin_unlock(&dev->se_port_lock);
519 return NULL;
520 }
521again:
522 /*
523	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device.
524 * Here is the table from spc4r17 section 7.7.3.8.
525 *
526 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
527 *
528 * Code Description
529 * 0h Reserved
530 * 1h Relative port 1, historically known as port A
531 * 2h Relative port 2, historically known as port B
532 * 3h to FFFFh Relative port 3 through 65 535
533 */
534 port->sep_rtpi = dev->dev_rpti_counter++;
535 if (!(port->sep_rtpi))
536 goto again;
537
538 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
539 /*
540		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
541		 * for 16-bit wrap.
542 */
543 if (port->sep_rtpi == port_tmp->sep_rtpi)
544 goto again;
545 }
546 spin_unlock(&dev->se_port_lock);
547
548 return port;
549}
550
551static void core_export_port(
552 struct se_device *dev,
553 struct se_portal_group *tpg,
554 struct se_port *port,
555 struct se_lun *lun)
556{
557 struct se_subsystem_dev *su_dev = SU_DEV(dev);
558 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
559
560 spin_lock(&dev->se_port_lock);
561 spin_lock(&lun->lun_sep_lock);
562 port->sep_tpg = tpg;
563 port->sep_lun = lun;
564 lun->lun_sep = port;
565 spin_unlock(&lun->lun_sep_lock);
566
567 list_add_tail(&port->sep_list, &dev->dev_sep_list);
568 spin_unlock(&dev->se_port_lock);
569
570 if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
571 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
572 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
573 printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
574 "_gp_member_t\n");
575 return;
576 }
577 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
578 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
579 T10_ALUA(su_dev)->default_tg_pt_gp);
580 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
581 printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
582 " Group: alua/default_tg_pt_gp\n",
583 TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
584 }
585
586 dev->dev_port_count++;
587	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
588}
589
590/*
591 * Called with struct se_device->se_port_lock spinlock held.
592 */
593static void core_release_port(struct se_device *dev, struct se_port *port)
594 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
595{
596 /*
597 * Wait for any port reference for PR ALL_TG_PT=1 operation
598 * to complete in __core_scsi3_alloc_registration()
599 */
600 spin_unlock(&dev->se_port_lock);
601	while (atomic_read(&port->sep_tg_pt_ref_cnt))
602		cpu_relax();
603 spin_lock(&dev->se_port_lock);
604
605 core_alua_free_tg_pt_gp_mem(port);
606
607 list_del(&port->sep_list);
608 dev->dev_port_count--;
609 kfree(port);
610
611 return;
612}
613
614int core_dev_export(
615 struct se_device *dev,
616 struct se_portal_group *tpg,
617 struct se_lun *lun)
618{
619 struct se_port *port;
620
621 port = core_alloc_port(dev);
622 if (!(port))
623 return -1;
624
625 lun->lun_se_dev = dev;
626 se_dev_start(dev);
627
628 atomic_inc(&dev->dev_export_obj.obj_access_count);
629 core_export_port(dev, tpg, port, lun);
630 return 0;
631}
632
633void core_dev_unexport(
634 struct se_device *dev,
635 struct se_portal_group *tpg,
636 struct se_lun *lun)
637{
638 struct se_port *port = lun->lun_sep;
639
640 spin_lock(&lun->lun_sep_lock);
641 if (lun->lun_se_dev == NULL) {
642 spin_unlock(&lun->lun_sep_lock);
643 return;
644 }
645 spin_unlock(&lun->lun_sep_lock);
646
647 spin_lock(&dev->se_port_lock);
648 atomic_dec(&dev->dev_export_obj.obj_access_count);
649 core_release_port(dev, port);
650 spin_unlock(&dev->se_port_lock);
651
652 se_dev_stop(dev);
653 lun->lun_se_dev = NULL;
654}
655
656int transport_core_report_lun_response(struct se_cmd *se_cmd)
657{
658 struct se_dev_entry *deve;
659 struct se_lun *se_lun;
660 struct se_session *se_sess = SE_SESS(se_cmd);
661 struct se_task *se_task;
662 unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
663 u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
664
665 list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
666 break;
667
668 if (!(se_task)) {
669 printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
670 return PYX_TRANSPORT_LU_COMM_FAILURE;
671 }
672
673 /*
674 * If no struct se_session pointer is present, this struct se_cmd is
675 * coming via a target_core_mod PASSTHROUGH op, and not through
676 * a $FABRIC_MOD. In that case, report LUN=0 only.
677 */
678 if (!(se_sess)) {
679 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
680 lun_count = 1;
681 goto done;
682 }
683
684 spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
685 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
686 deve = &SE_NODE_ACL(se_sess)->device_list[i];
687 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
688 continue;
689 se_lun = deve->se_lun;
690 /*
691 * We determine the correct LUN LIST LENGTH even once we
692 * have reached the initial allocation length.
693 * See SPC2-R20 7.19.
694 */
695 lun_count++;
696 if ((cdb_offset + 8) >= se_cmd->data_length)
697 continue;
698
699 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
700 offset += 8;
701 cdb_offset += 8;
702 }
703 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
704
705 /*
706 * See SPC3 r07, page 159.
707 */
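	/*
	 * Worked example (illustrative): with two mapped LUNs reported,
	 * lun_count * 8 = 16, so the 4-byte LUN LIST LENGTH header below is
	 * encoded big-endian as 00 00 00 10 and the two 8-byte LUN entries
	 * start at offset 8.
	 */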
708done:
709 lun_count *= 8;
710 buf[0] = ((lun_count >> 24) & 0xff);
711 buf[1] = ((lun_count >> 16) & 0xff);
712 buf[2] = ((lun_count >> 8) & 0xff);
713 buf[3] = (lun_count & 0xff);
714
715 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
716}
717
718/* se_release_device_for_hba():
719 *
720 *
721 */
722void se_release_device_for_hba(struct se_device *dev)
723{
724 struct se_hba *hba = dev->se_hba;
725
726 if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
727 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
728 (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
729 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
730 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
731 se_dev_stop(dev);
732
733 if (dev->dev_ptr) {
734 kthread_stop(dev->process_thread);
735 if (dev->transport->free_device)
736 dev->transport->free_device(dev->dev_ptr);
737 }
738
739 spin_lock(&hba->device_lock);
740 list_del(&dev->dev_list);
741 hba->dev_count--;
742 spin_unlock(&hba->device_lock);
743
744 core_scsi3_free_all_registrations(dev);
745 se_release_vpd_for_dev(dev);
746
747 kfree(dev->dev_status_queue_obj);
748 kfree(dev->dev_queue_obj);
749 kfree(dev);
750
751 return;
752}
753
754void se_release_vpd_for_dev(struct se_device *dev)
755{
756 struct t10_vpd *vpd, *vpd_tmp;
757
758 spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
759 list_for_each_entry_safe(vpd, vpd_tmp,
760 &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
761 list_del(&vpd->vpd_list);
762 kfree(vpd);
763 }
764 spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
765
766 return;
767}
768
769/* se_free_virtual_device():
770 *
771 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
772 */
773int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
774{
775 if (!list_empty(&dev->dev_sep_list))
776 dump_stack();
777
778 core_alua_free_lu_gp_mem(dev);
779 se_release_device_for_hba(dev);
780
781 return 0;
782}
783
784static void se_dev_start(struct se_device *dev)
785{
786 struct se_hba *hba = dev->se_hba;
787
788 spin_lock(&hba->device_lock);
789 atomic_inc(&dev->dev_obj.obj_access_count);
790 if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
791 if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
792 dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
793 dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
794 } else if (dev->dev_status &
795 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
796 dev->dev_status &=
797 ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
798 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
799 }
800 }
801 spin_unlock(&hba->device_lock);
802}
803
804static void se_dev_stop(struct se_device *dev)
805{
806 struct se_hba *hba = dev->se_hba;
807
808 spin_lock(&hba->device_lock);
809 atomic_dec(&dev->dev_obj.obj_access_count);
810 if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
811 if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
812 dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
813 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
814 } else if (dev->dev_status &
815 TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
816 dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
817 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
818 }
819 }
820 spin_unlock(&hba->device_lock);
821}
822
823int se_dev_check_online(struct se_device *dev)
824{
825 int ret;
826
827 spin_lock_irq(&dev->dev_status_lock);
828 ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
829 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
830 spin_unlock_irq(&dev->dev_status_lock);
831
832 return ret;
833}
834
835int se_dev_check_shutdown(struct se_device *dev)
836{
837 int ret;
838
839 spin_lock_irq(&dev->dev_status_lock);
840 ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
841 spin_unlock_irq(&dev->dev_status_lock);
842
843 return ret;
844}
845
846void se_dev_set_default_attribs(
847 struct se_device *dev,
848 struct se_dev_limits *dev_limits)
849{
850 struct queue_limits *limits = &dev_limits->limits;
851
852 DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
853 DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
854 DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
855 DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
856 DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
857 DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
858 DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
859 DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
860 DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
861 DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
862 DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
863 /*
864 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
865 * iblock_create_virtdevice() from struct queue_limits values
866 * if blk_queue_discard()==1
867 */
868 DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
869 DEV_ATTRIB(dev)->max_unmap_block_desc_count =
870 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
871 DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
872 DEV_ATTRIB(dev)->unmap_granularity_alignment =
873 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
874 /*
875 * block_size is based on subsystem plugin dependent requirements.
876 */
877 DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
878 DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
879 /*
880 * max_sectors is based on subsystem plugin dependent requirements.
881 */
882 DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
883 DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
884 /*
885 * Set optimal_sectors from max_sectors, which can be lowered via
886 * configfs.
887 */
888 DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
889 /*
890 * queue_depth is based on subsystem plugin dependent requirements.
891 */
892 DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
893 DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
894}
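/*
 * Illustrative sketch (values hedged, not taken from any one subsystem
 * plugin): a backstore plugin fills out struct se_dev_limits before the
 * limits reach se_dev_set_default_attribs(), roughly:
 *
 *	struct se_dev_limits dev_limits;
 *
 *	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 *	dev_limits.limits.logical_block_size = 512;
 *	dev_limits.limits.max_hw_sectors = 1024;
 *	dev_limits.limits.max_sectors = 1024;
 *	dev_limits.hw_queue_depth = 256;
 *	dev_limits.queue_depth = 32;
 *
 * The core applies these limits when the new struct se_device is registered,
 * which is where the hw_block_size/hw_max_sectors/hw_queue_depth defaults
 * set above come from.
 */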
895
896int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
897{
898 if (task_timeout > DA_TASK_TIMEOUT_MAX) {
899 printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
900 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
901 return -1;
902 } else {
903 DEV_ATTRIB(dev)->task_timeout = task_timeout;
904 printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
905 dev, task_timeout);
906 }
907
908 return 0;
909}
910
911int se_dev_set_max_unmap_lba_count(
912 struct se_device *dev,
913 u32 max_unmap_lba_count)
914{
915 DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
916 printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
917 dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
918 return 0;
919}
920
921int se_dev_set_max_unmap_block_desc_count(
922 struct se_device *dev,
923 u32 max_unmap_block_desc_count)
924{
925 DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
926 printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
927 dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
928 return 0;
929}
930
931int se_dev_set_unmap_granularity(
932 struct se_device *dev,
933 u32 unmap_granularity)
934{
935 DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
936 printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
937 dev, DEV_ATTRIB(dev)->unmap_granularity);
938 return 0;
939}
940
941int se_dev_set_unmap_granularity_alignment(
942 struct se_device *dev,
943 u32 unmap_granularity_alignment)
944{
945 DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
946 printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
947 dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
948 return 0;
949}
950
951int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
952{
953 if ((flag != 0) && (flag != 1)) {
954 printk(KERN_ERR "Illegal value %d\n", flag);
955 return -1;
956 }
957 if (TRANSPORT(dev)->dpo_emulated == NULL) {
958 printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
959 return -1;
960 }
961 if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
962 printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
963 return -1;
964 }
965 DEV_ATTRIB(dev)->emulate_dpo = flag;
966 printk(KERN_INFO "dev[%p]: SE Device Disable Page Out (DPO) Emulation"
967 " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
968 return 0;
969}
970
971int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
972{
973 if ((flag != 0) && (flag != 1)) {
974 printk(KERN_ERR "Illegal value %d\n", flag);
975 return -1;
976 }
977 if (TRANSPORT(dev)->fua_write_emulated == NULL) {
978 printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
979 return -1;
980 }
981 if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
982 printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
983 return -1;
984 }
985 DEV_ATTRIB(dev)->emulate_fua_write = flag;
986 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
987 dev, DEV_ATTRIB(dev)->emulate_fua_write);
988 return 0;
989}
990
991int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
992{
993 if ((flag != 0) && (flag != 1)) {
994 printk(KERN_ERR "Illegal value %d\n", flag);
995 return -1;
996 }
997 if (TRANSPORT(dev)->fua_read_emulated == NULL) {
998 printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
999 return -1;
1000 }
1001 if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
1002 printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
1003 return -1;
1004 }
1005 DEV_ATTRIB(dev)->emulate_fua_read = flag;
1006 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
1007 dev, DEV_ATTRIB(dev)->emulate_fua_read);
1008 return 0;
1009}
1010
1011int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1012{
1013 if ((flag != 0) && (flag != 1)) {
1014 printk(KERN_ERR "Illegal value %d\n", flag);
1015 return -1;
1016 }
1017 if (TRANSPORT(dev)->write_cache_emulated == NULL) {
1018 printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
1019 return -1;
1020 }
1021 if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
1022 printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
1023 return -1;
1024 }
1025 DEV_ATTRIB(dev)->emulate_write_cache = flag;
1026 printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1027 dev, DEV_ATTRIB(dev)->emulate_write_cache);
1028 return 0;
1029}
1030
1031int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1032{
1033 if ((flag != 0) && (flag != 1) && (flag != 2)) {
1034 printk(KERN_ERR "Illegal value %d\n", flag);
1035 return -1;
1036 }
1037
1038 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1039 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1040 " UA_INTRLCK_CTRL while dev_export_obj: %d count"
1041 " exists\n", dev,
1042 atomic_read(&dev->dev_export_obj.obj_access_count));
1043 return -1;
1044 }
1045 DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
1046 printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1047 dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
1048
1049 return 0;
1050}
1051
1052int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1053{
1054 if ((flag != 0) && (flag != 1)) {
1055 printk(KERN_ERR "Illegal value %d\n", flag);
1056 return -1;
1057 }
1058
1059 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1060 printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
1061 " dev_export_obj: %d count exists\n", dev,
1062 atomic_read(&dev->dev_export_obj.obj_access_count));
1063 return -1;
1064 }
1065 DEV_ATTRIB(dev)->emulate_tas = flag;
1066 printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1067 dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
1068
1069 return 0;
1070}
1071
1072int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1073{
1074 if ((flag != 0) && (flag != 1)) {
1075 printk(KERN_ERR "Illegal value %d\n", flag);
1076 return -1;
1077 }
1078 /*
1079 * We expect this value to be non-zero when generic Block Layer
1080 * Discard support is detected in iblock_create_virtdevice().
1081 */
1082 if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
1083 printk(KERN_ERR "Generic Block Discard not supported\n");
1084 return -ENOSYS;
1085 }
1086
1087 DEV_ATTRIB(dev)->emulate_tpu = flag;
1088 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1089 dev, flag);
1090 return 0;
1091}
1092
1093int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1094{
1095 if ((flag != 0) && (flag != 1)) {
1096 printk(KERN_ERR "Illegal value %d\n", flag);
1097 return -1;
1098 }
1099 /*
1100 * We expect this value to be non-zero when generic Block Layer
1101 * Discard support is detected in iblock_create_virtdevice().
1102 */
1103 if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
1104 printk(KERN_ERR "Generic Block Discard not supported\n");
1105 return -ENOSYS;
1106 }
1107
1108 DEV_ATTRIB(dev)->emulate_tpws = flag;
1109 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1110 dev, flag);
1111 return 0;
1112}
1113
1114int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1115{
1116 if ((flag != 0) && (flag != 1)) {
1117 printk(KERN_ERR "Illegal value %d\n", flag);
1118 return -1;
1119 }
1120 DEV_ATTRIB(dev)->enforce_pr_isids = flag;
1121 printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1122 (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
1123 return 0;
1124}
1125
1126/*
1127 * Note, this can only be called on unexported SE Device Object.
1128 */
1129int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1130{
1131 u32 orig_queue_depth = dev->queue_depth;
1132
1133 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1134 printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
1135 " dev_export_obj: %d count exists\n", dev,
1136 atomic_read(&dev->dev_export_obj.obj_access_count));
1137 return -1;
1138 }
1139 if (!(queue_depth)) {
1140 printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
1141 "_depth\n", dev);
1142 return -1;
1143 }
1144
1145 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1146 if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
1147 printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
1148 " exceeds TCM/SE_Device TCQ: %u\n",
1149 dev, queue_depth,
1150 DEV_ATTRIB(dev)->hw_queue_depth);
1151 return -1;
1152 }
1153 } else {
1154 if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
1155 if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
1156 printk(KERN_ERR "dev[%p]: Passed queue_depth:"
1157 " %u exceeds TCM/SE_Device MAX"
1158 " TCQ: %u\n", dev, queue_depth,
1159 DEV_ATTRIB(dev)->hw_queue_depth);
1160 return -1;
1161 }
1162 }
1163 }
1164
1165 DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
1166 if (queue_depth > orig_queue_depth)
1167 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
1168 else if (queue_depth < orig_queue_depth)
1169 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1170
1171 printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
1172 dev, queue_depth);
1173 return 0;
1174}
1175
1176int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1177{
1178 int force = 0; /* Force setting for VDEVS */
1179
1180 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1181 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1182 " max_sectors while dev_export_obj: %d count exists\n",
1183 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1184 return -1;
1185 }
1186 if (!(max_sectors)) {
1187 printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
1188 " max_sectors\n", dev);
1189 return -1;
1190 }
1191 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1192 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
1193 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1194 DA_STATUS_MAX_SECTORS_MIN);
1195 return -1;
1196 }
1197 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1198 if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
1199 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1200 " greater than TCM/SE_Device max_sectors:"
1201 " %u\n", dev, max_sectors,
1202 DEV_ATTRIB(dev)->hw_max_sectors);
1203 return -1;
1204 }
1205 } else {
1206 if (!(force) && (max_sectors >
1207 DEV_ATTRIB(dev)->hw_max_sectors)) {
1208 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1209 " greater than TCM/SE_Device max_sectors"
1210 ": %u, use force=1 to override.\n", dev,
1211 max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
1212 return -1;
1213 }
1214 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1215 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1216 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1217 " %u\n", dev, max_sectors,
1218 DA_STATUS_MAX_SECTORS_MAX);
1219 return -1;
1220 }
1221 }
1222
1223 DEV_ATTRIB(dev)->max_sectors = max_sectors;
1224 printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
1225 dev, max_sectors);
1226 return 0;
1227}
1228
1229int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1230{
1231 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1232 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1233 " optimal_sectors while dev_export_obj: %d count exists\n",
1234 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1235 return -EINVAL;
1236 }
1237 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1238 printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
1239 " changed for TCM/pSCSI\n", dev);
1240 return -EINVAL;
1241 }
1242 if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
1243 printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
1244 " greater than max_sectors: %u\n", dev,
1245 optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
1246 return -EINVAL;
1247 }
1248
1249 DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
1250 printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
1251 dev, optimal_sectors);
1252 return 0;
1253}
1254
1255int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1256{
1257 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1258 printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
1259 " while dev_export_obj: %d count exists\n", dev,
1260 atomic_read(&dev->dev_export_obj.obj_access_count));
1261 return -1;
1262 }
1263
1264 if ((block_size != 512) &&
1265 (block_size != 1024) &&
1266 (block_size != 2048) &&
1267 (block_size != 4096)) {
1268 printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
1269 " for SE device, must be 512, 1024, 2048 or 4096\n",
1270 dev, block_size);
1271 return -1;
1272 }
1273
1274 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1275 printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
1276 " Physical Device, use for Linux/SCSI to change"
1277 " block_size for underlying hardware\n", dev);
1278 return -1;
1279 }
1280
1281 DEV_ATTRIB(dev)->block_size = block_size;
1282 printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
1283 dev, block_size);
1284 return 0;
1285}
1286
1287struct se_lun *core_dev_add_lun(
1288 struct se_portal_group *tpg,
1289 struct se_hba *hba,
1290 struct se_device *dev,
1291 u32 lun)
1292{
1293 struct se_lun *lun_p;
1294 u32 lun_access = 0;
1295
1296 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1297 printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
1298 atomic_read(&dev->dev_access_obj.obj_access_count));
1299 return NULL;
1300 }
1301
1302 lun_p = core_tpg_pre_addlun(tpg, lun);
1303 if ((IS_ERR(lun_p)) || !(lun_p))
1304 return NULL;
1305
1306 if (dev->dev_flags & DF_READ_ONLY)
1307 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1308 else
1309 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
1310
1311 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
1312 return NULL;
1313
1314 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1315 " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
1316 TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
1317 TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
1318 /*
1319 * Update LUN maps for dynamically added initiators when
1320 * generate_node_acl is enabled.
1321 */
1322 if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
1323 struct se_node_acl *acl;
1324 spin_lock_bh(&tpg->acl_node_lock);
1325 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1326 if (acl->dynamic_node_acl) {
1327 spin_unlock_bh(&tpg->acl_node_lock);
1328 core_tpg_add_node_to_devs(acl, tpg);
1329 spin_lock_bh(&tpg->acl_node_lock);
1330 }
1331 }
1332 spin_unlock_bh(&tpg->acl_node_lock);
1333 }
1334
1335 return lun_p;
1336}
1337
1338/* core_dev_del_lun():
1339 *
1340 *
1341 */
1342int core_dev_del_lun(
1343 struct se_portal_group *tpg,
1344 u32 unpacked_lun)
1345{
1346 struct se_lun *lun;
1347 int ret = 0;
1348
1349 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
1350 if (!(lun))
1351 return ret;
1352
1353 core_tpg_post_dellun(tpg, lun);
1354
1355 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1356 " device object\n", TPG_TFO(tpg)->get_fabric_name(),
1357 TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
1358 TPG_TFO(tpg)->get_fabric_name());
1359
1360 return 0;
1361}
1362
1363struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1364{
1365 struct se_lun *lun;
1366
1367 spin_lock(&tpg->tpg_lun_lock);
1368 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1369 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1370 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1371 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1372 TRANSPORT_MAX_LUNS_PER_TPG-1,
1373 TPG_TFO(tpg)->tpg_get_tag(tpg));
1374 spin_unlock(&tpg->tpg_lun_lock);
1375 return NULL;
1376 }
1377 lun = &tpg->tpg_lun_list[unpacked_lun];
1378
1379 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1380 printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
1381 " Target Portal Group: %hu, ignoring request.\n",
1382 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1383 TPG_TFO(tpg)->tpg_get_tag(tpg));
1384 spin_unlock(&tpg->tpg_lun_lock);
1385 return NULL;
1386 }
1387 spin_unlock(&tpg->tpg_lun_lock);
1388
1389 return lun;
1390}
1391
1392/* core_dev_get_lun():
1393 *
1394 *
1395 */
1396static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1397{
1398 struct se_lun *lun;
1399
1400 spin_lock(&tpg->tpg_lun_lock);
1401 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1402 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1403 "_TPG-1: %u for Target Portal Group: %hu\n",
1404 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1405 TRANSPORT_MAX_LUNS_PER_TPG-1,
1406 TPG_TFO(tpg)->tpg_get_tag(tpg));
1407 spin_unlock(&tpg->tpg_lun_lock);
1408 return NULL;
1409 }
1410 lun = &tpg->tpg_lun_list[unpacked_lun];
1411
1412 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1413 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
1414 " Target Portal Group: %hu, ignoring request.\n",
1415 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1416 TPG_TFO(tpg)->tpg_get_tag(tpg));
1417 spin_unlock(&tpg->tpg_lun_lock);
1418 return NULL;
1419 }
1420 spin_unlock(&tpg->tpg_lun_lock);
1421
1422 return lun;
1423}
1424
1425struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1426 struct se_portal_group *tpg,
1427 u32 mapped_lun,
1428 char *initiatorname,
1429 int *ret)
1430{
1431 struct se_lun_acl *lacl;
1432 struct se_node_acl *nacl;
1433
1434 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1435 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
1436 TPG_TFO(tpg)->get_fabric_name());
1437 *ret = -EOVERFLOW;
1438 return NULL;
1439 }
1440 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1441 if (!(nacl)) {
1442 *ret = -EINVAL;
1443 return NULL;
1444 }
1445 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1446 if (!(lacl)) {
1447 printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
1448 *ret = -ENOMEM;
1449 return NULL;
1450 }
1451
1452 INIT_LIST_HEAD(&lacl->lacl_list);
1453 lacl->mapped_lun = mapped_lun;
1454 lacl->se_lun_nacl = nacl;
1455 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1456
1457 return lacl;
1458}
1459
1460int core_dev_add_initiator_node_lun_acl(
1461 struct se_portal_group *tpg,
1462 struct se_lun_acl *lacl,
1463 u32 unpacked_lun,
1464 u32 lun_access)
1465{
1466 struct se_lun *lun;
1467 struct se_node_acl *nacl;
1468
1469 lun = core_dev_get_lun(tpg, unpacked_lun);
1470 if (!(lun)) {
1471 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
1472 " Target Portal Group: %hu, ignoring request.\n",
1473 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1474 TPG_TFO(tpg)->tpg_get_tag(tpg));
1475 return -EINVAL;
1476 }
1477
1478 nacl = lacl->se_lun_nacl;
1479 if (!(nacl))
1480 return -EINVAL;
1481
1482 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1483 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1484 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1485
1486 lacl->se_lun = lun;
1487
1488 if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
1489 lun_access, nacl, tpg, 1) < 0)
1490 return -EINVAL;
1491
1492 spin_lock(&lun->lun_acl_lock);
1493 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1494 atomic_inc(&lun->lun_acl_count);
1495 smp_mb__after_atomic_inc();
1496 spin_unlock(&lun->lun_acl_lock);
1497
1498 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1499 " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
1500 TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1501 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1502 lacl->initiatorname);
1503 /*
1504 * Check to see if there are any existing persistent reservation APTPL
1505 * pre-registrations that need to be enabled for this LUN ACL..
1506 */
1507 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1508 return 0;
1509}
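/*
 * The two functions above are used as a pair by the generic fabric configfs
 * code below: target_fabric_make_mappedlun() allocates the ACL via
 * core_dev_init_initiator_node_lun_acl(), and the later LUN symlink
 * (target_fabric_mappedlun_link()) activates it via
 * core_dev_add_initiator_node_lun_acl().  A minimal sketch of that order:
 *
 *	int ret;
 *	struct se_lun_acl *lacl;
 *
 *	lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
 *						initiatorname, &ret);
 *	if (!lacl)
 *		return ret;
 *	ret = core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
 *					TRANSPORT_LUNFLAGS_READ_WRITE);
 */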
1510
1511/* core_dev_del_initiator_node_lun_acl():
1512 *
1513 *
1514 */
1515int core_dev_del_initiator_node_lun_acl(
1516 struct se_portal_group *tpg,
1517 struct se_lun *lun,
1518 struct se_lun_acl *lacl)
1519{
1520 struct se_node_acl *nacl;
1521
1522 nacl = lacl->se_lun_nacl;
1523 if (!(nacl))
1524 return -EINVAL;
1525
1526 spin_lock(&lun->lun_acl_lock);
1527 list_del(&lacl->lacl_list);
1528 atomic_dec(&lun->lun_acl_count);
1529 smp_mb__after_atomic_dec();
1530 spin_unlock(&lun->lun_acl_lock);
1531
1532 core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
1533 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
1534
1535 lacl->se_lun = NULL;
1536
1537 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1538 " InitiatorNode: %s Mapped LUN: %u\n",
1539 TPG_TFO(tpg)->get_fabric_name(),
1540 TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
1541 lacl->initiatorname, lacl->mapped_lun);
1542
1543 return 0;
1544}
1545
1546void core_dev_free_initiator_node_lun_acl(
1547 struct se_portal_group *tpg,
1548 struct se_lun_acl *lacl)
1549{
1550 printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1551 " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
1552 TPG_TFO(tpg)->tpg_get_tag(tpg),
1553 TPG_TFO(tpg)->get_fabric_name(),
1554 lacl->initiatorname, lacl->mapped_lun);
1555
1556 kfree(lacl);
1557}
1558
1559int core_dev_setup_virtual_lun0(void)
1560{
1561 struct se_hba *hba;
1562 struct se_device *dev;
1563 struct se_subsystem_dev *se_dev = NULL;
1564 struct se_subsystem_api *t;
1565 char buf[16];
1566 int ret;
1567
1568 hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
1569 if (IS_ERR(hba))
1570 return PTR_ERR(hba);
1571
1572 se_global->g_lun0_hba = hba;
1573 t = hba->transport;
1574
1575 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1576 if (!(se_dev)) {
1577 printk(KERN_ERR "Unable to allocate memory for"
1578 " struct se_subsystem_dev\n");
1579 ret = -ENOMEM;
1580 goto out;
1581 }
1582 INIT_LIST_HEAD(&se_dev->g_se_dev_list);
1583 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1584 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1585 INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
1586 INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
1587 spin_lock_init(&se_dev->t10_reservation.registration_lock);
1588 spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
1589 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1590 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1591 spin_lock_init(&se_dev->se_dev_lock);
1592 se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1593 se_dev->t10_wwn.t10_sub_dev = se_dev;
1594 se_dev->t10_alua.t10_sub_dev = se_dev;
1595 se_dev->se_dev_attrib.da_sub_dev = se_dev;
1596 se_dev->se_dev_hba = hba;
1597
1598 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1599 if (!(se_dev->se_dev_su_ptr)) {
1600 printk(KERN_ERR "Unable to locate subsystem dependent pointer"
1601 " from allocate_virtdevice()\n");
1602 ret = -ENOMEM;
1603 goto out;
1604 }
1605 se_global->g_lun0_su_dev = se_dev;
1606
1607 memset(buf, 0, 16);
1608 sprintf(buf, "rd_pages=8");
1609 t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1610
1611 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1612 if (!(dev) || IS_ERR(dev)) {
1613 ret = -ENOMEM;
1614 goto out;
1615 }
1616 se_dev->se_dev_ptr = dev;
1617 se_global->g_lun0_dev = dev;
1618
1619 return 0;
1620out:
1621 se_global->g_lun0_su_dev = NULL;
1622 kfree(se_dev);
1623 if (se_global->g_lun0_hba) {
1624 core_delete_hba(se_global->g_lun0_hba);
1625 se_global->g_lun0_hba = NULL;
1626 }
1627 return ret;
1628}
1629
1630
1631void core_dev_release_virtual_lun0(void)
1632{
1633 struct se_hba *hba = se_global->g_lun0_hba;
1634 struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
1635
1636 if (!(hba))
1637 return;
1638
1639 if (se_global->g_lun0_dev)
1640 se_free_virtual_device(se_global->g_lun0_dev, hba);
1641
1642 kfree(su_dev);
1643 core_delete_hba(hba);
1644}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 000000000000..07ab5a3bb8e8
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,1233 @@
1/*******************************************************************************
2* Filename: target_core_fabric_configfs.c
3 *
4 * This file contains generic fabric module configfs infrastructure for
5 * TCM v4.x code
6 *
7 * Copyright (c) 2010,2011 Rising Tide Systems
8 * Copyright (c) 2010,2011 Linux-iSCSI.org
9 *
10 * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
11*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/version.h>
26#include <generated/utsrelease.h>
27#include <linux/utsname.h>
28#include <linux/init.h>
29#include <linux/fs.h>
30#include <linux/namei.h>
31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/delay.h>
34#include <linux/unistd.h>
35#include <linux/string.h>
36#include <linux/syscalls.h>
37#include <linux/configfs.h>
38
39#include <target/target_core_base.h>
40#include <target/target_core_device.h>
41#include <target/target_core_tpg.h>
42#include <target/target_core_transport.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_fabric_configfs.h>
45#include <target/target_core_configfs.h>
46#include <target/configfs_macros.h>
47
48#include "target_core_alua.h"
49#include "target_core_hba.h"
50#include "target_core_pr.h"
51#include "target_core_stat.h"
52
53#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
54static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
55{ \
56 struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
57 struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
58 \
59 cit->ct_item_ops = _item_ops; \
60 cit->ct_group_ops = _group_ops; \
61 cit->ct_attrs = _attrs; \
62 cit->ct_owner = tf->tf_module; \
63 printk("Setup generic %s\n", __stringify(_name)); \
64}
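/*
 * Each TF_CIT_SETUP() invocation below expands to a small setup function for
 * one config_item_type in the per-fabric template.  For example,
 * TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
 * target_fabric_mappedlun_attrs) expands to roughly:
 *
 *	static void target_fabric_setup_tpg_mappedlun_cit(
 *		struct target_fabric_configfs *tf)
 *	{
 *		struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;
 *		struct config_item_type *cit = &tfc->tfc_tpg_mappedlun_cit;
 *
 *		cit->ct_item_ops = &target_fabric_mappedlun_item_ops;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = target_fabric_mappedlun_attrs;
 *		cit->ct_owner = tf->tf_module;
 *	}
 */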
65
66/* Start of tfc_tpg_mappedlun_cit */
67
68static int target_fabric_mappedlun_link(
69 struct config_item *lun_acl_ci,
70 struct config_item *lun_ci)
71{
72 struct se_dev_entry *deve;
73 struct se_lun *lun = container_of(to_config_group(lun_ci),
74 struct se_lun, lun_group);
75 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
76 struct se_lun_acl, se_lun_group);
77 struct se_portal_group *se_tpg;
78 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
79 int ret = 0, lun_access;
80 /*
81 * Ensure that the source port exists
82 */
83 if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
84 printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
85 "_tpg does not exist\n");
86 return -EINVAL;
87 }
88 se_tpg = lun->lun_sep->sep_tpg;
89
90 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
91 tpg_ci = &nacl_ci->ci_group->cg_item;
92 wwn_ci = &tpg_ci->ci_group->cg_item;
93 tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
94 wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
95 /*
96 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
97 */
98 if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
99 printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
100 config_item_name(wwn_ci));
101 return -EINVAL;
102 }
103 if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
104 printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
105 " TPGT: %s\n", config_item_name(wwn_ci),
106 config_item_name(tpg_ci));
107 return -EINVAL;
108 }
109 /*
110 * If this struct se_node_acl was dynamically generated with
111 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
112 * which will be write protected (READ-ONLY) when
113 * tpg_1/attrib/demo_mode_write_protect=1
114 */
115 spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
116 deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
117 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
118 lun_access = deve->lun_flags;
119 else
120 lun_access =
121 (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
122 se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
123 TRANSPORT_LUNFLAGS_READ_WRITE;
124 spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
125 /*
126 * Determine the actual mapped LUN value the user wants.
127 *
128 * This is the value the SCSI Initiator actually sees for
129 * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
130 */
131 ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
132 lun->unpacked_lun, lun_access);
133
134 return (ret < 0) ? -EINVAL : 0;
135}
136
137static int target_fabric_mappedlun_unlink(
138 struct config_item *lun_acl_ci,
139 struct config_item *lun_ci)
140{
141 struct se_lun *lun;
142 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
143 struct se_lun_acl, se_lun_group);
144 struct se_node_acl *nacl = lacl->se_lun_nacl;
145 struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
146 struct se_portal_group *se_tpg;
147 /*
148 * Determine if the underlying MappedLUN has already been released..
149 */
150 if (!(deve->se_lun))
151 return 0;
152
153 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
154 se_tpg = lun->lun_sep->sep_tpg;
155
156 core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
157 return 0;
158}
159
160CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
161#define TCM_MAPPEDLUN_ATTR(_name, _mode) \
162static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
163 __CONFIGFS_EATTR(_name, _mode, \
164 target_fabric_mappedlun_show_##_name, \
165 target_fabric_mappedlun_store_##_name);
166
167static ssize_t target_fabric_mappedlun_show_write_protect(
168 struct se_lun_acl *lacl,
169 char *page)
170{
171 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
172 struct se_dev_entry *deve;
173 ssize_t len;
174
175 spin_lock_irq(&se_nacl->device_list_lock);
176 deve = &se_nacl->device_list[lacl->mapped_lun];
177 len = sprintf(page, "%d\n",
178 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
179 1 : 0);
180 spin_unlock_irq(&se_nacl->device_list_lock);
181
182 return len;
183}
184
185static ssize_t target_fabric_mappedlun_store_write_protect(
186 struct se_lun_acl *lacl,
187 const char *page,
188 size_t count)
189{
190 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
191 struct se_portal_group *se_tpg = se_nacl->se_tpg;
192 unsigned long op;
193
194 if (strict_strtoul(page, 0, &op))
195 return -EINVAL;
196
197 if ((op != 1) && (op != 0))
198 return -EINVAL;
199
200 core_update_device_list_access(lacl->mapped_lun, (op) ?
201 TRANSPORT_LUNFLAGS_READ_ONLY :
202 TRANSPORT_LUNFLAGS_READ_WRITE,
203 lacl->se_lun_nacl);
204
205 printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
206 " Mapped LUN: %u Write Protect bit to %s\n",
207 TPG_TFO(se_tpg)->get_fabric_name(),
208 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
209
210 return count;
211
212}
213
214TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
215
216CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
217
218static void target_fabric_mappedlun_release(struct config_item *item)
219{
220 struct se_lun_acl *lacl = container_of(to_config_group(item),
221 struct se_lun_acl, se_lun_group);
222 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
223
224 core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
225}
226
227static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
228 &target_fabric_mappedlun_write_protect.attr,
229 NULL,
230};
231
232static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
233 .release = target_fabric_mappedlun_release,
234 .show_attribute = target_fabric_mappedlun_attr_show,
235 .store_attribute = target_fabric_mappedlun_attr_store,
236 .allow_link = target_fabric_mappedlun_link,
237 .drop_link = target_fabric_mappedlun_unlink,
238};
239
240TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
241 target_fabric_mappedlun_attrs);
242
243/* End of tfc_tpg_mappedlun_cit */
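/*
 * With the item_ops above wired into the mappedlun config_item_type, the
 * write_protect attribute shows up under the fabric configfs tree, e.g. for
 * iSCSI (path assumes the usual /sys/kernel/config mount point):
 *
 *	echo 1 > /sys/kernel/config/target/iscsi/$IQN/tpgt_$TPGT/acls/$INITIATOR/lun_$ID/write_protect
 *
 * target_fabric_mappedlun_store_write_protect() accepts only 0 or 1 and
 * toggles the mapped LUN between READ_WRITE and READ_ONLY access.
 */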
244
245/* Start of tfc_tpg_mappedlun_port_cit */
246
247static struct config_group *target_core_mappedlun_stat_mkdir(
248 struct config_group *group,
249 const char *name)
250{
251 return ERR_PTR(-ENOSYS);
252}
253
254static void target_core_mappedlun_stat_rmdir(
255 struct config_group *group,
256 struct config_item *item)
257{
258 return;
259}
260
261static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
262 .make_group = target_core_mappedlun_stat_mkdir,
263 .drop_item = target_core_mappedlun_stat_rmdir,
264};
265
266TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
267 NULL);
268
269/* End of tfc_tpg_mappedlun_port_cit */
270
271/* Start of tfc_tpg_nacl_attrib_cit */
272
273CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
274
275static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
276 .show_attribute = target_fabric_nacl_attrib_attr_show,
277 .store_attribute = target_fabric_nacl_attrib_attr_store,
278};
279
280TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
281
282/* End of tfc_tpg_nacl_attrib_cit */
283
284/* Start of tfc_tpg_nacl_auth_cit */
285
286CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
287
288static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
289 .show_attribute = target_fabric_nacl_auth_attr_show,
290 .store_attribute = target_fabric_nacl_auth_attr_store,
291};
292
293TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
294
295/* End of tfc_tpg_nacl_auth_cit */
296
297/* Start of tfc_tpg_nacl_param_cit */
298
299CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
300
301static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
302 .show_attribute = target_fabric_nacl_param_attr_show,
303 .store_attribute = target_fabric_nacl_param_attr_store,
304};
305
306TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
307
308/* End of tfc_tpg_nacl_param_cit */
309
310/* Start of tfc_tpg_nacl_base_cit */
311
312CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
313
314static struct config_group *target_fabric_make_mappedlun(
315 struct config_group *group,
316 const char *name)
317{
318 struct se_node_acl *se_nacl = container_of(group,
319 struct se_node_acl, acl_group);
320 struct se_portal_group *se_tpg = se_nacl->se_tpg;
321 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
322 struct se_lun_acl *lacl;
323 struct config_item *acl_ci;
324 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
325 char *buf;
326 unsigned long mapped_lun;
327 int ret = 0;
328
329 acl_ci = &group->cg_item;
330 if (!(acl_ci)) {
331 printk(KERN_ERR "Unable to locate acl_ci\n");
332 return NULL;
333 }
334
335 buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
336 if (!(buf)) {
337 printk(KERN_ERR "Unable to allocate memory for name buf\n");
338 return ERR_PTR(-ENOMEM);
339 }
340 snprintf(buf, strlen(name) + 1, "%s", name);
341 /*
342 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
343 */
344 if (strstr(buf, "lun_") != buf) {
345 printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
346 " name: %s\n", buf, name);
347 ret = -EINVAL;
348 goto out;
349 }
350 /*
351 * Determine the Mapped LUN value. This is what the SCSI Initiator
352 * Port will actually see.
353 */
354 if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
355 ret = -EINVAL;
356 goto out;
357 }
358
359 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
360 config_item_name(acl_ci), &ret);
361 if (!(lacl)) {
362 ret = -EINVAL;
363 goto out;
364 }
365
366 lacl_cg = &lacl->se_lun_group;
367 lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
368 GFP_KERNEL);
369 if (!lacl_cg->default_groups) {
370 printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
371 ret = -ENOMEM;
372 goto out;
373 }
374
375 config_group_init_type_name(&lacl->se_lun_group, name,
376 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
377 config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
378 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
379 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
380 lacl_cg->default_groups[1] = NULL;
381
382 ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
383 ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
384 GFP_KERNEL);
385 if (!ml_stat_grp->default_groups) {
386 printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
387 ret = -ENOMEM;
388 goto out;
389 }
390 target_stat_setup_mappedlun_default_groups(lacl);
391
392 kfree(buf);
393 return &lacl->se_lun_group;
394out:
395 if (lacl_cg)
396 kfree(lacl_cg->default_groups);
397 kfree(buf);
398 return ERR_PTR(ret);
399}
400
401static void target_fabric_drop_mappedlun(
402 struct config_group *group,
403 struct config_item *item)
404{
405 struct se_lun_acl *lacl = container_of(to_config_group(item),
406 struct se_lun_acl, se_lun_group);
407 struct config_item *df_item;
408 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
409 int i;
410
411 ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
412 for (i = 0; ml_stat_grp->default_groups[i]; i++) {
413 df_item = &ml_stat_grp->default_groups[i]->cg_item;
414 ml_stat_grp->default_groups[i] = NULL;
415 config_item_put(df_item);
416 }
417 kfree(ml_stat_grp->default_groups);
418
419 lacl_cg = &lacl->se_lun_group;
420 for (i = 0; lacl_cg->default_groups[i]; i++) {
421 df_item = &lacl_cg->default_groups[i]->cg_item;
422 lacl_cg->default_groups[i] = NULL;
423 config_item_put(df_item);
424 }
425 kfree(lacl_cg->default_groups);
426
427 config_item_put(item);
428}
429
430static void target_fabric_nacl_base_release(struct config_item *item)
431{
432 struct se_node_acl *se_nacl = container_of(to_config_group(item),
433 struct se_node_acl, acl_group);
434 struct se_portal_group *se_tpg = se_nacl->se_tpg;
435 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
436
437 tf->tf_ops.fabric_drop_nodeacl(se_nacl);
438}
439
440static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
441 .release = target_fabric_nacl_base_release,
442 .show_attribute = target_fabric_nacl_base_attr_show,
443 .store_attribute = target_fabric_nacl_base_attr_store,
444};
445
446static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
447 .make_group = target_fabric_make_mappedlun,
448 .drop_item = target_fabric_drop_mappedlun,
449};
450
451TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
452 &target_fabric_nacl_base_group_ops, NULL);
453
454/* End of tfc_tpg_nacl_base_cit */
455
456/* Start of tfc_node_fabric_stats_cit */
457/*
458 * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
459 * to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
460 */
461TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
462
463/* End of tfc_node_fabric_stats_cit */
464
465/* Start of tfc_tpg_nacl_cit */
466
467static struct config_group *target_fabric_make_nodeacl(
468 struct config_group *group,
469 const char *name)
470{
471 struct se_portal_group *se_tpg = container_of(group,
472 struct se_portal_group, tpg_acl_group);
473 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
474 struct se_node_acl *se_nacl;
475 struct config_group *nacl_cg;
476
477 if (!(tf->tf_ops.fabric_make_nodeacl)) {
478 printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
479 return ERR_PTR(-ENOSYS);
480 }
481
482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
483 if (IS_ERR(se_nacl))
484 return ERR_PTR(PTR_ERR(se_nacl));
485
486 nacl_cg = &se_nacl->acl_group;
487 nacl_cg->default_groups = se_nacl->acl_default_groups;
488 nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
489 nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
490 nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
491 nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
492 nacl_cg->default_groups[4] = NULL;
493
494 config_group_init_type_name(&se_nacl->acl_group, name,
495 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
496 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
497 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
498 config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
499 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
500 config_group_init_type_name(&se_nacl->acl_param_group, "param",
501 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
502 config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
503 "fabric_statistics",
504 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
505
506 return &se_nacl->acl_group;
507}
508
509static void target_fabric_drop_nodeacl(
510 struct config_group *group,
511 struct config_item *item)
512{
513 struct se_node_acl *se_nacl = container_of(to_config_group(item),
514 struct se_node_acl, acl_group);
515 struct config_item *df_item;
516 struct config_group *nacl_cg;
517 int i;
518
519 nacl_cg = &se_nacl->acl_group;
520 for (i = 0; nacl_cg->default_groups[i]; i++) {
521 df_item = &nacl_cg->default_groups[i]->cg_item;
522 nacl_cg->default_groups[i] = NULL;
523 config_item_put(df_item);
524 }
525 /*
526 * struct se_node_acl free is done in target_fabric_nacl_base_release()
527 */
528 config_item_put(item);
529}
530
531static struct configfs_group_operations target_fabric_nacl_group_ops = {
532 .make_group = target_fabric_make_nodeacl,
533 .drop_item = target_fabric_drop_nodeacl,
534};
535
536TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
537
538/* End of tfc_tpg_nacl_cit */
539
540/* Start of tfc_tpg_np_base_cit */
541
542CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
543
544static void target_fabric_np_base_release(struct config_item *item)
545{
546 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
547 struct se_tpg_np, tpg_np_group);
548 struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
549 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
550
551 tf->tf_ops.fabric_drop_np(se_tpg_np);
552}
553
554static struct configfs_item_operations target_fabric_np_base_item_ops = {
555 .release = target_fabric_np_base_release,
556 .show_attribute = target_fabric_np_base_attr_show,
557 .store_attribute = target_fabric_np_base_attr_store,
558};
559
560TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
561
562/* End of tfc_tpg_np_base_cit */
563
564/* Start of tfc_tpg_np_cit */
565
566static struct config_group *target_fabric_make_np(
567 struct config_group *group,
568 const char *name)
569{
570 struct se_portal_group *se_tpg = container_of(group,
571 struct se_portal_group, tpg_np_group);
572 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
573 struct se_tpg_np *se_tpg_np;
574
575 if (!(tf->tf_ops.fabric_make_np)) {
576 printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
577 return ERR_PTR(-ENOSYS);
578 }
579
580 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
581 if (!(se_tpg_np) || IS_ERR(se_tpg_np))
582 return ERR_PTR(-EINVAL);
583
584 se_tpg_np->tpg_np_parent = se_tpg;
585 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
586 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
587
588 return &se_tpg_np->tpg_np_group;
589}
590
591static void target_fabric_drop_np(
592 struct config_group *group,
593 struct config_item *item)
594{
595 /*
596 * struct se_tpg_np is released via target_fabric_np_base_release()
597 */
598 config_item_put(item);
599}
600
601static struct configfs_group_operations target_fabric_np_group_ops = {
602 .make_group = &target_fabric_make_np,
603 .drop_item = &target_fabric_drop_np,
604};
605
606TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
607
608/* End of tfc_tpg_np_cit */
609
610/* Start of tfc_tpg_port_cit */
611
612CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
613#define TCM_PORT_ATTR(_name, _mode) \
614static struct target_fabric_port_attribute target_fabric_port_##_name = \
615 __CONFIGFS_EATTR(_name, _mode, \
616 target_fabric_port_show_attr_##_name, \
617 target_fabric_port_store_attr_##_name);
618
619#define TCM_PORT_ATTR_RO(_name) \
620 __CONFIGFS_EATTR_RO(_name, \
621 target_fabric_port_show_attr_##_name);
622
623/*
624 * alua_tg_pt_gp
625 */
626static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
627 struct se_lun *lun,
628 char *page)
629{
630 if (!(lun))
631 return -ENODEV;
632
633 if (!(lun->lun_sep))
634 return -ENODEV;
635
636 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
637}
638
639static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
640 struct se_lun *lun,
641 const char *page,
642 size_t count)
643{
644 if (!(lun))
645 return -ENODEV;
646
647 if (!(lun->lun_sep))
648 return -ENODEV;
649
650 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
651}
652
653TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
654
655/*
656 * alua_tg_pt_offline
657 */
658static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
659 struct se_lun *lun,
660 char *page)
661{
662 if (!(lun))
663 return -ENODEV;
664
665 if (!(lun->lun_sep))
666 return -ENODEV;
667
668 return core_alua_show_offline_bit(lun, page);
669}
670
671static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
672 struct se_lun *lun,
673 const char *page,
674 size_t count)
675{
676 if (!(lun))
677 return -ENODEV;
678
679 if (!(lun->lun_sep))
680 return -ENODEV;
681
682 return core_alua_store_offline_bit(lun, page, count);
683}
684
685TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
686
687/*
688 * alua_tg_pt_status
689 */
690static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
691 struct se_lun *lun,
692 char *page)
693{
694 if (!(lun))
695 return -ENODEV;
696
697 if (!(lun->lun_sep))
698 return -ENODEV;
699
700 return core_alua_show_secondary_status(lun, page);
701}
702
703static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
704 struct se_lun *lun,
705 const char *page,
706 size_t count)
707{
708 if (!(lun))
709 return -ENODEV;
710
711 if (!(lun->lun_sep))
712 return -ENODEV;
713
714 return core_alua_store_secondary_status(lun, page, count);
715}
716
717TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
718
719/*
720 * alua_tg_pt_write_md
721 */
722static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
723 struct se_lun *lun,
724 char *page)
725{
726 if (!(lun))
727 return -ENODEV;
728
729 if (!(lun->lun_sep))
730 return -ENODEV;
731
732 return core_alua_show_secondary_write_metadata(lun, page);
733}
734
735static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
736 struct se_lun *lun,
737 const char *page,
738 size_t count)
739{
740 if (!(lun))
741 return -ENODEV;
742
743 if (!(lun->lun_sep))
744 return -ENODEV;
745
746 return core_alua_store_secondary_write_metadata(lun, page, count);
747}
748
749TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
750
751
752static struct configfs_attribute *target_fabric_port_attrs[] = {
753 &target_fabric_port_alua_tg_pt_gp.attr,
754 &target_fabric_port_alua_tg_pt_offline.attr,
755 &target_fabric_port_alua_tg_pt_status.attr,
756 &target_fabric_port_alua_tg_pt_write_md.attr,
757 NULL,
758};
759
760CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
761
762static int target_fabric_port_link(
763 struct config_item *lun_ci,
764 struct config_item *se_dev_ci)
765{
766 struct config_item *tpg_ci;
767 struct se_device *dev;
768 struct se_lun *lun = container_of(to_config_group(lun_ci),
769 struct se_lun, lun_group);
770 struct se_lun *lun_p;
771 struct se_portal_group *se_tpg;
772 struct se_subsystem_dev *se_dev = container_of(
773 to_config_group(se_dev_ci), struct se_subsystem_dev,
774 se_dev_group);
775 struct target_fabric_configfs *tf;
776 int ret;
777
778 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
779 se_tpg = container_of(to_config_group(tpg_ci),
780 struct se_portal_group, tpg_group);
781 tf = se_tpg->se_tpg_wwn->wwn_tf;
782
783 if (lun->lun_se_dev != NULL) {
784 printk(KERN_ERR "Port Symlink already exists\n");
785 return -EEXIST;
786 }
787
788 dev = se_dev->se_dev_ptr;
789 if (!(dev)) {
790 printk(KERN_ERR "Unable to locate struct se_device pointer from"
791 " %s\n", config_item_name(se_dev_ci));
792 ret = -ENODEV;
793 goto out;
794 }
795
796 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
797 lun->unpacked_lun);
798 if ((IS_ERR(lun_p)) || !(lun_p)) {
799 printk(KERN_ERR "core_dev_add_lun() failed\n");
800 ret = -EINVAL;
801 goto out;
802 }
803
804 if (tf->tf_ops.fabric_post_link) {
805 /*
806 * Call the optional fabric_post_link() to allow a
807 * fabric module to setup any additional state once
808 * core_dev_add_lun() has been called..
809 */
810 tf->tf_ops.fabric_post_link(se_tpg, lun);
811 }
812
813 return 0;
814out:
815 return ret;
816}
817
818static int target_fabric_port_unlink(
819 struct config_item *lun_ci,
820 struct config_item *se_dev_ci)
821{
822 struct se_lun *lun = container_of(to_config_group(lun_ci),
823 struct se_lun, lun_group);
824 struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
825 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
826
827 if (tf->tf_ops.fabric_pre_unlink) {
828 /*
829 * Call the optional fabric_pre_unlink() to allow a
830 * fabric module to release any additional state before
831 * core_dev_del_lun() is called.
832 */
833 tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
834 }
835
836 core_dev_del_lun(se_tpg, lun->unpacked_lun);
837 return 0;
838}
839
840static struct configfs_item_operations target_fabric_port_item_ops = {
841 .show_attribute = target_fabric_port_attr_show,
842 .store_attribute = target_fabric_port_attr_store,
843 .allow_link = target_fabric_port_link,
844 .drop_link = target_fabric_port_unlink,
845};
846
847TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
848
849/* End of tfc_tpg_port_cit */
850
851/* Start of tfc_tpg_port_stat_cit */
852
853static struct config_group *target_core_port_stat_mkdir(
854 struct config_group *group,
855 const char *name)
856{
857 return ERR_PTR(-ENOSYS);
858}
859
860static void target_core_port_stat_rmdir(
861 struct config_group *group,
862 struct config_item *item)
863{
864 return;
865}
866
867static struct configfs_group_operations target_fabric_port_stat_group_ops = {
868 .make_group = target_core_port_stat_mkdir,
869 .drop_item = target_core_port_stat_rmdir,
870};
871
872TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
873
874/* End of tfc_tpg_port_stat_cit */
875
876/* Start of tfc_tpg_lun_cit */
877
878static struct config_group *target_fabric_make_lun(
879 struct config_group *group,
880 const char *name)
881{
882 struct se_lun *lun;
883 struct se_portal_group *se_tpg = container_of(group,
884 struct se_portal_group, tpg_lun_group);
885 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
886 struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
887 unsigned long unpacked_lun;
888 int errno;
889
890 if (strstr(name, "lun_") != name) {
891 printk(KERN_ERR "Unable to locate \"lun_\" in"
892 " \"lun_$LUN_NUMBER\"\n");
893 return ERR_PTR(-EINVAL);
894 }
895 if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
896 return ERR_PTR(-EINVAL);
897
898 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
899 if (!(lun))
900 return ERR_PTR(-EINVAL);
901
902 lun_cg = &lun->lun_group;
903 lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
904 GFP_KERNEL);
905 if (!lun_cg->default_groups) {
906 printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
907 return ERR_PTR(-ENOMEM);
908 }
909
910 config_group_init_type_name(&lun->lun_group, name,
911 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
912 config_group_init_type_name(&lun->port_stat_grps.stat_group,
913 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
914 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
915 lun_cg->default_groups[1] = NULL;
916
917 port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
918 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
919 GFP_KERNEL);
920 if (!port_stat_grp->default_groups) {
921 printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
922 errno = -ENOMEM;
923 goto out;
924 }
925 target_stat_setup_port_default_groups(lun);
926
927 return &lun->lun_group;
928out:
929 if (lun_cg)
930 kfree(lun_cg->default_groups);
931 return ERR_PTR(errno);
932}
933
934static void target_fabric_drop_lun(
935 struct config_group *group,
936 struct config_item *item)
937{
938 struct se_lun *lun = container_of(to_config_group(item),
939 struct se_lun, lun_group);
940 struct config_item *df_item;
941 struct config_group *lun_cg, *port_stat_grp;
942 int i;
943
944 port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
945 for (i = 0; port_stat_grp->default_groups[i]; i++) {
946 df_item = &port_stat_grp->default_groups[i]->cg_item;
947 port_stat_grp->default_groups[i] = NULL;
948 config_item_put(df_item);
949 }
950 kfree(port_stat_grp->default_groups);
951
952 lun_cg = &lun->lun_group;
953 for (i = 0; lun_cg->default_groups[i]; i++) {
954 df_item = &lun_cg->default_groups[i]->cg_item;
955 lun_cg->default_groups[i] = NULL;
956 config_item_put(df_item);
957 }
958 kfree(lun_cg->default_groups);
959
960 config_item_put(item);
961}
962
963static struct configfs_group_operations target_fabric_lun_group_ops = {
964 .make_group = &target_fabric_make_lun,
965 .drop_item = &target_fabric_drop_lun,
966};
967
968TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
969
970/* End of tfc_tpg_lun_cit */
971
972/* Start of tfc_tpg_attrib_cit */
973
974CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
975
976static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
977 .show_attribute = target_fabric_tpg_attrib_attr_show,
978 .store_attribute = target_fabric_tpg_attrib_attr_store,
979};
980
981TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
982
983/* End of tfc_tpg_attrib_cit */
984
985/* Start of tfc_tpg_param_cit */
986
987CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
988
989static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
990 .show_attribute = target_fabric_tpg_param_attr_show,
991 .store_attribute = target_fabric_tpg_param_attr_store,
992};
993
994TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
995
996/* End of tfc_tpg_param_cit */
997
998/* Start of tfc_tpg_base_cit */
999/*
1000 * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
1001 */
1002CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
1003
1004static void target_fabric_tpg_release(struct config_item *item)
1005{
1006 struct se_portal_group *se_tpg = container_of(to_config_group(item),
1007 struct se_portal_group, tpg_group);
1008 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1009 struct target_fabric_configfs *tf = wwn->wwn_tf;
1010
1011 tf->tf_ops.fabric_drop_tpg(se_tpg);
1012}
1013
1014static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
1015 .release = target_fabric_tpg_release,
1016 .show_attribute = target_fabric_tpg_attr_show,
1017 .store_attribute = target_fabric_tpg_attr_store,
1018};
1019
1020TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
1021
1022/* End of tfc_tpg_base_cit */
1023
1024/* Start of tfc_tpg_cit */
1025
1026static struct config_group *target_fabric_make_tpg(
1027 struct config_group *group,
1028 const char *name)
1029{
1030 struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
1031 struct target_fabric_configfs *tf = wwn->wwn_tf;
1032 struct se_portal_group *se_tpg;
1033
1034 if (!(tf->tf_ops.fabric_make_tpg)) {
1035 printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
1036 return ERR_PTR(-ENOSYS);
1037 }
1038
1039 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
1040 if (!(se_tpg) || IS_ERR(se_tpg))
1041 return ERR_PTR(-EINVAL);
1042 /*
1043 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
1044 */
1045 se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
1046 se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
1047 se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
1048 se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
1049 se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
1050 se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
1051 se_tpg->tpg_group.default_groups[5] = NULL;
1052
1053 config_group_init_type_name(&se_tpg->tpg_group, name,
1054 &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
1055 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
1056 &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
1057 config_group_init_type_name(&se_tpg->tpg_np_group, "np",
1058 &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
1059 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
1060 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
1061 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
1062 &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
1063 config_group_init_type_name(&se_tpg->tpg_param_group, "param",
1064 &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
1065
1066 return &se_tpg->tpg_group;
1067}
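For orientation, the five default groups wired up above become fixed child directories under every TPG directory the fabric creates in configfs. An illustrative layout (the fabric name, WWN and TPG tag are hypothetical; the actual names come from the fabric module):

    /sys/kernel/config/target/iscsi/iqn.2003-01.org.example:target0/tpgt_1/
        acls/
        attrib/
        lun/
        np/
        param/

LUNs are then created and removed under lun/ via the tfc_tpg_lun_cit group operations defined earlier, while attrib/ and param/ expose the per-TPG attributes registered by the fabric module.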
1068
1069static void target_fabric_drop_tpg(
1070 struct config_group *group,
1071 struct config_item *item)
1072{
1073 struct se_portal_group *se_tpg = container_of(to_config_group(item),
1074 struct se_portal_group, tpg_group);
1075 struct config_group *tpg_cg = &se_tpg->tpg_group;
1076 struct config_item *df_item;
1077 int i;
1078 /*
1079 * Release default groups, but do not release tpg_cg->default_groups
1080 * memory as it is statically allocated at se_tpg->tpg_default_groups.
1081 */
1082 for (i = 0; tpg_cg->default_groups[i]; i++) {
1083 df_item = &tpg_cg->default_groups[i]->cg_item;
1084 tpg_cg->default_groups[i] = NULL;
1085 config_item_put(df_item);
1086 }
1087
1088 config_item_put(item);
1089}
1090
1091static void target_fabric_release_wwn(struct config_item *item)
1092{
1093 struct se_wwn *wwn = container_of(to_config_group(item),
1094 struct se_wwn, wwn_group);
1095 struct target_fabric_configfs *tf = wwn->wwn_tf;
1096
1097 tf->tf_ops.fabric_drop_wwn(wwn);
1098}
1099
1100static struct configfs_item_operations target_fabric_tpg_item_ops = {
1101 .release = target_fabric_release_wwn,
1102};
1103
1104static struct configfs_group_operations target_fabric_tpg_group_ops = {
1105 .make_group = target_fabric_make_tpg,
1106 .drop_item = target_fabric_drop_tpg,
1107};
1108
1109TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
1110 NULL);
1111
1112/* End of tfc_tpg_cit */
1113
1114/* Start of tfc_wwn_fabric_stats_cit */
1115/*
1116 * This is used as a placeholder for struct se_wwn->fabric_stat_group
1117 * to allow fabrics access to ->fabric_stat_group->default_groups[]
1118 */
1119TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
1120
1121/* End of tfc_wwn_fabric_stats_cit */
1122
1123/* Start of tfc_wwn_cit */
1124
1125static struct config_group *target_fabric_make_wwn(
1126 struct config_group *group,
1127 const char *name)
1128{
1129 struct target_fabric_configfs *tf = container_of(group,
1130 struct target_fabric_configfs, tf_group);
1131 struct se_wwn *wwn;
1132
1133 if (!(tf->tf_ops.fabric_make_wwn)) {
1134 printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
1135 return ERR_PTR(-ENOSYS);
1136 }
1137
1138 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
1139 if (!(wwn) || IS_ERR(wwn))
1140 return ERR_PTR(-EINVAL);
1141
1142 wwn->wwn_tf = tf;
1143 /*
1144 * Setup default groups from pre-allocated wwn->wwn_default_groups
1145 */
1146 wwn->wwn_group.default_groups = wwn->wwn_default_groups;
1147 wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
1148 wwn->wwn_group.default_groups[1] = NULL;
1149
1150 config_group_init_type_name(&wwn->wwn_group, name,
1151 &TF_CIT_TMPL(tf)->tfc_tpg_cit);
1152 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
1153 &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
1154
1155 return &wwn->wwn_group;
1156}
1157
1158static void target_fabric_drop_wwn(
1159 struct config_group *group,
1160 struct config_item *item)
1161{
1162 struct se_wwn *wwn = container_of(to_config_group(item),
1163 struct se_wwn, wwn_group);
1164 struct config_item *df_item;
1165 struct config_group *cg = &wwn->wwn_group;
1166 int i;
1167
1168 for (i = 0; cg->default_groups[i]; i++) {
1169 df_item = &cg->default_groups[i]->cg_item;
1170 cg->default_groups[i] = NULL;
1171 config_item_put(df_item);
1172 }
1173
1174 config_item_put(item);
1175}
1176
1177static struct configfs_group_operations target_fabric_wwn_group_ops = {
1178 .make_group = target_fabric_make_wwn,
1179 .drop_item = target_fabric_drop_wwn,
1180};
1181/*
1182 * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
1183 */
1184CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
1185
1186static struct configfs_item_operations target_fabric_wwn_item_ops = {
1187 .show_attribute = target_fabric_wwn_attr_show,
1188 .store_attribute = target_fabric_wwn_attr_store,
1189};
1190
1191TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
1192
1193/* End of tfc_wwn_cit */
1194
1195/* Start of tfc_discovery_cit */
1196
1197CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
1198 tf_disc_group);
1199
1200static struct configfs_item_operations target_fabric_discovery_item_ops = {
1201 .show_attribute = target_fabric_discovery_attr_show,
1202 .store_attribute = target_fabric_discovery_attr_store,
1203};
1204
1205TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
1206
1207/* End of tfc_discovery_cit */
1208
1209int target_fabric_setup_cits(struct target_fabric_configfs *tf)
1210{
1211 target_fabric_setup_discovery_cit(tf);
1212 target_fabric_setup_wwn_cit(tf);
1213 target_fabric_setup_wwn_fabric_stats_cit(tf);
1214 target_fabric_setup_tpg_cit(tf);
1215 target_fabric_setup_tpg_base_cit(tf);
1216 target_fabric_setup_tpg_port_cit(tf);
1217 target_fabric_setup_tpg_port_stat_cit(tf);
1218 target_fabric_setup_tpg_lun_cit(tf);
1219 target_fabric_setup_tpg_np_cit(tf);
1220 target_fabric_setup_tpg_np_base_cit(tf);
1221 target_fabric_setup_tpg_attrib_cit(tf);
1222 target_fabric_setup_tpg_param_cit(tf);
1223 target_fabric_setup_tpg_nacl_cit(tf);
1224 target_fabric_setup_tpg_nacl_base_cit(tf);
1225 target_fabric_setup_tpg_nacl_attrib_cit(tf);
1226 target_fabric_setup_tpg_nacl_auth_cit(tf);
1227 target_fabric_setup_tpg_nacl_param_cit(tf);
1228 target_fabric_setup_tpg_nacl_stat_cit(tf);
1229 target_fabric_setup_tpg_mappedlun_cit(tf);
1230 target_fabric_setup_tpg_mappedlun_stat_cit(tf);
1231
1232 return 0;
1233}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 000000000000..1e193f324895
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
1/*******************************************************************************
2 * Filename: target_core_fabric_lib.c
3 *
4 * This file contains generic high level protocol identifier and PR
5 * handlers for TCM fabric modules
6 *
7 * Copyright (c) 2010 Rising Tide Systems, Inc.
8 * Copyright (c) 2010 Linux-iSCSI.org
9 *
10 * Nicholas A. Bellinger <nab@linux-iscsi.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 *
26 ******************************************************************************/
27
28#include <linux/string.h>
29#include <linux/ctype.h>
30#include <linux/spinlock.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_device.h>
36#include <target/target_core_transport.h>
37#include <target/target_core_fabric_lib.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_configfs.h>
40
41#include "target_core_hba.h"
42#include "target_core_pr.h"
43
44/*
45 * Handlers for Serial Attached SCSI (SAS)
46 */
47u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
48{
49 /*
50 * Return a SAS Serial SCSI Protocol identifier for loopback operations
51 * This is defined in section 7.5.1 Table 362 in spc4r17
52 */
53 return 0x6;
54}
55EXPORT_SYMBOL(sas_get_fabric_proto_ident);
56
57u32 sas_get_pr_transport_id(
58 struct se_portal_group *se_tpg,
59 struct se_node_acl *se_nacl,
60 struct t10_pr_registration *pr_reg,
61 int *format_code,
62 unsigned char *buf)
63{
64 unsigned char binary, *ptr;
65 int i;
66 u32 off = 4;
67 /*
68 * Set PROTOCOL IDENTIFIER to 6h for SAS
69 */
70 buf[0] = 0x06;
71 /*
72 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
73 * over SAS Serial SCSI Protocol
74 */
75	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */
76
77 for (i = 0; i < 16; i += 2) {
78 binary = transport_asciihex_to_binaryhex(&ptr[i]);
79 buf[off++] = binary;
80 }
81 /*
82 * The SAS Transport ID is a hardcoded 24-byte length
83 */
84 return 24;
85}
86EXPORT_SYMBOL(sas_get_pr_transport_id);
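As a sanity check on the buffer layout, the conversion above can be mirrored in plain userspace C. This is only an illustrative sketch, not kernel code: hex2byte() stands in for transport_asciihex_to_binaryhex(), and the NAA name is made up. It prints the 24-byte SAS TransportID with PROTOCOL IDENTIFIER 0x6 in byte 0 and the eight binary NAA bytes starting at offset 4.

#include <stdio.h>
#include <string.h>

/* Stand-in for transport_asciihex_to_binaryhex(): two ASCII hex chars -> one byte */
static unsigned char hex2byte(const char *p)
{
	unsigned char v = 0;
	int i;

	for (i = 0; i < 2; i++) {
		char c = p[i];

		v <<= 4;
		v |= (c <= '9') ? (c - '0') : ((c | 0x20) - 'a' + 10);
	}
	return v;
}

int main(void)
{
	/* Hypothetical SAS initiator name in the expected "naa." form */
	const char *initiatorname = "naa.5001405e4d9f8f55";
	unsigned char buf[24];
	int i, off = 4;

	memset(buf, 0, sizeof(buf));
	buf[0] = 0x06;			/* PROTOCOL IDENTIFIER: SAS */
	for (i = 0; i < 16; i += 2)	/* skip the "naa." prefix */
		buf[off++] = hex2byte(&initiatorname[4 + i]);

	for (i = 0; i < 24; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}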
87
88u32 sas_get_pr_transport_id_len(
89 struct se_portal_group *se_tpg,
90 struct se_node_acl *se_nacl,
91 struct t10_pr_registration *pr_reg,
92 int *format_code)
93{
94 *format_code = 0;
95 /*
96 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
97 * over SAS Serial SCSI Protocol
98 *
99 * The SAS Transport ID is a hardcoded 24-byte length
100 */
101 return 24;
102}
103EXPORT_SYMBOL(sas_get_pr_transport_id_len);
104
105/*
106 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
107 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
108 */
109char *sas_parse_pr_out_transport_id(
110 struct se_portal_group *se_tpg,
111 const char *buf,
112 u32 *out_tid_len,
113 char **port_nexus_ptr)
114{
115 /*
116 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
117 * for initiator ports using SCSI over SAS Serial SCSI Protocol
118 *
119	 * The TransportID for a SAS Initiator Port is a fixed 24 bytes,
120	 * and SAS does not contain an I_T nexus identifier, so we return
121	 * with *port_nexus_ptr set to NULL.
122 */
123 *port_nexus_ptr = NULL;
124 *out_tid_len = 24;
125
126 return (char *)&buf[4];
127}
128EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
129
130/*
131 * Handlers for Fibre Channel Protocol (FCP)
132 */
133u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
134{
135 return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
136}
137EXPORT_SYMBOL(fc_get_fabric_proto_ident);
138
139u32 fc_get_pr_transport_id_len(
140 struct se_portal_group *se_tpg,
141 struct se_node_acl *se_nacl,
142 struct t10_pr_registration *pr_reg,
143 int *format_code)
144{
145 *format_code = 0;
146 /*
147 * The FC Transport ID is a hardcoded 24-byte length
148 */
149 return 24;
150}
151EXPORT_SYMBOL(fc_get_pr_transport_id_len);
152
153u32 fc_get_pr_transport_id(
154 struct se_portal_group *se_tpg,
155 struct se_node_acl *se_nacl,
156 struct t10_pr_registration *pr_reg,
157 int *format_code,
158 unsigned char *buf)
159{
160 unsigned char binary, *ptr;
161 int i;
162 u32 off = 8;
163 /*
164 * PROTOCOL IDENTIFIER is 0h for FCP-2
165 *
166 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
167 * SCSI over Fibre Channel
168 *
169 * We convert the ASCII formatted N Port name into a binary
170 * encoded TransportID.
171 */
172 ptr = &se_nacl->initiatorname[0];
173
174 for (i = 0; i < 24; ) {
175 if (!(strncmp(&ptr[i], ":", 1))) {
176 i++;
177 continue;
178 }
179 binary = transport_asciihex_to_binaryhex(&ptr[i]);
180 buf[off++] = binary;
181 i += 2;
182 }
183 /*
184 * The FC Transport ID is a hardcoded 24-byte length
185 */
186 return 24;
187}
188EXPORT_SYMBOL(fc_get_pr_transport_id);
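The FC helper above follows the same pattern, except that the colon-delimited ASCII N_Port name is packed starting at offset 8. A minimal userspace sketch (the WWPN is hypothetical) of stripping the ':' characters and storing the eight binary bytes:

#include <stdio.h>

int main(void)
{
	/* Hypothetical N_Port name as kept in se_node_acl->initiatorname */
	const char *wwpn = "21:00:00:e0:8b:05:05:04";
	unsigned char buf[24] = { 0 };	/* byte 0 stays 0h: PROTOCOL IDENTIFIER for FCP */
	unsigned int off = 8, i = 0, byte;

	while (wwpn[i]) {
		if (wwpn[i] == ':') {
			i++;
			continue;
		}
		sscanf(&wwpn[i], "%2x", &byte);
		buf[off++] = (unsigned char)byte;
		i += 2;
	}

	for (i = 0; i < 24; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}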
189
190char *fc_parse_pr_out_transport_id(
191 struct se_portal_group *se_tpg,
192 const char *buf,
193 u32 *out_tid_len,
194 char **port_nexus_ptr)
195{
196 /*
197	 * The TransportID for an FC N Port is a fixed 24 bytes,
198	 * and FC does not contain an I_T nexus identifier, so we return
199	 * with *port_nexus_ptr set to NULL.
200 */
201 *port_nexus_ptr = NULL;
202 *out_tid_len = 24;
203
204 return (char *)&buf[8];
205}
206EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
207
208/*
209 * Handlers for Internet Small Computer Systems Interface (iSCSI)
210 */
211
212u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
213{
214 /*
215 * This value is defined for "Internet SCSI (iSCSI)"
216 * in spc4r17 section 7.5.1 Table 362
217 */
218 return 0x5;
219}
220EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
221
222u32 iscsi_get_pr_transport_id(
223 struct se_portal_group *se_tpg,
224 struct se_node_acl *se_nacl,
225 struct t10_pr_registration *pr_reg,
226 int *format_code,
227 unsigned char *buf)
228{
229 u32 off = 4, padding = 0;
230 u16 len = 0;
231
232 spin_lock_irq(&se_nacl->nacl_sess_lock);
233 /*
234 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
235 */
236 buf[0] = 0x05;
237 /*
238 * From spc4r17 Section 7.5.4.6: TransportID for initiator
239 * ports using SCSI over iSCSI.
240 *
241 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
242 * shall contain the iSCSI name of an iSCSI initiator node (see
243 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
244 * null character terminates the ISCSI NAME field without regard for
245 * the specified length of the iSCSI TransportID or the contents of
246 * the ADDITIONAL LENGTH field.
247 */
248 len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
249 /*
250 * Add Extra byte for NULL terminator
251 */
252 len++;
253 /*
254	 * If there is an ISID present with the registration and *format_code
255	 * == 1, use the iSCSI Initiator port TransportID format.
256	 *
257	 * Otherwise use the iSCSI Initiator device TransportID format, which
258	 * does not contain the ASCII encoded iSCSI Initiator ISID value
259	 * provided by the iSCSI Initiator during the iSCSI login process.
260 */
261 if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
262 /*
263 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
264 * format.
265 */
266 buf[0] |= 0x40;
267 /*
268 * From spc4r17 Section 7.5.4.6: TransportID for initiator
269 * ports using SCSI over iSCSI. Table 390
270 *
271 * The SEPARATOR field shall contain the five ASCII
272 * characters ",i,0x".
273 *
274 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
275 * field shall contain the iSCSI initiator session identifier
276 * (see RFC 3720) in the form of ASCII characters that are the
277 * hexadecimal digits converted from the binary iSCSI initiator
278 * session identifier value. The first ISCSI INITIATOR SESSION
279	 * ID field byte containing an ASCII null character terminates the field.
280 */
281 buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
282 buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
283 buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
284 buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
285 buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
286 len += 5;
287 buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
288 buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
289 buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
290 buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
291 buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
292 buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
293 buf[off+len] = '\0'; off++;
294 len += 7;
295 }
296 spin_unlock_irq(&se_nacl->nacl_sess_lock);
297 /*
298 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
299 * in the TransportID. The additional length shall be at least 20 and
300 * shall be a multiple of four.
301 */
302 padding = ((-len) & 3);
303 if (padding != 0)
304 len += padding;
305
306 buf[2] = ((len >> 8) & 0xff);
307 buf[3] = (len & 0xff);
308 /*
309 * Increment value for total payload + header length for
310 * full status descriptor
311 */
312 len += 4;
313
314 return len;
315}
316EXPORT_SYMBOL(iscsi_get_pr_transport_id);
317
318u32 iscsi_get_pr_transport_id_len(
319 struct se_portal_group *se_tpg,
320 struct se_node_acl *se_nacl,
321 struct t10_pr_registration *pr_reg,
322 int *format_code)
323{
324 u32 len = 0, padding = 0;
325
326 spin_lock_irq(&se_nacl->nacl_sess_lock);
327 len = strlen(se_nacl->initiatorname);
328 /*
329 * Add extra byte for NULL terminator
330 */
331 len++;
332 /*
333 * If there is ISID present with the registration, use format code:
334 * 01b: iSCSI Initiator port TransportID format
335 *
336 * If there is not an active iSCSI session, use format code:
337 * 00b: iSCSI Initiator device TransportID format
338 */
339 if (pr_reg->isid_present_at_reg) {
340		len += 5; /* For ",i,0x" ASCII separator */
341 len += 7; /* For iSCSI Initiator Session ID + Null terminator */
342 *format_code = 1;
343 } else
344 *format_code = 0;
345 spin_unlock_irq(&se_nacl->nacl_sess_lock);
346 /*
347 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
348 * in the TransportID. The additional length shall be at least 20 and
349 * shall be a multiple of four.
350 */
351 padding = ((-len) & 3);
352 if (padding != 0)
353 len += padding;
354 /*
355 * Increment value for total payload + header length for
356 * full status descriptor
357 */
358 len += 4;
359
360 return len;
361}
362EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
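Both iSCSI helpers share the same length arithmetic: NULL-terminate the name, pad the payload to a multiple of four, then add the 4-byte TransportID header. A short worked example (the iqn string is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical initiator name, as stored in se_node_acl->initiatorname */
	const char *iqn = "iqn.1994-05.com.example:host1";
	unsigned int len, padding;

	len = strlen(iqn) + 1;		/* name plus NULL terminator */
	padding = (-len) & 3;		/* round up to a multiple of four */
	len += padding;
	len += 4;			/* 4-byte iSCSI TransportID header */

	/* For this name: 29 + 1 = 30, padded to 32, 36 bytes in total */
	printf("strlen=%zu padded payload=%u total TransportID length=%u\n",
	       strlen(iqn), len - 4, len);
	return 0;
}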
363
364char *iscsi_parse_pr_out_transport_id(
365 struct se_portal_group *se_tpg,
366 const char *buf,
367 u32 *out_tid_len,
368 char **port_nexus_ptr)
369{
370 char *p;
371 u32 tid_len, padding;
372 int i;
373 u16 add_len;
374 u8 format_code = (buf[0] & 0xc0);
375 /*
376 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
377 *
378 * TransportID for initiator ports using SCSI over iSCSI,
379 * from Table 388 -- iSCSI TransportID formats.
380 *
381 * 00b Initiator port is identified using the world wide unique
382 * SCSI device name of the iSCSI initiator
383 * device containing the initiator port (see table 389).
384	 * 01b     Initiator port is identified using the world wide unique
385	 *         initiator port identifier (see table 390).
386	 * 10b to 11b Reserved
387 */
388 if ((format_code != 0x00) && (format_code != 0x40)) {
389 printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
390 " Initiator Transport ID\n", format_code);
391 return NULL;
392 }
393 /*
394 * If the caller wants the TransportID Length, we set that value for the
395	 * entire iSCSI Transport ID now.
396 */
397 if (out_tid_len != NULL) {
398		add_len = ((buf[2] & 0xff) << 8);
399		add_len |= (buf[3] & 0xff);
400
401 tid_len = strlen((char *)&buf[4]);
402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
403 tid_len += 1; /* Add one byte for NULL terminator */
404 padding = ((-tid_len) & 3);
405 if (padding != 0)
406 tid_len += padding;
407
408 if ((add_len + 4) != tid_len) {
409 printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
410 "does not match calculated tid_len: %u,"
411 " using tid_len instead\n", add_len+4, tid_len);
412 *out_tid_len = tid_len;
413 } else
414 *out_tid_len = (add_len + 4);
415 }
416 /*
417	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
418 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
419 * format.
420 */
421 if (format_code == 0x40) {
422 p = strstr((char *)&buf[4], ",i,0x");
423 if (!(p)) {
424			printk(KERN_ERR "Unable to locate \",i,0x\" separator"
425 " for Initiator port identifier: %s\n",
426 (char *)&buf[4]);
427 return NULL;
428 }
429 *p = '\0'; /* Terminate iSCSI Name */
430		p += 5; /* Skip over ",i,0x" separator */
431
432 *port_nexus_ptr = p;
433 /*
434 * Go ahead and do the lower case conversion of the received
435 * 12 ASCII characters representing the ISID in the TransportID
436 * for comparison against the running iSCSI session's ISID from
437 * iscsi_target.c:lio_sess_get_initiator_sid()
438 */
439 for (i = 0; i < 12; i++) {
440 if (isdigit(*p)) {
441 p++;
442 continue;
443 }
444 *p = tolower(*p);
445 p++;
446 }
447 }
448
449 return (char *)&buf[4];
450}
451EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
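When the FORMAT CODE is 01b, the parser above splits the payload at the ",i,0x" separator so that the iSCSI name stays NULL-terminated and *port_nexus_ptr points at the ASCII ISID. A hypothetical example of the string found at &buf[4] and the resulting split:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical payload as it would appear at &buf[4] (made-up ISID) */
	char tid[] = "iqn.1994-05.com.example:host1,i,0x23d4f6a8b9c0";
	char *p = strstr(tid, ",i,0x");

	if (!p)
		return 1;
	*p = '\0';	/* terminate the iSCSI name */
	p += 5;		/* skip ",i,0x"; p now points at the ISID characters */

	printf("name: %s\nisid: %s\n", tid, p);
	return 0;
}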
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 000000000000..150c4305f385
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,703 @@
1/*******************************************************************************
2 * Filename: target_core_file.c
3 *
4 * This file contains the Storage Engine <-> FILEIO transport specific functions
5 *
6 * Copyright (c) 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/string.h>
31#include <linux/parser.h>
32#include <linux/timer.h>
33#include <linux/blkdev.h>
34#include <linux/slab.h>
35#include <linux/spinlock.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_host.h>
38
39#include <target/target_core_base.h>
40#include <target/target_core_device.h>
41#include <target/target_core_transport.h>
42
43#include "target_core_file.h"
44
45#if 1
46#define DEBUG_FD_CACHE(x...) printk(x)
47#else
48#define DEBUG_FD_CACHE(x...)
49#endif
50
51#if 1
52#define DEBUG_FD_FUA(x...) printk(x)
53#else
54#define DEBUG_FD_FUA(x...)
55#endif
56
57static struct se_subsystem_api fileio_template;
58
59/* fd_attach_hba(): (Part of se_subsystem_api_t template)
60 *
61 *
62 */
63static int fd_attach_hba(struct se_hba *hba, u32 host_id)
64{
65 struct fd_host *fd_host;
66
67 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
68 if (!(fd_host)) {
69 printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
70 return -1;
71 }
72
73 fd_host->fd_host_id = host_id;
74
75 atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
76 atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
77 hba->hba_ptr = (void *) fd_host;
78
79 printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
80 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
81 TARGET_CORE_MOD_VERSION);
82 printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
83 " Target Core with TCQ Depth: %d MaxSectors: %u\n",
84 hba->hba_id, fd_host->fd_host_id,
85 atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
86
87 return 0;
88}
89
90static void fd_detach_hba(struct se_hba *hba)
91{
92 struct fd_host *fd_host = hba->hba_ptr;
93
94 printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
95 " Target Core\n", hba->hba_id, fd_host->fd_host_id);
96
97 kfree(fd_host);
98 hba->hba_ptr = NULL;
99}
100
101static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
102{
103 struct fd_dev *fd_dev;
104 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
105
106 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
107 if (!(fd_dev)) {
108 printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
109 return NULL;
110 }
111
112 fd_dev->fd_host = fd_host;
113
114	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);
115
116 return fd_dev;
117}
118
119/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
120 *
121 *
122 */
123static struct se_device *fd_create_virtdevice(
124 struct se_hba *hba,
125 struct se_subsystem_dev *se_dev,
126 void *p)
127{
128 char *dev_p = NULL;
129 struct se_device *dev;
130 struct se_dev_limits dev_limits;
131 struct queue_limits *limits;
132 struct fd_dev *fd_dev = (struct fd_dev *) p;
133 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
134 mm_segment_t old_fs;
135 struct file *file;
136 struct inode *inode = NULL;
137 int dev_flags = 0, flags, ret = -EINVAL;
138
139 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
140
141 old_fs = get_fs();
142 set_fs(get_ds());
143 dev_p = getname(fd_dev->fd_dev_name);
144 set_fs(old_fs);
145
146 if (IS_ERR(dev_p)) {
147		printk(KERN_ERR "getname(%s) failed: %ld\n",
148			fd_dev->fd_dev_name, PTR_ERR(dev_p));
149 ret = PTR_ERR(dev_p);
150 goto fail;
151 }
152#if 0
153 if (di->no_create_file)
154 flags = O_RDWR | O_LARGEFILE;
155 else
156 flags = O_RDWR | O_CREAT | O_LARGEFILE;
157#else
158 flags = O_RDWR | O_CREAT | O_LARGEFILE;
159#endif
160/* flags |= O_DIRECT; */
161 /*
162 * If fd_buffered_io=1 has not been set explicitly (the default),
163 * use O_SYNC to force FILEIO writes to disk.
164 */
165 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
166 flags |= O_SYNC;
167
168 file = filp_open(dev_p, flags, 0600);
169 if (IS_ERR(file)) {
170 printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
171 ret = PTR_ERR(file);
172 goto fail;
173 }
174 if (!file || !file->f_dentry) {
175 printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
176 goto fail;
177 }
178 fd_dev->fd_file = file;
179 /*
180 * If using a block backend with this struct file, we extract
181 * fd_dev->fd_[block,dev]_size from struct block_device.
182 *
183 * Otherwise, we use the passed fd_size= from configfs
184 */
185 inode = file->f_mapping->host;
186 if (S_ISBLK(inode->i_mode)) {
187 struct request_queue *q;
188 /*
189 * Setup the local scope queue_limits from struct request_queue->limits
190 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
191 */
192 q = bdev_get_queue(inode->i_bdev);
193 limits = &dev_limits.limits;
194 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
195 limits->max_hw_sectors = queue_max_hw_sectors(q);
196 limits->max_sectors = queue_max_sectors(q);
197 /*
198 * Determine the number of bytes from i_size_read() minus
199 * one (1) logical sector from underlying struct block_device
200 */
201 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
202 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
203 fd_dev->fd_block_size);
204
205 printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
206 " block_device blocks: %llu logical_block_size: %d\n",
207 fd_dev->fd_dev_size,
208 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
209 fd_dev->fd_block_size);
210 } else {
211 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
212 printk(KERN_ERR "FILEIO: Missing fd_dev_size="
213 " parameter, and no backing struct"
214 " block_device\n");
215 goto fail;
216 }
217
218 limits = &dev_limits.limits;
219 limits->logical_block_size = FD_BLOCKSIZE;
220 limits->max_hw_sectors = FD_MAX_SECTORS;
221 limits->max_sectors = FD_MAX_SECTORS;
222 fd_dev->fd_block_size = FD_BLOCKSIZE;
223 }
224
225 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
226 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
227
228 dev = transport_add_device_to_core_hba(hba, &fileio_template,
229 se_dev, dev_flags, (void *)fd_dev,
230 &dev_limits, "FILEIO", FD_VERSION);
231 if (!(dev))
232 goto fail;
233
234 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
235 fd_dev->fd_queue_depth = dev->queue_depth;
236
237 printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
238 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
239 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
240
241 putname(dev_p);
242 return dev;
243fail:
244 if (fd_dev->fd_file) {
245 filp_close(fd_dev->fd_file, NULL);
246 fd_dev->fd_file = NULL;
247 }
248 putname(dev_p);
249 return ERR_PTR(ret);
250}
251
252/* fd_free_device(): (Part of se_subsystem_api_t template)
253 *
254 *
255 */
256static void fd_free_device(void *p)
257{
258 struct fd_dev *fd_dev = (struct fd_dev *) p;
259
260 if (fd_dev->fd_file) {
261 filp_close(fd_dev->fd_file, NULL);
262 fd_dev->fd_file = NULL;
263 }
264
265 kfree(fd_dev);
266}
267
268static inline struct fd_request *FILE_REQ(struct se_task *task)
269{
270 return container_of(task, struct fd_request, fd_task);
271}
272
273
274static struct se_task *
275fd_alloc_task(struct se_cmd *cmd)
276{
277 struct fd_request *fd_req;
278
279 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
280 if (!(fd_req)) {
281 printk(KERN_ERR "Unable to allocate struct fd_request\n");
282 return NULL;
283 }
284
285 fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
286
287 return &fd_req->fd_task;
288}
289
290static int fd_do_readv(struct se_task *task)
291{
292 struct fd_request *req = FILE_REQ(task);
293 struct file *fd = req->fd_dev->fd_file;
294 struct scatterlist *sg = task->task_sg;
295 struct iovec *iov;
296 mm_segment_t old_fs;
297 loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
298 int ret = 0, i;
299
300 iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
301 if (!(iov)) {
302 printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
303 return -1;
304 }
305
306 for (i = 0; i < task->task_sg_num; i++) {
307 iov[i].iov_len = sg[i].length;
308 iov[i].iov_base = sg_virt(&sg[i]);
309 }
310
311 old_fs = get_fs();
312 set_fs(get_ds());
313 ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
314 set_fs(old_fs);
315
316 kfree(iov);
317 /*
318 * Return zeros and GOOD status even if the READ did not return
319 * the expected virt_size for struct file w/o a backing struct
320 * block_device.
321 */
322 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
323 if (ret < 0 || ret != task->task_size) {
324 printk(KERN_ERR "vfs_readv() returned %d,"
325 " expecting %d for S_ISBLK\n", ret,
326 (int)task->task_size);
327 return -1;
328 }
329 } else {
330 if (ret < 0) {
331 printk(KERN_ERR "vfs_readv() returned %d for non"
332 " S_ISBLK\n", ret);
333 return -1;
334 }
335 }
336
337 return 1;
338}
339
340static int fd_do_writev(struct se_task *task)
341{
342 struct fd_request *req = FILE_REQ(task);
343 struct file *fd = req->fd_dev->fd_file;
344 struct scatterlist *sg = task->task_sg;
345 struct iovec *iov;
346 mm_segment_t old_fs;
347 loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
348 int ret, i = 0;
349
350 iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
351 if (!(iov)) {
352 printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
353 return -1;
354 }
355
356 for (i = 0; i < task->task_sg_num; i++) {
357 iov[i].iov_len = sg[i].length;
358 iov[i].iov_base = sg_virt(&sg[i]);
359 }
360
361 old_fs = get_fs();
362 set_fs(get_ds());
363 ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
364 set_fs(old_fs);
365
366 kfree(iov);
367
368 if (ret < 0 || ret != task->task_size) {
369 printk(KERN_ERR "vfs_writev() returned %d\n", ret);
370 return -1;
371 }
372
373 return 1;
374}
375
376static void fd_emulate_sync_cache(struct se_task *task)
377{
378 struct se_cmd *cmd = TASK_CMD(task);
379 struct se_device *dev = cmd->se_dev;
380 struct fd_dev *fd_dev = dev->dev_ptr;
381 int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
382 loff_t start, end;
383 int ret;
384
385 /*
386 * If the Immediate bit is set, queue up the GOOD response
387 * for this SYNCHRONIZE_CACHE op
388 */
389 if (immed)
390 transport_complete_sync_cache(cmd, 1);
391
392 /*
393 * Determine if we will be flushing the entire device.
394 */
395 if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
396 start = 0;
397 end = LLONG_MAX;
398 } else {
399 start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
400 if (cmd->data_length)
401 end = start + cmd->data_length;
402 else
403 end = LLONG_MAX;
404 }
405
406 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
407 if (ret != 0)
408 printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
409
410 if (!immed)
411 transport_complete_sync_cache(cmd, ret == 0);
412}
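The byte range handed to vfs_fsync_range() above follows directly from the SCSI LBA and transfer length. A hypothetical worked example, assuming a 512-byte block size:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical SYNCHRONIZE_CACHE parameters */
	unsigned long long lba = 2048, data_length = 4096, block_size = 512;
	long long start, end;

	if (lba == 0 && data_length == 0) {
		start = 0;
		end = LLONG_MAX;		/* flush the entire device */
	} else {
		start = lba * block_size;	/* 2048 * 512 = 1048576 */
		end = data_length ? start + data_length : LLONG_MAX;
	}

	printf("flush byte range: %lld..%lld\n", start, end);
	return 0;
}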
413
414/*
415 * Tell TCM Core that we are capable of WriteCache emulation for
416 * an underlying struct se_device.
417 */
418static int fd_emulated_write_cache(struct se_device *dev)
419{
420 return 1;
421}
422
423static int fd_emulated_dpo(struct se_device *dev)
424{
425 return 0;
426}
427/*
428 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
429 * for TYPE_DISK.
430 */
431static int fd_emulated_fua_write(struct se_device *dev)
432{
433 return 1;
434}
435
436static int fd_emulated_fua_read(struct se_device *dev)
437{
438 return 0;
439}
440
441/*
442 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
443 * LBA range basis..
444 */
445static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
446{
447 struct se_device *dev = cmd->se_dev;
448 struct fd_dev *fd_dev = dev->dev_ptr;
449 loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
450 loff_t end = start + task->task_size;
451 int ret;
452
453 DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
454 task->task_lba, task->task_size);
455
456 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
457 if (ret != 0)
458 printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
459}
460
461static int fd_do_task(struct se_task *task)
462{
463 struct se_cmd *cmd = task->task_se_cmd;
464 struct se_device *dev = cmd->se_dev;
465 int ret = 0;
466
467 /*
468 * Call vectorized fileio functions to map struct scatterlist
469 * physical memory addresses to struct iovec virtual memory.
470 */
471 if (task->task_data_direction == DMA_FROM_DEVICE) {
472 ret = fd_do_readv(task);
473 } else {
474 ret = fd_do_writev(task);
475
476 if (ret > 0 &&
477 DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
478 DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
479 T_TASK(cmd)->t_tasks_fua) {
480 /*
481 * We might need to be a bit smarter here
482 * and return some sense data to let the initiator
483 * know the FUA WRITE cache sync failed..?
484 */
485 fd_emulate_write_fua(cmd, task);
486 }
487
488 }
489
490 if (ret < 0)
491 return ret;
492 if (ret) {
493 task->task_scsi_status = GOOD;
494 transport_complete_task(task, 1);
495 }
496 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
497}
498
499/* fd_free_task(): (Part of se_subsystem_api_t template)
500 *
501 *
502 */
503static void fd_free_task(struct se_task *task)
504{
505 struct fd_request *req = FILE_REQ(task);
506
507 kfree(req);
508}
509
510enum {
511 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
512};
513
514static match_table_t tokens = {
515 {Opt_fd_dev_name, "fd_dev_name=%s"},
516 {Opt_fd_dev_size, "fd_dev_size=%s"},
517 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
518 {Opt_err, NULL}
519};
520
521static ssize_t fd_set_configfs_dev_params(
522 struct se_hba *hba,
523 struct se_subsystem_dev *se_dev,
524 const char *page, ssize_t count)
525{
526 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
527 char *orig, *ptr, *arg_p, *opts;
528 substring_t args[MAX_OPT_ARGS];
529 int ret = 0, arg, token;
530
531 opts = kstrdup(page, GFP_KERNEL);
532 if (!opts)
533 return -ENOMEM;
534
535 orig = opts;
536
537 while ((ptr = strsep(&opts, ",")) != NULL) {
538 if (!*ptr)
539 continue;
540
541 token = match_token(ptr, tokens, args);
542 switch (token) {
543 case Opt_fd_dev_name:
544 arg_p = match_strdup(&args[0]);
545 if (!arg_p) {
546 ret = -ENOMEM;
547 break;
548 }
549 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
550 "%s", arg_p);
551 kfree(arg_p);
552 printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
553 fd_dev->fd_dev_name);
554 fd_dev->fbd_flags |= FBDF_HAS_PATH;
555 break;
556 case Opt_fd_dev_size:
557 arg_p = match_strdup(&args[0]);
558 if (!arg_p) {
559 ret = -ENOMEM;
560 break;
561 }
562 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
563 kfree(arg_p);
564 if (ret < 0) {
565 printk(KERN_ERR "strict_strtoull() failed for"
566 " fd_dev_size=\n");
567 goto out;
568 }
569 printk(KERN_INFO "FILEIO: Referencing Size: %llu"
570 " bytes\n", fd_dev->fd_dev_size);
571 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
572 break;
573 case Opt_fd_buffered_io:
574 match_int(args, &arg);
575 if (arg != 1) {
576 printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
577 ret = -EINVAL;
578 goto out;
579 }
580
581 printk(KERN_INFO "FILEIO: Using buffered I/O"
582 " operations for struct fd_dev\n");
583
584 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
585 break;
586 default:
587 break;
588 }
589 }
590
591out:
592 kfree(orig);
593 return (!ret) ? count : ret;
594}
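The string this parser consumes is a single comma-separated list of key=value options written through the device's configfs control file. A userspace sketch of how such a string splits into tokens (match_token() is kernel-only, so plain strsep() stands in; the path and size are hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical option string written to the fileio device's control file */
	char opts[] = "fd_dev_name=/srv/fileio/disk0,fd_dev_size=4294967296";
	char *cur = opts, *ptr;

	while ((ptr = strsep(&cur, ",")) != NULL) {
		if (!*ptr)
			continue;
		printf("token: %s\n", ptr);	/* key=value, matched against tokens[] above */
	}
	return 0;
}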
595
596static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
597{
598 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
599
600 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
601 printk(KERN_ERR "Missing fd_dev_name=\n");
602 return -1;
603 }
604
605 return 0;
606}
607
608static ssize_t fd_show_configfs_dev_params(
609 struct se_hba *hba,
610 struct se_subsystem_dev *se_dev,
611 char *b)
612{
613 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
614 ssize_t bl = 0;
615
616 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
617 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
618 fd_dev->fd_dev_name, fd_dev->fd_dev_size,
619 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
620 "Buffered" : "Synchronous");
621 return bl;
622}
623
624/* fd_get_cdb(): (Part of se_subsystem_api_t template)
625 *
626 *
627 */
628static unsigned char *fd_get_cdb(struct se_task *task)
629{
630 struct fd_request *req = FILE_REQ(task);
631
632 return req->fd_scsi_cdb;
633}
634
635/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
636 *
637 *
638 */
639static u32 fd_get_device_rev(struct se_device *dev)
640{
641 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
642}
643
644/* fd_get_device_type(): (Part of se_subsystem_api_t template)
645 *
646 *
647 */
648static u32 fd_get_device_type(struct se_device *dev)
649{
650 return TYPE_DISK;
651}
652
653static sector_t fd_get_blocks(struct se_device *dev)
654{
655 struct fd_dev *fd_dev = dev->dev_ptr;
656 unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
657 DEV_ATTRIB(dev)->block_size);
658
659 return blocks_long;
660}
661
662static struct se_subsystem_api fileio_template = {
663 .name = "fileio",
664 .owner = THIS_MODULE,
665 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
666 .attach_hba = fd_attach_hba,
667 .detach_hba = fd_detach_hba,
668 .allocate_virtdevice = fd_allocate_virtdevice,
669 .create_virtdevice = fd_create_virtdevice,
670 .free_device = fd_free_device,
671 .dpo_emulated = fd_emulated_dpo,
672 .fua_write_emulated = fd_emulated_fua_write,
673 .fua_read_emulated = fd_emulated_fua_read,
674 .write_cache_emulated = fd_emulated_write_cache,
675 .alloc_task = fd_alloc_task,
676 .do_task = fd_do_task,
677 .do_sync_cache = fd_emulate_sync_cache,
678 .free_task = fd_free_task,
679 .check_configfs_dev_params = fd_check_configfs_dev_params,
680 .set_configfs_dev_params = fd_set_configfs_dev_params,
681 .show_configfs_dev_params = fd_show_configfs_dev_params,
682 .get_cdb = fd_get_cdb,
683 .get_device_rev = fd_get_device_rev,
684 .get_device_type = fd_get_device_type,
685 .get_blocks = fd_get_blocks,
686};
687
688static int __init fileio_module_init(void)
689{
690 return transport_subsystem_register(&fileio_template);
691}
692
693static void fileio_module_exit(void)
694{
695 transport_subsystem_release(&fileio_template);
696}
697
698MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
699MODULE_AUTHOR("nab@Linux-iSCSI.org");
700MODULE_LICENSE("GPL");
701
702module_init(fileio_module_init);
703module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 000000000000..ef4de2b4bd46
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
1#ifndef TARGET_CORE_FILE_H
2#define TARGET_CORE_FILE_H
3
4#define FD_VERSION "4.0"
5
6#define FD_MAX_DEV_NAME 256
7/* Maximum queue depth for the FILEIO HBA */
8#define FD_HBA_QUEUE_DEPTH 256
9#define FD_DEVICE_QUEUE_DEPTH 32
10#define FD_MAX_DEVICE_QUEUE_DEPTH 128
11#define FD_BLOCKSIZE 512
12#define FD_MAX_SECTORS 1024
13
14#define RRF_EMULATE_CDB 0x01
15#define RRF_GOT_LBA 0x02
16
17struct fd_request {
18 struct se_task fd_task;
19 /* SCSI CDB from iSCSI Command PDU */
20 unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
21 /* FILEIO device */
22 struct fd_dev *fd_dev;
23} ____cacheline_aligned;
24
25#define FBDF_HAS_PATH 0x01
26#define FBDF_HAS_SIZE 0x02
27#define FDBD_USE_BUFFERED_IO 0x04
28
29struct fd_dev {
30 u32 fbd_flags;
31 unsigned char fd_dev_name[FD_MAX_DEV_NAME];
32	/* Unique FILEIO Device ID in FILEIO HBA */
33 u32 fd_dev_id;
34 /* Number of SG tables in sg_table_array */
35 u32 fd_table_count;
36 u32 fd_queue_depth;
37 u32 fd_block_size;
38 unsigned long long fd_dev_size;
39 struct file *fd_file;
40 /* FILEIO HBA device is connected to */
41 struct fd_host *fd_host;
42} ____cacheline_aligned;
43
44struct fd_host {
45 u32 fd_host_dev_id_count;
46 /* Unique FILEIO Host ID */
47 u32 fd_host_id;
48} ____cacheline_aligned;
49
50#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 000000000000..0b8f8da89019
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,172 @@
1/*******************************************************************************
2 * Filename: target_core_hba.c
3 *
4 * This file contains the TCM HBA Transport related functions.
5 *
6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/in.h>
35#include <net/sock.h>
36#include <net/tcp.h>
37
38#include <target/target_core_base.h>
39#include <target/target_core_device.h>
40#include <target/target_core_tpg.h>
41#include <target/target_core_transport.h>
42
43#include "target_core_hba.h"
44
45static LIST_HEAD(subsystem_list);
46static DEFINE_MUTEX(subsystem_mutex);
47
48int transport_subsystem_register(struct se_subsystem_api *sub_api)
49{
50 struct se_subsystem_api *s;
51
52 INIT_LIST_HEAD(&sub_api->sub_api_list);
53
54 mutex_lock(&subsystem_mutex);
55 list_for_each_entry(s, &subsystem_list, sub_api_list) {
56 if (!(strcmp(s->name, sub_api->name))) {
57 printk(KERN_ERR "%p is already registered with"
58 " duplicate name %s, unable to process"
59 " request\n", s, s->name);
60 mutex_unlock(&subsystem_mutex);
61 return -EEXIST;
62 }
63 }
64 list_add_tail(&sub_api->sub_api_list, &subsystem_list);
65 mutex_unlock(&subsystem_mutex);
66
67 printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
68 " %p\n", sub_api->name, sub_api->owner);
69 return 0;
70}
71EXPORT_SYMBOL(transport_subsystem_register);
72
73void transport_subsystem_release(struct se_subsystem_api *sub_api)
74{
75 mutex_lock(&subsystem_mutex);
76 list_del(&sub_api->sub_api_list);
77 mutex_unlock(&subsystem_mutex);
78}
79EXPORT_SYMBOL(transport_subsystem_release);
80
81static struct se_subsystem_api *core_get_backend(const char *sub_name)
82{
83 struct se_subsystem_api *s;
84
85 mutex_lock(&subsystem_mutex);
86 list_for_each_entry(s, &subsystem_list, sub_api_list) {
87 if (!strcmp(s->name, sub_name))
88 goto found;
89 }
90 mutex_unlock(&subsystem_mutex);
91 return NULL;
92found:
93 if (s->owner && !try_module_get(s->owner))
94 s = NULL;
95 mutex_unlock(&subsystem_mutex);
96 return s;
97}
98
99struct se_hba *
100core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
101{
102 struct se_hba *hba;
103 int ret = 0;
104
105 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
106 if (!hba) {
107 printk(KERN_ERR "Unable to allocate struct se_hba\n");
108 return ERR_PTR(-ENOMEM);
109 }
110
111 INIT_LIST_HEAD(&hba->hba_dev_list);
112 spin_lock_init(&hba->device_lock);
113 spin_lock_init(&hba->hba_queue_lock);
114 mutex_init(&hba->hba_access_mutex);
115
116 hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
117 hba->hba_flags |= hba_flags;
118
119 atomic_set(&hba->max_queue_depth, 0);
120 atomic_set(&hba->left_queue_depth, 0);
121
122 hba->transport = core_get_backend(plugin_name);
123 if (!hba->transport) {
124 ret = -EINVAL;
125 goto out_free_hba;
126 }
127
128 ret = hba->transport->attach_hba(hba, plugin_dep_id);
129 if (ret < 0)
130 goto out_module_put;
131
132 spin_lock(&se_global->hba_lock);
133 hba->hba_id = se_global->g_hba_id_counter++;
134 list_add_tail(&hba->hba_list, &se_global->g_hba_list);
135 spin_unlock(&se_global->hba_lock);
136
137 printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
138 " Core\n", hba->hba_id);
139
140 return hba;
141
142out_module_put:
143 if (hba->transport->owner)
144 module_put(hba->transport->owner);
145 hba->transport = NULL;
146out_free_hba:
147 kfree(hba);
148 return ERR_PTR(ret);
149}
150
151int
152core_delete_hba(struct se_hba *hba)
153{
154 if (!list_empty(&hba->hba_dev_list))
155 dump_stack();
156
157 hba->transport->detach_hba(hba);
158
159 spin_lock(&se_global->hba_lock);
160 list_del(&hba->hba_list);
161 spin_unlock(&se_global->hba_lock);
162
163 printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
164 " Core\n", hba->hba_id);
165
166 if (hba->transport->owner)
167 module_put(hba->transport->owner);
168
169 hba->transport = NULL;
170 kfree(hba);
171 return 0;
172}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 000000000000..bb0fea5f730c
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
1#ifndef TARGET_CORE_HBA_H
2#define TARGET_CORE_HBA_H
3
4extern struct se_hba *core_alloc_hba(const char *, u32, u32);
5extern int core_delete_hba(struct se_hba *);
6
7#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 000000000000..86639004af9e
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,814 @@
1/*******************************************************************************
2 * Filename: target_core_iblock.c
3 *
4 * This file contains the Storage Engine <-> Linux BlockIO transport
5 * specific functions.
6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 *
12 * Nicholas A. Bellinger <nab@kernel.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/version.h>
31#include <linux/string.h>
32#include <linux/parser.h>
33#include <linux/timer.h>
34#include <linux/fs.h>
35#include <linux/blkdev.h>
36#include <linux/slab.h>
37#include <linux/spinlock.h>
38#include <linux/bio.h>
39#include <linux/genhd.h>
40#include <linux/file.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_host.h>
43
44#include <target/target_core_base.h>
45#include <target/target_core_device.h>
46#include <target/target_core_transport.h>
47
48#include "target_core_iblock.h"
49
50#if 0
51#define DEBUG_IBLOCK(x...) printk(x)
52#else
53#define DEBUG_IBLOCK(x...)
54#endif
55
56static struct se_subsystem_api iblock_template;
57
58static void iblock_bio_done(struct bio *, int);
59
60/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
61 *
62 *
63 */
64static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
65{
66 struct iblock_hba *ib_host;
67
68 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
69 if (!(ib_host)) {
70 printk(KERN_ERR "Unable to allocate memory for"
71 " struct iblock_hba\n");
72 return -ENOMEM;
73 }
74
75 ib_host->iblock_host_id = host_id;
76
77 atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
78 atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
79 hba->hba_ptr = (void *) ib_host;
80
81 printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
82 " Generic Target Core Stack %s\n", hba->hba_id,
83 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
84
85 printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
86 " Target Core TCQ Depth: %d\n", hba->hba_id,
87 ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
88
89 return 0;
90}
91
92static void iblock_detach_hba(struct se_hba *hba)
93{
94 struct iblock_hba *ib_host = hba->hba_ptr;
95
96 printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
97 " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
98
99 kfree(ib_host);
100 hba->hba_ptr = NULL;
101}
102
103static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
104{
105 struct iblock_dev *ib_dev = NULL;
106 struct iblock_hba *ib_host = hba->hba_ptr;
107
108 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
109 if (!(ib_dev)) {
110 printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
111 return NULL;
112 }
113 ib_dev->ibd_host = ib_host;
114
115 printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
116
117 return ib_dev;
118}
119
120static struct se_device *iblock_create_virtdevice(
121 struct se_hba *hba,
122 struct se_subsystem_dev *se_dev,
123 void *p)
124{
125 struct iblock_dev *ib_dev = p;
126 struct se_device *dev;
127 struct se_dev_limits dev_limits;
128 struct block_device *bd = NULL;
129 struct request_queue *q;
130 struct queue_limits *limits;
131 u32 dev_flags = 0;
132 int ret = -EINVAL;
133
134 if (!(ib_dev)) {
135 printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
136 return ERR_PTR(ret);
137 }
138 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
139 /*
140 * These settings need to be made tunable..
141 */
142 ib_dev->ibd_bio_set = bioset_create(32, 64);
143 if (!(ib_dev->ibd_bio_set)) {
144 printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
145 return ERR_PTR(-ENOMEM);
146 }
147 printk(KERN_INFO "IBLOCK: Created bio_set()\n");
148 /*
149 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
150 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
151 */
152 printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
153 ib_dev->ibd_udev_path);
154
155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
157 if (IS_ERR(bd)) {
158 ret = PTR_ERR(bd);
159 goto failed;
160 }
161 /*
162 * Setup the local scope queue_limits from struct request_queue->limits
163 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
164 */
165 q = bdev_get_queue(bd);
166 limits = &dev_limits.limits;
167 limits->logical_block_size = bdev_logical_block_size(bd);
168 limits->max_hw_sectors = queue_max_hw_sectors(q);
169 limits->max_sectors = queue_max_sectors(q);
170 dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
171 dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
172
173 ib_dev->ibd_major = MAJOR(bd->bd_dev);
174 ib_dev->ibd_minor = MINOR(bd->bd_dev);
175 ib_dev->ibd_bd = bd;
176
177 dev = transport_add_device_to_core_hba(hba,
178 &iblock_template, se_dev, dev_flags, (void *)ib_dev,
179 &dev_limits, "IBLOCK", IBLOCK_VERSION);
180 if (!(dev))
181 goto failed;
182
183 ib_dev->ibd_depth = dev->queue_depth;
184
185 /*
186 * Check if the underlying struct block_device request_queue supports
187 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
188 * in ATA and we need to set TPE=1
189 */
190 if (blk_queue_discard(q)) {
191 DEV_ATTRIB(dev)->max_unmap_lba_count =
192 q->limits.max_discard_sectors;
193 /*
194 * Currently hardcoded to 1 in Linux/SCSI code..
195 */
196 DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
197 DEV_ATTRIB(dev)->unmap_granularity =
198 q->limits.discard_granularity;
199 DEV_ATTRIB(dev)->unmap_granularity_alignment =
200 q->limits.discard_alignment;
201
202 printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
203 " disabled by default\n");
204 }
205
206 return dev;
207
208failed:
209 if (ib_dev->ibd_bio_set) {
210 bioset_free(ib_dev->ibd_bio_set);
211 ib_dev->ibd_bio_set = NULL;
212 }
213 ib_dev->ibd_bd = NULL;
214 ib_dev->ibd_major = 0;
215 ib_dev->ibd_minor = 0;
216 return ERR_PTR(ret);
217}
218
219static void iblock_free_device(void *p)
220{
221 struct iblock_dev *ib_dev = p;
222
223 if (ib_dev->ibd_bd != NULL)
224 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
225 if (ib_dev->ibd_bio_set != NULL)
226 bioset_free(ib_dev->ibd_bio_set);
227 kfree(ib_dev);
228}
229
230static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
231{
232 return container_of(task, struct iblock_req, ib_task);
233}
234
235static struct se_task *
236iblock_alloc_task(struct se_cmd *cmd)
237{
238 struct iblock_req *ib_req;
239
240 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
241 if (!(ib_req)) {
242 printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
243 return NULL;
244 }
245
246 ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
247 atomic_set(&ib_req->ib_bio_cnt, 0);
248 return &ib_req->ib_task;
249}
250
251static unsigned long long iblock_emulate_read_cap_with_block_size(
252 struct se_device *dev,
253 struct block_device *bd,
254 struct request_queue *q)
255{
256 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
257 bdev_logical_block_size(bd)) - 1);
258 u32 block_size = bdev_logical_block_size(bd);
259
260 if (block_size == DEV_ATTRIB(dev)->block_size)
261 return blocks_long;
262
263 switch (block_size) {
264 case 4096:
265 switch (DEV_ATTRIB(dev)->block_size) {
266 case 2048:
267 blocks_long <<= 1;
268 break;
269 case 1024:
270 blocks_long <<= 2;
271 break;
272 case 512:
273 blocks_long <<= 3;
274 default:
275 break;
276 }
277 break;
278 case 2048:
279 switch (DEV_ATTRIB(dev)->block_size) {
280 case 4096:
281 blocks_long >>= 1;
282 break;
283 case 1024:
284 blocks_long <<= 1;
285 break;
286 case 512:
287 blocks_long <<= 2;
288 break;
289 default:
290 break;
291 }
292 break;
293 case 1024:
294 switch (DEV_ATTRIB(dev)->block_size) {
295 case 4096:
296 blocks_long >>= 2;
297 break;
298 case 2048:
299 blocks_long >>= 1;
300 break;
301 case 512:
302 blocks_long <<= 1;
303 break;
304 default:
305 break;
306 }
307 break;
308 case 512:
309 switch (DEV_ATTRIB(dev)->block_size) {
310 case 4096:
311 blocks_long >>= 3;
312 break;
313 case 2048:
314 blocks_long >>= 2;
315 break;
316 case 1024:
317 blocks_long >>= 1;
318 break;
319 default:
320 break;
321 }
322 break;
323 default:
324 break;
325 }
326
327 return blocks_long;
328}
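/*
 * As a rough sketch (not a drop-in replacement), the nested switch above
 * just rescales the block count by the ratio of the two power-of-two block
 * sizes, i.e. approximately:
 *
 *	int shift = ilog2(block_size) - ilog2(DEV_ATTRIB(dev)->block_size);
 *	blocks_long = (shift >= 0) ? blocks_long << shift :
 *				     blocks_long >> -shift;
 *
 * Spelling out the 512/1024/2048/4096 cases keeps the supported sizes
 * explicit.
 */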
329
330/*
331 * Emulate SYCHRONIZE_CACHE_*
332 */
333static void iblock_emulate_sync_cache(struct se_task *task)
334{
335 struct se_cmd *cmd = TASK_CMD(task);
336 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
337 int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
338 sector_t error_sector;
339 int ret;
340
341 /*
342 * If the Immediate bit is set, queue up the GOOD response
343 * for this SYNCHRONIZE_CACHE op
344 */
345 if (immed)
346 transport_complete_sync_cache(cmd, 1);
347
348 /*
349	 * blkdev_issue_flush() does not support specifying a range, so
350 * we have to flush the entire cache.
351 */
352 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
353 if (ret != 0) {
354		printk(KERN_ERR "IBLOCK: blkdev_issue_flush() failed: %d "
355 " error_sector: %llu\n", ret,
356 (unsigned long long)error_sector);
357 }
358
359 if (!immed)
360 transport_complete_sync_cache(cmd, ret == 0);
361}
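/*
 * Note on the IMMED handling above: when the Immediate bit (cdb[1] & 0x02)
 * is set, GOOD status is queued via transport_complete_sync_cache() before
 * the flush is even issued, so any blkdev_issue_flush() failure is only
 * reported back to the initiator in the non-immediate case.
 */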
362
363/*
364 * Tell TCM Core that we are capable of WriteCache emulation for
365 * an underlying struct se_device.
366 */
367static int iblock_emulated_write_cache(struct se_device *dev)
368{
369 return 1;
370}
371
372static int iblock_emulated_dpo(struct se_device *dev)
373{
374 return 0;
375}
376
377/*
378 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
379 * for TYPE_DISK.
380 */
381static int iblock_emulated_fua_write(struct se_device *dev)
382{
383 return 1;
384}
385
386static int iblock_emulated_fua_read(struct se_device *dev)
387{
388 return 0;
389}
390
391static int iblock_do_task(struct se_task *task)
392{
393 struct se_device *dev = task->task_se_cmd->se_dev;
394 struct iblock_req *req = IBLOCK_REQ(task);
395 struct bio *bio = req->ib_bio, *nbio = NULL;
396 struct blk_plug plug;
397 int rw;
398
399 if (task->task_data_direction == DMA_TO_DEVICE) {
400 /*
401 * Force data to disk if we pretend to not have a volatile
402 * write cache, or the initiator set the Force Unit Access bit.
403 */
404 if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
405 (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
406 T_TASK(task->task_se_cmd)->t_tasks_fua))
407 rw = WRITE_FUA;
408 else
409 rw = WRITE;
410 } else {
411 rw = READ;
412 }
413
414 blk_start_plug(&plug);
415 while (bio) {
416 nbio = bio->bi_next;
417 bio->bi_next = NULL;
418 DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
419 " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
420
421 submit_bio(rw, bio);
422 bio = nbio;
423 }
424 blk_finish_plug(&plug);
425
426 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
427}
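/*
 * Summary of the write-flag selection in iblock_do_task(): WRITE_FUA is
 * used when write-cache emulation is off (data must reach the medium), or
 * when FUA write emulation is on and the initiator set the FUA bit in the
 * CDB; plain WRITE is used otherwise, and READs are always submitted as
 * READ since FUA-for-reads is not advertised (iblock_emulated_fua_read()
 * returns 0).
 */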
428
429static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
430{
431 struct iblock_dev *ibd = dev->dev_ptr;
432 struct block_device *bd = ibd->ibd_bd;
433 int barrier = 0;
434
435 return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
436}
437
438static void iblock_free_task(struct se_task *task)
439{
440 struct iblock_req *req = IBLOCK_REQ(task);
441 struct bio *bio, *hbio = req->ib_bio;
442 /*
443 * We only release the bio(s) here if iblock_bio_done() has not called
444 * bio_put() -> iblock_bio_destructor().
445 */
446 while (hbio != NULL) {
447 bio = hbio;
448 hbio = hbio->bi_next;
449 bio->bi_next = NULL;
450 bio_put(bio);
451 }
452
453 kfree(req);
454}
455
456enum {
457 Opt_udev_path, Opt_force, Opt_err
458};
459
460static match_table_t tokens = {
461 {Opt_udev_path, "udev_path=%s"},
462 {Opt_force, "force=%d"},
463 {Opt_err, NULL}
464};
465
466static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
467 struct se_subsystem_dev *se_dev,
468 const char *page, ssize_t count)
469{
470 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
471 char *orig, *ptr, *arg_p, *opts;
472 substring_t args[MAX_OPT_ARGS];
473 int ret = 0, arg, token;
474
475 opts = kstrdup(page, GFP_KERNEL);
476 if (!opts)
477 return -ENOMEM;
478
479 orig = opts;
480
481 while ((ptr = strsep(&opts, ",")) != NULL) {
482 if (!*ptr)
483 continue;
484
485 token = match_token(ptr, tokens, args);
486 switch (token) {
487 case Opt_udev_path:
488 if (ib_dev->ibd_bd) {
489 printk(KERN_ERR "Unable to set udev_path= while"
490 " ib_dev->ibd_bd exists\n");
491 ret = -EEXIST;
492 goto out;
493 }
494 arg_p = match_strdup(&args[0]);
495 if (!arg_p) {
496 ret = -ENOMEM;
497 break;
498 }
499 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
500 "%s", arg_p);
501 kfree(arg_p);
502 printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
503 ib_dev->ibd_udev_path);
504 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
505 break;
506 case Opt_force:
507 match_int(args, &arg);
508 ib_dev->ibd_force = arg;
509 printk(KERN_INFO "IBLOCK: Set force=%d\n",
510 ib_dev->ibd_force);
511 break;
512 default:
513 break;
514 }
515 }
516
517out:
518 kfree(orig);
519 return (!ret) ? count : ret;
520}
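/*
 * A (hypothetical) example of the option string this parser accepts,
 * typically echoed into the backstore's configfs control attribute:
 *
 *	udev_path=/dev/sdc,force=1
 *
 * Tokens are comma separated; anything that does not match the table above
 * falls through to the default: case and is silently ignored.
 */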
521
522static ssize_t iblock_check_configfs_dev_params(
523 struct se_hba *hba,
524 struct se_subsystem_dev *se_dev)
525{
526 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
527
528 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
529 printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
530 return -1;
531 }
532
533 return 0;
534}
535
536static ssize_t iblock_show_configfs_dev_params(
537 struct se_hba *hba,
538 struct se_subsystem_dev *se_dev,
539 char *b)
540{
541 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
542 struct block_device *bd = ibd->ibd_bd;
543 char buf[BDEVNAME_SIZE];
544 ssize_t bl = 0;
545
546 if (bd)
547 bl += sprintf(b + bl, "iBlock device: %s",
548 bdevname(bd, buf));
549 if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
550 bl += sprintf(b + bl, " UDEV PATH: %s\n",
551 ibd->ibd_udev_path);
552 } else
553 bl += sprintf(b + bl, "\n");
554
555 bl += sprintf(b + bl, " ");
556 if (bd) {
557 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
558 ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
559 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
560 "CLAIMED: IBLOCK" : "CLAIMED: OS");
561 } else {
562 bl += sprintf(b + bl, "Major: %d Minor: %d\n",
563 ibd->ibd_major, ibd->ibd_minor);
564 }
565
566 return bl;
567}
568
569static void iblock_bio_destructor(struct bio *bio)
570{
571 struct se_task *task = bio->bi_private;
572 struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
573
574 bio_free(bio, ib_dev->ibd_bio_set);
575}
576
577static struct bio *iblock_get_bio(
578 struct se_task *task,
579 struct iblock_req *ib_req,
580 struct iblock_dev *ib_dev,
581 int *ret,
582 sector_t lba,
583 u32 sg_num)
584{
585 struct bio *bio;
586
587 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
588 if (!(bio)) {
589 printk(KERN_ERR "Unable to allocate memory for bio\n");
590 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
591 return NULL;
592 }
593
594 DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
595 " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
596 DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
597
598 bio->bi_bdev = ib_dev->ibd_bd;
599 bio->bi_private = (void *) task;
600 bio->bi_destructor = iblock_bio_destructor;
601 bio->bi_end_io = &iblock_bio_done;
602 bio->bi_sector = lba;
603 atomic_inc(&ib_req->ib_bio_cnt);
604
605 DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
606 DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
607 atomic_read(&ib_req->ib_bio_cnt));
608 return bio;
609}
610
611static int iblock_map_task_SG(struct se_task *task)
612{
613 struct se_cmd *cmd = task->task_se_cmd;
614 struct se_device *dev = SE_DEV(cmd);
615 struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
616 struct iblock_req *ib_req = IBLOCK_REQ(task);
617 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
618 struct scatterlist *sg;
619 int ret = 0;
620 u32 i, sg_num = task->task_sg_num;
621 sector_t block_lba;
622 /*
623 * Do starting conversion up from non 512-byte blocksize with
624 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
625 */
626 if (DEV_ATTRIB(dev)->block_size == 4096)
627 block_lba = (task->task_lba << 3);
628 else if (DEV_ATTRIB(dev)->block_size == 2048)
629 block_lba = (task->task_lba << 2);
630 else if (DEV_ATTRIB(dev)->block_size == 1024)
631 block_lba = (task->task_lba << 1);
632 else if (DEV_ATTRIB(dev)->block_size == 512)
633 block_lba = task->task_lba;
634 else {
635 printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
636 " %u\n", DEV_ATTRIB(dev)->block_size);
637 return PYX_TRANSPORT_LU_COMM_FAILURE;
638 }
639
640 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
641 if (!(bio))
642 return ret;
643
644 ib_req->ib_bio = bio;
645 hbio = tbio = bio;
646 /*
647 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
648 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
649 */
650 for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
651 DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
652 " %p len: %u offset: %u\n", task, bio, sg_page(sg),
653 sg->length, sg->offset);
654again:
655 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
656 if (ret != sg->length) {
657
658 DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
659 bio->bi_sector);
660 DEBUG_IBLOCK("** task->task_size: %u\n",
661 task->task_size);
662 DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
663 bio->bi_max_vecs);
664 DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
665 bio->bi_vcnt);
666
667 bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
668 block_lba, sg_num);
669 if (!(bio))
670 goto fail;
671
672 tbio = tbio->bi_next = bio;
673 DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
674 " list, Going to again\n", bio);
675 goto again;
676 }
677 /* Always in 512 byte units for Linux/Block */
678 block_lba += sg->length >> IBLOCK_LBA_SHIFT;
679 sg_num--;
680		DEBUG_IBLOCK("task: %p bio_add_page() passed!, decremented"
681 " sg_num to %u\n", task, sg_num);
682 DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
683 " to %llu\n", task, block_lba);
684 DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
685 " %u\n", task, bio->bi_vcnt);
686 }
687
688 return 0;
689fail:
690 while (hbio) {
691 bio = hbio;
692 hbio = hbio->bi_next;
693 bio->bi_next = NULL;
694 bio_put(bio);
695 }
696 return ret;
697}
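/*
 * The block_lba conversion at the top of iblock_map_task_SG() is, for the
 * supported power-of-two block sizes, equivalent to (a sketch):
 *
 *	block_lba = task->task_lba <<
 *			(ilog2(DEV_ATTRIB(dev)->block_size) - IBLOCK_LBA_SHIFT);
 *
 * since struct bio sectors are always 512 bytes (1 << IBLOCK_LBA_SHIFT).
 */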
698
699static unsigned char *iblock_get_cdb(struct se_task *task)
700{
701 return IBLOCK_REQ(task)->ib_scsi_cdb;
702}
703
704static u32 iblock_get_device_rev(struct se_device *dev)
705{
706 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
707}
708
709static u32 iblock_get_device_type(struct se_device *dev)
710{
711 return TYPE_DISK;
712}
713
714static sector_t iblock_get_blocks(struct se_device *dev)
715{
716 struct iblock_dev *ibd = dev->dev_ptr;
717 struct block_device *bd = ibd->ibd_bd;
718 struct request_queue *q = bdev_get_queue(bd);
719
720 return iblock_emulate_read_cap_with_block_size(dev, bd, q);
721}
722
723static void iblock_bio_done(struct bio *bio, int err)
724{
725 struct se_task *task = bio->bi_private;
726 struct iblock_req *ibr = IBLOCK_REQ(task);
727 /*
728	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
729 */
730 if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
731 err = -EIO;
732
733 if (err != 0) {
734 printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
735 " err: %d\n", bio, err);
736 /*
737 * Bump the ib_bio_err_cnt and release bio.
738 */
739 atomic_inc(&ibr->ib_bio_err_cnt);
740 smp_mb__after_atomic_inc();
741 bio_put(bio);
742 /*
743		 * Wait to complete the task until the last bio has completed.
744 */
745 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
746 return;
747
748 ibr->ib_bio = NULL;
749 transport_complete_task(task, 0);
750 return;
751 }
752 DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
753 task, bio, task->task_lba, bio->bi_sector, err);
754 /*
755 * bio_put() will call iblock_bio_destructor() to release the bio back
756 * to ibr->ib_bio_set.
757 */
758 bio_put(bio);
759 /*
760	 * Wait to complete the task until the last bio has completed.
761 */
762 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
763 return;
764 /*
765 * Return GOOD status for task if zero ib_bio_err_cnt exists.
766 */
767 ibr->ib_bio = NULL;
768 transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
769}
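/*
 * Completion model used above: iblock_get_bio() bumps ib_bio_cnt once per
 * allocated bio, each iblock_bio_done() drops it, and only the final drop
 * (atomic_dec_and_test() returning true) completes the task, with GOOD
 * status only if ib_bio_err_cnt stayed at zero; roughly:
 *
 *	if (atomic_dec_and_test(&ibr->ib_bio_cnt))
 *		transport_complete_task(task,
 *				!atomic_read(&ibr->ib_bio_err_cnt));
 */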
770
771static struct se_subsystem_api iblock_template = {
772 .name = "iblock",
773 .owner = THIS_MODULE,
774 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
775 .map_task_SG = iblock_map_task_SG,
776 .attach_hba = iblock_attach_hba,
777 .detach_hba = iblock_detach_hba,
778 .allocate_virtdevice = iblock_allocate_virtdevice,
779 .create_virtdevice = iblock_create_virtdevice,
780 .free_device = iblock_free_device,
781 .dpo_emulated = iblock_emulated_dpo,
782 .fua_write_emulated = iblock_emulated_fua_write,
783 .fua_read_emulated = iblock_emulated_fua_read,
784 .write_cache_emulated = iblock_emulated_write_cache,
785 .alloc_task = iblock_alloc_task,
786 .do_task = iblock_do_task,
787 .do_discard = iblock_do_discard,
788 .do_sync_cache = iblock_emulate_sync_cache,
789 .free_task = iblock_free_task,
790 .check_configfs_dev_params = iblock_check_configfs_dev_params,
791 .set_configfs_dev_params = iblock_set_configfs_dev_params,
792 .show_configfs_dev_params = iblock_show_configfs_dev_params,
793 .get_cdb = iblock_get_cdb,
794 .get_device_rev = iblock_get_device_rev,
795 .get_device_type = iblock_get_device_type,
796 .get_blocks = iblock_get_blocks,
797};
798
799static int __init iblock_module_init(void)
800{
801 return transport_subsystem_register(&iblock_template);
802}
803
804static void iblock_module_exit(void)
805{
806 transport_subsystem_release(&iblock_template);
807}
808
809MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
810MODULE_AUTHOR("nab@Linux-iSCSI.org");
811MODULE_LICENSE("GPL");
812
813module_init(iblock_module_init);
814module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 000000000000..64c1f4d69f76
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
1#ifndef TARGET_CORE_IBLOCK_H
2#define TARGET_CORE_IBLOCK_H
3
4#define IBLOCK_VERSION "4.0"
5
6#define IBLOCK_HBA_QUEUE_DEPTH 512
7#define IBLOCK_DEVICE_QUEUE_DEPTH 32
8#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
9#define IBLOCK_MAX_CDBS 16
10#define IBLOCK_LBA_SHIFT 9
11
12struct iblock_req {
13 struct se_task ib_task;
14 unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
15 atomic_t ib_bio_cnt;
16 atomic_t ib_bio_err_cnt;
17 struct bio *ib_bio;
18 struct iblock_dev *ib_dev;
19} ____cacheline_aligned;
20
21#define IBDF_HAS_UDEV_PATH 0x01
22#define IBDF_HAS_FORCE 0x02
23
24struct iblock_dev {
25 unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
26 int ibd_force;
27 int ibd_major;
28 int ibd_minor;
29 u32 ibd_depth;
30 u32 ibd_flags;
31 struct bio_set *ibd_bio_set;
32 struct block_device *ibd_bd;
33 struct iblock_hba *ibd_host;
34} ____cacheline_aligned;
35
36struct iblock_hba {
37 int iblock_host_id;
38} ____cacheline_aligned;
39
40#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 000000000000..b662db3a320b
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
1/*******************************************************************************
2 * Filename: target_core_pr.c
3 *
4 * This file contains SPC-3 compliant persistent reservations and
5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
6 *
7 * Copyright (c) 2009, 2010 Rising Tide Systems
8 * Copyright (c) 2009, 2010 Linux-iSCSI.org
9 *
10 * Nicholas A. Bellinger <nab@kernel.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 *
26 ******************************************************************************/
27
28#include <linux/version.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/list.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_cmnd.h>
34#include <asm/unaligned.h>
35
36#include <target/target_core_base.h>
37#include <target/target_core_device.h>
38#include <target/target_core_tmr.h>
39#include <target/target_core_tpg.h>
40#include <target/target_core_transport.h>
41#include <target/target_core_fabric_ops.h>
42#include <target/target_core_configfs.h>
43
44#include "target_core_hba.h"
45#include "target_core_pr.h"
46#include "target_core_ua.h"
47
48/*
49 * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
50 */
51struct pr_transport_id_holder {
52 int dest_local_nexus;
53 struct t10_pr_registration *dest_pr_reg;
54 struct se_portal_group *dest_tpg;
55 struct se_node_acl *dest_node_acl;
56 struct se_dev_entry *dest_se_deve;
57 struct list_head dest_list;
58};
59
60int core_pr_dump_initiator_port(
61 struct t10_pr_registration *pr_reg,
62 char *buf,
63 u32 size)
64{
65 if (!(pr_reg->isid_present_at_reg))
66 return 0;
67
68 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
69 return 1;
70}
71
72static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
73 struct t10_pr_registration *, int);
74
75static int core_scsi2_reservation_seq_non_holder(
76 struct se_cmd *cmd,
77 unsigned char *cdb,
78 u32 pr_reg_type)
79{
80 switch (cdb[0]) {
81 case INQUIRY:
82 case RELEASE:
83 case RELEASE_10:
84 return 0;
85 default:
86 return 1;
87 }
88
89 return 1;
90}
91
92static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
93{
94 struct se_device *dev = cmd->se_dev;
95 struct se_session *sess = cmd->se_sess;
96 int ret;
97
98 if (!(sess))
99 return 0;
100
101 spin_lock(&dev->dev_reservation_lock);
102 if (!dev->dev_reserved_node_acl || !sess) {
103 spin_unlock(&dev->dev_reservation_lock);
104 return 0;
105 }
106 if (dev->dev_reserved_node_acl != sess->se_node_acl) {
107 spin_unlock(&dev->dev_reservation_lock);
108 return -1;
109 }
110 if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
111 spin_unlock(&dev->dev_reservation_lock);
112 return 0;
113 }
114 ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
115 spin_unlock(&dev->dev_reservation_lock);
116
117 return ret;
118}
119
120static int core_scsi2_reservation_release(struct se_cmd *cmd)
121{
122 struct se_device *dev = cmd->se_dev;
123 struct se_session *sess = cmd->se_sess;
124 struct se_portal_group *tpg = sess->se_tpg;
125
126 if (!(sess) || !(tpg))
127 return 0;
128
129 spin_lock(&dev->dev_reservation_lock);
130 if (!dev->dev_reserved_node_acl || !sess) {
131 spin_unlock(&dev->dev_reservation_lock);
132 return 0;
133 }
134
135 if (dev->dev_reserved_node_acl != sess->se_node_acl) {
136 spin_unlock(&dev->dev_reservation_lock);
137 return 0;
138 }
139 dev->dev_reserved_node_acl = NULL;
140 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
141 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
142 dev->dev_res_bin_isid = 0;
143 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
144 }
145 printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
146 " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
147 SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
148 sess->se_node_acl->initiatorname);
149 spin_unlock(&dev->dev_reservation_lock);
150
151 return 0;
152}
153
154static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
155{
156 struct se_device *dev = cmd->se_dev;
157 struct se_session *sess = cmd->se_sess;
158 struct se_portal_group *tpg = sess->se_tpg;
159
160 if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
161 (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
162		printk(KERN_ERR "LongIO and Obsolete Bits set, returning"
163 " ILLEGAL_REQUEST\n");
164 return PYX_TRANSPORT_ILLEGAL_REQUEST;
165 }
166 /*
167 * This is currently the case for target_core_mod passthrough struct se_cmd
168 * ops
169 */
170 if (!(sess) || !(tpg))
171 return 0;
172
173 spin_lock(&dev->dev_reservation_lock);
174 if (dev->dev_reserved_node_acl &&
175 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
176		printk(KERN_ERR "SCSI-2 RESERVATION CONFLICT for %s fabric\n",
177 TPG_TFO(tpg)->get_fabric_name());
178 printk(KERN_ERR "Original reserver LUN: %u %s\n",
179 SE_LUN(cmd)->unpacked_lun,
180 dev->dev_reserved_node_acl->initiatorname);
181 printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
182 " from %s \n", SE_LUN(cmd)->unpacked_lun,
183 cmd->se_deve->mapped_lun,
184 sess->se_node_acl->initiatorname);
185 spin_unlock(&dev->dev_reservation_lock);
186 return PYX_TRANSPORT_RESERVATION_CONFLICT;
187 }
188
189 dev->dev_reserved_node_acl = sess->se_node_acl;
190 dev->dev_flags |= DF_SPC2_RESERVATIONS;
191 if (sess->sess_bin_isid != 0) {
192 dev->dev_res_bin_isid = sess->sess_bin_isid;
193 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
194 }
195 printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
196 " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
197 SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
198 sess->se_node_acl->initiatorname);
199 spin_unlock(&dev->dev_reservation_lock);
200
201 return 0;
202}
203
204static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
205 struct se_node_acl *, struct se_session *);
206static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
207
208/*
209 * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
210 * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
211 * thread context.
212 */
213int core_scsi2_emulate_crh(struct se_cmd *cmd)
214{
215 struct se_session *se_sess = cmd->se_sess;
216 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
217 struct t10_pr_registration *pr_reg;
218 struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
219 unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
220 int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
221 int conflict = 0;
222
223 if (!(se_sess))
224 return 0;
225
226 if (!(crh))
227 goto after_crh;
228
229 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
230 se_sess);
231 if (pr_reg) {
232 /*
233 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
234 * behavior
235 *
236 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
237 * status, but no reservation shall be established and the
238 * persistent reservation shall not be changed, if the command
239 * is received from a) and b) below.
240 *
241 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
242 * status, but the persistent reservation shall not be released,
243 * if the command is received from a) and b)
244 *
245 * a) An I_T nexus that is a persistent reservation holder; or
246 * b) An I_T nexus that is registered if a registrants only or
247 * all registrants type persistent reservation is present.
248 *
249 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
250 * RELEASE(6) command, or RELEASE(10) command shall be processed
251 * as defined in SPC-2.
252 */
253 if (pr_reg->pr_res_holder) {
254 core_scsi3_put_pr_reg(pr_reg);
255 return 0;
256 }
257 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
258 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
259 (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
260 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
261 core_scsi3_put_pr_reg(pr_reg);
262 return 0;
263 }
264 core_scsi3_put_pr_reg(pr_reg);
265 conflict = 1;
266 } else {
267 /*
268 * Following spc2r20 5.5.1 Reservations overview:
269 *
270 * If a logical unit has executed a PERSISTENT RESERVE OUT
271 * command with the REGISTER or the REGISTER AND IGNORE
272 * EXISTING KEY service action and is still registered by any
273 * initiator, all RESERVE commands and all RELEASE commands
274 * regardless of initiator shall conflict and shall terminate
275 * with a RESERVATION CONFLICT status.
276 */
277 spin_lock(&pr_tmpl->registration_lock);
278 conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
279 spin_unlock(&pr_tmpl->registration_lock);
280 }
281
282 if (conflict) {
283 printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
284 " while active SPC-3 registrations exist,"
285 " returning RESERVATION_CONFLICT\n");
286 return PYX_TRANSPORT_RESERVATION_CONFLICT;
287 }
288
289after_crh:
290 if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
291 return core_scsi2_reservation_reserve(cmd);
292 else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
293 return core_scsi2_reservation_release(cmd);
294 else
295 return PYX_TRANSPORT_INVALID_CDB_FIELD;
296}
297
298/*
299 * Begin SPC-3/SPC-4 Persistent Reservations emulation support
300 *
301 * This function is called by those initiator ports who are *NOT*
302 * the active PR reservation holder when a reservation is present.
303 */
304static int core_scsi3_pr_seq_non_holder(
305 struct se_cmd *cmd,
306 unsigned char *cdb,
307 u32 pr_reg_type)
308{
309 struct se_dev_entry *se_deve;
310 struct se_session *se_sess = SE_SESS(cmd);
311 int other_cdb = 0, ignore_reg;
312 int registered_nexus = 0, ret = 1; /* Conflict by default */
313 int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
314 int we = 0; /* Write Exclusive */
315 int legacy = 0; /* Act like a legacy device and return
316 * RESERVATION CONFLICT on some CDBs */
317 /*
318 * A legacy SPC-2 reservation is being held.
319 */
320 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
321 return core_scsi2_reservation_seq_non_holder(cmd,
322 cdb, pr_reg_type);
323
324 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
325 /*
326 * Determine if the registration should be ignored due to
327 * non-matching ISIDs in core_scsi3_pr_reservation_check().
328 */
329 ignore_reg = (pr_reg_type & 0x80000000);
330 if (ignore_reg)
331 pr_reg_type &= ~0x80000000;
332
333 switch (pr_reg_type) {
334 case PR_TYPE_WRITE_EXCLUSIVE:
335 we = 1;
336 case PR_TYPE_EXCLUSIVE_ACCESS:
337 /*
338 * Some commands are only allowed for the persistent reservation
339 * holder.
340 */
341 if ((se_deve->def_pr_registered) && !(ignore_reg))
342 registered_nexus = 1;
343 break;
344 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
345 we = 1;
346 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
347 /*
348 * Some commands are only allowed for registered I_T Nexuses.
349 */
350 reg_only = 1;
351 if ((se_deve->def_pr_registered) && !(ignore_reg))
352 registered_nexus = 1;
353 break;
354 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
355 we = 1;
356 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
357 /*
358 * Each registered I_T Nexus is a reservation holder.
359 */
360 all_reg = 1;
361 if ((se_deve->def_pr_registered) && !(ignore_reg))
362 registered_nexus = 1;
363 break;
364 default:
365 return -1;
366 }
367 /*
368 * Referenced from spc4r17 table 45 for *NON* PR holder access
369 */
370 switch (cdb[0]) {
371 case SECURITY_PROTOCOL_IN:
372 if (registered_nexus)
373 return 0;
374 ret = (we) ? 0 : 1;
375 break;
376 case MODE_SENSE:
377 case MODE_SENSE_10:
378 case READ_ATTRIBUTE:
379 case READ_BUFFER:
380 case RECEIVE_DIAGNOSTIC:
381 if (legacy) {
382 ret = 1;
383 break;
384 }
385 if (registered_nexus) {
386 ret = 0;
387 break;
388 }
389 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
390 break;
391 case PERSISTENT_RESERVE_OUT:
392 /*
393 * This follows PERSISTENT_RESERVE_OUT service actions that
394 * are allowed in the presence of various reservations.
395 * See spc4r17, table 46
396 */
397 switch (cdb[1] & 0x1f) {
398 case PRO_CLEAR:
399 case PRO_PREEMPT:
400 case PRO_PREEMPT_AND_ABORT:
401 ret = (registered_nexus) ? 0 : 1;
402 break;
403 case PRO_REGISTER:
404 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
405 ret = 0;
406 break;
407 case PRO_REGISTER_AND_MOVE:
408 case PRO_RESERVE:
409 ret = 1;
410 break;
411 case PRO_RELEASE:
412 ret = (registered_nexus) ? 0 : 1;
413 break;
414 default:
415 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
416 " action: 0x%02x\n", cdb[1] & 0x1f);
417 return -1;
418 }
419 break;
420 case RELEASE:
421 case RELEASE_10:
422 /* Handled by CRH=1 in core_scsi2_emulate_crh() */
423 ret = 0;
424 break;
425 case RESERVE:
426 case RESERVE_10:
427 /* Handled by CRH=1 in core_scsi2_emulate_crh() */
428 ret = 0;
429 break;
430 case TEST_UNIT_READY:
431 ret = (legacy) ? 1 : 0; /* Conflict for legacy */
432 break;
433 case MAINTENANCE_IN:
434 switch (cdb[1] & 0x1f) {
435 case MI_MANAGEMENT_PROTOCOL_IN:
436 if (registered_nexus) {
437 ret = 0;
438 break;
439 }
440 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
441 break;
442 case MI_REPORT_SUPPORTED_OPERATION_CODES:
443 case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
444 if (legacy) {
445 ret = 1;
446 break;
447 }
448 if (registered_nexus) {
449 ret = 0;
450 break;
451 }
452 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
453 break;
454 case MI_REPORT_ALIASES:
455 case MI_REPORT_IDENTIFYING_INFORMATION:
456 case MI_REPORT_PRIORITY:
457 case MI_REPORT_TARGET_PGS:
458 case MI_REPORT_TIMESTAMP:
459 ret = 0; /* Allowed */
460 break;
461 default:
462 printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
463 (cdb[1] & 0x1f));
464 return -1;
465 }
466 break;
467 case ACCESS_CONTROL_IN:
468 case ACCESS_CONTROL_OUT:
469 case INQUIRY:
470 case LOG_SENSE:
471 case READ_MEDIA_SERIAL_NUMBER:
472 case REPORT_LUNS:
473 case REQUEST_SENSE:
474		ret = 0; /* Allowed CDBs */
475 break;
476 default:
477 other_cdb = 1;
478 break;
479 }
480 /*
481 * Case where the CDB is explicitly allowed in the above switch
482 * statement.
483 */
484 if (!(ret) && !(other_cdb)) {
485#if 0
486		printk(KERN_INFO "Allowing explicit CDB: 0x%02x for %s"
487 " reservation holder\n", cdb[0],
488 core_scsi3_pr_dump_type(pr_reg_type));
489#endif
490 return ret;
491 }
492 /*
493 * Check if write exclusive initiator ports *NOT* holding the
494 * WRITE_EXCLUSIVE_* reservation.
495 */
496 if ((we) && !(registered_nexus)) {
497 if (cmd->data_direction == DMA_TO_DEVICE) {
498 /*
499 * Conflict for write exclusive
500 */
501 printk(KERN_INFO "%s Conflict for unregistered nexus"
502 " %s CDB: 0x%02x to %s reservation\n",
503 transport_dump_cmd_direction(cmd),
504 se_sess->se_node_acl->initiatorname, cdb[0],
505 core_scsi3_pr_dump_type(pr_reg_type));
506 return 1;
507 } else {
508 /*
509 * Allow non WRITE CDBs for all Write Exclusive
510 * PR TYPEs to pass for registered and
511			 * non-registered nexuses NOT holding the reservation.
512 *
513			 * We only make noise for the unregistered nexuses,
514 * as we expect registered non-reservation holding
515 * nexuses to issue CDBs.
516 */
517#if 0
518 if (!(registered_nexus)) {
519				printk(KERN_INFO "Allowing implicit CDB: 0x%02x"
520 " for %s reservation on unregistered"
521 " nexus\n", cdb[0],
522 core_scsi3_pr_dump_type(pr_reg_type));
523 }
524#endif
525 return 0;
526 }
527 } else if ((reg_only) || (all_reg)) {
528 if (registered_nexus) {
529 /*
530 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
531 * allow commands from registered nexuses.
532 */
533#if 0
534			printk(KERN_INFO "Allowing implicit CDB: 0x%02x for %s"
535 " reservation\n", cdb[0],
536 core_scsi3_pr_dump_type(pr_reg_type));
537#endif
538 return 0;
539 }
540 }
541	printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%02x"
542 " for %s reservation\n", transport_dump_cmd_direction(cmd),
543 (registered_nexus) ? "" : "un",
544 se_sess->se_node_acl->initiatorname, cdb[0],
545 core_scsi3_pr_dump_type(pr_reg_type));
546
547 return 1; /* Conflict by default */
548}
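/*
 * Decision summary for CDBs that are not explicitly listed in the switch
 * above: an unregistered nexus may read but not write under the Write
 * Exclusive types, a registered nexus is allowed under the *_REGONLY and
 * *_ALLREG types, and every remaining combination falls through to the
 * final "Conflict" printk and returns 1.
 */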
549
550static u32 core_scsi3_pr_generation(struct se_device *dev)
551{
552 struct se_subsystem_dev *su_dev = SU_DEV(dev);
553 u32 prg;
554 /*
555 * PRGeneration field shall contain the value of a 32-bit wrapping
556	 * counter maintained by the device server.
557 *
558 * Note that this is done regardless of Active Persist across
559 * Target PowerLoss (APTPL)
560 *
561 * See spc4r17 section 6.3.12 READ_KEYS service action
562 */
563 spin_lock(&dev->dev_reservation_lock);
564 prg = T10_RES(su_dev)->pr_generation++;
565 spin_unlock(&dev->dev_reservation_lock);
566
567 return prg;
568}
569
570static int core_scsi3_pr_reservation_check(
571 struct se_cmd *cmd,
572 u32 *pr_reg_type)
573{
574 struct se_device *dev = cmd->se_dev;
575 struct se_session *sess = cmd->se_sess;
576 int ret;
577
578 if (!(sess))
579 return 0;
580 /*
581 * A legacy SPC-2 reservation is being held.
582 */
583 if (dev->dev_flags & DF_SPC2_RESERVATIONS)
584 return core_scsi2_reservation_check(cmd, pr_reg_type);
585
586 spin_lock(&dev->dev_reservation_lock);
587 if (!(dev->dev_pr_res_holder)) {
588 spin_unlock(&dev->dev_reservation_lock);
589 return 0;
590 }
591 *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
592 cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
593 if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
594 spin_unlock(&dev->dev_reservation_lock);
595 return -1;
596 }
597 if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
598 spin_unlock(&dev->dev_reservation_lock);
599 return 0;
600 }
601 ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
602 sess->sess_bin_isid) ? 0 : -1;
603 /*
604 * Use bit in *pr_reg_type to notify ISID mismatch in
605 * core_scsi3_pr_seq_non_holder().
606 */
607 if (ret != 0)
608 *pr_reg_type |= 0x80000000;
609 spin_unlock(&dev->dev_reservation_lock);
610
611 return ret;
612}
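/*
 * Note: bit 31 of *pr_reg_type doubles as an out-of-band "ISID mismatch"
 * flag here; core_scsi3_pr_seq_non_holder() strips it again before looking
 * at the reservation type:
 *
 *	ignore_reg = (pr_reg_type & 0x80000000);
 *	if (ignore_reg)
 *		pr_reg_type &= ~0x80000000;
 *
 * so the PR_TYPE_* constants have to stay below bit 31.
 */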
613
614static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
615 struct se_device *dev,
616 struct se_node_acl *nacl,
617 struct se_dev_entry *deve,
618 unsigned char *isid,
619 u64 sa_res_key,
620 int all_tg_pt,
621 int aptpl)
622{
623 struct se_subsystem_dev *su_dev = SU_DEV(dev);
624 struct t10_pr_registration *pr_reg;
625
626 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
627 if (!(pr_reg)) {
628 printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
629 return NULL;
630 }
631
632 pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
633 GFP_ATOMIC);
634 if (!(pr_reg->pr_aptpl_buf)) {
635 printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
636 kmem_cache_free(t10_pr_reg_cache, pr_reg);
637 return NULL;
638 }
639
640 INIT_LIST_HEAD(&pr_reg->pr_reg_list);
641 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
642 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
643 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
644 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
645 atomic_set(&pr_reg->pr_res_holders, 0);
646 pr_reg->pr_reg_nacl = nacl;
647 pr_reg->pr_reg_deve = deve;
648 pr_reg->pr_res_mapped_lun = deve->mapped_lun;
649 pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
650 pr_reg->pr_res_key = sa_res_key;
651 pr_reg->pr_reg_all_tg_pt = all_tg_pt;
652 pr_reg->pr_reg_aptpl = aptpl;
653 pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
654 /*
655 * If an ISID value for this SCSI Initiator Port exists,
656 * save it to the registration now.
657 */
658 if (isid != NULL) {
659 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
660 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
661 pr_reg->isid_present_at_reg = 1;
662 }
663
664 return pr_reg;
665}
666
667static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
668static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
669
670/*
671 * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
672 * modes.
673 */
674static struct t10_pr_registration *__core_scsi3_alloc_registration(
675 struct se_device *dev,
676 struct se_node_acl *nacl,
677 struct se_dev_entry *deve,
678 unsigned char *isid,
679 u64 sa_res_key,
680 int all_tg_pt,
681 int aptpl)
682{
683 struct se_dev_entry *deve_tmp;
684 struct se_node_acl *nacl_tmp;
685 struct se_port *port, *port_tmp;
686 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
687 struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
688 int ret;
689 /*
690 * Create a registration for the I_T Nexus upon which the
691 * PROUT REGISTER was received.
692 */
693 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
694 sa_res_key, all_tg_pt, aptpl);
695 if (!(pr_reg))
696 return NULL;
697 /*
698 * Return pointer to pr_reg for ALL_TG_PT=0
699 */
700 if (!(all_tg_pt))
701 return pr_reg;
702 /*
703 * Create list of matching SCSI Initiator Port registrations
704 * for ALL_TG_PT=1
705 */
706 spin_lock(&dev->se_port_lock);
707 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
708 atomic_inc(&port->sep_tg_pt_ref_cnt);
709 smp_mb__after_atomic_inc();
710 spin_unlock(&dev->se_port_lock);
711
712 spin_lock_bh(&port->sep_alua_lock);
713 list_for_each_entry(deve_tmp, &port->sep_alua_list,
714 alua_port_list) {
715 /*
716 * This pointer will be NULL for demo mode MappedLUNs
717			 * that have not been made explicit via a ConfigFS
718 * MappedLUN group for the SCSI Initiator Node ACL.
719 */
720 if (!(deve_tmp->se_lun_acl))
721 continue;
722
723 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
724 /*
725 * Skip the matching struct se_node_acl that is allocated
726 * above..
727 */
728 if (nacl == nacl_tmp)
729 continue;
730 /*
731 * Only perform PR registrations for target ports on
732 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
733 * arrived.
734 */
735 if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
736 continue;
737 /*
738 * Look for a matching Initiator Node ACL in ASCII format
739 */
740 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
741 continue;
742
743 atomic_inc(&deve_tmp->pr_ref_count);
744 smp_mb__after_atomic_inc();
745 spin_unlock_bh(&port->sep_alua_lock);
746 /*
747 * Grab a configfs group dependency that is released
748 * for the exception path at label out: below, or upon
749 * completion of adding ALL_TG_PT=1 registrations in
750 * __core_scsi3_add_registration()
751 */
752 ret = core_scsi3_lunacl_depend_item(deve_tmp);
753 if (ret < 0) {
754 printk(KERN_ERR "core_scsi3_lunacl_depend"
755 "_item() failed\n");
756 atomic_dec(&port->sep_tg_pt_ref_cnt);
757 smp_mb__after_atomic_dec();
758 atomic_dec(&deve_tmp->pr_ref_count);
759 smp_mb__after_atomic_dec();
760 goto out;
761 }
762 /*
763 * Located a matching SCSI Initiator Port on a different
764 * port, allocate the pr_reg_atp and attach it to the
765 * pr_reg->pr_reg_atp_list that will be processed once
766 * the original *pr_reg is processed in
767 * __core_scsi3_add_registration()
768 */
769 pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
770 nacl_tmp, deve_tmp, NULL,
771 sa_res_key, all_tg_pt, aptpl);
772 if (!(pr_reg_atp)) {
773 atomic_dec(&port->sep_tg_pt_ref_cnt);
774 smp_mb__after_atomic_dec();
775 atomic_dec(&deve_tmp->pr_ref_count);
776 smp_mb__after_atomic_dec();
777 core_scsi3_lunacl_undepend_item(deve_tmp);
778 goto out;
779 }
780
781 list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
782 &pr_reg->pr_reg_atp_list);
783 spin_lock_bh(&port->sep_alua_lock);
784 }
785 spin_unlock_bh(&port->sep_alua_lock);
786
787 spin_lock(&dev->se_port_lock);
788 atomic_dec(&port->sep_tg_pt_ref_cnt);
789 smp_mb__after_atomic_dec();
790 }
791 spin_unlock(&dev->se_port_lock);
792
793 return pr_reg;
794out:
795 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
796 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
797 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
798 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
799 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
800 }
801 kmem_cache_free(t10_pr_reg_cache, pr_reg);
802 return NULL;
803}
804
805int core_scsi3_alloc_aptpl_registration(
806 struct t10_reservation_template *pr_tmpl,
807 u64 sa_res_key,
808 unsigned char *i_port,
809 unsigned char *isid,
810 u32 mapped_lun,
811 unsigned char *t_port,
812 u16 tpgt,
813 u32 target_lun,
814 int res_holder,
815 int all_tg_pt,
816 u8 type)
817{
818 struct t10_pr_registration *pr_reg;
819
820 if (!(i_port) || !(t_port) || !(sa_res_key)) {
821 printk(KERN_ERR "Illegal parameters for APTPL registration\n");
822 return -1;
823 }
824
825 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
826 if (!(pr_reg)) {
827 printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
828 return -1;
829 }
830 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
831
832 INIT_LIST_HEAD(&pr_reg->pr_reg_list);
833 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
834 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
835 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
836 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
837 atomic_set(&pr_reg->pr_res_holders, 0);
838 pr_reg->pr_reg_nacl = NULL;
839 pr_reg->pr_reg_deve = NULL;
840 pr_reg->pr_res_mapped_lun = mapped_lun;
841 pr_reg->pr_aptpl_target_lun = target_lun;
842 pr_reg->pr_res_key = sa_res_key;
843 pr_reg->pr_reg_all_tg_pt = all_tg_pt;
844 pr_reg->pr_reg_aptpl = 1;
845 pr_reg->pr_reg_tg_pt_lun = NULL;
846 pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
847 pr_reg->pr_res_type = type;
848 /*
849 * If an ISID value had been saved in APTPL metadata for this
850 * SCSI Initiator Port, restore it now.
851 */
852 if (isid != NULL) {
853 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
854 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
855 pr_reg->isid_present_at_reg = 1;
856 }
857 /*
858 * Copy the i_port and t_port information from caller.
859 */
860 snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
861 snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
862 pr_reg->pr_reg_tpgt = tpgt;
863 /*
864 * Set pr_res_holder from caller, the pr_reg who is the reservation
865	 * holder will get its pointer set in core_scsi3_aptpl_reserve() once
866 * the Initiator Node LUN ACL from the fabric module is created for
867 * this registration.
868 */
869 pr_reg->pr_res_holder = res_holder;
870
871 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
872 printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
873 " metadata\n", (res_holder) ? "+reservation" : "");
874 return 0;
875}
876
877static void core_scsi3_aptpl_reserve(
878 struct se_device *dev,
879 struct se_portal_group *tpg,
880 struct se_node_acl *node_acl,
881 struct t10_pr_registration *pr_reg)
882{
883 char i_buf[PR_REG_ISID_ID_LEN];
884 int prf_isid;
885
886 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
887 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
888 PR_REG_ISID_ID_LEN);
889
890 spin_lock(&dev->dev_reservation_lock);
891 dev->dev_pr_res_holder = pr_reg;
892 spin_unlock(&dev->dev_reservation_lock);
893
894 printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
895 " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
896 TPG_TFO(tpg)->get_fabric_name(),
897 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
898 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
899 printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
900 TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
901 (prf_isid) ? &i_buf[0] : "");
902}
903
904static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
905 struct t10_pr_registration *, int, int);
906
907static int __core_scsi3_check_aptpl_registration(
908 struct se_device *dev,
909 struct se_portal_group *tpg,
910 struct se_lun *lun,
911 u32 target_lun,
912 struct se_node_acl *nacl,
913 struct se_dev_entry *deve)
914{
915 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
916 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
917 unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
918 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
919 u16 tpgt;
920
921 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
922 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
923 /*
924 * Copy Initiator Port information from struct se_node_acl
925 */
926 snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
927 snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
928 TPG_TFO(tpg)->tpg_get_wwn(tpg));
929 tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
930 /*
931 * Look for the matching registrations+reservation from those
932 * created from APTPL metadata. Note that multiple registrations
933 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
934 * TransportIDs.
935 */
936 spin_lock(&pr_tmpl->aptpl_reg_lock);
937 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
938 pr_reg_aptpl_list) {
939 if (!(strcmp(pr_reg->pr_iport, i_port)) &&
940 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
941 !(strcmp(pr_reg->pr_tport, t_port)) &&
942 (pr_reg->pr_reg_tpgt == tpgt) &&
943 (pr_reg->pr_aptpl_target_lun == target_lun)) {
944
945 pr_reg->pr_reg_nacl = nacl;
946 pr_reg->pr_reg_deve = deve;
947 pr_reg->pr_reg_tg_pt_lun = lun;
948
949 list_del(&pr_reg->pr_reg_aptpl_list);
950 spin_unlock(&pr_tmpl->aptpl_reg_lock);
951 /*
952 * At this point all of the pointers in *pr_reg will
953 * be setup, so go ahead and add the registration.
954 */
955
956 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
957 /*
958 * If this registration is the reservation holder,
959 * make that happen now..
960 */
961 if (pr_reg->pr_res_holder)
962 core_scsi3_aptpl_reserve(dev, tpg,
963 nacl, pr_reg);
964 /*
965 * Reenable pr_aptpl_active to accept new metadata
966 * updates once the SCSI device is active again..
967 */
968 spin_lock(&pr_tmpl->aptpl_reg_lock);
969 pr_tmpl->pr_aptpl_active = 1;
970 }
971 }
972 spin_unlock(&pr_tmpl->aptpl_reg_lock);
973
974 return 0;
975}
976
977int core_scsi3_check_aptpl_registration(
978 struct se_device *dev,
979 struct se_portal_group *tpg,
980 struct se_lun *lun,
981 struct se_lun_acl *lun_acl)
982{
983 struct se_subsystem_dev *su_dev = SU_DEV(dev);
984 struct se_node_acl *nacl = lun_acl->se_lun_nacl;
985 struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
986
987 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
988 return 0;
989
990 return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
991 lun->unpacked_lun, nacl, deve);
992}
993
994static void __core_scsi3_dump_registration(
995 struct target_core_fabric_ops *tfo,
996 struct se_device *dev,
997 struct se_node_acl *nacl,
998 struct t10_pr_registration *pr_reg,
999 int register_type)
1000{
1001 struct se_portal_group *se_tpg = nacl->se_tpg;
1002 char i_buf[PR_REG_ISID_ID_LEN];
1003 int prf_isid;
1004
1005 memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
1006 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1007 PR_REG_ISID_ID_LEN);
1008
1009 printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
1010 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
1011 "_AND_MOVE" : (register_type == 1) ?
1012 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
1013 (prf_isid) ? i_buf : "");
1014 printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
1015 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
1016 tfo->tpg_get_tag(se_tpg));
1017 printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1018 " Port(s)\n", tfo->get_fabric_name(),
1019 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1020 TRANSPORT(dev)->name);
1021 printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1022 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
1023 pr_reg->pr_res_key, pr_reg->pr_res_generation,
1024 pr_reg->pr_reg_aptpl);
1025}
1026
1027/*
1028 * this function can be called with struct se_device->dev_reservation_lock
1029 * when register_move = 1
1030 */
1031static void __core_scsi3_add_registration(
1032 struct se_device *dev,
1033 struct se_node_acl *nacl,
1034 struct t10_pr_registration *pr_reg,
1035 int register_type,
1036 int register_move)
1037{
1038 struct se_subsystem_dev *su_dev = SU_DEV(dev);
1039 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
1040 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
1041 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1042
1043 /*
1044 * Increment PRgeneration counter for struct se_device upon a successful
1045 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
1046 *
1047 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
1048 * action, the struct se_device->dev_reservation_lock will already be held,
1049 * so we do not call core_scsi3_pr_generation() which grabs the lock
1050 * for the REGISTER.
1051 */
1052 pr_reg->pr_res_generation = (register_move) ?
1053 T10_RES(su_dev)->pr_generation++ :
1054 core_scsi3_pr_generation(dev);
1055
1056 spin_lock(&pr_tmpl->registration_lock);
1057 list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
1058 pr_reg->pr_reg_deve->def_pr_registered = 1;
1059
1060 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
1061 spin_unlock(&pr_tmpl->registration_lock);
1062 /*
1063 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
1064 */
1065 if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
1066 return;
1067 /*
1068 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
1069 * allocated in __core_scsi3_alloc_registration()
1070 */
1071 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
1072 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
1073 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
1074
1075 pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
1076
1077 spin_lock(&pr_tmpl->registration_lock);
1078 list_add_tail(&pr_reg_tmp->pr_reg_list,
1079 &pr_tmpl->registration_list);
1080 pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
1081
1082 __core_scsi3_dump_registration(tfo, dev,
1083 pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
1084 register_type);
1085 spin_unlock(&pr_tmpl->registration_lock);
1086 /*
1087 * Drop configfs group dependency reference from
1088 * __core_scsi3_alloc_registration()
1089 */
1090 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1091 }
1092}
1093
1094static int core_scsi3_alloc_registration(
1095 struct se_device *dev,
1096 struct se_node_acl *nacl,
1097 struct se_dev_entry *deve,
1098 unsigned char *isid,
1099 u64 sa_res_key,
1100 int all_tg_pt,
1101 int aptpl,
1102 int register_type,
1103 int register_move)
1104{
1105 struct t10_pr_registration *pr_reg;
1106
1107 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
1108 sa_res_key, all_tg_pt, aptpl);
1109 if (!(pr_reg))
1110 return -1;
1111
1112 __core_scsi3_add_registration(dev, nacl, pr_reg,
1113 register_type, register_move);
1114 return 0;
1115}
1116
1117static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1118 struct se_device *dev,
1119 struct se_node_acl *nacl,
1120 unsigned char *isid)
1121{
1122 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1123 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
1124 struct se_portal_group *tpg;
1125
1126 spin_lock(&pr_tmpl->registration_lock);
1127 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1128 &pr_tmpl->registration_list, pr_reg_list) {
1129 /*
1130 * First look for a matching struct se_node_acl
1131 */
1132 if (pr_reg->pr_reg_nacl != nacl)
1133 continue;
1134
1135 tpg = pr_reg->pr_reg_nacl->se_tpg;
1136 /*
1137 * If this registration does NOT contain a fabric provided
1138 * ISID, then we have found a match.
1139 */
1140 if (!(pr_reg->isid_present_at_reg)) {
1141 /*
1142 * Determine if this SCSI device server requires that
1143			 * SCSI Initiator TransportID w/ ISIDs is enforced
1144 * for fabric modules (iSCSI) requiring them.
1145 */
1146 if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
1147 if (DEV_ATTRIB(dev)->enforce_pr_isids)
1148 continue;
1149 }
1150 atomic_inc(&pr_reg->pr_res_holders);
1151 smp_mb__after_atomic_inc();
1152 spin_unlock(&pr_tmpl->registration_lock);
1153 return pr_reg;
1154 }
1155 /*
1156 * If the *pr_reg contains a fabric defined ISID for multi-value
1157 * SCSI Initiator Port TransportIDs, then we expect a valid
1158 * matching ISID to be provided by the local SCSI Initiator Port.
1159 */
1160 if (!(isid))
1161 continue;
1162 if (strcmp(isid, pr_reg->pr_reg_isid))
1163 continue;
1164
1165 atomic_inc(&pr_reg->pr_res_holders);
1166 smp_mb__after_atomic_inc();
1167 spin_unlock(&pr_tmpl->registration_lock);
1168 return pr_reg;
1169 }
1170 spin_unlock(&pr_tmpl->registration_lock);
1171
1172 return NULL;
1173}
1174
1175static struct t10_pr_registration *core_scsi3_locate_pr_reg(
1176 struct se_device *dev,
1177 struct se_node_acl *nacl,
1178 struct se_session *sess)
1179{
1180 struct se_portal_group *tpg = nacl->se_tpg;
1181 unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
1182
1183 if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
1184 memset(&buf[0], 0, PR_REG_ISID_LEN);
1185 TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
1186 PR_REG_ISID_LEN);
1187 isid_ptr = &buf[0];
1188 }
1189
1190 return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
1191}
1192
1193static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1194{
1195 atomic_dec(&pr_reg->pr_res_holders);
1196 smp_mb__after_atomic_dec();
1197}
1198
1199static int core_scsi3_check_implict_release(
1200 struct se_device *dev,
1201 struct t10_pr_registration *pr_reg)
1202{
1203 struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
1204 struct t10_pr_registration *pr_res_holder;
1205 int ret = 0;
1206
1207 spin_lock(&dev->dev_reservation_lock);
1208 pr_res_holder = dev->dev_pr_res_holder;
1209 if (!(pr_res_holder)) {
1210 spin_unlock(&dev->dev_reservation_lock);
1211 return ret;
1212 }
1213 if (pr_res_holder == pr_reg) {
1214 /*
1215		 * Perform an implicit RELEASE if the registration that
1216 * is being released is holding the reservation.
1217 *
1218 * From spc4r17, section 5.7.11.1:
1219 *
1220 * e) If the I_T nexus is the persistent reservation holder
1221 * and the persistent reservation is not an all registrants
1222 * type, then a PERSISTENT RESERVE OUT command with REGISTER
1223 * service action or REGISTER AND IGNORE EXISTING KEY
1224 * service action with the SERVICE ACTION RESERVATION KEY
1225 * field set to zero (see 5.7.11.3).
1226 */
1227 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
1228 ret = 1;
1229 /*
1230 * For 'All Registrants' reservation types, all existing
1231 * registrations are still processed as reservation holders
1232 * in core_scsi3_pr_seq_non_holder() after the initial
1233		 * reservation holder is implicitly released here.
1234 */
1235 } else if (pr_reg->pr_reg_all_tg_pt &&
1236 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
1237 pr_reg->pr_reg_nacl->initiatorname)) &&
1238 (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
1239 printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
1240 " UNREGISTER while existing reservation with matching"
1241 " key 0x%016Lx is present from another SCSI Initiator"
1242 " Port\n", pr_reg->pr_res_key);
1243 ret = -1;
1244 }
1245 spin_unlock(&dev->dev_reservation_lock);
1246
1247 return ret;
1248}
1249
1250/*
1251 * Called with struct t10_reservation_template->registration_lock held.
1252 */
1253static void __core_scsi3_free_registration(
1254 struct se_device *dev,
1255 struct t10_pr_registration *pr_reg,
1256 struct list_head *preempt_and_abort_list,
1257 int dec_holders)
1258{
1259 struct target_core_fabric_ops *tfo =
1260 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1261 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1262 char i_buf[PR_REG_ISID_ID_LEN];
1263 int prf_isid;
1264
1265 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1266 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1267 PR_REG_ISID_ID_LEN);
1268
1269 pr_reg->pr_reg_deve->def_pr_registered = 0;
1270 pr_reg->pr_reg_deve->pr_res_key = 0;
1271 list_del(&pr_reg->pr_reg_list);
1272 /*
1273 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
1274 * so call core_scsi3_put_pr_reg() to decrement our reference.
1275 */
1276 if (dec_holders)
1277 core_scsi3_put_pr_reg(pr_reg);
1278 /*
1279	 * Wait until all references from any other I_T nexuses for this
1280 * *pr_reg have been released. Because list_del() is called above,
1281 * the last core_scsi3_put_pr_reg(pr_reg) will release this reference
1282 * count back to zero, and we release *pr_reg.
1283 */
1284 while (atomic_read(&pr_reg->pr_res_holders) != 0) {
1285 spin_unlock(&pr_tmpl->registration_lock);
1286 printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
1287 tfo->get_fabric_name());
1288 cpu_relax();
1289 spin_lock(&pr_tmpl->registration_lock);
1290 }
1291
1292 printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
1293 " Node: %s%s\n", tfo->get_fabric_name(),
1294 pr_reg->pr_reg_nacl->initiatorname,
1295 (prf_isid) ? &i_buf[0] : "");
1296 printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1297 " Port(s)\n", tfo->get_fabric_name(),
1298 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1299 TRANSPORT(dev)->name);
1300 printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1301 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
1302 pr_reg->pr_res_generation);
1303
1304 if (!(preempt_and_abort_list)) {
1305 pr_reg->pr_reg_deve = NULL;
1306 pr_reg->pr_reg_nacl = NULL;
1307 kfree(pr_reg->pr_aptpl_buf);
1308 kmem_cache_free(t10_pr_reg_cache, pr_reg);
1309 return;
1310 }
1311 /*
1312	 * For PREEMPT_AND_ABORT, the *pr_reg entries on preempt_and_abort_list
1313	 * are released once the ABORT_TASK_SET has completed.
1314 */
1315 list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
1316}
1317
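/*
 * A short note on __core_scsi3_free_registration() above: it expects
 * pr_tmpl->registration_lock to be held, and dec_holders=1 when the caller
 * obtained *pr_reg via core_scsi3_locate_pr_reg() and still owns that
 * reference.  The busy-wait on pr_res_holders temporarily drops and
 * re-takes registration_lock so other I_T nexuses can finish their
 * core_scsi3_put_pr_reg() calls.  When preempt_and_abort_list is non-NULL
 * (the PREEMPT_AND_ABORT case), *pr_reg is parked on that list instead of
 * being freed here.
 */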
1318void core_scsi3_free_pr_reg_from_nacl(
1319 struct se_device *dev,
1320 struct se_node_acl *nacl)
1321{
1322 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1323 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1324 /*
1325 * If the passed se_node_acl matches the reservation holder,
1326 * release the reservation.
1327 */
1328 spin_lock(&dev->dev_reservation_lock);
1329 pr_res_holder = dev->dev_pr_res_holder;
1330 if ((pr_res_holder != NULL) &&
1331 (pr_res_holder->pr_reg_nacl == nacl))
1332 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
1333 spin_unlock(&dev->dev_reservation_lock);
1334 /*
1335 * Release any registration associated with the struct se_node_acl.
1336 */
1337 spin_lock(&pr_tmpl->registration_lock);
1338 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1339 &pr_tmpl->registration_list, pr_reg_list) {
1340
1341 if (pr_reg->pr_reg_nacl != nacl)
1342 continue;
1343
1344 __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
1345 }
1346 spin_unlock(&pr_tmpl->registration_lock);
1347}
1348
1349void core_scsi3_free_all_registrations(
1350 struct se_device *dev)
1351{
1352 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1353 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1354
1355 spin_lock(&dev->dev_reservation_lock);
1356 pr_res_holder = dev->dev_pr_res_holder;
1357 if (pr_res_holder != NULL) {
1358 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
1359 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
1360 pr_res_holder, 0);
1361 }
1362 spin_unlock(&dev->dev_reservation_lock);
1363
1364 spin_lock(&pr_tmpl->registration_lock);
1365 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1366 &pr_tmpl->registration_list, pr_reg_list) {
1367
1368 __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
1369 }
1370 spin_unlock(&pr_tmpl->registration_lock);
1371
1372 spin_lock(&pr_tmpl->aptpl_reg_lock);
1373 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
1374 pr_reg_aptpl_list) {
1375 list_del(&pr_reg->pr_reg_aptpl_list);
1376 kfree(pr_reg->pr_aptpl_buf);
1377 kmem_cache_free(t10_pr_reg_cache, pr_reg);
1378 }
1379 spin_unlock(&pr_tmpl->aptpl_reg_lock);
1380}
1381
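/*
 * The depend/undepend helpers below use configfs_depend_item() to pin the
 * configfs groups (TPG, Node ACL and MappedLUN ACL) that SPEC_I_PT and
 * PREEMPT processing reference, so a concurrent configfs rmdir from
 * userspace cannot remove them while PR logic still holds pointers.  The
 * undepend variants also drop the *_pr_ref_count reference taken earlier
 * by the caller.  Dynamically generated Node ACLs (and se_dev_entry
 * structures without a se_lun_acl) have no configfs group, so only the
 * reference count is adjusted in those cases.
 */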
1382static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
1383{
1384 return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
1385 &tpg->tpg_group.cg_item);
1386}
1387
1388static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
1389{
1390 configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
1391 &tpg->tpg_group.cg_item);
1392
1393 atomic_dec(&tpg->tpg_pr_ref_count);
1394 smp_mb__after_atomic_dec();
1395}
1396
1397static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
1398{
1399 struct se_portal_group *tpg = nacl->se_tpg;
1400
1401 if (nacl->dynamic_node_acl)
1402 return 0;
1403
1404 return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
1405 &nacl->acl_group.cg_item);
1406}
1407
1408static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1409{
1410 struct se_portal_group *tpg = nacl->se_tpg;
1411
1412 if (nacl->dynamic_node_acl) {
1413 atomic_dec(&nacl->acl_pr_ref_count);
1414 smp_mb__after_atomic_dec();
1415 return;
1416 }
1417
1418 configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
1419 &nacl->acl_group.cg_item);
1420
1421 atomic_dec(&nacl->acl_pr_ref_count);
1422 smp_mb__after_atomic_dec();
1423}
1424
1425static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1426{
1427 struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
1428 struct se_node_acl *nacl;
1429 struct se_portal_group *tpg;
1430 /*
1431 * For nacl->dynamic_node_acl=1
1432 */
1433 if (!(lun_acl))
1434 return 0;
1435
1436 nacl = lun_acl->se_lun_nacl;
1437 tpg = nacl->se_tpg;
1438
1439 return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
1440 &lun_acl->se_lun_group.cg_item);
1441}
1442
1443static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1444{
1445 struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
1446 struct se_node_acl *nacl;
1447 struct se_portal_group *tpg;
1448 /*
1449 * For nacl->dynamic_node_acl=1
1450 */
1451 if (!(lun_acl)) {
1452 atomic_dec(&se_deve->pr_ref_count);
1453 smp_mb__after_atomic_dec();
1454 return;
1455 }
1456 nacl = lun_acl->se_lun_nacl;
1457 tpg = nacl->se_tpg;
1458
1459 configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
1460 &lun_acl->se_lun_group.cg_item);
1461
1462 atomic_dec(&se_deve->pr_ref_count);
1463 smp_mb__after_atomic_dec();
1464}
1465
1466static int core_scsi3_decode_spec_i_port(
1467 struct se_cmd *cmd,
1468 struct se_portal_group *tpg,
1469 unsigned char *l_isid,
1470 u64 sa_res_key,
1471 int all_tg_pt,
1472 int aptpl)
1473{
1474 struct se_device *dev = SE_DEV(cmd);
1475 struct se_port *tmp_port;
1476 struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
1477 struct se_session *se_sess = SE_SESS(cmd);
1478 struct se_node_acl *dest_node_acl = NULL;
1479 struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
1480 struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
1481 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
1482 struct list_head tid_dest_list;
1483 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1484 struct target_core_fabric_ops *tmp_tf_ops;
1485 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
1486 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
1487 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
1488 u32 tpdl, tid_len = 0;
1489 int ret, dest_local_nexus, prf_isid;
1490 u32 dest_rtpi = 0;
1491
1492 memset(dest_iport, 0, 64);
1493 INIT_LIST_HEAD(&tid_dest_list);
1494
1495 local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
1496 /*
1497 * Allocate a struct pr_transport_id_holder and setup the
1498 * local_node_acl and local_se_deve pointers and add to
1499 * struct list_head tid_dest_list for add registration
1500 * processing in the loop of tid_dest_list below.
1501 */
1502 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
1503 if (!(tidh_new)) {
1504 printk(KERN_ERR "Unable to allocate tidh_new\n");
1505 return PYX_TRANSPORT_LU_COMM_FAILURE;
1506 }
1507 INIT_LIST_HEAD(&tidh_new->dest_list);
1508 tidh_new->dest_tpg = tpg;
1509 tidh_new->dest_node_acl = se_sess->se_node_acl;
1510 tidh_new->dest_se_deve = local_se_deve;
1511
1512 local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
1513 se_sess->se_node_acl, local_se_deve, l_isid,
1514 sa_res_key, all_tg_pt, aptpl);
1515 if (!(local_pr_reg)) {
1516 kfree(tidh_new);
1517 return PYX_TRANSPORT_LU_COMM_FAILURE;
1518 }
1519 tidh_new->dest_pr_reg = local_pr_reg;
1520 /*
1521	 * The local I_T nexus does not hold any configfs dependencies,
1522	 * so we set tidh_new->dest_local_nexus=1 to prevent the
1523 * configfs_undepend_item() calls in the tid_dest_list loops below.
1524 */
1525 tidh_new->dest_local_nexus = 1;
1526 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1527 /*
1528 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
1529 * first extract TransportID Parameter Data Length, and make sure
1530 * the value matches up to the SCSI expected data transfer length.
1531 */
1532 tpdl = (buf[24] & 0xff) << 24;
1533 tpdl |= (buf[25] & 0xff) << 16;
1534 tpdl |= (buf[26] & 0xff) << 8;
1535 tpdl |= buf[27] & 0xff;
1536
1537 if ((tpdl + 28) != cmd->data_length) {
1538 printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
1539 " does not equal CDB data_length: %u\n", tpdl,
1540 cmd->data_length);
1541 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1542 goto out;
1543 }
1544 /*
1545 * Start processing the received transport IDs using the
1546 * receiving I_T Nexus portal's fabric dependent methods to
1547 * obtain the SCSI Initiator Port/Device Identifiers.
1548 */
1549 ptr = &buf[28];
1550
1551 while (tpdl > 0) {
1552 proto_ident = (ptr[0] & 0x0f);
1553 dest_tpg = NULL;
1554
1555 spin_lock(&dev->se_port_lock);
1556 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
1557 tmp_tpg = tmp_port->sep_tpg;
1558 if (!(tmp_tpg))
1559 continue;
1560 tmp_tf_ops = TPG_TFO(tmp_tpg);
1561 if (!(tmp_tf_ops))
1562 continue;
1563 if (!(tmp_tf_ops->get_fabric_proto_ident) ||
1564 !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
1565 continue;
1566 /*
1567 * Look for the matching proto_ident provided by
1568 * the received TransportID
1569 */
1570 tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
1571 if (tmp_proto_ident != proto_ident)
1572 continue;
1573 dest_rtpi = tmp_port->sep_rtpi;
1574
1575 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
1576 tmp_tpg, (const char *)ptr, &tid_len,
1577 &iport_ptr);
1578 if (!(i_str))
1579 continue;
1580
1581 atomic_inc(&tmp_tpg->tpg_pr_ref_count);
1582 smp_mb__after_atomic_inc();
1583 spin_unlock(&dev->se_port_lock);
1584
1585 ret = core_scsi3_tpg_depend_item(tmp_tpg);
1586 if (ret != 0) {
1587				printk(KERN_ERR "core_scsi3_tpg_depend_item()"
1588					" failed for tmp_tpg\n");
1589 atomic_dec(&tmp_tpg->tpg_pr_ref_count);
1590 smp_mb__after_atomic_dec();
1591 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1592 goto out;
1593 }
1594 /*
1595			 * Locate the destination initiator ACL to be registered
1596 * from the decoded fabric module specific TransportID
1597 * at *i_str.
1598 */
1599 spin_lock_bh(&tmp_tpg->acl_node_lock);
1600 dest_node_acl = __core_tpg_get_initiator_node_acl(
1601 tmp_tpg, i_str);
1602 if (dest_node_acl) {
1603 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1604 smp_mb__after_atomic_inc();
1605 }
1606 spin_unlock_bh(&tmp_tpg->acl_node_lock);
1607
1608 if (!(dest_node_acl)) {
1609 core_scsi3_tpg_undepend_item(tmp_tpg);
1610 spin_lock(&dev->se_port_lock);
1611 continue;
1612 }
1613
1614 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
1615 if (ret != 0) {
1616 printk(KERN_ERR "configfs_depend_item() failed"
1617 " for dest_node_acl->acl_group\n");
1618 atomic_dec(&dest_node_acl->acl_pr_ref_count);
1619 smp_mb__after_atomic_dec();
1620 core_scsi3_tpg_undepend_item(tmp_tpg);
1621 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1622 goto out;
1623 }
1624
1625 dest_tpg = tmp_tpg;
1626 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
1627 " %s Port RTPI: %hu\n",
1628 TPG_TFO(dest_tpg)->get_fabric_name(),
1629 dest_node_acl->initiatorname, dest_rtpi);
1630
1631 spin_lock(&dev->se_port_lock);
1632 break;
1633 }
1634 spin_unlock(&dev->se_port_lock);
1635
1636 if (!(dest_tpg)) {
1637 printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
1638 " dest_tpg\n");
1639 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1640 goto out;
1641 }
1642#if 0
1643 printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
1644 " tid_len: %d for %s + %s\n",
1645 TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
1646 tpdl, tid_len, i_str, iport_ptr);
1647#endif
1648 if (tid_len > tpdl) {
1649 printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
1650 " %u for Transport ID: %s\n", tid_len, ptr);
1651 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1652 core_scsi3_tpg_undepend_item(dest_tpg);
1653 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1654 goto out;
1655 }
1656 /*
1657		 * Locate the destination struct se_dev_entry pointer for matching
1658 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
1659 * Target Port.
1660 */
1661 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
1662 dest_rtpi);
1663 if (!(dest_se_deve)) {
1664 printk(KERN_ERR "Unable to locate %s dest_se_deve"
1665 " from destination RTPI: %hu\n",
1666 TPG_TFO(dest_tpg)->get_fabric_name(),
1667 dest_rtpi);
1668
1669 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1670 core_scsi3_tpg_undepend_item(dest_tpg);
1671 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1672 goto out;
1673 }
1674
1675 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
1676 if (ret < 0) {
1677 printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
1678 " failed\n");
1679 atomic_dec(&dest_se_deve->pr_ref_count);
1680 smp_mb__after_atomic_dec();
1681 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1682 core_scsi3_tpg_undepend_item(dest_tpg);
1683 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1684 goto out;
1685 }
1686#if 0
1687 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
1688 " dest_se_deve mapped_lun: %u\n",
1689 TPG_TFO(dest_tpg)->get_fabric_name(),
1690 dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
1691#endif
1692 /*
1693 * Skip any TransportIDs that already have a registration for
1694 * this target port.
1695 */
1696 pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
1697 iport_ptr);
1698 if (pr_reg_e) {
1699 core_scsi3_put_pr_reg(pr_reg_e);
1700 core_scsi3_lunacl_undepend_item(dest_se_deve);
1701 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1702 core_scsi3_tpg_undepend_item(dest_tpg);
1703 ptr += tid_len;
1704 tpdl -= tid_len;
1705 tid_len = 0;
1706 continue;
1707 }
1708 /*
1709 * Allocate a struct pr_transport_id_holder and setup
1710 * the dest_node_acl and dest_se_deve pointers for the
1711 * loop below.
1712 */
1713 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
1714 GFP_KERNEL);
1715 if (!(tidh_new)) {
1716 printk(KERN_ERR "Unable to allocate tidh_new\n");
1717 core_scsi3_lunacl_undepend_item(dest_se_deve);
1718 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1719 core_scsi3_tpg_undepend_item(dest_tpg);
1720 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1721 goto out;
1722 }
1723 INIT_LIST_HEAD(&tidh_new->dest_list);
1724 tidh_new->dest_tpg = dest_tpg;
1725 tidh_new->dest_node_acl = dest_node_acl;
1726 tidh_new->dest_se_deve = dest_se_deve;
1727
1728 /*
1729 * Allocate, but do NOT add the registration for the
1730 * TransportID referenced SCSI Initiator port. This
1731		 * is done because of the following from spc4r17 in section
1732 * 6.14.3 wrt SPEC_I_PT:
1733 *
1734		 * "If a registration fails for any initiator port (e.g., if the
1735 * logical unit does not have enough resources available to
1736 * hold the registration information), no registrations shall be
1737 * made, and the command shall be terminated with
1738 * CHECK CONDITION status."
1739 *
1740 * That means we call __core_scsi3_alloc_registration() here,
1741 * and then call __core_scsi3_add_registration() in the
1742 * 2nd loop which will never fail.
1743 */
1744 dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
1745 dest_node_acl, dest_se_deve, iport_ptr,
1746 sa_res_key, all_tg_pt, aptpl);
1747 if (!(dest_pr_reg)) {
1748 core_scsi3_lunacl_undepend_item(dest_se_deve);
1749 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1750 core_scsi3_tpg_undepend_item(dest_tpg);
1751 kfree(tidh_new);
1752 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1753 goto out;
1754 }
1755 tidh_new->dest_pr_reg = dest_pr_reg;
1756 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1757
1758 ptr += tid_len;
1759 tpdl -= tid_len;
1760 tid_len = 0;
1761
1762 }
1763 /*
1764	 * Go ahead and create registrations from tid_dest_list for the
1765	 * SPEC_I_PT provided TransportIDs using the *tidh referenced
1766	 * dest_node_acl and dest_se_deve.
1767	 *
1768	 * The SA Reservation Key from the PROUT is set for each
1769	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
1770	 * means that the TransportID Initiator port will be
1771	 * registered on all of the target ports in the SCSI target
1772	 * device.  ALL_TG_PT=0 means the registration will only be for
1773	 * the SCSI target port on which the PROUT REGISTER with
1774	 * SPEC_I_PT=1 was received.
1775 */
1776 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
1777 dest_tpg = tidh->dest_tpg;
1778 dest_node_acl = tidh->dest_node_acl;
1779 dest_se_deve = tidh->dest_se_deve;
1780 dest_pr_reg = tidh->dest_pr_reg;
1781 dest_local_nexus = tidh->dest_local_nexus;
1782
1783 list_del(&tidh->dest_list);
1784 kfree(tidh);
1785
1786 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1787 prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
1788 PR_REG_ISID_ID_LEN);
1789
1790 __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
1791 dest_pr_reg, 0, 0);
1792
1793 printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
1794 " registered Transport ID for Node: %s%s Mapped LUN:"
1795 " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
1796 dest_node_acl->initiatorname, (prf_isid) ?
1797 &i_buf[0] : "", dest_se_deve->mapped_lun);
1798
1799 if (dest_local_nexus)
1800 continue;
1801
1802 core_scsi3_lunacl_undepend_item(dest_se_deve);
1803 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1804 core_scsi3_tpg_undepend_item(dest_tpg);
1805 }
1806
1807 return 0;
1808out:
1809 /*
1810 * For the failure case, release everything from tid_dest_list
1811	 * including *dest_pr_reg and the configfs dependencies.
1812 */
1813 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
1814 dest_tpg = tidh->dest_tpg;
1815 dest_node_acl = tidh->dest_node_acl;
1816 dest_se_deve = tidh->dest_se_deve;
1817 dest_pr_reg = tidh->dest_pr_reg;
1818 dest_local_nexus = tidh->dest_local_nexus;
1819
1820 list_del(&tidh->dest_list);
1821 kfree(tidh);
1822 /*
1823 * Release any extra ALL_TG_PT=1 registrations for
1824 * the SPEC_I_PT=1 case.
1825 */
1826 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
1827 &dest_pr_reg->pr_reg_atp_list,
1828 pr_reg_atp_mem_list) {
1829 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
1830 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1831 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
1832 }
1833
1834 kfree(dest_pr_reg->pr_aptpl_buf);
1835 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
1836
1837 if (dest_local_nexus)
1838 continue;
1839
1840 core_scsi3_lunacl_undepend_item(dest_se_deve);
1841 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1842 core_scsi3_tpg_undepend_item(dest_tpg);
1843 }
1844 return ret;
1845}
1846
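/*
 * In rough outline, core_scsi3_decode_spec_i_port() above works in two
 * passes.  The first pass walks the TransportID list in the PROUT
 * parameter data, matches each entry's protocol identifier against the
 * ports exported for this device, resolves it to a (tpg, node ACL,
 * se_dev_entry) triple while taking configfs dependencies, and only
 * *allocates* a t10_pr_registration.  The second pass then adds every
 * allocated registration.  This split follows the spc4r17 section 6.14.3
 * requirement quoted above: if any single TransportID cannot be
 * registered, no registrations are made at all, and the out: failure path
 * unwinds everything that was allocated.
 */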
1847/*
1848 * Called with struct se_device->dev_reservation_lock held
1849 */
1850static int __core_scsi3_update_aptpl_buf(
1851 struct se_device *dev,
1852 unsigned char *buf,
1853 u32 pr_aptpl_buf_len,
1854 int clear_aptpl_metadata)
1855{
1856 struct se_lun *lun;
1857 struct se_portal_group *tpg;
1858 struct se_subsystem_dev *su_dev = SU_DEV(dev);
1859 struct t10_pr_registration *pr_reg;
1860 unsigned char tmp[512], isid_buf[32];
1861 ssize_t len = 0;
1862 int reg_count = 0;
1863
1864 memset(buf, 0, pr_aptpl_buf_len);
1865 /*
1866 * Called to clear metadata once APTPL has been deactivated.
1867 */
1868 if (clear_aptpl_metadata) {
1869 snprintf(buf, pr_aptpl_buf_len,
1870 "No Registrations or Reservations\n");
1871 return 0;
1872 }
1873 /*
1874 * Walk the registration list..
1875 */
1876 spin_lock(&T10_RES(su_dev)->registration_lock);
1877 list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
1878 pr_reg_list) {
1879
1880 tmp[0] = '\0';
1881 isid_buf[0] = '\0';
1882 tpg = pr_reg->pr_reg_nacl->se_tpg;
1883 lun = pr_reg->pr_reg_tg_pt_lun;
1884 /*
1885 * Write out any ISID value to APTPL metadata that was included
1886 * in the original registration.
1887 */
1888 if (pr_reg->isid_present_at_reg)
1889 snprintf(isid_buf, 32, "initiator_sid=%s\n",
1890 pr_reg->pr_reg_isid);
1891 /*
1892 * Include special metadata if the pr_reg matches the
1893 * reservation holder.
1894 */
1895 if (dev->dev_pr_res_holder == pr_reg) {
1896 snprintf(tmp, 512, "PR_REG_START: %d"
1897 "\ninitiator_fabric=%s\n"
1898 "initiator_node=%s\n%s"
1899 "sa_res_key=%llu\n"
1900 "res_holder=1\nres_type=%02x\n"
1901 "res_scope=%02x\nres_all_tg_pt=%d\n"
1902 "mapped_lun=%u\n", reg_count,
1903 TPG_TFO(tpg)->get_fabric_name(),
1904 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1905 pr_reg->pr_res_key, pr_reg->pr_res_type,
1906 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
1907 pr_reg->pr_res_mapped_lun);
1908 } else {
1909 snprintf(tmp, 512, "PR_REG_START: %d\n"
1910 "initiator_fabric=%s\ninitiator_node=%s\n%s"
1911 "sa_res_key=%llu\nres_holder=0\n"
1912 "res_all_tg_pt=%d\nmapped_lun=%u\n",
1913 reg_count, TPG_TFO(tpg)->get_fabric_name(),
1914 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1915 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
1916 pr_reg->pr_res_mapped_lun);
1917 }
1918
1919 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1920			printk(KERN_ERR "Unable to update remaining"
1921 " APTPL metadata\n");
1922 spin_unlock(&T10_RES(su_dev)->registration_lock);
1923 return -1;
1924 }
1925 len += sprintf(buf+len, "%s", tmp);
1926
1927 /*
1928 * Include information about the associated SCSI target port.
1929 */
1930 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
1931 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
1932 " %d\n", TPG_TFO(tpg)->get_fabric_name(),
1933 TPG_TFO(tpg)->tpg_get_wwn(tpg),
1934 TPG_TFO(tpg)->tpg_get_tag(tpg),
1935 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
1936
1937 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1938			printk(KERN_ERR "Unable to update remaining"
1939 " APTPL metadata\n");
1940 spin_unlock(&T10_RES(su_dev)->registration_lock);
1941 return -1;
1942 }
1943 len += sprintf(buf+len, "%s", tmp);
1944 reg_count++;
1945 }
1946 spin_unlock(&T10_RES(su_dev)->registration_lock);
1947
1948 if (!(reg_count))
1949 len += sprintf(buf+len, "No Registrations or Reservations");
1950
1951 return 0;
1952}
1953
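/*
 * For reference, __core_scsi3_update_aptpl_buf() above emits one block of
 * key=value lines per registration.  With illustrative (made-up) values, a
 * reservation holder entry looks roughly like:
 *
 *	PR_REG_START: 0
 *	initiator_fabric=iSCSI
 *	initiator_node=iqn.1994-05.com.example:initiator
 *	initiator_sid=10
 *	sa_res_key=1234
 *	res_holder=1
 *	res_type=05
 *	res_scope=00
 *	res_all_tg_pt=0
 *	mapped_lun=0
 *	target_fabric=iSCSI
 *	target_node=iqn.2003-01.org.example:target
 *	tpgt=1
 *	port_rtpi=1
 *	target_lun=0
 *	PR_REG_END: 0
 *
 * Non-holder entries carry res_holder=0 and omit the res_type/res_scope
 * lines, and an empty registration list is written out as
 * "No Registrations or Reservations".
 */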
1954static int core_scsi3_update_aptpl_buf(
1955 struct se_device *dev,
1956 unsigned char *buf,
1957 u32 pr_aptpl_buf_len,
1958 int clear_aptpl_metadata)
1959{
1960 int ret;
1961
1962 spin_lock(&dev->dev_reservation_lock);
1963 ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
1964 clear_aptpl_metadata);
1965 spin_unlock(&dev->dev_reservation_lock);
1966
1967 return ret;
1968}
1969
1970/*
1971 * Called with struct se_device->aptpl_file_mutex held
1972 */
1973static int __core_scsi3_write_aptpl_to_file(
1974 struct se_device *dev,
1975 unsigned char *buf,
1976 u32 pr_aptpl_buf_len)
1977{
1978 struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
1979 struct file *file;
1980 struct iovec iov[1];
1981 mm_segment_t old_fs;
1982 int flags = O_RDWR | O_CREAT | O_TRUNC;
1983 char path[512];
1984 int ret;
1985
1986 memset(iov, 0, sizeof(struct iovec));
1987 memset(path, 0, 512);
1988
1989 if (strlen(&wwn->unit_serial[0]) >= 512) {
1990 printk(KERN_ERR "WWN value for struct se_device does not fit"
1991 " into path buffer\n");
1992 return -1;
1993 }
1994
1995 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
1996 file = filp_open(path, flags, 0600);
1997 if (IS_ERR(file) || !file || !file->f_dentry) {
1998 printk(KERN_ERR "filp_open(%s) for APTPL metadata"
1999 " failed\n", path);
2000 return -1;
2001 }
2002
2003 iov[0].iov_base = &buf[0];
2004 if (!(pr_aptpl_buf_len))
2005 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
2006 else
2007 iov[0].iov_len = pr_aptpl_buf_len;
2008
2009 old_fs = get_fs();
2010 set_fs(get_ds());
2011 ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
2012 set_fs(old_fs);
2013
2014 if (ret < 0) {
2015 printk("Error writing APTPL metadata file: %s\n", path);
2016 filp_close(file, NULL);
2017 return -1;
2018 }
2019 filp_close(file, NULL);
2020
2021 return 0;
2022}
2023
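/*
 * Note on __core_scsi3_write_aptpl_to_file() above: the metadata is
 * written to /var/target/pr/aptpl_<unit_serial>, keyed by the T10 unit
 * serial of the backing device.  Because the buffer lives in kernel
 * memory, the write is wrapped in set_fs(get_ds())/set_fs(old_fs) so that
 * vfs_writev() will accept the kernel-space iovec.
 */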
2024static int core_scsi3_update_and_write_aptpl(
2025 struct se_device *dev,
2026 unsigned char *in_buf,
2027 u32 in_pr_aptpl_buf_len)
2028{
2029 unsigned char null_buf[64], *buf;
2030 u32 pr_aptpl_buf_len;
2031 int ret, clear_aptpl_metadata = 0;
2032 /*
2033 * Can be called with a NULL pointer from PROUT service action CLEAR
2034 */
2035 if (!(in_buf)) {
2036 memset(null_buf, 0, 64);
2037 buf = &null_buf[0];
2038 /*
2039 * This will clear the APTPL metadata to:
2040 * "No Registrations or Reservations" status
2041 */
2042 pr_aptpl_buf_len = 64;
2043 clear_aptpl_metadata = 1;
2044 } else {
2045 buf = in_buf;
2046 pr_aptpl_buf_len = in_pr_aptpl_buf_len;
2047 }
2048
2049 ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
2050 clear_aptpl_metadata);
2051 if (ret != 0)
2052 return -1;
2053 /*
2054 * __core_scsi3_write_aptpl_to_file() will call strlen()
2055 * on the passed buf to determine pr_aptpl_buf_len.
2056 */
2057 ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
2058 if (ret != 0)
2059 return -1;
2060
2061 return ret;
2062}
2063
2064static int core_scsi3_emulate_pro_register(
2065 struct se_cmd *cmd,
2066 u64 res_key,
2067 u64 sa_res_key,
2068 int aptpl,
2069 int all_tg_pt,
2070 int spec_i_pt,
2071 int ignore_key)
2072{
2073 struct se_session *se_sess = SE_SESS(cmd);
2074 struct se_device *dev = SE_DEV(cmd);
2075 struct se_dev_entry *se_deve;
2076 struct se_lun *se_lun = SE_LUN(cmd);
2077 struct se_portal_group *se_tpg;
2078 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
2079 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
2080 /* Used for APTPL metadata w/ UNREGISTER */
2081 unsigned char *pr_aptpl_buf = NULL;
2082 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
2083 int pr_holder = 0, ret = 0, type;
2084
2085 if (!(se_sess) || !(se_lun)) {
2086 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2087 return PYX_TRANSPORT_LU_COMM_FAILURE;
2088 }
2089 se_tpg = se_sess->se_tpg;
2090 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2091
2092 if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
2093 memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
2094 TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
2095 PR_REG_ISID_LEN);
2096 isid_ptr = &isid_buf[0];
2097 }
2098 /*
2099 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
2100 */
2101 pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2102 if (!(pr_reg_e)) {
2103 if (res_key) {
2104 printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
2105 " for SA REGISTER, returning CONFLICT\n");
2106 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2107 }
2108 /*
2109 * Do nothing but return GOOD status.
2110 */
2111 if (!(sa_res_key))
2112 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2113
2114 if (!(spec_i_pt)) {
2115 /*
2116 * Perform the Service Action REGISTER on the Initiator
2117 * Port Endpoint that the PRO was received from on the
2118 * Logical Unit of the SCSI device server.
2119 */
2120 ret = core_scsi3_alloc_registration(SE_DEV(cmd),
2121 se_sess->se_node_acl, se_deve, isid_ptr,
2122 sa_res_key, all_tg_pt, aptpl,
2123 ignore_key, 0);
2124 if (ret != 0) {
2125 printk(KERN_ERR "Unable to allocate"
2126 " struct t10_pr_registration\n");
2127 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2128 }
2129 } else {
2130 /*
2131 * Register both the Initiator port that received
2132 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
2133 * TransportID from Parameter list and loop through
2134 * fabric dependent parameter list while calling
2135 * logic from of core_scsi3_alloc_registration() for
2136 * each TransportID provided SCSI Initiator Port/Device
2137 */
2138 ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
2139 isid_ptr, sa_res_key, all_tg_pt, aptpl);
2140 if (ret != 0)
2141 return ret;
2142 }
2143 /*
2144 * Nothing left to do for the APTPL=0 case.
2145 */
2146 if (!(aptpl)) {
2147 pr_tmpl->pr_aptpl_active = 0;
2148 core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
2149 printk("SPC-3 PR: Set APTPL Bit Deactivated for"
2150 " REGISTER\n");
2151 return 0;
2152 }
2153 /*
2154 * Locate the newly allocated local I_T Nexus *pr_reg, and
2155 * update the APTPL metadata information using its
2156 * preallocated *pr_reg->pr_aptpl_buf.
2157 */
2158 pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
2159 se_sess->se_node_acl, se_sess);
2160
2161 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
2162 &pr_reg->pr_aptpl_buf[0],
2163 pr_tmpl->pr_aptpl_buf_len);
2164 if (!(ret)) {
2165 pr_tmpl->pr_aptpl_active = 1;
2166 printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
2167 }
2168
2169 core_scsi3_put_pr_reg(pr_reg);
2170 return ret;
2171 } else {
2172 /*
2173 * Locate the existing *pr_reg via struct se_node_acl pointers
2174 */
2175 pr_reg = pr_reg_e;
2176 type = pr_reg->pr_res_type;
2177
2178 if (!(ignore_key)) {
2179 if (res_key != pr_reg->pr_res_key) {
2180 printk(KERN_ERR "SPC-3 PR REGISTER: Received"
2181 " res_key: 0x%016Lx does not match"
2182 " existing SA REGISTER res_key:"
2183 " 0x%016Lx\n", res_key,
2184 pr_reg->pr_res_key);
2185 core_scsi3_put_pr_reg(pr_reg);
2186 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2187 }
2188 }
2189 if (spec_i_pt) {
2190 printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
2191 " set while sa_res_key=0\n");
2192 core_scsi3_put_pr_reg(pr_reg);
2193 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2194 }
2195 /*
2196 * An existing ALL_TG_PT=1 registration being released
2197 * must also set ALL_TG_PT=1 in the incoming PROUT.
2198 */
2199 if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
2200 printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
2201 " registration exists, but ALL_TG_PT=1 bit not"
2202 " present in received PROUT\n");
2203 core_scsi3_put_pr_reg(pr_reg);
2204 return PYX_TRANSPORT_INVALID_CDB_FIELD;
2205 }
2206 /*
2207 * Allocate APTPL metadata buffer used for UNREGISTER ops
2208 */
2209 if (aptpl) {
2210 pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
2211 GFP_KERNEL);
2212 if (!(pr_aptpl_buf)) {
2213 printk(KERN_ERR "Unable to allocate"
2214 " pr_aptpl_buf\n");
2215 core_scsi3_put_pr_reg(pr_reg);
2216 return PYX_TRANSPORT_LU_COMM_FAILURE;
2217 }
2218 }
2219 /*
2220		 * sa_res_key=0: Unregister the Reservation Key for the
2221		 * registered I_T Nexus.  sa_res_key != 0: Change the
2222		 * Reservation Key for the registered I_T Nexus.
2223 */
2224 if (!(sa_res_key)) {
2225 pr_holder = core_scsi3_check_implict_release(
2226 SE_DEV(cmd), pr_reg);
2227 if (pr_holder < 0) {
2228 kfree(pr_aptpl_buf);
2229 core_scsi3_put_pr_reg(pr_reg);
2230 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2231 }
2232
2233 spin_lock(&pr_tmpl->registration_lock);
2234 /*
2235 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
2236 * and matching pr_res_key.
2237 */
2238 if (pr_reg->pr_reg_all_tg_pt) {
2239 list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
2240 &pr_tmpl->registration_list,
2241 pr_reg_list) {
2242
2243 if (!(pr_reg_p->pr_reg_all_tg_pt))
2244 continue;
2245
2246 if (pr_reg_p->pr_res_key != res_key)
2247 continue;
2248
2249 if (pr_reg == pr_reg_p)
2250 continue;
2251
2252 if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
2253 pr_reg_p->pr_reg_nacl->initiatorname))
2254 continue;
2255
2256 __core_scsi3_free_registration(dev,
2257 pr_reg_p, NULL, 0);
2258 }
2259 }
2260 /*
2261 * Release the calling I_T Nexus registration now..
2262 */
2263 __core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
2264 NULL, 1);
2265 /*
2266 * From spc4r17, section 5.7.11.3 Unregistering
2267 *
2268 * If the persistent reservation is a registrants only
2269 * type, the device server shall establish a unit
2270 * attention condition for the initiator port associated
2271 * with every registered I_T nexus except for the I_T
2272 * nexus on which the PERSISTENT RESERVE OUT command was
2273 * received, with the additional sense code set to
2274 * RESERVATIONS RELEASED.
2275 */
2276 if (pr_holder &&
2277 ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
2278 (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
2279 list_for_each_entry(pr_reg_p,
2280 &pr_tmpl->registration_list,
2281 pr_reg_list) {
2282
2283 core_scsi3_ua_allocate(
2284 pr_reg_p->pr_reg_nacl,
2285 pr_reg_p->pr_res_mapped_lun,
2286 0x2A,
2287 ASCQ_2AH_RESERVATIONS_RELEASED);
2288 }
2289 }
2290 spin_unlock(&pr_tmpl->registration_lock);
2291
2292 if (!(aptpl)) {
2293 pr_tmpl->pr_aptpl_active = 0;
2294 core_scsi3_update_and_write_aptpl(dev, NULL, 0);
2295 printk("SPC-3 PR: Set APTPL Bit Deactivated"
2296 " for UNREGISTER\n");
2297 return 0;
2298 }
2299
2300 ret = core_scsi3_update_and_write_aptpl(dev,
2301 &pr_aptpl_buf[0],
2302 pr_tmpl->pr_aptpl_buf_len);
2303 if (!(ret)) {
2304 pr_tmpl->pr_aptpl_active = 1;
2305 printk("SPC-3 PR: Set APTPL Bit Activated"
2306 " for UNREGISTER\n");
2307 }
2308
2309 kfree(pr_aptpl_buf);
2310 return ret;
2311 } else {
2312 /*
2313			 * Increment the PRgeneration counter for struct se_device
2314 * upon a successful REGISTER, see spc4r17 section 6.3.2
2315 * READ_KEYS service action.
2316 */
2317 pr_reg->pr_res_generation = core_scsi3_pr_generation(
2318 SE_DEV(cmd));
2319 pr_reg->pr_res_key = sa_res_key;
2320 printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
2321 " Key for %s to: 0x%016Lx PRgeneration:"
2322 " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
2323 (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
2324 pr_reg->pr_reg_nacl->initiatorname,
2325 pr_reg->pr_res_key, pr_reg->pr_res_generation);
2326
2327 if (!(aptpl)) {
2328 pr_tmpl->pr_aptpl_active = 0;
2329 core_scsi3_update_and_write_aptpl(dev, NULL, 0);
2330 core_scsi3_put_pr_reg(pr_reg);
2331 printk("SPC-3 PR: Set APTPL Bit Deactivated"
2332 " for REGISTER\n");
2333 return 0;
2334 }
2335
2336 ret = core_scsi3_update_and_write_aptpl(dev,
2337 &pr_aptpl_buf[0],
2338 pr_tmpl->pr_aptpl_buf_len);
2339 if (!(ret)) {
2340 pr_tmpl->pr_aptpl_active = 1;
2341 printk("SPC-3 PR: Set APTPL Bit Activated"
2342 " for REGISTER\n");
2343 }
2344
2345 kfree(pr_aptpl_buf);
2346 core_scsi3_put_pr_reg(pr_reg);
2347 }
2348 }
2349 return 0;
2350}
2351
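/*
 * A condensed view of the REGISTER / REGISTER_AND_IGNORE_EXISTING_KEY
 * handling above (spc4r17 Table 47):
 *
 *   - no existing registration, res_key != 0    -> RESERVATION CONFLICT
 *   - no existing registration, sa_res_key == 0 -> GOOD, no-op
 *   - no existing registration, sa_res_key != 0 -> create registration(s),
 *     for this I_T nexus alone or, with SPEC_I_PT=1, for every TransportID
 *     in the parameter list
 *   - existing registration, res_key mismatch and not IGNORE_EXISTING_KEY
 *                                                -> RESERVATION CONFLICT
 *   - existing registration, sa_res_key == 0    -> unregister, with an
 *     implicit release if this nexus holds the reservation
 *   - existing registration, sa_res_key != 0    -> change the reservation
 *     key
 *
 * Each branch finishes by rewriting (or clearing) the APTPL metadata.
 */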
2352unsigned char *core_scsi3_pr_dump_type(int type)
2353{
2354 switch (type) {
2355 case PR_TYPE_WRITE_EXCLUSIVE:
2356 return "Write Exclusive Access";
2357 case PR_TYPE_EXCLUSIVE_ACCESS:
2358 return "Exclusive Access";
2359 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
2360 return "Write Exclusive Access, Registrants Only";
2361 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
2362 return "Exclusive Access, Registrants Only";
2363 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
2364 return "Write Exclusive Access, All Registrants";
2365 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
2366 return "Exclusive Access, All Registrants";
2367 default:
2368 break;
2369 }
2370
2371 return "Unknown SPC-3 PR Type";
2372}
2373
2374static int core_scsi3_pro_reserve(
2375 struct se_cmd *cmd,
2376 struct se_device *dev,
2377 int type,
2378 int scope,
2379 u64 res_key)
2380{
2381 struct se_session *se_sess = SE_SESS(cmd);
2382 struct se_dev_entry *se_deve;
2383 struct se_lun *se_lun = SE_LUN(cmd);
2384 struct se_portal_group *se_tpg;
2385 struct t10_pr_registration *pr_reg, *pr_res_holder;
2386 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
2387 char i_buf[PR_REG_ISID_ID_LEN];
2388 int ret, prf_isid;
2389
2390 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2391
2392 if (!(se_sess) || !(se_lun)) {
2393 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2394 return PYX_TRANSPORT_LU_COMM_FAILURE;
2395 }
2396 se_tpg = se_sess->se_tpg;
2397 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2398 /*
2399 * Locate the existing *pr_reg via struct se_node_acl pointers
2400 */
2401 pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
2402 se_sess);
2403 if (!(pr_reg)) {
2404 printk(KERN_ERR "SPC-3 PR: Unable to locate"
2405 " PR_REGISTERED *pr_reg for RESERVE\n");
2406 return PYX_TRANSPORT_LU_COMM_FAILURE;
2407 }
2408 /*
2409 * From spc4r17 Section 5.7.9: Reserving:
2410 *
2411 * An application client creates a persistent reservation by issuing
2412 * a PERSISTENT RESERVE OUT command with RESERVE service action through
2413 * a registered I_T nexus with the following parameters:
2414 * a) RESERVATION KEY set to the value of the reservation key that is
2415 * registered with the logical unit for the I_T nexus; and
2416 */
2417 if (res_key != pr_reg->pr_res_key) {
2418 printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
2419 " does not match existing SA REGISTER res_key:"
2420 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2421 core_scsi3_put_pr_reg(pr_reg);
2422 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2423 }
2424 /*
2425 * From spc4r17 Section 5.7.9: Reserving:
2426 *
2427 * From above:
2428 * b) TYPE field and SCOPE field set to the persistent reservation
2429 * being created.
2430 *
2431 * Only one persistent reservation is allowed at a time per logical unit
2432 * and that persistent reservation has a scope of LU_SCOPE.
2433 */
2434 if (scope != PR_SCOPE_LU_SCOPE) {
2435 printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2436 core_scsi3_put_pr_reg(pr_reg);
2437 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2438 }
2439 /*
2440 * See if we have an existing PR reservation holder pointer at
2441 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
2442 * *pr_res_holder.
2443 */
2444 spin_lock(&dev->dev_reservation_lock);
2445 pr_res_holder = dev->dev_pr_res_holder;
2446 if ((pr_res_holder)) {
2447 /*
2448 * From spc4r17 Section 5.7.9: Reserving:
2449 *
2450 * If the device server receives a PERSISTENT RESERVE OUT
2451 * command from an I_T nexus other than a persistent reservation
2452 * holder (see 5.7.10) that attempts to create a persistent
2453 * reservation when a persistent reservation already exists for
2454 * the logical unit, then the command shall be completed with
2455 * RESERVATION CONFLICT status.
2456 */
2457 if (pr_res_holder != pr_reg) {
2458 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2459 printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
2460 " [%s]: %s while reservation already held by"
2461 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2462 CMD_TFO(cmd)->get_fabric_name(),
2463 se_sess->se_node_acl->initiatorname,
2464 TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
2465 pr_res_holder->pr_reg_nacl->initiatorname);
2466
2467 spin_unlock(&dev->dev_reservation_lock);
2468 core_scsi3_put_pr_reg(pr_reg);
2469 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2470 }
2471 /*
2472 * From spc4r17 Section 5.7.9: Reserving:
2473 *
2474 * If a persistent reservation holder attempts to modify the
2475 * type or scope of an existing persistent reservation, the
2476 * command shall be completed with RESERVATION CONFLICT status.
2477 */
2478 if ((pr_res_holder->pr_res_type != type) ||
2479 (pr_res_holder->pr_res_scope != scope)) {
2480 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2481 printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
2482 " [%s]: %s trying to change TYPE and/or SCOPE,"
2483 " while reservation already held by [%s]: %s,"
2484 " returning RESERVATION_CONFLICT\n",
2485 CMD_TFO(cmd)->get_fabric_name(),
2486 se_sess->se_node_acl->initiatorname,
2487 TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
2488 pr_res_holder->pr_reg_nacl->initiatorname);
2489
2490 spin_unlock(&dev->dev_reservation_lock);
2491 core_scsi3_put_pr_reg(pr_reg);
2492 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2493 }
2494 /*
2495 * From spc4r17 Section 5.7.9: Reserving:
2496 *
2497 * If the device server receives a PERSISTENT RESERVE OUT
2498 * command with RESERVE service action where the TYPE field and
2499 * the SCOPE field contain the same values as the existing type
2500 * and scope from a persistent reservation holder, it shall not
2501 * make any change to the existing persistent reservation and
2502		 * shall complete the command with GOOD status.
2503 */
2504 spin_unlock(&dev->dev_reservation_lock);
2505 core_scsi3_put_pr_reg(pr_reg);
2506 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2507 }
2508 /*
2509 * Otherwise, our *pr_reg becomes the PR reservation holder for said
2510 * TYPE/SCOPE. Also set the received scope and type in *pr_reg.
2511 */
2512 pr_reg->pr_res_scope = scope;
2513 pr_reg->pr_res_type = type;
2514 pr_reg->pr_res_holder = 1;
2515 dev->dev_pr_res_holder = pr_reg;
2516 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2517 PR_REG_ISID_ID_LEN);
2518
2519 printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
2520 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2521 CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
2522 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2523 printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
2524 CMD_TFO(cmd)->get_fabric_name(),
2525 se_sess->se_node_acl->initiatorname,
2526 (prf_isid) ? &i_buf[0] : "");
2527 spin_unlock(&dev->dev_reservation_lock);
2528
2529 if (pr_tmpl->pr_aptpl_active) {
2530 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
2531 &pr_reg->pr_aptpl_buf[0],
2532 pr_tmpl->pr_aptpl_buf_len);
2533 if (!(ret))
2534 printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
2535 " for RESERVE\n");
2536 }
2537
2538 core_scsi3_put_pr_reg(pr_reg);
2539 return 0;
2540}
2541
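/*
 * core_scsi3_pro_reserve() above resolves to one of three outcomes:
 * RESERVATION CONFLICT when the supplied key does not match the caller's
 * registration, when another I_T nexus already holds the reservation, or
 * when the holder tries to change TYPE/SCOPE; GOOD with no state change
 * when the holder re-reserves with the same TYPE and SCOPE; and otherwise
 * *pr_reg becomes the new reservation holder.  Only LU_SCOPE is accepted.
 */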
2542static int core_scsi3_emulate_pro_reserve(
2543 struct se_cmd *cmd,
2544 int type,
2545 int scope,
2546 u64 res_key)
2547{
2548 struct se_device *dev = cmd->se_dev;
2549 int ret = 0;
2550
2551 switch (type) {
2552 case PR_TYPE_WRITE_EXCLUSIVE:
2553 case PR_TYPE_EXCLUSIVE_ACCESS:
2554 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
2555 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
2556 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
2557 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
2558 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
2559 break;
2560 default:
2561 printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
2562 " 0x%02x\n", type);
2563 return PYX_TRANSPORT_INVALID_CDB_FIELD;
2564 }
2565
2566 return ret;
2567}
2568
2569/*
2570 * Called with struct se_device->dev_reservation_lock held.
2571 */
2572static void __core_scsi3_complete_pro_release(
2573 struct se_device *dev,
2574 struct se_node_acl *se_nacl,
2575 struct t10_pr_registration *pr_reg,
2576 int explict)
2577{
2578 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2579 char i_buf[PR_REG_ISID_ID_LEN];
2580 int prf_isid;
2581
2582 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2583 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2584 PR_REG_ISID_ID_LEN);
2585 /*
2586 * Go ahead and release the current PR reservation holder.
2587 */
2588 dev->dev_pr_res_holder = NULL;
2589
2590 printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2591 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2592		tfo->get_fabric_name(), (explict) ? "explicit" : "implicit",
2593 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
2594 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2595 printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
2596 tfo->get_fabric_name(), se_nacl->initiatorname,
2597 (prf_isid) ? &i_buf[0] : "");
2598 /*
2599 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
2600 */
2601 pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
2602}
2603
2604static int core_scsi3_emulate_pro_release(
2605 struct se_cmd *cmd,
2606 int type,
2607 int scope,
2608 u64 res_key)
2609{
2610 struct se_device *dev = cmd->se_dev;
2611 struct se_session *se_sess = SE_SESS(cmd);
2612 struct se_lun *se_lun = SE_LUN(cmd);
2613 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
2614 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
2615 int ret, all_reg = 0;
2616
2617 if (!(se_sess) || !(se_lun)) {
2618 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2619 return PYX_TRANSPORT_LU_COMM_FAILURE;
2620 }
2621 /*
2622 * Locate the existing *pr_reg via struct se_node_acl pointers
2623 */
2624 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2625 if (!(pr_reg)) {
2626 printk(KERN_ERR "SPC-3 PR: Unable to locate"
2627 " PR_REGISTERED *pr_reg for RELEASE\n");
2628 return PYX_TRANSPORT_LU_COMM_FAILURE;
2629 }
2630 /*
2631 * From spc4r17 Section 5.7.11.2 Releasing:
2632 *
2633 * If there is no persistent reservation or in response to a persistent
2634 * reservation release request from a registered I_T nexus that is not a
2635 * persistent reservation holder (see 5.7.10), the device server shall
2636 * do the following:
2637 *
2638 * a) Not release the persistent reservation, if any;
2639 * b) Not remove any registrations; and
2640 * c) Complete the command with GOOD status.
2641 */
2642 spin_lock(&dev->dev_reservation_lock);
2643 pr_res_holder = dev->dev_pr_res_holder;
2644 if (!(pr_res_holder)) {
2645 /*
2646 * No persistent reservation, return GOOD status.
2647 */
2648 spin_unlock(&dev->dev_reservation_lock);
2649 core_scsi3_put_pr_reg(pr_reg);
2650 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2651 }
2652 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2653 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
2654 all_reg = 1;
2655
2656 if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
2657 /*
2658 * Non 'All Registrants' PR Type cases..
2659 * Release request from a registered I_T nexus that is not a
2660 * persistent reservation holder. return GOOD status.
2661 */
2662 spin_unlock(&dev->dev_reservation_lock);
2663 core_scsi3_put_pr_reg(pr_reg);
2664 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2665 }
2666 /*
2667 * From spc4r17 Section 5.7.11.2 Releasing:
2668 *
2669 * Only the persistent reservation holder (see 5.7.10) is allowed to
2670 * release a persistent reservation.
2671 *
2672 * An application client releases the persistent reservation by issuing
2673 * a PERSISTENT RESERVE OUT command with RELEASE service action through
2674 * an I_T nexus that is a persistent reservation holder with the
2675 * following parameters:
2676 *
2677 * a) RESERVATION KEY field set to the value of the reservation key
2678 * that is registered with the logical unit for the I_T nexus;
2679 */
2680 if (res_key != pr_reg->pr_res_key) {
2681 printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
2682 " does not match existing SA REGISTER res_key:"
2683 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2684 spin_unlock(&dev->dev_reservation_lock);
2685 core_scsi3_put_pr_reg(pr_reg);
2686 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2687 }
2688 /*
2689 * From spc4r17 Section 5.7.11.2 Releasing and above:
2690 *
2691 * b) TYPE field and SCOPE field set to match the persistent
2692 * reservation being released.
2693 */
2694 if ((pr_res_holder->pr_res_type != type) ||
2695 (pr_res_holder->pr_res_scope != scope)) {
2696 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2697 printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
2698 " reservation from [%s]: %s with different TYPE "
2699 "and/or SCOPE while reservation already held by"
2700 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2701 CMD_TFO(cmd)->get_fabric_name(),
2702 se_sess->se_node_acl->initiatorname,
2703 TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
2704 pr_res_holder->pr_reg_nacl->initiatorname);
2705
2706 spin_unlock(&dev->dev_reservation_lock);
2707 core_scsi3_put_pr_reg(pr_reg);
2708 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2709 }
2710 /*
2711 * In response to a persistent reservation release request from the
2712 * persistent reservation holder the device server shall perform a
2713 * release by doing the following as an uninterrupted series of actions:
2714 * a) Release the persistent reservation;
2715 * b) Not remove any registration(s);
2716 * c) If the released persistent reservation is a registrants only type
2717 * or all registrants type persistent reservation,
2718 * the device server shall establish a unit attention condition for
2719	 * the initiator port associated with every registered
2720	 * I_T nexus other than the I_T nexus on which the PERSISTENT
2721 * RESERVE OUT command with RELEASE service action was received,
2722 * with the additional sense code set to RESERVATIONS RELEASED; and
2723 * d) If the persistent reservation is of any other type, the device
2724 * server shall not establish a unit attention condition.
2725 */
2726 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
2727 pr_reg, 1);
2728
2729 spin_unlock(&dev->dev_reservation_lock);
2730
2731 if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
2732 (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
2733 (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
2734 (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
2735 /*
2736 * If no UNIT ATTENTION conditions will be established for
2737 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
2738 * go ahead and check for APTPL=1 update+write below
2739 */
2740 goto write_aptpl;
2741 }
2742
2743 spin_lock(&pr_tmpl->registration_lock);
2744 list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
2745 pr_reg_list) {
2746 /*
2747 * Do not establish a UNIT ATTENTION condition
2748 * for the calling I_T Nexus
2749 */
2750 if (pr_reg_p == pr_reg)
2751 continue;
2752
2753 core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
2754 pr_reg_p->pr_res_mapped_lun,
2755 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
2756 }
2757 spin_unlock(&pr_tmpl->registration_lock);
2758
2759write_aptpl:
2760 if (pr_tmpl->pr_aptpl_active) {
2761 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
2762 &pr_reg->pr_aptpl_buf[0],
2763 pr_tmpl->pr_aptpl_buf_len);
2764 if (!(ret))
2765 printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
2766 }
2767
2768 core_scsi3_put_pr_reg(pr_reg);
2769 return 0;
2770}
2771
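/*
 * In core_scsi3_emulate_pro_release() above, a release request from a
 * non-holder, or with no reservation outstanding, completes with GOOD
 * status and changes nothing, while a key or TYPE/SCOPE mismatch from the
 * holder returns RESERVATION CONFLICT.  On a successful release of a
 * registrants-only or all-registrants reservation, a RESERVATIONS RELEASED
 * unit attention is queued for every other registered I_T nexus.
 */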
2772static int core_scsi3_emulate_pro_clear(
2773 struct se_cmd *cmd,
2774 u64 res_key)
2775{
2776 struct se_device *dev = cmd->se_dev;
2777 struct se_node_acl *pr_reg_nacl;
2778 struct se_session *se_sess = SE_SESS(cmd);
2779 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
2780 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
2781 u32 pr_res_mapped_lun = 0;
2782 int calling_it_nexus = 0;
2783 /*
2784 * Locate the existing *pr_reg via struct se_node_acl pointers
2785 */
2786 pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
2787 se_sess->se_node_acl, se_sess);
2788 if (!(pr_reg_n)) {
2789 printk(KERN_ERR "SPC-3 PR: Unable to locate"
2790 " PR_REGISTERED *pr_reg for CLEAR\n");
2791 return PYX_TRANSPORT_LU_COMM_FAILURE;
2792 }
2793 /*
2794 * From spc4r17 section 5.7.11.6, Clearing:
2795 *
2796 * Any application client may release the persistent reservation and
2797 * remove all registrations from a device server by issuing a
2798 * PERSISTENT RESERVE OUT command with CLEAR service action through a
2799 * registered I_T nexus with the following parameter:
2800 *
2801 * a) RESERVATION KEY field set to the value of the reservation key
2802 * that is registered with the logical unit for the I_T nexus.
2803 */
2804 if (res_key != pr_reg_n->pr_res_key) {
2805 printk(KERN_ERR "SPC-3 PR REGISTER: Received"
2806 " res_key: 0x%016Lx does not match"
2807 " existing SA REGISTER res_key:"
2808 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
2809 core_scsi3_put_pr_reg(pr_reg_n);
2810 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2811 }
2812 /*
2813 * a) Release the persistent reservation, if any;
2814 */
2815 spin_lock(&dev->dev_reservation_lock);
2816 pr_res_holder = dev->dev_pr_res_holder;
2817 if (pr_res_holder) {
2818 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2819 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
2820 pr_res_holder, 0);
2821 }
2822 spin_unlock(&dev->dev_reservation_lock);
2823 /*
2824 * b) Remove all registration(s) (see spc4r17 5.7.7);
2825 */
2826 spin_lock(&pr_tmpl->registration_lock);
2827 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
2828 &pr_tmpl->registration_list, pr_reg_list) {
2829
2830 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
2831 pr_reg_nacl = pr_reg->pr_reg_nacl;
2832 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
2833 __core_scsi3_free_registration(dev, pr_reg, NULL,
2834 calling_it_nexus);
2835 /*
2836 * e) Establish a unit attention condition for the initiator
2837 * port associated with every registered I_T nexus other
2838 * than the I_T nexus on which the PERSISTENT RESERVE OUT
2839 * command with CLEAR service action was received, with the
2840 * additional sense code set to RESERVATIONS PREEMPTED.
2841 */
2842 if (!(calling_it_nexus))
2843 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
2844 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
2845 }
2846 spin_unlock(&pr_tmpl->registration_lock);
2847
2848 printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
2849 CMD_TFO(cmd)->get_fabric_name());
2850
2851 if (pr_tmpl->pr_aptpl_active) {
2852 core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
2853 printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
2854 " for CLEAR\n");
2855 }
2856
2857 core_scsi3_pr_generation(dev);
2858 return 0;
2859}
2860
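/*
 * CLEAR, as handled above, releases any active reservation, frees every
 * registration (queueing a RESERVATIONS PREEMPTED unit attention for all
 * I_T nexuses other than the one that sent the command), clears the APTPL
 * metadata when APTPL is active, and bumps the PRgeneration counter.
 */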
2861/*
2862 * Called with struct se_device->dev_reservation_lock held.
2863 */
2864static void __core_scsi3_complete_pro_preempt(
2865 struct se_device *dev,
2866 struct t10_pr_registration *pr_reg,
2867 struct list_head *preempt_and_abort_list,
2868 int type,
2869 int scope,
2870 int abort)
2871{
2872 struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
2873 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
2874 char i_buf[PR_REG_ISID_ID_LEN];
2875 int prf_isid;
2876
2877 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2878 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2879 PR_REG_ISID_ID_LEN);
2880 /*
2881	 * Do an implicit RELEASE of the existing reservation.
2882 */
2883 if (dev->dev_pr_res_holder)
2884 __core_scsi3_complete_pro_release(dev, nacl,
2885 dev->dev_pr_res_holder, 0);
2886
2887 dev->dev_pr_res_holder = pr_reg;
2888 pr_reg->pr_res_holder = 1;
2889 pr_reg->pr_res_type = type;
2890 pr_reg->pr_res_scope = scope;
2891
2892 printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
2893 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2894 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2895 core_scsi3_pr_dump_type(type),
2896 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2897 printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
2898 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2899 nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
2900 /*
2901 * For PREEMPT_AND_ABORT, add the preempting reservation's
2902 * struct t10_pr_registration to the list that will be compared
2903 * against received CDBs..
2904 */
2905 if (preempt_and_abort_list)
2906 list_add_tail(&pr_reg->pr_reg_abort_list,
2907 preempt_and_abort_list);
2908}
2909
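/*
 * __core_scsi3_complete_pro_preempt() above performs the implicit release
 * of any existing reservation holder and installs the preempting *pr_reg
 * as the new holder with the requested TYPE and SCOPE.  For
 * PREEMPT_AND_ABORT the new holder is also added to preempt_and_abort_list,
 * the list that is later compared against the reservation keys of
 * outstanding CDBs (see core_scsi3_check_cdb_abort_and_preempt() below).
 */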
2910static void core_scsi3_release_preempt_and_abort(
2911 struct list_head *preempt_and_abort_list,
2912 struct t10_pr_registration *pr_reg_holder)
2913{
2914 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2915
2916 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2917 pr_reg_abort_list) {
2918
2919 list_del(&pr_reg->pr_reg_abort_list);
2920 if (pr_reg_holder == pr_reg)
2921 continue;
2922 if (pr_reg->pr_res_holder) {
2923 printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
2924 continue;
2925 }
2926
2927 pr_reg->pr_reg_deve = NULL;
2928 pr_reg->pr_reg_nacl = NULL;
2929 kfree(pr_reg->pr_aptpl_buf);
2930 kmem_cache_free(t10_pr_reg_cache, pr_reg);
2931 }
2932}
2933
2934int core_scsi3_check_cdb_abort_and_preempt(
2935 struct list_head *preempt_and_abort_list,
2936 struct se_cmd *cmd)
2937{
2938 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2939
2940 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2941 pr_reg_abort_list) {
2942 if (pr_reg->pr_res_key == cmd->pr_res_key)
2943 return 0;
2944 }
2945
2946 return 1;
2947}
2948
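/*
 * core_scsi3_check_cdb_abort_and_preempt() above returns 0 when the
 * command's pr_res_key matches one of the registrations collected on
 * preempt_and_abort_list and 1 otherwise; the task management path
 * presumably uses this to pick out the commands affected by a
 * PREEMPT_AND_ABORT.
 */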
2949static int core_scsi3_pro_preempt(
2950 struct se_cmd *cmd,
2951 int type,
2952 int scope,
2953 u64 res_key,
2954 u64 sa_res_key,
2955 int abort)
2956{
2957 struct se_device *dev = SE_DEV(cmd);
2958 struct se_dev_entry *se_deve;
2959 struct se_node_acl *pr_reg_nacl;
2960 struct se_session *se_sess = SE_SESS(cmd);
2961 struct list_head preempt_and_abort_list;
2962 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
2963 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
2964 u32 pr_res_mapped_lun = 0;
2965 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
2966 int prh_type = 0, prh_scope = 0, ret;
2967
2968 if (!(se_sess))
2969 return PYX_TRANSPORT_LU_COMM_FAILURE;
2970
2971 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2972 pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
2973 se_sess);
2974 if (!(pr_reg_n)) {
2975 printk(KERN_ERR "SPC-3 PR: Unable to locate"
2976 " PR_REGISTERED *pr_reg for PREEMPT%s\n",
2977 (abort) ? "_AND_ABORT" : "");
2978 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2979 }
2980 if (pr_reg_n->pr_res_key != res_key) {
2981 core_scsi3_put_pr_reg(pr_reg_n);
2982 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2983 }
2984 if (scope != PR_SCOPE_LU_SCOPE) {
2985 printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2986 core_scsi3_put_pr_reg(pr_reg_n);
2987 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
2988 }
2989 INIT_LIST_HEAD(&preempt_and_abort_list);
2990
2991 spin_lock(&dev->dev_reservation_lock);
2992 pr_res_holder = dev->dev_pr_res_holder;
2993 if (pr_res_holder &&
2994 ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2995 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
2996 all_reg = 1;
2997
2998 if (!(all_reg) && !(sa_res_key)) {
2999 spin_unlock(&dev->dev_reservation_lock);
3000 core_scsi3_put_pr_reg(pr_reg_n);
3001 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3002 }
3003 /*
3004 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
3005 *
3006 * If the SERVICE ACTION RESERVATION KEY field does not identify a
3007 * persistent reservation holder or there is no persistent reservation
3008 * holder (i.e., there is no persistent reservation), then the device
3009 * server shall perform a preempt by doing the following in an
3010 * uninterrupted series of actions. (See below..)
3011 */
3012 if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
3013 /*
3014 * No existing or SA Reservation Key matching reservations..
3015 *
3016 * PROUT SA PREEMPT with All Registrant type reservations are
3017 * allowed to be processed without a matching SA Reservation Key
3018 */
3019 spin_lock(&pr_tmpl->registration_lock);
3020 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3021 &pr_tmpl->registration_list, pr_reg_list) {
3022 /*
3023 * Removing of registrations in non all registrants
3024 * type reservations without a matching SA reservation
3025 * key.
3026 *
3027 * a) Remove the registrations for all I_T nexuses
3028 * specified by the SERVICE ACTION RESERVATION KEY
3029 * field;
3030 * b) Ignore the contents of the SCOPE and TYPE fields;
3031 * c) Process tasks as defined in 5.7.1; and
3032 * d) Establish a unit attention condition for the
3033 * initiator port associated with every I_T nexus
3034 * that lost its registration other than the I_T
3035 * nexus on which the PERSISTENT RESERVE OUT command
3036 * was received, with the additional sense code set
3037 * to REGISTRATIONS PREEMPTED.
3038 */
3039 if (!(all_reg)) {
3040 if (pr_reg->pr_res_key != sa_res_key)
3041 continue;
3042
3043 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3044 pr_reg_nacl = pr_reg->pr_reg_nacl;
3045 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
3046 __core_scsi3_free_registration(dev, pr_reg,
3047 (abort) ? &preempt_and_abort_list :
3048 NULL, calling_it_nexus);
3049 released_regs++;
3050 } else {
3051 /*
3052 * Case for any existing all registrants type
3053 * reservation, follow logic in spc4r17 section
3054 * 5.7.11.4 Preempting, Table 52 and Figure 7.
3055 *
3056 * For a ZERO SA Reservation key, release
3057	 * all other registrations and do an implicit
3058 * release of active persistent reservation.
3059 *
3060 * For a non-ZERO SA Reservation key, only
3061 * release the matching reservation key from
3062 * registrations.
3063 */
3064 if ((sa_res_key) &&
3065 (pr_reg->pr_res_key != sa_res_key))
3066 continue;
3067
3068 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3069 if (calling_it_nexus)
3070 continue;
3071
3072 pr_reg_nacl = pr_reg->pr_reg_nacl;
3073 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
3074 __core_scsi3_free_registration(dev, pr_reg,
3075 (abort) ? &preempt_and_abort_list :
3076 NULL, 0);
3077 released_regs++;
3078 }
3079 if (!(calling_it_nexus))
3080 core_scsi3_ua_allocate(pr_reg_nacl,
3081 pr_res_mapped_lun, 0x2A,
3082 ASCQ_2AH_RESERVATIONS_PREEMPTED);
3083 }
3084 spin_unlock(&pr_tmpl->registration_lock);
3085 /*
3086 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
3087 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
3088 * RESERVATION KEY field to a value that does not match any
3089 * registered reservation key, then the device server shall
3090 * complete the command with RESERVATION CONFLICT status.
3091 */
3092 if (!(released_regs)) {
3093 spin_unlock(&dev->dev_reservation_lock);
3094 core_scsi3_put_pr_reg(pr_reg_n);
3095 return PYX_TRANSPORT_RESERVATION_CONFLICT;
3096 }
3097 /*
3098 * For an existing all registrants type reservation
3099	 * with a zero SA reservation key, preempt the existing
3100 * reservation with the new PR type and scope.
3101 */
3102 if (pr_res_holder && all_reg && !(sa_res_key)) {
3103 __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
3104 (abort) ? &preempt_and_abort_list : NULL,
3105 type, scope, abort);
3106
3107 if (abort)
3108 core_scsi3_release_preempt_and_abort(
3109 &preempt_and_abort_list, pr_reg_n);
3110 }
3111 spin_unlock(&dev->dev_reservation_lock);
3112
3113 if (pr_tmpl->pr_aptpl_active) {
3114 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
3115 &pr_reg_n->pr_aptpl_buf[0],
3116 pr_tmpl->pr_aptpl_buf_len);
3117 if (!(ret))
3118 printk(KERN_INFO "SPC-3 PR: Updated APTPL"
3119 " metadata for PREEMPT%s\n", (abort) ?
3120 "_AND_ABORT" : "");
3121 }
3122
3123 core_scsi3_put_pr_reg(pr_reg_n);
3124 core_scsi3_pr_generation(SE_DEV(cmd));
3125 return 0;
3126 }
3127 /*
3128 * The PREEMPTing SA reservation key matches that of the
3129 * existing persistent reservation, first, we check if
3130 * we are preempting our own reservation.
3131 * From spc4r17, section 5.7.11.4.3 Preempting
3132 * persistent reservations and registration handling
3133 *
3134 * If an all registrants persistent reservation is not
3135 * present, it is not an error for the persistent
3136 * reservation holder to preempt itself (i.e., a
3137 * PERSISTENT RESERVE OUT with a PREEMPT service action
3138 * or a PREEMPT AND ABORT service action with the
3139 * SERVICE ACTION RESERVATION KEY value equal to the
3140 * persistent reservation holder's reservation key that
3141 * is received from the persistent reservation holder).
3142 * In that case, the device server shall establish the
3143 * new persistent reservation and maintain the
3144 * registration.
3145 */
3146 prh_type = pr_res_holder->pr_res_type;
3147 prh_scope = pr_res_holder->pr_res_scope;
3148 /*
3149 * If the SERVICE ACTION RESERVATION KEY field identifies a
3150 * persistent reservation holder (see 5.7.10), the device
3151 * server shall perform a preempt by doing the following as
3152 * an uninterrupted series of actions:
3153 *
3154 * a) Release the persistent reservation for the holder
3155 * identified by the SERVICE ACTION RESERVATION KEY field;
3156 */
3157 if (pr_reg_n != pr_res_holder)
3158 __core_scsi3_complete_pro_release(dev,
3159 pr_res_holder->pr_reg_nacl,
3160 dev->dev_pr_res_holder, 0);
3161 /*
3162 * b) Remove the registrations for all I_T nexuses identified
3163 * by the SERVICE ACTION RESERVATION KEY field, except the
3164 * I_T nexus that is being used for the PERSISTENT RESERVE
3165 * OUT command. If an all registrants persistent reservation
3166 * is present and the SERVICE ACTION RESERVATION KEY field
3167 * is set to zero, then all registrations shall be removed
3168 * except for that of the I_T nexus that is being used for
3169 * the PERSISTENT RESERVE OUT command;
3170 */
3171 spin_lock(&pr_tmpl->registration_lock);
3172 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3173 &pr_tmpl->registration_list, pr_reg_list) {
3174
3175 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3176 if (calling_it_nexus)
3177 continue;
3178
3179 if (pr_reg->pr_res_key != sa_res_key)
3180 continue;
3181
3182 pr_reg_nacl = pr_reg->pr_reg_nacl;
3183 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
3184 __core_scsi3_free_registration(dev, pr_reg,
3185 (abort) ? &preempt_and_abort_list : NULL,
3186 calling_it_nexus);
3187 /*
3188 * e) Establish a unit attention condition for the initiator
3189 * port associated with every I_T nexus that lost its
3190 * persistent reservation and/or registration, with the
3191 * additional sense code set to REGISTRATIONS PREEMPTED;
3192 */
3193 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
3194 ASCQ_2AH_RESERVATIONS_PREEMPTED);
3195 }
3196 spin_unlock(&pr_tmpl->registration_lock);
3197 /*
3198 * c) Establish a persistent reservation for the preempting
3199 * I_T nexus using the contents of the SCOPE and TYPE fields;
3200 */
3201 __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
3202 (abort) ? &preempt_and_abort_list : NULL,
3203 type, scope, abort);
3204 /*
3205 * d) Process tasks as defined in 5.7.1;
3206 * e) See above..
3207 * f) If the type or scope has changed, then for every I_T nexus
3208 * whose reservation key was not removed, except for the I_T
3209 * nexus on which the PERSISTENT RESERVE OUT command was
3210 * received, the device server shall establish a unit
3211 * attention condition for the initiator port associated with
3212 * that I_T nexus, with the additional sense code set to
3213 * RESERVATIONS RELEASED. If the type or scope have not
3214 * changed, then no unit attention condition(s) shall be
3215 * established for this reason.
3216 */
3217 if ((prh_type != type) || (prh_scope != scope)) {
3218 spin_lock(&pr_tmpl->registration_lock);
3219 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3220 &pr_tmpl->registration_list, pr_reg_list) {
3221
3222 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3223 if (calling_it_nexus)
3224 continue;
3225
3226 core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
3227 pr_reg->pr_res_mapped_lun, 0x2A,
3228 ASCQ_2AH_RESERVATIONS_RELEASED);
3229 }
3230 spin_unlock(&pr_tmpl->registration_lock);
3231 }
3232 spin_unlock(&dev->dev_reservation_lock);
3233 /*
3234 * Call LUN_RESET logic upon list of struct t10_pr_registration,
3235 * All received CDBs for the matching existing reservation and
3236 * registrations undergo ABORT_TASK logic.
3237 *
3238 * From there, core_scsi3_release_preempt_and_abort() will
3239 * release every registration in the list (which have already
3240 * been removed from the primary pr_reg list), except the
3241 * new persistent reservation holder, the calling Initiator Port.
3242 */
3243 if (abort) {
3244 core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
3245 core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
3246 pr_reg_n);
3247 }
3248
3249 if (pr_tmpl->pr_aptpl_active) {
3250 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
3251 &pr_reg_n->pr_aptpl_buf[0],
3252 pr_tmpl->pr_aptpl_buf_len);
3253 if (!(ret))
3254 printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
3255 "%s\n", (abort) ? "_AND_ABORT" : "");
3256 }
3257
3258 core_scsi3_put_pr_reg(pr_reg_n);
3259 core_scsi3_pr_generation(SE_DEV(cmd));
3260 return 0;
3261}
3262
3263static int core_scsi3_emulate_pro_preempt(
3264 struct se_cmd *cmd,
3265 int type,
3266 int scope,
3267 u64 res_key,
3268 u64 sa_res_key,
3269 int abort)
3270{
3271 int ret = 0;
3272
3273 switch (type) {
3274 case PR_TYPE_WRITE_EXCLUSIVE:
3275 case PR_TYPE_EXCLUSIVE_ACCESS:
3276 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
3277 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
3278 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
3279 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
3280 ret = core_scsi3_pro_preempt(cmd, type, scope,
3281 res_key, sa_res_key, abort);
3282 break;
3283 default:
3284 printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
3285 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3286 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3287 }
3288
3289 return ret;
3290}
3291
3292
3293static int core_scsi3_emulate_pro_register_and_move(
3294 struct se_cmd *cmd,
3295 u64 res_key,
3296 u64 sa_res_key,
3297 int aptpl,
3298 int unreg)
3299{
3300 struct se_session *se_sess = SE_SESS(cmd);
3301 struct se_device *dev = SE_DEV(cmd);
3302 struct se_dev_entry *se_deve, *dest_se_deve = NULL;
3303 struct se_lun *se_lun = SE_LUN(cmd);
3304 struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
3305 struct se_port *se_port;
3306 struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
3307 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
3308 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
3309 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
3310 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3311 unsigned char *initiator_str;
3312 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
3313 u32 tid_len, tmp_tid_len;
3314 int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
3315 unsigned short rtpi;
3316 unsigned char proto_ident;
3317
3318 if (!(se_sess) || !(se_lun)) {
3319 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3320 return PYX_TRANSPORT_LU_COMM_FAILURE;
3321 }
3322 memset(dest_iport, 0, 64);
3323 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
3324 se_tpg = se_sess->se_tpg;
3325 tf_ops = TPG_TFO(se_tpg);
3326 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
3327 /*
3328 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
3329 * Register behaviors for a REGISTER AND MOVE service action
3330 *
3331 * Locate the existing *pr_reg via struct se_node_acl pointers
3332 */
3333 pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
3334 se_sess);
3335 if (!(pr_reg)) {
3336 printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
3337 " *pr_reg for REGISTER_AND_MOVE\n");
3338 return PYX_TRANSPORT_LU_COMM_FAILURE;
3339 }
3340 /*
3341	 * The provided reservation key must match the existing reservation key
3342 * provided during this initiator's I_T nexus registration.
3343 */
3344 if (res_key != pr_reg->pr_res_key) {
3345 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
3346 " res_key: 0x%016Lx does not match existing SA REGISTER"
3347 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3348 core_scsi3_put_pr_reg(pr_reg);
3349 return PYX_TRANSPORT_RESERVATION_CONFLICT;
3350 }
3351 /*
3352	 * The service action reservation key needs to be non-zero
3353 */
3354 if (!(sa_res_key)) {
3355 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
3356 " sa_res_key\n");
3357 core_scsi3_put_pr_reg(pr_reg);
3358 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3359 }
3360 /*
3361 * Determine the Relative Target Port Identifier where the reservation
3362 * will be moved to for the TransportID containing SCSI initiator WWN
3363 * information.
3364 */
3365 rtpi = (buf[18] & 0xff) << 8;
3366 rtpi |= buf[19] & 0xff;
3367 tid_len = (buf[20] & 0xff) << 24;
3368 tid_len |= (buf[21] & 0xff) << 16;
3369 tid_len |= (buf[22] & 0xff) << 8;
3370 tid_len |= buf[23] & 0xff;
3371
3372 if ((tid_len + 24) != cmd->data_length) {
3373 printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
3374 " does not equal CDB data_length: %u\n", tid_len,
3375 cmd->data_length);
3376 core_scsi3_put_pr_reg(pr_reg);
3377 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3378 }
3379
3380 spin_lock(&dev->se_port_lock);
3381 list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
3382 if (se_port->sep_rtpi != rtpi)
3383 continue;
3384 dest_se_tpg = se_port->sep_tpg;
3385 if (!(dest_se_tpg))
3386 continue;
3387 dest_tf_ops = TPG_TFO(dest_se_tpg);
3388 if (!(dest_tf_ops))
3389 continue;
3390
3391 atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
3392 smp_mb__after_atomic_inc();
3393 spin_unlock(&dev->se_port_lock);
3394
3395 ret = core_scsi3_tpg_depend_item(dest_se_tpg);
3396 if (ret != 0) {
3397 printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
3398 " for dest_se_tpg\n");
3399 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3400 smp_mb__after_atomic_dec();
3401 core_scsi3_put_pr_reg(pr_reg);
3402 return PYX_TRANSPORT_LU_COMM_FAILURE;
3403 }
3404
3405 spin_lock(&dev->se_port_lock);
3406 break;
3407 }
3408 spin_unlock(&dev->se_port_lock);
3409
3410 if (!(dest_se_tpg) || (!dest_tf_ops)) {
3411 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3412 " fabric ops from Relative Target Port Identifier:"
3413 " %hu\n", rtpi);
3414 core_scsi3_put_pr_reg(pr_reg);
3415 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3416 }
3417 proto_ident = (buf[24] & 0x0f);
3418#if 0
3419 printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
3420 " 0x%02x\n", proto_ident);
3421#endif
3422 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
3423 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
3424 " proto_ident: 0x%02x does not match ident: 0x%02x"
3425 " from fabric: %s\n", proto_ident,
3426 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
3427 dest_tf_ops->get_fabric_name());
3428 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3429 goto out;
3430 }
3431 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3432 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3433 " containg a valid tpg_parse_pr_out_transport_id"
3434 " function pointer\n");
3435 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
3436 goto out;
3437 }
3438 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
3439 (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
3440 if (!(initiator_str)) {
3441 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3442 " initiator_str from Transport ID\n");
3443 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3444 goto out;
3445 }
3446
3447 printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
3448 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
3449 "port" : "device", initiator_str, (iport_ptr != NULL) ?
3450 iport_ptr : "");
3451 /*
3452 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
3453 * action specifies a TransportID that is the same as the initiator port
3454 * of the I_T nexus for the command received, then the command shall
3455 * be terminated with CHECK CONDITION status, with the sense key set to
3456 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
3457 * IN PARAMETER LIST.
3458 */
3459 pr_reg_nacl = pr_reg->pr_reg_nacl;
3460 matching_iname = (!strcmp(initiator_str,
3461 pr_reg_nacl->initiatorname)) ? 1 : 0;
3462 if (!(matching_iname))
3463 goto after_iport_check;
3464
3465 if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
3466 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3467 " matches: %s on received I_T Nexus\n", initiator_str,
3468 pr_reg_nacl->initiatorname);
3469 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3470 goto out;
3471 }
3472 if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
3473 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
3474 " matches: %s %s on received I_T Nexus\n",
3475 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3476 pr_reg->pr_reg_isid);
3477 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3478 goto out;
3479 }
3480after_iport_check:
3481 /*
3482 * Locate the destination struct se_node_acl from the received Transport ID
3483 */
3484 spin_lock_bh(&dest_se_tpg->acl_node_lock);
3485 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3486 initiator_str);
3487 if (dest_node_acl) {
3488 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3489 smp_mb__after_atomic_inc();
3490 }
3491 spin_unlock_bh(&dest_se_tpg->acl_node_lock);
3492
3493 if (!(dest_node_acl)) {
3494 printk(KERN_ERR "Unable to locate %s dest_node_acl for"
3495 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3496 initiator_str);
3497 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3498 goto out;
3499 }
3500 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
3501 if (ret != 0) {
3502 printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
3503 " dest_node_acl\n");
3504 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3505 smp_mb__after_atomic_dec();
3506 dest_node_acl = NULL;
3507 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
3508 goto out;
3509 }
3510#if 0
3511 printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3512 " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
3513 dest_node_acl->initiatorname);
3514#endif
3515 /*
3516 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
3517 * PORT IDENTIFIER.
3518 */
3519 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
3520 if (!(dest_se_deve)) {
3521 printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
3522 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3523 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3524 goto out;
3525 }
3526
3527 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
3528 if (ret < 0) {
3529 printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
3530 atomic_dec(&dest_se_deve->pr_ref_count);
3531 smp_mb__after_atomic_dec();
3532 dest_se_deve = NULL;
3533 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
3534 goto out;
3535 }
3536#if 0
3537 printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3538 " ACL for dest_se_deve->mapped_lun: %u\n",
3539 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
3540 dest_se_deve->mapped_lun);
3541#endif
3542 /*
3543	 * A persistent reservation needs to already exist in order to
3544 * successfully complete the REGISTER_AND_MOVE service action..
3545 */
3546 spin_lock(&dev->dev_reservation_lock);
3547 pr_res_holder = dev->dev_pr_res_holder;
3548 if (!(pr_res_holder)) {
3549 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
3550 " currently held\n");
3551 spin_unlock(&dev->dev_reservation_lock);
3552 ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
3553 goto out;
3554 }
3555 /*
3556	 * The I_T nexus that received the command must be the reservation holder.
3557 *
3558 * From spc4r17 section 5.7.8 Table 50 --
3559 * Register behaviors for a REGISTER AND MOVE service action
3560 */
3561 if (pr_res_holder != pr_reg) {
3562 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3563 " Nexus is not reservation holder\n");
3564 spin_unlock(&dev->dev_reservation_lock);
3565 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
3566 goto out;
3567 }
3568 /*
3569 * From spc4r17 section 5.7.8: registering and moving reservation
3570 *
3571 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
3572 * action is received and the established persistent reservation is a
3573 * Write Exclusive - All Registrants type or Exclusive Access -
3574 * All Registrants type reservation, then the command shall be completed
3575 * with RESERVATION CONFLICT status.
3576 */
3577 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
3578 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
3579 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
3580 " reservation for type: %s\n",
3581 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3582 spin_unlock(&dev->dev_reservation_lock);
3583 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
3584 goto out;
3585 }
3586 pr_res_nacl = pr_res_holder->pr_reg_nacl;
3587 /*
3588 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
3589 */
3590 type = pr_res_holder->pr_res_type;
3591	scope = pr_res_holder->pr_res_scope;
3592 /*
3593 * c) Associate the reservation key specified in the SERVICE ACTION
3594 * RESERVATION KEY field with the I_T nexus specified as the
3595 * destination of the register and move, where:
3596 * A) The I_T nexus is specified by the TransportID and the
3597 * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
3598 * B) Regardless of the TransportID format used, the association for
3599 * the initiator port is based on either the initiator port name
3600 * (see 3.1.71) on SCSI transport protocols where port names are
3601 * required or the initiator port identifier (see 3.1.70) on SCSI
3602 * transport protocols where port names are not required;
3603 * d) Register the reservation key specified in the SERVICE ACTION
3604 * RESERVATION KEY field;
3605 * e) Retain the reservation key specified in the SERVICE ACTION
3606 * RESERVATION KEY field and associated information;
3607 *
3608	 * Also, it is not an error for a REGISTER AND MOVE service action to
3609 * register an I_T nexus that is already registered with the same
3610 * reservation key or a different reservation key.
3611 */
3612 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3613 iport_ptr);
3614 if (!(dest_pr_reg)) {
3615 ret = core_scsi3_alloc_registration(SE_DEV(cmd),
3616 dest_node_acl, dest_se_deve, iport_ptr,
3617 sa_res_key, 0, aptpl, 2, 1);
3618 if (ret != 0) {
3619 spin_unlock(&dev->dev_reservation_lock);
3620 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3621 goto out;
3622 }
3623 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3624 iport_ptr);
3625 new_reg = 1;
3626 }
3627 /*
3628 * f) Release the persistent reservation for the persistent reservation
3629	 *    holder (i.e., the I_T nexus on which the command was received);
3630 */
3631 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
3632 dev->dev_pr_res_holder, 0);
3633 /*
3634 * g) Move the persistent reservation to the specified I_T nexus using
3635 * the same scope and type as the persistent reservation released in
3636 * item f); and
3637 */
3638 dev->dev_pr_res_holder = dest_pr_reg;
3639 dest_pr_reg->pr_res_holder = 1;
3640 dest_pr_reg->pr_res_type = type;
3641	dest_pr_reg->pr_res_scope = scope;
3642 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
3643 PR_REG_ISID_ID_LEN);
3644 /*
3645 * Increment PRGeneration for existing registrations..
3646 */
3647 if (!(new_reg))
3648 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
3649 spin_unlock(&dev->dev_reservation_lock);
3650
3651 printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
3652 " created new reservation holder TYPE: %s on object RTPI:"
3653 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
3654 core_scsi3_pr_dump_type(type), rtpi,
3655 dest_pr_reg->pr_res_generation);
3656 printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
3657 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
3658 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
3659 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
3660 dest_node_acl->initiatorname, (iport_ptr != NULL) ?
3661 iport_ptr : "");
3662 /*
3663 * It is now safe to release configfs group dependencies for destination
3664 * of Transport ID Initiator Device/Port Identifier
3665 */
3666 core_scsi3_lunacl_undepend_item(dest_se_deve);
3667 core_scsi3_nodeacl_undepend_item(dest_node_acl);
3668 core_scsi3_tpg_undepend_item(dest_se_tpg);
3669 /*
3670 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
3671 * nexus on which PERSISTENT RESERVE OUT command was received.
3672 */
3673 if (unreg) {
3674 spin_lock(&pr_tmpl->registration_lock);
3675 __core_scsi3_free_registration(dev, pr_reg, NULL, 1);
3676 spin_unlock(&pr_tmpl->registration_lock);
3677 } else
3678 core_scsi3_put_pr_reg(pr_reg);
3679
3680 /*
3681 * Clear the APTPL metadata if APTPL has been disabled, otherwise
3682 * write out the updated metadata to struct file for this SCSI device.
3683 */
3684 if (!(aptpl)) {
3685 pr_tmpl->pr_aptpl_active = 0;
3686 core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
3687 printk("SPC-3 PR: Set APTPL Bit Deactivated for"
3688 " REGISTER_AND_MOVE\n");
3689 } else {
3690 pr_tmpl->pr_aptpl_active = 1;
3691 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
3692 &dest_pr_reg->pr_aptpl_buf[0],
3693 pr_tmpl->pr_aptpl_buf_len);
3694 if (!(ret))
3695 printk("SPC-3 PR: Set APTPL Bit Activated for"
3696 " REGISTER_AND_MOVE\n");
3697 }
3698
3699 core_scsi3_put_pr_reg(dest_pr_reg);
3700 return 0;
3701out:
3702 if (dest_se_deve)
3703 core_scsi3_lunacl_undepend_item(dest_se_deve);
3704 if (dest_node_acl)
3705 core_scsi3_nodeacl_undepend_item(dest_node_acl);
3706 core_scsi3_tpg_undepend_item(dest_se_tpg);
3707 core_scsi3_put_pr_reg(pr_reg);
3708 return ret;
3709}
3710
3711static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3712{
3713 unsigned int __v1, __v2;
3714
3715 __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
3716 __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
3717
3718 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
3719}
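/*
 * The open-coded shifts above amount to a single 8-byte big-endian load; a
 * minimal equivalent sketch using the kernel's unaligned helpers (assumes
 * <asm/unaligned.h> is available; shown for reference only, not used by this
 * file):
 */
static inline u64 __maybe_unused core_scsi3_extract_reservation_key_sketch(
	unsigned char *cdb)
{
	/* RESERVATION KEY is an 8-byte big-endian field per spc4r17 */
	return get_unaligned_be64(cdb);
}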
3720
3721/*
3722 * See spc4r17 section 6.14 Table 170
3723 */
3724static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3725{
3726 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3727 u64 res_key, sa_res_key;
3728 int sa, scope, type, aptpl;
3729 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
3730 /*
3731	 * FIXME: A NULL struct se_session pointer means this is not coming from
3732 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3733 */
3734 if (!(SE_SESS(cmd)))
3735 return PYX_TRANSPORT_LU_COMM_FAILURE;
3736
3737 if (cmd->data_length < 24) {
3738 printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
3739 " length too small: %u\n", cmd->data_length);
3740 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3741 }
3742 /*
3743 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
3744 */
3745 sa = (cdb[1] & 0x1f);
3746 scope = (cdb[2] & 0xf0);
3747 type = (cdb[2] & 0x0f);
3748 /*
3749 * From PERSISTENT_RESERVE_OUT parameter list (payload)
3750 */
3751 res_key = core_scsi3_extract_reservation_key(&buf[0]);
3752 sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
3753 /*
3754 * REGISTER_AND_MOVE uses a different SA parameter list containing
3755 * SCSI TransportIDs.
3756 */
3757 if (sa != PRO_REGISTER_AND_MOVE) {
3758 spec_i_pt = (buf[20] & 0x08);
3759 all_tg_pt = (buf[20] & 0x04);
3760 aptpl = (buf[20] & 0x01);
3761 } else {
3762 aptpl = (buf[17] & 0x01);
3763 unreg = (buf[17] & 0x02);
3764 }
3765 /*
3766 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3767 */
3768 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
3769 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3770 /*
3771 * From spc4r17 section 6.14:
3772 *
3773 * If the SPEC_I_PT bit is set to zero, the service action is not
3774 * REGISTER AND MOVE, and the parameter list length is not 24, then
3775 * the command shall be terminated with CHECK CONDITION status, with
3776 * the sense key set to ILLEGAL REQUEST, and the additional sense
3777 * code set to PARAMETER LIST LENGTH ERROR.
3778 */
3779 if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
3780 (cmd->data_length != 24)) {
3781 printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
3782 " list length: %u\n", cmd->data_length);
3783 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3784 }
3785 /*
3786	 * core_scsi3_emulate_pro_* function parameters
3787 * are defined by spc4r17 Table 174:
3788 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
3789 */
3790 switch (sa) {
3791 case PRO_REGISTER:
3792 return core_scsi3_emulate_pro_register(cmd,
3793 res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
3794 case PRO_RESERVE:
3795 return core_scsi3_emulate_pro_reserve(cmd,
3796 type, scope, res_key);
3797 case PRO_RELEASE:
3798 return core_scsi3_emulate_pro_release(cmd,
3799 type, scope, res_key);
3800 case PRO_CLEAR:
3801 return core_scsi3_emulate_pro_clear(cmd, res_key);
3802 case PRO_PREEMPT:
3803 return core_scsi3_emulate_pro_preempt(cmd, type, scope,
3804 res_key, sa_res_key, 0);
3805 case PRO_PREEMPT_AND_ABORT:
3806 return core_scsi3_emulate_pro_preempt(cmd, type, scope,
3807 res_key, sa_res_key, 1);
3808 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
3809 return core_scsi3_emulate_pro_register(cmd,
3810 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
3811 case PRO_REGISTER_AND_MOVE:
3812 return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
3813 sa_res_key, aptpl, unreg);
3814 default:
3815 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
3816 " action: 0x%02x\n", cdb[1] & 0x1f);
3817 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3818 }
3819
3820 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3821}
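/*
 * For reference, a sketch (hypothetical helper, not part of this driver) of
 * the 24-byte PR OUT parameter list layout consumed above for service actions
 * other than REGISTER AND MOVE: bytes 0-7 carry the RESERVATION KEY, bytes
 * 8-15 the SERVICE ACTION RESERVATION KEY, and byte 20 the SPEC_I_PT (0x08),
 * ALL_TG_PT (0x04) and APTPL (0x01) flags.
 */
static void __maybe_unused pr_out_fill_param_list_sketch(unsigned char *buf,
		u64 res_key, u64 sa_res_key, int aptpl)
{
	int i;

	memset(buf, 0, 24);
	for (i = 0; i < 8; i++) {
		buf[i] = (res_key >> (56 - (i * 8))) & 0xff;
		buf[8 + i] = (sa_res_key >> (56 - (i * 8))) & 0xff;
	}
	if (aptpl)
		buf[20] |= 0x01;
}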
3822
3823/*
3824 * PERSISTENT_RESERVE_IN Service Action READ_KEYS
3825 *
3826 * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
3827 */
3828static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3829{
3830 struct se_device *se_dev = SE_DEV(cmd);
3831 struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
3832 struct t10_pr_registration *pr_reg;
3833 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3834 u32 add_len = 0, off = 8;
3835
3836 if (cmd->data_length < 8) {
3837 printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
3838 " too small\n", cmd->data_length);
3839 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3840 }
3841
3842 buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
3843 buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
3844 buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
3845 buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
3846
3847 spin_lock(&T10_RES(su_dev)->registration_lock);
3848 list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
3849 pr_reg_list) {
3850 /*
3851		 * Check for overflow of 8-byte PRI READ_KEYS payload and
3852 * next reservation key list descriptor.
3853 */
3854 if ((add_len + 8) > (cmd->data_length - 8))
3855 break;
3856
3857 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
3858 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
3859 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
3860 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
3861 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
3862 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
3863 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
3864 buf[off++] = (pr_reg->pr_res_key & 0xff);
3865
3866 add_len += 8;
3867 }
3868 spin_unlock(&T10_RES(su_dev)->registration_lock);
3869
3870 buf[4] = ((add_len >> 24) & 0xff);
3871 buf[5] = ((add_len >> 16) & 0xff);
3872 buf[6] = ((add_len >> 8) & 0xff);
3873 buf[7] = (add_len & 0xff);
3874
3875 return 0;
3876}
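/*
 * A decoding sketch (hypothetical helper, for illustration only) of the
 * READ_KEYS payload built above: 4-byte PRGENERATION, 4-byte ADDITIONAL
 * LENGTH, then one 8-byte reservation key per registration.
 */
static void __maybe_unused pri_read_keys_dump_sketch(const unsigned char *buf,
		u32 len)
{
	u32 add_len, i;
	u64 key;
	int b;

	if (len < 8)
		return;
	add_len = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
	for (i = 0; (i + 8) <= add_len && (8 + i + 8) <= len; i += 8) {
		key = 0;
		for (b = 0; b < 8; b++)
			key = (key << 8) | buf[8 + i + b];
		printk(KERN_INFO "PR key[%u]: 0x%016llx\n", i / 8,
				(unsigned long long)key);
	}
}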
3877
3878/*
3879 * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
3880 *
3881 * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
3882 */
3883static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3884{
3885 struct se_device *se_dev = SE_DEV(cmd);
3886 struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
3887 struct t10_pr_registration *pr_reg;
3888 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3889 u64 pr_res_key;
3890 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
3891
3892 if (cmd->data_length < 8) {
3893 printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
3894 " too small\n", cmd->data_length);
3895 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3896 }
3897
3898 buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
3899 buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
3900 buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
3901 buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
3902
3903 spin_lock(&se_dev->dev_reservation_lock);
3904 pr_reg = se_dev->dev_pr_res_holder;
3905 if ((pr_reg)) {
3906 /*
3907 * Set the hardcoded Additional Length
3908 */
3909 buf[4] = ((add_len >> 24) & 0xff);
3910 buf[5] = ((add_len >> 16) & 0xff);
3911 buf[6] = ((add_len >> 8) & 0xff);
3912 buf[7] = (add_len & 0xff);
3913
3914 if (cmd->data_length < 22) {
3915 spin_unlock(&se_dev->dev_reservation_lock);
3916 return 0;
3917 }
3918 /*
3919 * Set the Reservation key.
3920 *
3921 * From spc4r17, section 5.7.10:
3922 * A persistent reservation holder has its reservation key
3923 * returned in the parameter data from a PERSISTENT
3924 * RESERVE IN command with READ RESERVATION service action as
3925 * follows:
3926 * a) For a persistent reservation of the type Write Exclusive
3927		 *    - All Registrants or Exclusive Access - All Registrants,
3928 * the reservation key shall be set to zero; or
3929 * b) For all other persistent reservation types, the
3930 * reservation key shall be set to the registered
3931 * reservation key for the I_T nexus that holds the
3932 * persistent reservation.
3933 */
3934 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
3935 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
3936 pr_res_key = 0;
3937 else
3938 pr_res_key = pr_reg->pr_res_key;
3939
3940 buf[8] = ((pr_res_key >> 56) & 0xff);
3941 buf[9] = ((pr_res_key >> 48) & 0xff);
3942 buf[10] = ((pr_res_key >> 40) & 0xff);
3943 buf[11] = ((pr_res_key >> 32) & 0xff);
3944 buf[12] = ((pr_res_key >> 24) & 0xff);
3945 buf[13] = ((pr_res_key >> 16) & 0xff);
3946 buf[14] = ((pr_res_key >> 8) & 0xff);
3947 buf[15] = (pr_res_key & 0xff);
3948 /*
3949 * Set the SCOPE and TYPE
3950 */
3951 buf[21] = (pr_reg->pr_res_scope & 0xf0) |
3952 (pr_reg->pr_res_type & 0x0f);
3953 }
3954 spin_unlock(&se_dev->dev_reservation_lock);
3955
3956 return 0;
3957}
3958
3959/*
3960 * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
3961 *
3962 * See spc4r17 section 6.13.4 Table 165
3963 */
3964static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3965{
3966 struct se_device *dev = SE_DEV(cmd);
3967 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
3968 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3969 u16 add_len = 8; /* Hardcoded to 8. */
3970
3971 if (cmd->data_length < 6) {
3972 printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
3973 " %u too small\n", cmd->data_length);
3974 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3975 }
3976
3977	buf[0] = ((add_len >> 8) & 0xff);
3978 buf[1] = (add_len & 0xff);
3979	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
3980 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
3981 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
3982 buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
3983 /*
3984 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
3985	 * set the TMV: Type Mask Valid bit.
3986 */
3987 buf[3] |= 0x80;
3988 /*
3989 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
3990 */
3991 buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
3992 /*
3993 * PTPL_A: Persistence across Target Power Loss Active bit
3994 */
3995 if (pr_tmpl->pr_aptpl_active)
3996 buf[3] |= 0x01;
3997 /*
3998 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
3999 */
4000 buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4001 buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
4002 buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
4003 buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
4004 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
4005 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4006
4007 return 0;
4008}
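/*
 * A sketch (hypothetical helper) showing how an initiator would read back the
 * capability bits set above from bytes 2 and 3 of the REPORT_CAPABILITIES
 * payload.
 */
static void __maybe_unused pri_report_capabilities_dump_sketch(
		const unsigned char *buf)
{
	printk(KERN_INFO "CRH: %d SIP_C: %d ATP_C: %d PTPL_C: %d TMV: %d"
		" PTPL_A: %d\n",
		!!(buf[2] & 0x10), !!(buf[2] & 0x08), !!(buf[2] & 0x04),
		!!(buf[2] & 0x01), !!(buf[3] & 0x80), !!(buf[3] & 0x01));
}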
4009
4010/*
4011 * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
4012 *
4013 * See spc4r17 section 6.13.5 Table 168 and 169
4014 */
4015static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4016{
4017 struct se_device *se_dev = SE_DEV(cmd);
4018 struct se_node_acl *se_nacl;
4019 struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
4020 struct se_portal_group *se_tpg;
4021 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
4022 struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
4023 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
4024 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
4025 u32 off = 8; /* off into first Full Status descriptor */
4026 int format_code = 0;
4027
4028 if (cmd->data_length < 8) {
4029 printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
4030 " too small\n", cmd->data_length);
4031 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4032 }
4033
4034 buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
4035 buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
4036 buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
4037 buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
4038
4039 spin_lock(&pr_tmpl->registration_lock);
4040 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
4041 &pr_tmpl->registration_list, pr_reg_list) {
4042
4043 se_nacl = pr_reg->pr_reg_nacl;
4044 se_tpg = pr_reg->pr_reg_nacl->se_tpg;
4045 add_desc_len = 0;
4046
4047 atomic_inc(&pr_reg->pr_res_holders);
4048 smp_mb__after_atomic_inc();
4049 spin_unlock(&pr_tmpl->registration_lock);
4050 /*
4051 * Determine expected length of $FABRIC_MOD specific
4052 * TransportID full status descriptor..
4053 */
4054 exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
4055 se_tpg, se_nacl, pr_reg, &format_code);
4056
4057 if ((exp_desc_len + add_len) > cmd->data_length) {
4058 printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
4059 " out of buffer: %d\n", cmd->data_length);
4060 spin_lock(&pr_tmpl->registration_lock);
4061 atomic_dec(&pr_reg->pr_res_holders);
4062 smp_mb__after_atomic_dec();
4063 break;
4064 }
4065 /*
4066 * Set RESERVATION KEY
4067 */
4068 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
4069 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
4070 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
4071 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
4072 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
4073 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
4074 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
4075 buf[off++] = (pr_reg->pr_res_key & 0xff);
4076 off += 4; /* Skip Over Reserved area */
4077
4078 /*
4079 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
4080 */
4081 if (pr_reg->pr_reg_all_tg_pt)
4082 buf[off] = 0x02;
4083 /*
4084		 * Set the PR_HOLDER bit if this registration is the
4085		 * current persistent reservation holder.
4086 *
4087 * Also, if this registration is the reservation
4088 * holder, fill in SCOPE and TYPE in the next byte.
4089 */
4090 if (pr_reg->pr_res_holder) {
4091 buf[off++] |= 0x01;
4092 buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
4093 (pr_reg->pr_res_type & 0x0f);
4094 } else
4095 off += 2;
4096
4097 off += 4; /* Skip over reserved area */
4098 /*
4099		 * From spc4r17 6.13.5:
4100 *
4101 * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT
4102 * IDENTIFIER field contains the relative port identifier (see
4103 * 3.1.120) of the target port that is part of the I_T nexus
4104 * described by this full status descriptor. If the ALL_TG_PT
4105 * bit is set to one, the contents of the RELATIVE TARGET PORT
4106 * IDENTIFIER field are not defined by this standard.
4107 */
4108 if (!(pr_reg->pr_reg_all_tg_pt)) {
4109 struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
4110
4111 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
4112 buf[off++] = (port->sep_rtpi & 0xff);
4113 } else
4114			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
4115
4116 /*
4117 * Now, have the $FABRIC_MOD fill in the protocol identifier
4118 */
4119 desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
4120 se_nacl, pr_reg, &format_code, &buf[off+4]);
4121
4122 spin_lock(&pr_tmpl->registration_lock);
4123 atomic_dec(&pr_reg->pr_res_holders);
4124 smp_mb__after_atomic_dec();
4125 /*
4126 * Set the ADDITIONAL DESCRIPTOR LENGTH
4127 */
4128 buf[off++] = ((desc_len >> 24) & 0xff);
4129 buf[off++] = ((desc_len >> 16) & 0xff);
4130 buf[off++] = ((desc_len >> 8) & 0xff);
4131 buf[off++] = (desc_len & 0xff);
4132 /*
4133		 * Size of full descriptor header minus TransportID
4134		 * (containing $FABRIC_MOD specific) initiator device/port
4135 * WWN information.
4136 *
4137 * See spc4r17 Section 6.13.5 Table 169
4138 */
4139 add_desc_len = (24 + desc_len);
4140
4141 off += desc_len;
4142 add_len += add_desc_len;
4143 }
4144 spin_unlock(&pr_tmpl->registration_lock);
4145 /*
4146 * Set ADDITIONAL_LENGTH
4147 */
4148 buf[4] = ((add_len >> 24) & 0xff);
4149 buf[5] = ((add_len >> 16) & 0xff);
4150 buf[6] = ((add_len >> 8) & 0xff);
4151 buf[7] = (add_len & 0xff);
4152
4153 return 0;
4154}
4155
4156static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
4157{
4158 switch (cdb[1] & 0x1f) {
4159 case PRI_READ_KEYS:
4160 return core_scsi3_pri_read_keys(cmd);
4161 case PRI_READ_RESERVATION:
4162 return core_scsi3_pri_read_reservation(cmd);
4163 case PRI_REPORT_CAPABILITIES:
4164 return core_scsi3_pri_report_capabilities(cmd);
4165 case PRI_READ_FULL_STATUS:
4166 return core_scsi3_pri_read_full_status(cmd);
4167 default:
4168 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
4169 " action: 0x%02x\n", cdb[1] & 0x1f);
4170 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4171 }
4172
4173}
4174
4175int core_scsi3_emulate_pr(struct se_cmd *cmd)
4176{
4177 unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
4178 struct se_device *dev = cmd->se_dev;
4179 /*
4180 * Following spc2r20 5.5.1 Reservations overview:
4181 *
4182 * If a logical unit has been reserved by any RESERVE command and is
4183 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
4184 * PERSISTENT RESERVE OUT commands shall conflict regardless of
4185 * initiator or service action and shall terminate with a RESERVATION
4186 * CONFLICT status.
4187 */
4188 if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
4189 printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
4190 " SPC-2 reservation is held, returning"
4191 " RESERVATION_CONFLICT\n");
4192 return PYX_TRANSPORT_RESERVATION_CONFLICT;
4193 }
4194
4195 return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
4196 core_scsi3_emulate_pr_out(cmd, cdb) :
4197 core_scsi3_emulate_pr_in(cmd, cdb);
4198}
4199
4200static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
4201{
4202 return 0;
4203}
4204
4205static int core_pt_seq_non_holder(
4206 struct se_cmd *cmd,
4207 unsigned char *cdb,
4208 u32 pr_reg_type)
4209{
4210 return 0;
4211}
4212
4213int core_setup_reservations(struct se_device *dev, int force_pt)
4214{
4215 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
4216 struct t10_reservation_template *rest = &su_dev->t10_reservation;
4217 /*
4218 * If this device is from Target_Core_Mod/pSCSI, use the reservations
4219 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
4220 * cause a problem because libata and some SATA RAID HBAs appear
4221	 * under Linux/SCSI, but emulate reservations themselves.
4222 */
4223 if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
4224 !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
4225 rest->res_type = SPC_PASSTHROUGH;
4226 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
4227 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
4228 printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
4229 " emulation\n", TRANSPORT(dev)->name);
4230 return 0;
4231 }
4232 /*
4233 * If SPC-3 or above is reported by real or emulated struct se_device,
4234 * use emulated Persistent Reservations.
4235 */
4236 if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
4237 rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
4238 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
4239 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
4240 printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
4241 " emulation\n", TRANSPORT(dev)->name);
4242 } else {
4243 rest->res_type = SPC2_RESERVATIONS;
4244 rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
4245 rest->pr_ops.t10_seq_non_holder =
4246 &core_scsi2_reservation_seq_non_holder;
4247 printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
4248 TRANSPORT(dev)->name);
4249 }
4250
4251 return 0;
4252}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 000000000000..5603bcfd86d3
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
1#ifndef TARGET_CORE_PR_H
2#define TARGET_CORE_PR_H
3/*
4 * PERSISTENT_RESERVE_OUT service action codes
5 *
6 * spc4r17 section 6.14.2 Table 171
7 */
8#define PRO_REGISTER 0x00
9#define PRO_RESERVE 0x01
10#define PRO_RELEASE 0x02
11#define PRO_CLEAR 0x03
12#define PRO_PREEMPT 0x04
13#define PRO_PREEMPT_AND_ABORT 0x05
14#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06
15#define PRO_REGISTER_AND_MOVE 0x07
16/*
17 * PERSISTENT_RESERVE_IN service action codes
18 *
19 * spc4r17 section 6.13.1 Table 159
20 */
21#define PRI_READ_KEYS 0x00
22#define PRI_READ_RESERVATION 0x01
23#define PRI_REPORT_CAPABILITIES 0x02
24#define PRI_READ_FULL_STATUS 0x03
25/*
26 * PERSISTENT_RESERVE_ SCOPE field
27 *
28 * spc4r17 section 6.13.3.3 Table 163
29 */
30#define PR_SCOPE_LU_SCOPE 0x00
31/*
32 * PERSISTENT_RESERVE_* TYPE field
33 *
34 * spc4r17 section 6.13.3.4 Table 164
35 */
36#define PR_TYPE_WRITE_EXCLUSIVE 0x01
37#define PR_TYPE_EXCLUSIVE_ACCESS 0x03
38#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
39#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
40#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07
41#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08
42
43#define PR_APTPL_MAX_IPORT_LEN 256
44#define PR_APTPL_MAX_TPORT_LEN 256
45
46extern struct kmem_cache *t10_pr_reg_cache;
47
48extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
49 char *, u32);
50extern int core_scsi2_emulate_crh(struct se_cmd *);
51extern int core_scsi3_alloc_aptpl_registration(
52 struct t10_reservation_template *, u64,
53 unsigned char *, unsigned char *, u32,
54 unsigned char *, u16, u32, int, int, u8);
55extern int core_scsi3_check_aptpl_registration(struct se_device *,
56 struct se_portal_group *, struct se_lun *,
57 struct se_lun_acl *);
58extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
59 struct se_node_acl *);
60extern void core_scsi3_free_all_registrations(struct se_device *);
61extern unsigned char *core_scsi3_pr_dump_type(int);
62extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
63 struct se_cmd *);
64extern int core_scsi3_emulate_pr(struct se_cmd *);
65extern int core_setup_reservations(struct se_device *, int);
66
67#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 000000000000..331d423fd0e0
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1473 @@
1/*******************************************************************************
2 * Filename: target_core_pscsi.c
3 *
4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
5 *
6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/string.h>
31#include <linux/parser.h>
32#include <linux/timer.h>
33#include <linux/blkdev.h>
34#include <linux/blk_types.h>
35#include <linux/slab.h>
36#include <linux/spinlock.h>
37#include <linux/genhd.h>
38#include <linux/cdrom.h>
39#include <linux/file.h>
40#include <scsi/scsi.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_host.h>
44#include <scsi/scsi_tcq.h>
45
46#include <target/target_core_base.h>
47#include <target/target_core_device.h>
48#include <target/target_core_transport.h>
49
50#include "target_core_pscsi.h"
51
52#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
53
54static struct se_subsystem_api pscsi_template;
55
56static void pscsi_req_done(struct request *, int);
57
58/* pscsi_get_sh():
59 *
60 *
61 */
62static struct Scsi_Host *pscsi_get_sh(u32 host_no)
63{
64 struct Scsi_Host *sh = NULL;
65
66 sh = scsi_host_lookup(host_no);
67 if (IS_ERR(sh)) {
68 printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
69 " %u\n", host_no);
70 return NULL;
71 }
72
73 return sh;
74}
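/*
 * A minimal usage sketch (hypothetical helper, for illustration only): a
 * successful scsi_host_lookup() — wrapped by pscsi_get_sh() above — takes a
 * reference on the Scsi_Host that must later be dropped with scsi_host_put(),
 * as pscsi_detach_hba() below does.
 */
static void __maybe_unused pscsi_sh_ref_sketch(u32 host_no)
{
	struct Scsi_Host *sh = pscsi_get_sh(host_no);

	if (!sh)
		return;
	/* ... use sh->host_no, sh->hostt->name, etc. here ... */
	scsi_host_put(sh);
}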
75
76/* pscsi_attach_hba():
77 *
78 * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
79 * from the passed SCSI Host ID.
80 */
81static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
82{
83 int hba_depth;
84 struct pscsi_hba_virt *phv;
85
86 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
87 if (!(phv)) {
88 printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
89 return -1;
90 }
91 phv->phv_host_id = host_id;
92 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
93 hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
94 atomic_set(&hba->left_queue_depth, hba_depth);
95 atomic_set(&hba->max_queue_depth, hba_depth);
96
97 hba->hba_ptr = (void *)phv;
98
99 printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
100 " Generic Target Core Stack %s\n", hba->hba_id,
101 PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
102 printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
103 " Target Core with TCQ Depth: %d\n", hba->hba_id,
104 atomic_read(&hba->max_queue_depth));
105
106 return 0;
107}
108
109static void pscsi_detach_hba(struct se_hba *hba)
110{
111 struct pscsi_hba_virt *phv = hba->hba_ptr;
112 struct Scsi_Host *scsi_host = phv->phv_lld_host;
113
114 if (scsi_host) {
115 scsi_host_put(scsi_host);
116
117 printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
118 " Generic Target Core\n", hba->hba_id,
119 (scsi_host->hostt->name) ? (scsi_host->hostt->name) :
120 "Unknown");
121 } else
122 printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
123 " from Generic Target Core\n", hba->hba_id);
124
125 kfree(phv);
126 hba->hba_ptr = NULL;
127}
128
129static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
130{
131 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
132 struct Scsi_Host *sh = phv->phv_lld_host;
133 int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
134 /*
135 * Release the struct Scsi_Host
136 */
137 if (!(mode_flag)) {
138 if (!(sh))
139 return 0;
140
141 phv->phv_lld_host = NULL;
142 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
143 atomic_set(&hba->left_queue_depth, hba_depth);
144 atomic_set(&hba->max_queue_depth, hba_depth);
145
146 printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
147 " %s\n", hba->hba_id, (sh->hostt->name) ?
148 (sh->hostt->name) : "Unknown");
149
150 scsi_host_put(sh);
151 return 0;
152 }
153 /*
154 * Otherwise, locate struct Scsi_Host from the original passed
155 * pSCSI Host ID and enable for phba mode
156 */
157 sh = pscsi_get_sh(phv->phv_host_id);
158 if (!(sh)) {
159 printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
160 " phv_host_id: %d\n", phv->phv_host_id);
161 return -1;
162 }
163 /*
164 * Usually the SCSI LLD will use the hostt->can_queue value to define
165 * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
166 * this at all and set sh->can_queue at runtime.
167 */
168 hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
169 sh->hostt->can_queue : sh->can_queue;
170
171 atomic_set(&hba->left_queue_depth, hba_depth);
172 atomic_set(&hba->max_queue_depth, hba_depth);
173
174 phv->phv_lld_host = sh;
175 phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
176
177 printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
178 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
179
180 return 1;
181}
182
183static void pscsi_tape_read_blocksize(struct se_device *dev,
184 struct scsi_device *sdev)
185{
186 unsigned char cdb[MAX_COMMAND_SIZE], *buf;
187 int ret;
188
189 buf = kzalloc(12, GFP_KERNEL);
190 if (!buf)
191 return;
192
193 memset(cdb, 0, MAX_COMMAND_SIZE);
194 cdb[0] = MODE_SENSE;
195 cdb[4] = 0x0c; /* 12 bytes */
196
197 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
198 HZ, 1, NULL);
199 if (ret)
200 goto out_free;
201
202 /*
203	 * If MODE_SENSE returns a zero block length, default to 1024.
204 */
205 sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
206 if (!sdev->sector_size)
207 sdev->sector_size = 1024;
208out_free:
209 kfree(buf);
210}
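/*
 * The layout assumed by the MODE_SENSE parsing above, as a reference sketch
 * (hypothetical helper): a 4-byte mode parameter header followed by an 8-byte
 * block descriptor whose last three bytes (buf[9..11] of the 12-byte buffer)
 * carry the block length.
 */
static inline unsigned int pscsi_mode_sense_block_length_sketch(
		const unsigned char *buf)
{
	return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}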
211
212static void
213pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
214{
215 unsigned char *buf;
216
217 if (sdev->inquiry_len < INQUIRY_LEN)
218 return;
219
220 buf = sdev->inquiry;
221 if (!buf)
222 return;
223 /*
224 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
225 */
226 memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
227 memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
228 memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
229}
230
231static int
232pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
233{
234 unsigned char cdb[MAX_COMMAND_SIZE], *buf;
235 int ret;
236
237 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
238 if (!buf)
239 return -1;
240
241 memset(cdb, 0, MAX_COMMAND_SIZE);
242 cdb[0] = INQUIRY;
243 cdb[1] = 0x01; /* Query VPD */
244 cdb[2] = 0x80; /* Unit Serial Number */
245 cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
246 cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
247
248 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
249 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
250 if (ret)
251 goto out_free;
252
253 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
254
255 wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
256
257 kfree(buf);
258 return 0;
259
260out_free:
261 kfree(buf);
262 return -1;
263}
264
265static void
266pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
267 struct t10_wwn *wwn)
268{
269 unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
270 int ident_len, page_len, off = 4, ret;
271 struct t10_vpd *vpd;
272
273 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
274 if (!buf)
275 return;
276
277 memset(cdb, 0, MAX_COMMAND_SIZE);
278 cdb[0] = INQUIRY;
279 cdb[1] = 0x01; /* Query VPD */
280 cdb[2] = 0x83; /* Device Identifier */
281 cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
282 cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
283
284 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
285 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
286 NULL, HZ, 1, NULL);
287 if (ret)
288 goto out;
289
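	/*
	 * VPD page 0x83 layout: 4-byte header with the page length at bytes
	 * 2-3, followed by designation descriptors.  Each descriptor carries
	 * its identifier length at byte 3 and is (identifier length + 4)
	 * bytes long in total, which is how the loop below advances.
	 */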
290 page_len = (buf[2] << 8) | buf[3];
291 while (page_len > 0) {
292 /* Grab a pointer to the Identification descriptor */
293 page_83 = &buf[off];
294 ident_len = page_83[3];
295 if (!ident_len) {
296 printk(KERN_ERR "page_83[3]: identifier"
297 " length zero!\n");
298 break;
299 }
 300		printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len);
301
302 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
303 if (!vpd) {
304 printk(KERN_ERR "Unable to allocate memory for"
305 " struct t10_vpd\n");
306 goto out;
307 }
308 INIT_LIST_HEAD(&vpd->vpd_list);
309
310 transport_set_vpd_proto_id(vpd, page_83);
311 transport_set_vpd_assoc(vpd, page_83);
312
313 if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
314 off += (ident_len + 4);
315 page_len -= (ident_len + 4);
316 kfree(vpd);
317 continue;
318 }
319 if (transport_set_vpd_ident(vpd, page_83) < 0) {
320 off += (ident_len + 4);
321 page_len -= (ident_len + 4);
322 kfree(vpd);
323 continue;
324 }
325
326 list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
327 off += (ident_len + 4);
328 page_len -= (ident_len + 4);
329 }
330
331out:
332 kfree(buf);
333}
334
335/* pscsi_add_device_to_list():
336 *
337 *
338 */
339static struct se_device *pscsi_add_device_to_list(
340 struct se_hba *hba,
341 struct se_subsystem_dev *se_dev,
342 struct pscsi_dev_virt *pdv,
343 struct scsi_device *sd,
344 int dev_flags)
345{
346 struct se_device *dev;
347 struct se_dev_limits dev_limits;
348 struct request_queue *q;
349 struct queue_limits *limits;
350
351 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
352
353 if (!sd->queue_depth) {
354 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
355
356 printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
357 " queue_depth to %d\n", sd->channel, sd->id,
358 sd->lun, sd->queue_depth);
359 }
360 /*
361 * Setup the local scope queue_limits from struct request_queue->limits
362 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
363 */
364 q = sd->request_queue;
365 limits = &dev_limits.limits;
366 limits->logical_block_size = sd->sector_size;
367 limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
368 queue_max_hw_sectors(q) : sd->host->max_sectors;
369 limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
370 queue_max_sectors(q) : sd->host->max_sectors;
371 dev_limits.hw_queue_depth = sd->queue_depth;
372 dev_limits.queue_depth = sd->queue_depth;
373 /*
374 * Setup our standard INQUIRY info into se_dev->t10_wwn
375 */
376 pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
377
378 /*
 379	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
 380	 * which has already been referenced by the Linux SCSI code with
 381	 * scsi_device_get() in this file's pscsi_create_virtdevice().
 382	 *
 383	 * The passthrough operations called by the transport_add_device_*
 384	 * function below will require this pointer to be set for passthrough
 385	 * ops.
386 *
387 * For the shutdown case in pscsi_free_device(), this struct
388 * scsi_device reference is released with Linux SCSI code
389 * scsi_device_put() and the pdv->pdv_sd cleared.
390 */
391 pdv->pdv_sd = sd;
392
393 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
394 se_dev, dev_flags, (void *)pdv,
395 &dev_limits, NULL, NULL);
396 if (!(dev)) {
397 pdv->pdv_sd = NULL;
398 return NULL;
399 }
400
401 /*
402 * Locate VPD WWN Information used for various purposes within
403 * the Storage Engine.
404 */
405 if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
406 /*
407 * If VPD Unit Serial returned GOOD status, try
408 * VPD Device Identification page (0x83).
409 */
410 pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
411 }
412
413 /*
414 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
415 */
416 if (sd->type == TYPE_TAPE)
417 pscsi_tape_read_blocksize(dev, sd);
418 return dev;
419}
420
421static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
422{
423 struct pscsi_dev_virt *pdv;
424
425 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
426 if (!(pdv)) {
427 printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
428 return NULL;
429 }
430 pdv->pdv_se_hba = hba;
431
432 printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
433 return (void *)pdv;
434}
435
436/*
 437 * Called with struct Scsi_Host->host_lock held.
438 */
439static struct se_device *pscsi_create_type_disk(
440 struct scsi_device *sd,
441 struct pscsi_dev_virt *pdv,
442 struct se_subsystem_dev *se_dev,
443 struct se_hba *hba)
444 __releases(sh->host_lock)
445{
446 struct se_device *dev;
447 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
448 struct Scsi_Host *sh = sd->host;
449 struct block_device *bd;
450 u32 dev_flags = 0;
451
452 if (scsi_device_get(sd)) {
453 printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
454 sh->host_no, sd->channel, sd->id, sd->lun);
455 spin_unlock_irq(sh->host_lock);
456 return NULL;
457 }
458 spin_unlock_irq(sh->host_lock);
459 /*
460 * Claim exclusive struct block_device access to struct scsi_device
461 * for TYPE_DISK using supplied udev_path
462 */
463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
465 if (IS_ERR(bd)) {
466 printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
467 scsi_device_put(sd);
468 return NULL;
469 }
470 pdv->pdv_bd = bd;
471
472 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
473 if (!(dev)) {
474 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
475 scsi_device_put(sd);
476 return NULL;
477 }
478 printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
479 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
480
481 return dev;
482}
483
484/*
 485 * Called with struct Scsi_Host->host_lock held.
486 */
487static struct se_device *pscsi_create_type_rom(
488 struct scsi_device *sd,
489 struct pscsi_dev_virt *pdv,
490 struct se_subsystem_dev *se_dev,
491 struct se_hba *hba)
492 __releases(sh->host_lock)
493{
494 struct se_device *dev;
495 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
496 struct Scsi_Host *sh = sd->host;
497 u32 dev_flags = 0;
498
499 if (scsi_device_get(sd)) {
500 printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
501 sh->host_no, sd->channel, sd->id, sd->lun);
502 spin_unlock_irq(sh->host_lock);
503 return NULL;
504 }
505 spin_unlock_irq(sh->host_lock);
506
507 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
508 if (!(dev)) {
509 scsi_device_put(sd);
510 return NULL;
511 }
512 printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
513 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
514 sd->channel, sd->id, sd->lun);
515
516 return dev;
517}
518
519/*
 520 * Called with struct Scsi_Host->host_lock held.
521 */
522static struct se_device *pscsi_create_type_other(
523 struct scsi_device *sd,
524 struct pscsi_dev_virt *pdv,
525 struct se_subsystem_dev *se_dev,
526 struct se_hba *hba)
527 __releases(sh->host_lock)
528{
529 struct se_device *dev;
530 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
531 struct Scsi_Host *sh = sd->host;
532 u32 dev_flags = 0;
533
534 spin_unlock_irq(sh->host_lock);
535 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
536 if (!(dev))
537 return NULL;
538
539 printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
540 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
541 sd->channel, sd->id, sd->lun);
542
543 return dev;
544}
545
546static struct se_device *pscsi_create_virtdevice(
547 struct se_hba *hba,
548 struct se_subsystem_dev *se_dev,
549 void *p)
550{
551 struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
552 struct se_device *dev;
553 struct scsi_device *sd;
554 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
555 struct Scsi_Host *sh = phv->phv_lld_host;
556 int legacy_mode_enable = 0;
557
558 if (!(pdv)) {
559 printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
560 " parameter\n");
561 return ERR_PTR(-EINVAL);
562 }
563 /*
564 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
565 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
566 */
567 if (!(sh)) {
568 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
569 printk(KERN_ERR "pSCSI: Unable to locate struct"
570 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
571 return ERR_PTR(-ENODEV);
572 }
573 /*
574 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
575 * reference, we enforce that udev_path has been set
576 */
577 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
578 printk(KERN_ERR "pSCSI: udev_path attribute has not"
579 " been set before ENABLE=1\n");
580 return ERR_PTR(-EINVAL);
581 }
582 /*
583 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
584 * use the original TCM hba ID to reference Linux/SCSI Host No
585 * and enable for PHV_LLD_SCSI_HOST_NO mode.
586 */
587 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
588 spin_lock(&hba->device_lock);
589 if (!(list_empty(&hba->hba_dev_list))) {
590 printk(KERN_ERR "pSCSI: Unable to set hba_mode"
591 " with active devices\n");
592 spin_unlock(&hba->device_lock);
593 return ERR_PTR(-EEXIST);
594 }
595 spin_unlock(&hba->device_lock);
596
597 if (pscsi_pmode_enable_hba(hba, 1) != 1)
598 return ERR_PTR(-ENODEV);
599
600 legacy_mode_enable = 1;
601 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
602 sh = phv->phv_lld_host;
603 } else {
604 sh = pscsi_get_sh(pdv->pdv_host_id);
605 if (!(sh)) {
606 printk(KERN_ERR "pSCSI: Unable to locate"
607 " pdv_host_id: %d\n", pdv->pdv_host_id);
608 return ERR_PTR(-ENODEV);
609 }
610 }
611 } else {
612 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
613 printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
614 " struct Scsi_Host exists\n");
615 return ERR_PTR(-EEXIST);
616 }
617 }
618
619 spin_lock_irq(sh->host_lock);
620 list_for_each_entry(sd, &sh->__devices, siblings) {
621 if ((pdv->pdv_channel_id != sd->channel) ||
622 (pdv->pdv_target_id != sd->id) ||
623 (pdv->pdv_lun_id != sd->lun))
624 continue;
625 /*
626 * Functions will release the held struct scsi_host->host_lock
 627		 * before calling pscsi_add_device_to_list() to register
628 * struct scsi_device with target_core_mod.
629 */
630 switch (sd->type) {
631 case TYPE_DISK:
632 dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
633 break;
634 case TYPE_ROM:
635 dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
636 break;
637 default:
638 dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
639 break;
640 }
641
642 if (!(dev)) {
643 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
644 scsi_host_put(sh);
645 else if (legacy_mode_enable) {
646 pscsi_pmode_enable_hba(hba, 0);
647 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
648 }
649 pdv->pdv_sd = NULL;
650 return ERR_PTR(-ENODEV);
651 }
652 return dev;
653 }
654 spin_unlock_irq(sh->host_lock);
655
656 printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
657 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
658
659 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
660 scsi_host_put(sh);
661 else if (legacy_mode_enable) {
662 pscsi_pmode_enable_hba(hba, 0);
663 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
664 }
665
666 return ERR_PTR(-ENODEV);
667}
668
669/* pscsi_free_device(): (Part of se_subsystem_api_t template)
670 *
671 *
672 */
673static void pscsi_free_device(void *p)
674{
675 struct pscsi_dev_virt *pdv = p;
676 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
677 struct scsi_device *sd = pdv->pdv_sd;
678
679 if (sd) {
680 /*
681 * Release exclusive pSCSI internal struct block_device claim for
682 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
683 */
684 if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
685 blkdev_put(pdv->pdv_bd,
686 FMODE_WRITE|FMODE_READ|FMODE_EXCL);
687 pdv->pdv_bd = NULL;
688 }
689 /*
690 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
691 * to struct Scsi_Host now.
692 */
693 if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
694 (phv->phv_lld_host != NULL))
695 scsi_host_put(phv->phv_lld_host);
696
697 if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
698 scsi_device_put(sd);
699
700 pdv->pdv_sd = NULL;
701 }
702
703 kfree(pdv);
704}
705
706static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
707{
708 return container_of(task, struct pscsi_plugin_task, pscsi_task);
709}
710
711
712/* pscsi_transport_complete():
713 *
714 *
715 */
716static int pscsi_transport_complete(struct se_task *task)
717{
718 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
719 struct scsi_device *sd = pdv->pdv_sd;
720 int result;
721 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
722 unsigned char *cdb = &pt->pscsi_cdb[0];
723
724 result = pt->pscsi_result;
725 /*
726 * Hack to make sure that Write-Protect modepage is set if R/O mode is
727 * forced.
728 */
729 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
730 (status_byte(result) << 1) == SAM_STAT_GOOD) {
731 if (!TASK_CMD(task)->se_deve)
732 goto after_mode_sense;
733
734 if (TASK_CMD(task)->se_deve->lun_flags &
735 TRANSPORT_LUNFLAGS_READ_ONLY) {
736 unsigned char *buf = (unsigned char *)
737 T_TASK(task->task_se_cmd)->t_task_buf;
738
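			/*
			 * The WP bit (0x80) lives in the device-specific
			 * parameter byte of the mode parameter header:
			 * byte 3 for MODE SENSE(10), byte 2 for MODE SENSE(6).
			 */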
739 if (cdb[0] == MODE_SENSE_10) {
740 if (!(buf[3] & 0x80))
741 buf[3] |= 0x80;
742 } else {
743 if (!(buf[2] & 0x80))
744 buf[2] |= 0x80;
745 }
746 }
747 }
748after_mode_sense:
749
750 if (sd->type != TYPE_TAPE)
751 goto after_mode_select;
752
753 /*
754 * Hack to correctly obtain the initiator requested blocksize for
755 * TYPE_TAPE. Since this value is dependent upon each tape media,
756 * struct scsi_device->sector_size will not contain the correct value
757 * by default, so we go ahead and set it so
758 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
759 * storage engine.
760 */
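	/*
	 * Mode parameter list layout: the block descriptor length sits at
	 * byte 3 of the MODE SELECT(6) header or bytes 6-7 of the MODE
	 * SELECT(10) header, and the block length is carried in the first
	 * block descriptor at bytes 9-11 / 13-15 respectively, matching the
	 * offsets used below.
	 */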
761 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
762 (status_byte(result) << 1) == SAM_STAT_GOOD) {
763 unsigned char *buf;
764 struct scatterlist *sg = task->task_sg;
765 u16 bdl;
766 u32 blocksize;
767
768 buf = sg_virt(&sg[0]);
769 if (!(buf)) {
770 printk(KERN_ERR "Unable to get buf for scatterlist\n");
771 goto after_mode_select;
772 }
773
774 if (cdb[0] == MODE_SELECT)
775 bdl = (buf[3]);
776 else
777 bdl = (buf[6] << 8) | (buf[7]);
778
779 if (!bdl)
780 goto after_mode_select;
781
782 if (cdb[0] == MODE_SELECT)
783 blocksize = (buf[9] << 16) | (buf[10] << 8) |
784 (buf[11]);
785 else
786 blocksize = (buf[13] << 16) | (buf[14] << 8) |
787 (buf[15]);
788
789 sd->sector_size = blocksize;
790 }
791after_mode_select:
792
793 if (status_byte(result) & CHECK_CONDITION)
794 return 1;
795
796 return 0;
797}
798
799static struct se_task *
800pscsi_alloc_task(struct se_cmd *cmd)
801{
802 struct pscsi_plugin_task *pt;
803 unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
804
805 pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
806 if (!pt) {
807 printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
808 return NULL;
809 }
810
811 /*
812 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
813 * allocate the extended CDB buffer for per struct se_task context
814 * pt->pscsi_cdb now.
815 */
816 if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
817
818 pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
819 if (!(pt->pscsi_cdb)) {
820 printk(KERN_ERR "pSCSI: Unable to allocate extended"
821 " pt->pscsi_cdb\n");
822 kfree(pt);
823 return NULL;
824 }
825 } else
826 pt->pscsi_cdb = &pt->__pscsi_cdb[0];
827
828 return &pt->pscsi_task;
829}
830
831static inline void pscsi_blk_init_request(
832 struct se_task *task,
833 struct pscsi_plugin_task *pt,
834 struct request *req,
835 int bidi_read)
836{
837 /*
838 * Defined as "scsi command" in include/linux/blkdev.h.
839 */
840 req->cmd_type = REQ_TYPE_BLOCK_PC;
841 /*
842 * For the extra BIDI-COMMAND READ struct request we do not
843 * need to setup the remaining structure members
844 */
845 if (bidi_read)
846 return;
847 /*
848 * Setup the done function pointer for struct request,
 849	 * also set the end_io_data pointer to struct se_task.
850 */
851 req->end_io = pscsi_req_done;
852 req->end_io_data = (void *)task;
853 /*
854 * Load the referenced struct se_task's SCSI CDB into
855 * include/linux/blkdev.h:struct request->cmd
856 */
857 req->cmd_len = scsi_command_size(pt->pscsi_cdb);
858 req->cmd = &pt->pscsi_cdb[0];
859 /*
860 * Setup pointer for outgoing sense data.
861 */
862 req->sense = (void *)&pt->pscsi_sense[0];
863 req->sense_len = 0;
864}
865
866/*
867 * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
 868 */
869static int pscsi_blk_get_request(struct se_task *task)
870{
871 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
872 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
873
874 pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
875 (task->task_data_direction == DMA_TO_DEVICE),
876 GFP_KERNEL);
877 if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
878 printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
 879			PTR_ERR(pt->pscsi_req));
880 return PYX_TRANSPORT_LU_COMM_FAILURE;
881 }
882 /*
883 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
884 * and setup rq callback, CDB and sense.
885 */
886 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
887 return 0;
888}
889
890/* pscsi_do_task(): (Part of se_subsystem_api_t template)
891 *
892 *
893 */
894static int pscsi_do_task(struct se_task *task)
895{
896 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
897 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
898 /*
899 * Set the struct request->timeout value based on peripheral
900 * device type from SCSI.
901 */
902 if (pdv->pdv_sd->type == TYPE_DISK)
903 pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
904 else
905 pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
906
907 pt->pscsi_req->retries = PS_RETRY;
908 /*
909 * Queue the struct request into the struct scsi_device->request_queue.
910 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
911 * descriptor
912 */
913 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
914 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
915 pscsi_req_done);
916
917 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
918}
919
920static void pscsi_free_task(struct se_task *task)
921{
922 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
923 struct se_cmd *cmd = task->task_se_cmd;
924
925 /*
926 * Release the extended CDB allocation from pscsi_alloc_task()
927 * if one exists.
928 */
929 if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
930 kfree(pt->pscsi_cdb);
931 /*
932 * We do not release the bio(s) here associated with this task, as
933 * this is handled by bio_put() and pscsi_bi_endio().
934 */
935 kfree(pt);
936}
937
938enum {
939 Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
940 Opt_scsi_lun_id, Opt_err
941};
942
943static match_table_t tokens = {
944 {Opt_scsi_host_id, "scsi_host_id=%d"},
945 {Opt_scsi_channel_id, "scsi_channel_id=%d"},
946 {Opt_scsi_target_id, "scsi_target_id=%d"},
947 {Opt_scsi_lun_id, "scsi_lun_id=%d"},
948 {Opt_err, NULL}
949};
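/*
 * Example control string (hypothetical values): writing
 * "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0" to the
 * pSCSI se_subsystem_dev's configfs control attribute sets all four
 * identifiers; note that scsi_host_id= is rejected below when phv_mode is
 * PHV_LLD_SCSI_HOST_NO.
 */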
950
951static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
952 struct se_subsystem_dev *se_dev,
953 const char *page,
954 ssize_t count)
955{
956 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
957 struct pscsi_hba_virt *phv = hba->hba_ptr;
958 char *orig, *ptr, *opts;
959 substring_t args[MAX_OPT_ARGS];
960 int ret = 0, arg, token;
961
962 opts = kstrdup(page, GFP_KERNEL);
963 if (!opts)
964 return -ENOMEM;
965
966 orig = opts;
967
968 while ((ptr = strsep(&opts, ",")) != NULL) {
969 if (!*ptr)
970 continue;
971
972 token = match_token(ptr, tokens, args);
973 switch (token) {
974 case Opt_scsi_host_id:
975 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
976 printk(KERN_ERR "PSCSI[%d]: Unable to accept"
977 " scsi_host_id while phv_mode =="
978 " PHV_LLD_SCSI_HOST_NO\n",
979 phv->phv_host_id);
980 ret = -EINVAL;
981 goto out;
982 }
983 match_int(args, &arg);
984 pdv->pdv_host_id = arg;
985 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
986 " %d\n", phv->phv_host_id, pdv->pdv_host_id);
987 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
988 break;
989 case Opt_scsi_channel_id:
990 match_int(args, &arg);
991 pdv->pdv_channel_id = arg;
992 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
993 " ID: %d\n", phv->phv_host_id,
994 pdv->pdv_channel_id);
995 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
996 break;
997 case Opt_scsi_target_id:
998 match_int(args, &arg);
999 pdv->pdv_target_id = arg;
1000 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
1001 " ID: %d\n", phv->phv_host_id,
1002 pdv->pdv_target_id);
1003 pdv->pdv_flags |= PDF_HAS_TARGET_ID;
1004 break;
1005 case Opt_scsi_lun_id:
1006 match_int(args, &arg);
1007 pdv->pdv_lun_id = arg;
1008 printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
1009 " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
1010 pdv->pdv_flags |= PDF_HAS_LUN_ID;
1011 break;
1012 default:
1013 break;
1014 }
1015 }
1016
1017out:
1018 kfree(orig);
1019 return (!ret) ? count : ret;
1020}
1021
1022static ssize_t pscsi_check_configfs_dev_params(
1023 struct se_hba *hba,
1024 struct se_subsystem_dev *se_dev)
1025{
1026 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
1027
1028 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
1029 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
1030 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
1031 printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
1032 " scsi_lun_id= parameters\n");
1033 return -1;
1034 }
1035
1036 return 0;
1037}
1038
1039static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
1040 struct se_subsystem_dev *se_dev,
1041 char *b)
1042{
1043 struct pscsi_hba_virt *phv = hba->hba_ptr;
1044 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
1045 struct scsi_device *sd = pdv->pdv_sd;
1046 unsigned char host_id[16];
1047 ssize_t bl;
1048 int i;
1049
1050 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
1051 snprintf(host_id, 16, "%d", pdv->pdv_host_id);
1052 else
1053 snprintf(host_id, 16, "PHBA Mode");
1054
1055 bl = sprintf(b, "SCSI Device Bus Location:"
1056 " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
1057 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
1058 host_id);
1059
1060 if (sd) {
1061 bl += sprintf(b + bl, " ");
1062 bl += sprintf(b + bl, "Vendor: ");
1063 for (i = 0; i < 8; i++) {
1064 if (ISPRINT(sd->vendor[i])) /* printable character? */
1065 bl += sprintf(b + bl, "%c", sd->vendor[i]);
1066 else
1067 bl += sprintf(b + bl, " ");
1068 }
1069 bl += sprintf(b + bl, " Model: ");
1070 for (i = 0; i < 16; i++) {
1071 if (ISPRINT(sd->model[i])) /* printable character ? */
1072 bl += sprintf(b + bl, "%c", sd->model[i]);
1073 else
1074 bl += sprintf(b + bl, " ");
1075 }
1076 bl += sprintf(b + bl, " Rev: ");
1077 for (i = 0; i < 4; i++) {
1078 if (ISPRINT(sd->rev[i])) /* printable character ? */
1079 bl += sprintf(b + bl, "%c", sd->rev[i]);
1080 else
1081 bl += sprintf(b + bl, " ");
1082 }
1083 bl += sprintf(b + bl, "\n");
1084 }
1085 return bl;
1086}
1087
1088static void pscsi_bi_endio(struct bio *bio, int error)
1089{
1090 bio_put(bio);
1091}
1092
1093static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
1094{
1095 struct bio *bio;
1096 /*
 1097	 * Use bio_kmalloc() following the comment for bio -> struct request
1098 * in block/blk-core.c:blk_make_request()
1099 */
1100 bio = bio_kmalloc(GFP_KERNEL, sg_num);
1101 if (!(bio)) {
1102 printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
1103 return NULL;
1104 }
1105 bio->bi_end_io = pscsi_bi_endio;
1106
1107 return bio;
1108}
1109
1110#if 0
1111#define DEBUG_PSCSI(x...) printk(x)
1112#else
1113#define DEBUG_PSCSI(x...)
1114#endif
1115
1116static int __pscsi_map_task_SG(
1117 struct se_task *task,
1118 struct scatterlist *task_sg,
1119 u32 task_sg_num,
1120 int bidi_read)
1121{
1122 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1123 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
1124 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
1125 struct page *page;
1126 struct scatterlist *sg;
1127 u32 data_len = task->task_size, i, len, bytes, off;
1128 int nr_pages = (task->task_size + task_sg[0].offset +
1129 PAGE_SIZE - 1) >> PAGE_SHIFT;
1130 int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
1131 int rw = (task->task_data_direction == DMA_TO_DEVICE);
1132
1133 if (!task->task_size)
1134 return 0;
1135 /*
1136 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
 1137	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
1138 * struct scatterlist memory. The struct se_task->task_sg[] currently needs
1139 * to be attached to struct bios for submission to Linux/SCSI using
1140 * struct request to struct scsi_device->request_queue.
1141 *
1142 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
1143 * is ported to upstream SCSI passthrough functionality that accepts
 1144	 * struct scatterlist->page_link or struct page as a parameter.
1145 */
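	/*
	 * bios are allocated up to BIO_MAX_PAGES vecs at a time in the loop
	 * below and chained via bi_next (hbio is the head, tbio the tail), so
	 * a single blk_make_request() call can later turn the whole chain
	 * into one struct request.
	 */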
1146 DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
1147
1148 for_each_sg(task_sg, sg, task_sg_num, i) {
1149 page = sg_page(sg);
1150 off = sg->offset;
1151 len = sg->length;
1152
1153 DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
1154 page, len, off);
1155
1156 while (len > 0 && data_len > 0) {
1157 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
1158 bytes = min(bytes, data_len);
1159
1160 if (!(bio)) {
1161 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
1162 nr_pages -= nr_vecs;
1163 /*
1164 * Calls bio_kmalloc() and sets bio->bi_end_io()
1165 */
1166 bio = pscsi_get_bio(pdv, nr_vecs);
1167 if (!(bio))
1168 goto fail;
1169
1170 if (rw)
1171 bio->bi_rw |= REQ_WRITE;
1172
1173 DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
1174 " dir: %s nr_vecs: %d\n", bio,
1175 (rw) ? "rw" : "r", nr_vecs);
1176 /*
1177 * Set *hbio pointer to handle the case:
1178 * nr_pages > BIO_MAX_PAGES, where additional
1179 * bios need to be added to complete a given
1180 * struct se_task
1181 */
1182 if (!hbio)
1183 hbio = tbio = bio;
1184 else
1185 tbio = tbio->bi_next = bio;
1186 }
1187
1188 DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
1189 " bio: %p page: %p len: %d off: %d\n", i, bio,
1190 page, len, off);
1191
1192 rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
1193 bio, page, bytes, off);
1194 if (rc != bytes)
1195 goto fail;
1196
1197 DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
1198 bio->bi_vcnt, nr_vecs);
1199
1200 if (bio->bi_vcnt > nr_vecs) {
1201 DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
1202 " %d i: %d bio: %p, allocating another"
1203 " bio\n", bio->bi_vcnt, i, bio);
1204 /*
1205 * Clear the pointer so that another bio will
1206 * be allocated with pscsi_get_bio() above, the
1207 * current bio has already been set *tbio and
1208 * bio->bi_next.
1209 */
1210 bio = NULL;
1211 }
1212
1213 page++;
1214 len -= bytes;
1215 data_len -= bytes;
1216 off = 0;
1217 }
1218 }
1219 /*
1220 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
 1221	 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
1222 */
1223 if (!(bidi_read)) {
1224 /*
1225 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
1226 * allocate the pSCSI task a struct request.
1227 */
1228 pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
1229 hbio, GFP_KERNEL);
1230 if (!(pt->pscsi_req)) {
1231 printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
1232 goto fail;
1233 }
1234 /*
1235 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
1236 * and setup rq callback, CDB and sense.
1237 */
1238 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
1239
1240 return task->task_sg_num;
1241 }
1242 /*
1243 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
 1244	 * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
1245 */
1246 pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
1247 hbio, GFP_KERNEL);
1248 if (!(pt->pscsi_req->next_rq)) {
1249 printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
1250 goto fail;
1251 }
1252 pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
1253
1254 return task->task_sg_num;
1255fail:
1256 while (hbio) {
1257 bio = hbio;
1258 hbio = hbio->bi_next;
1259 bio->bi_next = NULL;
1260 bio_endio(bio, 0);
1261 }
1262 return ret;
1263}
1264
1265static int pscsi_map_task_SG(struct se_task *task)
1266{
1267 int ret;
1268
1269 /*
1270 * Setup the main struct request for the task->task_sg[] payload
1271 */
1272
1273 ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
1274 if (ret >= 0 && task->task_sg_bidi) {
1275 /*
1276 * If present, set up the extra BIDI-COMMAND SCSI READ
1277 * struct request and payload.
1278 */
1279 ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
1280 task->task_sg_num, 1);
1281 }
1282
1283 if (ret < 0)
1284 return PYX_TRANSPORT_LU_COMM_FAILURE;
1285 return 0;
1286}
1287
1288/* pscsi_map_task_non_SG():
1289 *
1290 *
1291 */
1292static int pscsi_map_task_non_SG(struct se_task *task)
1293{
1294 struct se_cmd *cmd = TASK_CMD(task);
1295 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1296 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
1297 int ret = 0;
1298
1299 if (pscsi_blk_get_request(task) < 0)
1300 return PYX_TRANSPORT_LU_COMM_FAILURE;
1301
1302 if (!task->task_size)
1303 return 0;
1304
1305 ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
1306 pt->pscsi_req, T_TASK(cmd)->t_task_buf,
1307 task->task_size, GFP_KERNEL);
1308 if (ret < 0) {
1309 printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
1310 return PYX_TRANSPORT_LU_COMM_FAILURE;
1311 }
1312 return 0;
1313}
1314
1315static int pscsi_CDB_none(struct se_task *task)
1316{
1317 return pscsi_blk_get_request(task);
1318}
1319
1320/* pscsi_get_cdb():
1321 *
1322 *
1323 */
1324static unsigned char *pscsi_get_cdb(struct se_task *task)
1325{
1326 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1327
1328 return pt->pscsi_cdb;
1329}
1330
1331/* pscsi_get_sense_buffer():
1332 *
1333 *
1334 */
1335static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
1336{
1337 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1338
1339 return (unsigned char *)&pt->pscsi_sense[0];
1340}
1341
1342/* pscsi_get_device_rev():
1343 *
1344 *
1345 */
1346static u32 pscsi_get_device_rev(struct se_device *dev)
1347{
1348 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1349 struct scsi_device *sd = pdv->pdv_sd;
1350
1351 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
1352}
1353
1354/* pscsi_get_device_type():
1355 *
1356 *
1357 */
1358static u32 pscsi_get_device_type(struct se_device *dev)
1359{
1360 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1361 struct scsi_device *sd = pdv->pdv_sd;
1362
1363 return sd->type;
1364}
1365
1366static sector_t pscsi_get_blocks(struct se_device *dev)
1367{
1368 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1369
1370 if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
1371 return pdv->pdv_bd->bd_part->nr_sects;
1372
1373 dump_stack();
1374 return 0;
1375}
1376
 1377/* pscsi_process_SAM_status():
1378 *
1379 *
1380 */
1381static inline void pscsi_process_SAM_status(
1382 struct se_task *task,
1383 struct pscsi_plugin_task *pt)
1384{
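	/*
	 * status_byte() returns the SCSI status shifted right by one bit, so
	 * shifting it back left below restores the SAM status value (e.g.
	 * SAM_STAT_CHECK_CONDITION) expected by the target core.
	 */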
1385 task->task_scsi_status = status_byte(pt->pscsi_result);
1386 if ((task->task_scsi_status)) {
1387 task->task_scsi_status <<= 1;
1388 printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
1389 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1390 pt->pscsi_result);
1391 }
1392
1393 switch (host_byte(pt->pscsi_result)) {
1394 case DID_OK:
1395 transport_complete_task(task, (!task->task_scsi_status));
1396 break;
1397 default:
1398 printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
1399 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1400 pt->pscsi_result);
1401 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
1402 task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1403 TASK_CMD(task)->transport_error_status =
1404 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1405 transport_complete_task(task, 0);
1406 break;
1407 }
1408
1409 return;
1410}
1411
1412static void pscsi_req_done(struct request *req, int uptodate)
1413{
1414 struct se_task *task = req->end_io_data;
1415 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1416
1417 pt->pscsi_result = req->errors;
1418 pt->pscsi_resid = req->resid_len;
1419
1420 pscsi_process_SAM_status(task, pt);
1421 /*
1422 * Release BIDI-READ if present
1423 */
1424 if (req->next_rq != NULL)
1425 __blk_put_request(req->q, req->next_rq);
1426
1427 __blk_put_request(req->q, req);
1428 pt->pscsi_req = NULL;
1429}
1430
1431static struct se_subsystem_api pscsi_template = {
1432 .name = "pscsi",
1433 .owner = THIS_MODULE,
1434 .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
1435 .cdb_none = pscsi_CDB_none,
1436 .map_task_non_SG = pscsi_map_task_non_SG,
1437 .map_task_SG = pscsi_map_task_SG,
1438 .attach_hba = pscsi_attach_hba,
1439 .detach_hba = pscsi_detach_hba,
1440 .pmode_enable_hba = pscsi_pmode_enable_hba,
1441 .allocate_virtdevice = pscsi_allocate_virtdevice,
1442 .create_virtdevice = pscsi_create_virtdevice,
1443 .free_device = pscsi_free_device,
1444 .transport_complete = pscsi_transport_complete,
1445 .alloc_task = pscsi_alloc_task,
1446 .do_task = pscsi_do_task,
1447 .free_task = pscsi_free_task,
1448 .check_configfs_dev_params = pscsi_check_configfs_dev_params,
1449 .set_configfs_dev_params = pscsi_set_configfs_dev_params,
1450 .show_configfs_dev_params = pscsi_show_configfs_dev_params,
1451 .get_cdb = pscsi_get_cdb,
1452 .get_sense_buffer = pscsi_get_sense_buffer,
1453 .get_device_rev = pscsi_get_device_rev,
1454 .get_device_type = pscsi_get_device_type,
1455 .get_blocks = pscsi_get_blocks,
1456};
1457
1458static int __init pscsi_module_init(void)
1459{
1460 return transport_subsystem_register(&pscsi_template);
1461}
1462
1463static void pscsi_module_exit(void)
1464{
1465 transport_subsystem_release(&pscsi_template);
1466}
1467
1468MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
1469MODULE_AUTHOR("nab@Linux-iSCSI.org");
1470MODULE_LICENSE("GPL");
1471
1472module_init(pscsi_module_init);
1473module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 000000000000..a4cd5d352c3a
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
1#ifndef TARGET_CORE_PSCSI_H
2#define TARGET_CORE_PSCSI_H
3
4#define PSCSI_VERSION "v4.0"
5#define PSCSI_VIRTUAL_HBA_DEPTH 2048
6
7/* used in pscsi_find_alloc_len() */
8#ifndef INQUIRY_DATA_SIZE
9#define INQUIRY_DATA_SIZE 0x24
10#endif
11
12/* used in pscsi_add_device_to_list() */
13#define PSCSI_DEFAULT_QUEUEDEPTH 1
14
15#define PS_RETRY 5
16#define PS_TIMEOUT_DISK (15*HZ)
17#define PS_TIMEOUT_OTHER (500*HZ)
18
19#include <linux/device.h>
20#include <scsi/scsi_driver.h>
21#include <scsi/scsi_device.h>
22#include <linux/kref.h>
23#include <linux/kobject.h>
24
25struct pscsi_plugin_task {
26 struct se_task pscsi_task;
27 unsigned char *pscsi_cdb;
28 unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
29 unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
30 int pscsi_direction;
31 int pscsi_result;
32 u32 pscsi_resid;
33 struct request *pscsi_req;
34} ____cacheline_aligned;
35
36#define PDF_HAS_CHANNEL_ID 0x01
37#define PDF_HAS_TARGET_ID 0x02
38#define PDF_HAS_LUN_ID 0x04
39#define PDF_HAS_VPD_UNIT_SERIAL 0x08
40#define PDF_HAS_VPD_DEV_IDENT 0x10
41#define PDF_HAS_VIRT_HOST_ID 0x20
42
43struct pscsi_dev_virt {
44 int pdv_flags;
45 int pdv_host_id;
46 int pdv_channel_id;
47 int pdv_target_id;
48 int pdv_lun_id;
49 struct block_device *pdv_bd;
50 struct scsi_device *pdv_sd;
51 struct se_hba *pdv_se_hba;
52} ____cacheline_aligned;
53
54typedef enum phv_modes {
55 PHV_VIRUTAL_HOST_ID,
56 PHV_LLD_SCSI_HOST_NO
57} phv_modes_t;
58
59struct pscsi_hba_virt {
60 int phv_host_id;
61 phv_modes_t phv_mode;
62 struct Scsi_Host *phv_lld_host;
63} ____cacheline_aligned;
64
65#endif /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 000000000000..7837dd365a9d
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
1/*******************************************************************************
2 * Filename: target_core_rd.c
3 *
4 * This file contains the Storage Engine <-> Ramdisk transport
5 * specific functions.
6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 *
12 * Nicholas A. Bellinger <nab@kernel.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/version.h>
31#include <linux/string.h>
32#include <linux/parser.h>
33#include <linux/timer.h>
34#include <linux/blkdev.h>
35#include <linux/slab.h>
36#include <linux/spinlock.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39
40#include <target/target_core_base.h>
41#include <target/target_core_device.h>
42#include <target/target_core_transport.h>
43#include <target/target_core_fabric_ops.h>
44
45#include "target_core_rd.h"
46
47static struct se_subsystem_api rd_dr_template;
48static struct se_subsystem_api rd_mcp_template;
49
50/* #define DEBUG_RAMDISK_MCP */
51/* #define DEBUG_RAMDISK_DR */
52
53/* rd_attach_hba(): (Part of se_subsystem_api_t template)
54 *
55 *
56 */
57static int rd_attach_hba(struct se_hba *hba, u32 host_id)
58{
59 struct rd_host *rd_host;
60
61 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
62 if (!(rd_host)) {
63 printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
64 return -ENOMEM;
65 }
66
67 rd_host->rd_host_id = host_id;
68
69 atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
70 atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
71 hba->hba_ptr = (void *) rd_host;
72
73 printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
74 " Generic Target Core Stack %s\n", hba->hba_id,
75 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
76 printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
77 " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
78 rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
79 RD_MAX_SECTORS);
80
81 return 0;
82}
83
84static void rd_detach_hba(struct se_hba *hba)
85{
86 struct rd_host *rd_host = hba->hba_ptr;
87
88 printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
89 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
90
91 kfree(rd_host);
92 hba->hba_ptr = NULL;
93}
94
95/* rd_release_device_space():
96 *
97 *
98 */
99static void rd_release_device_space(struct rd_dev *rd_dev)
100{
101 u32 i, j, page_count = 0, sg_per_table;
102 struct rd_dev_sg_table *sg_table;
103 struct page *pg;
104 struct scatterlist *sg;
105
106 if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
107 return;
108
109 sg_table = rd_dev->sg_table_array;
110
111 for (i = 0; i < rd_dev->sg_table_count; i++) {
112 sg = sg_table[i].sg_table;
113 sg_per_table = sg_table[i].rd_sg_count;
114
115 for (j = 0; j < sg_per_table; j++) {
116 pg = sg_page(&sg[j]);
117 if ((pg)) {
118 __free_page(pg);
119 page_count++;
120 }
121 }
122
123 kfree(sg);
124 }
125
126 printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
127 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
128 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
129 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
130
131 kfree(sg_table);
132 rd_dev->sg_table_array = NULL;
133 rd_dev->sg_table_count = 0;
134}
135
136
137/* rd_build_device_space():
138 *
139 *
140 */
141static int rd_build_device_space(struct rd_dev *rd_dev)
142{
143 u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
144 u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
145 sizeof(struct scatterlist));
146 struct rd_dev_sg_table *sg_table;
147 struct page *pg;
148 struct scatterlist *sg;
149
150 if (rd_dev->rd_page_count <= 0) {
151 printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
152 rd_dev->rd_page_count);
153 return -EINVAL;
154 }
155 total_sg_needed = rd_dev->rd_page_count;
156
157 sg_tables = (total_sg_needed / max_sg_per_table) + 1;
158
159 sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
160 if (!(sg_table)) {
161 printk(KERN_ERR "Unable to allocate memory for Ramdisk"
162 " scatterlist tables\n");
163 return -ENOMEM;
164 }
165
166 rd_dev->sg_table_array = sg_table;
167 rd_dev->sg_table_count = sg_tables;
168
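	/*
	 * Allocate the backing pages in chunks: each rd_dev_sg_table holds at
	 * most max_sg_per_table scatterlist entries (one page per entry) and
	 * records the page range it covers in page_start_offset and
	 * page_end_offset, so rd_get_sg_table() can later map a page index
	 * back to its table.
	 */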
169 while (total_sg_needed) {
170 sg_per_table = (total_sg_needed > max_sg_per_table) ?
171 max_sg_per_table : total_sg_needed;
172
173 sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
174 GFP_KERNEL);
175 if (!(sg)) {
176 printk(KERN_ERR "Unable to allocate scatterlist array"
177 " for struct rd_dev\n");
178 return -ENOMEM;
179 }
180
181 sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
182
183 sg_table[i].sg_table = sg;
184 sg_table[i].rd_sg_count = sg_per_table;
185 sg_table[i].page_start_offset = page_offset;
186 sg_table[i++].page_end_offset = (page_offset + sg_per_table)
187 - 1;
188
189 for (j = 0; j < sg_per_table; j++) {
190 pg = alloc_pages(GFP_KERNEL, 0);
191 if (!(pg)) {
192 printk(KERN_ERR "Unable to allocate scatterlist"
193 " pages for struct rd_dev_sg_table\n");
194 return -ENOMEM;
195 }
196 sg_assign_page(&sg[j], pg);
197 sg[j].length = PAGE_SIZE;
198 }
199
200 page_offset += sg_per_table;
201 total_sg_needed -= sg_per_table;
202 }
203
204 printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
205 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
206 rd_dev->rd_dev_id, rd_dev->rd_page_count,
207 rd_dev->sg_table_count);
208
209 return 0;
210}
211
212static void *rd_allocate_virtdevice(
213 struct se_hba *hba,
214 const char *name,
215 int rd_direct)
216{
217 struct rd_dev *rd_dev;
218 struct rd_host *rd_host = hba->hba_ptr;
219
220 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
221 if (!(rd_dev)) {
222 printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
223 return NULL;
224 }
225
226 rd_dev->rd_host = rd_host;
227 rd_dev->rd_direct = rd_direct;
228
229 return rd_dev;
230}
231
232static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
233{
234 return rd_allocate_virtdevice(hba, name, 1);
235}
236
237static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
238{
239 return rd_allocate_virtdevice(hba, name, 0);
240}
241
242/* rd_create_virtdevice():
243 *
244 *
245 */
246static struct se_device *rd_create_virtdevice(
247 struct se_hba *hba,
248 struct se_subsystem_dev *se_dev,
249 void *p,
250 int rd_direct)
251{
252 struct se_device *dev;
253 struct se_dev_limits dev_limits;
254 struct rd_dev *rd_dev = p;
255 struct rd_host *rd_host = hba->hba_ptr;
256 int dev_flags = 0, ret;
257 char prod[16], rev[4];
258
259 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
260
261 ret = rd_build_device_space(rd_dev);
262 if (ret < 0)
263 goto fail;
264
265 snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
266 snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
267 RD_MCP_VERSION);
268
269 dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
270 dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
271 dev_limits.limits.max_sectors = RD_MAX_SECTORS;
272 dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
273 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
274
275 dev = transport_add_device_to_core_hba(hba,
276 (rd_dev->rd_direct) ? &rd_dr_template :
277 &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
278 &dev_limits, prod, rev);
279 if (!(dev))
280 goto fail;
281
282 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
283 rd_dev->rd_queue_depth = dev->queue_depth;
284
285 printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
286 " %u pages in %u tables, %lu total bytes\n",
287 rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
288 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
289 rd_dev->sg_table_count,
290 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
291
292 return dev;
293
294fail:
295 rd_release_device_space(rd_dev);
296 return ERR_PTR(ret);
297}
298
299static struct se_device *rd_DIRECT_create_virtdevice(
300 struct se_hba *hba,
301 struct se_subsystem_dev *se_dev,
302 void *p)
303{
304 return rd_create_virtdevice(hba, se_dev, p, 1);
305}
306
307static struct se_device *rd_MEMCPY_create_virtdevice(
308 struct se_hba *hba,
309 struct se_subsystem_dev *se_dev,
310 void *p)
311{
312 return rd_create_virtdevice(hba, se_dev, p, 0);
313}
314
315/* rd_free_device(): (Part of se_subsystem_api_t template)
316 *
317 *
318 */
319static void rd_free_device(void *p)
320{
321 struct rd_dev *rd_dev = p;
322
323 rd_release_device_space(rd_dev);
324 kfree(rd_dev);
325}
326
327static inline struct rd_request *RD_REQ(struct se_task *task)
328{
329 return container_of(task, struct rd_request, rd_task);
330}
331
332static struct se_task *
333rd_alloc_task(struct se_cmd *cmd)
334{
335 struct rd_request *rd_req;
336
337 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
338 if (!rd_req) {
339 printk(KERN_ERR "Unable to allocate struct rd_request\n");
340 return NULL;
341 }
342 rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
343
344 return &rd_req->rd_task;
345}
346
347/* rd_get_sg_table():
348 *
349 *
350 */
351static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
352{
353 u32 i;
354 struct rd_dev_sg_table *sg_table;
355
356 for (i = 0; i < rd_dev->sg_table_count; i++) {
357 sg_table = &rd_dev->sg_table_array[i];
358 if ((sg_table->page_start_offset <= page) &&
359 (sg_table->page_end_offset >= page))
360 return sg_table;
361 }
362
363 printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
364 page);
365
366 return NULL;
367}
368
369/* rd_MEMCPY_read():
370 *
371 *
372 */
373static int rd_MEMCPY_read(struct rd_request *req)
374{
375 struct se_task *task = &req->rd_task;
376 struct rd_dev *dev = req->rd_dev;
377 struct rd_dev_sg_table *table;
378 struct scatterlist *sg_d, *sg_s;
379 void *dst, *src;
380 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
381 u32 length, page_end = 0, table_sg_end;
382 u32 rd_offset = req->rd_offset;
383
384 table = rd_get_sg_table(dev, req->rd_page);
385 if (!(table))
386 return -1;
387
388 table_sg_end = (table->page_end_offset - req->rd_page);
389 sg_d = task->task_sg;
390 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
391#ifdef DEBUG_RAMDISK_MCP
392 printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
393 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
394 req->rd_page, req->rd_offset);
395#endif
396 src_offset = rd_offset;
397
398 while (req->rd_size) {
399 if ((sg_d[i].length - dst_offset) <
400 (sg_s[j].length - src_offset)) {
401 length = (sg_d[i].length - dst_offset);
402#ifdef DEBUG_RAMDISK_MCP
403 printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
404 " offset: %u sg_s[%d].length: %u\n", i,
405 &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
406 sg_s[j].length);
407 printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
408 " src_offset: %u\n", length, dst_offset,
409 src_offset);
410#endif
411 if (length > req->rd_size)
412 length = req->rd_size;
413
414 dst = sg_virt(&sg_d[i++]) + dst_offset;
415 if (!dst)
416 BUG();
417
418 src = sg_virt(&sg_s[j]) + src_offset;
419 if (!src)
420 BUG();
421
422 dst_offset = 0;
423 src_offset = length;
424 page_end = 0;
425 } else {
426 length = (sg_s[j].length - src_offset);
427#ifdef DEBUG_RAMDISK_MCP
428 printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
429 " offset: %u sg_s[%d].length: %u\n", i,
430 &sg_d[i], sg_d[i].length, sg_d[i].offset,
431 j, sg_s[j].length);
432 printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
433 " src_offset: %u\n", length, dst_offset,
434 src_offset);
435#endif
436 if (length > req->rd_size)
437 length = req->rd_size;
438
439 dst = sg_virt(&sg_d[i]) + dst_offset;
440 if (!dst)
441 BUG();
442
443 if (sg_d[i].length == length) {
444 i++;
445 dst_offset = 0;
446 } else
447 dst_offset = length;
448
449 src = sg_virt(&sg_s[j++]) + src_offset;
450 if (!src)
451 BUG();
452
453 src_offset = 0;
454 page_end = 1;
455 }
456
457 memcpy(dst, src, length);
458
459#ifdef DEBUG_RAMDISK_MCP
460 printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
461 " i: %u, j: %u\n", req->rd_page,
462 (req->rd_size - length), length, i, j);
463#endif
464 req->rd_size -= length;
465 if (!(req->rd_size))
466 return 0;
467
468 if (!page_end)
469 continue;
470
471 if (++req->rd_page <= table->page_end_offset) {
472#ifdef DEBUG_RAMDISK_MCP
473 printk(KERN_INFO "page: %u in same page table\n",
474 req->rd_page);
475#endif
476 continue;
477 }
478#ifdef DEBUG_RAMDISK_MCP
479 printk(KERN_INFO "getting new page table for page: %u\n",
480 req->rd_page);
481#endif
482 table = rd_get_sg_table(dev, req->rd_page);
483 if (!(table))
484 return -1;
485
486 sg_s = &table->sg_table[j = 0];
487 }
488
489 return 0;
490}
491
492/* rd_MEMCPY_write():
493 *
494 *
495 */
496static int rd_MEMCPY_write(struct rd_request *req)
497{
498 struct se_task *task = &req->rd_task;
499 struct rd_dev *dev = req->rd_dev;
500 struct rd_dev_sg_table *table;
501 struct scatterlist *sg_d, *sg_s;
502 void *dst, *src;
503 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
504 u32 length, page_end = 0, table_sg_end;
505 u32 rd_offset = req->rd_offset;
506
507 table = rd_get_sg_table(dev, req->rd_page);
508 if (!(table))
509 return -1;
510
511 table_sg_end = (table->page_end_offset - req->rd_page);
512 sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
513 sg_s = task->task_sg;
514#ifdef DEBUG_RAMDISK_MCP
515 printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
516 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
517 req->rd_page, req->rd_offset);
518#endif
519 dst_offset = rd_offset;
520
521 while (req->rd_size) {
522 if ((sg_s[i].length - src_offset) <
523 (sg_d[j].length - dst_offset)) {
524 length = (sg_s[i].length - src_offset);
525#ifdef DEBUG_RAMDISK_MCP
526 printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
527 " offset: %d sg_d[%d].length: %u\n", i,
528 &sg_s[i], sg_s[i].length, sg_s[i].offset,
529 j, sg_d[j].length);
530 printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
531 " dst_offset: %u\n", length, src_offset,
532 dst_offset);
533#endif
534 if (length > req->rd_size)
535 length = req->rd_size;
536
537 src = sg_virt(&sg_s[i++]) + src_offset;
538 if (!src)
539 BUG();
540
541 dst = sg_virt(&sg_d[j]) + dst_offset;
542 if (!dst)
543 BUG();
544
545 src_offset = 0;
546 dst_offset = length;
547 page_end = 0;
548 } else {
549 length = (sg_d[j].length - dst_offset);
550#ifdef DEBUG_RAMDISK_MCP
551 printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
552 " offset: %d sg_d[%d].length: %u\n", i,
553 &sg_s[i], sg_s[i].length, sg_s[i].offset,
554 j, sg_d[j].length);
555 printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
556 " dst_offset: %u\n", length, src_offset,
557 dst_offset);
558#endif
559 if (length > req->rd_size)
560 length = req->rd_size;
561
562 src = sg_virt(&sg_s[i]) + src_offset;
563 if (!src)
564 BUG();
565
566 if (sg_s[i].length == length) {
567 i++;
568 src_offset = 0;
569 } else
570 src_offset = length;
571
572 dst = sg_virt(&sg_d[j++]) + dst_offset;
573 if (!dst)
574 BUG();
575
576 dst_offset = 0;
577 page_end = 1;
578 }
579
580 memcpy(dst, src, length);
581
582#ifdef DEBUG_RAMDISK_MCP
583 printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
584 " i: %u, j: %u\n", req->rd_page,
585 (req->rd_size - length), length, i, j);
586#endif
587 req->rd_size -= length;
588 if (!(req->rd_size))
589 return 0;
590
591 if (!page_end)
592 continue;
593
594 if (++req->rd_page <= table->page_end_offset) {
595#ifdef DEBUG_RAMDISK_MCP
596 printk(KERN_INFO "page: %u in same page table\n",
597 req->rd_page);
598#endif
599 continue;
600 }
601#ifdef DEBUG_RAMDISK_MCP
602 printk(KERN_INFO "getting new page table for page: %u\n",
603 req->rd_page);
604#endif
605 table = rd_get_sg_table(dev, req->rd_page);
606 if (!(table))
607 return -1;
608
609 sg_d = &table->sg_table[j = 0];
610 }
611
612 return 0;
613}
614
615/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
616 *
617 *
618 */
619static int rd_MEMCPY_do_task(struct se_task *task)
620{
621 struct se_device *dev = task->se_dev;
622 struct rd_request *req = RD_REQ(task);
623 unsigned long long lba;
624 int ret;
625
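	/*
	 * Translate the logical block address into a backing page index and a
	 * byte offset within that page: rd_page is (LBA * block_size) /
	 * PAGE_SIZE and rd_offset is the remainder expressed in bytes.
	 */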
626 req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
627 lba = task->task_lba;
628 req->rd_offset = (do_div(lba,
629 (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
630 DEV_ATTRIB(dev)->block_size;
631 req->rd_size = task->task_size;
632
633 if (task->task_data_direction == DMA_FROM_DEVICE)
634 ret = rd_MEMCPY_read(req);
635 else
636 ret = rd_MEMCPY_write(req);
637
638 if (ret != 0)
639 return ret;
640
641 task->task_scsi_status = GOOD;
642 transport_complete_task(task, 1);
643
644 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
645}
646
647/* rd_DIRECT_with_offset():
648 *
649 *
650 */
651static int rd_DIRECT_with_offset(
652 struct se_task *task,
653 struct list_head *se_mem_list,
654 u32 *se_mem_cnt,
655 u32 *task_offset)
656{
657 struct rd_request *req = RD_REQ(task);
658 struct rd_dev *dev = req->rd_dev;
659 struct rd_dev_sg_table *table;
660 struct se_mem *se_mem;
661 struct scatterlist *sg_s;
662 u32 j = 0, set_offset = 1;
663 u32 get_next_table = 0, offset_length, table_sg_end;
664
665 table = rd_get_sg_table(dev, req->rd_page);
666 if (!(table))
667 return -1;
668
669 table_sg_end = (table->page_end_offset - req->rd_page);
670 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
671#ifdef DEBUG_RAMDISK_DR
672 printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
673 (task->task_data_direction == DMA_TO_DEVICE) ?
674 "Write" : "Read",
675 task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
676#endif
677 while (req->rd_size) {
678 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
679 if (!(se_mem)) {
680 printk(KERN_ERR "Unable to allocate struct se_mem\n");
681 return -1;
682 }
683 INIT_LIST_HEAD(&se_mem->se_list);
684
685 if (set_offset) {
686 offset_length = sg_s[j].length - req->rd_offset;
687 if (offset_length > req->rd_size)
688 offset_length = req->rd_size;
689
690 se_mem->se_page = sg_page(&sg_s[j++]);
691 se_mem->se_off = req->rd_offset;
692 se_mem->se_len = offset_length;
693
694 set_offset = 0;
695 get_next_table = (j > table_sg_end);
696 goto check_eot;
697 }
698
699 offset_length = (req->rd_size < req->rd_offset) ?
700 req->rd_size : req->rd_offset;
701
702 se_mem->se_page = sg_page(&sg_s[j]);
703 se_mem->se_len = offset_length;
704
705 set_offset = 1;
706
707check_eot:
708#ifdef DEBUG_RAMDISK_DR
709 printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
710 " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
711 req->rd_page, req->rd_size, offset_length, j, se_mem,
712 se_mem->se_page, se_mem->se_off, se_mem->se_len);
713#endif
714 list_add_tail(&se_mem->se_list, se_mem_list);
715 (*se_mem_cnt)++;
716
717 req->rd_size -= offset_length;
718 if (!(req->rd_size))
719 goto out;
720
721 if (!set_offset && !get_next_table)
722 continue;
723
724 if (++req->rd_page <= table->page_end_offset) {
725#ifdef DEBUG_RAMDISK_DR
726 printk(KERN_INFO "page: %u in same page table\n",
727 req->rd_page);
728#endif
729 continue;
730 }
731#ifdef DEBUG_RAMDISK_DR
732 printk(KERN_INFO "getting new page table for page: %u\n",
733 req->rd_page);
734#endif
735 table = rd_get_sg_table(dev, req->rd_page);
736 if (!(table))
737 return -1;
738
739 sg_s = &table->sg_table[j = 0];
740 }
741
742out:
743 T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
744#ifdef DEBUG_RAMDISK_DR
745 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
746 *se_mem_cnt);
747#endif
748 return 0;
749}
750
751/* rd_DIRECT_without_offset():
752 *
753 *
754 */
755static int rd_DIRECT_without_offset(
756 struct se_task *task,
757 struct list_head *se_mem_list,
758 u32 *se_mem_cnt,
759 u32 *task_offset)
760{
761 struct rd_request *req = RD_REQ(task);
762 struct rd_dev *dev = req->rd_dev;
763 struct rd_dev_sg_table *table;
764 struct se_mem *se_mem;
765 struct scatterlist *sg_s;
766 u32 length, j = 0;
767
768 table = rd_get_sg_table(dev, req->rd_page);
769 if (!(table))
770 return -1;
771
772 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
773#ifdef DEBUG_RAMDISK_DR
774 printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
775 (task->task_data_direction == DMA_TO_DEVICE) ?
776 "Write" : "Read",
777 task->task_lba, req->rd_size, req->rd_page);
778#endif
779 while (req->rd_size) {
780 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
781 if (!(se_mem)) {
782 printk(KERN_ERR "Unable to allocate struct se_mem\n");
783 return -1;
784 }
785 INIT_LIST_HEAD(&se_mem->se_list);
786
787 length = (req->rd_size < sg_s[j].length) ?
788 req->rd_size : sg_s[j].length;
789
790 se_mem->se_page = sg_page(&sg_s[j++]);
791 se_mem->se_len = length;
792
793#ifdef DEBUG_RAMDISK_DR
794 printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
795 " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
796 req->rd_size, j, se_mem, se_mem->se_page,
797 se_mem->se_off, se_mem->se_len);
798#endif
799 list_add_tail(&se_mem->se_list, se_mem_list);
800 (*se_mem_cnt)++;
801
802 req->rd_size -= length;
803 if (!(req->rd_size))
804 goto out;
805
806 if (++req->rd_page <= table->page_end_offset) {
807#ifdef DEBUG_RAMDISK_DR
808			printk(KERN_INFO "page: %u in same page table\n",
809 req->rd_page);
810#endif
811 continue;
812 }
813#ifdef DEBUG_RAMDISK_DR
814 printk(KERN_INFO "getting new page table for page: %u\n",
815 req->rd_page);
816#endif
817 table = rd_get_sg_table(dev, req->rd_page);
818 if (!(table))
819 return -1;
820
821 sg_s = &table->sg_table[j = 0];
822 }
823
824out:
825 T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
826#ifdef DEBUG_RAMDISK_DR
827 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
828 *se_mem_cnt);
829#endif
830 return 0;
831}
832
833/* rd_DIRECT_do_se_mem_map():
834 *
835 *
836 */
837static int rd_DIRECT_do_se_mem_map(
838 struct se_task *task,
839 struct list_head *se_mem_list,
840 void *in_mem,
841 struct se_mem *in_se_mem,
842 struct se_mem **out_se_mem,
843 u32 *se_mem_cnt,
844 u32 *task_offset_in)
845{
846 struct se_cmd *cmd = task->task_se_cmd;
847 struct rd_request *req = RD_REQ(task);
848 u32 task_offset = *task_offset_in;
849 unsigned long long lba;
850 int ret;
851
852 req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
853 PAGE_SIZE);
854 lba = task->task_lba;
855 req->rd_offset = (do_div(lba,
856 (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
857 DEV_ATTRIB(task->se_dev)->block_size;
858 req->rd_size = task->task_size;
859
860 if (req->rd_offset)
861 ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
862 task_offset_in);
863 else
864 ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
865 task_offset_in);
866
867 if (ret < 0)
868 return ret;
869
870 if (CMD_TFO(cmd)->task_sg_chaining == 0)
871 return 0;
872 /*
873 * Currently prevent writers from multiple HW fabrics doing
874 * pci_map_sg() to RD_DR's internal scatterlist memory.
875 */
876 if (cmd->data_direction == DMA_TO_DEVICE) {
877 printk(KERN_ERR "DMA_TO_DEVICE not supported for"
878 " RAMDISK_DR with task_sg_chaining=1\n");
879 return -1;
880 }
881 /*
882 * Special case for if task_sg_chaining is enabled, then
883 * we setup struct se_task->task_sg[], as it will be used by
884	 * transport_do_task_sg_chain() for creating chained SGLs
885 * across multiple struct se_task->task_sg[].
886 */
887 if (!(transport_calc_sg_num(task,
888 list_entry(T_TASK(cmd)->t_mem_list->next,
889 struct se_mem, se_list),
890 task_offset)))
891 return -1;
892
893 return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
894 list_entry(T_TASK(cmd)->t_mem_list->next,
895 struct se_mem, se_list),
896 out_se_mem, se_mem_cnt, task_offset_in);
897}
898
899/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
900 *
901 *
902 */
903static int rd_DIRECT_do_task(struct se_task *task)
904{
905 /*
906 * At this point the locally allocated RD tables have been mapped
907 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
908 */
909 task->task_scsi_status = GOOD;
910 transport_complete_task(task, 1);
911
912 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
913}
914
915/* rd_free_task(): (Part of se_subsystem_api_t template)
916 *
917 *
918 */
919static void rd_free_task(struct se_task *task)
920{
921 kfree(RD_REQ(task));
922}
923
924enum {
925 Opt_rd_pages, Opt_err
926};
927
928static match_table_t tokens = {
929 {Opt_rd_pages, "rd_pages=%d"},
930 {Opt_err, NULL}
931};
932
933static ssize_t rd_set_configfs_dev_params(
934 struct se_hba *hba,
935 struct se_subsystem_dev *se_dev,
936 const char *page,
937 ssize_t count)
938{
939 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
940 char *orig, *ptr, *opts;
941 substring_t args[MAX_OPT_ARGS];
942 int ret = 0, arg, token;
943
944 opts = kstrdup(page, GFP_KERNEL);
945 if (!opts)
946 return -ENOMEM;
947
948 orig = opts;
949
950 while ((ptr = strsep(&opts, ",")) != NULL) {
951 if (!*ptr)
952 continue;
953
954 token = match_token(ptr, tokens, args);
955 switch (token) {
956 case Opt_rd_pages:
957 match_int(args, &arg);
958 rd_dev->rd_page_count = arg;
959 printk(KERN_INFO "RAMDISK: Referencing Page"
960 " Count: %u\n", rd_dev->rd_page_count);
961 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
962 break;
963 default:
964 break;
965 }
966 }
967
968 kfree(orig);
969 return (!ret) ? count : ret;
970}
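rd_set_configfs_dev_params() splits the comma-separated option string written through configfs and recognizes the single "rd_pages=<count>" token via match_token()/match_int() from <linux/parser.h>. A minimal userspace sketch of the same parsing (strsep() and sscanf() standing in for the kernel parser helpers; the option name comes from the tokens table above):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int parse_rd_params(const char *page, unsigned int *rd_page_count)
	{
		char *opts = strdup(page);
		char *orig = opts, *ptr;
		int found = 0;

		if (!opts)
			return -1;

		while ((ptr = strsep(&opts, ",")) != NULL) {
			if (!*ptr)
				continue;
			if (sscanf(ptr, "rd_pages=%u", rd_page_count) == 1)
				found = 1;
			/* unrecognized options fall through, as in the kernel code */
		}
		free(orig);
		return found ? 0 : -1;
	}

	int main(void)
	{
		unsigned int pages = 0;

		if (!parse_rd_params("rd_pages=32768", &pages))
			printf("RAMDISK: Referencing Page Count: %u\n", pages);
		return 0;
	}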
971
972static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
973{
974 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
975
976 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
977 printk(KERN_INFO "Missing rd_pages= parameter\n");
978 return -1;
979 }
980
981 return 0;
982}
983
984static ssize_t rd_show_configfs_dev_params(
985 struct se_hba *hba,
986 struct se_subsystem_dev *se_dev,
987 char *b)
988{
989 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
990 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
991 rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
992 "rd_direct" : "rd_mcp");
993 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
994 " SG_table_count: %u\n", rd_dev->rd_page_count,
995 PAGE_SIZE, rd_dev->sg_table_count);
996 return bl;
997}
998
999/* rd_get_cdb(): (Part of se_subsystem_api_t template)
1000 *
1001 *
1002 */
1003static unsigned char *rd_get_cdb(struct se_task *task)
1004{
1005 struct rd_request *req = RD_REQ(task);
1006
1007 return req->rd_scsi_cdb;
1008}
1009
1010static u32 rd_get_device_rev(struct se_device *dev)
1011{
1012 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
1013}
1014
1015static u32 rd_get_device_type(struct se_device *dev)
1016{
1017 return TYPE_DISK;
1018}
1019
1020static sector_t rd_get_blocks(struct se_device *dev)
1021{
1022 struct rd_dev *rd_dev = dev->dev_ptr;
1023 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
1024 DEV_ATTRIB(dev)->block_size) - 1;
1025
1026 return blocks_long;
1027}
1028
1029static struct se_subsystem_api rd_dr_template = {
1030 .name = "rd_dr",
1031 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
1032 .attach_hba = rd_attach_hba,
1033 .detach_hba = rd_detach_hba,
1034 .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
1035 .create_virtdevice = rd_DIRECT_create_virtdevice,
1036 .free_device = rd_free_device,
1037 .alloc_task = rd_alloc_task,
1038 .do_task = rd_DIRECT_do_task,
1039 .free_task = rd_free_task,
1040 .check_configfs_dev_params = rd_check_configfs_dev_params,
1041 .set_configfs_dev_params = rd_set_configfs_dev_params,
1042 .show_configfs_dev_params = rd_show_configfs_dev_params,
1043 .get_cdb = rd_get_cdb,
1044 .get_device_rev = rd_get_device_rev,
1045 .get_device_type = rd_get_device_type,
1046 .get_blocks = rd_get_blocks,
1047 .do_se_mem_map = rd_DIRECT_do_se_mem_map,
1048};
1049
1050static struct se_subsystem_api rd_mcp_template = {
1051 .name = "rd_mcp",
1052 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
1053 .attach_hba = rd_attach_hba,
1054 .detach_hba = rd_detach_hba,
1055 .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
1056 .create_virtdevice = rd_MEMCPY_create_virtdevice,
1057 .free_device = rd_free_device,
1058 .alloc_task = rd_alloc_task,
1059 .do_task = rd_MEMCPY_do_task,
1060 .free_task = rd_free_task,
1061 .check_configfs_dev_params = rd_check_configfs_dev_params,
1062 .set_configfs_dev_params = rd_set_configfs_dev_params,
1063 .show_configfs_dev_params = rd_show_configfs_dev_params,
1064 .get_cdb = rd_get_cdb,
1065 .get_device_rev = rd_get_device_rev,
1066 .get_device_type = rd_get_device_type,
1067 .get_blocks = rd_get_blocks,
1068};
1069
1070int __init rd_module_init(void)
1071{
1072 int ret;
1073
1074 ret = transport_subsystem_register(&rd_dr_template);
1075 if (ret < 0)
1076 return ret;
1077
1078 ret = transport_subsystem_register(&rd_mcp_template);
1079 if (ret < 0) {
1080 transport_subsystem_release(&rd_dr_template);
1081 return ret;
1082 }
1083
1084 return 0;
1085}
1086
1087void rd_module_exit(void)
1088{
1089 transport_subsystem_release(&rd_dr_template);
1090 transport_subsystem_release(&rd_mcp_template);
1091}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 000000000000..3ea19e29d8ec
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,71 @@
1#ifndef TARGET_CORE_RD_H
2#define TARGET_CORE_RD_H
3
4#define RD_HBA_VERSION "v4.0"
5#define RD_DR_VERSION "4.0"
6#define RD_MCP_VERSION "4.0"
7
8/* Largest piece of memory kmalloc can allocate */
9#define RD_MAX_ALLOCATION_SIZE 65536
10/* Maximum queuedepth for the Ramdisk HBA */
11#define RD_HBA_QUEUE_DEPTH 256
12#define RD_DEVICE_QUEUE_DEPTH 32
13#define RD_MAX_DEVICE_QUEUE_DEPTH 128
14#define RD_BLOCKSIZE 512
15#define RD_MAX_SECTORS 1024
16
17/* Used in target_core_init_configfs() for virtual LUN 0 access */
18int __init rd_module_init(void);
19void rd_module_exit(void);
20
21#define RRF_EMULATE_CDB 0x01
22#define RRF_GOT_LBA 0x02
23
24struct rd_request {
25 struct se_task rd_task;
26
27 /* SCSI CDB from iSCSI Command PDU */
28 unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
29 /* Offset from start of page */
30 u32 rd_offset;
31 /* Starting page in Ramdisk for request */
32 u32 rd_page;
33 /* Total number of pages needed for request */
34 u32 rd_page_count;
35	/* Total request size in bytes */
36 u32 rd_size;
37 /* Ramdisk device */
38 struct rd_dev *rd_dev;
39} ____cacheline_aligned;
40
41struct rd_dev_sg_table {
42 u32 page_start_offset;
43 u32 page_end_offset;
44 u32 rd_sg_count;
45 struct scatterlist *sg_table;
46} ____cacheline_aligned;
47
48#define RDF_HAS_PAGE_COUNT 0x01
49
50struct rd_dev {
51 int rd_direct;
52 u32 rd_flags;
53 /* Unique Ramdisk Device ID in Ramdisk HBA */
54 u32 rd_dev_id;
55 /* Total page count for ramdisk device */
56 u32 rd_page_count;
57 /* Number of SG tables in sg_table_array */
58 u32 sg_table_count;
59 u32 rd_queue_depth;
60 /* Array of rd_dev_sg_table_t containing scatterlists */
61 struct rd_dev_sg_table *sg_table_array;
62 /* Ramdisk HBA device is connected to */
63 struct rd_host *rd_host;
64} ____cacheline_aligned;
65
66struct rd_host {
67 u32 rd_host_dev_id_count;
68 u32 rd_host_id; /* Unique Ramdisk Host ID */
69} ____cacheline_aligned;
70
71#endif /* TARGET_CORE_RD_H */
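struct rd_dev carves its rd_page_count pages into an array of struct rd_dev_sg_table chunks, each covering the page numbers between page_start_offset and page_end_offset. The mapping loops in target_core_rd.c call rd_get_sg_table() to find the chunk containing a given page and then index sg_table[page - page_start_offset]. A sketch of that lookup, assuming only the bracket semantics documented in this header (illustrative, not part of the patch):

	static struct rd_dev_sg_table *rd_lookup_sg_table(struct rd_dev *rd_dev, u32 page)
	{
		u32 i;

		for (i = 0; i < rd_dev->sg_table_count; i++) {
			struct rd_dev_sg_table *sg_table = &rd_dev->sg_table_array[i];

			if ((page >= sg_table->page_start_offset) &&
			    (page <= sg_table->page_end_offset))
				return sg_table;
		}
		return NULL;
	}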
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 000000000000..dc6fed037ab3
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
1/*******************************************************************************
2 * Filename: target_core_scdb.c
3 *
4 * This file contains the generic target engine Split CDB related functions.
5 *
6 * Copyright (c) 2004-2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <scsi/scsi.h>
32#include <asm/unaligned.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_transport.h>
36
37#include "target_core_scdb.h"
38
39/* split_cdb_XX_6():
40 *
41 * 21-bit LBA w/ 8-bit SECTORS
42 */
43void split_cdb_XX_6(
44 unsigned long long lba,
45 u32 *sectors,
46 unsigned char *cdb)
47{
48 cdb[1] = (lba >> 16) & 0x1f;
49 cdb[2] = (lba >> 8) & 0xff;
50 cdb[3] = lba & 0xff;
51 cdb[4] = *sectors & 0xff;
52}
53
54/* split_cdb_XX_10():
55 *
56 * 32-bit LBA w/ 16-bit SECTORS
57 */
58void split_cdb_XX_10(
59 unsigned long long lba,
60 u32 *sectors,
61 unsigned char *cdb)
62{
63 put_unaligned_be32(lba, &cdb[2]);
64 put_unaligned_be16(*sectors, &cdb[7]);
65}
66
67/* split_cdb_XX_12():
68 *
69 * 32-bit LBA w/ 32-bit SECTORS
70 */
71void split_cdb_XX_12(
72 unsigned long long lba,
73 u32 *sectors,
74 unsigned char *cdb)
75{
76 put_unaligned_be32(lba, &cdb[2]);
77 put_unaligned_be32(*sectors, &cdb[6]);
78}
79
80/* split_cdb_XX_16():
81 *
82 * 64-bit LBA w/ 32-bit SECTORS
83 */
84void split_cdb_XX_16(
85 unsigned long long lba,
86 u32 *sectors,
87 unsigned char *cdb)
88{
89 put_unaligned_be64(lba, &cdb[2]);
90 put_unaligned_be32(*sectors, &cdb[10]);
91}
92
93/*
94 * split_cdb_XX_32():
95 *
96 * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
97 */
98void split_cdb_XX_32(
99 unsigned long long lba,
100 u32 *sectors,
101 unsigned char *cdb)
102{
103 put_unaligned_be64(lba, &cdb[12]);
104 put_unaligned_be32(*sectors, &cdb[28]);
105}
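Each split_cdb_XX_*() helper rewrites only the LBA and transfer-length fields of an already-built CDB, at the byte offsets the comments above describe; for example the 10-byte form stores a big-endian 32-bit LBA at bytes 2-5 and a big-endian 16-bit sector count at bytes 7-8. A standalone sketch of the resulting layout for a READ(10)-style CDB, open-coding the byte stores instead of the kernel's put_unaligned_be*() helpers:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Userspace stand-ins for put_unaligned_be32()/put_unaligned_be16() */
	static void store_be32(uint32_t v, uint8_t *p)
	{
		p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
	}

	static void store_be16(uint16_t v, uint8_t *p)
	{
		p[0] = v >> 8; p[1] = v;
	}

	int main(void)
	{
		uint8_t cdb[10];
		int i;

		memset(cdb, 0, sizeof(cdb));
		cdb[0] = 0x28;				/* READ(10) opcode, for illustration */

		/* Same field placement as split_cdb_XX_10() */
		store_be32(0x12345678, &cdb[2]);	/* logical block address */
		store_be16(64, &cdb[7]);		/* sector count */

		for (i = 0; i < 10; i++)
			printf("%02x ", cdb[i]);
		printf("\n");	/* prints: 28 00 12 34 56 78 00 00 40 00 */
		return 0;
	}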
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 000000000000..98cd1c01ed83
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
1#ifndef TARGET_CORE_SCDB_H
2#define TARGET_CORE_SCDB_H
3
4extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
5extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
6extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
7extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
8extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
9
10#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
new file mode 100644
index 000000000000..5e3a067a7475
--- /dev/null
+++ b/drivers/target/target_core_stat.c
@@ -0,0 +1,1810 @@
1/*******************************************************************************
2 * Filename: target_core_stat.c
3 *
4 * Copyright (c) 2011 Rising Tide Systems
5 * Copyright (c) 2011 Linux-iSCSI.org
6 *
7 * Modern ConfigFS group context specific statistics based on original
8 * target_core_mib.c code
9 *
10 * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
11 *
12 * Nicholas A. Bellinger <nab@linux-iscsi.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/timer.h>
34#include <linux/string.h>
35#include <linux/version.h>
36#include <generated/utsrelease.h>
37#include <linux/utsname.h>
38#include <linux/proc_fs.h>
39#include <linux/seq_file.h>
40#include <linux/blkdev.h>
41#include <linux/configfs.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_host.h>
45
46#include <target/target_core_base.h>
47#include <target/target_core_transport.h>
48#include <target/target_core_fabric_ops.h>
49#include <target/target_core_configfs.h>
50#include <target/configfs_macros.h>
51
52#include "target_core_hba.h"
53
54#ifndef INITIAL_JIFFIES
55#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
56#endif
57
58#define NONE "None"
59#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
60
61#define SCSI_LU_INDEX 1
62#define LU_COUNT 1
63
64/*
65 * SCSI Device Table
66 */
67
68CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps);
69#define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \
70static struct target_stat_scsi_dev_attribute \
71 target_stat_scsi_dev_##_name = \
72 __CONFIGFS_EATTR(_name, _mode, \
73 target_stat_scsi_dev_show_attr_##_name, \
74 target_stat_scsi_dev_store_attr_##_name);
75
76#define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \
77static struct target_stat_scsi_dev_attribute \
78 target_stat_scsi_dev_##_name = \
79 __CONFIGFS_EATTR_RO(_name, \
80 target_stat_scsi_dev_show_attr_##_name);
81
82static ssize_t target_stat_scsi_dev_show_attr_inst(
83 struct se_dev_stat_grps *sgrps, char *page)
84{
85 struct se_subsystem_dev *se_subdev = container_of(sgrps,
86 struct se_subsystem_dev, dev_stat_grps);
87 struct se_hba *hba = se_subdev->se_dev_hba;
88 struct se_device *dev = se_subdev->se_dev_ptr;
89
90 if (!dev)
91 return -ENODEV;
92
93 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
94}
95DEV_STAT_SCSI_DEV_ATTR_RO(inst);
96
97static ssize_t target_stat_scsi_dev_show_attr_indx(
98 struct se_dev_stat_grps *sgrps, char *page)
99{
100 struct se_subsystem_dev *se_subdev = container_of(sgrps,
101 struct se_subsystem_dev, dev_stat_grps);
102 struct se_device *dev = se_subdev->se_dev_ptr;
103
104 if (!dev)
105 return -ENODEV;
106
107 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
108}
109DEV_STAT_SCSI_DEV_ATTR_RO(indx);
110
111static ssize_t target_stat_scsi_dev_show_attr_role(
112 struct se_dev_stat_grps *sgrps, char *page)
113{
114 struct se_subsystem_dev *se_subdev = container_of(sgrps,
115 struct se_subsystem_dev, dev_stat_grps);
116 struct se_device *dev = se_subdev->se_dev_ptr;
117
118 if (!dev)
119 return -ENODEV;
120
121 return snprintf(page, PAGE_SIZE, "Target\n");
122}
123DEV_STAT_SCSI_DEV_ATTR_RO(role);
124
125static ssize_t target_stat_scsi_dev_show_attr_ports(
126 struct se_dev_stat_grps *sgrps, char *page)
127{
128 struct se_subsystem_dev *se_subdev = container_of(sgrps,
129 struct se_subsystem_dev, dev_stat_grps);
130 struct se_device *dev = se_subdev->se_dev_ptr;
131
132 if (!dev)
133 return -ENODEV;
134
135 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
136}
137DEV_STAT_SCSI_DEV_ATTR_RO(ports);
138
139CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group);
140
141static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
142 &target_stat_scsi_dev_inst.attr,
143 &target_stat_scsi_dev_indx.attr,
144 &target_stat_scsi_dev_role.attr,
145 &target_stat_scsi_dev_ports.attr,
146 NULL,
147};
148
149static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {
150 .show_attribute = target_stat_scsi_dev_attr_show,
151 .store_attribute = target_stat_scsi_dev_attr_store,
152};
153
154static struct config_item_type target_stat_scsi_dev_cit = {
155 .ct_item_ops = &target_stat_scsi_dev_attrib_ops,
156 .ct_attrs = target_stat_scsi_dev_attrs,
157 .ct_owner = THIS_MODULE,
158};
159
160/*
161 * SCSI Target Device Table
162 */
163
164CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps);
165#define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \
166static struct target_stat_scsi_tgt_dev_attribute \
167 target_stat_scsi_tgt_dev_##_name = \
168 __CONFIGFS_EATTR(_name, _mode, \
169 target_stat_scsi_tgt_dev_show_attr_##_name, \
170 target_stat_scsi_tgt_dev_store_attr_##_name);
171
172#define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \
173static struct target_stat_scsi_tgt_dev_attribute \
174 target_stat_scsi_tgt_dev_##_name = \
175 __CONFIGFS_EATTR_RO(_name, \
176 target_stat_scsi_tgt_dev_show_attr_##_name);
177
178static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
179 struct se_dev_stat_grps *sgrps, char *page)
180{
181 struct se_subsystem_dev *se_subdev = container_of(sgrps,
182 struct se_subsystem_dev, dev_stat_grps);
183 struct se_hba *hba = se_subdev->se_dev_hba;
184 struct se_device *dev = se_subdev->se_dev_ptr;
185
186 if (!dev)
187 return -ENODEV;
188
189 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
190}
191DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
192
193static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
194 struct se_dev_stat_grps *sgrps, char *page)
195{
196 struct se_subsystem_dev *se_subdev = container_of(sgrps,
197 struct se_subsystem_dev, dev_stat_grps);
198 struct se_device *dev = se_subdev->se_dev_ptr;
199
200 if (!dev)
201 return -ENODEV;
202
203 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
204}
205DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
206
207static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
208 struct se_dev_stat_grps *sgrps, char *page)
209{
210 struct se_subsystem_dev *se_subdev = container_of(sgrps,
211 struct se_subsystem_dev, dev_stat_grps);
212 struct se_device *dev = se_subdev->se_dev_ptr;
213
214 if (!dev)
215 return -ENODEV;
216
217 return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
218}
219DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
220
221static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
222 struct se_dev_stat_grps *sgrps, char *page)
223{
224 struct se_subsystem_dev *se_subdev = container_of(sgrps,
225 struct se_subsystem_dev, dev_stat_grps);
226 struct se_device *dev = se_subdev->se_dev_ptr;
227 char status[16];
228
229 if (!dev)
230 return -ENODEV;
231
232 switch (dev->dev_status) {
233 case TRANSPORT_DEVICE_ACTIVATED:
234 strcpy(status, "activated");
235 break;
236 case TRANSPORT_DEVICE_DEACTIVATED:
237 strcpy(status, "deactivated");
238 break;
239 case TRANSPORT_DEVICE_SHUTDOWN:
240 strcpy(status, "shutdown");
241 break;
242 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
243 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
244 strcpy(status, "offline");
245 break;
246 default:
247 sprintf(status, "unknown(%d)", dev->dev_status);
248 break;
249 }
250
251 return snprintf(page, PAGE_SIZE, "%s\n", status);
252}
253DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
254
255static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
256 struct se_dev_stat_grps *sgrps, char *page)
257{
258 struct se_subsystem_dev *se_subdev = container_of(sgrps,
259 struct se_subsystem_dev, dev_stat_grps);
260 struct se_device *dev = se_subdev->se_dev_ptr;
261 int non_accessible_lus;
262
263 if (!dev)
264 return -ENODEV;
265
266 switch (dev->dev_status) {
267 case TRANSPORT_DEVICE_ACTIVATED:
268 non_accessible_lus = 0;
269 break;
270 case TRANSPORT_DEVICE_DEACTIVATED:
271 case TRANSPORT_DEVICE_SHUTDOWN:
272 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
273 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
274 default:
275 non_accessible_lus = 1;
276 break;
277 }
278
279 return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
280}
281DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
282
283static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
284 struct se_dev_stat_grps *sgrps, char *page)
285{
286 struct se_subsystem_dev *se_subdev = container_of(sgrps,
287 struct se_subsystem_dev, dev_stat_grps);
288 struct se_device *dev = se_subdev->se_dev_ptr;
289
290 if (!dev)
291 return -ENODEV;
292
293 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
294}
295DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
296
297
298CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group);
299
300static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
301 &target_stat_scsi_tgt_dev_inst.attr,
302 &target_stat_scsi_tgt_dev_indx.attr,
303 &target_stat_scsi_tgt_dev_num_lus.attr,
304 &target_stat_scsi_tgt_dev_status.attr,
305 &target_stat_scsi_tgt_dev_non_access_lus.attr,
306 &target_stat_scsi_tgt_dev_resets.attr,
307 NULL,
308};
309
310static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {
311 .show_attribute = target_stat_scsi_tgt_dev_attr_show,
312 .store_attribute = target_stat_scsi_tgt_dev_attr_store,
313};
314
315static struct config_item_type target_stat_scsi_tgt_dev_cit = {
316 .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops,
317 .ct_attrs = target_stat_scsi_tgt_dev_attrs,
318 .ct_owner = THIS_MODULE,
319};
320
321/*
322 * SCSI Logical Unit Table
323 */
324
325CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps);
326#define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \
327static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
328 __CONFIGFS_EATTR(_name, _mode, \
329 target_stat_scsi_lu_show_attr_##_name, \
330 target_stat_scsi_lu_store_attr_##_name);
331
332#define DEV_STAT_SCSI_LU_ATTR_RO(_name) \
333static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
334 __CONFIGFS_EATTR_RO(_name, \
335 target_stat_scsi_lu_show_attr_##_name);
336
337static ssize_t target_stat_scsi_lu_show_attr_inst(
338 struct se_dev_stat_grps *sgrps, char *page)
339{
340 struct se_subsystem_dev *se_subdev = container_of(sgrps,
341 struct se_subsystem_dev, dev_stat_grps);
342 struct se_hba *hba = se_subdev->se_dev_hba;
343 struct se_device *dev = se_subdev->se_dev_ptr;
344
345 if (!dev)
346 return -ENODEV;
347
348 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
349}
350DEV_STAT_SCSI_LU_ATTR_RO(inst);
351
352static ssize_t target_stat_scsi_lu_show_attr_dev(
353 struct se_dev_stat_grps *sgrps, char *page)
354{
355 struct se_subsystem_dev *se_subdev = container_of(sgrps,
356 struct se_subsystem_dev, dev_stat_grps);
357 struct se_device *dev = se_subdev->se_dev_ptr;
358
359 if (!dev)
360 return -ENODEV;
361
362 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
363}
364DEV_STAT_SCSI_LU_ATTR_RO(dev);
365
366static ssize_t target_stat_scsi_lu_show_attr_indx(
367 struct se_dev_stat_grps *sgrps, char *page)
368{
369 struct se_subsystem_dev *se_subdev = container_of(sgrps,
370 struct se_subsystem_dev, dev_stat_grps);
371 struct se_device *dev = se_subdev->se_dev_ptr;
372
373 if (!dev)
374 return -ENODEV;
375
376 return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
377}
378DEV_STAT_SCSI_LU_ATTR_RO(indx);
379
380static ssize_t target_stat_scsi_lu_show_attr_lun(
381 struct se_dev_stat_grps *sgrps, char *page)
382{
383 struct se_subsystem_dev *se_subdev = container_of(sgrps,
384 struct se_subsystem_dev, dev_stat_grps);
385 struct se_device *dev = se_subdev->se_dev_ptr;
386
387 if (!dev)
388 return -ENODEV;
389 /* FIXME: scsiLuDefaultLun */
390 return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
391}
392DEV_STAT_SCSI_LU_ATTR_RO(lun);
393
394static ssize_t target_stat_scsi_lu_show_attr_lu_name(
395 struct se_dev_stat_grps *sgrps, char *page)
396{
397 struct se_subsystem_dev *se_subdev = container_of(sgrps,
398 struct se_subsystem_dev, dev_stat_grps);
399 struct se_device *dev = se_subdev->se_dev_ptr;
400
401 if (!dev)
402 return -ENODEV;
403 /* scsiLuWwnName */
404 return snprintf(page, PAGE_SIZE, "%s\n",
405 (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
406 (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
407}
408DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
409
410static ssize_t target_stat_scsi_lu_show_attr_vend(
411 struct se_dev_stat_grps *sgrps, char *page)
412{
413 struct se_subsystem_dev *se_subdev = container_of(sgrps,
414 struct se_subsystem_dev, dev_stat_grps);
415 struct se_device *dev = se_subdev->se_dev_ptr;
416 int j;
417 char str[28];
418
419 if (!dev)
420 return -ENODEV;
421 /* scsiLuVendorId */
422 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
423 for (j = 0; j < 8; j++)
424 str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
425 DEV_T10_WWN(dev)->vendor[j] : 0x20;
426 str[8] = 0;
427 return snprintf(page, PAGE_SIZE, "%s\n", str);
428}
429DEV_STAT_SCSI_LU_ATTR_RO(vend);
430
431static ssize_t target_stat_scsi_lu_show_attr_prod(
432 struct se_dev_stat_grps *sgrps, char *page)
433{
434 struct se_subsystem_dev *se_subdev = container_of(sgrps,
435 struct se_subsystem_dev, dev_stat_grps);
436 struct se_device *dev = se_subdev->se_dev_ptr;
437 int j;
438 char str[28];
439
440 if (!dev)
441 return -ENODEV;
442
443 /* scsiLuProductId */
444 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
445 for (j = 0; j < 16; j++)
446 str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
447 DEV_T10_WWN(dev)->model[j] : 0x20;
448 str[16] = 0;
449 return snprintf(page, PAGE_SIZE, "%s\n", str);
450}
451DEV_STAT_SCSI_LU_ATTR_RO(prod);
452
453static ssize_t target_stat_scsi_lu_show_attr_rev(
454 struct se_dev_stat_grps *sgrps, char *page)
455{
456 struct se_subsystem_dev *se_subdev = container_of(sgrps,
457 struct se_subsystem_dev, dev_stat_grps);
458 struct se_device *dev = se_subdev->se_dev_ptr;
459 int j;
460 char str[28];
461
462 if (!dev)
463 return -ENODEV;
464
465 /* scsiLuRevisionId */
466 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
467 for (j = 0; j < 4; j++)
468 str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
469 DEV_T10_WWN(dev)->revision[j] : 0x20;
470 str[4] = 0;
471 return snprintf(page, PAGE_SIZE, "%s\n", str);
472}
473DEV_STAT_SCSI_LU_ATTR_RO(rev);
474
475static ssize_t target_stat_scsi_lu_show_attr_dev_type(
476 struct se_dev_stat_grps *sgrps, char *page)
477{
478 struct se_subsystem_dev *se_subdev = container_of(sgrps,
479 struct se_subsystem_dev, dev_stat_grps);
480 struct se_device *dev = se_subdev->se_dev_ptr;
481
482 if (!dev)
483 return -ENODEV;
484
485 /* scsiLuPeripheralType */
486 return snprintf(page, PAGE_SIZE, "%u\n",
487 TRANSPORT(dev)->get_device_type(dev));
488}
489DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
490
491static ssize_t target_stat_scsi_lu_show_attr_status(
492 struct se_dev_stat_grps *sgrps, char *page)
493{
494 struct se_subsystem_dev *se_subdev = container_of(sgrps,
495 struct se_subsystem_dev, dev_stat_grps);
496 struct se_device *dev = se_subdev->se_dev_ptr;
497
498 if (!dev)
499 return -ENODEV;
500
501 /* scsiLuStatus */
502 return snprintf(page, PAGE_SIZE, "%s\n",
503 (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
504 "available" : "notavailable");
505}
506DEV_STAT_SCSI_LU_ATTR_RO(status);
507
508static ssize_t target_stat_scsi_lu_show_attr_state_bit(
509 struct se_dev_stat_grps *sgrps, char *page)
510{
511 struct se_subsystem_dev *se_subdev = container_of(sgrps,
512 struct se_subsystem_dev, dev_stat_grps);
513 struct se_device *dev = se_subdev->se_dev_ptr;
514
515 if (!dev)
516 return -ENODEV;
517
518 /* scsiLuState */
519 return snprintf(page, PAGE_SIZE, "exposed\n");
520}
521DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
522
523static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
524 struct se_dev_stat_grps *sgrps, char *page)
525{
526 struct se_subsystem_dev *se_subdev = container_of(sgrps,
527 struct se_subsystem_dev, dev_stat_grps);
528 struct se_device *dev = se_subdev->se_dev_ptr;
529
530 if (!dev)
531 return -ENODEV;
532
533 /* scsiLuNumCommands */
534 return snprintf(page, PAGE_SIZE, "%llu\n",
535 (unsigned long long)dev->num_cmds);
536}
537DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
538
539static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
540 struct se_dev_stat_grps *sgrps, char *page)
541{
542 struct se_subsystem_dev *se_subdev = container_of(sgrps,
543 struct se_subsystem_dev, dev_stat_grps);
544 struct se_device *dev = se_subdev->se_dev_ptr;
545
546 if (!dev)
547 return -ENODEV;
548
549 /* scsiLuReadMegaBytes */
550 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
551}
552DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
553
554static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
555 struct se_dev_stat_grps *sgrps, char *page)
556{
557 struct se_subsystem_dev *se_subdev = container_of(sgrps,
558 struct se_subsystem_dev, dev_stat_grps);
559 struct se_device *dev = se_subdev->se_dev_ptr;
560
561 if (!dev)
562 return -ENODEV;
563
564 /* scsiLuWrittenMegaBytes */
565 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
566}
567DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
568
569static ssize_t target_stat_scsi_lu_show_attr_resets(
570 struct se_dev_stat_grps *sgrps, char *page)
571{
572 struct se_subsystem_dev *se_subdev = container_of(sgrps,
573 struct se_subsystem_dev, dev_stat_grps);
574 struct se_device *dev = se_subdev->se_dev_ptr;
575
576 if (!dev)
577 return -ENODEV;
578
579 /* scsiLuInResets */
580 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
581}
582DEV_STAT_SCSI_LU_ATTR_RO(resets);
583
584static ssize_t target_stat_scsi_lu_show_attr_full_stat(
585 struct se_dev_stat_grps *sgrps, char *page)
586{
587 struct se_subsystem_dev *se_subdev = container_of(sgrps,
588 struct se_subsystem_dev, dev_stat_grps);
589 struct se_device *dev = se_subdev->se_dev_ptr;
590
591 if (!dev)
592 return -ENODEV;
593
594 /* FIXME: scsiLuOutTaskSetFullStatus */
595 return snprintf(page, PAGE_SIZE, "%u\n", 0);
596}
597DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
598
599static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
600 struct se_dev_stat_grps *sgrps, char *page)
601{
602 struct se_subsystem_dev *se_subdev = container_of(sgrps,
603 struct se_subsystem_dev, dev_stat_grps);
604 struct se_device *dev = se_subdev->se_dev_ptr;
605
606 if (!dev)
607 return -ENODEV;
608
609 /* FIXME: scsiLuHSInCommands */
610 return snprintf(page, PAGE_SIZE, "%u\n", 0);
611}
612DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
613
614static ssize_t target_stat_scsi_lu_show_attr_creation_time(
615 struct se_dev_stat_grps *sgrps, char *page)
616{
617 struct se_subsystem_dev *se_subdev = container_of(sgrps,
618 struct se_subsystem_dev, dev_stat_grps);
619 struct se_device *dev = se_subdev->se_dev_ptr;
620
621 if (!dev)
622 return -ENODEV;
623
624 /* scsiLuCreationTime */
625 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
626 INITIAL_JIFFIES) * 100 / HZ));
627}
628DEV_STAT_SCSI_LU_ATTR_RO(creation_time);
629
630CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group);
631
632static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
633 &target_stat_scsi_lu_inst.attr,
634 &target_stat_scsi_lu_dev.attr,
635 &target_stat_scsi_lu_indx.attr,
636 &target_stat_scsi_lu_lun.attr,
637 &target_stat_scsi_lu_lu_name.attr,
638 &target_stat_scsi_lu_vend.attr,
639 &target_stat_scsi_lu_prod.attr,
640 &target_stat_scsi_lu_rev.attr,
641 &target_stat_scsi_lu_dev_type.attr,
642 &target_stat_scsi_lu_status.attr,
643 &target_stat_scsi_lu_state_bit.attr,
644 &target_stat_scsi_lu_num_cmds.attr,
645 &target_stat_scsi_lu_read_mbytes.attr,
646 &target_stat_scsi_lu_write_mbytes.attr,
647 &target_stat_scsi_lu_resets.attr,
648 &target_stat_scsi_lu_full_stat.attr,
649 &target_stat_scsi_lu_hs_num_cmds.attr,
650 &target_stat_scsi_lu_creation_time.attr,
651 NULL,
652};
653
654static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {
655 .show_attribute = target_stat_scsi_lu_attr_show,
656 .store_attribute = target_stat_scsi_lu_attr_store,
657};
658
659static struct config_item_type target_stat_scsi_lu_cit = {
660 .ct_item_ops = &target_stat_scsi_lu_attrib_ops,
661 .ct_attrs = target_stat_scsi_lu_attrs,
662 .ct_owner = THIS_MODULE,
663};
664
665/*
666 * Called from target_core_configfs.c:target_core_make_subdev() to setup
667 * the target statistics groups + configfs CITs located in target_core_stat.c
668 */
669void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
670{
671 struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
672
673 config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
674 "scsi_dev", &target_stat_scsi_dev_cit);
675 config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
676 "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
677 config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
678 "scsi_lu", &target_stat_scsi_lu_cit);
679
680 dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
681 dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
682 dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
683 dev_stat_grp->default_groups[3] = NULL;
684}
685
686/*
687 * SCSI Port Table
688 */
689
690CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps);
691#define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \
692static struct target_stat_scsi_port_attribute \
693 target_stat_scsi_port_##_name = \
694 __CONFIGFS_EATTR(_name, _mode, \
695 target_stat_scsi_port_show_attr_##_name, \
696 target_stat_scsi_port_store_attr_##_name);
697
698#define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \
699static struct target_stat_scsi_port_attribute \
700 target_stat_scsi_port_##_name = \
701 __CONFIGFS_EATTR_RO(_name, \
702 target_stat_scsi_port_show_attr_##_name);
703
704static ssize_t target_stat_scsi_port_show_attr_inst(
705 struct se_port_stat_grps *pgrps, char *page)
706{
707 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
708 struct se_port *sep;
709 struct se_device *dev = lun->lun_se_dev;
710 struct se_hba *hba;
711 ssize_t ret;
712
713 spin_lock(&lun->lun_sep_lock);
714 sep = lun->lun_sep;
715 if (!sep) {
716 spin_unlock(&lun->lun_sep_lock);
717 return -ENODEV;
718 }
719 hba = dev->se_hba;
720 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
721 spin_unlock(&lun->lun_sep_lock);
722 return ret;
723}
724DEV_STAT_SCSI_PORT_ATTR_RO(inst);
725
726static ssize_t target_stat_scsi_port_show_attr_dev(
727 struct se_port_stat_grps *pgrps, char *page)
728{
729 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
730 struct se_port *sep;
731 struct se_device *dev = lun->lun_se_dev;
732 ssize_t ret;
733
734 spin_lock(&lun->lun_sep_lock);
735 sep = lun->lun_sep;
736 if (!sep) {
737 spin_unlock(&lun->lun_sep_lock);
738 return -ENODEV;
739 }
740 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
741 spin_unlock(&lun->lun_sep_lock);
742 return ret;
743}
744DEV_STAT_SCSI_PORT_ATTR_RO(dev);
745
746static ssize_t target_stat_scsi_port_show_attr_indx(
747 struct se_port_stat_grps *pgrps, char *page)
748{
749 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
750 struct se_port *sep;
751 ssize_t ret;
752
753 spin_lock(&lun->lun_sep_lock);
754 sep = lun->lun_sep;
755 if (!sep) {
756 spin_unlock(&lun->lun_sep_lock);
757 return -ENODEV;
758 }
759 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
760 spin_unlock(&lun->lun_sep_lock);
761 return ret;
762}
763DEV_STAT_SCSI_PORT_ATTR_RO(indx);
764
765static ssize_t target_stat_scsi_port_show_attr_role(
766 struct se_port_stat_grps *pgrps, char *page)
767{
768 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
769 struct se_device *dev = lun->lun_se_dev;
770 struct se_port *sep;
771 ssize_t ret;
772
773 if (!dev)
774 return -ENODEV;
775
776 spin_lock(&lun->lun_sep_lock);
777 sep = lun->lun_sep;
778 if (!sep) {
779 spin_unlock(&lun->lun_sep_lock);
780 return -ENODEV;
781 }
782 ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
783 spin_unlock(&lun->lun_sep_lock);
784 return ret;
785}
786DEV_STAT_SCSI_PORT_ATTR_RO(role);
787
788static ssize_t target_stat_scsi_port_show_attr_busy_count(
789 struct se_port_stat_grps *pgrps, char *page)
790{
791 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
792 struct se_port *sep;
793 ssize_t ret;
794
795 spin_lock(&lun->lun_sep_lock);
796 sep = lun->lun_sep;
797 if (!sep) {
798 spin_unlock(&lun->lun_sep_lock);
799 return -ENODEV;
800 }
801 /* FIXME: scsiPortBusyStatuses */
802 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
803 spin_unlock(&lun->lun_sep_lock);
804 return ret;
805}
806DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
807
808CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group);
809
810static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
811 &target_stat_scsi_port_inst.attr,
812 &target_stat_scsi_port_dev.attr,
813 &target_stat_scsi_port_indx.attr,
814 &target_stat_scsi_port_role.attr,
815 &target_stat_scsi_port_busy_count.attr,
816 NULL,
817};
818
819static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {
820 .show_attribute = target_stat_scsi_port_attr_show,
821 .store_attribute = target_stat_scsi_port_attr_store,
822};
823
824static struct config_item_type target_stat_scsi_port_cit = {
825 .ct_item_ops = &target_stat_scsi_port_attrib_ops,
826 .ct_attrs = target_stat_scsi_port_attrs,
827 .ct_owner = THIS_MODULE,
828};
829
830/*
831 * SCSI Target Port Table
832 */
833CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps);
834#define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \
835static struct target_stat_scsi_tgt_port_attribute \
836 target_stat_scsi_tgt_port_##_name = \
837 __CONFIGFS_EATTR(_name, _mode, \
838 target_stat_scsi_tgt_port_show_attr_##_name, \
839 target_stat_scsi_tgt_port_store_attr_##_name);
840
841#define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \
842static struct target_stat_scsi_tgt_port_attribute \
843 target_stat_scsi_tgt_port_##_name = \
844 __CONFIGFS_EATTR_RO(_name, \
845 target_stat_scsi_tgt_port_show_attr_##_name);
846
847static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
848 struct se_port_stat_grps *pgrps, char *page)
849{
850 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
851 struct se_device *dev = lun->lun_se_dev;
852 struct se_port *sep;
853 struct se_hba *hba;
854 ssize_t ret;
855
856 spin_lock(&lun->lun_sep_lock);
857 sep = lun->lun_sep;
858 if (!sep) {
859 spin_unlock(&lun->lun_sep_lock);
860 return -ENODEV;
861 }
862 hba = dev->se_hba;
863 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
864 spin_unlock(&lun->lun_sep_lock);
865 return ret;
866}
867DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
868
869static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
870 struct se_port_stat_grps *pgrps, char *page)
871{
872 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
873 struct se_device *dev = lun->lun_se_dev;
874 struct se_port *sep;
875 ssize_t ret;
876
877 spin_lock(&lun->lun_sep_lock);
878 sep = lun->lun_sep;
879 if (!sep) {
880 spin_unlock(&lun->lun_sep_lock);
881 return -ENODEV;
882 }
883 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
884 spin_unlock(&lun->lun_sep_lock);
885 return ret;
886}
887DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
888
889static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
890 struct se_port_stat_grps *pgrps, char *page)
891{
892 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
893 struct se_port *sep;
894 ssize_t ret;
895
896 spin_lock(&lun->lun_sep_lock);
897 sep = lun->lun_sep;
898 if (!sep) {
899 spin_unlock(&lun->lun_sep_lock);
900 return -ENODEV;
901 }
902 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
903 spin_unlock(&lun->lun_sep_lock);
904 return ret;
905}
906DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
907
908static ssize_t target_stat_scsi_tgt_port_show_attr_name(
909 struct se_port_stat_grps *pgrps, char *page)
910{
911 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
912 struct se_port *sep;
913 struct se_portal_group *tpg;
914 ssize_t ret;
915
916 spin_lock(&lun->lun_sep_lock);
917 sep = lun->lun_sep;
918 if (!sep) {
919 spin_unlock(&lun->lun_sep_lock);
920 return -ENODEV;
921 }
922 tpg = sep->sep_tpg;
923
924 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
925 TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
926 spin_unlock(&lun->lun_sep_lock);
927 return ret;
928}
929DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
930
931static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
932 struct se_port_stat_grps *pgrps, char *page)
933{
934 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
935 struct se_port *sep;
936 struct se_portal_group *tpg;
937 ssize_t ret;
938
939 spin_lock(&lun->lun_sep_lock);
940 sep = lun->lun_sep;
941 if (!sep) {
942 spin_unlock(&lun->lun_sep_lock);
943 return -ENODEV;
944 }
945 tpg = sep->sep_tpg;
946
947 ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
948 TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
949 TPG_TFO(tpg)->tpg_get_tag(tpg));
950 spin_unlock(&lun->lun_sep_lock);
951 return ret;
952}
953DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
954
955static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
956 struct se_port_stat_grps *pgrps, char *page)
957{
958 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
959 struct se_port *sep;
960 struct se_portal_group *tpg;
961 ssize_t ret;
962
963 spin_lock(&lun->lun_sep_lock);
964 sep = lun->lun_sep;
965 if (!sep) {
966 spin_unlock(&lun->lun_sep_lock);
967 return -ENODEV;
968 }
969 tpg = sep->sep_tpg;
970
971 ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
972 spin_unlock(&lun->lun_sep_lock);
973 return ret;
974}
975DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
976
977static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
978 struct se_port_stat_grps *pgrps, char *page)
979{
980 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
981 struct se_port *sep;
982 struct se_portal_group *tpg;
983 ssize_t ret;
984
985 spin_lock(&lun->lun_sep_lock);
986 sep = lun->lun_sep;
987 if (!sep) {
988 spin_unlock(&lun->lun_sep_lock);
989 return -ENODEV;
990 }
991 tpg = sep->sep_tpg;
992
993 ret = snprintf(page, PAGE_SIZE, "%u\n",
994 (u32)(sep->sep_stats.rx_data_octets >> 20));
995 spin_unlock(&lun->lun_sep_lock);
996 return ret;
997}
998DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
999
1000static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
1001 struct se_port_stat_grps *pgrps, char *page)
1002{
1003 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1004 struct se_port *sep;
1005 struct se_portal_group *tpg;
1006 ssize_t ret;
1007
1008 spin_lock(&lun->lun_sep_lock);
1009 sep = lun->lun_sep;
1010 if (!sep) {
1011 spin_unlock(&lun->lun_sep_lock);
1012 return -ENODEV;
1013 }
1014 tpg = sep->sep_tpg;
1015
1016 ret = snprintf(page, PAGE_SIZE, "%u\n",
1017 (u32)(sep->sep_stats.tx_data_octets >> 20));
1018 spin_unlock(&lun->lun_sep_lock);
1019 return ret;
1020}
1021DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
1022
1023static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
1024 struct se_port_stat_grps *pgrps, char *page)
1025{
1026 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1027 struct se_port *sep;
1028 struct se_portal_group *tpg;
1029 ssize_t ret;
1030
1031 spin_lock(&lun->lun_sep_lock);
1032 sep = lun->lun_sep;
1033 if (!sep) {
1034 spin_unlock(&lun->lun_sep_lock);
1035 return -ENODEV;
1036 }
1037 tpg = sep->sep_tpg;
1038
1039 /* FIXME: scsiTgtPortHsInCommands */
1040 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
1041 spin_unlock(&lun->lun_sep_lock);
1042 return ret;
1043}
1044DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
1045
1046CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps,
1047 scsi_tgt_port_group);
1048
1049static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
1050 &target_stat_scsi_tgt_port_inst.attr,
1051 &target_stat_scsi_tgt_port_dev.attr,
1052 &target_stat_scsi_tgt_port_indx.attr,
1053 &target_stat_scsi_tgt_port_name.attr,
1054 &target_stat_scsi_tgt_port_port_index.attr,
1055 &target_stat_scsi_tgt_port_in_cmds.attr,
1056 &target_stat_scsi_tgt_port_write_mbytes.attr,
1057 &target_stat_scsi_tgt_port_read_mbytes.attr,
1058 &target_stat_scsi_tgt_port_hs_in_cmds.attr,
1059 NULL,
1060};
1061
1062static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {
1063 .show_attribute = target_stat_scsi_tgt_port_attr_show,
1064 .store_attribute = target_stat_scsi_tgt_port_attr_store,
1065};
1066
1067static struct config_item_type target_stat_scsi_tgt_port_cit = {
1068 .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops,
1069 .ct_attrs = target_stat_scsi_tgt_port_attrs,
1070 .ct_owner = THIS_MODULE,
1071};
1072
1073/*
1074 * SCSI Transport Table
1075 */
1076
1077CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps);
1078#define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \
1079static struct target_stat_scsi_transport_attribute \
1080 target_stat_scsi_transport_##_name = \
1081 __CONFIGFS_EATTR(_name, _mode, \
1082 target_stat_scsi_transport_show_attr_##_name, \
1083 target_stat_scsi_transport_store_attr_##_name);
1084
1085#define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \
1086static struct target_stat_scsi_transport_attribute \
1087 target_stat_scsi_transport_##_name = \
1088 __CONFIGFS_EATTR_RO(_name, \
1089 target_stat_scsi_transport_show_attr_##_name);
1090
1091static ssize_t target_stat_scsi_transport_show_attr_inst(
1092 struct se_port_stat_grps *pgrps, char *page)
1093{
1094 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1095 struct se_device *dev = lun->lun_se_dev;
1096 struct se_port *sep;
1097 struct se_hba *hba;
1098 ssize_t ret;
1099
1100 spin_lock(&lun->lun_sep_lock);
1101 sep = lun->lun_sep;
1102 if (!sep) {
1103 spin_unlock(&lun->lun_sep_lock);
1104 return -ENODEV;
1105 }
1106
1107 hba = dev->se_hba;
1108 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
1109 spin_unlock(&lun->lun_sep_lock);
1110 return ret;
1111}
1112DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
1113
1114static ssize_t target_stat_scsi_transport_show_attr_device(
1115 struct se_port_stat_grps *pgrps, char *page)
1116{
1117 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1118 struct se_port *sep;
1119 struct se_portal_group *tpg;
1120 ssize_t ret;
1121
1122 spin_lock(&lun->lun_sep_lock);
1123 sep = lun->lun_sep;
1124 if (!sep) {
1125 spin_unlock(&lun->lun_sep_lock);
1126 return -ENODEV;
1127 }
1128 tpg = sep->sep_tpg;
1129 /* scsiTransportType */
1130 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
1131 TPG_TFO(tpg)->get_fabric_name());
1132 spin_unlock(&lun->lun_sep_lock);
1133 return ret;
1134}
1135DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
1136
1137static ssize_t target_stat_scsi_transport_show_attr_indx(
1138 struct se_port_stat_grps *pgrps, char *page)
1139{
1140 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1141 struct se_port *sep;
1142 struct se_portal_group *tpg;
1143 ssize_t ret;
1144
1145 spin_lock(&lun->lun_sep_lock);
1146 sep = lun->lun_sep;
1147 if (!sep) {
1148 spin_unlock(&lun->lun_sep_lock);
1149 return -ENODEV;
1150 }
1151 tpg = sep->sep_tpg;
1152 ret = snprintf(page, PAGE_SIZE, "%u\n",
1153 TPG_TFO(tpg)->tpg_get_inst_index(tpg));
1154 spin_unlock(&lun->lun_sep_lock);
1155 return ret;
1156}
1157DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
1158
1159static ssize_t target_stat_scsi_transport_show_attr_dev_name(
1160 struct se_port_stat_grps *pgrps, char *page)
1161{
1162 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1163 struct se_device *dev = lun->lun_se_dev;
1164 struct se_port *sep;
1165 struct se_portal_group *tpg;
1166 struct t10_wwn *wwn;
1167 ssize_t ret;
1168
1169 spin_lock(&lun->lun_sep_lock);
1170 sep = lun->lun_sep;
1171 if (!sep) {
1172 spin_unlock(&lun->lun_sep_lock);
1173 return -ENODEV;
1174 }
1175 tpg = sep->sep_tpg;
1176 wwn = DEV_T10_WWN(dev);
1177 /* scsiTransportDevName */
1178 ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
1179 TPG_TFO(tpg)->tpg_get_wwn(tpg),
1180 (strlen(wwn->unit_serial)) ? wwn->unit_serial :
1181 wwn->vendor);
1182 spin_unlock(&lun->lun_sep_lock);
1183 return ret;
1184}
1185DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
1186
1187CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps,
1188 scsi_transport_group);
1189
1190static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
1191 &target_stat_scsi_transport_inst.attr,
1192 &target_stat_scsi_transport_device.attr,
1193 &target_stat_scsi_transport_indx.attr,
1194 &target_stat_scsi_transport_dev_name.attr,
1195 NULL,
1196};
1197
1198static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {
1199 .show_attribute = target_stat_scsi_transport_attr_show,
1200 .store_attribute = target_stat_scsi_transport_attr_store,
1201};
1202
1203static struct config_item_type target_stat_scsi_transport_cit = {
1204 .ct_item_ops = &target_stat_scsi_transport_attrib_ops,
1205 .ct_attrs = target_stat_scsi_transport_attrs,
1206 .ct_owner = THIS_MODULE,
1207};
1208
1209/*
1210 * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup
1211 * the target port statistics groups + configfs CITs located in target_core_stat.c
1212 */
1213void target_stat_setup_port_default_groups(struct se_lun *lun)
1214{
1215 struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
1216
1217 config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
1218 "scsi_port", &target_stat_scsi_port_cit);
1219 config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
1220 "scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
1221 config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
1222 "scsi_transport", &target_stat_scsi_transport_cit);
1223
1224 port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
1225 port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
1226 port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
1227 port_stat_grp->default_groups[3] = NULL;
1228}
1229
1230/*
1231 * SCSI Authorized Initiator Table
1232 */
1233
1234CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps);
1235#define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \
1236static struct target_stat_scsi_auth_intr_attribute \
1237 target_stat_scsi_auth_intr_##_name = \
1238 __CONFIGFS_EATTR(_name, _mode, \
1239 target_stat_scsi_auth_intr_show_attr_##_name, \
1240 target_stat_scsi_auth_intr_store_attr_##_name);
1241
1242#define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \
1243static struct target_stat_scsi_auth_intr_attribute \
1244 target_stat_scsi_auth_intr_##_name = \
1245 __CONFIGFS_EATTR_RO(_name, \
1246 target_stat_scsi_auth_intr_show_attr_##_name);
1247
1248static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
1249 struct se_ml_stat_grps *lgrps, char *page)
1250{
1251 struct se_lun_acl *lacl = container_of(lgrps,
1252 struct se_lun_acl, ml_stat_grps);
1253 struct se_node_acl *nacl = lacl->se_lun_nacl;
1254 struct se_dev_entry *deve;
1255 struct se_portal_group *tpg;
1256 ssize_t ret;
1257
1258 spin_lock_irq(&nacl->device_list_lock);
1259 deve = &nacl->device_list[lacl->mapped_lun];
1260 if (!deve->se_lun || !deve->se_lun_acl) {
1261 spin_unlock_irq(&nacl->device_list_lock);
1262 return -ENODEV;
1263 }
1264 tpg = nacl->se_tpg;
1265 /* scsiInstIndex */
1266 ret = snprintf(page, PAGE_SIZE, "%u\n",
1267 TPG_TFO(tpg)->tpg_get_inst_index(tpg));
1268 spin_unlock_irq(&nacl->device_list_lock);
1269 return ret;
1270}
1271DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
1272
1273static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
1274 struct se_ml_stat_grps *lgrps, char *page)
1275{
1276 struct se_lun_acl *lacl = container_of(lgrps,
1277 struct se_lun_acl, ml_stat_grps);
1278 struct se_node_acl *nacl = lacl->se_lun_nacl;
1279 struct se_dev_entry *deve;
1280 struct se_lun *lun;
1281 struct se_portal_group *tpg;
1282 ssize_t ret;
1283
1284 spin_lock_irq(&nacl->device_list_lock);
1285 deve = &nacl->device_list[lacl->mapped_lun];
1286 if (!deve->se_lun || !deve->se_lun_acl) {
1287 spin_unlock_irq(&nacl->device_list_lock);
1288 return -ENODEV;
1289 }
1290 tpg = nacl->se_tpg;
1291 lun = deve->se_lun;
1292 /* scsiDeviceIndex */
1293 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
1294 spin_unlock_irq(&nacl->device_list_lock);
1295 return ret;
1296}
1297DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
1298
1299static ssize_t target_stat_scsi_auth_intr_show_attr_port(
1300 struct se_ml_stat_grps *lgrps, char *page)
1301{
1302 struct se_lun_acl *lacl = container_of(lgrps,
1303 struct se_lun_acl, ml_stat_grps);
1304 struct se_node_acl *nacl = lacl->se_lun_nacl;
1305 struct se_dev_entry *deve;
1306 struct se_portal_group *tpg;
1307 ssize_t ret;
1308
1309 spin_lock_irq(&nacl->device_list_lock);
1310 deve = &nacl->device_list[lacl->mapped_lun];
1311 if (!deve->se_lun || !deve->se_lun_acl) {
1312 spin_unlock_irq(&nacl->device_list_lock);
1313 return -ENODEV;
1314 }
1315 tpg = nacl->se_tpg;
1316 /* scsiAuthIntrTgtPortIndex */
1317 ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
1318 spin_unlock_irq(&nacl->device_list_lock);
1319 return ret;
1320}
1321DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
1322
1323static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
1324 struct se_ml_stat_grps *lgrps, char *page)
1325{
1326 struct se_lun_acl *lacl = container_of(lgrps,
1327 struct se_lun_acl, ml_stat_grps);
1328 struct se_node_acl *nacl = lacl->se_lun_nacl;
1329 struct se_dev_entry *deve;
1330 ssize_t ret;
1331
1332 spin_lock_irq(&nacl->device_list_lock);
1333 deve = &nacl->device_list[lacl->mapped_lun];
1334 if (!deve->se_lun || !deve->se_lun_acl) {
1335 spin_unlock_irq(&nacl->device_list_lock);
1336 return -ENODEV;
1337 }
1338 /* scsiAuthIntrIndex */
1339 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
1340 spin_unlock_irq(&nacl->device_list_lock);
1341 return ret;
1342}
1343DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
1344
1345static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
1346 struct se_ml_stat_grps *lgrps, char *page)
1347{
1348 struct se_lun_acl *lacl = container_of(lgrps,
1349 struct se_lun_acl, ml_stat_grps);
1350 struct se_node_acl *nacl = lacl->se_lun_nacl;
1351 struct se_dev_entry *deve;
1352 ssize_t ret;
1353
1354 spin_lock_irq(&nacl->device_list_lock);
1355 deve = &nacl->device_list[lacl->mapped_lun];
1356 if (!deve->se_lun || !deve->se_lun_acl) {
1357 spin_unlock_irq(&nacl->device_list_lock);
1358 return -ENODEV;
1359 }
1360 /* scsiAuthIntrDevOrPort */
1361 ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
1362 spin_unlock_irq(&nacl->device_list_lock);
1363 return ret;
1364}
1365DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
1366
1367static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
1368 struct se_ml_stat_grps *lgrps, char *page)
1369{
1370 struct se_lun_acl *lacl = container_of(lgrps,
1371 struct se_lun_acl, ml_stat_grps);
1372 struct se_node_acl *nacl = lacl->se_lun_nacl;
1373 struct se_dev_entry *deve;
1374 ssize_t ret;
1375
1376 spin_lock_irq(&nacl->device_list_lock);
1377 deve = &nacl->device_list[lacl->mapped_lun];
1378 if (!deve->se_lun || !deve->se_lun_acl) {
1379 spin_unlock_irq(&nacl->device_list_lock);
1380 return -ENODEV;
1381 }
1382 /* scsiAuthIntrName */
1383 ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
1384 spin_unlock_irq(&nacl->device_list_lock);
1385 return ret;
1386}
1387DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
1388
1389static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
1390 struct se_ml_stat_grps *lgrps, char *page)
1391{
1392 struct se_lun_acl *lacl = container_of(lgrps,
1393 struct se_lun_acl, ml_stat_grps);
1394 struct se_node_acl *nacl = lacl->se_lun_nacl;
1395 struct se_dev_entry *deve;
1396 ssize_t ret;
1397
1398 spin_lock_irq(&nacl->device_list_lock);
1399 deve = &nacl->device_list[lacl->mapped_lun];
1400 if (!deve->se_lun || !deve->se_lun_acl) {
1401 spin_unlock_irq(&nacl->device_list_lock);
1402 return -ENODEV;
1403 }
1404 /* FIXME: scsiAuthIntrLunMapIndex */
1405 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
1406 spin_unlock_irq(&nacl->device_list_lock);
1407 return ret;
1408}
1409DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
1410
1411static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
1412 struct se_ml_stat_grps *lgrps, char *page)
1413{
1414 struct se_lun_acl *lacl = container_of(lgrps,
1415 struct se_lun_acl, ml_stat_grps);
1416 struct se_node_acl *nacl = lacl->se_lun_nacl;
1417 struct se_dev_entry *deve;
1418 ssize_t ret;
1419
1420 spin_lock_irq(&nacl->device_list_lock);
1421 deve = &nacl->device_list[lacl->mapped_lun];
1422 if (!deve->se_lun || !deve->se_lun_acl) {
1423 spin_unlock_irq(&nacl->device_list_lock);
1424 return -ENODEV;
1425 }
1426 /* scsiAuthIntrAttachedTimes */
1427 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
1428 spin_unlock_irq(&nacl->device_list_lock);
1429 return ret;
1430}
1431DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
1432
1433static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
1434 struct se_ml_stat_grps *lgrps, char *page)
1435{
1436 struct se_lun_acl *lacl = container_of(lgrps,
1437 struct se_lun_acl, ml_stat_grps);
1438 struct se_node_acl *nacl = lacl->se_lun_nacl;
1439 struct se_dev_entry *deve;
1440 ssize_t ret;
1441
1442 spin_lock_irq(&nacl->device_list_lock);
1443 deve = &nacl->device_list[lacl->mapped_lun];
1444 if (!deve->se_lun || !deve->se_lun_acl) {
1445 spin_unlock_irq(&nacl->device_list_lock);
1446 return -ENODEV;
1447 }
1448 /* scsiAuthIntrOutCommands */
1449 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
1450 spin_unlock_irq(&nacl->device_list_lock);
1451 return ret;
1452}
1453DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
1454
1455static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
1456 struct se_ml_stat_grps *lgrps, char *page)
1457{
1458 struct se_lun_acl *lacl = container_of(lgrps,
1459 struct se_lun_acl, ml_stat_grps);
1460 struct se_node_acl *nacl = lacl->se_lun_nacl;
1461 struct se_dev_entry *deve;
1462 ssize_t ret;
1463
1464 spin_lock_irq(&nacl->device_list_lock);
1465 deve = &nacl->device_list[lacl->mapped_lun];
1466 if (!deve->se_lun || !deve->se_lun_acl) {
1467 spin_unlock_irq(&nacl->device_list_lock);
1468 return -ENODEV;
1469 }
1470 /* scsiAuthIntrReadMegaBytes */
1471 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
1472 spin_unlock_irq(&nacl->device_list_lock);
1473 return ret;
1474}
1475DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
1476
1477static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
1478 struct se_ml_stat_grps *lgrps, char *page)
1479{
1480 struct se_lun_acl *lacl = container_of(lgrps,
1481 struct se_lun_acl, ml_stat_grps);
1482 struct se_node_acl *nacl = lacl->se_lun_nacl;
1483 struct se_dev_entry *deve;
1484 ssize_t ret;
1485
1486 spin_lock_irq(&nacl->device_list_lock);
1487 deve = &nacl->device_list[lacl->mapped_lun];
1488 if (!deve->se_lun || !deve->se_lun_acl) {
1489 spin_unlock_irq(&nacl->device_list_lock);
1490 return -ENODEV;
1491 }
1492 /* scsiAuthIntrWrittenMegaBytes */
1493 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
1494 spin_unlock_irq(&nacl->device_list_lock);
1495 return ret;
1496}
1497DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
1498
1499static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
1500 struct se_ml_stat_grps *lgrps, char *page)
1501{
1502 struct se_lun_acl *lacl = container_of(lgrps,
1503 struct se_lun_acl, ml_stat_grps);
1504 struct se_node_acl *nacl = lacl->se_lun_nacl;
1505 struct se_dev_entry *deve;
1506 ssize_t ret;
1507
1508 spin_lock_irq(&nacl->device_list_lock);
1509 deve = &nacl->device_list[lacl->mapped_lun];
1510 if (!deve->se_lun || !deve->se_lun_acl) {
1511 spin_unlock_irq(&nacl->device_list_lock);
1512 return -ENODEV;
1513 }
1514 /* FIXME: scsiAuthIntrHSOutCommands */
1515 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
1516 spin_unlock_irq(&nacl->device_list_lock);
1517 return ret;
1518}
1519DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
1520
1521static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
1522 struct se_ml_stat_grps *lgrps, char *page)
1523{
1524 struct se_lun_acl *lacl = container_of(lgrps,
1525 struct se_lun_acl, ml_stat_grps);
1526 struct se_node_acl *nacl = lacl->se_lun_nacl;
1527 struct se_dev_entry *deve;
1528 ssize_t ret;
1529
1530 spin_lock_irq(&nacl->device_list_lock);
1531 deve = &nacl->device_list[lacl->mapped_lun];
1532 if (!deve->se_lun || !deve->se_lun_acl) {
1533 spin_unlock_irq(&nacl->device_list_lock);
1534 return -ENODEV;
1535 }
1536 /* scsiAuthIntrLastCreation */
1537 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
1538 INITIAL_JIFFIES) * 100 / HZ));
1539 spin_unlock_irq(&nacl->device_list_lock);
1540 return ret;
1541}
1542DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
1543
1544static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
1545 struct se_ml_stat_grps *lgrps, char *page)
1546{
1547 struct se_lun_acl *lacl = container_of(lgrps,
1548 struct se_lun_acl, ml_stat_grps);
1549 struct se_node_acl *nacl = lacl->se_lun_nacl;
1550 struct se_dev_entry *deve;
1551 ssize_t ret;
1552
1553 spin_lock_irq(&nacl->device_list_lock);
1554 deve = &nacl->device_list[lacl->mapped_lun];
1555 if (!deve->se_lun || !deve->se_lun_acl) {
1556 spin_unlock_irq(&nacl->device_list_lock);
1557 return -ENODEV;
1558 }
1559 /* FIXME: scsiAuthIntrRowStatus */
1560 ret = snprintf(page, PAGE_SIZE, "Ready\n");
1561 spin_unlock_irq(&nacl->device_list_lock);
1562 return ret;
1563}
1564DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
1565
1566CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps,
1567 scsi_auth_intr_group);
1568
1569static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
1570 &target_stat_scsi_auth_intr_inst.attr,
1571 &target_stat_scsi_auth_intr_dev.attr,
1572 &target_stat_scsi_auth_intr_port.attr,
1573 &target_stat_scsi_auth_intr_indx.attr,
1574 &target_stat_scsi_auth_intr_dev_or_port.attr,
1575 &target_stat_scsi_auth_intr_intr_name.attr,
1576 &target_stat_scsi_auth_intr_map_indx.attr,
1577 &target_stat_scsi_auth_intr_att_count.attr,
1578 &target_stat_scsi_auth_intr_num_cmds.attr,
1579 &target_stat_scsi_auth_intr_read_mbytes.attr,
1580 &target_stat_scsi_auth_intr_write_mbytes.attr,
1581 &target_stat_scsi_auth_intr_hs_num_cmds.attr,
1582 &target_stat_scsi_auth_intr_creation_time.attr,
1583 &target_stat_scsi_auth_intr_row_status.attr,
1584 NULL,
1585};
1586
1587static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {
1588 .show_attribute = target_stat_scsi_auth_intr_attr_show,
1589 .store_attribute = target_stat_scsi_auth_intr_attr_store,
1590};
1591
1592static struct config_item_type target_stat_scsi_auth_intr_cit = {
1593 .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops,
1594 .ct_attrs = target_stat_scsi_auth_intr_attrs,
1595 .ct_owner = THIS_MODULE,
1596};
1597
1598/*
1599 * SCSI Attached Initiator Port Table
1600 */
1601
1602CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps);
1603#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \
1604static struct target_stat_scsi_att_intr_port_attribute \
1605 target_stat_scsi_att_intr_port_##_name = \
1606 __CONFIGFS_EATTR(_name, _mode, \
1607 target_stat_scsi_att_intr_port_show_attr_##_name, \
1608 target_stat_scsi_att_intr_port_store_attr_##_name);
1609
1610#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \
1611static struct target_stat_scsi_att_intr_port_attribute \
1612 target_stat_scsi_att_intr_port_##_name = \
1613 __CONFIGFS_EATTR_RO(_name, \
1614 target_stat_scsi_att_intr_port_show_attr_##_name);
1615
1616static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
1617 struct se_ml_stat_grps *lgrps, char *page)
1618{
1619 struct se_lun_acl *lacl = container_of(lgrps,
1620 struct se_lun_acl, ml_stat_grps);
1621 struct se_node_acl *nacl = lacl->se_lun_nacl;
1622 struct se_dev_entry *deve;
1623 struct se_portal_group *tpg;
1624 ssize_t ret;
1625
1626 spin_lock_irq(&nacl->device_list_lock);
1627 deve = &nacl->device_list[lacl->mapped_lun];
1628 if (!deve->se_lun || !deve->se_lun_acl) {
1629 spin_unlock_irq(&nacl->device_list_lock);
1630 return -ENODEV;
1631 }
1632 tpg = nacl->se_tpg;
1633 /* scsiInstIndex */
1634 ret = snprintf(page, PAGE_SIZE, "%u\n",
1635 TPG_TFO(tpg)->tpg_get_inst_index(tpg));
1636 spin_unlock_irq(&nacl->device_list_lock);
1637 return ret;
1638}
1639DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
1640
1641static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
1642 struct se_ml_stat_grps *lgrps, char *page)
1643{
1644 struct se_lun_acl *lacl = container_of(lgrps,
1645 struct se_lun_acl, ml_stat_grps);
1646 struct se_node_acl *nacl = lacl->se_lun_nacl;
1647 struct se_dev_entry *deve;
1648 struct se_lun *lun;
1649 struct se_portal_group *tpg;
1650 ssize_t ret;
1651
1652 spin_lock_irq(&nacl->device_list_lock);
1653 deve = &nacl->device_list[lacl->mapped_lun];
1654 if (!deve->se_lun || !deve->se_lun_acl) {
1655 spin_unlock_irq(&nacl->device_list_lock);
1656 return -ENODEV;
1657 }
1658 tpg = nacl->se_tpg;
1659 lun = deve->se_lun;
1660 /* scsiDeviceIndex */
1661 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
1662 spin_unlock_irq(&nacl->device_list_lock);
1663 return ret;
1664}
1665DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
1666
1667static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
1668 struct se_ml_stat_grps *lgrps, char *page)
1669{
1670 struct se_lun_acl *lacl = container_of(lgrps,
1671 struct se_lun_acl, ml_stat_grps);
1672 struct se_node_acl *nacl = lacl->se_lun_nacl;
1673 struct se_dev_entry *deve;
1674 struct se_portal_group *tpg;
1675 ssize_t ret;
1676
1677 spin_lock_irq(&nacl->device_list_lock);
1678 deve = &nacl->device_list[lacl->mapped_lun];
1679 if (!deve->se_lun || !deve->se_lun_acl) {
1680 spin_unlock_irq(&nacl->device_list_lock);
1681 return -ENODEV;
1682 }
1683 tpg = nacl->se_tpg;
1684 /* scsiPortIndex */
1685 ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
1686 spin_unlock_irq(&nacl->device_list_lock);
1687 return ret;
1688}
1689DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
1690
1691static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
1692 struct se_ml_stat_grps *lgrps, char *page)
1693{
1694 struct se_lun_acl *lacl = container_of(lgrps,
1695 struct se_lun_acl, ml_stat_grps);
1696 struct se_node_acl *nacl = lacl->se_lun_nacl;
1697 struct se_session *se_sess;
1698 struct se_portal_group *tpg;
1699 ssize_t ret;
1700
1701 spin_lock_irq(&nacl->nacl_sess_lock);
1702 se_sess = nacl->nacl_sess;
1703 if (!se_sess) {
1704 spin_unlock_irq(&nacl->nacl_sess_lock);
1705 return -ENODEV;
1706 }
1707
1708 tpg = nacl->se_tpg;
1709 /* scsiAttIntrPortIndex */
1710 ret = snprintf(page, PAGE_SIZE, "%u\n",
1711 TPG_TFO(tpg)->sess_get_index(se_sess));
1712 spin_unlock_irq(&nacl->nacl_sess_lock);
1713 return ret;
1714}
1715DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx);
1716
1717static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
1718 struct se_ml_stat_grps *lgrps, char *page)
1719{
1720 struct se_lun_acl *lacl = container_of(lgrps,
1721 struct se_lun_acl, ml_stat_grps);
1722 struct se_node_acl *nacl = lacl->se_lun_nacl;
1723 struct se_dev_entry *deve;
1724 ssize_t ret;
1725
1726 spin_lock_irq(&nacl->device_list_lock);
1727 deve = &nacl->device_list[lacl->mapped_lun];
1728 if (!deve->se_lun || !deve->se_lun_acl) {
1729 spin_unlock_irq(&nacl->device_list_lock);
1730 return -ENODEV;
1731 }
1732 /* scsiAttIntrPortAuthIntrIdx */
1733 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
1734 spin_unlock_irq(&nacl->device_list_lock);
1735 return ret;
1736}
1737DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
1738
1739static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
1740 struct se_ml_stat_grps *lgrps, char *page)
1741{
1742 struct se_lun_acl *lacl = container_of(lgrps,
1743 struct se_lun_acl, ml_stat_grps);
1744 struct se_node_acl *nacl = lacl->se_lun_nacl;
1745 struct se_session *se_sess;
1746 struct se_portal_group *tpg;
1747 ssize_t ret;
1748 unsigned char buf[64];
1749
1750 spin_lock_irq(&nacl->nacl_sess_lock);
1751 se_sess = nacl->nacl_sess;
1752 if (!se_sess) {
1753 spin_unlock_irq(&nacl->nacl_sess_lock);
1754 return -ENODEV;
1755 }
1756
1757 tpg = nacl->se_tpg;
1758 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
1759 memset(buf, 0, 64);
1760 if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
1761 TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
1762 (unsigned char *)&buf[0], 64);
1763
1764 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
1765 spin_unlock_irq(&nacl->nacl_sess_lock);
1766 return ret;
1767}
1768DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident);
1769
1770CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps,
1771 scsi_att_intr_port_group);
1772
1773static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = {
1774 &target_stat_scsi_att_intr_port_inst.attr,
1775 &target_stat_scsi_att_intr_port_dev.attr,
1776 &target_stat_scsi_att_intr_port_port.attr,
1777 &target_stat_scsi_att_intr_port_indx.attr,
1778 &target_stat_scsi_att_intr_port_port_auth_indx.attr,
1779 &target_stat_scsi_att_intr_port_port_ident.attr,
1780 NULL,
1781};
1782
1783static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {
1784 .show_attribute = target_stat_scsi_att_intr_port_attr_show,
1785 .store_attribute = target_stat_scsi_att_intr_port_attr_store,
1786};
1787
1788static struct config_item_type target_stat_scsi_att_intr_port_cit = {
1789 .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops,
1790	.ct_attrs		= target_stat_scsi_att_intr_port_attrs,
1791 .ct_owner = THIS_MODULE,
1792};
1793
1794/*
1795 * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up
1796 * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
1797 */
1798void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
1799{
1800 struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
1801
1802 config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
1803 "scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
1804 config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
1805 "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
1806
1807 ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
1808 ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
1809 ml_stat_grp->default_groups[2] = NULL;
1810}
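/*
 * Usage sketch (assuming this file's includes): the two helpers above are
 * expected to be called once each from the generic fabric configfs code when
 * a LUN or MappedLUN directory is created, so that the statistics groups are
 * registered as configfs default groups.  The wrapper names below are
 * illustrative assumptions, not the real call sites.
 */
static void example_stat_groups_for_new_lun(struct se_lun *lun)
{
	/* registers the scsi_port, scsi_tgt_port and scsi_transport groups */
	target_stat_setup_port_default_groups(lun);
}

static void example_stat_groups_for_new_mappedlun(struct se_lun_acl *lacl)
{
	/* registers the scsi_auth_intr and scsi_att_intr_port groups */
	target_stat_setup_mappedlun_default_groups(lacl);
}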
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
new file mode 100644
index 000000000000..86c252f9ea47
--- /dev/null
+++ b/drivers/target/target_core_stat.h
@@ -0,0 +1,8 @@
1#ifndef TARGET_CORE_STAT_H
2#define TARGET_CORE_STAT_H
3
4extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
5extern void target_stat_setup_port_default_groups(struct se_lun *);
6extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
7
8#endif /*** TARGET_CORE_STAT_H ***/
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 000000000000..179063d81cdd
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,416 @@
1/*******************************************************************************
2 * Filename: target_core_tmr.c
3 *
4 * This file contains SPC-3 task management infrastructure
5 *
6 * Copyright (c) 2009,2010 Rising Tide Systems
7 * Copyright (c) 2009,2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27#include <linux/version.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <linux/list.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_device.h>
36#include <target/target_core_tmr.h>
37#include <target/target_core_transport.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_configfs.h>
40
41#include "target_core_alua.h"
42#include "target_core_pr.h"
43
44#define DEBUG_LUN_RESET
45#ifdef DEBUG_LUN_RESET
46#define DEBUG_LR(x...) printk(KERN_INFO x)
47#else
48#define DEBUG_LR(x...)
49#endif
50
51struct se_tmr_req *core_tmr_alloc_req(
52 struct se_cmd *se_cmd,
53 void *fabric_tmr_ptr,
54 u8 function)
55{
56 struct se_tmr_req *tmr;
57
58 tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
59 GFP_ATOMIC : GFP_KERNEL);
60 if (!(tmr)) {
61 printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
62 return ERR_PTR(-ENOMEM);
63 }
64 tmr->task_cmd = se_cmd;
65 tmr->fabric_tmr_ptr = fabric_tmr_ptr;
66 tmr->function = function;
67 INIT_LIST_HEAD(&tmr->tmr_list);
68
69 return tmr;
70}
71EXPORT_SYMBOL(core_tmr_alloc_req);
72
73void core_tmr_release_req(
74 struct se_tmr_req *tmr)
75{
76 struct se_device *dev = tmr->tmr_dev;
77
78 if (!dev) {
79 kmem_cache_free(se_tmr_req_cache, tmr);
80 return;
81 }
82
83 spin_lock(&dev->se_tmr_lock);
84 list_del(&tmr->tmr_list);
85 spin_unlock(&dev->se_tmr_lock);
86
87 kmem_cache_free(se_tmr_req_cache, tmr);
88}
89
90static void core_tmr_handle_tas_abort(
91 struct se_node_acl *tmr_nacl,
92 struct se_cmd *cmd,
93 int tas,
94 int fe_count)
95{
96 if (!(fe_count)) {
97 transport_cmd_finish_abort(cmd, 1);
98 return;
99 }
100 /*
101 * TASK ABORTED status (TAS) bit support
102 */
103 if (((tmr_nacl != NULL) &&
104 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
105 transport_send_task_abort(cmd);
106
107 transport_cmd_finish_abort(cmd, 0);
108}
109
110int core_tmr_lun_reset(
111 struct se_device *dev,
112 struct se_tmr_req *tmr,
113 struct list_head *preempt_and_abort_list,
114 struct se_cmd *prout_cmd)
115{
116 struct se_cmd *cmd;
117 struct se_queue_req *qr, *qr_tmp;
118 struct se_node_acl *tmr_nacl = NULL;
119 struct se_portal_group *tmr_tpg = NULL;
120 struct se_queue_obj *qobj = dev->dev_queue_obj;
121 struct se_tmr_req *tmr_p, *tmr_pp;
122 struct se_task *task, *task_tmp;
123 unsigned long flags;
124 int fe_count, state, tas;
125 /*
126	 * The TASK ABORTED status (TAS) bit is configurable via ConfigFS
127 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
128 *
129 * A task aborted status (TAS) bit set to zero specifies that aborted
130 * tasks shall be terminated by the device server without any response
131 * to the application client. A TAS bit set to one specifies that tasks
132 * aborted by the actions of an I_T nexus other than the I_T nexus on
133 * which the command was received shall be completed with TASK ABORTED
134 * status (see SAM-4).
135 */
136 tas = DEV_ATTRIB(dev)->emulate_tas;
137 /*
138 * Determine if this se_tmr is coming from a $FABRIC_MOD
139 * or struct se_device passthrough..
140 */
141 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
142 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
143 tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
144 if (tmr_nacl && tmr_tpg) {
145 DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
146 " initiator port %s\n",
147 TPG_TFO(tmr_tpg)->get_fabric_name(),
148 tmr_nacl->initiatorname);
149 }
150 }
151 DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
152 (preempt_and_abort_list) ? "Preempt" : "TMR",
153 TRANSPORT(dev)->name, tas);
154 /*
155 * Release all pending and outgoing TMRs aside from the received
156 * LUN_RESET tmr..
157 */
158 spin_lock(&dev->se_tmr_lock);
159 list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
160 /*
161 * Allow the received TMR to return with FUNCTION_COMPLETE.
162 */
163 if (tmr && (tmr_p == tmr))
164 continue;
165
166 cmd = tmr_p->task_cmd;
167 if (!(cmd)) {
168 printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
169 continue;
170 }
171 /*
172		 * If this function was called with a valid pr_res_key
173		 * parameter (e.g., for the PROUT PREEMPT_AND_ABORT service action),
174		 * skip TMRs that do not match a registration key.
175 */
176 if ((preempt_and_abort_list != NULL) &&
177 (core_scsi3_check_cdb_abort_and_preempt(
178 preempt_and_abort_list, cmd) != 0))
179 continue;
180 spin_unlock(&dev->se_tmr_lock);
181
182 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
183 if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
184 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
185 spin_lock(&dev->se_tmr_lock);
186 continue;
187 }
188 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
189 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
190 spin_lock(&dev->se_tmr_lock);
191 continue;
192 }
193 DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
194 " Response: 0x%02x, t_state: %d\n",
195 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
196 tmr_p->function, tmr_p->response, cmd->t_state);
197 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
198
199 transport_cmd_finish_abort_tmr(cmd);
200 spin_lock(&dev->se_tmr_lock);
201 }
202 spin_unlock(&dev->se_tmr_lock);
203 /*
204 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
205 * This is following sam4r17, section 5.6 Aborting commands, Table 38
206 * for TMR LUN_RESET:
207 *
208 * a) "Yes" indicates that each command that is aborted on an I_T nexus
209 * other than the one that caused the SCSI device condition is
210 * completed with TASK ABORTED status, if the TAS bit is set to one in
211 * the Control mode page (see SPC-4). "No" indicates that no status is
212 * returned for aborted commands.
213 *
214 * d) If the logical unit reset is caused by a particular I_T nexus
215 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
216 * (TASK_ABORTED status) applies.
217 *
218 * Otherwise (e.g., if triggered by a hard reset), "no"
219 * (no TASK_ABORTED SAM status) applies.
220 *
221 * Note that this seems to be independent of TAS (Task Aborted Status)
222 * in the Control Mode Page.
223 */
224 spin_lock_irqsave(&dev->execute_task_lock, flags);
225 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
226 t_state_list) {
227 if (!(TASK_CMD(task))) {
228 printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
229 continue;
230 }
231 cmd = TASK_CMD(task);
232
233 if (!T_TASK(cmd)) {
234 printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
235 " %p ITT: 0x%08x\n", task, cmd,
236 CMD_TFO(cmd)->get_task_tag(cmd));
237 continue;
238 }
239 /*
240 * For PREEMPT_AND_ABORT usage, only process commands
241 * with a matching reservation key.
242 */
243 if ((preempt_and_abort_list != NULL) &&
244 (core_scsi3_check_cdb_abort_and_preempt(
245 preempt_and_abort_list, cmd) != 0))
246 continue;
247 /*
248 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
249 */
250 if (prout_cmd == cmd)
251 continue;
252
253 list_del(&task->t_state_list);
254 atomic_set(&task->task_state_active, 0);
255 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
256
257 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
258 DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
259 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
260 "def_t_state: %d/%d cdb: 0x%02x\n",
261 (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
262 CMD_TFO(cmd)->get_task_tag(cmd), 0,
263 CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
264 cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
265 DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
266 " t_task_cdbs: %d t_task_cdbs_left: %d"
267 " t_task_cdbs_sent: %d -- t_transport_active: %d"
268 " t_transport_stop: %d t_transport_sent: %d\n",
269 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
270 T_TASK(cmd)->t_task_cdbs,
271 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
272 atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
273 atomic_read(&T_TASK(cmd)->t_transport_active),
274 atomic_read(&T_TASK(cmd)->t_transport_stop),
275 atomic_read(&T_TASK(cmd)->t_transport_sent));
276
277 if (atomic_read(&task->task_active)) {
278 atomic_set(&task->task_stop, 1);
279 spin_unlock_irqrestore(
280 &T_TASK(cmd)->t_state_lock, flags);
281
282			DEBUG_LR("LUN_RESET: Waiting for task: %p to shut down"
283 " for dev: %p\n", task, dev);
284 wait_for_completion(&task->task_stop_comp);
285 DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
286 " dev: %p\n", task, dev);
287 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
288 atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
289
290 atomic_set(&task->task_active, 0);
291 atomic_set(&task->task_stop, 0);
292 } else {
293 if (atomic_read(&task->task_execute_queue) != 0)
294 transport_remove_task_from_execute_queue(task, dev);
295 }
296 __transport_stop_task_timer(task, &flags);
297
298 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
299 spin_unlock_irqrestore(
300 &T_TASK(cmd)->t_state_lock, flags);
301 DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
302 " t_task_cdbs_ex_left: %d\n", task, dev,
303 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
304
305 spin_lock_irqsave(&dev->execute_task_lock, flags);
306 continue;
307 }
308 fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
309
310 if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
311 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
312 " task: %p, t_fe_count: %d dev: %p\n", task,
313 fe_count, dev);
314 atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
315 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
316 flags);
317 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
318
319 spin_lock_irqsave(&dev->execute_task_lock, flags);
320 continue;
321 }
322 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
323 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
324 atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
325 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
326 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
327
328 spin_lock_irqsave(&dev->execute_task_lock, flags);
329 }
330 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
331 /*
332 * Release all commands remaining in the struct se_device cmd queue.
333 *
334 * This follows the same logic as above for the struct se_device
335 * struct se_task state list, where commands are returned with
336 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
337 * reference, otherwise the struct se_cmd is released.
338 */
339 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
340 list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
341 cmd = (struct se_cmd *)qr->cmd;
342 if (!(cmd)) {
343 /*
344			 * Skip these for non-PREEMPT_AND_ABORT usage.
345 */
346 if (preempt_and_abort_list != NULL)
347 continue;
348
349 atomic_dec(&qobj->queue_cnt);
350 list_del(&qr->qr_list);
351 kfree(qr);
352 continue;
353 }
354 /*
355 * For PREEMPT_AND_ABORT usage, only process commands
356 * with a matching reservation key.
357 */
358 if ((preempt_and_abort_list != NULL) &&
359 (core_scsi3_check_cdb_abort_and_preempt(
360 preempt_and_abort_list, cmd) != 0))
361 continue;
362 /*
363 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
364 */
365 if (prout_cmd == cmd)
366 continue;
367
368 atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
369 atomic_dec(&qobj->queue_cnt);
370 list_del(&qr->qr_list);
371 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
372
373 state = qr->state;
374 kfree(qr);
375
376 DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
377 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
378 "Preempt" : "", cmd, state,
379 atomic_read(&T_TASK(cmd)->t_fe_count));
380 /*
381 * Signal that the command has failed via cmd->se_cmd_flags,
382 * and call TFO->new_cmd_failure() to wakeup any fabric
383 * dependent code used to wait for unsolicited data out
384 * allocation to complete. The fabric module is expected
385 * to dump any remaining unsolicited data out for the aborted
386 * command at this point.
387 */
388 transport_new_cmd_failure(cmd);
389
390 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
391 atomic_read(&T_TASK(cmd)->t_fe_count));
392 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
393 }
394 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
395 /*
396 * Clear any legacy SPC-2 reservation when called during
397 * LOGICAL UNIT RESET
398 */
399 if (!(preempt_and_abort_list) &&
400 (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
401 spin_lock(&dev->dev_reservation_lock);
402 dev->dev_reserved_node_acl = NULL;
403 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
404 spin_unlock(&dev->dev_reservation_lock);
405 printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
406 }
407
408 spin_lock_irq(&dev->stats_lock);
409 dev->num_resets++;
410 spin_unlock_irq(&dev->stats_lock);
411
412 DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
413 (preempt_and_abort_list) ? "Preempt" : "TMR",
414 TRANSPORT(dev)->name);
415 return 0;
416}
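/*
 * Usage sketch for the interfaces above (assuming this file's includes), for
 * a LUN RESET arriving from a fabric module: allocate the request with
 * core_tmr_alloc_req(), run core_tmr_lun_reset() against the addressed device
 * (a NULL preempt_and_abort_list and prout_cmd mean a plain TMR rather than a
 * PREEMPT_AND_ABORT), then free it with core_tmr_release_req().  The
 * TMR_LUN_RESET function code and the wrapper name are assumptions; real
 * fabric modules hand the TMR to the generic transport processing path
 * instead of calling core_tmr_lun_reset() directly.
 */
static int example_handle_lun_reset(struct se_cmd *se_cmd,
				    struct se_device *dev,
				    void *fabric_tmr_ptr)
{
	struct se_tmr_req *tmr;
	int ret;

	tmr = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, TMR_LUN_RESET);
	if (IS_ERR(tmr))
		return PTR_ERR(tmr);

	ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
	core_tmr_release_req(tmr);
	return ret;
}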
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 000000000000..5ec745fed931
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,838 @@
1/*******************************************************************************
2 * Filename: target_core_tpg.c
3 *
4 * This file contains generic Target Portal Group related functions.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/in.h>
35#include <net/sock.h>
36#include <net/tcp.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_cmnd.h>
39
40#include <target/target_core_base.h>
41#include <target/target_core_device.h>
42#include <target/target_core_tpg.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45
46#include "target_core_hba.h"
47
48/* core_clear_initiator_node_from_tpg():
49 *
50 *
51 */
52static void core_clear_initiator_node_from_tpg(
53 struct se_node_acl *nacl,
54 struct se_portal_group *tpg)
55{
56 int i;
57 struct se_dev_entry *deve;
58 struct se_lun *lun;
59 struct se_lun_acl *acl, *acl_tmp;
60
61 spin_lock_irq(&nacl->device_list_lock);
62 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
63 deve = &nacl->device_list[i];
64
65 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
66 continue;
67
68 if (!deve->se_lun) {
69 printk(KERN_ERR "%s device entries device pointer is"
70 " NULL, but Initiator has access.\n",
71 TPG_TFO(tpg)->get_fabric_name());
72 continue;
73 }
74
75 lun = deve->se_lun;
76 spin_unlock_irq(&nacl->device_list_lock);
77 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
78 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
79
80 spin_lock(&lun->lun_acl_lock);
81 list_for_each_entry_safe(acl, acl_tmp,
82 &lun->lun_acl_list, lacl_list) {
83 if (!(strcmp(acl->initiatorname,
84 nacl->initiatorname)) &&
85 (acl->mapped_lun == deve->mapped_lun))
86 break;
87 }
88
89 if (!acl) {
90 printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
91 " mapped_lun: %u\n", nacl->initiatorname,
92 deve->mapped_lun);
93 spin_unlock(&lun->lun_acl_lock);
94 spin_lock_irq(&nacl->device_list_lock);
95 continue;
96 }
97
98 list_del(&acl->lacl_list);
99 spin_unlock(&lun->lun_acl_lock);
100
101 spin_lock_irq(&nacl->device_list_lock);
102 kfree(acl);
103 }
104 spin_unlock_irq(&nacl->device_list_lock);
105}
106
107/* __core_tpg_get_initiator_node_acl():
108 *
109 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
110 */
111struct se_node_acl *__core_tpg_get_initiator_node_acl(
112 struct se_portal_group *tpg,
113 const char *initiatorname)
114{
115 struct se_node_acl *acl;
116
117 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
118 if (!(strcmp(acl->initiatorname, initiatorname)))
119 return acl;
120 }
121
122 return NULL;
123}
124
125/* core_tpg_get_initiator_node_acl():
126 *
127 *
128 */
129struct se_node_acl *core_tpg_get_initiator_node_acl(
130 struct se_portal_group *tpg,
131 unsigned char *initiatorname)
132{
133 struct se_node_acl *acl;
134
135 spin_lock_bh(&tpg->acl_node_lock);
136 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
137 if (!(strcmp(acl->initiatorname, initiatorname)) &&
138 (!(acl->dynamic_node_acl))) {
139 spin_unlock_bh(&tpg->acl_node_lock);
140 return acl;
141 }
142 }
143 spin_unlock_bh(&tpg->acl_node_lock);
144
145 return NULL;
146}
147
148/* core_tpg_add_node_to_devs():
149 *
150 *
151 */
152void core_tpg_add_node_to_devs(
153 struct se_node_acl *acl,
154 struct se_portal_group *tpg)
155{
156 int i = 0;
157 u32 lun_access = 0;
158 struct se_lun *lun;
159 struct se_device *dev;
160
161 spin_lock(&tpg->tpg_lun_lock);
162 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
163 lun = &tpg->tpg_lun_list[i];
164 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
165 continue;
166
167 spin_unlock(&tpg->tpg_lun_lock);
168
169 dev = lun->lun_se_dev;
170 /*
171		 * By default in the LIO-Target $FABRIC_MOD demo_mode_write_protect
172		 * is ON, i.e. demo-mode LUNs default to READ_ONLY access.
173 */
174 if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
175 if (dev->dev_flags & DF_READ_ONLY)
176 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
177 else
178 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
179 } else {
180 /*
181 * Allow only optical drives to issue R/W in default RO
182 * demo mode.
183 */
184 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
185 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
186 else
187 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
188 }
189
190 printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
191 " access for LUN in Demo Mode\n",
192 TPG_TFO(tpg)->get_fabric_name(),
193 TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
194 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
195 "READ-WRITE" : "READ-ONLY");
196
197 core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
198 lun_access, acl, tpg, 1);
199 spin_lock(&tpg->tpg_lun_lock);
200 }
201 spin_unlock(&tpg->tpg_lun_lock);
202}
203
204/* core_set_queue_depth_for_node():
205 *
206 *
207 */
208static int core_set_queue_depth_for_node(
209 struct se_portal_group *tpg,
210 struct se_node_acl *acl)
211{
212 if (!acl->queue_depth) {
213 printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
214			" defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
215 acl->initiatorname);
216 acl->queue_depth = 1;
217 }
218
219 return 0;
220}
221
222/* core_create_device_list_for_node():
223 *
224 *
225 */
226static int core_create_device_list_for_node(struct se_node_acl *nacl)
227{
228 struct se_dev_entry *deve;
229 int i;
230
231 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
232 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
233 if (!(nacl->device_list)) {
234 printk(KERN_ERR "Unable to allocate memory for"
235 " struct se_node_acl->device_list\n");
236 return -1;
237 }
238 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
239 deve = &nacl->device_list[i];
240
241 atomic_set(&deve->ua_count, 0);
242 atomic_set(&deve->pr_ref_count, 0);
243 spin_lock_init(&deve->ua_lock);
244 INIT_LIST_HEAD(&deve->alua_port_list);
245 INIT_LIST_HEAD(&deve->ua_list);
246 }
247
248 return 0;
249}
250
251/* core_tpg_check_initiator_node_acl()
252 *
253 *
254 */
255struct se_node_acl *core_tpg_check_initiator_node_acl(
256 struct se_portal_group *tpg,
257 unsigned char *initiatorname)
258{
259 struct se_node_acl *acl;
260
261 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
262 if ((acl))
263 return acl;
264
265 if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
266 return NULL;
267
268 acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
269 if (!(acl))
270 return NULL;
271
272 INIT_LIST_HEAD(&acl->acl_list);
273 INIT_LIST_HEAD(&acl->acl_sess_list);
274 spin_lock_init(&acl->device_list_lock);
275 spin_lock_init(&acl->nacl_sess_lock);
276 atomic_set(&acl->acl_pr_ref_count, 0);
277 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
279 acl->se_tpg = tpg;
280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
281 spin_lock_init(&acl->stats_lock);
282 acl->dynamic_node_acl = 1;
283
284 TPG_TFO(tpg)->set_default_node_attributes(acl);
285
286 if (core_create_device_list_for_node(acl) < 0) {
287 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
288 return NULL;
289 }
290
291 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
292 core_free_device_list_for_node(acl, tpg);
293 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
294 return NULL;
295 }
296
297 core_tpg_add_node_to_devs(acl, tpg);
298
299 spin_lock_bh(&tpg->acl_node_lock);
300 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
301 tpg->num_node_acls++;
302 spin_unlock_bh(&tpg->acl_node_lock);
303
304	printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
305 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
306 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
307 TPG_TFO(tpg)->get_fabric_name(), initiatorname);
308
309 return acl;
310}
311EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
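/*
 * Usage sketch (assuming this file's includes): a fabric module typically
 * calls core_tpg_check_initiator_node_acl() while building a new session,
 * before registering the session with the core.  A NULL return means no
 * explicit NodeACL exists and demo mode is disabled, so the login should be
 * rejected.  The wrapper name below is an assumption.
 */
static struct se_node_acl *example_login_lookup_acl(
	struct se_portal_group *se_tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *se_nacl;

	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
	if (!se_nacl)
		return NULL;	/* no ACL and demo mode off: reject the login */

	return se_nacl;
}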
312
313void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
314{
315 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
316 cpu_relax();
317}
318
319void core_tpg_clear_object_luns(struct se_portal_group *tpg)
320{
321 int i, ret;
322 struct se_lun *lun;
323
324 spin_lock(&tpg->tpg_lun_lock);
325 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
326 lun = &tpg->tpg_lun_list[i];
327
328 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
329 (lun->lun_se_dev == NULL))
330 continue;
331
332 spin_unlock(&tpg->tpg_lun_lock);
333 ret = core_dev_del_lun(tpg, lun->unpacked_lun);
334 spin_lock(&tpg->tpg_lun_lock);
335 }
336 spin_unlock(&tpg->tpg_lun_lock);
337}
338EXPORT_SYMBOL(core_tpg_clear_object_luns);
339
340/* core_tpg_add_initiator_node_acl():
341 *
342 *
343 */
344struct se_node_acl *core_tpg_add_initiator_node_acl(
345 struct se_portal_group *tpg,
346 struct se_node_acl *se_nacl,
347 const char *initiatorname,
348 u32 queue_depth)
349{
350 struct se_node_acl *acl = NULL;
351
352 spin_lock_bh(&tpg->acl_node_lock);
353 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
354 if ((acl)) {
355 if (acl->dynamic_node_acl) {
356 acl->dynamic_node_acl = 0;
357 printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
358 " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
359 TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
360 spin_unlock_bh(&tpg->acl_node_lock);
361 /*
362 * Release the locally allocated struct se_node_acl
363 * because * core_tpg_add_initiator_node_acl() returned
364 * a pointer to an existing demo mode node ACL.
365 */
366 if (se_nacl)
367 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
368 se_nacl);
369 goto done;
370 }
371
372 printk(KERN_ERR "ACL entry for %s Initiator"
373 " Node %s already exists for TPG %u, ignoring"
374 " request.\n", TPG_TFO(tpg)->get_fabric_name(),
375 initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
376 spin_unlock_bh(&tpg->acl_node_lock);
377 return ERR_PTR(-EEXIST);
378 }
379 spin_unlock_bh(&tpg->acl_node_lock);
380
381 if (!(se_nacl)) {
382		printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
383 return ERR_PTR(-EINVAL);
384 }
385 /*
386 * For v4.x logic the se_node_acl_s is hanging off a fabric
387 * dependent structure allocated via
388 * struct target_core_fabric_ops->fabric_make_nodeacl()
389 */
390 acl = se_nacl;
391
392 INIT_LIST_HEAD(&acl->acl_list);
393 INIT_LIST_HEAD(&acl->acl_sess_list);
394 spin_lock_init(&acl->device_list_lock);
395 spin_lock_init(&acl->nacl_sess_lock);
396 atomic_set(&acl->acl_pr_ref_count, 0);
397 acl->queue_depth = queue_depth;
398 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
399 acl->se_tpg = tpg;
400 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
401 spin_lock_init(&acl->stats_lock);
402
403 TPG_TFO(tpg)->set_default_node_attributes(acl);
404
405 if (core_create_device_list_for_node(acl) < 0) {
406 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
407 return ERR_PTR(-ENOMEM);
408 }
409
410 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
411 core_free_device_list_for_node(acl, tpg);
412 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
413 return ERR_PTR(-EINVAL);
414 }
415
416 spin_lock_bh(&tpg->acl_node_lock);
417 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
418 tpg->num_node_acls++;
419 spin_unlock_bh(&tpg->acl_node_lock);
420
421done:
422 printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
423 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
424 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
425 TPG_TFO(tpg)->get_fabric_name(), initiatorname);
426
427 return acl;
428}
429EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
430
431/* core_tpg_del_initiator_node_acl():
432 *
433 *
434 */
435int core_tpg_del_initiator_node_acl(
436 struct se_portal_group *tpg,
437 struct se_node_acl *acl,
438 int force)
439{
440 struct se_session *sess, *sess_tmp;
441 int dynamic_acl = 0;
442
443 spin_lock_bh(&tpg->acl_node_lock);
444 if (acl->dynamic_node_acl) {
445 acl->dynamic_node_acl = 0;
446 dynamic_acl = 1;
447 }
448 list_del(&acl->acl_list);
449 tpg->num_node_acls--;
450 spin_unlock_bh(&tpg->acl_node_lock);
451
452 spin_lock_bh(&tpg->session_lock);
453 list_for_each_entry_safe(sess, sess_tmp,
454 &tpg->tpg_sess_list, sess_list) {
455 if (sess->se_node_acl != acl)
456 continue;
457 /*
458 * Determine if the session needs to be closed by our context.
459 */
460 if (!(TPG_TFO(tpg)->shutdown_session(sess)))
461 continue;
462
463 spin_unlock_bh(&tpg->session_lock);
464 /*
465 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
466		 * forcefully shut down the $FABRIC_MOD session/nexus.
467 */
468 TPG_TFO(tpg)->close_session(sess);
469
470 spin_lock_bh(&tpg->session_lock);
471 }
472 spin_unlock_bh(&tpg->session_lock);
473
474 core_tpg_wait_for_nacl_pr_ref(acl);
475 core_clear_initiator_node_from_tpg(acl, tpg);
476 core_free_device_list_for_node(acl, tpg);
477
478 printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
479 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
480 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
481 TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
482
483 return 0;
484}
485EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
486
487/* core_tpg_set_initiator_node_queue_depth():
488 *
489 *
490 */
491int core_tpg_set_initiator_node_queue_depth(
492 struct se_portal_group *tpg,
493 unsigned char *initiatorname,
494 u32 queue_depth,
495 int force)
496{
497 struct se_session *sess, *init_sess = NULL;
498 struct se_node_acl *acl;
499 int dynamic_acl = 0;
500
501 spin_lock_bh(&tpg->acl_node_lock);
502 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
503 if (!(acl)) {
504 printk(KERN_ERR "Access Control List entry for %s Initiator"
505			" Node %s does not exist for TPG %hu, ignoring"
506 " request.\n", TPG_TFO(tpg)->get_fabric_name(),
507 initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
508 spin_unlock_bh(&tpg->acl_node_lock);
509 return -ENODEV;
510 }
511 if (acl->dynamic_node_acl) {
512 acl->dynamic_node_acl = 0;
513 dynamic_acl = 1;
514 }
515 spin_unlock_bh(&tpg->acl_node_lock);
516
517 spin_lock_bh(&tpg->session_lock);
518 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
519 if (sess->se_node_acl != acl)
520 continue;
521
522 if (!force) {
523 printk(KERN_ERR "Unable to change queue depth for %s"
524 " Initiator Node: %s while session is"
525 " operational. To forcefully change the queue"
526 " depth and force session reinstatement"
527 " use the \"force=1\" parameter.\n",
528 TPG_TFO(tpg)->get_fabric_name(), initiatorname);
529 spin_unlock_bh(&tpg->session_lock);
530
531 spin_lock_bh(&tpg->acl_node_lock);
532 if (dynamic_acl)
533 acl->dynamic_node_acl = 1;
534 spin_unlock_bh(&tpg->acl_node_lock);
535 return -EEXIST;
536 }
537 /*
538 * Determine if the session needs to be closed by our context.
539 */
540 if (!(TPG_TFO(tpg)->shutdown_session(sess)))
541 continue;
542
543 init_sess = sess;
544 break;
545 }
546
547 /*
548	 * User has requested to change the queue depth for an Initiator Node.
549 * Change the value in the Node's struct se_node_acl, and call
550 * core_set_queue_depth_for_node() to add the requested queue depth.
551 *
552 * Finally call TPG_TFO(tpg)->close_session() to force session
553 * reinstatement to occur if there is an active session for the
554 * $FABRIC_MOD Initiator Node in question.
555 */
556 acl->queue_depth = queue_depth;
557
558 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
559 spin_unlock_bh(&tpg->session_lock);
560 /*
561 * Force session reinstatement if
562 * core_set_queue_depth_for_node() failed, because we assume
563		 * the $FABRIC_MOD has already set the session reinstatement
564 * bit from TPG_TFO(tpg)->shutdown_session() called above.
565 */
566 if (init_sess)
567 TPG_TFO(tpg)->close_session(init_sess);
568
569 spin_lock_bh(&tpg->acl_node_lock);
570 if (dynamic_acl)
571 acl->dynamic_node_acl = 1;
572 spin_unlock_bh(&tpg->acl_node_lock);
573 return -EINVAL;
574 }
575 spin_unlock_bh(&tpg->session_lock);
576 /*
577 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
578	 * forcefully shut down the $FABRIC_MOD session/nexus.
579 */
580 if (init_sess)
581 TPG_TFO(tpg)->close_session(init_sess);
582
583	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
584 " Node: %s on %s Target Portal Group: %u\n", queue_depth,
585 initiatorname, TPG_TFO(tpg)->get_fabric_name(),
586 TPG_TFO(tpg)->tpg_get_tag(tpg));
587
588 spin_lock_bh(&tpg->acl_node_lock);
589 if (dynamic_acl)
590 acl->dynamic_node_acl = 1;
591 spin_unlock_bh(&tpg->acl_node_lock);
592
593 return 0;
594}
595EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
596
597static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
598{
599 /* Set in core_dev_setup_virtual_lun0() */
600 struct se_device *dev = se_global->g_lun0_dev;
601 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
602 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
603 int ret;
604
605 lun->unpacked_lun = 0;
606 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
607 atomic_set(&lun->lun_acl_count, 0);
608 init_completion(&lun->lun_shutdown_comp);
609 INIT_LIST_HEAD(&lun->lun_acl_list);
610 INIT_LIST_HEAD(&lun->lun_cmd_list);
611 spin_lock_init(&lun->lun_acl_lock);
612 spin_lock_init(&lun->lun_cmd_lock);
613 spin_lock_init(&lun->lun_sep_lock);
614
615 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
616 if (ret < 0)
617 return -1;
618
619 return 0;
620}
621
622static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
623{
624 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
625
626 core_tpg_post_dellun(se_tpg, lun);
627}
628
629int core_tpg_register(
630 struct target_core_fabric_ops *tfo,
631 struct se_wwn *se_wwn,
632 struct se_portal_group *se_tpg,
633 void *tpg_fabric_ptr,
634 int se_tpg_type)
635{
636 struct se_lun *lun;
637 u32 i;
638
639 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
640 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
641 if (!(se_tpg->tpg_lun_list)) {
642 printk(KERN_ERR "Unable to allocate struct se_portal_group->"
643 "tpg_lun_list\n");
644 return -ENOMEM;
645 }
646
647 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
648 lun = &se_tpg->tpg_lun_list[i];
649 lun->unpacked_lun = i;
650 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
651 atomic_set(&lun->lun_acl_count, 0);
652 init_completion(&lun->lun_shutdown_comp);
653 INIT_LIST_HEAD(&lun->lun_acl_list);
654 INIT_LIST_HEAD(&lun->lun_cmd_list);
655 spin_lock_init(&lun->lun_acl_lock);
656 spin_lock_init(&lun->lun_cmd_lock);
657 spin_lock_init(&lun->lun_sep_lock);
658 }
659
660 se_tpg->se_tpg_type = se_tpg_type;
661 se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
662 se_tpg->se_tpg_tfo = tfo;
663 se_tpg->se_tpg_wwn = se_wwn;
664 atomic_set(&se_tpg->tpg_pr_ref_count, 0);
665 INIT_LIST_HEAD(&se_tpg->acl_node_list);
666 INIT_LIST_HEAD(&se_tpg->se_tpg_list);
667 INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
668 spin_lock_init(&se_tpg->acl_node_lock);
669 spin_lock_init(&se_tpg->session_lock);
670 spin_lock_init(&se_tpg->tpg_lun_lock);
671
672 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
673 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
674			kfree(se_tpg->tpg_lun_list);
675 return -ENOMEM;
676 }
677 }
678
679 spin_lock_bh(&se_global->se_tpg_lock);
680 list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
681 spin_unlock_bh(&se_global->se_tpg_lock);
682
683 printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
684 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
685 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
686 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
687 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
688
689 return 0;
690}
691EXPORT_SYMBOL(core_tpg_register);
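/*
 * Usage sketch for core_tpg_register() (assuming this file's includes): a
 * fabric module embeds struct se_portal_group in its own TPG structure and
 * registers it as a "Normal" (I/O capable) portal group; core_tpg_deregister()
 * below undoes this on teardown.  The example_tpg structure and the
 * example_fabric_ops symbol are assumptions, not in-tree names.
 */
struct example_tpg {
	u16 tpgt;
	struct se_portal_group se_tpg;
};

extern struct target_core_fabric_ops example_fabric_ops;

static int example_register_tpg(struct se_wwn *wwn, struct example_tpg *tpg)
{
	return core_tpg_register(&example_fabric_ops, wwn, &tpg->se_tpg,
				 tpg, TRANSPORT_TPG_TYPE_NORMAL);
}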
692
693int core_tpg_deregister(struct se_portal_group *se_tpg)
694{
695 struct se_node_acl *nacl, *nacl_tmp;
696
697 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
698 " for endpoint: %s Portal Tag %u\n",
699		TPG_TFO(se_tpg)->get_fabric_name(),
700		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? "Normal" : "Discovery",
701 TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
702 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
703
704 spin_lock_bh(&se_global->se_tpg_lock);
705 list_del(&se_tpg->se_tpg_list);
706 spin_unlock_bh(&se_global->se_tpg_lock);
707
708 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
709 cpu_relax();
710 /*
711 * Release any remaining demo-mode generated se_node_acl that have
712 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
713 * in transport_deregister_session().
714 */
715 spin_lock_bh(&se_tpg->acl_node_lock);
716 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
717 acl_list) {
718 list_del(&nacl->acl_list);
719 se_tpg->num_node_acls--;
720 spin_unlock_bh(&se_tpg->acl_node_lock);
721
722 core_tpg_wait_for_nacl_pr_ref(nacl);
723 core_free_device_list_for_node(nacl, se_tpg);
724 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
725
726 spin_lock_bh(&se_tpg->acl_node_lock);
727 }
728 spin_unlock_bh(&se_tpg->acl_node_lock);
729
730 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
731 core_tpg_release_virtual_lun0(se_tpg);
732
733 se_tpg->se_tpg_fabric_ptr = NULL;
734 kfree(se_tpg->tpg_lun_list);
735 return 0;
736}
737EXPORT_SYMBOL(core_tpg_deregister);
738
739struct se_lun *core_tpg_pre_addlun(
740 struct se_portal_group *tpg,
741 u32 unpacked_lun)
742{
743 struct se_lun *lun;
744
745 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
746 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
747 "-1: %u for Target Portal Group: %u\n",
748 TPG_TFO(tpg)->get_fabric_name(),
749 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
750 TPG_TFO(tpg)->tpg_get_tag(tpg));
751 return ERR_PTR(-EOVERFLOW);
752 }
753
754 spin_lock(&tpg->tpg_lun_lock);
755 lun = &tpg->tpg_lun_list[unpacked_lun];
756 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
757 printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
758 " on %s Target Portal Group: %u, ignoring request.\n",
759 unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
760 TPG_TFO(tpg)->tpg_get_tag(tpg));
761 spin_unlock(&tpg->tpg_lun_lock);
762 return ERR_PTR(-EINVAL);
763 }
764 spin_unlock(&tpg->tpg_lun_lock);
765
766 return lun;
767}
768
769int core_tpg_post_addlun(
770 struct se_portal_group *tpg,
771 struct se_lun *lun,
772 u32 lun_access,
773 void *lun_ptr)
774{
775 if (core_dev_export(lun_ptr, tpg, lun) < 0)
776 return -1;
777
778 spin_lock(&tpg->tpg_lun_lock);
779 lun->lun_access = lun_access;
780 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
781 spin_unlock(&tpg->tpg_lun_lock);
782
783 return 0;
784}
785
786static void core_tpg_shutdown_lun(
787 struct se_portal_group *tpg,
788 struct se_lun *lun)
789{
790 core_clear_lun_from_tpg(lun, tpg);
791 transport_clear_lun_from_sessions(lun);
792}
793
794struct se_lun *core_tpg_pre_dellun(
795 struct se_portal_group *tpg,
796 u32 unpacked_lun,
797 int *ret)
798{
799 struct se_lun *lun;
800
801 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
802 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
803 "-1: %u for Target Portal Group: %u\n",
804 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
805 TRANSPORT_MAX_LUNS_PER_TPG-1,
806 TPG_TFO(tpg)->tpg_get_tag(tpg));
807 return ERR_PTR(-EOVERFLOW);
808 }
809
810 spin_lock(&tpg->tpg_lun_lock);
811 lun = &tpg->tpg_lun_list[unpacked_lun];
812 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
813 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
814 " Target Portal Group: %u, ignoring request.\n",
815 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
816 TPG_TFO(tpg)->tpg_get_tag(tpg));
817 spin_unlock(&tpg->tpg_lun_lock);
818 return ERR_PTR(-ENODEV);
819 }
820 spin_unlock(&tpg->tpg_lun_lock);
821
822 return lun;
823}
824
825int core_tpg_post_dellun(
826 struct se_portal_group *tpg,
827 struct se_lun *lun)
828{
829 core_tpg_shutdown_lun(tpg, lun);
830
831 core_dev_unexport(lun->lun_se_dev, tpg, lun);
832
833 spin_lock(&tpg->tpg_lun_lock);
834 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
835 spin_unlock(&tpg->tpg_lun_lock);
836
837 return 0;
838}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 000000000000..4b9b7169bdd9
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6184 @@
1/*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/net.h>
31#include <linux/delay.h>
32#include <linux/string.h>
33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/blkdev.h>
36#include <linux/spinlock.h>
37#include <linux/kthread.h>
38#include <linux/in.h>
39#include <linux/cdrom.h>
40#include <asm/unaligned.h>
41#include <net/sock.h>
42#include <net/tcp.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_cmnd.h>
45#include <scsi/scsi_tcq.h>
46
47#include <target/target_core_base.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tmr.h>
50#include <target/target_core_tpg.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_configfs.h>
54
55#include "target_core_alua.h"
56#include "target_core_hba.h"
57#include "target_core_pr.h"
58#include "target_core_scdb.h"
59#include "target_core_ua.h"
60
61/* #define DEBUG_CDB_HANDLER */
62#ifdef DEBUG_CDB_HANDLER
63#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
64#else
65#define DEBUG_CDB_H(x...)
66#endif
67
68/* #define DEBUG_CMD_MAP */
69#ifdef DEBUG_CMD_MAP
70#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
71#else
72#define DEBUG_CMD_M(x...)
73#endif
74
75/* #define DEBUG_MEM_ALLOC */
76#ifdef DEBUG_MEM_ALLOC
77#define DEBUG_MEM(x...) printk(KERN_INFO x)
78#else
79#define DEBUG_MEM(x...)
80#endif
81
82/* #define DEBUG_MEM2_ALLOC */
83#ifdef DEBUG_MEM2_ALLOC
84#define DEBUG_MEM2(x...) printk(KERN_INFO x)
85#else
86#define DEBUG_MEM2(x...)
87#endif
88
89/* #define DEBUG_SG_CALC */
90#ifdef DEBUG_SG_CALC
91#define DEBUG_SC(x...) printk(KERN_INFO x)
92#else
93#define DEBUG_SC(x...)
94#endif
95
96/* #define DEBUG_SE_OBJ */
97#ifdef DEBUG_SE_OBJ
98#define DEBUG_SO(x...) printk(KERN_INFO x)
99#else
100#define DEBUG_SO(x...)
101#endif
102
103/* #define DEBUG_CMD_VOL */
104#ifdef DEBUG_CMD_VOL
105#define DEBUG_VOL(x...) printk(KERN_INFO x)
106#else
107#define DEBUG_VOL(x...)
108#endif
109
110/* #define DEBUG_CMD_STOP */
111#ifdef DEBUG_CMD_STOP
112#define DEBUG_CS(x...) printk(KERN_INFO x)
113#else
114#define DEBUG_CS(x...)
115#endif
116
117/* #define DEBUG_PASSTHROUGH */
118#ifdef DEBUG_PASSTHROUGH
119#define DEBUG_PT(x...) printk(KERN_INFO x)
120#else
121#define DEBUG_PT(x...)
122#endif
123
124/* #define DEBUG_TASK_STOP */
125#ifdef DEBUG_TASK_STOP
126#define DEBUG_TS(x...) printk(KERN_INFO x)
127#else
128#define DEBUG_TS(x...)
129#endif
130
131/* #define DEBUG_TRANSPORT_STOP */
132#ifdef DEBUG_TRANSPORT_STOP
133#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
134#else
135#define DEBUG_TRANSPORT_S(x...)
136#endif
137
138/* #define DEBUG_TASK_FAILURE */
139#ifdef DEBUG_TASK_FAILURE
140#define DEBUG_TF(x...) printk(KERN_INFO x)
141#else
142#define DEBUG_TF(x...)
143#endif
144
145/* #define DEBUG_DEV_OFFLINE */
146#ifdef DEBUG_DEV_OFFLINE
147#define DEBUG_DO(x...) printk(KERN_INFO x)
148#else
149#define DEBUG_DO(x...)
150#endif
151
152/* #define DEBUG_TASK_STATE */
153#ifdef DEBUG_TASK_STATE
154#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
155#else
156#define DEBUG_TSTATE(x...)
157#endif
158
159/* #define DEBUG_STATUS_THR */
160#ifdef DEBUG_STATUS_THR
161#define DEBUG_ST(x...) printk(KERN_INFO x)
162#else
163#define DEBUG_ST(x...)
164#endif
165
166/* #define DEBUG_TASK_TIMEOUT */
167#ifdef DEBUG_TASK_TIMEOUT
168#define DEBUG_TT(x...) printk(KERN_INFO x)
169#else
170#define DEBUG_TT(x...)
171#endif
172
173/* #define DEBUG_GENERIC_REQUEST_FAILURE */
174#ifdef DEBUG_GENERIC_REQUEST_FAILURE
175#define DEBUG_GRF(x...) printk(KERN_INFO x)
176#else
177#define DEBUG_GRF(x...)
178#endif
179
180/* #define DEBUG_SAM_TASK_ATTRS */
181#ifdef DEBUG_SAM_TASK_ATTRS
182#define DEBUG_STA(x...) printk(KERN_INFO x)
183#else
184#define DEBUG_STA(x...)
185#endif
186
187struct se_global *se_global;
188
189static struct kmem_cache *se_cmd_cache;
190static struct kmem_cache *se_sess_cache;
191struct kmem_cache *se_tmr_req_cache;
192struct kmem_cache *se_ua_cache;
193struct kmem_cache *se_mem_cache;
194struct kmem_cache *t10_pr_reg_cache;
195struct kmem_cache *t10_alua_lu_gp_cache;
196struct kmem_cache *t10_alua_lu_gp_mem_cache;
197struct kmem_cache *t10_alua_tg_pt_gp_cache;
198struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
199
200/* Used for transport_dev_get_map_*() */
201typedef int (*map_func_t)(struct se_task *, u32);
202
203static int transport_generic_write_pending(struct se_cmd *);
204static int transport_processing_thread(void *);
205static int __transport_execute_tasks(struct se_device *dev);
206static void transport_complete_task_attr(struct se_cmd *cmd);
207static void transport_direct_request_timeout(struct se_cmd *cmd);
208static void transport_free_dev_tasks(struct se_cmd *cmd);
209static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
210 unsigned long long starting_lba, u32 sectors,
211 enum dma_data_direction data_direction,
212 struct list_head *mem_list, int set_counts);
213static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
214 u32 dma_size);
215static int transport_generic_remove(struct se_cmd *cmd,
216 int release_to_pool, int session_reinstatement);
217static int transport_get_sectors(struct se_cmd *cmd);
218static struct list_head *transport_init_se_mem_list(void);
219static int transport_map_sg_to_mem(struct se_cmd *cmd,
220 struct list_head *se_mem_list, void *in_mem,
221 u32 *se_mem_cnt);
222static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
223 unsigned char *dst, struct list_head *se_mem_list);
224static void transport_release_fe_cmd(struct se_cmd *cmd);
225static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
226 struct se_queue_obj *qobj);
227static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
228static void transport_stop_all_task_timers(struct se_cmd *cmd);
229
230int init_se_global(void)
231{
232 struct se_global *global;
233
234 global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
235 if (!(global)) {
236 printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
237 return -1;
238 }
239
240 INIT_LIST_HEAD(&global->g_lu_gps_list);
241 INIT_LIST_HEAD(&global->g_se_tpg_list);
242 INIT_LIST_HEAD(&global->g_hba_list);
243 INIT_LIST_HEAD(&global->g_se_dev_list);
244 spin_lock_init(&global->g_device_lock);
245 spin_lock_init(&global->hba_lock);
246 spin_lock_init(&global->se_tpg_lock);
247 spin_lock_init(&global->lu_gps_lock);
248 spin_lock_init(&global->plugin_class_lock);
249
250 se_cmd_cache = kmem_cache_create("se_cmd_cache",
251 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
252 if (!(se_cmd_cache)) {
253 printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
254 goto out;
255 }
256 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
257 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
258 0, NULL);
259 if (!(se_tmr_req_cache)) {
260 printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
261 " failed\n");
262 goto out;
263 }
264 se_sess_cache = kmem_cache_create("se_sess_cache",
265 sizeof(struct se_session), __alignof__(struct se_session),
266 0, NULL);
267 if (!(se_sess_cache)) {
268 printk(KERN_ERR "kmem_cache_create() for struct se_session"
269 " failed\n");
270 goto out;
271 }
272 se_ua_cache = kmem_cache_create("se_ua_cache",
273 sizeof(struct se_ua), __alignof__(struct se_ua),
274 0, NULL);
275 if (!(se_ua_cache)) {
276 printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
277 goto out;
278 }
279 se_mem_cache = kmem_cache_create("se_mem_cache",
280 sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
281 if (!(se_mem_cache)) {
282 printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
283 goto out;
284 }
285 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
286 sizeof(struct t10_pr_registration),
287 __alignof__(struct t10_pr_registration), 0, NULL);
288 if (!(t10_pr_reg_cache)) {
289 printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
290 " failed\n");
291 goto out;
292 }
293 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
294 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
295 0, NULL);
296 if (!(t10_alua_lu_gp_cache)) {
297 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
298 " failed\n");
299 goto out;
300 }
301 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
302 sizeof(struct t10_alua_lu_gp_member),
303 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
304 if (!(t10_alua_lu_gp_mem_cache)) {
305 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
306 "cache failed\n");
307 goto out;
308 }
309 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
310 sizeof(struct t10_alua_tg_pt_gp),
311 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
312 if (!(t10_alua_tg_pt_gp_cache)) {
313 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
314 "cache failed\n");
315 goto out;
316 }
317 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
318 "t10_alua_tg_pt_gp_mem_cache",
319 sizeof(struct t10_alua_tg_pt_gp_member),
320 __alignof__(struct t10_alua_tg_pt_gp_member),
321 0, NULL);
322 if (!(t10_alua_tg_pt_gp_mem_cache)) {
323 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
324				"mem_cache failed\n");
325 goto out;
326 }
327
328 se_global = global;
329
330 return 0;
331out:
332 if (se_cmd_cache)
333 kmem_cache_destroy(se_cmd_cache);
334 if (se_tmr_req_cache)
335 kmem_cache_destroy(se_tmr_req_cache);
336 if (se_sess_cache)
337 kmem_cache_destroy(se_sess_cache);
338 if (se_ua_cache)
339 kmem_cache_destroy(se_ua_cache);
340 if (se_mem_cache)
341 kmem_cache_destroy(se_mem_cache);
342 if (t10_pr_reg_cache)
343 kmem_cache_destroy(t10_pr_reg_cache);
344 if (t10_alua_lu_gp_cache)
345 kmem_cache_destroy(t10_alua_lu_gp_cache);
346 if (t10_alua_lu_gp_mem_cache)
347 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
348 if (t10_alua_tg_pt_gp_cache)
349 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
350 if (t10_alua_tg_pt_gp_mem_cache)
351 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
352 kfree(global);
353 return -1;
354}
355
356void release_se_global(void)
357{
358 struct se_global *global;
359
360 global = se_global;
361 if (!(global))
362 return;
363
364 kmem_cache_destroy(se_cmd_cache);
365 kmem_cache_destroy(se_tmr_req_cache);
366 kmem_cache_destroy(se_sess_cache);
367 kmem_cache_destroy(se_ua_cache);
368 kmem_cache_destroy(se_mem_cache);
369 kmem_cache_destroy(t10_pr_reg_cache);
370 kmem_cache_destroy(t10_alua_lu_gp_cache);
371 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
372 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
373 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
374 kfree(global);
375
376 se_global = NULL;
377}
378
379/* SCSI statistics table index */
380static struct scsi_index_table scsi_index_table;
381
382/*
383 * Initialize the index table for allocating unique row indexes to various mib
384 * tables.
385 */
386void init_scsi_index_table(void)
387{
388 memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
389 spin_lock_init(&scsi_index_table.lock);
390}
391
392/*
393 * Allocate a new row index for the entry type specified
394 */
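/*
 * Index 0 is treated as invalid: if the u32 counter wraps back to zero
 * it is incremented a second time, so callers never see a zero index.
 */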
395u32 scsi_get_new_index(scsi_index_t type)
396{
397 u32 new_index;
398
399 if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
400 printk(KERN_ERR "Invalid index type %d\n", type);
401 return -EINVAL;
402 }
403
404 spin_lock(&scsi_index_table.lock);
405 new_index = ++scsi_index_table.scsi_mib_index[type];
406 if (new_index == 0)
407 new_index = ++scsi_index_table.scsi_mib_index[type];
408 spin_unlock(&scsi_index_table.lock);
409
410 return new_index;
411}
412
413void transport_init_queue_obj(struct se_queue_obj *qobj)
414{
415 atomic_set(&qobj->queue_cnt, 0);
416 INIT_LIST_HEAD(&qobj->qobj_list);
417 init_waitqueue_head(&qobj->thread_wq);
418 spin_lock_init(&qobj->cmd_queue_lock);
419}
420EXPORT_SYMBOL(transport_init_queue_obj);
421
422static int transport_subsystem_reqmods(void)
423{
424 int ret;
425
426 ret = request_module("target_core_iblock");
427 if (ret != 0)
428 printk(KERN_ERR "Unable to load target_core_iblock\n");
429
430 ret = request_module("target_core_file");
431 if (ret != 0)
432 printk(KERN_ERR "Unable to load target_core_file\n");
433
434 ret = request_module("target_core_pscsi");
435 if (ret != 0)
436 printk(KERN_ERR "Unable to load target_core_pscsi\n");
437
438 ret = request_module("target_core_stgt");
439 if (ret != 0)
440 printk(KERN_ERR "Unable to load target_core_stgt\n");
441
442 return 0;
443}
444
445int transport_subsystem_check_init(void)
446{
447 if (se_global->g_sub_api_initialized)
448 return 0;
449 /*
450 * Request the loading of known TCM subsystem plugins..
451 */
452 if (transport_subsystem_reqmods() < 0)
453 return -1;
454
455 se_global->g_sub_api_initialized = 1;
456 return 0;
457}
458
459struct se_session *transport_init_session(void)
460{
461 struct se_session *se_sess;
462
463 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
464 if (!(se_sess)) {
465 printk(KERN_ERR "Unable to allocate struct se_session from"
466 " se_sess_cache\n");
467 return ERR_PTR(-ENOMEM);
468 }
469 INIT_LIST_HEAD(&se_sess->sess_list);
470 INIT_LIST_HEAD(&se_sess->sess_acl_list);
471
472 return se_sess;
473}
474EXPORT_SYMBOL(transport_init_session);
475
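/*
 * Typical fabric module usage of the session API exported here (a
 * sketch only; fabric_nexus and the error handling are illustrative):
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_nexus);
 *	...
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */
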
476/*
477 * Called with struct se_portal_group->session_lock held, taken via spin_lock_bh().
478 */
479void __transport_register_session(
480 struct se_portal_group *se_tpg,
481 struct se_node_acl *se_nacl,
482 struct se_session *se_sess,
483 void *fabric_sess_ptr)
484{
485 unsigned char buf[PR_REG_ISID_LEN];
486
487 se_sess->se_tpg = se_tpg;
488 se_sess->fabric_sess_ptr = fabric_sess_ptr;
489 /*
490	 * Used by struct se_node_acl's under ConfigFS to locate an active struct se_session
491 *
492 * Only set for struct se_session's that will actually be moving I/O.
493 * eg: *NOT* discovery sessions.
494 */
495 if (se_nacl) {
496 /*
497 * If the fabric module supports an ISID based TransportID,
498 * save this value in binary from the fabric I_T Nexus now.
499 */
500 if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
501 memset(&buf[0], 0, PR_REG_ISID_LEN);
502 TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
503 &buf[0], PR_REG_ISID_LEN);
504 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
505 }
506 spin_lock_irq(&se_nacl->nacl_sess_lock);
507 /*
508 * The se_nacl->nacl_sess pointer will be set to the
509 * last active I_T Nexus for each struct se_node_acl.
510 */
511 se_nacl->nacl_sess = se_sess;
512
513 list_add_tail(&se_sess->sess_acl_list,
514 &se_nacl->acl_sess_list);
515 spin_unlock_irq(&se_nacl->nacl_sess_lock);
516 }
517 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
518
519 printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
520 TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
521}
522EXPORT_SYMBOL(__transport_register_session);
523
524void transport_register_session(
525 struct se_portal_group *se_tpg,
526 struct se_node_acl *se_nacl,
527 struct se_session *se_sess,
528 void *fabric_sess_ptr)
529{
530 spin_lock_bh(&se_tpg->session_lock);
531 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
532 spin_unlock_bh(&se_tpg->session_lock);
533}
534EXPORT_SYMBOL(transport_register_session);
535
536void transport_deregister_session_configfs(struct se_session *se_sess)
537{
538 struct se_node_acl *se_nacl;
539 unsigned long flags;
540 /*
541 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
542 */
543 se_nacl = se_sess->se_node_acl;
544 if ((se_nacl)) {
545 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
546 list_del(&se_sess->sess_acl_list);
547 /*
548 * If the session list is empty, then clear the pointer.
549 * Otherwise, set the struct se_session pointer from the tail
550 * element of the per struct se_node_acl active session list.
551 */
552 if (list_empty(&se_nacl->acl_sess_list))
553 se_nacl->nacl_sess = NULL;
554 else {
555 se_nacl->nacl_sess = container_of(
556 se_nacl->acl_sess_list.prev,
557 struct se_session, sess_acl_list);
558 }
559 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
560 }
561}
562EXPORT_SYMBOL(transport_deregister_session_configfs);
563
564void transport_free_session(struct se_session *se_sess)
565{
566 kmem_cache_free(se_sess_cache, se_sess);
567}
568EXPORT_SYMBOL(transport_free_session);
569
570void transport_deregister_session(struct se_session *se_sess)
571{
572 struct se_portal_group *se_tpg = se_sess->se_tpg;
573 struct se_node_acl *se_nacl;
574
575 if (!(se_tpg)) {
576 transport_free_session(se_sess);
577 return;
578 }
579
580 spin_lock_bh(&se_tpg->session_lock);
581 list_del(&se_sess->sess_list);
582 se_sess->se_tpg = NULL;
583 se_sess->fabric_sess_ptr = NULL;
584 spin_unlock_bh(&se_tpg->session_lock);
585
586 /*
587 * Determine if we need to do extra work for this initiator node's
588 * struct se_node_acl if it had been previously dynamically generated.
589 */
590 se_nacl = se_sess->se_node_acl;
591 if ((se_nacl)) {
592 spin_lock_bh(&se_tpg->acl_node_lock);
593 if (se_nacl->dynamic_node_acl) {
594 if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
595 se_tpg))) {
596 list_del(&se_nacl->acl_list);
597 se_tpg->num_node_acls--;
598 spin_unlock_bh(&se_tpg->acl_node_lock);
599
600 core_tpg_wait_for_nacl_pr_ref(se_nacl);
601 core_free_device_list_for_node(se_nacl, se_tpg);
602 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
603 se_nacl);
604 spin_lock_bh(&se_tpg->acl_node_lock);
605 }
606 }
607 spin_unlock_bh(&se_tpg->acl_node_lock);
608 }
609
610 transport_free_session(se_sess);
611
612 printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
613 TPG_TFO(se_tpg)->get_fabric_name());
614}
615EXPORT_SYMBOL(transport_deregister_session);
616
617/*
618 * Called with T_TASK(cmd)->t_state_lock held.
619 */
620static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
621{
622 struct se_device *dev;
623 struct se_task *task;
624 unsigned long flags;
625
626 if (!T_TASK(cmd))
627 return;
628
629 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
630 dev = task->se_dev;
631 if (!(dev))
632 continue;
633
634 if (atomic_read(&task->task_active))
635 continue;
636
637 if (!(atomic_read(&task->task_state_active)))
638 continue;
639
640 spin_lock_irqsave(&dev->execute_task_lock, flags);
641 list_del(&task->t_state_list);
642 DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
643			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
644 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
645
646 atomic_set(&task->task_state_active, 0);
647 atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
648 }
649}
650
651/* transport_cmd_check_stop():
652 *
653 * 'transport_off = 1' determines if t_transport_active should be cleared.
654 * 'transport_off = 2' determines if task_dev_state should be removed.
655 *
656 * A non-zero u8 t_state sets cmd->t_state.
657 * Returns 1 when command is stopped, else 0.
658 */
659static int transport_cmd_check_stop(
660 struct se_cmd *cmd,
661 int transport_off,
662 u8 t_state)
663{
664 unsigned long flags;
665
666 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
667 /*
668	 * Determine if IOCTL context caller is requesting the stopping of this
669 * command for LUN shutdown purposes.
670 */
671 if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
672 DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
673 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
674 CMD_TFO(cmd)->get_task_tag(cmd));
675
676 cmd->deferred_t_state = cmd->t_state;
677 cmd->t_state = TRANSPORT_DEFERRED_CMD;
678 atomic_set(&T_TASK(cmd)->t_transport_active, 0);
679 if (transport_off == 2)
680 transport_all_task_dev_remove_state(cmd);
681 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
682
683 complete(&T_TASK(cmd)->transport_lun_stop_comp);
684 return 1;
685 }
686 /*
687 * Determine if frontend context caller is requesting the stopping of
688	 * this command for frontend exceptions.
689 */
690 if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
691 DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
692 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
693 CMD_TFO(cmd)->get_task_tag(cmd));
694
695 cmd->deferred_t_state = cmd->t_state;
696 cmd->t_state = TRANSPORT_DEFERRED_CMD;
697 if (transport_off == 2)
698 transport_all_task_dev_remove_state(cmd);
699
700 /*
701 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
702 * to FE.
703 */
704 if (transport_off == 2)
705 cmd->se_lun = NULL;
706 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
707
708 complete(&T_TASK(cmd)->t_transport_stop_comp);
709 return 1;
710 }
711 if (transport_off) {
712 atomic_set(&T_TASK(cmd)->t_transport_active, 0);
713 if (transport_off == 2) {
714 transport_all_task_dev_remove_state(cmd);
715 /*
716 * Clear struct se_cmd->se_lun before the transport_off == 2
717 * handoff to fabric module.
718 */
719 cmd->se_lun = NULL;
720 /*
721 * Some fabric modules like tcm_loop can release
722				 * their internally allocated I/O reference and the
723				 * struct se_cmd now.
724 */
725 if (CMD_TFO(cmd)->check_stop_free != NULL) {
726 spin_unlock_irqrestore(
727 &T_TASK(cmd)->t_state_lock, flags);
728
729 CMD_TFO(cmd)->check_stop_free(cmd);
730 return 1;
731 }
732 }
733 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
734
735 return 0;
736 } else if (t_state)
737 cmd->t_state = t_state;
738 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
739
740 return 0;
741}
742
743static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
744{
745 return transport_cmd_check_stop(cmd, 2, 0);
746}
747
748static void transport_lun_remove_cmd(struct se_cmd *cmd)
749{
750 struct se_lun *lun = SE_LUN(cmd);
751 unsigned long flags;
752
753 if (!lun)
754 return;
755
756 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
757 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
758 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
759 goto check_lun;
760 }
761 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
762 transport_all_task_dev_remove_state(cmd);
763 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
764
765
766check_lun:
767 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
768 if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
769 list_del(&cmd->se_lun_list);
770 atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
771#if 0
772		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
773 CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
774#endif
775 }
776 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
777}
778
779void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
780{
781 transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
782 transport_lun_remove_cmd(cmd);
783
784 if (transport_cmd_check_stop_to_fabric(cmd))
785 return;
786 if (remove)
787 transport_generic_remove(cmd, 0, 0);
788}
789
790void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
791{
792 transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
793
794 if (transport_cmd_check_stop_to_fabric(cmd))
795 return;
796
797 transport_generic_remove(cmd, 0, 0);
798}
799
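/*
 * Wrap a struct se_cmd in a freshly allocated struct se_queue_req,
 * append it to the owning device's queue object, and wake up the
 * per-device processing thread.  A non-zero t_state is written to
 * cmd->t_state and marks the command as transport active.
 */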
800static int transport_add_cmd_to_queue(
801 struct se_cmd *cmd,
802 int t_state)
803{
804 struct se_device *dev = cmd->se_dev;
805 struct se_queue_obj *qobj = dev->dev_queue_obj;
806 struct se_queue_req *qr;
807 unsigned long flags;
808
809 qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
810 if (!(qr)) {
811 printk(KERN_ERR "Unable to allocate memory for"
812 " struct se_queue_req\n");
813 return -1;
814 }
815 INIT_LIST_HEAD(&qr->qr_list);
816
817 qr->cmd = (void *)cmd;
818 qr->state = t_state;
819
820 if (t_state) {
821 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
822 cmd->t_state = t_state;
823 atomic_set(&T_TASK(cmd)->t_transport_active, 1);
824 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
825 }
826
827 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
828 list_add_tail(&qr->qr_list, &qobj->qobj_list);
829 atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
830 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
831
832 atomic_inc(&qobj->queue_cnt);
833 wake_up_interruptible(&qobj->thread_wq);
834 return 0;
835}
836
837/*
838 * Called with struct se_queue_obj->cmd_queue_lock held.
839 */
840static struct se_queue_req *
841__transport_get_qr_from_queue(struct se_queue_obj *qobj)
842{
843 struct se_cmd *cmd;
844 struct se_queue_req *qr = NULL;
845
846 if (list_empty(&qobj->qobj_list))
847 return NULL;
848
849 list_for_each_entry(qr, &qobj->qobj_list, qr_list)
850 break;
851
852 if (qr->cmd) {
853 cmd = (struct se_cmd *)qr->cmd;
854 atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
855 }
856 list_del(&qr->qr_list);
857 atomic_dec(&qobj->queue_cnt);
858
859 return qr;
860}
861
862static struct se_queue_req *
863transport_get_qr_from_queue(struct se_queue_obj *qobj)
864{
865 struct se_cmd *cmd;
866 struct se_queue_req *qr;
867 unsigned long flags;
868
869 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
870 if (list_empty(&qobj->qobj_list)) {
871 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
872 return NULL;
873 }
874
875 list_for_each_entry(qr, &qobj->qobj_list, qr_list)
876 break;
877
878 if (qr->cmd) {
879 cmd = (struct se_cmd *)qr->cmd;
880 atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
881 }
882 list_del(&qr->qr_list);
883 atomic_dec(&qobj->queue_cnt);
884 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
885
886 return qr;
887}
888
889static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
890 struct se_queue_obj *qobj)
891{
892 struct se_cmd *q_cmd;
893 struct se_queue_req *qr = NULL, *qr_p = NULL;
894 unsigned long flags;
895
896 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
897 if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
898 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
899 return;
900 }
901
902 list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
903 q_cmd = (struct se_cmd *)qr->cmd;
904 if (q_cmd != cmd)
905 continue;
906
907 atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
908 atomic_dec(&qobj->queue_cnt);
909 list_del(&qr->qr_list);
910 kfree(qr);
911 }
912 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
913
914 if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
915 printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
916 CMD_TFO(cmd)->get_task_tag(cmd),
917 atomic_read(&T_TASK(cmd)->t_transport_queue_active));
918 }
919}
920
921/*
922 * Completion function used by TCM subsystem plugins (such as FILEIO)
923 * for queueing up response from struct se_subsystem_api->do_task()
924 */
925void transport_complete_sync_cache(struct se_cmd *cmd, int good)
926{
927 struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
928 struct se_task, t_list);
929
930 if (good) {
931 cmd->scsi_status = SAM_STAT_GOOD;
932 task->task_scsi_status = GOOD;
933 } else {
934 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
935 task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
936 TASK_CMD(task)->transport_error_status =
937 PYX_TRANSPORT_ILLEGAL_REQUEST;
938 }
939
940 transport_complete_task(task, good);
941}
942EXPORT_SYMBOL(transport_complete_sync_cache);
943
944/* transport_complete_task():
945 *
946 * Called from interrupt and non interrupt context depending
947 * on the transport plugin.
948 */
949void transport_complete_task(struct se_task *task, int success)
950{
951 struct se_cmd *cmd = TASK_CMD(task);
952 struct se_device *dev = task->se_dev;
953 int t_state;
954 unsigned long flags;
955#if 0
956 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
957 T_TASK(cmd)->t_task_cdb[0], dev);
958#endif
959 if (dev) {
960 spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
961 atomic_inc(&dev->depth_left);
962 atomic_inc(&SE_HBA(dev)->left_queue_depth);
963 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
964 }
965
966 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
967 atomic_set(&task->task_active, 0);
968
969 /*
970 * See if any sense data exists, if so set the TASK_SENSE flag.
971 * Also check for any other post completion work that needs to be
972 * done by the plugins.
973 */
974 if (dev && dev->transport->transport_complete) {
975 if (dev->transport->transport_complete(task) != 0) {
976 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
977 task->task_sense = 1;
978 success = 1;
979 }
980 }
981
982 /*
983 * See if we are waiting for outstanding struct se_task
984 * to complete for an exception condition
985 */
986 if (atomic_read(&task->task_stop)) {
987 /*
988 * Decrement T_TASK(cmd)->t_se_count if this task had
989 * previously thrown its timeout exception handler.
990 */
991 if (atomic_read(&task->task_timeout)) {
992 atomic_dec(&T_TASK(cmd)->t_se_count);
993 atomic_set(&task->task_timeout, 0);
994 }
995 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
996
997 complete(&task->task_stop_comp);
998 return;
999 }
1000 /*
1001 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
1002 * left counter to determine when the struct se_cmd is ready to be queued to
1003 * the processing thread.
1004 */
1005 if (atomic_read(&task->task_timeout)) {
1006 if (!(atomic_dec_and_test(
1007 &T_TASK(cmd)->t_task_cdbs_timeout_left))) {
1008 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
1009 flags);
1010 return;
1011 }
1012 t_state = TRANSPORT_COMPLETE_TIMEOUT;
1013 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1014
1015 transport_add_cmd_to_queue(cmd, t_state);
1016 return;
1017 }
1018 atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
1019
1020 /*
1021 * Decrement the outstanding t_task_cdbs_left count. The last
1022 * struct se_task from struct se_cmd will complete itself into the
1023 * device queue depending upon int success.
1024 */
1025 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
1026 if (!success)
1027 T_TASK(cmd)->t_tasks_failed = 1;
1028
1029 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1030 return;
1031 }
1032
1033 if (!success || T_TASK(cmd)->t_tasks_failed) {
1034 t_state = TRANSPORT_COMPLETE_FAILURE;
1035 if (!task->task_error_status) {
1036 task->task_error_status =
1037 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1038 cmd->transport_error_status =
1039 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1040 }
1041 } else {
1042 atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
1043 t_state = TRANSPORT_COMPLETE_OK;
1044 }
1045 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1046
1047 transport_add_cmd_to_queue(cmd, t_state);
1048}
1049EXPORT_SYMBOL(transport_complete_task);
1050
1051/*
1052 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
1053 * struct se_task list is ready to be added to the active execution list
1054 * of a struct se_device.
1055 *
1056 * Called with struct se_device->execute_task_lock held.
1057 */
1058static inline int transport_add_task_check_sam_attr(
1059 struct se_task *task,
1060 struct se_task *task_prev,
1061 struct se_device *dev)
1062{
1063 /*
1064 * No SAM Task attribute emulation enabled, add to tail of
1065 * execution queue
1066 */
1067 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
1068 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
1069 return 0;
1070 }
1071 /*
1072 * HEAD_OF_QUEUE attribute for received CDB, which means
1073 * the first task that is associated with a struct se_cmd goes to
1074 * head of the struct se_device->execute_task_list, and task_prev
1075 * after that for each subsequent task
1076 */
1077 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
1078 list_add(&task->t_execute_list,
1079 (task_prev != NULL) ?
1080 &task_prev->t_execute_list :
1081 &dev->execute_task_list);
1082
1083 DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
1084 " in execution queue\n",
1085 T_TASK(task->task_se_cmd)->t_task_cdb[0]);
1086 return 1;
1087 }
1088 /*
1089	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are appended to the
1090	 * end of the struct se_device->execute_task_list once they have
1091	 * transitioned from Dormant -> Active state.
1092 */
1093 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
1094 return 0;
1095}
1096
1097/* __transport_add_task_to_execute_queue():
1098 *
1099 * Called with struct se_device->execute_task_lock held.
1100 */
1101static void __transport_add_task_to_execute_queue(
1102 struct se_task *task,
1103 struct se_task *task_prev,
1104 struct se_device *dev)
1105{
1106 int head_of_queue;
1107
1108 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
1109 atomic_inc(&dev->execute_tasks);
1110
1111 if (atomic_read(&task->task_state_active))
1112 return;
1113 /*
1114 * Determine if this task needs to go to HEAD_OF_QUEUE for the
1115 * state list as well. Running with SAM Task Attribute emulation
1116 * will always return head_of_queue == 0 here
1117 */
1118 if (head_of_queue)
1119 list_add(&task->t_state_list, (task_prev) ?
1120 &task_prev->t_state_list :
1121 &dev->state_task_list);
1122 else
1123 list_add_tail(&task->t_state_list, &dev->state_task_list);
1124
1125 atomic_set(&task->task_state_active, 1);
1126
1127 DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
1128 CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
1129 task, dev);
1130}
1131
1132static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
1133{
1134 struct se_device *dev;
1135 struct se_task *task;
1136 unsigned long flags;
1137
1138 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
1139 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
1140 dev = task->se_dev;
1141
1142 if (atomic_read(&task->task_state_active))
1143 continue;
1144
1145 spin_lock(&dev->execute_task_lock);
1146 list_add_tail(&task->t_state_list, &dev->state_task_list);
1147 atomic_set(&task->task_state_active, 1);
1148
1149 DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
1150 CMD_TFO(task->task_se_cmd)->get_task_tag(
1151 task->task_se_cmd), task, dev);
1152
1153 spin_unlock(&dev->execute_task_lock);
1154 }
1155 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1156}
1157
1158static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
1159{
1160 struct se_device *dev = SE_DEV(cmd);
1161 struct se_task *task, *task_prev = NULL;
1162 unsigned long flags;
1163
1164 spin_lock_irqsave(&dev->execute_task_lock, flags);
1165 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
1166 if (atomic_read(&task->task_execute_queue))
1167 continue;
1168 /*
1169 * __transport_add_task_to_execute_queue() handles the
1170 * SAM Task Attribute emulation if enabled
1171 */
1172 __transport_add_task_to_execute_queue(task, task_prev, dev);
1173 atomic_set(&task->task_execute_queue, 1);
1174 task_prev = task;
1175 }
1176 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1177
1178 return;
1179}
1180
1181/* transport_get_task_from_execute_queue():
1182 *
1183 * Called with dev->execute_task_lock held.
1184 */
1185static struct se_task *
1186transport_get_task_from_execute_queue(struct se_device *dev)
1187{
1188 struct se_task *task;
1189
1190 if (list_empty(&dev->execute_task_list))
1191 return NULL;
1192
1193 list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
1194 break;
1195
1196 list_del(&task->t_execute_list);
1197 atomic_set(&task->task_execute_queue, 0);
1198 atomic_dec(&dev->execute_tasks);
1199
1200 return task;
1201}
1202
1203/* transport_remove_task_from_execute_queue():
1204 *
1205 *
1206 */
1207void transport_remove_task_from_execute_queue(
1208 struct se_task *task,
1209 struct se_device *dev)
1210{
1211 unsigned long flags;
1212
1213 if (atomic_read(&task->task_execute_queue) == 0) {
1214 dump_stack();
1215 return;
1216 }
1217
1218 spin_lock_irqsave(&dev->execute_task_lock, flags);
1219 list_del(&task->t_execute_list);
1220 atomic_set(&task->task_execute_queue, 0);
1221 atomic_dec(&dev->execute_tasks);
1222 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1223}
1224
1225unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
1226{
1227 switch (cmd->data_direction) {
1228 case DMA_NONE:
1229 return "NONE";
1230 case DMA_FROM_DEVICE:
1231 return "READ";
1232 case DMA_TO_DEVICE:
1233 return "WRITE";
1234 case DMA_BIDIRECTIONAL:
1235 return "BIDI";
1236 default:
1237 break;
1238 }
1239
1240 return "UNKNOWN";
1241}
1242
1243void transport_dump_dev_state(
1244 struct se_device *dev,
1245 char *b,
1246 int *bl)
1247{
1248 *bl += sprintf(b + *bl, "Status: ");
1249 switch (dev->dev_status) {
1250 case TRANSPORT_DEVICE_ACTIVATED:
1251 *bl += sprintf(b + *bl, "ACTIVATED");
1252 break;
1253 case TRANSPORT_DEVICE_DEACTIVATED:
1254 *bl += sprintf(b + *bl, "DEACTIVATED");
1255 break;
1256 case TRANSPORT_DEVICE_SHUTDOWN:
1257 *bl += sprintf(b + *bl, "SHUTDOWN");
1258 break;
1259 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
1260 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
1261 *bl += sprintf(b + *bl, "OFFLINE");
1262 break;
1263 default:
1264 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
1265 break;
1266 }
1267
1268 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
1269 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
1270 dev->queue_depth);
1271 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
1272 DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
1273 *bl += sprintf(b + *bl, " ");
1274}
1275
1276/* transport_release_all_cmds():
1277 *
1278 *
1279 */
1280static void transport_release_all_cmds(struct se_device *dev)
1281{
1282 struct se_cmd *cmd = NULL;
1283 struct se_queue_req *qr = NULL, *qr_p = NULL;
1284 int bug_out = 0, t_state;
1285 unsigned long flags;
1286
1287 spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
1288 list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
1289 qr_list) {
1290
1291 cmd = (struct se_cmd *)qr->cmd;
1292 t_state = qr->state;
1293 list_del(&qr->qr_list);
1294 kfree(qr);
1295 spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
1296 flags);
1297
1298 printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
1299 " t_state: %u directly\n",
1300 CMD_TFO(cmd)->get_task_tag(cmd),
1301 CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
1302
1303 transport_release_fe_cmd(cmd);
1304 bug_out = 1;
1305
1306 spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
1307 }
1308 spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
1309#if 0
1310 if (bug_out)
1311 BUG();
1312#endif
1313}
1314
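/*
 * The transport_{set,dump}_vpd_*() helpers below decode a single
 * INQUIRY EVPD 0x83 designation descriptor.  A caller with such a
 * descriptor in page_83 would typically run (sketch only; allocating
 * and tracking the struct t10_vpd is the caller's responsibility):
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */
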
1315void transport_dump_vpd_proto_id(
1316 struct t10_vpd *vpd,
1317 unsigned char *p_buf,
1318 int p_buf_len)
1319{
1320 unsigned char buf[VPD_TMP_BUF_SIZE];
1321 int len;
1322
1323 memset(buf, 0, VPD_TMP_BUF_SIZE);
1324 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1325
1326 switch (vpd->protocol_identifier) {
1327 case 0x00:
1328 sprintf(buf+len, "Fibre Channel\n");
1329 break;
1330 case 0x10:
1331 sprintf(buf+len, "Parallel SCSI\n");
1332 break;
1333 case 0x20:
1334 sprintf(buf+len, "SSA\n");
1335 break;
1336 case 0x30:
1337 sprintf(buf+len, "IEEE 1394\n");
1338 break;
1339 case 0x40:
1340 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1341 " Protocol\n");
1342 break;
1343 case 0x50:
1344 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1345 break;
1346 case 0x60:
1347 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1348 break;
1349 case 0x70:
1350 sprintf(buf+len, "Automation/Drive Interface Transport"
1351 " Protocol\n");
1352 break;
1353 case 0x80:
1354 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1355 break;
1356 default:
1357 sprintf(buf+len, "Unknown 0x%02x\n",
1358 vpd->protocol_identifier);
1359 break;
1360 }
1361
1362 if (p_buf)
1363 strncpy(p_buf, buf, p_buf_len);
1364 else
1365 printk(KERN_INFO "%s", buf);
1366}
1367
1368void
1369transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1370{
1371 /*
1372 * Check if the Protocol Identifier Valid (PIV) bit is set..
1373 *
1374 * from spc3r23.pdf section 7.5.1
1375 */
1376 if (page_83[1] & 0x80) {
1377 vpd->protocol_identifier = (page_83[0] & 0xf0);
1378 vpd->protocol_identifier_set = 1;
1379 transport_dump_vpd_proto_id(vpd, NULL, 0);
1380 }
1381}
1382EXPORT_SYMBOL(transport_set_vpd_proto_id);
1383
1384int transport_dump_vpd_assoc(
1385 struct t10_vpd *vpd,
1386 unsigned char *p_buf,
1387 int p_buf_len)
1388{
1389 unsigned char buf[VPD_TMP_BUF_SIZE];
1390 int ret = 0, len;
1391
1392 memset(buf, 0, VPD_TMP_BUF_SIZE);
1393 len = sprintf(buf, "T10 VPD Identifier Association: ");
1394
1395 switch (vpd->association) {
1396 case 0x00:
1397 sprintf(buf+len, "addressed logical unit\n");
1398 break;
1399 case 0x10:
1400 sprintf(buf+len, "target port\n");
1401 break;
1402 case 0x20:
1403 sprintf(buf+len, "SCSI target device\n");
1404 break;
1405 default:
1406 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1407 ret = -1;
1408 break;
1409 }
1410
1411 if (p_buf)
1412 strncpy(p_buf, buf, p_buf_len);
1413 else
1414 printk("%s", buf);
1415
1416 return ret;
1417}
1418
1419int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1420{
1421 /*
1422 * The VPD identification association..
1423 *
1424 * from spc3r23.pdf Section 7.6.3.1 Table 297
1425 */
1426 vpd->association = (page_83[1] & 0x30);
1427 return transport_dump_vpd_assoc(vpd, NULL, 0);
1428}
1429EXPORT_SYMBOL(transport_set_vpd_assoc);
1430
1431int transport_dump_vpd_ident_type(
1432 struct t10_vpd *vpd,
1433 unsigned char *p_buf,
1434 int p_buf_len)
1435{
1436 unsigned char buf[VPD_TMP_BUF_SIZE];
1437 int ret = 0, len;
1438
1439 memset(buf, 0, VPD_TMP_BUF_SIZE);
1440 len = sprintf(buf, "T10 VPD Identifier Type: ");
1441
1442 switch (vpd->device_identifier_type) {
1443 case 0x00:
1444 sprintf(buf+len, "Vendor specific\n");
1445 break;
1446 case 0x01:
1447 sprintf(buf+len, "T10 Vendor ID based\n");
1448 break;
1449 case 0x02:
1450 sprintf(buf+len, "EUI-64 based\n");
1451 break;
1452 case 0x03:
1453 sprintf(buf+len, "NAA\n");
1454 break;
1455 case 0x04:
1456 sprintf(buf+len, "Relative target port identifier\n");
1457 break;
1458 case 0x08:
1459 sprintf(buf+len, "SCSI name string\n");
1460 break;
1461 default:
1462 sprintf(buf+len, "Unsupported: 0x%02x\n",
1463 vpd->device_identifier_type);
1464 ret = -1;
1465 break;
1466 }
1467
1468 if (p_buf)
1469 strncpy(p_buf, buf, p_buf_len);
1470 else
1471 printk("%s", buf);
1472
1473 return ret;
1474}
1475
1476int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1477{
1478 /*
1479 * The VPD identifier type..
1480 *
1481 * from spc3r23.pdf Section 7.6.3.1 Table 298
1482 */
1483 vpd->device_identifier_type = (page_83[1] & 0x0f);
1484 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1485}
1486EXPORT_SYMBOL(transport_set_vpd_ident_type);
1487
1488int transport_dump_vpd_ident(
1489 struct t10_vpd *vpd,
1490 unsigned char *p_buf,
1491 int p_buf_len)
1492{
1493 unsigned char buf[VPD_TMP_BUF_SIZE];
1494 int ret = 0;
1495
1496 memset(buf, 0, VPD_TMP_BUF_SIZE);
1497
1498 switch (vpd->device_identifier_code_set) {
1499 case 0x01: /* Binary */
1500 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1501 &vpd->device_identifier[0]);
1502 break;
1503 case 0x02: /* ASCII */
1504 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1505 &vpd->device_identifier[0]);
1506 break;
1507 case 0x03: /* UTF-8 */
1508 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1509 &vpd->device_identifier[0]);
1510 break;
1511 default:
1512 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1513 " 0x%02x", vpd->device_identifier_code_set);
1514 ret = -1;
1515 break;
1516 }
1517
1518 if (p_buf)
1519 strncpy(p_buf, buf, p_buf_len);
1520 else
1521 printk("%s", buf);
1522
1523 return ret;
1524}
1525
1526int
1527transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1528{
1529 static const char hex_str[] = "0123456789abcdef";
1530	int j = 0, i = 4; /* offset to start of the identifier */
1531
1532 /*
1533 * The VPD Code Set (encoding)
1534 *
1535 * from spc3r23.pdf Section 7.6.3.1 Table 296
1536 */
1537 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1538 switch (vpd->device_identifier_code_set) {
1539 case 0x01: /* Binary */
1540 vpd->device_identifier[j++] =
1541 hex_str[vpd->device_identifier_type];
1542 while (i < (4 + page_83[3])) {
1543 vpd->device_identifier[j++] =
1544 hex_str[(page_83[i] & 0xf0) >> 4];
1545 vpd->device_identifier[j++] =
1546 hex_str[page_83[i] & 0x0f];
1547 i++;
1548 }
1549 break;
1550 case 0x02: /* ASCII */
1551 case 0x03: /* UTF-8 */
1552 while (i < (4 + page_83[3]))
1553 vpd->device_identifier[j++] = page_83[i++];
1554 break;
1555 default:
1556 break;
1557 }
1558
1559 return transport_dump_vpd_ident(vpd, NULL, 0);
1560}
1561EXPORT_SYMBOL(transport_set_vpd_ident);
1562
1563static void core_setup_task_attr_emulation(struct se_device *dev)
1564{
1565 /*
1566 * If this device is from Target_Core_Mod/pSCSI, disable the
1567 * SAM Task Attribute emulation.
1568 *
1569	 * This is currently not available in upstream Linux/SCSI Target
1570 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1571 */
1572 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1573 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1574 return;
1575 }
1576
1577 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1578 DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1579 " device\n", TRANSPORT(dev)->name,
1580 TRANSPORT(dev)->get_device_rev(dev));
1581}
1582
1583static void scsi_dump_inquiry(struct se_device *dev)
1584{
1585 struct t10_wwn *wwn = DEV_T10_WWN(dev);
1586 int i, device_type;
1587 /*
1588 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1589 */
1590 printk(" Vendor: ");
1591 for (i = 0; i < 8; i++)
1592 if (wwn->vendor[i] >= 0x20)
1593 printk("%c", wwn->vendor[i]);
1594 else
1595 printk(" ");
1596
1597 printk(" Model: ");
1598 for (i = 0; i < 16; i++)
1599 if (wwn->model[i] >= 0x20)
1600 printk("%c", wwn->model[i]);
1601 else
1602 printk(" ");
1603
1604 printk(" Revision: ");
1605 for (i = 0; i < 4; i++)
1606 if (wwn->revision[i] >= 0x20)
1607 printk("%c", wwn->revision[i]);
1608 else
1609 printk(" ");
1610
1611 printk("\n");
1612
1613 device_type = TRANSPORT(dev)->get_device_type(dev);
1614 printk(" Type: %s ", scsi_device_type(device_type));
1615 printk(" ANSI SCSI revision: %02x\n",
1616 TRANSPORT(dev)->get_device_rev(dev));
1617}
1618
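/*
 * Used by TCM subsystem plugins to publish a configured backend as a
 * struct se_device under an existing struct se_hba.  Returns the new
 * struct se_device, or NULL on failure.  Note that inquiry_prod and
 * inquiry_rev must be supplied for anything other than TCM/pSCSI
 * passthrough (see the INQUIRY const preload below).
 */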
1619struct se_device *transport_add_device_to_core_hba(
1620 struct se_hba *hba,
1621 struct se_subsystem_api *transport,
1622 struct se_subsystem_dev *se_dev,
1623 u32 device_flags,
1624 void *transport_dev,
1625 struct se_dev_limits *dev_limits,
1626 const char *inquiry_prod,
1627 const char *inquiry_rev)
1628{
1629 int force_pt;
1630 struct se_device *dev;
1631
1632 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1633 if (!(dev)) {
1634 printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
1635 return NULL;
1636 }
1637 dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
1638 if (!(dev->dev_queue_obj)) {
1639 printk(KERN_ERR "Unable to allocate memory for"
1640 " dev->dev_queue_obj\n");
1641 kfree(dev);
1642 return NULL;
1643 }
1644 transport_init_queue_obj(dev->dev_queue_obj);
1645
1646 dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
1647 GFP_KERNEL);
1648 if (!(dev->dev_status_queue_obj)) {
1649 printk(KERN_ERR "Unable to allocate memory for"
1650 " dev->dev_status_queue_obj\n");
1651 kfree(dev->dev_queue_obj);
1652 kfree(dev);
1653 return NULL;
1654 }
1655 transport_init_queue_obj(dev->dev_status_queue_obj);
1656
1657 dev->dev_flags = device_flags;
1658 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1659 dev->dev_ptr = (void *) transport_dev;
1660 dev->se_hba = hba;
1661 dev->se_sub_dev = se_dev;
1662 dev->transport = transport;
1663 atomic_set(&dev->active_cmds, 0);
1664 INIT_LIST_HEAD(&dev->dev_list);
1665 INIT_LIST_HEAD(&dev->dev_sep_list);
1666 INIT_LIST_HEAD(&dev->dev_tmr_list);
1667 INIT_LIST_HEAD(&dev->execute_task_list);
1668 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1669 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1670 INIT_LIST_HEAD(&dev->state_task_list);
1671 spin_lock_init(&dev->execute_task_lock);
1672 spin_lock_init(&dev->delayed_cmd_lock);
1673 spin_lock_init(&dev->ordered_cmd_lock);
1674 spin_lock_init(&dev->state_task_lock);
1675 spin_lock_init(&dev->dev_alua_lock);
1676 spin_lock_init(&dev->dev_reservation_lock);
1677 spin_lock_init(&dev->dev_status_lock);
1678 spin_lock_init(&dev->dev_status_thr_lock);
1679 spin_lock_init(&dev->se_port_lock);
1680 spin_lock_init(&dev->se_tmr_lock);
1681
1682 dev->queue_depth = dev_limits->queue_depth;
1683 atomic_set(&dev->depth_left, dev->queue_depth);
1684 atomic_set(&dev->dev_ordered_id, 0);
1685
1686 se_dev_set_default_attribs(dev, dev_limits);
1687
1688 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1689 dev->creation_time = get_jiffies_64();
1690 spin_lock_init(&dev->stats_lock);
1691
1692 spin_lock(&hba->device_lock);
1693 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1694 hba->dev_count++;
1695 spin_unlock(&hba->device_lock);
1696 /*
1697 * Setup the SAM Task Attribute emulation for struct se_device
1698 */
1699 core_setup_task_attr_emulation(dev);
1700 /*
1701 * Force PR and ALUA passthrough emulation with internal object use.
1702 */
1703 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1704 /*
1705 * Setup the Reservations infrastructure for struct se_device
1706 */
1707 core_setup_reservations(dev, force_pt);
1708 /*
1709 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1710 */
1711 if (core_setup_alua(dev, force_pt) < 0)
1712 goto out;
1713
1714 /*
1715 * Startup the struct se_device processing thread
1716 */
1717 dev->process_thread = kthread_run(transport_processing_thread, dev,
1718 "LIO_%s", TRANSPORT(dev)->name);
1719 if (IS_ERR(dev->process_thread)) {
1720 printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
1721 TRANSPORT(dev)->name);
1722 goto out;
1723 }
1724
1725 /*
1726 * Preload the initial INQUIRY const values if we are doing
1727 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1728 * passthrough because this is being provided by the backend LLD.
1729 * This is required so that transport_get_inquiry() copies these
1730 * originals once back into DEV_T10_WWN(dev) for the virtual device
1731 * setup.
1732 */
1733 if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1734		if (!(inquiry_prod) || !(inquiry_rev)) {
1735 printk(KERN_ERR "All non TCM/pSCSI plugins require"
1736 " INQUIRY consts\n");
1737 goto out;
1738 }
1739
1740 strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
1741 strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
1742 strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
1743 }
1744 scsi_dump_inquiry(dev);
1745
1746 return dev;
1747out:
1748 kthread_stop(dev->process_thread);
1749
1750 spin_lock(&hba->device_lock);
1751 list_del(&dev->dev_list);
1752 hba->dev_count--;
1753 spin_unlock(&hba->device_lock);
1754
1755 se_release_vpd_for_dev(dev);
1756
1757 kfree(dev->dev_status_queue_obj);
1758 kfree(dev->dev_queue_obj);
1759 kfree(dev);
1760
1761 return NULL;
1762}
1763EXPORT_SYMBOL(transport_add_device_to_core_hba);
1764
1765/* transport_generic_prepare_cdb():
1766 *
1767 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1768 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1769 * The point of this is that, since we are mapping iSCSI LUNs to
1770 * SCSI Target IDs, having a non-zero LUN in the CDB will throw the
1771 * devices and HBAs for a loop.
1772 */
1773static inline void transport_generic_prepare_cdb(
1774 unsigned char *cdb)
1775{
1776 switch (cdb[0]) {
1777 case READ_10: /* SBC - RDProtect */
1778 case READ_12: /* SBC - RDProtect */
1779 case READ_16: /* SBC - RDProtect */
1780 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1781 case VERIFY: /* SBC - VRProtect */
1782 case VERIFY_16: /* SBC - VRProtect */
1783 case WRITE_VERIFY: /* SBC - VRProtect */
1784 case WRITE_VERIFY_12: /* SBC - VRProtect */
1785 break;
1786 default:
1787 cdb[1] &= 0x1f; /* clear logical unit number */
1788 break;
1789 }
1790}
1791
1792static struct se_task *
1793transport_generic_get_task(struct se_cmd *cmd,
1794 enum dma_data_direction data_direction)
1795{
1796 struct se_task *task;
1797 struct se_device *dev = SE_DEV(cmd);
1798 unsigned long flags;
1799
1800 task = dev->transport->alloc_task(cmd);
1801 if (!task) {
1802 printk(KERN_ERR "Unable to allocate struct se_task\n");
1803 return NULL;
1804 }
1805
1806 INIT_LIST_HEAD(&task->t_list);
1807 INIT_LIST_HEAD(&task->t_execute_list);
1808 INIT_LIST_HEAD(&task->t_state_list);
1809 init_completion(&task->task_stop_comp);
1810 task->task_no = T_TASK(cmd)->t_tasks_no++;
1811 task->task_se_cmd = cmd;
1812 task->se_dev = dev;
1813 task->task_data_direction = data_direction;
1814
1815 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
1816 list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
1817 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
1818
1819 return task;
1820}
1821
1822static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1823
1824void transport_device_setup_cmd(struct se_cmd *cmd)
1825{
1826 cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
1827}
1828EXPORT_SYMBOL(transport_device_setup_cmd);
1829
1830/*
1831 * Used by fabric modules containing a local struct se_cmd within their
1832 * fabric dependent per I/O descriptor.
1833 */
1834void transport_init_se_cmd(
1835 struct se_cmd *cmd,
1836 struct target_core_fabric_ops *tfo,
1837 struct se_session *se_sess,
1838 u32 data_length,
1839 int data_direction,
1840 int task_attr,
1841 unsigned char *sense_buffer)
1842{
1843 INIT_LIST_HEAD(&cmd->se_lun_list);
1844 INIT_LIST_HEAD(&cmd->se_delayed_list);
1845 INIT_LIST_HEAD(&cmd->se_ordered_list);
1846 /*
1847 * Setup t_task pointer to t_task_backstore
1848 */
1849 cmd->t_task = &cmd->t_task_backstore;
1850
1851 INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
1852 init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
1853 init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
1854 init_completion(&T_TASK(cmd)->t_transport_stop_comp);
1855 spin_lock_init(&T_TASK(cmd)->t_state_lock);
1856 atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
1857
1858 cmd->se_tfo = tfo;
1859 cmd->se_sess = se_sess;
1860 cmd->data_length = data_length;
1861 cmd->data_direction = data_direction;
1862 cmd->sam_task_attr = task_attr;
1863 cmd->sense_buffer = sense_buffer;
1864}
1865EXPORT_SYMBOL(transport_init_se_cmd);
1866
1867static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1868{
1869 /*
1870 * Check if SAM Task Attribute emulation is enabled for this
1871 * struct se_device storage object
1872 */
1873 if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1874 return 0;
1875
1876 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1877 DEBUG_STA("SAM Task Attribute ACA"
1878 " emulation is not supported\n");
1879 return -1;
1880 }
1881 /*
1882 * Used to determine when ORDERED commands should go from
1883 * Dormant to Active status.
1884 */
1885 cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
1886 smp_mb__after_atomic_inc();
1887 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1888 cmd->se_ordered_id, cmd->sam_task_attr,
1889 TRANSPORT(cmd->se_dev)->name);
1890 return 0;
1891}
1892
1893void transport_free_se_cmd(
1894 struct se_cmd *se_cmd)
1895{
1896 if (se_cmd->se_tmr_req)
1897 core_tmr_release_req(se_cmd->se_tmr_req);
1898 /*
1899 * Check and free any extended CDB buffer that was allocated
1900 */
1901 if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
1902 kfree(T_TASK(se_cmd)->t_task_cdb);
1903}
1904EXPORT_SYMBOL(transport_free_se_cmd);
1905
1906static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
1907
1908/* transport_generic_allocate_tasks():
1909 *
1910 * Called from fabric RX Thread.
1911 */
1912int transport_generic_allocate_tasks(
1913 struct se_cmd *cmd,
1914 unsigned char *cdb)
1915{
1916 int ret;
1917
1918 transport_generic_prepare_cdb(cdb);
1919
1920 /*
1921 * This is needed for early exceptions.
1922 */
1923 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
1924
1925 transport_device_setup_cmd(cmd);
1926 /*
1927 * Ensure that the received CDB does not exceed the max (252 + 8) bytes
1928 * for VARIABLE_LENGTH_CMD
1929 */
1930 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1931 printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
1932 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1933 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1934 return -1;
1935 }
1936 /*
1937 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1938 * allocate the additional extended CDB buffer now. Otherwise
1939 * setup the pointer from __t_task_cdb to t_task_cdb.
1940 */
1941 if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
1942 T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
1943 GFP_KERNEL);
1944 if (!(T_TASK(cmd)->t_task_cdb)) {
1945 printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
1946 " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
1947 scsi_command_size(cdb),
1948 (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
1949 return -1;
1950 }
1951 } else
1952 T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
1953 /*
1954 * Copy the original CDB into T_TASK(cmd).
1955 */
1956 memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
1957 /*
1958 * Setup the received CDB based on SCSI defined opcodes and
1959 * perform unit attention, persistent reservations and ALUA
1960 * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
1961 * pointer is expected to be setup before we reach this point.
1962 */
1963 ret = transport_generic_cmd_sequencer(cmd, cdb);
1964 if (ret < 0)
1965 return ret;
1966 /*
1967 * Check for SAM Task Attribute Emulation
1968 */
1969 if (transport_check_alloc_task_attr(cmd) < 0) {
1970 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1971 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1972 return -2;
1973 }
1974 spin_lock(&cmd->se_lun->lun_sep_lock);
1975 if (cmd->se_lun->lun_sep)
1976 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1977 spin_unlock(&cmd->se_lun->lun_sep_lock);
1978 return 0;
1979}
1980EXPORT_SYMBOL(transport_generic_allocate_tasks);
1981
1982/*
1983 * Used by fabric module frontends not defining a TFO->new_cmd_map()
1984 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
1985 */
1986int transport_generic_handle_cdb(
1987 struct se_cmd *cmd)
1988{
1989 if (!SE_LUN(cmd)) {
1990 dump_stack();
1991 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
1992 return -1;
1993 }
1994
1995 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1996 return 0;
1997}
1998EXPORT_SYMBOL(transport_generic_handle_cdb);
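/*
 * Hypothetical fabric-module usage sketch (illustrative, not part of this
 * file): a fabric RX path typically calls transport_init_se_cmd() on its
 * embedded struct se_cmd, resolves the LUN so that SE_LUN(cmd) is valid,
 * and then does roughly
 *
 *	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
 *		(queue a CHECK_CONDITION response to the initiator)
 *	else
 *		transport_generic_handle_cdb(se_cmd);
 *
 * with the exact error handling left to the individual fabric module.
 */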
1999
2000/*
2001 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
2002 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
2003 * complete setup in TCM process context w/ TFO->new_cmd_map().
2004 */
2005int transport_generic_handle_cdb_map(
2006 struct se_cmd *cmd)
2007{
2008 if (!SE_LUN(cmd)) {
2009 dump_stack();
2010 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
2011 return -1;
2012 }
2013
2014 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
2015 return 0;
2016}
2017EXPORT_SYMBOL(transport_generic_handle_cdb_map);
2018
2019/* transport_generic_handle_data():
2020 *
2021 *
2022 */
2023int transport_generic_handle_data(
2024 struct se_cmd *cmd)
2025{
2026 /*
2027 * For the software fabric case, we assume the nexus is being
2028 * failed/shutdown when signals are pending from the kthread context
2029 * caller, so we return a failure. For the HW target mode case running
2030 * in interrupt code, the signal_pending() check is skipped.
2031 */
2032 if (!in_interrupt() && signal_pending(current))
2033 return -1;
2034 /*
2035 * If the received CDB has already been ABORTED by the generic
2036 * target engine, we now call transport_check_aborted_status()
2037 * to queue any delayed TASK_ABORTED status for the received CDB to the
2038 * fabric module as we are expecting no further incoming DATA OUT
2039 * sequences at this point.
2040 */
2041 if (transport_check_aborted_status(cmd, 1) != 0)
2042 return 0;
2043
2044 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
2045 return 0;
2046}
2047EXPORT_SYMBOL(transport_generic_handle_data);
2048
2049/* transport_generic_handle_tmr():
2050 *
2051 *
2052 */
2053int transport_generic_handle_tmr(
2054 struct se_cmd *cmd)
2055{
2056 /*
2057 * This is needed for early exceptions.
2058 */
2059 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
2060 transport_device_setup_cmd(cmd);
2061
2062 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
2063 return 0;
2064}
2065EXPORT_SYMBOL(transport_generic_handle_tmr);
2066
2067void transport_generic_free_cmd_intr(
2068 struct se_cmd *cmd)
2069{
2070 transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
2071}
2072EXPORT_SYMBOL(transport_generic_free_cmd_intr);
2073
2074static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
2075{
2076 struct se_task *task, *task_tmp;
2077 unsigned long flags;
2078 int ret = 0;
2079
2080 DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
2081 CMD_TFO(cmd)->get_task_tag(cmd));
2082
2083 /*
2084 * No tasks remain in the execution queue
2085 */
2086 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2087 list_for_each_entry_safe(task, task_tmp,
2088 &T_TASK(cmd)->t_task_list, t_list) {
2089 DEBUG_TS("task_no[%d] - Processing task %p\n",
2090 task->task_no, task);
2091 /*
2092 * If the struct se_task has not been sent and is not active,
2093 * remove the struct se_task from the execution queue.
2094 */
2095 if (!atomic_read(&task->task_sent) &&
2096 !atomic_read(&task->task_active)) {
2097 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
2098 flags);
2099 transport_remove_task_from_execute_queue(task,
2100 task->se_dev);
2101
2102 DEBUG_TS("task_no[%d] - Removed from execute queue\n",
2103 task->task_no);
2104 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2105 continue;
2106 }
2107
2108 /*
2109 * If the struct se_task is active, sleep until it is returned
2110 * from the plugin.
2111 */
2112 if (atomic_read(&task->task_active)) {
2113 atomic_set(&task->task_stop, 1);
2114 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
2115 flags);
2116
2117 DEBUG_TS("task_no[%d] - Waiting to complete\n",
2118 task->task_no);
2119 wait_for_completion(&task->task_stop_comp);
2120 DEBUG_TS("task_no[%d] - Stopped successfully\n",
2121 task->task_no);
2122
2123 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2124 atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
2125
2126 atomic_set(&task->task_active, 0);
2127 atomic_set(&task->task_stop, 0);
2128 } else {
2129 DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
2130 ret++;
2131 }
2132
2133 __transport_stop_task_timer(task, &flags);
2134 }
2135 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2136
2137 return ret;
2138}
2139
2140static void transport_failure_reset_queue_depth(struct se_device *dev)
2141{
2142 unsigned long flags;
2143
2144 spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
2145 atomic_inc(&dev->depth_left);
2146 atomic_inc(&SE_HBA(dev)->left_queue_depth);
2147 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2148}
2149
2150/*
2151 * Handle SAM-esque emulation for generic transport request failures.
2152 */
2153static void transport_generic_request_failure(
2154 struct se_cmd *cmd,
2155 struct se_device *dev,
2156 int complete,
2157 int sc)
2158{
2159 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2160 " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
2161 T_TASK(cmd)->t_task_cdb[0]);
2162 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2163 " %d/%d transport_error_status: %d\n",
2164 CMD_TFO(cmd)->get_cmd_state(cmd),
2165 cmd->t_state, cmd->deferred_t_state,
2166 cmd->transport_error_status);
2167 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2168 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2169 " t_transport_active: %d t_transport_stop: %d"
2170 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
2171 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
2172 atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
2173 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
2174 atomic_read(&T_TASK(cmd)->t_transport_active),
2175 atomic_read(&T_TASK(cmd)->t_transport_stop),
2176 atomic_read(&T_TASK(cmd)->t_transport_sent));
2177
2178 transport_stop_all_task_timers(cmd);
2179
2180 if (dev)
2181 transport_failure_reset_queue_depth(dev);
2182 /*
2183 * For SAM Task Attribute emulation for failed struct se_cmd
2184 */
2185 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2186 transport_complete_task_attr(cmd);
2187
2188 if (complete) {
2189 transport_direct_request_timeout(cmd);
2190 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2191 }
2192
2193 switch (cmd->transport_error_status) {
2194 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
2195 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2196 break;
2197 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
2198 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
2199 break;
2200 case PYX_TRANSPORT_INVALID_CDB_FIELD:
2201 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2202 break;
2203 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
2204 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2205 break;
2206 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
2207 if (!sc)
2208 transport_new_cmd_failure(cmd);
2209 /*
2210 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
2211 * we force this session to fall back to session
2212 * recovery.
2213 */
2214 CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
2215 CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
2216
2217 goto check_stop;
2218 case PYX_TRANSPORT_LU_COMM_FAILURE:
2219 case PYX_TRANSPORT_ILLEGAL_REQUEST:
2220 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2221 break;
2222 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
2223 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
2224 break;
2225 case PYX_TRANSPORT_WRITE_PROTECTED:
2226 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
2227 break;
2228 case PYX_TRANSPORT_RESERVATION_CONFLICT:
2229 /*
2230 * No SENSE Data payload for this case, set SCSI Status
2231 * and queue the response to $FABRIC_MOD.
2232 *
2233 * Uses linux/include/scsi/scsi.h SAM status codes defs
2234 */
2235 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2236 /*
2237 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2238 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2239 * CONFLICT STATUS.
2240 *
2241 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2242 */
2243 if (SE_SESS(cmd) &&
2244 DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
2245 core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
2246 cmd->orig_fe_lun, 0x2C,
2247 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2248
2249 CMD_TFO(cmd)->queue_status(cmd);
2250 goto check_stop;
2251 case PYX_TRANSPORT_USE_SENSE_REASON:
2252 /*
2253 * struct se_cmd->scsi_sense_reason already set
2254 */
2255 break;
2256 default:
2257 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
2258 T_TASK(cmd)->t_task_cdb[0],
2259 cmd->transport_error_status);
2260 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2261 break;
2262 }
2263
2264 if (!sc)
2265 transport_new_cmd_failure(cmd);
2266 else
2267 transport_send_check_condition_and_sense(cmd,
2268 cmd->scsi_sense_reason, 0);
2269check_stop:
2270 transport_lun_remove_cmd(cmd);
2271 if (!(transport_cmd_check_stop_to_fabric(cmd)))
2272 ;
2273}
2274
2275static void transport_direct_request_timeout(struct se_cmd *cmd)
2276{
2277 unsigned long flags;
2278
2279 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2280 if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
2281 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2282 return;
2283 }
2284 if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
2285 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2286 return;
2287 }
2288
2289 atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
2290 &T_TASK(cmd)->t_se_count);
2291 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2292}
2293
2294static void transport_generic_request_timeout(struct se_cmd *cmd)
2295{
2296 unsigned long flags;
2297
2298 /*
2299 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
2300 * to allow last call to free memory resources.
2301 */
2302 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2303 if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
2304 int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
2305
2306 atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
2307 }
2308 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2309
2310 transport_generic_remove(cmd, 0, 0);
2311}
2312
2313static int
2314transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
2315{
2316 unsigned char *buf;
2317
2318 buf = kzalloc(data_length, GFP_KERNEL);
2319 if (!(buf)) {
2320 printk(KERN_ERR "Unable to allocate memory for buffer\n");
2321 return -1;
2322 }
2323
2324 T_TASK(cmd)->t_tasks_se_num = 0;
2325 T_TASK(cmd)->t_task_buf = buf;
2326
2327 return 0;
2328}
2329
2330static inline u32 transport_lba_21(unsigned char *cdb)
2331{
2332 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2333}
2334
2335static inline u32 transport_lba_32(unsigned char *cdb)
2336{
2337 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2338}
2339
2340static inline unsigned long long transport_lba_64(unsigned char *cdb)
2341{
2342 unsigned int __v1, __v2;
2343
2344 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2345 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2346
2347 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2348}
2349
2350/*
2351 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2352 */
2353static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2354{
2355 unsigned int __v1, __v2;
2356
2357 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2358 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2359
2360 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2361}
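/*
 * Worked example (illustrative only): a READ_10 CDB carrying LBA bytes
 * cdb[2..5] = { 0x00, 0x12, 0x34, 0x56 } makes transport_lba_32() return
 * (0x00 << 24) | (0x12 << 16) | (0x34 << 8) | 0x56 = 0x00123456.
 * transport_lba_64() assembles the same big-endian value from cdb[2..9],
 * and transport_lba_64_ext() from cdb[12..19] of a 32-byte extended CDB.
 */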
2362
2363static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
2364{
2365 unsigned long flags;
2366
2367 spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2368 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
2369 spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2370}
2371
2372/*
2373 * Called from interrupt context.
2374 */
2375static void transport_task_timeout_handler(unsigned long data)
2376{
2377 struct se_task *task = (struct se_task *)data;
2378 struct se_cmd *cmd = TASK_CMD(task);
2379 unsigned long flags;
2380
2381 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2382
2383 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2384 if (task->task_flags & TF_STOP) {
2385 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2386 return;
2387 }
2388 task->task_flags &= ~TF_RUNNING;
2389
2390 /*
2391 * Determine if transport_complete_task() has already been called.
2392 */
2393 if (!(atomic_read(&task->task_active))) {
2394 DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2395 " == 0\n", task, cmd);
2396 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2397 return;
2398 }
2399
2400 atomic_inc(&T_TASK(cmd)->t_se_count);
2401 atomic_inc(&T_TASK(cmd)->t_transport_timeout);
2402 T_TASK(cmd)->t_tasks_failed = 1;
2403
2404 atomic_set(&task->task_timeout, 1);
2405 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2406 task->task_scsi_status = 1;
2407
2408 if (atomic_read(&task->task_stop)) {
2409 DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2410 " == 1\n", task, cmd);
2411 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2412 complete(&task->task_stop_comp);
2413 return;
2414 }
2415
2416 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
2417 DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2418 " t_task_cdbs_left\n", task, cmd);
2419 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2420 return;
2421 }
2422 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2423 task, cmd);
2424
2425 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2426 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2427
2428 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2429}
2430
2431/*
2432 * Called with T_TASK(cmd)->t_state_lock held.
2433 */
2434static void transport_start_task_timer(struct se_task *task)
2435{
2436 struct se_device *dev = task->se_dev;
2437 int timeout;
2438
2439 if (task->task_flags & TF_RUNNING)
2440 return;
2441 /*
2442 * If the task_timeout is disabled, exit now.
2443 */
2444 timeout = DEV_ATTRIB(dev)->task_timeout;
2445 if (!(timeout))
2446 return;
2447
2448 init_timer(&task->task_timer);
2449 task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2450 task->task_timer.data = (unsigned long) task;
2451 task->task_timer.function = transport_task_timeout_handler;
2452
2453 task->task_flags |= TF_RUNNING;
2454 add_timer(&task->task_timer);
2455#if 0
2456 printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
2457 " %d\n", task->task_se_cmd, task, timeout);
2458#endif
2459}
2460
2461/*
2462 * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
2463 */
2464void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2465{
2466 struct se_cmd *cmd = TASK_CMD(task);
2467
2468 if (!(task->task_flags & TF_RUNNING))
2469 return;
2470
2471 task->task_flags |= TF_STOP;
2472 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
2473
2474 del_timer_sync(&task->task_timer);
2475
2476 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
2477 task->task_flags &= ~TF_RUNNING;
2478 task->task_flags &= ~TF_STOP;
2479}
2480
2481static void transport_stop_all_task_timers(struct se_cmd *cmd)
2482{
2483 struct se_task *task = NULL, *task_tmp;
2484 unsigned long flags;
2485
2486 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2487 list_for_each_entry_safe(task, task_tmp,
2488 &T_TASK(cmd)->t_task_list, t_list)
2489 __transport_stop_task_timer(task, &flags);
2490 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2491}
2492
2493static inline int transport_tcq_window_closed(struct se_device *dev)
2494{
2495 if (dev->dev_tcq_window_closed++ <
2496 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2497 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2498 } else
2499 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2500
2501 wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
2502 return 0;
2503}
2504
2505/*
2506 * Called from Fabric Module context from transport_execute_tasks()
2507 *
2508 * The return of this function determines whether the tasks from struct se_cmd
2509 * get added to the execution queue in transport_execute_tasks(),
2510 * or are added to the delayed or ordered lists here.
2511 */
2512static inline int transport_execute_task_attr(struct se_cmd *cmd)
2513{
2514 if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2515 return 1;
2516 /*
2517 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
2518 * so that the passed struct se_cmd's list of tasks is added to the front of the list.
2519 */
2520 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2521 atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
2522 smp_mb__after_atomic_inc();
2523 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2524 " 0x%02x, se_ordered_id: %u\n",
2525 T_TASK(cmd)->t_task_cdb[0],
2526 cmd->se_ordered_id);
2527 return 1;
2528 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2529 spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
2530 list_add_tail(&cmd->se_ordered_list,
2531 &SE_DEV(cmd)->ordered_cmd_list);
2532 spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
2533
2534 atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
2535 smp_mb__after_atomic_inc();
2536
2537 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2538 " list, se_ordered_id: %u\n",
2539 T_TASK(cmd)->t_task_cdb[0],
2540 cmd->se_ordered_id);
2541 /*
2542 * Add ORDERED command to tail of execution queue if
2543 * no other older commands exist that need to be
2544 * completed first.
2545 */
2546 if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
2547 return 1;
2548 } else {
2549 /*
2550 * For SIMPLE and UNTAGGED Task Attribute commands
2551 */
2552 atomic_inc(&SE_DEV(cmd)->simple_cmds);
2553 smp_mb__after_atomic_inc();
2554 }
2555 /*
2556 * Otherwise, if one or more outstanding ORDERED task attributes exist,
2557 * the dormant task(s) built for the passed struct se_cmd cannot become
2558 * Active yet and must wait on the delayed queue for this struct se_device.
2559 */
2560 if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
2561 /*
2562 * Otherwise, add cmd w/ tasks to delayed cmd queue that
2563 * will be drained upon completion of HEAD_OF_QUEUE task.
2564 */
2565 spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
2566 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2567 list_add_tail(&cmd->se_delayed_list,
2568 &SE_DEV(cmd)->delayed_cmd_list);
2569 spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
2570
2571 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2572 " delayed CMD list, se_ordered_id: %u\n",
2573 T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
2574 cmd->se_ordered_id);
2575 /*
2576 * Return zero to let transport_execute_tasks() know
2577 * not to add the delayed tasks to the execution list.
2578 */
2579 return 0;
2580 }
2581 /*
2582 * Otherwise, no ORDERED task attributes exist..
2583 */
2584 return 1;
2585}
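/*
 * Illustrative example (not part of the original source): while an ORDERED
 * command is outstanding (dev_ordered_sync != 0), a following SIMPLE command
 * returns 0 above and is parked on delayed_cmd_list; a HEAD_OF_QUEUE command
 * still returns 1 and is executed immediately.
 */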
2586
2587/*
2588 * Called from fabric module context in transport_generic_new_cmd() and
2589 * transport_generic_process_write()
2590 */
2591static int transport_execute_tasks(struct se_cmd *cmd)
2592{
2593 int add_tasks;
2594
2595 if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
2596 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2597 cmd->transport_error_status =
2598 PYX_TRANSPORT_LU_COMM_FAILURE;
2599 transport_generic_request_failure(cmd, NULL, 0, 1);
2600 return 0;
2601 }
2602 }
2603 /*
2604 * Call transport_cmd_check_stop() to see if a fabric exception
2605 * has occurred that prevents execution.
2606 */
2607 if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
2608 /*
2609 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2610 * attribute for the tasks of the received struct se_cmd CDB
2611 */
2612 add_tasks = transport_execute_task_attr(cmd);
2613 if (add_tasks == 0)
2614 goto execute_tasks;
2615 /*
2616 * This calls transport_add_tasks_from_cmd() to handle
2617 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2618 * (if enabled) in __transport_add_task_to_execute_queue() and
2619 * transport_add_task_check_sam_attr().
2620 */
2621 transport_add_tasks_from_cmd(cmd);
2622 }
2623 /*
2624 * Kick the execution queue for the cmd associated struct se_device
2625 * storage object.
2626 */
2627execute_tasks:
2628 __transport_execute_tasks(SE_DEV(cmd));
2629 return 0;
2630}
2631
2632/*
2633 * Called to check the struct se_device TCQ depth window and, once open, pull a struct se_task
2634 * from struct se_device->execute_task_list and send it to the underlying subsystem plugin.
2635 *
2636 * Called from transport_processing_thread()
2637 */
2638static int __transport_execute_tasks(struct se_device *dev)
2639{
2640 int error;
2641 struct se_cmd *cmd = NULL;
2642 struct se_task *task;
2643 unsigned long flags;
2644
2645 /*
2646 * Check if there is enough room in the device and HBA queue to send
2647 * struct se_transport_task's to the selected transport.
2648 */
2649check_depth:
2650 spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
2651 if (!(atomic_read(&dev->depth_left)) ||
2652 !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
2653 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2654 return transport_tcq_window_closed(dev);
2655 }
2656 dev->dev_tcq_window_closed = 0;
2657
2658 spin_lock(&dev->execute_task_lock);
2659 task = transport_get_task_from_execute_queue(dev);
2660 spin_unlock(&dev->execute_task_lock);
2661
2662 if (!task) {
2663 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2664 return 0;
2665 }
2666
2667 atomic_dec(&dev->depth_left);
2668 atomic_dec(&SE_HBA(dev)->left_queue_depth);
2669 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2670
2671 cmd = TASK_CMD(task);
2672
2673 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2674 atomic_set(&task->task_active, 1);
2675 atomic_set(&task->task_sent, 1);
2676 atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
2677
2678 if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
2679 T_TASK(cmd)->t_task_cdbs)
2680 atomic_set(&cmd->transport_sent, 1);
2681
2682 transport_start_task_timer(task);
2683 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2684 /*
2685 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2686 * to grab REPORT_LUNS CDBs before they hit the
2687 * struct se_subsystem_api->do_task() caller below.
2688 */
2689 if (cmd->transport_emulate_cdb) {
2690 error = cmd->transport_emulate_cdb(cmd);
2691 if (error != 0) {
2692 cmd->transport_error_status = error;
2693 atomic_set(&task->task_active, 0);
2694 atomic_set(&cmd->transport_sent, 0);
2695 transport_stop_tasks_for_cmd(cmd);
2696 transport_generic_request_failure(cmd, dev, 0, 1);
2697 goto check_depth;
2698 }
2699 /*
2700 * Handle the successful completion for transport_emulate_cdb()
2701 * for synchronous operation, i.e. when SCF_EMULATE_CDB_ASYNC is not set.
2702 * Otherwise the caller is expected to complete the task with
2703 * proper status.
2704 */
2705 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2706 cmd->scsi_status = SAM_STAT_GOOD;
2707 task->task_scsi_status = GOOD;
2708 transport_complete_task(task, 1);
2709 }
2710 } else {
2711 /*
2712 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2713 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2714 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2715 * LUN emulation code.
2716 *
2717 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2718 * call ->do_task() directly and let the underlying TCM subsystem plugin
2719 * code handle the CDB emulation.
2720 */
2721 if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2722 (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2723 error = transport_emulate_control_cdb(task);
2724 else
2725 error = TRANSPORT(dev)->do_task(task);
2726
2727 if (error != 0) {
2728 cmd->transport_error_status = error;
2729 atomic_set(&task->task_active, 0);
2730 atomic_set(&cmd->transport_sent, 0);
2731 transport_stop_tasks_for_cmd(cmd);
2732 transport_generic_request_failure(cmd, dev, 0, 1);
2733 }
2734 }
2735
2736 goto check_depth;
2737
2738 return 0;
2739}
2740
2741void transport_new_cmd_failure(struct se_cmd *se_cmd)
2742{
2743 unsigned long flags;
2744 /*
2745 * Any unsolicited data will get dumped for failed command inside of
2746 * the fabric plugin
2747 */
2748 spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2749 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2750 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2751 spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2752
2753 CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
2754}
2755
2756static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2757
2758static inline u32 transport_get_sectors_6(
2759 unsigned char *cdb,
2760 struct se_cmd *cmd,
2761 int *ret)
2762{
2763 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2764
2765 /*
2766 * Assume TYPE_DISK for non struct se_device objects.
2767 * Use 8-bit sector value.
2768 */
2769 if (!dev)
2770 goto type_disk;
2771
2772 /*
2773 * Use 24-bit allocation length for TYPE_TAPE.
2774 */
2775 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2776 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2777
2778 /*
2779 * Everything else assumes TYPE_DISK Sector CDB location.
2780 * Use 8-bit sector value.
2781 */
2782type_disk:
2783 return (u32)cdb[4];
2784}
2785
2786static inline u32 transport_get_sectors_10(
2787 unsigned char *cdb,
2788 struct se_cmd *cmd,
2789 int *ret)
2790{
2791 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2792
2793 /*
2794 * Assume TYPE_DISK for non struct se_device objects.
2795 * Use 16-bit sector value.
2796 */
2797 if (!dev)
2798 goto type_disk;
2799
2800 /*
2801 * XXX_10 is not defined in SSC, throw an exception
2802 */
2803 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2804 *ret = -1;
2805 return 0;
2806 }
2807
2808 /*
2809 * Everything else assumes TYPE_DISK Sector CDB location.
2810 * Use 16-bit sector value.
2811 */
2812type_disk:
2813 return (u32)(cdb[7] << 8) + cdb[8];
2814}
2815
2816static inline u32 transport_get_sectors_12(
2817 unsigned char *cdb,
2818 struct se_cmd *cmd,
2819 int *ret)
2820{
2821 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2822
2823 /*
2824 * Assume TYPE_DISK for non struct se_device objects.
2825 * Use 32-bit sector value.
2826 */
2827 if (!dev)
2828 goto type_disk;
2829
2830 /*
2831 * XXX_12 is not defined in SSC, throw an exception
2832 */
2833 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2834 *ret = -1;
2835 return 0;
2836 }
2837
2838 /*
2839 * Everything else assumes TYPE_DISK Sector CDB location.
2840 * Use 32-bit sector value.
2841 */
2842type_disk:
2843 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2844}
2845
2846static inline u32 transport_get_sectors_16(
2847 unsigned char *cdb,
2848 struct se_cmd *cmd,
2849 int *ret)
2850{
2851 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2852
2853 /*
2854 * Assume TYPE_DISK for non struct se_device objects.
2855 * Use 32-bit sector value.
2856 */
2857 if (!dev)
2858 goto type_disk;
2859
2860 /*
2861 * Use 24-bit allocation length for TYPE_TAPE.
2862 */
2863 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2864 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2865
2866type_disk:
2867 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2868 (cdb[12] << 8) + cdb[13];
2869}
2870
2871/*
2872 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2873 */
2874static inline u32 transport_get_sectors_32(
2875 unsigned char *cdb,
2876 struct se_cmd *cmd,
2877 int *ret)
2878{
2879 /*
2880 * Assume TYPE_DISK for non struct se_device objects.
2881 * Use 32-bit sector value.
2882 */
2883 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2884 (cdb[30] << 8) + cdb[31];
2885
2886}
2887
2888static inline u32 transport_get_size(
2889 u32 sectors,
2890 unsigned char *cdb,
2891 struct se_cmd *cmd)
2892{
2893 struct se_device *dev = SE_DEV(cmd);
2894
2895 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2896 if (cdb[1] & 1) { /* sectors */
2897 return DEV_ATTRIB(dev)->block_size * sectors;
2898 } else /* bytes */
2899 return sectors;
2900 }
2901#if 0
2902 printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
2903 " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
2904 DEV_ATTRIB(dev)->block_size * sectors,
2905 TRANSPORT(dev)->name);
2906#endif
2907 return DEV_ATTRIB(dev)->block_size * sectors;
2908}
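/*
 * Illustrative example: with DEV_ATTRIB(dev)->block_size == 512 and
 * sectors == 8, transport_get_size() returns 4096 bytes. For TYPE_TAPE
 * with the FIXED bit (cdb[1] & 1) cleared, "sectors" is already a byte
 * count and is returned unchanged.
 */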
2909
2910unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
2911{
2912 unsigned char result = 0;
2913 /*
2914 * MSB
2915 */
2916 if ((val[0] >= 'a') && (val[0] <= 'f'))
2917 result = ((val[0] - 'a' + 10) & 0xf) << 4;
2918 else
2919 if ((val[0] >= 'A') && (val[0] <= 'F'))
2920 result = ((val[0] - 'A' + 10) & 0xf) << 4;
2921 else /* digit */
2922 result = ((val[0] - '0') & 0xf) << 4;
2923 /*
2924 * LSB
2925 */
2926 if ((val[1] >= 'a') && (val[1] <= 'f'))
2927 result |= ((val[1] - 'a' + 10) & 0xf);
2928 else
2929 if ((val[1] >= 'A') && (val[1] <= 'F'))
2930 result |= ((val[1] - 'A' + 10) & 0xf);
2931 else /* digit */
2932 result |= ((val[1] - '0') & 0xf);
2933
2934 return result;
2935}
2936EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
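/*
 * Illustrative example (not part of the original code):
 * transport_asciihex_to_binaryhex() maps the two ASCII characters
 * { 'a', '5' } to 0xa5 and { '0', 'F' } to 0x0f, combining the high and
 * low nibbles decoded above.
 */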
2937
2938static void transport_xor_callback(struct se_cmd *cmd)
2939{
2940 unsigned char *buf, *addr;
2941 struct se_mem *se_mem;
2942 unsigned int offset;
2943 int i;
2944 /*
2945 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
2946 *
2947 * 1) read the specified logical block(s);
2948 * 2) transfer logical blocks from the data-out buffer;
2949 * 3) XOR the logical blocks transferred from the data-out buffer with
2950 * the logical blocks read, storing the resulting XOR data in a buffer;
2951 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
2952 * blocks transferred from the data-out buffer; and
2953 * 5) transfer the resulting XOR data to the data-in buffer.
2954 */
2955 buf = kmalloc(cmd->data_length, GFP_KERNEL);
2956 if (!(buf)) {
2957 printk(KERN_ERR "Unable to allocate xor_callback buf\n");
2958 return;
2959 }
2960 /*
2961 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
2962 * into the locally allocated *buf
2963 */
2964 transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
2965 /*
2966 * Now perform the XOR against the BIDI read memory located at
2967 * T_TASK(cmd)->t_mem_bidi_list
2968 */
2969
2970 offset = 0;
2971 list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
2972 addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
2973 if (!(addr))
2974 goto out;
2975
2976 for (i = 0; i < se_mem->se_len; i++)
2977 *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
2978
2979 offset += se_mem->se_len;
2980 kunmap_atomic(addr, KM_USER0);
2981 }
2982out:
2983 kfree(buf);
2984}
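/*
 * Illustrative example: for a one-byte transfer where the data-out buffer
 * holds 0x5a and the block read into the BIDI memory holds 0x3c, the loop
 * above leaves 0x5a ^ 0x3c == 0x66 in T_TASK(cmd)->t_mem_bidi_list for
 * return to the initiator.
 */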
2985
2986/*
2987 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2988 */
2989static int transport_get_sense_data(struct se_cmd *cmd)
2990{
2991 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2992 struct se_device *dev;
2993 struct se_task *task = NULL, *task_tmp;
2994 unsigned long flags;
2995 u32 offset = 0;
2996
2997 if (!SE_LUN(cmd)) {
2998 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
2999 return -1;
3000 }
3001 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3002 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3003 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3004 return 0;
3005 }
3006
3007 list_for_each_entry_safe(task, task_tmp,
3008 &T_TASK(cmd)->t_task_list, t_list) {
3009
3010 if (!task->task_sense)
3011 continue;
3012
3013 dev = task->se_dev;
3014 if (!(dev))
3015 continue;
3016
3017 if (!TRANSPORT(dev)->get_sense_buffer) {
3018 printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
3019 " is NULL\n");
3020 continue;
3021 }
3022
3023 sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
3024 if (!(sense_buffer)) {
3025 printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
3026 " sense buffer for task with sense\n",
3027 CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
3028 continue;
3029 }
3030 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3031
3032 offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
3033 TRANSPORT_SENSE_BUFFER);
3034
3035 memcpy((void *)&buffer[offset], (void *)sense_buffer,
3036 TRANSPORT_SENSE_BUFFER);
3037 cmd->scsi_status = task->task_scsi_status;
3038 /* Automatically padded */
3039 cmd->scsi_sense_length =
3040 (TRANSPORT_SENSE_BUFFER + offset);
3041
3042 printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
3043 " and sense\n",
3044 dev->se_hba->hba_id, TRANSPORT(dev)->name,
3045 cmd->scsi_status);
3046 return 0;
3047 }
3048 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3049
3050 return -1;
3051}
3052
3053static int transport_allocate_resources(struct se_cmd *cmd)
3054{
3055 u32 length = cmd->data_length;
3056
3057 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3058 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
3059 return transport_generic_get_mem(cmd, length, PAGE_SIZE);
3060 else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
3061 return transport_generic_allocate_buf(cmd, length);
3062 else
3063 return 0;
3064}
3065
3066static int
3067transport_handle_reservation_conflict(struct se_cmd *cmd)
3068{
3069 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3070 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3071 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
3072 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
3073 /*
3074 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
3075 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
3076 * CONFLICT STATUS.
3077 *
3078 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
3079 */
3080 if (SE_SESS(cmd) &&
3081 DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
3082 core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
3083 cmd->orig_fe_lun, 0x2C,
3084 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
3085 return -2;
3086}
3087
3088/* transport_generic_cmd_sequencer():
3089 *
3090 * Generic Command Sequencer that should work for most DAS transport
3091 * drivers.
3092 *
3093 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
3094 * RX Thread.
3095 *
3096 * FIXME: Need to support other SCSI OPCODES here as well.
3097 */
3098static int transport_generic_cmd_sequencer(
3099 struct se_cmd *cmd,
3100 unsigned char *cdb)
3101{
3102 struct se_device *dev = SE_DEV(cmd);
3103 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
3104 int ret = 0, sector_ret = 0, passthrough;
3105 u32 sectors = 0, size = 0, pr_reg_type = 0;
3106 u16 service_action;
3107 u8 alua_ascq = 0;
3108 /*
3109 * Check for an existing UNIT ATTENTION condition
3110 */
3111 if (core_scsi3_ua_check(cmd, cdb) < 0) {
3112 cmd->transport_wait_for_tasks =
3113 &transport_nop_wait_for_tasks;
3114 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3115 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
3116 return -2;
3117 }
3118 /*
3119 * Check status of Asymmetric Logical Unit Assignment port
3120 */
3121 ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
3122 if (ret != 0) {
3123 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3124 /*
3125 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
3126 * The ALUA additional sense code qualifier (ASCQ) is determined
3127 * by the ALUA primary or secondary access state.
3128 */
3129 if (ret > 0) {
3130#if 0
3131 printk(KERN_INFO "[%s]: ALUA TG Port not available,"
3132 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
3133 CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
3134#endif
3135 transport_set_sense_codes(cmd, 0x04, alua_ascq);
3136 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3137 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
3138 return -2;
3139 }
3140 goto out_invalid_cdb_field;
3141 }
3142 /*
3143 * Check status for SPC-3 Persistent Reservations
3144 */
3145 if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
3146 if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
3147 cmd, cdb, pr_reg_type) != 0)
3148 return transport_handle_reservation_conflict(cmd);
3149 /*
3150 * This means the CDB is allowed for the SCSI Initiator port
3151 * when said port is *NOT* holding the legacy SPC-2 or
3152 * SPC-3 Persistent Reservation.
3153 */
3154 }
3155
3156 switch (cdb[0]) {
3157 case READ_6:
3158 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3159 if (sector_ret)
3160 goto out_unsupported_cdb;
3161 size = transport_get_size(sectors, cdb, cmd);
3162 cmd->transport_split_cdb = &split_cdb_XX_6;
3163 T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3164 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3165 break;
3166 case READ_10:
3167 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3168 if (sector_ret)
3169 goto out_unsupported_cdb;
3170 size = transport_get_size(sectors, cdb, cmd);
3171 cmd->transport_split_cdb = &split_cdb_XX_10;
3172 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3173 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3174 break;
3175 case READ_12:
3176 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3177 if (sector_ret)
3178 goto out_unsupported_cdb;
3179 size = transport_get_size(sectors, cdb, cmd);
3180 cmd->transport_split_cdb = &split_cdb_XX_12;
3181 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3182 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3183 break;
3184 case READ_16:
3185 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3186 if (sector_ret)
3187 goto out_unsupported_cdb;
3188 size = transport_get_size(sectors, cdb, cmd);
3189 cmd->transport_split_cdb = &split_cdb_XX_16;
3190 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3191 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3192 break;
3193 case WRITE_6:
3194 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3195 if (sector_ret)
3196 goto out_unsupported_cdb;
3197 size = transport_get_size(sectors, cdb, cmd);
3198 cmd->transport_split_cdb = &split_cdb_XX_6;
3199 T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3200 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3201 break;
3202 case WRITE_10:
3203 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3204 if (sector_ret)
3205 goto out_unsupported_cdb;
3206 size = transport_get_size(sectors, cdb, cmd);
3207 cmd->transport_split_cdb = &split_cdb_XX_10;
3208 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3209 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3210 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3211 break;
3212 case WRITE_12:
3213 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3214 if (sector_ret)
3215 goto out_unsupported_cdb;
3216 size = transport_get_size(sectors, cdb, cmd);
3217 cmd->transport_split_cdb = &split_cdb_XX_12;
3218 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3219 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3220 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3221 break;
3222 case WRITE_16:
3223 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3224 if (sector_ret)
3225 goto out_unsupported_cdb;
3226 size = transport_get_size(sectors, cdb, cmd);
3227 cmd->transport_split_cdb = &split_cdb_XX_16;
3228 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3229 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3230 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3231 break;
3232 case XDWRITEREAD_10:
3233 if ((cmd->data_direction != DMA_TO_DEVICE) ||
3234 !(T_TASK(cmd)->t_tasks_bidi))
3235 goto out_invalid_cdb_field;
3236 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3237 if (sector_ret)
3238 goto out_unsupported_cdb;
3239 size = transport_get_size(sectors, cdb, cmd);
3240 cmd->transport_split_cdb = &split_cdb_XX_10;
3241 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3242 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3243 passthrough = (TRANSPORT(dev)->transport_type ==
3244 TRANSPORT_PLUGIN_PHBA_PDEV);
3245 /*
3246 * Skip the remaining assignments for TCM/PSCSI passthrough
3247 */
3248 if (passthrough)
3249 break;
3250 /*
3251 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3252 */
3253 cmd->transport_complete_callback = &transport_xor_callback;
3254 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3255 break;
3256 case VARIABLE_LENGTH_CMD:
3257 service_action = get_unaligned_be16(&cdb[8]);
3258 /*
3259 * Determine if this is TCM/PSCSI device and we should disable
3260 * internal emulation for this CDB.
3261 */
3262 passthrough = (TRANSPORT(dev)->transport_type ==
3263 TRANSPORT_PLUGIN_PHBA_PDEV);
3264
3265 switch (service_action) {
3266 case XDWRITEREAD_32:
3267 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3268 if (sector_ret)
3269 goto out_unsupported_cdb;
3270 size = transport_get_size(sectors, cdb, cmd);
3271 /*
3272 * Use WRITE_32 and READ_32 opcodes for the emulated
3273 * XDWRITE_READ_32 logic.
3274 */
3275 cmd->transport_split_cdb = &split_cdb_XX_32;
3276 T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
3277 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3278
3279 /*
3280 * Skip the remaining assignments for TCM/PSCSI passthrough
3281 */
3282 if (passthrough)
3283 break;
3284
3285 /*
3286 * Setup BIDI XOR callback to be run during
3287 * transport_generic_complete_ok()
3288 */
3289 cmd->transport_complete_callback = &transport_xor_callback;
3290 T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
3291 break;
3292 case WRITE_SAME_32:
3293 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3294 if (sector_ret)
3295 goto out_unsupported_cdb;
3296 size = transport_get_size(sectors, cdb, cmd);
3297 T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
3298 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3299
3300 /*
3301 * Skip the remaining assignments for TCM/PSCSI passthrough
3302 */
3303 if (passthrough)
3304 break;
3305
3306 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3307 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3308 " bits not supported for Block Discard"
3309 " Emulation\n");
3310 goto out_invalid_cdb_field;
3311 }
3312 /*
3313 * Currently for the emulated case we only accept
3314 * tpws with the UNMAP=1 bit set.
3315 */
3316 if (!(cdb[10] & 0x08)) {
3317 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3318 " supported for Block Discard Emulation\n");
3319 goto out_invalid_cdb_field;
3320 }
3321 break;
3322 default:
3323 printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
3324 " 0x%04x not supported\n", service_action);
3325 goto out_unsupported_cdb;
3326 }
3327 break;
3328 case 0xa3:
3329 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3330 /* MAINTENANCE_IN from SCC-2 */
3331 /*
3332 * Check for emulated MI_REPORT_TARGET_PGS.
3333 */
3334 if (cdb[1] == MI_REPORT_TARGET_PGS) {
3335 cmd->transport_emulate_cdb =
3336 (T10_ALUA(su_dev)->alua_type ==
3337 SPC3_ALUA_EMULATED) ?
3338 &core_emulate_report_target_port_groups :
3339 NULL;
3340 }
3341 size = (cdb[6] << 24) | (cdb[7] << 16) |
3342 (cdb[8] << 8) | cdb[9];
3343 } else {
3344 /* GPCMD_SEND_KEY from multi media commands */
3345 size = (cdb[8] << 8) + cdb[9];
3346 }
3347 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3348 break;
3349 case MODE_SELECT:
3350 size = cdb[4];
3351 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3352 break;
3353 case MODE_SELECT_10:
3354 size = (cdb[7] << 8) + cdb[8];
3355 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3356 break;
3357 case MODE_SENSE:
3358 size = cdb[4];
3359 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3360 break;
3361 case MODE_SENSE_10:
3362 case GPCMD_READ_BUFFER_CAPACITY:
3363 case GPCMD_SEND_OPC:
3364 case LOG_SELECT:
3365 case LOG_SENSE:
3366 size = (cdb[7] << 8) + cdb[8];
3367 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3368 break;
3369 case READ_BLOCK_LIMITS:
3370 size = READ_BLOCK_LEN;
3371 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3372 break;
3373 case GPCMD_GET_CONFIGURATION:
3374 case GPCMD_READ_FORMAT_CAPACITIES:
3375 case GPCMD_READ_DISC_INFO:
3376 case GPCMD_READ_TRACK_RZONE_INFO:
3377 size = (cdb[7] << 8) + cdb[8];
3378 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3379 break;
3380 case PERSISTENT_RESERVE_IN:
3381 case PERSISTENT_RESERVE_OUT:
3382 cmd->transport_emulate_cdb =
3383 (T10_RES(su_dev)->res_type ==
3384 SPC3_PERSISTENT_RESERVATIONS) ?
3385 &core_scsi3_emulate_pr : NULL;
3386 size = (cdb[7] << 8) + cdb[8];
3387 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3388 break;
3389 case GPCMD_MECHANISM_STATUS:
3390 case GPCMD_READ_DVD_STRUCTURE:
3391 size = (cdb[8] << 8) + cdb[9];
3392 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3393 break;
3394 case READ_POSITION:
3395 size = READ_POSITION_LEN;
3396 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3397 break;
3398 case 0xa4:
3399 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3400 /* MAINTENANCE_OUT from SCC-2
3401 *
3402 * Check for emulated MO_SET_TARGET_PGS.
3403 */
3404 if (cdb[1] == MO_SET_TARGET_PGS) {
3405 cmd->transport_emulate_cdb =
3406 (T10_ALUA(su_dev)->alua_type ==
3407 SPC3_ALUA_EMULATED) ?
3408 &core_emulate_set_target_port_groups :
3409 NULL;
3410 }
3411
3412 size = (cdb[6] << 24) | (cdb[7] << 16) |
3413 (cdb[8] << 8) | cdb[9];
3414 } else {
3415 /* GPCMD_REPORT_KEY from multi media commands */
3416 size = (cdb[8] << 8) + cdb[9];
3417 }
3418 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3419 break;
3420 case INQUIRY:
3421 size = (cdb[3] << 8) + cdb[4];
3422 /*
3423 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
3424 * See spc4r17 section 5.3
3425 */
3426 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3427 cmd->sam_task_attr = MSG_HEAD_TAG;
3428 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3429 break;
3430 case READ_BUFFER:
3431 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3432 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3433 break;
3434 case READ_CAPACITY:
3435 size = READ_CAP_LEN;
3436 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3437 break;
3438 case READ_MEDIA_SERIAL_NUMBER:
3439 case SECURITY_PROTOCOL_IN:
3440 case SECURITY_PROTOCOL_OUT:
3441 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3442 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3443 break;
3444 case SERVICE_ACTION_IN:
3445 case ACCESS_CONTROL_IN:
3446 case ACCESS_CONTROL_OUT:
3447 case EXTENDED_COPY:
3448 case READ_ATTRIBUTE:
3449 case RECEIVE_COPY_RESULTS:
3450 case WRITE_ATTRIBUTE:
3451 size = (cdb[10] << 24) | (cdb[11] << 16) |
3452 (cdb[12] << 8) | cdb[13];
3453 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3454 break;
3455 case RECEIVE_DIAGNOSTIC:
3456 case SEND_DIAGNOSTIC:
3457 size = (cdb[3] << 8) | cdb[4];
3458 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3459 break;
3460/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3461#if 0
3462 case GPCMD_READ_CD:
3463 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3464 size = (2336 * sectors);
3465 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3466 break;
3467#endif
3468 case READ_TOC:
3469 size = cdb[8];
3470 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3471 break;
3472 case REQUEST_SENSE:
3473 size = cdb[4];
3474 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3475 break;
3476 case READ_ELEMENT_STATUS:
3477 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
3478 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3479 break;
3480 case WRITE_BUFFER:
3481 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3482 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3483 break;
3484 case RESERVE:
3485 case RESERVE_10:
3486 /*
3487 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3488 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3489 */
3490 if (cdb[0] == RESERVE_10)
3491 size = (cdb[7] << 8) | cdb[8];
3492 else
3493 size = cmd->data_length;
3494
3495 /*
3496 * Setup the legacy emulated handler for SPC-2 and
3497 * >= SPC-3 compatible reservation handling (CRH=1).
3498 * Otherwise, we assume the underlying SCSI logic is
3499 * running in SPC_PASSTHROUGH, and wants reservations
3500 * emulation disabled.
3501 */
3502 cmd->transport_emulate_cdb =
3503 (T10_RES(su_dev)->res_type !=
3504 SPC_PASSTHROUGH) ?
3505 &core_scsi2_emulate_crh : NULL;
3506 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3507 break;
3508 case RELEASE:
3509 case RELEASE_10:
3510 /*
3511 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3512 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3513 */
3514 if (cdb[0] == RELEASE_10)
3515 size = (cdb[7] << 8) | cdb[8];
3516 else
3517 size = cmd->data_length;
3518
3519 cmd->transport_emulate_cdb =
3520 (T10_RES(su_dev)->res_type !=
3521 SPC_PASSTHROUGH) ?
3522 &core_scsi2_emulate_crh : NULL;
3523 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3524 break;
3525 case SYNCHRONIZE_CACHE:
3526 case 0x91: /* SYNCHRONIZE_CACHE_16: */
3527 /*
3528 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3529 */
3530 if (cdb[0] == SYNCHRONIZE_CACHE) {
3531 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3532 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3533 } else {
3534 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3535 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3536 }
3537 if (sector_ret)
3538 goto out_unsupported_cdb;
3539
3540 size = transport_get_size(sectors, cdb, cmd);
3541 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3542
3543 /*
3544 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3545 */
3546 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3547 break;
3548 /*
3549 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3550 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3551 */
3552 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3553 /*
3554 * Check to ensure that LBA + Range does not run past the end of the
3555 * device.
3556 */
3557 if (transport_get_sectors(cmd) < 0)
3558 goto out_invalid_cdb_field;
3559 break;
3560 case UNMAP:
3561 size = get_unaligned_be16(&cdb[7]);
3562 passthrough = (TRANSPORT(dev)->transport_type ==
3563 TRANSPORT_PLUGIN_PHBA_PDEV);
3564 /*
3565 * Determine if the received UNMAP is used for direct passthrough
3566 * into Linux/SCSI with struct request via TCM/pSCSI, or whether we are
3567 * signaling the use of internal transport_generic_unmap() emulation
3568 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
3569 * subsystem plugin backstores.
3570 */
3571 if (!(passthrough))
3572 cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
3573
3574 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3575 break;
3576 case WRITE_SAME_16:
3577 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3578 if (sector_ret)
3579 goto out_unsupported_cdb;
3580 size = transport_get_size(sectors, cdb, cmd);
3581 T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
3582 passthrough = (TRANSPORT(dev)->transport_type ==
3583 TRANSPORT_PLUGIN_PHBA_PDEV);
3584 /*
3585 * Determine if the received WRITE_SAME_16 is used for direct
3586 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
3587 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
3588 * emulation for Linux/BLOCK discard with TCM/IBLOCK and
3589 * TCM/FILEIO subsystem plugin backstores.
3590 */
3591 if (!(passthrough)) {
3592 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3593 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3594 " bits not supported for Block Discard"
3595 " Emulation\n");
3596 goto out_invalid_cdb_field;
3597 }
3598 /*
3599 * Currently for the emulated case we only accept
3600 * tpws with the UNMAP=1 bit set.
3601 */
3602 if (!(cdb[1] & 0x08)) {
3603 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3604 " supported for Block Discard Emulation\n");
3605 goto out_invalid_cdb_field;
3606 }
3607 }
3608 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3609 break;
3610 case ALLOW_MEDIUM_REMOVAL:
3611 case GPCMD_CLOSE_TRACK:
3612 case ERASE:
3613 case INITIALIZE_ELEMENT_STATUS:
3614 case GPCMD_LOAD_UNLOAD:
3615 case REZERO_UNIT:
3616 case SEEK_10:
3617 case GPCMD_SET_SPEED:
3618 case SPACE:
3619 case START_STOP:
3620 case TEST_UNIT_READY:
3621 case VERIFY:
3622 case WRITE_FILEMARKS:
3623 case MOVE_MEDIUM:
3624 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3625 break;
3626 case REPORT_LUNS:
3627 cmd->transport_emulate_cdb =
3628 &transport_core_report_lun_response;
3629 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3630 /*
3631 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3632 * See spc4r17 section 5.3
3633 */
3634 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3635 cmd->sam_task_attr = MSG_HEAD_TAG;
3636 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3637 break;
3638 default:
3639 printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
3640 " 0x%02x, sending CHECK_CONDITION.\n",
3641 CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
3642 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3643 goto out_unsupported_cdb;
3644 }
3645
3646 if (size != cmd->data_length) {
3647 printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
3648 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3649 " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
3650 cmd->data_length, size, cdb[0]);
3651
3652 cmd->cmd_spdtl = size;
3653
3654 if (cmd->data_direction == DMA_TO_DEVICE) {
3655 printk(KERN_ERR "Rejecting underflow/overflow"
3656 " WRITE data\n");
3657 goto out_invalid_cdb_field;
3658 }
3659 /*
3660 * Reject READ_* or WRITE_* with overflow/underflow for
3661 * type SCF_SCSI_DATA_SG_IO_CDB.
3662 */
3663 if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
3664 printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
3665 " CDB on non 512-byte sector setup subsystem"
3666 " plugin: %s\n", TRANSPORT(dev)->name);
3667 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3668 goto out_invalid_cdb_field;
3669 }
3670
3671 if (size > cmd->data_length) {
3672 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3673 cmd->residual_count = (size - cmd->data_length);
3674 } else {
3675 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3676 cmd->residual_count = (cmd->data_length - size);
3677 }
3678 cmd->data_length = size;
3679 }
3680
3681 transport_set_supported_SAM_opcode(cmd);
3682 return ret;
3683
3684out_unsupported_cdb:
3685 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3686 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3687 return -2;
3688out_invalid_cdb_field:
3689 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3690 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3691 return -2;
3692}
3693
3694static inline void transport_release_tasks(struct se_cmd *);
3695
3696/*
3697 * This function will copy a contiguous *src buffer into a destination
3698 * struct scatterlist array.
3699 */
3700static void transport_memcpy_write_contig(
3701 struct se_cmd *cmd,
3702 struct scatterlist *sg_d,
3703 unsigned char *src)
3704{
3705 u32 i = 0, length = 0, total_length = cmd->data_length;
3706 void *dst;
3707
3708 while (total_length) {
3709 length = sg_d[i].length;
3710
3711 if (length > total_length)
3712 length = total_length;
3713
3714 dst = sg_virt(&sg_d[i]);
3715
3716 memcpy(dst, src, length);
3717
3718 if (!(total_length -= length))
3719 return;
3720
3721 src += length;
3722 i++;
3723 }
3724}
3725
3726/*
3727 * This function will copy a struct scatterlist array *sg_s into a destination
3728 * contiguous *dst buffer.
3729 */
3730static void transport_memcpy_read_contig(
3731 struct se_cmd *cmd,
3732 unsigned char *dst,
3733 struct scatterlist *sg_s)
3734{
3735 u32 i = 0, length = 0, total_length = cmd->data_length;
3736 void *src;
3737
3738 while (total_length) {
3739 length = sg_s[i].length;
3740
3741 if (length > total_length)
3742 length = total_length;
3743
3744 src = sg_virt(&sg_s[i]);
3745
3746 memcpy(dst, src, length);
3747
3748 if (!(total_length -= length))
3749 return;
3750
3751 dst += length;
3752 i++;
3753 }
3754}
3755
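/*
 * Copy the payload described by a linked list of struct se_mem elements
 * into a contiguous *dst buffer, up to cmd->data_length bytes.
 */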
3756static void transport_memcpy_se_mem_read_contig(
3757 struct se_cmd *cmd,
3758 unsigned char *dst,
3759 struct list_head *se_mem_list)
3760{
3761 struct se_mem *se_mem;
3762 void *src;
3763 u32 length = 0, total_length = cmd->data_length;
3764
3765 list_for_each_entry(se_mem, se_mem_list, se_list) {
3766 length = se_mem->se_len;
3767
3768 if (length > total_length)
3769 length = total_length;
3770
3771 src = page_address(se_mem->se_page) + se_mem->se_off;
3772
3773 memcpy(dst, src, length);
3774
3775 if (!(total_length -= length))
3776 return;
3777
3778 dst += length;
3779 }
3780}
3781
3782/*
3783 * Called from transport_generic_complete_ok() and
3784 * transport_generic_request_failure() to determine which dormant/delayed
3785 * and ordered cmds need to have their tasks added to the execution queue.
3786 */
3787static void transport_complete_task_attr(struct se_cmd *cmd)
3788{
3789 struct se_device *dev = SE_DEV(cmd);
3790 struct se_cmd *cmd_p, *cmd_tmp;
3791 int new_active_tasks = 0;
3792
3793 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3794 atomic_dec(&dev->simple_cmds);
3795 smp_mb__after_atomic_dec();
3796 dev->dev_cur_ordered_id++;
3797 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
3798 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3799 cmd->se_ordered_id);
3800 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3801 atomic_dec(&dev->dev_hoq_count);
3802 smp_mb__after_atomic_dec();
3803 dev->dev_cur_ordered_id++;
3804 DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
3805 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3806 cmd->se_ordered_id);
3807 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3808 spin_lock(&dev->ordered_cmd_lock);
3809 list_del(&cmd->se_ordered_list);
3810 atomic_dec(&dev->dev_ordered_sync);
3811 smp_mb__after_atomic_dec();
3812 spin_unlock(&dev->ordered_cmd_lock);
3813
3814 dev->dev_cur_ordered_id++;
3815 DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
3816 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3817 }
3818 /*
3819 * Process all commands up to the last received
3820 * ORDERED task attribute which requires another blocking
3821 * boundary
3822 */
3823 spin_lock(&dev->delayed_cmd_lock);
3824 list_for_each_entry_safe(cmd_p, cmd_tmp,
3825 &dev->delayed_cmd_list, se_delayed_list) {
3826
3827 list_del(&cmd_p->se_delayed_list);
3828 spin_unlock(&dev->delayed_cmd_lock);
3829
3830 DEBUG_STA("Calling add_tasks() for"
3831 " cmd_p: 0x%02x Task Attr: 0x%02x"
3832 " Dormant -> Active, se_ordered_id: %u\n",
3833 T_TASK(cmd_p)->t_task_cdb[0],
3834 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3835
3836 transport_add_tasks_from_cmd(cmd_p);
3837 new_active_tasks++;
3838
3839 spin_lock(&dev->delayed_cmd_lock);
3840 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3841 break;
3842 }
3843 spin_unlock(&dev->delayed_cmd_lock);
3844 /*
3845 * If new tasks have become active, wake up the transport thread
3846 * to do the processing of the Active tasks.
3847 */
3848 if (new_active_tasks != 0)
3849 wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
3850}
3851
3852static void transport_generic_complete_ok(struct se_cmd *cmd)
3853{
3854 int reason = 0;
3855 /*
3856 * Check if we need to move delayed/dormant tasks from cmds on the
3857 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3858 * Attribute.
3859 */
3860 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3861 transport_complete_task_attr(cmd);
3862 /*
3863 * Check if we need to retrieve a sense buffer from
3864 * the struct se_cmd in question.
3865 */
3866 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3867 if (transport_get_sense_data(cmd) < 0)
3868 reason = TCM_NON_EXISTENT_LUN;
3869
3870 /*
3871 * Only set when a struct se_task->task_scsi_status returned
3872 * a non-GOOD status.
3873 */
3874 if (cmd->scsi_status) {
3875 transport_send_check_condition_and_sense(
3876 cmd, reason, 1);
3877 transport_lun_remove_cmd(cmd);
3878 transport_cmd_check_stop_to_fabric(cmd);
3879 return;
3880 }
3881 }
3882 /*
3883 * Check for a callback, used by, amongst other things,
3884 * XDWRITE_READ_10 emulation.
3885 */
3886 if (cmd->transport_complete_callback)
3887 cmd->transport_complete_callback(cmd);
3888
3889 switch (cmd->data_direction) {
3890 case DMA_FROM_DEVICE:
3891 spin_lock(&cmd->se_lun->lun_sep_lock);
3892 if (SE_LUN(cmd)->lun_sep) {
3893 SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
3894 cmd->data_length;
3895 }
3896 spin_unlock(&cmd->se_lun->lun_sep_lock);
3897 /*
3898 * If pre-registered SGL memory has been enabled by the TCM
3899 * fabric module, perform the memcpy() from the TCM internal
3900 * contiguous buffer back to the original SGL.
3901 */
3902 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
3903 transport_memcpy_write_contig(cmd,
3904 T_TASK(cmd)->t_task_pt_sgl,
3905 T_TASK(cmd)->t_task_buf);
3906
3907 CMD_TFO(cmd)->queue_data_in(cmd);
3908 break;
3909 case DMA_TO_DEVICE:
3910 spin_lock(&cmd->se_lun->lun_sep_lock);
3911 if (SE_LUN(cmd)->lun_sep) {
3912 SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
3913 cmd->data_length;
3914 }
3915 spin_unlock(&cmd->se_lun->lun_sep_lock);
3916 /*
3917 * Check if we need to send READ payload for BIDI-COMMAND
3918 */
3919 if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
3920 spin_lock(&cmd->se_lun->lun_sep_lock);
3921 if (SE_LUN(cmd)->lun_sep) {
3922 SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
3923 cmd->data_length;
3924 }
3925 spin_unlock(&cmd->se_lun->lun_sep_lock);
3926 CMD_TFO(cmd)->queue_data_in(cmd);
3927 break;
3928 }
3929 /* Fall through for DMA_TO_DEVICE */
3930 case DMA_NONE:
3931 CMD_TFO(cmd)->queue_status(cmd);
3932 break;
3933 default:
3934 break;
3935 }
3936
3937 transport_lun_remove_cmd(cmd);
3938 transport_cmd_check_stop_to_fabric(cmd);
3939}
3940
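/*
 * Walk T_TASK(cmd)->t_task_list and release each inactive struct se_task:
 * free its task_sg[] and task_sg_bidi[] arrays and hand the task back to
 * the backend subsystem plugin via TRANSPORT(task->se_dev)->free_task().
 */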
3941static void transport_free_dev_tasks(struct se_cmd *cmd)
3942{
3943 struct se_task *task, *task_tmp;
3944 unsigned long flags;
3945
3946 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3947 list_for_each_entry_safe(task, task_tmp,
3948 &T_TASK(cmd)->t_task_list, t_list) {
3949 if (atomic_read(&task->task_active))
3950 continue;
3951
3952 kfree(task->task_sg_bidi);
3953 kfree(task->task_sg);
3954
3955 list_del(&task->t_list);
3956
3957 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3958 if (task->se_dev)
3959 TRANSPORT(task->se_dev)->free_task(task);
3960 else
3961 printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
3962 task->task_no);
3963 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
3964 }
3965 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3966}
3967
3968static inline void transport_free_pages(struct se_cmd *cmd)
3969{
3970 struct se_mem *se_mem, *se_mem_tmp;
3971 int free_page = 1;
3972
3973 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3974 free_page = 0;
3975 if (cmd->se_dev->transport->do_se_mem_map)
3976 free_page = 0;
3977
3978 if (T_TASK(cmd)->t_task_buf) {
3979 kfree(T_TASK(cmd)->t_task_buf);
3980 T_TASK(cmd)->t_task_buf = NULL;
3981 return;
3982 }
3983
3984 /*
3985 * Caller will handle releasing of struct se_mem.
3986 */
3987 if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
3988 return;
3989
3990 if (!(T_TASK(cmd)->t_tasks_se_num))
3991 return;
3992
3993 list_for_each_entry_safe(se_mem, se_mem_tmp,
3994 T_TASK(cmd)->t_mem_list, se_list) {
3995 /*
3996 * We only call __free_page(struct se_mem->se_page) when
3997 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
3998 */
3999 if (free_page)
4000 __free_page(se_mem->se_page);
4001
4002 list_del(&se_mem->se_list);
4003 kmem_cache_free(se_mem_cache, se_mem);
4004 }
4005
4006 if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
4007 list_for_each_entry_safe(se_mem, se_mem_tmp,
4008 T_TASK(cmd)->t_mem_bidi_list, se_list) {
4009 /*
4010 * We only call __free_page(struct se_mem->se_page) when
4011 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
4012 */
4013 if (free_page)
4014 __free_page(se_mem->se_page);
4015
4016 list_del(&se_mem->se_list);
4017 kmem_cache_free(se_mem_cache, se_mem);
4018 }
4019 }
4020
4021 kfree(T_TASK(cmd)->t_mem_bidi_list);
4022 T_TASK(cmd)->t_mem_bidi_list = NULL;
4023 kfree(T_TASK(cmd)->t_mem_list);
4024 T_TASK(cmd)->t_mem_list = NULL;
4025 T_TASK(cmd)->t_tasks_se_num = 0;
4026}
4027
4028static inline void transport_release_tasks(struct se_cmd *cmd)
4029{
4030 transport_free_dev_tasks(cmd);
4031}
4032
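/*
 * Drop the outstanding frontend (t_fe_count) and storage engine
 * (t_se_count) references held on this descriptor.  Returns 1 if either
 * reference is still held after the decrement, meaning the caller must
 * not release the struct se_cmd yet; returns 0 otherwise.
 */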
4033static inline int transport_dec_and_check(struct se_cmd *cmd)
4034{
4035 unsigned long flags;
4036
4037 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4038 if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
4039 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
4040 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4041 flags);
4042 return 1;
4043 }
4044 }
4045
4046 if (atomic_read(&T_TASK(cmd)->t_se_count)) {
4047 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
4048 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4049 flags);
4050 return 1;
4051 }
4052 }
4053 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4054
4055 return 0;
4056}
4057
4058static void transport_release_fe_cmd(struct se_cmd *cmd)
4059{
4060 unsigned long flags;
4061
4062 if (transport_dec_and_check(cmd))
4063 return;
4064
4065 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4066 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
4067 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4068 goto free_pages;
4069 }
4070 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
4071 transport_all_task_dev_remove_state(cmd);
4072 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4073
4074 transport_release_tasks(cmd);
4075free_pages:
4076 transport_free_pages(cmd);
4077 transport_free_se_cmd(cmd);
4078 CMD_TFO(cmd)->release_cmd_direct(cmd);
4079}
4080
4081static int transport_generic_remove(
4082 struct se_cmd *cmd,
4083 int release_to_pool,
4084 int session_reinstatement)
4085{
4086 unsigned long flags;
4087
4088 if (!(T_TASK(cmd)))
4089 goto release_cmd;
4090
4091 if (transport_dec_and_check(cmd)) {
4092 if (session_reinstatement) {
4093 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4094 transport_all_task_dev_remove_state(cmd);
4095 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4096 flags);
4097 }
4098 return 1;
4099 }
4100
4101 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4102 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
4103 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4104 goto free_pages;
4105 }
4106 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
4107 transport_all_task_dev_remove_state(cmd);
4108 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4109
4110 transport_release_tasks(cmd);
4111free_pages:
4112 transport_free_pages(cmd);
4113
4114release_cmd:
4115 if (release_to_pool) {
4116 transport_release_cmd_to_pool(cmd);
4117 } else {
4118 transport_free_se_cmd(cmd);
4119 CMD_TFO(cmd)->release_cmd_direct(cmd);
4120 }
4121
4122 return 0;
4123}
4124
4125/*
4126 * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
4127 * @cmd: Associated se_cmd descriptor
4128 * @mem: SGL style memory for TCM WRITE / READ
4129 * @sg_mem_num: Number of SGL elements
4130 * @mem_bidi_in: SGL style memory for TCM BIDI READ
4131 * @sg_mem_bidi_num: Number of BIDI READ SGL elements
4132 *
4133 * Return: nonzero if cmd was rejected due to -ENOMEM or improper usage
4134 * of parameters.
4135 */
4136int transport_generic_map_mem_to_cmd(
4137 struct se_cmd *cmd,
4138 struct scatterlist *mem,
4139 u32 sg_mem_num,
4140 struct scatterlist *mem_bidi_in,
4141 u32 sg_mem_bidi_num)
4142{
4143 u32 se_mem_cnt_out = 0;
4144 int ret;
4145
4146 if (!(mem) || !(sg_mem_num))
4147 return 0;
4148 /*
4149 * Passed *mem will contain a list_head containing preformatted
4150 * struct se_mem elements...
4151 */
4152 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
4153 if ((mem_bidi_in) || (sg_mem_bidi_num)) {
4154 printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
4155 " with BIDI-COMMAND\n");
4156 return -ENOSYS;
4157 }
4158
4159 T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
4160 T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
4161 cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
4162 return 0;
4163 }
4164 /*
4165 * Otherwise, assume the caller is passing a struct scatterlist
4166 * array from include/linux/scatterlist.h
4167 */
4168 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
4169 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
4170 /*
4171 * For CDB using TCM struct se_mem linked list scatterlist memory
4172 * processed into a TCM struct se_subsystem_dev, we do the mapping
4173 * from the passed physical memory to struct se_mem->se_page here.
4174 */
4175 T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4176 if (!(T_TASK(cmd)->t_mem_list))
4177 return -ENOMEM;
4178
4179 ret = transport_map_sg_to_mem(cmd,
4180 T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
4181 if (ret < 0)
4182 return -ENOMEM;
4183
4184 T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
4185 /*
4186 * Setup BIDI READ list of struct se_mem elements
4187 */
4188 if ((mem_bidi_in) && (sg_mem_bidi_num)) {
4189 T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4190 if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4191 kfree(T_TASK(cmd)->t_mem_list);
4192 return -ENOMEM;
4193 }
4194 se_mem_cnt_out = 0;
4195
4196 ret = transport_map_sg_to_mem(cmd,
4197 T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
4198 &se_mem_cnt_out);
4199 if (ret < 0) {
4200 kfree(T_TASK(cmd)->t_mem_list);
4201 return -ENOMEM;
4202 }
4203
4204 T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
4205 }
4206 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4207
4208 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
4209 if (mem_bidi_in || sg_mem_bidi_num) {
4210 printk(KERN_ERR "BIDI-Commands not supported using "
4211 "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
4212 return -ENOSYS;
4213 }
4214 /*
4215 * For incoming CDBs using a contiguous buffer internally within TCM,
4216 * save the passed struct scatterlist memory. After TCM storage object
4217 * processing has completed for this struct se_cmd, TCM core will call
4218 * transport_memcpy_[write,read]_contig() as necessary from
4219 * transport_generic_complete_ok() and transport_write_pending() in order
4220 * to copy the TCM buffer to/from the original passed *mem in SGL ->
4221 * struct scatterlist format.
4222 */
4223 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
4224 T_TASK(cmd)->t_task_pt_sgl = mem;
4225 }
4226
4227 return 0;
4228}
4229EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
4230
4231
4232static inline long long transport_dev_end_lba(struct se_device *dev)
4233{
4234 return dev->transport->get_blocks(dev) + 1;
4235}
4236
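/*
 * Convert cmd->data_length into a backend sector count and, for TYPE_DISK
 * devices, verify that LBA + sectors does not run past the end of the
 * backing device.
 */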
4237static int transport_get_sectors(struct se_cmd *cmd)
4238{
4239 struct se_device *dev = SE_DEV(cmd);
4240
4241 T_TASK(cmd)->t_tasks_sectors =
4242 (cmd->data_length / DEV_ATTRIB(dev)->block_size);
4243 if (!(T_TASK(cmd)->t_tasks_sectors))
4244 T_TASK(cmd)->t_tasks_sectors = 1;
4245
4246 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
4247 return 0;
4248
4249 if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
4250 transport_dev_end_lba(dev)) {
4251 printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
4252 " transport_dev_end_lba(): %llu\n",
4253 T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
4254 transport_dev_end_lba(dev));
4255 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4256 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
4257 return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
4258 }
4259
4260 return 0;
4261}
4262
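/*
 * Determine how many struct se_task descriptors this command requires and,
 * for SCF_SCSI_DATA_SG_IO_CDB payloads, create them (including any BIDI
 * READ tasks) via transport_generic_get_cdb_count().  The resulting count
 * seeds the t_task_cdbs_* completion counters.
 */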
4263static int transport_new_cmd_obj(struct se_cmd *cmd)
4264{
4265 struct se_device *dev = SE_DEV(cmd);
4266 u32 task_cdbs = 0, rc;
4267
4268 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
4269 task_cdbs++;
4270 T_TASK(cmd)->t_task_cdbs++;
4271 } else {
4272 int set_counts = 1;
4273
4274 /*
4275 * Setup any BIDI READ tasks and memory from
4276 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
4277 * are queued first for the non pSCSI passthrough case.
4278 */
4279 if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
4280 (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
4281 rc = transport_generic_get_cdb_count(cmd,
4282 T_TASK(cmd)->t_task_lba,
4283 T_TASK(cmd)->t_tasks_sectors,
4284 DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
4285 set_counts);
4286 if (!(rc)) {
4287 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4288 cmd->scsi_sense_reason =
4289 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4290 return PYX_TRANSPORT_LU_COMM_FAILURE;
4291 }
4292 set_counts = 0;
4293 }
4294 /*
4295 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
4296 * Note for BIDI transfers this will contain the WRITE payload
4297 */
4298 task_cdbs = transport_generic_get_cdb_count(cmd,
4299 T_TASK(cmd)->t_task_lba,
4300 T_TASK(cmd)->t_tasks_sectors,
4301 cmd->data_direction, T_TASK(cmd)->t_mem_list,
4302 set_counts);
4303 if (!(task_cdbs)) {
4304 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
4305 cmd->scsi_sense_reason =
4306 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
4307 return PYX_TRANSPORT_LU_COMM_FAILURE;
4308 }
4309 T_TASK(cmd)->t_task_cdbs += task_cdbs;
4310
4311#if 0
4312 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
4313 " %u, t_task_cdbs: %u\n", cmd->data_length,
4314 T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
4315 T_TASK(cmd)->t_task_cdbs);
4316#endif
4317 }
4318
4319 atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
4320 atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
4321 atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
4322 return 0;
4323}
4324
4325static struct list_head *transport_init_se_mem_list(void)
4326{
4327 struct list_head *se_mem_list;
4328
4329 se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
4330 if (!(se_mem_list)) {
4331 printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
4332 return NULL;
4333 }
4334 INIT_LIST_HEAD(se_mem_list);
4335
4336 return se_mem_list;
4337}
4338
4339static int
4340transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4341{
4342 unsigned char *buf;
4343 struct se_mem *se_mem;
4344
4345 T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4346 if (!(T_TASK(cmd)->t_mem_list))
4347 return -ENOMEM;
4348
4349 /*
4350 * If the device uses memory mapping this is enough.
4351 */
4352 if (cmd->se_dev->transport->do_se_mem_map)
4353 return 0;
4354
4355 /*
4356 * Setup BIDI-COMMAND READ list of struct se_mem elements
4357 */
4358 if (T_TASK(cmd)->t_tasks_bidi) {
4359 T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4360 if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4361 kfree(T_TASK(cmd)->t_mem_list);
4362 return -ENOMEM;
4363 }
4364 }
4365
4366 while (length) {
4367 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4368 if (!(se_mem)) {
4369 printk(KERN_ERR "Unable to allocate struct se_mem\n");
4370 goto out;
4371 }
4372
4373/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
4374 se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
4375 if (!(se_mem->se_page)) {
4376 printk(KERN_ERR "alloc_pages() failed\n");
4377 goto out;
4378 }
4379
4380 buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
4381 if (!(buf)) {
4382 printk(KERN_ERR "kmap_atomic() failed\n");
4383 goto out;
4384 }
4385 INIT_LIST_HEAD(&se_mem->se_list);
4386 se_mem->se_len = (length > dma_size) ? dma_size : length;
4387 memset(buf, 0, se_mem->se_len);
4388 kunmap_atomic(buf, KM_IRQ0);
4389
4390 list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
4391 T_TASK(cmd)->t_tasks_se_num++;
4392
4393 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
4394 " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
4395 se_mem->se_off);
4396
4397 length -= se_mem->se_len;
4398 }
4399
4400 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
4401 T_TASK(cmd)->t_tasks_se_num);
4402
4403 return 0;
4404out:
4405 if (se_mem)
4406 __free_pages(se_mem->se_page, 0);
4407 kmem_cache_free(se_mem_cache, se_mem);
4408 return -1;
4409}
4410
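/*
 * Walk the struct se_mem list starting at *in_se_mem to determine how many
 * scatterlist entries are needed to cover task->task_size bytes, then
 * allocate and initialize task->task_sg[] (and task->task_sg_bidi[] for
 * pSCSI BIDI passthrough), adding one extra padding entry when the fabric
 * module requests SGL chaining.
 */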
4411u32 transport_calc_sg_num(
4412 struct se_task *task,
4413 struct se_mem *in_se_mem,
4414 u32 task_offset)
4415{
4416 struct se_cmd *se_cmd = task->task_se_cmd;
4417 struct se_device *se_dev = SE_DEV(se_cmd);
4418 struct se_mem *se_mem = in_se_mem;
4419 struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
4420 u32 sg_length, task_size = task->task_size, task_sg_num_padded;
4421
4422 while (task_size != 0) {
4423 DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
4424 " se_mem->se_off(%u) task_offset(%u)\n",
4425 se_mem->se_page, se_mem->se_len,
4426 se_mem->se_off, task_offset);
4427
4428 if (task_offset == 0) {
4429 if (task_size >= se_mem->se_len) {
4430 sg_length = se_mem->se_len;
4431
4432 if (!(list_is_last(&se_mem->se_list,
4433 T_TASK(se_cmd)->t_mem_list)))
4434 se_mem = list_entry(se_mem->se_list.next,
4435 struct se_mem, se_list);
4436 } else {
4437 sg_length = task_size;
4438 task_size -= sg_length;
4439 goto next;
4440 }
4441
4442 DEBUG_SC("sg_length(%u) task_size(%u)\n",
4443 sg_length, task_size);
4444 } else {
4445 if ((se_mem->se_len - task_offset) > task_size) {
4446 sg_length = task_size;
4447 task_size -= sg_length;
4448 goto next;
4449 } else {
4450 sg_length = (se_mem->se_len - task_offset);
4451
4452 if (!(list_is_last(&se_mem->se_list,
4453 T_TASK(se_cmd)->t_mem_list)))
4454 se_mem = list_entry(se_mem->se_list.next,
4455 struct se_mem, se_list);
4456 }
4457
4458 DEBUG_SC("sg_length(%u) task_size(%u)\n",
4459 sg_length, task_size);
4460
4461 task_offset = 0;
4462 }
4463 task_size -= sg_length;
4464next:
4465 DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
4466 task->task_no, task_size);
4467
4468 task->task_sg_num++;
4469 }
4470 /*
4471 * Check if the fabric module driver is requesting that all
4472 * struct se_task->task_sg[] be chained together.. If so,
4473 * then allocate an extra padding SG entry for linking and
4474 * marking the end of the chained SGL.
4475 */
4476 if (tfo->task_sg_chaining) {
4477 task_sg_num_padded = (task->task_sg_num + 1);
4478 task->task_padded_sg = 1;
4479 } else
4480 task_sg_num_padded = task->task_sg_num;
4481
4482 task->task_sg = kzalloc(task_sg_num_padded *
4483 sizeof(struct scatterlist), GFP_KERNEL);
4484 if (!(task->task_sg)) {
4485 printk(KERN_ERR "Unable to allocate memory for"
4486 " task->task_sg\n");
4487 return 0;
4488 }
4489 sg_init_table(&task->task_sg[0], task_sg_num_padded);
4490 /*
4491 * Setup task->task_sg_bidi for SCSI READ payload for
4492 * TCM/pSCSI passthrough if present for BIDI-COMMAND
4493 */
4494 if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
4495 (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
4496 task->task_sg_bidi = kzalloc(task_sg_num_padded *
4497 sizeof(struct scatterlist), GFP_KERNEL);
4498 if (!(task->task_sg_bidi)) {
4499 printk(KERN_ERR "Unable to allocate memory for"
4500 " task->task_sg_bidi\n");
4501 return 0;
4502 }
4503 sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
4504 }
4505 /*
4506 * For the chaining case, setup the proper end of SGL for the
4507 * initial submission struct task into struct se_subsystem_api.
4508 * This will be cleared later by transport_do_task_sg_chain()
4509 */
4510 if (task->task_padded_sg) {
4511 sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
4512 /*
4513 * Only mark the end of the bi-directional scatterlist if it was
4514 * created, which happens only in the case of a BIDI
4515 * (RD + WR) request.
4516 */
4517 if (task->task_sg_bidi)
4518 sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
4519 }
4520
4521 DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
4522 " task_sg_num_padded(%u)\n", task->task_sg_num,
4523 task_sg_num_padded);
4524
4525 return task->task_sg_num;
4526}
4527
4528static inline int transport_set_tasks_sectors_disk(
4529 struct se_task *task,
4530 struct se_device *dev,
4531 unsigned long long lba,
4532 u32 sectors,
4533 int *max_sectors_set)
4534{
4535 if ((lba + sectors) > transport_dev_end_lba(dev)) {
4536 task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
4537
4538 if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
4539 task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4540 *max_sectors_set = 1;
4541 }
4542 } else {
4543 if (sectors > DEV_ATTRIB(dev)->max_sectors) {
4544 task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4545 *max_sectors_set = 1;
4546 } else
4547 task->task_sectors = sectors;
4548 }
4549
4550 return 0;
4551}
4552
4553static inline int transport_set_tasks_sectors_non_disk(
4554 struct se_task *task,
4555 struct se_device *dev,
4556 unsigned long long lba,
4557 u32 sectors,
4558 int *max_sectors_set)
4559{
4560 if (sectors > DEV_ATTRIB(dev)->max_sectors) {
4561 task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4562 *max_sectors_set = 1;
4563 } else
4564 task->task_sectors = sectors;
4565
4566 return 0;
4567}
4568
4569static inline int transport_set_tasks_sectors(
4570 struct se_task *task,
4571 struct se_device *dev,
4572 unsigned long long lba,
4573 u32 sectors,
4574 int *max_sectors_set)
4575{
4576 return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
4577 transport_set_tasks_sectors_disk(task, dev, lba, sectors,
4578 max_sectors_set) :
4579 transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
4580 max_sectors_set);
4581}
4582
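/*
 * Translate a fabric provided struct scatterlist array into a list of
 * struct se_mem elements covering cmd->data_length bytes, returning the
 * number of elements created in *se_mem_cnt.
 */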
4583static int transport_map_sg_to_mem(
4584 struct se_cmd *cmd,
4585 struct list_head *se_mem_list,
4586 void *in_mem,
4587 u32 *se_mem_cnt)
4588{
4589 struct se_mem *se_mem;
4590 struct scatterlist *sg;
4591 u32 sg_count = 1, cmd_size = cmd->data_length;
4592
4593 if (!in_mem) {
4594 printk(KERN_ERR "No source scatterlist\n");
4595 return -1;
4596 }
4597 sg = (struct scatterlist *)in_mem;
4598
4599 while (cmd_size) {
4600 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4601 if (!(se_mem)) {
4602 printk(KERN_ERR "Unable to allocate struct se_mem\n");
4603 return -1;
4604 }
4605 INIT_LIST_HEAD(&se_mem->se_list);
4606 DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
4607 " sg_page: %p offset: %d length: %d\n", cmd_size,
4608 sg_page(sg), sg->offset, sg->length);
4609
4610 se_mem->se_page = sg_page(sg);
4611 se_mem->se_off = sg->offset;
4612
4613 if (cmd_size > sg->length) {
4614 se_mem->se_len = sg->length;
4615 sg = sg_next(sg);
4616 sg_count++;
4617 } else
4618 se_mem->se_len = cmd_size;
4619
4620 cmd_size -= se_mem->se_len;
4621
4622 DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
4623 *se_mem_cnt, cmd_size);
4624 DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
4625 se_mem->se_page, se_mem->se_off, se_mem->se_len);
4626
4627 list_add_tail(&se_mem->se_list, se_mem_list);
4628 (*se_mem_cnt)++;
4629 }
4630
4631 DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
4632 " struct se_mem\n", sg_count, *se_mem_cnt);
4633
4634 if (sg_count != *se_mem_cnt)
4635 BUG();
4636
4637 return 0;
4638}
4639
4640/* transport_map_mem_to_sg():
4641 * Map the struct se_mem elements from se_mem_list onto the preallocated
4642 * struct scatterlist array for the passed struct se_task.
4643 */
4644int transport_map_mem_to_sg(
4645 struct se_task *task,
4646 struct list_head *se_mem_list,
4647 void *in_mem,
4648 struct se_mem *in_se_mem,
4649 struct se_mem **out_se_mem,
4650 u32 *se_mem_cnt,
4651 u32 *task_offset)
4652{
4653 struct se_cmd *se_cmd = task->task_se_cmd;
4654 struct se_mem *se_mem = in_se_mem;
4655 struct scatterlist *sg = (struct scatterlist *)in_mem;
4656 u32 task_size = task->task_size, sg_no = 0;
4657
4658 if (!sg) {
4659 printk(KERN_ERR "Unable to locate valid struct"
4660 " scatterlist pointer\n");
4661 return -1;
4662 }
4663
4664 while (task_size != 0) {
4665 /*
4666 * Setup the contiguous array of scatterlists for
4667 * this struct se_task.
4668 */
4669 sg_assign_page(sg, se_mem->se_page);
4670
4671 if (*task_offset == 0) {
4672 sg->offset = se_mem->se_off;
4673
4674 if (task_size >= se_mem->se_len) {
4675 sg->length = se_mem->se_len;
4676
4677 if (!(list_is_last(&se_mem->se_list,
4678 T_TASK(se_cmd)->t_mem_list))) {
4679 se_mem = list_entry(se_mem->se_list.next,
4680 struct se_mem, se_list);
4681 (*se_mem_cnt)++;
4682 }
4683 } else {
4684 sg->length = task_size;
4685 /*
4686 * Determine if we need to calculate an offset
4687 * into the struct se_mem on the next go around..
4688 */
4689 task_size -= sg->length;
4690 if (!(task_size))
4691 *task_offset = sg->length;
4692
4693 goto next;
4694 }
4695
4696 } else {
4697 sg->offset = (*task_offset + se_mem->se_off);
4698
4699 if ((se_mem->se_len - *task_offset) > task_size) {
4700 sg->length = task_size;
4701 /*
4702 * Determine if we need to calculate an offset
4703 * into the struct se_mem on the next go around..
4704 */
4705 task_size -= sg->length;
4706 if (!(task_size))
4707 *task_offset += sg->length;
4708
4709 goto next;
4710 } else {
4711 sg->length = (se_mem->se_len - *task_offset);
4712
4713 if (!(list_is_last(&se_mem->se_list,
4714 T_TASK(se_cmd)->t_mem_list))) {
4715 se_mem = list_entry(se_mem->se_list.next,
4716 struct se_mem, se_list);
4717 (*se_mem_cnt)++;
4718 }
4719 }
4720
4721 *task_offset = 0;
4722 }
4723 task_size -= sg->length;
4724next:
4725 DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
4726 " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
4727 sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
4728
4729 sg_no++;
4730 if (!(task_size))
4731 break;
4732
4733 sg = sg_next(sg);
4734
4735 if (task_size > se_cmd->data_length)
4736 BUG();
4737 }
4738 *out_se_mem = se_mem;
4739
4740 DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
4741 " SGs\n", task->task_no, *se_mem_cnt, sg_no);
4742
4743 return 0;
4744}
4745
4746/*
4747 * This function can be used by HW target mode drivers to create a linked
4748 * scatterlist from all contiguously allocated struct se_task->task_sg[].
4749 * This is intended to be called during the completion path by TCM Core
4750 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
4751 */
4752void transport_do_task_sg_chain(struct se_cmd *cmd)
4753{
4754 struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
4755 struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
4756 struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
4757 struct se_task *task;
4758 struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
4759 u32 task_sg_num = 0, sg_count = 0;
4760 int i;
4761
4762 if (tfo->task_sg_chaining == 0) {
4763 printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
4764 " %s\n", tfo->get_fabric_name());
4765 dump_stack();
4766 return;
4767 }
4768 /*
4769 * Walk the struct se_task list and setup scatterlist chains
4770 * for each contiguously allocated struct se_task->task_sg[].
4771 */
4772 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
4773 if (!(task->task_sg) || !(task->task_padded_sg))
4774 continue;
4775
4776 if (sg_head && sg_link) {
4777 sg_head_cur = &task->task_sg[0];
4778 sg_link_cur = &task->task_sg[task->task_sg_num];
4779 /*
4780 * Either add chain or mark end of scatterlist
4781 */
4782 if (!(list_is_last(&task->t_list,
4783 &T_TASK(cmd)->t_task_list))) {
4784 /*
4785 * Clear existing SGL termination bit set in
4786 * transport_calc_sg_num(), see sg_mark_end()
4787 */
4788 sg_end_cur = &task->task_sg[task->task_sg_num - 1];
4789 sg_end_cur->page_link &= ~0x02;
4790
4791 sg_chain(sg_head, task_sg_num, sg_head_cur);
4792 sg_count += task->task_sg_num;
4793 task_sg_num = (task->task_sg_num + 1);
4794 } else {
4795 sg_chain(sg_head, task_sg_num, sg_head_cur);
4796 sg_count += task->task_sg_num;
4797 task_sg_num = task->task_sg_num;
4798 }
4799
4800 sg_head = sg_head_cur;
4801 sg_link = sg_link_cur;
4802 continue;
4803 }
4804 sg_head = sg_first = &task->task_sg[0];
4805 sg_link = &task->task_sg[task->task_sg_num];
4806 /*
4807 * Check for single task..
4808 */
4809 if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
4810 /*
4811 * Clear existing SGL termination bit set in
4812 * transport_calc_sg_num(), see sg_mark_end()
4813 */
4814 sg_end = &task->task_sg[task->task_sg_num - 1];
4815 sg_end->page_link &= ~0x02;
4816 sg_count += task->task_sg_num;
4817 task_sg_num = (task->task_sg_num + 1);
4818 } else {
4819 sg_count += task->task_sg_num;
4820 task_sg_num = task->task_sg_num;
4821 }
4822 }
4823 /*
4824 * Setup the starting pointer and total t_tasks_sg_chained_no including
4825 * padding SGs for linking and to mark the end.
4826 */
4827 T_TASK(cmd)->t_tasks_sg_chained = sg_first;
4828 T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
4829
4830 DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
4831 " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
4832 T_TASK(cmd)->t_tasks_sg_chained_no);
4833
4834 for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
4835 T_TASK(cmd)->t_tasks_sg_chained_no, i) {
4836
4837 DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
4838 i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
4839 if (sg_is_chain(sg))
4840 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
4841 if (sg_is_last(sg))
4842 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
4843 }
4844}
4845EXPORT_SYMBOL(transport_do_task_sg_chain);
4846
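/*
 * Front end for mapping se_mem_list onto a task's scatterlist: defer to the
 * backend's do_se_mem_map() when the subsystem plugin performs its own
 * allocation; otherwise allocate task->task_sg via transport_calc_sg_num()
 * and fill either task->task_sg or task->task_sg_bidi with
 * transport_map_mem_to_sg().
 */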
4847static int transport_do_se_mem_map(
4848 struct se_device *dev,
4849 struct se_task *task,
4850 struct list_head *se_mem_list,
4851 void *in_mem,
4852 struct se_mem *in_se_mem,
4853 struct se_mem **out_se_mem,
4854 u32 *se_mem_cnt,
4855 u32 *task_offset_in)
4856{
4857 u32 task_offset = *task_offset_in;
4858 int ret = 0;
4859 /*
4860 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
4861 * has been done by the transport plugin.
4862 */
4863 if (TRANSPORT(dev)->do_se_mem_map) {
4864 ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
4865 in_mem, in_se_mem, out_se_mem, se_mem_cnt,
4866 task_offset_in);
4867 if (ret == 0)
4868 T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
4869
4870 return ret;
4871 }
4872
4873 BUG_ON(list_empty(se_mem_list));
4874 /*
4875 * This is the normal path for all non-BIDI commands and for BIDI-COMMAND
4876 * WRITE payloads. If we need to do BIDI READ passthrough for
4877 * TCM/pSCSI the first call to transport_do_se_mem_map ->
4878 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
4879 * allocation for task->task_sg_bidi, and the subsequent call to
4880 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
4881 */
4882 if (!(task->task_sg_bidi)) {
4883 /*
4884 * Assume the default case where the transport plugin expects
4885 * preallocated scatterlists.
4886 */
4887 if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
4888 return -1;
4889 /*
4890 * struct se_task->task_sg now contains the struct scatterlist array.
4891 */
4892 return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
4893 in_se_mem, out_se_mem, se_mem_cnt,
4894 task_offset_in);
4895 }
4896 /*
4897 * Handle the se_mem_list -> struct task->task_sg_bidi
4898 * memory map for the extra BIDI READ payload
4899 */
4900 return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
4901 in_se_mem, out_se_mem, se_mem_cnt,
4902 task_offset_in);
4903}
4904
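/*
 * Split the command's LBA range into struct se_task descriptors that each
 * respect the backend's max_sectors limit, copy and adjust the CDB for
 * every task, and map the se_mem list(s) onto the per-task scatterlists.
 * Returns the number of tasks created, or 0 on failure.
 */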
4905static u32 transport_generic_get_cdb_count(
4906 struct se_cmd *cmd,
4907 unsigned long long lba,
4908 u32 sectors,
4909 enum dma_data_direction data_direction,
4910 struct list_head *mem_list,
4911 int set_counts)
4912{
4913 unsigned char *cdb = NULL;
4914 struct se_task *task;
4915 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
4916 struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
4917 struct se_device *dev = SE_DEV(cmd);
4918 int max_sectors_set = 0, ret;
4919 u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
4920
4921 if (!mem_list) {
4922 printk(KERN_ERR "mem_list is NULL in transport_generic_get"
4923 "_cdb_count()\n");
4924 return 0;
4925 }
4926 /*
4927 * Using RAMDISK_DR backstores is the only case where mem_list
4928 * will ever be empty at this point.
4929 */
4930 if (!(list_empty(mem_list)))
4931 se_mem = list_entry(mem_list->next, struct se_mem, se_list);
4932 /*
4933 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
4934 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
4935 */
4936 if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
4937 !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
4938 (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
4939 se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
4940 struct se_mem, se_list);
4941
4942 while (sectors) {
4943 DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
4944 CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
4945 transport_dev_end_lba(dev));
4946
4947 task = transport_generic_get_task(cmd, data_direction);
4948 if (!(task))
4949 goto out;
4950
4951 transport_set_tasks_sectors(task, dev, lba, sectors,
4952 &max_sectors_set);
4953
4954 task->task_lba = lba;
4955 lba += task->task_sectors;
4956 sectors -= task->task_sectors;
4957 task->task_size = (task->task_sectors *
4958 DEV_ATTRIB(dev)->block_size);
4959
4960 cdb = TRANSPORT(dev)->get_cdb(task);
4961 if ((cdb)) {
4962 memcpy(cdb, T_TASK(cmd)->t_task_cdb,
4963 scsi_command_size(T_TASK(cmd)->t_task_cdb));
4964 cmd->transport_split_cdb(task->task_lba,
4965 &task->task_sectors, cdb);
4966 }
4967
4968 /*
4969 * Perform the SE OBJ plugin and/or Transport plugin specific
4970 * mapping for T_TASK(cmd)->t_mem_list. And setup the
4971 * task->task_sg and if necessary task->task_sg_bidi
4972 */
4973 ret = transport_do_se_mem_map(dev, task, mem_list,
4974 NULL, se_mem, &se_mem_lout, &se_mem_cnt,
4975 &task_offset_in);
4976 if (ret < 0)
4977 goto out;
4978
4979 se_mem = se_mem_lout;
4980 /*
4981 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
4982 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
4983 *
4984 * Note that the first call to transport_do_se_mem_map() above will
4985 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
4986 * -> transport_calc_sg_num(), and the second here will do the
4987 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
4988 */
4989 if (task->task_sg_bidi != NULL) {
4990 ret = transport_do_se_mem_map(dev, task,
4991 T_TASK(cmd)->t_mem_bidi_list, NULL,
4992 se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
4993 &task_offset_in);
4994 if (ret < 0)
4995 goto out;
4996
4997 se_mem_bidi = se_mem_bidi_lout;
4998 }
4999 task_cdbs++;
5000
5001 DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
5002 task_cdbs, task->task_sg_num);
5003
5004 if (max_sectors_set) {
5005 max_sectors_set = 0;
5006 continue;
5007 }
5008
5009 if (!sectors)
5010 break;
5011 }
5012
5013 if (set_counts) {
5014 atomic_inc(&T_TASK(cmd)->t_fe_count);
5015 atomic_inc(&T_TASK(cmd)->t_se_count);
5016 }
5017
5018 DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
5019 CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
5020 ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
5021
5022 return task_cdbs;
5023out:
5024 return 0;
5025}
5026
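/*
 * Allocate the single struct se_task used for non SCF_SCSI_DATA_SG_IO_CDB
 * (control) commands and hand it to the backend's map_task_SG(),
 * map_task_non_SG() or cdb_none() method, depending on how the payload is
 * carried.
 */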
5027static int
5028transport_map_control_cmd_to_task(struct se_cmd *cmd)
5029{
5030 struct se_device *dev = SE_DEV(cmd);
5031 unsigned char *cdb;
5032 struct se_task *task;
5033 int ret;
5034
5035 task = transport_generic_get_task(cmd, cmd->data_direction);
5036 if (!task)
5037 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5038
5039 cdb = TRANSPORT(dev)->get_cdb(task);
5040 if (cdb)
5041 memcpy(cdb, cmd->t_task->t_task_cdb,
5042 scsi_command_size(cmd->t_task->t_task_cdb));
5043
5044 task->task_size = cmd->data_length;
5045 task->task_sg_num =
5046 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
5047
5048 atomic_inc(&cmd->t_task->t_fe_count);
5049 atomic_inc(&cmd->t_task->t_se_count);
5050
5051 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
5052 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5053 u32 se_mem_cnt = 0, task_offset = 0;
5054
5055 if (!list_empty(T_TASK(cmd)->t_mem_list))
5056 se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
5057 struct se_mem, se_list);
5058
5059 ret = transport_do_se_mem_map(dev, task,
5060 cmd->t_task->t_mem_list, NULL, se_mem,
5061 &se_mem_lout, &se_mem_cnt, &task_offset);
5062 if (ret < 0)
5063 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5064
5065 if (dev->transport->map_task_SG)
5066 return dev->transport->map_task_SG(task);
5067 return 0;
5068 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
5069 if (dev->transport->map_task_non_SG)
5070 return dev->transport->map_task_non_SG(task);
5071 return 0;
5072 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
5073 if (dev->transport->cdb_none)
5074 return dev->transport->cdb_none(task);
5075 return 0;
5076 } else {
5077 BUG();
5078 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5079 }
5080}
5081
5082/* transport_generic_new_cmd(): Called from transport_processing_thread()
5083 *
5084 * Allocate storage transport resources from a set of values predefined
5085 * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
5086 * Any non-zero return here is treated as an 'out of resource' condition.
5087 */
5088 /*
5089 * Generate struct se_task(s) and/or their payloads for this CDB.
5090 */
5091static int transport_generic_new_cmd(struct se_cmd *cmd)
5092{
5093 struct se_portal_group *se_tpg;
5094 struct se_task *task;
5095 struct se_device *dev = SE_DEV(cmd);
5096 int ret = 0;
5097
5098 /*
5099 * Determine if the TCM fabric module has already allocated physical
5100 * memory, and is directly calling transport_generic_map_mem_to_cmd()
5101 * to set up beforehand the linked list of physical memory at
5102 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page.
5103 */
5104 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
5105 ret = transport_allocate_resources(cmd);
5106 if (ret < 0)
5107 return ret;
5108 }
5109
5110 ret = transport_get_sectors(cmd);
5111 if (ret < 0)
5112 return ret;
5113
5114 ret = transport_new_cmd_obj(cmd);
5115 if (ret < 0)
5116 return ret;
5117
5118 /*
5119 * Determine if the calling TCM fabric module is talking to
5120 * Linux/NET via kernel sockets and needs to allocate a
5121 * struct iovec array to complete the struct se_cmd
5122 */
5123 se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
5124 if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
5125 ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
5126 if (ret < 0)
5127 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5128 }
5129
5130 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
5131 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
5132 if (atomic_read(&task->task_sent))
5133 continue;
5134 if (!dev->transport->map_task_SG)
5135 continue;
5136
5137 ret = dev->transport->map_task_SG(task);
5138 if (ret < 0)
5139 return ret;
5140 }
5141 } else {
5142 ret = transport_map_control_cmd_to_task(cmd);
5143 if (ret < 0)
5144 return ret;
5145 }
5146
5147 /*
5148 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
5149 * This WRITE struct se_cmd (and all of its associated struct se_task's)
5150 * will be added to the struct se_device execution queue after its WRITE
5151 * data has arrived. (ie: It gets handled by the transport processing
5152 * thread a second time)
5153 */
5154 if (cmd->data_direction == DMA_TO_DEVICE) {
5155 transport_add_tasks_to_state_queue(cmd);
5156 return transport_generic_write_pending(cmd);
5157 }
5158 /*
5159 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
5160 * to the execution queue.
5161 */
5162 transport_execute_tasks(cmd);
5163 return 0;
5164}
5165
5166/* transport_generic_process_write():
5167 * Called by the fabric module once an incoming WRITE payload has been
5168 * received; queues the command's tasks for execution.
5169 */
5170void transport_generic_process_write(struct se_cmd *cmd)
5171{
5172#if 0
5173 /*
5174 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
5175 * original EDTL
5176 */
5177 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
5178 if (!T_TASK(cmd)->t_tasks_se_num) {
5179 unsigned char *dst, *buf =
5180 (unsigned char *)T_TASK(cmd)->t_task_buf;
5181
5182 dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
5183 if (!(dst)) {
5184 printk(KERN_ERR "Unable to allocate memory for"
5185 " WRITE underflow\n");
5186 transport_generic_request_failure(cmd, NULL,
5187 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
5188 return;
5189 }
5190 memcpy(dst, buf, cmd->cmd_spdtl);
5191
5192 kfree(T_TASK(cmd)->t_task_buf);
5193 T_TASK(cmd)->t_task_buf = dst;
5194 } else {
5195 struct scatterlist *sg =
5196 (struct scatterlist *)T_TASK(cmd)->t_task_buf;
5197 struct scatterlist *orig_sg;
5198
5199 orig_sg = kzalloc(sizeof(struct scatterlist) *
5200 T_TASK(cmd)->t_tasks_se_num,
5201 GFP_KERNEL);
5202 if (!(orig_sg)) {
5203 printk(KERN_ERR "Unable to allocate memory"
5204 " for WRITE underflow\n");
5205 transport_generic_request_failure(cmd, NULL,
5206 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
5207 return;
5208 }
5209
5210 memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
5211 sizeof(struct scatterlist) *
5212 T_TASK(cmd)->t_tasks_se_num);
5213
5214 cmd->data_length = cmd->cmd_spdtl;
5215 /*
5216 * FIXME, clear out original struct se_task and state
5217 * information.
5218 */
5219 if (transport_generic_new_cmd(cmd) < 0) {
5220 transport_generic_request_failure(cmd, NULL,
5221 PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
5222 kfree(orig_sg);
5223 return;
5224 }
5225
5226 transport_memcpy_write_sg(cmd, orig_sg);
5227 }
5228 }
5229#endif
5230 transport_execute_tasks(cmd);
5231}
5232EXPORT_SYMBOL(transport_generic_process_write);
5233
5234/* transport_generic_write_pending():
5235 * Tell the fabric frontend that the WRITE buffers for this struct
5236 * se_cmd are ready to be filled.
5237 */
5238static int transport_generic_write_pending(struct se_cmd *cmd)
5239{
5240 unsigned long flags;
5241 int ret;
5242
5243 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5244 cmd->t_state = TRANSPORT_WRITE_PENDING;
5245 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5246 /*
5247 * For the TCM control CDBs using a contiguous buffer, do the memcpy
5248 * from the passed Linux/SCSI struct scatterlist located at
5249 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
5250 * T_TASK(se_cmd)->t_task_buf.
5251 */
5252 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
5253 transport_memcpy_read_contig(cmd,
5254 T_TASK(cmd)->t_task_buf,
5255 T_TASK(cmd)->t_task_pt_sgl);
5256 /*
5257 * Clear the se_cmd for WRITE_PENDING status in order to set
5258 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
5259 * can be called from HW target mode interrupt code. This is safe
5260 * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
5261 * because the se_cmd->se_lun pointer is not being cleared.
5262 */
5263 transport_cmd_check_stop(cmd, 1, 0);
5264
5265 /*
5266 * Call the fabric write_pending function here to let the
5267 * frontend know that WRITE buffers are ready.
5268 */
5269 ret = CMD_TFO(cmd)->write_pending(cmd);
5270 if (ret < 0)
5271 return ret;
5272
5273 return PYX_TRANSPORT_WRITE_PENDING;
5274}
5275
5276/* transport_release_cmd_to_pool():
5277 * Release the struct se_cmd descriptor back to the fabric module's
5278 * command pool via CMD_TFO(cmd)->release_cmd_to_pool().
5279 */
5280void transport_release_cmd_to_pool(struct se_cmd *cmd)
5281{
5282 BUG_ON(!T_TASK(cmd));
5283 BUG_ON(!CMD_TFO(cmd));
5284
5285 transport_free_se_cmd(cmd);
5286 CMD_TFO(cmd)->release_cmd_to_pool(cmd);
5287}
5288EXPORT_SYMBOL(transport_release_cmd_to_pool);
5289
5290/* transport_generic_free_cmd():
5291 *
5292 * Called from processing frontend to release storage engine resources
5293 */
5294void transport_generic_free_cmd(
5295 struct se_cmd *cmd,
5296 int wait_for_tasks,
5297 int release_to_pool,
5298 int session_reinstatement)
5299{
5300 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
5301 transport_release_cmd_to_pool(cmd);
5302 else {
5303 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
5304
5305 if (SE_LUN(cmd)) {
5306#if 0
5307 printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
5308 " SE_LUN(cmd)\n", cmd,
5309 CMD_TFO(cmd)->get_task_tag(cmd));
5310#endif
5311 transport_lun_remove_cmd(cmd);
5312 }
5313
5314 if (wait_for_tasks && cmd->transport_wait_for_tasks)
5315 cmd->transport_wait_for_tasks(cmd, 0, 0);
5316
5317 transport_free_dev_tasks(cmd);
5318
5319 transport_generic_remove(cmd, release_to_pool,
5320 session_reinstatement);
5321 }
5322}
5323EXPORT_SYMBOL(transport_generic_free_cmd);
5324
5325static void transport_nop_wait_for_tasks(
5326 struct se_cmd *cmd,
5327 int remove_cmd,
5328 int session_reinstatement)
5329{
5330 return;
5331}
5332
5333/* transport_lun_wait_for_tasks():
5334 *
5335 * Called from ConfigFS context to stop the passed struct se_cmd to allow
5336 * a struct se_lun to be successfully shut down.
5337 */
5338static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
5339{
5340 unsigned long flags;
5341 int ret;
5342 /*
5343 * If the frontend has already requested this struct se_cmd to
5344 * be stopped, we can safely ignore this struct se_cmd.
5345 */
5346 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5347 if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
5348 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
5349 DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
5350 " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
5351 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5352 transport_cmd_check_stop(cmd, 1, 0);
5353 return -1;
5354 }
5355 atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
5356 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5357
5358 wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
5359
5360 ret = transport_stop_tasks_for_cmd(cmd);
5361
5362 DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
5363 " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
5364 if (!ret) {
5365 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
5366 CMD_TFO(cmd)->get_task_tag(cmd));
5367 wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
5368 DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
5369 CMD_TFO(cmd)->get_task_tag(cmd));
5370 }
5371 transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
5372
5373 return 0;
5374}
5375
5376/* #define DEBUG_CLEAR_LUN */
5377#ifdef DEBUG_CLEAR_LUN
5378#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
5379#else
5380#define DEBUG_CLEAR_L(x...)
5381#endif
5382
5383static void __transport_clear_lun_from_sessions(struct se_lun *lun)
5384{
5385 struct se_cmd *cmd = NULL;
5386 unsigned long lun_flags, cmd_flags;
5387 /*
5388 * Do exception processing and return CHECK_CONDITION status to the
5389 * Initiator Port.
5390 */
5391 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5392 while (!list_empty_careful(&lun->lun_cmd_list)) {
5393 cmd = list_entry(lun->lun_cmd_list.next,
5394 struct se_cmd, se_lun_list);
5395 list_del(&cmd->se_lun_list);
5396
5397 if (!(T_TASK(cmd))) {
5398 printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
5399 "[i,t]_state: %u/%u\n",
5400 CMD_TFO(cmd)->get_task_tag(cmd),
5401 CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
5402 BUG();
5403 }
5404 atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
5405 /*
5406 * This will notify target_core_transport.c:
5407 * transport_cmd_check_stop() that a LUN shutdown is in
5408 * progress for this struct se_cmd.
5409 */
5410 spin_lock(&T_TASK(cmd)->t_state_lock);
5411 DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
5412 "_lun_stop for ITT: 0x%08x\n",
5413 SE_LUN(cmd)->unpacked_lun,
5414 CMD_TFO(cmd)->get_task_tag(cmd));
5415 atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
5416 spin_unlock(&T_TASK(cmd)->t_state_lock);
5417
5418 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
5419
5420 if (!(SE_LUN(cmd))) {
5421 printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
5422 CMD_TFO(cmd)->get_task_tag(cmd),
5423 CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
5424 BUG();
5425 }
5426 /*
5427 * If the Storage engine still owns the iscsi_cmd_t, determine
5428 * and/or stop its context.
5429 */
5430 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
5431 "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
5432 CMD_TFO(cmd)->get_task_tag(cmd));
5433
5434 if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
5435 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5436 continue;
5437 }
5438
5439 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
5440 "_wait_for_tasks(): SUCCESS\n",
5441 SE_LUN(cmd)->unpacked_lun,
5442 CMD_TFO(cmd)->get_task_tag(cmd));
5443
5444 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
5445 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
5446 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
5447 goto check_cond;
5448 }
5449 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
5450 transport_all_task_dev_remove_state(cmd);
5451 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
5452
5453 transport_free_dev_tasks(cmd);
5454 /*
5455 * The Storage engine stopped this struct se_cmd before it was
5456 * sent to the fabric frontend for delivery back to the
5457 * Initiator Node. Return this SCSI CDB back with a
5458 * CHECK_CONDITION status.
5459 */
5460check_cond:
5461 transport_send_check_condition_and_sense(cmd,
5462 TCM_NON_EXISTENT_LUN, 0);
5463 /*
5464		 * If the fabric frontend is waiting for this struct se_cmd to
5465		 * be released, notify the waiting thread now that the LU has
5466 * finished accessing it.
5467 */
5468 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
5469 if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
5470 DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
5471 " struct se_cmd: %p ITT: 0x%08x\n",
5472 lun->unpacked_lun,
5473 cmd, CMD_TFO(cmd)->get_task_tag(cmd));
5474
5475 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
5476 cmd_flags);
5477 transport_cmd_check_stop(cmd, 1, 0);
5478 complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
5479 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5480 continue;
5481 }
5482 DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
5483 lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
5484
5485 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
5486 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
5487 }
5488 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
5489}
5490
5491static int transport_clear_lun_thread(void *p)
5492{
5493 struct se_lun *lun = (struct se_lun *)p;
5494
5495 __transport_clear_lun_from_sessions(lun);
5496 complete(&lun->lun_shutdown_comp);
5497
5498 return 0;
5499}
5500
5501int transport_clear_lun_from_sessions(struct se_lun *lun)
5502{
5503 struct task_struct *kt;
5504
5505 kt = kthread_run(transport_clear_lun_thread, (void *)lun,
5506 "tcm_cl_%u", lun->unpacked_lun);
5507 if (IS_ERR(kt)) {
5508 printk(KERN_ERR "Unable to start clear_lun thread\n");
5509 return -1;
5510 }
5511 wait_for_completion(&lun->lun_shutdown_comp);
5512
5513 return 0;
5514}
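/*
 * Usage sketch (hedged; example_shutdown_lun() is hypothetical): a
 * ConfigFS-driven LUN removal path is expected to quiesce outstanding
 * I/O with transport_clear_lun_from_sessions() before the struct se_lun
 * itself is torn down.  The call blocks until the tcm_cl_%u kthread has
 * walked lun->lun_cmd_list and completed lun->lun_shutdown_comp.
 */
static inline void example_shutdown_lun(struct se_lun *lun)
{
	if (transport_clear_lun_from_sessions(lun) < 0)
		printk(KERN_ERR "Unable to quiesce outstanding I/O for LUN: %u\n",
			lun->unpacked_lun);
}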
5515
5516/* transport_generic_wait_for_tasks():
5517 *
5518 * Called from frontend or passthrough context to wait for storage engine
5519 * to pause and/or release frontend generated struct se_cmd.
5520 */
5521static void transport_generic_wait_for_tasks(
5522 struct se_cmd *cmd,
5523 int remove_cmd,
5524 int session_reinstatement)
5525{
5526 unsigned long flags;
5527
5528 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
5529 return;
5530
5531 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5532 /*
5533	 * If we are already stopped due to an external event (i.e. LUN
5534	 * shutdown), sleep until the connection can have the passed struct
5535	 * se_cmd back. The T_TASK(cmd)->transport_lun_fe_stop_comp will be
5536	 * completed by transport_clear_lun_from_sessions() once the ConfigFS
5537	 * context caller has completed its operation on the struct se_cmd.
5538 */
5539 if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
5540
5541 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
5542			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
5543 "_stop_comp); for ITT: 0x%08x\n",
5544 CMD_TFO(cmd)->get_task_tag(cmd));
5545 /*
5546 * There is a special case for WRITES where a FE exception +
5547 * LUN shutdown means ConfigFS context is still sleeping on
5548 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
5549		 * We go ahead and complete transport_lun_stop_comp just to be sure
5550 * here.
5551 */
5552 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5553 complete(&T_TASK(cmd)->transport_lun_stop_comp);
5554 wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
5555 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5556
5557 transport_all_task_dev_remove_state(cmd);
5558 /*
5559		 * At this point, the frontend that originated this struct
5560		 * se_cmd owns the structure again, and it can be released
5561		 * through normal means below.
5562 */
5563 DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
5564			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
5565 "stop_comp); for ITT: 0x%08x\n",
5566 CMD_TFO(cmd)->get_task_tag(cmd));
5567
5568 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
5569 }
5570 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
5571 atomic_read(&T_TASK(cmd)->t_transport_aborted))
5572 goto remove;
5573
5574 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
5575
5576 DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
5577 " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
5578 " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
5579 CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
5580 cmd->deferred_t_state);
5581
5582 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5583
5584 wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
5585
5586 wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
5587
5588 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5589 atomic_set(&T_TASK(cmd)->t_transport_active, 0);
5590 atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
5591
5592	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
5593 "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
5594 CMD_TFO(cmd)->get_task_tag(cmd));
5595remove:
5596 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5597 if (!remove_cmd)
5598 return;
5599
5600 transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
5601}
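/*
 * Note on callers (hedged): this is driven from the frontend, typically
 * when a connection or session is being torn down while a command may
 * still be executing in the storage engine.  The caller sleeps on
 * T_TASK(cmd)->t_transport_stop_comp, which transport_cmd_check_stop()
 * completes once the t_transport_stop request has been observed; the
 * command is then optionally released via transport_generic_free_cmd()
 * as done above.
 */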
5602
5603static int transport_get_sense_codes(
5604 struct se_cmd *cmd,
5605 u8 *asc,
5606 u8 *ascq)
5607{
5608 *asc = cmd->scsi_asc;
5609 *ascq = cmd->scsi_ascq;
5610
5611 return 0;
5612}
5613
5614static int transport_set_sense_codes(
5615 struct se_cmd *cmd,
5616 u8 asc,
5617 u8 ascq)
5618{
5619 cmd->scsi_asc = asc;
5620 cmd->scsi_ascq = ascq;
5621
5622 return 0;
5623}
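/*
 * Example (hedged): backend or emulation code that needs to report a
 * specific NOT READY condition stores the pair in cmd->scsi_asc and
 * cmd->scsi_ascq before failing the command with
 * TCM_CHECK_CONDITION_NOT_READY; transport_get_sense_codes() then copies
 * that pair into the sense buffer in the NOT_READY case below.
 */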
5624
5625int transport_send_check_condition_and_sense(
5626 struct se_cmd *cmd,
5627 u8 reason,
5628 int from_transport)
5629{
5630 unsigned char *buffer = cmd->sense_buffer;
5631 unsigned long flags;
5632 int offset;
5633 u8 asc = 0, ascq = 0;
5634
5635 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5636 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
5637 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5638 return 0;
5639 }
5640 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
5641 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
5642
5643 if (!reason && from_transport)
5644 goto after_reason;
5645
5646 if (!from_transport)
5647 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
5648 /*
5649 * Data Segment and SenseLength of the fabric response PDU.
5650 *
5651 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
5652 * from include/scsi/scsi_cmnd.h
5653 */
5654 offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
5655 TRANSPORT_SENSE_BUFFER);
5656 /*
5657 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
5658 * SENSE KEY values from include/scsi/scsi.h
5659 */
5660 switch (reason) {
5661 case TCM_NON_EXISTENT_LUN:
5662 case TCM_UNSUPPORTED_SCSI_OPCODE:
5663 case TCM_SECTOR_COUNT_TOO_MANY:
5664 /* CURRENT ERROR */
5665 buffer[offset] = 0x70;
5666 /* ILLEGAL REQUEST */
5667 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5668 /* INVALID COMMAND OPERATION CODE */
5669 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
5670 break;
5671 case TCM_UNKNOWN_MODE_PAGE:
5672 /* CURRENT ERROR */
5673 buffer[offset] = 0x70;
5674 /* ILLEGAL REQUEST */
5675 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5676 /* INVALID FIELD IN CDB */
5677 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
5678 break;
5679 case TCM_CHECK_CONDITION_ABORT_CMD:
5680 /* CURRENT ERROR */
5681 buffer[offset] = 0x70;
5682 /* ABORTED COMMAND */
5683 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5684 /* BUS DEVICE RESET FUNCTION OCCURRED */
5685 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
5686 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
5687 break;
5688 case TCM_INCORRECT_AMOUNT_OF_DATA:
5689 /* CURRENT ERROR */
5690 buffer[offset] = 0x70;
5691 /* ABORTED COMMAND */
5692 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5693 /* WRITE ERROR */
5694 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
5695 /* NOT ENOUGH UNSOLICITED DATA */
5696 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
5697 break;
5698 case TCM_INVALID_CDB_FIELD:
5699 /* CURRENT ERROR */
5700 buffer[offset] = 0x70;
5701 /* ABORTED COMMAND */
5702 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5703 /* INVALID FIELD IN CDB */
5704 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
5705 break;
5706 case TCM_INVALID_PARAMETER_LIST:
5707 /* CURRENT ERROR */
5708 buffer[offset] = 0x70;
5709 /* ABORTED COMMAND */
5710 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5711 /* INVALID FIELD IN PARAMETER LIST */
5712 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
5713 break;
5714 case TCM_UNEXPECTED_UNSOLICITED_DATA:
5715 /* CURRENT ERROR */
5716 buffer[offset] = 0x70;
5717 /* ABORTED COMMAND */
5718 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5719 /* WRITE ERROR */
5720 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
5721 /* UNEXPECTED_UNSOLICITED_DATA */
5722 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
5723 break;
5724 case TCM_SERVICE_CRC_ERROR:
5725 /* CURRENT ERROR */
5726 buffer[offset] = 0x70;
5727 /* ABORTED COMMAND */
5728 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5729 /* PROTOCOL SERVICE CRC ERROR */
5730 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
5731 /* N/A */
5732 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
5733 break;
5734 case TCM_SNACK_REJECTED:
5735 /* CURRENT ERROR */
5736 buffer[offset] = 0x70;
5737 /* ABORTED COMMAND */
5738 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
5739 /* READ ERROR */
5740 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
5741 /* FAILED RETRANSMISSION REQUEST */
5742 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
5743 break;
5744 case TCM_WRITE_PROTECTED:
5745 /* CURRENT ERROR */
5746 buffer[offset] = 0x70;
5747 /* DATA PROTECT */
5748 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
5749 /* WRITE PROTECTED */
5750 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
5751 break;
5752 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
5753 /* CURRENT ERROR */
5754 buffer[offset] = 0x70;
5755 /* UNIT ATTENTION */
5756 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
5757 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
5758 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
5759 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
5760 break;
5761 case TCM_CHECK_CONDITION_NOT_READY:
5762 /* CURRENT ERROR */
5763 buffer[offset] = 0x70;
5764 /* Not Ready */
5765 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
5766 transport_get_sense_codes(cmd, &asc, &ascq);
5767 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
5768 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
5769 break;
5770 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
5771 default:
5772 /* CURRENT ERROR */
5773 buffer[offset] = 0x70;
5774 /* ILLEGAL REQUEST */
5775 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
5776 /* LOGICAL UNIT COMMUNICATION FAILURE */
5777 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
5778 break;
5779 }
5780 /*
5781 * This code uses linux/include/scsi/scsi.h SAM status codes!
5782 */
5783 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
5784 /*
5785 * Automatically padded, this value is encoded in the fabric's
5786 * data_length response PDU containing the SCSI defined sense data.
5787 */
5788 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
5789
5790after_reason:
5791 CMD_TFO(cmd)->queue_status(cmd);
5792 return 0;
5793}
5794EXPORT_SYMBOL(transport_send_check_condition_and_sense);
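/*
 * Example (minimal sketch, hedged; example_fail_write_protected() is
 * hypothetical): a fabric module or CDB emulation path that must fail a
 * command maps the failure to one of the TCM_* reasons handled above and
 * lets this helper build the fixed format sense data.  For
 * TCM_WRITE_PROTECTED the payload after the fabric sense offset carries
 * response code 0x70, SENSE KEY DATA_PROTECT and ASC 0x27.
 */
static inline int example_fail_write_protected(struct se_cmd *cmd)
{
	cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
	return transport_send_check_condition_and_sense(cmd,
			TCM_WRITE_PROTECTED, 0);
}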
5795
5796int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
5797{
5798 int ret = 0;
5799
5800 if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
5801 if (!(send_status) ||
5802 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
5803 return 1;
5804#if 0
5805 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
5806 " status for CDB: 0x%02x ITT: 0x%08x\n",
5807 T_TASK(cmd)->t_task_cdb[0],
5808 CMD_TFO(cmd)->get_task_tag(cmd));
5809#endif
5810 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
5811 CMD_TFO(cmd)->queue_status(cmd);
5812 ret = 1;
5813 }
5814 return ret;
5815}
5816EXPORT_SYMBOL(transport_check_aborted_status);
5817
5818void transport_send_task_abort(struct se_cmd *cmd)
5819{
5820 /*
5821 * If there are still expected incoming fabric WRITEs, we wait
5822	 * until they have completed before sending a TASK_ABORTED
5823 * response. This response with TASK_ABORTED status will be
5824 * queued back to fabric module by transport_check_aborted_status().
5825 */
5826 if (cmd->data_direction == DMA_TO_DEVICE) {
5827 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
5828 atomic_inc(&T_TASK(cmd)->t_transport_aborted);
5829 smp_mb__after_atomic_inc();
5830 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5831 transport_new_cmd_failure(cmd);
5832 return;
5833 }
5834 }
5835 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
5836#if 0
5837 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
5838 " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
5839 CMD_TFO(cmd)->get_task_tag(cmd));
5840#endif
5841 CMD_TFO(cmd)->queue_status(cmd);
5842}
5843
5844/* transport_generic_do_tmr():
5845 *
5846 *
5847 */
5848int transport_generic_do_tmr(struct se_cmd *cmd)
5849{
5850 struct se_cmd *ref_cmd;
5851 struct se_device *dev = SE_DEV(cmd);
5852 struct se_tmr_req *tmr = cmd->se_tmr_req;
5853 int ret;
5854
5855 switch (tmr->function) {
5856 case TMR_ABORT_TASK:
5857 ref_cmd = tmr->ref_cmd;
5858 tmr->response = TMR_FUNCTION_REJECTED;
5859 break;
5860 case TMR_ABORT_TASK_SET:
5861 case TMR_CLEAR_ACA:
5862 case TMR_CLEAR_TASK_SET:
5863 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
5864 break;
5865 case TMR_LUN_RESET:
5866 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
5867 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
5868 TMR_FUNCTION_REJECTED;
5869 break;
5870 case TMR_TARGET_WARM_RESET:
5871 tmr->response = TMR_FUNCTION_REJECTED;
5872 break;
5873 case TMR_TARGET_COLD_RESET:
5874 tmr->response = TMR_FUNCTION_REJECTED;
5875 break;
5876 default:
5877		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
5878 tmr->function);
5879 tmr->response = TMR_FUNCTION_REJECTED;
5880 break;
5881 }
5882
5883 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
5884 CMD_TFO(cmd)->queue_tm_rsp(cmd);
5885
5886 transport_cmd_check_stop(cmd, 2, 0);
5887 return 0;
5888}
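/*
 * Usage note (hedged): fabric drivers do not call this directly.  They
 * allocate the request with core_tmr_alloc_req(), resolve the LUN with
 * transport_get_lun_for_tmr() and queue the command to the device
 * processing thread with transport_generic_handle_tmr(), which reaches
 * this function via the TRANSPORT_PROCESS_TMR case below; see
 * ft_send_tm() in drivers/target/tcm_fc/tfc_cmd.c for a concrete caller.
 */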
5889
5890/*
5891 * Called with spin_lock_irq(&dev->execute_task_lock); held
5892 *
5893 */
5894static struct se_task *
5895transport_get_task_from_state_list(struct se_device *dev)
5896{
5897 struct se_task *task;
5898
5899 if (list_empty(&dev->state_task_list))
5900 return NULL;
5901
5902 list_for_each_entry(task, &dev->state_task_list, t_state_list)
5903 break;
5904
5905 list_del(&task->t_state_list);
5906 atomic_set(&task->task_state_active, 0);
5907
5908 return task;
5909}
5910
5911static void transport_processing_shutdown(struct se_device *dev)
5912{
5913 struct se_cmd *cmd;
5914 struct se_queue_req *qr;
5915 struct se_task *task;
5916 u8 state;
5917 unsigned long flags;
5918 /*
5919 * Empty the struct se_device's struct se_task state list.
5920 */
5921 spin_lock_irqsave(&dev->execute_task_lock, flags);
5922 while ((task = transport_get_task_from_state_list(dev))) {
5923 if (!(TASK_CMD(task))) {
5924 printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
5925 continue;
5926 }
5927 cmd = TASK_CMD(task);
5928
5929 if (!T_TASK(cmd)) {
5930 printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
5931 " %p ITT: 0x%08x\n", task, cmd,
5932 CMD_TFO(cmd)->get_task_tag(cmd));
5933 continue;
5934 }
5935 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
5936
5937 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5938
5939 DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
5940 " i_state/def_i_state: %d/%d, t_state/def_t_state:"
5941 " %d/%d cdb: 0x%02x\n", cmd, task,
5942 CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
5943 CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
5944 cmd->t_state, cmd->deferred_t_state,
5945 T_TASK(cmd)->t_task_cdb[0]);
5946 DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
5947 " %d t_task_cdbs_sent: %d -- t_transport_active: %d"
5948 " t_transport_stop: %d t_transport_sent: %d\n",
5949 CMD_TFO(cmd)->get_task_tag(cmd),
5950 T_TASK(cmd)->t_task_cdbs,
5951 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
5952 atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
5953 atomic_read(&T_TASK(cmd)->t_transport_active),
5954 atomic_read(&T_TASK(cmd)->t_transport_stop),
5955 atomic_read(&T_TASK(cmd)->t_transport_sent));
5956
5957 if (atomic_read(&task->task_active)) {
5958 atomic_set(&task->task_stop, 1);
5959 spin_unlock_irqrestore(
5960 &T_TASK(cmd)->t_state_lock, flags);
5961
5962 DEBUG_DO("Waiting for task: %p to shutdown for dev:"
5963 " %p\n", task, dev);
5964 wait_for_completion(&task->task_stop_comp);
5965 DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
5966 task, dev);
5967
5968 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
5969 atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
5970
5971 atomic_set(&task->task_active, 0);
5972 atomic_set(&task->task_stop, 0);
5973 } else {
5974 if (atomic_read(&task->task_execute_queue) != 0)
5975 transport_remove_task_from_execute_queue(task, dev);
5976 }
5977 __transport_stop_task_timer(task, &flags);
5978
5979 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
5980 spin_unlock_irqrestore(
5981 &T_TASK(cmd)->t_state_lock, flags);
5982
5983 DEBUG_DO("Skipping task: %p, dev: %p for"
5984 " t_task_cdbs_ex_left: %d\n", task, dev,
5985 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
5986
5987 spin_lock_irqsave(&dev->execute_task_lock, flags);
5988 continue;
5989 }
5990
5991 if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
5992 DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
5993 " %p\n", task, dev);
5994
5995 if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
5996 spin_unlock_irqrestore(
5997 &T_TASK(cmd)->t_state_lock, flags);
5998 transport_send_check_condition_and_sense(
5999 cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
6000 0);
6001 transport_remove_cmd_from_queue(cmd,
6002 SE_DEV(cmd)->dev_queue_obj);
6003
6004 transport_lun_remove_cmd(cmd);
6005 transport_cmd_check_stop(cmd, 1, 0);
6006 } else {
6007 spin_unlock_irqrestore(
6008 &T_TASK(cmd)->t_state_lock, flags);
6009
6010 transport_remove_cmd_from_queue(cmd,
6011 SE_DEV(cmd)->dev_queue_obj);
6012
6013 transport_lun_remove_cmd(cmd);
6014
6015 if (transport_cmd_check_stop(cmd, 1, 0))
6016 transport_generic_remove(cmd, 0, 0);
6017 }
6018
6019 spin_lock_irqsave(&dev->execute_task_lock, flags);
6020 continue;
6021 }
6022 DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
6023 task, dev);
6024
6025 if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
6026 spin_unlock_irqrestore(
6027 &T_TASK(cmd)->t_state_lock, flags);
6028 transport_send_check_condition_and_sense(cmd,
6029 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
6030 transport_remove_cmd_from_queue(cmd,
6031 SE_DEV(cmd)->dev_queue_obj);
6032
6033 transport_lun_remove_cmd(cmd);
6034 transport_cmd_check_stop(cmd, 1, 0);
6035 } else {
6036 spin_unlock_irqrestore(
6037 &T_TASK(cmd)->t_state_lock, flags);
6038
6039 transport_remove_cmd_from_queue(cmd,
6040 SE_DEV(cmd)->dev_queue_obj);
6041 transport_lun_remove_cmd(cmd);
6042
6043 if (transport_cmd_check_stop(cmd, 1, 0))
6044 transport_generic_remove(cmd, 0, 0);
6045 }
6046
6047 spin_lock_irqsave(&dev->execute_task_lock, flags);
6048 }
6049 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
6050 /*
6051 * Empty the struct se_device's struct se_cmd list.
6052 */
6053 spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
6054 while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
6055 spin_unlock_irqrestore(
6056 &dev->dev_queue_obj->cmd_queue_lock, flags);
6057 cmd = (struct se_cmd *)qr->cmd;
6058 state = qr->state;
6059 kfree(qr);
6060
6061 DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
6062 cmd, state);
6063
6064 if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
6065 transport_send_check_condition_and_sense(cmd,
6066 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
6067
6068 transport_lun_remove_cmd(cmd);
6069 transport_cmd_check_stop(cmd, 1, 0);
6070 } else {
6071 transport_lun_remove_cmd(cmd);
6072 if (transport_cmd_check_stop(cmd, 1, 0))
6073 transport_generic_remove(cmd, 0, 0);
6074 }
6075 spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
6076 }
6077 spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
6078}
6079
6080/* transport_processing_thread():
6081 *
6082 *
6083 */
6084static int transport_processing_thread(void *param)
6085{
6086 int ret, t_state;
6087 struct se_cmd *cmd;
6088 struct se_device *dev = (struct se_device *) param;
6089 struct se_queue_req *qr;
6090
6091 set_user_nice(current, -20);
6092
6093 while (!kthread_should_stop()) {
6094 ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
6095 atomic_read(&dev->dev_queue_obj->queue_cnt) ||
6096 kthread_should_stop());
6097 if (ret < 0)
6098 goto out;
6099
6100 spin_lock_irq(&dev->dev_status_lock);
6101 if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
6102 spin_unlock_irq(&dev->dev_status_lock);
6103 transport_processing_shutdown(dev);
6104 continue;
6105 }
6106 spin_unlock_irq(&dev->dev_status_lock);
6107
6108get_cmd:
6109 __transport_execute_tasks(dev);
6110
6111 qr = transport_get_qr_from_queue(dev->dev_queue_obj);
6112 if (!(qr))
6113 continue;
6114
6115 cmd = (struct se_cmd *)qr->cmd;
6116 t_state = qr->state;
6117 kfree(qr);
6118
6119 switch (t_state) {
6120 case TRANSPORT_NEW_CMD_MAP:
6121 if (!(CMD_TFO(cmd)->new_cmd_map)) {
6122 printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
6123 " NULL for TRANSPORT_NEW_CMD_MAP\n");
6124 BUG();
6125 }
6126 ret = CMD_TFO(cmd)->new_cmd_map(cmd);
6127 if (ret < 0) {
6128 cmd->transport_error_status = ret;
6129 transport_generic_request_failure(cmd, NULL,
6130 0, (cmd->data_direction !=
6131 DMA_TO_DEVICE));
6132 break;
6133 }
6134 /* Fall through */
6135 case TRANSPORT_NEW_CMD:
6136 ret = transport_generic_new_cmd(cmd);
6137 if (ret < 0) {
6138 cmd->transport_error_status = ret;
6139 transport_generic_request_failure(cmd, NULL,
6140 0, (cmd->data_direction !=
6141 DMA_TO_DEVICE));
6142 }
6143 break;
6144 case TRANSPORT_PROCESS_WRITE:
6145 transport_generic_process_write(cmd);
6146 break;
6147 case TRANSPORT_COMPLETE_OK:
6148 transport_stop_all_task_timers(cmd);
6149 transport_generic_complete_ok(cmd);
6150 break;
6151 case TRANSPORT_REMOVE:
6152 transport_generic_remove(cmd, 1, 0);
6153 break;
6154 case TRANSPORT_FREE_CMD_INTR:
6155 transport_generic_free_cmd(cmd, 0, 1, 0);
6156 break;
6157 case TRANSPORT_PROCESS_TMR:
6158 transport_generic_do_tmr(cmd);
6159 break;
6160 case TRANSPORT_COMPLETE_FAILURE:
6161 transport_generic_request_failure(cmd, NULL, 1, 1);
6162 break;
6163 case TRANSPORT_COMPLETE_TIMEOUT:
6164 transport_stop_all_task_timers(cmd);
6165 transport_generic_request_timeout(cmd);
6166 break;
6167 default:
6168 printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
6169 " %d for ITT: 0x%08x i_state: %d on SE LUN:"
6170 " %u\n", t_state, cmd->deferred_t_state,
6171 CMD_TFO(cmd)->get_task_tag(cmd),
6172 CMD_TFO(cmd)->get_cmd_state(cmd),
6173 SE_LUN(cmd)->unpacked_lun);
6174 BUG();
6175 }
6176
6177 goto get_cmd;
6178 }
6179
6180out:
6181 transport_release_all_cmds(dev);
6182 dev->process_thread = NULL;
6183 return 0;
6184}
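/*
 * Flow summary (hedged) for the t_state values handled above: a command
 * queued by a fabric enters as TRANSPORT_NEW_CMD or TRANSPORT_NEW_CMD_MAP,
 * is turned into backend tasks by transport_generic_new_cmd(), executes
 * through __transport_execute_tasks(), and completes back to the fabric
 * via TRANSPORT_COMPLETE_OK or TRANSPORT_COMPLETE_FAILURE.  TMRs arrive
 * as TRANSPORT_PROCESS_TMR, and device shutdown is funnelled through
 * transport_processing_shutdown() before the thread exits.
 */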
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 000000000000..df355176a377
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
1/*******************************************************************************
2 * Filename: target_core_ua.c
3 *
4 * This file contains logic for SPC-3 Unit Attention emulation
5 *
6 * Copyright (c) 2009,2010 Rising Tide Systems
7 * Copyright (c) 2009,2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27#include <linux/version.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <scsi/scsi.h>
31#include <scsi/scsi_cmnd.h>
32
33#include <target/target_core_base.h>
34#include <target/target_core_device.h>
35#include <target/target_core_transport.h>
36#include <target/target_core_fabric_ops.h>
37#include <target/target_core_configfs.h>
38
39#include "target_core_alua.h"
40#include "target_core_hba.h"
41#include "target_core_pr.h"
42#include "target_core_ua.h"
43
44int core_scsi3_ua_check(
45 struct se_cmd *cmd,
46 unsigned char *cdb)
47{
48 struct se_dev_entry *deve;
49 struct se_session *sess = cmd->se_sess;
50 struct se_node_acl *nacl;
51
52 if (!(sess))
53 return 0;
54
55 nacl = sess->se_node_acl;
56 if (!(nacl))
57 return 0;
58
59 deve = &nacl->device_list[cmd->orig_fe_lun];
60 if (!(atomic_read(&deve->ua_count)))
61 return 0;
62 /*
63 * From sam4r14, section 5.14 Unit attention condition:
64 *
65 * a) if an INQUIRY command enters the enabled command state, the
66 * device server shall process the INQUIRY command and shall neither
67 * report nor clear any unit attention condition;
68 * b) if a REPORT LUNS command enters the enabled command state, the
69 * device server shall process the REPORT LUNS command and shall not
70 * report any unit attention condition;
71 * e) if a REQUEST SENSE command enters the enabled command state while
72 * a unit attention condition exists for the SCSI initiator port
73 * associated with the I_T nexus on which the REQUEST SENSE command
74 * was received, then the device server shall process the command
75 * and either:
76 */
77 switch (cdb[0]) {
78 case INQUIRY:
79 case REPORT_LUNS:
80 case REQUEST_SENSE:
81 return 0;
82 default:
83 return -1;
84 }
85
86 return -1;
87}
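/*
 * Example (minimal sketch, hedged; example_check_pending_ua() is
 * hypothetical): the CDB sequencer is expected to call core_scsi3_ua_check()
 * before emulating most opcodes and to fail the command with a UNIT
 * ATTENTION check condition when one is pending, so that
 * core_scsi3_ua_for_check_condition() below can report the stored ASC/ASCQ.
 */
static inline int example_check_pending_ua(struct se_cmd *cmd, unsigned char *cdb)
{
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -2;
	}
	return 0;
}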
88
89int core_scsi3_ua_allocate(
90 struct se_node_acl *nacl,
91 u32 unpacked_lun,
92 u8 asc,
93 u8 ascq)
94{
95 struct se_dev_entry *deve;
96 struct se_ua *ua, *ua_p, *ua_tmp;
97 /*
98 * PASSTHROUGH OPS
99 */
100 if (!(nacl))
101 return -1;
102
103 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
104 if (!(ua)) {
105 printk(KERN_ERR "Unable to allocate struct se_ua\n");
106 return -1;
107 }
108 INIT_LIST_HEAD(&ua->ua_dev_list);
109 INIT_LIST_HEAD(&ua->ua_nacl_list);
110
111 ua->ua_nacl = nacl;
112 ua->ua_asc = asc;
113 ua->ua_ascq = ascq;
114
115 spin_lock_irq(&nacl->device_list_lock);
116 deve = &nacl->device_list[unpacked_lun];
117
118 spin_lock(&deve->ua_lock);
119 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
120 /*
121		 * Do not report the same UNIT ATTENTION twice.
122 */
123 if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
124 spin_unlock(&deve->ua_lock);
125 spin_unlock_irq(&nacl->device_list_lock);
126 kmem_cache_free(se_ua_cache, ua);
127 return 0;
128 }
129 /*
130 * Attach the highest priority Unit Attention to
131 * the head of the list following sam4r14,
132 * Section 5.14 Unit Attention Condition:
133 *
134 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
135 * POWER ON OCCURRED or
136 * DEVICE INTERNAL RESET
137 * SCSI BUS RESET OCCURRED or
138 * MICROCODE HAS BEEN CHANGED or
139 * protocol specific
140 * BUS DEVICE RESET FUNCTION OCCURRED
141 * I_T NEXUS LOSS OCCURRED
142 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
143 * all others Lowest
144 *
145 * Each of the ASCQ codes listed above are defined in
146 * the 29h ASC family, see spc4r17 Table D.1
147 */
148 if (ua_p->ua_asc == 0x29) {
149 if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
150 list_add(&ua->ua_nacl_list,
151 &deve->ua_list);
152 else
153 list_add_tail(&ua->ua_nacl_list,
154 &deve->ua_list);
155 } else if (ua_p->ua_asc == 0x2a) {
156 /*
157 * Incoming Family 29h ASCQ codes will override
158			 * Family 2Ah ASCQ codes for Unit Attention condition.
159 */
160 if ((asc == 0x29) || (ascq > ua_p->ua_asc))
161 list_add(&ua->ua_nacl_list,
162 &deve->ua_list);
163 else
164 list_add_tail(&ua->ua_nacl_list,
165 &deve->ua_list);
166 } else
167 list_add_tail(&ua->ua_nacl_list,
168 &deve->ua_list);
169 spin_unlock(&deve->ua_lock);
170 spin_unlock_irq(&nacl->device_list_lock);
171
172 atomic_inc(&deve->ua_count);
173 smp_mb__after_atomic_inc();
174 return 0;
175 }
176 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
177 spin_unlock(&deve->ua_lock);
178 spin_unlock_irq(&nacl->device_list_lock);
179
180 printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
181 " 0x%02x, ASCQ: 0x%02x\n",
182 TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
183 asc, ascq);
184
185 atomic_inc(&deve->ua_count);
186 smp_mb__after_atomic_inc();
187 return 0;
188}
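/*
 * Example (hedged): state-changing emulation code raises a unit attention
 * for every other I_T nexus that can see the LUN, for instance after an
 * ALUA access state transition, along the lines of:
 *
 *	core_scsi3_ua_allocate(nacl, mapped_lun, 0x2A,
 *			ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
 *
 * using the 29h/2Ah ASC families and the ASCQ values defined in
 * target_core_ua.h.
 */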
189
190void core_scsi3_ua_release_all(
191 struct se_dev_entry *deve)
192{
193 struct se_ua *ua, *ua_p;
194
195 spin_lock(&deve->ua_lock);
196 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
197 list_del(&ua->ua_nacl_list);
198 kmem_cache_free(se_ua_cache, ua);
199
200 atomic_dec(&deve->ua_count);
201 smp_mb__after_atomic_dec();
202 }
203 spin_unlock(&deve->ua_lock);
204}
205
206void core_scsi3_ua_for_check_condition(
207 struct se_cmd *cmd,
208 u8 *asc,
209 u8 *ascq)
210{
211 struct se_device *dev = SE_DEV(cmd);
212 struct se_dev_entry *deve;
213 struct se_session *sess = cmd->se_sess;
214 struct se_node_acl *nacl;
215 struct se_ua *ua = NULL, *ua_p;
216 int head = 1;
217
218 if (!(sess))
219 return;
220
221 nacl = sess->se_node_acl;
222 if (!(nacl))
223 return;
224
225 spin_lock_irq(&nacl->device_list_lock);
226 deve = &nacl->device_list[cmd->orig_fe_lun];
227 if (!(atomic_read(&deve->ua_count))) {
228 spin_unlock_irq(&nacl->device_list_lock);
229 return;
230 }
231 /*
232 * The highest priority Unit Attentions are placed at the head of the
233 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
234 * sense data for the received CDB.
235 */
236 spin_lock(&deve->ua_lock);
237 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
238 /*
239 * For ua_intlck_ctrl code not equal to 00b, only report the
240 * highest priority UNIT_ATTENTION and ASC/ASCQ without
241 * clearing it.
242 */
243 if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
244 *asc = ua->ua_asc;
245 *ascq = ua->ua_ascq;
246 break;
247 }
248 /*
249 * Otherwise for the default 00b, release the UNIT ATTENTION
250 * condition. Return the ASC/ASCQ of the highest priority UA
251 * (head of the list) in the outgoing CHECK_CONDITION + sense.
252 */
253 if (head) {
254 *asc = ua->ua_asc;
255 *ascq = ua->ua_ascq;
256 head = 0;
257 }
258 list_del(&ua->ua_nacl_list);
259 kmem_cache_free(se_ua_cache, ua);
260
261 atomic_dec(&deve->ua_count);
262 smp_mb__after_atomic_dec();
263 }
264 spin_unlock(&deve->ua_lock);
265 spin_unlock_irq(&nacl->device_list_lock);
266
267 printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
268 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
269 " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
270 TPG_TFO(nacl->se_tpg)->get_fabric_name(),
271 (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
272 "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
273 cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
274}
275
276int core_scsi3_ua_clear_for_request_sense(
277 struct se_cmd *cmd,
278 u8 *asc,
279 u8 *ascq)
280{
281 struct se_dev_entry *deve;
282 struct se_session *sess = cmd->se_sess;
283 struct se_node_acl *nacl;
284 struct se_ua *ua = NULL, *ua_p;
285 int head = 1;
286
287 if (!(sess))
288 return -1;
289
290 nacl = sess->se_node_acl;
291 if (!(nacl))
292 return -1;
293
294 spin_lock_irq(&nacl->device_list_lock);
295 deve = &nacl->device_list[cmd->orig_fe_lun];
296 if (!(atomic_read(&deve->ua_count))) {
297 spin_unlock_irq(&nacl->device_list_lock);
298 return -1;
299 }
300 /*
301 * The highest priority Unit Attentions are placed at the head of the
302 * struct se_dev_entry->ua_list. The First (and hence highest priority)
303 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
304 * matching struct se_lun.
305 *
306 * Once the returning ASC/ASCQ values are set, we go ahead and
307 * release all of the Unit Attention conditions for the associated
308 * struct se_lun.
309 */
310 spin_lock(&deve->ua_lock);
311 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
312 if (head) {
313 *asc = ua->ua_asc;
314 *ascq = ua->ua_ascq;
315 head = 0;
316 }
317 list_del(&ua->ua_nacl_list);
318 kmem_cache_free(se_ua_cache, ua);
319
320 atomic_dec(&deve->ua_count);
321 smp_mb__after_atomic_dec();
322 }
323 spin_unlock(&deve->ua_lock);
324 spin_unlock_irq(&nacl->device_list_lock);
325
326 printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
327 " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
328 " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
329 cmd->orig_fe_lun, *asc, *ascq);
330
331 return (head) ? -1 : 0;
332}
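/*
 * Usage note (hedged): the REQUEST_SENSE emulation is expected to call
 * this helper and, when it returns 0, build a UNIT ATTENTION sense
 * payload from the returned ASC/ASCQ instead of reporting "no sense";
 * all pending unit attentions for the mapped LUN are released in the
 * process, matching the default 00b ua_intlck_ctrl behaviour described
 * above.
 */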
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 000000000000..6e6b03460a1a
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
1#ifndef TARGET_CORE_UA_H
2#define TARGET_CORE_UA_H
3/*
4 * From spc4r17, Table D.1: ASC and ASCQ Assignment
5 */
6#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00
7#define ASCQ_29H_POWER_ON_OCCURRED 0x01
8#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02
9#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03
10#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04
11#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05
12#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06
13#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07
14
15#define ASCQ_2AH_PARAMETERS_CHANGED 0x00
16#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01
17#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02
18#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03
19#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
20#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
21#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
22#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
23#define ASCQ_2AH_PRIORITY_CHANGED 0x08
24
25#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
26
27extern struct kmem_cache *se_ua_cache;
28
29extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
30extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
31extern void core_scsi3_ua_release_all(struct se_dev_entry *);
32extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
33extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
34 u8 *, u8 *);
35
36#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 000000000000..40caf458e89e
--- /dev/null
+++ b/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
1config TCM_FC
2 tristate "TCM_FC fabric Plugin"
3 depends on LIBFC
4 help
5 Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
new file mode 100644
index 000000000000..7a5c2b64cf65
--- /dev/null
+++ b/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,15 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
2 -I$(srctree)/drivers/scsi/ \
3 -I$(srctree)/include/scsi/ \
4 -I$(srctree)/drivers/target/tcm_fc/
5
6tcm_fc-y += tfc_cmd.o \
7 tfc_conf.o \
8 tfc_io.o \
9 tfc_sess.o
10
11obj-$(CONFIG_TCM_FC) += tcm_fc.o
12
13ifdef CONFIGFS_TCM_FC_DEBUG
14EXTRA_CFLAGS += -DTCM_FC_DEBUG
15endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 000000000000..7b82f1b7fef8
--- /dev/null
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17#ifndef __TCM_FC_H__
18#define __TCM_FC_H__
19
20#define FT_VERSION "0.3"
21
22#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
23#define FT_TPG_NAMELEN 32 /* max length of TPG name */
24#define FT_LUN_NAMELEN 32 /* max length of LUN name */
25
26/*
27 * Debug options.
28 */
29#define FT_DEBUG_CONF 0x01 /* configuration messages */
30#define FT_DEBUG_SESS 0x02 /* session messages */
31#define FT_DEBUG_TM 0x04 /* TM operations */
32#define FT_DEBUG_IO 0x08 /* I/O commands */
33#define FT_DEBUG_DATA 0x10 /* Data transfer */
34
35extern unsigned int ft_debug_logging; /* debug options */
36
37#define FT_DEBUG(mask, fmt, args...) \
38 do { \
39 if (ft_debug_logging & (mask)) \
40 printk(KERN_INFO "tcm_fc: %s: " fmt, \
41 __func__, ##args); \
42 } while (0)
43
44#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
45#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
46#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
47#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
48#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
49
50struct ft_transport_id {
51 __u8 format;
52 __u8 __resvd1[7];
53 __u8 wwpn[8];
54 __u8 __resvd2[8];
55} __attribute__((__packed__));
56
57/*
58 * Session (remote port).
59 */
60struct ft_sess {
61 u32 port_id; /* for hash lookup use only */
62 u32 params;
63 u16 max_frame; /* maximum frame size */
64 u64 port_name; /* port name for transport ID */
65 struct ft_tport *tport;
66 struct se_session *se_sess;
67 struct hlist_node hash; /* linkage in ft_sess_hash table */
68 struct rcu_head rcu;
69 struct kref kref; /* ref for hash and outstanding I/Os */
70};
71
72/*
73 * Hash table of sessions per local port.
74 * Hash lookup by remote port FC_ID.
75 */
76#define FT_SESS_HASH_BITS 6
77#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS)
78
79/*
80 * Per local port data.
81 * This is created only after a TPG exists that allows target function
82 * for the local port. If the TPG exists, this is allocated when
83 * we're notified that the local port has been created, or when
84 * the first PRLI provider callback is received.
85 */
86struct ft_tport {
87 struct fc_lport *lport;
88 struct ft_tpg *tpg; /* NULL if TPG deleted before tport */
89 u32 sess_count; /* number of sessions in hash */
90 struct rcu_head rcu;
91 struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */
92};
93
94/*
95 * Node ID and authentication.
96 */
97struct ft_node_auth {
98 u64 port_name;
99 u64 node_name;
100};
101
102/*
103 * Node ACL for FC remote port session.
104 */
105struct ft_node_acl {
106 struct ft_node_auth node_auth;
107 struct se_node_acl se_node_acl;
108};
109
110struct ft_lun {
111 u32 index;
112 char name[FT_LUN_NAMELEN];
113};
114
115/*
116 * Target portal group (local port).
117 */
118struct ft_tpg {
119 u32 index;
120 struct ft_lport_acl *lport_acl;
121 struct ft_tport *tport; /* active tport or NULL */
122 struct list_head list; /* linkage in ft_lport_acl tpg_list */
123 struct list_head lun_list; /* head of LUNs */
124 struct se_portal_group se_tpg;
125 struct task_struct *thread; /* processing thread */
126 struct se_queue_obj qobj; /* queue for processing thread */
127};
128
129struct ft_lport_acl {
130 u64 wwpn;
131 char name[FT_NAMELEN];
132 struct list_head list;
133 struct list_head tpg_list;
134 struct se_wwn fc_lport_wwn;
135};
136
137enum ft_cmd_state {
138 FC_CMD_ST_NEW = 0,
139 FC_CMD_ST_REJ
140};
141
142/*
143 * Commands
144 */
145struct ft_cmd {
146 enum ft_cmd_state state;
147 u32 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
151 struct fc_frame *req_frame;
152 unsigned char *cdb; /* pointer to CDB inside frame */
153 u32 write_data_len; /* data received on writes */
154 struct se_queue_req se_req;
155 /* Local sense buffer */
156 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
157 u32 was_ddp_setup:1; /* Set only if ddp is setup */
158 struct scatterlist *sg; /* Set only if DDP is setup */
159	u32 sg_cnt; /* No. of items in scatterlist */
160};
161
162extern struct list_head ft_lport_list;
163extern struct mutex ft_lport_lock;
164extern struct fc4_prov ft_prov;
165extern struct target_fabric_configfs *ft_configfs;
166
167/*
168 * Fabric methods.
169 */
170
171/*
172 * Session ops.
173 */
174void ft_sess_put(struct ft_sess *);
175int ft_sess_shutdown(struct se_session *);
176void ft_sess_close(struct se_session *);
177void ft_sess_stop(struct se_session *, int, int);
178int ft_sess_logged_in(struct se_session *);
179u32 ft_sess_get_index(struct se_session *);
180u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
181void ft_sess_set_erl0(struct se_session *);
182
183void ft_lport_add(struct fc_lport *, void *);
184void ft_lport_del(struct fc_lport *, void *);
185int ft_lport_notify(struct notifier_block *, unsigned long, void *);
186
187/*
188 * IO methods.
189 */
190void ft_check_stop_free(struct se_cmd *);
191void ft_release_cmd(struct se_cmd *);
192int ft_queue_status(struct se_cmd *);
193int ft_queue_data_in(struct se_cmd *);
194int ft_write_pending(struct se_cmd *);
195int ft_write_pending_status(struct se_cmd *);
196u32 ft_get_task_tag(struct se_cmd *);
197int ft_get_cmd_state(struct se_cmd *);
198void ft_new_cmd_failure(struct se_cmd *);
199int ft_queue_tm_resp(struct se_cmd *);
200int ft_is_state_remove(struct se_cmd *);
201
202/*
203 * other internal functions.
204 */
205int ft_thread(void *);
206void ft_recv_req(struct ft_sess *, struct fc_frame *);
207struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
208struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
209
210void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
211void ft_dump_cmd(struct ft_cmd *, const char *caller);
212
213ssize_t ft_format_wwn(char *, size_t, u64);
214
215#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 000000000000..b2a106729d49
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,716 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <asm/unaligned.h>
34#include <scsi/scsi.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_tcq.h>
39#include <scsi/libfc.h>
40#include <scsi/fc_encode.h>
41
42#include <target/target_core_base.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_device.h>
46#include <target/target_core_tpg.h>
47#include <target/target_core_configfs.h>
48#include <target/target_core_base.h>
49#include <target/target_core_tmr.h>
50#include <target/configfs_macros.h>
51
52#include "tcm_fc.h"
53
54/*
55 * Dump cmd state for debugging.
56 */
57void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
58{
59 struct fc_exch *ep;
60 struct fc_seq *sp;
61 struct se_cmd *se_cmd;
62 struct se_mem *mem;
63 struct se_transport_task *task;
64
65 if (!(ft_debug_logging & FT_DEBUG_IO))
66 return;
67
68 se_cmd = &cmd->se_cmd;
69 printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
70 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
71 printk(KERN_INFO "%s: cmd %p cdb %p\n",
72 caller, cmd, cmd->cdb);
73 printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
74
75 task = T_TASK(se_cmd);
76 printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
77 caller, cmd, task, task->t_tasks_se_num,
78 task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
79 if (task->t_mem_list)
80 list_for_each_entry(mem, task->t_mem_list, se_list)
81 printk(KERN_INFO "%s: cmd %p mem %p page %p "
82 "len 0x%x off 0x%x\n",
83 caller, cmd, mem,
84 mem->se_page, mem->se_len, mem->se_off);
85 sp = cmd->seq;
86 if (sp) {
87 ep = fc_seq_exch(sp);
88 printk(KERN_INFO "%s: cmd %p sid %x did %x "
89 "ox_id %x rx_id %x seq_id %x e_stat %x\n",
90 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
91 sp->id, ep->esb_stat);
92 }
93 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
94 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
95}
96
97static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
98{
99 struct se_queue_obj *qobj;
100 unsigned long flags;
101
102 qobj = &sess->tport->tpg->qobj;
103 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
104 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
105 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
106 atomic_inc(&qobj->queue_cnt);
107 wake_up_interruptible(&qobj->thread_wq);
108}
109
110static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
111{
112 unsigned long flags;
113 struct se_queue_req *qr;
114
115 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
116 if (list_empty(&qobj->qobj_list)) {
117 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
118 return NULL;
119 }
120 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
121 list_del(&qr->qr_list);
122 atomic_dec(&qobj->queue_cnt);
123 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
124 return container_of(qr, struct ft_cmd, se_req);
125}
126
127static void ft_free_cmd(struct ft_cmd *cmd)
128{
129 struct fc_frame *fp;
130 struct fc_lport *lport;
131
132 if (!cmd)
133 return;
134 fp = cmd->req_frame;
135 lport = fr_dev(fp);
136 if (fr_seq(fp))
137 lport->tt.seq_release(fr_seq(fp));
138 fc_frame_free(fp);
139 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
140 kfree(cmd);
141}
142
143void ft_release_cmd(struct se_cmd *se_cmd)
144{
145 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
146
147 ft_free_cmd(cmd);
148}
149
150void ft_check_stop_free(struct se_cmd *se_cmd)
151{
152 transport_generic_free_cmd(se_cmd, 0, 1, 0);
153}
154
155/*
156 * Send response.
157 */
158int ft_queue_status(struct se_cmd *se_cmd)
159{
160 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
161 struct fc_frame *fp;
162 struct fcp_resp_with_ext *fcp;
163 struct fc_lport *lport;
164 struct fc_exch *ep;
165 size_t len;
166
167 ft_dump_cmd(cmd, __func__);
168 ep = fc_seq_exch(cmd->seq);
169 lport = ep->lp;
170 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
171 fp = fc_frame_alloc(lport, len);
172 if (!fp) {
173 /* XXX shouldn't just drop it - requeue and retry? */
174 return 0;
175 }
176 fcp = fc_frame_payload_get(fp, len);
177 memset(fcp, 0, len);
178 fcp->resp.fr_status = se_cmd->scsi_status;
179
180 len = se_cmd->scsi_sense_length;
181 if (len) {
182 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
183 fcp->ext.fr_sns_len = htonl(len);
184 memcpy((fcp + 1), se_cmd->sense_buffer, len);
185 }
186
187 /*
188 * Test underflow and overflow with one mask. Usually both are off.
189 * Bidirectional commands are not handled yet.
190 */
191 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
192 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
193 fcp->resp.fr_flags |= FCP_RESID_OVER;
194 else
195 fcp->resp.fr_flags |= FCP_RESID_UNDER;
196 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
197 }
198
199 /*
200 * Send response.
201 */
202 cmd->seq = lport->tt.seq_start_next(cmd->seq);
203 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
204 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
205
206 lport->tt.seq_send(lport, cmd->seq, fp);
207 lport->tt.exch_done(cmd->seq);
208 return 0;
209}
210
211int ft_write_pending_status(struct se_cmd *se_cmd)
212{
213 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
214
215 return cmd->write_data_len != se_cmd->data_length;
216}
217
218/*
219 * Send TX_RDY (transfer ready).
220 */
221int ft_write_pending(struct se_cmd *se_cmd)
222{
223 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
224 struct fc_frame *fp;
225 struct fcp_txrdy *txrdy;
226 struct fc_lport *lport;
227 struct fc_exch *ep;
228 struct fc_frame_header *fh;
229 u32 f_ctl;
230
231 ft_dump_cmd(cmd, __func__);
232
233 ep = fc_seq_exch(cmd->seq);
234 lport = ep->lp;
235 fp = fc_frame_alloc(lport, sizeof(*txrdy));
236 if (!fp)
237 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
238
239 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
240 memset(txrdy, 0, sizeof(*txrdy));
241 txrdy->ft_burst_len = htonl(se_cmd->data_length);
242
243 cmd->seq = lport->tt.seq_start_next(cmd->seq);
244 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
245 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
246
247 fh = fc_frame_header_get(fp);
248 f_ctl = ntoh24(fh->fh_f_ctl);
249
250 /* Only if it is 'Exchange Responder' */
251 if (f_ctl & FC_FC_EX_CTX) {
252 /* Target is 'exchange responder' and sending XFER_READY
253		 * to the 'exchange initiator'
254 */
255 if ((ep->xid <= lport->lro_xid) &&
256 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
257 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
258 /*
259				 * Map the se_mem list to a scatterlist so that
260				 * DDP can be set up; the DDP setup function
261				 * requires a scatterlist. The se_mem list is
262				 * internal to the TCM/LIO target.
263 */
264 transport_do_task_sg_chain(se_cmd);
265 cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
266 cmd->sg_cnt =
267 T_TASK(se_cmd)->t_tasks_sg_chained_no;
268 }
269 if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
270 cmd->sg, cmd->sg_cnt))
271 cmd->was_ddp_setup = 1;
272 }
273 }
274 lport->tt.seq_send(lport, cmd->seq, fp);
275 return 0;
276}
277
278u32 ft_get_task_tag(struct se_cmd *se_cmd)
279{
280 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
281
282 return fc_seq_exch(cmd->seq)->rxid;
283}
284
285int ft_get_cmd_state(struct se_cmd *se_cmd)
286{
287 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
288
289 return cmd->state;
290}
291
292int ft_is_state_remove(struct se_cmd *se_cmd)
293{
294 return 0; /* XXX TBD */
295}
296
297void ft_new_cmd_failure(struct se_cmd *se_cmd)
298{
299 /* XXX TBD */
300 printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
301}
302
303/*
304 * FC sequence response handler for follow-on sequences (data) and aborts.
305 */
306static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
307{
308 struct ft_cmd *cmd = arg;
309 struct fc_frame_header *fh;
310
311 if (IS_ERR(fp)) {
312 /* XXX need to find cmd if queued */
313 cmd->se_cmd.t_state = TRANSPORT_REMOVE;
314 cmd->seq = NULL;
315 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
316 return;
317 }
318
319 fh = fc_frame_header_get(fp);
320
321 switch (fh->fh_r_ctl) {
322 case FC_RCTL_DD_SOL_DATA: /* write data */
323 ft_recv_write_data(cmd, fp);
324 break;
325 case FC_RCTL_DD_UNSOL_CTL: /* command */
326 case FC_RCTL_DD_SOL_CTL: /* transfer ready */
327 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
328 default:
329 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
330 __func__, fh->fh_r_ctl);
331 fc_frame_free(fp);
332 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
333 break;
334 }
335}
336
337/*
338 * Send a FCP response including SCSI status and optional FCP rsp_code.
339 * status is SAM_STAT_GOOD (zero) iff code is valid.
340 * This is used in error cases, such as allocation failures.
341 */
342static void ft_send_resp_status(struct fc_lport *lport,
343 const struct fc_frame *rx_fp,
344 u32 status, enum fcp_resp_rsp_codes code)
345{
346 struct fc_frame *fp;
347 struct fc_seq *sp;
348 const struct fc_frame_header *fh;
349 size_t len;
350 struct fcp_resp_with_ext *fcp;
351 struct fcp_resp_rsp_info *info;
352
353 fh = fc_frame_header_get(rx_fp);
354 FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
355 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
356 len = sizeof(*fcp);
357 if (status == SAM_STAT_GOOD)
358 len += sizeof(*info);
359 fp = fc_frame_alloc(lport, len);
360 if (!fp)
361 return;
362 fcp = fc_frame_payload_get(fp, len);
363 memset(fcp, 0, len);
364 fcp->resp.fr_status = status;
365 if (status == SAM_STAT_GOOD) {
366 fcp->ext.fr_rsp_len = htonl(sizeof(*info));
367 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
368 info = (struct fcp_resp_rsp_info *)(fcp + 1);
369 info->rsp_code = code;
370 }
371
372 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
373 sp = fr_seq(fp);
374 if (sp)
375 lport->tt.seq_send(lport, sp, fp);
376 else
377 lport->tt.frame_send(lport, fp);
378}
379
380/*
381 * Send error or task management response.
382 * Always frees the cmd and associated state.
383 */
384static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
385{
386 ft_send_resp_status(cmd->sess->tport->lport,
387 cmd->req_frame, SAM_STAT_GOOD, code);
388 ft_free_cmd(cmd);
389}
390
391/*
392 * Handle Task Management Request.
393 */
394static void ft_send_tm(struct ft_cmd *cmd)
395{
396 struct se_tmr_req *tmr;
397 struct fcp_cmnd *fcp;
398 struct ft_sess *sess;
399 u8 tm_func;
400
401 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
402
403 switch (fcp->fc_tm_flags) {
404 case FCP_TMF_LUN_RESET:
405 tm_func = TMR_LUN_RESET;
406 break;
407 case FCP_TMF_TGT_RESET:
408 tm_func = TMR_TARGET_WARM_RESET;
409 break;
410 case FCP_TMF_CLR_TASK_SET:
411 tm_func = TMR_CLEAR_TASK_SET;
412 break;
413 case FCP_TMF_ABT_TASK_SET:
414 tm_func = TMR_ABORT_TASK_SET;
415 break;
416 case FCP_TMF_CLR_ACA:
417 tm_func = TMR_CLEAR_ACA;
418 break;
419 default:
420 /*
421 * FCP4r01 indicates having a combination of
422 * tm_flags set is invalid.
423 */
424 FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
425 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
426 return;
427 }
428
429 FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
430 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
431 if (!tmr) {
432 FT_TM_DBG("alloc failed\n");
433 ft_send_resp_code(cmd, FCP_TMF_FAILED);
434 return;
435 }
436 cmd->se_cmd.se_tmr_req = tmr;
437
438 switch (fcp->fc_tm_flags) {
439 case FCP_TMF_LUN_RESET:
440 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
441 if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
442 /*
443 * Make sure to clean up newly allocated TMR request
444 * since "unable to handle TMR request because failed
445 * to get to LUN"
446 */
447 FT_TM_DBG("Failed to get LUN for TMR func %d, "
448 "se_cmd %p, unpacked_lun %d\n",
449 tm_func, &cmd->se_cmd, cmd->lun);
450 ft_dump_cmd(cmd, __func__);
451 sess = cmd->sess;
452 transport_send_check_condition_and_sense(&cmd->se_cmd,
453 cmd->se_cmd.scsi_sense_reason, 0);
454 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
455 ft_sess_put(sess);
456 return;
457 }
458 break;
459 case FCP_TMF_TGT_RESET:
460 case FCP_TMF_CLR_TASK_SET:
461 case FCP_TMF_ABT_TASK_SET:
462 case FCP_TMF_CLR_ACA:
463 break;
464 default:
465 return;
466 }
467 transport_generic_handle_tmr(&cmd->se_cmd);
468}
469
470/*
471 * Send status from completed task management request.
472 */
473int ft_queue_tm_resp(struct se_cmd *se_cmd)
474{
475 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
476 struct se_tmr_req *tmr = se_cmd->se_tmr_req;
477 enum fcp_resp_rsp_codes code;
478
479 switch (tmr->response) {
480 case TMR_FUNCTION_COMPLETE:
481 code = FCP_TMF_CMPL;
482 break;
483 case TMR_LUN_DOES_NOT_EXIST:
484 code = FCP_TMF_INVALID_LUN;
485 break;
486 case TMR_FUNCTION_REJECTED:
487 code = FCP_TMF_REJECTED;
488 break;
489 case TMR_TASK_DOES_NOT_EXIST:
490 case TMR_TASK_STILL_ALLEGIANT:
491 case TMR_TASK_FAILOVER_NOT_SUPPORTED:
492 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
493 case TMR_FUNCTION_AUTHORIZATION_FAILED:
494 default:
495 code = FCP_TMF_FAILED;
496 break;
497 }
498 FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
499 tmr->function, tmr->response, code);
500 ft_send_resp_code(cmd, code);
501 return 0;
502}
503
504/*
505 * Handle incoming FCP command.
506 */
507static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
508{
509 struct ft_cmd *cmd;
510 struct fc_lport *lport = sess->tport->lport;
511
512 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
513 if (!cmd)
514 goto busy;
515 cmd->sess = sess;
516 cmd->seq = lport->tt.seq_assign(lport, fp);
517 if (!cmd->seq) {
518 kfree(cmd);
519 goto busy;
520 }
521 cmd->req_frame = fp; /* hold frame during cmd */
522 ft_queue_cmd(sess, cmd);
523 return;
524
525busy:
526 FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
527 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
528 fc_frame_free(fp);
529 ft_sess_put(sess); /* undo get from lookup */
530}
531
532
533/*
534 * Handle incoming FCP frame.
535 * Caller has verified that the frame is type FCP.
536 */
537void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
538{
539 struct fc_frame_header *fh = fc_frame_header_get(fp);
540
541 switch (fh->fh_r_ctl) {
542 case FC_RCTL_DD_UNSOL_CMD: /* command */
543 ft_recv_cmd(sess, fp);
544 break;
545 case FC_RCTL_DD_SOL_DATA: /* write data */
546 case FC_RCTL_DD_UNSOL_CTL:
547 case FC_RCTL_DD_SOL_CTL:
548 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
549 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
550 default:
551 printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
552 __func__, fh->fh_r_ctl);
553 fc_frame_free(fp);
554 ft_sess_put(sess); /* undo get from lookup */
555 break;
556 }
557}
558
559/*
560 * Send new command to target.
561 */
562static void ft_send_cmd(struct ft_cmd *cmd)
563{
564 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
565 struct se_cmd *se_cmd;
566 struct fcp_cmnd *fcp;
567 int data_dir;
568 u32 data_len;
569 int task_attr;
570 int ret;
571
572 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
573 if (!fcp)
574 goto err;
575
576 if (fcp->fc_flags & FCP_CFL_LEN_MASK)
577 goto err; /* not handling longer CDBs yet */
578
579 if (fcp->fc_tm_flags) {
580 task_attr = FCP_PTA_SIMPLE;
581 data_dir = DMA_NONE;
582 data_len = 0;
583 } else {
584 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
585 case 0:
586 data_dir = DMA_NONE;
587 break;
588 case FCP_CFL_RDDATA:
589 data_dir = DMA_FROM_DEVICE;
590 break;
591 case FCP_CFL_WRDATA:
592 data_dir = DMA_TO_DEVICE;
593 break;
594 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
595 goto err; /* TBD not supported by tcm_fc yet */
596 }
597 /*
598 * Locate the SAM Task Attr from fc_pri_ta
599 */
600 switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
601 case FCP_PTA_HEADQ:
602 task_attr = MSG_HEAD_TAG;
603 break;
604 case FCP_PTA_ORDERED:
605 task_attr = MSG_ORDERED_TAG;
606 break;
607 case FCP_PTA_ACA:
608 task_attr = MSG_ACA_TAG;
609 break;
610 case FCP_PTA_SIMPLE: /* Fallthrough */
611 default:
612 task_attr = MSG_SIMPLE_TAG;
613 }
614
615
616
617 data_len = ntohl(fcp->fc_dl);
618 cmd->cdb = fcp->fc_cdb;
619 }
620
621 se_cmd = &cmd->se_cmd;
622 /*
623 * Initialize struct se_cmd descriptor from target_core_mod
624 * infrastructure
625 */
626 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
627 data_len, data_dir, task_attr,
628 &cmd->ft_sense_buffer[0]);
629 /*
630 * Check for FCP task management flags
631 */
632 if (fcp->fc_tm_flags) {
633 ft_send_tm(cmd);
634 return;
635 }
636
637 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
638
639 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
640 ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
641 if (ret < 0) {
642 ft_dump_cmd(cmd, __func__);
643 transport_send_check_condition_and_sense(&cmd->se_cmd,
644 cmd->se_cmd.scsi_sense_reason, 0);
645 return;
646 }
647
648 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
649
650 FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
651 ft_dump_cmd(cmd, __func__);
652
653 if (ret == -1) {
654 transport_send_check_condition_and_sense(se_cmd,
655 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
656 transport_generic_free_cmd(se_cmd, 0, 1, 0);
657 return;
658 }
659 if (ret == -2) {
660 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
661 ft_queue_status(se_cmd);
662 else
663 transport_send_check_condition_and_sense(se_cmd,
664 se_cmd->scsi_sense_reason, 0);
665 transport_generic_free_cmd(se_cmd, 0, 1, 0);
666 return;
667 }
668 transport_generic_handle_cdb(se_cmd);
669 return;
670
671err:
672 ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
673 return;
674}
675
676/*
677 * Handle request in the command thread.
678 */
679static void ft_exec_req(struct ft_cmd *cmd)
680{
681 FT_IO_DBG("cmd state %x\n", cmd->state);
682 switch (cmd->state) {
683 case FC_CMD_ST_NEW:
684 ft_send_cmd(cmd);
685 break;
686 default:
687 break;
688 }
689}
690
691/*
692 * Processing thread.
693 * Currently one thread per tpg.
694 */
695int ft_thread(void *arg)
696{
697 struct ft_tpg *tpg = arg;
698 struct se_queue_obj *qobj = &tpg->qobj;
699 struct ft_cmd *cmd;
700 int ret;
701
702 set_user_nice(current, -20);
703
704 while (!kthread_should_stop()) {
705 ret = wait_event_interruptible(qobj->thread_wq,
706 atomic_read(&qobj->queue_cnt) || kthread_should_stop());
707 if (ret < 0 || kthread_should_stop())
708 goto out;
709 cmd = ft_dequeue_cmd(qobj);
710 if (cmd)
711 ft_exec_req(cmd);
712 }
713
714out:
715 return 0;
716}
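
ft_thread() above is the per-tpg processing model in a nutshell: the receive path queues commands, and one kernel thread per target portal group sleeps on the queue object, waking to dequeue and execute work until it is told to stop. The stand-alone user-space sketch below shows the same queue-and-worker shape using pthreads. It is an analogy only, not kernel code: the names (cmd_queue, queue_cmd, dequeue_cmd, worker) are invented for illustration, and the pthread primitives merely stand in for wait_event_interruptible()/kthread_should_stop().

/* Hedged user-space analogue of the queue-and-worker pattern; not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next;
	int id;
};

struct cmd_queue {
	pthread_mutex_t lock;
	pthread_cond_t wake;		/* stands in for qobj->thread_wq */
	struct cmd *head, *tail;
	int stop;			/* stands in for kthread_should_stop() */
};

static void queue_cmd(struct cmd_queue *q, struct cmd *c)
{
	pthread_mutex_lock(&q->lock);
	c->next = NULL;
	*(q->tail ? &q->tail->next : &q->head) = c;
	q->tail = c;
	pthread_cond_signal(&q->wake);	/* like waking the thread_wq */
	pthread_mutex_unlock(&q->lock);
}

static struct cmd *dequeue_cmd(struct cmd_queue *q)
{
	struct cmd *c;

	pthread_mutex_lock(&q->lock);
	while (!q->head && !q->stop)
		pthread_cond_wait(&q->wake, &q->lock);	/* like wait_event */
	c = q->head;
	if (c && !(q->head = c->next))
		q->tail = NULL;
	pthread_mutex_unlock(&q->lock);
	return c;
}

static void *worker(void *arg)
{
	struct cmd_queue *q = arg;
	struct cmd *c;

	while ((c = dequeue_cmd(q)) != NULL) {	/* NULL only after stop */
		printf("exec cmd %d\n", c->id);	/* stands in for ft_exec_req() */
		free(c);
	}
	return NULL;
}

int main(void)
{
	struct cmd_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &q);
	for (int i = 0; i < 3; i++) {
		struct cmd *c = calloc(1, sizeof(*c));

		c->id = i;
		queue_cmd(&q, c);
	}
	pthread_mutex_lock(&q.lock);	/* request shutdown, like kthread_stop() */
	q.stop = 1;
	pthread_cond_signal(&q.wake);
	pthread_mutex_unlock(&q.lock);
	pthread_join(tid, NULL);
	return 0;
}

Build with "cc -pthread"; the worker drains any queued commands before honoring the stop request, mirroring how ft_thread() only exits its loop once woken with nothing left to dequeue.
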
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 000000000000..84e868c255dd
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,669 @@
1/*******************************************************************************
2 * Filename:  tfc_conf.c
3 *
4 * This file contains the configfs implementation for TCM_fc fabric node.
5 * Based on tcm_loop_configfs.c
6 *
7 * Copyright (c) 2010 Cisco Systems, Inc.
8 * Copyright (c) 2009,2010 Rising Tide, Inc.
9 * Copyright (c) 2009,2010 Linux-iSCSI.org
10 *
11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/kthread.h>
32#include <linux/types.h>
33#include <linux/string.h>
34#include <linux/configfs.h>
35#include <linux/ctype.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_fabric_lib.h>
48#include <target/target_core_device.h>
49#include <target/target_core_tpg.h>
50#include <target/target_core_configfs.h>
51#include <target/target_core_base.h>
52#include <target/configfs_macros.h>
53
54#include "tcm_fc.h"
55
56struct target_fabric_configfs *ft_configfs;
57
58LIST_HEAD(ft_lport_list);
59DEFINE_MUTEX(ft_lport_lock);
60
61unsigned int ft_debug_logging;
62module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
63MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
64
65/*
66 * Parse WWN.
67 * If strict, we require lower-case hex and colon separators to be sure
68 * the name is the same as what would be generated by ft_format_wwn()
69 * so the name and wwn are mapped one-to-one.
70 */
71static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
72{
73 const char *cp;
74 char c;
75 u32 nibble;
76 u32 byte = 0;
77 u32 pos = 0;
78 u32 err;
79
80 *wwn = 0;
81 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
82 c = *cp;
83 if (c == '\n' && cp[1] == '\0')
84 continue;
85 if (strict && pos++ == 2 && byte++ < 7) {
86 pos = 0;
87 if (c == ':')
88 continue;
89 err = 1;
90 goto fail;
91 }
92 if (c == '\0') {
93 err = 2;
94 if (strict && byte != 8)
95 goto fail;
96 return cp - name;
97 }
98 err = 3;
99 if (isdigit(c))
100 nibble = c - '0';
101 else if (isxdigit(c) && (islower(c) || !strict))
102 nibble = tolower(c) - 'a' + 10;
103 else
104 goto fail;
105 *wwn = (*wwn << 4) | nibble;
106 }
107 err = 4;
108fail:
109 FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
110 err, cp - name, pos, byte);
111 return -1;
112}
113
114ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
115{
116 u8 b[8];
117
118 put_unaligned_be64(wwn, b);
119 return snprintf(buf, len,
120 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
121 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
122}
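
ft_parse_wwn() and ft_format_wwn() above define the text convention used for lport and ACL names in configfs: the eight WWN bytes rendered as lower-case hex, colon-separated (e.g. 20:00:00:11:22:33:44:55). The stand-alone sketch below round-trips a WWN through that textual form. It is a simplified illustration with invented helper names; in particular it does not enforce every strict-mode rule of ft_parse_wwn().

/* Hedged user-space sketch of the WWN text convention; not the kernel code. */
#include <stdint.h>
#include <stdio.h>

static void wwn_format(char *buf, size_t len, uint64_t wwn)
{
	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		 (unsigned)(wwn >> 56) & 0xff, (unsigned)(wwn >> 48) & 0xff,
		 (unsigned)(wwn >> 40) & 0xff, (unsigned)(wwn >> 32) & 0xff,
		 (unsigned)(wwn >> 24) & 0xff, (unsigned)(wwn >> 16) & 0xff,
		 (unsigned)(wwn >> 8) & 0xff, (unsigned)wwn & 0xff);
}

static int wwn_parse(const char *s, uint64_t *wwn)
{
	unsigned int b[8];

	/* expect exactly eight colon-separated hex bytes */
	if (sscanf(s, "%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x",
		   &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6], &b[7]) != 8)
		return -1;
	*wwn = 0;
	for (int i = 0; i < 8; i++)
		*wwn = (*wwn << 8) | b[i];
	return 0;
}

int main(void)
{
	char buf[32];
	uint64_t wwn;

	wwn_format(buf, sizeof(buf), 0x2000001122334455ULL);
	if (wwn_parse(buf, &wwn) == 0)
		printf("%s -> %#llx\n", buf, (unsigned long long)wwn);
	return 0;
}
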
123
124static ssize_t ft_wwn_show(void *arg, char *buf)
125{
126 u64 *wwn = arg;
127 ssize_t len;
128
129 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
130 buf[len++] = '\n';
131 return len;
132}
133
134static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
135{
136 ssize_t ret;
137 u64 wwn;
138
139 ret = ft_parse_wwn(buf, &wwn, 0);
140 if (ret > 0)
141 *(u64 *)arg = wwn;
142 return ret;
143}
144
145/*
146 * ACL auth ops.
147 */
148
149static ssize_t ft_nacl_show_port_name(
150 struct se_node_acl *se_nacl,
151 char *page)
152{
153 struct ft_node_acl *acl = container_of(se_nacl,
154 struct ft_node_acl, se_node_acl);
155
156 return ft_wwn_show(&acl->node_auth.port_name, page);
157}
158
159static ssize_t ft_nacl_store_port_name(
160 struct se_node_acl *se_nacl,
161 const char *page,
162 size_t count)
163{
164 struct ft_node_acl *acl = container_of(se_nacl,
165 struct ft_node_acl, se_node_acl);
166
167 return ft_wwn_store(&acl->node_auth.port_name, page, count);
168}
169
170TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
171
172static ssize_t ft_nacl_show_node_name(
173 struct se_node_acl *se_nacl,
174 char *page)
175{
176 struct ft_node_acl *acl = container_of(se_nacl,
177 struct ft_node_acl, se_node_acl);
178
179 return ft_wwn_show(&acl->node_auth.node_name, page);
180}
181
182static ssize_t ft_nacl_store_node_name(
183 struct se_node_acl *se_nacl,
184 const char *page,
185 size_t count)
186{
187 struct ft_node_acl *acl = container_of(se_nacl,
188 struct ft_node_acl, se_node_acl);
189
190 return ft_wwn_store(&acl->node_auth.node_name, page, count);
191}
192
193TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
194
195static struct configfs_attribute *ft_nacl_base_attrs[] = {
196 &ft_nacl_port_name.attr,
197 &ft_nacl_node_name.attr,
198 NULL,
199};
200
201/*
202 * ACL ops.
203 */
204
205/*
206 * Add ACL for an initiator. The ACL is named arbitrarily.
207 * The port_name and/or node_name are attributes.
208 */
209static struct se_node_acl *ft_add_acl(
210 struct se_portal_group *se_tpg,
211 struct config_group *group,
212 const char *name)
213{
214 struct ft_node_acl *acl;
215 struct ft_tpg *tpg;
216 u64 wwpn;
217 u32 q_depth;
218
219 FT_CONF_DBG("add acl %s\n", name);
220 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
221
222 if (ft_parse_wwn(name, &wwpn, 1) < 0)
223 return ERR_PTR(-EINVAL);
224
225 acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
226 if (!(acl))
227 return ERR_PTR(-ENOMEM);
228 acl->node_auth.port_name = wwpn;
229
230 q_depth = 32; /* XXX bogus default - get from tpg? */
231 return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
232 &acl->se_node_acl, name, q_depth);
233}
234
235static void ft_del_acl(struct se_node_acl *se_acl)
236{
237 struct se_portal_group *se_tpg = se_acl->se_tpg;
238 struct ft_tpg *tpg;
239 struct ft_node_acl *acl = container_of(se_acl,
240 struct ft_node_acl, se_node_acl);
241
242 FT_CONF_DBG("del acl %s\n",
243 config_item_name(&se_acl->acl_group.cg_item));
244
245 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
246 FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
247 acl, se_acl, tpg, &tpg->se_tpg);
248
249 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
250 kfree(acl);
251}
252
253struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
254{
255 struct ft_node_acl *found = NULL;
256 struct ft_node_acl *acl;
257 struct se_portal_group *se_tpg = &tpg->se_tpg;
258 struct se_node_acl *se_acl;
259
260 spin_lock_bh(&se_tpg->acl_node_lock);
261 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
262 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
263 FT_CONF_DBG("acl %p port_name %llx\n",
264 acl, (unsigned long long)acl->node_auth.port_name);
265 if (acl->node_auth.port_name == rdata->ids.port_name ||
266 acl->node_auth.node_name == rdata->ids.node_name) {
267 FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
268 (unsigned long long)rdata->ids.port_name);
269 found = acl;
270 /* XXX need to hold onto ACL */
271 break;
272 }
273 }
274 spin_unlock_bh(&se_tpg->acl_node_lock);
275 return found;
276}
277
278struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
279{
280 struct ft_node_acl *acl;
281
282 acl = kzalloc(sizeof(*acl), GFP_KERNEL);
283 if (!(acl)) {
284 printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
285 return NULL;
286 }
287 FT_CONF_DBG("acl %p\n", acl);
288 return &acl->se_node_acl;
289}
290
291static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
292 struct se_node_acl *se_acl)
293{
294 struct ft_node_acl *acl = container_of(se_acl,
295 struct ft_node_acl, se_node_acl);
296
297	FT_CONF_DBG("acl %p\n", acl);
298 kfree(acl);
299}
300
301/*
302 * local_port port_group (tpg) ops.
303 */
304static struct se_portal_group *ft_add_tpg(
305 struct se_wwn *wwn,
306 struct config_group *group,
307 const char *name)
308{
309 struct ft_lport_acl *lacl;
310 struct ft_tpg *tpg;
311 unsigned long index;
312 int ret;
313
314 FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
315
316 /*
317 * Name must be "tpgt_" followed by the index.
318 */
319 if (strstr(name, "tpgt_") != name)
320 return NULL;
321 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
322 return NULL;
323
324 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
325 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
326 if (!tpg)
327 return NULL;
328 tpg->index = index;
329 tpg->lport_acl = lacl;
330 INIT_LIST_HEAD(&tpg->lun_list);
331 transport_init_queue_obj(&tpg->qobj);
332
333 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
334 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
335 if (ret < 0) {
336 kfree(tpg);
337 return NULL;
338 }
339
340 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
341 if (IS_ERR(tpg->thread)) {
342 kfree(tpg);
343 return NULL;
344 }
345
346 mutex_lock(&ft_lport_lock);
347 list_add_tail(&tpg->list, &lacl->tpg_list);
348 mutex_unlock(&ft_lport_lock);
349
350 return &tpg->se_tpg;
351}
352
353static void ft_del_tpg(struct se_portal_group *se_tpg)
354{
355 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
356
357 FT_CONF_DBG("del tpg %s\n",
358 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
359
360 kthread_stop(tpg->thread);
361
362 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
363 synchronize_rcu();
364
365 mutex_lock(&ft_lport_lock);
366 list_del(&tpg->list);
367 if (tpg->tport) {
368 tpg->tport->tpg = NULL;
369 tpg->tport = NULL;
370 }
371 mutex_unlock(&ft_lport_lock);
372
373 core_tpg_deregister(se_tpg);
374 kfree(tpg);
375}
376
377/*
378 * Verify that an lport is configured to use the tcm_fc module, and return
379 * the target port group that should be used.
380 *
381 * The caller holds ft_lport_lock.
382 */
383struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
384{
385 struct ft_lport_acl *lacl;
386 struct ft_tpg *tpg;
387
388 list_for_each_entry(lacl, &ft_lport_list, list) {
389 if (lacl->wwpn == lport->wwpn) {
390 list_for_each_entry(tpg, &lacl->tpg_list, list)
391 return tpg; /* XXX for now return first entry */
392 return NULL;
393 }
394 }
395 return NULL;
396}
397
398/*
399 * target config instance ops.
400 */
401
402/*
403 * Add lport to allowed config.
404 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
405 */
406static struct se_wwn *ft_add_lport(
407 struct target_fabric_configfs *tf,
408 struct config_group *group,
409 const char *name)
410{
411 struct ft_lport_acl *lacl;
412 struct ft_lport_acl *old_lacl;
413 u64 wwpn;
414
415 FT_CONF_DBG("add lport %s\n", name);
416 if (ft_parse_wwn(name, &wwpn, 1) < 0)
417 return NULL;
418 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
419 if (!lacl)
420 return NULL;
421 lacl->wwpn = wwpn;
422 INIT_LIST_HEAD(&lacl->tpg_list);
423
424 mutex_lock(&ft_lport_lock);
425 list_for_each_entry(old_lacl, &ft_lport_list, list) {
426 if (old_lacl->wwpn == wwpn) {
427 mutex_unlock(&ft_lport_lock);
428 kfree(lacl);
429 return NULL;
430 }
431 }
432 list_add_tail(&lacl->list, &ft_lport_list);
433 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
434 mutex_unlock(&ft_lport_lock);
435
436 return &lacl->fc_lport_wwn;
437}
438
439static void ft_del_lport(struct se_wwn *wwn)
440{
441 struct ft_lport_acl *lacl = container_of(wwn,
442 struct ft_lport_acl, fc_lport_wwn);
443
444 FT_CONF_DBG("del lport %s\n",
445 config_item_name(&wwn->wwn_group.cg_item));
446 mutex_lock(&ft_lport_lock);
447 list_del(&lacl->list);
448 mutex_unlock(&ft_lport_lock);
449
450 kfree(lacl);
451}
452
453static ssize_t ft_wwn_show_attr_version(
454 struct target_fabric_configfs *tf,
455 char *page)
456{
457 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
458 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
459}
460
461TF_WWN_ATTR_RO(ft, version);
462
463static struct configfs_attribute *ft_wwn_attrs[] = {
464 &ft_wwn_version.attr,
465 NULL,
466};
467
468static char *ft_get_fabric_name(void)
469{
470 return "fc";
471}
472
473static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
474{
475 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
476
477 return tpg->lport_acl->name;
478}
479
480static u16 ft_get_tag(struct se_portal_group *se_tpg)
481{
482 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
483
484 /*
485 * This tag is used when forming SCSI Name identifier in EVPD=1 0x83
486 * to represent the SCSI Target Port.
487 */
488 return tpg->index;
489}
490
491static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
492{
493 return 1;
494}
495
496static int ft_check_false(struct se_portal_group *se_tpg)
497{
498 return 0;
499}
500
501static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
502{
503}
504
505static u16 ft_get_fabric_sense_len(void)
506{
507 return 0;
508}
509
510static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
511{
512 return 0;
513}
514
515static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
516{
517 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
518
519 return tpg->index;
520}
521
522static struct target_core_fabric_ops ft_fabric_ops = {
523 .get_fabric_name = ft_get_fabric_name,
524 .get_fabric_proto_ident = fc_get_fabric_proto_ident,
525 .tpg_get_wwn = ft_get_fabric_wwn,
526 .tpg_get_tag = ft_get_tag,
527 .tpg_get_default_depth = ft_get_default_depth,
528 .tpg_get_pr_transport_id = fc_get_pr_transport_id,
529 .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
530 .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
531 .tpg_check_demo_mode = ft_check_false,
532 .tpg_check_demo_mode_cache = ft_check_false,
533 .tpg_check_demo_mode_write_protect = ft_check_false,
534 .tpg_check_prod_mode_write_protect = ft_check_false,
535 .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
536 .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
537 .tpg_get_inst_index = ft_tpg_get_inst_index,
538 .check_stop_free = ft_check_stop_free,
539 .release_cmd_to_pool = ft_release_cmd,
540 .release_cmd_direct = ft_release_cmd,
541 .shutdown_session = ft_sess_shutdown,
542 .close_session = ft_sess_close,
543 .stop_session = ft_sess_stop,
544 .fall_back_to_erl0 = ft_sess_set_erl0,
545 .sess_logged_in = ft_sess_logged_in,
546 .sess_get_index = ft_sess_get_index,
547 .sess_get_initiator_sid = NULL,
548 .write_pending = ft_write_pending,
549 .write_pending_status = ft_write_pending_status,
550 .set_default_node_attributes = ft_set_default_node_attr,
551 .get_task_tag = ft_get_task_tag,
552 .get_cmd_state = ft_get_cmd_state,
553 .new_cmd_failure = ft_new_cmd_failure,
554 .queue_data_in = ft_queue_data_in,
555 .queue_status = ft_queue_status,
556 .queue_tm_rsp = ft_queue_tm_resp,
557 .get_fabric_sense_len = ft_get_fabric_sense_len,
558 .set_fabric_sense_len = ft_set_fabric_sense_len,
559 .is_state_remove = ft_is_state_remove,
560 /*
561 * Setup function pointers for generic logic in
562 * target_core_fabric_configfs.c
563 */
564 .fabric_make_wwn = &ft_add_lport,
565 .fabric_drop_wwn = &ft_del_lport,
566 .fabric_make_tpg = &ft_add_tpg,
567 .fabric_drop_tpg = &ft_del_tpg,
568 .fabric_post_link = NULL,
569 .fabric_pre_unlink = NULL,
570 .fabric_make_np = NULL,
571 .fabric_drop_np = NULL,
572 .fabric_make_nodeacl = &ft_add_acl,
573 .fabric_drop_nodeacl = &ft_del_acl,
574};
575
576int ft_register_configfs(void)
577{
578 struct target_fabric_configfs *fabric;
579 int ret;
580
581 /*
582 * Register the top level struct config_item_type with TCM core
583 */
584 fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
585 if (!fabric) {
586 printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
587 __func__);
588 return -1;
589 }
590 fabric->tf_ops = ft_fabric_ops;
591
592 /* Allowing support for task_sg_chaining */
593 fabric->tf_ops.task_sg_chaining = 1;
594
595 /*
596 * Setup default attribute lists for various fabric->tf_cit_tmpl
597 */
598 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
599 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
600 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
601 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
602 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
603 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
604 ft_nacl_base_attrs;
605 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
606 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
607 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
608 /*
609 * register the fabric for use within TCM
610 */
611 ret = target_fabric_configfs_register(fabric);
612 if (ret < 0) {
613 FT_CONF_DBG("target_fabric_configfs_register() for"
614 " FC Target failed!\n");
615 printk(KERN_INFO
616 "%s: target_fabric_configfs_register() failed!\n",
617 __func__);
618 target_fabric_configfs_free(fabric);
619 return -1;
620 }
621
622 /*
623 * Setup our local pointer to *fabric.
624 */
625 ft_configfs = fabric;
626 return 0;
627}
628
629void ft_deregister_configfs(void)
630{
631 if (!ft_configfs)
632 return;
633 target_fabric_configfs_deregister(ft_configfs);
634 ft_configfs = NULL;
635}
636
637static struct notifier_block ft_notifier = {
638 .notifier_call = ft_lport_notify
639};
640
641static int __init ft_init(void)
642{
643 if (ft_register_configfs())
644 return -1;
645 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
646 ft_deregister_configfs();
647 return -1;
648 }
649 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
650 fc_lport_iterate(ft_lport_add, NULL);
651 return 0;
652}
653
654static void __exit ft_exit(void)
655{
656 blocking_notifier_chain_unregister(&fc_lport_notifier_head,
657 &ft_notifier);
658 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
659 fc_lport_iterate(ft_lport_del, NULL);
660 ft_deregister_configfs();
661 synchronize_rcu();
662}
663
664#ifdef MODULE
665MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
666MODULE_LICENSE("GPL");
667module_init(ft_init);
668module_exit(ft_exit);
669#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 000000000000..8c4a24077d9d
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,374 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
5 *
6 * Copyright (c) 2007 Intel Corporation. All rights reserved.
7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
8 * Copyright (c) 2008 Mike Christie
9 * Copyright (c) 2009 Rising Tide, Inc.
10 * Copyright (c) 2009 Linux-iSCSI.org
11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27/* XXX TBD some includes may be extraneous */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/version.h>
32#include <generated/utsrelease.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/hash.h>
42#include <asm/unaligned.h>
43#include <scsi/scsi.h>
44#include <scsi/scsi_host.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
47#include <scsi/libfc.h>
48#include <scsi/fc_encode.h>
49
50#include <target/target_core_base.h>
51#include <target/target_core_transport.h>
52#include <target/target_core_fabric_ops.h>
53#include <target/target_core_device.h>
54#include <target/target_core_tpg.h>
55#include <target/target_core_configfs.h>
56#include <target/target_core_base.h>
57#include <target/configfs_macros.h>
58
59#include "tcm_fc.h"
60
61/*
62 * Deliver read data back to initiator.
63 * XXX TBD handle resource problems later.
64 */
65int ft_queue_data_in(struct se_cmd *se_cmd)
66{
67 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
68 struct se_transport_task *task;
69 struct fc_frame *fp = NULL;
70 struct fc_exch *ep;
71 struct fc_lport *lport;
72 struct se_mem *mem;
73 size_t remaining;
74 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
75 u32 mem_off;
76 u32 fh_off = 0;
77 u32 frame_off = 0;
78 size_t frame_len = 0;
79 size_t mem_len;
80 size_t tlen;
81 size_t off_in_page;
82 struct page *page;
83 int use_sg;
84 int error;
85 void *page_addr;
86 void *from;
87 void *to = NULL;
88
89 ep = fc_seq_exch(cmd->seq);
90 lport = ep->lp;
91 cmd->seq = lport->tt.seq_start_next(cmd->seq);
92
93 task = T_TASK(se_cmd);
94 BUG_ON(!task);
95 remaining = se_cmd->data_length;
96
97 /*
98 * Setup to use first mem list entry if any.
99 */
100 if (task->t_tasks_se_num) {
101 mem = list_first_entry(task->t_mem_list,
102 struct se_mem, se_list);
103 mem_len = mem->se_len;
104 mem_off = mem->se_off;
105 page = mem->se_page;
106 } else {
107 mem = NULL;
108 mem_len = remaining;
109 mem_off = 0;
110 page = NULL;
111 }
112
113 /* no scatter/gather in skb for odd word length due to fc_seq_send() */
114 use_sg = !(remaining % 4);
115
116 while (remaining) {
117 if (!mem_len) {
118 BUG_ON(!mem);
119 mem = list_entry(mem->se_list.next,
120 struct se_mem, se_list);
121 mem_len = min((size_t)mem->se_len, remaining);
122 mem_off = mem->se_off;
123 page = mem->se_page;
124 }
125 if (!frame_len) {
126			/*
127			 * If the lport has the Large Send Offload (LSO)
128			 * capability, allow 'frame_len' to be as big as 'lso_max'
129			 * when the indicated transfer length is >= lport->lso_max.
130			 */
131 frame_len = (lport->seq_offload) ? lport->lso_max :
132 cmd->sess->max_frame;
133 frame_len = min(frame_len, remaining);
134 fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
135 if (!fp)
136 return -ENOMEM;
137 to = fc_frame_payload_get(fp, 0);
138 fh_off = frame_off;
139 frame_off += frame_len;
140			/*
141			 * Set up the frame's max payload, which the base driver
142			 * uses to tell the HW the maximum frame size, so that the
143			 * HW can fragment appropriately based on the "gso_max_size"
144			 * of the underlying netdev.
145			 */
146 fr_max_payload(fp) = cmd->sess->max_frame;
147 }
148 tlen = min(mem_len, frame_len);
149
150 if (use_sg) {
151 if (!mem) {
152 BUG_ON(!task->t_task_buf);
153 page_addr = task->t_task_buf + mem_off;
154 /*
155 * In this case, offset is 'offset_in_page' of
156 * (t_task_buf + mem_off) instead of 'mem_off'.
157 */
158 off_in_page = offset_in_page(page_addr);
159 page = virt_to_page(page_addr);
160 tlen = min(tlen, PAGE_SIZE - off_in_page);
161 } else
162 off_in_page = mem_off;
163 BUG_ON(!page);
164 get_page(page);
165 skb_fill_page_desc(fp_skb(fp),
166 skb_shinfo(fp_skb(fp))->nr_frags,
167 page, off_in_page, tlen);
168 fr_len(fp) += tlen;
169 fp_skb(fp)->data_len += tlen;
170 fp_skb(fp)->truesize +=
171 PAGE_SIZE << compound_order(page);
172 } else if (mem) {
173 BUG_ON(!page);
174 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
175 KM_SOFTIRQ0);
176 page_addr = from;
177 from += mem_off & ~PAGE_MASK;
178 tlen = min(tlen, (size_t)(PAGE_SIZE -
179 (mem_off & ~PAGE_MASK)));
180 memcpy(to, from, tlen);
181 kunmap_atomic(page_addr, KM_SOFTIRQ0);
182 to += tlen;
183 } else {
184 from = task->t_task_buf + mem_off;
185 memcpy(to, from, tlen);
186 to += tlen;
187 }
188
189 mem_off += tlen;
190 mem_len -= tlen;
191 frame_len -= tlen;
192 remaining -= tlen;
193
194 if (frame_len &&
195 (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
196 continue;
197 if (!remaining)
198 f_ctl |= FC_FC_END_SEQ;
199 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
200 FC_TYPE_FCP, f_ctl, fh_off);
201 error = lport->tt.seq_send(lport, cmd->seq, fp);
202 if (error) {
203 /* XXX For now, initiator will retry */
204 if (printk_ratelimit())
205 printk(KERN_ERR "%s: Failed to send frame %p, "
206 "xid <0x%x>, remaining %zu, "
207 "lso_max <0x%x>\n",
208 __func__, fp, ep->xid,
209 remaining, lport->lso_max);
210 }
211 }
212 return ft_queue_status(se_cmd);
213}
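
The main loop of ft_queue_data_in() above walks the command's memory list and slices the read data into sequence frames, taking min(mem_len, frame_len) bytes per step and starting a new frame whenever the previous one has been filled. The sketch below reproduces only that chunking arithmetic in user space; the segment structure and function names are invented for illustration, and none of the skb, LSO, or error handling is modelled.

/* Hedged sketch of the frame-chunking arithmetic only; not the kernel path. */
#include <stdio.h>
#include <stddef.h>

struct seg {				/* stands in for one struct se_mem entry */
	size_t len;
};

static void emit_frame(size_t fh_off, size_t frame_len)
{
	printf("frame: relative offset %zu, length %zu\n", fh_off, frame_len);
}

static void chunk(const struct seg *segs, int nsegs, size_t total,
		  size_t max_frame)
{
	size_t remaining = total, frame_len = 0, frame_off = 0, fh_off = 0;
	size_t mem_len = 0, tlen;
	int i = -1;

	while (remaining) {
		if (!mem_len) {			/* advance to the next segment */
			if (++i >= nsegs)
				break;		/* defensive: list ran short */
			mem_len = segs[i].len;
		}
		if (!frame_len) {		/* start a new frame */
			frame_len = max_frame < remaining ? max_frame : remaining;
			fh_off = frame_off;
			frame_off += frame_len;
		}
		tlen = mem_len < frame_len ? mem_len : frame_len;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;
		if (!frame_len)			/* frame is full: send it */
			emit_frame(fh_off, frame_off - fh_off);
	}
}

int main(void)
{
	struct seg segs[] = { { 4096 }, { 4096 }, { 1024 } };

	/* 9216 bytes of read data, cut into frames of at most 2048 bytes */
	chunk(segs, 3, 4096 + 4096 + 1024, 2048);
	return 0;
}
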
214
215/*
216 * Receive write data frame.
217 */
218void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219{
220 struct se_cmd *se_cmd = &cmd->se_cmd;
221 struct fc_seq *seq = cmd->seq;
222 struct fc_exch *ep;
223 struct fc_lport *lport;
224 struct se_transport_task *task;
225 struct fc_frame_header *fh;
226 struct se_mem *mem;
227 u32 mem_off;
228 u32 rel_off;
229 size_t frame_len;
230 size_t mem_len;
231 size_t tlen;
232 struct page *page;
233 void *page_addr;
234 void *from;
235 void *to;
236 u32 f_ctl;
237 void *buf;
238
239 task = T_TASK(se_cmd);
240 BUG_ON(!task);
241
242 fh = fc_frame_header_get(fp);
243 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
244 goto drop;
245
246	/*
247	 * We do not expect even a single byte of payload here.  The payload
248	 * is expected to be copied directly to the user buffers by the
249	 * DDP (large receive offload) feature, hence the BUG_ON if buf is
250	 * non-NULL.
251	 */
252 buf = fc_frame_payload_get(fp, 1);
253 if (cmd->was_ddp_setup && buf) {
254		printk(KERN_INFO "%s: When DDP was setup, did not expect to "
255			"receive a frame with payload; the payload should be "
256			"copied directly to the buffer instead of coming "
257			"via legacy receive queues\n", __func__);
258 BUG_ON(buf);
259 }
260
261	/*
262	 * If the ft_cmd indicated 'ddp_setup', only the last frame should
263	 * arrive with the TSI bit set.  If a data frame appears here without
264	 * the TSI bit set, that is an error condition.  In both cases release
265	 * the DDP context (ddp_done), and in the error case also initiate the
266	 * error recovery mechanism.
267	 */
268 ep = fc_seq_exch(seq);
269 if (cmd->was_ddp_setup) {
270 BUG_ON(!ep);
271 lport = ep->lp;
272 BUG_ON(!lport);
273 }
274 if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
275 f_ctl = ntoh24(fh->fh_f_ctl);
276		/*
277		 * If the TSI bit is set in f_ctl, the last write data frame
278		 * was received successfully: the payload was posted directly
279		 * to the user buffer and only the last frame's header arrives
280		 * on the legacy receive queue.
281		 */
282 if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
283 cmd->write_data_len = lport->tt.ddp_done(lport,
284 ep->xid);
285 goto last_frame;
286 } else {
287			/*
288			 * Updating write_data_len may be meaningless at this
289			 * point, but keep it in case it is needed later for
290			 * debugging or other purposes.
291			 */
292 printk(KERN_ERR "%s: Received frame with TSI bit not"
293 " being SET, dropping the frame, "
294 "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
295 __func__, cmd->sg, cmd->sg_cnt);
296 cmd->write_data_len = lport->tt.ddp_done(lport,
297 ep->xid);
298 lport->tt.seq_exch_abort(cmd->seq, 0);
299 goto drop;
300 }
301 }
302
303 rel_off = ntohl(fh->fh_parm_offset);
304 frame_len = fr_len(fp);
305 if (frame_len <= sizeof(*fh))
306 goto drop;
307 frame_len -= sizeof(*fh);
308 from = fc_frame_payload_get(fp, 0);
309 if (rel_off >= se_cmd->data_length)
310 goto drop;
311 if (frame_len + rel_off > se_cmd->data_length)
312 frame_len = se_cmd->data_length - rel_off;
313
314 /*
315 * Setup to use first mem list entry if any.
316 */
317 if (task->t_tasks_se_num) {
318 mem = list_first_entry(task->t_mem_list,
319 struct se_mem, se_list);
320 mem_len = mem->se_len;
321 mem_off = mem->se_off;
322 page = mem->se_page;
323 } else {
324 mem = NULL;
325 page = NULL;
326 mem_off = 0;
327 mem_len = frame_len;
328 }
329
330 while (frame_len) {
331 if (!mem_len) {
332 BUG_ON(!mem);
333 mem = list_entry(mem->se_list.next,
334 struct se_mem, se_list);
335 mem_len = mem->se_len;
336 mem_off = mem->se_off;
337 page = mem->se_page;
338 }
339 if (rel_off >= mem_len) {
340 rel_off -= mem_len;
341 mem_len = 0;
342 continue;
343 }
344 mem_off += rel_off;
345 mem_len -= rel_off;
346 rel_off = 0;
347
348 tlen = min(mem_len, frame_len);
349
350 if (mem) {
351 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
352 KM_SOFTIRQ0);
353 page_addr = to;
354 to += mem_off & ~PAGE_MASK;
355 tlen = min(tlen, (size_t)(PAGE_SIZE -
356 (mem_off & ~PAGE_MASK)));
357 memcpy(to, from, tlen);
358 kunmap_atomic(page_addr, KM_SOFTIRQ0);
359 } else {
360 to = task->t_task_buf + mem_off;
361 memcpy(to, from, tlen);
362 }
363 from += tlen;
364 frame_len -= tlen;
365 mem_off += tlen;
366 mem_len -= tlen;
367 cmd->write_data_len += tlen;
368 }
369last_frame:
370 if (cmd->write_data_len == se_cmd->data_length)
371 transport_generic_handle_data(se_cmd);
372drop:
373 fc_frame_free(fp);
374}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 000000000000..7491e21cc6ae
--- /dev/null
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,541 @@
1/*
2 * Copyright (c) 2010 Cisco Systems, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18/* XXX TBD some includes may be extraneous */
19
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/version.h>
23#include <generated/utsrelease.h>
24#include <linux/utsname.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/kthread.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/configfs.h>
31#include <linux/ctype.h>
32#include <linux/hash.h>
33#include <linux/rcupdate.h>
34#include <linux/rculist.h>
35#include <linux/kref.h>
36#include <asm/unaligned.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/libfc.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_device.h>
47#include <target/target_core_tpg.h>
48#include <target/target_core_configfs.h>
49#include <target/target_core_base.h>
50#include <target/configfs_macros.h>
51
52#include <scsi/libfc.h>
53#include "tcm_fc.h"
54
55static void ft_sess_delete_all(struct ft_tport *);
56
57/*
58 * Lookup or allocate target local port.
59 * Caller holds ft_lport_lock.
60 */
61static struct ft_tport *ft_tport_create(struct fc_lport *lport)
62{
63 struct ft_tpg *tpg;
64 struct ft_tport *tport;
65 int i;
66
67 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
68 if (tport && tport->tpg)
69 return tport;
70
71 tpg = ft_lport_find_tpg(lport);
72 if (!tpg)
73 return NULL;
74
75 if (tport) {
76 tport->tpg = tpg;
77 return tport;
78 }
79
80 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
81 if (!tport)
82 return NULL;
83
84 tport->lport = lport;
85 tport->tpg = tpg;
86 tpg->tport = tport;
87 for (i = 0; i < FT_SESS_HASH_SIZE; i++)
88 INIT_HLIST_HEAD(&tport->hash[i]);
89
90 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
91 return tport;
92}
93
94/*
95 * Free tport via RCU.
96 */
97static void ft_tport_rcu_free(struct rcu_head *rcu)
98{
99 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
100
101 kfree(tport);
102}
103
104/*
105 * Delete a target local port.
106 * Caller holds ft_lport_lock.
107 */
108static void ft_tport_delete(struct ft_tport *tport)
109{
110 struct fc_lport *lport;
111 struct ft_tpg *tpg;
112
113 ft_sess_delete_all(tport);
114 lport = tport->lport;
115 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
116 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
117
118 tpg = tport->tpg;
119 if (tpg) {
120 tpg->tport = NULL;
121 tport->tpg = NULL;
122 }
123 call_rcu(&tport->rcu, ft_tport_rcu_free);
124}
125
126/*
127 * Add local port.
128 * Called thru fc_lport_iterate().
129 */
130void ft_lport_add(struct fc_lport *lport, void *arg)
131{
132 mutex_lock(&ft_lport_lock);
133 ft_tport_create(lport);
134 mutex_unlock(&ft_lport_lock);
135}
136
137/*
138 * Delete local port.
139 * Called thru fc_lport_iterate().
140 */
141void ft_lport_del(struct fc_lport *lport, void *arg)
142{
143 struct ft_tport *tport;
144
145 mutex_lock(&ft_lport_lock);
146 tport = lport->prov[FC_TYPE_FCP];
147 if (tport)
148 ft_tport_delete(tport);
149 mutex_unlock(&ft_lport_lock);
150}
151
152/*
153 * Notification of local port change from libfc.
154 * Create or delete local port and associated tport.
155 */
156int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
157{
158 struct fc_lport *lport = arg;
159
160 switch (event) {
161 case FC_LPORT_EV_ADD:
162 ft_lport_add(lport, NULL);
163 break;
164 case FC_LPORT_EV_DEL:
165 ft_lport_del(lport, NULL);
166 break;
167 }
168 return NOTIFY_DONE;
169}
170
171/*
172 * Hash function for FC_IDs.
173 */
174static u32 ft_sess_hash(u32 port_id)
175{
176 return hash_32(port_id, FT_SESS_HASH_BITS);
177}
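
ft_sess_hash() above folds a 24-bit FC port ID into a small bucket index with the kernel's hash_32(), a multiplicative hash that keeps only the top FT_SESS_HASH_BITS bits of the product. The user-space sketch below shows the same idea; the multiplier is the classic 32-bit golden-ratio constant and the 6-bit bucket count are both chosen purely for illustration and are not claimed to match hash_32() or FT_SESS_HASH_BITS in this kernel.

/* Hedged sketch of a multiplicative hash for FC_IDs; constants illustrative. */
#include <stdint.h>
#include <stdio.h>

#define SESS_HASH_BITS	6	/* illustrative; the driver uses FT_SESS_HASH_BITS */

static uint32_t hash32_sketch(uint32_t val, unsigned int bits)
{
	/* multiply by an odd constant, keep the top 'bits' bits */
	return (uint32_t)(val * 0x9e3779b9u) >> (32 - bits);
}

int main(void)
{
	uint32_t port_ids[] = { 0x010200, 0x010201, 0x7f0001 };
	unsigned int i;

	for (i = 0; i < sizeof(port_ids) / sizeof(port_ids[0]); i++)
		printf("port_id %06x -> bucket %u\n", (unsigned)port_ids[i],
		       (unsigned)hash32_sketch(port_ids[i], SESS_HASH_BITS));
	return 0;
}
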
178
179/*
180 * Find session in local port.
181 * Sessions and hash lists are RCU-protected.
182 * A reference is taken which must be eventually freed.
183 */
184static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
185{
186 struct ft_tport *tport;
187 struct hlist_head *head;
188 struct hlist_node *pos;
189 struct ft_sess *sess;
190
191 rcu_read_lock();
192 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
193 if (!tport)
194 goto out;
195
196 head = &tport->hash[ft_sess_hash(port_id)];
197 hlist_for_each_entry_rcu(sess, pos, head, hash) {
198 if (sess->port_id == port_id) {
199 kref_get(&sess->kref);
200 rcu_read_unlock();
201 FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
202 return sess;
203 }
204 }
205out:
206 rcu_read_unlock();
207 FT_SESS_DBG("port_id %x not found\n", port_id);
208 return NULL;
209}
210
211/*
212 * Allocate session and enter it in the hash for the local port.
213 * Caller holds ft_lport_lock.
214 */
215static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
216 struct ft_node_acl *acl)
217{
218 struct ft_sess *sess;
219 struct hlist_head *head;
220 struct hlist_node *pos;
221
222 head = &tport->hash[ft_sess_hash(port_id)];
223 hlist_for_each_entry_rcu(sess, pos, head, hash)
224 if (sess->port_id == port_id)
225 return sess;
226
227 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
228 if (!sess)
229 return NULL;
230
231 sess->se_sess = transport_init_session();
232 if (IS_ERR(sess->se_sess)) {
233 kfree(sess);
234 return NULL;
235 }
236 sess->se_sess->se_node_acl = &acl->se_node_acl;
237 sess->tport = tport;
238 sess->port_id = port_id;
239 kref_init(&sess->kref); /* ref for table entry */
240 hlist_add_head_rcu(&sess->hash, head);
241 tport->sess_count++;
242
243 FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
244
245 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
246 sess->se_sess, sess);
247 return sess;
248}
249
250/*
251 * Unhash the session.
252 * Caller holds ft_lport_lock.
253 */
254static void ft_sess_unhash(struct ft_sess *sess)
255{
256 struct ft_tport *tport = sess->tport;
257
258 hlist_del_rcu(&sess->hash);
259 BUG_ON(!tport->sess_count);
260 tport->sess_count--;
261 sess->port_id = -1;
262 sess->params = 0;
263}
264
265/*
266 * Delete session from hash.
267 * Caller holds ft_lport_lock.
268 */
269static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
270{
271 struct hlist_head *head;
272 struct hlist_node *pos;
273 struct ft_sess *sess;
274
275 head = &tport->hash[ft_sess_hash(port_id)];
276 hlist_for_each_entry_rcu(sess, pos, head, hash) {
277 if (sess->port_id == port_id) {
278 ft_sess_unhash(sess);
279 return sess;
280 }
281 }
282 return NULL;
283}
284
285/*
286 * Delete all sessions from tport.
287 * Caller holds ft_lport_lock.
288 */
289static void ft_sess_delete_all(struct ft_tport *tport)
290{
291 struct hlist_head *head;
292 struct hlist_node *pos;
293 struct ft_sess *sess;
294
295 for (head = tport->hash;
296 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
297 hlist_for_each_entry_rcu(sess, pos, head, hash) {
298 ft_sess_unhash(sess);
299 transport_deregister_session_configfs(sess->se_sess);
300 ft_sess_put(sess); /* release from table */
301 }
302 }
303}
304
305/*
306 * TCM ops for sessions.
307 */
308
309/*
310 * Determine whether session is allowed to be shutdown in the current context.
311 * Returns non-zero if the session should be shutdown.
312 */
313int ft_sess_shutdown(struct se_session *se_sess)
314{
315 struct ft_sess *sess = se_sess->fabric_sess_ptr;
316
317 FT_SESS_DBG("port_id %x\n", sess->port_id);
318 return 1;
319}
320
321/*
322 * Remove session and send PRLO.
323 * This is called when the ACL is being deleted or queue depth is changing.
324 */
325void ft_sess_close(struct se_session *se_sess)
326{
327 struct ft_sess *sess = se_sess->fabric_sess_ptr;
328 struct fc_lport *lport;
329 u32 port_id;
330
331 mutex_lock(&ft_lport_lock);
332 lport = sess->tport->lport;
333 port_id = sess->port_id;
334 if (port_id == -1) {
335 mutex_unlock(&ft_lport_lock);
336 return;
337 }
338 FT_SESS_DBG("port_id %x\n", port_id);
339 ft_sess_unhash(sess);
340 mutex_unlock(&ft_lport_lock);
341 transport_deregister_session_configfs(se_sess);
342 ft_sess_put(sess);
343 /* XXX Send LOGO or PRLO */
344 synchronize_rcu(); /* let transport deregister happen */
345}
346
347void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
348{
349 struct ft_sess *sess = se_sess->fabric_sess_ptr;
350
351 FT_SESS_DBG("port_id %x\n", sess->port_id);
352}
353
354int ft_sess_logged_in(struct se_session *se_sess)
355{
356 struct ft_sess *sess = se_sess->fabric_sess_ptr;
357
358 return sess->port_id != -1;
359}
360
361u32 ft_sess_get_index(struct se_session *se_sess)
362{
363 struct ft_sess *sess = se_sess->fabric_sess_ptr;
364
365 return sess->port_id; /* XXX TBD probably not what is needed */
366}
367
368u32 ft_sess_get_port_name(struct se_session *se_sess,
369 unsigned char *buf, u32 len)
370{
371 struct ft_sess *sess = se_sess->fabric_sess_ptr;
372
373 return ft_format_wwn(buf, len, sess->port_name);
374}
375
376void ft_sess_set_erl0(struct se_session *se_sess)
377{
378 /* XXX TBD called when out of memory */
379}
380
381/*
382 * libfc ops involving sessions.
383 */
384
385static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
386 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
387{
388 struct ft_tport *tport;
389 struct ft_sess *sess;
390 struct ft_node_acl *acl;
391 u32 fcp_parm;
392
393 tport = ft_tport_create(rdata->local_port);
394 if (!tport)
395 return 0; /* not a target for this local port */
396
397 acl = ft_acl_get(tport->tpg, rdata);
398 if (!acl)
399 return 0;
400
401 if (!rspp)
402 goto fill;
403
404 if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
405 return FC_SPP_RESP_NO_PA;
406
407 /*
408 * If both target and initiator bits are off, the SPP is invalid.
409 */
410 fcp_parm = ntohl(rspp->spp_params);
411 if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
412 return FC_SPP_RESP_INVL;
413
414 /*
415 * Create session (image pair) only if requested by
416 * EST_IMG_PAIR flag and if the requestor is an initiator.
417 */
418 if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
419 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
420 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
421 return FC_SPP_RESP_CONF;
422 sess = ft_sess_create(tport, rdata->ids.port_id, acl);
423 if (!sess)
424 return FC_SPP_RESP_RES;
425 if (!sess->params)
426 rdata->prli_count++;
427 sess->params = fcp_parm;
428 sess->port_name = rdata->ids.port_name;
429 sess->max_frame = rdata->maxframe_size;
430
431 /* XXX TBD - clearing actions. unit attn, see 4.10 */
432 }
433
434 /*
435 * OR in our service parameters with other provider (initiator), if any.
436 * TBD XXX - indicate RETRY capability?
437 */
438fill:
439 fcp_parm = ntohl(spp->spp_params);
440 spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
441 return FC_SPP_RESP_ACK;
442}
443
444/**
445 * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
446 * @rdata: remote port private
447 * @spp_len: service parameter page length
448 * @rspp: received service parameter page (NULL for outgoing PRLI)
449 * @spp: response service parameter page
450 *
451 * Returns spp response code.
452 */
453static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
454 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
455{
456 int ret;
457
458 mutex_lock(&ft_lport_lock);
459 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
460 mutex_unlock(&ft_lport_lock);
461 FT_SESS_DBG("port_id %x flags %x ret %x\n",
462 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
463 return ret;
464}
465
466static void ft_sess_rcu_free(struct rcu_head *rcu)
467{
468 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
469
470 transport_deregister_session(sess->se_sess);
471 kfree(sess);
472}
473
474static void ft_sess_free(struct kref *kref)
475{
476 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
477
478 call_rcu(&sess->rcu, ft_sess_rcu_free);
479}
480
481void ft_sess_put(struct ft_sess *sess)
482{
483 int sess_held = atomic_read(&sess->kref.refcount);
484
485 BUG_ON(!sess_held);
486 kref_put(&sess->kref, ft_sess_free);
487}
488
489static void ft_prlo(struct fc_rport_priv *rdata)
490{
491 struct ft_sess *sess;
492 struct ft_tport *tport;
493
494 mutex_lock(&ft_lport_lock);
495 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
496 if (!tport) {
497 mutex_unlock(&ft_lport_lock);
498 return;
499 }
500 sess = ft_sess_delete(tport, rdata->ids.port_id);
501 if (!sess) {
502 mutex_unlock(&ft_lport_lock);
503 return;
504 }
505 mutex_unlock(&ft_lport_lock);
506 transport_deregister_session_configfs(sess->se_sess);
507 ft_sess_put(sess); /* release from table */
508 rdata->prli_count--;
509 /* XXX TBD - clearing actions. unit attn, see 4.10 */
510}
511
512/*
513 * Handle incoming FCP request.
514 * Caller has verified that the frame is type FCP.
515 */
516static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
517{
518 struct ft_sess *sess;
519 u32 sid = fc_frame_sid(fp);
520
521 FT_SESS_DBG("sid %x\n", sid);
522
523 sess = ft_sess_get(lport, sid);
524 if (!sess) {
525 FT_SESS_DBG("sid %x sess lookup failed\n", sid);
526 /* TBD XXX - if FCP_CMND, send PRLO */
527 fc_frame_free(fp);
528 return;
529 }
530 ft_recv_req(sess, fp); /* must do ft_sess_put() */
531}
532
533/*
534 * Provider ops for libfc.
535 */
536struct fc4_prov ft_prov = {
537 .prli = ft_prli,
538 .prlo = ft_prlo,
539 .recv = ft_recv,
540 .module = THIS_MODULE,
541};