author:    Nicholas Bellinger <nab@linux-iscsi.org>  2010-12-17 14:11:26 -0500
committer: James Bottomley <James.Bottomley@suse.de>  2011-01-14 11:12:29 -0500
commit:    c66ac9db8d4ad9994a02b3e933ea2ccc643e1fe5
tree:      71c6344688bf56ea6aaf18c586ab69ff4f077ade
parent:    f4013c3879d1bbd9f3ab8351185decd049502368
[SCSI] target: Add LIO target core v4.0.0-rc6
LIO target is a full-featured in-kernel target framework with the
following feature set:

High-performance, non-blocking, multithreaded architecture with SIMD
support.

Advanced SCSI feature set:

  * Persistent Reservations (PRs)
  * Asymmetric Logical Unit Assignment (ALUA)
  * Protocol and intra-nexus multiplexing, load-balancing and failover (MC/S)
  * Full Error Recovery (ERL=0,1,2)
  * Active/active task migration and session continuation (ERL=2)
  * Thin LUN provisioning (UNMAP and WRITE_SAMExx)

Multiprotocol target plugins

Storage media independence:

  * Virtualization of all storage media; transparent mapping of IO to LUNs
  * No hard limits on number of LUNs per Target; maximum LUN size ~750 TB
  * Backstores: SATA, SAS, SCSI, BluRay, DVD, FLASH, USB, ramdisk, etc.

Standards compliance:

  * Full compliance with IETF (RFC 3720)
  * Full implementation of SPC-4 PRs and ALUA

Significant code cleanups done by Christoph Hellwig.
[jejb: fix up for new block bdev exclusive interface. Minor fixes from
Randy Dunlap and Dan Carpenter.]
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
44 files changed, 30521 insertions, 0 deletions
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
new file mode 100755
index 000000000000..dbeb8a0d7175
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.py
@@ -0,0 +1,1094 @@
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse

tcm_dir = ""

fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
	print msg
	sys.exit(1)

def tcm_mod_create_module_subdir(fabric_mod_dir_var):

	if os.path.isdir(fabric_mod_dir_var):
		return 1

	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	# os.mkdir() returns None and raises OSError on failure, so catch
	# the exception instead of testing an always-false return value
	try:
		os.mkdir(fabric_mod_dir_var)
	except OSError:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	# open() raises IOError on failure rather than returning a false value
	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += "	u64 nport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* SCSI protocol the lport is providing */\n"
	buf += "	u8 lport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"

	# file.write() returns None; failures raise IOError
	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"

	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += "	u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Initiator port */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"

	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"

	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* ASCII formatted InitiatorName */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"

	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"

	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):

	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)

	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print "Writing file: " + f

	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_transport.h>\n"
	buf += "#include <target/target_core_fabric_ops.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
	buf += "#include <target/target_core_fabric_lib.h>\n"
	buf += "#include <target/target_core_device.h>\n"
	buf += "#include <target/target_core_tpg.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include <" + fabric_mod_name + "_base.h>\n"
	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"

	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += "	struct se_portal_group *se_tpg,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n"

	buf += "	u32 nexus_depth;\n\n"
	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n"
	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += "	if (!(se_nacl_new))\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += "	nexus_depth = 1;\n"
	buf += "	/*\n"
	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explicit\n"
	buf += "	 */\n"
	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += "				name, nexus_depth);\n"
	buf += "	if (IS_ERR(se_nacl)) {\n"
	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += "		return se_nacl;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += "	 */\n"
	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"

	buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return se_nacl;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += "	kfree(nacl);\n"
	buf += "}\n\n"

	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += "	struct se_wwn *wwn,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += "	unsigned long tpgt;\n"
	buf += "	int ret;\n\n"
	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
	buf += "		return ERR_PTR(-EINVAL);\n"
	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += "		return ERR_PTR(-EINVAL);\n\n"
	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += "	if (!(tpg)) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += "				&tpg->se_tpg, (void *)tpg,\n"
	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		kfree(tpg);\n"
	buf += "		return NULL;\n"
	buf += "	}\n"
	buf += "	return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += "	core_tpg_deregister(se_tpg);\n"
	buf += "	kfree(tpg);\n"
	buf += "}\n\n"

	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n\n"

	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n\n"
	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += "	if (!(" + fabric_mod_port + ")) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"

	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += "	kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	char *page)\n"
	buf += "{\n"
	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += "	NULL,\n"
	buf += "};\n\n"

	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += "	.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
	buf += "	.get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += "	.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += "	.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
	buf += "	.tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
	buf += "	.tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += "	.tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += "	.tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += "	.tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += "	.tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += "	.tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += "	.release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
	buf += "	.close_session = " + fabric_mod_name + "_close_session,\n"
	buf += "	.stop_session = " + fabric_mod_name + "_stop_session,\n"
	buf += "	.fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
	buf += "	.sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
	buf += "	.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
	buf += "	.sess_get_initiator_sid = NULL,\n"
	buf += "	.write_pending = " + fabric_mod_name + "_write_pending,\n"
	buf += "	.write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
	buf += "	.set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += "	.get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
	buf += "	.get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
	buf += "	.new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
	buf += "	.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
	buf += "	.queue_status = " + fabric_mod_name + "_queue_status,\n"
	buf += "	.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += "	.get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += "	.set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += "	.is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
	buf += "	.pack_lun = " + fabric_mod_name + "_pack_lun,\n"
	buf += "	/*\n"
	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += "	 */\n"
	buf += "	.fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += "	.fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += "	.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
	buf += "	.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
	buf += "	.fabric_post_link = NULL,\n"
	buf += "	.fabric_pre_unlink = NULL,\n"
	buf += "	.fabric_make_np = NULL,\n"
	buf += "	.fabric_drop_np = NULL,\n"
	buf += "	.fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
	buf += "	.fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"

	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += "	struct target_fabric_configfs *fabric;\n"
	buf += "	int ret;\n\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "	/*\n"
	buf += "	 * Register the top level struct config_item_type with TCM core\n"
	buf += "	 */\n"
	# fabric_mod_name[4:] strips the leading "tcm_" prefix for the fabric name
	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += "	if (!(fabric)) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += "		return -ENOMEM;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += "	 */\n"
	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += "	/*\n"
	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += "	 */\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += "	/*\n"
	buf += "	 * Register the fabric for use within TCM\n"
	buf += "	 */\n"
	buf += "	ret = target_fabric_configfs_register(fabric);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += "		return ret;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup our local pointer to *fabric\n"
	buf += "	 */\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
	buf += "		return;\n\n"
	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "}\n\n"

	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += "	int ret;\n\n"
	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += "	if (ret < 0)\n"
	buf += "		return ret;\n\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
	buf += "}\n\n"

	buf += "#ifdef MODULE\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	buf += "#endif\n"

	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	return
def tcm_mod_scan_fabric_ops(tcm_dir):

	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"

	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0

	p = open(fabric_ops_api, 'r')

	# Walk the header and collect every function pointer declaration
	# (lines containing '(*') from struct target_core_fabric_ops
	line = p.readline()
	while line:
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue

		if process_fo == 0:
			process_fo = 1
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue

			fabric_ops.append(line.rstrip())
			continue

		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue

		fabric_ops.append(line.rstrip())

	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	buf = ""
	bufi = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print "Writing file: " + f

	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print "Writing file: " + fi

	try:
		pi = open(fi, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + fi)

	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_transport.h>\n"
	buf += "#include <target/target_core_fabric_ops.h>\n"
	buf += "#include <target/target_core_fabric_lib.h>\n"
	buf += "#include <target/target_core_device.h>\n"
	buf += "#include <target/target_core_tpg.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <" + fabric_mod_name + "_base.h>\n"
	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"

	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"

	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

	# Emit a stub definition into buf, plus a matching prototype into
	# bufi, for each function pointer collected from the fabric ops API
	total_fabric_ops = len(fabric_ops)
	i = 0

	while i < total_fabric_ops:
		fo = fabric_ops[i]
		i += 1
		# print "fabric_ops: " + fo

		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue

		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	u8 proto_id;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"

			buf += "	}\n\n"
			buf += "	return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"

		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"

		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code,\n"
			buf += "	unsigned char *buf)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"

			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *, unsigned char *);\n"

		if re.search('get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"

			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *);\n"

		if re.search('parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	const char *buf,\n"
			buf += "	u32 *out_tid_len,\n"
			buf += "	char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	char *tid = NULL;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"

			buf += "	}\n\n"
			buf += "	return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += "			const char *, u32 *, char **);\n"

		if re.search('alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += "	if (!(nacl)) {\n"
			buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += "		return NULL;\n"
			buf += "	}\n\n"
			buf += "	return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"

		if re.search('release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += "	kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *);\n"

		if re.search('tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"

		if re.search('release_cmd_to_pool', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"

		if re.search('shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"

		if re.search('close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"

		if re.search('stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep, int conn_sleep)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"

		if re.search('fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"

		if re.search('sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"

		if re.search('sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"

		if re.search('write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"

		if re.search('write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"

		if re.search('set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"

		if re.search('get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"

		if re.search('get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"

		if re.search('new_cmd_failure\)\(', fo):
			buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"

		if re.search('queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"

		if re.search('queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

		if re.search('queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

		if re.search('get_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"

		if re.search('set_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"

		if re.search('is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"

		if re.search('pack_lun\)\(', fo):
			buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
			buf += "{\n"
			buf += "	WARN_ON(lun >= 256);\n"
			buf += "	/* Caller wants this byte-swapped */\n"
			buf += "	return cpu_to_le64((lun & 0xff) << 8);\n"
			buf += "}\n\n"
			bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"

	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	try:
		pi.write(bufi)
	except IOError:
		tcm_mod_err("Unable to write fi: " + fi)

	pi.close()
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):

	buf = ""
	f = fabric_mod_dir_var + "/Kbuild"
	print "Writing file: " + f

	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
	buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
	buf += "			   " + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):

	buf = ""
	f = fabric_mod_dir_var + "/Kconfig"
	print "Writing file: " + f

	try:
		p = open(f, 'w')
	except IOError:
		tcm_mod_err("Unable to open file: " + f)

	buf = "config " + fabric_mod_name.upper() + "\n"
	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
	buf += "	default n\n"
	buf += "	---help---\n"
	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)

	p.close()
	return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
	kbuild = tcm_dir + "/drivers/target/Kbuild"

	f = open(kbuild, 'a')
	f.write(buf)
	f.close()
	return

def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
	kconfig = tcm_dir + "/drivers/target/Kconfig"

	f = open(kconfig, 'a')
	f.write(buf)
	f.close()
	return
def main(modname, proto_ident):
	# proto_ident = "FC"
	# proto_ident = "SAS"
	# proto_ident = "iSCSI"

	tcm_dir = os.getcwd()
	tcm_dir += "/../../"
	print "tcm_dir: " + tcm_dir
	fabric_mod_name = modname
	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
	print "Set fabric_mod_name: " + fabric_mod_name
	print "Set fabric_mod_dir: " + fabric_mod_dir
	print "Using proto_ident: " + proto_ident

	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)

	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
	if ret:
		print "tcm_mod_create_module_subdir() failed because module already exists!"
		sys.exit(1)

	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_scan_fabric_ops(tcm_dir)
	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kbuild..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)

	return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)

if __name__ == "__main__":

	main(str(opts.modname), opts.protoident)
diff --git a/Documentation/target/tcm_mod_builder.txt b/Documentation/target/tcm_mod_builder.txt
new file mode 100644
index 000000000000..84533d8e747f
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.txt
@@ -0,0 +1,145 @@
>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<<

Greetings all,

This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py
script to generate a brand new, functional TCM v4 fabric .ko module of your
very own, that once built can immediately be loaded to start accessing the
new TCM/ConfigFS fabric skeleton, by simply using:

	modprobe $TCM_NEW_MOD
	mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD

This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the
following:

*) Generate new API callers for drivers/target/target_core_fabric_configfs.c logic
   ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg(),
   ->make_wwn(), ->drop_wwn().  These are generated into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
*) Generate basic infrastructure for loading/unloading LKMs and the TCM/ConfigFS
   fabric module using a skeleton struct target_core_fabric_ops API template.
*) Based on the user defined T10 Proto_Ident for the new fabric module being built,
   the TransportID / Initiator and Target WWPN related handlers for
   SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
   using drivers/target/target_core_fabric_lib.c logic.
*) NOP API calls for all other Data I/O path and fabric dependent attribute logic
   in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c (a sample stub is shown below).
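For illustration, this is the shape of the NOP stubs the script emits into
$TCM_NEW_MOD_fabric.c, shown here as generated for the tcm_nab5000 example
used in the rest of this HOWTO:

	int tcm_nab5000_write_pending(struct se_cmd *se_cmd)
	{
		return 0;
	}

Each stub gets a matching prototype in $TCM_NEW_MOD_fabric.h and is wired
into the skeleton struct target_core_fabric_ops in $TCM_NEW_MOD_configfs.c.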

tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m
$FABRIC_MOD_NAME' parameters; actually running the script looks like:

target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000
31 | tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../ | ||
32 | Set fabric_mod_name: tcm_nab5000 | ||
33 | Set fabric_mod_dir: | ||
34 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000 | ||
35 | Using proto_ident: iSCSI | ||
36 | Creating fabric_mod_dir: | ||
37 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000 | ||
38 | Writing file: | ||
39 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h | ||
40 | Using tcm_mod_scan_fabric_ops: | ||
41 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h | ||
42 | Writing file: | ||
43 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c | ||
44 | Writing file: | ||
45 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h | ||
46 | Writing file: | ||
47 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c | ||
48 | Writing file: | ||
49 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild | ||
50 | Writing file: | ||
51 | /mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig | ||
52 | Would you like to add tcm_nab5000 to drivers/target/Kbuild..? [yes,no]: yes | ||
53 | Would you like to add tcm_nab5000 to drivers/target/Kconfig..? [yes,no]: yes | ||
54 | |||
55 | At the end of tcm_mod_builder.py, the script will ask whether to add the following | ||
56 | line to drivers/target/Kbuild: | ||
57 | |||
58 | obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000/ | ||
59 | |||
60 | and the same for drivers/target/Kconfig: | ||
61 | |||
62 | source "drivers/target/tcm_nab5000/Kconfig" | ||
63 | |||
64 | *) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item: | ||
65 | |||
66 | <M> TCM_NAB5000 fabric module | ||
67 | |||
68 | *) Build using 'make modules'; once completed, you will have: | ||
69 | |||
70 | target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/ | ||
71 | total 1348 | ||
72 | drwxr-xr-x 2 root root 4096 2010-10-05 03:23 . | ||
73 | drwxr-xr-x 9 root root 4096 2010-10-05 03:22 .. | ||
74 | -rw-r--r-- 1 root root 282 2010-10-05 03:22 Kbuild | ||
75 | -rw-r--r-- 1 root root 171 2010-10-05 03:22 Kconfig | ||
76 | -rw-r--r-- 1 root root 49 2010-10-05 03:23 modules.order | ||
77 | -rw-r--r-- 1 root root 738 2010-10-05 03:22 tcm_nab5000_base.h | ||
78 | -rw-r--r-- 1 root root 9096 2010-10-05 03:22 tcm_nab5000_configfs.c | ||
79 | -rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o | ||
80 | -rw-r--r-- 1 root root 40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd | ||
81 | -rw-r--r-- 1 root root 5414 2010-10-05 03:22 tcm_nab5000_fabric.c | ||
82 | -rw-r--r-- 1 root root 2016 2010-10-05 03:22 tcm_nab5000_fabric.h | ||
83 | -rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o | ||
84 | -rw-r--r-- 1 root root 40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd | ||
85 | -rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko | ||
86 | -rw-r--r-- 1 root root 265 2010-10-05 03:23 .tcm_nab5000.ko.cmd | ||
87 | -rw-r--r-- 1 root root 459 2010-10-05 03:23 tcm_nab5000.mod.c | ||
88 | -rw-r--r-- 1 root root 23896 2010-10-05 03:23 tcm_nab5000.mod.o | ||
89 | -rw-r--r-- 1 root root 22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd | ||
90 | -rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o | ||
91 | -rw-r--r-- 1 root root 211 2010-10-05 03:23 .tcm_nab5000.o.cmd | ||
92 | |||
93 | *) Load the new module, create a lun_0 configfs group, and add a new TCM Core | ||
94 | IBLOCK backstore symlink to the port: | ||
95 | |||
96 | target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko | ||
97 | target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0 | ||
98 | target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/ | ||
99 | target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port | ||
100 | |||
101 | target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd - | ||
102 | target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/ | ||
103 | /sys/kernel/config/target/nab5000/ | ||
104 | |-- discovery_auth | ||
105 | |-- iqn.foo | ||
106 | | `-- tpgt_1 | ||
107 | | |-- acls | ||
108 | | |-- attrib | ||
109 | | |-- lun | ||
110 | | | `-- lun_0 | ||
111 | | | |-- alua_tg_pt_gp | ||
112 | | | |-- alua_tg_pt_offline | ||
113 | | | |-- alua_tg_pt_status | ||
114 | | | |-- alua_tg_pt_write_md | ||
115 | | | `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0 | ||
116 | | |-- np | ||
117 | | `-- param | ||
118 | `-- version | ||
119 | |||
120 | target:/mnt/sdb/lio-core-2.6.git# lsmod | ||
121 | Module Size Used by | ||
122 | tcm_nab5000 3935 4 | ||
123 | iscsi_target_mod 193211 0 | ||
124 | target_core_stgt 8090 0 | ||
125 | target_core_pscsi 11122 1 | ||
126 | target_core_file 9172 2 | ||
127 | target_core_iblock 9280 1 | ||
128 | target_core_mod 228575 31 | ||
129 | tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock | ||
130 | libfc 73681 0 | ||
131 | scsi_debug 56265 0 | ||
132 | scsi_tgt 8666 1 target_core_stgt | ||
133 | configfs 20644 2 target_core_mod | ||
134 | |||
135 | ---------------------------------------------------------------------- | ||
136 | |||
137 | Future TODO items: | ||
138 | |||
139 | *) Add more T10 proto_idents | ||
140 | *) Make tcm_mod_dump_fabric_ops() smarter and generate function pointer | ||
141 | defs directly from include/target/target_core_fabric_ops.h:struct target_core_fabric_ops | ||
142 | structure members. | ||
143 | |||
144 | October 5th, 2010 | ||
145 | Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
diff --git a/drivers/Kconfig b/drivers/Kconfig index dd0a5b5e9bf3..9bfb71ff3a6a 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -26,6 +26,8 @@ source "drivers/ata/Kconfig" | |||
26 | 26 | ||
27 | source "drivers/md/Kconfig" | 27 | source "drivers/md/Kconfig" |
28 | 28 | ||
29 | source "drivers/target/Kconfig" | ||
30 | |||
29 | source "drivers/message/fusion/Kconfig" | 31 | source "drivers/message/fusion/Kconfig" |
30 | 32 | ||
31 | source "drivers/firewire/Kconfig" | 33 | source "drivers/firewire/Kconfig" |
diff --git a/drivers/Makefile b/drivers/Makefile index ef5132469f58..7eb35f479461 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -46,6 +46,7 @@ obj-y += macintosh/ | |||
46 | obj-$(CONFIG_IDE) += ide/ | 46 | obj-$(CONFIG_IDE) += ide/ |
47 | obj-$(CONFIG_SCSI) += scsi/ | 47 | obj-$(CONFIG_SCSI) += scsi/ |
48 | obj-$(CONFIG_ATA) += ata/ | 48 | obj-$(CONFIG_ATA) += ata/ |
49 | obj-$(CONFIG_TARGET_CORE) += target/ | ||
49 | obj-$(CONFIG_MTD) += mtd/ | 50 | obj-$(CONFIG_MTD) += mtd/ |
50 | obj-$(CONFIG_SPI) += spi/ | 51 | obj-$(CONFIG_SPI) += spi/ |
51 | obj-y += net/ | 52 | obj-y += net/ |
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig new file mode 100644 index 000000000000..2fac3be209ac --- /dev/null +++ b/drivers/target/Kconfig | |||
@@ -0,0 +1,32 @@ | |||
1 | |||
2 | menuconfig TARGET_CORE | ||
3 | tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" | ||
4 | depends on SCSI && BLOCK | ||
5 | select CONFIGFS_FS | ||
6 | default n | ||
7 | help | ||
8 | Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled | ||
9 | control path for target_core_mod. This includes built-in TCM RAMDISK | ||
10 | subsystem logic for virtual LUN 0 access | ||
11 | |||
12 | if TARGET_CORE | ||
13 | |||
14 | config TCM_IBLOCK | ||
15 | tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK" | ||
16 | help | ||
17 | Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered | ||
18 | access to Linux/Block devices using BIO | ||
19 | |||
20 | config TCM_FILEIO | ||
21 | tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS" | ||
22 | help | ||
23 | Say Y here to enable the TCM/FILEIO subsystem plugin for buffered | ||
24 | access to Linux/VFS struct file or struct block_device | ||
25 | |||
26 | config TCM_PSCSI | ||
27 | tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI" | ||
28 | help | ||
29 | Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered | ||
30 | passthrough access to Linux/SCSI device | ||
31 | |||
32 | endif | ||
diff --git a/drivers/target/Makefile b/drivers/target/Makefile new file mode 100644 index 000000000000..5cfd70819f08 --- /dev/null +++ b/drivers/target/Makefile | |||
@@ -0,0 +1,24 @@ | |||
1 | EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/ | ||
2 | |||
3 | target_core_mod-y := target_core_configfs.o \ | ||
4 | target_core_device.o \ | ||
5 | target_core_fabric_configfs.o \ | ||
6 | target_core_fabric_lib.o \ | ||
7 | target_core_hba.o \ | ||
8 | target_core_pr.o \ | ||
9 | target_core_alua.o \ | ||
10 | target_core_scdb.o \ | ||
11 | target_core_tmr.o \ | ||
12 | target_core_tpg.o \ | ||
13 | target_core_transport.o \ | ||
14 | target_core_cdb.o \ | ||
15 | target_core_ua.o \ | ||
16 | target_core_rd.o \ | ||
17 | target_core_mib.o | ||
18 | |||
19 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | ||
20 | |||
21 | # Subsystem modules | ||
22 | obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o | ||
23 | obj-$(CONFIG_TCM_FILEIO) += target_core_file.o | ||
24 | obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o | ||
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c new file mode 100644 index 000000000000..2c5fcfed5934 --- /dev/null +++ b/drivers/target/target_core_alua.c | |||
@@ -0,0 +1,1991 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_alua.c | ||
3 | * | ||
4 | * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA) | ||
5 | * | ||
6 | * Copyright (c) 2009-2010 Rising Tide Systems | ||
7 | * Copyright (c) 2009-2010 Linux-iSCSI.org | ||
8 | * | ||
9 | * Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | ******************************************************************************/ | ||
26 | |||
27 | #include <linux/version.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/configfs.h> | ||
31 | #include <scsi/scsi.h> | ||
32 | #include <scsi/scsi_cmnd.h> | ||
33 | |||
34 | #include <target/target_core_base.h> | ||
35 | #include <target/target_core_device.h> | ||
36 | #include <target/target_core_transport.h> | ||
37 | #include <target/target_core_fabric_ops.h> | ||
38 | #include <target/target_core_configfs.h> | ||
39 | |||
40 | #include "target_core_alua.h" | ||
41 | #include "target_core_hba.h" | ||
42 | #include "target_core_ua.h" | ||
43 | |||
44 | static int core_alua_check_transition(int state, int *primary); | ||
45 | static int core_alua_set_tg_pt_secondary_state( | ||
46 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
47 | struct se_port *port, int explict, int offline); | ||
48 | |||
49 | /* | ||
50 | * REPORT_TARGET_PORT_GROUPS | ||
51 | * | ||
52 | * See spc4r17 section 6.27 | ||
53 | */ | ||
54 | int core_emulate_report_target_port_groups(struct se_cmd *cmd) | ||
55 | { | ||
56 | struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; | ||
57 | struct se_port *port; | ||
58 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
59 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
60 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
61 | u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first | ||
62 | Target port group descriptor */ | ||
63 | |||
64 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
65 | list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, | ||
66 | tg_pt_gp_list) { | ||
67 | /* | ||
68 | * PREF: Preferred target port bit, determine if this | ||
69 | * bit should be set for port group. | ||
70 | */ | ||
71 | if (tg_pt_gp->tg_pt_gp_pref) | ||
72 | buf[off] = 0x80; | ||
73 | /* | ||
74 | * Set the ASYMMETRIC ACCESS State | ||
75 | */ | ||
76 | buf[off++] |= (atomic_read( | ||
77 | &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); | ||
78 | /* | ||
79 | * Set supported ASYMMETRIC ACCESS State bits | ||
80 | */ | ||
81 | buf[off] = 0x80; /* T_SUP */ | ||
82 | buf[off] |= 0x40; /* O_SUP */ | ||
83 | buf[off] |= 0x8; /* U_SUP */ | ||
84 | buf[off] |= 0x4; /* S_SUP */ | ||
85 | buf[off] |= 0x2; /* AN_SUP */ | ||
86 | buf[off++] |= 0x1; /* AO_SUP */ | ||
87 | /* | ||
88 | * TARGET PORT GROUP | ||
89 | */ | ||
90 | buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); | ||
91 | buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); | ||
92 | |||
93 | off++; /* Skip over Reserved */ | ||
94 | /* | ||
95 | * STATUS CODE | ||
96 | */ | ||
97 | buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); | ||
98 | /* | ||
99 | * Vendor Specific field | ||
100 | */ | ||
101 | buf[off++] = 0x00; | ||
102 | /* | ||
103 | * TARGET PORT COUNT | ||
104 | */ | ||
105 | buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); | ||
106 | rd_len += 8; | ||
107 | |||
108 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
109 | list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, | ||
110 | tg_pt_gp_mem_list) { | ||
111 | port = tg_pt_gp_mem->tg_pt; | ||
112 | /* | ||
113 | * Start Target Port descriptor format | ||
114 | * | ||
115 | * See spc4r17 section 6.2.7 Table 247 | ||
116 | */ | ||
117 | off += 2; /* Skip over Obsolete */ | ||
118 | /* | ||
119 | * Set RELATIVE TARGET PORT IDENTIFIER | ||
120 | */ | ||
121 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | ||
122 | buf[off++] = (port->sep_rtpi & 0xff); | ||
123 | rd_len += 4; | ||
124 | } | ||
125 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
126 | } | ||
127 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
128 | /* | ||
129 | * Set the RETURN DATA LENGTH set in the header of the DataIN Payload | ||
130 | */ | ||
131 | buf[0] = ((rd_len >> 24) & 0xff); | ||
132 | buf[1] = ((rd_len >> 16) & 0xff); | ||
133 | buf[2] = ((rd_len >> 8) & 0xff); | ||
134 | buf[3] = (rd_len & 0xff); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
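
The RETURN DATA LENGTH header filled in above is a plain 32-bit big-endian
value occupying the first four bytes of the DataIN payload. As a standalone
illustration (ordinary userspace C, nothing kernel-specific assumed), the
matching encode/decode pair is:

	#include <stdint.h>

	/* Encode/decode the 32-bit big-endian RETURN DATA LENGTH header,
	 * mirroring the byte shifts at the end of
	 * core_emulate_report_target_port_groups() above. */
	static void put_be32(uint8_t *buf, uint32_t len)
	{
		buf[0] = (len >> 24) & 0xff;
		buf[1] = (len >> 16) & 0xff;
		buf[2] = (len >> 8) & 0xff;
		buf[3] = len & 0xff;
	}

	static uint32_t get_be32(const uint8_t *buf)
	{
		return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
		       ((uint32_t)buf[2] << 8) | (uint32_t)buf[3];
	}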
138 | |||
139 | /* | ||
140 | * SET_TARGET_PORT_GROUPS for explicit ALUA operation. | ||
141 | * | ||
142 | * See spc4r17 section 6.35 | ||
143 | */ | ||
144 | int core_emulate_set_target_port_groups(struct se_cmd *cmd) | ||
145 | { | ||
146 | struct se_device *dev = SE_DEV(cmd); | ||
147 | struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; | ||
148 | struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep; | ||
149 | struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl; | ||
150 | struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; | ||
151 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; | ||
152 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
153 | unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ | ||
154 | u32 len = 4; /* Skip over RESERVED area in header */ | ||
155 | int alua_access_state, primary = 0, rc; | ||
156 | u16 tg_pt_id, rtpi; | ||
157 | |||
158 | if (!(l_port)) | ||
159 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
160 | /* | ||
161 | * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed | ||
162 | * for the local tg_pt_gp. | ||
163 | */ | ||
164 | l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; | ||
165 | if (!(l_tg_pt_gp_mem)) { | ||
166 | printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); | ||
167 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
168 | } | ||
169 | spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
170 | l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; | ||
171 | if (!(l_tg_pt_gp)) { | ||
172 | spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
173 | printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); | ||
174 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
175 | } | ||
176 | rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); | ||
177 | spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
178 | |||
179 | if (!(rc)) { | ||
180 | printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS" | ||
181 | " while TPGS_EXPLICT_ALUA is disabled\n"); | ||
182 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
183 | } | ||
184 | |||
185 | while (len < cmd->data_length) { | ||
186 | alua_access_state = (ptr[0] & 0x0f); | ||
187 | /* | ||
188 | * Check the received ALUA access state, and determine if | ||
189 | * the state is a primary or secondary target port asymmetric | ||
190 | * access state. | ||
191 | */ | ||
192 | rc = core_alua_check_transition(alua_access_state, &primary); | ||
193 | if (rc != 0) { | ||
194 | /* | ||
195 | * If the SET TARGET PORT GROUPS attempts to establish | ||
196 | * an invalid combination of target port asymmetric | ||
197 | * access states or attempts to establish an | ||
198 | * unsupported target port asymmetric access state, | ||
199 | * then the command shall be terminated with CHECK | ||
200 | * CONDITION status, with the sense key set to ILLEGAL | ||
201 | * REQUEST, and the additional sense code set to INVALID | ||
202 | * FIELD IN PARAMETER LIST. | ||
203 | */ | ||
204 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
205 | } | ||
206 | rc = -1; | ||
207 | /* | ||
208 | * If the ASYMMETRIC ACCESS STATE field (see table 267) | ||
209 | * specifies a primary target port asymmetric access state, | ||
210 | * then the TARGET PORT GROUP OR TARGET PORT field specifies | ||
211 | * a primary target port group for which the primary target | ||
212 | * port asymmetric access state shall be changed. If the | ||
213 | * ASYMMETRIC ACCESS STATE field specifies a secondary target | ||
214 | * port asymmetric access state, then the TARGET PORT GROUP OR | ||
215 | * TARGET PORT field specifies the relative target port | ||
216 | * identifier (see 3.1.120) of the target port for which the | ||
217 | * secondary target port asymmetric access state shall be | ||
218 | * changed. | ||
219 | */ | ||
220 | if (primary) { | ||
221 | tg_pt_id = ((ptr[2] << 8) & 0xff00); | ||
222 | tg_pt_id |= (ptr[3] & 0xff); | ||
223 | /* | ||
224 | * Locate the matching target port group ID from | ||
225 | * the global tg_pt_gp list | ||
226 | */ | ||
227 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
228 | list_for_each_entry(tg_pt_gp, | ||
229 | &T10_ALUA(su_dev)->tg_pt_gps_list, | ||
230 | tg_pt_gp_list) { | ||
231 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) | ||
232 | continue; | ||
233 | |||
234 | if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) | ||
235 | continue; | ||
236 | |||
237 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); | ||
238 | smp_mb__after_atomic_inc(); | ||
239 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
240 | |||
241 | rc = core_alua_do_port_transition(tg_pt_gp, | ||
242 | dev, l_port, nacl, | ||
243 | alua_access_state, 1); | ||
244 | |||
245 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
246 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); | ||
247 | smp_mb__after_atomic_dec(); | ||
248 | break; | ||
249 | } | ||
250 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
251 | /* | ||
252 | * If no matching target port group ID can be located, | ||
253 | * throw an exception with ASCQ: INVALID_PARAMETER_LIST | ||
254 | */ | ||
255 | if (rc != 0) | ||
256 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
257 | } else { | ||
258 | /* | ||
259 | * Extract the RELATIVE TARGET PORT IDENTIFIER to identify | ||
260 | * the Target Port in question for the incoming | ||
261 | * SET_TARGET_PORT_GROUPS op. | ||
262 | */ | ||
263 | rtpi = ((ptr[2] << 8) & 0xff00); | ||
264 | rtpi |= (ptr[3] & 0xff); | ||
265 | /* | ||
266 | * Locate the matching relative target port identifier | ||
267 | * for the struct se_device storage object. | ||
268 | */ | ||
269 | spin_lock(&dev->se_port_lock); | ||
270 | list_for_each_entry(port, &dev->dev_sep_list, | ||
271 | sep_list) { | ||
272 | if (port->sep_rtpi != rtpi) | ||
273 | continue; | ||
274 | |||
275 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
276 | spin_unlock(&dev->se_port_lock); | ||
277 | |||
278 | rc = core_alua_set_tg_pt_secondary_state( | ||
279 | tg_pt_gp_mem, port, 1, 1); | ||
280 | |||
281 | spin_lock(&dev->se_port_lock); | ||
282 | break; | ||
283 | } | ||
284 | spin_unlock(&dev->se_port_lock); | ||
285 | /* | ||
286 | * If no matching relative target port identifier can | ||
287 | * be located, throw an exception with ASCQ: | ||
288 | * INVALID_PARAMETER_LIST | ||
289 | */ | ||
290 | if (rc != 0) | ||
291 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
292 | } | ||
293 | |||
294 | ptr += 4; | ||
295 | len += 4; | ||
296 | } | ||
297 | |||
298 | return 0; | ||
299 | } | ||
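
Each SET_TARGET_PORT_GROUPS descriptor consumed by the loop above is four
bytes: the ASYMMETRIC ACCESS STATE in the low nibble of byte 0, and a 16-bit
big-endian TARGET PORT GROUP or RELATIVE TARGET PORT IDENTIFIER in bytes 2-3.
A hypothetical userspace sketch that builds one such descriptor:

	#include <stdint.h>
	#include <string.h>

	/* Build one 4-byte STPG descriptor in the layout parsed by
	 * core_emulate_set_target_port_groups() above. */
	static void build_stpg_descriptor(uint8_t desc[4], uint8_t state,
					  uint16_t id)
	{
		memset(desc, 0, 4);
		desc[0] = state & 0x0f;		/* ASYMMETRIC ACCESS STATE */
		desc[2] = (id >> 8) & 0xff;	/* ID, big-endian high byte */
		desc[3] = id & 0xff;		/* ID, big-endian low byte */
	}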
300 | |||
301 | static inline int core_alua_state_nonoptimized( | ||
302 | struct se_cmd *cmd, | ||
303 | unsigned char *cdb, | ||
304 | int nonop_delay_msecs, | ||
305 | u8 *alua_ascq) | ||
306 | { | ||
307 | /* | ||
308 | * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked | ||
309 | * later to determine if processing of this cmd needs to be | ||
310 | * temporarily delayed for the Active/NonOptimized primary access state. | ||
311 | */ | ||
312 | cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; | ||
313 | cmd->alua_nonop_delay = nonop_delay_msecs; | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static inline int core_alua_state_standby( | ||
318 | struct se_cmd *cmd, | ||
319 | unsigned char *cdb, | ||
320 | u8 *alua_ascq) | ||
321 | { | ||
322 | /* | ||
323 | * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by | ||
324 | * spc4r17 section 5.9.2.4.4 | ||
325 | */ | ||
326 | switch (cdb[0]) { | ||
327 | case INQUIRY: | ||
328 | case LOG_SELECT: | ||
329 | case LOG_SENSE: | ||
330 | case MODE_SELECT: | ||
331 | case MODE_SENSE: | ||
332 | case REPORT_LUNS: | ||
333 | case RECEIVE_DIAGNOSTIC: | ||
334 | case SEND_DIAGNOSTIC: | ||
335 | case MAINTENANCE_IN: | ||
336 | switch (cdb[1]) { | ||
337 | case MI_REPORT_TARGET_PGS: | ||
338 | return 0; | ||
339 | default: | ||
340 | *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; | ||
341 | return 1; | ||
342 | } | ||
343 | case MAINTENANCE_OUT: | ||
344 | switch (cdb[1]) { | ||
345 | case MO_SET_TARGET_PGS: | ||
346 | return 0; | ||
347 | default: | ||
348 | *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; | ||
349 | return 1; | ||
350 | } | ||
351 | case REQUEST_SENSE: | ||
352 | case PERSISTENT_RESERVE_IN: | ||
353 | case PERSISTENT_RESERVE_OUT: | ||
354 | case READ_BUFFER: | ||
355 | case WRITE_BUFFER: | ||
356 | return 0; | ||
357 | default: | ||
358 | *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; | ||
359 | return 1; | ||
360 | } | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static inline int core_alua_state_unavailable( | ||
366 | struct se_cmd *cmd, | ||
367 | unsigned char *cdb, | ||
368 | u8 *alua_ascq) | ||
369 | { | ||
370 | /* | ||
371 | * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by | ||
372 | * spc4r17 section 5.9.2.4.5 | ||
373 | */ | ||
374 | switch (cdb[0]) { | ||
375 | case INQUIRY: | ||
376 | case REPORT_LUNS: | ||
377 | case MAINTENANCE_IN: | ||
378 | switch (cdb[1]) { | ||
379 | case MI_REPORT_TARGET_PGS: | ||
380 | return 0; | ||
381 | default: | ||
382 | *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; | ||
383 | return 1; | ||
384 | } | ||
385 | case MAINTENANCE_OUT: | ||
386 | switch (cdb[1]) { | ||
387 | case MO_SET_TARGET_PGS: | ||
388 | return 0; | ||
389 | default: | ||
390 | *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; | ||
391 | return 1; | ||
392 | } | ||
393 | case REQUEST_SENSE: | ||
394 | case READ_BUFFER: | ||
395 | case WRITE_BUFFER: | ||
396 | return 0; | ||
397 | default: | ||
398 | *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; | ||
399 | return 1; | ||
400 | } | ||
401 | |||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | static inline int core_alua_state_transition( | ||
406 | struct se_cmd *cmd, | ||
407 | unsigned char *cdb, | ||
408 | u8 *alua_ascq) | ||
409 | { | ||
410 | /* | ||
411 | * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by | ||
412 | * spc4r17 section 5.9.2.5 | ||
413 | */ | ||
414 | switch (cdb[0]) { | ||
415 | case INQUIRY: | ||
416 | case REPORT_LUNS: | ||
417 | case MAINTENANCE_IN: | ||
418 | switch (cdb[1]) { | ||
419 | case MI_REPORT_TARGET_PGS: | ||
420 | return 0; | ||
421 | default: | ||
422 | *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; | ||
423 | return 1; | ||
424 | } | ||
425 | case REQUEST_SENSE: | ||
426 | case READ_BUFFER: | ||
427 | case WRITE_BUFFER: | ||
428 | return 0; | ||
429 | default: | ||
430 | *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; | ||
431 | return 1; | ||
432 | } | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED | ||
439 | * in transport_cmd_sequencer(). This function is assigned to | ||
440 | * struct t10_alua *->state_check() in core_setup_alua() | ||
441 | */ | ||
442 | static int core_alua_state_check_nop( | ||
443 | struct se_cmd *cmd, | ||
444 | unsigned char *cdb, | ||
445 | u8 *alua_ascq) | ||
446 | { | ||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | /* | ||
451 | * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer(). | ||
452 | * This function is assigned to struct t10_alua *->state_check() in | ||
453 | * core_setup_alua() | ||
454 | * | ||
455 | * Also, this function can return three different return codes to | ||
456 | * signal transport_generic_cmd_sequencer() | ||
457 | * | ||
458 | * return 1: Used to signal LUN not accessible, and check condition/not ready | ||
459 | * return 0: Used to signal success | ||
460 | * return -1: Used to signal failure, and invalid cdb field | ||
461 | */ | ||
462 | static int core_alua_state_check( | ||
463 | struct se_cmd *cmd, | ||
464 | unsigned char *cdb, | ||
465 | u8 *alua_ascq) | ||
466 | { | ||
467 | struct se_lun *lun = SE_LUN(cmd); | ||
468 | struct se_port *port = lun->lun_sep; | ||
469 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
470 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
471 | int out_alua_state, nonop_delay_msecs; | ||
472 | |||
473 | if (!(port)) | ||
474 | return 0; | ||
475 | /* | ||
476 | * First, check for a struct se_port specific secondary ALUA target port | ||
477 | * access state: OFFLINE | ||
478 | */ | ||
479 | if (atomic_read(&port->sep_tg_pt_secondary_offline)) { | ||
480 | *alua_ascq = ASCQ_04H_ALUA_OFFLINE; | ||
481 | printk(KERN_INFO "ALUA: Got secondary offline status for local" | ||
482 | " target port\n"); | ||
484 | return 1; | ||
485 | } | ||
486 | /* | ||
487 | * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the | ||
488 | * ALUA target port group, to obtain current ALUA access state. | ||
489 | * Otherwise look for the underlying struct se_device association with | ||
490 | * an ALUA logical unit group. | ||
491 | */ | ||
492 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
493 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
494 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
495 | out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); | ||
496 | nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; | ||
497 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
498 | /* | ||
499 | * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional | ||
500 | * statement so the compiler knows explicitly to check this case first. | ||
501 | * For the Optimized ALUA access state case, we want to process the | ||
502 | * incoming fabric cmd ASAP.. | ||
503 | */ | ||
504 | if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) | ||
505 | return 0; | ||
506 | |||
507 | switch (out_alua_state) { | ||
508 | case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: | ||
509 | return core_alua_state_nonoptimized(cmd, cdb, | ||
510 | nonop_delay_msecs, alua_ascq); | ||
511 | case ALUA_ACCESS_STATE_STANDBY: | ||
512 | return core_alua_state_standby(cmd, cdb, alua_ascq); | ||
513 | case ALUA_ACCESS_STATE_UNAVAILABLE: | ||
514 | return core_alua_state_unavailable(cmd, cdb, alua_ascq); | ||
515 | case ALUA_ACCESS_STATE_TRANSITION: | ||
516 | return core_alua_state_transition(cmd, cdb, alua_ascq); | ||
517 | /* | ||
518 | * OFFLINE is a secondary ALUA target port group access state, which is | ||
519 | * handled above with struct se_port->sep_tg_pt_secondary_offline=1 | ||
520 | */ | ||
521 | case ALUA_ACCESS_STATE_OFFLINE: | ||
522 | default: | ||
523 | printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", | ||
524 | out_alua_state); | ||
525 | return -1; | ||
526 | } | ||
527 | |||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * Check implicit and explicit ALUA state change requests. | ||
533 | */ | ||
534 | static int core_alua_check_transition(int state, int *primary) | ||
535 | { | ||
536 | switch (state) { | ||
537 | case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: | ||
538 | case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: | ||
539 | case ALUA_ACCESS_STATE_STANDBY: | ||
540 | case ALUA_ACCESS_STATE_UNAVAILABLE: | ||
541 | /* | ||
542 | * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are | ||
543 | * defined as primary target port asymmetric access states. | ||
544 | */ | ||
545 | *primary = 1; | ||
546 | break; | ||
547 | case ALUA_ACCESS_STATE_OFFLINE: | ||
548 | /* | ||
549 | * OFFLINE state is defined as a secondary target port | ||
550 | * asymmetric access state. | ||
551 | */ | ||
552 | *primary = 0; | ||
553 | break; | ||
554 | default: | ||
555 | printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state); | ||
556 | return -1; | ||
557 | } | ||
558 | |||
559 | return 0; | ||
560 | } | ||
561 | |||
562 | static char *core_alua_dump_state(int state) | ||
563 | { | ||
564 | switch (state) { | ||
565 | case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: | ||
566 | return "Active/Optimized"; | ||
567 | case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: | ||
568 | return "Active/NonOptimized"; | ||
569 | case ALUA_ACCESS_STATE_STANDBY: | ||
570 | return "Standby"; | ||
571 | case ALUA_ACCESS_STATE_UNAVAILABLE: | ||
572 | return "Unavailable"; | ||
573 | case ALUA_ACCESS_STATE_OFFLINE: | ||
574 | return "Offline"; | ||
575 | default: | ||
576 | return "Unknown"; | ||
577 | } | ||
578 | |||
579 | return NULL; | ||
580 | } | ||
581 | |||
582 | char *core_alua_dump_status(int status) | ||
583 | { | ||
584 | switch (status) { | ||
585 | case ALUA_STATUS_NONE: | ||
586 | return "None"; | ||
587 | case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: | ||
588 | return "Altered by Explict STPG"; | ||
589 | case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: | ||
590 | return "Altered by Implict ALUA"; | ||
591 | default: | ||
592 | return "Unknown"; | ||
593 | } | ||
594 | |||
595 | return NULL; | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Used by fabric modules to determine when we need to delay processing | ||
600 | * for the Active/NonOptimized paths.. | ||
601 | */ | ||
602 | int core_alua_check_nonop_delay( | ||
603 | struct se_cmd *cmd) | ||
604 | { | ||
605 | if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) | ||
606 | return 0; | ||
607 | if (in_interrupt()) | ||
608 | return 0; | ||
609 | /* | ||
610 | * The ALUA Active/NonOptimized access state delay can be disabled | ||
611 | * via configfs with a value of zero | ||
612 | */ | ||
613 | if (!(cmd->alua_nonop_delay)) | ||
614 | return 0; | ||
615 | /* | ||
616 | * struct se_cmd->alua_nonop_delay gets set by a target port group | ||
617 | * defined interval in core_alua_state_nonoptimized() | ||
618 | */ | ||
619 | msleep_interruptible(cmd->alua_nonop_delay); | ||
620 | return 0; | ||
621 | } | ||
622 | EXPORT_SYMBOL(core_alua_check_nonop_delay); | ||
623 | |||
624 | /* | ||
625 | * Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held | ||
626 | * | ||
627 | */ | ||
628 | static int core_alua_write_tpg_metadata( | ||
629 | const char *path, | ||
630 | unsigned char *md_buf, | ||
631 | u32 md_buf_len) | ||
632 | { | ||
633 | mm_segment_t old_fs; | ||
634 | struct file *file; | ||
635 | struct iovec iov[1]; | ||
636 | int flags = O_RDWR | O_CREAT | O_TRUNC, ret; | ||
637 | |||
638 | memset(iov, 0, sizeof(struct iovec)); | ||
639 | |||
640 | file = filp_open(path, flags, 0600); | ||
641 | if (IS_ERR(file) || !file || !file->f_dentry) { | ||
642 | printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n", | ||
643 | path); | ||
644 | return -ENODEV; | ||
645 | } | ||
646 | |||
647 | iov[0].iov_base = &md_buf[0]; | ||
648 | iov[0].iov_len = md_buf_len; | ||
649 | |||
650 | old_fs = get_fs(); | ||
651 | set_fs(get_ds()); | ||
652 | ret = vfs_writev(file, &iov[0], 1, &file->f_pos); | ||
653 | set_fs(old_fs); | ||
654 | |||
655 | if (ret < 0) { | ||
656 | printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path); | ||
657 | filp_close(file, NULL); | ||
658 | return -EIO; | ||
659 | } | ||
660 | filp_close(file, NULL); | ||
661 | |||
662 | return 0; | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * Called with tg_pt_gp->tg_pt_gp_md_mutex held | ||
667 | */ | ||
668 | static int core_alua_update_tpg_primary_metadata( | ||
669 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
670 | int primary_state, | ||
671 | unsigned char *md_buf) | ||
672 | { | ||
673 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; | ||
674 | struct t10_wwn *wwn = &su_dev->t10_wwn; | ||
675 | char path[ALUA_METADATA_PATH_LEN]; | ||
676 | int len; | ||
677 | |||
678 | memset(path, 0, ALUA_METADATA_PATH_LEN); | ||
679 | |||
680 | len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, | ||
681 | "tg_pt_gp_id=%hu\n" | ||
682 | "alua_access_state=0x%02x\n" | ||
683 | "alua_access_status=0x%02x\n", | ||
684 | tg_pt_gp->tg_pt_gp_id, primary_state, | ||
685 | tg_pt_gp->tg_pt_gp_alua_access_status); | ||
686 | |||
687 | snprintf(path, ALUA_METADATA_PATH_LEN, | ||
688 | "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], | ||
689 | config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); | ||
690 | |||
691 | return core_alua_write_tpg_metadata(path, md_buf, len); | ||
692 | } | ||
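
The primary metadata file written above consists of three key=value lines
under /var/target/alua/. A hypothetical userspace reader (a sketch only;
nothing in the kernel depends on this helper):

	#include <stdio.h>

	/* Parse the tg_pt_gp_id / alua_access_state / alua_access_status
	 * lines emitted by core_alua_update_tpg_primary_metadata(). */
	static int read_alua_primary_md(const char *path, unsigned short *id,
					unsigned int *state, unsigned int *status)
	{
		FILE *f = fopen(path, "r");
		int n;

		if (!f)
			return -1;
		n = fscanf(f, "tg_pt_gp_id=%hu\n"
			      "alua_access_state=0x%x\n"
			      "alua_access_status=0x%x\n",
			   id, state, status);
		fclose(f);
		return (n == 3) ? 0 : -1;
	}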
693 | |||
694 | static int core_alua_do_transition_tg_pt( | ||
695 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
696 | struct se_port *l_port, | ||
697 | struct se_node_acl *nacl, | ||
698 | unsigned char *md_buf, | ||
699 | int new_state, | ||
700 | int explict) | ||
701 | { | ||
702 | struct se_dev_entry *se_deve; | ||
703 | struct se_lun_acl *lacl; | ||
704 | struct se_port *port; | ||
705 | struct t10_alua_tg_pt_gp_member *mem; | ||
706 | int old_state = 0; | ||
707 | /* | ||
708 | * Save the old primary ALUA access state, and set the current state | ||
709 | * to ALUA_ACCESS_STATE_TRANSITION. | ||
710 | */ | ||
711 | old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); | ||
712 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, | ||
713 | ALUA_ACCESS_STATE_TRANSITION); | ||
714 | tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? | ||
715 | ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : | ||
716 | ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; | ||
717 | /* | ||
718 | * Check for the optional ALUA primary state transition delay | ||
719 | */ | ||
720 | if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) | ||
721 | msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); | ||
722 | |||
723 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
724 | list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, | ||
725 | tg_pt_gp_mem_list) { | ||
726 | port = mem->tg_pt; | ||
727 | /* | ||
728 | * After an implicit target port asymmetric access state | ||
729 | * change, a device server shall establish a unit attention | ||
730 | * condition for the initiator port associated with every I_T | ||
731 | * nexus with the additional sense code set to ASYMMETRIC | ||
732 | * ACCESS STATE CHANGED. | ||
733 | * | ||
734 | * After an explicit target port asymmetric access state | ||
735 | * change, a device server shall establish a unit attention | ||
736 | * condition with the additional sense code set to ASYMMETRIC | ||
737 | * ACCESS STATE CHANGED for the initiator port associated with | ||
738 | * every I_T nexus other than the I_T nexus on which the SET | ||
739 | * TARGET PORT GROUPS command was received. | ||
740 | */ | ||
741 | atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); | ||
742 | smp_mb__after_atomic_inc(); | ||
743 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
744 | |||
745 | spin_lock_bh(&port->sep_alua_lock); | ||
746 | list_for_each_entry(se_deve, &port->sep_alua_list, | ||
747 | alua_port_list) { | ||
748 | lacl = se_deve->se_lun_acl; | ||
749 | /* | ||
750 | * se_deve->se_lun_acl pointer may be NULL for an | ||
751 | * entry created without explicit Node+MappedLUN ACLs | ||
752 | */ | ||
753 | if (!(lacl)) | ||
754 | continue; | ||
755 | |||
756 | if (explict && | ||
757 | (nacl != NULL) && (nacl == lacl->se_lun_nacl) && | ||
758 | (l_port != NULL) && (l_port == port)) | ||
759 | continue; | ||
760 | |||
761 | core_scsi3_ua_allocate(lacl->se_lun_nacl, | ||
762 | se_deve->mapped_lun, 0x2A, | ||
763 | ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); | ||
764 | } | ||
765 | spin_unlock_bh(&port->sep_alua_lock); | ||
766 | |||
767 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
768 | atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); | ||
769 | smp_mb__after_atomic_dec(); | ||
770 | } | ||
771 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
772 | /* | ||
773 | * Update the ALUA metadata buf that has been allocated in | ||
774 | * core_alua_do_port_transition(), this metadata will be written | ||
775 | * to struct file. | ||
776 | * | ||
777 | * Note that there is a case where we do not want to update the | ||
778 | * metadata: when the saved metadata is being parsed in userspace | ||
779 | * to set the existing port access state and access status. | ||
780 | * | ||
781 | * Also note that the failure to write out the ALUA metadata to | ||
782 | * struct file does NOT affect the actual ALUA transition. | ||
783 | */ | ||
784 | if (tg_pt_gp->tg_pt_gp_write_metadata) { | ||
785 | mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); | ||
786 | core_alua_update_tpg_primary_metadata(tg_pt_gp, | ||
787 | new_state, md_buf); | ||
788 | mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); | ||
789 | } | ||
790 | /* | ||
791 | * Set the current primary ALUA access state to the requested new state | ||
792 | */ | ||
793 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); | ||
794 | |||
795 | printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" | ||
796 | " from primary access state %s to %s\n", (explict) ? "explict" : | ||
797 | "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), | ||
798 | tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), | ||
799 | core_alua_dump_state(new_state)); | ||
800 | |||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | int core_alua_do_port_transition( | ||
805 | struct t10_alua_tg_pt_gp *l_tg_pt_gp, | ||
806 | struct se_device *l_dev, | ||
807 | struct se_port *l_port, | ||
808 | struct se_node_acl *l_nacl, | ||
809 | int new_state, | ||
810 | int explict) | ||
811 | { | ||
812 | struct se_device *dev; | ||
813 | struct se_port *port; | ||
814 | struct se_subsystem_dev *su_dev; | ||
815 | struct se_node_acl *nacl; | ||
816 | struct t10_alua_lu_gp *lu_gp; | ||
817 | struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; | ||
818 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
819 | unsigned char *md_buf; | ||
820 | int primary; | ||
821 | |||
822 | if (core_alua_check_transition(new_state, &primary) != 0) | ||
823 | return -EINVAL; | ||
824 | |||
825 | md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); | ||
826 | if (!(md_buf)) { | ||
827 | printk("Unable to allocate buf for ALUA metadata\n"); | ||
828 | return -ENOMEM; | ||
829 | } | ||
830 | |||
831 | local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; | ||
832 | spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); | ||
833 | lu_gp = local_lu_gp_mem->lu_gp; | ||
834 | atomic_inc(&lu_gp->lu_gp_ref_cnt); | ||
835 | smp_mb__after_atomic_inc(); | ||
836 | spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); | ||
837 | /* | ||
838 | * For storage objects that are members of the 'default_lu_gp', | ||
839 | * we only do transition on the passed *l_tg_pt_gp, and not | ||
840 | * on all of the matching target port group IDs in default_lu_gp. | ||
841 | */ | ||
842 | if (!(lu_gp->lu_gp_id)) { | ||
843 | /* | ||
844 | * core_alua_do_transition_tg_pt() will always return | ||
845 | * success. | ||
846 | */ | ||
847 | core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, | ||
848 | md_buf, new_state, explict); | ||
849 | atomic_dec(&lu_gp->lu_gp_ref_cnt); | ||
850 | smp_mb__after_atomic_dec(); | ||
851 | kfree(md_buf); | ||
852 | return 0; | ||
853 | } | ||
854 | /* | ||
855 | * For all other LU groups aside from 'default_lu_gp', walk all of | ||
856 | * the associated storage objects looking for a matching target port | ||
857 | * group ID from the local target port group. | ||
858 | */ | ||
859 | spin_lock(&lu_gp->lu_gp_lock); | ||
860 | list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, | ||
861 | lu_gp_mem_list) { | ||
862 | |||
863 | dev = lu_gp_mem->lu_gp_mem_dev; | ||
864 | su_dev = dev->se_sub_dev; | ||
865 | atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); | ||
866 | smp_mb__after_atomic_inc(); | ||
867 | spin_unlock(&lu_gp->lu_gp_lock); | ||
868 | |||
869 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
870 | list_for_each_entry(tg_pt_gp, | ||
871 | &T10_ALUA(su_dev)->tg_pt_gps_list, | ||
872 | tg_pt_gp_list) { | ||
873 | |||
874 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) | ||
875 | continue; | ||
876 | /* | ||
877 | * If the target port asymmetric access state | ||
878 | * is changed for any target port group accessible via | ||
879 | * a logical unit within a LU group, the target port | ||
880 | * group asymmetric access states for the same | ||
881 | * target port group accessible via other logical units | ||
882 | * in that LU group will also change. | ||
883 | */ | ||
884 | if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) | ||
885 | continue; | ||
886 | |||
887 | if (l_tg_pt_gp == tg_pt_gp) { | ||
888 | port = l_port; | ||
889 | nacl = l_nacl; | ||
890 | } else { | ||
891 | port = NULL; | ||
892 | nacl = NULL; | ||
893 | } | ||
894 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); | ||
895 | smp_mb__after_atomic_inc(); | ||
896 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
897 | /* | ||
898 | * core_alua_do_transition_tg_pt() will always return | ||
899 | * success. | ||
900 | */ | ||
901 | core_alua_do_transition_tg_pt(tg_pt_gp, port, | ||
902 | nacl, md_buf, new_state, explict); | ||
903 | |||
904 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
905 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); | ||
906 | smp_mb__after_atomic_dec(); | ||
907 | } | ||
908 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
909 | |||
910 | spin_lock(&lu_gp->lu_gp_lock); | ||
911 | atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); | ||
912 | smp_mb__after_atomic_dec(); | ||
913 | } | ||
914 | spin_unlock(&lu_gp->lu_gp_lock); | ||
915 | |||
916 | printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT" | ||
917 | " Group IDs: %hu %s transition to primary state: %s\n", | ||
918 | config_item_name(&lu_gp->lu_gp_group.cg_item), | ||
919 | l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explicit" : "implicit", | ||
920 | core_alua_dump_state(new_state)); | ||
921 | |||
922 | atomic_dec(&lu_gp->lu_gp_ref_cnt); | ||
923 | smp_mb__after_atomic_dec(); | ||
924 | kfree(md_buf); | ||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | /* | ||
929 | * Called with port->sep_tg_pt_md_mutex held | ||
930 | */ | ||
931 | static int core_alua_update_tpg_secondary_metadata( | ||
932 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
933 | struct se_port *port, | ||
934 | unsigned char *md_buf, | ||
935 | u32 md_buf_len) | ||
936 | { | ||
937 | struct se_portal_group *se_tpg = port->sep_tpg; | ||
938 | char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; | ||
939 | int len; | ||
940 | |||
941 | memset(path, 0, ALUA_METADATA_PATH_LEN); | ||
942 | memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); | ||
943 | |||
944 | len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", | ||
945 | TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg)); | ||
946 | |||
947 | if (TPG_TFO(se_tpg)->tpg_get_tag != NULL) | ||
948 | snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", | ||
949 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); | ||
950 | |||
951 | len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" | ||
952 | "alua_tg_pt_status=0x%02x\n", | ||
953 | atomic_read(&port->sep_tg_pt_secondary_offline), | ||
954 | port->sep_tg_pt_secondary_stat); | ||
955 | |||
956 | snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", | ||
957 | TPG_TFO(se_tpg)->get_fabric_name(), wwn, | ||
958 | port->sep_lun->unpacked_lun); | ||
959 | |||
960 | return core_alua_write_tpg_metadata(path, md_buf, len); | ||
961 | } | ||
962 | |||
963 | static int core_alua_set_tg_pt_secondary_state( | ||
964 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
965 | struct se_port *port, | ||
966 | int explict, | ||
967 | int offline) | ||
968 | { | ||
969 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
970 | unsigned char *md_buf; | ||
971 | u32 md_buf_len; | ||
972 | int trans_delay_msecs; | ||
973 | |||
974 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
975 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
976 | if (!(tg_pt_gp)) { | ||
977 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
978 | printk(KERN_ERR "Unable to complete secondary state" | ||
979 | " transition\n"); | ||
980 | return -1; | ||
981 | } | ||
982 | trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; | ||
983 | /* | ||
984 | * Set the secondary ALUA target port access state to OFFLINE | ||
985 | * or release the previously set secondary state for struct se_port | ||
986 | */ | ||
987 | if (offline) | ||
988 | atomic_set(&port->sep_tg_pt_secondary_offline, 1); | ||
989 | else | ||
990 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | ||
991 | |||
992 | md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; | ||
993 | port->sep_tg_pt_secondary_stat = (explict) ? | ||
994 | ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : | ||
995 | ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; | ||
996 | |||
997 | printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" | ||
998 | " to secondary access state: %s\n", (explict) ? "explict" : | ||
999 | "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), | ||
1000 | tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); | ||
1001 | |||
1002 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1003 | /* | ||
1004 | * Do the optional transition delay after we set the secondary | ||
1005 | * ALUA access state. | ||
1006 | */ | ||
1007 | if (trans_delay_msecs != 0) | ||
1008 | msleep_interruptible(trans_delay_msecs); | ||
1009 | /* | ||
1010 | * See if we need to update the ALUA fabric port metadata for | ||
1011 | * secondary state and status | ||
1012 | */ | ||
1013 | if (port->sep_tg_pt_secondary_write_md) { | ||
1014 | md_buf = kzalloc(md_buf_len, GFP_KERNEL); | ||
1015 | if (!(md_buf)) { | ||
1016 | printk(KERN_ERR "Unable to allocate md_buf for" | ||
1017 | " secondary ALUA access metadata\n"); | ||
1018 | return -1; | ||
1019 | } | ||
1020 | mutex_lock(&port->sep_tg_pt_md_mutex); | ||
1021 | core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, | ||
1022 | md_buf, md_buf_len); | ||
1023 | mutex_unlock(&port->sep_tg_pt_md_mutex); | ||
1024 | |||
1025 | kfree(md_buf); | ||
1026 | } | ||
1027 | |||
1028 | return 0; | ||
1029 | } | ||
1030 | |||
1031 | struct t10_alua_lu_gp * | ||
1032 | core_alua_allocate_lu_gp(const char *name, int def_group) | ||
1033 | { | ||
1034 | struct t10_alua_lu_gp *lu_gp; | ||
1035 | |||
1036 | lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); | ||
1037 | if (!(lu_gp)) { | ||
1038 | printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n"); | ||
1039 | return ERR_PTR(-ENOMEM); | ||
1040 | } | ||
1041 | INIT_LIST_HEAD(&lu_gp->lu_gp_list); | ||
1042 | INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); | ||
1043 | spin_lock_init(&lu_gp->lu_gp_lock); | ||
1044 | atomic_set(&lu_gp->lu_gp_ref_cnt, 0); | ||
1045 | |||
1046 | if (def_group) { | ||
1047 | lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++; | ||
1048 | lu_gp->lu_gp_valid_id = 1; | ||
1049 | se_global->alua_lu_gps_count++; | ||
1050 | } | ||
1051 | |||
1052 | return lu_gp; | ||
1053 | } | ||
1054 | |||
1055 | int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) | ||
1056 | { | ||
1057 | struct t10_alua_lu_gp *lu_gp_tmp; | ||
1058 | u16 lu_gp_id_tmp; | ||
1059 | /* | ||
1060 | * The lu_gp->lu_gp_id may only be set once.. | ||
1061 | */ | ||
1062 | if (lu_gp->lu_gp_valid_id) { | ||
1063 | printk(KERN_WARNING "ALUA LU Group already has a valid ID," | ||
1064 | " ignoring request\n"); | ||
1065 | return -1; | ||
1066 | } | ||
1067 | |||
1068 | spin_lock(&se_global->lu_gps_lock); | ||
1069 | if (se_global->alua_lu_gps_count == 0x0000ffff) { | ||
1070 | printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:" | ||
1071 | " 0x0000ffff reached\n"); | ||
1072 | spin_unlock(&se_global->lu_gps_lock); | ||
1073 | kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); | ||
1074 | return -1; | ||
1075 | } | ||
1076 | again: | ||
1077 | lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : | ||
1078 | se_global->alua_lu_gps_counter++; | ||
1079 | |||
1080 | list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) { | ||
1081 | if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { | ||
1082 | if (!(lu_gp_id)) | ||
1083 | goto again; | ||
1084 | |||
1085 | printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu" | ||
1086 | " already exists, ignoring request\n", | ||
1087 | lu_gp_id); | ||
1088 | spin_unlock(&se_global->lu_gps_lock); | ||
1089 | return -1; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1093 | lu_gp->lu_gp_id = lu_gp_id_tmp; | ||
1094 | lu_gp->lu_gp_valid_id = 1; | ||
1095 | list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list); | ||
1096 | se_global->alua_lu_gps_count++; | ||
1097 | spin_unlock(&se_global->lu_gps_lock); | ||
1098 | |||
1099 | return 0; | ||
1100 | } | ||
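
The ID allocation above either honors a caller-supplied non-zero ID, or keeps
advancing alua_lu_gps_counter (the 'again:' retry) until an unused ID is
found, capping the total group count at 0x0000ffff. The same pattern reduced
to a standalone sketch (hypothetical names; a bitmap standing in for the
g_lu_gps_list walk):

	#include <stdint.h>

	/* Allocate a 16-bit group ID: take 'wanted' if non-zero and free,
	 * otherwise advance the counter until a free ID turns up. */
	static int alloc_gp_id(const uint8_t used[65536], uint16_t wanted,
			       uint16_t *counter, uint16_t *out)
	{
		uint32_t tries;

		if (wanted) {
			if (used[wanted])
				return -1;	/* explicit ID already taken */
			*out = wanted;
			return 0;
		}
		for (tries = 0; tries < 65536; tries++) {
			uint16_t id = (*counter)++;

			if (!used[id]) {
				*out = id;
				return 0;
			}
		}
		return -1;	/* ID space exhausted */
	}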
1101 | |||
1102 | static struct t10_alua_lu_gp_member * | ||
1103 | core_alua_allocate_lu_gp_mem(struct se_device *dev) | ||
1104 | { | ||
1105 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
1106 | |||
1107 | lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); | ||
1108 | if (!(lu_gp_mem)) { | ||
1109 | printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n"); | ||
1110 | return ERR_PTR(-ENOMEM); | ||
1111 | } | ||
1112 | INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); | ||
1113 | spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); | ||
1114 | atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); | ||
1115 | |||
1116 | lu_gp_mem->lu_gp_mem_dev = dev; | ||
1117 | dev->dev_alua_lu_gp_mem = lu_gp_mem; | ||
1118 | |||
1119 | return lu_gp_mem; | ||
1120 | } | ||
1121 | |||
1122 | void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) | ||
1123 | { | ||
1124 | struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; | ||
1125 | /* | ||
1126 | * Once we have reached this point, config_item_put() has | ||
1127 | * already been called from target_core_alua_drop_lu_gp(). | ||
1128 | * | ||
1129 | * Here, we remove the *lu_gp from the global list so that | ||
1130 | * no associations can be made while we are releasing | ||
1131 | * struct t10_alua_lu_gp. | ||
1132 | */ | ||
1133 | spin_lock(&se_global->lu_gps_lock); | ||
1134 | atomic_set(&lu_gp->lu_gp_shutdown, 1); | ||
1135 | list_del(&lu_gp->lu_gp_list); | ||
1136 | se_global->alua_lu_gps_count--; | ||
1137 | spin_unlock(&se_global->lu_gps_lock); | ||
1138 | /* | ||
1139 | * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() | ||
1140 | * in target_core_configfs.c:target_core_store_alua_lu_gp() to be | ||
1141 | * released with core_alua_put_lu_gp_from_name() | ||
1142 | */ | ||
1143 | while (atomic_read(&lu_gp->lu_gp_ref_cnt)) | ||
1144 | cpu_relax(); | ||
1145 | /* | ||
1146 | * Release reference to struct t10_alua_lu_gp * from all associated | ||
1147 | * struct se_device. | ||
1148 | */ | ||
1149 | spin_lock(&lu_gp->lu_gp_lock); | ||
1150 | list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, | ||
1151 | &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { | ||
1152 | if (lu_gp_mem->lu_gp_assoc) { | ||
1153 | list_del(&lu_gp_mem->lu_gp_mem_list); | ||
1154 | lu_gp->lu_gp_members--; | ||
1155 | lu_gp_mem->lu_gp_assoc = 0; | ||
1156 | } | ||
1157 | spin_unlock(&lu_gp->lu_gp_lock); | ||
1158 | /* | ||
1159 | * | ||
1160 | * lu_gp_mem is associated with a single | ||
1161 | * struct se_device->dev_alua_lu_gp_mem, and is released when | ||
1162 | * struct se_device is released via core_alua_free_lu_gp_mem(). | ||
1163 | * | ||
1164 | * If the passed lu_gp does NOT match the default_lu_gp, assume | ||
1165 | * we want to re-associate a given lu_gp_mem with default_lu_gp. | ||
1166 | */ | ||
1167 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | ||
1168 | if (lu_gp != se_global->default_lu_gp) | ||
1169 | __core_alua_attach_lu_gp_mem(lu_gp_mem, | ||
1170 | se_global->default_lu_gp); | ||
1171 | else | ||
1172 | lu_gp_mem->lu_gp = NULL; | ||
1173 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
1174 | |||
1175 | spin_lock(&lu_gp->lu_gp_lock); | ||
1176 | } | ||
1177 | spin_unlock(&lu_gp->lu_gp_lock); | ||
1178 | |||
1179 | kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); | ||
1180 | } | ||
1181 | |||
1182 | void core_alua_free_lu_gp_mem(struct se_device *dev) | ||
1183 | { | ||
1184 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | ||
1185 | struct t10_alua *alua = T10_ALUA(su_dev); | ||
1186 | struct t10_alua_lu_gp *lu_gp; | ||
1187 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
1188 | |||
1189 | if (alua->alua_type != SPC3_ALUA_EMULATED) | ||
1190 | return; | ||
1191 | |||
1192 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | ||
1193 | if (!(lu_gp_mem)) | ||
1194 | return; | ||
1195 | |||
1196 | while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) | ||
1197 | cpu_relax(); | ||
1198 | |||
1199 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | ||
1200 | lu_gp = lu_gp_mem->lu_gp; | ||
1201 | if ((lu_gp)) { | ||
1202 | spin_lock(&lu_gp->lu_gp_lock); | ||
1203 | if (lu_gp_mem->lu_gp_assoc) { | ||
1204 | list_del(&lu_gp_mem->lu_gp_mem_list); | ||
1205 | lu_gp->lu_gp_members--; | ||
1206 | lu_gp_mem->lu_gp_assoc = 0; | ||
1207 | } | ||
1208 | spin_unlock(&lu_gp->lu_gp_lock); | ||
1209 | lu_gp_mem->lu_gp = NULL; | ||
1210 | } | ||
1211 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
1212 | |||
1213 | kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); | ||
1214 | } | ||
1215 | |||
1216 | struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) | ||
1217 | { | ||
1218 | struct t10_alua_lu_gp *lu_gp; | ||
1219 | struct config_item *ci; | ||
1220 | |||
1221 | spin_lock(&se_global->lu_gps_lock); | ||
1222 | list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) { | ||
1223 | if (!(lu_gp->lu_gp_valid_id)) | ||
1224 | continue; | ||
1225 | ci = &lu_gp->lu_gp_group.cg_item; | ||
1226 | if (!(strcmp(config_item_name(ci), name))) { | ||
1227 | atomic_inc(&lu_gp->lu_gp_ref_cnt); | ||
1228 | spin_unlock(&se_global->lu_gps_lock); | ||
1229 | return lu_gp; | ||
1230 | } | ||
1231 | } | ||
1232 | spin_unlock(&se_global->lu_gps_lock); | ||
1233 | |||
1234 | return NULL; | ||
1235 | } | ||
1236 | |||
1237 | void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) | ||
1238 | { | ||
1239 | spin_lock(&se_global->lu_gps_lock); | ||
1240 | atomic_dec(&lu_gp->lu_gp_ref_cnt); | ||
1241 | spin_unlock(&se_global->lu_gps_lock); | ||
1242 | } | ||
1243 | |||
1244 | /* | ||
1245 | * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock | ||
1246 | */ | ||
1247 | void __core_alua_attach_lu_gp_mem( | ||
1248 | struct t10_alua_lu_gp_member *lu_gp_mem, | ||
1249 | struct t10_alua_lu_gp *lu_gp) | ||
1250 | { | ||
1251 | spin_lock(&lu_gp->lu_gp_lock); | ||
1252 | lu_gp_mem->lu_gp = lu_gp; | ||
1253 | lu_gp_mem->lu_gp_assoc = 1; | ||
1254 | list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); | ||
1255 | lu_gp->lu_gp_members++; | ||
1256 | spin_unlock(&lu_gp->lu_gp_lock); | ||
1257 | } | ||
1258 | |||
1259 | /* | ||
1260 | * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock | ||
1261 | */ | ||
1262 | void __core_alua_drop_lu_gp_mem( | ||
1263 | struct t10_alua_lu_gp_member *lu_gp_mem, | ||
1264 | struct t10_alua_lu_gp *lu_gp) | ||
1265 | { | ||
1266 | spin_lock(&lu_gp->lu_gp_lock); | ||
1267 | list_del(&lu_gp_mem->lu_gp_mem_list); | ||
1268 | lu_gp_mem->lu_gp = NULL; | ||
1269 | lu_gp_mem->lu_gp_assoc = 0; | ||
1270 | lu_gp->lu_gp_members--; | ||
1271 | spin_unlock(&lu_gp->lu_gp_lock); | ||
1272 | } | ||
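__core_alua_attach_lu_gp_mem() and __core_alua_drop_lu_gp_mem() both assume the caller already holds lu_gp_mem->lu_gp_mem_lock and then take the group's lu_gp_lock inside, so the lock order is always member-then-group. A hedged userspace sketch of that two-level convention follows (pthread mutexes, illustrative names; not the kernel code itself).

    #include <pthread.h>

    struct group {
        pthread_mutex_t lock;
        int members;
    };

    struct member {
        pthread_mutex_t lock;   /* always taken before group->lock */
        struct group *group;
        int assoc;
    };

    /* Caller must hold mem->lock, mirroring the __core_alua_* helpers. */
    static void __attach(struct member *mem, struct group *grp)
    {
        pthread_mutex_lock(&grp->lock);
        mem->group = grp;
        mem->assoc = 1;
        grp->members++;
        pthread_mutex_unlock(&grp->lock);
    }

    static void __drop(struct member *mem, struct group *grp)
    {
        pthread_mutex_lock(&grp->lock);
        mem->group = NULL;
        mem->assoc = 0;
        grp->members--;
        pthread_mutex_unlock(&grp->lock);
    }

    /* Moving a member between groups keeps the member lock held across both. */
    static void move(struct member *mem, struct group *from, struct group *to)
    {
        pthread_mutex_lock(&mem->lock);
        __drop(mem, from);
        __attach(mem, to);
        pthread_mutex_unlock(&mem->lock);
    }

    int main(void)
    {
        struct group a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct group b = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct member m = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

        pthread_mutex_lock(&m.lock);
        __attach(&m, &a);
        pthread_mutex_unlock(&m.lock);
        move(&m, &a, &b);
        return 0;
    }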
1273 | |||
1274 | struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( | ||
1275 | struct se_subsystem_dev *su_dev, | ||
1276 | const char *name, | ||
1277 | int def_group) | ||
1278 | { | ||
1279 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
1280 | |||
1281 | tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); | ||
1282 | if (!(tg_pt_gp)) { | ||
1283 | printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n"); | ||
1284 | return NULL; | ||
1285 | } | ||
1286 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); | ||
1287 | INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); | ||
1288 | mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); | ||
1289 | spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); | ||
1290 | atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); | ||
1291 | tg_pt_gp->tg_pt_gp_su_dev = su_dev; | ||
1292 | tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; | ||
1293 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, | ||
1294 | ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); | ||
1295 | /* | ||
1296 | * Enable both explicit and implicit ALUA support by default | ||
1297 | */ | ||
1298 | tg_pt_gp->tg_pt_gp_alua_access_type = | ||
1299 | TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; | ||
1300 | /* | ||
1301 | * Set the default Active/NonOptimized Delay in milliseconds | ||
1302 | */ | ||
1303 | tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; | ||
1304 | tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; | ||
1305 | |||
1306 | if (def_group) { | ||
1307 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1308 | tg_pt_gp->tg_pt_gp_id = | ||
1309 | T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; | ||
1310 | tg_pt_gp->tg_pt_gp_valid_id = 1; | ||
1311 | T10_ALUA(su_dev)->alua_tg_pt_gps_count++; | ||
1312 | list_add_tail(&tg_pt_gp->tg_pt_gp_list, | ||
1313 | &T10_ALUA(su_dev)->tg_pt_gps_list); | ||
1314 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1315 | } | ||
1316 | |||
1317 | return tg_pt_gp; | ||
1318 | } | ||
1319 | |||
1320 | int core_alua_set_tg_pt_gp_id( | ||
1321 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1322 | u16 tg_pt_gp_id) | ||
1323 | { | ||
1324 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; | ||
1325 | struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; | ||
1326 | u16 tg_pt_gp_id_tmp; | ||
1327 | /* | ||
1328 | * The tg_pt_gp->tg_pt_gp_id may only be set once. | ||
1329 | */ | ||
1330 | if (tg_pt_gp->tg_pt_gp_valid_id) { | ||
1331 | printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," | ||
1332 | " ignoring request\n"); | ||
1333 | return -1; | ||
1334 | } | ||
1335 | |||
1336 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1337 | if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) { | ||
1338 | printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" | ||
1339 | " 0x0000ffff reached\n"); | ||
1340 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1341 | kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); | ||
1342 | return -1; | ||
1343 | } | ||
1344 | again: | ||
1345 | tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : | ||
1346 | T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; | ||
1347 | |||
1348 | list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list, | ||
1349 | tg_pt_gp_list) { | ||
1350 | if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { | ||
1351 | if (!(tg_pt_gp_id)) | ||
1352 | goto again; | ||
1353 | |||
1354 | printk(KERN_ERR "ALUA Target Port Group ID: %hu already" | ||
1355 | " exists, ignoring request\n", tg_pt_gp_id); | ||
1356 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1357 | return -1; | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; | ||
1362 | tg_pt_gp->tg_pt_gp_valid_id = 1; | ||
1363 | list_add_tail(&tg_pt_gp->tg_pt_gp_list, | ||
1364 | &T10_ALUA(su_dev)->tg_pt_gps_list); | ||
1365 | T10_ALUA(su_dev)->alua_tg_pt_gps_count++; | ||
1366 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1367 | |||
1368 | return 0; | ||
1369 | } | ||
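The again: loop above implements first-fit ID allocation: a caller-supplied non-zero ID must be unique or the call fails, while an auto-generated ID (tg_pt_gp_id == 0) advances the counter and rescans until no collision remains. A self-contained sketch of the same algorithm over a plain array (illustrative names, no locking) follows; note that only the auto-generated path retries.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_GROUPS 8

    static uint16_t ids[MAX_GROUPS];    /* 0 == unused slot */
    static uint16_t id_counter;         /* monotonically advancing generator */

    static int id_in_use(uint16_t id)
    {
        for (int i = 0; i < MAX_GROUPS; i++)
            if (ids[i] == id)
                return 1;
        return 0;
    }

    /* Returns the assigned ID, or 0 on failure (requested ID already taken). */
    static uint16_t set_group_id(int slot, uint16_t requested)
    {
        uint16_t id;
    again:
        id = requested ? requested : ++id_counter;
        if (id_in_use(id)) {
            if (!requested)
                goto again;     /* auto-generated: try the next one */
            return 0;           /* explicit request: collision is an error */
        }
        ids[slot] = id;
        return id;
    }

    int main(void)
    {
        printf("auto: %u\n", set_group_id(0, 0));       /* 1 */
        printf("explicit: %u\n", set_group_id(1, 5));   /* 5 */
        printf("collision: %u\n", set_group_id(2, 5));  /* 0 */
        printf("next auto: %u\n", set_group_id(3, 0));  /* 2 */
        return 0;
    }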
1370 | |||
1371 | struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( | ||
1372 | struct se_port *port) | ||
1373 | { | ||
1374 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1375 | |||
1376 | tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, | ||
1377 | GFP_KERNEL); | ||
1378 | if (!(tg_pt_gp_mem)) { | ||
1379 | printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n"); | ||
1380 | return ERR_PTR(-ENOMEM); | ||
1381 | } | ||
1382 | INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); | ||
1383 | spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1384 | atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); | ||
1385 | |||
1386 | tg_pt_gp_mem->tg_pt = port; | ||
1387 | port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; | ||
1388 | atomic_set(&port->sep_tg_pt_gp_active, 1); | ||
1389 | |||
1390 | return tg_pt_gp_mem; | ||
1391 | } | ||
1392 | |||
1393 | void core_alua_free_tg_pt_gp( | ||
1394 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1395 | { | ||
1396 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; | ||
1397 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; | ||
1398 | /* | ||
1399 | * Once we have reached this point, config_item_put() has already | ||
1400 | * been called from target_core_alua_drop_tg_pt_gp(). | ||
1401 | * | ||
1402 | * Here we remove *tg_pt_gp from the global list so that | ||
1403 | * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS | ||
1404 | * can be made while we are releasing struct t10_alua_tg_pt_gp. | ||
1405 | */ | ||
1406 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1407 | list_del(&tg_pt_gp->tg_pt_gp_list); | ||
1408 | T10_ALUA(su_dev)->alua_tg_pt_gps_count--; | ||
1409 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1410 | /* | ||
1411 | * Allow a struct t10_alua_tg_pt_gp_member * referenced by | ||
1412 | * core_alua_get_tg_pt_gp_by_name() in | ||
1413 | * target_core_configfs.c:target_core_store_alua_tg_pt_gp() | ||
1414 | * to be released with core_alua_put_tg_pt_gp_from_name(). | ||
1415 | */ | ||
1416 | while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) | ||
1417 | cpu_relax(); | ||
1418 | /* | ||
1419 | * Release reference to struct t10_alua_tg_pt_gp from all associated | ||
1420 | * struct se_port. | ||
1421 | */ | ||
1422 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
1423 | list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, | ||
1424 | &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { | ||
1425 | if (tg_pt_gp_mem->tg_pt_gp_assoc) { | ||
1426 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | ||
1427 | tg_pt_gp->tg_pt_gp_members--; | ||
1428 | tg_pt_gp_mem->tg_pt_gp_assoc = 0; | ||
1429 | } | ||
1430 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
1431 | /* | ||
1432 | * tg_pt_gp_mem is associated with a single | ||
1433 | * se_port->sep_alua_tg_pt_gp_mem, and is released via | ||
1434 | * core_alua_free_tg_pt_gp_mem(). | ||
1435 | * | ||
1436 | * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, | ||
1437 | * assume we want to re-associate a given tg_pt_gp_mem with | ||
1438 | * default_tg_pt_gp. | ||
1439 | */ | ||
1440 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1441 | if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) { | ||
1442 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | ||
1443 | T10_ALUA(su_dev)->default_tg_pt_gp); | ||
1444 | } else | ||
1445 | tg_pt_gp_mem->tg_pt_gp = NULL; | ||
1446 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1447 | |||
1448 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
1449 | } | ||
1450 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
1451 | |||
1452 | kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); | ||
1453 | } | ||
1454 | |||
1455 | void core_alua_free_tg_pt_gp_mem(struct se_port *port) | ||
1456 | { | ||
1457 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; | ||
1458 | struct t10_alua *alua = T10_ALUA(su_dev); | ||
1459 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
1460 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1461 | |||
1462 | if (alua->alua_type != SPC3_ALUA_EMULATED) | ||
1463 | return; | ||
1464 | |||
1465 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
1466 | if (!(tg_pt_gp_mem)) | ||
1467 | return; | ||
1468 | |||
1469 | while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) | ||
1470 | cpu_relax(); | ||
1471 | |||
1472 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1473 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
1474 | if ((tg_pt_gp)) { | ||
1475 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
1476 | if (tg_pt_gp_mem->tg_pt_gp_assoc) { | ||
1477 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | ||
1478 | tg_pt_gp->tg_pt_gp_members--; | ||
1479 | tg_pt_gp_mem->tg_pt_gp_assoc = 0; | ||
1480 | } | ||
1481 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
1482 | tg_pt_gp_mem->tg_pt_gp = NULL; | ||
1483 | } | ||
1484 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1485 | |||
1486 | kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); | ||
1487 | } | ||
1488 | |||
1489 | static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( | ||
1490 | struct se_subsystem_dev *su_dev, | ||
1491 | const char *name) | ||
1492 | { | ||
1493 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
1494 | struct config_item *ci; | ||
1495 | |||
1496 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1497 | list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, | ||
1498 | tg_pt_gp_list) { | ||
1499 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) | ||
1500 | continue; | ||
1501 | ci = &tg_pt_gp->tg_pt_gp_group.cg_item; | ||
1502 | if (!(strcmp(config_item_name(ci), name))) { | ||
1503 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); | ||
1504 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1505 | return tg_pt_gp; | ||
1506 | } | ||
1507 | } | ||
1508 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1509 | |||
1510 | return NULL; | ||
1511 | } | ||
1512 | |||
1513 | static void core_alua_put_tg_pt_gp_from_name( | ||
1514 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1515 | { | ||
1516 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; | ||
1517 | |||
1518 | spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1519 | atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); | ||
1520 | spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); | ||
1521 | } | ||
1522 | |||
1523 | /* | ||
1524 | * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held | ||
1525 | */ | ||
1526 | void __core_alua_attach_tg_pt_gp_mem( | ||
1527 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
1528 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1529 | { | ||
1530 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
1531 | tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; | ||
1532 | tg_pt_gp_mem->tg_pt_gp_assoc = 1; | ||
1533 | list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, | ||
1534 | &tg_pt_gp->tg_pt_gp_mem_list); | ||
1535 | tg_pt_gp->tg_pt_gp_members++; | ||
1536 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
1537 | } | ||
1538 | |||
1539 | /* | ||
1540 | * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held | ||
1541 | */ | ||
1542 | static void __core_alua_drop_tg_pt_gp_mem( | ||
1543 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, | ||
1544 | struct t10_alua_tg_pt_gp *tg_pt_gp) | ||
1545 | { | ||
1546 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
1547 | list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); | ||
1548 | tg_pt_gp_mem->tg_pt_gp = NULL; | ||
1549 | tg_pt_gp_mem->tg_pt_gp_assoc = 0; | ||
1550 | tg_pt_gp->tg_pt_gp_members--; | ||
1551 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
1552 | } | ||
1553 | |||
1554 | ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) | ||
1555 | { | ||
1556 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; | ||
1557 | struct config_item *tg_pt_ci; | ||
1558 | struct t10_alua *alua = T10_ALUA(su_dev); | ||
1559 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
1560 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1561 | ssize_t len = 0; | ||
1562 | |||
1563 | if (alua->alua_type != SPC3_ALUA_EMULATED) | ||
1564 | return len; | ||
1565 | |||
1566 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
1567 | if (!(tg_pt_gp_mem)) | ||
1568 | return len; | ||
1569 | |||
1570 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1571 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
1572 | if ((tg_pt_gp)) { | ||
1573 | tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; | ||
1574 | len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" | ||
1575 | " %hu\nTG Port Primary Access State: %s\nTG Port " | ||
1576 | "Primary Access Status: %s\nTG Port Secondary Access" | ||
1577 | " State: %s\nTG Port Secondary Access Status: %s\n", | ||
1578 | config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, | ||
1579 | core_alua_dump_state(atomic_read( | ||
1580 | &tg_pt_gp->tg_pt_gp_alua_access_state)), | ||
1581 | core_alua_dump_status( | ||
1582 | tg_pt_gp->tg_pt_gp_alua_access_status), | ||
1583 | (atomic_read(&port->sep_tg_pt_secondary_offline)) ? | ||
1584 | "Offline" : "None", | ||
1585 | core_alua_dump_status(port->sep_tg_pt_secondary_stat)); | ||
1586 | } | ||
1587 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1588 | |||
1589 | return len; | ||
1590 | } | ||
1591 | |||
1592 | ssize_t core_alua_store_tg_pt_gp_info( | ||
1593 | struct se_port *port, | ||
1594 | const char *page, | ||
1595 | size_t count) | ||
1596 | { | ||
1597 | struct se_portal_group *tpg; | ||
1598 | struct se_lun *lun; | ||
1599 | struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; | ||
1600 | struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; | ||
1601 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1602 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; | ||
1603 | int move = 0; | ||
1604 | |||
1605 | tpg = port->sep_tpg; | ||
1606 | lun = port->sep_lun; | ||
1607 | |||
1608 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { | ||
1609 | printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" | ||
1610 | " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
1611 | TPG_TFO(tpg)->tpg_get_tag(tpg), | ||
1612 | config_item_name(&lun->lun_group.cg_item)); | ||
1613 | return -EINVAL; | ||
1614 | } | ||
1615 | |||
1616 | if (count > TG_PT_GROUP_NAME_BUF) { | ||
1617 | printk(KERN_ERR "ALUA Target Port Group alias too large!\n"); | ||
1618 | return -EINVAL; | ||
1619 | } | ||
1620 | memset(buf, 0, TG_PT_GROUP_NAME_BUF); | ||
1621 | memcpy(buf, page, count); | ||
1622 | /* | ||
1623 | * Any ALUA target port group alias besides "NULL" means we will be | ||
1624 | * making a new group association. | ||
1625 | */ | ||
1626 | if (strcmp(strstrip(buf), "NULL")) { | ||
1627 | /* | ||
1628 | * core_alua_get_tg_pt_gp_by_name() will increment reference to | ||
1629 | * struct t10_alua_tg_pt_gp. This reference is released with | ||
1630 | * core_alua_put_tg_pt_gp_from_name() below. | ||
1631 | */ | ||
1632 | tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, | ||
1633 | strstrip(buf)); | ||
1634 | if (!(tg_pt_gp_new)) | ||
1635 | return -ENODEV; | ||
1636 | } | ||
1637 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
1638 | if (!(tg_pt_gp_mem)) { | ||
1639 | if (tg_pt_gp_new) | ||
1640 | core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); | ||
1641 | printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); | ||
1642 | return -EINVAL; | ||
1643 | } | ||
1644 | |||
1645 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1646 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
1647 | if ((tg_pt_gp)) { | ||
1648 | /* | ||
1649 | * Clearing an existing tg_pt_gp association, and replacing | ||
1650 | * with the default_tg_pt_gp. | ||
1651 | */ | ||
1652 | if (!(tg_pt_gp_new)) { | ||
1653 | printk(KERN_INFO "Target_Core_ConfigFS: Moving" | ||
1654 | " %s/tpgt_%hu/%s from ALUA Target Port Group:" | ||
1655 | " alua/%s, ID: %hu back to" | ||
1656 | " default_tg_pt_gp\n", | ||
1657 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
1658 | TPG_TFO(tpg)->tpg_get_tag(tpg), | ||
1659 | config_item_name(&lun->lun_group.cg_item), | ||
1660 | config_item_name( | ||
1661 | &tg_pt_gp->tg_pt_gp_group.cg_item), | ||
1662 | tg_pt_gp->tg_pt_gp_id); | ||
1663 | |||
1664 | __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); | ||
1665 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | ||
1666 | T10_ALUA(su_dev)->default_tg_pt_gp); | ||
1667 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1668 | |||
1669 | return count; | ||
1670 | } | ||
1671 | /* | ||
1672 | * Removing existing association of tg_pt_gp_mem with tg_pt_gp | ||
1673 | */ | ||
1674 | __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); | ||
1675 | move = 1; | ||
1676 | } | ||
1677 | /* | ||
1678 | * Associate tg_pt_gp_mem with tg_pt_gp_new. | ||
1679 | */ | ||
1680 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); | ||
1681 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
1682 | printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" | ||
1683 | " Target Port Group: alua/%s, ID: %hu\n", (move) ? | ||
1684 | "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
1685 | TPG_TFO(tpg)->tpg_get_tag(tpg), | ||
1686 | config_item_name(&lun->lun_group.cg_item), | ||
1687 | config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), | ||
1688 | tg_pt_gp_new->tg_pt_gp_id); | ||
1689 | |||
1690 | core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); | ||
1691 | return count; | ||
1692 | } | ||
1693 | |||
1694 | ssize_t core_alua_show_access_type( | ||
1695 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1696 | char *page) | ||
1697 | { | ||
1698 | if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && | ||
1699 | (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) | ||
1700 | return sprintf(page, "Implicit and Explicit\n"); | ||
1701 | else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) | ||
1702 | return sprintf(page, "Implicit\n"); | ||
1703 | else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) | ||
1704 | return sprintf(page, "Explicit\n"); | ||
1705 | else | ||
1706 | return sprintf(page, "None\n"); | ||
1707 | } | ||
1708 | |||
1709 | ssize_t core_alua_store_access_type( | ||
1710 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1711 | const char *page, | ||
1712 | size_t count) | ||
1713 | { | ||
1714 | unsigned long tmp; | ||
1715 | int ret; | ||
1716 | |||
1717 | ret = strict_strtoul(page, 0, &tmp); | ||
1718 | if (ret < 0) { | ||
1719 | printk(KERN_ERR "Unable to extract alua_access_type\n"); | ||
1720 | return -EINVAL; | ||
1721 | } | ||
1722 | if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { | ||
1723 | printk(KERN_ERR "Illegal value for alua_access_type:" | ||
1724 | " %lu\n", tmp); | ||
1725 | return -EINVAL; | ||
1726 | } | ||
1727 | if (tmp == 3) | ||
1728 | tg_pt_gp->tg_pt_gp_alua_access_type = | ||
1729 | TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; | ||
1730 | else if (tmp == 2) | ||
1731 | tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; | ||
1732 | else if (tmp == 1) | ||
1733 | tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; | ||
1734 | else | ||
1735 | tg_pt_gp->tg_pt_gp_alua_access_type = 0; | ||
1736 | |||
1737 | return count; | ||
1738 | } | ||
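The show/store pair above maps the configfs values 0..3 onto the TPGS flag bits: 1 = implicit only, 2 = explicit only, 3 = both. Since TPGS_IMPLICT_ALUA is 0x10 and TPGS_EXPLICT_ALUA is 0x20 (see target_core_alua.h below), the whole if/else chain reduces to a 4-bit shift; a small standalone sketch:

    #include <stdio.h>

    #define TPGS_NO_ALUA        0x00
    #define TPGS_IMPLICT_ALUA   0x10    /* spelling matches the header below */
    #define TPGS_EXPLICT_ALUA   0x20

    /* configfs value 0..3 -> TPGS flag bits; equivalent to the chain above. */
    static int access_type_to_tpgs(unsigned long v)
    {
        return (int)(v << 4);   /* bit 0 -> 0x10, bit 1 -> 0x20 */
    }

    static const char *tpgs_to_string(int tpgs)
    {
        switch (tpgs & (TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA)) {
        case TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA:
            return "Implicit and Explicit";
        case TPGS_IMPLICT_ALUA:
            return "Implicit";
        case TPGS_EXPLICT_ALUA:
            return "Explicit";
        default:
            return "None";
        }
    }

    int main(void)
    {
        for (unsigned long v = 0; v <= 3; v++)
            printf("%lu -> 0x%02x (%s)\n", v, access_type_to_tpgs(v),
                   tpgs_to_string(access_type_to_tpgs(v)));
        return 0;
    }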
1739 | |||
1740 | ssize_t core_alua_show_nonop_delay_msecs( | ||
1741 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1742 | char *page) | ||
1743 | { | ||
1744 | return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); | ||
1745 | } | ||
1746 | |||
1747 | ssize_t core_alua_store_nonop_delay_msecs( | ||
1748 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1749 | const char *page, | ||
1750 | size_t count) | ||
1751 | { | ||
1752 | unsigned long tmp; | ||
1753 | int ret; | ||
1754 | |||
1755 | ret = strict_strtoul(page, 0, &tmp); | ||
1756 | if (ret < 0) { | ||
1757 | printk(KERN_ERR "Unable to extract nonop_delay_msecs\n"); | ||
1758 | return -EINVAL; | ||
1759 | } | ||
1760 | if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { | ||
1761 | printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds" | ||
1762 | " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, | ||
1763 | ALUA_MAX_NONOP_DELAY_MSECS); | ||
1764 | return -EINVAL; | ||
1765 | } | ||
1766 | tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; | ||
1767 | |||
1768 | return count; | ||
1769 | } | ||
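core_alua_store_nonop_delay_msecs(), core_alua_store_trans_delay_msecs(), and the bit stores below all follow one pattern: parse the page buffer as an unsigned long (strict_strtoul() in this era of the kernel), range-check against a compile-time bound, assign, and return count so configfs treats the whole write as consumed. A userspace sketch of that parse-validate-assign helper (illustrative names, strtoul in place of strict_strtoul):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Parse "page" as an unsigned long and store it in *out if it does
     * not exceed max; returns 0 on success, -EINVAL on parse or range
     * failure, mirroring the store handlers above.
     */
    static int store_bounded_ulong(const char *page, unsigned long max,
                                   unsigned long *out)
    {
        char *end;
        unsigned long tmp;

        errno = 0;
        tmp = strtoul(page, &end, 0);
        if (errno || end == page)
            return -EINVAL;
        if (tmp > max)
            return -EINVAL;
        *out = tmp;
        return 0;
    }

    int main(void)
    {
        unsigned long nonop_delay = 0;

        /* 10000 mirrors ALUA_MAX_NONOP_DELAY_MSECS from the header below. */
        if (store_bounded_ulong("250", 10000, &nonop_delay) == 0)
            printf("nonop_delay_msecs = %lu\n", nonop_delay);
        if (store_bounded_ulong("99999", 10000, &nonop_delay) < 0)
            printf("rejected out-of-range value\n");
        return 0;
    }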
1770 | |||
1771 | ssize_t core_alua_show_trans_delay_msecs( | ||
1772 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1773 | char *page) | ||
1774 | { | ||
1775 | return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); | ||
1776 | } | ||
1777 | |||
1778 | ssize_t core_alua_store_trans_delay_msecs( | ||
1779 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1780 | const char *page, | ||
1781 | size_t count) | ||
1782 | { | ||
1783 | unsigned long tmp; | ||
1784 | int ret; | ||
1785 | |||
1786 | ret = strict_strtoul(page, 0, &tmp); | ||
1787 | if (ret < 0) { | ||
1788 | printk(KERN_ERR "Unable to extract trans_delay_msecs\n"); | ||
1789 | return -EINVAL; | ||
1790 | } | ||
1791 | if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { | ||
1792 | printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds" | ||
1793 | " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, | ||
1794 | ALUA_MAX_TRANS_DELAY_MSECS); | ||
1795 | return -EINVAL; | ||
1796 | } | ||
1797 | tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; | ||
1798 | |||
1799 | return count; | ||
1800 | } | ||
1801 | |||
1802 | ssize_t core_alua_show_preferred_bit( | ||
1803 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1804 | char *page) | ||
1805 | { | ||
1806 | return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); | ||
1807 | } | ||
1808 | |||
1809 | ssize_t core_alua_store_preferred_bit( | ||
1810 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
1811 | const char *page, | ||
1812 | size_t count) | ||
1813 | { | ||
1814 | unsigned long tmp; | ||
1815 | int ret; | ||
1816 | |||
1817 | ret = strict_strtoul(page, 0, &tmp); | ||
1818 | if (ret < 0) { | ||
1819 | printk(KERN_ERR "Unable to extract preferred ALUA value\n"); | ||
1820 | return -EINVAL; | ||
1821 | } | ||
1822 | if ((tmp != 0) && (tmp != 1)) { | ||
1823 | printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp); | ||
1824 | return -EINVAL; | ||
1825 | } | ||
1826 | tg_pt_gp->tg_pt_gp_pref = (int)tmp; | ||
1827 | |||
1828 | return count; | ||
1829 | } | ||
1830 | |||
1831 | ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) | ||
1832 | { | ||
1833 | if (!(lun->lun_sep)) | ||
1834 | return -ENODEV; | ||
1835 | |||
1836 | return sprintf(page, "%d\n", | ||
1837 | atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); | ||
1838 | } | ||
1839 | |||
1840 | ssize_t core_alua_store_offline_bit( | ||
1841 | struct se_lun *lun, | ||
1842 | const char *page, | ||
1843 | size_t count) | ||
1844 | { | ||
1845 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
1846 | unsigned long tmp; | ||
1847 | int ret; | ||
1848 | |||
1849 | if (!(lun->lun_sep)) | ||
1850 | return -ENODEV; | ||
1851 | |||
1852 | ret = strict_strtoul(page, 0, &tmp); | ||
1853 | if (ret < 0) { | ||
1854 | printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); | ||
1855 | return -EINVAL; | ||
1856 | } | ||
1857 | if ((tmp != 0) && (tmp != 1)) { | ||
1858 | printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", | ||
1859 | tmp); | ||
1860 | return -EINVAL; | ||
1861 | } | ||
1862 | tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; | ||
1863 | if (!(tg_pt_gp_mem)) { | ||
1864 | printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); | ||
1865 | return -EINVAL; | ||
1866 | } | ||
1867 | |||
1868 | ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, | ||
1869 | lun->lun_sep, 0, (int)tmp); | ||
1870 | if (ret < 0) | ||
1871 | return -EINVAL; | ||
1872 | |||
1873 | return count; | ||
1874 | } | ||
1875 | |||
1876 | ssize_t core_alua_show_secondary_status( | ||
1877 | struct se_lun *lun, | ||
1878 | char *page) | ||
1879 | { | ||
1880 | return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); | ||
1881 | } | ||
1882 | |||
1883 | ssize_t core_alua_store_secondary_status( | ||
1884 | struct se_lun *lun, | ||
1885 | const char *page, | ||
1886 | size_t count) | ||
1887 | { | ||
1888 | unsigned long tmp; | ||
1889 | int ret; | ||
1890 | |||
1891 | ret = strict_strtoul(page, 0, &tmp); | ||
1892 | if (ret < 0) { | ||
1893 | printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); | ||
1894 | return -EINVAL; | ||
1895 | } | ||
1896 | if ((tmp != ALUA_STATUS_NONE) && | ||
1897 | (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && | ||
1898 | (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { | ||
1899 | printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", | ||
1900 | tmp); | ||
1901 | return -EINVAL; | ||
1902 | } | ||
1903 | lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; | ||
1904 | |||
1905 | return count; | ||
1906 | } | ||
1907 | |||
1908 | ssize_t core_alua_show_secondary_write_metadata( | ||
1909 | struct se_lun *lun, | ||
1910 | char *page) | ||
1911 | { | ||
1912 | return sprintf(page, "%d\n", | ||
1913 | lun->lun_sep->sep_tg_pt_secondary_write_md); | ||
1914 | } | ||
1915 | |||
1916 | ssize_t core_alua_store_secondary_write_metadata( | ||
1917 | struct se_lun *lun, | ||
1918 | const char *page, | ||
1919 | size_t count) | ||
1920 | { | ||
1921 | unsigned long tmp; | ||
1922 | int ret; | ||
1923 | |||
1924 | ret = strict_strtoul(page, 0, &tmp); | ||
1925 | if (ret < 0) { | ||
1926 | printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); | ||
1927 | return -EINVAL; | ||
1928 | } | ||
1929 | if ((tmp != 0) && (tmp != 1)) { | ||
1930 | printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" | ||
1931 | " %lu\n", tmp); | ||
1932 | return -EINVAL; | ||
1933 | } | ||
1934 | lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; | ||
1935 | |||
1936 | return count; | ||
1937 | } | ||
1938 | |||
1939 | int core_setup_alua(struct se_device *dev, int force_pt) | ||
1940 | { | ||
1941 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | ||
1942 | struct t10_alua *alua = T10_ALUA(su_dev); | ||
1943 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
1944 | /* | ||
1945 | * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic | ||
1946 | * of the Underlying SCSI hardware. In Linux/SCSI terms, this can | ||
1947 | * cause a problem because libata and some SATA RAID HBAs appear | ||
1948 | * under Linux/SCSI, but emulate SCSI logic themselves. | ||
1949 | */ | ||
1950 | if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && | ||
1951 | !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) { | ||
1952 | alua->alua_type = SPC_ALUA_PASSTHROUGH; | ||
1953 | alua->alua_state_check = &core_alua_state_check_nop; | ||
1954 | printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" | ||
1955 | " emulation\n", TRANSPORT(dev)->name); | ||
1956 | return 0; | ||
1957 | } | ||
1958 | /* | ||
1959 | * If SPC-3 or above is reported by real or emulated struct se_device, | ||
1960 | * use emulated ALUA. | ||
1961 | */ | ||
1962 | if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { | ||
1963 | printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" | ||
1964 | " device\n", TRANSPORT(dev)->name); | ||
1965 | /* | ||
1966 | * Associate this struct se_device with the default ALUA | ||
1967 | * LUN Group. | ||
1968 | */ | ||
1969 | lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); | ||
1970 | if (IS_ERR(lu_gp_mem) || !lu_gp_mem) | ||
1971 | return -1; | ||
1972 | |||
1973 | alua->alua_type = SPC3_ALUA_EMULATED; | ||
1974 | alua->alua_state_check = &core_alua_state_check; | ||
1975 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | ||
1976 | __core_alua_attach_lu_gp_mem(lu_gp_mem, | ||
1977 | se_global->default_lu_gp); | ||
1978 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
1979 | |||
1980 | printk(KERN_INFO "%s: Adding to default ALUA LU Group:" | ||
1981 | " core/alua/lu_gps/default_lu_gp\n", | ||
1982 | TRANSPORT(dev)->name); | ||
1983 | } else { | ||
1984 | alua->alua_type = SPC2_ALUA_DISABLED; | ||
1985 | alua->alua_state_check = &core_alua_state_check_nop; | ||
1986 | printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" | ||
1987 | " device\n", TRANSPORT(dev)->name); | ||
1988 | } | ||
1989 | |||
1990 | return 0; | ||
1991 | } | ||
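core_setup_alua() picks one of three modes: SPC_ALUA_PASSTHROUGH when the backend is real SCSI hardware (or force_pt is set) and should answer ALUA itself, SPC3_ALUA_EMULATED when the device reports SPC-3 or later, and SPC2_ALUA_DISABLED otherwise. The decision reduces to a small pure function; a hedged sketch where the predicate parameters are illustrative stand-ins for TRANSPORT_PLUGIN_PHBA_PDEV, DEV_ATTRIB(dev)->emulate_alua, and get_device_rev():

    #include <stdio.h>

    enum alua_type {
        SPC_ALUA_PASSTHROUGH,   /* hand ALUA to the underlying hardware */
        SPC2_ALUA_DISABLED,     /* pre-SPC-3 device: no ALUA */
        SPC3_ALUA_EMULATED,     /* emulate ALUA in the target core */
    };

    static enum alua_type choose_alua_type(int is_passthrough_hba,
                                           int emulate_alua_attrib,
                                           int scsi_rev, int force_pt)
    {
        if ((is_passthrough_hba && !emulate_alua_attrib) || force_pt)
            return SPC_ALUA_PASSTHROUGH;
        if (scsi_rev >= 3)      /* SPC-3 or newer */
            return SPC3_ALUA_EMULATED;
        return SPC2_ALUA_DISABLED;
    }

    int main(void)
    {
        printf("%d\n", choose_alua_type(1, 0, 4, 0));  /* passthrough */
        printf("%d\n", choose_alua_type(0, 0, 4, 0));  /* emulated */
        printf("%d\n", choose_alua_type(0, 0, 2, 0));  /* disabled */
        return 0;
    }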
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h new file mode 100644 index 000000000000..c86f97a081ed --- /dev/null +++ b/drivers/target/target_core_alua.h | |||
@@ -0,0 +1,126 @@ | |||
1 | #ifndef TARGET_CORE_ALUA_H | ||
2 | #define TARGET_CORE_ALUA_H | ||
3 | |||
4 | /* | ||
5 | * INQUIRY response data, TPGS Field | ||
6 | * | ||
7 | * from spc4r17 section 6.4.2 Table 135 | ||
8 | */ | ||
9 | #define TPGS_NO_ALUA 0x00 | ||
10 | #define TPGS_IMPLICT_ALUA 0x10 | ||
11 | #define TPGS_EXPLICT_ALUA 0x20 | ||
12 | |||
13 | /* | ||
14 | * ASYMMETRIC ACCESS STATE field | ||
15 | * | ||
16 | * from spc4r17 section 6.27 Table 245 | ||
17 | */ | ||
18 | #define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0 | ||
19 | #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 | ||
20 | #define ALUA_ACCESS_STATE_STANDBY 0x2 | ||
21 | #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 | ||
22 | #define ALUA_ACCESS_STATE_OFFLINE 0xe | ||
23 | #define ALUA_ACCESS_STATE_TRANSITION 0xf | ||
24 | |||
25 | /* | ||
26 | * REPORT_TARGET_PORT_GROUP STATUS CODE | ||
27 | * | ||
28 | * from spc4r17 section 6.27 Table 246 | ||
29 | */ | ||
30 | #define ALUA_STATUS_NONE 0x00 | ||
31 | #define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01 | ||
32 | #define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02 | ||
33 | |||
34 | /* | ||
35 | * From spc4r17, Table D.1: ASC and ASCQ Assignment | ||
36 | */ | ||
37 | #define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a | ||
38 | #define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b | ||
39 | #define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c | ||
40 | #define ASCQ_04H_ALUA_OFFLINE 0x12 | ||
41 | |||
42 | /* | ||
43 | * Used as the default for Active/NonOptimized delay (in milliseconds) | ||
44 | * This can also be changed via configfs on a per target port group basis. | ||
45 | */ | ||
46 | #define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 | ||
47 | #define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ | ||
48 | /* | ||
49 | * Used for implicit and explicit ALUA transitional delay, which is disabled | ||
50 | * by default, and is intended to be used for debugging client side ALUA code. | ||
51 | */ | ||
52 | #define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 | ||
53 | #define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ | ||
54 | /* | ||
55 | * Used by core_alua_update_tpg_primary_metadata() and | ||
56 | * core_alua_update_tpg_secondary_metadata() | ||
57 | */ | ||
58 | #define ALUA_METADATA_PATH_LEN 512 | ||
59 | /* | ||
60 | * Used by core_alua_update_tpg_secondary_metadata() | ||
61 | */ | ||
62 | #define ALUA_SECONDARY_METADATA_WWN_LEN 256 | ||
63 | |||
64 | extern struct kmem_cache *t10_alua_lu_gp_cache; | ||
65 | extern struct kmem_cache *t10_alua_lu_gp_mem_cache; | ||
66 | extern struct kmem_cache *t10_alua_tg_pt_gp_cache; | ||
67 | extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | ||
68 | |||
69 | extern int core_emulate_report_target_port_groups(struct se_cmd *); | ||
70 | extern int core_emulate_set_target_port_groups(struct se_cmd *); | ||
71 | extern int core_alua_check_nonop_delay(struct se_cmd *); | ||
72 | extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, | ||
73 | struct se_device *, struct se_port *, | ||
74 | struct se_node_acl *, int, int); | ||
75 | extern char *core_alua_dump_status(int); | ||
76 | extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int); | ||
77 | extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16); | ||
78 | extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *); | ||
79 | extern void core_alua_free_lu_gp_mem(struct se_device *); | ||
80 | extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *); | ||
81 | extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *); | ||
82 | extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *, | ||
83 | struct t10_alua_lu_gp *); | ||
84 | extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *, | ||
85 | struct t10_alua_lu_gp *); | ||
86 | extern void core_alua_drop_lu_gp_dev(struct se_device *); | ||
87 | extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( | ||
88 | struct se_subsystem_dev *, const char *, int); | ||
89 | extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); | ||
90 | extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( | ||
91 | struct se_port *); | ||
92 | extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *); | ||
93 | extern void core_alua_free_tg_pt_gp_mem(struct se_port *); | ||
94 | extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *, | ||
95 | struct t10_alua_tg_pt_gp *); | ||
96 | extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *); | ||
97 | extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *, | ||
98 | size_t); | ||
99 | extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *); | ||
100 | extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *, | ||
101 | const char *, size_t); | ||
102 | extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *, | ||
103 | char *); | ||
104 | extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *, | ||
105 | const char *, size_t); | ||
106 | extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, | ||
107 | char *); | ||
108 | extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, | ||
109 | const char *, size_t); | ||
110 | extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, | ||
111 | char *); | ||
112 | extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *, | ||
113 | const char *, size_t); | ||
114 | extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *); | ||
115 | extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *, | ||
116 | size_t); | ||
117 | extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *); | ||
118 | extern ssize_t core_alua_store_secondary_status(struct se_lun *, | ||
119 | const char *, size_t); | ||
120 | extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *, | ||
121 | char *); | ||
122 | extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *, | ||
123 | const char *, size_t); | ||
124 | extern int core_setup_alua(struct se_device *, int); | ||
125 | |||
126 | #endif /* TARGET_CORE_ALUA_H */ | ||
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c new file mode 100644 index 000000000000..366080baf474 --- /dev/null +++ b/drivers/target/target_core_cdb.c | |||
@@ -0,0 +1,1131 @@ | |||
1 | /* | ||
2 | * CDB emulation for non-READ/WRITE commands. | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | ||
5 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
6 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
7 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
8 | * | ||
9 | * Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <asm/unaligned.h> | ||
27 | #include <scsi/scsi.h> | ||
28 | |||
29 | #include <target/target_core_base.h> | ||
30 | #include <target/target_core_transport.h> | ||
31 | #include <target/target_core_fabric_ops.h> | ||
32 | #include "target_core_ua.h" | ||
33 | |||
34 | static void | ||
35 | target_fill_alua_data(struct se_port *port, unsigned char *buf) | ||
36 | { | ||
37 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
38 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
39 | |||
40 | /* | ||
41 | * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. | ||
42 | */ | ||
43 | buf[5] = 0x80; | ||
44 | |||
45 | /* | ||
46 | * Set TPGS field for explicit and/or implicit ALUA access type | ||
47 | * and operation. | ||
48 | * | ||
49 | * See spc4r17 section 6.4.2 Table 135 | ||
50 | */ | ||
51 | if (!port) | ||
52 | return; | ||
53 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
54 | if (!tg_pt_gp_mem) | ||
55 | return; | ||
56 | |||
57 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
58 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
59 | if (tg_pt_gp) | ||
60 | buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; | ||
61 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
62 | } | ||
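target_fill_alua_data() composes byte 5 of the standard INQUIRY response: bit 7 (0x80) is SCCS, and bits 5:4 carry the TPGS field, so OR-ing in the group's tg_pt_gp_alua_access_type (0x10, 0x20, or 0x30) advertises the supported ALUA modes. A standalone sketch of the composition and of the initiator-side decode:

    #include <stdio.h>

    #define SCCS                0x80    /* SCC Supported */
    #define TPGS_IMPLICT_ALUA   0x10
    #define TPGS_EXPLICT_ALUA   0x20

    int main(void)
    {
        unsigned char byte5 = SCCS;
        unsigned tpgs;

        /* Target side: advertise both ALUA modes, as the LIO default does. */
        byte5 |= TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;

        /* Initiator side: TPGS is the 2-bit field at bits 5:4 of byte 5. */
        tpgs = (byte5 >> 4) & 0x3;
        printf("byte5=0x%02x tpgs=%u (%s)\n", byte5, tpgs,
               tpgs == 3 ? "implicit+explicit" :
               tpgs == 2 ? "explicit" :
               tpgs == 1 ? "implicit" : "none");
        return 0;
    }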
63 | |||
64 | static int | ||
65 | target_emulate_inquiry_std(struct se_cmd *cmd) | ||
66 | { | ||
67 | struct se_lun *lun = SE_LUN(cmd); | ||
68 | struct se_device *dev = SE_DEV(cmd); | ||
69 | unsigned char *buf = cmd->t_task->t_task_buf; | ||
70 | |||
71 | /* | ||
72 | * Make sure we at least have 6 bytes of INQUIRY response | ||
73 | * payload going back for EVPD=0 | ||
74 | */ | ||
75 | if (cmd->data_length < 6) { | ||
76 | printk(KERN_ERR "SCSI Inquiry payload length: %u" | ||
77 | " too small for EVPD=0\n", cmd->data_length); | ||
78 | return -1; | ||
79 | } | ||
80 | |||
81 | buf[0] = dev->transport->get_device_type(dev); | ||
82 | if (buf[0] == TYPE_TAPE) | ||
83 | buf[1] = 0x80; | ||
84 | buf[2] = dev->transport->get_device_rev(dev); | ||
85 | |||
86 | /* | ||
87 | * Enable SCCS and TPGS fields for Emulated ALUA | ||
88 | */ | ||
89 | if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED) | ||
90 | target_fill_alua_data(lun->lun_sep, buf); | ||
91 | |||
92 | if (cmd->data_length < 8) { | ||
93 | buf[4] = 1; /* Set additional length to 1 */ | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ | ||
98 | |||
99 | /* | ||
100 | * Do not include vendor, product, or revision info in INQUIRY | ||
101 | * response payload for cdbs with a small allocation length. | ||
102 | */ | ||
103 | if (cmd->data_length < 36) { | ||
104 | buf[4] = 3; /* Set additional length to 3 */ | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); | ||
109 | snprintf((unsigned char *)&buf[16], 16, "%s", | ||
110 | &DEV_T10_WWN(dev)->model[0]); | ||
111 | snprintf((unsigned char *)&buf[32], 4, "%s", | ||
112 | &DEV_T10_WWN(dev)->revision[0]); | ||
113 | buf[4] = 31; /* Set additional length to 31 */ | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /* supported vital product data pages */ | ||
118 | static int | ||
119 | target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) | ||
120 | { | ||
121 | buf[1] = 0x00; | ||
122 | if (cmd->data_length < 8) | ||
123 | return 0; | ||
124 | |||
125 | buf[4] = 0x0; | ||
126 | /* | ||
127 | * Only report the INQUIRY EVPD=1 pages after a valid NAA | ||
128 | * Registered Extended LUN WWN has been set via ConfigFS | ||
129 | * during device creation/restart. | ||
130 | */ | ||
131 | if (SE_DEV(cmd)->se_sub_dev->su_dev_flags & | ||
132 | SDF_EMULATED_VPD_UNIT_SERIAL) { | ||
133 | buf[3] = 3; | ||
134 | buf[5] = 0x80; | ||
135 | buf[6] = 0x83; | ||
136 | buf[7] = 0x86; | ||
137 | } | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /* unit serial number */ | ||
143 | static int | ||
144 | target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) | ||
145 | { | ||
146 | struct se_device *dev = SE_DEV(cmd); | ||
147 | u16 len = 0; | ||
148 | |||
149 | buf[1] = 0x80; | ||
150 | if (dev->se_sub_dev->su_dev_flags & | ||
151 | SDF_EMULATED_VPD_UNIT_SERIAL) { | ||
152 | u32 unit_serial_len; | ||
153 | |||
154 | unit_serial_len = | ||
155 | strlen(&DEV_T10_WWN(dev)->unit_serial[0]); | ||
156 | unit_serial_len++; /* For NULL Terminator */ | ||
157 | |||
158 | if (((len + 4) + unit_serial_len) > cmd->data_length) { | ||
159 | len += unit_serial_len; | ||
160 | buf[2] = ((len >> 8) & 0xff); | ||
161 | buf[3] = (len & 0xff); | ||
162 | return 0; | ||
163 | } | ||
164 | len += sprintf((unsigned char *)&buf[4], "%s", | ||
165 | &DEV_T10_WWN(dev)->unit_serial[0]); | ||
166 | len++; /* Extra Byte for NULL Terminator */ | ||
167 | buf[3] = len; | ||
168 | } | ||
169 | return 0; | ||
170 | } | ||
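target_emulate_evpd_80() shows the SPC allocation-length rule in action: when the serial number does not fit in cmd->data_length, the handler still reports the full PAGE LENGTH so the initiator can retry with a bigger buffer, and copies the payload only when it fits. A minimal sketch of that rule (illustrative names; the serial string is example data):

    #include <stdio.h>
    #include <string.h>

    /*
     * Fill a VPD 0x80 page into buf, honoring the initiator's allocation
     * length: if the serial does not fit, report the full PAGE LENGTH
     * anyway and copy nothing, so the initiator can re-issue the INQUIRY.
     */
    static unsigned fill_unit_serial(unsigned char *buf, unsigned alloc_len,
                                     const char *serial)
    {
        unsigned serial_len = (unsigned)strlen(serial) + 1; /* NUL included */

        buf[1] = 0x80;                          /* page code */
        if (4 + serial_len > alloc_len) {
            buf[2] = (serial_len >> 8) & 0xff;
            buf[3] = serial_len & 0xff;         /* full length, no payload */
            return 4;
        }
        memcpy(&buf[4], serial, serial_len);
        buf[3] = serial_len;
        return 4 + serial_len;
    }

    int main(void)
    {
        unsigned char buf[64] = { 0 };
        unsigned n;

        n = fill_unit_serial(buf, sizeof(buf), "8a7c5e02-3f41-4d2a-9c1b");
        printf("returned %u bytes, page length %u\n", n, buf[3]);
        n = fill_unit_serial(buf, 8, "8a7c5e02-3f41-4d2a-9c1b");
        printf("truncated: returned %u bytes, page length %u\n", n, buf[3]);
        return 0;
    }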
171 | |||
172 | /* | ||
173 | * Device identification VPD, for a complete list of | ||
174 | * DESIGNATOR TYPEs see spc4r17 Table 459. | ||
175 | */ | ||
176 | static int | ||
177 | target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) | ||
178 | { | ||
179 | struct se_device *dev = SE_DEV(cmd); | ||
180 | struct se_lun *lun = SE_LUN(cmd); | ||
181 | struct se_port *port = NULL; | ||
182 | struct se_portal_group *tpg = NULL; | ||
183 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
184 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
185 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
186 | unsigned char binary, binary_new; | ||
187 | unsigned char *prod = &DEV_T10_WWN(dev)->model[0]; | ||
188 | u32 prod_len; | ||
189 | u32 unit_serial_len, off = 0; | ||
190 | int i; | ||
191 | u16 len = 0, id_len; | ||
192 | |||
193 | buf[1] = 0x83; | ||
194 | off = 4; | ||
195 | |||
196 | /* | ||
197 | * NAA IEEE Registered Extended Assigned designator format, see | ||
198 | * spc4r17 section 7.7.3.6.5 | ||
199 | * | ||
200 | * We depend upon a target_core_mod/ConfigFS provided | ||
201 | * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial | ||
202 | * value in order to return the NAA id. | ||
203 | */ | ||
204 | if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL)) | ||
205 | goto check_t10_vend_desc; | ||
206 | |||
207 | if (off + 20 > cmd->data_length) | ||
208 | goto check_t10_vend_desc; | ||
209 | |||
210 | /* CODE SET == Binary */ | ||
211 | buf[off++] = 0x1; | ||
212 | |||
213 | /* Set ASSOCIATION == addressed logical unit: 00b */ | ||
214 | buf[off] = 0x00; | ||
215 | |||
216 | /* Identifier/Designator type == NAA identifier */ | ||
217 | buf[off++] = 0x3; | ||
218 | off++; | ||
219 | |||
220 | /* Identifier/Designator length */ | ||
221 | buf[off++] = 0x10; | ||
222 | |||
223 | /* | ||
224 | * Start NAA IEEE Registered Extended Identifier/Designator | ||
225 | */ | ||
226 | buf[off++] = (0x6 << 4); | ||
227 | |||
228 | /* | ||
229 | * Use OpenFabrics IEEE Company ID: 00 14 05 | ||
230 | */ | ||
231 | buf[off++] = 0x01; | ||
232 | buf[off++] = 0x40; | ||
233 | buf[off] = (0x5 << 4); | ||
234 | |||
235 | /* | ||
236 | * Return ConfigFS Unit Serial Number information for | ||
237 | * VENDOR_SPECIFIC_IDENTIFIER and | ||
238 | * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION | ||
239 | */ | ||
240 | binary = transport_asciihex_to_binaryhex( | ||
241 | &DEV_T10_WWN(dev)->unit_serial[0]); | ||
242 | buf[off++] |= (binary & 0xf0) >> 4; | ||
243 | for (i = 0; i < 24; i += 2) { | ||
244 | binary_new = transport_asciihex_to_binaryhex( | ||
245 | &DEV_T10_WWN(dev)->unit_serial[i+2]); | ||
246 | buf[off] = (binary & 0x0f) << 4; | ||
247 | buf[off++] |= (binary_new & 0xf0) >> 4; | ||
248 | binary = binary_new; | ||
249 | } | ||
250 | len = 20; | ||
251 | off = (len + 4); | ||
252 | |||
253 | check_t10_vend_desc: | ||
254 | /* | ||
255 | * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4 | ||
256 | */ | ||
257 | id_len = 8; /* For Vendor field */ | ||
258 | prod_len = 4; /* For VPD Header */ | ||
259 | prod_len += 8; /* For Vendor field */ | ||
260 | prod_len += strlen(prod); | ||
261 | prod_len++; /* For : */ | ||
262 | |||
263 | if (dev->se_sub_dev->su_dev_flags & | ||
264 | SDF_EMULATED_VPD_UNIT_SERIAL) { | ||
265 | unit_serial_len = | ||
266 | strlen(&DEV_T10_WWN(dev)->unit_serial[0]); | ||
267 | unit_serial_len++; /* For NULL Terminator */ | ||
268 | |||
269 | if ((len + (id_len + 4) + | ||
270 | (prod_len + unit_serial_len)) > | ||
271 | cmd->data_length) { | ||
272 | len += (prod_len + unit_serial_len); | ||
273 | goto check_port; | ||
274 | } | ||
275 | id_len += sprintf((unsigned char *)&buf[off+12], | ||
276 | "%s:%s", prod, | ||
277 | &DEV_T10_WWN(dev)->unit_serial[0]); | ||
278 | } | ||
279 | buf[off] = 0x2; /* ASCII */ | ||
280 | buf[off+1] = 0x1; /* T10 Vendor ID */ | ||
281 | buf[off+2] = 0x0; | ||
282 | memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8); | ||
283 | /* Extra Byte for NULL Terminator */ | ||
284 | id_len++; | ||
285 | /* Identifier Length */ | ||
286 | buf[off+3] = id_len; | ||
287 | /* Header size for Designation descriptor */ | ||
288 | len += (id_len + 4); | ||
289 | off += (id_len + 4); | ||
290 | /* | ||
291 | * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD | ||
292 | */ | ||
293 | check_port: | ||
294 | port = lun->lun_sep; | ||
295 | if (port) { | ||
296 | struct t10_alua_lu_gp *lu_gp; | ||
297 | u32 padding, scsi_name_len; | ||
298 | u16 lu_gp_id = 0; | ||
299 | u16 tg_pt_gp_id = 0; | ||
300 | u16 tpgt; | ||
301 | |||
302 | tpg = port->sep_tpg; | ||
303 | /* | ||
304 | * Relative target port identifier, see spc4r17 | ||
305 | * section 7.7.3.7 | ||
306 | * | ||
307 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | ||
308 | * section 7.5.1 Table 362 | ||
309 | */ | ||
310 | if (((len + 4) + 8) > cmd->data_length) { | ||
311 | len += 8; | ||
312 | goto check_tpgi; | ||
313 | } | ||
314 | buf[off] = | ||
315 | (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); | ||
316 | buf[off++] |= 0x1; /* CODE SET == Binary */ | ||
317 | buf[off] = 0x80; /* Set PIV=1 */ | ||
318 | /* Set ASSOCIATION == target port: 01b */ | ||
319 | buf[off] |= 0x10; | ||
320 | /* DESIGNATOR TYPE == Relative target port identifier */ | ||
321 | buf[off++] |= 0x4; | ||
322 | off++; /* Skip over Reserved */ | ||
323 | buf[off++] = 4; /* DESIGNATOR LENGTH */ | ||
324 | /* Skip over Obsolete field in RTPI payload | ||
325 | * in Table 472 */ | ||
326 | off += 2; | ||
327 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | ||
328 | buf[off++] = (port->sep_rtpi & 0xff); | ||
329 | len += 8; /* Header size + Designation descriptor */ | ||
330 | /* | ||
331 | * Target port group identifier, see spc4r17 | ||
332 | * section 7.7.3.8 | ||
333 | * | ||
334 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | ||
335 | * section 7.5.1 Table 362 | ||
336 | */ | ||
337 | check_tpgi: | ||
338 | if (T10_ALUA(dev->se_sub_dev)->alua_type != | ||
339 | SPC3_ALUA_EMULATED) | ||
340 | goto check_scsi_name; | ||
341 | |||
342 | if (((len + 4) + 8) > cmd->data_length) { | ||
343 | len += 8; | ||
344 | goto check_lu_gp; | ||
345 | } | ||
346 | tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; | ||
347 | if (!tg_pt_gp_mem) | ||
348 | goto check_lu_gp; | ||
349 | |||
350 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
351 | tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; | ||
352 | if (!(tg_pt_gp)) { | ||
353 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
354 | goto check_lu_gp; | ||
355 | } | ||
356 | tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; | ||
357 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
358 | |||
359 | buf[off] = | ||
360 | (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); | ||
361 | buf[off++] |= 0x1; /* CODE SET == Binary */ | ||
362 | buf[off] = 0x80; /* Set PIV=1 */ | ||
364 | /* Set ASSOCIATION == target port: 01b */ | ||
364 | buf[off] |= 0x10; | ||
365 | /* DESIGNATOR TYPE == Target port group identifier */ | ||
366 | buf[off++] |= 0x5; | ||
367 | off++; /* Skip over Reserved */ | ||
368 | buf[off++] = 4; /* DESIGNATOR LENGTH */ | ||
369 | off += 2; /* Skip over Reserved Field */ | ||
370 | buf[off++] = ((tg_pt_gp_id >> 8) & 0xff); | ||
371 | buf[off++] = (tg_pt_gp_id & 0xff); | ||
372 | len += 8; /* Header size + Designation descriptor */ | ||
373 | /* | ||
374 | * Logical Unit Group identifier, see spc4r17 | ||
375 | * section 7.7.3.8 | ||
376 | */ | ||
377 | check_lu_gp: | ||
378 | if (((len + 4) + 8) > cmd->data_length) { | ||
379 | len += 8; | ||
380 | goto check_scsi_name; | ||
381 | } | ||
382 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | ||
383 | if (!(lu_gp_mem)) | ||
384 | goto check_scsi_name; | ||
385 | |||
386 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | ||
387 | lu_gp = lu_gp_mem->lu_gp; | ||
388 | if (!(lu_gp)) { | ||
389 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
390 | goto check_scsi_name; | ||
391 | } | ||
392 | lu_gp_id = lu_gp->lu_gp_id; | ||
393 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
394 | |||
395 | buf[off++] |= 0x1; /* CODE SET == Binary */ | ||
396 | /* DESIGNATOR TYPE == Logical Unit Group identifier */ | ||
397 | buf[off++] |= 0x6; | ||
398 | off++; /* Skip over Reserved */ | ||
399 | buf[off++] = 4; /* DESIGNATOR LENGTH */ | ||
400 | off += 2; /* Skip over Reserved Field */ | ||
401 | buf[off++] = ((lu_gp_id >> 8) & 0xff); | ||
402 | buf[off++] = (lu_gp_id & 0xff); | ||
403 | len += 8; /* Header size + Designation descriptor */ | ||
404 | /* | ||
405 | * SCSI name string designator, see spc4r17 | ||
406 | * section 7.7.3.11 | ||
407 | * | ||
408 | * Get the PROTOCOL IDENTIFIER as defined by spc4r17 | ||
409 | * section 7.5.1 Table 362 | ||
410 | */ | ||
411 | check_scsi_name: | ||
412 | scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg)); | ||
413 | /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ | ||
414 | scsi_name_len += 10; | ||
415 | /* Check for 4-byte padding */ | ||
416 | padding = ((-scsi_name_len) & 3); | ||
417 | if (padding != 0) | ||
418 | scsi_name_len += padding; | ||
419 | /* Header size + Designation descriptor */ | ||
420 | scsi_name_len += 4; | ||
421 | |||
422 | if (((len + 4) + scsi_name_len) > cmd->data_length) { | ||
423 | len += scsi_name_len; | ||
424 | goto set_len; | ||
425 | } | ||
426 | buf[off] = | ||
427 | (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); | ||
428 | buf[off++] |= 0x3; /* CODE SET == UTF-8 */ | ||
429 | buf[off] = 0x80; /* Set PIV=1 */ | ||
430 | /* Set ASSOCIATION == target port: 01b */ | ||
431 | buf[off] |= 0x10; | ||
432 | /* DESIGNATOR TYPE == SCSI name string */ | ||
433 | buf[off++] |= 0x8; | ||
434 | off += 2; /* Skip over Reserved and length */ | ||
435 | /* | ||
436 | * SCSI name string identifier containing $FABRIC_MOD | ||
437 | * dependent information. For LIO-Target and iSCSI | ||
438 | * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in | ||
439 | * UTF-8 encoding. | ||
440 | */ | ||
441 | tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); | ||
442 | scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", | ||
443 | TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt); | ||
444 | scsi_name_len += 1 /* Include NULL terminator */; | ||
445 | /* | ||
446 | * The null-terminated, null-padded (see 4.4.2) SCSI | ||
447 | * NAME STRING field contains a UTF-8 format string. | ||
448 | * The number of bytes in the SCSI NAME STRING field | ||
449 | * (i.e., the value in the DESIGNATOR LENGTH field) | ||
450 | * shall be no larger than 256 and shall be a multiple | ||
451 | * of four. | ||
452 | */ | ||
453 | if (padding) | ||
454 | scsi_name_len += padding; | ||
455 | |||
456 | buf[off-1] = scsi_name_len; | ||
457 | off += scsi_name_len; | ||
458 | /* Header size + Designation descriptor */ | ||
459 | len += (scsi_name_len + 4); | ||
460 | } | ||
461 | set_len: | ||
462 | buf[2] = ((len >> 8) & 0xff); | ||
463 | buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ | ||
464 | return 0; | ||
465 | } | ||
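The first designator built above is the 16-byte NAA IEEE Registered Extended identifier: 4 bits of NAA type (0x6), the 24-bit IEEE company ID (00 14 05 here, the OpenFabrics OUI), then vendor-specific nibbles taken from the hex unit serial. A self-contained sketch of the nibble packing; transport_asciihex_to_binaryhex() is approximated by a local hex-digit helper and the serial string is example data:

    #include <stdio.h>

    static unsigned hex_nibble(char c)
    {
        return (c >= '0' && c <= '9') ? (unsigned)(c - '0') :
               (unsigned)((c | 0x20) - 'a' + 10);
    }

    /*
     * Build the 16-byte NAA IEEE Registered Extended designator used in
     * the VPD 0x83 page: NAA type 6, company ID 00:14:05, then 25 hex
     * nibbles of the unit serial for the vendor-specific fields.
     */
    static void build_naa(unsigned char naa[16], const char *serial)
    {
        unsigned off = 0, nib = 0;

        naa[off++] = 0x60;              /* NAA 6 | company id[23:20] */
        naa[off++] = 0x01;              /* company id[19:12] */
        naa[off++] = 0x40;              /* company id[11:4]  */
        naa[off] = 0x50;                /* company id[3:0]   */
        /* One serial nibble into the low half, then two per byte. */
        naa[off++] |= hex_nibble(serial[nib++]);
        while (off < 16) {
            naa[off] = hex_nibble(serial[nib++]) << 4;
            naa[off++] |= hex_nibble(serial[nib++]);
        }
    }

    int main(void)
    {
        unsigned char naa[16];

        build_naa(naa, "1234567890abcdef1234567890");  /* >= 25 hex digits */
        for (int i = 0; i < 16; i++)
            printf("%02x", naa[i]);
        printf("\n");
        return 0;
    }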
466 | |||
467 | /* Extended INQUIRY Data VPD Page */ | ||
468 | static int | ||
469 | target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | ||
470 | { | ||
471 | if (cmd->data_length < 60) | ||
472 | return 0; | ||
473 | |||
474 | buf[1] = 0x86; | ||
475 | buf[2] = 0x3c; | ||
476 | /* Set HEADSUP, ORDSUP, SIMPSUP */ | ||
477 | buf[5] = 0x07; | ||
478 | |||
479 | /* If WriteCache emulation is enabled, set V_SUP */ | ||
480 | if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0) | ||
481 | buf[6] = 0x01; | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | /* Block Limits VPD page */ | ||
486 | static int | ||
487 | target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) | ||
488 | { | ||
489 | struct se_device *dev = SE_DEV(cmd); | ||
490 | int have_tp = 0; | ||
491 | |||
492 | /* | ||
493 | * Following sbc3r22 section 6.5.3 Block Limits VPD page, when | ||
494 | * emulate_tpu=1 or emulate_tpws=1 we expect a | ||
495 | * different page length for Thin Provisioning. | ||
496 | */ | ||
497 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | ||
498 | have_tp = 1; | ||
499 | |||
500 | if (cmd->data_length < (0x10 + 4)) { | ||
501 | printk(KERN_INFO "Received data_length: %u" | ||
502 | " too small for EVPD 0xb0\n", | ||
503 | cmd->data_length); | ||
504 | return -1; | ||
505 | } | ||
506 | |||
507 | if (have_tp && cmd->data_length < (0x3c + 4)) { | ||
508 | printk(KERN_INFO "Received data_length: %u" | ||
509 | " too small for TPE=1 EVPD 0xb0\n", | ||
510 | cmd->data_length); | ||
511 | have_tp = 0; | ||
512 | } | ||
513 | |||
514 | buf[0] = dev->transport->get_device_type(dev); | ||
515 | buf[1] = 0xb0; | ||
516 | buf[3] = have_tp ? 0x3c : 0x10; | ||
517 | |||
518 | /* | ||
519 | * Set OPTIMAL TRANSFER LENGTH GRANULARITY | ||
520 | */ | ||
521 | put_unaligned_be16(1, &buf[6]); | ||
522 | |||
523 | /* | ||
524 | * Set MAXIMUM TRANSFER LENGTH | ||
525 | */ | ||
526 | put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]); | ||
527 | |||
528 | /* | ||
529 | * Set OPTIMAL TRANSFER LENGTH | ||
530 | */ | ||
531 | put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]); | ||
532 | |||
533 | /* | ||
534 | * Exit now if we don't support TP or the initiator sent a too | ||
535 | * short buffer. | ||
536 | */ | ||
537 | if (!have_tp || cmd->data_length < (0x3c + 4)) | ||
538 | return 0; | ||
539 | |||
540 | /* | ||
541 | * Set MAXIMUM UNMAP LBA COUNT | ||
542 | */ | ||
543 | put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]); | ||
544 | |||
545 | /* | ||
546 | * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT | ||
547 | */ | ||
548 | put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count, | ||
549 | &buf[24]); | ||
550 | |||
551 | /* | ||
552 | * Set OPTIMAL UNMAP GRANULARITY | ||
553 | */ | ||
554 | put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]); | ||
555 | |||
556 | /* | ||
557 | * UNMAP GRANULARITY ALIGNMENT | ||
558 | */ | ||
559 | put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment, | ||
560 | &buf[32]); | ||
561 | if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0) | ||
562 | buf[32] |= 0x80; /* Set the UGAVALID bit */ | ||
563 | |||
564 | return 0; | ||
565 | } | ||
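The Block Limits fields above are all stored big-endian with put_unaligned_be16()/put_unaligned_be32() from <asm/unaligned.h>. For reference, the equivalent byte shifting (what those helpers produce on any host endianness, minus the unaligned-access handling) looks like this standalone sketch:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void put_be16(uint16_t v, unsigned char *p)
    {
        p[0] = (v >> 8) & 0xff;
        p[1] = v & 0xff;
    }

    static void put_be32(uint32_t v, unsigned char *p)
    {
        p[0] = (v >> 24) & 0xff;
        p[1] = (v >> 16) & 0xff;
        p[2] = (v >> 8) & 0xff;
        p[3] = v & 0xff;
    }

    int main(void)
    {
        unsigned char page[0x3c + 4];

        memset(page, 0, sizeof(page));
        put_be16(1, &page[6]);      /* OPTIMAL TRANSFER LENGTH GRANULARITY */
        put_be32(1024, &page[8]);   /* MAXIMUM TRANSFER LENGTH (sectors) */
        put_be32(1024, &page[12]);  /* OPTIMAL TRANSFER LENGTH */
        printf("%02x %02x %02x %02x\n",
               page[8], page[9], page[10], page[11]);  /* 00 00 04 00 */
        return 0;
    }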
566 | |||
567 | /* Thin Provisioning VPD */ | ||
568 | static int | ||
569 | target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) | ||
570 | { | ||
571 | struct se_device *dev = SE_DEV(cmd); | ||
572 | |||
573 | /* | ||
574 | * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: | ||
575 | * | ||
576 | * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to | ||
577 | * zero, then the page length shall be set to 0004h. If the DP bit | ||
578 | * is set to one, then the page length shall be set to the value | ||
579 | * defined in table 162. | ||
580 | */ | ||
581 | buf[0] = dev->transport->get_device_type(dev); | ||
582 | buf[1] = 0xb2; | ||
583 | |||
584 | /* | ||
585 | * Set Hardcoded length mentioned above for DP=0 | ||
586 | */ | ||
587 | put_unaligned_be16(0x0004, &buf[2]); | ||
588 | |||
589 | /* | ||
590 | * The THRESHOLD EXPONENT field indicates the threshold set size in | ||
591 | * LBAs as a power of 2 (i.e., the threshold set size is equal to | ||
592 | * 2(threshold exponent)). | ||
593 | * | ||
594 | * Note that this is currently set to 0x00 as mkp says it will be | ||
595 | * changing again. We can enable this once it has settled in T10 | ||
596 | * and is actually used by Linux/SCSI ML code. | ||
597 | */ | ||
598 | buf[4] = 0x00; | ||
599 | |||
600 | /* | ||
601 | * A TPU bit set to one indicates that the device server supports | ||
602 | * the UNMAP command (see 5.25). A TPU bit set to zero indicates | ||
603 | * that the device server does not support the UNMAP command. | ||
604 | */ | ||
605 | if (DEV_ATTRIB(dev)->emulate_tpu != 0) | ||
606 | buf[5] = 0x80; | ||
607 | |||
608 | /* | ||
609 | * A TPWS bit set to one indicates that the device server supports | ||
610 | * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs. | ||
611 | * A TPWS bit set to zero indicates that the device server does not | ||
612 | * support the use of the WRITE SAME (16) command to unmap LBAs. | ||
613 | */ | ||
614 | if (DEV_ATTRIB(dev)->emulate_tpws != 0) | ||
615 | buf[5] |= 0x40; | ||
616 | |||
617 | return 0; | ||
618 | } | ||
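/*
 * Illustrative sketch, not part of the patch: reading back the TPU
 * and TPWS bits packed into byte 5 above, as an initiator would when
 * choosing between UNMAP and WRITE SAME (16) for discard. The helper
 * names are hypothetical.
 */
static bool example_tpu_set(const unsigned char *buf)
{
	return (buf[5] & 0x80) != 0;	/* TPU: UNMAP supported */
}

static bool example_tpws_set(const unsigned char *buf)
{
	return (buf[5] & 0x40) != 0;	/* TPWS: WRITE SAME (16) unmap */
}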
619 | |||
620 | static int | ||
621 | target_emulate_inquiry(struct se_cmd *cmd) | ||
622 | { | ||
623 | struct se_device *dev = SE_DEV(cmd); | ||
624 | unsigned char *buf = cmd->t_task->t_task_buf; | ||
625 | unsigned char *cdb = cmd->t_task->t_task_cdb; | ||
626 | |||
627 | if (!(cdb[1] & 0x1)) | ||
628 | return target_emulate_inquiry_std(cmd); | ||
629 | |||
630 | /* | ||
631 | * Make sure we at least have 4 bytes of INQUIRY response | ||
632 | * payload for 0x00 going back for EVPD=1. Note that 0x80 | ||
633 | * and 0x83 will check for enough payload data length and | ||
634 | * jump to set_len: label when there is not enough inquiry EVPD | ||
635 | * payload length left for the next outgoing EVPD metadata | ||
636 | */ | ||
637 | if (cmd->data_length < 4) { | ||
638 | printk(KERN_ERR "SCSI Inquiry payload length: %u" | ||
639 | " too small for EVPD=1\n", cmd->data_length); | ||
640 | return -1; | ||
641 | } | ||
642 | buf[0] = dev->transport->get_device_type(dev); | ||
643 | |||
644 | switch (cdb[2]) { | ||
645 | case 0x00: | ||
646 | return target_emulate_evpd_00(cmd, buf); | ||
647 | case 0x80: | ||
648 | return target_emulate_evpd_80(cmd, buf); | ||
649 | case 0x83: | ||
650 | return target_emulate_evpd_83(cmd, buf); | ||
651 | case 0x86: | ||
652 | return target_emulate_evpd_86(cmd, buf); | ||
653 | case 0xb0: | ||
654 | return target_emulate_evpd_b0(cmd, buf); | ||
655 | case 0xb2: | ||
656 | return target_emulate_evpd_b2(cmd, buf); | ||
657 | default: | ||
658 | printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); | ||
659 | return -1; | ||
660 | } | ||
661 | |||
662 | return 0; | ||
663 | } | ||
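/*
 * Illustrative sketch, not part of the patch: the 6-byte INQUIRY CDB
 * fields the dispatch above keys off -- EVPD is bit 0 of byte 1 and
 * the requested page code is byte 2. The helper name and the chosen
 * page code are examples only.
 */
static void example_build_inquiry_cdb(unsigned char *cdb, u16 alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01;				/* EVPD=1 */
	cdb[2] = 0xb0;				/* Block Limits VPD */
	put_unaligned_be16(alloc_len, &cdb[3]);	/* ALLOCATION LENGTH */
}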
664 | |||
665 | static int | ||
666 | target_emulate_readcapacity(struct se_cmd *cmd) | ||
667 | { | ||
668 | struct se_device *dev = SE_DEV(cmd); | ||
669 | unsigned char *buf = cmd->t_task->t_task_buf; | ||
670 | u32 blocks = dev->transport->get_blocks(dev); | ||
671 | |||
672 | buf[0] = (blocks >> 24) & 0xff; | ||
673 | buf[1] = (blocks >> 16) & 0xff; | ||
674 | buf[2] = (blocks >> 8) & 0xff; | ||
675 | buf[3] = blocks & 0xff; | ||
676 | buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; | ||
677 | buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; | ||
678 | buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; | ||
679 | buf[7] = DEV_ATTRIB(dev)->block_size & 0xff; | ||
680 | /* | ||
681 | * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 | ||
682 | */ | ||
683 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | ||
684 | put_unaligned_be32(0xFFFFFFFF, &buf[0]); | ||
685 | |||
686 | return 0; | ||
687 | } | ||
688 | |||
689 | static int | ||
690 | target_emulate_readcapacity_16(struct se_cmd *cmd) | ||
691 | { | ||
692 | struct se_device *dev = SE_DEV(cmd); | ||
693 | unsigned char *buf = cmd->t_task->t_task_buf; | ||
694 | unsigned long long blocks = dev->transport->get_blocks(dev); | ||
695 | |||
696 | buf[0] = (blocks >> 56) & 0xff; | ||
697 | buf[1] = (blocks >> 48) & 0xff; | ||
698 | buf[2] = (blocks >> 40) & 0xff; | ||
699 | buf[3] = (blocks >> 32) & 0xff; | ||
700 | buf[4] = (blocks >> 24) & 0xff; | ||
701 | buf[5] = (blocks >> 16) & 0xff; | ||
702 | buf[6] = (blocks >> 8) & 0xff; | ||
703 | buf[7] = blocks & 0xff; | ||
704 | buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; | ||
705 | buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; | ||
706 | buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; | ||
707 | buf[11] = DEV_ATTRIB(dev)->block_size & 0xff; | ||
708 | /* | ||
709 | * Set the Thin Provisioning Enable bit in byte 14, per the sbc3r22 | ||
710 | * READ CAPACITY (16) section, if emulate_tpu or emulate_tpws is enabled. | ||
711 | */ | ||
712 | if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) | ||
713 | buf[14] = 0x80; | ||
714 | |||
715 | return 0; | ||
716 | } | ||
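/*
 * Illustrative sketch, not part of the patch: decoding the parameter
 * data built above. Bytes 0-7 hold the last LBA, bytes 8-11 the block
 * size, and bit 7 of byte 14 the thin provisioning flag set when
 * emulate_tpu or emulate_tpws is enabled. The helper name is
 * hypothetical.
 */
static void example_decode_readcap16(const unsigned char *buf,
				     u64 *last_lba, u32 *block_size,
				     bool *thin_provisioned)
{
	*last_lba = get_unaligned_be64(&buf[0]);
	*block_size = get_unaligned_be32(&buf[8]);
	*thin_provisioned = (buf[14] & 0x80) != 0;
}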
717 | |||
718 | static int | ||
719 | target_modesense_rwrecovery(unsigned char *p) | ||
720 | { | ||
721 | p[0] = 0x01; | ||
722 | p[1] = 0x0a; | ||
723 | |||
724 | return 12; | ||
725 | } | ||
726 | |||
727 | static int | ||
728 | target_modesense_control(struct se_device *dev, unsigned char *p) | ||
729 | { | ||
730 | p[0] = 0x0a; | ||
731 | p[1] = 0x0a; | ||
732 | p[2] = 2; | ||
733 | /* | ||
734 | * From spc4r17, section 7.4.6 Control mode Page | ||
735 | * | ||
736 | * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b | ||
737 | * | ||
738 | * 00b: The logical unit shall clear any unit attention condition | ||
739 | * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION | ||
740 | * status and shall not establish a unit attention condition when a com- | ||
741 | * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT | ||
742 | * status. | ||
743 | * | ||
744 | * 10b: The logical unit shall not clear any unit attention condition | ||
745 | * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION | ||
746 | * status and shall not establish a unit attention condition when | ||
747 | * a command is completed with BUSY, TASK SET FULL, or RESERVATION | ||
748 | * CONFLICT status. | ||
749 | * | ||
750 | * 11b: The logical unit shall not clear any unit attention condition | ||
751 | * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION | ||
752 | * status and shall establish a unit attention condition for the | ||
753 | * initiator port associated with the I_T nexus on which the BUSY, | ||
754 | * TASK SET FULL, or RESERVATION CONFLICT status is being returned. | ||
755 | * Depending on the status, the additional sense code shall be set to | ||
756 | * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS | ||
757 | * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE | ||
758 | * command, a unit attention condition shall be established only once | ||
759 | * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless | ||
760 | * of the number of commands completed with one of those status codes. | ||
761 | */ | ||
762 | p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 : | ||
763 | (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; | ||
764 | /* | ||
765 | * From spc4r17, section 7.4.6 Control mode Page | ||
766 | * | ||
767 | * Task Aborted Status (TAS) bit set to zero. | ||
768 | * | ||
769 | * A task aborted status (TAS) bit set to zero specifies that aborted | ||
770 | * tasks shall be terminated by the device server without any response | ||
771 | * to the application client. A TAS bit set to one specifies that tasks | ||
772 | * aborted by the actions of an I_T nexus other than the I_T nexus on | ||
773 | * which the command was received shall be completed with TASK ABORTED | ||
774 | * status (see SAM-4). | ||
775 | */ | ||
776 | p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00; | ||
777 | p[8] = 0xff; | ||
778 | p[9] = 0xff; | ||
779 | p[11] = 30; | ||
780 | |||
781 | return 12; | ||
782 | } | ||
783 | |||
784 | static int | ||
785 | target_modesense_caching(struct se_device *dev, unsigned char *p) | ||
786 | { | ||
787 | p[0] = 0x08; | ||
788 | p[1] = 0x12; | ||
789 | if (DEV_ATTRIB(dev)->emulate_write_cache > 0) | ||
790 | p[2] = 0x04; /* Write Cache Enable */ | ||
791 | p[12] = 0x20; /* Disabled Read Ahead */ | ||
792 | |||
793 | return 20; | ||
794 | } | ||
795 | |||
796 | static void | ||
797 | target_modesense_write_protect(unsigned char *buf, int type) | ||
798 | { | ||
799 | /* | ||
800 | * I believe that the WP bit (bit 7) in the mode header is the same for | ||
801 | * all device types. | ||
802 | */ | ||
803 | switch (type) { | ||
804 | case TYPE_DISK: | ||
805 | case TYPE_TAPE: | ||
806 | default: | ||
807 | buf[0] |= 0x80; /* WP bit */ | ||
808 | break; | ||
809 | } | ||
810 | } | ||
811 | |||
812 | static void | ||
813 | target_modesense_dpofua(unsigned char *buf, int type) | ||
814 | { | ||
815 | switch (type) { | ||
816 | case TYPE_DISK: | ||
817 | buf[0] |= 0x10; /* DPOFUA bit */ | ||
818 | break; | ||
819 | default: | ||
820 | break; | ||
821 | } | ||
822 | } | ||
823 | |||
824 | static int | ||
825 | target_emulate_modesense(struct se_cmd *cmd, int ten) | ||
826 | { | ||
827 | struct se_device *dev = SE_DEV(cmd); | ||
828 | char *cdb = cmd->t_task->t_task_cdb; | ||
829 | unsigned char *rbuf = cmd->t_task->t_task_buf; | ||
830 | int type = dev->transport->get_device_type(dev); | ||
831 | int offset = (ten) ? 8 : 4; | ||
832 | int length = 0; | ||
833 | unsigned char buf[SE_MODE_PAGE_BUF]; | ||
834 | |||
835 | memset(buf, 0, SE_MODE_PAGE_BUF); | ||
836 | |||
837 | switch (cdb[2] & 0x3f) { | ||
838 | case 0x01: | ||
839 | length = target_modesense_rwrecovery(&buf[offset]); | ||
840 | break; | ||
841 | case 0x08: | ||
842 | length = target_modesense_caching(dev, &buf[offset]); | ||
843 | break; | ||
844 | case 0x0a: | ||
845 | length = target_modesense_control(dev, &buf[offset]); | ||
846 | break; | ||
847 | case 0x3f: | ||
848 | length = target_modesense_rwrecovery(&buf[offset]); | ||
849 | length += target_modesense_caching(dev, &buf[offset+length]); | ||
850 | length += target_modesense_control(dev, &buf[offset+length]); | ||
851 | break; | ||
852 | default: | ||
853 | printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", | ||
854 | cdb[2] & 0x3f); | ||
855 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; | ||
856 | } | ||
857 | offset += length; | ||
858 | |||
859 | if (ten) { | ||
860 | offset -= 2; | ||
861 | buf[0] = (offset >> 8) & 0xff; | ||
862 | buf[1] = offset & 0xff; | ||
863 | |||
864 | if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | ||
865 | (cmd->se_deve && | ||
866 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | ||
867 | target_modesense_write_protect(&buf[3], type); | ||
868 | |||
869 | if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && | ||
870 | (DEV_ATTRIB(dev)->emulate_fua_write > 0)) | ||
871 | target_modesense_dpofua(&buf[3], type); | ||
872 | |||
873 | if ((offset + 2) > cmd->data_length) | ||
874 | offset = cmd->data_length; | ||
875 | |||
876 | } else { | ||
877 | offset -= 1; | ||
878 | buf[0] = offset & 0xff; | ||
879 | |||
880 | if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || | ||
881 | (cmd->se_deve && | ||
882 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | ||
883 | target_modesense_write_protect(&buf[2], type); | ||
884 | |||
885 | if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && | ||
886 | (DEV_ATTRIB(dev)->emulate_fua_write > 0)) | ||
887 | target_modesense_dpofua(&buf[2], type); | ||
888 | |||
889 | if ((offset + 1) > cmd->data_length) | ||
890 | offset = cmd->data_length; | ||
891 | } | ||
892 | memcpy(rbuf, buf, offset); | ||
893 | |||
894 | return 0; | ||
895 | } | ||
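/*
 * Illustrative sketch, not part of the patch: the mode parameter
 * header the function above fills in. MODE SENSE (10) stores the
 * MODE DATA LENGTH big-endian in bytes 0-1 with the WP/DPOFUA flags
 * in byte 3; MODE SENSE (6) stores the length in byte 0 with the
 * flags in byte 2. The helper name is hypothetical.
 */
static u16 example_mode_data_length(const unsigned char *buf, int ten)
{
	return ten ? get_unaligned_be16(&buf[0]) : buf[0];
}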
896 | |||
897 | static int | ||
898 | target_emulate_request_sense(struct se_cmd *cmd) | ||
899 | { | ||
900 | unsigned char *cdb = cmd->t_task->t_task_cdb; | ||
901 | unsigned char *buf = cmd->t_task->t_task_buf; | ||
902 | u8 ua_asc = 0, ua_ascq = 0; | ||
903 | |||
904 | if (cdb[1] & 0x01) { | ||
905 | printk(KERN_ERR "REQUEST_SENSE description emulation not" | ||
906 | " supported\n"); | ||
907 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
908 | } | ||
909 | if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { | ||
910 | /* | ||
911 | * CURRENT ERROR, UNIT ATTENTION | ||
912 | */ | ||
913 | buf[0] = 0x70; | ||
914 | buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | ||
915 | /* | ||
916 | * Make sure request data length is enough for additional | ||
917 | * sense data. | ||
918 | */ | ||
919 | if (cmd->data_length <= 18) { | ||
920 | buf[7] = 0x00; | ||
921 | return 0; | ||
922 | } | ||
923 | /* | ||
924 | * The Additional Sense Code (ASC) from the UNIT ATTENTION | ||
925 | */ | ||
926 | buf[SPC_ASC_KEY_OFFSET] = ua_asc; | ||
927 | buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq; | ||
928 | buf[7] = 0x0A; | ||
929 | } else { | ||
930 | /* | ||
931 | * CURRENT ERROR, NO SENSE | ||
932 | */ | ||
933 | buf[0] = 0x70; | ||
934 | buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; | ||
935 | /* | ||
936 | * Make sure request data length is enough for additional | ||
937 | * sense data. | ||
938 | */ | ||
939 | if (cmd->data_length <= 18) { | ||
940 | buf[7] = 0x00; | ||
941 | return 0; | ||
942 | } | ||
943 | /* | ||
944 | * NO ADDITIONAL SENSE INFORMATION | ||
945 | */ | ||
946 | buf[SPC_ASC_KEY_OFFSET] = 0x00; | ||
947 | buf[7] = 0x0A; | ||
948 | } | ||
949 | |||
950 | return 0; | ||
951 | } | ||
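/*
 * Illustrative sketch, not part of the patch: the fixed-format sense
 * layout used above -- response code 0x70 in byte 0, sense key at
 * SPC_SENSE_KEY_OFFSET, ASC/ASCQ at SPC_ASC_KEY_OFFSET and
 * SPC_ASCQ_KEY_OFFSET, and additional sense length 0x0A in byte 7.
 * The helper name is hypothetical.
 */
static void example_fill_fixed_sense(unsigned char *buf, u8 key,
				     u8 asc, u8 ascq)
{
	buf[0] = 0x70;			/* current error, fixed format */
	buf[SPC_SENSE_KEY_OFFSET] = key;
	buf[SPC_ASC_KEY_OFFSET] = asc;
	buf[SPC_ASCQ_KEY_OFFSET] = ascq;
	buf[7] = 0x0A;			/* additional sense length */
}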
952 | |||
953 | /* | ||
954 | * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. | ||
955 | * Note this is not used for TCM/pSCSI passthrough | ||
956 | */ | ||
957 | static int | ||
958 | target_emulate_unmap(struct se_task *task) | ||
959 | { | ||
960 | struct se_cmd *cmd = TASK_CMD(task); | ||
961 | struct se_device *dev = SE_DEV(cmd); | ||
962 | unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; | ||
963 | unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; | ||
964 | sector_t lba; | ||
965 | unsigned int size = cmd->data_length, range; | ||
966 | int ret, offset; | ||
967 | unsigned short dl, bd_dl; | ||
968 | |||
969 | /* First UNMAP block descriptor starts at 8 byte offset */ | ||
970 | offset = 8; | ||
971 | size -= 8; | ||
972 | dl = get_unaligned_be16(&cdb[0]); | ||
973 | bd_dl = get_unaligned_be16(&cdb[2]); | ||
974 | ptr = &buf[offset]; | ||
975 | printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u" | ||
976 | " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); | ||
977 | |||
978 | while (size) { | ||
979 | lba = get_unaligned_be64(&ptr[0]); | ||
980 | range = get_unaligned_be32(&ptr[8]); | ||
981 | printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", | ||
982 | (unsigned long long)lba, range); | ||
983 | |||
984 | ret = dev->transport->do_discard(dev, lba, range); | ||
985 | if (ret < 0) { | ||
986 | printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", | ||
987 | ret); | ||
988 | return -1; | ||
989 | } | ||
990 | |||
991 | ptr += 16; | ||
992 | size -= 16; | ||
993 | } | ||
994 | |||
995 | task->task_scsi_status = GOOD; | ||
996 | transport_complete_task(task, 1); | ||
997 | return 0; | ||
998 | } | ||
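/*
 * Illustrative sketch, not part of the patch: filling one UNMAP block
 * descriptor of the parameter list walked above -- an 8-byte header
 * followed by 16-byte descriptors (8-byte LBA, 4-byte block count,
 * 4 reserved bytes). The helper name is hypothetical.
 */
static void example_fill_unmap_descriptor(unsigned char *buf, int index,
					  u64 lba, u32 nr_blocks)
{
	unsigned char *d = &buf[8 + index * 16];

	put_unaligned_be64(lba, &d[0]);
	put_unaligned_be32(nr_blocks, &d[8]);
}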
999 | |||
1000 | /* | ||
1001 | * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. | ||
1002 | * Note this is not used for TCM/pSCSI passthrough | ||
1003 | */ | ||
1004 | static int | ||
1005 | target_emulate_write_same(struct se_task *task) | ||
1006 | { | ||
1007 | struct se_cmd *cmd = TASK_CMD(task); | ||
1008 | struct se_device *dev = SE_DEV(cmd); | ||
1009 | sector_t lba = cmd->t_task->t_task_lba; | ||
1010 | unsigned int range; | ||
1011 | int ret; | ||
1012 | |||
1013 | range = (cmd->data_length / DEV_ATTRIB(dev)->block_size); | ||
1014 | |||
1015 | printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", | ||
1016 | (unsigned long long)lba, range); | ||
1017 | |||
1018 | ret = dev->transport->do_discard(dev, lba, range); | ||
1019 | if (ret < 0) { | ||
1020 | printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); | ||
1021 | return -1; | ||
1022 | } | ||
1023 | |||
1024 | task->task_scsi_status = GOOD; | ||
1025 | transport_complete_task(task, 1); | ||
1026 | return 0; | ||
1027 | } | ||
1028 | |||
1029 | int | ||
1030 | transport_emulate_control_cdb(struct se_task *task) | ||
1031 | { | ||
1032 | struct se_cmd *cmd = TASK_CMD(task); | ||
1033 | struct se_device *dev = SE_DEV(cmd); | ||
1034 | unsigned short service_action; | ||
1035 | int ret = 0; | ||
1036 | |||
1037 | switch (cmd->t_task->t_task_cdb[0]) { | ||
1038 | case INQUIRY: | ||
1039 | ret = target_emulate_inquiry(cmd); | ||
1040 | break; | ||
1041 | case READ_CAPACITY: | ||
1042 | ret = target_emulate_readcapacity(cmd); | ||
1043 | break; | ||
1044 | case MODE_SENSE: | ||
1045 | ret = target_emulate_modesense(cmd, 0); | ||
1046 | break; | ||
1047 | case MODE_SENSE_10: | ||
1048 | ret = target_emulate_modesense(cmd, 1); | ||
1049 | break; | ||
1050 | case SERVICE_ACTION_IN: | ||
1051 | switch (cmd->t_task->t_task_cdb[1] & 0x1f) { | ||
1052 | case SAI_READ_CAPACITY_16: | ||
1053 | ret = target_emulate_readcapacity_16(cmd); | ||
1054 | break; | ||
1055 | default: | ||
1056 | printk(KERN_ERR "Unsupported SA: 0x%02x\n", | ||
1057 | cmd->t_task->t_task_cdb[1] & 0x1f); | ||
1058 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1059 | } | ||
1060 | break; | ||
1061 | case REQUEST_SENSE: | ||
1062 | ret = target_emulate_request_sense(cmd); | ||
1063 | break; | ||
1064 | case UNMAP: | ||
1065 | if (!dev->transport->do_discard) { | ||
1066 | printk(KERN_ERR "UNMAP emulation not supported for: %s\n", | ||
1067 | dev->transport->name); | ||
1068 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1069 | } | ||
1070 | ret = target_emulate_unmap(task); | ||
1071 | break; | ||
1072 | case WRITE_SAME_16: | ||
1073 | if (!dev->transport->do_discard) { | ||
1074 | printk(KERN_ERR "WRITE_SAME_16 emulation not supported" | ||
1075 | " for: %s\n", dev->transport->name); | ||
1076 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1077 | } | ||
1078 | ret = target_emulate_write_same(task); | ||
1079 | break; | ||
1080 | case VARIABLE_LENGTH_CMD: | ||
1081 | service_action = | ||
1082 | get_unaligned_be16(&cmd->t_task->t_task_cdb[8]); | ||
1083 | switch (service_action) { | ||
1084 | case WRITE_SAME_32: | ||
1085 | if (!dev->transport->do_discard) { | ||
1086 | printk(KERN_ERR "WRITE_SAME_32 SA emulation not" | ||
1087 | " supported for: %s\n", | ||
1088 | dev->transport->name); | ||
1089 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1090 | } | ||
1091 | ret = target_emulate_write_same(task); | ||
1092 | break; | ||
1093 | default: | ||
1094 | printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" | ||
1095 | " 0x%02x\n", service_action); | ||
1096 | break; | ||
1097 | } | ||
1098 | break; | ||
1099 | case SYNCHRONIZE_CACHE: | ||
1100 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | ||
1101 | if (!dev->transport->do_sync_cache) { | ||
1102 | printk(KERN_ERR | ||
1103 | "SYNCHRONIZE_CACHE emulation not supported" | ||
1104 | " for: %s\n", dev->transport->name); | ||
1105 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1106 | } | ||
1107 | dev->transport->do_sync_cache(task); | ||
1108 | break; | ||
1109 | case ALLOW_MEDIUM_REMOVAL: | ||
1110 | case ERASE: | ||
1111 | case REZERO_UNIT: | ||
1112 | case SEEK_10: | ||
1113 | case SPACE: | ||
1114 | case START_STOP: | ||
1115 | case TEST_UNIT_READY: | ||
1116 | case VERIFY: | ||
1117 | case WRITE_FILEMARKS: | ||
1118 | break; | ||
1119 | default: | ||
1120 | printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", | ||
1121 | cmd->t_task->t_task_cdb[0], dev->transport->name); | ||
1122 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1123 | } | ||
1124 | |||
1125 | if (ret < 0) | ||
1126 | return ret; | ||
1127 | task->task_scsi_status = GOOD; | ||
1128 | transport_complete_task(task, 1); | ||
1129 | |||
1130 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
1131 | } | ||
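/*
 * Illustrative sketch, not part of the patch: a backend reaches the
 * UNMAP/WRITE_SAME emulation above by providing do_discard in its
 * subsystem ops; per the messages above, IBLOCK backs this with
 * blkdev_issue_discard(). The stub name and body are hypothetical.
 */
static int example_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	/* translate lba/range to the backing store and discard it */
	return 0;
}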
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c new file mode 100644 index 000000000000..2764510798b0 --- /dev/null +++ b/drivers/target/target_core_configfs.c | |||
@@ -0,0 +1,3225 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_configfs.c | ||
3 | * | ||
4 | * This file contains ConfigFS logic for the Generic Target Engine project. | ||
5 | * | ||
6 | * Copyright (c) 2008-2010 Rising Tide Systems | ||
7 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
8 | * | ||
9 | * Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * based on configfs Copyright (C) 2005 Oracle. All rights reserved. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | ****************************************************************************/ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/version.h> | ||
27 | #include <generated/utsrelease.h> | ||
28 | #include <linux/utsname.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/fs.h> | ||
31 | #include <linux/namei.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/unistd.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/parser.h> | ||
38 | #include <linux/syscalls.h> | ||
39 | #include <linux/configfs.h> | ||
40 | #include <linux/proc_fs.h> | ||
41 | |||
42 | #include <target/target_core_base.h> | ||
43 | #include <target/target_core_device.h> | ||
44 | #include <target/target_core_transport.h> | ||
45 | #include <target/target_core_fabric_ops.h> | ||
46 | #include <target/target_core_fabric_configfs.h> | ||
47 | #include <target/target_core_configfs.h> | ||
48 | #include <target/configfs_macros.h> | ||
49 | |||
50 | #include "target_core_alua.h" | ||
51 | #include "target_core_hba.h" | ||
52 | #include "target_core_pr.h" | ||
53 | #include "target_core_rd.h" | ||
54 | |||
55 | static struct list_head g_tf_list; | ||
56 | static struct mutex g_tf_lock; | ||
57 | |||
58 | struct target_core_configfs_attribute { | ||
59 | struct configfs_attribute attr; | ||
60 | ssize_t (*show)(void *, char *); | ||
61 | ssize_t (*store)(void *, const char *, size_t); | ||
62 | }; | ||
63 | |||
64 | static inline struct se_hba * | ||
65 | item_to_hba(struct config_item *item) | ||
66 | { | ||
67 | return container_of(to_config_group(item), struct se_hba, hba_group); | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Attributes for /sys/kernel/config/target/ | ||
72 | */ | ||
73 | static ssize_t target_core_attr_show(struct config_item *item, | ||
74 | struct configfs_attribute *attr, | ||
75 | char *page) | ||
76 | { | ||
77 | return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" | ||
78 | " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION, | ||
79 | utsname()->sysname, utsname()->machine); | ||
80 | } | ||
81 | |||
82 | static struct configfs_item_operations target_core_fabric_item_ops = { | ||
83 | .show_attribute = target_core_attr_show, | ||
84 | }; | ||
85 | |||
86 | static struct configfs_attribute target_core_item_attr_version = { | ||
87 | .ca_owner = THIS_MODULE, | ||
88 | .ca_name = "version", | ||
89 | .ca_mode = S_IRUGO, | ||
90 | }; | ||
91 | |||
92 | static struct target_fabric_configfs *target_core_get_fabric( | ||
93 | const char *name) | ||
94 | { | ||
95 | struct target_fabric_configfs *tf; | ||
96 | |||
97 | if (!(name)) | ||
98 | return NULL; | ||
99 | |||
100 | mutex_lock(&g_tf_lock); | ||
101 | list_for_each_entry(tf, &g_tf_list, tf_list) { | ||
102 | if (!(strcmp(tf->tf_name, name))) { | ||
103 | atomic_inc(&tf->tf_access_cnt); | ||
104 | mutex_unlock(&g_tf_lock); | ||
105 | return tf; | ||
106 | } | ||
107 | } | ||
108 | mutex_unlock(&g_tf_lock); | ||
109 | |||
110 | return NULL; | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Called from struct target_core_group_ops->make_group() | ||
115 | */ | ||
116 | static struct config_group *target_core_register_fabric( | ||
117 | struct config_group *group, | ||
118 | const char *name) | ||
119 | { | ||
120 | struct target_fabric_configfs *tf; | ||
121 | int ret; | ||
122 | |||
123 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" | ||
124 | " %s\n", group, name); | ||
125 | /* | ||
126 | * Ensure that TCM subsystem plugins are loaded at this point for | ||
127 | * using the RAMDISK_DR virtual LUN 0 and all other struct se_port | ||
128 | * LUN symlinks. | ||
129 | */ | ||
130 | if (transport_subsystem_check_init() < 0) | ||
131 | return ERR_PTR(-EINVAL); | ||
132 | |||
133 | /* | ||
134 | * Below are some hardcoded request_module() calls to automatically | ||
135 | * load fabric modules when the following is called: | ||
136 | * | ||
137 | * mkdir -p /sys/kernel/config/target/$MODULE_NAME | ||
138 | * | ||
139 | * Note that this does not limit which TCM fabric module can be | ||
140 | * registered, but simply provides auto loading logic for modules with | ||
141 | * mkdir(2) system calls with known TCM fabric modules. | ||
142 | */ | ||
143 | if (!(strncmp(name, "iscsi", 5))) { | ||
144 | /* | ||
145 | * Automatically load the LIO Target fabric module when the | ||
146 | * following is called: | ||
147 | * | ||
148 | * mkdir -p $CONFIGFS/target/iscsi | ||
149 | */ | ||
150 | ret = request_module("iscsi_target_mod"); | ||
151 | if (ret < 0) { | ||
152 | printk(KERN_ERR "request_module() failed for" | ||
153 | " iscsi_target_mod.ko: %d\n", ret); | ||
154 | return ERR_PTR(-EINVAL); | ||
155 | } | ||
156 | } else if (!(strncmp(name, "loopback", 8))) { | ||
157 | /* | ||
158 | * Automatically load the tcm_loop fabric module when the | ||
159 | * following is called: | ||
160 | * | ||
161 | * mkdir -p $CONFIGFS/target/loopback | ||
162 | */ | ||
163 | ret = request_module("tcm_loop"); | ||
164 | if (ret < 0) { | ||
165 | printk(KERN_ERR "request_module() failed for" | ||
166 | " tcm_loop.ko: %d\n", ret); | ||
167 | return ERR_PTR(-EINVAL); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | tf = target_core_get_fabric(name); | ||
172 | if (!(tf)) { | ||
173 | printk(KERN_ERR "target_core_get_fabric() failed for %s\n", | ||
174 | name); | ||
175 | return ERR_PTR(-EINVAL); | ||
176 | } | ||
177 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" | ||
178 | " %s\n", tf->tf_name); | ||
179 | /* | ||
180 | * On a successful target_core_get_fabric() lookup, the returned | ||
181 | * struct target_fabric_configfs *tf will contain a usage reference. | ||
182 | */ | ||
183 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", | ||
184 | &TF_CIT_TMPL(tf)->tfc_wwn_cit); | ||
185 | |||
186 | tf->tf_group.default_groups = tf->tf_default_groups; | ||
187 | tf->tf_group.default_groups[0] = &tf->tf_disc_group; | ||
188 | tf->tf_group.default_groups[1] = NULL; | ||
189 | |||
190 | config_group_init_type_name(&tf->tf_group, name, | ||
191 | &TF_CIT_TMPL(tf)->tfc_wwn_cit); | ||
192 | config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", | ||
193 | &TF_CIT_TMPL(tf)->tfc_discovery_cit); | ||
194 | |||
195 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" | ||
196 | " %s\n", tf->tf_group.cg_item.ci_name); | ||
197 | /* | ||
198 | * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() | ||
199 | */ | ||
200 | tf->tf_ops.tf_subsys = tf->tf_subsys; | ||
201 | tf->tf_fabric = &tf->tf_group.cg_item; | ||
202 | printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" | ||
203 | " for %s\n", name); | ||
204 | |||
205 | return &tf->tf_group; | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * Called from struct target_core_group_ops->drop_item() | ||
210 | */ | ||
211 | static void target_core_deregister_fabric( | ||
212 | struct config_group *group, | ||
213 | struct config_item *item) | ||
214 | { | ||
215 | struct target_fabric_configfs *tf = container_of( | ||
216 | to_config_group(item), struct target_fabric_configfs, tf_group); | ||
217 | struct config_group *tf_group; | ||
218 | struct config_item *df_item; | ||
219 | int i; | ||
220 | |||
221 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" | ||
222 | " tf list\n", config_item_name(item)); | ||
223 | |||
224 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" | ||
225 | " %s\n", tf->tf_name); | ||
226 | atomic_dec(&tf->tf_access_cnt); | ||
227 | |||
228 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" | ||
229 | " tf->tf_fabric for %s\n", tf->tf_name); | ||
230 | tf->tf_fabric = NULL; | ||
231 | |||
232 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" | ||
233 | " %s\n", config_item_name(item)); | ||
234 | |||
235 | tf_group = &tf->tf_group; | ||
236 | for (i = 0; tf_group->default_groups[i]; i++) { | ||
237 | df_item = &tf_group->default_groups[i]->cg_item; | ||
238 | tf_group->default_groups[i] = NULL; | ||
239 | config_item_put(df_item); | ||
240 | } | ||
241 | config_item_put(item); | ||
242 | } | ||
243 | |||
244 | static struct configfs_group_operations target_core_fabric_group_ops = { | ||
245 | .make_group = &target_core_register_fabric, | ||
246 | .drop_item = &target_core_deregister_fabric, | ||
247 | }; | ||
248 | |||
249 | /* | ||
250 | * All item attributes appearing in /sys/kernel/config/target/ appear here. | ||
251 | */ | ||
252 | static struct configfs_attribute *target_core_fabric_item_attrs[] = { | ||
253 | &target_core_item_attr_version, | ||
254 | NULL, | ||
255 | }; | ||
256 | |||
257 | /* | ||
258 | * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/ | ||
259 | */ | ||
260 | static struct config_item_type target_core_fabrics_item = { | ||
261 | .ct_item_ops = &target_core_fabric_item_ops, | ||
262 | .ct_group_ops = &target_core_fabric_group_ops, | ||
263 | .ct_attrs = target_core_fabric_item_attrs, | ||
264 | .ct_owner = THIS_MODULE, | ||
265 | }; | ||
266 | |||
267 | static struct configfs_subsystem target_core_fabrics = { | ||
268 | .su_group = { | ||
269 | .cg_item = { | ||
270 | .ci_namebuf = "target", | ||
271 | .ci_type = &target_core_fabrics_item, | ||
272 | }, | ||
273 | }, | ||
274 | }; | ||
275 | |||
276 | static struct configfs_subsystem *target_core_subsystem[] = { | ||
277 | &target_core_fabrics, | ||
278 | NULL, | ||
279 | }; | ||
280 | |||
281 | /*############################################################################## | ||
282 | // Start functions called by external Target Fabrics Modules | ||
283 | //############################################################################*/ | ||
284 | |||
285 | /* | ||
286 | * First function called by fabric modules to: | ||
287 | * | ||
288 | * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer. | ||
289 | * 2) Add struct target_fabric_configfs to g_tf_list | ||
290 | * 3) Return struct target_fabric_configfs to fabric module to be passed | ||
291 | * into target_fabric_configfs_register(). | ||
292 | */ | ||
293 | struct target_fabric_configfs *target_fabric_configfs_init( | ||
294 | struct module *fabric_mod, | ||
295 | const char *name) | ||
296 | { | ||
297 | struct target_fabric_configfs *tf; | ||
298 | |||
299 | if (!(fabric_mod)) { | ||
300 | printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); | ||
301 | return NULL; | ||
302 | } | ||
303 | if (!(name)) { | ||
304 | printk(KERN_ERR "Unable to locate passed fabric name\n"); | ||
305 | return NULL; | ||
306 | } | ||
307 | if (strlen(name) > TARGET_FABRIC_NAME_SIZE) { | ||
308 | printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" | ||
309 | "_NAME_SIZE\n", name); | ||
310 | return NULL; | ||
311 | } | ||
312 | |||
313 | tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); | ||
314 | if (!(tf)) | ||
315 | return ERR_PTR(-ENOMEM); | ||
316 | |||
317 | INIT_LIST_HEAD(&tf->tf_list); | ||
318 | atomic_set(&tf->tf_access_cnt, 0); | ||
319 | /* | ||
320 | * Setup the default generic struct config_item_type's (cits) in | ||
321 | * struct target_fabric_configfs->tf_cit_tmpl | ||
322 | */ | ||
323 | tf->tf_module = fabric_mod; | ||
324 | target_fabric_setup_cits(tf); | ||
325 | |||
326 | tf->tf_subsys = target_core_subsystem[0]; | ||
327 | snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name); | ||
328 | |||
329 | mutex_lock(&g_tf_lock); | ||
330 | list_add_tail(&tf->tf_list, &g_tf_list); | ||
331 | mutex_unlock(&g_tf_lock); | ||
332 | |||
333 | printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" | ||
334 | ">>>>>>>>>>>>>>\n"); | ||
335 | printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" | ||
336 | " %s\n", tf, tf->tf_name); | ||
337 | return tf; | ||
338 | } | ||
339 | EXPORT_SYMBOL(target_fabric_configfs_init); | ||
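/*
 * Illustrative sketch, not part of the patch: the registration
 * sequence a fabric module follows, per the comments above. Ops
 * setup and error handling are abbreviated, and the function and
 * fabric names are hypothetical.
 */
static int example_fabric_module_init(void)
{
	struct target_fabric_configfs *tf;

	tf = target_fabric_configfs_init(THIS_MODULE, "example");
	if (!tf || IS_ERR(tf))
		return -ENOMEM;

	/* fill in the required tf->tf_ops callbacks here */

	if (target_fabric_configfs_register(tf) < 0) {
		target_fabric_configfs_free(tf);
		return -EINVAL;
	}
	return 0;
}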
340 | |||
341 | /* | ||
342 | * Called by fabric plugins after FAILED target_fabric_configfs_register() call. | ||
343 | */ | ||
344 | void target_fabric_configfs_free( | ||
345 | struct target_fabric_configfs *tf) | ||
346 | { | ||
347 | mutex_lock(&g_tf_lock); | ||
348 | list_del(&tf->tf_list); | ||
349 | mutex_unlock(&g_tf_lock); | ||
350 | |||
351 | kfree(tf); | ||
352 | } | ||
353 | EXPORT_SYMBOL(target_fabric_configfs_free); | ||
354 | |||
355 | /* | ||
356 | * Perform a sanity check of the passed tf->tf_ops before completing | ||
357 | * TCM fabric module registration. | ||
358 | */ | ||
359 | static int target_fabric_tf_ops_check( | ||
360 | struct target_fabric_configfs *tf) | ||
361 | { | ||
362 | struct target_core_fabric_ops *tfo = &tf->tf_ops; | ||
363 | |||
364 | if (!(tfo->get_fabric_name)) { | ||
365 | printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); | ||
366 | return -EINVAL; | ||
367 | } | ||
368 | if (!(tfo->get_fabric_proto_ident)) { | ||
369 | printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); | ||
370 | return -EINVAL; | ||
371 | } | ||
372 | if (!(tfo->tpg_get_wwn)) { | ||
373 | printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); | ||
374 | return -EINVAL; | ||
375 | } | ||
376 | if (!(tfo->tpg_get_tag)) { | ||
377 | printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); | ||
378 | return -EINVAL; | ||
379 | } | ||
380 | if (!(tfo->tpg_get_default_depth)) { | ||
381 | printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | if (!(tfo->tpg_get_pr_transport_id)) { | ||
385 | printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); | ||
386 | return -EINVAL; | ||
387 | } | ||
388 | if (!(tfo->tpg_get_pr_transport_id_len)) { | ||
389 | printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); | ||
390 | return -EINVAL; | ||
391 | } | ||
392 | if (!(tfo->tpg_check_demo_mode)) { | ||
393 | printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); | ||
394 | return -EINVAL; | ||
395 | } | ||
396 | if (!(tfo->tpg_check_demo_mode_cache)) { | ||
397 | printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); | ||
398 | return -EINVAL; | ||
399 | } | ||
400 | if (!(tfo->tpg_check_demo_mode_write_protect)) { | ||
401 | printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); | ||
402 | return -EINVAL; | ||
403 | } | ||
404 | if (!(tfo->tpg_check_prod_mode_write_protect)) { | ||
405 | printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); | ||
406 | return -EINVAL; | ||
407 | } | ||
408 | if (!(tfo->tpg_alloc_fabric_acl)) { | ||
409 | printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); | ||
410 | return -EINVAL; | ||
411 | } | ||
412 | if (!(tfo->tpg_release_fabric_acl)) { | ||
413 | printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); | ||
414 | return -EINVAL; | ||
415 | } | ||
416 | if (!(tfo->tpg_get_inst_index)) { | ||
417 | printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); | ||
418 | return -EINVAL; | ||
419 | } | ||
420 | if (!(tfo->release_cmd_to_pool)) { | ||
421 | printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n"); | ||
422 | return -EINVAL; | ||
423 | } | ||
424 | if (!(tfo->release_cmd_direct)) { | ||
425 | printk(KERN_ERR "Missing tfo->release_cmd_direct()\n"); | ||
426 | return -EINVAL; | ||
427 | } | ||
428 | if (!(tfo->shutdown_session)) { | ||
429 | printk(KERN_ERR "Missing tfo->shutdown_session()\n"); | ||
430 | return -EINVAL; | ||
431 | } | ||
432 | if (!(tfo->close_session)) { | ||
433 | printk(KERN_ERR "Missing tfo->close_session()\n"); | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | if (!(tfo->stop_session)) { | ||
437 | printk(KERN_ERR "Missing tfo->stop_session()\n"); | ||
438 | return -EINVAL; | ||
439 | } | ||
440 | if (!(tfo->fall_back_to_erl0)) { | ||
441 | printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); | ||
442 | return -EINVAL; | ||
443 | } | ||
444 | if (!(tfo->sess_logged_in)) { | ||
445 | printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | if (!(tfo->sess_get_index)) { | ||
449 | printk(KERN_ERR "Missing tfo->sess_get_index()\n"); | ||
450 | return -EINVAL; | ||
451 | } | ||
452 | if (!(tfo->write_pending)) { | ||
453 | printk(KERN_ERR "Missing tfo->write_pending()\n"); | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | if (!(tfo->write_pending_status)) { | ||
457 | printk(KERN_ERR "Missing tfo->write_pending_status()\n"); | ||
458 | return -EINVAL; | ||
459 | } | ||
460 | if (!(tfo->set_default_node_attributes)) { | ||
461 | printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); | ||
462 | return -EINVAL; | ||
463 | } | ||
464 | if (!(tfo->get_task_tag)) { | ||
465 | printk(KERN_ERR "Missing tfo->get_task_tag()\n"); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | if (!(tfo->get_cmd_state)) { | ||
469 | printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); | ||
470 | return -EINVAL; | ||
471 | } | ||
472 | if (!(tfo->new_cmd_failure)) { | ||
473 | printk(KERN_ERR "Missing tfo->new_cmd_failure()\n"); | ||
474 | return -EINVAL; | ||
475 | } | ||
476 | if (!(tfo->queue_data_in)) { | ||
477 | printk(KERN_ERR "Missing tfo->queue_data_in()\n"); | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | if (!(tfo->queue_status)) { | ||
481 | printk(KERN_ERR "Missing tfo->queue_status()\n"); | ||
482 | return -EINVAL; | ||
483 | } | ||
484 | if (!(tfo->queue_tm_rsp)) { | ||
485 | printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); | ||
486 | return -EINVAL; | ||
487 | } | ||
488 | if (!(tfo->set_fabric_sense_len)) { | ||
489 | printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | if (!(tfo->get_fabric_sense_len)) { | ||
493 | printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); | ||
494 | return -EINVAL; | ||
495 | } | ||
496 | if (!(tfo->is_state_remove)) { | ||
497 | printk(KERN_ERR "Missing tfo->is_state_remove()\n"); | ||
498 | return -EINVAL; | ||
499 | } | ||
500 | if (!(tfo->pack_lun)) { | ||
501 | printk(KERN_ERR "Missing tfo->pack_lun()\n"); | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | /* | ||
505 | * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(), | ||
506 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in | ||
507 | * target_core_fabric_configfs.c WWN+TPG group context code. | ||
508 | */ | ||
509 | if (!(tfo->fabric_make_wwn)) { | ||
510 | printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); | ||
511 | return -EINVAL; | ||
512 | } | ||
513 | if (!(tfo->fabric_drop_wwn)) { | ||
514 | printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); | ||
515 | return -EINVAL; | ||
516 | } | ||
517 | if (!(tfo->fabric_make_tpg)) { | ||
518 | printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); | ||
519 | return -EINVAL; | ||
520 | } | ||
521 | if (!(tfo->fabric_drop_tpg)) { | ||
522 | printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | |||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * Called 2nd from fabric module with returned parameter of | ||
531 | * struct target_fabric_configfs * from target_fabric_configfs_init(). | ||
532 | * | ||
533 | * Upon a successful registration, the new fabric's struct config_item is | ||
534 | * returned. Also, a pointer to this struct is set in the passed | ||
535 | * struct target_fabric_configfs. | ||
536 | */ | ||
537 | int target_fabric_configfs_register( | ||
538 | struct target_fabric_configfs *tf) | ||
539 | { | ||
540 | struct config_group *su_group; | ||
541 | int ret; | ||
542 | |||
543 | if (!(tf)) { | ||
544 | printk(KERN_ERR "Unable to locate target_fabric_configfs" | ||
545 | " pointer\n"); | ||
546 | return -EINVAL; | ||
547 | } | ||
548 | if (!(tf->tf_subsys)) { | ||
549 | printk(KERN_ERR "Unable to locate target struct config_subsystem" | ||
550 | " pointer\n"); | ||
551 | return -EINVAL; | ||
552 | } | ||
553 | su_group = &tf->tf_subsys->su_group; | ||
554 | if (!(su_group)) { | ||
555 | printk(KERN_ERR "Unable to locate target struct config_group" | ||
556 | " pointer\n"); | ||
557 | return -EINVAL; | ||
558 | } | ||
559 | ret = target_fabric_tf_ops_check(tf); | ||
560 | if (ret < 0) | ||
561 | return ret; | ||
562 | |||
563 | printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" | ||
564 | ">>>>>>>>>>\n"); | ||
565 | return 0; | ||
566 | } | ||
567 | EXPORT_SYMBOL(target_fabric_configfs_register); | ||
568 | |||
569 | void target_fabric_configfs_deregister( | ||
570 | struct target_fabric_configfs *tf) | ||
571 | { | ||
572 | struct config_group *su_group; | ||
573 | struct configfs_subsystem *su; | ||
574 | |||
575 | if (!(tf)) { | ||
576 | printk(KERN_ERR "Unable to locate passed target_fabric_" | ||
577 | "configfs\n"); | ||
578 | return; | ||
579 | } | ||
580 | su = tf->tf_subsys; | ||
581 | if (!(su)) { | ||
582 | printk(KERN_ERR "Unable to locate passed tf->tf_subsys" | ||
583 | " pointer\n"); | ||
584 | return; | ||
585 | } | ||
586 | su_group = &tf->tf_subsys->su_group; | ||
587 | if (!(su_group)) { | ||
588 | printk(KERN_ERR "Unable to locate target struct config_group" | ||
589 | " pointer\n"); | ||
590 | return; | ||
591 | } | ||
592 | |||
593 | printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" | ||
594 | ">>>>>>>>>>>>\n"); | ||
595 | mutex_lock(&g_tf_lock); | ||
596 | if (atomic_read(&tf->tf_access_cnt)) { | ||
597 | mutex_unlock(&g_tf_lock); | ||
598 | printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", | ||
599 | tf->tf_name); | ||
600 | BUG(); | ||
601 | } | ||
602 | list_del(&tf->tf_list); | ||
603 | mutex_unlock(&g_tf_lock); | ||
604 | |||
605 | printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" | ||
606 | " %s\n", tf->tf_name); | ||
607 | tf->tf_module = NULL; | ||
608 | tf->tf_subsys = NULL; | ||
609 | kfree(tf); | ||
610 | |||
611 | printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" | ||
612 | ">>>>>\n"); | ||
613 | return; | ||
614 | } | ||
615 | EXPORT_SYMBOL(target_fabric_configfs_deregister); | ||
616 | |||
617 | /*############################################################################## | ||
618 | // Stop functions called by external Target Fabrics Modules | ||
619 | //############################################################################*/ | ||
620 | |||
621 | /* Start functions for struct config_item_type target_core_dev_attrib_cit */ | ||
622 | |||
623 | #define DEF_DEV_ATTRIB_SHOW(_name) \ | ||
624 | static ssize_t target_core_dev_show_attr_##_name( \ | ||
625 | struct se_dev_attrib *da, \ | ||
626 | char *page) \ | ||
627 | { \ | ||
628 | struct se_device *dev; \ | ||
629 | struct se_subsystem_dev *se_dev = da->da_sub_dev; \ | ||
630 | ssize_t rb; \ | ||
631 | \ | ||
632 | spin_lock(&se_dev->se_dev_lock); \ | ||
633 | dev = se_dev->se_dev_ptr; \ | ||
634 | if (!(dev)) { \ | ||
635 | spin_unlock(&se_dev->se_dev_lock); \ | ||
636 | return -ENODEV; \ | ||
637 | } \ | ||
638 | rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \ | ||
639 | spin_unlock(&se_dev->se_dev_lock); \ | ||
640 | \ | ||
641 | return rb; \ | ||
642 | } | ||
643 | |||
644 | #define DEF_DEV_ATTRIB_STORE(_name) \ | ||
645 | static ssize_t target_core_dev_store_attr_##_name( \ | ||
646 | struct se_dev_attrib *da, \ | ||
647 | const char *page, \ | ||
648 | size_t count) \ | ||
649 | { \ | ||
650 | struct se_device *dev; \ | ||
651 | struct se_subsystem_dev *se_dev = da->da_sub_dev; \ | ||
652 | unsigned long val; \ | ||
653 | int ret; \ | ||
654 | \ | ||
655 | spin_lock(&se_dev->se_dev_lock); \ | ||
656 | dev = se_dev->se_dev_ptr; \ | ||
657 | if (!(dev)) { \ | ||
658 | spin_unlock(&se_dev->se_dev_lock); \ | ||
659 | return -ENODEV; \ | ||
660 | } \ | ||
661 | ret = strict_strtoul(page, 0, &val); \ | ||
662 | if (ret < 0) { \ | ||
663 | spin_unlock(&se_dev->se_dev_lock); \ | ||
664 | printk(KERN_ERR "strict_strtoul() failed with" \ | ||
665 | " ret: %d\n", ret); \ | ||
666 | return -EINVAL; \ | ||
667 | } \ | ||
668 | ret = se_dev_set_##_name(dev, (u32)val); \ | ||
669 | spin_unlock(&se_dev->se_dev_lock); \ | ||
670 | \ | ||
671 | return (!ret) ? count : -EINVAL; \ | ||
672 | } | ||
673 | |||
674 | #define DEF_DEV_ATTRIB(_name) \ | ||
675 | DEF_DEV_ATTRIB_SHOW(_name); \ | ||
676 | DEF_DEV_ATTRIB_STORE(_name); | ||
677 | |||
678 | #define DEF_DEV_ATTRIB_RO(_name) \ | ||
679 | DEF_DEV_ATTRIB_SHOW(_name); | ||
680 | |||
681 | CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); | ||
682 | #define SE_DEV_ATTR(_name, _mode) \ | ||
683 | static struct target_core_dev_attrib_attribute \ | ||
684 | target_core_dev_attrib_##_name = \ | ||
685 | __CONFIGFS_EATTR(_name, _mode, \ | ||
686 | target_core_dev_show_attr_##_name, \ | ||
687 | target_core_dev_store_attr_##_name); | ||
688 | |||
689 | #define SE_DEV_ATTR_RO(_name) \ | ||
690 | static struct target_core_dev_attrib_attribute \ | ||
691 | target_core_dev_attrib_##_name = \ | ||
692 | __CONFIGFS_EATTR_RO(_name, \ | ||
693 | target_core_dev_show_attr_##_name); | ||
694 | |||
695 | DEF_DEV_ATTRIB(emulate_dpo); | ||
696 | SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); | ||
697 | |||
698 | DEF_DEV_ATTRIB(emulate_fua_write); | ||
699 | SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR); | ||
700 | |||
701 | DEF_DEV_ATTRIB(emulate_fua_read); | ||
702 | SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR); | ||
703 | |||
704 | DEF_DEV_ATTRIB(emulate_write_cache); | ||
705 | SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR); | ||
706 | |||
707 | DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl); | ||
708 | SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); | ||
709 | |||
710 | DEF_DEV_ATTRIB(emulate_tas); | ||
711 | SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR); | ||
712 | |||
713 | DEF_DEV_ATTRIB(emulate_tpu); | ||
714 | SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); | ||
715 | |||
716 | DEF_DEV_ATTRIB(emulate_tpws); | ||
717 | SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); | ||
718 | |||
719 | DEF_DEV_ATTRIB(enforce_pr_isids); | ||
720 | SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); | ||
721 | |||
722 | DEF_DEV_ATTRIB_RO(hw_block_size); | ||
723 | SE_DEV_ATTR_RO(hw_block_size); | ||
724 | |||
725 | DEF_DEV_ATTRIB(block_size); | ||
726 | SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); | ||
727 | |||
728 | DEF_DEV_ATTRIB_RO(hw_max_sectors); | ||
729 | SE_DEV_ATTR_RO(hw_max_sectors); | ||
730 | |||
731 | DEF_DEV_ATTRIB(max_sectors); | ||
732 | SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); | ||
733 | |||
734 | DEF_DEV_ATTRIB(optimal_sectors); | ||
735 | SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); | ||
736 | |||
737 | DEF_DEV_ATTRIB_RO(hw_queue_depth); | ||
738 | SE_DEV_ATTR_RO(hw_queue_depth); | ||
739 | |||
740 | DEF_DEV_ATTRIB(queue_depth); | ||
741 | SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); | ||
742 | |||
743 | DEF_DEV_ATTRIB(task_timeout); | ||
744 | SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR); | ||
745 | |||
746 | DEF_DEV_ATTRIB(max_unmap_lba_count); | ||
747 | SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); | ||
748 | |||
749 | DEF_DEV_ATTRIB(max_unmap_block_desc_count); | ||
750 | SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR); | ||
751 | |||
752 | DEF_DEV_ATTRIB(unmap_granularity); | ||
753 | SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR); | ||
754 | |||
755 | DEF_DEV_ATTRIB(unmap_granularity_alignment); | ||
756 | SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR); | ||
757 | |||
758 | CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); | ||
759 | |||
760 | static struct configfs_attribute *target_core_dev_attrib_attrs[] = { | ||
761 | &target_core_dev_attrib_emulate_dpo.attr, | ||
762 | &target_core_dev_attrib_emulate_fua_write.attr, | ||
763 | &target_core_dev_attrib_emulate_fua_read.attr, | ||
764 | &target_core_dev_attrib_emulate_write_cache.attr, | ||
765 | &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, | ||
766 | &target_core_dev_attrib_emulate_tas.attr, | ||
767 | &target_core_dev_attrib_emulate_tpu.attr, | ||
768 | &target_core_dev_attrib_emulate_tpws.attr, | ||
769 | &target_core_dev_attrib_enforce_pr_isids.attr, | ||
770 | &target_core_dev_attrib_hw_block_size.attr, | ||
771 | &target_core_dev_attrib_block_size.attr, | ||
772 | &target_core_dev_attrib_hw_max_sectors.attr, | ||
773 | &target_core_dev_attrib_max_sectors.attr, | ||
774 | &target_core_dev_attrib_optimal_sectors.attr, | ||
775 | &target_core_dev_attrib_hw_queue_depth.attr, | ||
776 | &target_core_dev_attrib_queue_depth.attr, | ||
777 | &target_core_dev_attrib_task_timeout.attr, | ||
778 | &target_core_dev_attrib_max_unmap_lba_count.attr, | ||
779 | &target_core_dev_attrib_max_unmap_block_desc_count.attr, | ||
780 | &target_core_dev_attrib_unmap_granularity.attr, | ||
781 | &target_core_dev_attrib_unmap_granularity_alignment.attr, | ||
782 | NULL, | ||
783 | }; | ||
784 | |||
785 | static struct configfs_item_operations target_core_dev_attrib_ops = { | ||
786 | .show_attribute = target_core_dev_attrib_attr_show, | ||
787 | .store_attribute = target_core_dev_attrib_attr_store, | ||
788 | }; | ||
789 | |||
790 | static struct config_item_type target_core_dev_attrib_cit = { | ||
791 | .ct_item_ops = &target_core_dev_attrib_ops, | ||
792 | .ct_attrs = target_core_dev_attrib_attrs, | ||
793 | .ct_owner = THIS_MODULE, | ||
794 | }; | ||
795 | |||
796 | /* End functions for struct config_item_type target_core_dev_attrib_cit */ | ||
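/*
 * Illustrative sketch, not part of the patch: once a device is
 * configured, the attributes above surface as configfs files that a
 * userspace tool can read and write. The path below is an example
 * layout only (iblock_0/mydev are placeholders).
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/config/target/core/"
			"iblock_0/mydev/attrib/emulate_tpu", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* enable UNMAP (TPU) emulation */
	return fclose(f) ? 1 : 0;
}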
797 | |||
798 | /* Start functions for struct config_item_type target_core_dev_wwn_cit */ | ||
799 | |||
800 | CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); | ||
801 | #define SE_DEV_WWN_ATTR(_name, _mode) \ | ||
802 | static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \ | ||
803 | __CONFIGFS_EATTR(_name, _mode, \ | ||
804 | target_core_dev_wwn_show_attr_##_name, \ | ||
805 | target_core_dev_wwn_store_attr_##_name); | ||
806 | |||
807 | #define SE_DEV_WWN_ATTR_RO(_name) \ | ||
808 | static struct target_core_dev_wwn_attribute \ | ||
809 | target_core_dev_wwn_##_name = \ | ||
810 | __CONFIGFS_EATTR_RO(_name, \ | ||
811 | target_core_dev_wwn_show_attr_##_name); | ||
814 | |||
815 | /* | ||
816 | * VPD page 0x80 Unit serial | ||
817 | */ | ||
818 | static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial( | ||
819 | struct t10_wwn *t10_wwn, | ||
820 | char *page) | ||
821 | { | ||
822 | struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; | ||
823 | struct se_device *dev; | ||
824 | |||
825 | dev = se_dev->se_dev_ptr; | ||
826 | if (!(dev)) | ||
827 | return -ENODEV; | ||
828 | |||
829 | return sprintf(page, "T10 VPD Unit Serial Number: %s\n", | ||
830 | &t10_wwn->unit_serial[0]); | ||
831 | } | ||
832 | |||
833 | static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( | ||
834 | struct t10_wwn *t10_wwn, | ||
835 | const char *page, | ||
836 | size_t count) | ||
837 | { | ||
838 | struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev; | ||
839 | struct se_device *dev; | ||
840 | unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; | ||
841 | |||
842 | /* | ||
843 | * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial | ||
844 | * from the struct scsi_device level firmware, do not allow | ||
845 | * VPD Unit Serial to be emulated. | ||
846 | * | ||
847 | * Note this struct scsi_device could also be emulating VPD | ||
848 | * information from its drivers/scsi LLD. But for now we assume | ||
849 | * it is doing 'the right thing' wrt a world wide unique | ||
850 | * VPD Unit Serial Number that OS dependent multipath can depend on. | ||
851 | */ | ||
852 | if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { | ||
853 | printk(KERN_ERR "Underlying SCSI device firmware provided VPD" | ||
854 | " Unit Serial, ignoring request\n"); | ||
855 | return -EOPNOTSUPP; | ||
856 | } | ||
857 | |||
858 | if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) { | ||
859 | printk(KERN_ERR "Emulated VPD Unit Serial exceeds" | ||
860 | " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); | ||
861 | return -EOVERFLOW; | ||
862 | } | ||
863 | /* | ||
864 | * Check to see if any active $FABRIC_MOD exports exist. If they | ||
865 | * do exist, fail here as changing this information on the fly | ||
866 | * (underneath the initiator side OS dependent multipath code) | ||
867 | * could cause negative effects. | ||
868 | */ | ||
869 | dev = su_dev->se_dev_ptr; | ||
870 | if ((dev)) { | ||
871 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
872 | printk(KERN_ERR "Unable to set VPD Unit Serial while" | ||
873 | " active %d $FABRIC_MOD exports exist\n", | ||
874 | atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
875 | return -EINVAL; | ||
876 | } | ||
877 | } | ||
878 | /* | ||
879 | * This currently assumes ASCII encoding for emulated VPD Unit Serial. | ||
880 | * | ||
881 | * Also, strip any newline added from the userspace | ||
882 | * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial | ||
883 | */ | ||
884 | memset(buf, 0, INQUIRY_VPD_SERIAL_LEN); | ||
885 | snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); | ||
886 | snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, | ||
887 | "%s", strstrip(buf)); | ||
888 | su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; | ||
889 | |||
890 | printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" | ||
891 | " %s\n", su_dev->t10_wwn.unit_serial); | ||
892 | |||
893 | return count; | ||
894 | } | ||
895 | |||
896 | SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR); | ||
897 | |||
898 | /* | ||
899 | * VPD page 0x83 Protocol Identifier | ||
900 | */ | ||
901 | static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( | ||
902 | struct t10_wwn *t10_wwn, | ||
903 | char *page) | ||
904 | { | ||
905 | struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; | ||
906 | struct se_device *dev; | ||
907 | struct t10_vpd *vpd; | ||
908 | unsigned char buf[VPD_TMP_BUF_SIZE]; | ||
909 | ssize_t len = 0; | ||
910 | |||
911 | dev = se_dev->se_dev_ptr; | ||
912 | if (!(dev)) | ||
913 | return -ENODEV; | ||
914 | |||
915 | memset(buf, 0, VPD_TMP_BUF_SIZE); | ||
916 | |||
917 | spin_lock(&t10_wwn->t10_vpd_lock); | ||
918 | list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { | ||
919 | if (!(vpd->protocol_identifier_set)) | ||
920 | continue; | ||
921 | |||
922 | transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); | ||
923 | |||
924 | if ((len + strlen(buf) > PAGE_SIZE)) | ||
925 | break; | ||
926 | |||
927 | len += sprintf(page+len, "%s", buf); | ||
928 | } | ||
929 | spin_unlock(&t10_wwn->t10_vpd_lock); | ||
930 | |||
931 | return len; | ||
932 | } | ||
933 | |||
934 | static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier( | ||
935 | struct t10_wwn *t10_wwn, | ||
936 | const char *page, | ||
937 | size_t count) | ||
938 | { | ||
939 | return -ENOSYS; | ||
940 | } | ||
941 | |||
942 | SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR); | ||
943 | |||
944 | /* | ||
945 | * Generic wrapper for dumping VPD identifiers by association. | ||
946 | */ | ||
947 | #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ | ||
948 | static ssize_t target_core_dev_wwn_show_attr_##_name( \ | ||
949 | struct t10_wwn *t10_wwn, \ | ||
950 | char *page) \ | ||
951 | { \ | ||
952 | struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \ | ||
953 | struct se_device *dev; \ | ||
954 | struct t10_vpd *vpd; \ | ||
955 | unsigned char buf[VPD_TMP_BUF_SIZE]; \ | ||
956 | ssize_t len = 0; \ | ||
957 | \ | ||
958 | dev = se_dev->se_dev_ptr; \ | ||
959 | if (!(dev)) \ | ||
960 | return -ENODEV; \ | ||
961 | \ | ||
962 | spin_lock(&t10_wwn->t10_vpd_lock); \ | ||
963 | list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ | ||
964 | if (vpd->association != _assoc) \ | ||
965 | continue; \ | ||
966 | \ | ||
967 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | ||
968 | transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ | ||
969 | if ((len + strlen(buf) > PAGE_SIZE)) \ | ||
970 | break; \ | ||
971 | len += sprintf(page+len, "%s", buf); \ | ||
972 | \ | ||
973 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | ||
974 | transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ | ||
975 | if ((len + strlen(buf) > PAGE_SIZE)) \ | ||
976 | break; \ | ||
977 | len += sprintf(page+len, "%s", buf); \ | ||
978 | \ | ||
979 | memset(buf, 0, VPD_TMP_BUF_SIZE); \ | ||
980 | transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ | ||
981 | if ((len + strlen(buf) > PAGE_SIZE)) \ | ||
982 | break; \ | ||
983 | len += sprintf(page+len, "%s", buf); \ | ||
984 | } \ | ||
985 | spin_unlock(&t10_wwn->t10_vpd_lock); \ | ||
986 | \ | ||
987 | return len; \ | ||
988 | } | ||
989 | |||
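DEF_DEV_WWN_ASSOC_SHOW relies on token pasting and a compile-time association constant to stamp out three nearly identical show routines below. A standalone, compilable sketch of the same macro-generation pattern (all names and data here are illustrative, not part of the driver):

```c
/* Standalone illustration of the pattern used by DEF_DEV_WWN_ASSOC_SHOW:
 * one macro generates a family of functions that differ only in a
 * compile-time association constant. */
#include <stdio.h>

struct vpd { int association; const char *ident; };

#define DEF_ASSOC_SHOW(_name, _assoc)				\
static void show_##_name(const struct vpd *v, int n)		\
{								\
	for (int i = 0; i < n; i++)				\
		if (v[i].association == (_assoc))		\
			printf(#_name ": %s\n", v[i].ident);	\
}

DEF_ASSOC_SHOW(logical_unit, 0x00)
DEF_ASSOC_SHOW(target_port, 0x10)

int main(void)
{
	struct vpd vpds[] = {
		{ 0x00, "naa.6001405aabbccdd0" },	/* made-up identifiers */
		{ 0x10, "naa.6001405aabbccdd0,t,0x0001" },
	};
	show_logical_unit(vpds, 2);
	show_target_port(vpds, 2);
	return 0;
}
```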
990 | /* | ||
991 | * VPD page 0x83 Association: Logical Unit | ||
992 | */ | ||
993 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); | ||
994 | |||
995 | static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit( | ||
996 | struct t10_wwn *t10_wwn, | ||
997 | const char *page, | ||
998 | size_t count) | ||
999 | { | ||
1000 | return -ENOSYS; | ||
1001 | } | ||
1002 | |||
1003 | SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR); | ||
1004 | |||
1005 | /* | ||
1006 | * VPD page 0x83 Association: Target Port | ||
1007 | */ | ||
1008 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); | ||
1009 | |||
1010 | static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port( | ||
1011 | struct t10_wwn *t10_wwn, | ||
1012 | const char *page, | ||
1013 | size_t count) | ||
1014 | { | ||
1015 | return -ENOSYS; | ||
1016 | } | ||
1017 | |||
1018 | SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR); | ||
1019 | |||
1020 | /* | ||
1021 | * VPD page 0x83 Association: SCSI Target Device | ||
1022 | */ | ||
1023 | DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); | ||
1024 | |||
1025 | static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device( | ||
1026 | struct t10_wwn *t10_wwn, | ||
1027 | const char *page, | ||
1028 | size_t count) | ||
1029 | { | ||
1030 | return -ENOSYS; | ||
1031 | } | ||
1032 | |||
1033 | SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR); | ||
1034 | |||
1035 | CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group); | ||
1036 | |||
1037 | static struct configfs_attribute *target_core_dev_wwn_attrs[] = { | ||
1038 | &target_core_dev_wwn_vpd_unit_serial.attr, | ||
1039 | &target_core_dev_wwn_vpd_protocol_identifier.attr, | ||
1040 | &target_core_dev_wwn_vpd_assoc_logical_unit.attr, | ||
1041 | &target_core_dev_wwn_vpd_assoc_target_port.attr, | ||
1042 | &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr, | ||
1043 | NULL, | ||
1044 | }; | ||
1045 | |||
1046 | static struct configfs_item_operations target_core_dev_wwn_ops = { | ||
1047 | .show_attribute = target_core_dev_wwn_attr_show, | ||
1048 | .store_attribute = target_core_dev_wwn_attr_store, | ||
1049 | }; | ||
1050 | |||
1051 | static struct config_item_type target_core_dev_wwn_cit = { | ||
1052 | .ct_item_ops = &target_core_dev_wwn_ops, | ||
1053 | .ct_attrs = target_core_dev_wwn_attrs, | ||
1054 | .ct_owner = THIS_MODULE, | ||
1055 | }; | ||
1056 | |||
1057 | /* End functions for struct config_item_type target_core_dev_wwn_cit */ | ||
1058 | |||
1059 | /* Start functions for struct config_item_type target_core_dev_pr_cit */ | ||
1060 | |||
1061 | CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev); | ||
1062 | #define SE_DEV_PR_ATTR(_name, _mode) \ | ||
1063 | static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ | ||
1064 | __CONFIGFS_EATTR(_name, _mode, \ | ||
1065 | target_core_dev_pr_show_attr_##_name, \ | ||
1066 | target_core_dev_pr_store_attr_##_name); | ||
1067 | |||
1068 | #define SE_DEV_PR_ATTR_RO(_name) \ | ||
1069 | static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ | ||
1070 | __CONFIGFS_EATTR_RO(_name, \ | ||
1071 | target_core_dev_pr_show_attr_##_name); | ||
1072 | |||
1073 | /* | ||
1074 | * res_holder | ||
1075 | */ | ||
1076 | static ssize_t target_core_dev_pr_show_spc3_res( | ||
1077 | struct se_device *dev, | ||
1078 | char *page, | ||
1079 | ssize_t *len) | ||
1080 | { | ||
1081 | struct se_node_acl *se_nacl; | ||
1082 | struct t10_pr_registration *pr_reg; | ||
1083 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
1084 | int prf_isid; | ||
1085 | |||
1086 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
1087 | |||
1088 | spin_lock(&dev->dev_reservation_lock); | ||
1089 | pr_reg = dev->dev_pr_res_holder; | ||
1090 | if (!(pr_reg)) { | ||
1091 | *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); | ||
1092 | spin_unlock(&dev->dev_reservation_lock); | ||
1093 | return *len; | ||
1094 | } | ||
1095 | se_nacl = pr_reg->pr_reg_nacl; | ||
1096 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
1097 | PR_REG_ISID_ID_LEN); | ||
1098 | |||
1099 | *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", | ||
1100 | TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), | ||
1101 | se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); | ||
1102 | spin_unlock(&dev->dev_reservation_lock); | ||
1103 | |||
1104 | return *len; | ||
1105 | } | ||
1106 | |||
1107 | static ssize_t target_core_dev_pr_show_spc2_res( | ||
1108 | struct se_device *dev, | ||
1109 | char *page, | ||
1110 | ssize_t *len) | ||
1111 | { | ||
1112 | struct se_node_acl *se_nacl; | ||
1113 | |||
1114 | spin_lock(&dev->dev_reservation_lock); | ||
1115 | se_nacl = dev->dev_reserved_node_acl; | ||
1116 | if (!(se_nacl)) { | ||
1117 | *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); | ||
1118 | spin_unlock(&dev->dev_reservation_lock); | ||
1119 | return *len; | ||
1120 | } | ||
1121 | *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", | ||
1122 | TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), | ||
1123 | se_nacl->initiatorname); | ||
1124 | spin_unlock(&dev->dev_reservation_lock); | ||
1125 | |||
1126 | return *len; | ||
1127 | } | ||
1128 | |||
1129 | static ssize_t target_core_dev_pr_show_attr_res_holder( | ||
1130 | struct se_subsystem_dev *su_dev, | ||
1131 | char *page) | ||
1132 | { | ||
1133 | ssize_t len = 0; | ||
1134 | |||
1135 | if (!(su_dev->se_dev_ptr)) | ||
1136 | return -ENODEV; | ||
1137 | |||
1138 | switch (T10_RES(su_dev)->res_type) { | ||
1139 | case SPC3_PERSISTENT_RESERVATIONS: | ||
1140 | target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, | ||
1141 | page, &len); | ||
1142 | break; | ||
1143 | case SPC2_RESERVATIONS: | ||
1144 | target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr, | ||
1145 | page, &len); | ||
1146 | break; | ||
1147 | case SPC_PASSTHROUGH: | ||
1148 | len += sprintf(page+len, "Passthrough\n"); | ||
1149 | break; | ||
1150 | default: | ||
1151 | len += sprintf(page+len, "Unknown\n"); | ||
1152 | break; | ||
1153 | } | ||
1154 | |||
1155 | return len; | ||
1156 | } | ||
1157 | |||
1158 | SE_DEV_PR_ATTR_RO(res_holder); | ||
1159 | |||
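Reading any of these read-only PR attributes back is a single page-sized read(2). A hedged sketch using the same hypothetical device path as above:

```c
/* Hypothetical userspace sketch: read back the current reservation
 * holder; the show handler fills at most one page. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	const char *path = "/sys/kernel/config/target/core/"
			   "iblock_0/lun_test/pr/res_holder";
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, page, sizeof(page) - 1);
	if (n > 0) {
		page[n] = '\0';
		fputs(page, stdout);	/* e.g. "No SPC-3 Reservation holder" */
	}
	close(fd);
	return 0;
}
```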
1160 | /* | ||
1161 | * res_pr_all_tgt_pts | ||
1162 | */ | ||
1163 | static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( | ||
1164 | struct se_subsystem_dev *su_dev, | ||
1165 | char *page) | ||
1166 | { | ||
1167 | struct se_device *dev; | ||
1168 | struct t10_pr_registration *pr_reg; | ||
1169 | ssize_t len = 0; | ||
1170 | |||
1171 | dev = su_dev->se_dev_ptr; | ||
1172 | if (!(dev)) | ||
1173 | return -ENODEV; | ||
1174 | |||
1175 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1176 | return len; | ||
1177 | |||
1178 | spin_lock(&dev->dev_reservation_lock); | ||
1179 | pr_reg = dev->dev_pr_res_holder; | ||
1180 | if (!(pr_reg)) { | ||
1181 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | ||
1182 | spin_unlock(&dev->dev_reservation_lock); | ||
1183 | return len; | ||
1184 | } | ||
1185 | /* | ||
1186 | * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3 | ||
1187 | * Basic PERSISTENT RESERVE OUT parameter list, page 290 | ||
1188 | */ | ||
1189 | if (pr_reg->pr_reg_all_tg_pt) | ||
1190 | len = sprintf(page, "SPC-3 Reservation: All Target" | ||
1191 | " Ports registration\n"); | ||
1192 | else | ||
1193 | len = sprintf(page, "SPC-3 Reservation: Single" | ||
1194 | " Target Port registration\n"); | ||
1195 | spin_unlock(&dev->dev_reservation_lock); | ||
1196 | |||
1197 | return len; | ||
1198 | } | ||
1199 | |||
1200 | SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); | ||
1201 | |||
1202 | /* | ||
1203 | * res_pr_generation | ||
1204 | */ | ||
1205 | static ssize_t target_core_dev_pr_show_attr_res_pr_generation( | ||
1206 | struct se_subsystem_dev *su_dev, | ||
1207 | char *page) | ||
1208 | { | ||
1209 | if (!(su_dev->se_dev_ptr)) | ||
1210 | return -ENODEV; | ||
1211 | |||
1212 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1213 | return 0; | ||
1214 | |||
1215 | return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation); | ||
1216 | } | ||
1217 | |||
1218 | SE_DEV_PR_ATTR_RO(res_pr_generation); | ||
1219 | |||
1220 | /* | ||
1221 | * res_pr_holder_tg_port | ||
1222 | */ | ||
1223 | static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( | ||
1224 | struct se_subsystem_dev *su_dev, | ||
1225 | char *page) | ||
1226 | { | ||
1227 | struct se_device *dev; | ||
1228 | struct se_node_acl *se_nacl; | ||
1229 | struct se_lun *lun; | ||
1230 | struct se_portal_group *se_tpg; | ||
1231 | struct t10_pr_registration *pr_reg; | ||
1232 | struct target_core_fabric_ops *tfo; | ||
1233 | ssize_t len = 0; | ||
1234 | |||
1235 | dev = su_dev->se_dev_ptr; | ||
1236 | if (!(dev)) | ||
1237 | return -ENODEV; | ||
1238 | |||
1239 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1240 | return len; | ||
1241 | |||
1242 | spin_lock(&dev->dev_reservation_lock); | ||
1243 | pr_reg = dev->dev_pr_res_holder; | ||
1244 | if (!(pr_reg)) { | ||
1245 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | ||
1246 | spin_unlock(&dev->dev_reservation_lock); | ||
1247 | return len; | ||
1248 | } | ||
1249 | se_nacl = pr_reg->pr_reg_nacl; | ||
1250 | se_tpg = se_nacl->se_tpg; | ||
1251 | lun = pr_reg->pr_reg_tg_pt_lun; | ||
1252 | tfo = TPG_TFO(se_tpg); | ||
1253 | |||
1254 | len += sprintf(page+len, "SPC-3 Reservation: %s" | ||
1255 | " Target Node Endpoint: %s\n", tfo->get_fabric_name(), | ||
1256 | tfo->tpg_get_wwn(se_tpg)); | ||
1257 | len += sprintf(page+len, "SPC-3 Reservation: Relative Port" | ||
1258 | " Identifer Tag: %hu %s Portal Group Tag: %hu" | ||
1259 | " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, | ||
1260 | tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), | ||
1261 | tfo->get_fabric_name(), lun->unpacked_lun); | ||
1262 | spin_unlock(&dev->dev_reservation_lock); | ||
1263 | |||
1264 | return len; | ||
1265 | } | ||
1266 | |||
1267 | SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); | ||
1268 | |||
1269 | /* | ||
1270 | * res_pr_registered_i_pts | ||
1271 | */ | ||
1272 | static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( | ||
1273 | struct se_subsystem_dev *su_dev, | ||
1274 | char *page) | ||
1275 | { | ||
1276 | struct target_core_fabric_ops *tfo; | ||
1277 | struct t10_pr_registration *pr_reg; | ||
1278 | unsigned char buf[384]; | ||
1279 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
1280 | ssize_t len = 0; | ||
1281 | int reg_count = 0, prf_isid; | ||
1282 | |||
1283 | if (!(su_dev->se_dev_ptr)) | ||
1284 | return -ENODEV; | ||
1285 | |||
1286 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1287 | return len; | ||
1288 | |||
1289 | len += sprintf(page+len, "SPC-3 PR Registrations:\n"); | ||
1290 | |||
1291 | spin_lock(&T10_RES(su_dev)->registration_lock); | ||
1292 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | ||
1293 | pr_reg_list) { | ||
1294 | |||
1295 | memset(buf, 0, 384); | ||
1296 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
1297 | tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | ||
1298 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
1299 | PR_REG_ISID_ID_LEN); | ||
1300 | sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", | ||
1301 | tfo->get_fabric_name(), | ||
1302 | pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? | ||
1303 | &i_buf[0] : "", pr_reg->pr_res_key, | ||
1304 | pr_reg->pr_res_generation); | ||
1305 | |||
1306 | if ((len + strlen(buf) > PAGE_SIZE)) | ||
1307 | break; | ||
1308 | |||
1309 | len += sprintf(page+len, "%s", buf); | ||
1310 | reg_count++; | ||
1311 | } | ||
1312 | spin_unlock(&T10_RES(su_dev)->registration_lock); | ||
1313 | |||
1314 | if (!(reg_count)) | ||
1315 | len += sprintf(page+len, "None\n"); | ||
1316 | |||
1317 | return len; | ||
1318 | } | ||
1319 | |||
1320 | SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); | ||
1321 | |||
1322 | /* | ||
1323 | * res_pr_type | ||
1324 | */ | ||
1325 | static ssize_t target_core_dev_pr_show_attr_res_pr_type( | ||
1326 | struct se_subsystem_dev *su_dev, | ||
1327 | char *page) | ||
1328 | { | ||
1329 | struct se_device *dev; | ||
1330 | struct t10_pr_registration *pr_reg; | ||
1331 | ssize_t len = 0; | ||
1332 | |||
1333 | dev = su_dev->se_dev_ptr; | ||
1334 | if (!(dev)) | ||
1335 | return -ENODEV; | ||
1336 | |||
1337 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1338 | return len; | ||
1339 | |||
1340 | spin_lock(&dev->dev_reservation_lock); | ||
1341 | pr_reg = dev->dev_pr_res_holder; | ||
1342 | if (!(pr_reg)) { | ||
1343 | len = sprintf(page, "No SPC-3 Reservation holder\n"); | ||
1344 | spin_unlock(&dev->dev_reservation_lock); | ||
1345 | return len; | ||
1346 | } | ||
1347 | len = sprintf(page, "SPC-3 Reservation Type: %s\n", | ||
1348 | core_scsi3_pr_dump_type(pr_reg->pr_res_type)); | ||
1349 | spin_unlock(&dev->dev_reservation_lock); | ||
1350 | |||
1351 | return len; | ||
1352 | } | ||
1353 | |||
1354 | SE_DEV_PR_ATTR_RO(res_pr_type); | ||
1355 | |||
1356 | /* | ||
1357 | * res_type | ||
1358 | */ | ||
1359 | static ssize_t target_core_dev_pr_show_attr_res_type( | ||
1360 | struct se_subsystem_dev *su_dev, | ||
1361 | char *page) | ||
1362 | { | ||
1363 | ssize_t len = 0; | ||
1364 | |||
1365 | if (!(su_dev->se_dev_ptr)) | ||
1366 | return -ENODEV; | ||
1367 | |||
1368 | switch (T10_RES(su_dev)->res_type) { | ||
1369 | case SPC3_PERSISTENT_RESERVATIONS: | ||
1370 | len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); | ||
1371 | break; | ||
1372 | case SPC2_RESERVATIONS: | ||
1373 | len = sprintf(page, "SPC2_RESERVATIONS\n"); | ||
1374 | break; | ||
1375 | case SPC_PASSTHROUGH: | ||
1376 | len = sprintf(page, "SPC_PASSTHROUGH\n"); | ||
1377 | break; | ||
1378 | default: | ||
1379 | len = sprintf(page, "UNKNOWN\n"); | ||
1380 | break; | ||
1381 | } | ||
1382 | |||
1383 | return len; | ||
1384 | } | ||
1385 | |||
1386 | SE_DEV_PR_ATTR_RO(res_type); | ||
1387 | |||
1388 | /* | ||
1389 | * res_aptpl_active | ||
1390 | */ | ||
1391 | |||
1392 | static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( | ||
1393 | struct se_subsystem_dev *su_dev, | ||
1394 | char *page) | ||
1395 | { | ||
1396 | if (!(su_dev->se_dev_ptr)) | ||
1397 | return -ENODEV; | ||
1398 | |||
1399 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1400 | return 0; | ||
1401 | |||
1402 | return sprintf(page, "APTPL Bit Status: %s\n", | ||
1403 | (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled"); | ||
1404 | } | ||
1405 | |||
1406 | SE_DEV_PR_ATTR_RO(res_aptpl_active); | ||
1407 | |||
1408 | /* | ||
1409 | * res_aptpl_metadata | ||
1410 | */ | ||
1411 | static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( | ||
1412 | struct se_subsystem_dev *su_dev, | ||
1413 | char *page) | ||
1414 | { | ||
1415 | if (!(su_dev->se_dev_ptr)) | ||
1416 | return -ENODEV; | ||
1417 | |||
1418 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1419 | return 0; | ||
1420 | |||
1421 | return sprintf(page, "Ready to process PR APTPL metadata..\n"); | ||
1422 | } | ||
1423 | |||
1424 | enum { | ||
1425 | Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, | ||
1426 | Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, | ||
1427 | Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, | ||
1428 | Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err | ||
1429 | }; | ||
1430 | |||
1431 | static match_table_t tokens = { | ||
1432 | {Opt_initiator_fabric, "initiator_fabric=%s"}, | ||
1433 | {Opt_initiator_node, "initiator_node=%s"}, | ||
1434 | {Opt_initiator_sid, "initiator_sid=%s"}, | ||
1435 | {Opt_sa_res_key, "sa_res_key=%s"}, | ||
1436 | {Opt_res_holder, "res_holder=%d"}, | ||
1437 | {Opt_res_type, "res_type=%d"}, | ||
1438 | {Opt_res_scope, "res_scope=%d"}, | ||
1439 | {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, | ||
1440 | {Opt_mapped_lun, "mapped_lun=%d"}, | ||
1441 | {Opt_target_fabric, "target_fabric=%s"}, | ||
1442 | {Opt_target_node, "target_node=%s"}, | ||
1443 | {Opt_tpgt, "tpgt=%d"}, | ||
1444 | {Opt_port_rtpi, "port_rtpi=%d"}, | ||
1445 | {Opt_target_lun, "target_lun=%d"}, | ||
1446 | {Opt_err, NULL} | ||
1447 | }; | ||
1448 | |||
1449 | static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( | ||
1450 | struct se_subsystem_dev *su_dev, | ||
1451 | const char *page, | ||
1452 | size_t count) | ||
1453 | { | ||
1454 | struct se_device *dev; | ||
1455 | unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL; | ||
1456 | unsigned char *isid = NULL; | ||
1457 | char *orig, *ptr, *arg_p, *opts; | ||
1458 | substring_t args[MAX_OPT_ARGS]; | ||
1459 | unsigned long long tmp_ll; | ||
1460 | u64 sa_res_key = 0; | ||
1461 | u32 mapped_lun = 0, target_lun = 0; | ||
1462 | int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; | ||
1463 | u16 port_rtpi = 0, tpgt = 0; | ||
1464 | u8 type = 0, scope; | ||
1465 | |||
1466 | dev = su_dev->se_dev_ptr; | ||
1467 | if (!(dev)) | ||
1468 | return -ENODEV; | ||
1469 | |||
1470 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
1471 | return 0; | ||
1472 | |||
1473 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1474 | printk(KERN_INFO "Unable to process APTPL metadata while" | ||
1475 | " active fabric exports exist\n"); | ||
1476 | return -EINVAL; | ||
1477 | } | ||
1478 | |||
1479 | opts = kstrdup(page, GFP_KERNEL); | ||
1480 | if (!opts) | ||
1481 | return -ENOMEM; | ||
1482 | |||
1483 | orig = opts; | ||
1484 | while ((ptr = strsep(&opts, ",")) != NULL) { | ||
1485 | if (!*ptr) | ||
1486 | continue; | ||
1487 | |||
1488 | token = match_token(ptr, tokens, args); | ||
1489 | switch (token) { | ||
1490 | case Opt_initiator_fabric: | ||
1491 | i_fabric = match_strdup(&args[0]); | ||
1492 | break; | ||
1493 | case Opt_initiator_node: | ||
1494 | i_port = match_strdup(&args[0]); | ||
1495 | if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { | ||
1496 | printk(KERN_ERR "APTPL metadata initiator_node=" | ||
1497 | " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", | ||
1498 | PR_APTPL_MAX_IPORT_LEN); | ||
1499 | ret = -EINVAL; | ||
1500 | break; | ||
1501 | } | ||
1502 | break; | ||
1503 | case Opt_initiator_sid: | ||
1504 | isid = match_strdup(&args[0]); | ||
1505 | if (strlen(isid) > PR_REG_ISID_LEN) { | ||
1506 | printk(KERN_ERR "APTPL metadata initiator_isid" | ||
1507 | "= exceeds PR_REG_ISID_LEN: %d\n", | ||
1508 | PR_REG_ISID_LEN); | ||
1509 | ret = -EINVAL; | ||
1510 | break; | ||
1511 | } | ||
1512 | break; | ||
1513 | case Opt_sa_res_key: | ||
1514 | arg_p = match_strdup(&args[0]); | ||
1515 | ret = strict_strtoull(arg_p, 0, &tmp_ll); | ||
1516 | if (ret < 0) { | ||
1517 | printk(KERN_ERR "strict_strtoull() failed for" | ||
1518 | " sa_res_key=\n"); | ||
1519 | goto out; | ||
1520 | } | ||
1521 | sa_res_key = (u64)tmp_ll; | ||
1522 | break; | ||
1523 | /* | ||
1524 | * PR APTPL Metadata for Reservation | ||
1525 | */ | ||
1526 | case Opt_res_holder: | ||
1527 | match_int(args, &arg); | ||
1528 | res_holder = arg; | ||
1529 | break; | ||
1530 | case Opt_res_type: | ||
1531 | match_int(args, &arg); | ||
1532 | type = (u8)arg; | ||
1533 | break; | ||
1534 | case Opt_res_scope: | ||
1535 | match_int(args, &arg); | ||
1536 | scope = (u8)arg; | ||
1537 | break; | ||
1538 | case Opt_res_all_tg_pt: | ||
1539 | match_int(args, &arg); | ||
1540 | all_tg_pt = (int)arg; | ||
1541 | break; | ||
1542 | case Opt_mapped_lun: | ||
1543 | match_int(args, &arg); | ||
1544 | mapped_lun = (u32)arg; | ||
1545 | break; | ||
1546 | /* | ||
1547 | * PR APTPL Metadata for Target Port | ||
1548 | */ | ||
1549 | case Opt_target_fabric: | ||
1550 | t_fabric = match_strdup(&args[0]); | ||
1551 | break; | ||
1552 | case Opt_target_node: | ||
1553 | t_port = match_strdup(&args[0]); | ||
1554 | if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { | ||
1555 | printk(KERN_ERR "APTPL metadata target_node=" | ||
1556 | " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", | ||
1557 | PR_APTPL_MAX_TPORT_LEN); | ||
1558 | ret = -EINVAL; | ||
1559 | break; | ||
1560 | } | ||
1561 | break; | ||
1562 | case Opt_tpgt: | ||
1563 | match_int(args, &arg); | ||
1564 | tpgt = (u16)arg; | ||
1565 | break; | ||
1566 | case Opt_port_rtpi: | ||
1567 | match_int(args, &arg); | ||
1568 | port_rtpi = (u16)arg; | ||
1569 | break; | ||
1570 | case Opt_target_lun: | ||
1571 | match_int(args, &arg); | ||
1572 | target_lun = (u32)arg; | ||
1573 | break; | ||
1574 | default: | ||
1575 | break; | ||
1576 | } | ||
1577 | } | ||
1578 | |||
1579 | if (!(i_port) || !(t_port) || !(sa_res_key)) { | ||
1580 | printk(KERN_ERR "Illegal parameters for APTPL registration\n"); | ||
1581 | ret = -EINVAL; | ||
1582 | goto out; | ||
1583 | } | ||
1584 | |||
1585 | if (res_holder && !(type)) { | ||
1586 | printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" | ||
1587 | " holder\n", type); | ||
1588 | ret = -EINVAL; | ||
1589 | goto out; | ||
1590 | } | ||
1591 | |||
1592 | ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key, | ||
1593 | i_port, isid, mapped_lun, t_port, tpgt, target_lun, | ||
1594 | res_holder, all_tg_pt, type); | ||
1595 | out: | ||
1596 | kfree(orig); | ||
1597 | return (ret == 0) ? count : ret; | ||
1598 | } | ||
1599 | |||
1600 | SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); | ||
1601 | |||
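The store handler above parses a single comma-separated option string against the token table. A hedged userspace sketch that hands in one APTPL registration record; the device path, IQNs, and key are illustrative only:

```c
/* Hypothetical userspace sketch: write one APTPL registration record
 * to res_aptpl_metadata. The handler rejects input lacking
 * initiator_node=, target_node=, or a non-zero sa_res_key= with -EINVAL. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/config/target/core/"
			   "iblock_0/lun_test/pr/res_aptpl_metadata";
	const char *rec =
		"initiator_fabric=iSCSI,"
		"initiator_node=iqn.1993-08.org.debian:01:abcdef,"
		"sa_res_key=0x1234abcd,"
		"res_holder=1,res_type=3,res_scope=0,res_all_tg_pt=0,"
		"mapped_lun=0,"
		"target_fabric=iSCSI,"
		"target_node=iqn.2003-01.org.linux-iscsi.target:sn.1,"
		"tpgt=1,port_rtpi=1,target_lun=0";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, rec, strlen(rec)) < 0)
		perror("write");
	close(fd);
	return 0;
}
```

Note that res_holder=1 must be paired with a non-zero res_type, matching the `res_holder && !(type)` validation in the store handler.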
1602 | CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group); | ||
1603 | |||
1604 | static struct configfs_attribute *target_core_dev_pr_attrs[] = { | ||
1605 | &target_core_dev_pr_res_holder.attr, | ||
1606 | &target_core_dev_pr_res_pr_all_tgt_pts.attr, | ||
1607 | &target_core_dev_pr_res_pr_generation.attr, | ||
1608 | &target_core_dev_pr_res_pr_holder_tg_port.attr, | ||
1609 | &target_core_dev_pr_res_pr_registered_i_pts.attr, | ||
1610 | &target_core_dev_pr_res_pr_type.attr, | ||
1611 | &target_core_dev_pr_res_type.attr, | ||
1612 | &target_core_dev_pr_res_aptpl_active.attr, | ||
1613 | &target_core_dev_pr_res_aptpl_metadata.attr, | ||
1614 | NULL, | ||
1615 | }; | ||
1616 | |||
1617 | static struct configfs_item_operations target_core_dev_pr_ops = { | ||
1618 | .show_attribute = target_core_dev_pr_attr_show, | ||
1619 | .store_attribute = target_core_dev_pr_attr_store, | ||
1620 | }; | ||
1621 | |||
1622 | static struct config_item_type target_core_dev_pr_cit = { | ||
1623 | .ct_item_ops = &target_core_dev_pr_ops, | ||
1624 | .ct_attrs = target_core_dev_pr_attrs, | ||
1625 | .ct_owner = THIS_MODULE, | ||
1626 | }; | ||
1627 | |||
1628 | /* End functions for struct config_item_type target_core_dev_pr_cit */ | ||
1629 | |||
1630 | /* Start functions for struct config_item_type target_core_dev_cit */ | ||
1631 | |||
1632 | static ssize_t target_core_show_dev_info(void *p, char *page) | ||
1633 | { | ||
1634 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1635 | struct se_hba *hba = se_dev->se_dev_hba; | ||
1636 | struct se_subsystem_api *t = hba->transport; | ||
1637 | int bl = 0; | ||
1638 | ssize_t read_bytes = 0; | ||
1639 | |||
1640 | if (!(se_dev->se_dev_ptr)) | ||
1641 | return -ENODEV; | ||
1642 | |||
1643 | transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); | ||
1644 | read_bytes += bl; | ||
1645 | read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes); | ||
1646 | return read_bytes; | ||
1647 | } | ||
1648 | |||
1649 | static struct target_core_configfs_attribute target_core_attr_dev_info = { | ||
1650 | .attr = { .ca_owner = THIS_MODULE, | ||
1651 | .ca_name = "info", | ||
1652 | .ca_mode = S_IRUGO }, | ||
1653 | .show = target_core_show_dev_info, | ||
1654 | .store = NULL, | ||
1655 | }; | ||
1656 | |||
1657 | static ssize_t target_core_store_dev_control( | ||
1658 | void *p, | ||
1659 | const char *page, | ||
1660 | size_t count) | ||
1661 | { | ||
1662 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1663 | struct se_hba *hba = se_dev->se_dev_hba; | ||
1664 | struct se_subsystem_api *t = hba->transport; | ||
1665 | |||
1666 | if (!(se_dev->se_dev_su_ptr)) { | ||
1667 | printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" | ||
1668 | "_dev_su_ptr\n"); | ||
1669 | return -EINVAL; | ||
1670 | } | ||
1671 | |||
1672 | return t->set_configfs_dev_params(hba, se_dev, page, count); | ||
1673 | } | ||
1674 | |||
1675 | static struct target_core_configfs_attribute target_core_attr_dev_control = { | ||
1676 | .attr = { .ca_owner = THIS_MODULE, | ||
1677 | .ca_name = "control", | ||
1678 | .ca_mode = S_IWUSR }, | ||
1679 | .show = NULL, | ||
1680 | .store = target_core_store_dev_control, | ||
1681 | }; | ||
1682 | |||
1683 | static ssize_t target_core_show_dev_alias(void *p, char *page) | ||
1684 | { | ||
1685 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1686 | |||
1687 | if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) | ||
1688 | return 0; | ||
1689 | |||
1690 | return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias); | ||
1691 | } | ||
1692 | |||
1693 | static ssize_t target_core_store_dev_alias( | ||
1694 | void *p, | ||
1695 | const char *page, | ||
1696 | size_t count) | ||
1697 | { | ||
1698 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1699 | struct se_hba *hba = se_dev->se_dev_hba; | ||
1700 | ssize_t read_bytes; | ||
1701 | |||
1702 | if (count > (SE_DEV_ALIAS_LEN-1)) { | ||
1703 | printk(KERN_ERR "alias count: %d exceeds" | ||
1704 | " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, | ||
1705 | SE_DEV_ALIAS_LEN-1); | ||
1706 | return -EINVAL; | ||
1707 | } | ||
1708 | |||
1709 | se_dev->su_dev_flags |= SDF_USING_ALIAS; | ||
1710 | read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, | ||
1711 | "%s", page); | ||
1712 | |||
1713 | printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", | ||
1714 | config_item_name(&hba->hba_group.cg_item), | ||
1715 | config_item_name(&se_dev->se_dev_group.cg_item), | ||
1716 | se_dev->se_dev_alias); | ||
1717 | |||
1718 | return read_bytes; | ||
1719 | } | ||
1720 | |||
1721 | static struct target_core_configfs_attribute target_core_attr_dev_alias = { | ||
1722 | .attr = { .ca_owner = THIS_MODULE, | ||
1723 | .ca_name = "alias", | ||
1724 | .ca_mode = S_IRUGO | S_IWUSR }, | ||
1725 | .show = target_core_show_dev_alias, | ||
1726 | .store = target_core_store_dev_alias, | ||
1727 | }; | ||
1728 | |||
1729 | static ssize_t target_core_show_dev_udev_path(void *p, char *page) | ||
1730 | { | ||
1731 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1732 | |||
1733 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) | ||
1734 | return 0; | ||
1735 | |||
1736 | return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path); | ||
1737 | } | ||
1738 | |||
1739 | static ssize_t target_core_store_dev_udev_path( | ||
1740 | void *p, | ||
1741 | const char *page, | ||
1742 | size_t count) | ||
1743 | { | ||
1744 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1745 | struct se_hba *hba = se_dev->se_dev_hba; | ||
1746 | ssize_t read_bytes; | ||
1747 | |||
1748 | if (count > (SE_UDEV_PATH_LEN-1)) { | ||
1749 | printk(KERN_ERR "udev_path count: %d exceeds" | ||
1750 | " SE_UDEV_PATH_LEN-1: %u\n", (int)count, | ||
1751 | SE_UDEV_PATH_LEN-1); | ||
1752 | return -EINVAL; | ||
1753 | } | ||
1754 | |||
1755 | se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; | ||
1756 | read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, | ||
1757 | "%s", page); | ||
1758 | |||
1759 | printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", | ||
1760 | config_item_name(&hba->hba_group.cg_item), | ||
1761 | config_item_name(&se_dev->se_dev_group.cg_item), | ||
1762 | se_dev->se_dev_udev_path); | ||
1763 | |||
1764 | return read_bytes; | ||
1765 | } | ||
1766 | |||
1767 | static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { | ||
1768 | .attr = { .ca_owner = THIS_MODULE, | ||
1769 | .ca_name = "udev_path", | ||
1770 | .ca_mode = S_IRUGO | S_IWUSR }, | ||
1771 | .show = target_core_show_dev_udev_path, | ||
1772 | .store = target_core_store_dev_udev_path, | ||
1773 | }; | ||
1774 | |||
1775 | static ssize_t target_core_store_dev_enable( | ||
1776 | void *p, | ||
1777 | const char *page, | ||
1778 | size_t count) | ||
1779 | { | ||
1780 | struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; | ||
1781 | struct se_device *dev; | ||
1782 | struct se_hba *hba = se_dev->se_dev_hba; | ||
1783 | struct se_subsystem_api *t = hba->transport; | ||
1784 | char *ptr; | ||
1785 | |||
1786 | ptr = strstr(page, "1"); | ||
1787 | if (!(ptr)) { | ||
1788 | printk(KERN_ERR "For dev_enable ops, only valid value" | ||
1789 | " is \"1\"\n"); | ||
1790 | return -EINVAL; | ||
1791 | } | ||
1792 | if ((se_dev->se_dev_ptr)) { | ||
1793 | printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" | ||
1794 | " object\n"); | ||
1795 | return -EEXIST; | ||
1796 | } | ||
1797 | |||
1798 | if (t->check_configfs_dev_params(hba, se_dev) < 0) | ||
1799 | return -EINVAL; | ||
1800 | |||
1801 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | ||
1802 | if (!(dev) || IS_ERR(dev)) | ||
1803 | return -EINVAL; | ||
1804 | |||
1805 | se_dev->se_dev_ptr = dev; | ||
1806 | printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" | ||
1807 | " %p\n", se_dev->se_dev_ptr); | ||
1808 | |||
1809 | return count; | ||
1810 | } | ||
1811 | |||
1812 | static struct target_core_configfs_attribute target_core_attr_dev_enable = { | ||
1813 | .attr = { .ca_owner = THIS_MODULE, | ||
1814 | .ca_name = "enable", | ||
1815 | .ca_mode = S_IWUSR }, | ||
1816 | .show = NULL, | ||
1817 | .store = target_core_store_dev_enable, | ||
1818 | }; | ||
1819 | |||
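Putting `control` and `enable` together: configuring a backstore is a two-step write, parameters first, then the literal "1" that triggers create_virtdevice(). A hedged sketch; the backstore path and parameter string are illustrative, and a second enable fails with -EEXIST:

```c
/* Hypothetical userspace sketch of the control/enable sequence:
 * set_configfs_dev_params() consumes the control string, then the
 * "1" write creates the virtual device and sets se_dev_ptr. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	const char *base = "/sys/kernel/config/target/core/iblock_0/lun_test";
	char path[256];

	/* Assumed backstore parameter; each subsystem plugin defines
	 * its own control tokens. */
	snprintf(path, sizeof(path), "%s/control", base);
	if (write_attr(path, "udev_path=/dev/sdb") < 0)
		return 1;

	snprintf(path, sizeof(path), "%s/enable", base);
	return write_attr(path, "1") ? 1 : 0;
}
```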
1820 | static ssize_t target_core_show_alua_lu_gp(void *p, char *page) | ||
1821 | { | ||
1822 | struct se_device *dev; | ||
1823 | struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; | ||
1824 | struct config_item *lu_ci; | ||
1825 | struct t10_alua_lu_gp *lu_gp; | ||
1826 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
1827 | ssize_t len = 0; | ||
1828 | |||
1829 | dev = su_dev->se_dev_ptr; | ||
1830 | if (!(dev)) | ||
1831 | return -ENODEV; | ||
1832 | |||
1833 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) | ||
1834 | return len; | ||
1835 | |||
1836 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | ||
1837 | if (!(lu_gp_mem)) { | ||
1838 | printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" | ||
1839 | " pointer\n"); | ||
1840 | return -EINVAL; | ||
1841 | } | ||
1842 | |||
1843 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | ||
1844 | lu_gp = lu_gp_mem->lu_gp; | ||
1845 | if ((lu_gp)) { | ||
1846 | lu_ci = &lu_gp->lu_gp_group.cg_item; | ||
1847 | len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", | ||
1848 | config_item_name(lu_ci), lu_gp->lu_gp_id); | ||
1849 | } | ||
1850 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
1851 | |||
1852 | return len; | ||
1853 | } | ||
1854 | |||
1855 | static ssize_t target_core_store_alua_lu_gp( | ||
1856 | void *p, | ||
1857 | const char *page, | ||
1858 | size_t count) | ||
1859 | { | ||
1860 | struct se_device *dev; | ||
1861 | struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; | ||
1862 | struct se_hba *hba = su_dev->se_dev_hba; | ||
1863 | struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; | ||
1864 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
1865 | unsigned char buf[LU_GROUP_NAME_BUF]; | ||
1866 | int move = 0; | ||
1867 | |||
1868 | dev = su_dev->se_dev_ptr; | ||
1869 | if (!(dev)) | ||
1870 | return -ENODEV; | ||
1871 | |||
1872 | if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { | ||
1873 | printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", | ||
1874 | config_item_name(&hba->hba_group.cg_item), | ||
1875 | config_item_name(&su_dev->se_dev_group.cg_item)); | ||
1876 | return -EINVAL; | ||
1877 | } | ||
1878 | if (count > LU_GROUP_NAME_BUF) { | ||
1879 | printk(KERN_ERR "ALUA LU Group Alias too large!\n"); | ||
1880 | return -EINVAL; | ||
1881 | } | ||
1882 | memset(buf, 0, LU_GROUP_NAME_BUF); | ||
1883 | memcpy(buf, page, count); | ||
1884 | /* | ||
1885 | * Any ALUA logical unit alias besides "NULL" means we will be | ||
1886 | * making a new group association. | ||
1887 | */ | ||
1888 | if (strcmp(strstrip(buf), "NULL")) { | ||
1889 | /* | ||
1890 | * core_alua_get_lu_gp_by_name() will increment reference to | ||
1891 | * struct t10_alua_lu_gp. This reference is released with | ||
1892 | * core_alua_put_lu_gp_from_name() below. | ||
1893 | */ | ||
1894 | lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); | ||
1895 | if (!(lu_gp_new)) | ||
1896 | return -ENODEV; | ||
1897 | } | ||
1898 | lu_gp_mem = dev->dev_alua_lu_gp_mem; | ||
1899 | if (!(lu_gp_mem)) { | ||
1900 | if (lu_gp_new) | ||
1901 | core_alua_put_lu_gp_from_name(lu_gp_new); | ||
1902 | printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" | ||
1903 | " pointer\n"); | ||
1904 | return -EINVAL; | ||
1905 | } | ||
1906 | |||
1907 | spin_lock(&lu_gp_mem->lu_gp_mem_lock); | ||
1908 | lu_gp = lu_gp_mem->lu_gp; | ||
1909 | if ((lu_gp)) { | ||
1910 | /* | ||
1911 | * Clearing an existing lu_gp association, and replacing | ||
1912 | * with NULL | ||
1913 | */ | ||
1914 | if (!(lu_gp_new)) { | ||
1915 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" | ||
1916 | " from ALUA LU Group: core/alua/lu_gps/%s, ID:" | ||
1917 | " %hu\n", | ||
1918 | config_item_name(&hba->hba_group.cg_item), | ||
1919 | config_item_name(&su_dev->se_dev_group.cg_item), | ||
1920 | config_item_name(&lu_gp->lu_gp_group.cg_item), | ||
1921 | lu_gp->lu_gp_id); | ||
1922 | |||
1923 | __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); | ||
1924 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
1925 | |||
1926 | return count; | ||
1927 | } | ||
1928 | /* | ||
1929 | * Removing existing association of lu_gp_mem with lu_gp | ||
1930 | */ | ||
1931 | __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); | ||
1932 | move = 1; | ||
1933 | } | ||
1934 | /* | ||
1935 | * Associate lu_gp_mem with lu_gp_new. | ||
1936 | */ | ||
1937 | __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); | ||
1938 | spin_unlock(&lu_gp_mem->lu_gp_mem_lock); | ||
1939 | |||
1940 | printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" | ||
1941 | " core/alua/lu_gps/%s, ID: %hu\n", | ||
1942 | (move) ? "Moving" : "Adding", | ||
1943 | config_item_name(&hba->hba_group.cg_item), | ||
1944 | config_item_name(&su_dev->se_dev_group.cg_item), | ||
1945 | config_item_name(&lu_gp_new->lu_gp_group.cg_item), | ||
1946 | lu_gp_new->lu_gp_id); | ||
1947 | |||
1948 | core_alua_put_lu_gp_from_name(lu_gp_new); | ||
1949 | return count; | ||
1950 | } | ||
1951 | |||
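Exercising target_core_store_alua_lu_gp() from userspace: any alias other than the literal string "NULL" attaches the device to that LU group, while "NULL" clears an existing association, per the `strcmp(strstrip(buf), "NULL")` check above. A minimal hedged sketch with hypothetical group and device names:

```c
/* Hypothetical userspace sketch: move a device into an ALUA LU group,
 * then clear the association again. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	int ret = (fd < 0 || write(fd, val, strlen(val)) < 0) ? -1 : 0;

	if (ret)
		perror(path);
	if (fd >= 0)
		close(fd);
	return ret;
}

int main(void)
{
	const char *attr = "/sys/kernel/config/target/core/"
			   "iblock_0/lun_test/alua_lu_gp";

	if (write_str(attr, "my_lu_gp\n"))	/* attach to group */
		return 1;
	return write_str(attr, "NULL\n");	/* detach again */
}
```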
1952 | static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { | ||
1953 | .attr = { .ca_owner = THIS_MODULE, | ||
1954 | .ca_name = "alua_lu_gp", | ||
1955 | .ca_mode = S_IRUGO | S_IWUSR }, | ||
1956 | .show = target_core_show_alua_lu_gp, | ||
1957 | .store = target_core_store_alua_lu_gp, | ||
1958 | }; | ||
1959 | |||
1960 | static struct configfs_attribute *lio_core_dev_attrs[] = { | ||
1961 | &target_core_attr_dev_info.attr, | ||
1962 | &target_core_attr_dev_control.attr, | ||
1963 | &target_core_attr_dev_alias.attr, | ||
1964 | &target_core_attr_dev_udev_path.attr, | ||
1965 | &target_core_attr_dev_enable.attr, | ||
1966 | &target_core_attr_dev_alua_lu_gp.attr, | ||
1967 | NULL, | ||
1968 | }; | ||
1969 | |||
1970 | static void target_core_dev_release(struct config_item *item) | ||
1971 | { | ||
1972 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), | ||
1973 | struct se_subsystem_dev, se_dev_group); | ||
1974 | struct config_group *dev_cg; | ||
1975 | |||
1976 | if (!(se_dev)) | ||
1977 | return; | ||
1978 | |||
1979 | dev_cg = &se_dev->se_dev_group; | ||
1980 | kfree(dev_cg->default_groups); | ||
1981 | } | ||
1982 | |||
1983 | static ssize_t target_core_dev_show(struct config_item *item, | ||
1984 | struct configfs_attribute *attr, | ||
1985 | char *page) | ||
1986 | { | ||
1987 | struct se_subsystem_dev *se_dev = container_of( | ||
1988 | to_config_group(item), struct se_subsystem_dev, | ||
1989 | se_dev_group); | ||
1990 | struct target_core_configfs_attribute *tc_attr = container_of( | ||
1991 | attr, struct target_core_configfs_attribute, attr); | ||
1992 | |||
1993 | if (!(tc_attr->show)) | ||
1994 | return -EINVAL; | ||
1995 | |||
1996 | return tc_attr->show((void *)se_dev, page); | ||
1997 | } | ||
1998 | |||
1999 | static ssize_t target_core_dev_store(struct config_item *item, | ||
2000 | struct configfs_attribute *attr, | ||
2001 | const char *page, size_t count) | ||
2002 | { | ||
2003 | struct se_subsystem_dev *se_dev = container_of( | ||
2004 | to_config_group(item), struct se_subsystem_dev, | ||
2005 | se_dev_group); | ||
2006 | struct target_core_configfs_attribute *tc_attr = container_of( | ||
2007 | attr, struct target_core_configfs_attribute, attr); | ||
2008 | |||
2009 | if (!(tc_attr->store)) | ||
2010 | return -EINVAL; | ||
2011 | |||
2012 | return tc_attr->store((void *)se_dev, page, count); | ||
2013 | } | ||
2014 | |||
2015 | static struct configfs_item_operations target_core_dev_item_ops = { | ||
2016 | .release = target_core_dev_release, | ||
2017 | .show_attribute = target_core_dev_show, | ||
2018 | .store_attribute = target_core_dev_store, | ||
2019 | }; | ||
2020 | |||
2021 | static struct config_item_type target_core_dev_cit = { | ||
2022 | .ct_item_ops = &target_core_dev_item_ops, | ||
2023 | .ct_attrs = lio_core_dev_attrs, | ||
2024 | .ct_owner = THIS_MODULE, | ||
2025 | }; | ||
2026 | |||
2027 | /* End functions for struct config_item_type target_core_dev_cit */ | ||
2028 | |||
2029 | /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ | ||
2030 | |||
2031 | CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp); | ||
2032 | #define SE_DEV_ALUA_LU_ATTR(_name, _mode) \ | ||
2033 | static struct target_core_alua_lu_gp_attribute \ | ||
2034 | target_core_alua_lu_gp_##_name = \ | ||
2035 | __CONFIGFS_EATTR(_name, _mode, \ | ||
2036 | target_core_alua_lu_gp_show_attr_##_name, \ | ||
2037 | target_core_alua_lu_gp_store_attr_##_name); | ||
2038 | |||
2039 | #define SE_DEV_ALUA_LU_ATTR_RO(_name) \ | ||
2040 | static struct target_core_alua_lu_gp_attribute \ | ||
2041 | target_core_alua_lu_gp_##_name = \ | ||
2042 | __CONFIGFS_EATTR_RO(_name, \ | ||
2043 | target_core_alua_lu_gp_show_attr_##_name); | ||
2044 | |||
2045 | /* | ||
2046 | * lu_gp_id | ||
2047 | */ | ||
2048 | static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( | ||
2049 | struct t10_alua_lu_gp *lu_gp, | ||
2050 | char *page) | ||
2051 | { | ||
2052 | if (!(lu_gp->lu_gp_valid_id)) | ||
2053 | return 0; | ||
2054 | |||
2055 | return sprintf(page, "%hu\n", lu_gp->lu_gp_id); | ||
2056 | } | ||
2057 | |||
2058 | static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( | ||
2059 | struct t10_alua_lu_gp *lu_gp, | ||
2060 | const char *page, | ||
2061 | size_t count) | ||
2062 | { | ||
2063 | struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; | ||
2064 | unsigned long lu_gp_id; | ||
2065 | int ret; | ||
2066 | |||
2067 | ret = strict_strtoul(page, 0, &lu_gp_id); | ||
2068 | if (ret < 0) { | ||
2069 | printk(KERN_ERR "strict_strtoul() returned %d for" | ||
2070 | " lu_gp_id\n", ret); | ||
2071 | return -EINVAL; | ||
2072 | } | ||
2073 | if (lu_gp_id > 0x0000ffff) { | ||
2074 | printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" | ||
2075 | " 0x0000ffff\n", lu_gp_id); | ||
2076 | return -EINVAL; | ||
2077 | } | ||
2078 | |||
2079 | ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); | ||
2080 | if (ret < 0) | ||
2081 | return -EINVAL; | ||
2082 | |||
2083 | printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit" | ||
2084 | " Group: core/alua/lu_gps/%s to ID: %hu\n", | ||
2085 | config_item_name(&alua_lu_gp_cg->cg_item), | ||
2086 | lu_gp->lu_gp_id); | ||
2087 | |||
2088 | return count; | ||
2089 | } | ||
2090 | |||
2091 | SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR); | ||
2092 | |||
2093 | /* | ||
2094 | * members | ||
2095 | */ | ||
2096 | static ssize_t target_core_alua_lu_gp_show_attr_members( | ||
2097 | struct t10_alua_lu_gp *lu_gp, | ||
2098 | char *page) | ||
2099 | { | ||
2100 | struct se_device *dev; | ||
2101 | struct se_hba *hba; | ||
2102 | struct se_subsystem_dev *su_dev; | ||
2103 | struct t10_alua_lu_gp_member *lu_gp_mem; | ||
2104 | ssize_t len = 0, cur_len; | ||
2105 | unsigned char buf[LU_GROUP_NAME_BUF]; | ||
2106 | |||
2107 | memset(buf, 0, LU_GROUP_NAME_BUF); | ||
2108 | |||
2109 | spin_lock(&lu_gp->lu_gp_lock); | ||
2110 | list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { | ||
2111 | dev = lu_gp_mem->lu_gp_mem_dev; | ||
2112 | su_dev = dev->se_sub_dev; | ||
2113 | hba = su_dev->se_dev_hba; | ||
2114 | |||
2115 | cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", | ||
2116 | config_item_name(&hba->hba_group.cg_item), | ||
2117 | config_item_name(&su_dev->se_dev_group.cg_item)); | ||
2118 | cur_len++; /* Extra byte for NULL terminator */ | ||
2119 | |||
2120 | if ((cur_len + len) > PAGE_SIZE) { | ||
2121 | printk(KERN_WARNING "Ran out of lu_gp_show_attr" | ||
2122 | "_members buffer\n"); | ||
2123 | break; | ||
2124 | } | ||
2125 | memcpy(page+len, buf, cur_len); | ||
2126 | len += cur_len; | ||
2127 | } | ||
2128 | spin_unlock(&lu_gp->lu_gp_lock); | ||
2129 | |||
2130 | return len; | ||
2131 | } | ||
2132 | |||
2133 | SE_DEV_ALUA_LU_ATTR_RO(members); | ||
2134 | |||
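The members show routine above uses a recurring idiom in this file: format each entry into a scratch buffer, then append only while the running total stays under the single PAGE_SIZE buffer configfs provides. A standalone sketch of that bounded-append idiom in plain userspace C:

```c
/* Standalone sketch of the bounded-append idiom used by the members
 * show handlers: stop emitting entries once the next one would
 * overflow the page. */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096	/* stand-in for the kernel's PAGE_SIZE */

static size_t list_members(char *page, const char **names, int n)
{
	char buf[64];
	size_t len = 0, cur_len;

	for (int i = 0; i < n; i++) {
		cur_len = snprintf(buf, sizeof(buf), "%s\n", names[i]);
		cur_len++;	/* extra byte for the NUL terminator */
		if (cur_len + len > PAGE_SZ)
			break;	/* truncate rather than overflow */
		memcpy(page + len, buf, cur_len);
		len += cur_len;
	}
	return len;
}

int main(void)
{
	const char *names[] = { "iblock_0/lun_test", "rd_mcp_1/ramdisk" };
	char page[PAGE_SZ];
	size_t n = list_members(page, names, 2);

	fwrite(page, 1, n, stdout);
	return 0;
}
```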
2135 | CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group); | ||
2136 | |||
2137 | static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { | ||
2138 | &target_core_alua_lu_gp_lu_gp_id.attr, | ||
2139 | &target_core_alua_lu_gp_members.attr, | ||
2140 | NULL, | ||
2141 | }; | ||
2142 | |||
2143 | static struct configfs_item_operations target_core_alua_lu_gp_ops = { | ||
2144 | .show_attribute = target_core_alua_lu_gp_attr_show, | ||
2145 | .store_attribute = target_core_alua_lu_gp_attr_store, | ||
2146 | }; | ||
2147 | |||
2148 | static struct config_item_type target_core_alua_lu_gp_cit = { | ||
2149 | .ct_item_ops = &target_core_alua_lu_gp_ops, | ||
2150 | .ct_attrs = target_core_alua_lu_gp_attrs, | ||
2151 | .ct_owner = THIS_MODULE, | ||
2152 | }; | ||
2153 | |||
2154 | /* End functions for struct config_item_type target_core_alua_lu_gp_cit */ | ||
2155 | |||
2156 | /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ | ||
2157 | |||
2158 | static struct config_group *target_core_alua_create_lu_gp( | ||
2159 | struct config_group *group, | ||
2160 | const char *name) | ||
2161 | { | ||
2162 | struct t10_alua_lu_gp *lu_gp; | ||
2163 | struct config_group *alua_lu_gp_cg = NULL; | ||
2164 | struct config_item *alua_lu_gp_ci = NULL; | ||
2165 | |||
2166 | lu_gp = core_alua_allocate_lu_gp(name, 0); | ||
2167 | if (IS_ERR(lu_gp)) | ||
2168 | return NULL; | ||
2169 | |||
2170 | alua_lu_gp_cg = &lu_gp->lu_gp_group; | ||
2171 | alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; | ||
2172 | |||
2173 | config_group_init_type_name(alua_lu_gp_cg, name, | ||
2174 | &target_core_alua_lu_gp_cit); | ||
2175 | |||
2176 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" | ||
2177 | " Group: core/alua/lu_gps/%s\n", | ||
2178 | config_item_name(alua_lu_gp_ci)); | ||
2179 | |||
2180 | return alua_lu_gp_cg; | ||
2181 | |||
2182 | } | ||
2183 | |||
2184 | static void target_core_alua_drop_lu_gp( | ||
2185 | struct config_group *group, | ||
2186 | struct config_item *item) | ||
2187 | { | ||
2188 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), | ||
2189 | struct t10_alua_lu_gp, lu_gp_group); | ||
2190 | |||
2191 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" | ||
2192 | " Group: core/alua/lu_gps/%s, ID: %hu\n", | ||
2193 | config_item_name(item), lu_gp->lu_gp_id); | ||
2194 | |||
2195 | config_item_put(item); | ||
2196 | core_alua_free_lu_gp(lu_gp); | ||
2197 | } | ||
2198 | |||
2199 | static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { | ||
2200 | .make_group = &target_core_alua_create_lu_gp, | ||
2201 | .drop_item = &target_core_alua_drop_lu_gp, | ||
2202 | }; | ||
2203 | |||
2204 | static struct config_item_type target_core_alua_lu_gps_cit = { | ||
2205 | .ct_item_ops = NULL, | ||
2206 | .ct_group_ops = &target_core_alua_lu_gps_group_ops, | ||
2207 | .ct_owner = THIS_MODULE, | ||
2208 | }; | ||
2209 | |||
2210 | /* End functions for struct config_item_type target_core_alua_lu_gps_cit */ | ||
2211 | |||
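The group_ops above are invoked by plain directory operations: mkdir(2) under core/alua/lu_gps calls .make_group (target_core_alua_create_lu_gp), and rmdir(2) calls .drop_item. A hedged userspace sketch, with an illustrative group name:

```c
/* Hypothetical userspace sketch: create and destroy an ALUA LU group
 * purely through directory operations on configfs. */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *gp = "/sys/kernel/config/target/core/alua/lu_gps/my_lu_gp";

	if (mkdir(gp, 0755)) {		/* -> target_core_alua_create_lu_gp() */
		perror("mkdir");
		return 1;
	}
	/* ... write my_lu_gp/lu_gp_id here to assign a group ID ... */
	if (rmdir(gp)) {		/* -> target_core_alua_drop_lu_gp() */
		perror("rmdir");
		return 1;
	}
	return 0;
}
```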
2212 | /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ | ||
2213 | |||
2214 | CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp); | ||
2215 | #define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \ | ||
2216 | static struct target_core_alua_tg_pt_gp_attribute \ | ||
2217 | target_core_alua_tg_pt_gp_##_name = \ | ||
2218 | __CONFIGFS_EATTR(_name, _mode, \ | ||
2219 | target_core_alua_tg_pt_gp_show_attr_##_name, \ | ||
2220 | target_core_alua_tg_pt_gp_store_attr_##_name); | ||
2221 | |||
2222 | #define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \ | ||
2223 | static struct target_core_alua_tg_pt_gp_attribute \ | ||
2224 | target_core_alua_tg_pt_gp_##_name = \ | ||
2225 | __CONFIGFS_EATTR_RO(_name, \ | ||
2226 | target_core_alua_tg_pt_gp_show_attr_##_name); | ||
2227 | |||
2228 | /* | ||
2229 | * alua_access_state | ||
2230 | */ | ||
2231 | static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state( | ||
2232 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2233 | char *page) | ||
2234 | { | ||
2235 | return sprintf(page, "%d\n", | ||
2236 | atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state)); | ||
2237 | } | ||
2238 | |||
2239 | static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( | ||
2240 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2241 | const char *page, | ||
2242 | size_t count) | ||
2243 | { | ||
2244 | struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; | ||
2245 | unsigned long tmp; | ||
2246 | int new_state, ret; | ||
2247 | |||
2248 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) { | ||
2249 | printk(KERN_ERR "Unable to do implict ALUA on non valid" | ||
2250 | " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); | ||
2251 | return -EINVAL; | ||
2252 | } | ||
2253 | |||
2254 | ret = strict_strtoul(page, 0, &tmp); | ||
2255 | if (ret < 0) { | ||
2256 | printk("Unable to extract new ALUA access state from" | ||
2257 | " %s\n", page); | ||
2258 | return -EINVAL; | ||
2259 | } | ||
2260 | new_state = (int)tmp; | ||
2261 | |||
2262 | if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { | ||
2263 | printk(KERN_ERR "Unable to process implict configfs ALUA" | ||
2264 | " transition while TPGS_IMPLICT_ALUA is diabled\n"); | ||
2265 | return -EINVAL; | ||
2266 | } | ||
2267 | |||
2268 | ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr, | ||
2269 | NULL, NULL, new_state, 0); | ||
2270 | return (!ret) ? count : -EINVAL; | ||
2271 | } | ||
2272 | |||
2273 | SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR); | ||
2274 | |||
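Requesting an implicit ALUA transition from userspace is a single numeric write to the attribute above. A hedged sketch, assuming the device's default_tg_pt_gp group has a valid ID and TPGS_IMPLICT_ALUA enabled in alua_access_type (the path is illustrative):

```c
/* Hypothetical userspace sketch: request an implicit ALUA transition
 * by writing the numeric target state (here 2 == Standby in SPC-4)
 * to alua_access_state. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/config/target/core/"
			   "iblock_0/lun_test/alua/default_tg_pt_gp/"
			   "alua_access_state";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "2", 1) < 0)	/* 2 == Standby */
		perror("write");
	close(fd);
	return 0;
}
```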
2275 | /* | ||
2276 | * alua_access_status | ||
2277 | */ | ||
2278 | static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status( | ||
2279 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2280 | char *page) | ||
2281 | { | ||
2282 | return sprintf(page, "%s\n", | ||
2283 | core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); | ||
2284 | } | ||
2285 | |||
2286 | static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( | ||
2287 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2288 | const char *page, | ||
2289 | size_t count) | ||
2290 | { | ||
2291 | unsigned long tmp; | ||
2292 | int new_status, ret; | ||
2293 | |||
2294 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) { | ||
2295 | printk(KERN_ERR "Unable to do set ALUA access status on non" | ||
2296 | " valid tg_pt_gp ID: %hu\n", | ||
2297 | tg_pt_gp->tg_pt_gp_valid_id); | ||
2298 | return -EINVAL; | ||
2299 | } | ||
2300 | |||
2301 | ret = strict_strtoul(page, 0, &tmp); | ||
2302 | if (ret < 0) { | ||
2303 | printk(KERN_ERR "Unable to extract new ALUA access status" | ||
2304 | " from %s\n", page); | ||
2305 | return -EINVAL; | ||
2306 | } | ||
2307 | new_status = (int)tmp; | ||
2308 | |||
2309 | if ((new_status != ALUA_STATUS_NONE) && | ||
2310 | (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && | ||
2311 | (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { | ||
2312 | printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", | ||
2313 | new_status); | ||
2314 | return -EINVAL; | ||
2315 | } | ||
2316 | |||
2317 | tg_pt_gp->tg_pt_gp_alua_access_status = new_status; | ||
2318 | return count; | ||
2319 | } | ||
2320 | |||
2321 | SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR); | ||
2322 | |||
2323 | /* | ||
2324 | * alua_access_type | ||
2325 | */ | ||
2326 | static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type( | ||
2327 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2328 | char *page) | ||
2329 | { | ||
2330 | return core_alua_show_access_type(tg_pt_gp, page); | ||
2331 | } | ||
2332 | |||
2333 | static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( | ||
2334 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2335 | const char *page, | ||
2336 | size_t count) | ||
2337 | { | ||
2338 | return core_alua_store_access_type(tg_pt_gp, page, count); | ||
2339 | } | ||
2340 | |||
2341 | SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); | ||
2342 | |||
2343 | /* | ||
2344 | * alua_write_metadata | ||
2345 | */ | ||
2346 | static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( | ||
2347 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2348 | char *page) | ||
2349 | { | ||
2350 | return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata); | ||
2351 | } | ||
2352 | |||
2353 | static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( | ||
2354 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2355 | const char *page, | ||
2356 | size_t count) | ||
2357 | { | ||
2358 | unsigned long tmp; | ||
2359 | int ret; | ||
2360 | |||
2361 | ret = strict_strtoul(page, 0, &tmp); | ||
2362 | if (ret < 0) { | ||
2363 | printk(KERN_ERR "Unable to extract alua_write_metadata\n"); | ||
2364 | return -EINVAL; | ||
2365 | } | ||
2366 | |||
2367 | if ((tmp != 0) && (tmp != 1)) { | ||
2368 | printk(KERN_ERR "Illegal value for alua_write_metadata:" | ||
2369 | " %lu\n", tmp); | ||
2370 | return -EINVAL; | ||
2371 | } | ||
2372 | tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; | ||
2373 | |||
2374 | return count; | ||
2375 | } | ||
2376 | |||
2377 | SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR); | ||
2378 | |||
2379 | |||
2380 | |||
2381 | /* | ||
2382 | * nonop_delay_msecs | ||
2383 | */ | ||
2384 | static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs( | ||
2385 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2386 | char *page) | ||
2387 | { | ||
2388 | return core_alua_show_nonop_delay_msecs(tg_pt_gp, page); | ||
2389 | |||
2390 | } | ||
2391 | |||
2392 | static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs( | ||
2393 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2394 | const char *page, | ||
2395 | size_t count) | ||
2396 | { | ||
2397 | return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count); | ||
2398 | } | ||
2399 | |||
2400 | SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR); | ||
2401 | |||
2402 | /* | ||
2403 | * trans_delay_msecs | ||
2404 | */ | ||
2405 | static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs( | ||
2406 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2407 | char *page) | ||
2408 | { | ||
2409 | return core_alua_show_trans_delay_msecs(tg_pt_gp, page); | ||
2410 | } | ||
2411 | |||
2412 | static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( | ||
2413 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2414 | const char *page, | ||
2415 | size_t count) | ||
2416 | { | ||
2417 | return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count); | ||
2418 | } | ||
2419 | |||
2420 | SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); | ||
2421 | |||
2422 | /* | ||
2423 | * preferred | ||
2424 | */ | ||
2425 | |||
2426 | static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred( | ||
2427 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2428 | char *page) | ||
2429 | { | ||
2430 | return core_alua_show_preferred_bit(tg_pt_gp, page); | ||
2431 | } | ||
2432 | |||
2433 | static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred( | ||
2434 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2435 | const char *page, | ||
2436 | size_t count) | ||
2437 | { | ||
2438 | return core_alua_store_preferred_bit(tg_pt_gp, page, count); | ||
2439 | } | ||
2440 | |||
2441 | SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR); | ||
2442 | |||
2443 | /* | ||
2444 | * tg_pt_gp_id | ||
2445 | */ | ||
2446 | static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( | ||
2447 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2448 | char *page) | ||
2449 | { | ||
2450 | if (!(tg_pt_gp->tg_pt_gp_valid_id)) | ||
2451 | return 0; | ||
2452 | |||
2453 | return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); | ||
2454 | } | ||
2455 | |||
2456 | static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( | ||
2457 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2458 | const char *page, | ||
2459 | size_t count) | ||
2460 | { | ||
2461 | struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; | ||
2462 | unsigned long tg_pt_gp_id; | ||
2463 | int ret; | ||
2464 | |||
2465 | ret = strict_strtoul(page, 0, &tg_pt_gp_id); | ||
2466 | if (ret < 0) { | ||
2467 | printk(KERN_ERR "strict_strtoul() returned %d for" | ||
2468 | " tg_pt_gp_id\n", ret); | ||
2469 | return -EINVAL; | ||
2470 | } | ||
2471 | if (tg_pt_gp_id > 0x0000ffff) { | ||
2472 | printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" | ||
2473 | " 0x0000ffff\n", tg_pt_gp_id); | ||
2474 | return -EINVAL; | ||
2475 | } | ||
2476 | |||
2477 | ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); | ||
2478 | if (ret < 0) | ||
2479 | return -EINVAL; | ||
2480 | |||
2481 | printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " | ||
2482 | "core/alua/tg_pt_gps/%s to ID: %hu\n", | ||
2483 | config_item_name(&alua_tg_pt_gp_cg->cg_item), | ||
2484 | tg_pt_gp->tg_pt_gp_id); | ||
2485 | |||
2486 | return count; | ||
2487 | } | ||
2488 | |||
2489 | SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR); | ||
2490 | |||
2491 | /* | ||
2492 | * members | ||
2493 | */ | ||
2494 | static ssize_t target_core_alua_tg_pt_gp_show_attr_members( | ||
2495 | struct t10_alua_tg_pt_gp *tg_pt_gp, | ||
2496 | char *page) | ||
2497 | { | ||
2498 | struct se_port *port; | ||
2499 | struct se_portal_group *tpg; | ||
2500 | struct se_lun *lun; | ||
2501 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; | ||
2502 | ssize_t len = 0, cur_len; | ||
2503 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; | ||
2504 | |||
2505 | memset(buf, 0, TG_PT_GROUP_NAME_BUF); | ||
2506 | |||
2507 | spin_lock(&tg_pt_gp->tg_pt_gp_lock); | ||
2508 | list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, | ||
2509 | tg_pt_gp_mem_list) { | ||
2510 | port = tg_pt_gp_mem->tg_pt; | ||
2511 | tpg = port->sep_tpg; | ||
2512 | lun = port->sep_lun; | ||
2513 | |||
2514 | cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" | ||
2515 | "/%s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
2516 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
2517 | TPG_TFO(tpg)->tpg_get_tag(tpg), | ||
2518 | config_item_name(&lun->lun_group.cg_item)); | ||
2519 | cur_len++; /* Extra byte for NULL terminator */ | ||
2520 | |||
2521 | if ((cur_len + len) > PAGE_SIZE) { | ||
2522 | printk(KERN_WARNING "Ran out of lu_gp_show_attr" | ||
2523 | "_members buffer\n"); | ||
2524 | break; | ||
2525 | } | ||
2526 | memcpy(page+len, buf, cur_len); | ||
2527 | len += cur_len; | ||
2528 | } | ||
2529 | spin_unlock(&tg_pt_gp->tg_pt_gp_lock); | ||
2530 | |||
2531 | return len; | ||
2532 | } | ||
2533 | |||
2534 | SE_DEV_ALUA_TG_PT_ATTR_RO(members); | ||
2535 | |||
2536 | CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp, | ||
2537 | tg_pt_gp_group); | ||
2538 | |||
2539 | static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { | ||
2540 | &target_core_alua_tg_pt_gp_alua_access_state.attr, | ||
2541 | &target_core_alua_tg_pt_gp_alua_access_status.attr, | ||
2542 | &target_core_alua_tg_pt_gp_alua_access_type.attr, | ||
2543 | &target_core_alua_tg_pt_gp_alua_write_metadata.attr, | ||
2544 | &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, | ||
2545 | &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, | ||
2546 | &target_core_alua_tg_pt_gp_preferred.attr, | ||
2547 | &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, | ||
2548 | &target_core_alua_tg_pt_gp_members.attr, | ||
2549 | NULL, | ||
2550 | }; | ||
2551 | |||
2552 | static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { | ||
2553 | .show_attribute = target_core_alua_tg_pt_gp_attr_show, | ||
2554 | .store_attribute = target_core_alua_tg_pt_gp_attr_store, | ||
2555 | }; | ||
2556 | |||
2557 | static struct config_item_type target_core_alua_tg_pt_gp_cit = { | ||
2558 | .ct_item_ops = &target_core_alua_tg_pt_gp_ops, | ||
2559 | .ct_attrs = target_core_alua_tg_pt_gp_attrs, | ||
2560 | .ct_owner = THIS_MODULE, | ||
2561 | }; | ||
2562 | |||
2563 | /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ | ||
2564 | |||
2565 | /* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ | ||
2566 | |||
2567 | static struct config_group *target_core_alua_create_tg_pt_gp( | ||
2568 | struct config_group *group, | ||
2569 | const char *name) | ||
2570 | { | ||
2571 | struct t10_alua *alua = container_of(group, struct t10_alua, | ||
2572 | alua_tg_pt_gps_group); | ||
2573 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
2574 | struct se_subsystem_dev *su_dev = alua->t10_sub_dev; | ||
2575 | struct config_group *alua_tg_pt_gp_cg = NULL; | ||
2576 | struct config_item *alua_tg_pt_gp_ci = NULL; | ||
2577 | |||
2578 | tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); | ||
2579 | if (!(tg_pt_gp)) | ||
2580 | return NULL; | ||
2581 | |||
2582 | alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; | ||
2583 | alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item; | ||
2584 | |||
2585 | config_group_init_type_name(alua_tg_pt_gp_cg, name, | ||
2586 | &target_core_alua_tg_pt_gp_cit); | ||
2587 | |||
2588 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" | ||
2589 | " Group: alua/tg_pt_gps/%s\n", | ||
2590 | config_item_name(alua_tg_pt_gp_ci)); | ||
2591 | |||
2592 | return alua_tg_pt_gp_cg; | ||
2593 | } | ||
2594 | |||
2595 | static void target_core_alua_drop_tg_pt_gp( | ||
2596 | struct config_group *group, | ||
2597 | struct config_item *item) | ||
2598 | { | ||
2599 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), | ||
2600 | struct t10_alua_tg_pt_gp, tg_pt_gp_group); | ||
2601 | |||
2602 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" | ||
2603 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", | ||
2604 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); | ||
2605 | |||
2606 | config_item_put(item); | ||
2607 | core_alua_free_tg_pt_gp(tg_pt_gp); | ||
2608 | } | ||
2609 | |||
2610 | static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { | ||
2611 | .make_group = &target_core_alua_create_tg_pt_gp, | ||
2612 | .drop_item = &target_core_alua_drop_tg_pt_gp, | ||
2613 | }; | ||
2614 | |||
2615 | static struct config_item_type target_core_alua_tg_pt_gps_cit = { | ||
2616 | .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, | ||
2617 | .ct_owner = THIS_MODULE, | ||
2618 | }; | ||
2619 | |||
2620 | /* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ | ||
2621 | |||
2622 | /* Start functions for struct config_item_type target_core_alua_cit */ | ||
2623 | |||
2624 | /* | ||
2625 | * target_core_alua_cit is a ConfigFS group that lives under | ||
2626 | * /sys/kernel/config/target/core/alua. A default group, core/alua/lu_gps, | ||
2627 | * is attached to target_core_alua_cit in target_core_init_configfs() below; | ||
2628 | * Target Port Groups are created per device under core/$HBA/$DEV/alua. | ||
2629 | */ | ||
2630 | static struct config_item_type target_core_alua_cit = { | ||
2631 | .ct_item_ops = NULL, | ||
2632 | .ct_attrs = NULL, | ||
2633 | .ct_owner = THIS_MODULE, | ||
2634 | }; | ||
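For orientation, once target_core_init_configfs() and target_core_make_subdev() below have run, the ConfigFS layout described in the comment above comes out roughly as follows (an illustrative sketch, not an exhaustive listing):

	/sys/kernel/config/target/core/                        (target_core_cit)
	/sys/kernel/config/target/core/alua/                   (target_core_alua_cit)
	/sys/kernel/config/target/core/alua/lu_gps/default_lu_gp/
	/sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/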
2635 | |||
2636 | /* End functions for struct config_item_type target_core_alua_cit */ | ||
2637 | |||
2638 | /* Start functions for struct config_item_type target_core_hba_cit */ | ||
2639 | |||
2640 | static struct config_group *target_core_make_subdev( | ||
2641 | struct config_group *group, | ||
2642 | const char *name) | ||
2643 | { | ||
2644 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
2645 | struct se_subsystem_dev *se_dev; | ||
2646 | struct se_subsystem_api *t; | ||
2647 | struct config_item *hba_ci = &group->cg_item; | ||
2648 | struct se_hba *hba = item_to_hba(hba_ci); | ||
2649 | struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; | ||
2650 | |||
2651 | if (mutex_lock_interruptible(&hba->hba_access_mutex)) | ||
2652 | return NULL; | ||
2653 | |||
2654 | /* | ||
2655 | * Locate the struct se_subsystem_api from parent's struct se_hba. | ||
2656 | */ | ||
2657 | t = hba->transport; | ||
2658 | |||
2659 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | ||
2660 | if (!se_dev) { | ||
2661 | printk(KERN_ERR "Unable to allocate memory for" | ||
2662 | " struct se_subsystem_dev\n"); | ||
2663 | goto unlock; | ||
2664 | } | ||
2665 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | ||
2666 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | ||
2667 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | ||
2668 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | ||
2669 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | ||
2670 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | ||
2671 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | ||
2672 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | ||
2673 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | ||
2674 | spin_lock_init(&se_dev->se_dev_lock); | ||
2675 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | ||
2676 | se_dev->t10_wwn.t10_sub_dev = se_dev; | ||
2677 | se_dev->t10_alua.t10_sub_dev = se_dev; | ||
2678 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | ||
2679 | |||
2680 | se_dev->se_dev_hba = hba; | ||
2681 | dev_cg = &se_dev->se_dev_group; | ||
2682 | |||
2683 | dev_cg->default_groups = kzalloc(sizeof(struct config_group *) * 6, | ||
2684 | GFP_KERNEL); | ||
2685 | if (!(dev_cg->default_groups)) | ||
2686 | goto out; | ||
2687 | /* | ||
2688 | * Set se_dev_su_ptr from the void pointer returned by the | ||
2689 | * struct se_subsystem_api ->allocate_virtdevice() call. | ||
2690 | * | ||
2691 | * se_dev->se_dev_ptr will be set once ->create_virtdev() has | ||
2692 | * completed successfully one level further up the configfs tree, | ||
2693 | * in the device object's struct config_group. | ||
2694 | */ | ||
2695 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); | ||
2696 | if (!(se_dev->se_dev_su_ptr)) { | ||
2697 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | ||
2698 | " from allocate_virtdevice()\n"); | ||
2699 | goto out; | ||
2700 | } | ||
2701 | spin_lock(&se_global->g_device_lock); | ||
2702 | list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list); | ||
2703 | spin_unlock(&se_global->g_device_lock); | ||
2704 | |||
2705 | config_group_init_type_name(&se_dev->se_dev_group, name, | ||
2706 | &target_core_dev_cit); | ||
2707 | config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib", | ||
2708 | &target_core_dev_attrib_cit); | ||
2709 | config_group_init_type_name(&se_dev->se_dev_pr_group, "pr", | ||
2710 | &target_core_dev_pr_cit); | ||
2711 | config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn", | ||
2712 | &target_core_dev_wwn_cit); | ||
2713 | config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, | ||
2714 | "alua", &target_core_alua_tg_pt_gps_cit); | ||
2715 | dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; | ||
2716 | dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; | ||
2717 | dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; | ||
2718 | dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; | ||
2719 | dev_cg->default_groups[4] = NULL; | ||
2720 | /* | ||
2721 | * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp | ||
2722 | */ | ||
2723 | tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); | ||
2724 | if (!(tg_pt_gp)) | ||
2725 | goto out; | ||
2726 | |||
2727 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | ||
2728 | tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, | ||
2729 | GFP_KERNEL); | ||
2730 | if (!(tg_pt_gp_cg->default_groups)) { | ||
2731 | printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" | ||
2732 | "default_groups\n"); | ||
2733 | goto out; | ||
2734 | } | ||
2735 | |||
2736 | config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, | ||
2737 | "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); | ||
2738 | tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; | ||
2739 | tg_pt_gp_cg->default_groups[1] = NULL; | ||
2740 | T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; | ||
2741 | |||
2742 | printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" | ||
2743 | " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); | ||
2744 | |||
2745 | mutex_unlock(&hba->hba_access_mutex); | ||
2746 | return &se_dev->se_dev_group; | ||
2747 | out: | ||
2748 | if (T10_ALUA(se_dev)->default_tg_pt_gp) { | ||
2749 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | ||
2750 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | ||
2751 | } | ||
2752 | if (tg_pt_gp_cg) | ||
2753 | kfree(tg_pt_gp_cg->default_groups); | ||
2754 | if (dev_cg) | ||
2755 | kfree(dev_cg->default_groups); | ||
2756 | if (se_dev->se_dev_su_ptr) | ||
2757 | t->free_device(se_dev->se_dev_su_ptr); | ||
2758 | kfree(se_dev); | ||
2759 | unlock: | ||
2760 | mutex_unlock(&hba->hba_access_mutex); | ||
2761 | return NULL; | ||
2762 | } | ||
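The two-phase setup in target_core_make_subdev() above means a subsystem plugin only hands back an opaque configuration pointer at mkdir time; the real struct se_device is created later when the device is enabled. A minimal hypothetical plugin hook might look like this (a sketch; foo_dev and foo_allocate_virtdevice are illustrative names, not part of this patch):

	struct foo_dev {
		struct se_hba *fd_hba;
		char fd_name[32];
	};

	static void *foo_allocate_virtdevice(struct se_hba *hba, const char *name)
	{
		/* Hypothetical plugin: allocate only configuration state here */
		struct foo_dev *fd = kzalloc(sizeof(struct foo_dev), GFP_KERNEL);

		if (!fd)
			return NULL;
		fd->fd_hba = hba;
		snprintf(fd->fd_name, sizeof(fd->fd_name), "%s", name);
		/* target_core_make_subdev() stores this in se_dev->se_dev_su_ptr */
		return fd;
	}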
2763 | |||
2764 | static void target_core_drop_subdev( | ||
2765 | struct config_group *group, | ||
2766 | struct config_item *item) | ||
2767 | { | ||
2768 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), | ||
2769 | struct se_subsystem_dev, se_dev_group); | ||
2770 | struct se_hba *hba; | ||
2771 | struct se_subsystem_api *t; | ||
2772 | struct config_item *df_item; | ||
2773 | struct config_group *dev_cg, *tg_pt_gp_cg; | ||
2774 | int i, ret; | ||
2775 | |||
2776 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); | ||
2777 | |||
2778 | if (mutex_lock_interruptible(&hba->hba_access_mutex)) | ||
2779 | goto out; | ||
2780 | |||
2781 | t = hba->transport; | ||
2782 | |||
2783 | spin_lock(&se_global->g_device_lock); | ||
2784 | list_del(&se_dev->g_se_dev_list); | ||
2785 | spin_unlock(&se_global->g_device_lock); | ||
2786 | |||
2787 | tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; | ||
2788 | for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { | ||
2789 | df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; | ||
2790 | tg_pt_gp_cg->default_groups[i] = NULL; | ||
2791 | config_item_put(df_item); | ||
2792 | } | ||
2793 | kfree(tg_pt_gp_cg->default_groups); | ||
2794 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | ||
2795 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | ||
2796 | |||
2797 | dev_cg = &se_dev->se_dev_group; | ||
2798 | for (i = 0; dev_cg->default_groups[i]; i++) { | ||
2799 | df_item = &dev_cg->default_groups[i]->cg_item; | ||
2800 | dev_cg->default_groups[i] = NULL; | ||
2801 | config_item_put(df_item); | ||
2802 | } | ||
2803 | |||
2804 | config_item_put(item); | ||
2805 | /* | ||
2806 | * This pointer will be set when the storage is enabled with: | ||
2807 | * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` | ||
2808 | */ | ||
2809 | if (se_dev->se_dev_ptr) { | ||
2810 | printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" | ||
2811 | "virtual_device() for se_dev_ptr: %p\n", | ||
2812 | se_dev->se_dev_ptr); | ||
2813 | |||
2814 | ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); | ||
2815 | if (ret < 0) | ||
2816 | goto hba_out; | ||
2817 | } else { | ||
2818 | /* | ||
2819 | * Release struct se_subsystem_dev->se_dev_su_ptr.. | ||
2820 | */ | ||
2821 | printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" | ||
2822 | "device() for se_dev_su_ptr: %p\n", | ||
2823 | se_dev->se_dev_su_ptr); | ||
2824 | |||
2825 | t->free_device(se_dev->se_dev_su_ptr); | ||
2826 | } | ||
2827 | |||
2828 | printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" | ||
2829 | "_dev_t: %p\n", se_dev); | ||
2830 | |||
2831 | hba_out: | ||
2832 | mutex_unlock(&hba->hba_access_mutex); | ||
2833 | out: | ||
2834 | kfree(se_dev); | ||
2835 | } | ||
2836 | |||
2837 | static struct configfs_group_operations target_core_hba_group_ops = { | ||
2838 | .make_group = target_core_make_subdev, | ||
2839 | .drop_item = target_core_drop_subdev, | ||
2840 | }; | ||
2841 | |||
2842 | CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba); | ||
2843 | #define SE_HBA_ATTR(_name, _mode) \ | ||
2844 | static struct target_core_hba_attribute \ | ||
2845 | target_core_hba_##_name = \ | ||
2846 | __CONFIGFS_EATTR(_name, _mode, \ | ||
2847 | target_core_hba_show_attr_##_name, \ | ||
2848 | target_core_hba_store_attr_##_name); | ||
2849 | |||
2850 | #define SE_HBA_ATTR_RO(_name) \ | ||
2851 | static struct target_core_hba_attribute \ | ||
2852 | target_core_hba_##_name = \ | ||
2853 | __CONFIGFS_EATTR_RO(_name, \ | ||
2854 | target_core_hba_show_attr_##_name); | ||
2855 | |||
2856 | static ssize_t target_core_hba_show_attr_hba_info( | ||
2857 | struct se_hba *hba, | ||
2858 | char *page) | ||
2859 | { | ||
2860 | return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", | ||
2861 | hba->hba_id, hba->transport->name, | ||
2862 | TARGET_CORE_CONFIGFS_VERSION); | ||
2863 | } | ||
2864 | |||
2865 | SE_HBA_ATTR_RO(hba_info); | ||
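For reference, substituting into the SE_HBA_ATTR_RO() macro defined just above, the SE_HBA_ATTR_RO(hba_info) line expands to:

	static struct target_core_hba_attribute target_core_hba_hba_info =
		__CONFIGFS_EATTR_RO(hba_info, target_core_hba_show_attr_hba_info);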
2866 | |||
2867 | static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba, | ||
2868 | char *page) | ||
2869 | { | ||
2870 | int hba_mode = 0; | ||
2871 | |||
2872 | if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) | ||
2873 | hba_mode = 1; | ||
2874 | |||
2875 | return sprintf(page, "%d\n", hba_mode); | ||
2876 | } | ||
2877 | |||
2878 | static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, | ||
2879 | const char *page, size_t count) | ||
2880 | { | ||
2881 | struct se_subsystem_api *transport = hba->transport; | ||
2882 | unsigned long mode_flag; | ||
2883 | int ret; | ||
2884 | |||
2885 | if (transport->pmode_enable_hba == NULL) | ||
2886 | return -EINVAL; | ||
2887 | |||
2888 | ret = strict_strtoul(page, 0, &mode_flag); | ||
2889 | if (ret < 0) { | ||
2890 | printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); | ||
2891 | return -EINVAL; | ||
2892 | } | ||
2893 | |||
2894 | spin_lock(&hba->device_lock); | ||
2895 | if (!(list_empty(&hba->hba_dev_list))) { | ||
2896 | printk(KERN_ERR "Unable to set hba_mode with active devices\n"); | ||
2897 | spin_unlock(&hba->device_lock); | ||
2898 | return -EINVAL; | ||
2899 | } | ||
2900 | spin_unlock(&hba->device_lock); | ||
2901 | |||
2902 | ret = transport->pmode_enable_hba(hba, mode_flag); | ||
2903 | if (ret < 0) | ||
2904 | return -EINVAL; | ||
2905 | if (ret > 0) | ||
2906 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | ||
2907 | else if (ret == 0) | ||
2908 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | ||
2909 | |||
2910 | return count; | ||
2911 | } | ||
2912 | |||
2913 | SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); | ||
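The store handler above treats the subsystem callback's return value as a tri-state: negative is an error, positive enables passthrough mode, zero disables it. A hypothetical subsystem implementation honoring that contract could be as simple as (sketch; foo_pmode_enable_hba is an illustrative name):

	static int foo_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
	{
		if (mode_flag > 1)
			return -EINVAL;	/* unsupported flag -> error */
		/* >0 sets HBA_FLAGS_PSCSI_MODE above, 0 clears it */
		return mode_flag ? 1 : 0;
	}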
2914 | |||
2915 | CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); | ||
2916 | |||
2917 | static struct configfs_attribute *target_core_hba_attrs[] = { | ||
2918 | &target_core_hba_hba_info.attr, | ||
2919 | &target_core_hba_hba_mode.attr, | ||
2920 | NULL, | ||
2921 | }; | ||
2922 | |||
2923 | static struct configfs_item_operations target_core_hba_item_ops = { | ||
2924 | .show_attribute = target_core_hba_attr_show, | ||
2925 | .store_attribute = target_core_hba_attr_store, | ||
2926 | }; | ||
2927 | |||
2928 | static struct config_item_type target_core_hba_cit = { | ||
2929 | .ct_item_ops = &target_core_hba_item_ops, | ||
2930 | .ct_group_ops = &target_core_hba_group_ops, | ||
2931 | .ct_attrs = target_core_hba_attrs, | ||
2932 | .ct_owner = THIS_MODULE, | ||
2933 | }; | ||
2934 | |||
2935 | static struct config_group *target_core_call_addhbatotarget( | ||
2936 | struct config_group *group, | ||
2937 | const char *name) | ||
2938 | { | ||
2939 | char *se_plugin_str, *str, *str2; | ||
2940 | struct se_hba *hba; | ||
2941 | char buf[TARGET_CORE_NAME_MAX_LEN]; | ||
2942 | unsigned long plugin_dep_id = 0; | ||
2943 | int ret; | ||
2944 | |||
2945 | memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); | ||
2946 | if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) { | ||
2947 | printk(KERN_ERR "Passed *name strlen(): %d exceeds" | ||
2948 | " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), | ||
2949 | TARGET_CORE_NAME_MAX_LEN); | ||
2950 | return ERR_PTR(-ENAMETOOLONG); | ||
2951 | } | ||
2952 | snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); | ||
2953 | |||
2954 | str = strstr(buf, "_"); | ||
2955 | if (!(str)) { | ||
2956 | printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); | ||
2957 | return ERR_PTR(-EINVAL); | ||
2958 | } | ||
2959 | se_plugin_str = buf; | ||
2960 | /* | ||
2961 | * Special case for subsystem plugins that have "_" in their names. | ||
2962 | * Namely rd_direct and rd_mcp.. | ||
2963 | */ | ||
2964 | str2 = strstr(str+1, "_"); | ||
2965 | if ((str2)) { | ||
2966 | *str2 = '\0'; /* Terminate for *se_plugin_str */ | ||
2967 | str2++; /* Skip to start of plugin dependent ID */ | ||
2968 | str = str2; | ||
2969 | } else { | ||
2970 | *str = '\0'; /* Terminate for *se_plugin_str */ | ||
2971 | str++; /* Skip to start of plugin dependent ID */ | ||
2972 | } | ||
2973 | |||
2974 | ret = strict_strtoul(str, 0, &plugin_dep_id); | ||
2975 | if (ret < 0) { | ||
2976 | printk(KERN_ERR "strict_strtoul() returned %d for" | ||
2977 | " plugin_dep_id\n", ret); | ||
2978 | return ERR_PTR(-EINVAL); | ||
2979 | } | ||
2980 | /* | ||
2981 | * Load up TCM subsystem plugins if they have not already been loaded. | ||
2982 | */ | ||
2983 | if (transport_subsystem_check_init() < 0) | ||
2984 | return ERR_PTR(-EINVAL); | ||
2985 | |||
2986 | hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); | ||
2987 | if (IS_ERR(hba)) | ||
2988 | return ERR_CAST(hba); | ||
2989 | |||
2990 | config_group_init_type_name(&hba->hba_group, name, | ||
2991 | &target_core_hba_cit); | ||
2992 | |||
2993 | return &hba->hba_group; | ||
2994 | } | ||
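Worked examples of the name parsing above may help; the directory names here are illustrative:

	/* mkdir core/iblock_0  ->  se_plugin_str = "iblock", plugin_dep_id = 0
	 * mkdir core/rd_mcp_3  ->  se_plugin_str = "rd_mcp", plugin_dep_id = 3
	 * (the second strstr() keeps rd_mcp intact instead of splitting at
	 *  the first underscore) */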
2995 | |||
2996 | static void target_core_call_delhbafromtarget( | ||
2997 | struct config_group *group, | ||
2998 | struct config_item *item) | ||
2999 | { | ||
3000 | struct se_hba *hba = item_to_hba(item); | ||
3001 | |||
3002 | config_item_put(item); | ||
3003 | core_delete_hba(hba); | ||
3004 | } | ||
3005 | |||
3006 | static struct configfs_group_operations target_core_group_ops = { | ||
3007 | .make_group = target_core_call_addhbatotarget, | ||
3008 | .drop_item = target_core_call_delhbafromtarget, | ||
3009 | }; | ||
3010 | |||
3011 | static struct config_item_type target_core_cit = { | ||
3012 | .ct_item_ops = NULL, | ||
3013 | .ct_group_ops = &target_core_group_ops, | ||
3014 | .ct_attrs = NULL, | ||
3015 | .ct_owner = THIS_MODULE, | ||
3016 | }; | ||
3017 | |||
3018 | /* End functions for struct config_item_type target_core_hba_cit */ | ||
3019 | |||
3020 | static int target_core_init_configfs(void) | ||
3021 | { | ||
3022 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | ||
3023 | struct config_group *lu_gp_cg = NULL; | ||
3024 | struct configfs_subsystem *subsys; | ||
3025 | struct proc_dir_entry *scsi_target_proc = NULL; | ||
3026 | struct t10_alua_lu_gp *lu_gp; | ||
3027 | int ret; | ||
3028 | |||
3029 | printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" | ||
3030 | " Engine: %s on %s/%s on "UTS_RELEASE"\n", | ||
3031 | TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); | ||
3032 | |||
3033 | subsys = target_core_subsystem[0]; | ||
3034 | config_group_init(&subsys->su_group); | ||
3035 | mutex_init(&subsys->su_mutex); | ||
3036 | |||
3037 | INIT_LIST_HEAD(&g_tf_list); | ||
3038 | mutex_init(&g_tf_lock); | ||
3039 | init_scsi_index_table(); | ||
3040 | ret = init_se_global(); | ||
3041 | if (ret < 0) | ||
3042 | return -1; | ||
3043 | /* | ||
3044 | * Create $CONFIGFS/target/core default group for HBA <-> Storage Object | ||
3045 | * and ALUA Logical Unit Group and Target Port Group infrastructure. | ||
3046 | */ | ||
3047 | target_cg = &subsys->su_group; | ||
3048 | target_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, | ||
3049 | GFP_KERNEL); | ||
3050 | if (!(target_cg->default_groups)) { | ||
3051 | printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); | ||
3052 | goto out_global; | ||
3053 | } | ||
3054 | |||
3055 | config_group_init_type_name(&se_global->target_core_hbagroup, | ||
3056 | "core", &target_core_cit); | ||
3057 | target_cg->default_groups[0] = &se_global->target_core_hbagroup; | ||
3058 | target_cg->default_groups[1] = NULL; | ||
3059 | /* | ||
3060 | * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ | ||
3061 | */ | ||
3062 | hba_cg = &se_global->target_core_hbagroup; | ||
3063 | hba_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, | ||
3064 | GFP_KERNEL); | ||
3065 | if (!(hba_cg->default_groups)) { | ||
3066 | printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); | ||
3067 | goto out_global; | ||
3068 | } | ||
3069 | config_group_init_type_name(&se_global->alua_group, | ||
3070 | "alua", &target_core_alua_cit); | ||
3071 | hba_cg->default_groups[0] = &se_global->alua_group; | ||
3072 | hba_cg->default_groups[1] = NULL; | ||
3073 | /* | ||
3074 | * Add ALUA Logical Unit Group and Target Port Group ConfigFS | ||
3075 | * groups under /sys/kernel/config/target/core/alua/ | ||
3076 | */ | ||
3077 | alua_cg = &se_global->alua_group; | ||
3078 | alua_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, | ||
3079 | GFP_KERNEL); | ||
3080 | if (!(alua_cg->default_groups)) { | ||
3081 | printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); | ||
3082 | goto out_global; | ||
3083 | } | ||
3084 | |||
3085 | config_group_init_type_name(&se_global->alua_lu_gps_group, | ||
3086 | "lu_gps", &target_core_alua_lu_gps_cit); | ||
3087 | alua_cg->default_groups[0] = &se_global->alua_lu_gps_group; | ||
3088 | alua_cg->default_groups[1] = NULL; | ||
3089 | /* | ||
3090 | * Add core/alua/lu_gps/default_lu_gp | ||
3091 | */ | ||
3092 | lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); | ||
3093 | if (IS_ERR(lu_gp)) | ||
3094 | goto out_global; | ||
3095 | |||
3096 | lu_gp_cg = &se_global->alua_lu_gps_group; | ||
3097 | lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, | ||
3098 | GFP_KERNEL); | ||
3099 | if (!(lu_gp_cg->default_groups)) { | ||
3100 | printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); | ||
3101 | goto out_global; | ||
3102 | } | ||
3103 | |||
3104 | config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", | ||
3105 | &target_core_alua_lu_gp_cit); | ||
3106 | lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; | ||
3107 | lu_gp_cg->default_groups[1] = NULL; | ||
3108 | se_global->default_lu_gp = lu_gp; | ||
3109 | /* | ||
3110 | * Register the target_core_mod subsystem with configfs. | ||
3111 | */ | ||
3112 | ret = configfs_register_subsystem(subsys); | ||
3113 | if (ret < 0) { | ||
3114 | printk(KERN_ERR "Error %d while registering subsystem %s\n", | ||
3115 | ret, subsys->su_group.cg_item.ci_namebuf); | ||
3116 | goto out_global; | ||
3117 | } | ||
3118 | printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" | ||
3119 | " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" | ||
3120 | " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); | ||
3121 | /* | ||
3122 | * Register built-in RAMDISK subsystem logic for virtual LUN 0 | ||
3123 | */ | ||
3124 | ret = rd_module_init(); | ||
3125 | if (ret < 0) | ||
3126 | goto out; | ||
3127 | |||
3128 | if (core_dev_setup_virtual_lun0() < 0) | ||
3129 | goto out; | ||
3130 | |||
3131 | scsi_target_proc = proc_mkdir("scsi_target", NULL); | ||
3132 | if (!(scsi_target_proc)) { | ||
3133 | printk(KERN_ERR "proc_mkdir(scsi_target, NULL) failed\n"); | ||
3134 | goto out; | ||
3135 | } | ||
3136 | ret = init_scsi_target_mib(); | ||
3137 | if (ret < 0) | ||
3138 | goto out; | ||
3139 | |||
3140 | return 0; | ||
3141 | |||
3142 | out: | ||
3143 | configfs_unregister_subsystem(subsys); | ||
3144 | if (scsi_target_proc) | ||
3145 | remove_proc_entry("scsi_target", NULL); | ||
3146 | core_dev_release_virtual_lun0(); | ||
3147 | rd_module_exit(); | ||
3148 | out_global: | ||
3149 | if (se_global->default_lu_gp) { | ||
3150 | core_alua_free_lu_gp(se_global->default_lu_gp); | ||
3151 | se_global->default_lu_gp = NULL; | ||
3152 | } | ||
3153 | if (lu_gp_cg) | ||
3154 | kfree(lu_gp_cg->default_groups); | ||
3155 | if (alua_cg) | ||
3156 | kfree(alua_cg->default_groups); | ||
3157 | if (hba_cg) | ||
3158 | kfree(hba_cg->default_groups); | ||
3159 | kfree(target_cg->default_groups); | ||
3160 | release_se_global(); | ||
3161 | return -1; | ||
3162 | } | ||
3163 | |||
3164 | static void target_core_exit_configfs(void) | ||
3165 | { | ||
3166 | struct configfs_subsystem *subsys; | ||
3167 | struct config_group *hba_cg, *alua_cg, *lu_gp_cg; | ||
3168 | struct config_item *item; | ||
3169 | int i; | ||
3170 | |||
3171 | se_global->in_shutdown = 1; | ||
3172 | subsys = target_core_subsystem[0]; | ||
3173 | |||
3174 | lu_gp_cg = &se_global->alua_lu_gps_group; | ||
3175 | for (i = 0; lu_gp_cg->default_groups[i]; i++) { | ||
3176 | item = &lu_gp_cg->default_groups[i]->cg_item; | ||
3177 | lu_gp_cg->default_groups[i] = NULL; | ||
3178 | config_item_put(item); | ||
3179 | } | ||
3180 | kfree(lu_gp_cg->default_groups); | ||
3181 | core_alua_free_lu_gp(se_global->default_lu_gp); | ||
3182 | se_global->default_lu_gp = NULL; | ||
3183 | |||
3184 | alua_cg = &se_global->alua_group; | ||
3185 | for (i = 0; alua_cg->default_groups[i]; i++) { | ||
3186 | item = &alua_cg->default_groups[i]->cg_item; | ||
3187 | alua_cg->default_groups[i] = NULL; | ||
3188 | config_item_put(item); | ||
3189 | } | ||
3190 | kfree(alua_cg->default_groups); | ||
3191 | |||
3192 | hba_cg = &se_global->target_core_hbagroup; | ||
3193 | for (i = 0; hba_cg->default_groups[i]; i++) { | ||
3194 | item = &hba_cg->default_groups[i]->cg_item; | ||
3195 | hba_cg->default_groups[i] = NULL; | ||
3196 | config_item_put(item); | ||
3197 | } | ||
3198 | kfree(hba_cg->default_groups); | ||
3199 | |||
3200 | for (i = 0; subsys->su_group.default_groups[i]; i++) { | ||
3201 | item = &subsys->su_group.default_groups[i]->cg_item; | ||
3202 | subsys->su_group.default_groups[i] = NULL; | ||
3203 | config_item_put(item); | ||
3204 | } | ||
3205 | kfree(subsys->su_group.default_groups); | ||
3206 | |||
3207 | configfs_unregister_subsystem(subsys); | ||
3208 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" | ||
3209 | " Infrastructure\n"); | ||
3210 | |||
3211 | remove_scsi_target_mib(); | ||
3212 | remove_proc_entry("scsi_target", NULL); | ||
3213 | core_dev_release_virtual_lun0(); | ||
3214 | rd_module_exit(); | ||
3215 | release_se_global(); | ||
3216 | |||
3217 | return; | ||
3218 | } | ||
3219 | |||
3220 | MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); | ||
3221 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | ||
3222 | MODULE_LICENSE("GPL"); | ||
3223 | |||
3224 | module_init(target_core_init_configfs); | ||
3225 | module_exit(target_core_exit_configfs); | ||
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c new file mode 100644 index 000000000000..317ce58d426d --- /dev/null +++ b/drivers/target/target_core_device.c | |||
@@ -0,0 +1,1694 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_device.c (based on iscsi_target_device.c) | ||
3 | * | ||
4 | * This file contains the iSCSI Virtual Device and Disk | ||
5 | * Transport-agnostic functions. | ||
6 | * | ||
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | ||
8 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. | ||
9 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
10 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
11 | * | ||
12 | * Nicholas A. Bellinger <nab@kernel.org> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | ******************************************************************************/ | ||
29 | |||
30 | #include <linux/net.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/smp_lock.h> | ||
37 | #include <linux/kthread.h> | ||
38 | #include <linux/in.h> | ||
39 | #include <net/sock.h> | ||
40 | #include <net/tcp.h> | ||
41 | #include <scsi/scsi.h> | ||
42 | |||
43 | #include <target/target_core_base.h> | ||
44 | #include <target/target_core_device.h> | ||
45 | #include <target/target_core_tpg.h> | ||
46 | #include <target/target_core_transport.h> | ||
47 | #include <target/target_core_fabric_ops.h> | ||
48 | |||
49 | #include "target_core_alua.h" | ||
50 | #include "target_core_hba.h" | ||
51 | #include "target_core_pr.h" | ||
52 | #include "target_core_ua.h" | ||
53 | |||
54 | static void se_dev_start(struct se_device *dev); | ||
55 | static void se_dev_stop(struct se_device *dev); | ||
56 | |||
57 | int transport_get_lun_for_cmd( | ||
58 | struct se_cmd *se_cmd, | ||
59 | unsigned char *cdb, | ||
60 | u32 unpacked_lun) | ||
61 | { | ||
62 | struct se_dev_entry *deve; | ||
63 | struct se_lun *se_lun = NULL; | ||
64 | struct se_session *se_sess = SE_SESS(se_cmd); | ||
65 | unsigned long flags; | ||
66 | int read_only = 0; | ||
67 | |||
68 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
69 | deve = se_cmd->se_deve = | ||
70 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | ||
71 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | ||
72 | if (se_cmd) { | ||
73 | deve->total_cmds++; | ||
74 | deve->total_bytes += se_cmd->data_length; | ||
75 | |||
76 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | ||
77 | if (deve->lun_flags & | ||
78 | TRANSPORT_LUNFLAGS_READ_ONLY) { | ||
79 | read_only = 1; | ||
80 | goto out; | ||
81 | } | ||
82 | deve->write_bytes += se_cmd->data_length; | ||
83 | } else if (se_cmd->data_direction == | ||
84 | DMA_FROM_DEVICE) { | ||
85 | deve->read_bytes += se_cmd->data_length; | ||
86 | } | ||
87 | } | ||
88 | deve->deve_cmds++; | ||
89 | |||
90 | se_lun = se_cmd->se_lun = deve->se_lun; | ||
91 | se_cmd->pr_res_key = deve->pr_res_key; | ||
92 | se_cmd->orig_fe_lun = unpacked_lun; | ||
93 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | ||
94 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | ||
95 | } | ||
96 | out: | ||
97 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
98 | |||
99 | if (!se_lun) { | ||
100 | if (read_only) { | ||
101 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | ||
102 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
103 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | ||
104 | " Access for 0x%08x\n", | ||
105 | CMD_TFO(se_cmd)->get_fabric_name(), | ||
106 | unpacked_lun); | ||
107 | return -1; | ||
108 | } else { | ||
109 | /* | ||
110 | * Use the se_portal_group->tpg_virt_lun0 to allow for | ||
111 | * REPORT_LUNS, et al to be returned when no active | ||
112 | * MappedLUN=0 exists for this Initiator Port. | ||
113 | */ | ||
114 | if (unpacked_lun != 0) { | ||
115 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | ||
116 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
117 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | ||
118 | " Access for 0x%08x\n", | ||
119 | CMD_TFO(se_cmd)->get_fabric_name(), | ||
120 | unpacked_lun); | ||
121 | return -1; | ||
122 | } | ||
123 | /* | ||
124 | * Force WRITE PROTECT for virtual LUN 0 | ||
125 | */ | ||
126 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | ||
127 | (se_cmd->data_direction != DMA_NONE)) { | ||
128 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | ||
129 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
130 | return -1; | ||
131 | } | ||
132 | #if 0 | ||
133 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", | ||
134 | CMD_TFO(se_cmd)->get_fabric_name()); | ||
135 | #endif | ||
136 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | ||
137 | se_cmd->orig_fe_lun = 0; | ||
138 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | ||
139 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | ||
140 | } | ||
141 | } | ||
142 | /* | ||
143 | * Determine if the struct se_lun is online. | ||
144 | */ | ||
145 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | ||
146 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | ||
147 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | ||
148 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
149 | return -1; | ||
150 | } | ||
151 | |||
152 | { | ||
153 | struct se_device *dev = se_lun->lun_se_dev; | ||
154 | spin_lock(&dev->stats_lock); | ||
155 | dev->num_cmds++; | ||
156 | if (se_cmd->data_direction == DMA_TO_DEVICE) | ||
157 | dev->write_bytes += se_cmd->data_length; | ||
158 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | ||
159 | dev->read_bytes += se_cmd->data_length; | ||
160 | spin_unlock(&dev->stats_lock); | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * Add the struct se_cmd to the struct se_lun's cmd list. This list is used | ||
165 | * for tracking state of struct se_cmds during LUN shutdown events. | ||
166 | */ | ||
167 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | ||
168 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); | ||
169 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); | ||
170 | #if 0 | ||
171 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", | ||
172 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); | ||
173 | #endif | ||
174 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | EXPORT_SYMBOL(transport_get_lun_for_cmd); | ||
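A fabric module is expected to call transport_get_lun_for_cmd() once it has decoded the wire-level LUN; on failure the routine has already filled in se_cmd->scsi_sense_reason, so the caller only needs to return the check condition. A hypothetical fabric call site (sketch; fabric_send_check_condition is an illustrative helper, not part of this patch):

	if (transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun) < 0) {
		/* scsi_sense_reason was set above, e.g. to
		 * TCM_NON_EXISTENT_LUN or TCM_WRITE_PROTECTED */
		fabric_send_check_condition(se_cmd);	/* illustrative */
		return;
	}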
179 | |||
180 | int transport_get_lun_for_tmr( | ||
181 | struct se_cmd *se_cmd, | ||
182 | u32 unpacked_lun) | ||
183 | { | ||
184 | struct se_device *dev = NULL; | ||
185 | struct se_dev_entry *deve; | ||
186 | struct se_lun *se_lun = NULL; | ||
187 | struct se_session *se_sess = SE_SESS(se_cmd); | ||
188 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | ||
189 | |||
190 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
191 | deve = se_cmd->se_deve = | ||
192 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | ||
193 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | ||
194 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | ||
195 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; | ||
196 | se_cmd->pr_res_key = deve->pr_res_key; | ||
197 | se_cmd->orig_fe_lun = unpacked_lun; | ||
198 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | ||
199 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ | ||
200 | } | ||
201 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
202 | |||
203 | if (!se_lun) { | ||
204 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | ||
205 | " Access for 0x%08x\n", | ||
206 | CMD_TFO(se_cmd)->get_fabric_name(), | ||
207 | unpacked_lun); | ||
208 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
209 | return -1; | ||
210 | } | ||
211 | /* | ||
212 | * Determine if the struct se_lun is online. | ||
213 | */ | ||
214 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | ||
215 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | ||
216 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
217 | return -1; | ||
218 | } | ||
219 | |||
220 | spin_lock(&dev->se_tmr_lock); | ||
221 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); | ||
222 | spin_unlock(&dev->se_tmr_lock); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | EXPORT_SYMBOL(transport_get_lun_for_tmr); | ||
227 | |||
228 | /* | ||
229 | * This function is called from core_scsi3_emulate_pro_register_and_move() | ||
230 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count | ||
231 | * when a matching rtpi is found. | ||
232 | */ | ||
233 | struct se_dev_entry *core_get_se_deve_from_rtpi( | ||
234 | struct se_node_acl *nacl, | ||
235 | u16 rtpi) | ||
236 | { | ||
237 | struct se_dev_entry *deve; | ||
238 | struct se_lun *lun; | ||
239 | struct se_port *port; | ||
240 | struct se_portal_group *tpg = nacl->se_tpg; | ||
241 | u32 i; | ||
242 | |||
243 | spin_lock_irq(&nacl->device_list_lock); | ||
244 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
245 | deve = &nacl->device_list[i]; | ||
246 | |||
247 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
248 | continue; | ||
249 | |||
250 | lun = deve->se_lun; | ||
251 | if (!(lun)) { | ||
252 | printk(KERN_ERR "%s device entry's se_lun pointer is" | ||
253 | " NULL, but Initiator has access.\n", | ||
254 | TPG_TFO(tpg)->get_fabric_name()); | ||
255 | continue; | ||
256 | } | ||
257 | port = lun->lun_sep; | ||
258 | if (!(port)) { | ||
259 | printk(KERN_ERR "%s device entry's se_port pointer is" | ||
260 | " NULL, but Initiator has access.\n", | ||
261 | TPG_TFO(tpg)->get_fabric_name()); | ||
262 | continue; | ||
263 | } | ||
264 | if (port->sep_rtpi != rtpi) | ||
265 | continue; | ||
266 | |||
267 | atomic_inc(&deve->pr_ref_count); | ||
268 | smp_mb__after_atomic_inc(); | ||
269 | spin_unlock_irq(&nacl->device_list_lock); | ||
270 | |||
271 | return deve; | ||
272 | } | ||
273 | spin_unlock_irq(&nacl->device_list_lock); | ||
274 | |||
275 | return NULL; | ||
276 | } | ||
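Because core_get_se_deve_from_rtpi() returns with deve->pr_ref_count elevated, the PR code paths named in the comment above must drop that reference once they are done with the entry; the while loop in core_update_device_list_for_node() below spins until it reaches zero. A sketch of the expected caller pattern:

	deve = core_get_se_deve_from_rtpi(nacl, rtpi);
	if (deve) {
		/* ... use the matched struct se_dev_entry ... */
		atomic_dec(&deve->pr_ref_count);
		smp_mb__after_atomic_dec();
	}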
277 | |||
278 | int core_free_device_list_for_node( | ||
279 | struct se_node_acl *nacl, | ||
280 | struct se_portal_group *tpg) | ||
281 | { | ||
282 | struct se_dev_entry *deve; | ||
283 | struct se_lun *lun; | ||
284 | u32 i; | ||
285 | |||
286 | if (!nacl->device_list) | ||
287 | return 0; | ||
288 | |||
289 | spin_lock_irq(&nacl->device_list_lock); | ||
290 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
291 | deve = &nacl->device_list[i]; | ||
292 | |||
293 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
294 | continue; | ||
295 | |||
296 | if (!deve->se_lun) { | ||
297 | printk(KERN_ERR "%s device entry's se_lun pointer is" | ||
298 | " NULL, but Initiator has access.\n", | ||
299 | TPG_TFO(tpg)->get_fabric_name()); | ||
300 | continue; | ||
301 | } | ||
302 | lun = deve->se_lun; | ||
303 | |||
304 | spin_unlock_irq(&nacl->device_list_lock); | ||
305 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | ||
306 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | ||
307 | spin_lock_irq(&nacl->device_list_lock); | ||
308 | } | ||
309 | spin_unlock_irq(&nacl->device_list_lock); | ||
310 | |||
311 | kfree(nacl->device_list); | ||
312 | nacl->device_list = NULL; | ||
313 | |||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | ||
318 | { | ||
319 | struct se_dev_entry *deve; | ||
320 | |||
321 | spin_lock_irq(&se_nacl->device_list_lock); | ||
322 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | ||
323 | deve->deve_cmds--; | ||
324 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
325 | |||
326 | return; | ||
327 | } | ||
328 | |||
329 | void core_update_device_list_access( | ||
330 | u32 mapped_lun, | ||
331 | u32 lun_access, | ||
332 | struct se_node_acl *nacl) | ||
333 | { | ||
334 | struct se_dev_entry *deve; | ||
335 | |||
336 | spin_lock_irq(&nacl->device_list_lock); | ||
337 | deve = &nacl->device_list[mapped_lun]; | ||
338 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | ||
339 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | ||
340 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | ||
341 | } else { | ||
342 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | ||
343 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
344 | } | ||
345 | spin_unlock_irq(&nacl->device_list_lock); | ||
346 | |||
347 | return; | ||
348 | } | ||
349 | |||
350 | /* core_update_device_list_for_node(): | ||
351 | * | ||
352 | * | ||
353 | */ | ||
354 | int core_update_device_list_for_node( | ||
355 | struct se_lun *lun, | ||
356 | struct se_lun_acl *lun_acl, | ||
357 | u32 mapped_lun, | ||
358 | u32 lun_access, | ||
359 | struct se_node_acl *nacl, | ||
360 | struct se_portal_group *tpg, | ||
361 | int enable) | ||
362 | { | ||
363 | struct se_port *port = lun->lun_sep; | ||
364 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; | ||
365 | int trans = 0; | ||
366 | /* | ||
367 | * If the MappedLUN entry is being disabled, the entry in | ||
368 | * port->sep_alua_list must be removed now before clearing the | ||
369 | * struct se_dev_entry pointers below as logic in | ||
370 | * core_alua_do_transition_tg_pt() depends on these being present. | ||
371 | */ | ||
372 | if (!(enable)) { | ||
373 | /* | ||
374 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | ||
375 | * that have not been explicitly converted to MappedLUNs -> | ||
376 | * struct se_lun_acl. | ||
377 | */ | ||
378 | if (!(deve->se_lun_acl)) | ||
379 | return 0; | ||
380 | |||
381 | spin_lock_bh(&port->sep_alua_lock); | ||
382 | list_del(&deve->alua_port_list); | ||
383 | spin_unlock_bh(&port->sep_alua_lock); | ||
384 | } | ||
385 | |||
386 | spin_lock_irq(&nacl->device_list_lock); | ||
387 | if (enable) { | ||
388 | /* | ||
389 | * Check if the call is handling demo mode -> explicit LUN ACL | ||
390 | * transition. This transition must be for the same struct se_lun | ||
391 | * + mapped_lun that was setup in demo mode.. | ||
392 | */ | ||
393 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | ||
394 | if (deve->se_lun_acl != NULL) { | ||
395 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl already set" | ||
396 | " for demo mode -> explicit LUN ACL transition\n"); | ||
397 | spin_unlock_irq(&nacl->device_list_lock); | ||
398 | return -1; | ||
399 | } | ||
400 | if (deve->se_lun != lun) { | ||
401 | printk(KERN_ERR "struct se_dev_entry->se_lun does not match passed" | ||
402 | " struct se_lun for demo mode -> explicit LUN ACL transition\n"); | ||
403 | spin_unlock_irq(&nacl->device_list_lock); | ||
404 | return -1; | ||
405 | } | ||
406 | deve->se_lun_acl = lun_acl; | ||
407 | trans = 1; | ||
408 | } else { | ||
409 | deve->se_lun = lun; | ||
410 | deve->se_lun_acl = lun_acl; | ||
411 | deve->mapped_lun = mapped_lun; | ||
412 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | ||
413 | } | ||
414 | |||
415 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | ||
416 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | ||
417 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | ||
418 | } else { | ||
419 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | ||
420 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | ||
421 | } | ||
422 | |||
423 | if (trans) { | ||
424 | spin_unlock_irq(&nacl->device_list_lock); | ||
425 | return 0; | ||
426 | } | ||
427 | deve->creation_time = get_jiffies_64(); | ||
428 | deve->attach_count++; | ||
429 | spin_unlock_irq(&nacl->device_list_lock); | ||
430 | |||
431 | spin_lock_bh(&port->sep_alua_lock); | ||
432 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | ||
433 | spin_unlock_bh(&port->sep_alua_lock); | ||
434 | |||
435 | return 0; | ||
436 | } | ||
437 | /* | ||
438 | * Wait for any in-progress SPEC_I_PT=1 or REGISTER_AND_MOVE | ||
439 | * PR operation to complete. | ||
440 | */ | ||
441 | spin_unlock_irq(&nacl->device_list_lock); | ||
442 | while (atomic_read(&deve->pr_ref_count) != 0) | ||
443 | cpu_relax(); | ||
444 | spin_lock_irq(&nacl->device_list_lock); | ||
445 | /* | ||
446 | * Disable struct se_dev_entry LUN ACL mapping | ||
447 | */ | ||
448 | core_scsi3_ua_release_all(deve); | ||
449 | deve->se_lun = NULL; | ||
450 | deve->se_lun_acl = NULL; | ||
451 | deve->lun_flags = 0; | ||
452 | deve->creation_time = 0; | ||
453 | deve->attach_count--; | ||
454 | spin_unlock_irq(&nacl->device_list_lock); | ||
455 | |||
456 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); | ||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | /* core_clear_lun_from_tpg(): | ||
461 | * | ||
462 | * | ||
463 | */ | ||
464 | void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | ||
465 | { | ||
466 | struct se_node_acl *nacl; | ||
467 | struct se_dev_entry *deve; | ||
468 | u32 i; | ||
469 | |||
470 | spin_lock_bh(&tpg->acl_node_lock); | ||
471 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | ||
472 | spin_unlock_bh(&tpg->acl_node_lock); | ||
473 | |||
474 | spin_lock_irq(&nacl->device_list_lock); | ||
475 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
476 | deve = &nacl->device_list[i]; | ||
477 | if (lun != deve->se_lun) | ||
478 | continue; | ||
479 | spin_unlock_irq(&nacl->device_list_lock); | ||
480 | |||
481 | core_update_device_list_for_node(lun, NULL, | ||
482 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, | ||
483 | nacl, tpg, 0); | ||
484 | |||
485 | spin_lock_irq(&nacl->device_list_lock); | ||
486 | } | ||
487 | spin_unlock_irq(&nacl->device_list_lock); | ||
488 | |||
489 | spin_lock_bh(&tpg->acl_node_lock); | ||
490 | } | ||
491 | spin_unlock_bh(&tpg->acl_node_lock); | ||
492 | |||
493 | return; | ||
494 | } | ||
495 | |||
496 | static struct se_port *core_alloc_port(struct se_device *dev) | ||
497 | { | ||
498 | struct se_port *port, *port_tmp; | ||
499 | |||
500 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | ||
501 | if (!(port)) { | ||
502 | printk(KERN_ERR "Unable to allocate struct se_port\n"); | ||
503 | return NULL; | ||
504 | } | ||
505 | INIT_LIST_HEAD(&port->sep_alua_list); | ||
506 | INIT_LIST_HEAD(&port->sep_list); | ||
507 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | ||
508 | spin_lock_init(&port->sep_alua_lock); | ||
509 | mutex_init(&port->sep_tg_pt_md_mutex); | ||
510 | |||
511 | spin_lock(&dev->se_port_lock); | ||
512 | if (dev->dev_port_count == 0x0000ffff) { | ||
513 | printk(KERN_WARNING "Reached dev->dev_port_count ==" | ||
514 | " 0x0000ffff\n"); | ||
515 | spin_unlock(&dev->se_port_lock); | ||
516 | return NULL; | ||
517 | } | ||
518 | again: | ||
519 | /* | ||
520 | * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device | ||
521 | * Here is the table from spc4r17 section 7.7.3.8. | ||
522 | * | ||
523 | * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field | ||
524 | * | ||
525 | * Code Description | ||
526 | * 0h Reserved | ||
527 | * 1h Relative port 1, historically known as port A | ||
528 | * 2h Relative port 2, historically known as port B | ||
529 | * 3h to FFFFh Relative port 3 through 65 535 | ||
530 | */ | ||
531 | port->sep_rtpi = dev->dev_rpti_counter++; | ||
532 | if (!(port->sep_rtpi)) | ||
533 | goto again; | ||
534 | |||
535 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { | ||
536 | /* | ||
537 | * Make sure RELATIVE TARGET PORT IDENTIFIER is unique | ||
538 | * for 16-bit wrap.. | ||
539 | */ | ||
540 | if (port->sep_rtpi == port_tmp->sep_rtpi) | ||
541 | goto again; | ||
542 | } | ||
543 | spin_unlock(&dev->se_port_lock); | ||
544 | |||
545 | return port; | ||
546 | } | ||
547 | |||
548 | static void core_export_port( | ||
549 | struct se_device *dev, | ||
550 | struct se_portal_group *tpg, | ||
551 | struct se_port *port, | ||
552 | struct se_lun *lun) | ||
553 | { | ||
554 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | ||
555 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | ||
556 | |||
557 | spin_lock(&dev->se_port_lock); | ||
558 | spin_lock(&lun->lun_sep_lock); | ||
559 | port->sep_tpg = tpg; | ||
560 | port->sep_lun = lun; | ||
561 | lun->lun_sep = port; | ||
562 | spin_unlock(&lun->lun_sep_lock); | ||
563 | |||
564 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | ||
565 | spin_unlock(&dev->se_port_lock); | ||
566 | |||
567 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { | ||
568 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | ||
569 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | ||
570 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" | ||
571 | "_gp_member_t\n"); | ||
572 | return; | ||
573 | } | ||
574 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
575 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | ||
576 | T10_ALUA(su_dev)->default_tg_pt_gp); | ||
577 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | ||
578 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" | ||
579 | " Group: alua/default_tg_pt_gp\n", | ||
580 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); | ||
581 | } | ||
582 | |||
583 | dev->dev_port_count++; | ||
584 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */ | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * Called with struct se_device->se_port_lock spinlock held. | ||
589 | */ | ||
590 | static void core_release_port(struct se_device *dev, struct se_port *port) | ||
591 | { | ||
592 | /* | ||
593 | * Wait for any port reference for PR ALL_TG_PT=1 operation | ||
594 | * to complete in __core_scsi3_alloc_registration() | ||
595 | */ | ||
596 | spin_unlock(&dev->se_port_lock); | ||
597 | while (atomic_read(&port->sep_tg_pt_ref_cnt)) | ||
598 | cpu_relax(); | ||
599 | spin_lock(&dev->se_port_lock); | ||
600 | |||
601 | core_alua_free_tg_pt_gp_mem(port); | ||
602 | |||
603 | list_del(&port->sep_list); | ||
604 | dev->dev_port_count--; | ||
605 | kfree(port); | ||
606 | |||
607 | return; | ||
608 | } | ||
609 | |||
610 | int core_dev_export( | ||
611 | struct se_device *dev, | ||
612 | struct se_portal_group *tpg, | ||
613 | struct se_lun *lun) | ||
614 | { | ||
615 | struct se_port *port; | ||
616 | |||
617 | port = core_alloc_port(dev); | ||
618 | if (!(port)) | ||
619 | return -1; | ||
620 | |||
621 | lun->lun_se_dev = dev; | ||
622 | se_dev_start(dev); | ||
623 | |||
624 | atomic_inc(&dev->dev_export_obj.obj_access_count); | ||
625 | core_export_port(dev, tpg, port, lun); | ||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | void core_dev_unexport( | ||
630 | struct se_device *dev, | ||
631 | struct se_portal_group *tpg, | ||
632 | struct se_lun *lun) | ||
633 | { | ||
634 | struct se_port *port = lun->lun_sep; | ||
635 | |||
636 | spin_lock(&lun->lun_sep_lock); | ||
637 | if (lun->lun_se_dev == NULL) { | ||
638 | spin_unlock(&lun->lun_sep_lock); | ||
639 | return; | ||
640 | } | ||
641 | spin_unlock(&lun->lun_sep_lock); | ||
642 | |||
643 | spin_lock(&dev->se_port_lock); | ||
644 | atomic_dec(&dev->dev_export_obj.obj_access_count); | ||
645 | core_release_port(dev, port); | ||
646 | spin_unlock(&dev->se_port_lock); | ||
647 | |||
648 | se_dev_stop(dev); | ||
649 | lun->lun_se_dev = NULL; | ||
650 | } | ||
651 | |||
652 | int transport_core_report_lun_response(struct se_cmd *se_cmd) | ||
653 | { | ||
654 | struct se_dev_entry *deve; | ||
655 | struct se_lun *se_lun; | ||
656 | struct se_session *se_sess = SE_SESS(se_cmd); | ||
657 | struct se_task *se_task; | ||
658 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; | ||
659 | u32 cdb_offset = 0, lun_count = 0, offset = 8; | ||
660 | u64 i, lun; | ||
661 | |||
662 | /* list_for_each_entry() + break never leaves se_task NULL; test emptiness */ | ||
663 | if (list_empty(&T_TASK(se_cmd)->t_task_list)) { | ||
664 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); | ||
665 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
666 | } | ||
667 | se_task = list_first_entry(&T_TASK(se_cmd)->t_task_list, | ||
668 | struct se_task, t_list); | ||
669 | |||
670 | /* | ||
671 | * If no struct se_session pointer is present, this struct se_cmd is | ||
672 | * coming via a target_core_mod PASSTHROUGH op, and not through | ||
673 | * a $FABRIC_MOD. In that case, report LUN=0 only. | ||
674 | */ | ||
675 | if (!(se_sess)) { | ||
676 | lun = 0; | ||
677 | buf[offset++] = ((lun >> 56) & 0xff); | ||
678 | buf[offset++] = ((lun >> 48) & 0xff); | ||
679 | buf[offset++] = ((lun >> 40) & 0xff); | ||
680 | buf[offset++] = ((lun >> 32) & 0xff); | ||
681 | buf[offset++] = ((lun >> 24) & 0xff); | ||
682 | buf[offset++] = ((lun >> 16) & 0xff); | ||
683 | buf[offset++] = ((lun >> 8) & 0xff); | ||
684 | buf[offset++] = (lun & 0xff); | ||
685 | lun_count = 1; | ||
686 | goto done; | ||
687 | } | ||
688 | |||
689 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
690 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
691 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; | ||
692 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
693 | continue; | ||
694 | se_lun = deve->se_lun; | ||
695 | /* | ||
696 | * We determine the correct LUN LIST LENGTH even once we | ||
697 | * have reached the initial allocation length. | ||
698 | * See SPC2-R20 7.19. | ||
699 | */ | ||
700 | lun_count++; | ||
701 | if ((cdb_offset + 8) >= se_cmd->data_length) | ||
702 | continue; | ||
703 | |||
704 | lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); | ||
705 | buf[offset++] = ((lun >> 56) & 0xff); | ||
706 | buf[offset++] = ((lun >> 48) & 0xff); | ||
707 | buf[offset++] = ((lun >> 40) & 0xff); | ||
708 | buf[offset++] = ((lun >> 32) & 0xff); | ||
709 | buf[offset++] = ((lun >> 24) & 0xff); | ||
710 | buf[offset++] = ((lun >> 16) & 0xff); | ||
711 | buf[offset++] = ((lun >> 8) & 0xff); | ||
712 | buf[offset++] = (lun & 0xff); | ||
713 | cdb_offset += 8; | ||
714 | } | ||
715 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | ||
716 | |||
717 | /* | ||
718 | * See SPC3 r07, page 159. | ||
719 | */ | ||
720 | done: | ||
721 | lun_count *= 8; | ||
722 | buf[0] = ((lun_count >> 24) & 0xff); | ||
723 | buf[1] = ((lun_count >> 16) & 0xff); | ||
724 | buf[2] = ((lun_count >> 8) & 0xff); | ||
725 | buf[3] = (lun_count & 0xff); | ||
726 | |||
727 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
728 | } | ||
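A worked example of the header arithmetic above may be useful:

	/* REPORT LUNS parameter data for three mapped LUNs:
	 *   LUN LIST LENGTH = 3 * 8 = 24 (0x18)  ->  buf[0..3] = 00 00 00 18
	 *   total parameter data = 8 header bytes + 24 LUN bytes = 32 bytes
	 * lun_count always counts every mapped LUN, even when data_length
	 * truncates the entries actually copied into the buffer. */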
729 | |||
730 | /* se_release_device_for_hba(): | ||
731 | * | ||
732 | * | ||
733 | */ | ||
734 | void se_release_device_for_hba(struct se_device *dev) | ||
735 | { | ||
736 | struct se_hba *hba = dev->se_hba; | ||
737 | |||
738 | if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | ||
739 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || | ||
740 | (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || | ||
741 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || | ||
742 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) | ||
743 | se_dev_stop(dev); | ||
744 | |||
745 | if (dev->dev_ptr) { | ||
746 | kthread_stop(dev->process_thread); | ||
747 | if (dev->transport->free_device) | ||
748 | dev->transport->free_device(dev->dev_ptr); | ||
749 | } | ||
750 | |||
751 | spin_lock(&hba->device_lock); | ||
752 | list_del(&dev->dev_list); | ||
753 | hba->dev_count--; | ||
754 | spin_unlock(&hba->device_lock); | ||
755 | |||
756 | core_scsi3_free_all_registrations(dev); | ||
757 | se_release_vpd_for_dev(dev); | ||
758 | |||
759 | kfree(dev->dev_status_queue_obj); | ||
760 | kfree(dev->dev_queue_obj); | ||
761 | kfree(dev); | ||
762 | |||
763 | return; | ||
764 | } | ||
765 | |||
766 | void se_release_vpd_for_dev(struct se_device *dev) | ||
767 | { | ||
768 | struct t10_vpd *vpd, *vpd_tmp; | ||
769 | |||
770 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); | ||
771 | list_for_each_entry_safe(vpd, vpd_tmp, | ||
772 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { | ||
773 | list_del(&vpd->vpd_list); | ||
774 | kfree(vpd); | ||
775 | } | ||
776 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); | ||
777 | |||
778 | return; | ||
779 | } | ||
780 | |||
781 | /* | ||
782 | * Called with struct se_hba->device_lock held. | ||
783 | */ | ||
784 | void se_clear_dev_ports(struct se_device *dev) | ||
785 | { | ||
786 | struct se_hba *hba = dev->se_hba; | ||
787 | struct se_lun *lun; | ||
788 | struct se_portal_group *tpg; | ||
789 | struct se_port *sep, *sep_tmp; | ||
790 | |||
791 | spin_lock(&dev->se_port_lock); | ||
792 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
793 | spin_unlock(&dev->se_port_lock); | ||
794 | spin_unlock(&hba->device_lock); | ||
795 | |||
796 | lun = sep->sep_lun; | ||
797 | tpg = sep->sep_tpg; | ||
798 | spin_lock(&lun->lun_sep_lock); | ||
799 | if (lun->lun_se_dev == NULL) { | ||
800 | spin_unlock(&lun->lun_sep_lock); | ||
801 | continue; | ||
802 | } | ||
803 | spin_unlock(&lun->lun_sep_lock); | ||
804 | |||
805 | core_dev_del_lun(tpg, lun->unpacked_lun); | ||
806 | |||
807 | spin_lock(&hba->device_lock); | ||
808 | spin_lock(&dev->se_port_lock); | ||
809 | } | ||
810 | spin_unlock(&dev->se_port_lock); | ||
811 | |||
812 | return; | ||
813 | } | ||
814 | |||
815 | /* se_free_virtual_device(): | ||
816 | * | ||
817 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. | ||
818 | */ | ||
819 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) | ||
820 | { | ||
821 | spin_lock(&hba->device_lock); | ||
822 | se_clear_dev_ports(dev); | ||
823 | spin_unlock(&hba->device_lock); | ||
824 | |||
825 | core_alua_free_lu_gp_mem(dev); | ||
826 | se_release_device_for_hba(dev); | ||
827 | |||
828 | return 0; | ||
829 | } | ||
830 | |||
831 | static void se_dev_start(struct se_device *dev) | ||
832 | { | ||
833 | struct se_hba *hba = dev->se_hba; | ||
834 | |||
835 | spin_lock(&hba->device_lock); | ||
836 | atomic_inc(&dev->dev_obj.obj_access_count); | ||
837 | if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { | ||
838 | if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { | ||
839 | dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; | ||
840 | dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; | ||
841 | } else if (dev->dev_status & | ||
842 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { | ||
843 | dev->dev_status &= | ||
844 | ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | ||
845 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | ||
846 | } | ||
847 | } | ||
848 | spin_unlock(&hba->device_lock); | ||
849 | } | ||
850 | |||
851 | static void se_dev_stop(struct se_device *dev) | ||
852 | { | ||
853 | struct se_hba *hba = dev->se_hba; | ||
854 | |||
855 | spin_lock(&hba->device_lock); | ||
856 | atomic_dec(&dev->dev_obj.obj_access_count); | ||
857 | if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { | ||
858 | if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { | ||
859 | dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; | ||
860 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | ||
861 | } else if (dev->dev_status & | ||
862 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { | ||
863 | dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | ||
864 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | ||
865 | } | ||
866 | } | ||
867 | spin_unlock(&hba->device_lock); | ||
868 | |||
869 | while (atomic_read(&hba->dev_mib_access_count)) | ||
870 | cpu_relax(); | ||
871 | } | ||
872 | |||
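
se_dev_start() and se_dev_stop() are mirror images: the first reference flips DEACTIVATED to ACTIVATED (and OFFLINE_DEACTIVATED to OFFLINE_ACTIVATED); the last reference flips each back. A sketch of the shared flip, factored into a hypothetical helper:

    /* Hypothetical helper: the state flip both se_dev_start() and
     * se_dev_stop() perform under hba->device_lock.  The status bits are
     * mutually exclusive, so two sequential calls match the else-if above. */
    static void se_dev_flip_status(struct se_device *dev, u32 from, u32 to)
    {
    	if (dev->dev_status & from) {
    		dev->dev_status &= ~from;
    		dev->dev_status |= to;
    	}
    }

se_dev_start()'s first-reference branch then collapses to se_dev_flip_status(dev, TRANSPORT_DEVICE_DEACTIVATED, TRANSPORT_DEVICE_ACTIVATED) followed by the OFFLINE_* pair, and se_dev_stop()'s last-reference branch is the reverse.
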
873 | int se_dev_check_online(struct se_device *dev) | ||
874 | { | ||
875 | int ret; | ||
876 | |||
877 | spin_lock_irq(&dev->dev_status_lock); | ||
878 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | ||
879 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; | ||
880 | spin_unlock_irq(&dev->dev_status_lock); | ||
881 | |||
882 | return ret; | ||
883 | } | ||
884 | |||
885 | int se_dev_check_shutdown(struct se_device *dev) | ||
886 | { | ||
887 | int ret; | ||
888 | |||
889 | spin_lock_irq(&dev->dev_status_lock); | ||
890 | ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); | ||
891 | spin_unlock_irq(&dev->dev_status_lock); | ||
892 | |||
893 | return ret; | ||
894 | } | ||
895 | |||
896 | void se_dev_set_default_attribs( | ||
897 | struct se_device *dev, | ||
898 | struct se_dev_limits *dev_limits) | ||
899 | { | ||
900 | struct queue_limits *limits = &dev_limits->limits; | ||
901 | |||
902 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; | ||
903 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; | ||
904 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; | ||
905 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; | ||
906 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | ||
907 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; | ||
908 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; | ||
909 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; | ||
910 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; | ||
911 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; | ||
912 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; | ||
913 | /* | ||
914 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK | ||
915 | * iblock_create_virtdevice() from struct queue_limits values | ||
916 | * if blk_queue_discard()==1 | ||
917 | */ | ||
918 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | ||
919 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = | ||
920 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | ||
921 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | ||
922 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | ||
923 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | ||
924 | /* | ||
925 | * block_size is based on subsystem plugin dependent requirements. | ||
926 | */ | ||
927 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; | ||
928 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; | ||
929 | /* | ||
930 | * max_sectors is based on subsystem plugin dependent requirements. | ||
931 | */ | ||
932 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; | ||
933 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; | ||
934 | /* | ||
935 | * Set optimal_sectors from max_sectors, which can be lowered via | ||
936 | * configfs. | ||
937 | */ | ||
938 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; | ||
939 | /* | ||
940 | * queue_depth is based on subsystem plugin dependent requirements. | ||
941 | */ | ||
942 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; | ||
943 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; | ||
944 | } | ||
945 | |||
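
se_dev_set_default_attribs() consumes a struct se_dev_limits that the subsystem plugin fills in beforehand. A sketch of how an IBLOCK-style backend might seed it from the underlying request queue (the helper name is an assumption; the queue accessors are standard block-layer API):

    #include <linux/blkdev.h>

    static void example_fill_dev_limits(struct request_queue *q,
    				    struct se_dev_limits *dev_limits)
    {
    	struct queue_limits *limits = &dev_limits->limits;

    	limits->logical_block_size = queue_logical_block_size(q);
    	limits->max_hw_sectors = queue_max_hw_sectors(q);
    	limits->max_sectors = queue_max_sectors(q);
    	/* TCM queue depths come from the backend, not the block layer */
    	dev_limits->hw_queue_depth = q->nr_requests;
    	dev_limits->queue_depth = q->nr_requests;
    }
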
946 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | ||
947 | { | ||
948 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { | ||
949 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" | ||
950 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); | ||
951 | return -1; | ||
952 | } else { | ||
953 | DEV_ATTRIB(dev)->task_timeout = task_timeout; | ||
954 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", | ||
955 | dev, task_timeout); | ||
956 | } | ||
957 | |||
958 | return 0; | ||
959 | } | ||
960 | |||
961 | int se_dev_set_max_unmap_lba_count( | ||
962 | struct se_device *dev, | ||
963 | u32 max_unmap_lba_count) | ||
964 | { | ||
965 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; | ||
966 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", | ||
967 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); | ||
968 | return 0; | ||
969 | } | ||
970 | |||
971 | int se_dev_set_max_unmap_block_desc_count( | ||
972 | struct se_device *dev, | ||
973 | u32 max_unmap_block_desc_count) | ||
974 | { | ||
975 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; | ||
976 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", | ||
977 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | int se_dev_set_unmap_granularity( | ||
982 | struct se_device *dev, | ||
983 | u32 unmap_granularity) | ||
984 | { | ||
985 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; | ||
986 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", | ||
987 | dev, DEV_ATTRIB(dev)->unmap_granularity); | ||
988 | return 0; | ||
989 | } | ||
990 | |||
991 | int se_dev_set_unmap_granularity_alignment( | ||
992 | struct se_device *dev, | ||
993 | u32 unmap_granularity_alignment) | ||
994 | { | ||
995 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; | ||
996 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", | ||
997 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); | ||
998 | return 0; | ||
999 | } | ||
1000 | |||
1001 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | ||
1002 | { | ||
1003 | if ((flag != 0) && (flag != 1)) { | ||
1004 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1005 | return -1; | ||
1006 | } | ||
1007 | if (TRANSPORT(dev)->dpo_emulated == NULL) { | ||
1008 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); | ||
1009 | return -1; | ||
1010 | } | ||
1011 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { | ||
1012 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); | ||
1013 | return -1; | ||
1014 | } | ||
1015 | DEV_ATTRIB(dev)->emulate_dpo = flag; | ||
1016 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" | ||
1017 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); | ||
1018 | return 0; | ||
1019 | } | ||
1020 | |||
1021 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | ||
1022 | { | ||
1023 | if ((flag != 0) && (flag != 1)) { | ||
1024 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1025 | return -1; | ||
1026 | } | ||
1027 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { | ||
1028 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); | ||
1029 | return -1; | ||
1030 | } | ||
1031 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { | ||
1032 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); | ||
1033 | return -1; | ||
1034 | } | ||
1035 | DEV_ATTRIB(dev)->emulate_fua_write = flag; | ||
1036 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | ||
1037 | dev, DEV_ATTRIB(dev)->emulate_fua_write); | ||
1038 | return 0; | ||
1039 | } | ||
1040 | |||
1041 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | ||
1042 | { | ||
1043 | if ((flag != 0) && (flag != 1)) { | ||
1044 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1045 | return -1; | ||
1046 | } | ||
1047 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { | ||
1048 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); | ||
1049 | return -1; | ||
1050 | } | ||
1051 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { | ||
1052 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); | ||
1053 | return -1; | ||
1054 | } | ||
1055 | DEV_ATTRIB(dev)->emulate_fua_read = flag; | ||
1056 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", | ||
1057 | dev, DEV_ATTRIB(dev)->emulate_fua_read); | ||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
1061 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | ||
1062 | { | ||
1063 | if ((flag != 0) && (flag != 1)) { | ||
1064 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1065 | return -1; | ||
1066 | } | ||
1067 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { | ||
1068 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); | ||
1069 | return -1; | ||
1070 | } | ||
1071 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { | ||
1072 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); | ||
1073 | return -1; | ||
1074 | } | ||
1075 | DEV_ATTRIB(dev)->emulate_write_cache = flag; | ||
1076 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | ||
1077 | dev, DEV_ATTRIB(dev)->emulate_write_cache); | ||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1081 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | ||
1082 | { | ||
1083 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | ||
1084 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1085 | return -1; | ||
1086 | } | ||
1087 | |||
1088 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1089 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | ||
1090 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" | ||
1091 | " exists\n", dev, | ||
1092 | atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1093 | return -1; | ||
1094 | } | ||
1095 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; | ||
1096 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | ||
1097 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); | ||
1098 | |||
1099 | return 0; | ||
1100 | } | ||
1101 | |||
1102 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | ||
1103 | { | ||
1104 | if ((flag != 0) && (flag != 1)) { | ||
1105 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1106 | return -1; | ||
1107 | } | ||
1108 | |||
1109 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1110 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" | ||
1111 | " dev_export_obj: %d count exists\n", dev, | ||
1112 | atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1113 | return -1; | ||
1114 | } | ||
1115 | DEV_ATTRIB(dev)->emulate_tas = flag; | ||
1116 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | ||
1117 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); | ||
1118 | |||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
1122 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | ||
1123 | { | ||
1124 | if ((flag != 0) && (flag != 1)) { | ||
1125 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1126 | return -1; | ||
1127 | } | ||
1128 | /* | ||
1129 | * We expect this value to be non-zero when generic Block Layer | ||
1130 | * Discard support is detected in iblock_create_virtdevice(). | ||
1131 | */ | ||
1132 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | ||
1133 | printk(KERN_ERR "Generic Block Discard not supported\n"); | ||
1134 | return -ENOSYS; | ||
1135 | } | ||
1136 | |||
1137 | DEV_ATTRIB(dev)->emulate_tpu = flag; | ||
1138 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | ||
1139 | dev, flag); | ||
1140 | return 0; | ||
1141 | } | ||
1142 | |||
1143 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | ||
1144 | { | ||
1145 | if ((flag != 0) && (flag != 1)) { | ||
1146 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1147 | return -1; | ||
1148 | } | ||
1149 | /* | ||
1150 | * We expect this value to be non-zero when generic Block Layer | ||
1151 | * Discard support is detected in iblock_create_virtdevice(). | ||
1152 | */ | ||
1153 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | ||
1154 | printk(KERN_ERR "Generic Block Discard not supported\n"); | ||
1155 | return -ENOSYS; | ||
1156 | } | ||
1157 | |||
1158 | DEV_ATTRIB(dev)->emulate_tpws = flag; | ||
1159 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | ||
1160 | dev, flag); | ||
1161 | return 0; | ||
1162 | } | ||
1163 | |||
1164 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | ||
1165 | { | ||
1166 | if ((flag != 0) && (flag != 1)) { | ||
1167 | printk(KERN_ERR "Illegal value %d\n", flag); | ||
1168 | return -1; | ||
1169 | } | ||
1170 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; | ||
1171 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | ||
1172 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); | ||
1173 | return 0; | ||
1174 | } | ||
1175 | |||
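
Each of the emulate_* setters above opens with the same boolean validation before its transport-specific checks. A sketch of that shared step as one hypothetical helper:

    static int se_dev_check_bool_flag(int flag)
    {
    	if ((flag != 0) && (flag != 1)) {
    		printk(KERN_ERR "Illegal value %d\n", flag);
    		return -EINVAL;
    	}
    	return 0;
    }

Each setter would then begin with if (se_dev_check_bool_flag(flag)) return -1; and keep only the checks that differ per attribute.
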
1176 | /* | ||
1177 | * Note that this can only be called on an unexported SE Device Object. | ||
1178 | */ | ||
1179 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | ||
1180 | { | ||
1181 | u32 orig_queue_depth = dev->queue_depth; | ||
1182 | |||
1183 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1184 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" | ||
1185 | " dev_export_obj: %d count exists\n", dev, | ||
1186 | atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1187 | return -1; | ||
1188 | } | ||
1189 | if (!(queue_depth)) { | ||
1190 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" | ||
1191 | "_depth\n", dev); | ||
1192 | return -1; | ||
1193 | } | ||
1194 | |||
1195 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1196 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | ||
1197 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" | ||
1198 | " exceeds TCM/SE_Device TCQ: %u\n", | ||
1199 | dev, queue_depth, | ||
1200 | DEV_ATTRIB(dev)->hw_queue_depth); | ||
1201 | return -1; | ||
1202 | } | ||
1203 | } else { | ||
1204 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { | ||
1205 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | ||
1206 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" | ||
1207 | " %u exceeds TCM/SE_Device MAX" | ||
1208 | " TCQ: %u\n", dev, queue_depth, | ||
1209 | DEV_ATTRIB(dev)->hw_queue_depth); | ||
1210 | return -1; | ||
1211 | } | ||
1212 | } | ||
1213 | } | ||
1214 | |||
1215 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; | ||
1216 | if (queue_depth > orig_queue_depth) | ||
1217 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); | ||
1218 | else if (queue_depth < orig_queue_depth) | ||
1219 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); | ||
1220 | |||
1221 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", | ||
1222 | dev, queue_depth); | ||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
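
The depth_left adjustment keeps command-credit accounting consistent across a depth change: raising the depth adds exactly the new credits, lowering it removes exactly the difference, and commands already dispatched keep the credits they hold. The arithmetic in isolation (a sketch; the values in the comment are assumed):

    /* With orig = 32 and new_depth = 64, depth_left gains 32 credits;
     * with orig = 64 and new_depth = 16 it loses 48. */
    static void example_adjust_depth_left(atomic_t *depth_left,
    				      u32 orig, u32 new_depth)
    {
    	if (new_depth > orig)
    		atomic_add(new_depth - orig, depth_left);
    	else if (new_depth < orig)
    		atomic_sub(orig - new_depth, depth_left);
    }
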
1226 | int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | ||
1227 | { | ||
1228 | int force = 0; /* Force setting for VDEVS */ | ||
1229 | |||
1230 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1231 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | ||
1232 | " max_sectors while dev_export_obj: %d count exists\n", | ||
1233 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1234 | return -1; | ||
1235 | } | ||
1236 | if (!(max_sectors)) { | ||
1237 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" | ||
1238 | " max_sectors\n", dev); | ||
1239 | return -1; | ||
1240 | } | ||
1241 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | ||
1242 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" | ||
1243 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, | ||
1244 | DA_STATUS_MAX_SECTORS_MIN); | ||
1245 | return -1; | ||
1246 | } | ||
1247 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1248 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { | ||
1249 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | ||
1250 | " greater than TCM/SE_Device max_sectors:" | ||
1251 | " %u\n", dev, max_sectors, | ||
1252 | DEV_ATTRIB(dev)->hw_max_sectors); | ||
1253 | return -1; | ||
1254 | } | ||
1255 | } else { | ||
1256 | if (!(force) && (max_sectors > | ||
1257 | DEV_ATTRIB(dev)->hw_max_sectors)) { | ||
1258 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | ||
1259 | " greater than TCM/SE_Device max_sectors" | ||
1260 | ": %u, use force=1 to override.\n", dev, | ||
1261 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); | ||
1262 | return -1; | ||
1263 | } | ||
1264 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | ||
1265 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | ||
1266 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | ||
1267 | " %u\n", dev, max_sectors, | ||
1268 | DA_STATUS_MAX_SECTORS_MAX); | ||
1269 | return -1; | ||
1270 | } | ||
1271 | } | ||
1272 | |||
1273 | DEV_ATTRIB(dev)->max_sectors = max_sectors; | ||
1274 | printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n", | ||
1275 | dev, max_sectors); | ||
1276 | return 0; | ||
1277 | } | ||
1278 | |||
1279 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | ||
1280 | { | ||
1281 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1282 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | ||
1283 | " optimal_sectors while dev_export_obj: %d count exists\n", | ||
1284 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1285 | return -EINVAL; | ||
1286 | } | ||
1287 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1288 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" | ||
1289 | " changed for TCM/pSCSI\n", dev); | ||
1290 | return -EINVAL; | ||
1291 | } | ||
1292 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { | ||
1293 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" | ||
1294 | " greater than max_sectors: %u\n", dev, | ||
1295 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); | ||
1296 | return -EINVAL; | ||
1297 | } | ||
1298 | |||
1299 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; | ||
1300 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", | ||
1301 | dev, optimal_sectors); | ||
1302 | return 0; | ||
1303 | } | ||
1304 | |||
1305 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | ||
1306 | { | ||
1307 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | ||
1308 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" | ||
1309 | " while dev_export_obj: %d count exists\n", dev, | ||
1310 | atomic_read(&dev->dev_export_obj.obj_access_count)); | ||
1311 | return -1; | ||
1312 | } | ||
1313 | |||
1314 | if ((block_size != 512) && | ||
1315 | (block_size != 1024) && | ||
1316 | (block_size != 2048) && | ||
1317 | (block_size != 4096)) { | ||
1318 | printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u" | ||
1319 | " for SE device, must be 512, 1024, 2048 or 4096\n", | ||
1320 | dev, block_size); | ||
1321 | return -1; | ||
1322 | } | ||
1323 | |||
1324 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1325 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" | ||
1326 | " Physical Device, use for Linux/SCSI to change" | ||
1327 | " block_size for underlying hardware\n", dev); | ||
1328 | return -1; | ||
1329 | } | ||
1330 | |||
1331 | DEV_ATTRIB(dev)->block_size = block_size; | ||
1332 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", | ||
1333 | dev, block_size); | ||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
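
The four accepted block sizes are exactly the powers of two in [512, 4096], so the chained comparison above could equally be expressed with is_power_of_2(); a sketch:

    #include <linux/log2.h>

    static bool example_block_size_valid(u32 block_size)
    {
    	/* 512, 1024, 2048 and 4096 are the powers of two in range */
    	return block_size >= 512 && block_size <= 4096 &&
    	       is_power_of_2(block_size);
    }
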
1337 | struct se_lun *core_dev_add_lun( | ||
1338 | struct se_portal_group *tpg, | ||
1339 | struct se_hba *hba, | ||
1340 | struct se_device *dev, | ||
1341 | u32 lun) | ||
1342 | { | ||
1343 | struct se_lun *lun_p; | ||
1344 | u32 lun_access = 0; | ||
1345 | |||
1346 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { | ||
1347 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", | ||
1348 | atomic_read(&dev->dev_access_obj.obj_access_count)); | ||
1349 | return NULL; | ||
1350 | } | ||
1351 | |||
1352 | lun_p = core_tpg_pre_addlun(tpg, lun); | ||
1353 | if ((IS_ERR(lun_p)) || !(lun_p)) | ||
1354 | return NULL; | ||
1355 | |||
1356 | if (dev->dev_flags & DF_READ_ONLY) | ||
1357 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
1358 | else | ||
1359 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | ||
1360 | |||
1361 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) | ||
1362 | return NULL; | ||
1363 | |||
1364 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | ||
1365 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), | ||
1366 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, | ||
1367 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); | ||
1368 | /* | ||
1369 | * Update LUN maps for dynamically added initiators when | ||
1370 | * generate_node_acls is enabled. | ||
1371 | */ | ||
1372 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { | ||
1373 | struct se_node_acl *acl; | ||
1374 | spin_lock_bh(&tpg->acl_node_lock); | ||
1375 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | ||
1376 | if (acl->dynamic_node_acl) { | ||
1377 | spin_unlock_bh(&tpg->acl_node_lock); | ||
1378 | core_tpg_add_node_to_devs(acl, tpg); | ||
1379 | spin_lock_bh(&tpg->acl_node_lock); | ||
1380 | } | ||
1381 | } | ||
1382 | spin_unlock_bh(&tpg->acl_node_lock); | ||
1383 | } | ||
1384 | |||
1385 | return lun_p; | ||
1386 | } | ||
1387 | |||
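
For orientation, a hypothetical fabric-side call site for core_dev_add_lun(): export a configured se_device as LUN 0 of a TPG and tear it down again with core_dev_del_lun(). Only the two core_dev_*() calls are from this file; the rest is assumed scaffolding:

    static int example_export_lun0(struct se_portal_group *tpg,
    			       struct se_hba *hba, struct se_device *dev)
    {
    	struct se_lun *lun = core_dev_add_lun(tpg, hba, dev, 0);

    	if (!lun)
    		return -EINVAL;
    	/* ... fabric-specific use of the exported LUN ... */
    	return core_dev_del_lun(tpg, lun->unpacked_lun);
    }
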
1388 | /* core_dev_del_lun(): | ||
1389 | * | ||
1390 | * | ||
1391 | */ | ||
1392 | int core_dev_del_lun( | ||
1393 | struct se_portal_group *tpg, | ||
1394 | u32 unpacked_lun) | ||
1395 | { | ||
1396 | struct se_lun *lun; | ||
1397 | int ret = 0; | ||
1398 | |||
1399 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); | ||
1400 | if (!(lun)) | ||
1401 | return ret; | ||
1402 | |||
1403 | core_tpg_post_dellun(tpg, lun); | ||
1404 | |||
1405 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" | ||
1406 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), | ||
1407 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, | ||
1408 | TPG_TFO(tpg)->get_fabric_name()); | ||
1409 | |||
1410 | return 0; | ||
1411 | } | ||
1412 | |||
1413 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) | ||
1414 | { | ||
1415 | struct se_lun *lun; | ||
1416 | |||
1417 | spin_lock(&tpg->tpg_lun_lock); | ||
1418 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
1419 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | ||
1420 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | ||
1421 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
1422 | TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
1423 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1424 | spin_unlock(&tpg->tpg_lun_lock); | ||
1425 | return NULL; | ||
1426 | } | ||
1427 | lun = &tpg->tpg_lun_list[unpacked_lun]; | ||
1428 | |||
1429 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | ||
1430 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" | ||
1431 | " Target Portal Group: %hu, ignoring request.\n", | ||
1432 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
1433 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1434 | spin_unlock(&tpg->tpg_lun_lock); | ||
1435 | return NULL; | ||
1436 | } | ||
1437 | spin_unlock(&tpg->tpg_lun_lock); | ||
1438 | |||
1439 | return lun; | ||
1440 | } | ||
1441 | |||
1442 | /* core_dev_get_lun(): | ||
1443 | * | ||
1444 | * | ||
1445 | */ | ||
1446 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) | ||
1447 | { | ||
1448 | struct se_lun *lun; | ||
1449 | |||
1450 | spin_lock(&tpg->tpg_lun_lock); | ||
1451 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
1452 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | ||
1453 | "_TPG-1: %u for Target Portal Group: %hu\n", | ||
1454 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
1455 | TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
1456 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1457 | spin_unlock(&tpg->tpg_lun_lock); | ||
1458 | return NULL; | ||
1459 | } | ||
1460 | lun = &tpg->tpg_lun_list[unpacked_lun]; | ||
1461 | |||
1462 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | ||
1463 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | ||
1464 | " Target Portal Group: %hu, ignoring request.\n", | ||
1465 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
1466 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1467 | spin_unlock(&tpg->tpg_lun_lock); | ||
1468 | return NULL; | ||
1469 | } | ||
1470 | spin_unlock(&tpg->tpg_lun_lock); | ||
1471 | |||
1472 | return lun; | ||
1473 | } | ||
1474 | |||
1475 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | ||
1476 | struct se_portal_group *tpg, | ||
1477 | u32 mapped_lun, | ||
1478 | char *initiatorname, | ||
1479 | int *ret) | ||
1480 | { | ||
1481 | struct se_lun_acl *lacl; | ||
1482 | struct se_node_acl *nacl; | ||
1483 | |||
1484 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { | ||
1485 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | ||
1486 | TPG_TFO(tpg)->get_fabric_name()); | ||
1487 | *ret = -EOVERFLOW; | ||
1488 | return NULL; | ||
1489 | } | ||
1490 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | ||
1491 | if (!(nacl)) { | ||
1492 | *ret = -EINVAL; | ||
1493 | return NULL; | ||
1494 | } | ||
1495 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); | ||
1496 | if (!(lacl)) { | ||
1497 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); | ||
1498 | *ret = -ENOMEM; | ||
1499 | return NULL; | ||
1500 | } | ||
1501 | |||
1502 | INIT_LIST_HEAD(&lacl->lacl_list); | ||
1503 | lacl->mapped_lun = mapped_lun; | ||
1504 | lacl->se_lun_nacl = nacl; | ||
1505 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | ||
1506 | |||
1507 | return lacl; | ||
1508 | } | ||
1509 | |||
1510 | int core_dev_add_initiator_node_lun_acl( | ||
1511 | struct se_portal_group *tpg, | ||
1512 | struct se_lun_acl *lacl, | ||
1513 | u32 unpacked_lun, | ||
1514 | u32 lun_access) | ||
1515 | { | ||
1516 | struct se_lun *lun; | ||
1517 | struct se_node_acl *nacl; | ||
1518 | |||
1519 | lun = core_dev_get_lun(tpg, unpacked_lun); | ||
1520 | if (!(lun)) { | ||
1521 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | ||
1522 | " Target Portal Group: %hu, ignoring request.\n", | ||
1523 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
1524 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
1525 | return -EINVAL; | ||
1526 | } | ||
1527 | |||
1528 | nacl = lacl->se_lun_nacl; | ||
1529 | if (!(nacl)) | ||
1530 | return -EINVAL; | ||
1531 | |||
1532 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && | ||
1533 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) | ||
1534 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
1535 | |||
1536 | lacl->se_lun = lun; | ||
1537 | |||
1538 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, | ||
1539 | lun_access, nacl, tpg, 1) < 0) | ||
1540 | return -EINVAL; | ||
1541 | |||
1542 | spin_lock(&lun->lun_acl_lock); | ||
1543 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); | ||
1544 | atomic_inc(&lun->lun_acl_count); | ||
1545 | smp_mb__after_atomic_inc(); | ||
1546 | spin_unlock(&lun->lun_acl_lock); | ||
1547 | |||
1548 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for" | ||
1549 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
1550 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | ||
1551 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | ||
1552 | lacl->initiatorname); | ||
1553 | /* | ||
1554 | * Check to see if there are any existing persistent reservation APTPL | ||
1555 | * pre-registrations that need to be enabled for this LUN ACL. | ||
1556 | */ | ||
1557 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); | ||
1558 | return 0; | ||
1559 | } | ||
1560 | |||
1561 | /* core_dev_del_initiator_node_lun_acl(): | ||
1562 | * | ||
1563 | * | ||
1564 | */ | ||
1565 | int core_dev_del_initiator_node_lun_acl( | ||
1566 | struct se_portal_group *tpg, | ||
1567 | struct se_lun *lun, | ||
1568 | struct se_lun_acl *lacl) | ||
1569 | { | ||
1570 | struct se_node_acl *nacl; | ||
1571 | |||
1572 | nacl = lacl->se_lun_nacl; | ||
1573 | if (!(nacl)) | ||
1574 | return -EINVAL; | ||
1575 | |||
1576 | spin_lock(&lun->lun_acl_lock); | ||
1577 | list_del(&lacl->lacl_list); | ||
1578 | atomic_dec(&lun->lun_acl_count); | ||
1579 | smp_mb__after_atomic_dec(); | ||
1580 | spin_unlock(&lun->lun_acl_lock); | ||
1581 | |||
1582 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, | ||
1583 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | ||
1584 | |||
1585 | lacl->se_lun = NULL; | ||
1586 | |||
1587 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" | ||
1588 | " InitiatorNode: %s Mapped LUN: %u\n", | ||
1589 | TPG_TFO(tpg)->get_fabric_name(), | ||
1590 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | ||
1591 | lacl->initiatorname, lacl->mapped_lun); | ||
1592 | |||
1593 | return 0; | ||
1594 | } | ||
1595 | |||
1596 | void core_dev_free_initiator_node_lun_acl( | ||
1597 | struct se_portal_group *tpg, | ||
1598 | struct se_lun_acl *lacl) | ||
1599 | { | ||
1600 | printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | ||
1601 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), | ||
1602 | TPG_TFO(tpg)->tpg_get_tag(tpg), | ||
1603 | TPG_TFO(tpg)->get_fabric_name(), | ||
1604 | lacl->initiatorname, lacl->mapped_lun); | ||
1605 | |||
1606 | kfree(lacl); | ||
1607 | } | ||
1608 | |||
1609 | int core_dev_setup_virtual_lun0(void) | ||
1610 | { | ||
1611 | struct se_hba *hba; | ||
1612 | struct se_device *dev; | ||
1613 | struct se_subsystem_dev *se_dev = NULL; | ||
1614 | struct se_subsystem_api *t; | ||
1615 | char buf[16]; | ||
1616 | int ret; | ||
1617 | |||
1618 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); | ||
1619 | if (IS_ERR(hba)) | ||
1620 | return PTR_ERR(hba); | ||
1621 | |||
1622 | se_global->g_lun0_hba = hba; | ||
1623 | t = hba->transport; | ||
1624 | |||
1625 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | ||
1626 | if (!(se_dev)) { | ||
1627 | printk(KERN_ERR "Unable to allocate memory for" | ||
1628 | " struct se_subsystem_dev\n"); | ||
1629 | ret = -ENOMEM; | ||
1630 | goto out; | ||
1631 | } | ||
1632 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | ||
1633 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | ||
1634 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | ||
1635 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | ||
1636 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | ||
1637 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | ||
1638 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | ||
1639 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | ||
1640 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | ||
1641 | spin_lock_init(&se_dev->se_dev_lock); | ||
1642 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | ||
1643 | se_dev->t10_wwn.t10_sub_dev = se_dev; | ||
1644 | se_dev->t10_alua.t10_sub_dev = se_dev; | ||
1645 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | ||
1646 | se_dev->se_dev_hba = hba; | ||
1647 | |||
1648 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); | ||
1649 | if (!(se_dev->se_dev_su_ptr)) { | ||
1650 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | ||
1651 | " from allocate_virtdevice()\n"); | ||
1652 | ret = -ENOMEM; | ||
1653 | goto out; | ||
1654 | } | ||
1655 | se_global->g_lun0_su_dev = se_dev; | ||
1656 | |||
1657 | memset(buf, 0, 16); | ||
1658 | sprintf(buf, "rd_pages=8"); | ||
1659 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | ||
1660 | |||
1661 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | ||
1662 | if (!(dev) || IS_ERR(dev)) { | ||
1663 | ret = -ENOMEM; | ||
1664 | goto out; | ||
1665 | } | ||
1666 | se_dev->se_dev_ptr = dev; | ||
1667 | se_global->g_lun0_dev = dev; | ||
1668 | |||
1669 | return 0; | ||
1670 | out: | ||
1671 | se_global->g_lun0_su_dev = NULL; | ||
1672 | kfree(se_dev); | ||
1673 | if (se_global->g_lun0_hba) { | ||
1674 | core_delete_hba(se_global->g_lun0_hba); | ||
1675 | se_global->g_lun0_hba = NULL; | ||
1676 | } | ||
1677 | return ret; | ||
1678 | } | ||
1679 | |||
1680 | |||
1681 | void core_dev_release_virtual_lun0(void) | ||
1682 | { | ||
1683 | struct se_hba *hba = se_global->g_lun0_hba; | ||
1684 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; | ||
1685 | |||
1686 | if (!(hba)) | ||
1687 | return; | ||
1688 | |||
1689 | if (se_global->g_lun0_dev) | ||
1690 | se_free_virtual_device(se_global->g_lun0_dev, hba); | ||
1691 | |||
1692 | kfree(su_dev); | ||
1693 | core_delete_hba(hba); | ||
1694 | } | ||
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c new file mode 100644 index 000000000000..32b148d7e261 --- /dev/null +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -0,0 +1,996 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_fabric_configfs.c | ||
3 | * | ||
4 | * This file contains generic fabric module configfs infrastructure for | ||
5 | * TCM v4.x code | ||
6 | * | ||
7 | * Copyright (c) 2010 Rising Tide Systems | ||
8 | * Copyright (c) 2010 Linux-iSCSI.org | ||
9 | * | ||
10 | * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | ****************************************************************************/ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/moduleparam.h> | ||
25 | #include <linux/version.h> | ||
26 | #include <generated/utsrelease.h> | ||
27 | #include <linux/utsname.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/namei.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/unistd.h> | ||
35 | #include <linux/string.h> | ||
36 | #include <linux/syscalls.h> | ||
37 | #include <linux/configfs.h> | ||
38 | |||
39 | #include <target/target_core_base.h> | ||
40 | #include <target/target_core_device.h> | ||
41 | #include <target/target_core_tpg.h> | ||
42 | #include <target/target_core_transport.h> | ||
43 | #include <target/target_core_fabric_ops.h> | ||
44 | #include <target/target_core_fabric_configfs.h> | ||
45 | #include <target/target_core_configfs.h> | ||
46 | #include <target/configfs_macros.h> | ||
47 | |||
48 | #include "target_core_alua.h" | ||
49 | #include "target_core_hba.h" | ||
50 | #include "target_core_pr.h" | ||
51 | |||
52 | #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ | ||
53 | static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ | ||
54 | { \ | ||
55 | struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ | ||
56 | struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ | ||
57 | \ | ||
58 | cit->ct_item_ops = _item_ops; \ | ||
59 | cit->ct_group_ops = _group_ops; \ | ||
60 | cit->ct_attrs = _attrs; \ | ||
61 | cit->ct_owner = tf->tf_module; \ | ||
62 | printk(KERN_INFO "Setup generic %s\n", __stringify(_name)); \ | ||
63 | } | ||
64 | |||
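
To make the generated code concrete: TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL), used later in this file, expands to roughly:

    static void target_fabric_setup_tpg_np_cit(struct target_fabric_configfs *tf)
    {
    	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;
    	struct config_item_type *cit = &tfc->tfc_tpg_np_cit;

    	cit->ct_item_ops = NULL;
    	cit->ct_group_ops = &target_fabric_np_group_ops;
    	cit->ct_attrs = NULL;
    	cit->ct_owner = tf->tf_module;
    	printk(KERN_INFO "Setup generic %s\n", "tpg_np");
    }
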
65 | /* Start of tfc_tpg_mappedlun_cit */ | ||
66 | |||
67 | static int target_fabric_mappedlun_link( | ||
68 | struct config_item *lun_acl_ci, | ||
69 | struct config_item *lun_ci) | ||
70 | { | ||
71 | struct se_dev_entry *deve; | ||
72 | struct se_lun *lun = container_of(to_config_group(lun_ci), | ||
73 | struct se_lun, lun_group); | ||
74 | struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), | ||
75 | struct se_lun_acl, se_lun_group); | ||
76 | struct se_portal_group *se_tpg; | ||
77 | struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; | ||
78 | int ret = 0, lun_access; | ||
79 | /* | ||
80 | * Ensure that the source port exists | ||
81 | */ | ||
82 | if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { | ||
83 | printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" | ||
84 | "_tpg does not exist\n"); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | se_tpg = lun->lun_sep->sep_tpg; | ||
88 | |||
89 | nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; | ||
90 | tpg_ci = &nacl_ci->ci_group->cg_item; | ||
91 | wwn_ci = &tpg_ci->ci_group->cg_item; | ||
92 | tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item; | ||
93 | wwn_ci_s = &tpg_ci_s->ci_group->cg_item; | ||
94 | /* | ||
95 | * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT | ||
96 | */ | ||
97 | if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { | ||
98 | printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", | ||
99 | config_item_name(wwn_ci)); | ||
100 | return -EINVAL; | ||
101 | } | ||
102 | if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { | ||
103 | printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" | ||
104 | " TPGT: %s\n", config_item_name(wwn_ci), | ||
105 | config_item_name(tpg_ci)); | ||
106 | return -EINVAL; | ||
107 | } | ||
108 | /* | ||
109 | * If this struct se_node_acl was dynamically generated with | ||
110 | * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags, | ||
111 | * which will be write protected (READ-ONLY) when | ||
112 | * tpg_1/attrib/demo_mode_write_protect=1 | ||
113 | */ | ||
114 | spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); | ||
115 | deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun]; | ||
116 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) | ||
117 | lun_access = deve->lun_flags; | ||
118 | else | ||
119 | lun_access = | ||
120 | (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect( | ||
121 | se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : | ||
122 | TRANSPORT_LUNFLAGS_READ_WRITE; | ||
123 | spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); | ||
124 | /* | ||
125 | * Determine the actual mapped LUN value the user wants. | ||
126 | * | ||
127 | * This is the value the SCSI Initiator sees for | ||
128 | * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports. | ||
129 | */ | ||
130 | ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, | ||
131 | lun->unpacked_lun, lun_access); | ||
132 | |||
133 | return (ret < 0) ? -EINVAL : 0; | ||
134 | } | ||
135 | |||
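
The lun_access decision above deserves a second look: a dynamically generated ACL keeps the flags it already has, otherwise the TPG's prod_mode_write_protect attribute picks read-only versus read-write. The same logic in isolation (hypothetical helper name; TPG_TFO() and the flag names are from this patch):

    static int example_pick_lun_access(struct se_dev_entry *deve,
    				   struct se_portal_group *se_tpg)
    {
    	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
    		return deve->lun_flags;
    	return TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(se_tpg) ?
    		TRANSPORT_LUNFLAGS_READ_ONLY : TRANSPORT_LUNFLAGS_READ_WRITE;
    }
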
136 | static int target_fabric_mappedlun_unlink( | ||
137 | struct config_item *lun_acl_ci, | ||
138 | struct config_item *lun_ci) | ||
139 | { | ||
140 | struct se_lun *lun; | ||
141 | struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), | ||
142 | struct se_lun_acl, se_lun_group); | ||
143 | struct se_node_acl *nacl = lacl->se_lun_nacl; | ||
144 | struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun]; | ||
145 | struct se_portal_group *se_tpg; | ||
146 | /* | ||
147 | * Determine if the underlying MappedLUN has already been released.. | ||
148 | */ | ||
149 | if (!(deve->se_lun)) | ||
150 | return 0; | ||
151 | |||
152 | lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); | ||
153 | se_tpg = lun->lun_sep->sep_tpg; | ||
154 | |||
155 | core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); | ||
160 | #define TCM_MAPPEDLUN_ATTR(_name, _mode) \ | ||
161 | static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \ | ||
162 | __CONFIGFS_EATTR(_name, _mode, \ | ||
163 | target_fabric_mappedlun_show_##_name, \ | ||
164 | target_fabric_mappedlun_store_##_name); | ||
165 | |||
166 | static ssize_t target_fabric_mappedlun_show_write_protect( | ||
167 | struct se_lun_acl *lacl, | ||
168 | char *page) | ||
169 | { | ||
170 | struct se_node_acl *se_nacl = lacl->se_lun_nacl; | ||
171 | struct se_dev_entry *deve; | ||
172 | ssize_t len; | ||
173 | |||
174 | spin_lock_irq(&se_nacl->device_list_lock); | ||
175 | deve = &se_nacl->device_list[lacl->mapped_lun]; | ||
176 | len = sprintf(page, "%d\n", | ||
177 | (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? | ||
178 | 1 : 0); | ||
179 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
180 | |||
181 | return len; | ||
182 | } | ||
183 | |||
184 | static ssize_t target_fabric_mappedlun_store_write_protect( | ||
185 | struct se_lun_acl *lacl, | ||
186 | const char *page, | ||
187 | size_t count) | ||
188 | { | ||
189 | struct se_node_acl *se_nacl = lacl->se_lun_nacl; | ||
190 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
191 | unsigned long op; | ||
192 | |||
193 | if (strict_strtoul(page, 0, &op)) | ||
194 | return -EINVAL; | ||
195 | |||
196 | if ((op != 1) && (op != 0)) | ||
197 | return -EINVAL; | ||
198 | |||
199 | core_update_device_list_access(lacl->mapped_lun, (op) ? | ||
200 | TRANSPORT_LUNFLAGS_READ_ONLY : | ||
201 | TRANSPORT_LUNFLAGS_READ_WRITE, | ||
202 | lacl->se_lun_nacl); | ||
203 | |||
204 | printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" | ||
205 | " Mapped LUN: %u Write Protect bit to %s\n", | ||
206 | TPG_TFO(se_tpg)->get_fabric_name(), | ||
207 | lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); | ||
208 | |||
209 | return count; | ||
210 | |||
211 | } | ||
212 | |||
213 | TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); | ||
214 | |||
215 | CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); | ||
216 | |||
217 | static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { | ||
218 | &target_fabric_mappedlun_write_protect.attr, | ||
219 | NULL, | ||
220 | }; | ||
221 | |||
222 | static struct configfs_item_operations target_fabric_mappedlun_item_ops = { | ||
223 | .show_attribute = target_fabric_mappedlun_attr_show, | ||
224 | .store_attribute = target_fabric_mappedlun_attr_store, | ||
225 | .allow_link = target_fabric_mappedlun_link, | ||
226 | .drop_link = target_fabric_mappedlun_unlink, | ||
227 | }; | ||
228 | |||
229 | TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, | ||
230 | target_fabric_mappedlun_attrs); | ||
231 | |||
232 | /* End of tfc_tpg_mappedlun_cit */ | ||
233 | |||
234 | /* Start of tfc_tpg_nacl_attrib_cit */ | ||
235 | |||
236 | CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); | ||
237 | |||
238 | static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = { | ||
239 | .show_attribute = target_fabric_nacl_attrib_attr_show, | ||
240 | .store_attribute = target_fabric_nacl_attrib_attr_store, | ||
241 | }; | ||
242 | |||
243 | TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); | ||
244 | |||
245 | /* End of tfc_tpg_nacl_attrib_cit */ | ||
246 | |||
247 | /* Start of tfc_tpg_nacl_auth_cit */ | ||
248 | |||
249 | CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group); | ||
250 | |||
251 | static struct configfs_item_operations target_fabric_nacl_auth_item_ops = { | ||
252 | .show_attribute = target_fabric_nacl_auth_attr_show, | ||
253 | .store_attribute = target_fabric_nacl_auth_attr_store, | ||
254 | }; | ||
255 | |||
256 | TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); | ||
257 | |||
258 | /* End of tfc_tpg_nacl_auth_cit */ | ||
259 | |||
260 | /* Start of tfc_tpg_nacl_param_cit */ | ||
261 | |||
262 | CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group); | ||
263 | |||
264 | static struct configfs_item_operations target_fabric_nacl_param_item_ops = { | ||
265 | .show_attribute = target_fabric_nacl_param_attr_show, | ||
266 | .store_attribute = target_fabric_nacl_param_attr_store, | ||
267 | }; | ||
268 | |||
269 | TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); | ||
270 | |||
271 | /* End of tfc_tpg_nacl_param_cit */ | ||
272 | |||
273 | /* Start of tfc_tpg_nacl_base_cit */ | ||
274 | |||
275 | CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group); | ||
276 | |||
277 | static struct config_group *target_fabric_make_mappedlun( | ||
278 | struct config_group *group, | ||
279 | const char *name) | ||
280 | { | ||
281 | struct se_node_acl *se_nacl = container_of(group, | ||
282 | struct se_node_acl, acl_group); | ||
283 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
284 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
285 | struct se_lun_acl *lacl; | ||
286 | struct config_item *acl_ci; | ||
287 | char *buf; | ||
288 | unsigned long mapped_lun; | ||
289 | int ret = 0; | ||
290 | |||
291 | acl_ci = &group->cg_item; | ||
292 | if (!(acl_ci)) { | ||
293 | printk(KERN_ERR "Unable to locate acl_ci\n"); | ||
294 | return NULL; | ||
295 | } | ||
296 | |||
297 | buf = kzalloc(strlen(name) + 1, GFP_KERNEL); | ||
298 | if (!(buf)) { | ||
299 | printk(KERN_ERR "Unable to allocate memory for name buf\n"); | ||
300 | return ERR_PTR(-ENOMEM); | ||
301 | } | ||
302 | snprintf(buf, strlen(name) + 1, "%s", name); | ||
303 | /* | ||
304 | * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. | ||
305 | */ | ||
306 | if (strstr(buf, "lun_") != buf) { | ||
307 | printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" | ||
308 | " name: %s\n", buf, name); | ||
309 | ret = -EINVAL; | ||
310 | goto out; | ||
311 | } | ||
312 | /* | ||
313 | * Determine the Mapped LUN value. This is what the SCSI Initiator | ||
314 | * Port will actually see. | ||
315 | */ | ||
316 | if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) { | ||
317 | ret = -EINVAL; | ||
318 | goto out; | ||
319 | } | ||
320 | |||
321 | lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, | ||
322 | config_item_name(acl_ci), &ret); | ||
323 | if (!(lacl)) | ||
324 | goto out; | ||
325 | |||
326 | config_group_init_type_name(&lacl->se_lun_group, name, | ||
327 | &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); | ||
328 | |||
329 | kfree(buf); | ||
330 | return &lacl->se_lun_group; | ||
331 | out: | ||
332 | kfree(buf); | ||
333 | return ERR_PTR(ret); | ||
334 | } | ||
335 | |||
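
The name parse above accepts only configfs entries of the form lun_$ID. The same check in isolation (hypothetical helper name; strict_strtoul() is the interface this patch uses):

    static int example_parse_mapped_lun(const char *name,
    				    unsigned long *mapped_lun)
    {
    	/* "lun_5" -> 5; reject a missing "lun_" prefix,
    	 * a non-numeric suffix, or an out-of-range value */
    	if (strstr(name, "lun_") != name)
    		return -EINVAL;
    	if (strict_strtoul(name + 4, 0, mapped_lun) || *mapped_lun > UINT_MAX)
    		return -EINVAL;
    	return 0;
    }
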
336 | static void target_fabric_drop_mappedlun( | ||
337 | struct config_group *group, | ||
338 | struct config_item *item) | ||
339 | { | ||
340 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
341 | struct se_lun_acl, se_lun_group); | ||
342 | struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; | ||
343 | |||
344 | config_item_put(item); | ||
345 | core_dev_free_initiator_node_lun_acl(se_tpg, lacl); | ||
346 | } | ||
347 | |||
348 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { | ||
349 | .show_attribute = target_fabric_nacl_base_attr_show, | ||
350 | .store_attribute = target_fabric_nacl_base_attr_store, | ||
351 | }; | ||
352 | |||
353 | static struct configfs_group_operations target_fabric_nacl_base_group_ops = { | ||
354 | .make_group = target_fabric_make_mappedlun, | ||
355 | .drop_item = target_fabric_drop_mappedlun, | ||
356 | }; | ||
357 | |||
358 | TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, | ||
359 | &target_fabric_nacl_base_group_ops, NULL); | ||
360 | |||
361 | /* End of tfc_tpg_nacl_base_cit */ | ||
362 | |||
363 | /* Start of tfc_tpg_nacl_cit */ | ||
364 | |||
365 | static struct config_group *target_fabric_make_nodeacl( | ||
366 | struct config_group *group, | ||
367 | const char *name) | ||
368 | { | ||
369 | struct se_portal_group *se_tpg = container_of(group, | ||
370 | struct se_portal_group, tpg_acl_group); | ||
371 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
372 | struct se_node_acl *se_nacl; | ||
373 | struct config_group *nacl_cg; | ||
374 | |||
375 | if (!(tf->tf_ops.fabric_make_nodeacl)) { | ||
376 | printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); | ||
377 | return ERR_PTR(-ENOSYS); | ||
378 | } | ||
379 | |||
380 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); | ||
381 | if (IS_ERR(se_nacl)) | ||
382 | return ERR_CAST(se_nacl); | ||
383 | |||
384 | nacl_cg = &se_nacl->acl_group; | ||
385 | nacl_cg->default_groups = se_nacl->acl_default_groups; | ||
386 | nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; | ||
387 | nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; | ||
388 | nacl_cg->default_groups[2] = &se_nacl->acl_param_group; | ||
389 | nacl_cg->default_groups[3] = NULL; | ||
390 | |||
391 | config_group_init_type_name(&se_nacl->acl_group, name, | ||
392 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); | ||
393 | config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", | ||
394 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); | ||
395 | config_group_init_type_name(&se_nacl->acl_auth_group, "auth", | ||
396 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); | ||
397 | config_group_init_type_name(&se_nacl->acl_param_group, "param", | ||
398 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); | ||
399 | |||
400 | return &se_nacl->acl_group; | ||
401 | } | ||
402 | |||
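
The default_groups wiring above is what materializes the per-ACL subdirectories; a sketch of the resulting configfs layout (paths illustrative):

    /*
     *   .../$FABRIC/$WWN/tpgt_$TAG/acls/$INITIATOR/
     *       attrib/   <- tfc_tpg_nacl_attrib_cit
     *       auth/     <- tfc_tpg_nacl_auth_cit
     *       param/    <- tfc_tpg_nacl_param_cit
     */
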
403 | static void target_fabric_drop_nodeacl( | ||
404 | struct config_group *group, | ||
405 | struct config_item *item) | ||
406 | { | ||
407 | struct se_portal_group *se_tpg = container_of(group, | ||
408 | struct se_portal_group, tpg_acl_group); | ||
409 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
410 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | ||
411 | struct se_node_acl, acl_group); | ||
412 | struct config_item *df_item; | ||
413 | struct config_group *nacl_cg; | ||
414 | int i; | ||
415 | |||
416 | nacl_cg = &se_nacl->acl_group; | ||
417 | for (i = 0; nacl_cg->default_groups[i]; i++) { | ||
418 | df_item = &nacl_cg->default_groups[i]->cg_item; | ||
419 | nacl_cg->default_groups[i] = NULL; | ||
420 | config_item_put(df_item); | ||
421 | } | ||
422 | |||
423 | config_item_put(item); | ||
424 | tf->tf_ops.fabric_drop_nodeacl(se_nacl); | ||
425 | } | ||
426 | |||
427 | static struct configfs_group_operations target_fabric_nacl_group_ops = { | ||
428 | .make_group = target_fabric_make_nodeacl, | ||
429 | .drop_item = target_fabric_drop_nodeacl, | ||
430 | }; | ||
431 | |||
432 | TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); | ||
433 | |||
434 | /* End of tfc_tpg_nacl_cit */ | ||
435 | |||
436 | /* Start of tfc_tpg_np_base_cit */ | ||
437 | |||
438 | CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); | ||
439 | |||
440 | static struct configfs_item_operations target_fabric_np_base_item_ops = { | ||
441 | .show_attribute = target_fabric_np_base_attr_show, | ||
442 | .store_attribute = target_fabric_np_base_attr_store, | ||
443 | }; | ||
444 | |||
445 | TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); | ||
446 | |||
447 | /* End of tfc_tpg_np_base_cit */ | ||
448 | |||
449 | /* Start of tfc_tpg_np_cit */ | ||
450 | |||
451 | static struct config_group *target_fabric_make_np( | ||
452 | struct config_group *group, | ||
453 | const char *name) | ||
454 | { | ||
455 | struct se_portal_group *se_tpg = container_of(group, | ||
456 | struct se_portal_group, tpg_np_group); | ||
457 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
458 | struct se_tpg_np *se_tpg_np; | ||
459 | |||
460 | if (!(tf->tf_ops.fabric_make_np)) { | ||
461 | printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); | ||
462 | return ERR_PTR(-ENOSYS); | ||
463 | } | ||
464 | |||
465 | se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); | ||
466 | if (!(se_tpg_np) || IS_ERR(se_tpg_np)) | ||
467 | return ERR_PTR(-EINVAL); | ||
468 | |||
469 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, | ||
470 | &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); | ||
471 | |||
472 | return &se_tpg_np->tpg_np_group; | ||
473 | } | ||
474 | |||
475 | static void target_fabric_drop_np( | ||
476 | struct config_group *group, | ||
477 | struct config_item *item) | ||
478 | { | ||
479 | struct se_portal_group *se_tpg = container_of(group, | ||
480 | struct se_portal_group, tpg_np_group); | ||
481 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
482 | struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), | ||
483 | struct se_tpg_np, tpg_np_group); | ||
484 | |||
485 | config_item_put(item); | ||
486 | tf->tf_ops.fabric_drop_np(se_tpg_np); | ||
487 | } | ||
488 | |||
489 | static struct configfs_group_operations target_fabric_np_group_ops = { | ||
490 | .make_group = &target_fabric_make_np, | ||
491 | .drop_item = &target_fabric_drop_np, | ||
492 | }; | ||
493 | |||
494 | TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL); | ||
495 | |||
496 | /* End of tfc_tpg_np_cit */ | ||
497 | |||
498 | /* Start of tfc_tpg_port_cit */ | ||
499 | |||
500 | CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun); | ||
501 | #define TCM_PORT_ATTR(_name, _mode) \ | ||
502 | static struct target_fabric_port_attribute target_fabric_port_##_name = \ | ||
503 | __CONFIGFS_EATTR(_name, _mode, \ | ||
504 | target_fabric_port_show_attr_##_name, \ | ||
505 | target_fabric_port_store_attr_##_name); | ||
506 | |||
507 | #define TCM_PORT_ATTR_RO(_name) \ | ||
508 | __CONFIGFS_EATTR_RO(_name, \ | ||
509 | target_fabric_port_show_attr_##_name); | ||
510 | |||
511 | /* | ||
512 | * alua_tg_pt_gp | ||
513 | */ | ||
514 | static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( | ||
515 | struct se_lun *lun, | ||
516 | char *page) | ||
517 | { | ||
518 | if (!(lun)) | ||
519 | return -ENODEV; | ||
520 | |||
521 | if (!(lun->lun_sep)) | ||
522 | return -ENODEV; | ||
523 | |||
524 | return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); | ||
525 | } | ||
526 | |||
527 | static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( | ||
528 | struct se_lun *lun, | ||
529 | const char *page, | ||
530 | size_t count) | ||
531 | { | ||
532 | if (!(lun)) | ||
533 | return -ENODEV; | ||
534 | |||
535 | if (!(lun->lun_sep)) | ||
536 | return -ENODEV; | ||
537 | |||
538 | return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); | ||
539 | } | ||
540 | |||
541 | TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); | ||
542 | |||
543 | /* | ||
544 | * alua_tg_pt_offline | ||
545 | */ | ||
546 | static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( | ||
547 | struct se_lun *lun, | ||
548 | char *page) | ||
549 | { | ||
550 | if (!(lun)) | ||
551 | return -ENODEV; | ||
552 | |||
553 | if (!(lun->lun_sep)) | ||
554 | return -ENODEV; | ||
555 | |||
556 | return core_alua_show_offline_bit(lun, page); | ||
557 | } | ||
558 | |||
559 | static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( | ||
560 | struct se_lun *lun, | ||
561 | const char *page, | ||
562 | size_t count) | ||
563 | { | ||
564 | if (!(lun)) | ||
565 | return -ENODEV; | ||
566 | |||
567 | if (!(lun->lun_sep)) | ||
568 | return -ENODEV; | ||
569 | |||
570 | return core_alua_store_offline_bit(lun, page, count); | ||
571 | } | ||
572 | |||
573 | TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR); | ||
574 | |||
575 | /* | ||
576 | * alua_tg_pt_status | ||
577 | */ | ||
578 | static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( | ||
579 | struct se_lun *lun, | ||
580 | char *page) | ||
581 | { | ||
582 | if (!(lun)) | ||
583 | return -ENODEV; | ||
584 | |||
585 | if (!(lun->lun_sep)) | ||
586 | return -ENODEV; | ||
587 | |||
588 | return core_alua_show_secondary_status(lun, page); | ||
589 | } | ||
590 | |||
591 | static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( | ||
592 | struct se_lun *lun, | ||
593 | const char *page, | ||
594 | size_t count) | ||
595 | { | ||
596 | if (!(lun)) | ||
597 | return -ENODEV; | ||
598 | |||
599 | if (!(lun->lun_sep)) | ||
600 | return -ENODEV; | ||
601 | |||
602 | return core_alua_store_secondary_status(lun, page, count); | ||
603 | } | ||
604 | |||
605 | TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR); | ||
606 | |||
607 | /* | ||
608 | * alua_tg_pt_write_md | ||
609 | */ | ||
610 | static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( | ||
611 | struct se_lun *lun, | ||
612 | char *page) | ||
613 | { | ||
614 | if (!(lun)) | ||
615 | return -ENODEV; | ||
616 | |||
617 | if (!(lun->lun_sep)) | ||
618 | return -ENODEV; | ||
619 | |||
620 | return core_alua_show_secondary_write_metadata(lun, page); | ||
621 | } | ||
622 | |||
623 | static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( | ||
624 | struct se_lun *lun, | ||
625 | const char *page, | ||
626 | size_t count) | ||
627 | { | ||
628 | if (!(lun)) | ||
629 | return -ENODEV; | ||
630 | |||
631 | if (!(lun->lun_sep)) | ||
632 | return -ENODEV; | ||
633 | |||
634 | return core_alua_store_secondary_write_metadata(lun, page, count); | ||
635 | } | ||
636 | |||
637 | TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR); | ||
638 | |||
639 | |||
640 | static struct configfs_attribute *target_fabric_port_attrs[] = { | ||
641 | &target_fabric_port_alua_tg_pt_gp.attr, | ||
642 | &target_fabric_port_alua_tg_pt_offline.attr, | ||
643 | &target_fabric_port_alua_tg_pt_status.attr, | ||
644 | &target_fabric_port_alua_tg_pt_write_md.attr, | ||
645 | NULL, | ||
646 | }; | ||
647 | |||
648 | CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group); | ||
649 | |||
650 | static int target_fabric_port_link( | ||
651 | struct config_item *lun_ci, | ||
652 | struct config_item *se_dev_ci) | ||
653 | { | ||
654 | struct config_item *tpg_ci; | ||
655 | struct se_device *dev; | ||
656 | struct se_lun *lun = container_of(to_config_group(lun_ci), | ||
657 | struct se_lun, lun_group); | ||
658 | struct se_lun *lun_p; | ||
659 | struct se_portal_group *se_tpg; | ||
660 | struct se_subsystem_dev *se_dev = container_of( | ||
661 | to_config_group(se_dev_ci), struct se_subsystem_dev, | ||
662 | se_dev_group); | ||
663 | struct target_fabric_configfs *tf; | ||
664 | int ret; | ||
665 | |||
666 | tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; | ||
667 | se_tpg = container_of(to_config_group(tpg_ci), | ||
668 | struct se_portal_group, tpg_group); | ||
669 | tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
670 | |||
671 | if (lun->lun_se_dev != NULL) { | ||
672 | printk(KERN_ERR "Port Symlink already exists\n"); | ||
673 | return -EEXIST; | ||
674 | } | ||
675 | |||
676 | dev = se_dev->se_dev_ptr; | ||
677 | if (!(dev)) { | ||
678 | printk(KERN_ERR "Unable to locate struct se_device pointer from" | ||
679 | " %s\n", config_item_name(se_dev_ci)); | ||
680 | ret = -ENODEV; | ||
681 | goto out; | ||
682 | } | ||
683 | |||
684 | lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, | ||
685 | lun->unpacked_lun); | ||
686 | if ((IS_ERR(lun_p)) || !(lun_p)) { | ||
687 | printk(KERN_ERR "core_dev_add_lun() failed\n"); | ||
688 | ret = -EINVAL; | ||
689 | goto out; | ||
690 | } | ||
691 | |||
692 | if (tf->tf_ops.fabric_post_link) { | ||
693 | /* | ||
694 | * Call the optional fabric_post_link() to allow a | ||
695 | * fabric module to set up any additional state once | ||
696 | * core_dev_add_lun() has been called. | ||
697 | */ | ||
698 | tf->tf_ops.fabric_post_link(se_tpg, lun); | ||
699 | } | ||
700 | |||
701 | return 0; | ||
702 | out: | ||
703 | return ret; | ||
704 | } | ||
705 | |||
706 | static int target_fabric_port_unlink( | ||
707 | struct config_item *lun_ci, | ||
708 | struct config_item *se_dev_ci) | ||
709 | { | ||
710 | struct se_lun *lun = container_of(to_config_group(lun_ci), | ||
711 | struct se_lun, lun_group); | ||
712 | struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; | ||
713 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
714 | |||
715 | if (tf->tf_ops.fabric_pre_unlink) { | ||
716 | /* | ||
717 | * Call the optional fabric_pre_unlink() to allow a | ||
718 | * fabric module to release any additional state before | ||
719 | * core_dev_del_lun() is called. | ||
720 | */ | ||
721 | tf->tf_ops.fabric_pre_unlink(se_tpg, lun); | ||
722 | } | ||
723 | |||
724 | core_dev_del_lun(se_tpg, lun->unpacked_lun); | ||
725 | return 0; | ||
726 | } | ||
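
A minimal sketch of how a fabric module might supply the optional fabric_post_link()/fabric_pre_unlink() callbacks invoked above; the "tcm_foo" names are hypothetical and only illustrate the hook points, they are not part of this patch:

	/* Hypothetical callbacks for an imaginary tcm_foo fabric module */
	static void tcm_foo_post_link(struct se_portal_group *se_tpg,
				      struct se_lun *lun)
	{
		/* e.g. publish the freshly mapped LUN on the fabric transport */
		printk(KERN_INFO "tcm_foo: linked LUN %u\n", lun->unpacked_lun);
	}

	static void tcm_foo_pre_unlink(struct se_portal_group *se_tpg,
				       struct se_lun *lun)
	{
		/* e.g. quiesce fabric I/O to the LUN before core_dev_del_lun() */
		printk(KERN_INFO "tcm_foo: unlinking LUN %u\n", lun->unpacked_lun);
	}

Both pointers are wired into the module's struct target_core_fabric_ops and may be left NULL, as the calls above are guarded.
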
727 | |||
728 | static struct configfs_item_operations target_fabric_port_item_ops = { | ||
729 | .show_attribute = target_fabric_port_attr_show, | ||
730 | .store_attribute = target_fabric_port_attr_store, | ||
731 | .allow_link = target_fabric_port_link, | ||
732 | .drop_link = target_fabric_port_unlink, | ||
733 | }; | ||
734 | |||
735 | TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs); | ||
736 | |||
737 | /* End of tfc_tpg_port_cit */ | ||
738 | |||
739 | /* Start of tfc_tpg_lun_cit */ | ||
740 | |||
741 | static struct config_group *target_fabric_make_lun( | ||
742 | struct config_group *group, | ||
743 | const char *name) | ||
744 | { | ||
745 | struct se_lun *lun; | ||
746 | struct se_portal_group *se_tpg = container_of(group, | ||
747 | struct se_portal_group, tpg_lun_group); | ||
748 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
749 | unsigned long unpacked_lun; | ||
750 | |||
751 | if (strstr(name, "lun_") != name) { | ||
752 | printk(KERN_ERR "Unable to locate \'_\" in" | ||
753 | " \"lun_$LUN_NUMBER\"\n"); | ||
754 | return ERR_PTR(-EINVAL); | ||
755 | } | ||
756 | if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX) | ||
757 | return ERR_PTR(-EINVAL); | ||
758 | |||
759 | lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); | ||
760 | if (!(lun)) | ||
761 | return ERR_PTR(-EINVAL); | ||
762 | |||
763 | config_group_init_type_name(&lun->lun_group, name, | ||
764 | &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); | ||
765 | |||
766 | return &lun->lun_group; | ||
767 | } | ||
768 | |||
769 | static void target_fabric_drop_lun( | ||
770 | struct config_group *group, | ||
771 | struct config_item *item) | ||
772 | { | ||
773 | config_item_put(item); | ||
774 | } | ||
775 | |||
776 | static struct configfs_group_operations target_fabric_lun_group_ops = { | ||
777 | .make_group = &target_fabric_make_lun, | ||
778 | .drop_item = &target_fabric_drop_lun, | ||
779 | }; | ||
780 | |||
781 | TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL); | ||
782 | |||
783 | /* End of tfc_tpg_lun_cit */ | ||
784 | |||
785 | /* Start of tfc_tpg_attrib_cit */ | ||
786 | |||
787 | CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group); | ||
788 | |||
789 | static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = { | ||
790 | .show_attribute = target_fabric_tpg_attrib_attr_show, | ||
791 | .store_attribute = target_fabric_tpg_attrib_attr_store, | ||
792 | }; | ||
793 | |||
794 | TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); | ||
795 | |||
796 | /* End of tfc_tpg_attrib_cit */ | ||
797 | |||
798 | /* Start of tfc_tpg_param_cit */ | ||
799 | |||
800 | CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group); | ||
801 | |||
802 | static struct configfs_item_operations target_fabric_tpg_param_item_ops = { | ||
803 | .show_attribute = target_fabric_tpg_param_attr_show, | ||
804 | .store_attribute = target_fabric_tpg_param_attr_store, | ||
805 | }; | ||
806 | |||
807 | TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); | ||
808 | |||
809 | /* End of tfc_tpg_param_cit */ | ||
810 | |||
811 | /* Start of tfc_tpg_base_cit */ | ||
812 | /* | ||
813 | * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO() | ||
814 | */ | ||
815 | CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); | ||
816 | |||
817 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { | ||
818 | .show_attribute = target_fabric_tpg_attr_show, | ||
819 | .store_attribute = target_fabric_tpg_attr_store, | ||
820 | }; | ||
821 | |||
822 | TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); | ||
823 | |||
824 | /* End of tfc_tpg_base_cit */ | ||
825 | |||
826 | /* Start of tfc_tpg_cit */ | ||
827 | |||
828 | static struct config_group *target_fabric_make_tpg( | ||
829 | struct config_group *group, | ||
830 | const char *name) | ||
831 | { | ||
832 | struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); | ||
833 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
834 | struct se_portal_group *se_tpg; | ||
835 | |||
836 | if (!(tf->tf_ops.fabric_make_tpg)) { | ||
837 | printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); | ||
838 | return ERR_PTR(-ENOSYS); | ||
839 | } | ||
840 | |||
841 | se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); | ||
842 | if (!(se_tpg) || IS_ERR(se_tpg)) | ||
843 | return ERR_PTR(-EINVAL); | ||
844 | /* | ||
845 | * Setup default groups from pre-allocated se_tpg->tpg_default_groups | ||
846 | */ | ||
847 | se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups; | ||
848 | se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group; | ||
849 | se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group; | ||
850 | se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group; | ||
851 | se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group; | ||
852 | se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group; | ||
853 | se_tpg->tpg_group.default_groups[5] = NULL; | ||
854 | |||
855 | config_group_init_type_name(&se_tpg->tpg_group, name, | ||
856 | &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); | ||
857 | config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", | ||
858 | &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); | ||
859 | config_group_init_type_name(&se_tpg->tpg_np_group, "np", | ||
860 | &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); | ||
861 | config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", | ||
862 | &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); | ||
863 | config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", | ||
864 | &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); | ||
865 | config_group_init_type_name(&se_tpg->tpg_param_group, "param", | ||
866 | &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); | ||
867 | |||
868 | return &se_tpg->tpg_group; | ||
869 | } | ||
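
For orientation, the five default groups initialized above surface in configfs as per-TPG subdirectories. Assuming an iSCSI fabric module and an example IQN (both illustrative only), a freshly created TPG looks roughly like:

	/sys/kernel/config/target/iscsi/iqn.2010-11.org.example:t0/tpgt_1/
	|-- acls/
	|-- attrib/
	|-- lun/
	|-- np/
	`-- param/
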
870 | |||
871 | static void target_fabric_drop_tpg( | ||
872 | struct config_group *group, | ||
873 | struct config_item *item) | ||
874 | { | ||
875 | struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); | ||
876 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
877 | struct se_portal_group *se_tpg = container_of(to_config_group(item), | ||
878 | struct se_portal_group, tpg_group); | ||
879 | struct config_group *tpg_cg = &se_tpg->tpg_group; | ||
880 | struct config_item *df_item; | ||
881 | int i; | ||
882 | /* | ||
883 | * Release default groups, but do not release tpg_cg->default_groups | ||
884 | * memory as it is statically allocated at se_tpg->tpg_default_groups. | ||
885 | */ | ||
886 | for (i = 0; tpg_cg->default_groups[i]; i++) { | ||
887 | df_item = &tpg_cg->default_groups[i]->cg_item; | ||
888 | tpg_cg->default_groups[i] = NULL; | ||
889 | config_item_put(df_item); | ||
890 | } | ||
891 | |||
892 | config_item_put(item); | ||
893 | tf->tf_ops.fabric_drop_tpg(se_tpg); | ||
894 | } | ||
895 | |||
896 | static struct configfs_group_operations target_fabric_tpg_group_ops = { | ||
897 | .make_group = target_fabric_make_tpg, | ||
898 | .drop_item = target_fabric_drop_tpg, | ||
899 | }; | ||
900 | |||
901 | TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); | ||
902 | |||
903 | /* End of tfc_tpg_cit */ | ||
904 | |||
905 | /* Start of tfc_wwn_cit */ | ||
906 | |||
907 | static struct config_group *target_fabric_make_wwn( | ||
908 | struct config_group *group, | ||
909 | const char *name) | ||
910 | { | ||
911 | struct target_fabric_configfs *tf = container_of(group, | ||
912 | struct target_fabric_configfs, tf_group); | ||
913 | struct se_wwn *wwn; | ||
914 | |||
915 | if (!(tf->tf_ops.fabric_make_wwn)) { | ||
916 | printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); | ||
917 | return ERR_PTR(-ENOSYS); | ||
918 | } | ||
919 | |||
920 | wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); | ||
921 | if (!(wwn) || IS_ERR(wwn)) | ||
922 | return ERR_PTR(-EINVAL); | ||
923 | |||
924 | wwn->wwn_tf = tf; | ||
925 | config_group_init_type_name(&wwn->wwn_group, name, | ||
926 | &TF_CIT_TMPL(tf)->tfc_tpg_cit); | ||
927 | |||
928 | return &wwn->wwn_group; | ||
929 | } | ||
930 | |||
931 | static void target_fabric_drop_wwn( | ||
932 | struct config_group *group, | ||
933 | struct config_item *item) | ||
934 | { | ||
935 | struct target_fabric_configfs *tf = container_of(group, | ||
936 | struct target_fabric_configfs, tf_group); | ||
937 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
938 | struct se_wwn, wwn_group); | ||
939 | |||
940 | config_item_put(item); | ||
941 | tf->tf_ops.fabric_drop_wwn(wwn); | ||
942 | } | ||
943 | |||
944 | static struct configfs_group_operations target_fabric_wwn_group_ops = { | ||
945 | .make_group = target_fabric_make_wwn, | ||
946 | .drop_item = target_fabric_drop_wwn, | ||
947 | }; | ||
948 | /* | ||
949 | * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO() | ||
950 | */ | ||
951 | CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group); | ||
952 | |||
953 | static struct configfs_item_operations target_fabric_wwn_item_ops = { | ||
954 | .show_attribute = target_fabric_wwn_attr_show, | ||
955 | .store_attribute = target_fabric_wwn_attr_store, | ||
956 | }; | ||
957 | |||
958 | TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); | ||
959 | |||
960 | /* End of tfc_wwn_cit */ | ||
961 | |||
962 | /* Start of tfc_discovery_cit */ | ||
963 | |||
964 | CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs, | ||
965 | tf_disc_group); | ||
966 | |||
967 | static struct configfs_item_operations target_fabric_discovery_item_ops = { | ||
968 | .show_attribute = target_fabric_discovery_attr_show, | ||
969 | .store_attribute = target_fabric_discovery_attr_store, | ||
970 | }; | ||
971 | |||
972 | TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); | ||
973 | |||
974 | /* End of tfc_discovery_cit */ | ||
975 | |||
976 | int target_fabric_setup_cits(struct target_fabric_configfs *tf) | ||
977 | { | ||
978 | target_fabric_setup_discovery_cit(tf); | ||
979 | target_fabric_setup_wwn_cit(tf); | ||
980 | target_fabric_setup_tpg_cit(tf); | ||
981 | target_fabric_setup_tpg_base_cit(tf); | ||
982 | target_fabric_setup_tpg_port_cit(tf); | ||
983 | target_fabric_setup_tpg_lun_cit(tf); | ||
984 | target_fabric_setup_tpg_np_cit(tf); | ||
985 | target_fabric_setup_tpg_np_base_cit(tf); | ||
986 | target_fabric_setup_tpg_attrib_cit(tf); | ||
987 | target_fabric_setup_tpg_param_cit(tf); | ||
988 | target_fabric_setup_tpg_nacl_cit(tf); | ||
989 | target_fabric_setup_tpg_nacl_base_cit(tf); | ||
990 | target_fabric_setup_tpg_nacl_attrib_cit(tf); | ||
991 | target_fabric_setup_tpg_nacl_auth_cit(tf); | ||
992 | target_fabric_setup_tpg_nacl_param_cit(tf); | ||
993 | target_fabric_setup_tpg_mappedlun_cit(tf); | ||
994 | |||
995 | return 0; | ||
996 | } | ||
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c new file mode 100644 index 000000000000..26285644e4de --- /dev/null +++ b/drivers/target/target_core_fabric_lib.c | |||
@@ -0,0 +1,451 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_fabric_lib.c | ||
3 | * | ||
4 | * This file contains generic high level protocol identifier and PR | ||
5 | * handlers for TCM fabric modules | ||
6 | * | ||
7 | * Copyright (c) 2010 Rising Tide Systems, Inc. | ||
8 | * Copyright (c) 2010 Linux-iSCSI.org | ||
9 | * | ||
10 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
25 | * | ||
26 | ******************************************************************************/ | ||
27 | |||
28 | #include <linux/string.h> | ||
29 | #include <linux/ctype.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/smp_lock.h> | ||
32 | #include <scsi/scsi.h> | ||
33 | #include <scsi/scsi_cmnd.h> | ||
34 | |||
35 | #include <target/target_core_base.h> | ||
36 | #include <target/target_core_device.h> | ||
37 | #include <target/target_core_transport.h> | ||
38 | #include <target/target_core_fabric_ops.h> | ||
39 | #include <target/target_core_configfs.h> | ||
40 | |||
41 | #include "target_core_hba.h" | ||
42 | #include "target_core_pr.h" | ||
43 | |||
44 | /* | ||
45 | * Handlers for Serial Attached SCSI (SAS) | ||
46 | */ | ||
47 | u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
48 | { | ||
49 | /* | ||
50 | * Return a SAS Serial SCSI Protocol identifier for loopback operations | ||
51 | * This is defined in section 7.5.1 Table 362 in spc4r17 | ||
52 | */ | ||
53 | return 0x6; | ||
54 | } | ||
55 | EXPORT_SYMBOL(sas_get_fabric_proto_ident); | ||
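
For quick reference, the spc4r17 Table 362 PROTOCOL IDENTIFIER values returned by the handlers in this file are:

	0h  Fibre Channel (FCP-2)
	5h  Internet SCSI (iSCSI)
	6h  SAS Serial SCSI Protocol
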
56 | |||
57 | u32 sas_get_pr_transport_id( | ||
58 | struct se_portal_group *se_tpg, | ||
59 | struct se_node_acl *se_nacl, | ||
60 | struct t10_pr_registration *pr_reg, | ||
61 | int *format_code, | ||
62 | unsigned char *buf) | ||
63 | { | ||
64 | unsigned char binary, *ptr; | ||
65 | int i; | ||
66 | u32 off = 4; | ||
67 | /* | ||
68 | * Set PROTOCOL IDENTIFIER to 6h for SAS | ||
69 | */ | ||
70 | buf[0] = 0x06; | ||
71 | /* | ||
72 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI | ||
73 | * over SAS Serial SCSI Protocol | ||
74 | */ | ||
75 | ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */ | ||
76 | |||
77 | for (i = 0; i < 16; i += 2) { | ||
78 | binary = transport_asciihex_to_binaryhex(&ptr[i]); | ||
79 | buf[off++] = binary; | ||
80 | } | ||
81 | /* | ||
82 | * The SAS TransportID has a fixed length of 24 bytes | ||
83 | */ | ||
84 | return 24; | ||
85 | } | ||
86 | EXPORT_SYMBOL(sas_get_pr_transport_id); | ||
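
A worked example of the 24-byte SAS TransportID built above, assuming a hypothetical ACL with initiatorname "naa.5001405e3f2aa001" and a caller-zeroed buf: skipping the "naa." prefix leaves 16 hex digits, which pack pairwise into 8 binary bytes:

	buf[0]      = 0x06                    /* PROTOCOL IDENTIFIER: SAS */
	buf[4..11]  = 50 01 40 5e 3f 2a a0 01 /* binary SAS address */
	buf[1..3] and buf[12..23] remain zero (reserved)
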
87 | |||
88 | u32 sas_get_pr_transport_id_len( | ||
89 | struct se_portal_group *se_tpg, | ||
90 | struct se_node_acl *se_nacl, | ||
91 | struct t10_pr_registration *pr_reg, | ||
92 | int *format_code) | ||
93 | { | ||
94 | *format_code = 0; | ||
95 | /* | ||
96 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI | ||
97 | * over SAS Serial SCSI Protocol | ||
98 | * | ||
99 | * The SAS TransportID has a fixed length of 24 bytes | ||
100 | */ | ||
101 | return 24; | ||
102 | } | ||
103 | EXPORT_SYMBOL(sas_get_pr_transport_id_len); | ||
104 | |||
105 | /* | ||
106 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | ||
107 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | ||
108 | */ | ||
109 | char *sas_parse_pr_out_transport_id( | ||
110 | struct se_portal_group *se_tpg, | ||
111 | const char *buf, | ||
112 | u32 *out_tid_len, | ||
113 | char **port_nexus_ptr) | ||
114 | { | ||
115 | /* | ||
116 | * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID | ||
117 | * for initiator ports using SCSI over SAS Serial SCSI Protocol | ||
118 | * | ||
119 | * The TransportID for a SAS Initiator Port is of fixed size of | ||
120 | * 24 bytes, and SAS does not contain an I_T nexus identifier, | ||
121 | * so we return the **port_nexus_ptr set to NULL. | ||
122 | */ | ||
123 | *port_nexus_ptr = NULL; | ||
124 | *out_tid_len = 24; | ||
125 | |||
126 | return (char *)&buf[4]; | ||
127 | } | ||
128 | EXPORT_SYMBOL(sas_parse_pr_out_transport_id); | ||
129 | |||
130 | /* | ||
131 | * Handlers for Fibre Channel Protocol (FCP) | ||
132 | */ | ||
133 | u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
134 | { | ||
135 | return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ | ||
136 | } | ||
137 | EXPORT_SYMBOL(fc_get_fabric_proto_ident); | ||
138 | |||
139 | u32 fc_get_pr_transport_id_len( | ||
140 | struct se_portal_group *se_tpg, | ||
141 | struct se_node_acl *se_nacl, | ||
142 | struct t10_pr_registration *pr_reg, | ||
143 | int *format_code) | ||
144 | { | ||
145 | *format_code = 0; | ||
146 | /* | ||
147 | * The FC TransportID has a fixed length of 24 bytes | ||
148 | */ | ||
149 | return 24; | ||
150 | } | ||
151 | EXPORT_SYMBOL(fc_get_pr_transport_id_len); | ||
152 | |||
153 | u32 fc_get_pr_transport_id( | ||
154 | struct se_portal_group *se_tpg, | ||
155 | struct se_node_acl *se_nacl, | ||
156 | struct t10_pr_registration *pr_reg, | ||
157 | int *format_code, | ||
158 | unsigned char *buf) | ||
159 | { | ||
160 | unsigned char binary, *ptr; | ||
161 | int i; | ||
162 | u32 off = 8; | ||
163 | /* | ||
164 | * PROTOCOL IDENTIFIER is 0h for FCP-2 | ||
165 | * | ||
166 | * From spc4r17, 7.5.4.2 TransportID for initiator ports using | ||
167 | * SCSI over Fibre Channel | ||
168 | * | ||
169 | * We convert the ASCII formatted N Port name into a binary | ||
170 | * encoded TransportID. | ||
171 | */ | ||
172 | ptr = &se_nacl->initiatorname[0]; | ||
173 | |||
174 | for (i = 0; i < 24; ) { | ||
175 | if (!(strncmp(&ptr[i], ":", 1))) { | ||
176 | i++; | ||
177 | continue; | ||
178 | } | ||
179 | binary = transport_asciihex_to_binaryhex(&ptr[i]); | ||
180 | buf[off++] = binary; | ||
181 | i += 2; | ||
182 | } | ||
183 | /* | ||
185 | * The FC TransportID has a fixed length of 24 bytes | ||
185 | */ | ||
186 | return 24; | ||
187 | } | ||
188 | EXPORT_SYMBOL(fc_get_pr_transport_id); | ||
189 | |||
190 | char *fc_parse_pr_out_transport_id( | ||
191 | struct se_portal_group *se_tpg, | ||
192 | const char *buf, | ||
193 | u32 *out_tid_len, | ||
194 | char **port_nexus_ptr) | ||
195 | { | ||
196 | /* | ||
197 | * The TransportID for a FC N Port is of fixed size of | ||
198 | * 24 bytes, and FC does not contain an I_T nexus identifier, | ||
199 | * so we return the **port_nexus_ptr set to NULL. | ||
200 | */ | ||
201 | *port_nexus_ptr = NULL; | ||
202 | *out_tid_len = 24; | ||
203 | |||
204 | return (char *)&buf[8]; | ||
205 | } | ||
206 | EXPORT_SYMBOL(fc_parse_pr_out_transport_id); | ||
207 | |||
208 | /* | ||
209 | * Handlers for Internet Small Computer Systems Interface (iSCSI) | ||
210 | */ | ||
211 | |||
212 | u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) | ||
213 | { | ||
214 | /* | ||
215 | * This value is defined for "Internet SCSI (iSCSI)" | ||
216 | * in spc4r17 section 7.5.1 Table 362 | ||
217 | */ | ||
218 | return 0x5; | ||
219 | } | ||
220 | EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); | ||
221 | |||
222 | u32 iscsi_get_pr_transport_id( | ||
223 | struct se_portal_group *se_tpg, | ||
224 | struct se_node_acl *se_nacl, | ||
225 | struct t10_pr_registration *pr_reg, | ||
226 | int *format_code, | ||
227 | unsigned char *buf) | ||
228 | { | ||
229 | u32 off = 4, padding = 0; | ||
230 | u16 len = 0; | ||
231 | |||
232 | spin_lock_irq(&se_nacl->nacl_sess_lock); | ||
233 | /* | ||
234 | * Set PROTOCOL IDENTIFIER to 5h for iSCSI | ||
235 | */ | ||
236 | buf[0] = 0x05; | ||
237 | /* | ||
238 | * From spc4r17 Section 7.5.4.6: TransportID for initiator | ||
239 | * ports using SCSI over iSCSI. | ||
240 | * | ||
241 | * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field | ||
242 | * shall contain the iSCSI name of an iSCSI initiator node (see | ||
243 | * RFC 3720). The first ISCSI NAME field byte containing an ASCII | ||
244 | * null character terminates the ISCSI NAME field without regard for | ||
245 | * the specified length of the iSCSI TransportID or the contents of | ||
246 | * the ADDITIONAL LENGTH field. | ||
247 | */ | ||
248 | len = sprintf(&buf[off], "%s", se_nacl->initiatorname); | ||
249 | /* | ||
250 | * Add Extra byte for NULL terminator | ||
251 | */ | ||
252 | len++; | ||
253 | /* | ||
254 | * If there is an ISID present with the registration and | ||
255 | * *format_code == 1, use the iSCSI Initiator port TransportID format. | ||
256 | * | ||
257 | * Otherwise use the iSCSI Initiator device TransportID format that | ||
258 | * does not contain the ASCII encoded iSCSI Initiator ISID value | ||
259 | * provided by the iSCSI Initiator during the iSCSI login process. | ||
260 | */ | ||
261 | if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { | ||
262 | /* | ||
263 | * Set FORMAT CODE 01b for iSCSI Initiator port TransportID | ||
264 | * format. | ||
265 | */ | ||
266 | buf[0] |= 0x40; | ||
267 | /* | ||
268 | * From spc4r17 Section 7.5.4.6: TransportID for initiator | ||
269 | * ports using SCSI over iSCSI. Table 390 | ||
270 | * | ||
271 | * The SEPARATOR field shall contain the five ASCII | ||
272 | * characters ",i,0x". | ||
273 | * | ||
274 | * The null-terminated, null-padded ISCSI INITIATOR SESSION ID | ||
275 | * field shall contain the iSCSI initiator session identifier | ||
276 | * (see RFC 3720) in the form of ASCII characters that are the | ||
277 | * hexadecimal digits converted from the binary iSCSI initiator | ||
278 | * session identifier value. The first ISCSI INITIATOR SESSION | ||
279 | * ID field byte containing an ASCII null character terminates the field. | ||
280 | */ | ||
281 | buf[off+len++] = 0x2c; /* ASCII Character: "," */ | ||
282 | buf[off+len++] = 0x69; /* ASCII Character: "i" */ | ||
283 | buf[off+len++] = 0x2c; /* ASCII Character: "," */ | ||
284 | buf[off+len++] = 0x30; /* ASCII Character: "0" */ | ||
285 | buf[off+len++] = 0x78; /* ASCII Character: "x" */ | ||
286 | buf[off+len++] = pr_reg->pr_reg_isid[0]; | ||
287 | buf[off+len++] = pr_reg->pr_reg_isid[1]; | ||
288 | buf[off+len++] = pr_reg->pr_reg_isid[2]; | ||
289 | buf[off+len++] = pr_reg->pr_reg_isid[3]; | ||
290 | buf[off+len++] = pr_reg->pr_reg_isid[4]; | ||
291 | buf[off+len++] = pr_reg->pr_reg_isid[5]; | ||
292 | buf[off+len++] = '\0'; | ||
293 | /* len now also covers the ",i,0x" separator, the six ISID | ||
294 | * bytes and the trailing NULL terminator (12 bytes total) */ | ||
295 | } | ||
296 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | ||
297 | /* | ||
298 | * The ADDITIONAL LENGTH field specifies the number of bytes that follow | ||
299 | * in the TransportID. The additional length shall be at least 20 and | ||
300 | * shall be a multiple of four. | ||
301 | */ | ||
302 | padding = ((-len) & 3); | ||
303 | if (padding != 0) | ||
304 | len += padding; | ||
305 | |||
306 | buf[2] = ((len >> 8) & 0xff); | ||
307 | buf[3] = (len & 0xff); | ||
308 | /* | ||
309 | * Increment value for total payload + header length for | ||
310 | * full status descriptor | ||
311 | */ | ||
312 | len += 4; | ||
313 | |||
314 | return len; | ||
315 | } | ||
316 | EXPORT_SYMBOL(iscsi_get_pr_transport_id); | ||
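
A worked example under assumed inputs (the IQN is made up): with initiatorname "iqn.2003-01.org.example:init" (28 characters) and *format_code == 0, len becomes 28 + 1 = 29 with the NULL terminator, padding = (-29) & 3 = 3, so the ADDITIONAL LENGTH stored in buf[2..3] is 32 and the function returns 32 + 4 = 36:

	buf[0]    = 0x05        /* iSCSI, FORMAT CODE 00b */
	buf[2..3] = 0x00 0x20   /* ADDITIONAL LENGTH: 32 */
	buf[4..]  = "iqn.2003-01.org.example:init\0" plus 3 pad bytes
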
317 | |||
318 | u32 iscsi_get_pr_transport_id_len( | ||
319 | struct se_portal_group *se_tpg, | ||
320 | struct se_node_acl *se_nacl, | ||
321 | struct t10_pr_registration *pr_reg, | ||
322 | int *format_code) | ||
323 | { | ||
324 | u32 len = 0, padding = 0; | ||
325 | |||
326 | spin_lock_irq(&se_nacl->nacl_sess_lock); | ||
327 | len = strlen(se_nacl->initiatorname); | ||
328 | /* | ||
329 | * Add extra byte for NULL terminator | ||
330 | */ | ||
331 | len++; | ||
332 | /* | ||
333 | * If there is ISID present with the registration, use format code: | ||
334 | * 01b: iSCSI Initiator port TransportID format | ||
335 | * | ||
336 | * If there is not an active iSCSI session, use format code: | ||
337 | * 00b: iSCSI Initiator device TransportID format | ||
338 | */ | ||
339 | if (pr_reg->isid_present_at_reg) { | ||
340 | len += 5; /* For ",i,0x" ASCII seperator */ | ||
341 | len += 7; /* For iSCSI Initiator Session ID + Null terminator */ | ||
342 | *format_code = 1; | ||
343 | } else | ||
344 | *format_code = 0; | ||
345 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | ||
346 | /* | ||
347 | * The ADDITIONAL LENGTH field specifies the number of bytes that follow | ||
348 | * in the TransportID. The additional length shall be at least 20 and | ||
349 | * shall be a multiple of four. | ||
350 | */ | ||
351 | padding = ((-len) & 3); | ||
352 | if (padding != 0) | ||
353 | len += padding; | ||
354 | /* | ||
355 | * Increment value for total payload + header length for | ||
356 | * full status descriptor | ||
357 | */ | ||
358 | len += 4; | ||
359 | |||
360 | return len; | ||
361 | } | ||
362 | EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); | ||
363 | |||
364 | char *iscsi_parse_pr_out_transport_id( | ||
365 | struct se_portal_group *se_tpg, | ||
366 | const char *buf, | ||
367 | u32 *out_tid_len, | ||
368 | char **port_nexus_ptr) | ||
369 | { | ||
370 | char *p; | ||
371 | u32 tid_len, padding; | ||
372 | int i; | ||
373 | u16 add_len; | ||
374 | u8 format_code = (buf[0] & 0xc0); | ||
375 | /* | ||
376 | * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6: | ||
377 | * | ||
378 | * TransportID for initiator ports using SCSI over iSCSI, | ||
379 | * from Table 388 -- iSCSI TransportID formats. | ||
380 | * | ||
381 | * 00b Initiator port is identified using the world wide unique | ||
382 | * SCSI device name of the iSCSI initiator | ||
383 | * device containing the initiator port (see table 389). | ||
384 | * 01b Initiator port is identified using the world wide unique | ||
385 | * initiator port identifier (see table 390). | ||
386 | * 10b to 11b Reserved | ||
387 | */ | ||
388 | if ((format_code != 0x00) && (format_code != 0x40)) { | ||
389 | printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" | ||
390 | " Initiator Transport ID\n", format_code); | ||
391 | return NULL; | ||
392 | } | ||
393 | /* | ||
394 | * If the caller wants the TransportID Length, we set that value for the | ||
395 | * entire iSCSI Transport ID now. | ||
396 | */ | ||
397 | if (out_tid_len != NULL) { | ||
398 | add_len = ((buf[2] >> 8) & 0xff); | ||
399 | add_len |= (buf[3] & 0xff); | ||
400 | |||
401 | tid_len = strlen((char *)&buf[4]); | ||
402 | tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ | ||
403 | tid_len += 1; /* Add one byte for NULL terminator */ | ||
404 | padding = ((-tid_len) & 3); | ||
405 | if (padding != 0) | ||
406 | tid_len += padding; | ||
407 | |||
408 | if ((add_len + 4) != tid_len) { | ||
409 | printk(KERN_INFO "LIO-Target Extracted add_len: %hu " | ||
410 | "does not match calculated tid_len: %u," | ||
411 | " using tid_len instead\n", add_len+4, tid_len); | ||
412 | *out_tid_len = tid_len; | ||
413 | } else | ||
414 | *out_tid_len = (add_len + 4); | ||
415 | } | ||
416 | /* | ||
417 | * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator | ||
418 | * Session ID as defined in Table 390 - iSCSI initiator port TransportID | ||
419 | * format. | ||
420 | */ | ||
421 | if (format_code == 0x40) { | ||
422 | p = strstr((char *)&buf[4], ",i,0x"); | ||
423 | if (!(p)) { | ||
424 | printk(KERN_ERR "Unable to locate \",i,0x\" seperator" | ||
425 | " for Initiator port identifier: %s\n", | ||
426 | (char *)&buf[4]); | ||
427 | return NULL; | ||
428 | } | ||
429 | *p = '\0'; /* Terminate iSCSI Name */ | ||
430 | p += 5; /* Skip over ",i,0x" separator */ | ||
431 | |||
432 | *port_nexus_ptr = p; | ||
433 | /* | ||
434 | * Go ahead and do the lower case conversion of the received | ||
435 | * 12 ASCII characters representing the ISID in the TransportID | ||
436 | * for comparison against the running iSCSI session's ISID from | ||
437 | * iscsi_target.c:lio_sess_get_initiator_sid() | ||
438 | */ | ||
439 | for (i = 0; i < 12; i++) { | ||
440 | if (isdigit(*p)) { | ||
441 | p++; | ||
442 | continue; | ||
443 | } | ||
444 | *p = tolower(*p); | ||
445 | p++; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | return (char *)&buf[4]; | ||
450 | } | ||
451 | EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id); | ||
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c new file mode 100644 index 000000000000..0aaca885668f --- /dev/null +++ b/drivers/target/target_core_file.c | |||
@@ -0,0 +1,688 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_file.c | ||
3 | * | ||
4 | * This file contains the Storage Engine <-> FILEIO transport specific functions | ||
5 | * | ||
6 | * Copyright (c) 2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/version.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/parser.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/blkdev.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/smp_lock.h> | ||
37 | #include <scsi/scsi.h> | ||
38 | #include <scsi/scsi_host.h> | ||
39 | |||
40 | #include <target/target_core_base.h> | ||
41 | #include <target/target_core_device.h> | ||
42 | #include <target/target_core_transport.h> | ||
43 | |||
44 | #include "target_core_file.h" | ||
45 | |||
46 | #if 1 | ||
47 | #define DEBUG_FD_CACHE(x...) printk(x) | ||
48 | #else | ||
49 | #define DEBUG_FD_CACHE(x...) | ||
50 | #endif | ||
51 | |||
52 | #if 1 | ||
53 | #define DEBUG_FD_FUA(x...) printk(x) | ||
54 | #else | ||
55 | #define DEBUG_FD_FUA(x...) | ||
56 | #endif | ||
57 | |||
58 | static struct se_subsystem_api fileio_template; | ||
59 | |||
60 | /* fd_attach_hba(): (Part of se_subsystem_api_t template) | ||
61 | * | ||
62 | * | ||
63 | */ | ||
64 | static int fd_attach_hba(struct se_hba *hba, u32 host_id) | ||
65 | { | ||
66 | struct fd_host *fd_host; | ||
67 | |||
68 | fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); | ||
69 | if (!(fd_host)) { | ||
70 | printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); | ||
71 | return -1; | ||
72 | } | ||
73 | |||
74 | fd_host->fd_host_id = host_id; | ||
75 | |||
76 | atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH); | ||
77 | atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH); | ||
78 | hba->hba_ptr = (void *) fd_host; | ||
79 | |||
80 | printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" | ||
81 | " Target Core Stack %s\n", hba->hba_id, FD_VERSION, | ||
82 | TARGET_CORE_MOD_VERSION); | ||
83 | printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" | ||
84 | " Target Core with TCQ Depth: %d MaxSectors: %u\n", | ||
85 | hba->hba_id, fd_host->fd_host_id, | ||
86 | atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS); | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static void fd_detach_hba(struct se_hba *hba) | ||
92 | { | ||
93 | struct fd_host *fd_host = hba->hba_ptr; | ||
94 | |||
95 | printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" | ||
96 | " Target Core\n", hba->hba_id, fd_host->fd_host_id); | ||
97 | |||
98 | kfree(fd_host); | ||
99 | hba->hba_ptr = NULL; | ||
100 | } | ||
101 | |||
102 | static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) | ||
103 | { | ||
104 | struct fd_dev *fd_dev; | ||
105 | struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; | ||
106 | |||
107 | fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); | ||
108 | if (!(fd_dev)) { | ||
109 | printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); | ||
110 | return NULL; | ||
111 | } | ||
112 | |||
113 | fd_dev->fd_host = fd_host; | ||
114 | |||
115 | printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); | ||
116 | |||
117 | return fd_dev; | ||
118 | } | ||
119 | |||
120 | /* fd_create_virtdevice(): (Part of se_subsystem_api_t template) | ||
121 | * | ||
122 | * | ||
123 | */ | ||
124 | static struct se_device *fd_create_virtdevice( | ||
125 | struct se_hba *hba, | ||
126 | struct se_subsystem_dev *se_dev, | ||
127 | void *p) | ||
128 | { | ||
129 | char *dev_p = NULL; | ||
130 | struct se_device *dev; | ||
131 | struct se_dev_limits dev_limits; | ||
132 | struct queue_limits *limits; | ||
133 | struct fd_dev *fd_dev = (struct fd_dev *) p; | ||
134 | struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; | ||
135 | mm_segment_t old_fs; | ||
136 | struct file *file; | ||
137 | struct inode *inode = NULL; | ||
138 | int dev_flags = 0, flags; | ||
139 | |||
140 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | ||
141 | |||
142 | old_fs = get_fs(); | ||
143 | set_fs(get_ds()); | ||
144 | dev_p = getname(fd_dev->fd_dev_name); | ||
145 | set_fs(old_fs); | ||
146 | |||
147 | if (IS_ERR(dev_p)) { | ||
148 | printk(KERN_ERR "getname(%s) failed: %lu\n", | ||
149 | fd_dev->fd_dev_name, IS_ERR(dev_p)); | ||
150 | goto fail; | ||
151 | } | ||
152 | #if 0 | ||
153 | if (di->no_create_file) | ||
154 | flags = O_RDWR | O_LARGEFILE; | ||
155 | else | ||
156 | flags = O_RDWR | O_CREAT | O_LARGEFILE; | ||
157 | #else | ||
158 | flags = O_RDWR | O_CREAT | O_LARGEFILE; | ||
159 | #endif | ||
160 | /* flags |= O_DIRECT; */ | ||
161 | /* | ||
162 | * If fd_buffered_io=1 has not been set explicitly (the default), | ||
163 | * use O_SYNC to force FILEIO writes to disk. | ||
164 | */ | ||
165 | if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) | ||
166 | flags |= O_SYNC; | ||
167 | |||
168 | file = filp_open(dev_p, flags, 0600); | ||
169 | |||
170 | if (IS_ERR(file) || !file || !file->f_dentry) { | ||
171 | printk(KERN_ERR "filp_open(%s) failed\n", dev_p); | ||
172 | goto fail; | ||
173 | } | ||
174 | fd_dev->fd_file = file; | ||
175 | /* | ||
176 | * If using a block backend with this struct file, we extract | ||
177 | * fd_dev->fd_[block,dev]_size from struct block_device. | ||
178 | * | ||
179 | * Otherwise, we use the passed fd_dev_size= from configfs | ||
180 | */ | ||
181 | inode = file->f_mapping->host; | ||
182 | if (S_ISBLK(inode->i_mode)) { | ||
183 | struct request_queue *q; | ||
184 | /* | ||
185 | * Setup the local scope queue_limits from struct request_queue->limits | ||
186 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | ||
187 | */ | ||
188 | q = bdev_get_queue(inode->i_bdev); | ||
189 | limits = &dev_limits.limits; | ||
190 | limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); | ||
191 | limits->max_hw_sectors = queue_max_hw_sectors(q); | ||
192 | limits->max_sectors = queue_max_sectors(q); | ||
193 | /* | ||
194 | * Determine the number of bytes from i_size_read() minus | ||
195 | * one (1) logical sector from underlying struct block_device | ||
196 | */ | ||
197 | fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); | ||
198 | fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - | ||
199 | fd_dev->fd_block_size); | ||
200 | |||
201 | printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" | ||
202 | " block_device blocks: %llu logical_block_size: %d\n", | ||
203 | fd_dev->fd_dev_size, | ||
204 | div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), | ||
205 | fd_dev->fd_block_size); | ||
206 | } else { | ||
207 | if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { | ||
208 | printk(KERN_ERR "FILEIO: Missing fd_dev_size=" | ||
209 | " parameter, and no backing struct" | ||
210 | " block_device\n"); | ||
211 | goto fail; | ||
212 | } | ||
213 | |||
214 | limits = &dev_limits.limits; | ||
215 | limits->logical_block_size = FD_BLOCKSIZE; | ||
216 | limits->max_hw_sectors = FD_MAX_SECTORS; | ||
217 | limits->max_sectors = FD_MAX_SECTORS; | ||
218 | fd_dev->fd_block_size = FD_BLOCKSIZE; | ||
219 | } | ||
220 | |||
221 | dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; | ||
222 | dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; | ||
223 | |||
224 | dev = transport_add_device_to_core_hba(hba, &fileio_template, | ||
225 | se_dev, dev_flags, (void *)fd_dev, | ||
226 | &dev_limits, "FILEIO", FD_VERSION); | ||
227 | if (!(dev)) | ||
228 | goto fail; | ||
229 | |||
230 | fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; | ||
231 | fd_dev->fd_queue_depth = dev->queue_depth; | ||
232 | |||
233 | printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," | ||
234 | " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, | ||
235 | fd_dev->fd_dev_name, fd_dev->fd_dev_size); | ||
236 | |||
237 | putname(dev_p); | ||
238 | return dev; | ||
239 | fail: | ||
240 | if (fd_dev->fd_file) { | ||
241 | filp_close(fd_dev->fd_file, NULL); | ||
242 | fd_dev->fd_file = NULL; | ||
243 | } | ||
244 | if (!IS_ERR_OR_NULL(dev_p)) putname(dev_p); | ||
245 | return NULL; | ||
246 | } | ||
247 | |||
248 | /* fd_free_device(): (Part of se_subsystem_api_t template) | ||
249 | * | ||
250 | * | ||
251 | */ | ||
252 | static void fd_free_device(void *p) | ||
253 | { | ||
254 | struct fd_dev *fd_dev = (struct fd_dev *) p; | ||
255 | |||
256 | if (fd_dev->fd_file) { | ||
257 | filp_close(fd_dev->fd_file, NULL); | ||
258 | fd_dev->fd_file = NULL; | ||
259 | } | ||
260 | |||
261 | kfree(fd_dev); | ||
262 | } | ||
263 | |||
264 | static inline struct fd_request *FILE_REQ(struct se_task *task) | ||
265 | { | ||
266 | return container_of(task, struct fd_request, fd_task); | ||
267 | } | ||
268 | |||
269 | |||
270 | static struct se_task * | ||
271 | fd_alloc_task(struct se_cmd *cmd) | ||
272 | { | ||
273 | struct fd_request *fd_req; | ||
274 | |||
275 | fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); | ||
276 | if (!(fd_req)) { | ||
277 | printk(KERN_ERR "Unable to allocate struct fd_request\n"); | ||
278 | return NULL; | ||
279 | } | ||
280 | |||
281 | fd_req->fd_dev = SE_DEV(cmd)->dev_ptr; | ||
282 | |||
283 | return &fd_req->fd_task; | ||
284 | } | ||
285 | |||
286 | static int fd_do_readv(struct se_task *task) | ||
287 | { | ||
288 | struct fd_request *req = FILE_REQ(task); | ||
289 | struct file *fd = req->fd_dev->fd_file; | ||
290 | struct scatterlist *sg = task->task_sg; | ||
291 | struct iovec *iov; | ||
292 | mm_segment_t old_fs; | ||
293 | loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); | ||
294 | int ret = 0, i; | ||
295 | |||
296 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); | ||
297 | if (!(iov)) { | ||
298 | printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); | ||
299 | return -1; | ||
300 | } | ||
301 | |||
302 | for (i = 0; i < task->task_sg_num; i++) { | ||
303 | iov[i].iov_len = sg[i].length; | ||
304 | iov[i].iov_base = sg_virt(&sg[i]); | ||
305 | } | ||
306 | |||
307 | old_fs = get_fs(); | ||
308 | set_fs(get_ds()); | ||
309 | ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); | ||
310 | set_fs(old_fs); | ||
311 | |||
312 | kfree(iov); | ||
313 | /* | ||
314 | * Return zeros and GOOD status even if the READ did not return | ||
315 | * the expected virt_size for struct file w/o a backing struct | ||
316 | * block_device. | ||
317 | */ | ||
318 | if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { | ||
319 | if (ret < 0 || ret != task->task_size) { | ||
320 | printk(KERN_ERR "vfs_readv() returned %d," | ||
321 | " expecting %d for S_ISBLK\n", ret, | ||
322 | (int)task->task_size); | ||
323 | return -1; | ||
324 | } | ||
325 | } else { | ||
326 | if (ret < 0) { | ||
327 | printk(KERN_ERR "vfs_readv() returned %d for non" | ||
328 | " S_ISBLK\n", ret); | ||
329 | return -1; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | return 1; | ||
334 | } | ||
335 | |||
336 | static int fd_do_writev(struct se_task *task) | ||
337 | { | ||
338 | struct fd_request *req = FILE_REQ(task); | ||
339 | struct file *fd = req->fd_dev->fd_file; | ||
340 | struct scatterlist *sg = task->task_sg; | ||
341 | struct iovec *iov; | ||
342 | mm_segment_t old_fs; | ||
343 | loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); | ||
344 | int ret, i = 0; | ||
345 | |||
346 | iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); | ||
347 | if (!(iov)) { | ||
348 | printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); | ||
349 | return -1; | ||
350 | } | ||
351 | |||
352 | for (i = 0; i < task->task_sg_num; i++) { | ||
353 | iov[i].iov_len = sg[i].length; | ||
354 | iov[i].iov_base = sg_virt(&sg[i]); | ||
355 | } | ||
356 | |||
357 | old_fs = get_fs(); | ||
358 | set_fs(get_ds()); | ||
359 | ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); | ||
360 | set_fs(old_fs); | ||
361 | |||
362 | kfree(iov); | ||
363 | |||
364 | if (ret < 0 || ret != task->task_size) { | ||
365 | printk(KERN_ERR "vfs_writev() returned %d\n", ret); | ||
366 | return -1; | ||
367 | } | ||
368 | |||
369 | return 1; | ||
370 | } | ||
371 | |||
372 | static void fd_emulate_sync_cache(struct se_task *task) | ||
373 | { | ||
374 | struct se_cmd *cmd = TASK_CMD(task); | ||
375 | struct se_device *dev = cmd->se_dev; | ||
376 | struct fd_dev *fd_dev = dev->dev_ptr; | ||
377 | int immed = (cmd->t_task->t_task_cdb[1] & 0x2); | ||
378 | loff_t start, end; | ||
379 | int ret; | ||
380 | |||
381 | /* | ||
382 | * If the Immediate bit is set, queue up the GOOD response | ||
383 | * for this SYNCHRONIZE_CACHE op | ||
384 | */ | ||
385 | if (immed) | ||
386 | transport_complete_sync_cache(cmd, 1); | ||
387 | |||
388 | /* | ||
389 | * Determine if we will be flushing the entire device. | ||
390 | */ | ||
391 | if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) { | ||
392 | start = 0; | ||
393 | end = LLONG_MAX; | ||
394 | } else { | ||
395 | start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size; | ||
396 | if (cmd->data_length) | ||
397 | end = start + cmd->data_length; | ||
398 | else | ||
399 | end = LLONG_MAX; | ||
400 | } | ||
401 | |||
402 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
403 | if (ret != 0) | ||
404 | printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); | ||
405 | |||
406 | if (!immed) | ||
407 | transport_complete_sync_cache(cmd, ret == 0); | ||
408 | } | ||
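
For example, with the default 512-byte block_size, a SYNCHRONIZE_CACHE of t_task_lba = 16 and data_length = 4096 (illustrative values only) flushes:

	start = 16 * 512    = 8192
	end   = 8192 + 4096 = 12288

while t_task_lba == 0 with data_length == 0 flushes the entire backing file.
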
409 | |||
410 | /* | ||
411 | * Tell TCM Core that we are capable of WriteCache emulation for | ||
412 | * an underlying struct se_device. | ||
413 | */ | ||
414 | static int fd_emulated_write_cache(struct se_device *dev) | ||
415 | { | ||
416 | return 1; | ||
417 | } | ||
418 | |||
419 | static int fd_emulated_dpo(struct se_device *dev) | ||
420 | { | ||
421 | return 0; | ||
422 | } | ||
423 | /* | ||
424 | * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs | ||
425 | * for TYPE_DISK. | ||
426 | */ | ||
427 | static int fd_emulated_fua_write(struct se_device *dev) | ||
428 | { | ||
429 | return 1; | ||
430 | } | ||
431 | |||
432 | static int fd_emulated_fua_read(struct se_device *dev) | ||
433 | { | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * WRITE Force Unit Access (FUA) emulation on a per struct se_task | ||
439 | * LBA range basis. | ||
440 | */ | ||
441 | static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) | ||
442 | { | ||
443 | struct se_device *dev = cmd->se_dev; | ||
444 | struct fd_dev *fd_dev = dev->dev_ptr; | ||
445 | loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size; | ||
446 | loff_t end = start + task->task_size; | ||
447 | int ret; | ||
448 | |||
449 | DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", | ||
450 | task->task_lba, task->task_size); | ||
451 | |||
452 | ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); | ||
453 | if (ret != 0) | ||
454 | printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); | ||
455 | } | ||
456 | |||
457 | static int fd_do_task(struct se_task *task) | ||
458 | { | ||
459 | struct se_cmd *cmd = task->task_se_cmd; | ||
460 | struct se_device *dev = cmd->se_dev; | ||
461 | int ret = 0; | ||
462 | |||
463 | /* | ||
464 | * Call vectorized fileio functions to map struct scatterlist | ||
465 | * physical memory addresses to struct iovec virtual memory. | ||
466 | */ | ||
467 | if (task->task_data_direction == DMA_FROM_DEVICE) { | ||
468 | ret = fd_do_readv(task); | ||
469 | } else { | ||
470 | ret = fd_do_writev(task); | ||
471 | |||
472 | if (ret > 0 && | ||
473 | DEV_ATTRIB(dev)->emulate_write_cache > 0 && | ||
474 | DEV_ATTRIB(dev)->emulate_fua_write > 0 && | ||
475 | T_TASK(cmd)->t_tasks_fua) { | ||
476 | /* | ||
477 | * We might need to be a bit smarter here | ||
478 | * and return some sense data to let the initiator | ||
479 | * know the FUA WRITE cache sync failed? | ||
480 | */ | ||
481 | fd_emulate_write_fua(cmd, task); | ||
482 | } | ||
483 | |||
484 | } | ||
485 | |||
486 | if (ret < 0) | ||
487 | return ret; | ||
488 | if (ret) { | ||
489 | task->task_scsi_status = GOOD; | ||
490 | transport_complete_task(task, 1); | ||
491 | } | ||
492 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
493 | } | ||
494 | |||
495 | /* fd_free_task(): (Part of se_subsystem_api_t template) | ||
496 | * | ||
497 | * | ||
498 | */ | ||
499 | static void fd_free_task(struct se_task *task) | ||
500 | { | ||
501 | struct fd_request *req = FILE_REQ(task); | ||
502 | |||
503 | kfree(req); | ||
504 | } | ||
505 | |||
506 | enum { | ||
507 | Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err | ||
508 | }; | ||
509 | |||
510 | static match_table_t tokens = { | ||
511 | {Opt_fd_dev_name, "fd_dev_name=%s"}, | ||
512 | {Opt_fd_dev_size, "fd_dev_size=%s"}, | ||
513 | {Opt_fd_buffered_io, "fd_buffered_io=%d"}, | ||
514 | {Opt_err, NULL} | ||
515 | }; | ||
516 | |||
517 | static ssize_t fd_set_configfs_dev_params( | ||
518 | struct se_hba *hba, | ||
519 | struct se_subsystem_dev *se_dev, | ||
520 | const char *page, ssize_t count) | ||
521 | { | ||
522 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; | ||
523 | char *orig, *ptr, *arg_p, *opts; | ||
524 | substring_t args[MAX_OPT_ARGS]; | ||
525 | int ret = 0, arg, token; | ||
526 | |||
527 | opts = kstrdup(page, GFP_KERNEL); | ||
528 | if (!opts) | ||
529 | return -ENOMEM; | ||
530 | |||
531 | orig = opts; | ||
532 | |||
533 | while ((ptr = strsep(&opts, ",")) != NULL) { | ||
534 | if (!*ptr) | ||
535 | continue; | ||
536 | |||
537 | token = match_token(ptr, tokens, args); | ||
538 | switch (token) { | ||
539 | case Opt_fd_dev_name: | ||
540 | snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, | ||
541 | "%s", match_strdup(&args[0])); | ||
542 | printk(KERN_INFO "FILEIO: Referencing Path: %s\n", | ||
543 | fd_dev->fd_dev_name); | ||
544 | fd_dev->fbd_flags |= FBDF_HAS_PATH; | ||
545 | break; | ||
546 | case Opt_fd_dev_size: | ||
547 | arg_p = match_strdup(&args[0]); | ||
548 | ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); | ||
549 | if (ret < 0) { | ||
550 | printk(KERN_ERR "strict_strtoull() failed for" | ||
551 | " fd_dev_size=\n"); | ||
552 | goto out; | ||
553 | } | ||
554 | printk(KERN_INFO "FILEIO: Referencing Size: %llu" | ||
555 | " bytes\n", fd_dev->fd_dev_size); | ||
556 | fd_dev->fbd_flags |= FBDF_HAS_SIZE; | ||
557 | break; | ||
558 | case Opt_fd_buffered_io: | ||
559 | match_int(args, &arg); | ||
560 | if (arg != 1) { | ||
561 | printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); | ||
562 | ret = -EINVAL; | ||
563 | goto out; | ||
564 | } | ||
565 | |||
566 | printk(KERN_INFO "FILEIO: Using buffered I/O" | ||
567 | " operations for struct fd_dev\n"); | ||
568 | |||
569 | fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; | ||
570 | break; | ||
571 | default: | ||
572 | break; | ||
573 | } | ||
574 | } | ||
575 | |||
576 | out: | ||
577 | kfree(orig); | ||
578 | return (!ret) ? count : ret; | ||
579 | } | ||
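
A typical control string parsed by this function, as written from userspace to the device's configfs "control" attribute (the path and size below are examples only):

	fd_dev_name=/srv/fileio/disk0,fd_dev_size=4294967296,fd_buffered_io=1
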
580 | |||
581 | static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) | ||
582 | { | ||
583 | struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; | ||
584 | |||
585 | if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { | ||
586 | printk(KERN_ERR "Missing fd_dev_name=\n"); | ||
587 | return -1; | ||
588 | } | ||
589 | |||
590 | return 0; | ||
591 | } | ||
592 | |||
593 | static ssize_t fd_show_configfs_dev_params( | ||
594 | struct se_hba *hba, | ||
595 | struct se_subsystem_dev *se_dev, | ||
596 | char *b) | ||
597 | { | ||
598 | struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; | ||
599 | ssize_t bl = 0; | ||
600 | |||
601 | bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); | ||
602 | bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", | ||
603 | fd_dev->fd_dev_name, fd_dev->fd_dev_size, | ||
604 | (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? | ||
605 | "Buffered" : "Synchronous"); | ||
606 | return bl; | ||
607 | } | ||
608 | |||
609 | /* fd_get_cdb(): (Part of se_subsystem_api_t template) | ||
610 | * | ||
611 | * | ||
612 | */ | ||
613 | static unsigned char *fd_get_cdb(struct se_task *task) | ||
614 | { | ||
615 | struct fd_request *req = FILE_REQ(task); | ||
616 | |||
617 | return req->fd_scsi_cdb; | ||
618 | } | ||
619 | |||
620 | /* fd_get_device_rev(): (Part of se_subsystem_api_t template) | ||
621 | * | ||
622 | * | ||
623 | */ | ||
624 | static u32 fd_get_device_rev(struct se_device *dev) | ||
625 | { | ||
626 | return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ | ||
627 | } | ||
628 | |||
629 | /* fd_get_device_type(): (Part of se_subsystem_api_t template) | ||
630 | * | ||
631 | * | ||
632 | */ | ||
633 | static u32 fd_get_device_type(struct se_device *dev) | ||
634 | { | ||
635 | return TYPE_DISK; | ||
636 | } | ||
637 | |||
638 | static sector_t fd_get_blocks(struct se_device *dev) | ||
639 | { | ||
640 | struct fd_dev *fd_dev = dev->dev_ptr; | ||
641 | unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, | ||
642 | DEV_ATTRIB(dev)->block_size); | ||
643 | |||
644 | return blocks_long; | ||
645 | } | ||
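
For instance, a 4 GiB backing file (fd_dev_size = 4294967296) with the default 512-byte block_size reports div_u64(4294967296, 512) = 8388608 logical blocks.
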
646 | |||
647 | static struct se_subsystem_api fileio_template = { | ||
648 | .name = "fileio", | ||
649 | .owner = THIS_MODULE, | ||
650 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | ||
651 | .attach_hba = fd_attach_hba, | ||
652 | .detach_hba = fd_detach_hba, | ||
653 | .allocate_virtdevice = fd_allocate_virtdevice, | ||
654 | .create_virtdevice = fd_create_virtdevice, | ||
655 | .free_device = fd_free_device, | ||
656 | .dpo_emulated = fd_emulated_dpo, | ||
657 | .fua_write_emulated = fd_emulated_fua_write, | ||
658 | .fua_read_emulated = fd_emulated_fua_read, | ||
659 | .write_cache_emulated = fd_emulated_write_cache, | ||
660 | .alloc_task = fd_alloc_task, | ||
661 | .do_task = fd_do_task, | ||
662 | .do_sync_cache = fd_emulate_sync_cache, | ||
663 | .free_task = fd_free_task, | ||
664 | .check_configfs_dev_params = fd_check_configfs_dev_params, | ||
665 | .set_configfs_dev_params = fd_set_configfs_dev_params, | ||
666 | .show_configfs_dev_params = fd_show_configfs_dev_params, | ||
667 | .get_cdb = fd_get_cdb, | ||
668 | .get_device_rev = fd_get_device_rev, | ||
669 | .get_device_type = fd_get_device_type, | ||
670 | .get_blocks = fd_get_blocks, | ||
671 | }; | ||
672 | |||
673 | static int __init fileio_module_init(void) | ||
674 | { | ||
675 | return transport_subsystem_register(&fileio_template); | ||
676 | } | ||
677 | |||
678 | static void fileio_module_exit(void) | ||
679 | { | ||
680 | transport_subsystem_release(&fileio_template); | ||
681 | } | ||
682 | |||
683 | MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); | ||
684 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | ||
685 | MODULE_LICENSE("GPL"); | ||
686 | |||
687 | module_init(fileio_module_init); | ||
688 | module_exit(fileio_module_exit); | ||
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h new file mode 100644 index 000000000000..ef4de2b4bd46 --- /dev/null +++ b/drivers/target/target_core_file.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #ifndef TARGET_CORE_FILE_H | ||
2 | #define TARGET_CORE_FILE_H | ||
3 | |||
4 | #define FD_VERSION "4.0" | ||
5 | |||
6 | #define FD_MAX_DEV_NAME 256 | ||
7 | /* Maximum queue depth for the FILEIO HBA */ | ||
8 | #define FD_HBA_QUEUE_DEPTH 256 | ||
9 | #define FD_DEVICE_QUEUE_DEPTH 32 | ||
10 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 | ||
11 | #define FD_BLOCKSIZE 512 | ||
12 | #define FD_MAX_SECTORS 1024 | ||
13 | |||
14 | #define RRF_EMULATE_CDB 0x01 | ||
15 | #define RRF_GOT_LBA 0x02 | ||
16 | |||
17 | struct fd_request { | ||
18 | struct se_task fd_task; | ||
19 | /* SCSI CDB from iSCSI Command PDU */ | ||
20 | unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
21 | /* FILEIO device */ | ||
22 | struct fd_dev *fd_dev; | ||
23 | } ____cacheline_aligned; | ||
24 | |||
25 | #define FBDF_HAS_PATH 0x01 | ||
26 | #define FBDF_HAS_SIZE 0x02 | ||
27 | #define FDBD_USE_BUFFERED_IO 0x04 | ||
28 | |||
29 | struct fd_dev { | ||
30 | u32 fbd_flags; | ||
31 | unsigned char fd_dev_name[FD_MAX_DEV_NAME]; | ||
32 | /* Unique FILEIO Device ID in FILEIO HBA */ | ||
33 | u32 fd_dev_id; | ||
34 | /* Number of SG tables in sg_table_array */ | ||
35 | u32 fd_table_count; | ||
36 | u32 fd_queue_depth; | ||
37 | u32 fd_block_size; | ||
38 | unsigned long long fd_dev_size; | ||
39 | struct file *fd_file; | ||
40 | /* FILEIO HBA device is connected to */ | ||
41 | struct fd_host *fd_host; | ||
42 | } ____cacheline_aligned; | ||
43 | |||
44 | struct fd_host { | ||
45 | u32 fd_host_dev_id_count; | ||
46 | /* Unique FILEIO Host ID */ | ||
47 | u32 fd_host_id; | ||
48 | } ____cacheline_aligned; | ||
49 | |||
50 | #endif /* TARGET_CORE_FILE_H */ | ||
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c new file mode 100644 index 000000000000..4bbe8208b241 --- /dev/null +++ b/drivers/target/target_core_hba.c | |||
@@ -0,0 +1,185 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_hba.c | ||
3 | * | ||
4 | * This file contains the TCM HBA Transport related functions. | ||
5 | * | ||
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/net.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/timer.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/in.h> | ||
36 | #include <net/sock.h> | ||
37 | #include <net/tcp.h> | ||
38 | |||
39 | #include <target/target_core_base.h> | ||
40 | #include <target/target_core_device.h> | ||
42 | #include <target/target_core_tpg.h> | ||
43 | #include <target/target_core_transport.h> | ||
44 | |||
45 | #include "target_core_hba.h" | ||
46 | |||
47 | static LIST_HEAD(subsystem_list); | ||
48 | static DEFINE_MUTEX(subsystem_mutex); | ||
49 | |||
50 | int transport_subsystem_register(struct se_subsystem_api *sub_api) | ||
51 | { | ||
52 | struct se_subsystem_api *s; | ||
53 | |||
54 | INIT_LIST_HEAD(&sub_api->sub_api_list); | ||
55 | |||
56 | mutex_lock(&subsystem_mutex); | ||
57 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | ||
58 | if (!(strcmp(s->name, sub_api->name))) { | ||
59 | printk(KERN_ERR "%p is already registered with" | ||
60 | " duplicate name %s, unable to process" | ||
61 | " request\n", s, s->name); | ||
62 | mutex_unlock(&subsystem_mutex); | ||
63 | return -EEXIST; | ||
64 | } | ||
65 | } | ||
66 | list_add_tail(&sub_api->sub_api_list, &subsystem_list); | ||
67 | mutex_unlock(&subsystem_mutex); | ||
68 | |||
69 | printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" | ||
70 | " %p\n", sub_api->name, sub_api->owner); | ||
71 | return 0; | ||
72 | } | ||
73 | EXPORT_SYMBOL(transport_subsystem_register); | ||
74 | |||
75 | void transport_subsystem_release(struct se_subsystem_api *sub_api) | ||
76 | { | ||
77 | mutex_lock(&subsystem_mutex); | ||
78 | list_del(&sub_api->sub_api_list); | ||
79 | mutex_unlock(&subsystem_mutex); | ||
80 | } | ||
81 | EXPORT_SYMBOL(transport_subsystem_release); | ||
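transport_subsystem_register()/transport_subsystem_release() define the registration contract that every backend in this patch (fileio above, iblock below) follows. A minimal sketch of a hypothetical plugin, assuming only the se_subsystem_api fields visible in this patch; "example" is not a plugin added here, and a real template also wires up the attach/detach, task, and configfs callbacks shown in fileio_template and iblock_template:

/* Hypothetical skeleton backend; only the identity fields are filled in */
static struct se_subsystem_api example_template = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.transport_type	= TRANSPORT_PLUGIN_VHBA_PDEV,
};

static int __init example_module_init(void)
{
	/* Fails with -EEXIST if another plugin already registered this name */
	return transport_subsystem_register(&example_template);
}

static void example_module_exit(void)
{
	transport_subsystem_release(&example_template);
}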
82 | |||
83 | static struct se_subsystem_api *core_get_backend(const char *sub_name) | ||
84 | { | ||
85 | struct se_subsystem_api *s; | ||
86 | |||
87 | mutex_lock(&subsystem_mutex); | ||
88 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | ||
89 | if (!strcmp(s->name, sub_name)) | ||
90 | goto found; | ||
91 | } | ||
92 | mutex_unlock(&subsystem_mutex); | ||
93 | return NULL; | ||
94 | found: | ||
95 | if (s->owner && !try_module_get(s->owner)) | ||
96 | s = NULL; | ||
97 | mutex_unlock(&subsystem_mutex); | ||
98 | return s; | ||
99 | } | ||
100 | |||
101 | struct se_hba * | ||
102 | core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | ||
103 | { | ||
104 | struct se_hba *hba; | ||
105 | int ret = 0; | ||
106 | |||
107 | hba = kzalloc(sizeof(*hba), GFP_KERNEL); | ||
108 | if (!hba) { | ||
109 | printk(KERN_ERR "Unable to allocate struct se_hba\n"); | ||
110 | return ERR_PTR(-ENOMEM); | ||
111 | } | ||
112 | |||
113 | INIT_LIST_HEAD(&hba->hba_dev_list); | ||
114 | spin_lock_init(&hba->device_lock); | ||
115 | spin_lock_init(&hba->hba_queue_lock); | ||
116 | mutex_init(&hba->hba_access_mutex); | ||
117 | |||
118 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); | ||
119 | hba->hba_flags |= hba_flags; | ||
120 | |||
121 | atomic_set(&hba->max_queue_depth, 0); | ||
122 | atomic_set(&hba->left_queue_depth, 0); | ||
123 | |||
124 | hba->transport = core_get_backend(plugin_name); | ||
125 | if (!hba->transport) { | ||
126 | ret = -EINVAL; | ||
127 | goto out_free_hba; | ||
128 | } | ||
129 | |||
130 | ret = hba->transport->attach_hba(hba, plugin_dep_id); | ||
131 | if (ret < 0) | ||
132 | goto out_module_put; | ||
133 | |||
134 | spin_lock(&se_global->hba_lock); | ||
135 | hba->hba_id = se_global->g_hba_id_counter++; | ||
136 | list_add_tail(&hba->hba_list, &se_global->g_hba_list); | ||
137 | spin_unlock(&se_global->hba_lock); | ||
138 | |||
139 | printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" | ||
140 | " Core\n", hba->hba_id); | ||
141 | |||
142 | return hba; | ||
143 | |||
144 | out_module_put: | ||
145 | if (hba->transport->owner) | ||
146 | module_put(hba->transport->owner); | ||
147 | hba->transport = NULL; | ||
148 | out_free_hba: | ||
149 | kfree(hba); | ||
150 | return ERR_PTR(ret); | ||
151 | } | ||
152 | |||
153 | int | ||
154 | core_delete_hba(struct se_hba *hba) | ||
155 | { | ||
156 | struct se_device *dev, *dev_tmp; | ||
157 | |||
158 | spin_lock(&hba->device_lock); | ||
159 | list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) { | ||
160 | |||
161 | se_clear_dev_ports(dev); | ||
162 | spin_unlock(&hba->device_lock); | ||
163 | |||
164 | se_release_device_for_hba(dev); | ||
165 | |||
166 | spin_lock(&hba->device_lock); | ||
167 | } | ||
168 | spin_unlock(&hba->device_lock); | ||
169 | |||
170 | hba->transport->detach_hba(hba); | ||
171 | |||
172 | spin_lock(&se_global->hba_lock); | ||
173 | list_del(&hba->hba_list); | ||
174 | spin_unlock(&se_global->hba_lock); | ||
175 | |||
176 | printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" | ||
177 | " Core\n", hba->hba_id); | ||
178 | |||
179 | if (hba->transport->owner) | ||
180 | module_put(hba->transport->owner); | ||
181 | |||
182 | hba->transport = NULL; | ||
183 | kfree(hba); | ||
184 | return 0; | ||
185 | } | ||
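core_alloc_hba() reports failure through ERR_PTR() rather than NULL, so callers must check with IS_ERR()/PTR_ERR(). A hedged sketch of the expected calling pattern; the real caller lives in the configfs code, outside this hunk:

/* Hypothetical caller illustrating the ERR_PTR() contract */
static int example_attach(const char *plugin, u32 dep_id, u32 flags)
{
	struct se_hba *hba;

	hba = core_alloc_hba(plugin, dep_id, flags);
	if (IS_ERR(hba))
		return PTR_ERR(hba);	/* e.g. -ENOMEM or -EINVAL */

	/* ... use the HBA ... */

	return core_delete_hba(hba);
}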
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h new file mode 100644 index 000000000000..bb0fea5f730c --- /dev/null +++ b/drivers/target/target_core_hba.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef TARGET_CORE_HBA_H | ||
2 | #define TARGET_CORE_HBA_H | ||
3 | |||
4 | extern struct se_hba *core_alloc_hba(const char *, u32, u32); | ||
5 | extern int core_delete_hba(struct se_hba *); | ||
6 | |||
7 | #endif /* TARGET_CORE_HBA_H */ | ||
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c new file mode 100644 index 000000000000..c6e0d757e76e --- /dev/null +++ b/drivers/target/target_core_iblock.c | |||
@@ -0,0 +1,808 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_iblock.c | ||
3 | * | ||
4 | * This file contains the Storage Engine <-> Linux BlockIO transport | ||
5 | * specific functions. | ||
6 | * | ||
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | ||
8 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
9 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
10 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
11 | * | ||
12 | * Nicholas A. Bellinger <nab@kernel.org> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | ******************************************************************************/ | ||
29 | |||
30 | #include <linux/version.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/parser.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/fs.h> | ||
35 | #include <linux/blkdev.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/smp_lock.h> | ||
39 | #include <linux/bio.h> | ||
40 | #include <linux/genhd.h> | ||
41 | #include <linux/file.h> | ||
42 | #include <scsi/scsi.h> | ||
43 | #include <scsi/scsi_host.h> | ||
44 | |||
45 | #include <target/target_core_base.h> | ||
46 | #include <target/target_core_device.h> | ||
47 | #include <target/target_core_transport.h> | ||
48 | |||
49 | #include "target_core_iblock.h" | ||
50 | |||
51 | #if 0 | ||
52 | #define DEBUG_IBLOCK(x...) printk(x) | ||
53 | #else | ||
54 | #define DEBUG_IBLOCK(x...) | ||
55 | #endif | ||
56 | |||
57 | static struct se_subsystem_api iblock_template; | ||
58 | |||
59 | static void iblock_bio_done(struct bio *, int); | ||
60 | |||
61 | /* iblock_attach_hba(): (Part of se_subsystem_api_t template) | ||
62 | * | ||
63 | * | ||
64 | */ | ||
65 | static int iblock_attach_hba(struct se_hba *hba, u32 host_id) | ||
66 | { | ||
67 | struct iblock_hba *ib_host; | ||
68 | |||
69 | ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); | ||
70 | if (!(ib_host)) { | ||
71 | printk(KERN_ERR "Unable to allocate memory for" | ||
72 | " struct iblock_hba\n"); | ||
73 | return -ENOMEM; | ||
74 | } | ||
75 | |||
76 | ib_host->iblock_host_id = host_id; | ||
77 | |||
78 | atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); | ||
79 | atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); | ||
80 | hba->hba_ptr = (void *) ib_host; | ||
81 | |||
82 | printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" | ||
83 | " Generic Target Core Stack %s\n", hba->hba_id, | ||
84 | IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); | ||
85 | |||
86 | printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic" | ||
87 | " Target Core TCQ Depth: %d\n", hba->hba_id, | ||
88 | ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth)); | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static void iblock_detach_hba(struct se_hba *hba) | ||
94 | { | ||
95 | struct iblock_hba *ib_host = hba->hba_ptr; | ||
96 | |||
97 | printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" | ||
98 | " Target Core\n", hba->hba_id, ib_host->iblock_host_id); | ||
99 | |||
100 | kfree(ib_host); | ||
101 | hba->hba_ptr = NULL; | ||
102 | } | ||
103 | |||
104 | static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) | ||
105 | { | ||
106 | struct iblock_dev *ib_dev = NULL; | ||
107 | struct iblock_hba *ib_host = hba->hba_ptr; | ||
108 | |||
109 | ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); | ||
110 | if (!(ib_dev)) { | ||
111 | printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); | ||
112 | return NULL; | ||
113 | } | ||
114 | ib_dev->ibd_host = ib_host; | ||
115 | |||
116 | printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); | ||
117 | |||
118 | return ib_dev; | ||
119 | } | ||
120 | |||
121 | static struct se_device *iblock_create_virtdevice( | ||
122 | struct se_hba *hba, | ||
123 | struct se_subsystem_dev *se_dev, | ||
124 | void *p) | ||
125 | { | ||
126 | struct iblock_dev *ib_dev = p; | ||
127 | struct se_device *dev; | ||
128 | struct se_dev_limits dev_limits; | ||
129 | struct block_device *bd = NULL; | ||
130 | struct request_queue *q; | ||
131 | struct queue_limits *limits; | ||
132 | u32 dev_flags = 0; | ||
133 | |||
134 | if (!(ib_dev)) { | ||
135 | printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); | ||
136 | return NULL; | ||
137 | } | ||
138 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | ||
139 | /* | ||
140 | * These settings need to be made tunable.. | ||
141 | */ | ||
142 | ib_dev->ibd_bio_set = bioset_create(32, 64); | ||
143 | if (!(ib_dev->ibd_bio_set)) { | ||
144 | printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); | ||
145 | return NULL; | ||
146 | } | ||
147 | printk(KERN_INFO "IBLOCK: Created bio_set()\n"); | ||
148 | /* | ||
149 | * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path | ||
150 | * has already been set before echo 1 > $HBA/$DEV/enable can run. | ||
151 | */ | ||
152 | printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", | ||
153 | ib_dev->ibd_udev_path); | ||
154 | |||
155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | ||
156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); | ||
157 | if (IS_ERR(bd)) | ||
158 | goto failed; | ||
159 | /* | ||
160 | * Setup the local scope queue_limits from struct request_queue->limits | ||
161 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | ||
162 | */ | ||
163 | q = bdev_get_queue(bd); | ||
164 | limits = &dev_limits.limits; | ||
165 | limits->logical_block_size = bdev_logical_block_size(bd); | ||
166 | limits->max_hw_sectors = queue_max_hw_sectors(q); | ||
167 | limits->max_sectors = queue_max_sectors(q); | ||
168 | dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH; | ||
169 | dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH; | ||
170 | |||
171 | ib_dev->ibd_major = MAJOR(bd->bd_dev); | ||
172 | ib_dev->ibd_minor = MINOR(bd->bd_dev); | ||
173 | ib_dev->ibd_bd = bd; | ||
174 | |||
175 | dev = transport_add_device_to_core_hba(hba, | ||
176 | &iblock_template, se_dev, dev_flags, (void *)ib_dev, | ||
177 | &dev_limits, "IBLOCK", IBLOCK_VERSION); | ||
178 | if (!(dev)) | ||
179 | goto failed; | ||
180 | |||
181 | ib_dev->ibd_depth = dev->queue_depth; | ||
182 | |||
183 | /* | ||
184 | * Check if the underlying struct block_device request_queue supports | ||
185 | * the QUEUE_FLAG_DISCARD bit (for UNMAP/WRITE_SAME in SCSI and TRIM | ||
186 | * in ATA); if so, we need to report TPE=1 | ||
187 | */ | ||
188 | if (blk_queue_discard(bdev_get_queue(bd))) { | ||
189 | struct request_queue *q = bdev_get_queue(bd); | ||
190 | |||
191 | DEV_ATTRIB(dev)->max_unmap_lba_count = | ||
192 | q->limits.max_discard_sectors; | ||
193 | /* | ||
194 | * Currently hardcoded to 1 in Linux/SCSI code.. | ||
195 | */ | ||
196 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1; | ||
197 | DEV_ATTRIB(dev)->unmap_granularity = | ||
198 | q->limits.discard_granularity; | ||
199 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | ||
200 | q->limits.discard_alignment; | ||
201 | |||
202 | printk(KERN_INFO "IBLOCK: BLOCK Discard support available," | ||
203 | " disabled by default\n"); | ||
204 | } | ||
205 | |||
206 | return dev; | ||
207 | |||
208 | failed: | ||
209 | if (ib_dev->ibd_bio_set) { | ||
210 | bioset_free(ib_dev->ibd_bio_set); | ||
211 | ib_dev->ibd_bio_set = NULL; | ||
212 | } | ||
213 | ib_dev->ibd_bd = NULL; | ||
214 | ib_dev->ibd_major = 0; | ||
215 | ib_dev->ibd_minor = 0; | ||
216 | return NULL; | ||
217 | } | ||
218 | |||
219 | static void iblock_free_device(void *p) | ||
220 | { | ||
221 | struct iblock_dev *ib_dev = p; | ||
222 | |||
223 | blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | ||
224 | bioset_free(ib_dev->ibd_bio_set); | ||
225 | kfree(ib_dev); | ||
226 | } | ||
227 | |||
228 | static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) | ||
229 | { | ||
230 | return container_of(task, struct iblock_req, ib_task); | ||
231 | } | ||
232 | |||
233 | static struct se_task * | ||
234 | iblock_alloc_task(struct se_cmd *cmd) | ||
235 | { | ||
236 | struct iblock_req *ib_req; | ||
237 | |||
238 | ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); | ||
239 | if (!(ib_req)) { | ||
240 | printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); | ||
241 | return NULL; | ||
242 | } | ||
243 | |||
244 | ib_req->ib_dev = SE_DEV(cmd)->dev_ptr; | ||
245 | atomic_set(&ib_req->ib_bio_cnt, 0); | ||
246 | return &ib_req->ib_task; | ||
247 | } | ||
248 | |||
249 | static unsigned long long iblock_emulate_read_cap_with_block_size( | ||
250 | struct se_device *dev, | ||
251 | struct block_device *bd, | ||
252 | struct request_queue *q) | ||
253 | { | ||
254 | unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), | ||
255 | bdev_logical_block_size(bd)) - 1); | ||
256 | u32 block_size = bdev_logical_block_size(bd); | ||
257 | |||
258 | if (block_size == DEV_ATTRIB(dev)->block_size) | ||
259 | return blocks_long; | ||
260 | |||
261 | switch (block_size) { | ||
262 | case 4096: | ||
263 | switch (DEV_ATTRIB(dev)->block_size) { | ||
264 | case 2048: | ||
265 | blocks_long <<= 1; | ||
266 | break; | ||
267 | case 1024: | ||
268 | blocks_long <<= 2; | ||
269 | break; | ||
270 | case 512: | ||
271 | blocks_long <<= 3; | ||
272 | default: | ||
273 | break; | ||
274 | } | ||
275 | break; | ||
276 | case 2048: | ||
277 | switch (DEV_ATTRIB(dev)->block_size) { | ||
278 | case 4096: | ||
279 | blocks_long >>= 1; | ||
280 | break; | ||
281 | case 1024: | ||
282 | blocks_long <<= 1; | ||
283 | break; | ||
284 | case 512: | ||
285 | blocks_long <<= 2; | ||
286 | break; | ||
287 | default: | ||
288 | break; | ||
289 | } | ||
290 | break; | ||
291 | case 1024: | ||
292 | switch (DEV_ATTRIB(dev)->block_size) { | ||
293 | case 4096: | ||
294 | blocks_long >>= 2; | ||
295 | break; | ||
296 | case 2048: | ||
297 | blocks_long >>= 1; | ||
298 | break; | ||
299 | case 512: | ||
300 | blocks_long <<= 1; | ||
301 | break; | ||
302 | default: | ||
303 | break; | ||
304 | } | ||
305 | break; | ||
306 | case 512: | ||
307 | switch (DEV_ATTRIB(dev)->block_size) { | ||
308 | case 4096: | ||
309 | blocks_long >>= 3; | ||
310 | break; | ||
311 | case 2048: | ||
312 | blocks_long >>= 2; | ||
313 | break; | ||
314 | case 1024: | ||
315 | blocks_long >>= 1; | ||
316 | break; | ||
317 | default: | ||
318 | break; | ||
319 | } | ||
320 | break; | ||
321 | default: | ||
322 | break; | ||
323 | } | ||
324 | |||
325 | return blocks_long; | ||
326 | } | ||
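Since all four supported block sizes are powers of two, the nested switch above reduces to a single shift by the difference of the base-2 logarithms of the two sizes. A standalone check of that equivalence (illustrative values, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

/* Rescale a block count from the native device block size to the
 * configured se_device block size; both must be powers of two. */
static uint64_t rescale(uint64_t blocks, uint32_t disk_bs, uint32_t se_bs)
{
	int shift = __builtin_ctz(disk_bs) - __builtin_ctz(se_bs);

	return shift >= 0 ? blocks << shift : blocks >> -shift;
}

int main(void)
{
	/* 1000 native 4096-byte blocks seen through a 512-byte se_device */
	printf("%llu\n", (unsigned long long)rescale(1000, 4096, 512)); /* 8000 */
	/* and the reverse direction */
	printf("%llu\n", (unsigned long long)rescale(8000, 512, 4096)); /* 1000 */
	return 0;
}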
327 | |||
328 | /* | ||
329 | * Emulate SYNCHRONIZE_CACHE_* | ||
330 | */ | ||
331 | static void iblock_emulate_sync_cache(struct se_task *task) | ||
332 | { | ||
333 | struct se_cmd *cmd = TASK_CMD(task); | ||
334 | struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; | ||
335 | int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2); | ||
336 | sector_t error_sector; | ||
337 | int ret; | ||
338 | |||
339 | /* | ||
340 | * If the Immediate bit is set, queue up the GOOD response | ||
341 | * for this SYNCHRONIZE_CACHE op | ||
342 | */ | ||
343 | if (immed) | ||
344 | transport_complete_sync_cache(cmd, 1); | ||
345 | |||
346 | /* | ||
347 | * blkdev_issue_flush() does not support specifying a range, so | ||
348 | * we have to flush the entire cache. | ||
349 | */ | ||
350 | ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); | ||
351 | if (ret != 0) { | ||
352 | printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " | ||
353 | " error_sector: %llu\n", ret, | ||
354 | (unsigned long long)error_sector); | ||
355 | } | ||
356 | |||
357 | if (!immed) | ||
358 | transport_complete_sync_cache(cmd, ret == 0); | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Tell TCM Core that we are capable of WriteCache emulation for | ||
363 | * an underlying struct se_device. | ||
364 | */ | ||
365 | static int iblock_emulated_write_cache(struct se_device *dev) | ||
366 | { | ||
367 | return 1; | ||
368 | } | ||
369 | |||
370 | static int iblock_emulated_dpo(struct se_device *dev) | ||
371 | { | ||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs | ||
377 | * for TYPE_DISK. | ||
378 | */ | ||
379 | static int iblock_emulated_fua_write(struct se_device *dev) | ||
380 | { | ||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | static int iblock_emulated_fua_read(struct se_device *dev) | ||
385 | { | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static int iblock_do_task(struct se_task *task) | ||
390 | { | ||
391 | struct se_device *dev = task->task_se_cmd->se_dev; | ||
392 | struct iblock_req *req = IBLOCK_REQ(task); | ||
393 | struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev; | ||
394 | struct request_queue *q = bdev_get_queue(ibd->ibd_bd); | ||
395 | struct bio *bio = req->ib_bio, *nbio = NULL; | ||
396 | int rw; | ||
397 | |||
398 | if (task->task_data_direction == DMA_TO_DEVICE) { | ||
399 | /* | ||
400 | * Force data to disk if we pretend to not have a volatile | ||
401 | * write cache, or the initiator set the Force Unit Access bit. | ||
402 | */ | ||
403 | if (DEV_ATTRIB(dev)->emulate_write_cache == 0 || | ||
404 | (DEV_ATTRIB(dev)->emulate_fua_write > 0 && | ||
405 | T_TASK(task->task_se_cmd)->t_tasks_fua)) | ||
406 | rw = WRITE_FUA; | ||
407 | else | ||
408 | rw = WRITE; | ||
409 | } else { | ||
410 | rw = READ; | ||
411 | } | ||
412 | |||
413 | while (bio) { | ||
414 | nbio = bio->bi_next; | ||
415 | bio->bi_next = NULL; | ||
416 | DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" | ||
417 | " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); | ||
418 | |||
419 | submit_bio(rw, bio); | ||
420 | bio = nbio; | ||
421 | } | ||
422 | |||
423 | if (q->unplug_fn) | ||
424 | q->unplug_fn(q); | ||
425 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
426 | } | ||
427 | |||
428 | static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) | ||
429 | { | ||
430 | struct iblock_dev *ibd = dev->dev_ptr; | ||
431 | struct block_device *bd = ibd->ibd_bd; | ||
432 | int barrier = 0; | ||
433 | |||
434 | return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); | ||
435 | } | ||
436 | |||
437 | static void iblock_free_task(struct se_task *task) | ||
438 | { | ||
439 | struct iblock_req *req = IBLOCK_REQ(task); | ||
440 | struct bio *bio, *hbio = req->ib_bio; | ||
441 | /* | ||
442 | * We only release the bio(s) here if iblock_bio_done() has not called | ||
443 | * bio_put() -> iblock_bio_destructor(). | ||
444 | */ | ||
445 | while (hbio != NULL) { | ||
446 | bio = hbio; | ||
447 | hbio = hbio->bi_next; | ||
448 | bio->bi_next = NULL; | ||
449 | bio_put(bio); | ||
450 | } | ||
451 | |||
452 | kfree(req); | ||
453 | } | ||
454 | |||
455 | enum { | ||
456 | Opt_udev_path, Opt_force, Opt_err | ||
457 | }; | ||
458 | |||
459 | static match_table_t tokens = { | ||
460 | {Opt_udev_path, "udev_path=%s"}, | ||
461 | {Opt_force, "force=%d"}, | ||
462 | {Opt_err, NULL} | ||
463 | }; | ||
464 | |||
465 | static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, | ||
466 | struct se_subsystem_dev *se_dev, | ||
467 | const char *page, ssize_t count) | ||
468 | { | ||
469 | struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; | ||
470 | char *orig, *ptr, *opts; | ||
471 | substring_t args[MAX_OPT_ARGS]; | ||
472 | int ret = 0, arg, token; | ||
473 | |||
474 | opts = kstrdup(page, GFP_KERNEL); | ||
475 | if (!opts) | ||
476 | return -ENOMEM; | ||
477 | |||
478 | orig = opts; | ||
479 | |||
480 | while ((ptr = strsep(&opts, ",")) != NULL) { | ||
481 | if (!*ptr) | ||
482 | continue; | ||
483 | |||
484 | token = match_token(ptr, tokens, args); | ||
485 | switch (token) { | ||
486 | case Opt_udev_path: | ||
487 | if (ib_dev->ibd_bd) { | ||
488 | printk(KERN_ERR "Unable to set udev_path= while" | ||
489 | " ib_dev->ibd_bd exists\n"); | ||
490 | ret = -EEXIST; | ||
491 | goto out; | ||
492 | } | ||
493 | |||
494 | snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, | ||
495 | "%s", match_strdup(&args[0])); | ||
496 | printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", | ||
497 | ib_dev->ibd_udev_path); | ||
498 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; | ||
499 | break; | ||
500 | case Opt_force: | ||
501 | match_int(args, &arg); | ||
502 | ib_dev->ibd_force = arg; | ||
503 | printk(KERN_INFO "IBLOCK: Set force=%d\n", | ||
504 | ib_dev->ibd_force); | ||
505 | break; | ||
506 | default: | ||
507 | break; | ||
508 | } | ||
509 | } | ||
510 | |||
511 | out: | ||
512 | kfree(orig); | ||
513 | return (!ret) ? count : ret; | ||
514 | } | ||
515 | |||
516 | static ssize_t iblock_check_configfs_dev_params( | ||
517 | struct se_hba *hba, | ||
518 | struct se_subsystem_dev *se_dev) | ||
519 | { | ||
520 | struct iblock_dev *ibd = se_dev->se_dev_su_ptr; | ||
521 | |||
522 | if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { | ||
523 | printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); | ||
524 | return -1; | ||
525 | } | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static ssize_t iblock_show_configfs_dev_params( | ||
531 | struct se_hba *hba, | ||
532 | struct se_subsystem_dev *se_dev, | ||
533 | char *b) | ||
534 | { | ||
535 | struct iblock_dev *ibd = se_dev->se_dev_su_ptr; | ||
536 | struct block_device *bd = ibd->ibd_bd; | ||
537 | char buf[BDEVNAME_SIZE]; | ||
538 | ssize_t bl = 0; | ||
539 | |||
540 | if (bd) | ||
541 | bl += sprintf(b + bl, "iBlock device: %s", | ||
542 | bdevname(bd, buf)); | ||
543 | if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { | ||
544 | bl += sprintf(b + bl, " UDEV PATH: %s\n", | ||
545 | ibd->ibd_udev_path); | ||
546 | } else | ||
547 | bl += sprintf(b + bl, "\n"); | ||
548 | |||
549 | bl += sprintf(b + bl, " "); | ||
550 | if (bd) { | ||
551 | bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", | ||
552 | ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ? | ||
553 | "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? | ||
554 | "CLAIMED: IBLOCK" : "CLAIMED: OS"); | ||
555 | } else { | ||
556 | bl += sprintf(b + bl, "Major: %d Minor: %d\n", | ||
557 | ibd->ibd_major, ibd->ibd_minor); | ||
558 | } | ||
559 | |||
560 | return bl; | ||
561 | } | ||
562 | |||
563 | static void iblock_bio_destructor(struct bio *bio) | ||
564 | { | ||
565 | struct se_task *task = bio->bi_private; | ||
566 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; | ||
567 | |||
568 | bio_free(bio, ib_dev->ibd_bio_set); | ||
569 | } | ||
570 | |||
571 | static struct bio *iblock_get_bio( | ||
572 | struct se_task *task, | ||
573 | struct iblock_req *ib_req, | ||
574 | struct iblock_dev *ib_dev, | ||
575 | int *ret, | ||
576 | sector_t lba, | ||
577 | u32 sg_num) | ||
578 | { | ||
579 | struct bio *bio; | ||
580 | |||
581 | bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); | ||
582 | if (!(bio)) { | ||
583 | printk(KERN_ERR "Unable to allocate memory for bio\n"); | ||
584 | *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
585 | return NULL; | ||
586 | } | ||
587 | |||
588 | DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" | ||
589 | " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); | ||
590 | DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); | ||
591 | |||
592 | bio->bi_bdev = ib_dev->ibd_bd; | ||
593 | bio->bi_private = (void *) task; | ||
594 | bio->bi_destructor = iblock_bio_destructor; | ||
595 | bio->bi_end_io = &iblock_bio_done; | ||
596 | bio->bi_sector = lba; | ||
597 | atomic_inc(&ib_req->ib_bio_cnt); | ||
598 | |||
599 | DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); | ||
600 | DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", | ||
601 | atomic_read(&ib_req->ib_bio_cnt)); | ||
602 | return bio; | ||
603 | } | ||
604 | |||
605 | static int iblock_map_task_SG(struct se_task *task) | ||
606 | { | ||
607 | struct se_cmd *cmd = task->task_se_cmd; | ||
608 | struct se_device *dev = SE_DEV(cmd); | ||
609 | struct iblock_dev *ib_dev = task->se_dev->dev_ptr; | ||
610 | struct iblock_req *ib_req = IBLOCK_REQ(task); | ||
611 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | ||
612 | struct scatterlist *sg; | ||
613 | int ret = 0; | ||
614 | u32 i, sg_num = task->task_sg_num; | ||
615 | sector_t block_lba; | ||
616 | /* | ||
617 | * Convert the starting LBA from the struct se_task SCSI blocksize | ||
618 | * into Linux/Block 512-byte units for the BIO. | ||
619 | */ | ||
620 | if (DEV_ATTRIB(dev)->block_size == 4096) | ||
621 | block_lba = (task->task_lba << 3); | ||
622 | else if (DEV_ATTRIB(dev)->block_size == 2048) | ||
623 | block_lba = (task->task_lba << 2); | ||
624 | else if (DEV_ATTRIB(dev)->block_size == 1024) | ||
625 | block_lba = (task->task_lba << 1); | ||
626 | else if (DEV_ATTRIB(dev)->block_size == 512) | ||
627 | block_lba = task->task_lba; | ||
628 | else { | ||
629 | printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" | ||
630 | " %u\n", DEV_ATTRIB(dev)->block_size); | ||
631 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
632 | } | ||
633 | |||
634 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); | ||
635 | if (!(bio)) | ||
636 | return ret; | ||
637 | |||
638 | ib_req->ib_bio = bio; | ||
639 | hbio = tbio = bio; | ||
640 | /* | ||
641 | * Use fs/bio.c:bio_add_page() to set up the bio_vec maplist | ||
642 | * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory. | ||
643 | */ | ||
644 | for_each_sg(task->task_sg, sg, task->task_sg_num, i) { | ||
645 | DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" | ||
646 | " %p len: %u offset: %u\n", task, bio, sg_page(sg), | ||
647 | sg->length, sg->offset); | ||
648 | again: | ||
649 | ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); | ||
650 | if (ret != sg->length) { | ||
651 | |||
652 | DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", | ||
653 | bio->bi_sector); | ||
654 | DEBUG_IBLOCK("** task->task_size: %u\n", | ||
655 | task->task_size); | ||
656 | DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", | ||
657 | bio->bi_max_vecs); | ||
658 | DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", | ||
659 | bio->bi_vcnt); | ||
660 | |||
661 | bio = iblock_get_bio(task, ib_req, ib_dev, &ret, | ||
662 | block_lba, sg_num); | ||
663 | if (!(bio)) | ||
664 | goto fail; | ||
665 | |||
666 | tbio = tbio->bi_next = bio; | ||
667 | DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" | ||
668 | " list, Going to again\n", bio); | ||
669 | goto again; | ||
670 | } | ||
671 | /* Always in 512 byte units for Linux/Block */ | ||
672 | block_lba += sg->length >> IBLOCK_LBA_SHIFT; | ||
673 | sg_num--; | ||
674 | DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" | ||
675 | " sg_num to %u\n", task, sg_num); | ||
676 | DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" | ||
677 | " to %llu\n", task, block_lba); | ||
678 | DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" | ||
679 | " %u\n", task, bio->bi_vcnt); | ||
680 | } | ||
681 | |||
682 | return 0; | ||
683 | fail: | ||
684 | while (hbio) { | ||
685 | bio = hbio; | ||
686 | hbio = hbio->bi_next; | ||
687 | bio->bi_next = NULL; | ||
688 | bio_put(bio); | ||
689 | } | ||
690 | return ret; | ||
691 | } | ||
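The if/else ladder at the top of iblock_map_task_SG() converts a SCSI LBA in the configured block size into the 512-byte units the block layer uses. For the power-of-two sizes it supports, this is a single shift; a sketch under that assumption, using ilog2() from the kernel's <linux/log2.h> (not a drop-in replacement, since it skips the unsupported-size error path):

/* Sketch: equivalent of the block-size ladder for power-of-two sizes */
static sector_t example_to_512_units(sector_t task_lba, u32 block_size)
{
	return task_lba << (ilog2(block_size) - 9);	/* 9 == ilog2(512) */
}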
692 | |||
693 | static unsigned char *iblock_get_cdb(struct se_task *task) | ||
694 | { | ||
695 | return IBLOCK_REQ(task)->ib_scsi_cdb; | ||
696 | } | ||
697 | |||
698 | static u32 iblock_get_device_rev(struct se_device *dev) | ||
699 | { | ||
700 | return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ | ||
701 | } | ||
702 | |||
703 | static u32 iblock_get_device_type(struct se_device *dev) | ||
704 | { | ||
705 | return TYPE_DISK; | ||
706 | } | ||
707 | |||
708 | static sector_t iblock_get_blocks(struct se_device *dev) | ||
709 | { | ||
710 | struct iblock_dev *ibd = dev->dev_ptr; | ||
711 | struct block_device *bd = ibd->ibd_bd; | ||
712 | struct request_queue *q = bdev_get_queue(bd); | ||
713 | |||
714 | return iblock_emulate_read_cap_with_block_size(dev, bd, q); | ||
715 | } | ||
716 | |||
717 | static void iblock_bio_done(struct bio *bio, int err) | ||
718 | { | ||
719 | struct se_task *task = bio->bi_private; | ||
720 | struct iblock_req *ibr = IBLOCK_REQ(task); | ||
721 | /* | ||
722 | * Set -EIO if !BIO_UPTODATE and the passed err is still 0 | ||
723 | */ | ||
724 | if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) | ||
725 | err = -EIO; | ||
726 | |||
727 | if (err != 0) { | ||
728 | printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," | ||
729 | " err: %d\n", bio, err); | ||
730 | /* | ||
731 | * Bump the ib_bio_err_cnt and release bio. | ||
732 | */ | ||
733 | atomic_inc(&ibr->ib_bio_err_cnt); | ||
734 | smp_mb__after_atomic_inc(); | ||
735 | bio_put(bio); | ||
736 | /* | ||
737 | * Wait to complete the task until the last bio has completed. | ||
738 | */ | ||
739 | if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) | ||
740 | return; | ||
741 | |||
742 | ibr->ib_bio = NULL; | ||
743 | transport_complete_task(task, 0); | ||
744 | return; | ||
745 | } | ||
746 | DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", | ||
747 | task, bio, task->task_lba, bio->bi_sector, err); | ||
748 | /* | ||
749 | * bio_put() will call iblock_bio_destructor() to release the bio back | ||
750 | * to ibr->ib_bio_set. | ||
751 | */ | ||
752 | bio_put(bio); | ||
753 | /* | ||
754 | * Wait to complete the task until the last bio has completed. | ||
755 | */ | ||
756 | if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) | ||
757 | return; | ||
758 | /* | ||
759 | * Return GOOD status for task if zero ib_bio_err_cnt exists. | ||
760 | */ | ||
761 | ibr->ib_bio = NULL; | ||
762 | transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); | ||
763 | } | ||
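iblock_bio_done() relies on a per-request bio count so that only the completion of the final in-flight bio completes the task, with errors accumulated along the way. A minimal sketch of that "last one out completes" pattern; struct example_req and the complete callback are hypothetical, standing in for iblock_req and transport_complete_task():

struct example_req {
	atomic_t	bio_cnt;	/* set to the number of bios submitted */
	atomic_t	bio_err_cnt;	/* bumped once per failed bio */
};

static void example_bio_done(struct example_req *req, int err,
			     void (*complete)(struct example_req *, int good))
{
	if (err)
		atomic_inc(&req->bio_err_cnt);

	/* Only the caller that drops the count to zero finishes the task */
	if (!atomic_dec_and_test(&req->bio_cnt))
		return;

	complete(req, !atomic_read(&req->bio_err_cnt));
}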
764 | |||
765 | static struct se_subsystem_api iblock_template = { | ||
766 | .name = "iblock", | ||
767 | .owner = THIS_MODULE, | ||
768 | .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, | ||
769 | .map_task_SG = iblock_map_task_SG, | ||
770 | .attach_hba = iblock_attach_hba, | ||
771 | .detach_hba = iblock_detach_hba, | ||
772 | .allocate_virtdevice = iblock_allocate_virtdevice, | ||
773 | .create_virtdevice = iblock_create_virtdevice, | ||
774 | .free_device = iblock_free_device, | ||
775 | .dpo_emulated = iblock_emulated_dpo, | ||
776 | .fua_write_emulated = iblock_emulated_fua_write, | ||
777 | .fua_read_emulated = iblock_emulated_fua_read, | ||
778 | .write_cache_emulated = iblock_emulated_write_cache, | ||
779 | .alloc_task = iblock_alloc_task, | ||
780 | .do_task = iblock_do_task, | ||
781 | .do_discard = iblock_do_discard, | ||
782 | .do_sync_cache = iblock_emulate_sync_cache, | ||
783 | .free_task = iblock_free_task, | ||
784 | .check_configfs_dev_params = iblock_check_configfs_dev_params, | ||
785 | .set_configfs_dev_params = iblock_set_configfs_dev_params, | ||
786 | .show_configfs_dev_params = iblock_show_configfs_dev_params, | ||
787 | .get_cdb = iblock_get_cdb, | ||
788 | .get_device_rev = iblock_get_device_rev, | ||
789 | .get_device_type = iblock_get_device_type, | ||
790 | .get_blocks = iblock_get_blocks, | ||
791 | }; | ||
792 | |||
793 | static int __init iblock_module_init(void) | ||
794 | { | ||
795 | return transport_subsystem_register(&iblock_template); | ||
796 | } | ||
797 | |||
798 | static void iblock_module_exit(void) | ||
799 | { | ||
800 | transport_subsystem_release(&iblock_template); | ||
801 | } | ||
802 | |||
803 | MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); | ||
804 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | ||
805 | MODULE_LICENSE("GPL"); | ||
806 | |||
807 | module_init(iblock_module_init); | ||
808 | module_exit(iblock_module_exit); | ||
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h new file mode 100644 index 000000000000..64c1f4d69f76 --- /dev/null +++ b/drivers/target/target_core_iblock.h | |||
@@ -0,0 +1,40 @@ | |||
1 | #ifndef TARGET_CORE_IBLOCK_H | ||
2 | #define TARGET_CORE_IBLOCK_H | ||
3 | |||
4 | #define IBLOCK_VERSION "4.0" | ||
5 | |||
6 | #define IBLOCK_HBA_QUEUE_DEPTH 512 | ||
7 | #define IBLOCK_DEVICE_QUEUE_DEPTH 32 | ||
8 | #define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 | ||
9 | #define IBLOCK_MAX_CDBS 16 | ||
10 | #define IBLOCK_LBA_SHIFT 9 | ||
11 | |||
12 | struct iblock_req { | ||
13 | struct se_task ib_task; | ||
14 | unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
15 | atomic_t ib_bio_cnt; | ||
16 | atomic_t ib_bio_err_cnt; | ||
17 | struct bio *ib_bio; | ||
18 | struct iblock_dev *ib_dev; | ||
19 | } ____cacheline_aligned; | ||
20 | |||
21 | #define IBDF_HAS_UDEV_PATH 0x01 | ||
22 | #define IBDF_HAS_FORCE 0x02 | ||
23 | |||
24 | struct iblock_dev { | ||
25 | unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; | ||
26 | int ibd_force; | ||
27 | int ibd_major; | ||
28 | int ibd_minor; | ||
29 | u32 ibd_depth; | ||
30 | u32 ibd_flags; | ||
31 | struct bio_set *ibd_bio_set; | ||
32 | struct block_device *ibd_bd; | ||
33 | struct iblock_hba *ibd_host; | ||
34 | } ____cacheline_aligned; | ||
35 | |||
36 | struct iblock_hba { | ||
37 | int iblock_host_id; | ||
38 | } ____cacheline_aligned; | ||
39 | |||
40 | #endif /* TARGET_CORE_IBLOCK_H */ | ||
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c new file mode 100644 index 000000000000..d5a48aa0d2d1 --- /dev/null +++ b/drivers/target/target_core_mib.c | |||
@@ -0,0 +1,1078 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_mib.c | ||
3 | * | ||
4 | * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. | ||
5 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
6 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
7 | * | ||
8 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
23 | * | ||
24 | ******************************************************************************/ | ||
25 | |||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/timer.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/version.h> | ||
33 | #include <generated/utsrelease.h> | ||
34 | #include <linux/utsname.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | #include <linux/seq_file.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <scsi/scsi.h> | ||
39 | #include <scsi/scsi_device.h> | ||
40 | #include <scsi/scsi_host.h> | ||
41 | |||
42 | #include <target/target_core_base.h> | ||
43 | #include <target/target_core_transport.h> | ||
44 | #include <target/target_core_fabric_ops.h> | ||
45 | #include <target/target_core_configfs.h> | ||
46 | |||
47 | #include "target_core_hba.h" | ||
48 | #include "target_core_mib.h" | ||
49 | |||
50 | /* SCSI mib table index */ | ||
51 | static struct scsi_index_table scsi_index_table; | ||
52 | |||
53 | #ifndef INITIAL_JIFFIES | ||
54 | #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) | ||
55 | #endif | ||
56 | |||
57 | /* SCSI Instance Table */ | ||
58 | #define SCSI_INST_SW_INDEX 1 | ||
59 | #define SCSI_TRANSPORT_INDEX 1 | ||
60 | |||
61 | #define NONE "None" | ||
62 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | ||
63 | |||
64 | static inline int list_is_first(const struct list_head *list, | ||
65 | const struct list_head *head) | ||
66 | { | ||
67 | return list->prev == head; | ||
68 | } | ||
69 | |||
70 | static void *locate_hba_start( | ||
71 | struct seq_file *seq, | ||
72 | loff_t *pos) | ||
73 | { | ||
74 | spin_lock(&se_global->g_device_lock); | ||
75 | return seq_list_start(&se_global->g_se_dev_list, *pos); | ||
76 | } | ||
77 | |||
78 | static void *locate_hba_next( | ||
79 | struct seq_file *seq, | ||
80 | void *v, | ||
81 | loff_t *pos) | ||
82 | { | ||
83 | return seq_list_next(v, &se_global->g_se_dev_list, pos); | ||
84 | } | ||
85 | |||
86 | static void locate_hba_stop(struct seq_file *seq, void *v) | ||
87 | { | ||
88 | spin_unlock(&se_global->g_device_lock); | ||
89 | } | ||
90 | |||
91 | /**************************************************************************** | ||
92 | * SCSI MIB Tables | ||
93 | ****************************************************************************/ | ||
94 | |||
95 | /* | ||
96 | * SCSI Instance Table | ||
97 | */ | ||
98 | static void *scsi_inst_seq_start( | ||
99 | struct seq_file *seq, | ||
100 | loff_t *pos) | ||
101 | { | ||
102 | spin_lock(&se_global->hba_lock); | ||
103 | return seq_list_start(&se_global->g_hba_list, *pos); | ||
104 | } | ||
105 | |||
106 | static void *scsi_inst_seq_next( | ||
107 | struct seq_file *seq, | ||
108 | void *v, | ||
109 | loff_t *pos) | ||
110 | { | ||
111 | return seq_list_next(v, &se_global->g_hba_list, pos); | ||
112 | } | ||
113 | |||
114 | static void scsi_inst_seq_stop(struct seq_file *seq, void *v) | ||
115 | { | ||
116 | spin_unlock(&se_global->hba_lock); | ||
117 | } | ||
118 | |||
119 | static int scsi_inst_seq_show(struct seq_file *seq, void *v) | ||
120 | { | ||
121 | struct se_hba *hba = list_entry(v, struct se_hba, hba_list); | ||
122 | |||
123 | if (list_is_first(&hba->hba_list, &se_global->g_hba_list)) | ||
124 | seq_puts(seq, "inst sw_indx\n"); | ||
125 | |||
126 | seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX); | ||
127 | seq_printf(seq, "plugin: %s version: %s\n", | ||
128 | hba->transport->name, TARGET_CORE_VERSION); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static const struct seq_operations scsi_inst_seq_ops = { | ||
134 | .start = scsi_inst_seq_start, | ||
135 | .next = scsi_inst_seq_next, | ||
136 | .stop = scsi_inst_seq_stop, | ||
137 | .show = scsi_inst_seq_show | ||
138 | }; | ||
139 | |||
140 | static int scsi_inst_seq_open(struct inode *inode, struct file *file) | ||
141 | { | ||
142 | return seq_open(file, &scsi_inst_seq_ops); | ||
143 | } | ||
144 | |||
145 | static const struct file_operations scsi_inst_seq_fops = { | ||
146 | .owner = THIS_MODULE, | ||
147 | .open = scsi_inst_seq_open, | ||
148 | .read = seq_read, | ||
149 | .llseek = seq_lseek, | ||
150 | .release = seq_release, | ||
151 | }; | ||
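Each MIB table in this file follows the same seq_file recipe: start/next/stop/show ops, plus an open that hands them to seq_open() and a file_operations wrapping the seq helpers. A hedged sketch of how such a table would be exposed under procfs with proc_create(); the directory name is illustrative, and the actual registration for these tables appears later in target_core_mib.c, outside the lines shown here:

/* Hypothetical registration of the instance table under procfs */
static int __init example_mib_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("scsi_target_mib", NULL);
	if (!dir)
		return -ENOMEM;

	if (!proc_create("scsi_inst", 0, dir, &scsi_inst_seq_fops))
		return -ENOMEM;

	return 0;
}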
152 | |||
153 | /* | ||
154 | * SCSI Device Table | ||
155 | */ | ||
156 | static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
157 | { | ||
158 | return locate_hba_start(seq, pos); | ||
159 | } | ||
160 | |||
161 | static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
162 | { | ||
163 | return locate_hba_next(seq, v, pos); | ||
164 | } | ||
165 | |||
166 | static void scsi_dev_seq_stop(struct seq_file *seq, void *v) | ||
167 | { | ||
168 | locate_hba_stop(seq, v); | ||
169 | } | ||
170 | |||
171 | static int scsi_dev_seq_show(struct seq_file *seq, void *v) | ||
172 | { | ||
173 | struct se_hba *hba; | ||
174 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
175 | g_se_dev_list); | ||
176 | struct se_device *dev = se_dev->se_dev_ptr; | ||
177 | char str[28]; | ||
178 | int k; | ||
179 | |||
180 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
181 | seq_puts(seq, "inst indx role ports\n"); | ||
182 | |||
183 | if (!(dev)) | ||
184 | return 0; | ||
185 | |||
186 | hba = dev->se_hba; | ||
187 | if (!(hba)) { | ||
188 | /* Log error ? */ | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | seq_printf(seq, "%u %u %s %u\n", hba->hba_index, | ||
193 | dev->dev_index, "Target", dev->dev_port_count); | ||
194 | |||
195 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
196 | |||
197 | /* vendor */ | ||
198 | for (k = 0; k < 8; k++) | ||
199 | str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ? | ||
200 | DEV_T10_WWN(dev)->vendor[k] : 0x20; | ||
201 | str[k] = 0x20; | ||
202 | |||
203 | /* model */ | ||
204 | for (k = 0; k < 16; k++) | ||
205 | str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ? | ||
206 | DEV_T10_WWN(dev)->model[k] : 0x20; | ||
207 | str[k + 9] = 0; | ||
208 | |||
209 | seq_printf(seq, "dev_alias: %s\n", str); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static const struct seq_operations scsi_dev_seq_ops = { | ||
215 | .start = scsi_dev_seq_start, | ||
216 | .next = scsi_dev_seq_next, | ||
217 | .stop = scsi_dev_seq_stop, | ||
218 | .show = scsi_dev_seq_show | ||
219 | }; | ||
220 | |||
221 | static int scsi_dev_seq_open(struct inode *inode, struct file *file) | ||
222 | { | ||
223 | return seq_open(file, &scsi_dev_seq_ops); | ||
224 | } | ||
225 | |||
226 | static const struct file_operations scsi_dev_seq_fops = { | ||
227 | .owner = THIS_MODULE, | ||
228 | .open = scsi_dev_seq_open, | ||
229 | .read = seq_read, | ||
230 | .llseek = seq_lseek, | ||
231 | .release = seq_release, | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * SCSI Port Table | ||
236 | */ | ||
237 | static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
238 | { | ||
239 | return locate_hba_start(seq, pos); | ||
240 | } | ||
241 | |||
242 | static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
243 | { | ||
244 | return locate_hba_next(seq, v, pos); | ||
245 | } | ||
246 | |||
247 | static void scsi_port_seq_stop(struct seq_file *seq, void *v) | ||
248 | { | ||
249 | locate_hba_stop(seq, v); | ||
250 | } | ||
251 | |||
252 | static int scsi_port_seq_show(struct seq_file *seq, void *v) | ||
253 | { | ||
254 | struct se_hba *hba; | ||
255 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
256 | g_se_dev_list); | ||
257 | struct se_device *dev = se_dev->se_dev_ptr; | ||
258 | struct se_port *sep, *sep_tmp; | ||
259 | |||
260 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
261 | seq_puts(seq, "inst device indx role busy_count\n"); | ||
262 | |||
263 | if (!(dev)) | ||
264 | return 0; | ||
265 | |||
266 | hba = dev->se_hba; | ||
267 | if (!(hba)) { | ||
268 | /* Log error ? */ | ||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | /* FIXME: scsiPortBusyStatuses count */ | ||
273 | spin_lock(&dev->se_port_lock); | ||
274 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
275 | seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index, | ||
276 | dev->dev_index, sep->sep_index, "Device", | ||
277 | dev->dev_index, 0); | ||
278 | } | ||
279 | spin_unlock(&dev->se_port_lock); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static const struct seq_operations scsi_port_seq_ops = { | ||
285 | .start = scsi_port_seq_start, | ||
286 | .next = scsi_port_seq_next, | ||
287 | .stop = scsi_port_seq_stop, | ||
288 | .show = scsi_port_seq_show | ||
289 | }; | ||
290 | |||
291 | static int scsi_port_seq_open(struct inode *inode, struct file *file) | ||
292 | { | ||
293 | return seq_open(file, &scsi_port_seq_ops); | ||
294 | } | ||
295 | |||
296 | static const struct file_operations scsi_port_seq_fops = { | ||
297 | .owner = THIS_MODULE, | ||
298 | .open = scsi_port_seq_open, | ||
299 | .read = seq_read, | ||
300 | .llseek = seq_lseek, | ||
301 | .release = seq_release, | ||
302 | }; | ||
303 | |||
304 | /* | ||
305 | * SCSI Transport Table | ||
306 | */ | ||
307 | static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos) | ||
308 | { | ||
309 | return locate_hba_start(seq, pos); | ||
310 | } | ||
311 | |||
312 | static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
313 | { | ||
314 | return locate_hba_next(seq, v, pos); | ||
315 | } | ||
316 | |||
317 | static void scsi_transport_seq_stop(struct seq_file *seq, void *v) | ||
318 | { | ||
319 | locate_hba_stop(seq, v); | ||
320 | } | ||
321 | |||
322 | static int scsi_transport_seq_show(struct seq_file *seq, void *v) | ||
323 | { | ||
324 | struct se_hba *hba; | ||
325 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
326 | g_se_dev_list); | ||
327 | struct se_device *dev = se_dev->se_dev_ptr; | ||
328 | struct se_port *se, *se_tmp; | ||
329 | struct se_portal_group *tpg; | ||
330 | struct t10_wwn *wwn; | ||
331 | char buf[64]; | ||
332 | |||
333 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
334 | seq_puts(seq, "inst device indx dev_name\n"); | ||
335 | |||
336 | if (!(dev)) | ||
337 | return 0; | ||
338 | |||
339 | hba = dev->se_hba; | ||
340 | if (!(hba)) { | ||
341 | /* Log error ? */ | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | wwn = DEV_T10_WWN(dev); | ||
346 | |||
347 | spin_lock(&dev->se_port_lock); | ||
348 | list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) { | ||
349 | tpg = se->sep_tpg; | ||
350 | sprintf(buf, "scsiTransport%s", | ||
351 | TPG_TFO(tpg)->get_fabric_name()); | ||
352 | |||
353 | seq_printf(seq, "%u %s %u %s+%s\n", | ||
354 | hba->hba_index, /* scsiTransportIndex */ | ||
355 | buf, /* scsiTransportType */ | ||
356 | (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ? | ||
357 | TPG_TFO(tpg)->tpg_get_inst_index(tpg) : | ||
358 | 0, | ||
359 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
360 | (strlen(wwn->unit_serial)) ? | ||
361 | /* scsiTransportDevName */ | ||
362 | wwn->unit_serial : wwn->vendor); | ||
363 | } | ||
364 | spin_unlock(&dev->se_port_lock); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static const struct seq_operations scsi_transport_seq_ops = { | ||
370 | .start = scsi_transport_seq_start, | ||
371 | .next = scsi_transport_seq_next, | ||
372 | .stop = scsi_transport_seq_stop, | ||
373 | .show = scsi_transport_seq_show | ||
374 | }; | ||
375 | |||
376 | static int scsi_transport_seq_open(struct inode *inode, struct file *file) | ||
377 | { | ||
378 | return seq_open(file, &scsi_transport_seq_ops); | ||
379 | } | ||
380 | |||
381 | static const struct file_operations scsi_transport_seq_fops = { | ||
382 | .owner = THIS_MODULE, | ||
383 | .open = scsi_transport_seq_open, | ||
384 | .read = seq_read, | ||
385 | .llseek = seq_lseek, | ||
386 | .release = seq_release, | ||
387 | }; | ||
388 | |||
389 | /* | ||
390 | * SCSI Target Device Table | ||
391 | */ | ||
392 | static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
393 | { | ||
394 | return locate_hba_start(seq, pos); | ||
395 | } | ||
396 | |||
397 | static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
398 | { | ||
399 | return locate_hba_next(seq, v, pos); | ||
400 | } | ||
401 | |||
402 | static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v) | ||
403 | { | ||
404 | locate_hba_stop(seq, v); | ||
405 | } | ||
406 | |||
407 | |||
408 | #define LU_COUNT 1 /* for now */ | ||
409 | static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v) | ||
410 | { | ||
411 | struct se_hba *hba; | ||
412 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
413 | g_se_dev_list); | ||
414 | struct se_device *dev = se_dev->se_dev_ptr; | ||
415 | int non_accessible_lus = 0; | ||
416 | char status[16]; | ||
417 | |||
418 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
419 | seq_puts(seq, "inst indx num_LUs status non_access_LUs" | ||
420 | " resets\n"); | ||
421 | |||
422 | if (!(dev)) | ||
423 | return 0; | ||
424 | |||
425 | hba = dev->se_hba; | ||
426 | if (!(hba)) { | ||
427 | /* Log error ? */ | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | switch (dev->dev_status) { | ||
432 | case TRANSPORT_DEVICE_ACTIVATED: | ||
433 | strcpy(status, "activated"); | ||
434 | break; | ||
435 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
436 | strcpy(status, "deactivated"); | ||
437 | non_accessible_lus = 1; | ||
438 | break; | ||
439 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
440 | strcpy(status, "shutdown"); | ||
441 | non_accessible_lus = 1; | ||
442 | break; | ||
443 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
444 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
445 | strcpy(status, "offline"); | ||
446 | non_accessible_lus = 1; | ||
447 | break; | ||
448 | default: | ||
449 | sprintf(status, "unknown(%d)", dev->dev_status); | ||
450 | non_accessible_lus = 1; | ||
451 | } | ||
452 | |||
453 | seq_printf(seq, "%u %u %u %s %u %u\n", | ||
454 | hba->hba_index, dev->dev_index, LU_COUNT, | ||
455 | status, non_accessible_lus, dev->num_resets); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static const struct seq_operations scsi_tgt_dev_seq_ops = { | ||
461 | .start = scsi_tgt_dev_seq_start, | ||
462 | .next = scsi_tgt_dev_seq_next, | ||
463 | .stop = scsi_tgt_dev_seq_stop, | ||
464 | .show = scsi_tgt_dev_seq_show | ||
465 | }; | ||
466 | |||
467 | static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) | ||
468 | { | ||
469 | return seq_open(file, &scsi_tgt_dev_seq_ops); | ||
470 | } | ||
471 | |||
472 | static const struct file_operations scsi_tgt_dev_seq_fops = { | ||
473 | .owner = THIS_MODULE, | ||
474 | .open = scsi_tgt_dev_seq_open, | ||
475 | .read = seq_read, | ||
476 | .llseek = seq_lseek, | ||
477 | .release = seq_release, | ||
478 | }; | ||
479 | |||
480 | /* | ||
481 | * SCSI Target Port Table | ||
482 | */ | ||
483 | static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
484 | { | ||
485 | return locate_hba_start(seq, pos); | ||
486 | } | ||
487 | |||
488 | static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
489 | { | ||
490 | return locate_hba_next(seq, v, pos); | ||
491 | } | ||
492 | |||
493 | static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) | ||
494 | { | ||
495 | locate_hba_stop(seq, v); | ||
496 | } | ||
497 | |||
498 | static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) | ||
499 | { | ||
500 | struct se_hba *hba; | ||
501 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
502 | g_se_dev_list); | ||
503 | struct se_device *dev = se_dev->se_dev_ptr; | ||
504 | struct se_port *sep, *sep_tmp; | ||
505 | struct se_portal_group *tpg; | ||
506 | u32 rx_mbytes, tx_mbytes; | ||
507 | unsigned long long num_cmds; | ||
508 | char buf[64]; | ||
509 | |||
510 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
511 | seq_puts(seq, "inst device indx name port_index in_cmds" | ||
512 | " write_mbytes read_mbytes hs_in_cmds\n"); | ||
513 | |||
514 | if (!(dev)) | ||
515 | return 0; | ||
516 | |||
517 | hba = dev->se_hba; | ||
518 | if (!(hba)) { | ||
519 | /* Log error ? */ | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | spin_lock(&dev->se_port_lock); | ||
524 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
525 | tpg = sep->sep_tpg; | ||
526 | sprintf(buf, "%sPort#", | ||
527 | TPG_TFO(tpg)->get_fabric_name()); | ||
528 | |||
529 | seq_printf(seq, "%u %u %u %s%d %s%s%d ", | ||
530 | hba->hba_index, | ||
531 | dev->dev_index, | ||
532 | sep->sep_index, | ||
533 | buf, sep->sep_index, | ||
534 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | ||
535 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
536 | |||
537 | spin_lock(&sep->sep_lun->lun_sep_lock); | ||
538 | num_cmds = sep->sep_stats.cmd_pdus; | ||
539 | rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); | ||
540 | tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); | ||
541 | spin_unlock(&sep->sep_lun->lun_sep_lock); | ||
542 | |||
543 | seq_printf(seq, "%llu %u %u %u\n", num_cmds, | ||
544 | rx_mbytes, tx_mbytes, 0); | ||
545 | } | ||
546 | spin_unlock(&dev->se_port_lock); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static const struct seq_operations scsi_tgt_port_seq_ops = { | ||
552 | .start = scsi_tgt_port_seq_start, | ||
553 | .next = scsi_tgt_port_seq_next, | ||
554 | .stop = scsi_tgt_port_seq_stop, | ||
555 | .show = scsi_tgt_port_seq_show | ||
556 | }; | ||
557 | |||
558 | static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) | ||
559 | { | ||
560 | return seq_open(file, &scsi_tgt_port_seq_ops); | ||
561 | } | ||
562 | |||
563 | static const struct file_operations scsi_tgt_port_seq_fops = { | ||
564 | .owner = THIS_MODULE, | ||
565 | .open = scsi_tgt_port_seq_open, | ||
566 | .read = seq_read, | ||
567 | .llseek = seq_lseek, | ||
568 | .release = seq_release, | ||
569 | }; | ||
570 | |||
571 | /* | ||
572 | * SCSI Authorized Initiator Table: | ||
573 | * It contains the SCSI Initiators authorized to be attached to one of the | ||
574 | * local Target ports. | ||
575 | * Iterates through all active TPGs and extracts the info from the ACLs | ||
576 | */ | ||
577 | static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) | ||
578 | { | ||
579 | spin_lock_bh(&se_global->se_tpg_lock); | ||
580 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
581 | } | ||
582 | |||
583 | static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, | ||
584 | loff_t *pos) | ||
585 | { | ||
586 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
587 | } | ||
588 | |||
589 | static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) | ||
590 | { | ||
591 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
592 | } | ||
593 | |||
594 | static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) | ||
595 | { | ||
596 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
597 | se_tpg_list); | ||
598 | struct se_dev_entry *deve; | ||
599 | struct se_lun *lun; | ||
600 | struct se_node_acl *se_nacl; | ||
601 | int j; | ||
602 | |||
603 | if (list_is_first(&se_tpg->se_tpg_list, | ||
604 | &se_global->g_se_tpg_list)) | ||
605 | seq_puts(seq, "inst dev port indx dev_or_port intr_name " | ||
606 | "map_indx att_count num_cmds read_mbytes " | ||
607 | "write_mbytes hs_num_cmds creation_time row_status\n"); | ||
608 | |||
609 | if (!(se_tpg)) | ||
610 | return 0; | ||
611 | |||
612 | spin_lock(&se_tpg->acl_node_lock); | ||
613 | list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { | ||
614 | |||
615 | atomic_inc(&se_nacl->mib_ref_count); | ||
616 | smp_mb__after_atomic_inc(); | ||
617 | spin_unlock(&se_tpg->acl_node_lock); | ||
618 | |||
619 | spin_lock_irq(&se_nacl->device_list_lock); | ||
620 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
621 | deve = &se_nacl->device_list[j]; | ||
622 | if (!(deve->lun_flags & | ||
623 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
624 | (!deve->se_lun)) | ||
625 | continue; | ||
626 | lun = deve->se_lun; | ||
627 | if (!lun->lun_se_dev) | ||
628 | continue; | ||
629 | |||
630 | seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" | ||
631 | " %u %s\n", | ||
632 | /* scsiInstIndex */ | ||
633 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
634 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
635 | 0, | ||
636 | /* scsiDeviceIndex */ | ||
637 | lun->lun_se_dev->dev_index, | ||
638 | /* scsiAuthIntrTgtPortIndex */ | ||
639 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
640 | /* scsiAuthIntrIndex */ | ||
641 | se_nacl->acl_index, | ||
642 | /* scsiAuthIntrDevOrPort */ | ||
643 | 1, | ||
644 | /* scsiAuthIntrName */ | ||
645 | se_nacl->initiatorname[0] ? | ||
646 | se_nacl->initiatorname : NONE, | ||
647 | /* FIXME: scsiAuthIntrLunMapIndex */ | ||
648 | 0, | ||
649 | /* scsiAuthIntrAttachedTimes */ | ||
650 | deve->attach_count, | ||
651 | /* scsiAuthIntrOutCommands */ | ||
652 | deve->total_cmds, | ||
653 | /* scsiAuthIntrReadMegaBytes */ | ||
654 | (u32)(deve->read_bytes >> 20), | ||
655 | /* scsiAuthIntrWrittenMegaBytes */ | ||
656 | (u32)(deve->write_bytes >> 20), | ||
657 | /* FIXME: scsiAuthIntrHSOutCommands */ | ||
658 | 0, | ||
659 | /* scsiAuthIntrLastCreation */ | ||
660 | (u32)(((u32)deve->creation_time - | ||
661 | INITIAL_JIFFIES) * 100 / HZ), | ||
662 | /* FIXME: scsiAuthIntrRowStatus */ | ||
663 | "Ready"); | ||
664 | } | ||
665 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
666 | |||
667 | spin_lock(&se_tpg->acl_node_lock); | ||
668 | atomic_dec(&se_nacl->mib_ref_count); | ||
669 | smp_mb__after_atomic_dec(); | ||
670 | } | ||
671 | spin_unlock(&se_tpg->acl_node_lock); | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static const struct seq_operations scsi_auth_intr_seq_ops = { | ||
677 | .start = scsi_auth_intr_seq_start, | ||
678 | .next = scsi_auth_intr_seq_next, | ||
679 | .stop = scsi_auth_intr_seq_stop, | ||
680 | .show = scsi_auth_intr_seq_show | ||
681 | }; | ||
682 | |||
683 | static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) | ||
684 | { | ||
685 | return seq_open(file, &scsi_auth_intr_seq_ops); | ||
686 | } | ||
687 | |||
688 | static const struct file_operations scsi_auth_intr_seq_fops = { | ||
689 | .owner = THIS_MODULE, | ||
690 | .open = scsi_auth_intr_seq_open, | ||
691 | .read = seq_read, | ||
692 | .llseek = seq_lseek, | ||
693 | .release = seq_release, | ||
694 | }; | ||
695 | |||
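The creation_time column below reports centiseconds since boot: the raw jiffies stamp is rebased against INITIAL_JIFFIES (which deliberately starts the counter near its wrap point) and scaled by 100/HZ. A stand-alone sketch of the conversion, with EXAMPLE_HZ as an assumed tick rate:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_HZ              250     /* assumed CONFIG_HZ */
    #define EXAMPLE_INITIAL_JIFFIES ((uint32_t)(-300 * EXAMPLE_HZ))

    static uint32_t jiffies_to_centiseconds(uint32_t stamp)
    {
            /* unsigned subtraction cancels the deliberate initial offset */
            return (stamp - EXAMPLE_INITIAL_JIFFIES) * 100 / EXAMPLE_HZ;
    }

    int main(void)
    {
            /* a stamp taken 10 seconds after boot -> 1000 centiseconds */
            uint32_t stamp = EXAMPLE_INITIAL_JIFFIES + 10 * EXAMPLE_HZ;

            printf("%u\n", jiffies_to_centiseconds(stamp));
            return 0;
    }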
696 | /* | ||
697 | * SCSI Attached Initiator Port Table: | ||
698 | * It lists the SCSI Initiators attached to one of the local Target ports. | ||
699 | * Iterates through all active TPGs and uses the active sessions from each TPG | ||
700 | * to list the info for this table. | ||
701 | */ | ||
702 | static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
703 | { | ||
704 | spin_lock_bh(&se_global->se_tpg_lock); | ||
705 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
706 | } | ||
707 | |||
708 | static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, | ||
709 | loff_t *pos) | ||
710 | { | ||
711 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
712 | } | ||
713 | |||
714 | static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) | ||
715 | { | ||
716 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
717 | } | ||
718 | |||
719 | static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) | ||
720 | { | ||
721 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
722 | se_tpg_list); | ||
723 | struct se_dev_entry *deve; | ||
724 | struct se_lun *lun; | ||
725 | struct se_node_acl *se_nacl; | ||
726 | struct se_session *se_sess; | ||
727 | unsigned char buf[64]; | ||
728 | int j; | ||
729 | |||
730 | if (list_is_first(&se_tpg->se_tpg_list, | ||
731 | &se_global->g_se_tpg_list)) | ||
732 | seq_puts(seq, "inst dev port indx port_auth_indx port_name" | ||
733 | " port_ident\n"); | ||
734 | |||
735 | if (!(se_tpg)) | ||
736 | return 0; | ||
737 | |||
738 | spin_lock(&se_tpg->session_lock); | ||
739 | list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { | ||
740 | if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || | ||
741 | (!se_sess->se_node_acl) || | ||
742 | (!se_sess->se_node_acl->device_list)) | ||
743 | continue; | ||
744 | |||
745 | atomic_inc(&se_sess->mib_ref_count); | ||
746 | smp_mb__after_atomic_inc(); | ||
747 | se_nacl = se_sess->se_node_acl; | ||
748 | atomic_inc(&se_nacl->mib_ref_count); | ||
749 | smp_mb__after_atomic_inc(); | ||
750 | spin_unlock(&se_tpg->session_lock); | ||
751 | |||
752 | spin_lock_irq(&se_nacl->device_list_lock); | ||
753 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
754 | deve = &se_nacl->device_list[j]; | ||
755 | if (!(deve->lun_flags & | ||
756 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
757 | (!deve->se_lun)) | ||
758 | continue; | ||
759 | |||
760 | lun = deve->se_lun; | ||
761 | if (!lun->lun_se_dev) | ||
762 | continue; | ||
763 | |||
764 | memset(buf, 0, 64); | ||
765 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) | ||
766 | TPG_TFO(se_tpg)->sess_get_initiator_sid( | ||
767 | se_sess, (unsigned char *)&buf[0], 64); | ||
768 | |||
769 | seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", | ||
770 | /* scsiInstIndex */ | ||
771 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
772 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
773 | 0, | ||
774 | /* scsiDeviceIndex */ | ||
775 | lun->lun_se_dev->dev_index, | ||
776 | /* scsiPortIndex */ | ||
777 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
778 | /* scsiAttIntrPortIndex */ | ||
779 | (TPG_TFO(se_tpg)->sess_get_index != NULL) ? | ||
780 | TPG_TFO(se_tpg)->sess_get_index(se_sess) : | ||
781 | 0, | ||
782 | /* scsiAttIntrPortAuthIntrIdx */ | ||
783 | se_nacl->acl_index, | ||
784 | /* scsiAttIntrPortName */ | ||
785 | se_nacl->initiatorname[0] ? | ||
786 | se_nacl->initiatorname : NONE, | ||
787 | /* scsiAttIntrPortIdentifier */ | ||
788 | buf); | ||
789 | } | ||
790 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
791 | |||
792 | spin_lock(&se_tpg->session_lock); | ||
793 | atomic_dec(&se_nacl->mib_ref_count); | ||
794 | smp_mb__after_atomic_dec(); | ||
795 | atomic_dec(&se_sess->mib_ref_count); | ||
796 | smp_mb__after_atomic_dec(); | ||
797 | } | ||
798 | spin_unlock(&se_tpg->session_lock); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static const struct seq_operations scsi_att_intr_port_seq_ops = { | ||
804 | .start = scsi_att_intr_port_seq_start, | ||
805 | .next = scsi_att_intr_port_seq_next, | ||
806 | .stop = scsi_att_intr_port_seq_stop, | ||
807 | .show = scsi_att_intr_port_seq_show | ||
808 | }; | ||
809 | |||
810 | static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) | ||
811 | { | ||
812 | return seq_open(file, &scsi_att_intr_port_seq_ops); | ||
813 | } | ||
814 | |||
815 | static const struct file_operations scsi_att_intr_port_seq_fops = { | ||
816 | .owner = THIS_MODULE, | ||
817 | .open = scsi_att_intr_port_seq_open, | ||
818 | .read = seq_read, | ||
819 | .llseek = seq_lseek, | ||
820 | .release = seq_release, | ||
821 | }; | ||
822 | |||
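Both initiator tables pin the current list element with an atomic reference count (mib_ref_count) so the list lock can be dropped while the row is formatted, then re-taken to continue the walk; the count is what keeps the node alive in between. A condensed kernel-style sketch of the pattern (hypothetical pinned_node type):

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct pinned_node {
            struct list_head entry;
            atomic_t ref_count;     /* removal paths wait for this to drop */
    };

    static void walk_with_pinning(spinlock_t *lock, struct list_head *head,
                                  void (*emit_row)(struct pinned_node *))
    {
            struct pinned_node *n;

            spin_lock(lock);
            list_for_each_entry(n, head, entry) {
                    atomic_inc(&n->ref_count);      /* pin current element */
                    spin_unlock(lock);

                    emit_row(n);    /* may take other locks / do slow work */

                    spin_lock(lock);
                    atomic_dec(&n->ref_count);
                    /* resuming from n is safe only because the pin
                     * prevented its removal while the lock was dropped */
            }
            spin_unlock(lock);
    }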
823 | /* | ||
824 | * SCSI Logical Unit Table | ||
825 | */ | ||
826 | static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) | ||
827 | { | ||
828 | return locate_hba_start(seq, pos); | ||
829 | } | ||
830 | |||
831 | static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
832 | { | ||
833 | return locate_hba_next(seq, v, pos); | ||
834 | } | ||
835 | |||
836 | static void scsi_lu_seq_stop(struct seq_file *seq, void *v) | ||
837 | { | ||
838 | locate_hba_stop(seq, v); | ||
839 | } | ||
840 | |||
841 | #define SCSI_LU_INDEX 1 | ||
842 | static int scsi_lu_seq_show(struct seq_file *seq, void *v) | ||
843 | { | ||
844 | struct se_hba *hba; | ||
845 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
846 | g_se_dev_list); | ||
847 | struct se_device *dev = se_dev->se_dev_ptr; | ||
848 | int j; | ||
849 | char str[28]; | ||
850 | |||
851 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
852 | seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" | ||
853 | " dev_type status state-bit num_cmds read_mbytes" | ||
854 | " write_mbytes resets full_stat hs_num_cmds creation_time\n"); | ||
855 | |||
856 | if (!(dev)) | ||
857 | return 0; | ||
858 | |||
859 | hba = dev->se_hba; | ||
860 | if (!(hba)) { | ||
861 | /* Log error ? */ | ||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | /* Fix LU state, if we can read it from the device */ | ||
866 | seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, | ||
867 | dev->dev_index, SCSI_LU_INDEX, | ||
868 | (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ | ||
869 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | ||
870 | /* scsiLuWwnName */ | ||
871 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : | ||
872 | "None"); | ||
873 | |||
874 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
875 | /* scsiLuVendorId */ | ||
876 | for (j = 0; j < 8; j++) | ||
877 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | ||
878 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | ||
879 | str[8] = 0; | ||
880 | seq_printf(seq, " %s", str); | ||
881 | |||
882 | /* scsiLuProductId */ | ||
883 | for (j = 0; j < 16; j++) | ||
884 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | ||
885 | DEV_T10_WWN(dev)->model[j] : 0x20; | ||
886 | str[16] = 0; | ||
887 | seq_printf(seq, " %s", str); | ||
888 | |||
889 | /* scsiLuRevisionId */ | ||
890 | for (j = 0; j < 4; j++) | ||
891 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | ||
892 | DEV_T10_WWN(dev)->revision[j] : 0x20; | ||
893 | str[4] = 0; | ||
894 | seq_printf(seq, " %s", str); | ||
895 | |||
896 | seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", | ||
897 | /* scsiLuPeripheralType */ | ||
898 | TRANSPORT(dev)->get_device_type(dev), | ||
899 | (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? | ||
900 | "available" : "notavailable", /* scsiLuStatus */ | ||
901 | "exposed", /* scsiLuState */ | ||
902 | (unsigned long long)dev->num_cmds, | ||
903 | /* scsiLuReadMegaBytes */ | ||
904 | (u32)(dev->read_bytes >> 20), | ||
905 | /* scsiLuWrittenMegaBytes */ | ||
906 | (u32)(dev->write_bytes >> 20), | ||
907 | dev->num_resets, /* scsiLuInResets */ | ||
908 | 0, /* scsiLuOutTaskSetFullStatus */ | ||
909 | 0, /* scsiLuHSInCommands */ | ||
910 | (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * | ||
911 | 100 / HZ)); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static const struct seq_operations scsi_lu_seq_ops = { | ||
917 | .start = scsi_lu_seq_start, | ||
918 | .next = scsi_lu_seq_next, | ||
919 | .stop = scsi_lu_seq_stop, | ||
920 | .show = scsi_lu_seq_show | ||
921 | }; | ||
922 | |||
923 | static int scsi_lu_seq_open(struct inode *inode, struct file *file) | ||
924 | { | ||
925 | return seq_open(file, &scsi_lu_seq_ops); | ||
926 | } | ||
927 | |||
928 | static const struct file_operations scsi_lu_seq_fops = { | ||
929 | .owner = THIS_MODULE, | ||
930 | .open = scsi_lu_seq_open, | ||
931 | .read = seq_read, | ||
932 | .llseek = seq_lseek, | ||
933 | .release = seq_release, | ||
934 | }; | ||
935 | |||
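The vendor/model/revision columns above come from fixed-width INQUIRY byte arrays that are not NUL-terminated, so the show routine copies each field, replaces non-printable bytes with spaces, and terminates by hand. The same sanitizer as a stand-alone helper (dst must hold width + 1 bytes); it would be called with width 8 for the vendor, 16 for the model and 4 for the revision, matching the loops above:

    #include <ctype.h>
    #include <stddef.h>

    static void sanitize_inquiry_field(char *dst, const unsigned char *src,
                                       size_t width)
    {
            size_t i;

            for (i = 0; i < width; i++)
                    dst[i] = isprint(src[i]) ? src[i] : ' ';  /* pad junk */
            dst[width] = '\0';
    }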
936 | /****************************************************************************/ | ||
937 | |||
938 | /* | ||
939 | * Remove proc fs entries | ||
940 | */ | ||
941 | void remove_scsi_target_mib(void) | ||
942 | { | ||
943 | remove_proc_entry("scsi_target/mib/scsi_inst", NULL); | ||
944 | remove_proc_entry("scsi_target/mib/scsi_dev", NULL); | ||
945 | remove_proc_entry("scsi_target/mib/scsi_port", NULL); | ||
946 | remove_proc_entry("scsi_target/mib/scsi_transport", NULL); | ||
947 | remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); | ||
948 | remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); | ||
949 | remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); | ||
950 | remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); | ||
951 | remove_proc_entry("scsi_target/mib/scsi_lu", NULL); | ||
952 | remove_proc_entry("scsi_target/mib", NULL); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Create proc fs entries for the mib tables | ||
957 | */ | ||
958 | int init_scsi_target_mib(void) | ||
959 | { | ||
960 | struct proc_dir_entry *dir_entry; | ||
961 | struct proc_dir_entry *scsi_inst_entry; | ||
962 | struct proc_dir_entry *scsi_dev_entry; | ||
963 | struct proc_dir_entry *scsi_port_entry; | ||
964 | struct proc_dir_entry *scsi_transport_entry; | ||
965 | struct proc_dir_entry *scsi_tgt_dev_entry; | ||
966 | struct proc_dir_entry *scsi_tgt_port_entry; | ||
967 | struct proc_dir_entry *scsi_auth_intr_entry; | ||
968 | struct proc_dir_entry *scsi_att_intr_port_entry; | ||
969 | struct proc_dir_entry *scsi_lu_entry; | ||
970 | |||
971 | dir_entry = proc_mkdir("scsi_target/mib", NULL); | ||
972 | if (!(dir_entry)) { | ||
973 | printk(KERN_ERR "proc_mkdir() failed.\n"); | ||
974 | return -1; | ||
975 | } | ||
976 | |||
977 | scsi_inst_entry = | ||
978 | create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); | ||
979 | if (scsi_inst_entry) | ||
980 | scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; | ||
981 | else | ||
982 | goto error; | ||
983 | |||
984 | scsi_dev_entry = | ||
985 | create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); | ||
986 | if (scsi_dev_entry) | ||
987 | scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; | ||
988 | else | ||
989 | goto error; | ||
990 | |||
991 | scsi_port_entry = | ||
992 | create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); | ||
993 | if (scsi_port_entry) | ||
994 | scsi_port_entry->proc_fops = &scsi_port_seq_fops; | ||
995 | else | ||
996 | goto error; | ||
997 | |||
998 | scsi_transport_entry = | ||
999 | create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); | ||
1000 | if (scsi_transport_entry) | ||
1001 | scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; | ||
1002 | else | ||
1003 | goto error; | ||
1004 | |||
1005 | scsi_tgt_dev_entry = | ||
1006 | create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); | ||
1007 | if (scsi_tgt_dev_entry) | ||
1008 | scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; | ||
1009 | else | ||
1010 | goto error; | ||
1011 | |||
1012 | scsi_tgt_port_entry = | ||
1013 | create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); | ||
1014 | if (scsi_tgt_port_entry) | ||
1015 | scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; | ||
1016 | else | ||
1017 | goto error; | ||
1018 | |||
1019 | scsi_auth_intr_entry = | ||
1020 | create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); | ||
1021 | if (scsi_auth_intr_entry) | ||
1022 | scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; | ||
1023 | else | ||
1024 | goto error; | ||
1025 | |||
1026 | scsi_att_intr_port_entry = | ||
1027 | create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); | ||
1028 | if (scsi_att_intr_port_entry) | ||
1029 | scsi_att_intr_port_entry->proc_fops = | ||
1030 | &scsi_att_intr_port_seq_fops; | ||
1031 | else | ||
1032 | goto error; | ||
1033 | |||
1034 | scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); | ||
1035 | if (scsi_lu_entry) | ||
1036 | scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; | ||
1037 | else | ||
1038 | goto error; | ||
1039 | |||
1040 | return 0; | ||
1041 | |||
1042 | error: | ||
1043 | printk(KERN_ERR "create_proc_entry() failed.\n"); | ||
1044 | remove_scsi_target_mib(); | ||
1045 | return -1; | ||
1046 | } | ||
1047 | |||
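Each entry registered above becomes an ordinary read-only proc file whose first line is the column header. A minimal user-space reader, assuming the path created by init_scsi_target_mib():

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/scsi_target/mib/scsi_lu", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);  /* header row, then one row per LU */
            fclose(f);
            return 0;
    }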
1048 | /* | ||
1049 | * Initialize the index table for allocating unique row indexes to various mib | ||
1050 | * tables | ||
1051 | */ | ||
1052 | void init_scsi_index_table(void) | ||
1053 | { | ||
1054 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
1055 | spin_lock_init(&scsi_index_table.lock); | ||
1056 | } | ||
1057 | |||
1058 | /* | ||
1059 | * Allocate a new row index for the entry type specified | ||
1060 | */ | ||
1061 | u32 scsi_get_new_index(scsi_index_t type) | ||
1062 | { | ||
1063 | u32 new_index; | ||
1064 | |||
1065 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
1066 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
1067 | return -1; | ||
1068 | } | ||
1069 | |||
1070 | spin_lock(&scsi_index_table.lock); | ||
1071 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1072 | if (new_index == 0) | ||
1073 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1074 | spin_unlock(&scsi_index_table.lock); | ||
1075 | |||
1076 | return new_index; | ||
1077 | } | ||
1078 | EXPORT_SYMBOL(scsi_get_new_index); | ||
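The double increment in scsi_get_new_index() is what keeps the handed-out indexes non-zero: when the 32-bit counter wraps, the first increment yields 0 and the second skips past it, so 0 can keep meaning "unassigned". A user-space sketch of that wrap behaviour (pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t idx_counter = UINT32_MAX - 1;   /* near wrap for demo */

    static uint32_t get_new_index(void)
    {
            uint32_t new_index;

            pthread_mutex_lock(&idx_lock);
            new_index = ++idx_counter;
            if (new_index == 0)             /* wrapped: 0 is reserved */
                    new_index = ++idx_counter;
            pthread_mutex_unlock(&idx_lock);
            return new_index;
    }

    int main(void)
    {
            /* prints 4294967295, then 1 (0 is skipped at the wrap) */
            printf("%u\n%u\n", get_new_index(), get_new_index());
            return 0;
    }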
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h new file mode 100644 index 000000000000..277204633850 --- /dev/null +++ b/drivers/target/target_core_mib.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef TARGET_CORE_MIB_H | ||
2 | #define TARGET_CORE_MIB_H | ||
3 | |||
4 | typedef enum { | ||
5 | SCSI_INST_INDEX, | ||
6 | SCSI_DEVICE_INDEX, | ||
7 | SCSI_AUTH_INTR_INDEX, | ||
8 | SCSI_INDEX_TYPE_MAX | ||
9 | } scsi_index_t; | ||
10 | |||
11 | struct scsi_index_table { | ||
12 | spinlock_t lock; | ||
13 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
14 | } ____cacheline_aligned; | ||
15 | |||
16 | /* SCSI Port stats */ | ||
17 | struct scsi_port_stats { | ||
18 | u64 cmd_pdus; | ||
19 | u64 tx_data_octets; | ||
20 | u64 rx_data_octets; | ||
21 | } ____cacheline_aligned; | ||
22 | |||
23 | extern int init_scsi_target_mib(void); | ||
24 | extern void remove_scsi_target_mib(void); | ||
25 | extern void init_scsi_index_table(void); | ||
26 | extern u32 scsi_get_new_index(scsi_index_t); | ||
27 | |||
28 | #endif /*** TARGET_CORE_MIB_H ***/ | ||
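A hypothetical call site, sketching how a caller would stamp a new object with a row index of the matching type:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #include "target_core_mib.h"

    static void example_assign_dev_index(u32 *dev_index)
    {
            /* unique, non-zero index for the device table rows */
            *dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
    }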
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c new file mode 100644 index 000000000000..2521f75362c3 --- /dev/null +++ b/drivers/target/target_core_pr.c | |||
@@ -0,0 +1,4252 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_pr.c | ||
3 | * | ||
4 | * This file contains SPC-3 compliant persistent reservations and | ||
5 | * legacy SPC-2 reservations with compatible reservation handling (CRH=1) | ||
6 | * | ||
7 | * Copyright (c) 2009, 2010 Rising Tide Systems | ||
8 | * Copyright (c) 2009, 2010 Linux-iSCSI.org | ||
9 | * | ||
10 | * Nicholas A. Bellinger <nab@kernel.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
25 | * | ||
26 | ******************************************************************************/ | ||
27 | |||
28 | #include <linux/version.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/list.h> | ||
32 | #include <scsi/scsi.h> | ||
33 | #include <scsi/scsi_cmnd.h> | ||
34 | #include <asm/unaligned.h> | ||
35 | |||
36 | #include <target/target_core_base.h> | ||
37 | #include <target/target_core_device.h> | ||
38 | #include <target/target_core_tmr.h> | ||
39 | #include <target/target_core_tpg.h> | ||
40 | #include <target/target_core_transport.h> | ||
41 | #include <target/target_core_fabric_ops.h> | ||
42 | #include <target/target_core_configfs.h> | ||
43 | |||
44 | #include "target_core_hba.h" | ||
45 | #include "target_core_pr.h" | ||
46 | #include "target_core_ua.h" | ||
47 | |||
48 | /* | ||
49 | * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT) | ||
50 | */ | ||
51 | struct pr_transport_id_holder { | ||
52 | int dest_local_nexus; | ||
53 | struct t10_pr_registration *dest_pr_reg; | ||
54 | struct se_portal_group *dest_tpg; | ||
55 | struct se_node_acl *dest_node_acl; | ||
56 | struct se_dev_entry *dest_se_deve; | ||
57 | struct list_head dest_list; | ||
58 | }; | ||
59 | |||
60 | int core_pr_dump_initiator_port( | ||
61 | struct t10_pr_registration *pr_reg, | ||
62 | char *buf, | ||
63 | u32 size) | ||
64 | { | ||
65 | if (!(pr_reg->isid_present_at_reg)) | ||
66 | return 0; | ||
67 | |||
68 | snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); | ||
69 | return 1; | ||
70 | } | ||
71 | |||
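When an ISID was captured at registration time, core_pr_dump_initiator_port() appends a ",i,0x<isid>" suffix to the initiator port name; callers print the buffer only when the function returns 1. A stand-alone sketch of the formatting contract (hypothetical ISID value):

    #include <stdio.h>

    int main(void)
    {
            char buf[32] = "";
            const char *isid = "00023d000001";  /* hypothetical iSCSI ISID */
            int isid_present = 1;

            if (isid_present)
                    snprintf(buf, sizeof(buf), ",i,0x%s", isid);
            printf("%s\n", buf);    /* prints: ,i,0x00023d000001 */
            return 0;
    }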
72 | static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, | ||
73 | struct t10_pr_registration *, int); | ||
74 | |||
75 | static int core_scsi2_reservation_seq_non_holder( | ||
76 | struct se_cmd *cmd, | ||
77 | unsigned char *cdb, | ||
78 | u32 pr_reg_type) | ||
79 | { | ||
80 | switch (cdb[0]) { | ||
81 | case INQUIRY: | ||
82 | case RELEASE: | ||
83 | case RELEASE_10: | ||
84 | return 0; | ||
85 | default: | ||
86 | return 1; | ||
87 | } | ||
88 | |||
89 | return 1; | ||
90 | } | ||
91 | |||
92 | static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) | ||
93 | { | ||
94 | struct se_device *dev = cmd->se_dev; | ||
95 | struct se_session *sess = cmd->se_sess; | ||
96 | int ret; | ||
97 | |||
98 | if (!(sess)) | ||
99 | return 0; | ||
100 | |||
101 | spin_lock(&dev->dev_reservation_lock); | ||
102 | if (!dev->dev_reserved_node_acl || !sess) { | ||
103 | spin_unlock(&dev->dev_reservation_lock); | ||
104 | return 0; | ||
105 | } | ||
106 | if (dev->dev_reserved_node_acl != sess->se_node_acl) { | ||
107 | spin_unlock(&dev->dev_reservation_lock); | ||
108 | return -1; | ||
109 | } | ||
110 | if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { | ||
111 | spin_unlock(&dev->dev_reservation_lock); | ||
112 | return 0; | ||
113 | } | ||
114 | ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1; | ||
115 | spin_unlock(&dev->dev_reservation_lock); | ||
116 | |||
117 | return ret; | ||
118 | } | ||
119 | |||
120 | static int core_scsi2_reservation_release(struct se_cmd *cmd) | ||
121 | { | ||
122 | struct se_device *dev = cmd->se_dev; | ||
123 | struct se_session *sess = cmd->se_sess; | ||
124 | struct se_portal_group *tpg = sess->se_tpg; | ||
125 | |||
126 | if (!(sess) || !(tpg)) | ||
127 | return 0; | ||
128 | |||
129 | spin_lock(&dev->dev_reservation_lock); | ||
130 | if (!dev->dev_reserved_node_acl || !sess) { | ||
131 | spin_unlock(&dev->dev_reservation_lock); | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | if (dev->dev_reserved_node_acl != sess->se_node_acl) { | ||
136 | spin_unlock(&dev->dev_reservation_lock); | ||
137 | return 0; | ||
138 | } | ||
139 | dev->dev_reserved_node_acl = NULL; | ||
140 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS; | ||
141 | if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { | ||
142 | dev->dev_res_bin_isid = 0; | ||
143 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; | ||
144 | } | ||
145 | printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" | ||
146 | " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
147 | SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, | ||
148 | sess->se_node_acl->initiatorname); | ||
149 | spin_unlock(&dev->dev_reservation_lock); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static int core_scsi2_reservation_reserve(struct se_cmd *cmd) | ||
155 | { | ||
156 | struct se_device *dev = cmd->se_dev; | ||
157 | struct se_session *sess = cmd->se_sess; | ||
158 | struct se_portal_group *tpg = sess->se_tpg; | ||
159 | |||
160 | if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) && | ||
161 | (T_TASK(cmd)->t_task_cdb[1] & 0x02)) { | ||
162 | printk(KERN_ERR "LongIO and Obsolete Bits set, returning" | ||
163 | " ILLEGAL_REQUEST\n"); | ||
164 | return PYX_TRANSPORT_ILLEGAL_REQUEST; | ||
165 | } | ||
166 | /* | ||
167 | * This is currently the case for target_core_mod passthrough struct se_cmd | ||
168 | * ops | ||
169 | */ | ||
170 | if (!(sess) || !(tpg)) | ||
171 | return 0; | ||
172 | |||
173 | spin_lock(&dev->dev_reservation_lock); | ||
174 | if (dev->dev_reserved_node_acl && | ||
175 | (dev->dev_reserved_node_acl != sess->se_node_acl)) { | ||
176 | printk(KERN_ERR "SCSI-2 RESERVATION CONFLICT for %s fabric\n", | ||
177 | TPG_TFO(tpg)->get_fabric_name()); | ||
178 | printk(KERN_ERR "Original reserver LUN: %u %s\n", | ||
179 | SE_LUN(cmd)->unpacked_lun, | ||
180 | dev->dev_reserved_node_acl->initiatorname); | ||
181 | printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" | ||
182 | " from %s \n", SE_LUN(cmd)->unpacked_lun, | ||
183 | cmd->se_deve->mapped_lun, | ||
184 | sess->se_node_acl->initiatorname); | ||
185 | spin_unlock(&dev->dev_reservation_lock); | ||
186 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
187 | } | ||
188 | |||
189 | dev->dev_reserved_node_acl = sess->se_node_acl; | ||
190 | dev->dev_flags |= DF_SPC2_RESERVATIONS; | ||
191 | if (sess->sess_bin_isid != 0) { | ||
192 | dev->dev_res_bin_isid = sess->sess_bin_isid; | ||
193 | dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; | ||
194 | } | ||
195 | printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" | ||
196 | " for %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
197 | SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, | ||
198 | sess->se_node_acl->initiatorname); | ||
199 | spin_unlock(&dev->dev_reservation_lock); | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *, | ||
205 | struct se_node_acl *, struct se_session *); | ||
206 | static void core_scsi3_put_pr_reg(struct t10_pr_registration *); | ||
207 | |||
208 | /* | ||
209 | * Setup in target_core_transport.c:transport_generic_cmd_sequencer() | ||
210 | * and called via struct se_cmd->transport_emulate_cdb() in TCM processing | ||
211 | * thread context. | ||
212 | */ | ||
213 | int core_scsi2_emulate_crh(struct se_cmd *cmd) | ||
214 | { | ||
215 | struct se_session *se_sess = cmd->se_sess; | ||
216 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; | ||
217 | struct t10_pr_registration *pr_reg; | ||
218 | struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation; | ||
219 | unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; | ||
220 | int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS); | ||
221 | int conflict = 0; | ||
222 | |||
223 | if (!(se_sess)) | ||
224 | return 0; | ||
225 | |||
226 | if (!(crh)) | ||
227 | goto after_crh; | ||
228 | |||
229 | pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, | ||
230 | se_sess); | ||
231 | if (pr_reg) { | ||
232 | /* | ||
233 | * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE | ||
234 | * behavior | ||
235 | * | ||
236 | * A RESERVE(6) or RESERVE(10) command shall complete with GOOD | ||
237 | * status, but no reservation shall be established and the | ||
238 | * persistent reservation shall not be changed, if the command | ||
239 | * is received from a) and b) below. | ||
240 | * | ||
241 | * A RELEASE(6) or RELEASE(10) command shall complete with GOOD | ||
242 | * status, but the persistent reservation shall not be released, | ||
243 | * if the command is received from a) and b) | ||
244 | * | ||
245 | * a) An I_T nexus that is a persistent reservation holder; or | ||
246 | * b) An I_T nexus that is registered if a registrants only or | ||
247 | * all registrants type persistent reservation is present. | ||
248 | * | ||
249 | * In all other cases, a RESERVE(6) command, RESERVE(10) command, | ||
250 | * RELEASE(6) command, or RELEASE(10) command shall be processed | ||
251 | * as defined in SPC-2. | ||
252 | */ | ||
253 | if (pr_reg->pr_res_holder) { | ||
254 | core_scsi3_put_pr_reg(pr_reg); | ||
255 | return 0; | ||
256 | } | ||
257 | if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || | ||
258 | (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) || | ||
259 | (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
260 | (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | ||
261 | core_scsi3_put_pr_reg(pr_reg); | ||
262 | return 0; | ||
263 | } | ||
264 | core_scsi3_put_pr_reg(pr_reg); | ||
265 | conflict = 1; | ||
266 | } else { | ||
267 | /* | ||
268 | * Following spc2r20 5.5.1 Reservations overview: | ||
269 | * | ||
270 | * If a logical unit has executed a PERSISTENT RESERVE OUT | ||
271 | * command with the REGISTER or the REGISTER AND IGNORE | ||
272 | * EXISTING KEY service action and is still registered by any | ||
273 | * initiator, all RESERVE commands and all RELEASE commands | ||
274 | * regardless of initiator shall conflict and shall terminate | ||
275 | * with a RESERVATION CONFLICT status. | ||
276 | */ | ||
277 | spin_lock(&pr_tmpl->registration_lock); | ||
278 | conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1; | ||
279 | spin_unlock(&pr_tmpl->registration_lock); | ||
280 | } | ||
281 | |||
282 | if (conflict) { | ||
283 | printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" | ||
284 | " while active SPC-3 registrations exist," | ||
285 | " returning RESERVATION_CONFLICT\n"); | ||
286 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
287 | } | ||
288 | |||
289 | after_crh: | ||
290 | if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10)) | ||
291 | return core_scsi2_reservation_reserve(cmd); | ||
292 | else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10)) | ||
293 | return core_scsi2_reservation_release(cmd); | ||
294 | else | ||
295 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
296 | } | ||
297 | |||
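The CRH=1 decision above reduces to a small table: a registered nexus that holds the reservation, or any registered nexus under a registrants-only/all-registrants type, gets a silent GOOD; any other registered nexus conflicts; an unregistered nexus conflicts whenever registrations exist at all, and otherwise falls through to plain SPC-2 processing. A stand-alone model of that flow (hypothetical names, not the kernel's return codes):

    #include <stdbool.h>

    enum crh_verdict {
            CRH_NOOP_GOOD,          /* complete with GOOD, change nothing */
            CRH_CONFLICT,           /* RESERVATION CONFLICT status        */
            CRH_PROCESS_SPC2,       /* handle RESERVE/RELEASE per SPC-2   */
    };

    static enum crh_verdict crh_check(bool registered, bool holder,
                                      bool regonly_or_allreg_type,
                                      bool any_registrations)
    {
            if (registered) {
                    if (holder || regonly_or_allreg_type)
                            return CRH_NOOP_GOOD;
                    return CRH_CONFLICT;
            }
            return any_registrations ? CRH_CONFLICT : CRH_PROCESS_SPC2;
    }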
298 | /* | ||
299 | * Begin SPC-3/SPC-4 Persistent Reservations emulation support | ||
300 | * | ||
301 | * This function is called by those initiator ports who are *NOT* | ||
302 | * the active PR reservation holder when a reservation is present. | ||
303 | */ | ||
304 | static int core_scsi3_pr_seq_non_holder( | ||
305 | struct se_cmd *cmd, | ||
306 | unsigned char *cdb, | ||
307 | u32 pr_reg_type) | ||
308 | { | ||
309 | struct se_dev_entry *se_deve; | ||
310 | struct se_session *se_sess = SE_SESS(cmd); | ||
311 | int other_cdb = 0, ignore_reg; | ||
312 | int registered_nexus = 0, ret = 1; /* Conflict by default */ | ||
313 | int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ | ||
314 | int we = 0; /* Write Exclusive */ | ||
315 | int legacy = 0; /* Act like a legacy device and return | ||
316 | * RESERVATION CONFLICT on some CDBs */ | ||
317 | /* | ||
318 | * A legacy SPC-2 reservation is being held. | ||
319 | */ | ||
320 | if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) | ||
321 | return core_scsi2_reservation_seq_non_holder(cmd, | ||
322 | cdb, pr_reg_type); | ||
323 | |||
324 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
325 | /* | ||
326 | * Determine if the registration should be ignored due to | ||
327 | * non-matching ISIDs in core_scsi3_pr_reservation_check(). | ||
328 | */ | ||
329 | ignore_reg = (pr_reg_type & 0x80000000); | ||
330 | if (ignore_reg) | ||
331 | pr_reg_type &= ~0x80000000; | ||
332 | |||
333 | switch (pr_reg_type) { | ||
334 | case PR_TYPE_WRITE_EXCLUSIVE: | ||
335 | we = 1; | ||
336 | case PR_TYPE_EXCLUSIVE_ACCESS: | ||
337 | /* | ||
338 | * Some commands are only allowed for the persistent reservation | ||
339 | * holder. | ||
340 | */ | ||
341 | if ((se_deve->def_pr_registered) && !(ignore_reg)) | ||
342 | registered_nexus = 1; | ||
343 | break; | ||
344 | case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: | ||
345 | we = 1; | ||
346 | case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: | ||
347 | /* | ||
348 | * Some commands are only allowed for registered I_T Nexuses. | ||
349 | */ | ||
350 | reg_only = 1; | ||
351 | if ((se_deve->def_pr_registered) && !(ignore_reg)) | ||
352 | registered_nexus = 1; | ||
353 | break; | ||
354 | case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: | ||
355 | we = 1; | ||
356 | case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: | ||
357 | /* | ||
358 | * Each registered I_T Nexus is a reservation holder. | ||
359 | */ | ||
360 | all_reg = 1; | ||
361 | if ((se_deve->def_pr_registered) && !(ignore_reg)) | ||
362 | registered_nexus = 1; | ||
363 | break; | ||
364 | default: | ||
365 | return -1; | ||
366 | } | ||
367 | /* | ||
368 | * Referenced from spc4r17 table 45 for *NON* PR holder access | ||
369 | */ | ||
370 | switch (cdb[0]) { | ||
371 | case SECURITY_PROTOCOL_IN: | ||
372 | if (registered_nexus) | ||
373 | return 0; | ||
374 | ret = (we) ? 0 : 1; | ||
375 | break; | ||
376 | case MODE_SENSE: | ||
377 | case MODE_SENSE_10: | ||
378 | case READ_ATTRIBUTE: | ||
379 | case READ_BUFFER: | ||
380 | case RECEIVE_DIAGNOSTIC: | ||
381 | if (legacy) { | ||
382 | ret = 1; | ||
383 | break; | ||
384 | } | ||
385 | if (registered_nexus) { | ||
386 | ret = 0; | ||
387 | break; | ||
388 | } | ||
389 | ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ | ||
390 | break; | ||
391 | case PERSISTENT_RESERVE_OUT: | ||
392 | /* | ||
393 | * This follows PERSISTENT_RESERVE_OUT service actions that | ||
394 | * are allowed in the presence of various reservations. | ||
395 | * See spc4r17, table 46 | ||
396 | */ | ||
397 | switch (cdb[1] & 0x1f) { | ||
398 | case PRO_CLEAR: | ||
399 | case PRO_PREEMPT: | ||
400 | case PRO_PREEMPT_AND_ABORT: | ||
401 | ret = (registered_nexus) ? 0 : 1; | ||
402 | break; | ||
403 | case PRO_REGISTER: | ||
404 | case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: | ||
405 | ret = 0; | ||
406 | break; | ||
407 | case PRO_REGISTER_AND_MOVE: | ||
408 | case PRO_RESERVE: | ||
409 | ret = 1; | ||
410 | break; | ||
411 | case PRO_RELEASE: | ||
412 | ret = (registered_nexus) ? 0 : 1; | ||
413 | break; | ||
414 | default: | ||
415 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" | ||
416 | " action: 0x%02x\n", cdb[1] & 0x1f); | ||
417 | return -1; | ||
418 | } | ||
419 | break; | ||
420 | case RELEASE: | ||
421 | case RELEASE_10: | ||
422 | /* Handled by CRH=1 in core_scsi2_emulate_crh() */ | ||
423 | ret = 0; | ||
424 | break; | ||
425 | case RESERVE: | ||
426 | case RESERVE_10: | ||
427 | /* Handled by CRH=1 in core_scsi2_emulate_crh() */ | ||
428 | ret = 0; | ||
429 | break; | ||
430 | case TEST_UNIT_READY: | ||
431 | ret = (legacy) ? 1 : 0; /* Conflict for legacy */ | ||
432 | break; | ||
433 | case MAINTENANCE_IN: | ||
434 | switch (cdb[1] & 0x1f) { | ||
435 | case MI_MANAGEMENT_PROTOCOL_IN: | ||
436 | if (registered_nexus) { | ||
437 | ret = 0; | ||
438 | break; | ||
439 | } | ||
440 | ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ | ||
441 | break; | ||
442 | case MI_REPORT_SUPPORTED_OPERATION_CODES: | ||
443 | case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: | ||
444 | if (legacy) { | ||
445 | ret = 1; | ||
446 | break; | ||
447 | } | ||
448 | if (registered_nexus) { | ||
449 | ret = 0; | ||
450 | break; | ||
451 | } | ||
452 | ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ | ||
453 | break; | ||
454 | case MI_REPORT_ALIASES: | ||
455 | case MI_REPORT_IDENTIFYING_INFORMATION: | ||
456 | case MI_REPORT_PRIORITY: | ||
457 | case MI_REPORT_TARGET_PGS: | ||
458 | case MI_REPORT_TIMESTAMP: | ||
459 | ret = 0; /* Allowed */ | ||
460 | break; | ||
461 | default: | ||
462 | printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", | ||
463 | (cdb[1] & 0x1f)); | ||
464 | return -1; | ||
465 | } | ||
466 | break; | ||
467 | case ACCESS_CONTROL_IN: | ||
468 | case ACCESS_CONTROL_OUT: | ||
469 | case INQUIRY: | ||
470 | case LOG_SENSE: | ||
471 | case READ_MEDIA_SERIAL_NUMBER: | ||
472 | case REPORT_LUNS: | ||
473 | case REQUEST_SENSE: | ||
474 | ret = 0; /* Allowed CDBs */ | ||
475 | break; | ||
476 | default: | ||
477 | other_cdb = 1; | ||
478 | break; | ||
479 | } | ||
480 | /* | ||
481 | * Case where the CDB is explicitly allowed in the above switch | ||
482 | * statement. | ||
483 | */ | ||
484 | if (!(ret) && !(other_cdb)) { | ||
485 | #if 0 | ||
486 | printk(KERN_INFO "Allowing explicit CDB: 0x%02x for %s" | ||
487 | " reservation holder\n", cdb[0], | ||
488 | core_scsi3_pr_dump_type(pr_reg_type)); | ||
489 | #endif | ||
490 | return ret; | ||
491 | } | ||
492 | /* | ||
493 | * Check if write exclusive initiator ports *NOT* holding the | ||
494 | * WRITE_EXCLUSIVE_* reservation. | ||
495 | */ | ||
496 | if ((we) && !(registered_nexus)) { | ||
497 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
498 | /* | ||
499 | * Conflict for write exclusive | ||
500 | */ | ||
501 | printk(KERN_INFO "%s Conflict for unregistered nexus" | ||
502 | " %s CDB: 0x%02x to %s reservation\n", | ||
503 | transport_dump_cmd_direction(cmd), | ||
504 | se_sess->se_node_acl->initiatorname, cdb[0], | ||
505 | core_scsi3_pr_dump_type(pr_reg_type)); | ||
506 | return 1; | ||
507 | } else { | ||
508 | /* | ||
509 | * Allow non WRITE CDBs for all Write Exclusive | ||
510 | * PR TYPEs to pass for registered and | ||
511 | * non-registered nexuses NOT holding the reservation. | ||
512 | * | ||
513 | * We only make noise for the unregistered nexuses, | ||
514 | * as we expect registered non-reservation holding | ||
515 | * nexuses to issue CDBs. | ||
516 | */ | ||
517 | #if 0 | ||
518 | if (!(registered_nexus)) { | ||
519 | printk(KERN_INFO "Allowing implicit CDB: 0x%02x" | ||
520 | " for %s reservation on unregistered" | ||
521 | " nexus\n", cdb[0], | ||
522 | core_scsi3_pr_dump_type(pr_reg_type)); | ||
523 | } | ||
524 | #endif | ||
525 | return 0; | ||
526 | } | ||
527 | } else if ((reg_only) || (all_reg)) { | ||
528 | if (registered_nexus) { | ||
529 | /* | ||
530 | * For PR_*_REG_ONLY and PR_*_ALL_REG reservations, | ||
531 | * allow commands from registered nexuses. | ||
532 | */ | ||
533 | #if 0 | ||
534 | printk(KERN_INFO "Allowing implicit CDB: 0x%02x for %s" | ||
535 | " reservation\n", cdb[0], | ||
536 | core_scsi3_pr_dump_type(pr_reg_type)); | ||
537 | #endif | ||
538 | return 0; | ||
539 | } | ||
540 | } | ||
541 | printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%02x" | ||
542 | " for %s reservation\n", transport_dump_cmd_direction(cmd), | ||
543 | (registered_nexus) ? "" : "un", | ||
544 | se_sess->se_node_acl->initiatorname, cdb[0], | ||
545 | core_scsi3_pr_dump_type(pr_reg_type)); | ||
546 | |||
547 | return 1; /* Conflict by default */ | ||
548 | } | ||
549 | |||
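The first switch in core_scsi3_pr_seq_non_holder() leans on intentional C fall-through: each PR_TYPE_WRITE_EXCLUSIVE* case sets we = 1 and then falls into its EXCLUSIVE_ACCESS sibling, so three flags end up classifying all six SPC-4 reservation types. The same classification written without fall-through, as a sketch (hypothetical enum/struct names):

    #include <stdbool.h>

    /* Hypothetical mirror of the six SPC-4 PR types used above. */
    enum pr_type {
            WRITE_EXCLUSIVE, EXCLUSIVE_ACCESS,
            WRITE_EXCLUSIVE_REGONLY, EXCLUSIVE_ACCESS_REGONLY,
            WRITE_EXCLUSIVE_ALLREG, EXCLUSIVE_ACCESS_ALLREG,
    };

    struct pr_class {
            bool we;        /* write-exclusive flavour  */
            bool reg_only;  /* registrants-only flavour */
            bool all_reg;   /* all-registrants flavour  */
    };

    static struct pr_class classify_pr_type(enum pr_type t)
    {
            struct pr_class c = {
                    .we = (t == WRITE_EXCLUSIVE ||
                           t == WRITE_EXCLUSIVE_REGONLY ||
                           t == WRITE_EXCLUSIVE_ALLREG),
                    .reg_only = (t == WRITE_EXCLUSIVE_REGONLY ||
                                 t == EXCLUSIVE_ACCESS_REGONLY),
                    .all_reg = (t == WRITE_EXCLUSIVE_ALLREG ||
                                t == EXCLUSIVE_ACCESS_ALLREG),
            };
            return c;
    }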
550 | static u32 core_scsi3_pr_generation(struct se_device *dev) | ||
551 | { | ||
552 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | ||
553 | u32 prg; | ||
554 | /* | ||
555 | * PRGeneration field shall contain the value of a 32-bit wrapping | ||
556 | * counter maintained by the device server. | ||
557 | * | ||
558 | * Note that this is done regardless of Active Persist across | ||
559 | * Target PowerLoss (APTPL) | ||
560 | * | ||
561 | * See spc4r17 section 6.3.12 READ_KEYS service action | ||
562 | */ | ||
563 | spin_lock(&dev->dev_reservation_lock); | ||
564 | prg = T10_RES(su_dev)->pr_generation++; | ||
565 | spin_unlock(&dev->dev_reservation_lock); | ||
566 | |||
567 | return prg; | ||
568 | } | ||
569 | |||
570 | static int core_scsi3_pr_reservation_check( | ||
571 | struct se_cmd *cmd, | ||
572 | u32 *pr_reg_type) | ||
573 | { | ||
574 | struct se_device *dev = cmd->se_dev; | ||
575 | struct se_session *sess = cmd->se_sess; | ||
576 | int ret; | ||
577 | |||
578 | if (!(sess)) | ||
579 | return 0; | ||
580 | /* | ||
581 | * A legacy SPC-2 reservation is being held. | ||
582 | */ | ||
583 | if (dev->dev_flags & DF_SPC2_RESERVATIONS) | ||
584 | return core_scsi2_reservation_check(cmd, pr_reg_type); | ||
585 | |||
586 | spin_lock(&dev->dev_reservation_lock); | ||
587 | if (!(dev->dev_pr_res_holder)) { | ||
588 | spin_unlock(&dev->dev_reservation_lock); | ||
589 | return 0; | ||
590 | } | ||
591 | *pr_reg_type = dev->dev_pr_res_holder->pr_res_type; | ||
592 | cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; | ||
593 | if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { | ||
594 | spin_unlock(&dev->dev_reservation_lock); | ||
595 | return -1; | ||
596 | } | ||
597 | if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { | ||
598 | spin_unlock(&dev->dev_reservation_lock); | ||
599 | return 0; | ||
600 | } | ||
601 | ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == | ||
602 | sess->sess_bin_isid) ? 0 : -1; | ||
603 | /* | ||
604 | * Use bit in *pr_reg_type to notify ISID mismatch in | ||
605 | * core_scsi3_pr_seq_non_holder(). | ||
606 | */ | ||
607 | if (ret != 0) | ||
608 | *pr_reg_type |= 0x80000000; | ||
609 | spin_unlock(&dev->dev_reservation_lock); | ||
610 | |||
611 | return ret; | ||
612 | } | ||
613 | |||
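Note the in-band signalling: core_scsi3_pr_reservation_check() borrows the high bit of *pr_reg_type to mean "registration should be ignored (ISID mismatch)", and core_scsi3_pr_seq_non_holder() strips it back off before switching on the type. The encode/decode isolated as a sketch (hypothetical macro name):

    #include <stdbool.h>
    #include <stdint.h>

    #define PR_TYPE_IGNORE_REG      0x80000000u     /* hypothetical name */

    static inline uint32_t pr_type_mark_ignored(uint32_t pr_reg_type)
    {
            return pr_reg_type | PR_TYPE_IGNORE_REG;
    }

    static inline bool pr_type_reg_ignored(uint32_t pr_reg_type)
    {
            return (pr_reg_type & PR_TYPE_IGNORE_REG) != 0;
    }

    static inline uint32_t pr_type_raw(uint32_t pr_reg_type)
    {
            return pr_reg_type & ~PR_TYPE_IGNORE_REG;  /* low 31 bits */
    }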
614 | static struct t10_pr_registration *__core_scsi3_do_alloc_registration( | ||
615 | struct se_device *dev, | ||
616 | struct se_node_acl *nacl, | ||
617 | struct se_dev_entry *deve, | ||
618 | unsigned char *isid, | ||
619 | u64 sa_res_key, | ||
620 | int all_tg_pt, | ||
621 | int aptpl) | ||
622 | { | ||
623 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | ||
624 | struct t10_pr_registration *pr_reg; | ||
625 | |||
626 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); | ||
627 | if (!(pr_reg)) { | ||
628 | printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); | ||
629 | return NULL; | ||
630 | } | ||
631 | |||
632 | pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len, | ||
633 | GFP_ATOMIC); | ||
634 | if (!(pr_reg->pr_aptpl_buf)) { | ||
635 | printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); | ||
636 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | ||
637 | return NULL; | ||
638 | } | ||
639 | |||
640 | INIT_LIST_HEAD(&pr_reg->pr_reg_list); | ||
641 | INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); | ||
642 | INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); | ||
643 | INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); | ||
644 | INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); | ||
645 | atomic_set(&pr_reg->pr_res_holders, 0); | ||
646 | pr_reg->pr_reg_nacl = nacl; | ||
647 | pr_reg->pr_reg_deve = deve; | ||
648 | pr_reg->pr_res_mapped_lun = deve->mapped_lun; | ||
649 | pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun; | ||
650 | pr_reg->pr_res_key = sa_res_key; | ||
651 | pr_reg->pr_reg_all_tg_pt = all_tg_pt; | ||
652 | pr_reg->pr_reg_aptpl = aptpl; | ||
653 | pr_reg->pr_reg_tg_pt_lun = deve->se_lun; | ||
654 | /* | ||
655 | * If an ISID value for this SCSI Initiator Port exists, | ||
656 | * save it to the registration now. | ||
657 | */ | ||
658 | if (isid != NULL) { | ||
659 | pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); | ||
660 | snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); | ||
661 | pr_reg->isid_present_at_reg = 1; | ||
662 | } | ||
663 | |||
664 | return pr_reg; | ||
665 | } | ||
666 | |||
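The ISID is kept in two forms: the printable string for log/dump output and a 64-bit big-endian fold (pr_reg_bin_isid, via get_unaligned_be64()) so later checks can compare it against sess_bin_isid as a single integer. A portable stand-in for that fold:

    #include <stdint.h>

    static uint64_t fold_be64(const unsigned char *p)
    {
            uint64_t v = 0;
            int i;

            for (i = 0; i < 8; i++)
                    v = (v << 8) | p[i];    /* most significant byte first */
            return v;
    }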
667 | static int core_scsi3_lunacl_depend_item(struct se_dev_entry *); | ||
668 | static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *); | ||
669 | |||
670 | /* | ||
671 | * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0 | ||
672 | * modes. | ||
673 | */ | ||
674 | static struct t10_pr_registration *__core_scsi3_alloc_registration( | ||
675 | struct se_device *dev, | ||
676 | struct se_node_acl *nacl, | ||
677 | struct se_dev_entry *deve, | ||
678 | unsigned char *isid, | ||
679 | u64 sa_res_key, | ||
680 | int all_tg_pt, | ||
681 | int aptpl) | ||
682 | { | ||
683 | struct se_dev_entry *deve_tmp; | ||
684 | struct se_node_acl *nacl_tmp; | ||
685 | struct se_port *port, *port_tmp; | ||
686 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | ||
687 | struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; | ||
688 | int ret; | ||
689 | /* | ||
690 | * Create a registration for the I_T Nexus upon which the | ||
691 | * PROUT REGISTER was received. | ||
692 | */ | ||
693 | pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, | ||
694 | sa_res_key, all_tg_pt, aptpl); | ||
695 | if (!(pr_reg)) | ||
696 | return NULL; | ||
697 | /* | ||
698 | * Return pointer to pr_reg for ALL_TG_PT=0 | ||
699 | */ | ||
700 | if (!(all_tg_pt)) | ||
701 | return pr_reg; | ||
702 | /* | ||
703 | * Create list of matching SCSI Initiator Port registrations | ||
704 | * for ALL_TG_PT=1 | ||
705 | */ | ||
706 | spin_lock(&dev->se_port_lock); | ||
707 | list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { | ||
708 | atomic_inc(&port->sep_tg_pt_ref_cnt); | ||
709 | smp_mb__after_atomic_inc(); | ||
710 | spin_unlock(&dev->se_port_lock); | ||
711 | |||
712 | spin_lock_bh(&port->sep_alua_lock); | ||
713 | list_for_each_entry(deve_tmp, &port->sep_alua_list, | ||
714 | alua_port_list) { | ||
715 | /* | ||
716 | * This pointer will be NULL for demo mode MappedLUNs | ||
717 | * that have not been made explicit via a ConfigFS | ||
718 | * MappedLUN group for the SCSI Initiator Node ACL. | ||
719 | */ | ||
720 | if (!(deve_tmp->se_lun_acl)) | ||
721 | continue; | ||
722 | |||
723 | nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; | ||
724 | /* | ||
725 | * Skip the matching struct se_node_acl that is allocated | ||
726 | * above.. | ||
727 | */ | ||
728 | if (nacl == nacl_tmp) | ||
729 | continue; | ||
730 | /* | ||
731 | * Only perform PR registrations for target ports on | ||
732 | * the same fabric module on which the REGISTER w/ ALL_TG_PT=1 | ||
733 | * arrived. | ||
734 | */ | ||
735 | if (tfo != nacl_tmp->se_tpg->se_tpg_tfo) | ||
736 | continue; | ||
737 | /* | ||
738 | * Look for a matching Initiator Node ACL in ASCII format | ||
739 | */ | ||
740 | if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) | ||
741 | continue; | ||
742 | |||
743 | atomic_inc(&deve_tmp->pr_ref_count); | ||
744 | smp_mb__after_atomic_inc(); | ||
745 | spin_unlock_bh(&port->sep_alua_lock); | ||
746 | /* | ||
747 | * Grab a configfs group dependency that is released | ||
748 | * for the exception path at label out: below, or upon | ||
749 | * completion of adding ALL_TG_PT=1 registrations in | ||
750 | * __core_scsi3_add_registration() | ||
751 | */ | ||
752 | ret = core_scsi3_lunacl_depend_item(deve_tmp); | ||
753 | if (ret < 0) { | ||
754 | printk(KERN_ERR "core_scsi3_lunacl_depend" | ||
755 | "_item() failed\n"); | ||
756 | atomic_dec(&port->sep_tg_pt_ref_cnt); | ||
757 | smp_mb__after_atomic_dec(); | ||
758 | atomic_dec(&deve_tmp->pr_ref_count); | ||
759 | smp_mb__after_atomic_dec(); | ||
760 | goto out; | ||
761 | } | ||
762 | /* | ||
763 | * Located a matching SCSI Initiator Port on a different | ||
764 | * port, allocate the pr_reg_atp and attach it to the | ||
765 | * pr_reg->pr_reg_atp_list that will be processed once | ||
766 | * the original *pr_reg is processed in | ||
767 | * __core_scsi3_add_registration() | ||
768 | */ | ||
769 | pr_reg_atp = __core_scsi3_do_alloc_registration(dev, | ||
770 | nacl_tmp, deve_tmp, NULL, | ||
771 | sa_res_key, all_tg_pt, aptpl); | ||
772 | if (!(pr_reg_atp)) { | ||
773 | atomic_dec(&port->sep_tg_pt_ref_cnt); | ||
774 | smp_mb__after_atomic_dec(); | ||
775 | atomic_dec(&deve_tmp->pr_ref_count); | ||
776 | smp_mb__after_atomic_dec(); | ||
777 | core_scsi3_lunacl_undepend_item(deve_tmp); | ||
778 | goto out; | ||
779 | } | ||
780 | |||
781 | list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, | ||
782 | &pr_reg->pr_reg_atp_list); | ||
783 | spin_lock_bh(&port->sep_alua_lock); | ||
784 | } | ||
785 | spin_unlock_bh(&port->sep_alua_lock); | ||
786 | |||
787 | spin_lock(&dev->se_port_lock); | ||
788 | atomic_dec(&port->sep_tg_pt_ref_cnt); | ||
789 | smp_mb__after_atomic_dec(); | ||
790 | } | ||
791 | spin_unlock(&dev->se_port_lock); | ||
792 | |||
793 | return pr_reg; | ||
794 | out: | ||
795 | list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, | ||
796 | &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { | ||
797 | list_del(&pr_reg_tmp->pr_reg_atp_mem_list); | ||
798 | core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); | ||
799 | kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); | ||
800 | } | ||
801 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | ||
802 | return NULL; | ||
803 | } | ||
804 | |||
805 | int core_scsi3_alloc_aptpl_registration( | ||
806 | struct t10_reservation_template *pr_tmpl, | ||
807 | u64 sa_res_key, | ||
808 | unsigned char *i_port, | ||
809 | unsigned char *isid, | ||
810 | u32 mapped_lun, | ||
811 | unsigned char *t_port, | ||
812 | u16 tpgt, | ||
813 | u32 target_lun, | ||
814 | int res_holder, | ||
815 | int all_tg_pt, | ||
816 | u8 type) | ||
817 | { | ||
818 | struct t10_pr_registration *pr_reg; | ||
819 | |||
820 | if (!(i_port) || !(t_port) || !(sa_res_key)) { | ||
821 | printk(KERN_ERR "Illegal parameters for APTPL registration\n"); | ||
822 | return -1; | ||
823 | } | ||
824 | |||
825 | pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); | ||
826 | if (!(pr_reg)) { | ||
827 | printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); | ||
828 | return -1; | ||
829 | } | ||
830 | pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); | ||
831 | |||
832 | INIT_LIST_HEAD(&pr_reg->pr_reg_list); | ||
833 | INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); | ||
834 | INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); | ||
835 | INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); | ||
836 | INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); | ||
837 | atomic_set(&pr_reg->pr_res_holders, 0); | ||
838 | pr_reg->pr_reg_nacl = NULL; | ||
839 | pr_reg->pr_reg_deve = NULL; | ||
840 | pr_reg->pr_res_mapped_lun = mapped_lun; | ||
841 | pr_reg->pr_aptpl_target_lun = target_lun; | ||
842 | pr_reg->pr_res_key = sa_res_key; | ||
843 | pr_reg->pr_reg_all_tg_pt = all_tg_pt; | ||
844 | pr_reg->pr_reg_aptpl = 1; | ||
845 | pr_reg->pr_reg_tg_pt_lun = NULL; | ||
846 | pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */ | ||
847 | pr_reg->pr_res_type = type; | ||
848 | /* | ||
849 | * If an ISID value had been saved in APTPL metadata for this | ||
850 | * SCSI Initiator Port, restore it now. | ||
851 | */ | ||
852 | if (isid != NULL) { | ||
853 | pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); | ||
854 | snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); | ||
855 | pr_reg->isid_present_at_reg = 1; | ||
856 | } | ||
857 | /* | ||
858 | * Copy the i_port and t_port information from caller. | ||
859 | */ | ||
860 | snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port); | ||
861 | snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port); | ||
862 | pr_reg->pr_reg_tpgt = tpgt; | ||
863 | /* | ||
864 | * Set pr_res_holder from caller, the pr_reg who is the reservation | ||
865 | * holder will get its pointer set in core_scsi3_aptpl_reserve() once | ||
866 | * the Initiator Node LUN ACL from the fabric module is created for | ||
867 | * this registration. | ||
868 | */ | ||
869 | pr_reg->pr_res_holder = res_holder; | ||
870 | |||
871 | list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); | ||
872 | printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" | ||
873 | " metadata\n", (res_holder) ? "+reservation" : ""); | ||
874 | return 0; | ||
875 | } | ||
876 | |||
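One gap worth noting: unlike __core_scsi3_do_alloc_registration(), the kzalloc() of pr_aptpl_buf above is not checked for failure before use. A guard in the style the file already uses would look like (sketch):

    pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
    if (!(pr_reg->pr_aptpl_buf)) {
            printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
            kmem_cache_free(t10_pr_reg_cache, pr_reg);
            return -1;
    }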
877 | static void core_scsi3_aptpl_reserve( | ||
878 | struct se_device *dev, | ||
879 | struct se_portal_group *tpg, | ||
880 | struct se_node_acl *node_acl, | ||
881 | struct t10_pr_registration *pr_reg) | ||
882 | { | ||
883 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
884 | int prf_isid; | ||
885 | |||
886 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
887 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
888 | PR_REG_ISID_ID_LEN); | ||
889 | |||
890 | spin_lock(&dev->dev_reservation_lock); | ||
891 | dev->dev_pr_res_holder = pr_reg; | ||
892 | spin_unlock(&dev->dev_reservation_lock); | ||
893 | |||
894 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" | ||
895 | " new reservation holder TYPE: %s ALL_TG_PT: %d\n", | ||
896 | TPG_TFO(tpg)->get_fabric_name(), | ||
897 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), | ||
898 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | ||
899 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", | ||
900 | TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname, | ||
901 | (prf_isid) ? &i_buf[0] : ""); | ||
902 | } | ||
903 | |||
904 | static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *, | ||
905 | struct t10_pr_registration *, int, int); | ||
906 | |||
907 | static int __core_scsi3_check_aptpl_registration( | ||
908 | struct se_device *dev, | ||
909 | struct se_portal_group *tpg, | ||
910 | struct se_lun *lun, | ||
911 | u32 target_lun, | ||
912 | struct se_node_acl *nacl, | ||
913 | struct se_dev_entry *deve) | ||
914 | { | ||
915 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | ||
916 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
917 | unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; | ||
918 | unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; | ||
919 | u16 tpgt; | ||
920 | |||
921 | memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN); | ||
922 | memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN); | ||
923 | /* | ||
924 | * Copy Initiator Port information from struct se_node_acl | ||
925 | */ | ||
926 | snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); | ||
927 | snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", | ||
928 | TPG_TFO(tpg)->tpg_get_wwn(tpg)); | ||
929 | tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); | ||
930 | /* | ||
931 | * Look for the matching registrations+reservation from those | ||
932 | * created from APTPL metadata. Note that multiple registrations | ||
933 | * may exist for fabrics that use ISIDs in their SCSI Initiator Port | ||
934 | * TransportIDs. | ||
935 | */ | ||
936 | spin_lock(&pr_tmpl->aptpl_reg_lock); | ||
937 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, | ||
938 | pr_reg_aptpl_list) { | ||
939 | if (!(strcmp(pr_reg->pr_iport, i_port)) && | ||
940 | (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && | ||
941 | !(strcmp(pr_reg->pr_tport, t_port)) && | ||
942 | (pr_reg->pr_reg_tpgt == tpgt) && | ||
943 | (pr_reg->pr_aptpl_target_lun == target_lun)) { | ||
944 | |||
945 | pr_reg->pr_reg_nacl = nacl; | ||
946 | pr_reg->pr_reg_deve = deve; | ||
947 | pr_reg->pr_reg_tg_pt_lun = lun; | ||
948 | |||
949 | list_del(&pr_reg->pr_reg_aptpl_list); | ||
950 | spin_unlock(&pr_tmpl->aptpl_reg_lock); | ||
951 | /* | ||
952 | * At this point all of the pointers in *pr_reg will | ||
953 | * be set up, so go ahead and add the registration. | ||
954 | */ | ||
955 | |||
956 | __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); | ||
957 | /* | ||
958 | * If this registration is the reservation holder, | ||
959 | * make that happen now.. | ||
960 | */ | ||
961 | if (pr_reg->pr_res_holder) | ||
962 | core_scsi3_aptpl_reserve(dev, tpg, | ||
963 | nacl, pr_reg); | ||
964 | /* | ||
965 | * Reenable pr_aptpl_active to accept new metadata | ||
966 | * updates once the SCSI device is active again.. | ||
967 | */ | ||
968 | spin_lock(&pr_tmpl->aptpl_reg_lock); | ||
969 | pr_tmpl->pr_aptpl_active = 1; | ||
970 | } | ||
971 | } | ||
972 | spin_unlock(&pr_tmpl->aptpl_reg_lock); | ||
973 | |||
974 | return 0; | ||
975 | } | ||
976 | |||
977 | int core_scsi3_check_aptpl_registration( | ||
978 | struct se_device *dev, | ||
979 | struct se_portal_group *tpg, | ||
980 | struct se_lun *lun, | ||
981 | struct se_lun_acl *lun_acl) | ||
982 | { | ||
983 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | ||
984 | struct se_node_acl *nacl = lun_acl->se_lun_nacl; | ||
985 | struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; | ||
986 | |||
987 | if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) | ||
988 | return 0; | ||
989 | |||
990 | return __core_scsi3_check_aptpl_registration(dev, tpg, lun, | ||
991 | lun->unpacked_lun, nacl, deve); | ||
992 | } | ||
993 | |||
994 | static void __core_scsi3_dump_registration( | ||
995 | struct target_core_fabric_ops *tfo, | ||
996 | struct se_device *dev, | ||
997 | struct se_node_acl *nacl, | ||
998 | struct t10_pr_registration *pr_reg, | ||
999 | int register_type) | ||
1000 | { | ||
1001 | struct se_portal_group *se_tpg = nacl->se_tpg; | ||
1002 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
1003 | int prf_isid; | ||
1004 | |||
1005 | memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN); | ||
1006 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
1007 | PR_REG_ISID_ID_LEN); | ||
1008 | |||
1009 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" | ||
1010 | " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? | ||
1011 | "_AND_MOVE" : (register_type == 1) ? | ||
1012 | "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, | ||
1013 | (prf_isid) ? i_buf : ""); | ||
1014 | printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", | ||
1015 | tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), | ||
1016 | tfo->tpg_get_tag(se_tpg)); | ||
1017 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" | ||
1018 | " Port(s)\n", tfo->get_fabric_name(), | ||
1019 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", | ||
1020 | TRANSPORT(dev)->name); | ||
1021 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" | ||
1022 | " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), | ||
1023 | pr_reg->pr_res_key, pr_reg->pr_res_generation, | ||
1024 | pr_reg->pr_reg_aptpl); | ||
1025 | } | ||
1026 | |||
1027 | /* | ||
1028 | * This function may be called with struct se_device->dev_reservation_lock | ||
1029 | * already held when register_move = 1. | ||
1030 | */ | ||
1031 | static void __core_scsi3_add_registration( | ||
1032 | struct se_device *dev, | ||
1033 | struct se_node_acl *nacl, | ||
1034 | struct t10_pr_registration *pr_reg, | ||
1035 | int register_type, | ||
1036 | int register_move) | ||
1037 | { | ||
1038 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | ||
1039 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | ||
1040 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | ||
1041 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
1042 | |||
1043 | /* | ||
1044 | * Increment PRgeneration counter for struct se_device upon a successful | ||
1045 | * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action | ||
1046 | * | ||
1047 | * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service | ||
1048 | * action, the struct se_device->dev_reservation_lock will already be held, | ||
1049 | * so we do not call core_scsi3_pr_generation() which grabs the lock | ||
1050 | * for the REGISTER. | ||
1051 | */ | ||
1052 | pr_reg->pr_res_generation = (register_move) ? | ||
1053 | T10_RES(su_dev)->pr_generation++ : | ||
1054 | core_scsi3_pr_generation(dev); | ||
1055 | |||
1056 | spin_lock(&pr_tmpl->registration_lock); | ||
1057 | list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list); | ||
1058 | pr_reg->pr_reg_deve->def_pr_registered = 1; | ||
1059 | |||
1060 | __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); | ||
1061 | spin_unlock(&pr_tmpl->registration_lock); | ||
1062 | /* | ||
1063 | * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. | ||
1064 | */ | ||
1065 | if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) | ||
1066 | return; | ||
1067 | /* | ||
1068 | * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 | ||
1069 | * allocated in __core_scsi3_alloc_registration() | ||
1070 | */ | ||
1071 | list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, | ||
1072 | &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { | ||
1073 | list_del(&pr_reg_tmp->pr_reg_atp_mem_list); | ||
1074 | |||
1075 | pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); | ||
1076 | |||
1077 | spin_lock(&pr_tmpl->registration_lock); | ||
1078 | list_add_tail(&pr_reg_tmp->pr_reg_list, | ||
1079 | &pr_tmpl->registration_list); | ||
1080 | pr_reg_tmp->pr_reg_deve->def_pr_registered = 1; | ||
1081 | |||
1082 | __core_scsi3_dump_registration(tfo, dev, | ||
1083 | pr_reg_tmp->pr_reg_nacl, pr_reg_tmp, | ||
1084 | register_type); | ||
1085 | spin_unlock(&pr_tmpl->registration_lock); | ||
1086 | /* | ||
1087 | * Drop configfs group dependency reference from | ||
1088 | * __core_scsi3_alloc_registration() | ||
1089 | */ | ||
1090 | core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); | ||
1091 | } | ||
1092 | } | ||
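
For the ALL_TG_PT=1 case, the companion registrations walked above were
pre-allocated by __core_scsi3_alloc_registration() and parked on
pr_reg->pr_reg_atp_list; the loop only moves each one onto the device's
registration_list, assigns it its own PRgeneration value, and drops the
configfs dependency taken at allocation time.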
1093 | |||
1094 | static int core_scsi3_alloc_registration( | ||
1095 | struct se_device *dev, | ||
1096 | struct se_node_acl *nacl, | ||
1097 | struct se_dev_entry *deve, | ||
1098 | unsigned char *isid, | ||
1099 | u64 sa_res_key, | ||
1100 | int all_tg_pt, | ||
1101 | int aptpl, | ||
1102 | int register_type, | ||
1103 | int register_move) | ||
1104 | { | ||
1105 | struct t10_pr_registration *pr_reg; | ||
1106 | |||
1107 | pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, | ||
1108 | sa_res_key, all_tg_pt, aptpl); | ||
1109 | if (!(pr_reg)) | ||
1110 | return -1; | ||
1111 | |||
1112 | __core_scsi3_add_registration(dev, nacl, pr_reg, | ||
1113 | register_type, register_move); | ||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1117 | static struct t10_pr_registration *__core_scsi3_locate_pr_reg( | ||
1118 | struct se_device *dev, | ||
1119 | struct se_node_acl *nacl, | ||
1120 | unsigned char *isid) | ||
1121 | { | ||
1122 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
1123 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | ||
1124 | struct se_portal_group *tpg; | ||
1125 | |||
1126 | spin_lock(&pr_tmpl->registration_lock); | ||
1127 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
1128 | &pr_tmpl->registration_list, pr_reg_list) { | ||
1129 | /* | ||
1130 | * First look for a matching struct se_node_acl | ||
1131 | */ | ||
1132 | if (pr_reg->pr_reg_nacl != nacl) | ||
1133 | continue; | ||
1134 | |||
1135 | tpg = pr_reg->pr_reg_nacl->se_tpg; | ||
1136 | /* | ||
1137 | * If this registration does NOT contain a fabric provided | ||
1138 | * ISID, then we have found a match. | ||
1139 | */ | ||
1140 | if (!(pr_reg->isid_present_at_reg)) { | ||
1141 | /* | ||
1142 | * Determine if this SCSI device server requires that | ||
1143 | * SCSI Initiator TransportIDs w/ ISIDs be enforced | ||
1144 | * for fabric modules (iSCSI) requiring them. | ||
1145 | */ | ||
1146 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { | ||
1147 | if (DEV_ATTRIB(dev)->enforce_pr_isids) | ||
1148 | continue; | ||
1149 | } | ||
1150 | atomic_inc(&pr_reg->pr_res_holders); | ||
1151 | smp_mb__after_atomic_inc(); | ||
1152 | spin_unlock(&pr_tmpl->registration_lock); | ||
1153 | return pr_reg; | ||
1154 | } | ||
1155 | /* | ||
1156 | * If the *pr_reg contains a fabric defined ISID for multi-value | ||
1157 | * SCSI Initiator Port TransportIDs, then we expect a valid | ||
1158 | * matching ISID to be provided by the local SCSI Initiator Port. | ||
1159 | */ | ||
1160 | if (!(isid)) | ||
1161 | continue; | ||
1162 | if (strcmp(isid, pr_reg->pr_reg_isid)) | ||
1163 | continue; | ||
1164 | |||
1165 | atomic_inc(&pr_reg->pr_res_holders); | ||
1166 | smp_mb__after_atomic_inc(); | ||
1167 | spin_unlock(&pr_tmpl->registration_lock); | ||
1168 | return pr_reg; | ||
1169 | } | ||
1170 | spin_unlock(&pr_tmpl->registration_lock); | ||
1171 | |||
1172 | return NULL; | ||
1173 | } | ||
1174 | |||
1175 | static struct t10_pr_registration *core_scsi3_locate_pr_reg( | ||
1176 | struct se_device *dev, | ||
1177 | struct se_node_acl *nacl, | ||
1178 | struct se_session *sess) | ||
1179 | { | ||
1180 | struct se_portal_group *tpg = nacl->se_tpg; | ||
1181 | unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | ||
1182 | |||
1183 | if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { | ||
1184 | memset(&buf[0], 0, PR_REG_ISID_LEN); | ||
1185 | TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0], | ||
1186 | PR_REG_ISID_LEN); | ||
1187 | isid_ptr = &buf[0]; | ||
1188 | } | ||
1189 | |||
1190 | return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr); | ||
1191 | } | ||
1192 | |||
1193 | static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) | ||
1194 | { | ||
1195 | atomic_dec(&pr_reg->pr_res_holders); | ||
1196 | smp_mb__after_atomic_dec(); | ||
1197 | } | ||
1198 | |||
1199 | static int core_scsi3_check_implict_release( | ||
1200 | struct se_device *dev, | ||
1201 | struct t10_pr_registration *pr_reg) | ||
1202 | { | ||
1203 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; | ||
1204 | struct t10_pr_registration *pr_res_holder; | ||
1205 | int ret = 0; | ||
1206 | |||
1207 | spin_lock(&dev->dev_reservation_lock); | ||
1208 | pr_res_holder = dev->dev_pr_res_holder; | ||
1209 | if (!(pr_res_holder)) { | ||
1210 | spin_unlock(&dev->dev_reservation_lock); | ||
1211 | return ret; | ||
1212 | } | ||
1213 | if (pr_res_holder == pr_reg) { | ||
1214 | /* | ||
1215 | * Perform an implict RELEASE if the registration that | ||
1216 | * is being released is holding the reservation. | ||
1217 | * | ||
1218 | * From spc4r17, section 5.7.11.1: | ||
1219 | * | ||
1220 | * e) If the I_T nexus is the persistent reservation holder | ||
1221 | * and the persistent reservation is not an all registrants | ||
1222 | * type, then a PERSISTENT RESERVE OUT command with REGISTER | ||
1223 | * service action or REGISTER AND IGNORE EXISTING KEY | ||
1224 | * service action with the SERVICE ACTION RESERVATION KEY | ||
1225 | * field set to zero (see 5.7.11.3). | ||
1226 | */ | ||
1227 | __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); | ||
1228 | ret = 1; | ||
1229 | /* | ||
1230 | * For 'All Registrants' reservation types, all existing | ||
1231 | * registrations are still processed as reservation holders | ||
1232 | * in core_scsi3_pr_seq_non_holder() after the initial | ||
1233 | * reservation holder is implicitly released here. | ||
1234 | */ | ||
1235 | } else if (pr_reg->pr_reg_all_tg_pt && | ||
1236 | (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, | ||
1237 | pr_reg->pr_reg_nacl->initiatorname)) && | ||
1238 | (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { | ||
1239 | printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" | ||
1240 | " UNREGISTER while existing reservation with matching" | ||
1241 | " key 0x%016Lx is present from another SCSI Initiator" | ||
1242 | " Port\n", pr_reg->pr_res_key); | ||
1243 | ret = -1; | ||
1244 | } | ||
1245 | spin_unlock(&dev->dev_reservation_lock); | ||
1246 | |||
1247 | return ret; | ||
1248 | } | ||
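
In practice this is the path taken when the reservation holder unregisters
itself: a REGISTER (or REGISTER AND IGNORE EXISTING KEY) with the SERVICE
ACTION RESERVATION KEY field set to zero (e.g. sg_persist --out --register
--param-rk=<key> --param-sark=0 /dev/sdX from sg3_utils) drops the
registration and implicitly releases the reservation along with it.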
1249 | |||
1250 | /* | ||
1251 | * Called with struct t10_reservation_template->registration_lock held. | ||
1252 | */ | ||
1253 | static void __core_scsi3_free_registration( | ||
1254 | struct se_device *dev, | ||
1255 | struct t10_pr_registration *pr_reg, | ||
1256 | struct list_head *preempt_and_abort_list, | ||
1257 | int dec_holders) | ||
1258 | { | ||
1259 | struct target_core_fabric_ops *tfo = | ||
1260 | pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; | ||
1261 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
1262 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
1263 | int prf_isid; | ||
1264 | |||
1265 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
1266 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
1267 | PR_REG_ISID_ID_LEN); | ||
1268 | |||
1269 | pr_reg->pr_reg_deve->def_pr_registered = 0; | ||
1270 | pr_reg->pr_reg_deve->pr_res_key = 0; | ||
1271 | list_del(&pr_reg->pr_reg_list); | ||
1272 | /* | ||
1273 | * The caller is accessing *pr_reg via core_scsi3_locate_pr_reg(), | ||
1274 | * so call core_scsi3_put_pr_reg() to decrement our reference. | ||
1275 | */ | ||
1276 | if (dec_holders) | ||
1277 | core_scsi3_put_pr_reg(pr_reg); | ||
1278 | /* | ||
1279 | * Wait until all references from any other I_T nexuses for this | ||
1280 | * *pr_reg have been released. Because list_del() is called above, | ||
1281 | * the last core_scsi3_put_pr_reg(pr_reg) will release this reference | ||
1282 | * count back to zero, and we release *pr_reg. | ||
1283 | */ | ||
1284 | while (atomic_read(&pr_reg->pr_res_holders) != 0) { | ||
1285 | spin_unlock(&pr_tmpl->registration_lock); | ||
1286 | printk("SPC-3 PR [%s] waiting for pr_res_holders\n", | ||
1287 | tfo->get_fabric_name()); | ||
1288 | cpu_relax(); | ||
1289 | spin_lock(&pr_tmpl->registration_lock); | ||
1290 | } | ||
1291 | |||
1292 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" | ||
1293 | " Node: %s%s\n", tfo->get_fabric_name(), | ||
1294 | pr_reg->pr_reg_nacl->initiatorname, | ||
1295 | (prf_isid) ? &i_buf[0] : ""); | ||
1296 | printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" | ||
1297 | " Port(s)\n", tfo->get_fabric_name(), | ||
1298 | (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", | ||
1299 | TRANSPORT(dev)->name); | ||
1300 | printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" | ||
1301 | " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, | ||
1302 | pr_reg->pr_res_generation); | ||
1303 | |||
1304 | if (!(preempt_and_abort_list)) { | ||
1305 | pr_reg->pr_reg_deve = NULL; | ||
1306 | pr_reg->pr_reg_nacl = NULL; | ||
1307 | kfree(pr_reg->pr_aptpl_buf); | ||
1308 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | ||
1309 | return; | ||
1310 | } | ||
1311 | /* | ||
1312 | * For PREEMPT_AND_ABORT, the *pr_reg entries on preempt_and_abort_list | ||
1313 | * are released once the ABORT_TASK_SET has completed.. | ||
1314 | */ | ||
1315 | list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list); | ||
1316 | } | ||
1317 | |||
1318 | void core_scsi3_free_pr_reg_from_nacl( | ||
1319 | struct se_device *dev, | ||
1320 | struct se_node_acl *nacl) | ||
1321 | { | ||
1322 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
1323 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | ||
1324 | /* | ||
1325 | * If the passed se_node_acl matches the reservation holder, | ||
1326 | * release the reservation. | ||
1327 | */ | ||
1328 | spin_lock(&dev->dev_reservation_lock); | ||
1329 | pr_res_holder = dev->dev_pr_res_holder; | ||
1330 | if ((pr_res_holder != NULL) && | ||
1331 | (pr_res_holder->pr_reg_nacl == nacl)) | ||
1332 | __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); | ||
1333 | spin_unlock(&dev->dev_reservation_lock); | ||
1334 | /* | ||
1335 | * Release any registration associated with the struct se_node_acl. | ||
1336 | */ | ||
1337 | spin_lock(&pr_tmpl->registration_lock); | ||
1338 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
1339 | &pr_tmpl->registration_list, pr_reg_list) { | ||
1340 | |||
1341 | if (pr_reg->pr_reg_nacl != nacl) | ||
1342 | continue; | ||
1343 | |||
1344 | __core_scsi3_free_registration(dev, pr_reg, NULL, 0); | ||
1345 | } | ||
1346 | spin_unlock(&pr_tmpl->registration_lock); | ||
1347 | } | ||
1348 | |||
1349 | void core_scsi3_free_all_registrations( | ||
1350 | struct se_device *dev) | ||
1351 | { | ||
1352 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
1353 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; | ||
1354 | |||
1355 | spin_lock(&dev->dev_reservation_lock); | ||
1356 | pr_res_holder = dev->dev_pr_res_holder; | ||
1357 | if (pr_res_holder != NULL) { | ||
1358 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | ||
1359 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, | ||
1360 | pr_res_holder, 0); | ||
1361 | } | ||
1362 | spin_unlock(&dev->dev_reservation_lock); | ||
1363 | |||
1364 | spin_lock(&pr_tmpl->registration_lock); | ||
1365 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
1366 | &pr_tmpl->registration_list, pr_reg_list) { | ||
1367 | |||
1368 | __core_scsi3_free_registration(dev, pr_reg, NULL, 0); | ||
1369 | } | ||
1370 | spin_unlock(&pr_tmpl->registration_lock); | ||
1371 | |||
1372 | spin_lock(&pr_tmpl->aptpl_reg_lock); | ||
1373 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, | ||
1374 | pr_reg_aptpl_list) { | ||
1375 | list_del(&pr_reg->pr_reg_aptpl_list); | ||
1376 | kfree(pr_reg->pr_aptpl_buf); | ||
1377 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | ||
1378 | } | ||
1379 | spin_unlock(&pr_tmpl->aptpl_reg_lock); | ||
1380 | } | ||
1381 | |||
1382 | static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) | ||
1383 | { | ||
1384 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | ||
1385 | &tpg->tpg_group.cg_item); | ||
1386 | } | ||
1387 | |||
1388 | static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) | ||
1389 | { | ||
1390 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | ||
1391 | &tpg->tpg_group.cg_item); | ||
1392 | |||
1393 | atomic_dec(&tpg->tpg_pr_ref_count); | ||
1394 | smp_mb__after_atomic_dec(); | ||
1395 | } | ||
1396 | |||
1397 | static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) | ||
1398 | { | ||
1399 | struct se_portal_group *tpg = nacl->se_tpg; | ||
1400 | |||
1401 | if (nacl->dynamic_node_acl) | ||
1402 | return 0; | ||
1403 | |||
1404 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | ||
1405 | &nacl->acl_group.cg_item); | ||
1406 | } | ||
1407 | |||
1408 | static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) | ||
1409 | { | ||
1410 | struct se_portal_group *tpg = nacl->se_tpg; | ||
1411 | |||
1412 | if (nacl->dynamic_node_acl) { | ||
1413 | atomic_dec(&nacl->acl_pr_ref_count); | ||
1414 | smp_mb__after_atomic_dec(); | ||
1415 | return; | ||
1416 | } | ||
1417 | |||
1418 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | ||
1419 | &nacl->acl_group.cg_item); | ||
1420 | |||
1421 | atomic_dec(&nacl->acl_pr_ref_count); | ||
1422 | smp_mb__after_atomic_dec(); | ||
1423 | } | ||
1424 | |||
1425 | static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) | ||
1426 | { | ||
1427 | struct se_lun_acl *lun_acl = se_deve->se_lun_acl; | ||
1428 | struct se_node_acl *nacl; | ||
1429 | struct se_portal_group *tpg; | ||
1430 | /* | ||
1431 | * For nacl->dynamic_node_acl=1 | ||
1432 | */ | ||
1433 | if (!(lun_acl)) | ||
1434 | return 0; | ||
1435 | |||
1436 | nacl = lun_acl->se_lun_nacl; | ||
1437 | tpg = nacl->se_tpg; | ||
1438 | |||
1439 | return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, | ||
1440 | &lun_acl->se_lun_group.cg_item); | ||
1441 | } | ||
1442 | |||
1443 | static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) | ||
1444 | { | ||
1445 | struct se_lun_acl *lun_acl = se_deve->se_lun_acl; | ||
1446 | struct se_node_acl *nacl; | ||
1447 | struct se_portal_group *tpg; | ||
1448 | /* | ||
1449 | * For nacl->dynamic_node_acl=1 | ||
1450 | */ | ||
1451 | if (!(lun_acl)) { | ||
1452 | atomic_dec(&se_deve->pr_ref_count); | ||
1453 | smp_mb__after_atomic_dec(); | ||
1454 | return; | ||
1455 | } | ||
1456 | nacl = lun_acl->se_lun_nacl; | ||
1457 | tpg = nacl->se_tpg; | ||
1458 | |||
1459 | configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, | ||
1460 | &lun_acl->se_lun_group.cg_item); | ||
1461 | |||
1462 | atomic_dec(&se_deve->pr_ref_count); | ||
1463 | smp_mb__after_atomic_dec(); | ||
1464 | } | ||
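
A note on the depend/undepend helpers above: configfs_depend_item()
pins the fabric module's configfs group (TPG, node ACL, or LUN ACL) so a
concurrent rmdir(2) from userspace cannot tear the object down while a
PERSISTENT RESERVE OUT operation still holds pointers into it. Each
*_undepend_item() drops that pin and then decrements the corresponding
pr_ref_count style reference that the caller took when it looked the
object up.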
1465 | |||
1466 | static int core_scsi3_decode_spec_i_port( | ||
1467 | struct se_cmd *cmd, | ||
1468 | struct se_portal_group *tpg, | ||
1469 | unsigned char *l_isid, | ||
1470 | u64 sa_res_key, | ||
1471 | int all_tg_pt, | ||
1472 | int aptpl) | ||
1473 | { | ||
1474 | struct se_device *dev = SE_DEV(cmd); | ||
1475 | struct se_port *tmp_port; | ||
1476 | struct se_portal_group *dest_tpg = NULL, *tmp_tpg; | ||
1477 | struct se_session *se_sess = SE_SESS(cmd); | ||
1478 | struct se_node_acl *dest_node_acl = NULL; | ||
1479 | struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; | ||
1480 | struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; | ||
1481 | struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; | ||
1482 | struct list_head tid_dest_list; | ||
1483 | struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; | ||
1484 | struct target_core_fabric_ops *tmp_tf_ops; | ||
1485 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
1486 | unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; | ||
1487 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | ||
1488 | u32 tpdl, tid_len = 0; | ||
1489 | int ret, dest_local_nexus, prf_isid; | ||
1490 | u32 dest_rtpi = 0; | ||
1491 | |||
1492 | memset(dest_iport, 0, 64); | ||
1493 | INIT_LIST_HEAD(&tid_dest_list); | ||
1494 | |||
1495 | local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
1496 | /* | ||
1497 | * Allocate a struct pr_transport_id_holder and setup the | ||
1498 | * local_node_acl and local_se_deve pointers and add to | ||
1499 | * struct list_head tid_dest_list for registration | ||
1500 | * processing in the tid_dest_list loop below. | ||
1501 | */ | ||
1502 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); | ||
1503 | if (!(tidh_new)) { | ||
1504 | printk(KERN_ERR "Unable to allocate tidh_new\n"); | ||
1505 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1506 | } | ||
1507 | INIT_LIST_HEAD(&tidh_new->dest_list); | ||
1508 | tidh_new->dest_tpg = tpg; | ||
1509 | tidh_new->dest_node_acl = se_sess->se_node_acl; | ||
1510 | tidh_new->dest_se_deve = local_se_deve; | ||
1511 | |||
1512 | local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), | ||
1513 | se_sess->se_node_acl, local_se_deve, l_isid, | ||
1514 | sa_res_key, all_tg_pt, aptpl); | ||
1515 | if (!(local_pr_reg)) { | ||
1516 | kfree(tidh_new); | ||
1517 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1518 | } | ||
1519 | tidh_new->dest_pr_reg = local_pr_reg; | ||
1520 | /* | ||
1521 | * The local I_T nexus does not hold any configfs dependencies, | ||
1522 | * so we set tid_h->dest_local_nexus=1 to prevent the | ||
1523 | * configfs_undepend_item() calls in the tid_dest_list loops below. | ||
1524 | */ | ||
1525 | tidh_new->dest_local_nexus = 1; | ||
1526 | list_add_tail(&tidh_new->dest_list, &tid_dest_list); | ||
1527 | /* | ||
1528 | * For a PERSISTENT RESERVE OUT specify initiator ports payload, | ||
1529 | * first extract TransportID Parameter Data Length, and make sure | ||
1530 | * the value matches up to the SCSI expected data transfer length. | ||
1531 | */ | ||
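 | /* Byte-wise assembly of the big-endian 32-bit value in bytes 24-27; | ||
 | * this is equivalent to get_unaligned_be32(&buf[24]). */ | ||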
1532 | tpdl = (buf[24] & 0xff) << 24; | ||
1533 | tpdl |= (buf[25] & 0xff) << 16; | ||
1534 | tpdl |= (buf[26] & 0xff) << 8; | ||
1535 | tpdl |= buf[27] & 0xff; | ||
1536 | |||
1537 | if ((tpdl + 28) != cmd->data_length) { | ||
1538 | printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" | ||
1539 | " does not equal CDB data_length: %u\n", tpdl, | ||
1540 | cmd->data_length); | ||
1541 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
1542 | goto out; | ||
1543 | } | ||
1544 | /* | ||
1545 | * Start processing the received transport IDs using the | ||
1546 | * receiving I_T Nexus portal's fabric dependent methods to | ||
1547 | * obtain the SCSI Initiator Port/Device Identifiers. | ||
1548 | */ | ||
1549 | ptr = &buf[28]; | ||
1550 | |||
1551 | while (tpdl > 0) { | ||
1552 | proto_ident = (ptr[0] & 0x0f); | ||
1553 | dest_tpg = NULL; | ||
1554 | |||
1555 | spin_lock(&dev->se_port_lock); | ||
1556 | list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { | ||
1557 | tmp_tpg = tmp_port->sep_tpg; | ||
1558 | if (!(tmp_tpg)) | ||
1559 | continue; | ||
1560 | tmp_tf_ops = TPG_TFO(tmp_tpg); | ||
1561 | if (!(tmp_tf_ops)) | ||
1562 | continue; | ||
1563 | if (!(tmp_tf_ops->get_fabric_proto_ident) || | ||
1564 | !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) | ||
1565 | continue; | ||
1566 | /* | ||
1567 | * Look for the matching proto_ident provided by | ||
1568 | * the received TransportID | ||
1569 | */ | ||
1570 | tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg); | ||
1571 | if (tmp_proto_ident != proto_ident) | ||
1572 | continue; | ||
1573 | dest_rtpi = tmp_port->sep_rtpi; | ||
1574 | |||
1575 | i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( | ||
1576 | tmp_tpg, (const char *)ptr, &tid_len, | ||
1577 | &iport_ptr); | ||
1578 | if (!(i_str)) | ||
1579 | continue; | ||
1580 | |||
1581 | atomic_inc(&tmp_tpg->tpg_pr_ref_count); | ||
1582 | smp_mb__after_atomic_inc(); | ||
1583 | spin_unlock(&dev->se_port_lock); | ||
1584 | |||
1585 | ret = core_scsi3_tpg_depend_item(tmp_tpg); | ||
1586 | if (ret != 0) { | ||
1587 | printk(KERN_ERR "core_scsi3_tpg_depend_item()" | ||
1588 | " failed for tmp_tpg\n"); | ||
1589 | atomic_dec(&tmp_tpg->tpg_pr_ref_count); | ||
1590 | smp_mb__after_atomic_dec(); | ||
1591 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1592 | goto out; | ||
1593 | } | ||
1594 | /* | ||
1595 | * Locate the destination initiator ACL to be registered | ||
1596 | * from the decoded fabric module specific TransportID | ||
1597 | * at *i_str. | ||
1598 | */ | ||
1599 | spin_lock_bh(&tmp_tpg->acl_node_lock); | ||
1600 | dest_node_acl = __core_tpg_get_initiator_node_acl( | ||
1601 | tmp_tpg, i_str); | ||
1602 | if (dest_node_acl) { | ||
1603 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | ||
1604 | smp_mb__after_atomic_inc(); | ||
1605 | } | ||
1606 | spin_unlock_bh(&tmp_tpg->acl_node_lock); | ||
1607 | |||
1608 | if (!(dest_node_acl)) { | ||
1609 | core_scsi3_tpg_undepend_item(tmp_tpg); | ||
1610 | spin_lock(&dev->se_port_lock); | ||
1611 | continue; | ||
1612 | } | ||
1613 | |||
1614 | ret = core_scsi3_nodeacl_depend_item(dest_node_acl); | ||
1615 | if (ret != 0) { | ||
1616 | printk(KERN_ERR "configfs_depend_item() failed" | ||
1617 | " for dest_node_acl->acl_group\n"); | ||
1618 | atomic_dec(&dest_node_acl->acl_pr_ref_count); | ||
1619 | smp_mb__after_atomic_dec(); | ||
1620 | core_scsi3_tpg_undepend_item(tmp_tpg); | ||
1621 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1622 | goto out; | ||
1623 | } | ||
1624 | |||
1625 | dest_tpg = tmp_tpg; | ||
1626 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" | ||
1627 | " %s Port RTPI: %hu\n", | ||
1628 | TPG_TFO(dest_tpg)->get_fabric_name(), | ||
1629 | dest_node_acl->initiatorname, dest_rtpi); | ||
1630 | |||
1631 | spin_lock(&dev->se_port_lock); | ||
1632 | break; | ||
1633 | } | ||
1634 | spin_unlock(&dev->se_port_lock); | ||
1635 | |||
1636 | if (!(dest_tpg)) { | ||
1637 | printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" | ||
1638 | " dest_tpg\n"); | ||
1639 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
1640 | goto out; | ||
1641 | } | ||
1642 | #if 0 | ||
1643 | printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" | ||
1644 | " tid_len: %d for %s + %s\n", | ||
1645 | TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length, | ||
1646 | tpdl, tid_len, i_str, iport_ptr); | ||
1647 | #endif | ||
1648 | if (tid_len > tpdl) { | ||
1649 | printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" | ||
1650 | " %u for Transport ID: %s\n", tid_len, ptr); | ||
1651 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1652 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1653 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
1654 | goto out; | ||
1655 | } | ||
1656 | /* | ||
1657 | * Locate the destination struct se_dev_entry pointer for matching | ||
1658 | * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus | ||
1659 | * Target Port. | ||
1660 | */ | ||
1661 | dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, | ||
1662 | dest_rtpi); | ||
1663 | if (!(dest_se_deve)) { | ||
1664 | printk(KERN_ERR "Unable to locate %s dest_se_deve" | ||
1665 | " from destination RTPI: %hu\n", | ||
1666 | TPG_TFO(dest_tpg)->get_fabric_name(), | ||
1667 | dest_rtpi); | ||
1668 | |||
1669 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1670 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1671 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
1672 | goto out; | ||
1673 | } | ||
1674 | |||
1675 | ret = core_scsi3_lunacl_depend_item(dest_se_deve); | ||
1676 | if (ret < 0) { | ||
1677 | printk(KERN_ERR "core_scsi3_lunacl_depend_item()" | ||
1678 | " failed\n"); | ||
1679 | atomic_dec(&dest_se_deve->pr_ref_count); | ||
1680 | smp_mb__after_atomic_dec(); | ||
1681 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1682 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1683 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1684 | goto out; | ||
1685 | } | ||
1686 | #if 0 | ||
1687 | printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" | ||
1688 | " dest_se_deve mapped_lun: %u\n", | ||
1689 | TPG_TFO(dest_tpg)->get_fabric_name(), | ||
1690 | dest_node_acl->initiatorname, dest_se_deve->mapped_lun); | ||
1691 | #endif | ||
1692 | /* | ||
1693 | * Skip any TransportIDs that already have a registration for | ||
1694 | * this target port. | ||
1695 | */ | ||
1696 | pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | ||
1697 | iport_ptr); | ||
1698 | if (pr_reg_e) { | ||
1699 | core_scsi3_put_pr_reg(pr_reg_e); | ||
1700 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
1701 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1702 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1703 | ptr += tid_len; | ||
1704 | tpdl -= tid_len; | ||
1705 | tid_len = 0; | ||
1706 | continue; | ||
1707 | } | ||
1708 | /* | ||
1709 | * Allocate a struct pr_transport_id_holder and setup | ||
1710 | * the dest_node_acl and dest_se_deve pointers for the | ||
1711 | * loop below. | ||
1712 | */ | ||
1713 | tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), | ||
1714 | GFP_KERNEL); | ||
1715 | if (!(tidh_new)) { | ||
1716 | printk(KERN_ERR "Unable to allocate tidh_new\n"); | ||
1717 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
1718 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1719 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1720 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1721 | goto out; | ||
1722 | } | ||
1723 | INIT_LIST_HEAD(&tidh_new->dest_list); | ||
1724 | tidh_new->dest_tpg = dest_tpg; | ||
1725 | tidh_new->dest_node_acl = dest_node_acl; | ||
1726 | tidh_new->dest_se_deve = dest_se_deve; | ||
1727 | |||
1728 | /* | ||
1729 | * Allocate, but do NOT add the registration for the | ||
1730 | * TransportID referenced SCSI Initiator port. This is | ||
1731 | * done because of the following from spc4r17 in section | ||
1732 | * 6.14.3 wrt SPEC_I_PT: | ||
1733 | * | ||
1734 | * "If a registration fails for any initiator port (e.g., if the | ||
1735 | * logical unit does not have enough resources available to | ||
1736 | * hold the registration information), no registrations shall be | ||
1737 | * made, and the command shall be terminated with | ||
1738 | * CHECK CONDITION status." | ||
1739 | * | ||
1740 | * That means we call __core_scsi3_alloc_registration() here, | ||
1741 | * and then call __core_scsi3_add_registration() in the | ||
1742 | * 2nd loop which will never fail. | ||
1743 | */ | ||
1744 | dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), | ||
1745 | dest_node_acl, dest_se_deve, iport_ptr, | ||
1746 | sa_res_key, all_tg_pt, aptpl); | ||
1747 | if (!(dest_pr_reg)) { | ||
1748 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
1749 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1750 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1751 | kfree(tidh_new); | ||
1752 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
1753 | goto out; | ||
1754 | } | ||
1755 | tidh_new->dest_pr_reg = dest_pr_reg; | ||
1756 | list_add_tail(&tidh_new->dest_list, &tid_dest_list); | ||
1757 | |||
1758 | ptr += tid_len; | ||
1759 | tpdl -= tid_len; | ||
1760 | tid_len = 0; | ||
1761 | |||
1762 | } | ||
1763 | /* | ||
1764 | * Go ahead and create registrations from tid_dest_list for the | ||
1765 | * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl | ||
1766 | * and dest_se_deve. | ||
1767 | * | ||
1768 | * The SA Reservation Key from the PROUT is set for the | ||
1769 | * registration, and ALL_TG_PT is also passed. ALL_TG_PT=1 | ||
1770 | * means that the TransportID Initiator port will be | ||
1771 | * registered on all of the target ports in the SCSI target device. | ||
1772 | * ALL_TG_PT=0 means the registration will only be for the | ||
1773 | * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1 | ||
1774 | * was received. | ||
1775 | */ | ||
1776 | list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { | ||
1777 | dest_tpg = tidh->dest_tpg; | ||
1778 | dest_node_acl = tidh->dest_node_acl; | ||
1779 | dest_se_deve = tidh->dest_se_deve; | ||
1780 | dest_pr_reg = tidh->dest_pr_reg; | ||
1781 | dest_local_nexus = tidh->dest_local_nexus; | ||
1782 | |||
1783 | list_del(&tidh->dest_list); | ||
1784 | kfree(tidh); | ||
1785 | |||
1786 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
1787 | prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], | ||
1788 | PR_REG_ISID_ID_LEN); | ||
1789 | |||
1790 | __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl, | ||
1791 | dest_pr_reg, 0, 0); | ||
1792 | |||
1793 | printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" | ||
1794 | " registered Transport ID for Node: %s%s Mapped LUN:" | ||
1795 | " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(), | ||
1796 | dest_node_acl->initiatorname, (prf_isid) ? | ||
1797 | &i_buf[0] : "", dest_se_deve->mapped_lun); | ||
1798 | |||
1799 | if (dest_local_nexus) | ||
1800 | continue; | ||
1801 | |||
1802 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
1803 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1804 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1805 | } | ||
1806 | |||
1807 | return 0; | ||
1808 | out: | ||
1809 | /* | ||
1810 | * For the failure case, release everything from tid_dest_list | ||
1811 | * including *dest_pr_reg and the configfs dependencies.. | ||
1812 | */ | ||
1813 | list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { | ||
1814 | dest_tpg = tidh->dest_tpg; | ||
1815 | dest_node_acl = tidh->dest_node_acl; | ||
1816 | dest_se_deve = tidh->dest_se_deve; | ||
1817 | dest_pr_reg = tidh->dest_pr_reg; | ||
1818 | dest_local_nexus = tidh->dest_local_nexus; | ||
1819 | |||
1820 | list_del(&tidh->dest_list); | ||
1821 | kfree(tidh); | ||
1822 | /* | ||
1823 | * Release any extra ALL_TG_PT=1 registrations for | ||
1824 | * the SPEC_I_PT=1 case. | ||
1825 | */ | ||
1826 | list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, | ||
1827 | &dest_pr_reg->pr_reg_atp_list, | ||
1828 | pr_reg_atp_mem_list) { | ||
1829 | list_del(&pr_reg_tmp->pr_reg_atp_mem_list); | ||
1830 | core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); | ||
1831 | kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); | ||
1832 | } | ||
1833 | |||
1834 | kfree(dest_pr_reg->pr_aptpl_buf); | ||
1835 | kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); | ||
1836 | |||
1837 | if (dest_local_nexus) | ||
1838 | continue; | ||
1839 | |||
1840 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
1841 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
1842 | core_scsi3_tpg_undepend_item(dest_tpg); | ||
1843 | } | ||
1844 | return ret; | ||
1845 | } | ||
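
To make the parameter list layout that core_scsi3_decode_spec_i_port()
walks concrete, here is a minimal userspace sketch (not part of this
driver) that builds a PROUT REGISTER + SPEC_I_PT=1 payload carrying a
single iSCSI TransportID; the 28-byte header offset and the big-endian
TransportID Parameter Data Length in bytes 24-27 match the checks above:

	#include <stdint.h>
	#include <string.h>

	/* iSCSI TransportID: PROTOCOL IDENTIFIER 0x5 in byte 0, big-endian
	 * ADDITIONAL LENGTH in bytes 2-3, then the NULL-terminated iSCSI
	 * name padded out to a multiple of four bytes. */
	static size_t build_iscsi_tid(uint8_t *p, const char *iqn)
	{
		size_t nlen = strlen(iqn) + 1;
		size_t pad = (4 - (nlen % 4)) % 4;

		p[0] = 0x05;
		p[1] = 0x00;
		p[2] = ((nlen + pad) >> 8) & 0xff;
		p[3] = (nlen + pad) & 0xff;
		memcpy(&p[4], iqn, nlen);
		memset(&p[4 + nlen], 0, pad);
		return 4 + nlen + pad;
	}

	size_t build_spec_i_pt_list(uint8_t *buf, uint64_t sa_res_key,
				    const char *iqn)
	{
		size_t tid_len = build_iscsi_tid(&buf[28], iqn);
		int i;

		memset(buf, 0, 28);
		/* SERVICE ACTION RESERVATION KEY: bytes 8-15, big-endian */
		for (i = 0; i < 8; i++)
			buf[8 + i] = (sa_res_key >> (56 - 8 * i)) & 0xff;
		buf[20] |= 0x08;	/* the SPEC_I_PT bit */
		/* TransportID Parameter Data Length: bytes 24-27 */
		buf[24] = (tid_len >> 24) & 0xff;
		buf[25] = (tid_len >> 16) & 0xff;
		buf[26] = (tid_len >> 8) & 0xff;
		buf[27] = tid_len & 0xff;
		return 28 + tid_len;	/* satisfies the (tpdl + 28) check */
	}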
1846 | |||
1847 | /* | ||
1848 | * Called with struct se_device->dev_reservation_lock held | ||
1849 | */ | ||
1850 | static int __core_scsi3_update_aptpl_buf( | ||
1851 | struct se_device *dev, | ||
1852 | unsigned char *buf, | ||
1853 | u32 pr_aptpl_buf_len, | ||
1854 | int clear_aptpl_metadata) | ||
1855 | { | ||
1856 | struct se_lun *lun; | ||
1857 | struct se_portal_group *tpg; | ||
1858 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | ||
1859 | struct t10_pr_registration *pr_reg; | ||
1860 | unsigned char tmp[512], isid_buf[32]; | ||
1861 | ssize_t len = 0; | ||
1862 | int reg_count = 0; | ||
1863 | |||
1864 | memset(buf, 0, pr_aptpl_buf_len); | ||
1865 | /* | ||
1866 | * Called to clear metadata once APTPL has been deactivated. | ||
1867 | */ | ||
1868 | if (clear_aptpl_metadata) { | ||
1869 | snprintf(buf, pr_aptpl_buf_len, | ||
1870 | "No Registrations or Reservations\n"); | ||
1871 | return 0; | ||
1872 | } | ||
1873 | /* | ||
1874 | * Walk the registration list.. | ||
1875 | */ | ||
1876 | spin_lock(&T10_RES(su_dev)->registration_lock); | ||
1877 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | ||
1878 | pr_reg_list) { | ||
1879 | |||
1880 | tmp[0] = '\0'; | ||
1881 | isid_buf[0] = '\0'; | ||
1882 | tpg = pr_reg->pr_reg_nacl->se_tpg; | ||
1883 | lun = pr_reg->pr_reg_tg_pt_lun; | ||
1884 | /* | ||
1885 | * Write out any ISID value to APTPL metadata that was included | ||
1886 | * in the original registration. | ||
1887 | */ | ||
1888 | if (pr_reg->isid_present_at_reg) | ||
1889 | snprintf(isid_buf, 32, "initiator_sid=%s\n", | ||
1890 | pr_reg->pr_reg_isid); | ||
1891 | /* | ||
1892 | * Include special metadata if the pr_reg matches the | ||
1893 | * reservation holder. | ||
1894 | */ | ||
1895 | if (dev->dev_pr_res_holder == pr_reg) { | ||
1896 | snprintf(tmp, 512, "PR_REG_START: %d" | ||
1897 | "\ninitiator_fabric=%s\n" | ||
1898 | "initiator_node=%s\n%s" | ||
1899 | "sa_res_key=%llu\n" | ||
1900 | "res_holder=1\nres_type=%02x\n" | ||
1901 | "res_scope=%02x\nres_all_tg_pt=%d\n" | ||
1902 | "mapped_lun=%u\n", reg_count, | ||
1903 | TPG_TFO(tpg)->get_fabric_name(), | ||
1904 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | ||
1905 | pr_reg->pr_res_key, pr_reg->pr_res_type, | ||
1906 | pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, | ||
1907 | pr_reg->pr_res_mapped_lun); | ||
1908 | } else { | ||
1909 | snprintf(tmp, 512, "PR_REG_START: %d\n" | ||
1910 | "initiator_fabric=%s\ninitiator_node=%s\n%s" | ||
1911 | "sa_res_key=%llu\nres_holder=0\n" | ||
1912 | "res_all_tg_pt=%d\nmapped_lun=%u\n", | ||
1913 | reg_count, TPG_TFO(tpg)->get_fabric_name(), | ||
1914 | pr_reg->pr_reg_nacl->initiatorname, isid_buf, | ||
1915 | pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, | ||
1916 | pr_reg->pr_res_mapped_lun); | ||
1917 | } | ||
1918 | |||
1919 | if ((len + strlen(tmp) > pr_aptpl_buf_len)) { | ||
1920 | printk(KERN_ERR "Unable to update remaining" | ||
1921 | " APTPL metadata\n"); | ||
1922 | spin_unlock(&T10_RES(su_dev)->registration_lock); | ||
1923 | return -1; | ||
1924 | } | ||
1925 | len += sprintf(buf+len, "%s", tmp); | ||
1926 | |||
1927 | /* | ||
1928 | * Include information about the associated SCSI target port. | ||
1929 | */ | ||
1930 | snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" | ||
1931 | "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" | ||
1932 | " %d\n", TPG_TFO(tpg)->get_fabric_name(), | ||
1933 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
1934 | TPG_TFO(tpg)->tpg_get_tag(tpg), | ||
1935 | lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); | ||
1936 | |||
1937 | if ((len + strlen(tmp) > pr_aptpl_buf_len)) { | ||
1938 | printk(KERN_ERR "Unable to update remaining" | ||
1939 | " APTPL metadata\n"); | ||
1940 | spin_unlock(&T10_RES(su_dev)->registration_lock); | ||
1941 | return -1; | ||
1942 | } | ||
1943 | len += sprintf(buf+len, "%s", tmp); | ||
1944 | reg_count++; | ||
1945 | } | ||
1946 | spin_unlock(&T10_RES(su_dev)->registration_lock); | ||
1947 | |||
1948 | if (!(reg_count)) | ||
1949 | len += sprintf(buf+len, "No Registrations or Reservations"); | ||
1950 | |||
1951 | return 0; | ||
1952 | } | ||
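
Putting the format strings above together, one reservation-holding
registration renders into the APTPL buffer roughly as below (all values
illustrative; the initiator_sid= line appears only when an ISID was
present at registration time):

	PR_REG_START: 0
	initiator_fabric=iSCSI
	initiator_node=iqn.1994-05.com.example:initiator
	sa_res_key=12345
	res_holder=1
	res_type=01
	res_scope=00
	res_all_tg_pt=0
	mapped_lun=0
	target_fabric=iSCSI
	target_node=iqn.2003-01.org.example:target
	tpgt=1
	port_rtpi=1
	target_lun=0
	PR_REG_END: 0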
1953 | |||
1954 | static int core_scsi3_update_aptpl_buf( | ||
1955 | struct se_device *dev, | ||
1956 | unsigned char *buf, | ||
1957 | u32 pr_aptpl_buf_len, | ||
1958 | int clear_aptpl_metadata) | ||
1959 | { | ||
1960 | int ret; | ||
1961 | |||
1962 | spin_lock(&dev->dev_reservation_lock); | ||
1963 | ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, | ||
1964 | clear_aptpl_metadata); | ||
1965 | spin_unlock(&dev->dev_reservation_lock); | ||
1966 | |||
1967 | return ret; | ||
1968 | } | ||
1969 | |||
1970 | /* | ||
1971 | * Called with struct se_device->aptpl_file_mutex held | ||
1972 | */ | ||
1973 | static int __core_scsi3_write_aptpl_to_file( | ||
1974 | struct se_device *dev, | ||
1975 | unsigned char *buf, | ||
1976 | u32 pr_aptpl_buf_len) | ||
1977 | { | ||
1978 | struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn; | ||
1979 | struct file *file; | ||
1980 | struct iovec iov[1]; | ||
1981 | mm_segment_t old_fs; | ||
1982 | int flags = O_RDWR | O_CREAT | O_TRUNC; | ||
1983 | char path[512]; | ||
1984 | int ret; | ||
1985 | |||
1986 | memset(iov, 0, sizeof(struct iovec)); | ||
1987 | memset(path, 0, 512); | ||
1988 | |||
1989 | if (strlen(&wwn->unit_serial[0]) > 512) { | ||
1990 | printk(KERN_ERR "WWN value for struct se_device does not fit" | ||
1991 | " into path buffer\n"); | ||
1992 | return -1; | ||
1993 | } | ||
1994 | |||
1995 | snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); | ||
1996 | file = filp_open(path, flags, 0600); | ||
1997 | if (IS_ERR(file) || !file || !file->f_dentry) { | ||
1998 | printk(KERN_ERR "filp_open(%s) for APTPL metadata" | ||
1999 | " failed\n", path); | ||
2000 | return -1; | ||
2001 | } | ||
2002 | |||
2003 | iov[0].iov_base = &buf[0]; | ||
2004 | if (!(pr_aptpl_buf_len)) | ||
2005 | iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ | ||
2006 | else | ||
2007 | iov[0].iov_len = pr_aptpl_buf_len; | ||
2008 | |||
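 | /* vfs_writev() runs user-copy access checks against the iovec, so | ||
 | * temporarily widen the address limit to the kernel data segment | ||
 | * before passing a kernel buffer. */ | ||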
2009 | old_fs = get_fs(); | ||
2010 | set_fs(get_ds()); | ||
2011 | ret = vfs_writev(file, &iov[0], 1, &file->f_pos); | ||
2012 | set_fs(old_fs); | ||
2013 | |||
2014 | if (ret < 0) { | ||
2015 | printk("Error writing APTPL metadata file: %s\n", path); | ||
2016 | filp_close(file, NULL); | ||
2017 | return -1; | ||
2018 | } | ||
2019 | filp_close(file, NULL); | ||
2020 | |||
2021 | return 0; | ||
2022 | } | ||
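
The resulting state therefore persists in one flat text file per struct
se_device, /var/target/pr/aptpl_<unit_serial>, which userspace tooling
can feed back after a reboot so that registrations and any reservation
holder are recreated through core_scsi3_alloc_aptpl_registration() above.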
2023 | |||
2024 | static int core_scsi3_update_and_write_aptpl( | ||
2025 | struct se_device *dev, | ||
2026 | unsigned char *in_buf, | ||
2027 | u32 in_pr_aptpl_buf_len) | ||
2028 | { | ||
2029 | unsigned char null_buf[64], *buf; | ||
2030 | u32 pr_aptpl_buf_len; | ||
2031 | int ret, clear_aptpl_metadata = 0; | ||
2032 | /* | ||
2033 | * Can be called with a NULL pointer from PROUT service action CLEAR | ||
2034 | */ | ||
2035 | if (!(in_buf)) { | ||
2036 | memset(null_buf, 0, 64); | ||
2037 | buf = &null_buf[0]; | ||
2038 | /* | ||
2039 | * This will clear the APTPL metadata to: | ||
2040 | * "No Registrations or Reservations" status | ||
2041 | */ | ||
2042 | pr_aptpl_buf_len = 64; | ||
2043 | clear_aptpl_metadata = 1; | ||
2044 | } else { | ||
2045 | buf = in_buf; | ||
2046 | pr_aptpl_buf_len = in_pr_aptpl_buf_len; | ||
2047 | } | ||
2048 | |||
2049 | ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, | ||
2050 | clear_aptpl_metadata); | ||
2051 | if (ret != 0) | ||
2052 | return -1; | ||
2053 | /* | ||
2054 | * __core_scsi3_write_aptpl_to_file() will call strlen() | ||
2055 | * on the passed buf to determine pr_aptpl_buf_len. | ||
2056 | */ | ||
2057 | ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); | ||
2058 | if (ret != 0) | ||
2059 | return -1; | ||
2060 | |||
2061 | return ret; | ||
2062 | } | ||
2063 | |||
2064 | static int core_scsi3_emulate_pro_register( | ||
2065 | struct se_cmd *cmd, | ||
2066 | u64 res_key, | ||
2067 | u64 sa_res_key, | ||
2068 | int aptpl, | ||
2069 | int all_tg_pt, | ||
2070 | int spec_i_pt, | ||
2071 | int ignore_key) | ||
2072 | { | ||
2073 | struct se_session *se_sess = SE_SESS(cmd); | ||
2074 | struct se_device *dev = SE_DEV(cmd); | ||
2075 | struct se_dev_entry *se_deve; | ||
2076 | struct se_lun *se_lun = SE_LUN(cmd); | ||
2077 | struct se_portal_group *se_tpg; | ||
2078 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; | ||
2079 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
2080 | /* Used for APTPL metadata w/ UNREGISTER */ | ||
2081 | unsigned char *pr_aptpl_buf = NULL; | ||
2082 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | ||
2083 | int pr_holder = 0, ret = 0, type; | ||
2084 | |||
2085 | if (!(se_sess) || !(se_lun)) { | ||
2086 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | ||
2087 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2088 | } | ||
2089 | se_tpg = se_sess->se_tpg; | ||
2090 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
2091 | |||
2092 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | ||
2093 | memset(&isid_buf[0], 0, PR_REG_ISID_LEN); | ||
2094 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0], | ||
2095 | PR_REG_ISID_LEN); | ||
2096 | isid_ptr = &isid_buf[0]; | ||
2097 | } | ||
2098 | /* | ||
2099 | * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 | ||
2100 | */ | ||
2101 | pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); | ||
2102 | if (!(pr_reg_e)) { | ||
2103 | if (res_key) { | ||
2104 | printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" | ||
2105 | " for SA REGISTER, returning CONFLICT\n"); | ||
2106 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2107 | } | ||
2108 | /* | ||
2109 | * Do nothing but return GOOD status. | ||
2110 | */ | ||
2111 | if (!(sa_res_key)) | ||
2112 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
2113 | |||
2114 | if (!(spec_i_pt)) { | ||
2115 | /* | ||
2116 | * Perform the Service Action REGISTER on the Initiator | ||
2117 | * Port Endpoint that the PRO was received from on the | ||
2118 | * Logical Unit of the SCSI device server. | ||
2119 | */ | ||
2120 | ret = core_scsi3_alloc_registration(SE_DEV(cmd), | ||
2121 | se_sess->se_node_acl, se_deve, isid_ptr, | ||
2122 | sa_res_key, all_tg_pt, aptpl, | ||
2123 | ignore_key, 0); | ||
2124 | if (ret != 0) { | ||
2125 | printk(KERN_ERR "Unable to allocate" | ||
2126 | " struct t10_pr_registration\n"); | ||
2127 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
2128 | } | ||
2129 | } else { | ||
2130 | /* | ||
2131 | * Register both the Initiator port that received the | ||
2132 | * PROUT SA REGISTER + SPEC_I_PT=1 and extract the SCSI | ||
2133 | * TransportIDs from the parameter list, looping through | ||
2134 | * the fabric dependent parameter list while calling the | ||
2135 | * logic of core_scsi3_alloc_registration() for each | ||
2136 | * TransportID provided SCSI Initiator Port/Device. | ||
2137 | */ | ||
2138 | ret = core_scsi3_decode_spec_i_port(cmd, se_tpg, | ||
2139 | isid_ptr, sa_res_key, all_tg_pt, aptpl); | ||
2140 | if (ret != 0) | ||
2141 | return ret; | ||
2142 | } | ||
2143 | /* | ||
2144 | * Nothing left to do for the APTPL=0 case. | ||
2145 | */ | ||
2146 | if (!(aptpl)) { | ||
2147 | pr_tmpl->pr_aptpl_active = 0; | ||
2148 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | ||
2149 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" | ||
2150 | " REGISTER\n"); | ||
2151 | return 0; | ||
2152 | } | ||
2153 | /* | ||
2154 | * Locate the newly allocated local I_T Nexus *pr_reg, and | ||
2155 | * update the APTPL metadata information using its | ||
2156 | * preallocated *pr_reg->pr_aptpl_buf. | ||
2157 | */ | ||
2158 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), | ||
2159 | se_sess->se_node_acl, se_sess); | ||
2160 | |||
2161 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | ||
2162 | &pr_reg->pr_aptpl_buf[0], | ||
2163 | pr_tmpl->pr_aptpl_buf_len); | ||
2164 | if (!(ret)) { | ||
2165 | pr_tmpl->pr_aptpl_active = 1; | ||
2166 | printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); | ||
2167 | } | ||
2168 | |||
2169 | core_scsi3_put_pr_reg(pr_reg); | ||
2170 | return ret; | ||
2171 | } else { | ||
2172 | /* | ||
2173 | * Locate the existing *pr_reg via struct se_node_acl pointers | ||
2174 | */ | ||
2175 | pr_reg = pr_reg_e; | ||
2176 | type = pr_reg->pr_res_type; | ||
2177 | |||
2178 | if (!(ignore_key)) { | ||
2179 | if (res_key != pr_reg->pr_res_key) { | ||
2180 | printk(KERN_ERR "SPC-3 PR REGISTER: Received" | ||
2181 | " res_key: 0x%016Lx does not match" | ||
2182 | " existing SA REGISTER res_key:" | ||
2183 | " 0x%016Lx\n", res_key, | ||
2184 | pr_reg->pr_res_key); | ||
2185 | core_scsi3_put_pr_reg(pr_reg); | ||
2186 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2187 | } | ||
2188 | } | ||
2189 | if (spec_i_pt) { | ||
2190 | printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" | ||
2191 | " set while sa_res_key=0\n"); | ||
2192 | core_scsi3_put_pr_reg(pr_reg); | ||
2193 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
2194 | } | ||
2195 | /* | ||
2196 | * An existing ALL_TG_PT=1 registration being released | ||
2197 | * must also set ALL_TG_PT=1 in the incoming PROUT. | ||
2198 | */ | ||
2199 | if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { | ||
2200 | printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" | ||
2201 | " registration exists, but ALL_TG_PT=1 bit not" | ||
2202 | " present in received PROUT\n"); | ||
2203 | core_scsi3_put_pr_reg(pr_reg); | ||
2204 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
2205 | } | ||
2206 | /* | ||
2207 | * Allocate APTPL metadata buffer used for UNREGISTER ops | ||
2208 | */ | ||
2209 | if (aptpl) { | ||
2210 | pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, | ||
2211 | GFP_KERNEL); | ||
2212 | if (!(pr_aptpl_buf)) { | ||
2213 | printk(KERN_ERR "Unable to allocate" | ||
2214 | " pr_aptpl_buf\n"); | ||
2215 | core_scsi3_put_pr_reg(pr_reg); | ||
2216 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2217 | } | ||
2218 | } | ||
2219 | /* | ||
2220 | * sa_res_key=0: Unregister the Reservation Key for the registered | ||
2221 | * I_T Nexus. sa_res_key!=0: Change the Reservation Key for the | ||
2222 | * registered I_T Nexus. | ||
2223 | */ | ||
2224 | if (!(sa_res_key)) { | ||
2225 | pr_holder = core_scsi3_check_implict_release( | ||
2226 | SE_DEV(cmd), pr_reg); | ||
2227 | if (pr_holder < 0) { | ||
2228 | kfree(pr_aptpl_buf); | ||
2229 | core_scsi3_put_pr_reg(pr_reg); | ||
2230 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2231 | } | ||
2232 | |||
2233 | spin_lock(&pr_tmpl->registration_lock); | ||
2234 | /* | ||
2235 | * Release all ALL_TG_PT=1 registrations for the matching SCSI | ||
2236 | * Initiator Port and matching pr_res_key. | ||
2237 | */ | ||
2238 | if (pr_reg->pr_reg_all_tg_pt) { | ||
2239 | list_for_each_entry_safe(pr_reg_p, pr_reg_tmp, | ||
2240 | &pr_tmpl->registration_list, | ||
2241 | pr_reg_list) { | ||
2242 | |||
2243 | if (!(pr_reg_p->pr_reg_all_tg_pt)) | ||
2244 | continue; | ||
2245 | |||
2246 | if (pr_reg_p->pr_res_key != res_key) | ||
2247 | continue; | ||
2248 | |||
2249 | if (pr_reg == pr_reg_p) | ||
2250 | continue; | ||
2251 | |||
2252 | if (strcmp(pr_reg->pr_reg_nacl->initiatorname, | ||
2253 | pr_reg_p->pr_reg_nacl->initiatorname)) | ||
2254 | continue; | ||
2255 | |||
2256 | __core_scsi3_free_registration(dev, | ||
2257 | pr_reg_p, NULL, 0); | ||
2258 | } | ||
2259 | } | ||
2260 | /* | ||
2261 | * Release the calling I_T Nexus registration now.. | ||
2262 | */ | ||
2263 | __core_scsi3_free_registration(SE_DEV(cmd), pr_reg, | ||
2264 | NULL, 1); | ||
2265 | /* | ||
2266 | * From spc4r17, section 5.7.11.3 Unregistering | ||
2267 | * | ||
2268 | * If the persistent reservation is a registrants only | ||
2269 | * type, the device server shall establish a unit | ||
2270 | * attention condition for the initiator port associated | ||
2271 | * with every registered I_T nexus except for the I_T | ||
2272 | * nexus on which the PERSISTENT RESERVE OUT command was | ||
2273 | * received, with the additional sense code set to | ||
2274 | * RESERVATIONS RELEASED. | ||
2275 | */ | ||
2276 | if (pr_holder && | ||
2277 | ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || | ||
2278 | (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) { | ||
2279 | list_for_each_entry(pr_reg_p, | ||
2280 | &pr_tmpl->registration_list, | ||
2281 | pr_reg_list) { | ||
2282 | |||
2283 | core_scsi3_ua_allocate( | ||
2284 | pr_reg_p->pr_reg_nacl, | ||
2285 | pr_reg_p->pr_res_mapped_lun, | ||
2286 | 0x2A, | ||
2287 | ASCQ_2AH_RESERVATIONS_RELEASED); | ||
2288 | } | ||
2289 | } | ||
2290 | spin_unlock(&pr_tmpl->registration_lock); | ||
2291 | |||
2292 | if (!(aptpl)) { | ||
2293 | pr_tmpl->pr_aptpl_active = 0; | ||
2294 | core_scsi3_update_and_write_aptpl(dev, NULL, 0); | ||
2295 | printk("SPC-3 PR: Set APTPL Bit Deactivated" | ||
2296 | " for UNREGISTER\n"); | ||
2297 | return 0; | ||
2298 | } | ||
2299 | |||
2300 | ret = core_scsi3_update_and_write_aptpl(dev, | ||
2301 | &pr_aptpl_buf[0], | ||
2302 | pr_tmpl->pr_aptpl_buf_len); | ||
2303 | if (!(ret)) { | ||
2304 | pr_tmpl->pr_aptpl_active = 1; | ||
2305 | printk("SPC-3 PR: Set APTPL Bit Activated" | ||
2306 | " for UNREGISTER\n"); | ||
2307 | } | ||
2308 | |||
2309 | kfree(pr_aptpl_buf); | ||
2310 | return ret; | ||
2311 | } else { | ||
2312 | /* | ||
2313 | * Increment PRgeneration counter for struct se_device | ||
2314 | * upon a successful REGISTER, see spc4r17 section 6.3.2 | ||
2315 | * READ_KEYS service action. | ||
2316 | */ | ||
2317 | pr_reg->pr_res_generation = core_scsi3_pr_generation( | ||
2318 | SE_DEV(cmd)); | ||
2319 | pr_reg->pr_res_key = sa_res_key; | ||
2320 | printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" | ||
2321 | " Key for %s to: 0x%016Lx PRgeneration:" | ||
2322 | " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(), | ||
2323 | (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", | ||
2324 | pr_reg->pr_reg_nacl->initiatorname, | ||
2325 | pr_reg->pr_res_key, pr_reg->pr_res_generation); | ||
2326 | |||
2327 | if (!(aptpl)) { | ||
2328 | pr_tmpl->pr_aptpl_active = 0; | ||
2329 | core_scsi3_update_and_write_aptpl(dev, NULL, 0); | ||
2330 | core_scsi3_put_pr_reg(pr_reg); | ||
2331 | printk("SPC-3 PR: Set APTPL Bit Deactivated" | ||
2332 | " for REGISTER\n"); | ||
2333 | return 0; | ||
2334 | } | ||
2335 | |||
2336 | ret = core_scsi3_update_and_write_aptpl(dev, | ||
2337 | &pr_aptpl_buf[0], | ||
2338 | pr_tmpl->pr_aptpl_buf_len); | ||
2339 | if (!(ret)) { | ||
2340 | pr_tmpl->pr_aptpl_active = 1; | ||
2341 | printk("SPC-3 PR: Set APTPL Bit Activated" | ||
2342 | " for REGISTER\n"); | ||
2343 | } | ||
2344 | |||
2345 | kfree(pr_aptpl_buf); | ||
2346 | core_scsi3_put_pr_reg(pr_reg); | ||
2347 | } | ||
2348 | } | ||
2349 | return 0; | ||
2350 | } | ||
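The UNREGISTER branch above (zero SERVICE ACTION RESERVATION KEY) also sweeps away sibling ALL_TG_PT=1 registrations sharing the caller's key and initiator name. A minimal userspace mock of that sweep, with hypothetical types that are not part of this commit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mock registration table; mirrors the fields the loop above compares:
 * pr_reg_all_tg_pt, pr_res_key and pr_reg_nacl->initiatorname. */
struct mock_reg {
	const char *initiator;
	uint64_t key;
	int all_tg_pt;
	int registered;
};

/* Drop the caller's registration and, when it was created with
 * ALL_TG_PT=1, every other ALL_TG_PT=1 registration holding the same
 * key for the same initiator name. */
static void unregister_sweep(struct mock_reg *regs, int n, int caller)
{
	for (int i = 0; i < n; i++) {
		if (i == caller) {
			regs[i].registered = 0;
			continue;
		}
		if (regs[caller].all_tg_pt && regs[i].all_tg_pt &&
		    regs[i].key == regs[caller].key &&
		    !strcmp(regs[i].initiator, regs[caller].initiator))
			regs[i].registered = 0;
	}
}

int main(void)
{
	struct mock_reg regs[] = {
		{ "iqn.node-a", 0xAA, 1, 1 },	/* calling I_T nexus */
		{ "iqn.node-a", 0xAA, 1, 1 },	/* same initiator, other target port */
		{ "iqn.node-b", 0xAA, 1, 1 },	/* different initiator: kept */
	};

	unregister_sweep(regs, 3, 0);
	for (int i = 0; i < 3; i++)
		printf("%s registered=%d\n", regs[i].initiator,
		       regs[i].registered);
	return 0;
}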
2351 | |||
2352 | unsigned char *core_scsi3_pr_dump_type(int type) | ||
2353 | { | ||
2354 | switch (type) { | ||
2355 | case PR_TYPE_WRITE_EXCLUSIVE: | ||
2356 | return "Write Exclusive Access"; | ||
2357 | case PR_TYPE_EXCLUSIVE_ACCESS: | ||
2358 | return "Exclusive Access"; | ||
2359 | case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: | ||
2360 | return "Write Exclusive Access, Registrants Only"; | ||
2361 | case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: | ||
2362 | return "Exclusive Access, Registrants Only"; | ||
2363 | case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: | ||
2364 | return "Write Exclusive Access, All Registrants"; | ||
2365 | case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: | ||
2366 | return "Exclusive Access, All Registrants"; | ||
2367 | default: | ||
2368 | break; | ||
2369 | } | ||
2370 | |||
2371 | return "Unknown SPC-3 PR Type"; | ||
2372 | } | ||
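For reference, these are the SPC-4 TYPE codes behind the PR_TYPE_* constants above (values taken from spc4r17; the kernel macros themselves are assumed to live in target_core_pr.h). A self-contained table that can be cross-checked against core_scsi3_pr_dump_type():

#include <stdio.h>

/* SPC-4 persistent reservation TYPE codes; the PR_TYPE_* macros used
 * above are assumed to carry these standard values. */
static const struct {
	unsigned char code;
	const char *name;
} pr_types[] = {
	{ 0x01, "Write Exclusive Access" },
	{ 0x03, "Exclusive Access" },
	{ 0x05, "Write Exclusive Access, Registrants Only" },
	{ 0x06, "Exclusive Access, Registrants Only" },
	{ 0x07, "Write Exclusive Access, All Registrants" },
	{ 0x08, "Exclusive Access, All Registrants" },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(pr_types) / sizeof(pr_types[0]); i++)
		printf("0x%02x  %s\n", pr_types[i].code, pr_types[i].name);
	return 0;
}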
2373 | |||
2374 | static int core_scsi3_pro_reserve( | ||
2375 | struct se_cmd *cmd, | ||
2376 | struct se_device *dev, | ||
2377 | int type, | ||
2378 | int scope, | ||
2379 | u64 res_key) | ||
2380 | { | ||
2381 | struct se_session *se_sess = SE_SESS(cmd); | ||
2382 | struct se_dev_entry *se_deve; | ||
2383 | struct se_lun *se_lun = SE_LUN(cmd); | ||
2384 | struct se_portal_group *se_tpg; | ||
2385 | struct t10_pr_registration *pr_reg, *pr_res_holder; | ||
2386 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
2387 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
2388 | int ret, prf_isid; | ||
2389 | |||
2390 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
2391 | |||
2392 | if (!(se_sess) || !(se_lun)) { | ||
2393 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | ||
2394 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2395 | } | ||
2396 | se_tpg = se_sess->se_tpg; | ||
2397 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
2398 | /* | ||
2399 | * Locate the existing *pr_reg via struct se_node_acl pointers | ||
2400 | */ | ||
2401 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | ||
2402 | se_sess); | ||
2403 | if (!(pr_reg)) { | ||
2404 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | ||
2405 | " PR_REGISTERED *pr_reg for RESERVE\n"); | ||
2406 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2407 | } | ||
2408 | /* | ||
2409 | * From spc4r17 Section 5.7.9: Reserving: | ||
2410 | * | ||
2411 | * An application client creates a persistent reservation by issuing | ||
2412 | * a PERSISTENT RESERVE OUT command with RESERVE service action through | ||
2413 | * a registered I_T nexus with the following parameters: | ||
2414 | * a) RESERVATION KEY set to the value of the reservation key that is | ||
2415 | * registered with the logical unit for the I_T nexus; and | ||
2416 | */ | ||
2417 | if (res_key != pr_reg->pr_res_key) { | ||
2418 | printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" | ||
2419 | " does not match existing SA REGISTER res_key:" | ||
2420 | " 0x%016Lx\n", res_key, pr_reg->pr_res_key); | ||
2421 | core_scsi3_put_pr_reg(pr_reg); | ||
2422 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2423 | } | ||
2424 | /* | ||
2425 | * From spc4r17 Section 5.7.9: Reserving: | ||
2426 | * | ||
2427 | * From above: | ||
2428 | * b) TYPE field and SCOPE field set to the persistent reservation | ||
2429 | * being created. | ||
2430 | * | ||
2431 | * Only one persistent reservation is allowed at a time per logical unit | ||
2432 | * and that persistent reservation has a scope of LU_SCOPE. | ||
2433 | */ | ||
2434 | if (scope != PR_SCOPE_LU_SCOPE) { | ||
2435 | printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); | ||
2436 | core_scsi3_put_pr_reg(pr_reg); | ||
2437 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
2438 | } | ||
2439 | /* | ||
2440 | * See if we have an existing PR reservation holder pointer at | ||
2441 | * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration | ||
2442 | * *pr_res_holder. | ||
2443 | */ | ||
2444 | spin_lock(&dev->dev_reservation_lock); | ||
2445 | pr_res_holder = dev->dev_pr_res_holder; | ||
2446 | if ((pr_res_holder)) { | ||
2447 | /* | ||
2448 | * From spc4r17 Section 5.7.9: Reserving: | ||
2449 | * | ||
2450 | * If the device server receives a PERSISTENT RESERVE OUT | ||
2451 | * command from an I_T nexus other than a persistent reservation | ||
2452 | * holder (see 5.7.10) that attempts to create a persistent | ||
2453 | * reservation when a persistent reservation already exists for | ||
2454 | * the logical unit, then the command shall be completed with | ||
2455 | * RESERVATION CONFLICT status. | ||
2456 | */ | ||
2457 | if (pr_res_holder != pr_reg) { | ||
2458 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | ||
2459 | printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" | ||
2460 | " [%s]: %s while reservation already held by" | ||
2461 | " [%s]: %s, returning RESERVATION_CONFLICT\n", | ||
2462 | CMD_TFO(cmd)->get_fabric_name(), | ||
2463 | se_sess->se_node_acl->initiatorname, | ||
2464 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | ||
2465 | pr_res_holder->pr_reg_nacl->initiatorname); | ||
2466 | |||
2467 | spin_unlock(&dev->dev_reservation_lock); | ||
2468 | core_scsi3_put_pr_reg(pr_reg); | ||
2469 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2470 | } | ||
2471 | /* | ||
2472 | * From spc4r17 Section 5.7.9: Reserving: | ||
2473 | * | ||
2474 | * If a persistent reservation holder attempts to modify the | ||
2475 | * type or scope of an existing persistent reservation, the | ||
2476 | * command shall be completed with RESERVATION CONFLICT status. | ||
2477 | */ | ||
2478 | if ((pr_res_holder->pr_res_type != type) || | ||
2479 | (pr_res_holder->pr_res_scope != scope)) { | ||
2480 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | ||
2481 | printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" | ||
2482 | " [%s]: %s trying to change TYPE and/or SCOPE," | ||
2483 | " while reservation already held by [%s]: %s," | ||
2484 | " returning RESERVATION_CONFLICT\n", | ||
2485 | CMD_TFO(cmd)->get_fabric_name(), | ||
2486 | se_sess->se_node_acl->initiatorname, | ||
2487 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | ||
2488 | pr_res_holder->pr_reg_nacl->initiatorname); | ||
2489 | |||
2490 | spin_unlock(&dev->dev_reservation_lock); | ||
2491 | core_scsi3_put_pr_reg(pr_reg); | ||
2492 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2493 | } | ||
2494 | /* | ||
2495 | * From spc4r17 Section 5.7.9: Reserving: | ||
2496 | * | ||
2497 | * If the device server receives a PERSISTENT RESERVE OUT | ||
2498 | * command with RESERVE service action where the TYPE field and | ||
2499 | * the SCOPE field contain the same values as the existing type | ||
2500 | * and scope from a persistent reservation holder, it shall not | ||
2501 | * make any change to the existing persistent reservation and | ||
2502 | * shall complete the command with GOOD status. | ||
2503 | */ | ||
2504 | spin_unlock(&dev->dev_reservation_lock); | ||
2505 | core_scsi3_put_pr_reg(pr_reg); | ||
2506 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
2507 | } | ||
2508 | /* | ||
2509 | * Otherwise, our *pr_reg becomes the PR reservation holder for said | ||
2510 | * TYPE/SCOPE. Also set the received scope and type in *pr_reg. | ||
2511 | */ | ||
2512 | pr_reg->pr_res_scope = scope; | ||
2513 | pr_reg->pr_res_type = type; | ||
2514 | pr_reg->pr_res_holder = 1; | ||
2515 | dev->dev_pr_res_holder = pr_reg; | ||
2516 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
2517 | PR_REG_ISID_ID_LEN); | ||
2518 | |||
2519 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" | ||
2520 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | ||
2521 | CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type), | ||
2522 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | ||
2523 | printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", | ||
2524 | CMD_TFO(cmd)->get_fabric_name(), | ||
2525 | se_sess->se_node_acl->initiatorname, | ||
2526 | (prf_isid) ? &i_buf[0] : ""); | ||
2527 | spin_unlock(&dev->dev_reservation_lock); | ||
2528 | |||
2529 | if (pr_tmpl->pr_aptpl_active) { | ||
2530 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | ||
2531 | &pr_reg->pr_aptpl_buf[0], | ||
2532 | pr_tmpl->pr_aptpl_buf_len); | ||
2533 | if (!(ret)) | ||
2534 | printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" | ||
2535 | " for RESERVE\n"); | ||
2536 | } | ||
2537 | |||
2538 | core_scsi3_put_pr_reg(pr_reg); | ||
2539 | return 0; | ||
2540 | } | ||
2541 | |||
2542 | static int core_scsi3_emulate_pro_reserve( | ||
2543 | struct se_cmd *cmd, | ||
2544 | int type, | ||
2545 | int scope, | ||
2546 | u64 res_key) | ||
2547 | { | ||
2548 | struct se_device *dev = cmd->se_dev; | ||
2549 | int ret = 0; | ||
2550 | |||
2551 | switch (type) { | ||
2552 | case PR_TYPE_WRITE_EXCLUSIVE: | ||
2553 | case PR_TYPE_EXCLUSIVE_ACCESS: | ||
2554 | case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: | ||
2555 | case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: | ||
2556 | case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: | ||
2557 | case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: | ||
2558 | ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); | ||
2559 | break; | ||
2560 | default: | ||
2561 | printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" | ||
2562 | " 0x%02x\n", type); | ||
2563 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
2564 | } | ||
2565 | |||
2566 | return ret; | ||
2567 | } | ||
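From the initiator side, the RESERVE service action handled above arrives as a PERSISTENT RESERVE OUT CDB (opcode 0x5F). A hedged userspace sketch using the Linux SG_IO ioctl; the device path and reservation key are placeholders and error handling is trimmed:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	/* PROUT, service action RESERVE in byte 1, scope=LU_SCOPE and
	 * type=Write Exclusive packed into byte 2, per spc4r17 6.14. */
	unsigned char cdb[10] = { 0x5f, 0x01, 0x01 };
	unsigned char param[24];	/* PROUT parameter list */
	unsigned char sense[32];
	uint64_t key = 0x1234abcd;	/* placeholder reservation key */
	struct sg_io_hdr io;
	int fd, i;

	/* Parameter list bytes 0-7: RESERVATION KEY, big-endian. */
	memset(param, 0, sizeof(param));
	for (i = 0; i < 8; i++)
		param[i] = key >> (56 - 8 * i);
	/* CDB bytes 5-8: PARAMETER LIST LENGTH, big-endian (here 24). */
	cdb[8] = sizeof(param);

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.dxfer_direction = SG_DXFER_TO_DEV;
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.dxferp = param;
	io.dxfer_len = sizeof(param);
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 10000;	/* milliseconds */

	fd = open("/dev/sdX", O_RDWR);	/* placeholder device */
	if (fd < 0 || ioctl(fd, SG_IO, &io) < 0) {
		perror("PROUT RESERVE");
		return 1;
	}
	/* 0x18 here would be the RESERVATION CONFLICT status returned above. */
	printf("scsi status: 0x%x\n", io.status);
	close(fd);
	return 0;
}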
2568 | |||
2569 | /* | ||
2570 | * Called with struct se_device->dev_reservation_lock held. | ||
2571 | */ | ||
2572 | static void __core_scsi3_complete_pro_release( | ||
2573 | struct se_device *dev, | ||
2574 | struct se_node_acl *se_nacl, | ||
2575 | struct t10_pr_registration *pr_reg, | ||
2576 | int explicit) | ||
2577 | { | ||
2578 | struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; | ||
2579 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
2580 | int prf_isid; | ||
2581 | |||
2582 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
2583 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
2584 | PR_REG_ISID_ID_LEN); | ||
2585 | /* | ||
2586 | * Go ahead and release the current PR reservation holder. | ||
2587 | */ | ||
2588 | dev->dev_pr_res_holder = NULL; | ||
2589 | |||
2590 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" | ||
2591 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | ||
2592 | tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", | ||
2593 | core_scsi3_pr_dump_type(pr_reg->pr_res_type), | ||
2594 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | ||
2595 | printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", | ||
2596 | tfo->get_fabric_name(), se_nacl->initiatorname, | ||
2597 | (prf_isid) ? &i_buf[0] : ""); | ||
2598 | /* | ||
2599 | * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE | ||
2600 | */ | ||
2601 | pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0; | ||
2602 | } | ||
2603 | |||
2604 | static int core_scsi3_emulate_pro_release( | ||
2605 | struct se_cmd *cmd, | ||
2606 | int type, | ||
2607 | int scope, | ||
2608 | u64 res_key) | ||
2609 | { | ||
2610 | struct se_device *dev = cmd->se_dev; | ||
2611 | struct se_session *se_sess = SE_SESS(cmd); | ||
2612 | struct se_lun *se_lun = SE_LUN(cmd); | ||
2613 | struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; | ||
2614 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
2615 | int ret, all_reg = 0; | ||
2616 | |||
2617 | if (!(se_sess) || !(se_lun)) { | ||
2618 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | ||
2619 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2620 | } | ||
2621 | /* | ||
2622 | * Locate the existing *pr_reg via struct se_node_acl pointers | ||
2623 | */ | ||
2624 | pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); | ||
2625 | if (!(pr_reg)) { | ||
2626 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | ||
2627 | " PR_REGISTERED *pr_reg for RELEASE\n"); | ||
2628 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2629 | } | ||
2630 | /* | ||
2631 | * From spc4r17 Section 5.7.11.2 Releasing: | ||
2632 | * | ||
2633 | * If there is no persistent reservation or in response to a persistent | ||
2634 | * reservation release request from a registered I_T nexus that is not a | ||
2635 | * persistent reservation holder (see 5.7.10), the device server shall | ||
2636 | * do the following: | ||
2637 | * | ||
2638 | * a) Not release the persistent reservation, if any; | ||
2639 | * b) Not remove any registrations; and | ||
2640 | * c) Complete the command with GOOD status. | ||
2641 | */ | ||
2642 | spin_lock(&dev->dev_reservation_lock); | ||
2643 | pr_res_holder = dev->dev_pr_res_holder; | ||
2644 | if (!(pr_res_holder)) { | ||
2645 | /* | ||
2646 | * No persistent reservation, return GOOD status. | ||
2647 | */ | ||
2648 | spin_unlock(&dev->dev_reservation_lock); | ||
2649 | core_scsi3_put_pr_reg(pr_reg); | ||
2650 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
2651 | } | ||
2652 | if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
2653 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) | ||
2654 | all_reg = 1; | ||
2655 | |||
2656 | if ((all_reg == 0) && (pr_res_holder != pr_reg)) { | ||
2657 | /* | ||
2658 | * Non 'All Registrants' PR Type cases.. | ||
2659 | * Release request from a registered I_T nexus that is not a | ||
2660 | * persistent reservation holder. return GOOD status. | ||
2661 | */ | ||
2662 | spin_unlock(&dev->dev_reservation_lock); | ||
2663 | core_scsi3_put_pr_reg(pr_reg); | ||
2664 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
2665 | } | ||
2666 | /* | ||
2667 | * From spc4r17 Section 5.7.11.2 Releasing: | ||
2668 | * | ||
2669 | * Only the persistent reservation holder (see 5.7.10) is allowed to | ||
2670 | * release a persistent reservation. | ||
2671 | * | ||
2672 | * An application client releases the persistent reservation by issuing | ||
2673 | * a PERSISTENT RESERVE OUT command with RELEASE service action through | ||
2674 | * an I_T nexus that is a persistent reservation holder with the | ||
2675 | * following parameters: | ||
2676 | * | ||
2677 | * a) RESERVATION KEY field set to the value of the reservation key | ||
2678 | * that is registered with the logical unit for the I_T nexus; | ||
2679 | */ | ||
2680 | if (res_key != pr_reg->pr_res_key) { | ||
2681 | printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" | ||
2682 | " does not match existing SA REGISTER res_key:" | ||
2683 | " 0x%016Lx\n", res_key, pr_reg->pr_res_key); | ||
2684 | spin_unlock(&dev->dev_reservation_lock); | ||
2685 | core_scsi3_put_pr_reg(pr_reg); | ||
2686 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2687 | } | ||
2688 | /* | ||
2689 | * From spc4r17 Section 5.7.11.2 Releasing and above: | ||
2690 | * | ||
2691 | * b) TYPE field and SCOPE field set to match the persistent | ||
2692 | * reservation being released. | ||
2693 | */ | ||
2694 | if ((pr_res_holder->pr_res_type != type) || | ||
2695 | (pr_res_holder->pr_res_scope != scope)) { | ||
2696 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | ||
2697 | printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" | ||
2698 | " reservation from [%s]: %s with different TYPE " | ||
2699 | "and/or SCOPE while reservation already held by" | ||
2700 | " [%s]: %s, returning RESERVATION_CONFLICT\n", | ||
2701 | CMD_TFO(cmd)->get_fabric_name(), | ||
2702 | se_sess->se_node_acl->initiatorname, | ||
2703 | TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), | ||
2704 | pr_res_holder->pr_reg_nacl->initiatorname); | ||
2705 | |||
2706 | spin_unlock(&dev->dev_reservation_lock); | ||
2707 | core_scsi3_put_pr_reg(pr_reg); | ||
2708 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2709 | } | ||
2710 | /* | ||
2711 | * In response to a persistent reservation release request from the | ||
2712 | * persistent reservation holder the device server shall perform a | ||
2713 | * release by doing the following as an uninterrupted series of actions: | ||
2714 | * a) Release the persistent reservation; | ||
2715 | * b) Not remove any registration(s); | ||
2716 | * c) If the released persistent reservation is a registrants only type | ||
2717 | * or all registrants type persistent reservation, | ||
2718 | * the device server shall establish a unit attention condition for | ||
2719 | * the initiator port associated with every registered | ||
2720 | * I_T nexus other than the I_T nexus on which the PERSISTENT | ||
2721 | * RESERVE OUT command with RELEASE service action was received, | ||
2722 | * with the additional sense code set to RESERVATIONS RELEASED; and | ||
2723 | * d) If the persistent reservation is of any other type, the device | ||
2724 | * server shall not establish a unit attention condition. | ||
2725 | */ | ||
2726 | __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, | ||
2727 | pr_reg, 1); | ||
2728 | |||
2729 | spin_unlock(&dev->dev_reservation_lock); | ||
2730 | |||
2731 | if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) && | ||
2732 | (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) && | ||
2733 | (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && | ||
2734 | (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | ||
2735 | /* | ||
2736 | * If no UNIT ATTENTION conditions will be established for | ||
2737 | * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS | ||
2738 | * go ahead and check for APTPL=1 update+write below | ||
2739 | */ | ||
2740 | goto write_aptpl; | ||
2741 | } | ||
2742 | |||
2743 | spin_lock(&pr_tmpl->registration_lock); | ||
2744 | list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, | ||
2745 | pr_reg_list) { | ||
2746 | /* | ||
2747 | * Do not establish a UNIT ATTENTION condition | ||
2748 | * for the calling I_T Nexus | ||
2749 | */ | ||
2750 | if (pr_reg_p == pr_reg) | ||
2751 | continue; | ||
2752 | |||
2753 | core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl, | ||
2754 | pr_reg_p->pr_res_mapped_lun, | ||
2755 | 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED); | ||
2756 | } | ||
2757 | spin_unlock(&pr_tmpl->registration_lock); | ||
2758 | |||
2759 | write_aptpl: | ||
2760 | if (pr_tmpl->pr_aptpl_active) { | ||
2761 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | ||
2762 | &pr_reg->pr_aptpl_buf[0], | ||
2763 | pr_tmpl->pr_aptpl_buf_len); | ||
2764 | if (!(ret)) | ||
2765 | printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); | ||
2766 | } | ||
2767 | |||
2768 | core_scsi3_put_pr_reg(pr_reg); | ||
2769 | return 0; | ||
2770 | } | ||
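The RELEASE path above reduces to three ordered checks. A condensed userspace distillation with hypothetical types; the PYX_TRANSPORT_* outcomes are replaced by a plain enum:

#include <stdbool.h>
#include <stdint.h>

enum release_outcome { REL_GOOD_NOOP, REL_CONFLICT, REL_RELEASE };

struct pr_state {
	bool	 has_holder;
	bool	 caller_is_holder;
	bool	 all_registrants;	/* WE/EA All Registrants type */
	uint64_t caller_key;		/* key registered by the caller */
	int	 type, scope;
};

/* Mirrors core_scsi3_emulate_pro_release(): no reservation, or a
 * non-holder caller outside All Registrants, completes with GOOD and
 * does nothing; a key or TYPE/SCOPE mismatch conflicts; otherwise the
 * reservation is released. */
static enum release_outcome pro_release(const struct pr_state *s,
					uint64_t res_key, int type, int scope)
{
	if (!s->has_holder)
		return REL_GOOD_NOOP;
	if (!s->all_registrants && !s->caller_is_holder)
		return REL_GOOD_NOOP;
	if (res_key != s->caller_key)
		return REL_CONFLICT;
	if (s->type != type || s->scope != scope)
		return REL_CONFLICT;
	return REL_RELEASE;
}

int main(void)
{
	struct pr_state s = { .has_holder = true, .caller_is_holder = true,
			      .caller_key = 0xAA, .type = 0x01, .scope = 0 };

	return pro_release(&s, 0xAA, 0x01, 0) == REL_RELEASE ? 0 : 1;
}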
2771 | |||
2772 | static int core_scsi3_emulate_pro_clear( | ||
2773 | struct se_cmd *cmd, | ||
2774 | u64 res_key) | ||
2775 | { | ||
2776 | struct se_device *dev = cmd->se_dev; | ||
2777 | struct se_node_acl *pr_reg_nacl; | ||
2778 | struct se_session *se_sess = SE_SESS(cmd); | ||
2779 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
2780 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | ||
2781 | u32 pr_res_mapped_lun = 0; | ||
2782 | int calling_it_nexus = 0; | ||
2783 | /* | ||
2784 | * Locate the existing *pr_reg via struct se_node_acl pointers | ||
2785 | */ | ||
2786 | pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), | ||
2787 | se_sess->se_node_acl, se_sess); | ||
2788 | if (!(pr_reg_n)) { | ||
2789 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | ||
2790 | " PR_REGISTERED *pr_reg for CLEAR\n"); | ||
2791 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2792 | } | ||
2793 | /* | ||
2794 | * From spc4r17 section 5.7.11.6, Clearing: | ||
2795 | * | ||
2796 | * Any application client may release the persistent reservation and | ||
2797 | * remove all registrations from a device server by issuing a | ||
2798 | * PERSISTENT RESERVE OUT command with CLEAR service action through a | ||
2799 | * registered I_T nexus with the following parameter: | ||
2800 | * | ||
2801 | * a) RESERVATION KEY field set to the value of the reservation key | ||
2802 | * that is registered with the logical unit for the I_T nexus. | ||
2803 | */ | ||
2804 | if (res_key != pr_reg_n->pr_res_key) { | ||
2805 | printk(KERN_ERR "SPC-3 PR REGISTER: Received" | ||
2806 | " res_key: 0x%016Lx does not match" | ||
2807 | " existing SA REGISTER res_key:" | ||
2808 | " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); | ||
2809 | core_scsi3_put_pr_reg(pr_reg_n); | ||
2810 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2811 | } | ||
2812 | /* | ||
2813 | * a) Release the persistent reservation, if any; | ||
2814 | */ | ||
2815 | spin_lock(&dev->dev_reservation_lock); | ||
2816 | pr_res_holder = dev->dev_pr_res_holder; | ||
2817 | if (pr_res_holder) { | ||
2818 | struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; | ||
2819 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, | ||
2820 | pr_res_holder, 0); | ||
2821 | } | ||
2822 | spin_unlock(&dev->dev_reservation_lock); | ||
2823 | /* | ||
2824 | * b) Remove all registration(s) (see spc4r17 5.7.7); | ||
2825 | */ | ||
2826 | spin_lock(&pr_tmpl->registration_lock); | ||
2827 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
2828 | &pr_tmpl->registration_list, pr_reg_list) { | ||
2829 | |||
2830 | calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; | ||
2831 | pr_reg_nacl = pr_reg->pr_reg_nacl; | ||
2832 | pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; | ||
2833 | __core_scsi3_free_registration(dev, pr_reg, NULL, | ||
2834 | calling_it_nexus); | ||
2835 | /* | ||
2836 | * e) Establish a unit attention condition for the initiator | ||
2837 | * port associated with every registered I_T nexus other | ||
2838 | * than the I_T nexus on which the PERSISTENT RESERVE OUT | ||
2839 | * command with CLEAR service action was received, with the | ||
2840 | * additional sense code set to RESERVATIONS PREEMPTED. | ||
2841 | */ | ||
2842 | if (!(calling_it_nexus)) | ||
2843 | core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, | ||
2844 | 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); | ||
2845 | } | ||
2846 | spin_unlock(&pr_tmpl->registration_lock); | ||
2847 | |||
2848 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", | ||
2849 | CMD_TFO(cmd)->get_fabric_name()); | ||
2850 | |||
2851 | if (pr_tmpl->pr_aptpl_active) { | ||
2852 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | ||
2853 | printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" | ||
2854 | " for CLEAR\n"); | ||
2855 | } | ||
2856 | |||
2857 | core_scsi3_pr_generation(dev); | ||
2858 | return 0; | ||
2859 | } | ||
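CLEAR, by contrast, is unconditional once the reservation key matches: the reservation is released, every registration is dropped, and every nexus except the caller gets a 2A/RESERVATIONS PREEMPTED unit attention. A small mock of that fanout, with hypothetical names:

#include <stdio.h>

struct mock_reg { const char *nexus; int registered; int ua_pending; };

/* Mirrors the CLEAR loop above: all registrations go away; UAs are
 * queued for everyone but the calling I_T nexus. */
static void pro_clear(struct mock_reg *regs, int n, int calling)
{
	for (int i = 0; i < n; i++) {
		regs[i].registered = 0;
		if (i != calling)
			regs[i].ua_pending = 1;
	}
}

int main(void)
{
	struct mock_reg regs[] = {
		{ "iqn.initiator-a", 1, 0 },
		{ "iqn.initiator-b", 1, 0 },
	};

	pro_clear(regs, 2, 0);	/* initiator-a issued the CLEAR */
	for (int i = 0; i < 2; i++)
		printf("%s registered=%d ua=%d\n", regs[i].nexus,
		       regs[i].registered, regs[i].ua_pending);
	return 0;
}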
2860 | |||
2861 | /* | ||
2862 | * Called with struct se_device->dev_reservation_lock held. | ||
2863 | */ | ||
2864 | static void __core_scsi3_complete_pro_preempt( | ||
2865 | struct se_device *dev, | ||
2866 | struct t10_pr_registration *pr_reg, | ||
2867 | struct list_head *preempt_and_abort_list, | ||
2868 | int type, | ||
2869 | int scope, | ||
2870 | int abort) | ||
2871 | { | ||
2872 | struct se_node_acl *nacl = pr_reg->pr_reg_nacl; | ||
2873 | struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; | ||
2874 | char i_buf[PR_REG_ISID_ID_LEN]; | ||
2875 | int prf_isid; | ||
2876 | |||
2877 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
2878 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
2879 | PR_REG_ISID_ID_LEN); | ||
2880 | /* | ||
2881 | * Do an implicit RELEASE of the existing reservation. | ||
2882 | */ | ||
2883 | if (dev->dev_pr_res_holder) | ||
2884 | __core_scsi3_complete_pro_release(dev, nacl, | ||
2885 | dev->dev_pr_res_holder, 0); | ||
2886 | |||
2887 | dev->dev_pr_res_holder = pr_reg; | ||
2888 | pr_reg->pr_res_holder = 1; | ||
2889 | pr_reg->pr_res_type = type; | ||
2890 | pr_reg->pr_res_scope = scope; | ||
2891 | |||
2892 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" | ||
2893 | " reservation holder TYPE: %s ALL_TG_PT: %d\n", | ||
2894 | tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", | ||
2895 | core_scsi3_pr_dump_type(type), | ||
2896 | (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); | ||
2897 | printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", | ||
2898 | tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", | ||
2899 | nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); | ||
2900 | /* | ||
2901 | * For PREEMPT_AND_ABORT, add the preempting reservation's | ||
2902 | * struct t10_pr_registration to the list that will be compared | ||
2903 | * against received CDBs.. | ||
2904 | */ | ||
2905 | if (preempt_and_abort_list) | ||
2906 | list_add_tail(&pr_reg->pr_reg_abort_list, | ||
2907 | preempt_and_abort_list); | ||
2908 | } | ||
2909 | |||
2910 | static void core_scsi3_release_preempt_and_abort( | ||
2911 | struct list_head *preempt_and_abort_list, | ||
2912 | struct t10_pr_registration *pr_reg_holder) | ||
2913 | { | ||
2914 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | ||
2915 | |||
2916 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, | ||
2917 | pr_reg_abort_list) { | ||
2918 | |||
2919 | list_del(&pr_reg->pr_reg_abort_list); | ||
2920 | if (pr_reg_holder == pr_reg) | ||
2921 | continue; | ||
2922 | if (pr_reg->pr_res_holder) { | ||
2923 | printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); | ||
2924 | continue; | ||
2925 | } | ||
2926 | |||
2927 | pr_reg->pr_reg_deve = NULL; | ||
2928 | pr_reg->pr_reg_nacl = NULL; | ||
2929 | kfree(pr_reg->pr_aptpl_buf); | ||
2930 | kmem_cache_free(t10_pr_reg_cache, pr_reg); | ||
2931 | } | ||
2932 | } | ||
2933 | |||
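/*
 * Returns 0 when the received CDB's pr_res_key matches an entry on the
 * PREEMPT_AND_ABORT list (i.e. the command belongs to a preempted
 * registration and should be aborted), and 1 otherwise; consumed by the
 * LUN_RESET logic that core_scsi3_pro_preempt() invokes below via
 * core_tmr_lun_reset().
 */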
2934 | int core_scsi3_check_cdb_abort_and_preempt( | ||
2935 | struct list_head *preempt_and_abort_list, | ||
2936 | struct se_cmd *cmd) | ||
2937 | { | ||
2938 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | ||
2939 | |||
2940 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, | ||
2941 | pr_reg_abort_list) { | ||
2942 | if (pr_reg->pr_res_key == cmd->pr_res_key) | ||
2943 | return 0; | ||
2944 | } | ||
2945 | |||
2946 | return 1; | ||
2947 | } | ||
2948 | |||
2949 | static int core_scsi3_pro_preempt( | ||
2950 | struct se_cmd *cmd, | ||
2951 | int type, | ||
2952 | int scope, | ||
2953 | u64 res_key, | ||
2954 | u64 sa_res_key, | ||
2955 | int abort) | ||
2956 | { | ||
2957 | struct se_device *dev = SE_DEV(cmd); | ||
2958 | struct se_dev_entry *se_deve; | ||
2959 | struct se_node_acl *pr_reg_nacl; | ||
2960 | struct se_session *se_sess = SE_SESS(cmd); | ||
2961 | struct list_head preempt_and_abort_list; | ||
2962 | struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; | ||
2963 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
2964 | u32 pr_res_mapped_lun = 0; | ||
2965 | int all_reg = 0, calling_it_nexus = 0, released_regs = 0; | ||
2966 | int prh_type = 0, prh_scope = 0, ret; | ||
2967 | |||
2968 | if (!(se_sess)) | ||
2969 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2970 | |||
2971 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
2972 | pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | ||
2973 | se_sess); | ||
2974 | if (!(pr_reg_n)) { | ||
2975 | printk(KERN_ERR "SPC-3 PR: Unable to locate" | ||
2976 | " PR_REGISTERED *pr_reg for PREEMPT%s\n", | ||
2977 | (abort) ? "_AND_ABORT" : ""); | ||
2978 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2979 | } | ||
2980 | if (pr_reg_n->pr_res_key != res_key) { | ||
2981 | core_scsi3_put_pr_reg(pr_reg_n); | ||
2982 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
2983 | } | ||
2984 | if (scope != PR_SCOPE_LU_SCOPE) { | ||
2985 | printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); | ||
2986 | core_scsi3_put_pr_reg(pr_reg_n); | ||
2987 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
2988 | } | ||
2989 | INIT_LIST_HEAD(&preempt_and_abort_list); | ||
2990 | |||
2991 | spin_lock(&dev->dev_reservation_lock); | ||
2992 | pr_res_holder = dev->dev_pr_res_holder; | ||
2993 | if (pr_res_holder && | ||
2994 | ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
2995 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) | ||
2996 | all_reg = 1; | ||
2997 | |||
2998 | if (!(all_reg) && !(sa_res_key)) { | ||
2999 | spin_unlock(&dev->dev_reservation_lock); | ||
3000 | core_scsi3_put_pr_reg(pr_reg_n); | ||
3001 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3002 | } | ||
3003 | /* | ||
3004 | * From spc4r17, section 5.7.11.4.4 Removing Registrations: | ||
3005 | * | ||
3006 | * If the SERVICE ACTION RESERVATION KEY field does not identify a | ||
3007 | * persistent reservation holder or there is no persistent reservation | ||
3008 | * holder (i.e., there is no persistent reservation), then the device | ||
3009 | * server shall perform a preempt by doing the following in an | ||
3010 | * uninterrupted series of actions. (See below..) | ||
3011 | */ | ||
3012 | if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { | ||
3013 | /* | ||
3014 | * No existing reservation, or no reservation matching the SA Reservation Key.. | ||
3015 | * | ||
3016 | * A PROUT SA PREEMPT against an All Registrants type reservation is | ||
3017 | * allowed to be processed without a matching SA Reservation Key. | ||
3018 | */ | ||
3019 | spin_lock(&pr_tmpl->registration_lock); | ||
3020 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
3021 | &pr_tmpl->registration_list, pr_reg_list) { | ||
3022 | /* | ||
3023 | * Removing of registrations in non all registrants | ||
3024 | * type reservations without a matching SA reservation | ||
3025 | * key. | ||
3026 | * | ||
3027 | * a) Remove the registrations for all I_T nexuses | ||
3028 | * specified by the SERVICE ACTION RESERVATION KEY | ||
3029 | * field; | ||
3030 | * b) Ignore the contents of the SCOPE and TYPE fields; | ||
3031 | * c) Process tasks as defined in 5.7.1; and | ||
3032 | * d) Establish a unit attention condition for the | ||
3033 | * initiator port associated with every I_T nexus | ||
3034 | * that lost its registration other than the I_T | ||
3035 | * nexus on which the PERSISTENT RESERVE OUT command | ||
3036 | * was received, with the additional sense code set | ||
3037 | * to REGISTRATIONS PREEMPTED. | ||
3038 | */ | ||
3039 | if (!(all_reg)) { | ||
3040 | if (pr_reg->pr_res_key != sa_res_key) | ||
3041 | continue; | ||
3042 | |||
3043 | calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; | ||
3044 | pr_reg_nacl = pr_reg->pr_reg_nacl; | ||
3045 | pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; | ||
3046 | __core_scsi3_free_registration(dev, pr_reg, | ||
3047 | (abort) ? &preempt_and_abort_list : | ||
3048 | NULL, calling_it_nexus); | ||
3049 | released_regs++; | ||
3050 | } else { | ||
3051 | /* | ||
3052 | * Case for any existing all registrants type | ||
3053 | * reservation, follow logic in spc4r17 section | ||
3054 | * 5.7.11.4 Preempting, Table 52 and Figure 7. | ||
3055 | * | ||
3056 | * For a ZERO SA Reservation key, release | ||
3057 | * all other registrations and do an implicit | ||
3058 | * release of active persistent reservation. | ||
3059 | * | ||
3060 | * For a non-ZERO SA Reservation key, only | ||
3061 | * release the matching reservation key from | ||
3062 | * registrations. | ||
3063 | */ | ||
3064 | if ((sa_res_key) && | ||
3065 | (pr_reg->pr_res_key != sa_res_key)) | ||
3066 | continue; | ||
3067 | |||
3068 | calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; | ||
3069 | if (calling_it_nexus) | ||
3070 | continue; | ||
3071 | |||
3072 | pr_reg_nacl = pr_reg->pr_reg_nacl; | ||
3073 | pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; | ||
3074 | __core_scsi3_free_registration(dev, pr_reg, | ||
3075 | (abort) ? &preempt_and_abort_list : | ||
3076 | NULL, 0); | ||
3077 | released_regs++; | ||
3078 | } | ||
3079 | if (!(calling_it_nexus)) | ||
3080 | core_scsi3_ua_allocate(pr_reg_nacl, | ||
3081 | pr_res_mapped_lun, 0x2A, | ||
3082 | ASCQ_2AH_RESERVATIONS_PREEMPTED); | ||
3083 | } | ||
3084 | spin_unlock(&pr_tmpl->registration_lock); | ||
3085 | /* | ||
3086 | * If a PERSISTENT RESERVE OUT with a PREEMPT service action or | ||
3087 | * a PREEMPT AND ABORT service action sets the SERVICE ACTION | ||
3088 | * RESERVATION KEY field to a value that does not match any | ||
3089 | * registered reservation key, then the device server shall | ||
3090 | * complete the command with RESERVATION CONFLICT status. | ||
3091 | */ | ||
3092 | if (!(released_regs)) { | ||
3093 | spin_unlock(&dev->dev_reservation_lock); | ||
3094 | core_scsi3_put_pr_reg(pr_reg_n); | ||
3095 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
3096 | } | ||
3097 | /* | ||
3098 | * For an existing all registrants type reservation | ||
3099 | * with a zero SA reservation key, preempt the existing | ||
3100 | * reservation with the new PR type and scope. | ||
3101 | */ | ||
3102 | if (pr_res_holder && all_reg && !(sa_res_key)) { | ||
3103 | __core_scsi3_complete_pro_preempt(dev, pr_reg_n, | ||
3104 | (abort) ? &preempt_and_abort_list : NULL, | ||
3105 | type, scope, abort); | ||
3106 | |||
3107 | if (abort) | ||
3108 | core_scsi3_release_preempt_and_abort( | ||
3109 | &preempt_and_abort_list, pr_reg_n); | ||
3110 | } | ||
3111 | spin_unlock(&dev->dev_reservation_lock); | ||
3112 | |||
3113 | if (pr_tmpl->pr_aptpl_active) { | ||
3114 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | ||
3115 | &pr_reg_n->pr_aptpl_buf[0], | ||
3116 | pr_tmpl->pr_aptpl_buf_len); | ||
3117 | if (!(ret)) | ||
3118 | printk(KERN_INFO "SPC-3 PR: Updated APTPL" | ||
3119 | " metadata for PREEMPT%s\n", (abort) ? | ||
3120 | "_AND_ABORT" : ""); | ||
3121 | } | ||
3122 | |||
3123 | core_scsi3_put_pr_reg(pr_reg_n); | ||
3124 | core_scsi3_pr_generation(SE_DEV(cmd)); | ||
3125 | return 0; | ||
3126 | } | ||
3127 | /* | ||
3128 | * The PREEMPTing SA reservation key matches that of the | ||
3129 | * existing persistent reservation, first, we check if | ||
3130 | * we are preempting our own reservation. | ||
3131 | * From spc4r17, section 5.7.11.4.3 Preempting | ||
3132 | * persistent reservations and registration handling | ||
3133 | * | ||
3134 | * If an all registrants persistent reservation is not | ||
3135 | * present, it is not an error for the persistent | ||
3136 | * reservation holder to preempt itself (i.e., a | ||
3137 | * PERSISTENT RESERVE OUT with a PREEMPT service action | ||
3138 | * or a PREEMPT AND ABORT service action with the | ||
3139 | * SERVICE ACTION RESERVATION KEY value equal to the | ||
3140 | * persistent reservation holder's reservation key that | ||
3141 | * is received from the persistent reservation holder). | ||
3142 | * In that case, the device server shall establish the | ||
3143 | * new persistent reservation and maintain the | ||
3144 | * registration. | ||
3145 | */ | ||
3146 | prh_type = pr_res_holder->pr_res_type; | ||
3147 | prh_scope = pr_res_holder->pr_res_scope; | ||
3148 | /* | ||
3149 | * If the SERVICE ACTION RESERVATION KEY field identifies a | ||
3150 | * persistent reservation holder (see 5.7.10), the device | ||
3151 | * server shall perform a preempt by doing the following as | ||
3152 | * an uninterrupted series of actions: | ||
3153 | * | ||
3154 | * a) Release the persistent reservation for the holder | ||
3155 | * identified by the SERVICE ACTION RESERVATION KEY field; | ||
3156 | */ | ||
3157 | if (pr_reg_n != pr_res_holder) | ||
3158 | __core_scsi3_complete_pro_release(dev, | ||
3159 | pr_res_holder->pr_reg_nacl, | ||
3160 | dev->dev_pr_res_holder, 0); | ||
3161 | /* | ||
3162 | * b) Remove the registrations for all I_T nexuses identified | ||
3163 | * by the SERVICE ACTION RESERVATION KEY field, except the | ||
3164 | * I_T nexus that is being used for the PERSISTENT RESERVE | ||
3165 | * OUT command. If an all registrants persistent reservation | ||
3166 | * is present and the SERVICE ACTION RESERVATION KEY field | ||
3167 | * is set to zero, then all registrations shall be removed | ||
3168 | * except for that of the I_T nexus that is being used for | ||
3169 | * the PERSISTENT RESERVE OUT command; | ||
3170 | */ | ||
3171 | spin_lock(&pr_tmpl->registration_lock); | ||
3172 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
3173 | &pr_tmpl->registration_list, pr_reg_list) { | ||
3174 | |||
3175 | calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; | ||
3176 | if (calling_it_nexus) | ||
3177 | continue; | ||
3178 | |||
3179 | if (pr_reg->pr_res_key != sa_res_key) | ||
3180 | continue; | ||
3181 | |||
3182 | pr_reg_nacl = pr_reg->pr_reg_nacl; | ||
3183 | pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; | ||
3184 | __core_scsi3_free_registration(dev, pr_reg, | ||
3185 | (abort) ? &preempt_and_abort_list : NULL, | ||
3186 | calling_it_nexus); | ||
3187 | /* | ||
3188 | * e) Establish a unit attention condition for the initiator | ||
3189 | * port associated with every I_T nexus that lost its | ||
3190 | * persistent reservation and/or registration, with the | ||
3191 | * additional sense code set to REGISTRATIONS PREEMPTED; | ||
3192 | */ | ||
3193 | core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, | ||
3194 | ASCQ_2AH_RESERVATIONS_PREEMPTED); | ||
3195 | } | ||
3196 | spin_unlock(&pr_tmpl->registration_lock); | ||
3197 | /* | ||
3198 | * c) Establish a persistent reservation for the preempting | ||
3199 | * I_T nexus using the contents of the SCOPE and TYPE fields; | ||
3200 | */ | ||
3201 | __core_scsi3_complete_pro_preempt(dev, pr_reg_n, | ||
3202 | (abort) ? &preempt_and_abort_list : NULL, | ||
3203 | type, scope, abort); | ||
3204 | /* | ||
3205 | * d) Process tasks as defined in 5.7.1; | ||
3206 | * e) See above.. | ||
3207 | * f) If the type or scope has changed, then for every I_T nexus | ||
3208 | * whose reservation key was not removed, except for the I_T | ||
3209 | * nexus on which the PERSISTENT RESERVE OUT command was | ||
3210 | * received, the device server shall establish a unit | ||
3211 | * attention condition for the initiator port associated with | ||
3212 | * that I_T nexus, with the additional sense code set to | ||
3213 | * RESERVATIONS RELEASED. If the type or scope have not | ||
3214 | * changed, then no unit attention condition(s) shall be | ||
3215 | * established for this reason. | ||
3216 | */ | ||
3217 | if ((prh_type != type) || (prh_scope != scope)) { | ||
3218 | spin_lock(&pr_tmpl->registration_lock); | ||
3219 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
3220 | &pr_tmpl->registration_list, pr_reg_list) { | ||
3221 | |||
3222 | calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; | ||
3223 | if (calling_it_nexus) | ||
3224 | continue; | ||
3225 | |||
3226 | core_scsi3_ua_allocate(pr_reg->pr_reg_nacl, | ||
3227 | pr_reg->pr_res_mapped_lun, 0x2A, | ||
3228 | ASCQ_2AH_RESERVATIONS_RELEASED); | ||
3229 | } | ||
3230 | spin_unlock(&pr_tmpl->registration_lock); | ||
3231 | } | ||
3232 | spin_unlock(&dev->dev_reservation_lock); | ||
3233 | /* | ||
3234 | * Call LUN_RESET logic upon list of struct t10_pr_registration, | ||
3235 | * All received CDBs for the matching existing reservation and | ||
3236 | * registrations undergo ABORT_TASK logic. | ||
3237 | * | ||
3238 | * From there, core_scsi3_release_preempt_and_abort() will | ||
3239 | * release every registration in the list (which have already | ||
3240 | * been removed from the primary pr_reg list), except the | ||
3241 | * new persistent reservation holder, the calling Initiator Port. | ||
3242 | */ | ||
3243 | if (abort) { | ||
3244 | core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd); | ||
3245 | core_scsi3_release_preempt_and_abort(&preempt_and_abort_list, | ||
3246 | pr_reg_n); | ||
3247 | } | ||
3248 | |||
3249 | if (pr_tmpl->pr_aptpl_active) { | ||
3250 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | ||
3251 | &pr_reg_n->pr_aptpl_buf[0], | ||
3252 | pr_tmpl->pr_aptpl_buf_len); | ||
3253 | if (!(ret)) | ||
3254 | printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" | ||
3255 | "%s\n", (abort) ? "_AND_ABORT" : ""); | ||
3256 | } | ||
3257 | |||
3258 | core_scsi3_put_pr_reg(pr_reg_n); | ||
3259 | core_scsi3_pr_generation(SE_DEV(cmd)); | ||
3260 | return 0; | ||
3261 | } | ||
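The PREEMPT flow above first splits on whether the SA RESERVATION KEY names the current reservation holder. A condensed decision sketch with a hypothetical enum; the all-registrants zero-key takeover happens inside the remove-registrations branch, as in the code above:

#include <stdbool.h>
#include <stdint.h>

enum preempt_branch {
	PRE_INVALID_PARAM,	/* no all-reg reservation and zero SA key */
	PRE_REMOVE_REGS,	/* SA key names no holder: strip matching regs */
	PRE_PREEMPT_HOLDER,	/* SA key names the holder: release + take over */
};

static enum preempt_branch preempt_branch(bool has_holder, bool all_reg,
					  uint64_t holder_key,
					  uint64_t sa_res_key)
{
	if (!all_reg && !sa_res_key)
		return PRE_INVALID_PARAM;
	if (!has_holder || holder_key != sa_res_key)
		return PRE_REMOVE_REGS;
	return PRE_PREEMPT_HOLDER;
}

int main(void)
{
	return preempt_branch(true, false, 0xAA, 0xAA) == PRE_PREEMPT_HOLDER
		? 0 : 1;
}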
3262 | |||
3263 | static int core_scsi3_emulate_pro_preempt( | ||
3264 | struct se_cmd *cmd, | ||
3265 | int type, | ||
3266 | int scope, | ||
3267 | u64 res_key, | ||
3268 | u64 sa_res_key, | ||
3269 | int abort) | ||
3270 | { | ||
3271 | int ret = 0; | ||
3272 | |||
3273 | switch (type) { | ||
3274 | case PR_TYPE_WRITE_EXCLUSIVE: | ||
3275 | case PR_TYPE_EXCLUSIVE_ACCESS: | ||
3276 | case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: | ||
3277 | case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: | ||
3278 | case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: | ||
3279 | case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: | ||
3280 | ret = core_scsi3_pro_preempt(cmd, type, scope, | ||
3281 | res_key, sa_res_key, abort); | ||
3282 | break; | ||
3283 | default: | ||
3284 | printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" | ||
3285 | " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); | ||
3286 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3287 | } | ||
3288 | |||
3289 | return ret; | ||
3290 | } | ||
3291 | |||
3292 | |||
3293 | static int core_scsi3_emulate_pro_register_and_move( | ||
3294 | struct se_cmd *cmd, | ||
3295 | u64 res_key, | ||
3296 | u64 sa_res_key, | ||
3297 | int aptpl, | ||
3298 | int unreg) | ||
3299 | { | ||
3300 | struct se_session *se_sess = SE_SESS(cmd); | ||
3301 | struct se_device *dev = SE_DEV(cmd); | ||
3302 | struct se_dev_entry *se_deve, *dest_se_deve = NULL; | ||
3303 | struct se_lun *se_lun = SE_LUN(cmd); | ||
3304 | struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; | ||
3305 | struct se_port *se_port; | ||
3306 | struct se_portal_group *se_tpg, *dest_se_tpg = NULL; | ||
3307 | struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; | ||
3308 | struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; | ||
3309 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
3310 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
3311 | unsigned char *initiator_str; | ||
3312 | char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; | ||
3313 | u32 tid_len, tmp_tid_len; | ||
3314 | int new_reg = 0, type, scope, ret, matching_iname, prf_isid; | ||
3315 | unsigned short rtpi; | ||
3316 | unsigned char proto_ident; | ||
3317 | |||
3318 | if (!(se_sess) || !(se_lun)) { | ||
3319 | printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | ||
3320 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3321 | } | ||
3322 | memset(dest_iport, 0, 64); | ||
3323 | memset(i_buf, 0, PR_REG_ISID_ID_LEN); | ||
3324 | se_tpg = se_sess->se_tpg; | ||
3325 | tf_ops = TPG_TFO(se_tpg); | ||
3326 | se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; | ||
3327 | /* | ||
3328 | * Follow logic from spc4r17 Section 5.7.8, Table 50 -- | ||
3329 | * Register behaviors for a REGISTER AND MOVE service action | ||
3330 | * | ||
3331 | * Locate the existing *pr_reg via struct se_node_acl pointers | ||
3332 | */ | ||
3333 | pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, | ||
3334 | se_sess); | ||
3335 | if (!(pr_reg)) { | ||
3336 | printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" | ||
3337 | " *pr_reg for REGISTER_AND_MOVE\n"); | ||
3338 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3339 | } | ||
3340 | /* | ||
3341 | * The provided reservation key must match the existing reservation key | ||
3342 | * provided during this initiator's I_T nexus registration. | ||
3343 | */ | ||
3344 | if (res_key != pr_reg->pr_res_key) { | ||
3345 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" | ||
3346 | " res_key: 0x%016Lx does not match existing SA REGISTER" | ||
3347 | " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); | ||
3348 | core_scsi3_put_pr_reg(pr_reg); | ||
3349 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
3350 | } | ||
3351 | /* | ||
3352 | * The service action reservation key needs to be non-zero | ||
3353 | */ | ||
3354 | if (!(sa_res_key)) { | ||
3355 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" | ||
3356 | " sa_res_key\n"); | ||
3357 | core_scsi3_put_pr_reg(pr_reg); | ||
3358 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3359 | } | ||
3360 | /* | ||
3361 | * Determine the Relative Target Port Identifier where the reservation | ||
3362 | * will be moved to for the TransportID containing SCSI initiator WWN | ||
3363 | * information. | ||
3364 | */ | ||
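	/*
	 * REGISTER AND MOVE parameter list layout (see spc4r17 6.14.4):
	 * bytes 18-19 carry the RELATIVE TARGET PORT IDENTIFIER and bytes
	 * 20-23 the TRANSPORTID PARAMETER DATA LENGTH, both big-endian;
	 * the TransportID itself begins at byte 24.
	 */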
3365 | rtpi = (buf[18] & 0xff) << 8; | ||
3366 | rtpi |= buf[19] & 0xff; | ||
3367 | tid_len = (buf[20] & 0xff) << 24; | ||
3368 | tid_len |= (buf[21] & 0xff) << 16; | ||
3369 | tid_len |= (buf[22] & 0xff) << 8; | ||
3370 | tid_len |= buf[23] & 0xff; | ||
3371 | |||
3372 | if ((tid_len + 24) != cmd->data_length) { | ||
3373 | printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" | ||
3374 | " does not equal CDB data_length: %u\n", tid_len, | ||
3375 | cmd->data_length); | ||
3376 | core_scsi3_put_pr_reg(pr_reg); | ||
3377 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3378 | } | ||
3379 | |||
3380 | spin_lock(&dev->se_port_lock); | ||
3381 | list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) { | ||
3382 | if (se_port->sep_rtpi != rtpi) | ||
3383 | continue; | ||
3384 | dest_se_tpg = se_port->sep_tpg; | ||
3385 | if (!(dest_se_tpg)) | ||
3386 | continue; | ||
3387 | dest_tf_ops = TPG_TFO(dest_se_tpg); | ||
3388 | if (!(dest_tf_ops)) | ||
3389 | continue; | ||
3390 | |||
3391 | atomic_inc(&dest_se_tpg->tpg_pr_ref_count); | ||
3392 | smp_mb__after_atomic_inc(); | ||
3393 | spin_unlock(&dev->se_port_lock); | ||
3394 | |||
3395 | ret = core_scsi3_tpg_depend_item(dest_se_tpg); | ||
3396 | if (ret != 0) { | ||
3397 | printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" | ||
3398 | " for dest_se_tpg\n"); | ||
3399 | atomic_dec(&dest_se_tpg->tpg_pr_ref_count); | ||
3400 | smp_mb__after_atomic_dec(); | ||
3401 | core_scsi3_put_pr_reg(pr_reg); | ||
3402 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3403 | } | ||
3404 | |||
3405 | spin_lock(&dev->se_port_lock); | ||
3406 | break; | ||
3407 | } | ||
3408 | spin_unlock(&dev->se_port_lock); | ||
3409 | |||
3410 | if (!(dest_se_tpg) || (!dest_tf_ops)) { | ||
3411 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" | ||
3412 | " fabric ops from Relative Target Port Identifier:" | ||
3413 | " %hu\n", rtpi); | ||
3414 | core_scsi3_put_pr_reg(pr_reg); | ||
3415 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3416 | } | ||
3417 | proto_ident = (buf[24] & 0x0f); | ||
3418 | #if 0 | ||
3419 | printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" | ||
3420 | " 0x%02x\n", proto_ident); | ||
3421 | #endif | ||
3422 | if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { | ||
3423 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" | ||
3424 | " proto_ident: 0x%02x does not match ident: 0x%02x" | ||
3425 | " from fabric: %s\n", proto_ident, | ||
3426 | dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), | ||
3427 | dest_tf_ops->get_fabric_name()); | ||
3428 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3429 | goto out; | ||
3430 | } | ||
3431 | if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { | ||
3432 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" | ||
3433 | " containg a valid tpg_parse_pr_out_transport_id" | ||
3434 | " function pointer\n"); | ||
3435 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3436 | goto out; | ||
3437 | } | ||
3438 | initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, | ||
3439 | (const char *)&buf[24], &tmp_tid_len, &iport_ptr); | ||
3440 | if (!(initiator_str)) { | ||
3441 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" | ||
3442 | " initiator_str from Transport ID\n"); | ||
3443 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3444 | goto out; | ||
3445 | } | ||
3446 | |||
3447 | printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" | ||
3448 | " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? | ||
3449 | "port" : "device", initiator_str, (iport_ptr != NULL) ? | ||
3450 | iport_ptr : ""); | ||
3451 | /* | ||
3452 | * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service | ||
3453 | * action specifies a TransportID that is the same as the initiator port | ||
3454 | * of the I_T nexus for the command received, then the command shall | ||
3455 | * be terminated with CHECK CONDITION status, with the sense key set to | ||
3456 | * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD | ||
3457 | * IN PARAMETER LIST. | ||
3458 | */ | ||
3459 | pr_reg_nacl = pr_reg->pr_reg_nacl; | ||
3460 | matching_iname = (!strcmp(initiator_str, | ||
3461 | pr_reg_nacl->initiatorname)) ? 1 : 0; | ||
3462 | if (!(matching_iname)) | ||
3463 | goto after_iport_check; | ||
3464 | |||
3465 | if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) { | ||
3466 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" | ||
3467 | " matches: %s on received I_T Nexus\n", initiator_str, | ||
3468 | pr_reg_nacl->initiatorname); | ||
3469 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3470 | goto out; | ||
3471 | } | ||
3472 | if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) { | ||
3473 | printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" | ||
3474 | " matches: %s %s on received I_T Nexus\n", | ||
3475 | initiator_str, iport_ptr, pr_reg_nacl->initiatorname, | ||
3476 | pr_reg->pr_reg_isid); | ||
3477 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3478 | goto out; | ||
3479 | } | ||
3480 | after_iport_check: | ||
3481 | /* | ||
3482 | * Locate the destination struct se_node_acl from the received Transport ID | ||
3483 | */ | ||
3484 | spin_lock_bh(&dest_se_tpg->acl_node_lock); | ||
3485 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, | ||
3486 | initiator_str); | ||
3487 | if (dest_node_acl) { | ||
3488 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | ||
3489 | smp_mb__after_atomic_inc(); | ||
3490 | } | ||
3491 | spin_unlock_bh(&dest_se_tpg->acl_node_lock); | ||
3492 | |||
3493 | if (!(dest_node_acl)) { | ||
3494 | printk(KERN_ERR "Unable to locate %s dest_node_acl for" | ||
3495 | " TransportID%s\n", dest_tf_ops->get_fabric_name(), | ||
3496 | initiator_str); | ||
3497 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3498 | goto out; | ||
3499 | } | ||
3500 | ret = core_scsi3_nodeacl_depend_item(dest_node_acl); | ||
3501 | if (ret != 0) { | ||
3502 | printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for" | ||
3503 | " dest_node_acl\n"); | ||
3504 | atomic_dec(&dest_node_acl->acl_pr_ref_count); | ||
3505 | smp_mb__after_atomic_dec(); | ||
3506 | dest_node_acl = NULL; | ||
3507 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3508 | goto out; | ||
3509 | } | ||
3510 | #if 0 | ||
3511 | printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" | ||
3512 | " %s from TransportID\n", dest_tf_ops->get_fabric_name(), | ||
3513 | dest_node_acl->initiatorname); | ||
3514 | #endif | ||
3515 | /* | ||
3516 | * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET | ||
3517 | * PORT IDENTIFIER. | ||
3518 | */ | ||
3519 | dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); | ||
3520 | if (!(dest_se_deve)) { | ||
3521 | printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:" | ||
3522 | " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); | ||
3523 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3524 | goto out; | ||
3525 | } | ||
3526 | |||
3527 | ret = core_scsi3_lunacl_depend_item(dest_se_deve); | ||
3528 | if (ret < 0) { | ||
3529 | printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n"); | ||
3530 | atomic_dec(&dest_se_deve->pr_ref_count); | ||
3531 | smp_mb__after_atomic_dec(); | ||
3532 | dest_se_deve = NULL; | ||
3533 | ret = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3534 | goto out; | ||
3535 | } | ||
3536 | #if 0 | ||
3537 | printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" | ||
3538 | " ACL for dest_se_deve->mapped_lun: %u\n", | ||
3539 | dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, | ||
3540 | dest_se_deve->mapped_lun); | ||
3541 | #endif | ||
3542 | /* | ||
3543 | * A persistent reservation needs to already exist in order to | ||
3544 | * successfully complete the REGISTER_AND_MOVE service action.. | ||
3545 | */ | ||
3546 | spin_lock(&dev->dev_reservation_lock); | ||
3547 | pr_res_holder = dev->dev_pr_res_holder; | ||
3548 | if (!(pr_res_holder)) { | ||
3549 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation" | ||
3550 | " currently held\n"); | ||
3551 | spin_unlock(&dev->dev_reservation_lock); | ||
3552 | ret = PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3553 | goto out; | ||
3554 | } | ||
3555 | /* | ||
3556 | * The I_T Nexus the command was received on must be the reservation holder. | ||
3557 | * | ||
3558 | * From spc4r17 section 5.7.8 Table 50 -- | ||
3559 | * Register behaviors for a REGISTER AND MOVE service action | ||
3560 | */ | ||
3561 | if (pr_res_holder != pr_reg) { | ||
3562 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T" | ||
3563 | " Nexus is not reservation holder\n"); | ||
3564 | spin_unlock(&dev->dev_reservation_lock); | ||
3565 | ret = PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
3566 | goto out; | ||
3567 | } | ||
3568 | /* | ||
3569 | * From spc4r17 section 5.7.8: registering and moving reservation | ||
3570 | * | ||
3571 | * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service | ||
3572 | * action is received and the established persistent reservation is a | ||
3573 | * Write Exclusive - All Registrants type or Exclusive Access - | ||
3574 | * All Registrants type reservation, then the command shall be completed | ||
3575 | * with RESERVATION CONFLICT status. | ||
3576 | */ | ||
3577 | if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
3578 | (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { | ||
3579 | printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move" | ||
3580 | " reservation for type: %s\n", | ||
3581 | core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); | ||
3582 | spin_unlock(&dev->dev_reservation_lock); | ||
3583 | ret = PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
3584 | goto out; | ||
3585 | } | ||
3586 | pr_res_nacl = pr_res_holder->pr_reg_nacl; | ||
3587 | /* | ||
3588 | * b) Ignore the contents of the (received) SCOPE and TYPE fields; | ||
3589 | */ | ||
3590 | type = pr_res_holder->pr_res_type; | ||
3591 | scope = pr_res_holder->pr_res_scope; | ||
3592 | /* | ||
3593 | * c) Associate the reservation key specified in the SERVICE ACTION | ||
3594 | * RESERVATION KEY field with the I_T nexus specified as the | ||
3595 | * destination of the register and move, where: | ||
3596 | * A) The I_T nexus is specified by the TransportID and the | ||
3597 | * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and | ||
3598 | * B) Regardless of the TransportID format used, the association for | ||
3599 | * the initiator port is based on either the initiator port name | ||
3600 | * (see 3.1.71) on SCSI transport protocols where port names are | ||
3601 | * required or the initiator port identifier (see 3.1.70) on SCSI | ||
3602 | * transport protocols where port names are not required; | ||
3603 | * d) Register the reservation key specified in the SERVICE ACTION | ||
3604 | * RESERVATION KEY field; | ||
3605 | * e) Retain the reservation key specified in the SERVICE ACTION | ||
3606 | * RESERVATION KEY field and associated information; | ||
3607 | * | ||
3608 | * Also, it is not an error for a REGISTER AND MOVE service action to | ||
3609 | * register an I_T nexus that is already registered with the same | ||
3610 | * reservation key or a different reservation key. | ||
3611 | */ | ||
3612 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | ||
3613 | iport_ptr); | ||
3614 | if (!(dest_pr_reg)) { | ||
3615 | ret = core_scsi3_alloc_registration(SE_DEV(cmd), | ||
3616 | dest_node_acl, dest_se_deve, iport_ptr, | ||
3617 | sa_res_key, 0, aptpl, 2, 1); | ||
3618 | if (ret != 0) { | ||
3619 | spin_unlock(&dev->dev_reservation_lock); | ||
3620 | ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3621 | goto out; | ||
3622 | } | ||
3623 | dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, | ||
3624 | iport_ptr); | ||
3625 | new_reg = 1; | ||
3626 | } | ||
3627 | /* | ||
3628 | * f) Release the persistent reservation for the persistent reservation | ||
3629 | * holder (i.e., the I_T nexus on which the command was received); | ||
3630 | */ | ||
3631 | __core_scsi3_complete_pro_release(dev, pr_res_nacl, | ||
3632 | dev->dev_pr_res_holder, 0); | ||
3633 | /* | ||
3634 | * g) Move the persistent reservation to the specified I_T nexus using | ||
3635 | * the same scope and type as the persistent reservation released in | ||
3636 | * item f); and | ||
3637 | */ | ||
3638 | dev->dev_pr_res_holder = dest_pr_reg; | ||
3639 | dest_pr_reg->pr_res_holder = 1; | ||
3640 | dest_pr_reg->pr_res_type = type; | ||
3641 | dest_pr_reg->pr_res_scope = scope; | ||
3642 | prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], | ||
3643 | PR_REG_ISID_ID_LEN); | ||
3644 | /* | ||
3645 | * Increment PRGeneration for existing registrations. | ||
3646 | */ | ||
3647 | if (!(new_reg)) | ||
3648 | dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; | ||
3649 | spin_unlock(&dev->dev_reservation_lock); | ||
3650 | |||
3651 | printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" | ||
3652 | " created new reservation holder TYPE: %s on object RTPI:" | ||
3653 | " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), | ||
3654 | core_scsi3_pr_dump_type(type), rtpi, | ||
3655 | dest_pr_reg->pr_res_generation); | ||
3656 | printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" | ||
3657 | " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", | ||
3658 | tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, | ||
3659 | (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), | ||
3660 | dest_node_acl->initiatorname, (iport_ptr != NULL) ? | ||
3661 | iport_ptr : ""); | ||
3662 | /* | ||
3663 | * It is now safe to release configfs group dependencies for destination | ||
3664 | * of Transport ID Initiator Device/Port Identifier | ||
3665 | */ | ||
3666 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
3667 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
3668 | core_scsi3_tpg_undepend_item(dest_se_tpg); | ||
3669 | /* | ||
3670 | * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T | ||
3671 | * nexus on which PERSISTENT RESERVE OUT command was received. | ||
3672 | */ | ||
3673 | if (unreg) { | ||
3674 | spin_lock(&pr_tmpl->registration_lock); | ||
3675 | __core_scsi3_free_registration(dev, pr_reg, NULL, 1); | ||
3676 | spin_unlock(&pr_tmpl->registration_lock); | ||
3677 | } else | ||
3678 | core_scsi3_put_pr_reg(pr_reg); | ||
3679 | |||
3680 | /* | ||
3681 | * Clear the APTPL metadata if APTPL has been disabled, otherwise | ||
3682 | * write out the updated metadata to struct file for this SCSI device. | ||
3683 | */ | ||
3684 | if (!(aptpl)) { | ||
3685 | pr_tmpl->pr_aptpl_active = 0; | ||
3686 | core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); | ||
3687 | printk("SPC-3 PR: Set APTPL Bit Deactivated for" | ||
3688 | " REGISTER_AND_MOVE\n"); | ||
3689 | } else { | ||
3690 | pr_tmpl->pr_aptpl_active = 1; | ||
3691 | ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), | ||
3692 | &dest_pr_reg->pr_aptpl_buf[0], | ||
3693 | pr_tmpl->pr_aptpl_buf_len); | ||
3694 | if (!(ret)) | ||
3695 | printk("SPC-3 PR: Set APTPL Bit Activated for" | ||
3696 | " REGISTER_AND_MOVE\n"); | ||
3697 | } | ||
3698 | |||
3699 | core_scsi3_put_pr_reg(dest_pr_reg); | ||
3700 | return 0; | ||
3701 | out: | ||
3702 | if (dest_se_deve) | ||
3703 | core_scsi3_lunacl_undepend_item(dest_se_deve); | ||
3704 | if (dest_node_acl) | ||
3705 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | ||
3706 | core_scsi3_tpg_undepend_item(dest_se_tpg); | ||
3707 | core_scsi3_put_pr_reg(pr_reg); | ||
3708 | return ret; | ||
3709 | } | ||
3710 | |||
3711 | static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) | ||
3712 | { | ||
3713 | unsigned int __v1, __v2; | ||
3714 | |||
3715 | __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3]; | ||
3716 | __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7]; | ||
3717 | |||
3718 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
3719 | } | ||
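For reference, the open-coded shift/OR above assembles the 8-byte big-endian RESERVATION KEY from two 32-bit halves. A minimal equivalent sketch, assuming this tree provides the generic unaligned accessors from <asm/unaligned.h> (the helper name here is illustrative, not part of this patch):

    #include <asm/unaligned.h>

    /* RESERVATION KEY is an 8-byte big-endian field (spc4r17 6.14) */
    static u64 extract_reservation_key_sketch(const unsigned char *key)
    {
    	return get_unaligned_be64(key);
    }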
3720 | |||
3721 | /* | ||
3722 | * See spc4r17 section 6.14 Table 170 | ||
3723 | */ | ||
3724 | static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) | ||
3725 | { | ||
3726 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
3727 | u64 res_key, sa_res_key; | ||
3728 | int sa, scope, type, aptpl; | ||
3729 | int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; | ||
3730 | /* | ||
3731 | * FIXME: A NULL struct se_session pointer means this is not coming from | ||
3732 | * a $FABRIC_MOD's nexus, but from internal passthrough ops. | ||
3733 | */ | ||
3734 | if (!(SE_SESS(cmd))) | ||
3735 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
3736 | |||
3737 | if (cmd->data_length < 24) { | ||
3738 | printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list" | ||
3739 | " length too small: %u\n", cmd->data_length); | ||
3740 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3741 | } | ||
3742 | /* | ||
3743 | * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB) | ||
3744 | */ | ||
3745 | sa = (cdb[1] & 0x1f); | ||
3746 | scope = (cdb[2] & 0xf0); | ||
3747 | type = (cdb[2] & 0x0f); | ||
3748 | /* | ||
3749 | * From PERSISTENT_RESERVE_OUT parameter list (payload) | ||
3750 | */ | ||
3751 | res_key = core_scsi3_extract_reservation_key(&buf[0]); | ||
3752 | sa_res_key = core_scsi3_extract_reservation_key(&buf[8]); | ||
3753 | /* | ||
3754 | * REGISTER_AND_MOVE uses a different SA parameter list containing | ||
3755 | * SCSI TransportIDs. | ||
3756 | */ | ||
3757 | if (sa != PRO_REGISTER_AND_MOVE) { | ||
3758 | spec_i_pt = (buf[20] & 0x08); | ||
3759 | all_tg_pt = (buf[20] & 0x04); | ||
3760 | aptpl = (buf[20] & 0x01); | ||
3761 | } else { | ||
3762 | aptpl = (buf[17] & 0x01); | ||
3763 | unreg = (buf[17] & 0x02); | ||
3764 | } | ||
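The byte and bit positions decoded above follow the basic PERSISTENT RESERVE OUT parameter list (spc4r17 Table 172); the REGISTER AND MOVE list differs, which is why the else branch reads APTPL and UNREG from byte 17 instead. An illustrative layout sketch (struct and field names are hypothetical, not part of this patch):

    struct pro_parameter_list {
    	unsigned char res_key[8];	/* bytes 0-7, big-endian */
    	unsigned char sa_res_key[8];	/* bytes 8-15, big-endian */
    	unsigned char obsolete1[4];	/* bytes 16-19 */
    	unsigned char flags;		/* byte 20: SPEC_I_PT 0x08,
    					 * ALL_TG_PT 0x04, APTPL 0x01 */
    	unsigned char reserved;		/* byte 21 */
    	unsigned char obsolete2[2];	/* bytes 22-23 */
    };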
3765 | /* | ||
3766 | * SPEC_I_PT=1 is only valid for Service action: REGISTER | ||
3767 | */ | ||
3768 | if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) | ||
3769 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3770 | /* | ||
3771 | * From spc4r17 section 6.14: | ||
3772 | * | ||
3773 | * If the SPEC_I_PT bit is set to zero, the service action is not | ||
3774 | * REGISTER AND MOVE, and the parameter list length is not 24, then | ||
3775 | * the command shall be terminated with CHECK CONDITION status, with | ||
3776 | * the sense key set to ILLEGAL REQUEST, and the additional sense | ||
3777 | * code set to PARAMETER LIST LENGTH ERROR. | ||
3778 | */ | ||
3779 | if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && | ||
3780 | (cmd->data_length != 24)) { | ||
3781 | printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter" | ||
3782 | " list length: %u\n", cmd->data_length); | ||
3783 | return PYX_TRANSPORT_INVALID_PARAMETER_LIST; | ||
3784 | } | ||
3785 | /* | ||
3786 | * The core_scsi3_emulate_pro_* function parameters | ||
3787 | * are defined by spc4r17 Table 174: | ||
3788 | * PERSISTENT_RESERVE_OUT service actions and valid parameters. | ||
3789 | */ | ||
3790 | switch (sa) { | ||
3791 | case PRO_REGISTER: | ||
3792 | return core_scsi3_emulate_pro_register(cmd, | ||
3793 | res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0); | ||
3794 | case PRO_RESERVE: | ||
3795 | return core_scsi3_emulate_pro_reserve(cmd, | ||
3796 | type, scope, res_key); | ||
3797 | case PRO_RELEASE: | ||
3798 | return core_scsi3_emulate_pro_release(cmd, | ||
3799 | type, scope, res_key); | ||
3800 | case PRO_CLEAR: | ||
3801 | return core_scsi3_emulate_pro_clear(cmd, res_key); | ||
3802 | case PRO_PREEMPT: | ||
3803 | return core_scsi3_emulate_pro_preempt(cmd, type, scope, | ||
3804 | res_key, sa_res_key, 0); | ||
3805 | case PRO_PREEMPT_AND_ABORT: | ||
3806 | return core_scsi3_emulate_pro_preempt(cmd, type, scope, | ||
3807 | res_key, sa_res_key, 1); | ||
3808 | case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: | ||
3809 | return core_scsi3_emulate_pro_register(cmd, | ||
3810 | 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1); | ||
3811 | case PRO_REGISTER_AND_MOVE: | ||
3812 | return core_scsi3_emulate_pro_register_and_move(cmd, res_key, | ||
3813 | sa_res_key, aptpl, unreg); | ||
3814 | default: | ||
3815 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" | ||
3816 | " action: 0x%02x\n", cdb[1] & 0x1f); | ||
3817 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3818 | } | ||
3819 | |||
3820 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3821 | } | ||
3822 | |||
3823 | /* | ||
3824 | * PERSISTENT_RESERVE_IN Service Action READ_KEYS | ||
3825 | * | ||
3826 | * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160 | ||
3827 | */ | ||
3828 | static int core_scsi3_pri_read_keys(struct se_cmd *cmd) | ||
3829 | { | ||
3830 | struct se_device *se_dev = SE_DEV(cmd); | ||
3831 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | ||
3832 | struct t10_pr_registration *pr_reg; | ||
3833 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
3834 | u32 add_len = 0, off = 8; | ||
3835 | |||
3836 | if (cmd->data_length < 8) { | ||
3837 | printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" | ||
3838 | " too small\n", cmd->data_length); | ||
3839 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3840 | } | ||
3841 | |||
3842 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | ||
3843 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | ||
3844 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | ||
3845 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | ||
3846 | |||
3847 | spin_lock(&T10_RES(su_dev)->registration_lock); | ||
3848 | list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, | ||
3849 | pr_reg_list) { | ||
3850 | /* | ||
3851 | * Check that the next 8-byte reservation key descriptor will not | ||
3852 | * overflow the remaining READ_KEYS payload. | ||
3853 | */ | ||
3854 | if ((add_len + 8) > (cmd->data_length - 8)) | ||
3855 | break; | ||
3856 | |||
3857 | buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); | ||
3858 | buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); | ||
3859 | buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); | ||
3860 | buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); | ||
3861 | buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); | ||
3862 | buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); | ||
3863 | buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); | ||
3864 | buf[off++] = (pr_reg->pr_res_key & 0xff); | ||
3865 | |||
3866 | add_len += 8; | ||
3867 | } | ||
3868 | spin_unlock(&T10_RES(su_dev)->registration_lock); | ||
3869 | |||
3870 | buf[4] = ((add_len >> 24) & 0xff); | ||
3871 | buf[5] = ((add_len >> 16) & 0xff); | ||
3872 | buf[6] = ((add_len >> 8) & 0xff); | ||
3873 | buf[7] = (add_len & 0xff); | ||
3874 | |||
3875 | return 0; | ||
3876 | } | ||
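For reference, a self-contained userspace sketch that decodes the READ_KEYS parameter data built above: a 4-byte PRGENERATION, a 4-byte ADDITIONAL LENGTH, then one 8-byte big-endian key per registration (function name illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static void dump_read_keys(const uint8_t *buf, uint32_t buf_len)
    {
    	uint32_t add_len = ((uint32_t)buf[4] << 24) | ((uint32_t)buf[5] << 16) |
    			   ((uint32_t)buf[6] << 8) | buf[7];
    	uint32_t i;
    	int j;

    	for (i = 0; i + 8 <= add_len && 16 + i <= buf_len; i += 8) {
    		uint64_t key = 0;

    		for (j = 0; j < 8; j++)
    			key = (key << 8) | buf[8 + i + j];
    		printf("key[%u]: 0x%016llx\n", i / 8, (unsigned long long)key);
    	}
    }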
3877 | |||
3878 | /* | ||
3879 | * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION | ||
3880 | * | ||
3881 | * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Tables 161 and 162 | ||
3882 | */ | ||
3883 | static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) | ||
3884 | { | ||
3885 | struct se_device *se_dev = SE_DEV(cmd); | ||
3886 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | ||
3887 | struct t10_pr_registration *pr_reg; | ||
3888 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
3889 | u64 pr_res_key; | ||
3890 | u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ | ||
3891 | |||
3892 | if (cmd->data_length < 8) { | ||
3893 | printk(KERN_ERR "PRIN SA READ_RESERVATION SCSI Data Length: %u" | ||
3894 | " too small\n", cmd->data_length); | ||
3895 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3896 | } | ||
3897 | |||
3898 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | ||
3899 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | ||
3900 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | ||
3901 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | ||
3902 | |||
3903 | spin_lock(&se_dev->dev_reservation_lock); | ||
3904 | pr_reg = se_dev->dev_pr_res_holder; | ||
3905 | if ((pr_reg)) { | ||
3906 | /* | ||
3907 | * Set the hardcoded Additional Length | ||
3908 | */ | ||
3909 | buf[4] = ((add_len >> 24) & 0xff); | ||
3910 | buf[5] = ((add_len >> 16) & 0xff); | ||
3911 | buf[6] = ((add_len >> 8) & 0xff); | ||
3912 | buf[7] = (add_len & 0xff); | ||
3913 | |||
3914 | if (cmd->data_length < 22) { | ||
3915 | spin_unlock(&se_dev->dev_reservation_lock); | ||
3916 | return 0; | ||
3917 | } | ||
3918 | /* | ||
3919 | * Set the Reservation key. | ||
3920 | * | ||
3921 | * From spc4r17, section 5.7.10: | ||
3922 | * A persistent reservation holder has its reservation key | ||
3923 | * returned in the parameter data from a PERSISTENT | ||
3924 | * RESERVE IN command with READ RESERVATION service action as | ||
3925 | * follows: | ||
3926 | * a) For a persistent reservation of the type Write Exclusive | ||
3927 | * - All Registrants or Exclusive Access - All Registrants, | ||
3928 | * the reservation key shall be set to zero; or | ||
3929 | * b) For all other persistent reservation types, the | ||
3930 | * reservation key shall be set to the registered | ||
3931 | * reservation key for the I_T nexus that holds the | ||
3932 | * persistent reservation. | ||
3933 | */ | ||
3934 | if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || | ||
3935 | (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) | ||
3936 | pr_res_key = 0; | ||
3937 | else | ||
3938 | pr_res_key = pr_reg->pr_res_key; | ||
3939 | |||
3940 | buf[8] = ((pr_res_key >> 56) & 0xff); | ||
3941 | buf[9] = ((pr_res_key >> 48) & 0xff); | ||
3942 | buf[10] = ((pr_res_key >> 40) & 0xff); | ||
3943 | buf[11] = ((pr_res_key >> 32) & 0xff); | ||
3944 | buf[12] = ((pr_res_key >> 24) & 0xff); | ||
3945 | buf[13] = ((pr_res_key >> 16) & 0xff); | ||
3946 | buf[14] = ((pr_res_key >> 8) & 0xff); | ||
3947 | buf[15] = (pr_res_key & 0xff); | ||
3948 | /* | ||
3949 | * Set the SCOPE and TYPE | ||
3950 | */ | ||
3951 | buf[21] = (pr_reg->pr_res_scope & 0xf0) | | ||
3952 | (pr_reg->pr_res_type & 0x0f); | ||
3953 | } | ||
3954 | spin_unlock(&se_dev->dev_reservation_lock); | ||
3955 | |||
3956 | return 0; | ||
3957 | } | ||
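The zero-key rule applied above comes straight from spc4r17 section 5.7.10; isolated as a sketch (the function name is illustrative, and the literal type codes mirror the PR_TYPE_*_ALLREG defines used above):

    #include <stdint.h>

    /* All-registrants reservations have no single holder key, so
     * READ_RESERVATION reports zero; other types report the holder's key. */
    static uint64_t pr_reported_key(int pr_type, uint64_t holder_key)
    {
    	if (pr_type == 0x07 ||	/* Write Exclusive - All Registrants */
    	    pr_type == 0x08)	/* Exclusive Access - All Registrants */
    		return 0;
    	return holder_key;
    }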
3958 | |||
3959 | /* | ||
3960 | * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES | ||
3961 | * | ||
3962 | * See spc4r17 section 6.13.4 Table 165 | ||
3963 | */ | ||
3964 | static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) | ||
3965 | { | ||
3966 | struct se_device *dev = SE_DEV(cmd); | ||
3967 | struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; | ||
3968 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
3969 | u16 add_len = 8; /* Hardcoded to 8. */ | ||
3970 | |||
3971 | if (cmd->data_length < 6) { | ||
3972 | printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:" | ||
3973 | " %u too small\n", cmd->data_length); | ||
3974 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
3975 | } | ||
3976 | |||
3977 | buf[0] = ((add_len >> 8) & 0xff); | ||
3978 | buf[1] = (add_len & 0xff); | ||
3979 | buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ | ||
3980 | buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ | ||
3981 | buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ | ||
3982 | buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */ | ||
3983 | /* | ||
3984 | * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so | ||
3985 | * set the TMV: Type Mask Valid bit. | ||
3986 | */ | ||
3987 | buf[3] |= 0x80; | ||
3988 | /* | ||
3989 | * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166 | ||
3990 | */ | ||
3991 | buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */ | ||
3992 | /* | ||
3993 | * PTPL_A: Persistence across Target Power Loss Active bit | ||
3994 | */ | ||
3995 | if (pr_tmpl->pr_aptpl_active) | ||
3996 | buf[3] |= 0x01; | ||
3997 | /* | ||
3998 | * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167 | ||
3999 | */ | ||
4000 | buf[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */ | ||
4001 | buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */ | ||
4002 | buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */ | ||
4003 | buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */ | ||
4004 | buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ | ||
4005 | buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ | ||
4006 | |||
4007 | return 0; | ||
4008 | } | ||
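The hardcoded mask bytes above follow a regular pattern: bit n of the 16-bit PERSISTENT RESERVATION TYPE MASK corresponds to TYPE code n, with buf[4] covering types 0-7 and buf[5] covering types 8-15. An equivalent loop, as a sketch under that assumption (names illustrative):

    /* TYPE codes from spc4r17 Table 164, matching the PR_TYPE_* defines */
    static const int pr_type_codes[] = { 0x01, 0x03, 0x05, 0x06, 0x07, 0x08 };

    static void pr_set_type_mask(unsigned char *buf)
    {
    	int i;

    	for (i = 0; i < 6; i++)
    		buf[4 + (pr_type_codes[i] >> 3)] |= 1 << (pr_type_codes[i] & 7);
    }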
4009 | |||
4010 | /* | ||
4011 | * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS | ||
4012 | * | ||
4013 | * See spc4r17 section 6.13.5 Tables 168 and 169 | ||
4014 | */ | ||
4015 | static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) | ||
4016 | { | ||
4017 | struct se_device *se_dev = SE_DEV(cmd); | ||
4018 | struct se_node_acl *se_nacl; | ||
4019 | struct se_subsystem_dev *su_dev = SU_DEV(se_dev); | ||
4020 | struct se_portal_group *se_tpg; | ||
4021 | struct t10_pr_registration *pr_reg, *pr_reg_tmp; | ||
4022 | struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation; | ||
4023 | unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
4024 | u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; | ||
4025 | u32 off = 8; /* off into first Full Status descriptor */ | ||
4026 | int format_code = 0; | ||
4027 | |||
4028 | if (cmd->data_length < 8) { | ||
4029 | printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" | ||
4030 | " too small\n", cmd->data_length); | ||
4031 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
4032 | } | ||
4033 | |||
4034 | buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); | ||
4035 | buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); | ||
4036 | buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); | ||
4037 | buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); | ||
4038 | |||
4039 | spin_lock(&pr_tmpl->registration_lock); | ||
4040 | list_for_each_entry_safe(pr_reg, pr_reg_tmp, | ||
4041 | &pr_tmpl->registration_list, pr_reg_list) { | ||
4042 | |||
4043 | se_nacl = pr_reg->pr_reg_nacl; | ||
4044 | se_tpg = pr_reg->pr_reg_nacl->se_tpg; | ||
4045 | add_desc_len = 0; | ||
4046 | |||
4047 | atomic_inc(&pr_reg->pr_res_holders); | ||
4048 | smp_mb__after_atomic_inc(); | ||
4049 | spin_unlock(&pr_tmpl->registration_lock); | ||
4050 | /* | ||
4051 | * Determine expected length of $FABRIC_MOD specific | ||
4052 | * TransportID full status descriptor. | ||
4053 | */ | ||
4054 | exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len( | ||
4055 | se_tpg, se_nacl, pr_reg, &format_code); | ||
4056 | |||
4057 | if ((exp_desc_len + add_len) > cmd->data_length) { | ||
4058 | printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" | ||
4059 | " out of buffer: %d\n", cmd->data_length); | ||
4060 | spin_lock(&pr_tmpl->registration_lock); | ||
4061 | atomic_dec(&pr_reg->pr_res_holders); | ||
4062 | smp_mb__after_atomic_dec(); | ||
4063 | break; | ||
4064 | } | ||
4065 | /* | ||
4066 | * Set RESERVATION KEY | ||
4067 | */ | ||
4068 | buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); | ||
4069 | buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); | ||
4070 | buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); | ||
4071 | buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); | ||
4072 | buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); | ||
4073 | buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); | ||
4074 | buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); | ||
4075 | buf[off++] = (pr_reg->pr_res_key & 0xff); | ||
4076 | off += 4; /* Skip Over Reserved area */ | ||
4077 | |||
4078 | /* | ||
4079 | * Set ALL_TG_PT bit if PROUT SA REGISTER had this set. | ||
4080 | */ | ||
4081 | if (pr_reg->pr_reg_all_tg_pt) | ||
4082 | buf[off] = 0x02; | ||
4083 | /* | ||
4084 | * The struct se_lun pointer will be present for the | ||
4085 | * reservation holder for PR_HOLDER bit. | ||
4086 | * | ||
4087 | * Also, if this registration is the reservation | ||
4088 | * holder, fill in SCOPE and TYPE in the next byte. | ||
4089 | */ | ||
4090 | if (pr_reg->pr_res_holder) { | ||
4091 | buf[off++] |= 0x01; | ||
4092 | buf[off++] = (pr_reg->pr_res_scope & 0xf0) | | ||
4093 | (pr_reg->pr_res_type & 0x0f); | ||
4094 | } else | ||
4095 | off += 2; | ||
4096 | |||
4097 | off += 4; /* Skip over reserved area */ | ||
4098 | /* | ||
4099 | * From spc4r17 6.13.5: | ||
4100 | * | ||
4101 | * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT | ||
4102 | * IDENTIFIER field contains the relative port identifier (see | ||
4103 | * 3.1.120) of the target port that is part of the I_T nexus | ||
4104 | * described by this full status descriptor. If the ALL_TG_PT | ||
4105 | * bit is set to one, the contents of the RELATIVE TARGET PORT | ||
4106 | * IDENTIFIER field are not defined by this standard. | ||
4107 | */ | ||
4108 | if (!(pr_reg->pr_reg_all_tg_pt)) { | ||
4109 | struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; | ||
4110 | |||
4111 | buf[off++] = ((port->sep_rtpi >> 8) & 0xff); | ||
4112 | buf[off++] = (port->sep_rtpi & 0xff); | ||
4113 | } else | ||
4114 | off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */ | ||
4115 | |||
4116 | /* | ||
4117 | * Now, have the $FABRIC_MOD fill in the protocol identifier | ||
4118 | */ | ||
4119 | desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg, | ||
4120 | se_nacl, pr_reg, &format_code, &buf[off+4]); | ||
4121 | |||
4122 | spin_lock(&pr_tmpl->registration_lock); | ||
4123 | atomic_dec(&pr_reg->pr_res_holders); | ||
4124 | smp_mb__after_atomic_dec(); | ||
4125 | /* | ||
4126 | * Set the ADDITIONAL DESCRIPTOR LENGTH | ||
4127 | */ | ||
4128 | buf[off++] = ((desc_len >> 24) & 0xff); | ||
4129 | buf[off++] = ((desc_len >> 16) & 0xff); | ||
4130 | buf[off++] = ((desc_len >> 8) & 0xff); | ||
4131 | buf[off++] = (desc_len & 0xff); | ||
4132 | /* | ||
4133 | * Size of the full status descriptor: the 24-byte fixed header | ||
4134 | * plus the TransportID containing $FABRIC_MOD specific initiator | ||
4135 | * device/port WWN information. | ||
4136 | * | ||
4137 | * See spc4r17 Section 6.13.5 Table 169 | ||
4138 | */ | ||
4139 | add_desc_len = (24 + desc_len); | ||
4140 | |||
4141 | off += desc_len; | ||
4142 | add_len += add_desc_len; | ||
4143 | } | ||
4144 | spin_unlock(&pr_tmpl->registration_lock); | ||
4145 | /* | ||
4146 | * Set ADDITIONAL_LENGTH | ||
4147 | */ | ||
4148 | buf[4] = ((add_len >> 24) & 0xff); | ||
4149 | buf[5] = ((add_len >> 16) & 0xff); | ||
4150 | buf[6] = ((add_len >> 8) & 0xff); | ||
4151 | buf[7] = (add_len & 0xff); | ||
4152 | |||
4153 | return 0; | ||
4154 | } | ||
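A compact map of the per-registration descriptor assembled in the loop above (spc4r17 Table 169); the enum and its names are illustrative only, not part of this patch:

    enum pr_full_status_desc_off {
    	PR_FSD_RES_KEY		= 0,	/* 8-byte RESERVATION KEY */
    	PR_FSD_FLAGS		= 12,	/* ALL_TG_PT 0x02, PR_HOLDER 0x01 */
    	PR_FSD_SCOPE_TYPE	= 13,	/* valid for the holder only */
    	PR_FSD_REL_TGT_PORT	= 18,	/* 2-byte RELATIVE TARGET PORT ID */
    	PR_FSD_ADD_DESC_LEN	= 20,	/* 4-byte TransportID length */
    	PR_FSD_TRANSPORT_ID	= 24,	/* hence add_desc_len = 24 + desc_len */
    };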
4155 | |||
4156 | static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) | ||
4157 | { | ||
4158 | switch (cdb[1] & 0x1f) { | ||
4159 | case PRI_READ_KEYS: | ||
4160 | return core_scsi3_pri_read_keys(cmd); | ||
4161 | case PRI_READ_RESERVATION: | ||
4162 | return core_scsi3_pri_read_reservation(cmd); | ||
4163 | case PRI_REPORT_CAPABILITIES: | ||
4164 | return core_scsi3_pri_report_capabilities(cmd); | ||
4165 | case PRI_READ_FULL_STATUS: | ||
4166 | return core_scsi3_pri_read_full_status(cmd); | ||
4167 | default: | ||
4168 | printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" | ||
4169 | " action: 0x%02x\n", cdb[1] & 0x1f); | ||
4170 | return PYX_TRANSPORT_INVALID_CDB_FIELD; | ||
4171 | } | ||
4172 | |||
4173 | } | ||
4174 | |||
4175 | int core_scsi3_emulate_pr(struct se_cmd *cmd) | ||
4176 | { | ||
4177 | unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; | ||
4178 | struct se_device *dev = cmd->se_dev; | ||
4179 | /* | ||
4180 | * Following spc2r20 5.5.1 Reservations overview: | ||
4181 | * | ||
4182 | * If a logical unit has been reserved by any RESERVE command and is | ||
4183 | * still reserved by any initiator, all PERSISTENT RESERVE IN and all | ||
4184 | * PERSISTENT RESERVE OUT commands shall conflict regardless of | ||
4185 | * initiator or service action and shall terminate with a RESERVATION | ||
4186 | * CONFLICT status. | ||
4187 | */ | ||
4188 | if (dev->dev_flags & DF_SPC2_RESERVATIONS) { | ||
4189 | printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" | ||
4190 | " SPC-2 reservation is held, returning" | ||
4191 | " RESERVATION_CONFLICT\n"); | ||
4192 | return PYX_TRANSPORT_RESERVATION_CONFLICT; | ||
4193 | } | ||
4194 | |||
4195 | return (cdb[0] == PERSISTENT_RESERVE_OUT) ? | ||
4196 | core_scsi3_emulate_pr_out(cmd, cdb) : | ||
4197 | core_scsi3_emulate_pr_in(cmd, cdb); | ||
4198 | } | ||
4199 | |||
4200 | static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type) | ||
4201 | { | ||
4202 | return 0; | ||
4203 | } | ||
4204 | |||
4205 | static int core_pt_seq_non_holder( | ||
4206 | struct se_cmd *cmd, | ||
4207 | unsigned char *cdb, | ||
4208 | u32 pr_reg_type) | ||
4209 | { | ||
4210 | return 0; | ||
4211 | } | ||
4212 | |||
4213 | int core_setup_reservations(struct se_device *dev, int force_pt) | ||
4214 | { | ||
4215 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | ||
4216 | struct t10_reservation_template *rest = &su_dev->t10_reservation; | ||
4217 | /* | ||
4218 | * If this device is from Target_Core_Mod/pSCSI, use the reservations | ||
4219 | * of the Underlying SCSI hardware. In Linux/SCSI terms, this can | ||
4220 | * cause a problem because libata and some SATA RAID HBAs appear | ||
4221 | * under Linux/SCSI, but emulate reservations themselves. | ||
4222 | */ | ||
4223 | if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && | ||
4224 | !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) { | ||
4225 | rest->res_type = SPC_PASSTHROUGH; | ||
4226 | rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; | ||
4227 | rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; | ||
4228 | printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" | ||
4229 | " emulation\n", TRANSPORT(dev)->name); | ||
4230 | return 0; | ||
4231 | } | ||
4232 | /* | ||
4233 | * If SPC-3 or above is reported by real or emulated struct se_device, | ||
4234 | * use emulated Persistent Reservations. | ||
4235 | */ | ||
4236 | if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { | ||
4237 | rest->res_type = SPC3_PERSISTENT_RESERVATIONS; | ||
4238 | rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; | ||
4239 | rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; | ||
4240 | printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" | ||
4241 | " emulation\n", TRANSPORT(dev)->name); | ||
4242 | } else { | ||
4243 | rest->res_type = SPC2_RESERVATIONS; | ||
4244 | rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; | ||
4245 | rest->pr_ops.t10_seq_non_holder = | ||
4246 | &core_scsi2_reservation_seq_non_holder; | ||
4247 | printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", | ||
4248 | TRANSPORT(dev)->name); | ||
4249 | } | ||
4250 | |||
4251 | return 0; | ||
4252 | } | ||
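Once core_setup_reservations() has picked an implementation, callers dispatch through the pr_ops function pointers without caring which SPC level is active. A hypothetical caller-side sketch (the wrapper name and the convention that a non-zero return signals a conflict are assumptions, not part of this patch):

    static int reservation_gate(struct se_device *dev, struct se_cmd *cmd)
    {
    	struct t10_reservation_template *rest =
    			&dev->se_sub_dev->t10_reservation;
    	u32 pr_res_type;

    	/* Assumed: 0 == no conflict, as in core_pt_reservation_check() */
    	return rest->pr_ops.t10_reservation_check(cmd, &pr_res_type);
    }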
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h new file mode 100644 index 000000000000..5603bcfd86d3 --- /dev/null +++ b/drivers/target/target_core_pr.h | |||
@@ -0,0 +1,67 @@ | |||
1 | #ifndef TARGET_CORE_PR_H | ||
2 | #define TARGET_CORE_PR_H | ||
3 | /* | ||
4 | * PERSISTENT_RESERVE_OUT service action codes | ||
5 | * | ||
6 | * spc4r17 section 6.14.2 Table 171 | ||
7 | */ | ||
8 | #define PRO_REGISTER 0x00 | ||
9 | #define PRO_RESERVE 0x01 | ||
10 | #define PRO_RELEASE 0x02 | ||
11 | #define PRO_CLEAR 0x03 | ||
12 | #define PRO_PREEMPT 0x04 | ||
13 | #define PRO_PREEMPT_AND_ABORT 0x05 | ||
14 | #define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06 | ||
15 | #define PRO_REGISTER_AND_MOVE 0x07 | ||
16 | /* | ||
17 | * PERSISTENT_RESERVE_IN service action codes | ||
18 | * | ||
19 | * spc4r17 section 6.13.1 Table 159 | ||
20 | */ | ||
21 | #define PRI_READ_KEYS 0x00 | ||
22 | #define PRI_READ_RESERVATION 0x01 | ||
23 | #define PRI_REPORT_CAPABILITIES 0x02 | ||
24 | #define PRI_READ_FULL_STATUS 0x03 | ||
25 | /* | ||
26 | * PERSISTENT_RESERVE_* SCOPE field | ||
27 | * | ||
28 | * spc4r17 section 6.13.3.3 Table 163 | ||
29 | */ | ||
30 | #define PR_SCOPE_LU_SCOPE 0x00 | ||
31 | /* | ||
32 | * PERSISTENT_RESERVE_* TYPE field | ||
33 | * | ||
34 | * spc4r17 section 6.13.3.4 Table 164 | ||
35 | */ | ||
36 | #define PR_TYPE_WRITE_EXCLUSIVE 0x01 | ||
37 | #define PR_TYPE_EXCLUSIVE_ACCESS 0x03 | ||
38 | #define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05 | ||
39 | #define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06 | ||
40 | #define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07 | ||
41 | #define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08 | ||
42 | |||
43 | #define PR_APTPL_MAX_IPORT_LEN 256 | ||
44 | #define PR_APTPL_MAX_TPORT_LEN 256 | ||
45 | |||
46 | extern struct kmem_cache *t10_pr_reg_cache; | ||
47 | |||
48 | extern int core_pr_dump_initiator_port(struct t10_pr_registration *, | ||
49 | char *, u32); | ||
50 | extern int core_scsi2_emulate_crh(struct se_cmd *); | ||
51 | extern int core_scsi3_alloc_aptpl_registration( | ||
52 | struct t10_reservation_template *, u64, | ||
53 | unsigned char *, unsigned char *, u32, | ||
54 | unsigned char *, u16, u32, int, int, u8); | ||
55 | extern int core_scsi3_check_aptpl_registration(struct se_device *, | ||
56 | struct se_portal_group *, struct se_lun *, | ||
57 | struct se_lun_acl *); | ||
58 | extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, | ||
59 | struct se_node_acl *); | ||
60 | extern void core_scsi3_free_all_registrations(struct se_device *); | ||
61 | extern unsigned char *core_scsi3_pr_dump_type(int); | ||
62 | extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *, | ||
63 | struct se_cmd *); | ||
64 | extern int core_scsi3_emulate_pr(struct se_cmd *); | ||
65 | extern int core_setup_reservations(struct se_device *, int); | ||
66 | |||
67 | #endif /* TARGET_CORE_PR_H */ | ||
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c new file mode 100644 index 000000000000..742d24609a9b --- /dev/null +++ b/drivers/target/target_core_pscsi.c | |||
@@ -0,0 +1,1470 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_pscsi.c | ||
3 | * | ||
4 | * This file contains the generic target mode <-> Linux SCSI subsystem plugin. | ||
5 | * | ||
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/version.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/parser.h> | ||
32 | #include <linux/timer.h> | ||
33 | #include <linux/blkdev.h> | ||
34 | #include <linux/blk_types.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/smp_lock.h> | ||
38 | #include <linux/genhd.h> | ||
39 | #include <linux/cdrom.h> | ||
40 | #include <linux/file.h> | ||
41 | #include <scsi/scsi.h> | ||
42 | #include <scsi/scsi_device.h> | ||
43 | #include <scsi/scsi_cmnd.h> | ||
44 | #include <scsi/scsi_host.h> | ||
45 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | ||
46 | |||
47 | #include <target/target_core_base.h> | ||
48 | #include <target/target_core_device.h> | ||
49 | #include <target/target_core_transport.h> | ||
50 | |||
51 | #include "target_core_pscsi.h" | ||
52 | |||
53 | #define ISPRINT(a) (((a) >= ' ') && ((a) <= '~')) | ||
54 | |||
55 | static struct se_subsystem_api pscsi_template; | ||
56 | |||
57 | static void pscsi_req_done(struct request *, int); | ||
58 | |||
59 | /* pscsi_get_sh(): | ||
60 | * | ||
61 | * | ||
62 | */ | ||
63 | static struct Scsi_Host *pscsi_get_sh(u32 host_no) | ||
64 | { | ||
65 | struct Scsi_Host *sh = NULL; | ||
66 | |||
67 | sh = scsi_host_lookup(host_no); | ||
68 | if (IS_ERR(sh)) { | ||
69 | printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" | ||
70 | " %u\n", host_no); | ||
71 | return NULL; | ||
72 | } | ||
73 | |||
74 | return sh; | ||
75 | } | ||
76 | |||
77 | /* pscsi_attach_hba(): | ||
78 | * | ||
79 | * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host | ||
80 | * from the passed SCSI Host ID. | ||
81 | */ | ||
82 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | ||
83 | { | ||
84 | int hba_depth; | ||
85 | struct pscsi_hba_virt *phv; | ||
86 | |||
87 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); | ||
88 | if (!(phv)) { | ||
89 | printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); | ||
90 | return -1; | ||
91 | } | ||
92 | phv->phv_host_id = host_id; | ||
93 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | ||
94 | hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | ||
95 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
96 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
97 | |||
98 | hba->hba_ptr = (void *)phv; | ||
99 | |||
100 | printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" | ||
101 | " Generic Target Core Stack %s\n", hba->hba_id, | ||
102 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); | ||
103 | printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" | ||
104 | " Target Core with TCQ Depth: %d\n", hba->hba_id, | ||
105 | atomic_read(&hba->max_queue_depth)); | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static void pscsi_detach_hba(struct se_hba *hba) | ||
111 | { | ||
112 | struct pscsi_hba_virt *phv = hba->hba_ptr; | ||
113 | struct Scsi_Host *scsi_host = phv->phv_lld_host; | ||
114 | |||
115 | if (scsi_host) { | ||
116 | scsi_host_put(scsi_host); | ||
117 | |||
118 | printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" | ||
119 | " Generic Target Core\n", hba->hba_id, | ||
120 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : | ||
121 | "Unknown"); | ||
122 | } else | ||
123 | printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" | ||
124 | " from Generic Target Core\n", hba->hba_id); | ||
125 | |||
126 | kfree(phv); | ||
127 | hba->hba_ptr = NULL; | ||
128 | } | ||
129 | |||
130 | static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | ||
131 | { | ||
132 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; | ||
133 | struct Scsi_Host *sh = phv->phv_lld_host; | ||
134 | int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | ||
135 | /* | ||
136 | * Release the struct Scsi_Host | ||
137 | */ | ||
138 | if (!(mode_flag)) { | ||
139 | if (!(sh)) | ||
140 | return 0; | ||
141 | |||
142 | phv->phv_lld_host = NULL; | ||
143 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | ||
144 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
145 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
146 | |||
147 | printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" | ||
148 | " %s\n", hba->hba_id, (sh->hostt->name) ? | ||
149 | (sh->hostt->name) : "Unknown"); | ||
150 | |||
151 | scsi_host_put(sh); | ||
152 | return 0; | ||
153 | } | ||
154 | /* | ||
155 | * Otherwise, locate struct Scsi_Host from the original passed | ||
156 | * pSCSI Host ID and enable for phba mode | ||
157 | */ | ||
158 | sh = pscsi_get_sh(phv->phv_host_id); | ||
159 | if (!(sh)) { | ||
160 | printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" | ||
161 | " phv_host_id: %d\n", phv->phv_host_id); | ||
162 | return -1; | ||
163 | } | ||
164 | /* | ||
165 | * Usually the SCSI LLD will use the hostt->can_queue value to define | ||
166 | * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set | ||
167 | * this at all and set sh->can_queue at runtime. | ||
168 | */ | ||
169 | hba_depth = (sh->hostt->can_queue > sh->can_queue) ? | ||
170 | sh->hostt->can_queue : sh->can_queue; | ||
171 | |||
172 | atomic_set(&hba->left_queue_depth, hba_depth); | ||
173 | atomic_set(&hba->max_queue_depth, hba_depth); | ||
174 | |||
175 | phv->phv_lld_host = sh; | ||
176 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; | ||
177 | |||
178 | printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", | ||
179 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); | ||
180 | |||
181 | return 1; | ||
182 | } | ||
183 | |||
184 | static void pscsi_tape_read_blocksize(struct se_device *dev, | ||
185 | struct scsi_device *sdev) | ||
186 | { | ||
187 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; | ||
188 | int ret; | ||
189 | |||
190 | buf = kzalloc(12, GFP_KERNEL); | ||
191 | if (!buf) | ||
192 | return; | ||
193 | |||
194 | memset(cdb, 0, MAX_COMMAND_SIZE); | ||
195 | cdb[0] = MODE_SENSE; | ||
196 | cdb[4] = 0x0c; /* 12 bytes */ | ||
197 | |||
198 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, | ||
199 | HZ, 1, NULL); | ||
200 | if (ret) | ||
201 | goto out_free; | ||
202 | |||
203 | /* | ||
204 | * If MODE_SENSE reports a blocksize of zero, default to 1024 bytes. | ||
205 | */ | ||
206 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); | ||
207 | if (!sdev->sector_size) | ||
208 | sdev->sector_size = 1024; | ||
209 | out_free: | ||
210 | kfree(buf); | ||
211 | } | ||
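The offsets 9-11 read above fall out of the MODE SENSE(6) data format: a 4-byte mode parameter header followed by an 8-byte block descriptor whose last three bytes carry the BLOCK LENGTH. The same decode as a standalone sketch (function name illustrative):

    #include <stdint.h>

    static uint32_t mode_sense6_block_length(const uint8_t *buf)
    {
    	const uint8_t *bd = &buf[4];	/* first block descriptor */

    	return ((uint32_t)bd[5] << 16) | ((uint32_t)bd[6] << 8) | bd[7];
    }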
212 | |||
213 | static void | ||
214 | pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) | ||
215 | { | ||
216 | unsigned char *buf; | ||
217 | |||
218 | if (sdev->inquiry_len < INQUIRY_LEN) | ||
219 | return; | ||
220 | |||
221 | buf = sdev->inquiry; | ||
222 | if (!buf) | ||
223 | return; | ||
224 | /* | ||
225 | * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() | ||
226 | */ | ||
227 | memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); | ||
228 | memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); | ||
229 | memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); | ||
230 | } | ||
231 | |||
232 | static int | ||
233 | pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | ||
234 | { | ||
235 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; | ||
236 | int ret; | ||
237 | |||
238 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | ||
239 | if (!buf) | ||
240 | return -1; | ||
241 | |||
242 | memset(cdb, 0, MAX_COMMAND_SIZE); | ||
243 | cdb[0] = INQUIRY; | ||
244 | cdb[1] = 0x01; /* Query VPD */ | ||
245 | cdb[2] = 0x80; /* Unit Serial Number */ | ||
246 | cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; | ||
247 | cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); | ||
248 | |||
249 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, | ||
250 | INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); | ||
251 | if (ret) | ||
252 | goto out_free; | ||
253 | |||
254 | snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); | ||
255 | |||
256 | wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; | ||
257 | |||
258 | kfree(buf); | ||
259 | return 0; | ||
260 | |||
261 | out_free: | ||
262 | kfree(buf); | ||
263 | return -1; | ||
264 | } | ||
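The snprintf() from &buf[4] above relies on the Unit Serial Number VPD page layout: byte 1 PAGE CODE (0x80), byte 3 PAGE LENGTH, and the ASCII serial number starting at byte 4. A standalone sketch of that decode (function name illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static void dump_vpd_serial(const uint8_t *buf)
    {
    	int page_len = buf[3];

    	printf("VPD 0x%02x serial: %.*s\n", buf[1], page_len,
    	       (const char *)&buf[4]);
    }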
265 | |||
266 | static void | ||
267 | pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, | ||
268 | struct t10_wwn *wwn) | ||
269 | { | ||
270 | unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; | ||
271 | int ident_len, page_len, off = 4, ret; | ||
272 | struct t10_vpd *vpd; | ||
273 | |||
274 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | ||
275 | if (!buf) | ||
276 | return; | ||
277 | |||
278 | memset(cdb, 0, MAX_COMMAND_SIZE); | ||
279 | cdb[0] = INQUIRY; | ||
280 | cdb[1] = 0x01; /* Query VPD */ | ||
281 | cdb[2] = 0x83; /* Device Identifier */ | ||
282 | cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; | ||
283 | cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); | ||
284 | |||
285 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, | ||
286 | INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, | ||
287 | NULL, HZ, 1, NULL); | ||
288 | if (ret) | ||
289 | goto out; | ||
290 | |||
291 | page_len = (buf[2] << 8) | buf[3]; | ||
292 | while (page_len > 0) { | ||
293 | /* Grab a pointer to the Identification descriptor */ | ||
294 | page_83 = &buf[off]; | ||
295 | ident_len = page_83[3]; | ||
296 | if (!ident_len) { | ||
297 | printk(KERN_ERR "page_83[3]: identifier" | ||
298 | " length zero!\n"); | ||
299 | break; | ||
300 | } | ||
301 | printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len); | ||
302 | |||
303 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); | ||
304 | if (!vpd) { | ||
305 | printk(KERN_ERR "Unable to allocate memory for" | ||
306 | " struct t10_vpd\n"); | ||
307 | goto out; | ||
308 | } | ||
309 | INIT_LIST_HEAD(&vpd->vpd_list); | ||
310 | |||
311 | transport_set_vpd_proto_id(vpd, page_83); | ||
312 | transport_set_vpd_assoc(vpd, page_83); | ||
313 | |||
314 | if (transport_set_vpd_ident_type(vpd, page_83) < 0) { | ||
315 | off += (ident_len + 4); | ||
316 | page_len -= (ident_len + 4); | ||
317 | kfree(vpd); | ||
318 | continue; | ||
319 | } | ||
320 | if (transport_set_vpd_ident(vpd, page_83) < 0) { | ||
321 | off += (ident_len + 4); | ||
322 | page_len -= (ident_len + 4); | ||
323 | kfree(vpd); | ||
324 | continue; | ||
325 | } | ||
326 | |||
327 | list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); | ||
328 | off += (ident_len + 4); | ||
329 | page_len -= (ident_len + 4); | ||
330 | } | ||
331 | |||
332 | out: | ||
333 | kfree(buf); | ||
334 | } | ||
335 | |||
336 | /* pscsi_add_device_to_list(): | ||
337 | * | ||
338 | * | ||
339 | */ | ||
340 | static struct se_device *pscsi_add_device_to_list( | ||
341 | struct se_hba *hba, | ||
342 | struct se_subsystem_dev *se_dev, | ||
343 | struct pscsi_dev_virt *pdv, | ||
344 | struct scsi_device *sd, | ||
345 | int dev_flags) | ||
346 | { | ||
347 | struct se_device *dev; | ||
348 | struct se_dev_limits dev_limits; | ||
349 | struct request_queue *q; | ||
350 | struct queue_limits *limits; | ||
351 | |||
352 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | ||
353 | |||
354 | if (!sd->queue_depth) { | ||
355 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; | ||
356 | |||
357 | printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" | ||
358 | " queue_depth to %d\n", sd->channel, sd->id, | ||
359 | sd->lun, sd->queue_depth); | ||
360 | } | ||
361 | /* | ||
362 | * Setup the local scope queue_limits from struct request_queue->limits | ||
363 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | ||
364 | */ | ||
365 | q = sd->request_queue; | ||
366 | limits = &dev_limits.limits; | ||
367 | limits->logical_block_size = sd->sector_size; | ||
368 | limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? | ||
369 | queue_max_hw_sectors(q) : sd->host->max_sectors; | ||
370 | limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? | ||
371 | queue_max_sectors(q) : sd->host->max_sectors; | ||
372 | dev_limits.hw_queue_depth = sd->queue_depth; | ||
373 | dev_limits.queue_depth = sd->queue_depth; | ||
374 | /* | ||
375 | * Setup our standard INQUIRY info into se_dev->t10_wwn | ||
376 | */ | ||
377 | pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); | ||
378 | |||
379 | /* | ||
380 | * Set the pointer pdv->pdv_sd from the passed struct scsi_device, | ||
381 | * which has already been referenced with Linux SCSI code with | ||
382 | * scsi_device_get() in this file's pscsi_create_virtdevice(). | ||
383 | * | ||
384 | * The passthrough operations called by the transport_add_device_* | ||
385 | * function below will require this pointer to be set for passthrough | ||
386 | * ops. | ||
387 | * | ||
388 | * For the shutdown case in pscsi_free_device(), this struct | ||
389 | * scsi_device reference is released with Linux SCSI code | ||
390 | * scsi_device_put() and the pdv->pdv_sd cleared. | ||
391 | */ | ||
392 | pdv->pdv_sd = sd; | ||
393 | |||
394 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, | ||
395 | se_dev, dev_flags, (void *)pdv, | ||
396 | &dev_limits, NULL, NULL); | ||
397 | if (!(dev)) { | ||
398 | pdv->pdv_sd = NULL; | ||
399 | return NULL; | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * Locate VPD WWN Information used for various purposes within | ||
404 | * the Storage Engine. | ||
405 | */ | ||
406 | if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { | ||
407 | /* | ||
408 | * If VPD Unit Serial returned GOOD status, try | ||
409 | * VPD Device Identification page (0x83). | ||
410 | */ | ||
411 | pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); | ||
412 | } | ||
413 | |||
414 | /* | ||
415 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. | ||
416 | */ | ||
417 | if (sd->type == TYPE_TAPE) | ||
418 | pscsi_tape_read_blocksize(dev, sd); | ||
419 | return dev; | ||
420 | } | ||
421 | |||
422 | static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) | ||
423 | { | ||
424 | struct pscsi_dev_virt *pdv; | ||
425 | |||
426 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); | ||
427 | if (!(pdv)) { | ||
428 | printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); | ||
429 | return NULL; | ||
430 | } | ||
431 | pdv->pdv_se_hba = hba; | ||
432 | |||
433 | printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); | ||
434 | return (void *)pdv; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Called with struct Scsi_Host->host_lock held. | ||
439 | */ | ||
440 | static struct se_device *pscsi_create_type_disk( | ||
441 | struct scsi_device *sd, | ||
442 | struct pscsi_dev_virt *pdv, | ||
443 | struct se_subsystem_dev *se_dev, | ||
444 | struct se_hba *hba) | ||
445 | { | ||
446 | struct se_device *dev; | ||
447 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | ||
448 | struct Scsi_Host *sh = sd->host; | ||
449 | struct block_device *bd; | ||
450 | u32 dev_flags = 0; | ||
451 | |||
452 | if (scsi_device_get(sd)) { | ||
453 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", | ||
454 | sh->host_no, sd->channel, sd->id, sd->lun); | ||
455 | spin_unlock_irq(sh->host_lock); | ||
456 | return NULL; | ||
457 | } | ||
458 | spin_unlock_irq(sh->host_lock); | ||
459 | /* | ||
460 | * Claim exclusive struct block_device access to struct scsi_device | ||
461 | * for TYPE_DISK using supplied udev_path | ||
462 | */ | ||
463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | ||
464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | ||
465 | if (IS_ERR(bd)) { | ||
466 | printk("pSCSI: blkdev_get_by_path() failed\n"); | ||
467 | scsi_device_put(sd); | ||
468 | return NULL; | ||
469 | } | ||
470 | pdv->pdv_bd = bd; | ||
471 | |||
472 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | ||
473 | if (!(dev)) { | ||
474 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | ||
475 | scsi_device_put(sd); | ||
476 | return NULL; | ||
477 | } | ||
478 | printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", | ||
479 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); | ||
480 | |||
481 | return dev; | ||
482 | } | ||
483 | |||
484 | /* | ||
485 | * Called with struct Scsi_Host->host_lock held. | ||
486 | */ | ||
487 | static struct se_device *pscsi_create_type_rom( | ||
488 | struct scsi_device *sd, | ||
489 | struct pscsi_dev_virt *pdv, | ||
490 | struct se_subsystem_dev *se_dev, | ||
491 | struct se_hba *hba) | ||
492 | { | ||
493 | struct se_device *dev; | ||
494 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | ||
495 | struct Scsi_Host *sh = sd->host; | ||
496 | u32 dev_flags = 0; | ||
497 | |||
498 | if (scsi_device_get(sd)) { | ||
499 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", | ||
500 | sh->host_no, sd->channel, sd->id, sd->lun); | ||
501 | spin_unlock_irq(sh->host_lock); | ||
502 | return NULL; | ||
503 | } | ||
504 | spin_unlock_irq(sh->host_lock); | ||
505 | |||
506 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | ||
507 | if (!(dev)) { | ||
508 | scsi_device_put(sd); | ||
509 | return NULL; | ||
510 | } | ||
511 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | ||
512 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | ||
513 | sd->channel, sd->id, sd->lun); | ||
514 | |||
515 | return dev; | ||
516 | } | ||
517 | |||
518 | /* | ||
519 | * Called with struct Scsi_Host->host_lock held. | ||
520 | */ | ||
521 | static struct se_device *pscsi_create_type_other( | ||
522 | struct scsi_device *sd, | ||
523 | struct pscsi_dev_virt *pdv, | ||
524 | struct se_subsystem_dev *se_dev, | ||
525 | struct se_hba *hba) | ||
526 | { | ||
527 | struct se_device *dev; | ||
528 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | ||
529 | struct Scsi_Host *sh = sd->host; | ||
530 | u32 dev_flags = 0; | ||
531 | |||
532 | spin_unlock_irq(sh->host_lock); | ||
533 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | ||
534 | if (!(dev)) | ||
535 | return NULL; | ||
536 | |||
537 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | ||
538 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | ||
539 | sd->channel, sd->id, sd->lun); | ||
540 | |||
541 | return dev; | ||
542 | } | ||
543 | |||
544 | static struct se_device *pscsi_create_virtdevice( | ||
545 | struct se_hba *hba, | ||
546 | struct se_subsystem_dev *se_dev, | ||
547 | void *p) | ||
548 | { | ||
549 | struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; | ||
550 | struct se_device *dev; | ||
551 | struct scsi_device *sd; | ||
552 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; | ||
553 | struct Scsi_Host *sh = phv->phv_lld_host; | ||
554 | int legacy_mode_enable = 0; | ||
555 | |||
556 | if (!(pdv)) { | ||
557 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" | ||
558 | " parameter\n"); | ||
559 | return NULL; | ||
560 | } | ||
561 | /* | ||
562 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the | ||
563 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online | ||
564 | */ | ||
565 | if (!(sh)) { | ||
566 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | ||
567 | printk(KERN_ERR "pSCSI: Unable to locate struct" | ||
568 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); | ||
569 | return NULL; | ||
570 | } | ||
571 | /* | ||
572 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device | ||
573 | * reference, we enforce that udev_path has been set | ||
574 | */ | ||
575 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | ||
576 | printk(KERN_ERR "pSCSI: udev_path attribute has not" | ||
577 | " been set before ENABLE=1\n"); | ||
578 | return NULL; | ||
579 | } | ||
580 | /* | ||
581 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, | ||
582 | * use the original TCM hba ID to reference Linux/SCSI Host No | ||
583 | * and enable for PHV_LLD_SCSI_HOST_NO mode. | ||
584 | */ | ||
585 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { | ||
586 | spin_lock(&hba->device_lock); | ||
587 | if (!(list_empty(&hba->hba_dev_list))) { | ||
588 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" | ||
589 | " with active devices\n"); | ||
590 | spin_unlock(&hba->device_lock); | ||
591 | return NULL; | ||
592 | } | ||
593 | spin_unlock(&hba->device_lock); | ||
594 | |||
595 | if (pscsi_pmode_enable_hba(hba, 1) != 1) | ||
596 | return NULL; | ||
597 | |||
598 | legacy_mode_enable = 1; | ||
599 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | ||
600 | sh = phv->phv_lld_host; | ||
601 | } else { | ||
602 | sh = pscsi_get_sh(pdv->pdv_host_id); | ||
603 | if (!(sh)) { | ||
604 | printk(KERN_ERR "pSCSI: Unable to locate" | ||
605 | " pdv_host_id: %d\n", pdv->pdv_host_id); | ||
606 | return NULL; | ||
607 | } | ||
608 | } | ||
609 | } else { | ||
610 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { | ||
611 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" | ||
612 | " struct Scsi_Host exists\n"); | ||
613 | return NULL; | ||
614 | } | ||
615 | } | ||
616 | |||
617 | spin_lock_irq(sh->host_lock); | ||
618 | list_for_each_entry(sd, &sh->__devices, siblings) { | ||
619 | if ((pdv->pdv_channel_id != sd->channel) || | ||
620 | (pdv->pdv_target_id != sd->id) || | ||
621 | (pdv->pdv_lun_id != sd->lun)) | ||
622 | continue; | ||
623 | /* | ||
624 | * Functions will release the held struct scsi_host->host_lock | ||
625 | * before calling pscsi_add_device_to_list() to register | ||
626 | * struct scsi_device with target_core_mod. | ||
627 | */ | ||
628 | switch (sd->type) { | ||
629 | case TYPE_DISK: | ||
630 | dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); | ||
631 | break; | ||
632 | case TYPE_ROM: | ||
633 | dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); | ||
634 | break; | ||
635 | default: | ||
636 | dev = pscsi_create_type_other(sd, pdv, se_dev, hba); | ||
637 | break; | ||
638 | } | ||
639 | |||
640 | if (!(dev)) { | ||
641 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | ||
642 | scsi_host_put(sh); | ||
643 | else if (legacy_mode_enable) { | ||
644 | pscsi_pmode_enable_hba(hba, 0); | ||
645 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | ||
646 | } | ||
647 | pdv->pdv_sd = NULL; | ||
648 | return NULL; | ||
649 | } | ||
650 | return dev; | ||
651 | } | ||
652 | spin_unlock_irq(sh->host_lock); | ||
653 | |||
654 | printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, | ||
655 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); | ||
656 | |||
657 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | ||
658 | scsi_host_put(sh); | ||
659 | else if (legacy_mode_enable) { | ||
660 | pscsi_pmode_enable_hba(hba, 0); | ||
661 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | ||
662 | } | ||
663 | |||
664 | return NULL; | ||
665 | } | ||
666 | |||
667 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) | ||
668 | * | ||
669 | * | ||
670 | */ | ||
671 | static void pscsi_free_device(void *p) | ||
672 | { | ||
673 | struct pscsi_dev_virt *pdv = p; | ||
674 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; | ||
675 | struct scsi_device *sd = pdv->pdv_sd; | ||
676 | |||
677 | if (sd) { | ||
678 | /* | ||
679 | * Release exclusive pSCSI internal struct block_device claim for | ||
680 | * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() | ||
681 | */ | ||
682 | if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { | ||
683 | blkdev_put(pdv->pdv_bd, | ||
684 | FMODE_WRITE|FMODE_READ|FMODE_EXCL); | ||
685 | pdv->pdv_bd = NULL; | ||
686 | } | ||
687 | /* | ||
688 | * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference | ||
689 | * to struct Scsi_Host now. | ||
690 | */ | ||
691 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && | ||
692 | (phv->phv_lld_host != NULL)) | ||
693 | scsi_host_put(phv->phv_lld_host); | ||
694 | |||
695 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) | ||
696 | scsi_device_put(sd); | ||
697 | |||
698 | pdv->pdv_sd = NULL; | ||
699 | } | ||
700 | |||
701 | kfree(pdv); | ||
702 | } | ||
703 | |||
704 | static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) | ||
705 | { | ||
706 | return container_of(task, struct pscsi_plugin_task, pscsi_task); | ||
707 | } | ||
708 | |||
709 | |||
710 | /* pscsi_transport_complete(): | ||
711 | * | ||
712 | * | ||
713 | */ | ||
714 | static int pscsi_transport_complete(struct se_task *task) | ||
715 | { | ||
716 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
717 | struct scsi_device *sd = pdv->pdv_sd; | ||
718 | int result; | ||
719 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
720 | unsigned char *cdb = &pt->pscsi_cdb[0]; | ||
721 | |||
722 | result = pt->pscsi_result; | ||
723 | /* | ||
724 | * Hack to make sure that Write-Protect modepage is set if R/O mode is | ||
725 | * forced. | ||
726 | */ | ||
727 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | ||
728 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | ||
729 | if (!TASK_CMD(task)->se_deve) | ||
730 | goto after_mode_sense; | ||
731 | |||
732 | if (TASK_CMD(task)->se_deve->lun_flags & | ||
733 | TRANSPORT_LUNFLAGS_READ_ONLY) { | ||
734 | unsigned char *buf = (unsigned char *) | ||
735 | T_TASK(task->task_se_cmd)->t_task_buf; | ||
736 | |||
737 | if (cdb[0] == MODE_SENSE_10) { | ||
738 | if (!(buf[3] & 0x80)) | ||
739 | buf[3] |= 0x80; | ||
740 | } else { | ||
741 | if (!(buf[2] & 0x80)) | ||
742 | buf[2] |= 0x80; | ||
743 | } | ||
744 | } | ||
745 | } | ||
746 | after_mode_sense: | ||
747 | |||
748 | if (sd->type != TYPE_TAPE) | ||
749 | goto after_mode_select; | ||
750 | |||
751 | /* | ||
752 | * Hack to correctly obtain the initiator requested blocksize for | ||
753 | * TYPE_TAPE. Since this value is dependent upon each tape media, | ||
754 | * struct scsi_device->sector_size will not contain the correct value | ||
755 | 	 * by default, so we set it here so that | ||
756 | * TRANSPORT(dev)->get_blockdev() returns the correct value to the | ||
757 | * storage engine. | ||
758 | */ | ||
759 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && | ||
760 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | ||
761 | unsigned char *buf; | ||
762 | struct scatterlist *sg = task->task_sg; | ||
763 | u16 bdl; | ||
764 | u32 blocksize; | ||
765 | |||
766 | buf = sg_virt(&sg[0]); | ||
767 | if (!(buf)) { | ||
768 | printk(KERN_ERR "Unable to get buf for scatterlist\n"); | ||
769 | goto after_mode_select; | ||
770 | } | ||
771 | |||
772 | if (cdb[0] == MODE_SELECT) | ||
773 | bdl = (buf[3]); | ||
774 | else | ||
775 | bdl = (buf[6] << 8) | (buf[7]); | ||
776 | |||
777 | if (!bdl) | ||
778 | goto after_mode_select; | ||
779 | |||
780 | if (cdb[0] == MODE_SELECT) | ||
781 | blocksize = (buf[9] << 16) | (buf[10] << 8) | | ||
782 | (buf[11]); | ||
783 | else | ||
784 | blocksize = (buf[13] << 16) | (buf[14] << 8) | | ||
785 | (buf[15]); | ||
786 | |||
787 | sd->sector_size = blocksize; | ||
788 | } | ||
789 | after_mode_select: | ||
790 | |||
791 | if (status_byte(result) & CHECK_CONDITION) | ||
792 | return 1; | ||
793 | |||
794 | return 0; | ||
795 | } | ||
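
For reference, the MODE SELECT byte arithmetic above follows the SPC mode parameter list layout: the 6-byte CDB variant keeps the block descriptor length in byte 3 and the block length in bytes 9-11, while the 10-byte variant uses bytes 6-7 and 13-15. Below is a minimal userspace sketch of the same extraction (not from the patch; the sample buffer is invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define MODE_SELECT	0x15	/* standard SCSI opcodes */
#define MODE_SELECT_10	0x55

static uint32_t mode_select_blocksize(uint8_t opcode, const uint8_t *buf)
{
	uint16_t bdl;	/* block-descriptor length from the header */

	if (opcode == MODE_SELECT)
		bdl = buf[3];			/* 6-byte header */
	else
		bdl = (buf[6] << 8) | buf[7];	/* 10-byte header */
	if (!bdl)
		return 0;	/* no block descriptor: keep old size */

	if (opcode == MODE_SELECT)
		return (buf[9] << 16) | (buf[10] << 8) | buf[11];
	return (buf[13] << 16) | (buf[14] << 8) | buf[15];
}

int main(void)
{
	/* 4-byte header + 8-byte block descriptor, block length 0x200 */
	uint8_t buf[12] = { 0, 0, 0, 8,  0, 0, 0, 0,  0, 0x02, 0x00 };

	/* prints "blocksize: 512" */
	printf("blocksize: %u\n", (unsigned)mode_select_blocksize(MODE_SELECT, buf));
	return 0;
}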
796 | |||
797 | static struct se_task * | ||
798 | pscsi_alloc_task(struct se_cmd *cmd) | ||
799 | { | ||
800 | struct pscsi_plugin_task *pt; | ||
801 | unsigned char *cdb = T_TASK(cmd)->t_task_cdb; | ||
802 | |||
803 | pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); | ||
804 | if (!pt) { | ||
805 | printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); | ||
806 | return NULL; | ||
807 | } | ||
808 | |||
809 | /* | ||
810 | * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, | ||
811 | 	 * allocate the extended CDB buffer for the per struct se_task context | ||
812 | * pt->pscsi_cdb now. | ||
813 | */ | ||
814 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { | ||
815 | |||
816 | pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); | ||
817 | if (!(pt->pscsi_cdb)) { | ||
818 | printk(KERN_ERR "pSCSI: Unable to allocate extended" | ||
819 | " pt->pscsi_cdb\n"); | ||
820 | 			kfree(pt); return NULL; /* don't leak pt */ | ||
821 | } | ||
822 | } else | ||
823 | pt->pscsi_cdb = &pt->__pscsi_cdb[0]; | ||
824 | |||
825 | return &pt->pscsi_task; | ||
826 | } | ||
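
The inline-versus-extended CDB handling above is a small-buffer optimization: the task embeds a TCM_MAX_COMMAND_SIZE array and only falls back to a heap allocation for larger CDBs. A standalone sketch of the pattern, with invented names and sizes (it also frees the task when the extended allocation fails, mirroring the fix applied above):

#include <stdio.h>
#include <stdlib.h>

#define INLINE_CDB_SIZE 32	/* stands in for TCM_MAX_COMMAND_SIZE */

struct demo_task {
	unsigned char *cdb;			/* points at one of the two */
	unsigned char __cdb[INLINE_CDB_SIZE];	/* inline fast path */
};

static struct demo_task *demo_alloc_task(size_t cdb_len)
{
	struct demo_task *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	if (cdb_len > INLINE_CDB_SIZE) {
		t->cdb = calloc(1, cdb_len);	/* extended CDB */
		if (!t->cdb) {
			free(t);		/* avoid leaking the task */
			return NULL;
		}
	} else {
		t->cdb = t->__cdb;
	}
	return t;
}

int main(void)
{
	struct demo_task *t = demo_alloc_task(64);

	printf("extended: %d\n", t && t->cdb != t->__cdb);
	if (t && t->cdb != t->__cdb)
		free(t->cdb);
	free(t);
	return 0;
}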
827 | |||
828 | static inline void pscsi_blk_init_request( | ||
829 | struct se_task *task, | ||
830 | struct pscsi_plugin_task *pt, | ||
831 | struct request *req, | ||
832 | int bidi_read) | ||
833 | { | ||
834 | /* | ||
835 | * Defined as "scsi command" in include/linux/blkdev.h. | ||
836 | */ | ||
837 | req->cmd_type = REQ_TYPE_BLOCK_PC; | ||
838 | /* | ||
839 | * For the extra BIDI-COMMAND READ struct request we do not | ||
840 | * need to setup the remaining structure members | ||
841 | */ | ||
842 | if (bidi_read) | ||
843 | return; | ||
844 | /* | ||
845 | * Setup the done function pointer for struct request, | ||
846 | 	 * also set the end_io_data pointer to struct se_task. | ||
847 | */ | ||
848 | req->end_io = pscsi_req_done; | ||
849 | req->end_io_data = (void *)task; | ||
850 | /* | ||
851 | * Load the referenced struct se_task's SCSI CDB into | ||
852 | * include/linux/blkdev.h:struct request->cmd | ||
853 | */ | ||
854 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); | ||
855 | req->cmd = &pt->pscsi_cdb[0]; | ||
856 | /* | ||
857 | * Setup pointer for outgoing sense data. | ||
858 | */ | ||
859 | req->sense = (void *)&pt->pscsi_sense[0]; | ||
860 | req->sense_len = 0; | ||
861 | } | ||
862 | |||
863 | /* | ||
864 | * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB | ||
865 | */ | ||
866 | static int pscsi_blk_get_request(struct se_task *task) | ||
867 | { | ||
868 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
869 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
870 | |||
871 | pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, | ||
872 | (task->task_data_direction == DMA_TO_DEVICE), | ||
873 | GFP_KERNEL); | ||
874 | if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { | ||
875 | printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", | ||
876 | 				PTR_ERR(pt->pscsi_req)); | ||
877 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
878 | } | ||
879 | /* | ||
880 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, | ||
881 | * and setup rq callback, CDB and sense. | ||
882 | */ | ||
883 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | ||
884 | return 0; | ||
885 | } | ||
886 | |||
887 | /* pscsi_do_task(): (Part of se_subsystem_api_t template) | ||
888 | * | ||
889 | * | ||
890 | */ | ||
891 | static int pscsi_do_task(struct se_task *task) | ||
892 | { | ||
893 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
894 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
895 | /* | ||
896 | * Set the struct request->timeout value based on peripheral | ||
897 | * device type from SCSI. | ||
898 | */ | ||
899 | if (pdv->pdv_sd->type == TYPE_DISK) | ||
900 | pt->pscsi_req->timeout = PS_TIMEOUT_DISK; | ||
901 | else | ||
902 | pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; | ||
903 | |||
904 | pt->pscsi_req->retries = PS_RETRY; | ||
905 | /* | ||
906 | * Queue the struct request into the struct scsi_device->request_queue. | ||
907 | * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd | ||
908 | * descriptor | ||
909 | */ | ||
910 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, | ||
911 | (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ), | ||
912 | pscsi_req_done); | ||
913 | |||
914 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
915 | } | ||
916 | |||
917 | static void pscsi_free_task(struct se_task *task) | ||
918 | { | ||
919 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
920 | struct se_cmd *cmd = task->task_se_cmd; | ||
921 | |||
922 | /* | ||
923 | * Release the extended CDB allocation from pscsi_alloc_task() | ||
924 | * if one exists. | ||
925 | */ | ||
926 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) | ||
927 | kfree(pt->pscsi_cdb); | ||
928 | /* | ||
929 | * We do not release the bio(s) here associated with this task, as | ||
930 | * this is handled by bio_put() and pscsi_bi_endio(). | ||
931 | */ | ||
932 | kfree(pt); | ||
933 | } | ||
934 | |||
935 | enum { | ||
936 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, | ||
937 | Opt_scsi_lun_id, Opt_err | ||
938 | }; | ||
939 | |||
940 | static match_table_t tokens = { | ||
941 | {Opt_scsi_host_id, "scsi_host_id=%d"}, | ||
942 | {Opt_scsi_channel_id, "scsi_channel_id=%d"}, | ||
943 | {Opt_scsi_target_id, "scsi_target_id=%d"}, | ||
944 | {Opt_scsi_lun_id, "scsi_lun_id=%d"}, | ||
945 | {Opt_err, NULL} | ||
946 | }; | ||
947 | |||
948 | static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | ||
949 | struct se_subsystem_dev *se_dev, | ||
950 | const char *page, | ||
951 | ssize_t count) | ||
952 | { | ||
953 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | ||
954 | struct pscsi_hba_virt *phv = hba->hba_ptr; | ||
955 | char *orig, *ptr, *opts; | ||
956 | substring_t args[MAX_OPT_ARGS]; | ||
957 | int ret = 0, arg, token; | ||
958 | |||
959 | opts = kstrdup(page, GFP_KERNEL); | ||
960 | if (!opts) | ||
961 | return -ENOMEM; | ||
962 | |||
963 | orig = opts; | ||
964 | |||
965 | while ((ptr = strsep(&opts, ",")) != NULL) { | ||
966 | if (!*ptr) | ||
967 | continue; | ||
968 | |||
969 | token = match_token(ptr, tokens, args); | ||
970 | switch (token) { | ||
971 | case Opt_scsi_host_id: | ||
972 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | ||
973 | printk(KERN_ERR "PSCSI[%d]: Unable to accept" | ||
974 | " scsi_host_id while phv_mode ==" | ||
975 | " PHV_LLD_SCSI_HOST_NO\n", | ||
976 | phv->phv_host_id); | ||
977 | ret = -EINVAL; | ||
978 | goto out; | ||
979 | } | ||
980 | match_int(args, &arg); | ||
981 | pdv->pdv_host_id = arg; | ||
982 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" | ||
983 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); | ||
984 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; | ||
985 | break; | ||
986 | case Opt_scsi_channel_id: | ||
987 | match_int(args, &arg); | ||
988 | pdv->pdv_channel_id = arg; | ||
989 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" | ||
990 | " ID: %d\n", phv->phv_host_id, | ||
991 | pdv->pdv_channel_id); | ||
992 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; | ||
993 | break; | ||
994 | case Opt_scsi_target_id: | ||
995 | match_int(args, &arg); | ||
996 | pdv->pdv_target_id = arg; | ||
997 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" | ||
998 | " ID: %d\n", phv->phv_host_id, | ||
999 | pdv->pdv_target_id); | ||
1000 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; | ||
1001 | break; | ||
1002 | case Opt_scsi_lun_id: | ||
1003 | match_int(args, &arg); | ||
1004 | pdv->pdv_lun_id = arg; | ||
1005 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" | ||
1006 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); | ||
1007 | pdv->pdv_flags |= PDF_HAS_LUN_ID; | ||
1008 | break; | ||
1009 | default: | ||
1010 | break; | ||
1011 | } | ||
1012 | } | ||
1013 | |||
1014 | out: | ||
1015 | kfree(orig); | ||
1016 | return (!ret) ? count : ret; | ||
1017 | } | ||
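
In practice a device is configured by writing a comma-separated string such as "scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0" to the configfs control attribute, which the function above tokenizes. A rough userspace model of that parsing, where strsep() and sscanf() stand in for the kernel's match_token()/match_int() (strsep() is a glibc/BSD extension):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *opts = strdup("scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0");
	char *orig = opts, *ptr;
	int arg;

	if (!opts)
		return 1;
	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;	/* skip empty tokens, as above */
		if (sscanf(ptr, "scsi_channel_id=%d", &arg) == 1)
			printf("channel: %d\n", arg);
		else if (sscanf(ptr, "scsi_target_id=%d", &arg) == 1)
			printf("target: %d\n", arg);
		else if (sscanf(ptr, "scsi_lun_id=%d", &arg) == 1)
			printf("lun: %d\n", arg);
	}
	free(orig);	/* strsep() moved opts; free the original pointer */
	return 0;
}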
1018 | |||
1019 | static ssize_t pscsi_check_configfs_dev_params( | ||
1020 | struct se_hba *hba, | ||
1021 | struct se_subsystem_dev *se_dev) | ||
1022 | { | ||
1023 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | ||
1024 | |||
1025 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || | ||
1026 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || | ||
1027 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { | ||
1028 | printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" | ||
1029 | " scsi_lun_id= parameters\n"); | ||
1030 | return -1; | ||
1031 | } | ||
1032 | |||
1033 | return 0; | ||
1034 | } | ||
1035 | |||
1036 | static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, | ||
1037 | struct se_subsystem_dev *se_dev, | ||
1038 | char *b) | ||
1039 | { | ||
1040 | struct pscsi_hba_virt *phv = hba->hba_ptr; | ||
1041 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | ||
1042 | struct scsi_device *sd = pdv->pdv_sd; | ||
1043 | unsigned char host_id[16]; | ||
1044 | ssize_t bl; | ||
1045 | int i; | ||
1046 | |||
1047 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | ||
1048 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); | ||
1049 | else | ||
1050 | snprintf(host_id, 16, "PHBA Mode"); | ||
1051 | |||
1052 | bl = sprintf(b, "SCSI Device Bus Location:" | ||
1053 | " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", | ||
1054 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, | ||
1055 | host_id); | ||
1056 | |||
1057 | if (sd) { | ||
1058 | bl += sprintf(b + bl, " "); | ||
1059 | bl += sprintf(b + bl, "Vendor: "); | ||
1060 | for (i = 0; i < 8; i++) { | ||
1061 | if (ISPRINT(sd->vendor[i])) /* printable character? */ | ||
1062 | bl += sprintf(b + bl, "%c", sd->vendor[i]); | ||
1063 | else | ||
1064 | bl += sprintf(b + bl, " "); | ||
1065 | } | ||
1066 | bl += sprintf(b + bl, " Model: "); | ||
1067 | for (i = 0; i < 16; i++) { | ||
1068 | if (ISPRINT(sd->model[i])) /* printable character ? */ | ||
1069 | bl += sprintf(b + bl, "%c", sd->model[i]); | ||
1070 | else | ||
1071 | bl += sprintf(b + bl, " "); | ||
1072 | } | ||
1073 | bl += sprintf(b + bl, " Rev: "); | ||
1074 | for (i = 0; i < 4; i++) { | ||
1075 | if (ISPRINT(sd->rev[i])) /* printable character ? */ | ||
1076 | bl += sprintf(b + bl, "%c", sd->rev[i]); | ||
1077 | else | ||
1078 | bl += sprintf(b + bl, " "); | ||
1079 | } | ||
1080 | bl += sprintf(b + bl, "\n"); | ||
1081 | } | ||
1082 | return bl; | ||
1083 | } | ||
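
The vendor/model/revision fields dumped above come from fixed-width, non-NUL-terminated INQUIRY data, which is why each byte is tested for printability instead of being printed as a string. A small sketch of the same idea (names invented):

#include <stdio.h>
#include <ctype.h>

static void dump_field(const char *name, const char *field, int len)
{
	int i;

	printf("%s: ", name);
	for (i = 0; i < len; i++)	/* fixed width, no NUL terminator */
		putchar(isprint((unsigned char)field[i]) ? field[i] : ' ');
	putchar('\n');
}

int main(void)
{
	char vendor[8] = { 'A', 'T', 'A', ' ', ' ', ' ', ' ', '\0' };

	dump_field("Vendor", vendor, 8);	/* '\0' becomes a space */
	return 0;
}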
1084 | |||
1085 | static void pscsi_bi_endio(struct bio *bio, int error) | ||
1086 | { | ||
1087 | bio_put(bio); | ||
1088 | } | ||
1089 | |||
1090 | static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) | ||
1091 | { | ||
1092 | struct bio *bio; | ||
1093 | /* | ||
1094 | 	 * Use bio_kmalloc() following the comment for bio -> struct request | ||
1095 | 	 * in block/blk-core.c:blk_make_request() | ||
1096 | */ | ||
1097 | bio = bio_kmalloc(GFP_KERNEL, sg_num); | ||
1098 | if (!(bio)) { | ||
1099 | printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); | ||
1100 | return NULL; | ||
1101 | } | ||
1102 | bio->bi_end_io = pscsi_bi_endio; | ||
1103 | |||
1104 | return bio; | ||
1105 | } | ||
1106 | |||
1107 | #if 0 | ||
1108 | #define DEBUG_PSCSI(x...) printk(x) | ||
1109 | #else | ||
1110 | #define DEBUG_PSCSI(x...) | ||
1111 | #endif | ||
1112 | |||
1113 | static int __pscsi_map_task_SG( | ||
1114 | struct se_task *task, | ||
1115 | struct scatterlist *task_sg, | ||
1116 | u32 task_sg_num, | ||
1117 | int bidi_read) | ||
1118 | { | ||
1119 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1120 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
1121 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | ||
1122 | struct page *page; | ||
1123 | struct scatterlist *sg; | ||
1124 | u32 data_len = task->task_size, i, len, bytes, off; | ||
1125 | int nr_pages = (task->task_size + task_sg[0].offset + | ||
1126 | PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
1127 | int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
1128 | int rw = (task->task_data_direction == DMA_TO_DEVICE); | ||
1129 | |||
1130 | if (!task->task_size) | ||
1131 | return 0; | ||
1132 | /* | ||
1133 | * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup | ||
1134 | 	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg -> | ||
1135 | * struct scatterlist memory. The struct se_task->task_sg[] currently needs | ||
1136 | * to be attached to struct bios for submission to Linux/SCSI using | ||
1137 | * struct request to struct scsi_device->request_queue. | ||
1138 | * | ||
1139 | * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI | ||
1140 | * is ported to upstream SCSI passthrough functionality that accepts | ||
1141 | 	 * struct scatterlist->page_link or struct page as a parameter. | ||
1142 | */ | ||
1143 | DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); | ||
1144 | |||
1145 | for_each_sg(task_sg, sg, task_sg_num, i) { | ||
1146 | page = sg_page(sg); | ||
1147 | off = sg->offset; | ||
1148 | len = sg->length; | ||
1149 | |||
1150 | DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, | ||
1151 | page, len, off); | ||
1152 | |||
1153 | while (len > 0 && data_len > 0) { | ||
1154 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); | ||
1155 | bytes = min(bytes, data_len); | ||
1156 | |||
1157 | if (!(bio)) { | ||
1158 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); | ||
1159 | nr_pages -= nr_vecs; | ||
1160 | /* | ||
1161 | * Calls bio_kmalloc() and sets bio->bi_end_io() | ||
1162 | */ | ||
1163 | bio = pscsi_get_bio(pdv, nr_vecs); | ||
1164 | if (!(bio)) | ||
1165 | goto fail; | ||
1166 | |||
1167 | if (rw) | ||
1168 | bio->bi_rw |= REQ_WRITE; | ||
1169 | |||
1170 | DEBUG_PSCSI("PSCSI: Allocated bio: %p," | ||
1171 | " dir: %s nr_vecs: %d\n", bio, | ||
1172 | 					(rw) ? "w" : "r", nr_vecs); | ||
1173 | /* | ||
1174 | * Set *hbio pointer to handle the case: | ||
1175 | * nr_pages > BIO_MAX_PAGES, where additional | ||
1176 | * bios need to be added to complete a given | ||
1177 | * struct se_task | ||
1178 | */ | ||
1179 | if (!hbio) | ||
1180 | hbio = tbio = bio; | ||
1181 | else | ||
1182 | tbio = tbio->bi_next = bio; | ||
1183 | } | ||
1184 | |||
1185 | DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" | ||
1186 | " bio: %p page: %p len: %d off: %d\n", i, bio, | ||
1187 | page, len, off); | ||
1188 | |||
1189 | rc = bio_add_pc_page(pdv->pdv_sd->request_queue, | ||
1190 | bio, page, bytes, off); | ||
1191 | if (rc != bytes) | ||
1192 | goto fail; | ||
1193 | |||
1194 | DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", | ||
1195 | bio->bi_vcnt, nr_vecs); | ||
1196 | |||
1197 | if (bio->bi_vcnt > nr_vecs) { | ||
1198 | DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" | ||
1199 | " %d i: %d bio: %p, allocating another" | ||
1200 | " bio\n", bio->bi_vcnt, i, bio); | ||
1201 | /* | ||
1202 | * Clear the pointer so that another bio will | ||
1203 | * be allocated with pscsi_get_bio() above, the | ||
1204 | * current bio has already been set *tbio and | ||
1205 | * bio->bi_next. | ||
1206 | */ | ||
1207 | bio = NULL; | ||
1208 | } | ||
1209 | |||
1210 | page++; | ||
1211 | len -= bytes; | ||
1212 | data_len -= bytes; | ||
1213 | off = 0; | ||
1214 | } | ||
1215 | } | ||
1216 | /* | ||
1217 | 	 * Setup the primary pt->pscsi_req used for the non-BIDI case and the | ||
1218 | 	 * primary BIDI-COMMAND SCSI WRITE payload mapped from struct se_task->task_sg[] | ||
1219 | */ | ||
1220 | if (!(bidi_read)) { | ||
1221 | /* | ||
1222 | * Starting with v2.6.31, call blk_make_request() passing in *hbio to | ||
1223 | * allocate the pSCSI task a struct request. | ||
1224 | */ | ||
1225 | pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, | ||
1226 | hbio, GFP_KERNEL); | ||
1227 | 		if (IS_ERR(pt->pscsi_req)) { | ||
1228 | printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); | ||
1229 | goto fail; | ||
1230 | } | ||
1231 | /* | ||
1232 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, | ||
1233 | * and setup rq callback, CDB and sense. | ||
1234 | */ | ||
1235 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | ||
1236 | |||
1237 | return task->task_sg_num; | ||
1238 | } | ||
1239 | /* | ||
1240 | * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND | ||
1241 | 	 * SCSI READ payload mapped from struct se_task->task_sg_bidi[] | ||
1242 | */ | ||
1243 | pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, | ||
1244 | hbio, GFP_KERNEL); | ||
1245 | 	if (IS_ERR(pt->pscsi_req->next_rq)) { | ||
1246 | printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); | ||
1247 | goto fail; | ||
1248 | } | ||
1249 | pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); | ||
1250 | |||
1251 | return task->task_sg_num; | ||
1252 | fail: | ||
1253 | while (hbio) { | ||
1254 | bio = hbio; | ||
1255 | hbio = hbio->bi_next; | ||
1256 | bio->bi_next = NULL; | ||
1257 | bio_endio(bio, 0); | ||
1258 | } | ||
1259 | return ret; | ||
1260 | } | ||
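
The loop above packs scatterlist fragments into bios of at most BIO_MAX_PAGES vectors, linking extra bios onto a head/tail chain whenever a single bio cannot hold the whole task. A stripped-down userspace model of just that chaining logic (capacity and types are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_VECS 2			/* stands in for BIO_MAX_PAGES */

struct demo_bio {
	int vcnt;			/* vectors packed so far */
	struct demo_bio *next;		/* bi_next analogue */
};

int main(void)
{
	struct demo_bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	int frag;

	for (frag = 0; frag < 5; frag++) {	/* five page fragments */
		if (!bio) {
			bio = calloc(1, sizeof(*bio));
			if (!bio)
				return 1;
			if (!hbio)
				hbio = tbio = bio;	/* chain head */
			else
				tbio = tbio->next = bio;
		}
		bio->vcnt++;
		if (bio->vcnt == DEMO_MAX_VECS)
			bio = NULL;	/* full: force a new container */
	}

	while (hbio) {			/* walk and free: prints 2, 2, 1 */
		struct demo_bio *b = hbio;

		hbio = hbio->next;
		printf("bio with %d vecs\n", b->vcnt);
		free(b);
	}
	return 0;
}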
1261 | |||
1262 | static int pscsi_map_task_SG(struct se_task *task) | ||
1263 | { | ||
1264 | int ret; | ||
1265 | |||
1266 | /* | ||
1267 | * Setup the main struct request for the task->task_sg[] payload | ||
1268 | */ | ||
1269 | |||
1270 | ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); | ||
1271 | if (ret >= 0 && task->task_sg_bidi) { | ||
1272 | /* | ||
1273 | * If present, set up the extra BIDI-COMMAND SCSI READ | ||
1274 | * struct request and payload. | ||
1275 | */ | ||
1276 | ret = __pscsi_map_task_SG(task, task->task_sg_bidi, | ||
1277 | task->task_sg_num, 1); | ||
1278 | } | ||
1279 | |||
1280 | if (ret < 0) | ||
1281 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1282 | return 0; | ||
1283 | } | ||
1284 | |||
1285 | /* pscsi_map_task_non_SG(): | ||
1286 | * | ||
1287 | * | ||
1288 | */ | ||
1289 | static int pscsi_map_task_non_SG(struct se_task *task) | ||
1290 | { | ||
1291 | struct se_cmd *cmd = TASK_CMD(task); | ||
1292 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1293 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | ||
1294 | int ret = 0; | ||
1295 | |||
1296 | if (pscsi_blk_get_request(task) < 0) | ||
1297 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1298 | |||
1299 | if (!task->task_size) | ||
1300 | return 0; | ||
1301 | |||
1302 | ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, | ||
1303 | pt->pscsi_req, T_TASK(cmd)->t_task_buf, | ||
1304 | task->task_size, GFP_KERNEL); | ||
1305 | if (ret < 0) { | ||
1306 | printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); | ||
1307 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
1308 | } | ||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1312 | static int pscsi_CDB_none(struct se_task *task) | ||
1313 | { | ||
1314 | return pscsi_blk_get_request(task); | ||
1315 | } | ||
1316 | |||
1317 | /* pscsi_get_cdb(): | ||
1318 | * | ||
1319 | * | ||
1320 | */ | ||
1321 | static unsigned char *pscsi_get_cdb(struct se_task *task) | ||
1322 | { | ||
1323 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1324 | |||
1325 | return pt->pscsi_cdb; | ||
1326 | } | ||
1327 | |||
1328 | /* pscsi_get_sense_buffer(): | ||
1329 | * | ||
1330 | * | ||
1331 | */ | ||
1332 | static unsigned char *pscsi_get_sense_buffer(struct se_task *task) | ||
1333 | { | ||
1334 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1335 | |||
1336 | return (unsigned char *)&pt->pscsi_sense[0]; | ||
1337 | } | ||
1338 | |||
1339 | /* pscsi_get_device_rev(): | ||
1340 | * | ||
1341 | * | ||
1342 | */ | ||
1343 | static u32 pscsi_get_device_rev(struct se_device *dev) | ||
1344 | { | ||
1345 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | ||
1346 | struct scsi_device *sd = pdv->pdv_sd; | ||
1347 | |||
1348 | return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; | ||
1349 | } | ||
1350 | |||
1351 | /* pscsi_get_device_type(): | ||
1352 | * | ||
1353 | * | ||
1354 | */ | ||
1355 | static u32 pscsi_get_device_type(struct se_device *dev) | ||
1356 | { | ||
1357 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | ||
1358 | struct scsi_device *sd = pdv->pdv_sd; | ||
1359 | |||
1360 | return sd->type; | ||
1361 | } | ||
1362 | |||
1363 | static sector_t pscsi_get_blocks(struct se_device *dev) | ||
1364 | { | ||
1365 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | ||
1366 | |||
1367 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) | ||
1368 | return pdv->pdv_bd->bd_part->nr_sects; | ||
1369 | |||
1370 | dump_stack(); | ||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | /* pscsi_handle_SAM_STATUS_failures(): | ||
1375 | * | ||
1376 | * | ||
1377 | */ | ||
1378 | static inline void pscsi_process_SAM_status( | ||
1379 | struct se_task *task, | ||
1380 | struct pscsi_plugin_task *pt) | ||
1381 | { | ||
1382 | task->task_scsi_status = status_byte(pt->pscsi_result); | ||
1383 | if ((task->task_scsi_status)) { | ||
1384 | task->task_scsi_status <<= 1; | ||
1385 | printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" | ||
1386 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | ||
1387 | pt->pscsi_result); | ||
1388 | } | ||
1389 | |||
1390 | switch (host_byte(pt->pscsi_result)) { | ||
1391 | case DID_OK: | ||
1392 | transport_complete_task(task, (!task->task_scsi_status)); | ||
1393 | break; | ||
1394 | default: | ||
1395 | printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" | ||
1396 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | ||
1397 | pt->pscsi_result); | ||
1398 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | ||
1399 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1400 | TASK_CMD(task)->transport_error_status = | ||
1401 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1402 | transport_complete_task(task, 0); | ||
1403 | break; | ||
1404 | } | ||
1405 | |||
1406 | return; | ||
1407 | } | ||
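
The shifting above exists because, in this era, include/scsi/scsi.h's status_byte() strips the low vendor bit (a SCSI-1 leftover), so the value must be shifted left once to recover a SAM status code. A compilable illustration of the result-word decoding, with the macro definitions copied into userspace:

#include <stdio.h>

/* These mirror the era's include/scsi/scsi.h helpers. */
#define status_byte(result)	(((result) >> 1) & 0x7f)
#define host_byte(result)	(((result) >> 16) & 0xff)

#define DID_OK			0x00
#define SAM_STAT_CHECK_CONDITION 0x02

int main(void)
{
	/* host byte DID_OK, SAM CHECK CONDITION in the status byte */
	int result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;

	/* prints "host: 0x00 sam status: 0x02" */
	printf("host: 0x%02x sam status: 0x%02x\n",
	       host_byte(result), status_byte(result) << 1);
	return 0;
}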
1408 | |||
1409 | static void pscsi_req_done(struct request *req, int uptodate) | ||
1410 | { | ||
1411 | struct se_task *task = req->end_io_data; | ||
1412 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | ||
1413 | |||
1414 | pt->pscsi_result = req->errors; | ||
1415 | pt->pscsi_resid = req->resid_len; | ||
1416 | |||
1417 | pscsi_process_SAM_status(task, pt); | ||
1418 | /* | ||
1419 | * Release BIDI-READ if present | ||
1420 | */ | ||
1421 | if (req->next_rq != NULL) | ||
1422 | __blk_put_request(req->q, req->next_rq); | ||
1423 | |||
1424 | __blk_put_request(req->q, req); | ||
1425 | pt->pscsi_req = NULL; | ||
1426 | } | ||
1427 | |||
1428 | static struct se_subsystem_api pscsi_template = { | ||
1429 | .name = "pscsi", | ||
1430 | .owner = THIS_MODULE, | ||
1431 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, | ||
1432 | .cdb_none = pscsi_CDB_none, | ||
1433 | .map_task_non_SG = pscsi_map_task_non_SG, | ||
1434 | .map_task_SG = pscsi_map_task_SG, | ||
1435 | .attach_hba = pscsi_attach_hba, | ||
1436 | .detach_hba = pscsi_detach_hba, | ||
1437 | .pmode_enable_hba = pscsi_pmode_enable_hba, | ||
1438 | .allocate_virtdevice = pscsi_allocate_virtdevice, | ||
1439 | .create_virtdevice = pscsi_create_virtdevice, | ||
1440 | .free_device = pscsi_free_device, | ||
1441 | .transport_complete = pscsi_transport_complete, | ||
1442 | .alloc_task = pscsi_alloc_task, | ||
1443 | .do_task = pscsi_do_task, | ||
1444 | .free_task = pscsi_free_task, | ||
1445 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, | ||
1446 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, | ||
1447 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | ||
1448 | .get_cdb = pscsi_get_cdb, | ||
1449 | .get_sense_buffer = pscsi_get_sense_buffer, | ||
1450 | .get_device_rev = pscsi_get_device_rev, | ||
1451 | .get_device_type = pscsi_get_device_type, | ||
1452 | .get_blocks = pscsi_get_blocks, | ||
1453 | }; | ||
1454 | |||
1455 | static int __init pscsi_module_init(void) | ||
1456 | { | ||
1457 | return transport_subsystem_register(&pscsi_template); | ||
1458 | } | ||
1459 | |||
1460 | static void pscsi_module_exit(void) | ||
1461 | { | ||
1462 | transport_subsystem_release(&pscsi_template); | ||
1463 | } | ||
1464 | |||
1465 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); | ||
1466 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | ||
1467 | MODULE_LICENSE("GPL"); | ||
1468 | |||
1469 | module_init(pscsi_module_init); | ||
1470 | module_exit(pscsi_module_exit); | ||
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h new file mode 100644 index 000000000000..a4cd5d352c3a --- /dev/null +++ b/drivers/target/target_core_pscsi.h | |||
@@ -0,0 +1,65 @@ | |||
1 | #ifndef TARGET_CORE_PSCSI_H | ||
2 | #define TARGET_CORE_PSCSI_H | ||
3 | |||
4 | #define PSCSI_VERSION "v4.0" | ||
5 | #define PSCSI_VIRTUAL_HBA_DEPTH 2048 | ||
6 | |||
7 | /* used in pscsi_find_alloc_len() */ | ||
8 | #ifndef INQUIRY_DATA_SIZE | ||
9 | #define INQUIRY_DATA_SIZE 0x24 | ||
10 | #endif | ||
11 | |||
12 | /* used in pscsi_add_device_to_list() */ | ||
13 | #define PSCSI_DEFAULT_QUEUEDEPTH 1 | ||
14 | |||
15 | #define PS_RETRY 5 | ||
16 | #define PS_TIMEOUT_DISK (15*HZ) | ||
17 | #define PS_TIMEOUT_OTHER (500*HZ) | ||
18 | |||
19 | #include <linux/device.h> | ||
20 | #include <scsi/scsi_driver.h> | ||
21 | #include <scsi/scsi_device.h> | ||
22 | #include <linux/kref.h> | ||
23 | #include <linux/kobject.h> | ||
24 | |||
25 | struct pscsi_plugin_task { | ||
26 | struct se_task pscsi_task; | ||
27 | unsigned char *pscsi_cdb; | ||
28 | unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
29 | unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; | ||
30 | int pscsi_direction; | ||
31 | int pscsi_result; | ||
32 | u32 pscsi_resid; | ||
33 | struct request *pscsi_req; | ||
34 | } ____cacheline_aligned; | ||
35 | |||
36 | #define PDF_HAS_CHANNEL_ID 0x01 | ||
37 | #define PDF_HAS_TARGET_ID 0x02 | ||
38 | #define PDF_HAS_LUN_ID 0x04 | ||
39 | #define PDF_HAS_VPD_UNIT_SERIAL 0x08 | ||
40 | #define PDF_HAS_VPD_DEV_IDENT 0x10 | ||
41 | #define PDF_HAS_VIRT_HOST_ID 0x20 | ||
42 | |||
43 | struct pscsi_dev_virt { | ||
44 | int pdv_flags; | ||
45 | int pdv_host_id; | ||
46 | int pdv_channel_id; | ||
47 | int pdv_target_id; | ||
48 | int pdv_lun_id; | ||
49 | struct block_device *pdv_bd; | ||
50 | struct scsi_device *pdv_sd; | ||
51 | struct se_hba *pdv_se_hba; | ||
52 | } ____cacheline_aligned; | ||
53 | |||
54 | typedef enum phv_modes { | ||
55 | PHV_VIRUTAL_HOST_ID, | ||
56 | PHV_LLD_SCSI_HOST_NO | ||
57 | } phv_modes_t; | ||
58 | |||
59 | struct pscsi_hba_virt { | ||
60 | int phv_host_id; | ||
61 | phv_modes_t phv_mode; | ||
62 | struct Scsi_Host *phv_lld_host; | ||
63 | } ____cacheline_aligned; | ||
64 | |||
65 | #endif /*** TARGET_CORE_PSCSI_H ***/ | ||
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c new file mode 100644 index 000000000000..979aebf20019 --- /dev/null +++ b/drivers/target/target_core_rd.c | |||
@@ -0,0 +1,1091 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_rd.c | ||
3 | * | ||
4 | * This file contains the Storage Engine <-> Ramdisk transport | ||
5 | * specific functions. | ||
6 | * | ||
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | ||
8 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
9 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
10 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
11 | * | ||
12 | * Nicholas A. Bellinger <nab@kernel.org> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, write to the Free Software | ||
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | ******************************************************************************/ | ||
29 | |||
30 | #include <linux/version.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/parser.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/blkdev.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/smp_lock.h> | ||
38 | #include <scsi/scsi.h> | ||
39 | #include <scsi/scsi_host.h> | ||
40 | |||
41 | #include <target/target_core_base.h> | ||
42 | #include <target/target_core_device.h> | ||
43 | #include <target/target_core_transport.h> | ||
44 | #include <target/target_core_fabric_ops.h> | ||
45 | |||
46 | #include "target_core_rd.h" | ||
47 | |||
48 | static struct se_subsystem_api rd_dr_template; | ||
49 | static struct se_subsystem_api rd_mcp_template; | ||
50 | |||
51 | /* #define DEBUG_RAMDISK_MCP */ | ||
52 | /* #define DEBUG_RAMDISK_DR */ | ||
53 | |||
54 | /* rd_attach_hba(): (Part of se_subsystem_api_t template) | ||
55 | * | ||
56 | * | ||
57 | */ | ||
58 | static int rd_attach_hba(struct se_hba *hba, u32 host_id) | ||
59 | { | ||
60 | struct rd_host *rd_host; | ||
61 | |||
62 | rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); | ||
63 | if (!(rd_host)) { | ||
64 | printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); | ||
65 | return -ENOMEM; | ||
66 | } | ||
67 | |||
68 | rd_host->rd_host_id = host_id; | ||
69 | |||
70 | atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH); | ||
71 | atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH); | ||
72 | hba->hba_ptr = (void *) rd_host; | ||
73 | |||
74 | printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" | ||
75 | " Generic Target Core Stack %s\n", hba->hba_id, | ||
76 | RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); | ||
77 | printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" | ||
78 | " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id, | ||
79 | rd_host->rd_host_id, atomic_read(&hba->max_queue_depth), | ||
80 | RD_MAX_SECTORS); | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static void rd_detach_hba(struct se_hba *hba) | ||
86 | { | ||
87 | struct rd_host *rd_host = hba->hba_ptr; | ||
88 | |||
89 | printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" | ||
90 | " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); | ||
91 | |||
92 | kfree(rd_host); | ||
93 | hba->hba_ptr = NULL; | ||
94 | } | ||
95 | |||
96 | /* rd_release_device_space(): | ||
97 | * | ||
98 | * | ||
99 | */ | ||
100 | static void rd_release_device_space(struct rd_dev *rd_dev) | ||
101 | { | ||
102 | u32 i, j, page_count = 0, sg_per_table; | ||
103 | struct rd_dev_sg_table *sg_table; | ||
104 | struct page *pg; | ||
105 | struct scatterlist *sg; | ||
106 | |||
107 | if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) | ||
108 | return; | ||
109 | |||
110 | sg_table = rd_dev->sg_table_array; | ||
111 | |||
112 | for (i = 0; i < rd_dev->sg_table_count; i++) { | ||
113 | sg = sg_table[i].sg_table; | ||
114 | sg_per_table = sg_table[i].rd_sg_count; | ||
115 | |||
116 | for (j = 0; j < sg_per_table; j++) { | ||
117 | pg = sg_page(&sg[j]); | ||
118 | if ((pg)) { | ||
119 | __free_page(pg); | ||
120 | page_count++; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | kfree(sg); | ||
125 | } | ||
126 | |||
127 | printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" | ||
128 | " Device ID: %u, pages %u in %u tables total bytes %lu\n", | ||
129 | rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, | ||
130 | rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); | ||
131 | |||
132 | kfree(sg_table); | ||
133 | rd_dev->sg_table_array = NULL; | ||
134 | rd_dev->sg_table_count = 0; | ||
135 | } | ||
136 | |||
137 | |||
138 | /* rd_build_device_space(): | ||
139 | * | ||
140 | * | ||
141 | */ | ||
142 | static int rd_build_device_space(struct rd_dev *rd_dev) | ||
143 | { | ||
144 | u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; | ||
145 | u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / | ||
146 | sizeof(struct scatterlist)); | ||
147 | struct rd_dev_sg_table *sg_table; | ||
148 | struct page *pg; | ||
149 | struct scatterlist *sg; | ||
150 | |||
151 | if (rd_dev->rd_page_count <= 0) { | ||
152 | printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", | ||
153 | rd_dev->rd_page_count); | ||
154 | return -1; | ||
155 | } | ||
156 | total_sg_needed = rd_dev->rd_page_count; | ||
157 | |||
158 | sg_tables = (total_sg_needed / max_sg_per_table) + 1; | ||
159 | |||
160 | sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); | ||
161 | if (!(sg_table)) { | ||
162 | printk(KERN_ERR "Unable to allocate memory for Ramdisk" | ||
163 | " scatterlist tables\n"); | ||
164 | return -1; | ||
165 | } | ||
166 | |||
167 | rd_dev->sg_table_array = sg_table; | ||
168 | rd_dev->sg_table_count = sg_tables; | ||
169 | |||
170 | while (total_sg_needed) { | ||
171 | sg_per_table = (total_sg_needed > max_sg_per_table) ? | ||
172 | max_sg_per_table : total_sg_needed; | ||
173 | |||
174 | sg = kzalloc(sg_per_table * sizeof(struct scatterlist), | ||
175 | GFP_KERNEL); | ||
176 | if (!(sg)) { | ||
177 | printk(KERN_ERR "Unable to allocate scatterlist array" | ||
178 | " for struct rd_dev\n"); | ||
179 | return -1; | ||
180 | } | ||
181 | |||
182 | 		sg_init_table(sg, sg_per_table); | ||
183 | |||
184 | sg_table[i].sg_table = sg; | ||
185 | sg_table[i].rd_sg_count = sg_per_table; | ||
186 | sg_table[i].page_start_offset = page_offset; | ||
187 | sg_table[i++].page_end_offset = (page_offset + sg_per_table) | ||
188 | - 1; | ||
189 | |||
190 | for (j = 0; j < sg_per_table; j++) { | ||
191 | pg = alloc_pages(GFP_KERNEL, 0); | ||
192 | if (!(pg)) { | ||
193 | printk(KERN_ERR "Unable to allocate scatterlist" | ||
194 | " pages for struct rd_dev_sg_table\n"); | ||
195 | return -1; | ||
196 | } | ||
197 | sg_assign_page(&sg[j], pg); | ||
198 | sg[j].length = PAGE_SIZE; | ||
199 | } | ||
200 | |||
201 | page_offset += sg_per_table; | ||
202 | total_sg_needed -= sg_per_table; | ||
203 | } | ||
204 | |||
205 | printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" | ||
206 | " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, | ||
207 | rd_dev->rd_dev_id, rd_dev->rd_page_count, | ||
208 | rd_dev->sg_table_count); | ||
209 | |||
210 | return 0; | ||
211 | } | ||
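
To make the sizing concrete: assuming RD_MAX_ALLOCATION_SIZE is 64 KiB and struct scatterlist is 32 bytes (both values are assumptions here; the real ones live in target_core_rd.h), one table holds 2048 entries, so a 5000-page ramdisk needs three tables. Note the "+ 1" over-allocates one unused table entry when the page count divides evenly, which the release path tolerates. A worked userspace example:

#include <stdio.h>

int main(void)
{
	unsigned int alloc_size = 65536;	/* assumed RD_MAX_ALLOCATION_SIZE */
	unsigned int sg_entry = 32;		/* assumed sizeof(struct scatterlist) */
	unsigned int pages = 5000;		/* rd_page_count for the example */
	unsigned int max_sg_per_table = alloc_size / sg_entry;		/* 2048 */
	unsigned int sg_tables = (pages / max_sg_per_table) + 1;	/* 3 */

	printf("max sg/table: %u, tables: %u\n", max_sg_per_table, sg_tables);
	return 0;
}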
212 | |||
213 | static void *rd_allocate_virtdevice( | ||
214 | struct se_hba *hba, | ||
215 | const char *name, | ||
216 | int rd_direct) | ||
217 | { | ||
218 | struct rd_dev *rd_dev; | ||
219 | struct rd_host *rd_host = hba->hba_ptr; | ||
220 | |||
221 | rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); | ||
222 | if (!(rd_dev)) { | ||
223 | printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); | ||
224 | return NULL; | ||
225 | } | ||
226 | |||
227 | rd_dev->rd_host = rd_host; | ||
228 | rd_dev->rd_direct = rd_direct; | ||
229 | |||
230 | return rd_dev; | ||
231 | } | ||
232 | |||
233 | static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name) | ||
234 | { | ||
235 | return rd_allocate_virtdevice(hba, name, 1); | ||
236 | } | ||
237 | |||
238 | static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) | ||
239 | { | ||
240 | return rd_allocate_virtdevice(hba, name, 0); | ||
241 | } | ||
242 | |||
243 | /* rd_create_virtdevice(): | ||
244 | * | ||
245 | * | ||
246 | */ | ||
247 | static struct se_device *rd_create_virtdevice( | ||
248 | struct se_hba *hba, | ||
249 | struct se_subsystem_dev *se_dev, | ||
250 | void *p, | ||
251 | int rd_direct) | ||
252 | { | ||
253 | struct se_device *dev; | ||
254 | struct se_dev_limits dev_limits; | ||
255 | struct rd_dev *rd_dev = p; | ||
256 | struct rd_host *rd_host = hba->hba_ptr; | ||
257 | int dev_flags = 0; | ||
258 | char prod[16], rev[4]; | ||
259 | |||
260 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | ||
261 | |||
262 | if (rd_build_device_space(rd_dev) < 0) | ||
263 | goto fail; | ||
264 | |||
265 | snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); | ||
266 | snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION : | ||
267 | RD_MCP_VERSION); | ||
268 | |||
269 | dev_limits.limits.logical_block_size = RD_BLOCKSIZE; | ||
270 | dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS; | ||
271 | dev_limits.limits.max_sectors = RD_MAX_SECTORS; | ||
272 | dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; | ||
273 | dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; | ||
274 | |||
275 | dev = transport_add_device_to_core_hba(hba, | ||
276 | (rd_dev->rd_direct) ? &rd_dr_template : | ||
277 | &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, | ||
278 | &dev_limits, prod, rev); | ||
279 | if (!(dev)) | ||
280 | goto fail; | ||
281 | |||
282 | rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; | ||
283 | rd_dev->rd_queue_depth = dev->queue_depth; | ||
284 | |||
285 | printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" | ||
286 | " %u pages in %u tables, %lu total bytes\n", | ||
287 | rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : | ||
288 | "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, | ||
289 | rd_dev->sg_table_count, | ||
290 | (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); | ||
291 | |||
292 | return dev; | ||
293 | |||
294 | fail: | ||
295 | rd_release_device_space(rd_dev); | ||
296 | return NULL; | ||
297 | } | ||
298 | |||
299 | static struct se_device *rd_DIRECT_create_virtdevice( | ||
300 | struct se_hba *hba, | ||
301 | struct se_subsystem_dev *se_dev, | ||
302 | void *p) | ||
303 | { | ||
304 | return rd_create_virtdevice(hba, se_dev, p, 1); | ||
305 | } | ||
306 | |||
307 | static struct se_device *rd_MEMCPY_create_virtdevice( | ||
308 | struct se_hba *hba, | ||
309 | struct se_subsystem_dev *se_dev, | ||
310 | void *p) | ||
311 | { | ||
312 | return rd_create_virtdevice(hba, se_dev, p, 0); | ||
313 | } | ||
314 | |||
315 | /* rd_free_device(): (Part of se_subsystem_api_t template) | ||
316 | * | ||
317 | * | ||
318 | */ | ||
319 | static void rd_free_device(void *p) | ||
320 | { | ||
321 | struct rd_dev *rd_dev = p; | ||
322 | |||
323 | rd_release_device_space(rd_dev); | ||
324 | kfree(rd_dev); | ||
325 | } | ||
326 | |||
327 | static inline struct rd_request *RD_REQ(struct se_task *task) | ||
328 | { | ||
329 | return container_of(task, struct rd_request, rd_task); | ||
330 | } | ||
331 | |||
332 | static struct se_task * | ||
333 | rd_alloc_task(struct se_cmd *cmd) | ||
334 | { | ||
335 | struct rd_request *rd_req; | ||
336 | |||
337 | rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); | ||
338 | if (!rd_req) { | ||
339 | printk(KERN_ERR "Unable to allocate struct rd_request\n"); | ||
340 | return NULL; | ||
341 | } | ||
342 | rd_req->rd_dev = SE_DEV(cmd)->dev_ptr; | ||
343 | |||
344 | return &rd_req->rd_task; | ||
345 | } | ||
346 | |||
347 | /* rd_get_sg_table(): | ||
348 | * | ||
349 | * | ||
350 | */ | ||
351 | static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) | ||
352 | { | ||
353 | u32 i; | ||
354 | struct rd_dev_sg_table *sg_table; | ||
355 | |||
356 | for (i = 0; i < rd_dev->sg_table_count; i++) { | ||
357 | sg_table = &rd_dev->sg_table_array[i]; | ||
358 | if ((sg_table->page_start_offset <= page) && | ||
359 | (sg_table->page_end_offset >= page)) | ||
360 | return sg_table; | ||
361 | } | ||
362 | |||
363 | printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", | ||
364 | page); | ||
365 | |||
366 | return NULL; | ||
367 | } | ||
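
This is a plain linear scan over the per-table page ranges; a self-contained model of the lookup (ranges invented):

#include <stdio.h>

struct demo_table {
	unsigned int start;	/* page_start_offset */
	unsigned int end;	/* page_end_offset */
};

static struct demo_table *lookup(struct demo_table *t, int n, unsigned int page)
{
	int i;

	for (i = 0; i < n; i++)
		if (t[i].start <= page && page <= t[i].end)
			return &t[i];
	return NULL;	/* no table covers this page */
}

int main(void)
{
	struct demo_table tables[] = { { 0, 2047 }, { 2048, 4095 } };
	struct demo_table *t = lookup(tables, 2, 3000);

	printf("page 3000 -> table start %d\n", t ? (int)t->start : -1);
	return 0;
}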
368 | |||
369 | /* rd_MEMCPY_read(): | ||
370 | * | ||
371 | * | ||
372 | */ | ||
373 | static int rd_MEMCPY_read(struct rd_request *req) | ||
374 | { | ||
375 | struct se_task *task = &req->rd_task; | ||
376 | struct rd_dev *dev = req->rd_dev; | ||
377 | struct rd_dev_sg_table *table; | ||
378 | struct scatterlist *sg_d, *sg_s; | ||
379 | void *dst, *src; | ||
380 | u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; | ||
381 | u32 length, page_end = 0, table_sg_end; | ||
382 | u32 rd_offset = req->rd_offset; | ||
383 | |||
384 | table = rd_get_sg_table(dev, req->rd_page); | ||
385 | if (!(table)) | ||
386 | return -1; | ||
387 | |||
388 | table_sg_end = (table->page_end_offset - req->rd_page); | ||
389 | sg_d = task->task_sg; | ||
390 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | ||
391 | #ifdef DEBUG_RAMDISK_MCP | ||
392 | printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" | ||
393 | " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, | ||
394 | req->rd_page, req->rd_offset); | ||
395 | #endif | ||
396 | src_offset = rd_offset; | ||
397 | |||
398 | while (req->rd_size) { | ||
399 | if ((sg_d[i].length - dst_offset) < | ||
400 | (sg_s[j].length - src_offset)) { | ||
401 | length = (sg_d[i].length - dst_offset); | ||
402 | #ifdef DEBUG_RAMDISK_MCP | ||
403 | printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" | ||
404 | " offset: %u sg_s[%d].length: %u\n", i, | ||
405 | &sg_d[i], sg_d[i].length, sg_d[i].offset, j, | ||
406 | sg_s[j].length); | ||
407 | printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" | ||
408 | " src_offset: %u\n", length, dst_offset, | ||
409 | src_offset); | ||
410 | #endif | ||
411 | if (length > req->rd_size) | ||
412 | length = req->rd_size; | ||
413 | |||
414 | dst = sg_virt(&sg_d[i++]) + dst_offset; | ||
415 | if (!dst) | ||
416 | BUG(); | ||
417 | |||
418 | src = sg_virt(&sg_s[j]) + src_offset; | ||
419 | if (!src) | ||
420 | BUG(); | ||
421 | |||
422 | dst_offset = 0; | ||
423 | src_offset = length; | ||
424 | page_end = 0; | ||
425 | } else { | ||
426 | length = (sg_s[j].length - src_offset); | ||
427 | #ifdef DEBUG_RAMDISK_MCP | ||
428 | printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" | ||
429 | " offset: %u sg_s[%d].length: %u\n", i, | ||
430 | &sg_d[i], sg_d[i].length, sg_d[i].offset, | ||
431 | j, sg_s[j].length); | ||
432 | printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" | ||
433 | " src_offset: %u\n", length, dst_offset, | ||
434 | src_offset); | ||
435 | #endif | ||
436 | if (length > req->rd_size) | ||
437 | length = req->rd_size; | ||
438 | |||
439 | dst = sg_virt(&sg_d[i]) + dst_offset; | ||
440 | if (!dst) | ||
441 | BUG(); | ||
442 | |||
443 | if (sg_d[i].length == length) { | ||
444 | i++; | ||
445 | dst_offset = 0; | ||
446 | } else | ||
447 | dst_offset = length; | ||
448 | |||
449 | src = sg_virt(&sg_s[j++]) + src_offset; | ||
450 | if (!src) | ||
451 | BUG(); | ||
452 | |||
453 | src_offset = 0; | ||
454 | page_end = 1; | ||
455 | } | ||
456 | |||
457 | memcpy(dst, src, length); | ||
458 | |||
459 | #ifdef DEBUG_RAMDISK_MCP | ||
460 | printk(KERN_INFO "page: %u, remaining size: %u, length: %u," | ||
461 | " i: %u, j: %u\n", req->rd_page, | ||
462 | (req->rd_size - length), length, i, j); | ||
463 | #endif | ||
464 | req->rd_size -= length; | ||
465 | if (!(req->rd_size)) | ||
466 | return 0; | ||
467 | |||
468 | if (!page_end) | ||
469 | continue; | ||
470 | |||
471 | if (++req->rd_page <= table->page_end_offset) { | ||
472 | #ifdef DEBUG_RAMDISK_MCP | ||
473 | printk(KERN_INFO "page: %u in same page table\n", | ||
474 | req->rd_page); | ||
475 | #endif | ||
476 | continue; | ||
477 | } | ||
478 | #ifdef DEBUG_RAMDISK_MCP | ||
479 | printk(KERN_INFO "getting new page table for page: %u\n", | ||
480 | req->rd_page); | ||
481 | #endif | ||
482 | table = rd_get_sg_table(dev, req->rd_page); | ||
483 | if (!(table)) | ||
484 | return -1; | ||
485 | |||
486 | sg_s = &table->sg_table[j = 0]; | ||
487 | } | ||
488 | |||
489 | return 0; | ||
490 | } | ||
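
The loop above is a general scatterlist-to-scatterlist copy: both lists keep independent cursors, each step copies the smaller of the two remaining segment lengths (capped by the remaining request size), and whichever segment drains first is advanced. A condensed userspace model of the same cursor logic (segment sizes are invented; the caller must guarantee both lists cover size bytes):

#include <stdio.h>
#include <string.h>

struct seg {
	char *buf;
	unsigned int len;
};

static void sg_copy(struct seg *dst, struct seg *src, unsigned int size)
{
	unsigned int i = 0, j = 0, doff = 0, soff = 0;

	while (size) {
		unsigned int len = dst[i].len - doff;

		if (src[j].len - soff < len)	/* source drains first */
			len = src[j].len - soff;
		if (len > size)			/* cap by remaining request */
			len = size;

		memcpy(dst[i].buf + doff, src[j].buf + soff, len);
		size -= len;

		doff += len;
		if (doff == dst[i].len) {	/* dest segment exhausted */
			i++;
			doff = 0;
		}
		soff += len;
		if (soff == src[j].len) {	/* source segment exhausted */
			j++;
			soff = 0;
		}
	}
}

int main(void)
{
	char d1[3], d2[5], s1[4] = "abcd", s2[4] = "efgh";
	struct seg dst[] = { { d1, 3 }, { d2, 5 } };
	struct seg src[] = { { s1, 4 }, { s2, 4 } };

	sg_copy(dst, src, 8);
	printf("%.3s%.5s\n", d1, d2);	/* prints "abcdefgh" */
	return 0;
}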
491 | |||
492 | /* rd_MEMCPY_write(): | ||
493 | * | ||
494 | * | ||
495 | */ | ||
496 | static int rd_MEMCPY_write(struct rd_request *req) | ||
497 | { | ||
498 | struct se_task *task = &req->rd_task; | ||
499 | struct rd_dev *dev = req->rd_dev; | ||
500 | struct rd_dev_sg_table *table; | ||
501 | struct scatterlist *sg_d, *sg_s; | ||
502 | void *dst, *src; | ||
503 | u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; | ||
504 | u32 length, page_end = 0, table_sg_end; | ||
505 | u32 rd_offset = req->rd_offset; | ||
506 | |||
507 | table = rd_get_sg_table(dev, req->rd_page); | ||
508 | if (!(table)) | ||
509 | return -1; | ||
510 | |||
511 | table_sg_end = (table->page_end_offset - req->rd_page); | ||
512 | sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; | ||
513 | sg_s = task->task_sg; | ||
514 | #ifdef DEBUG_RAMDISK_MCP | ||
515 | printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," | ||
516 | " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, | ||
517 | req->rd_page, req->rd_offset); | ||
518 | #endif | ||
519 | dst_offset = rd_offset; | ||
520 | |||
521 | while (req->rd_size) { | ||
522 | if ((sg_s[i].length - src_offset) < | ||
523 | (sg_d[j].length - dst_offset)) { | ||
524 | length = (sg_s[i].length - src_offset); | ||
525 | #ifdef DEBUG_RAMDISK_MCP | ||
526 | printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" | ||
527 | " offset: %d sg_d[%d].length: %u\n", i, | ||
528 | &sg_s[i], sg_s[i].length, sg_s[i].offset, | ||
529 | j, sg_d[j].length); | ||
530 | printk(KERN_INFO "Step 1 - length: %u src_offset: %u" | ||
531 | " dst_offset: %u\n", length, src_offset, | ||
532 | dst_offset); | ||
533 | #endif | ||
534 | if (length > req->rd_size) | ||
535 | length = req->rd_size; | ||
536 | |||
537 | src = sg_virt(&sg_s[i++]) + src_offset; | ||
538 | if (!src) | ||
539 | BUG(); | ||
540 | |||
541 | dst = sg_virt(&sg_d[j]) + dst_offset; | ||
542 | if (!dst) | ||
543 | BUG(); | ||
544 | |||
545 | src_offset = 0; | ||
546 | dst_offset = length; | ||
547 | page_end = 0; | ||
548 | } else { | ||
549 | length = (sg_d[j].length - dst_offset); | ||
550 | #ifdef DEBUG_RAMDISK_MCP | ||
551 | printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" | ||
552 | " offset: %d sg_d[%d].length: %u\n", i, | ||
553 | &sg_s[i], sg_s[i].length, sg_s[i].offset, | ||
554 | j, sg_d[j].length); | ||
555 | printk(KERN_INFO "Step 2 - length: %u src_offset: %u" | ||
556 | " dst_offset: %u\n", length, src_offset, | ||
557 | dst_offset); | ||
558 | #endif | ||
559 | if (length > req->rd_size) | ||
560 | length = req->rd_size; | ||
561 | |||
562 | src = sg_virt(&sg_s[i]) + src_offset; | ||
563 | if (!src) | ||
564 | BUG(); | ||
565 | |||
566 | if (sg_s[i].length == length) { | ||
567 | i++; | ||
568 | src_offset = 0; | ||
569 | } else | ||
570 | src_offset = length; | ||
571 | |||
572 | dst = sg_virt(&sg_d[j++]) + dst_offset; | ||
573 | if (!dst) | ||
574 | BUG(); | ||
575 | |||
576 | dst_offset = 0; | ||
577 | page_end = 1; | ||
578 | } | ||
579 | |||
580 | memcpy(dst, src, length); | ||
581 | |||
582 | #ifdef DEBUG_RAMDISK_MCP | ||
583 | printk(KERN_INFO "page: %u, remaining size: %u, length: %u," | ||
584 | " i: %u, j: %u\n", req->rd_page, | ||
585 | (req->rd_size - length), length, i, j); | ||
586 | #endif | ||
587 | req->rd_size -= length; | ||
588 | if (!(req->rd_size)) | ||
589 | return 0; | ||
590 | |||
591 | if (!page_end) | ||
592 | continue; | ||
593 | |||
594 | if (++req->rd_page <= table->page_end_offset) { | ||
595 | #ifdef DEBUG_RAMDISK_MCP | ||
596 | printk(KERN_INFO "page: %u in same page table\n", | ||
597 | req->rd_page); | ||
598 | #endif | ||
599 | continue; | ||
600 | } | ||
601 | #ifdef DEBUG_RAMDISK_MCP | ||
602 | printk(KERN_INFO "getting new page table for page: %u\n", | ||
603 | req->rd_page); | ||
604 | #endif | ||
605 | table = rd_get_sg_table(dev, req->rd_page); | ||
606 | if (!(table)) | ||
607 | return -1; | ||
608 | |||
609 | sg_d = &table->sg_table[j = 0]; | ||
610 | } | ||
611 | |||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | /* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template) | ||
616 | * | ||
617 | * | ||
618 | */ | ||
619 | static int rd_MEMCPY_do_task(struct se_task *task) | ||
620 | { | ||
621 | struct se_device *dev = task->se_dev; | ||
622 | struct rd_request *req = RD_REQ(task); | ||
623 | unsigned long long lba; | ||
624 | int ret; | ||
625 | |||
626 | req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE; | ||
627 | lba = task->task_lba; | ||
628 | req->rd_offset = (do_div(lba, | ||
629 | (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) * | ||
630 | DEV_ATTRIB(dev)->block_size; | ||
631 | req->rd_size = task->task_size; | ||
632 | |||
633 | if (task->task_data_direction == DMA_FROM_DEVICE) | ||
634 | ret = rd_MEMCPY_read(req); | ||
635 | else | ||
636 | ret = rd_MEMCPY_write(req); | ||
637 | |||
638 | if (ret != 0) | ||
639 | return ret; | ||
640 | |||
641 | task->task_scsi_status = GOOD; | ||
642 | transport_complete_task(task, 1); | ||
643 | |||
644 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
645 | } | ||
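
The arithmetic above converts a logical block address into a ramdisk page index plus a byte offset within that page (do_div() divides in place and returns the remainder; plain % is used below). Worked with assumed 512-byte blocks and 4096-byte pages:

#include <stdio.h>

int main(void)
{
	unsigned long long lba = 9;
	unsigned int block_size = 512, page_size = 4096;
	unsigned int page = (lba * block_size) / page_size;	/* 4608/4096 = 1 */
	unsigned int offset = (lba % (page_size / block_size)) * block_size;

	/* prints "lba 9 -> page 1 offset 512" */
	printf("lba %llu -> page %u offset %u\n", lba, page, offset);
	return 0;
}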
646 | |||
647 | /* rd_DIRECT_with_offset(): | ||
648 | * | ||
649 | * | ||
650 | */ | ||
651 | static int rd_DIRECT_with_offset( | ||
652 | struct se_task *task, | ||
653 | struct list_head *se_mem_list, | ||
654 | u32 *se_mem_cnt, | ||
655 | u32 *task_offset) | ||
656 | { | ||
657 | struct rd_request *req = RD_REQ(task); | ||
658 | struct rd_dev *dev = req->rd_dev; | ||
659 | struct rd_dev_sg_table *table; | ||
660 | struct se_mem *se_mem; | ||
661 | struct scatterlist *sg_s; | ||
662 | u32 j = 0, set_offset = 1; | ||
663 | u32 get_next_table = 0, offset_length, table_sg_end; | ||
664 | |||
665 | table = rd_get_sg_table(dev, req->rd_page); | ||
666 | if (!(table)) | ||
667 | return -1; | ||
668 | |||
669 | table_sg_end = (table->page_end_offset - req->rd_page); | ||
670 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | ||
671 | #ifdef DEBUG_RAMDISK_DR | ||
672 | printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n", | ||
673 | (task->task_data_direction == DMA_TO_DEVICE) ? | ||
674 | "Write" : "Read", | ||
675 | task->task_lba, req->rd_size, req->rd_page, req->rd_offset); | ||
676 | #endif | ||
677 | while (req->rd_size) { | ||
678 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
679 | if (!(se_mem)) { | ||
680 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
681 | return -1; | ||
682 | } | ||
683 | INIT_LIST_HEAD(&se_mem->se_list); | ||
684 | |||
685 | if (set_offset) { | ||
686 | offset_length = sg_s[j].length - req->rd_offset; | ||
687 | if (offset_length > req->rd_size) | ||
688 | offset_length = req->rd_size; | ||
689 | |||
690 | se_mem->se_page = sg_page(&sg_s[j++]); | ||
691 | se_mem->se_off = req->rd_offset; | ||
692 | se_mem->se_len = offset_length; | ||
693 | |||
694 | set_offset = 0; | ||
695 | get_next_table = (j > table_sg_end); | ||
696 | goto check_eot; | ||
697 | } | ||
698 | |||
699 | offset_length = (req->rd_size < req->rd_offset) ? | ||
700 | req->rd_size : req->rd_offset; | ||
701 | |||
702 | se_mem->se_page = sg_page(&sg_s[j]); | ||
703 | se_mem->se_len = offset_length; | ||
704 | |||
705 | set_offset = 1; | ||
706 | |||
707 | check_eot: | ||
708 | #ifdef DEBUG_RAMDISK_DR | ||
709 | printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u" | ||
710 | " se_mem: %p, se_page: %p se_off: %u se_len: %u\n", | ||
711 | req->rd_page, req->rd_size, offset_length, j, se_mem, | ||
712 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | ||
713 | #endif | ||
714 | list_add_tail(&se_mem->se_list, se_mem_list); | ||
715 | (*se_mem_cnt)++; | ||
716 | |||
717 | req->rd_size -= offset_length; | ||
718 | if (!(req->rd_size)) | ||
719 | goto out; | ||
720 | |||
721 | if (!set_offset && !get_next_table) | ||
722 | continue; | ||
723 | |||
724 | if (++req->rd_page <= table->page_end_offset) { | ||
725 | #ifdef DEBUG_RAMDISK_DR | ||
726 | printk(KERN_INFO "page: %u in same page table\n", | ||
727 | req->rd_page); | ||
728 | #endif | ||
729 | continue; | ||
730 | } | ||
731 | #ifdef DEBUG_RAMDISK_DR | ||
732 | printk(KERN_INFO "getting new page table for page: %u\n", | ||
733 | req->rd_page); | ||
734 | #endif | ||
735 | table = rd_get_sg_table(dev, req->rd_page); | ||
736 | if (!(table)) | ||
737 | return -1; | ||
738 | |||
739 | sg_s = &table->sg_table[j = 0]; | ||
740 | } | ||
741 | |||
742 | out: | ||
743 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | ||
744 | #ifdef DEBUG_RAMDISK_DR | ||
745 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", | ||
746 | *se_mem_cnt); | ||
747 | #endif | ||
748 | return 0; | ||
749 | } | ||
750 | |||
751 | /* rd_DIRECT_without_offset(): | ||
752 | * | ||
753 | * | ||
754 | */ | ||
755 | static int rd_DIRECT_without_offset( | ||
756 | struct se_task *task, | ||
757 | struct list_head *se_mem_list, | ||
758 | u32 *se_mem_cnt, | ||
759 | u32 *task_offset) | ||
760 | { | ||
761 | struct rd_request *req = RD_REQ(task); | ||
762 | struct rd_dev *dev = req->rd_dev; | ||
763 | struct rd_dev_sg_table *table; | ||
764 | struct se_mem *se_mem; | ||
765 | struct scatterlist *sg_s; | ||
766 | u32 length, j = 0; | ||
767 | |||
768 | table = rd_get_sg_table(dev, req->rd_page); | ||
769 | if (!(table)) | ||
770 | return -1; | ||
771 | |||
772 | sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; | ||
773 | #ifdef DEBUG_RAMDISK_DR | ||
774 | printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n", | ||
775 | (task->task_data_direction == DMA_TO_DEVICE) ? | ||
776 | "Write" : "Read", | ||
777 | task->task_lba, req->rd_size, req->rd_page); | ||
778 | #endif | ||
779 | while (req->rd_size) { | ||
780 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
781 | if (!(se_mem)) { | ||
782 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
783 | return -1; | ||
784 | } | ||
785 | INIT_LIST_HEAD(&se_mem->se_list); | ||
786 | |||
787 | length = (req->rd_size < sg_s[j].length) ? | ||
788 | req->rd_size : sg_s[j].length; | ||
789 | |||
790 | se_mem->se_page = sg_page(&sg_s[j++]); | ||
791 | se_mem->se_len = length; | ||
792 | |||
793 | #ifdef DEBUG_RAMDISK_DR | ||
794 | printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p," | ||
795 | " se_page: %p se_off: %u se_len: %u\n", req->rd_page, | ||
796 | req->rd_size, j, se_mem, se_mem->se_page, | ||
797 | se_mem->se_off, se_mem->se_len); | ||
798 | #endif | ||
799 | list_add_tail(&se_mem->se_list, se_mem_list); | ||
800 | (*se_mem_cnt)++; | ||
801 | |||
802 | req->rd_size -= length; | ||
803 | if (!(req->rd_size)) | ||
804 | goto out; | ||
805 | |||
806 | if (++req->rd_page <= table->page_end_offset) { | ||
807 | #ifdef DEBUG_RAMDISK_DR | ||
808 | 			printk(KERN_INFO "page: %u in same page table\n", | ||
809 | req->rd_page); | ||
810 | #endif | ||
811 | continue; | ||
812 | } | ||
813 | #ifdef DEBUG_RAMDISK_DR | ||
814 | printk(KERN_INFO "getting new page table for page: %u\n", | ||
815 | req->rd_page); | ||
816 | #endif | ||
817 | table = rd_get_sg_table(dev, req->rd_page); | ||
818 | if (!(table)) | ||
819 | return -1; | ||
820 | |||
821 | sg_s = &table->sg_table[j = 0]; | ||
822 | } | ||
823 | |||
824 | out: | ||
825 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | ||
826 | #ifdef DEBUG_RAMDISK_DR | ||
827 | printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", | ||
828 | *se_mem_cnt); | ||
829 | #endif | ||
830 | return 0; | ||
831 | } | ||
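
Both rd_DIRECT_*() loops above walk the ramdisk one page at a time, fetching the next struct rd_dev_sg_table whenever rd_page passes the current table's page_end_offset. A minimal user-space sketch of that walk, assuming illustrative PAGE_SZ and TABLE_PAGES values rather than the driver's real table geometry:

	#include <stdio.h>

	#define PAGE_SZ		4096
	#define TABLE_PAGES	32	/* pages per sg table, illustrative only */

	int main(void)
	{
		unsigned int page = 62, size = 3 * PAGE_SZ + 100;
		/* last page covered by the table currently holding 'page' */
		unsigned int table_end = ((page / TABLE_PAGES) + 1) * TABLE_PAGES - 1;

		while (size) {
			unsigned int len = size < PAGE_SZ ? size : PAGE_SZ;

			printf("map page %u, len %u\n", page, len);
			size -= len;
			if (size && ++page > table_end) {
				table_end += TABLE_PAGES;
				printf("switch to new page table (ends at page %u)\n",
				       table_end);
			}
		}
		return 0;
	}

With these numbers the walk maps pages 62-65 and crosses one table boundary between pages 63 and 64, which corresponds to the "getting new page table" case in the loops above.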
832 | |||
833 | /* rd_DIRECT_do_se_mem_map(): | ||
834 | * | ||
835 | * | ||
836 | */ | ||
837 | static int rd_DIRECT_do_se_mem_map( | ||
838 | struct se_task *task, | ||
839 | struct list_head *se_mem_list, | ||
840 | void *in_mem, | ||
841 | struct se_mem *in_se_mem, | ||
842 | struct se_mem **out_se_mem, | ||
843 | u32 *se_mem_cnt, | ||
844 | u32 *task_offset_in) | ||
845 | { | ||
846 | struct se_cmd *cmd = task->task_se_cmd; | ||
847 | struct rd_request *req = RD_REQ(task); | ||
848 | u32 task_offset = *task_offset_in; | ||
849 | unsigned long long lba; | ||
850 | int ret; | ||
851 | |||
852 | req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) / | ||
853 | PAGE_SIZE); | ||
854 | lba = task->task_lba; | ||
855 | req->rd_offset = (do_div(lba, | ||
856 | (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) * | ||
857 | DEV_ATTRIB(task->se_dev)->block_size; | ||
858 | req->rd_size = task->task_size; | ||
859 | |||
860 | if (req->rd_offset) | ||
861 | ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt, | ||
862 | task_offset_in); | ||
863 | else | ||
864 | ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt, | ||
865 | task_offset_in); | ||
866 | |||
867 | if (ret < 0) | ||
868 | return ret; | ||
869 | |||
870 | if (CMD_TFO(cmd)->task_sg_chaining == 0) | ||
871 | return 0; | ||
872 | /* | ||
873 | * Currently prevent writers from multiple HW fabrics doing | ||
874 | * pci_map_sg() to RD_DR's internal scatterlist memory. | ||
875 | */ | ||
876 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
877 | printk(KERN_ERR "DMA_TO_DEVICE not supported for" | ||
878 | " RAMDISK_DR with task_sg_chaining=1\n"); | ||
879 | return -1; | ||
880 | } | ||
881 | /* | ||
882 | * Special case for if task_sg_chaining is enabled, then | ||
883 | * we setup struct se_task->task_sg[], as it will be used by | ||
884 | * transport_do_task_sg_chain() for creating chained SGLs | ||
885 | * across multiple struct se_task->task_sg[]. | ||
886 | */ | ||
887 | if (!(transport_calc_sg_num(task, | ||
888 | list_entry(T_TASK(cmd)->t_mem_list->next, | ||
889 | struct se_mem, se_list), | ||
890 | task_offset))) | ||
891 | return -1; | ||
892 | |||
893 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | ||
894 | list_entry(T_TASK(cmd)->t_mem_list->next, | ||
895 | struct se_mem, se_list), | ||
896 | out_se_mem, se_mem_cnt, task_offset_in); | ||
897 | } | ||
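
rd_DIRECT_do_se_mem_map() converts the task LBA into a ramdisk page index (rd_page) and an intra-page byte offset (rd_offset). A worked example of that arithmetic as standalone C, assuming a 512-byte block size and 4096-byte pages:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long lba = 9;	/* example task->task_lba */
		unsigned int block_size = 512;	/* DEV_ATTRIB()->block_size */
		unsigned int page_size = 4096;	/* PAGE_SIZE */

		/* same math as above; do_div() is the kernel's in-place
		 * 64-bit division helper, which returns the remainder */
		unsigned int rd_page = (lba * block_size) / page_size;
		unsigned int rd_offset =
			(lba % (page_size / block_size)) * block_size;

		printf("page %u, offset %u\n", rd_page, rd_offset);
		return 0;
	}

LBA 9 lands 512 bytes into page 1, so the with_offset variant is taken; an LBA that is a multiple of 8 would give rd_offset == 0 and take the without_offset path.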
898 | |||
899 | /* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template) | ||
900 | * | ||
901 | * | ||
902 | */ | ||
903 | static int rd_DIRECT_do_task(struct se_task *task) | ||
904 | { | ||
905 | /* | ||
906 | * At this point the locally allocated RD tables have been mapped | ||
907 | * to struct se_mem elements in rd_DIRECT_do_se_mem_map(). | ||
908 | */ | ||
909 | task->task_scsi_status = GOOD; | ||
910 | transport_complete_task(task, 1); | ||
911 | |||
912 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | ||
913 | } | ||
914 | |||
915 | /* rd_free_task(): (Part of se_subsystem_api_t template) | ||
916 | * | ||
917 | * | ||
918 | */ | ||
919 | static void rd_free_task(struct se_task *task) | ||
920 | { | ||
921 | kfree(RD_REQ(task)); | ||
922 | } | ||
923 | |||
924 | enum { | ||
925 | Opt_rd_pages, Opt_err | ||
926 | }; | ||
927 | |||
928 | static match_table_t tokens = { | ||
929 | {Opt_rd_pages, "rd_pages=%d"}, | ||
930 | {Opt_err, NULL} | ||
931 | }; | ||
932 | |||
933 | static ssize_t rd_set_configfs_dev_params( | ||
934 | struct se_hba *hba, | ||
935 | struct se_subsystem_dev *se_dev, | ||
936 | const char *page, | ||
937 | ssize_t count) | ||
938 | { | ||
939 | struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; | ||
940 | char *orig, *ptr, *opts; | ||
941 | substring_t args[MAX_OPT_ARGS]; | ||
942 | int ret = 0, arg, token; | ||
943 | |||
944 | opts = kstrdup(page, GFP_KERNEL); | ||
945 | if (!opts) | ||
946 | return -ENOMEM; | ||
947 | |||
948 | orig = opts; | ||
949 | |||
950 | while ((ptr = strsep(&opts, ",")) != NULL) { | ||
951 | if (!*ptr) | ||
952 | continue; | ||
953 | |||
954 | token = match_token(ptr, tokens, args); | ||
955 | switch (token) { | ||
956 | case Opt_rd_pages: | ||
957 | match_int(args, &arg); | ||
958 | rd_dev->rd_page_count = arg; | ||
959 | printk(KERN_INFO "RAMDISK: Referencing Page" | ||
960 | " Count: %u\n", rd_dev->rd_page_count); | ||
961 | rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; | ||
962 | break; | ||
963 | default: | ||
964 | break; | ||
965 | } | ||
966 | } | ||
967 | |||
968 | kfree(orig); | ||
969 | return (!ret) ? count : ret; | ||
970 | } | ||
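
The parser above splits the configfs input on commas and matches each token against the "rd_pages=%d" pattern. A user-space sketch of the same strsep() loop, with the kernel-only match_token()/match_int() helpers approximated by sscanf():

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *opts = strdup("rd_pages=1024");	/* as written to configfs */
		char *orig = opts, *ptr;
		unsigned int rd_page_count;

		while ((ptr = strsep(&opts, ",")) != NULL) {
			if (!*ptr)
				continue;
			if (sscanf(ptr, "rd_pages=%u", &rd_page_count) == 1)
				printf("RAMDISK: Referencing Page Count: %u\n",
				       rd_page_count);
		}
		free(orig);
		return 0;
	}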
971 | |||
972 | static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) | ||
973 | { | ||
974 | struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; | ||
975 | |||
976 | if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { | ||
977 | printk(KERN_ERR "Missing rd_pages= parameter\n"); | ||
978 | return -1; | ||
979 | } | ||
980 | |||
981 | return 0; | ||
982 | } | ||
983 | |||
984 | static ssize_t rd_show_configfs_dev_params( | ||
985 | struct se_hba *hba, | ||
986 | struct se_subsystem_dev *se_dev, | ||
987 | char *b) | ||
988 | { | ||
989 | struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; | ||
990 | ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n", | ||
991 | rd_dev->rd_dev_id, (rd_dev->rd_direct) ? | ||
992 | "rd_direct" : "rd_mcp"); | ||
993 | bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" | ||
994 | " SG_table_count: %u\n", rd_dev->rd_page_count, | ||
995 | PAGE_SIZE, rd_dev->sg_table_count); | ||
996 | return bl; | ||
997 | } | ||
998 | |||
999 | /* rd_get_cdb(): (Part of se_subsystem_api_t template) | ||
1000 | * | ||
1001 | * | ||
1002 | */ | ||
1003 | static unsigned char *rd_get_cdb(struct se_task *task) | ||
1004 | { | ||
1005 | struct rd_request *req = RD_REQ(task); | ||
1006 | |||
1007 | return req->rd_scsi_cdb; | ||
1008 | } | ||
1009 | |||
1010 | static u32 rd_get_device_rev(struct se_device *dev) | ||
1011 | { | ||
1012 | return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ | ||
1013 | } | ||
1014 | |||
1015 | static u32 rd_get_device_type(struct se_device *dev) | ||
1016 | { | ||
1017 | return TYPE_DISK; | ||
1018 | } | ||
1019 | |||
1020 | static sector_t rd_get_blocks(struct se_device *dev) | ||
1021 | { | ||
1022 | struct rd_dev *rd_dev = dev->dev_ptr; | ||
1023 | unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / | ||
1024 | DEV_ATTRIB(dev)->block_size) - 1; | ||
1025 | |||
1026 | return blocks_long; | ||
1027 | } | ||
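
rd_get_blocks() reports the highest addressable LBA rather than the block count, hence the trailing "- 1". A worked example of the same expression, assuming rd_pages=1024, 4096-byte pages and the 512-byte RD_BLOCKSIZE:

	/* (1024 pages * 4096 bytes) / 512 bytes-per-block = 8192 blocks,
	 * so the last addressable LBA (as reported to READ CAPACITY)
	 * is 8191. */
	static unsigned long long rd_blocks_example(void)
	{
		unsigned long long rd_page_count = 1024;	/* rd_pages= */
		unsigned long page_size = 4096;			/* PAGE_SIZE */
		unsigned int block_size = 512;			/* RD_BLOCKSIZE */

		return (rd_page_count * page_size) / block_size - 1;
	}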
1028 | |||
1029 | static struct se_subsystem_api rd_dr_template = { | ||
1030 | .name = "rd_dr", | ||
1031 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, | ||
1032 | .attach_hba = rd_attach_hba, | ||
1033 | .detach_hba = rd_detach_hba, | ||
1034 | .allocate_virtdevice = rd_DIRECT_allocate_virtdevice, | ||
1035 | .create_virtdevice = rd_DIRECT_create_virtdevice, | ||
1036 | .free_device = rd_free_device, | ||
1037 | .alloc_task = rd_alloc_task, | ||
1038 | .do_task = rd_DIRECT_do_task, | ||
1039 | .free_task = rd_free_task, | ||
1040 | .check_configfs_dev_params = rd_check_configfs_dev_params, | ||
1041 | .set_configfs_dev_params = rd_set_configfs_dev_params, | ||
1042 | .show_configfs_dev_params = rd_show_configfs_dev_params, | ||
1043 | .get_cdb = rd_get_cdb, | ||
1044 | .get_device_rev = rd_get_device_rev, | ||
1045 | .get_device_type = rd_get_device_type, | ||
1046 | .get_blocks = rd_get_blocks, | ||
1047 | .do_se_mem_map = rd_DIRECT_do_se_mem_map, | ||
1048 | }; | ||
1049 | |||
1050 | static struct se_subsystem_api rd_mcp_template = { | ||
1051 | .name = "rd_mcp", | ||
1052 | .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, | ||
1053 | .attach_hba = rd_attach_hba, | ||
1054 | .detach_hba = rd_detach_hba, | ||
1055 | .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice, | ||
1056 | .create_virtdevice = rd_MEMCPY_create_virtdevice, | ||
1057 | .free_device = rd_free_device, | ||
1058 | .alloc_task = rd_alloc_task, | ||
1059 | .do_task = rd_MEMCPY_do_task, | ||
1060 | .free_task = rd_free_task, | ||
1061 | .check_configfs_dev_params = rd_check_configfs_dev_params, | ||
1062 | .set_configfs_dev_params = rd_set_configfs_dev_params, | ||
1063 | .show_configfs_dev_params = rd_show_configfs_dev_params, | ||
1064 | .get_cdb = rd_get_cdb, | ||
1065 | .get_device_rev = rd_get_device_rev, | ||
1066 | .get_device_type = rd_get_device_type, | ||
1067 | .get_blocks = rd_get_blocks, | ||
1068 | }; | ||
1069 | |||
1070 | int __init rd_module_init(void) | ||
1071 | { | ||
1072 | int ret; | ||
1073 | |||
1074 | ret = transport_subsystem_register(&rd_dr_template); | ||
1075 | if (ret < 0) | ||
1076 | return ret; | ||
1077 | |||
1078 | ret = transport_subsystem_register(&rd_mcp_template); | ||
1079 | if (ret < 0) { | ||
1080 | transport_subsystem_release(&rd_dr_template); | ||
1081 | return ret; | ||
1082 | } | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | void rd_module_exit(void) | ||
1088 | { | ||
1089 | transport_subsystem_release(&rd_dr_template); | ||
1090 | transport_subsystem_release(&rd_mcp_template); | ||
1091 | } | ||
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h new file mode 100644 index 000000000000..13badfbaf9c0 --- /dev/null +++ b/drivers/target/target_core_rd.h | |||
@@ -0,0 +1,73 @@ | |||
1 | #ifndef TARGET_CORE_RD_H | ||
2 | #define TARGET_CORE_RD_H | ||
3 | |||
4 | #define RD_HBA_VERSION "v4.0" | ||
5 | #define RD_DR_VERSION "4.0" | ||
6 | #define RD_MCP_VERSION "4.0" | ||
7 | |||
8 | /* Largest piece of memory kmalloc can allocate */ | ||
9 | #define RD_MAX_ALLOCATION_SIZE 65536 | ||
10 | /* Maximum queue depth for the Ramdisk HBA */ | ||
11 | #define RD_HBA_QUEUE_DEPTH 256 | ||
12 | #define RD_DEVICE_QUEUE_DEPTH 32 | ||
13 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 | ||
14 | #define RD_BLOCKSIZE 512 | ||
15 | #define RD_MAX_SECTORS 1024 | ||
16 | |||
17 | extern struct kmem_cache *se_mem_cache; | ||
18 | |||
19 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ | ||
20 | int __init rd_module_init(void); | ||
21 | void rd_module_exit(void); | ||
22 | |||
23 | #define RRF_EMULATE_CDB 0x01 | ||
24 | #define RRF_GOT_LBA 0x02 | ||
25 | |||
26 | struct rd_request { | ||
27 | struct se_task rd_task; | ||
28 | |||
29 | /* SCSI CDB from iSCSI Command PDU */ | ||
30 | unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | ||
31 | /* Offset from start of page */ | ||
32 | u32 rd_offset; | ||
33 | /* Starting page in Ramdisk for request */ | ||
34 | u32 rd_page; | ||
35 | /* Total number of pages needed for request */ | ||
36 | u32 rd_page_count; | ||
37 | /* Remaining request size in bytes */ | ||
38 | u32 rd_size; | ||
39 | /* Ramdisk device */ | ||
40 | struct rd_dev *rd_dev; | ||
41 | } ____cacheline_aligned; | ||
42 | |||
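
struct se_task is embedded as the first member of struct rd_request, so the driver can recover its private request from the generic task pointer. The RD_REQ() accessor used throughout target_core_rd.c is defined earlier in that file; a sketch of how such an accessor can be built from this layout (the helper name here is hypothetical):

	/* Recover the rd_request wrapping a given se_task. container_of()
	 * works for any member position; since rd_task is also the first
	 * member, a plain cast would behave identically. */
	static inline struct rd_request *rd_req_from_task(struct se_task *task)
	{
		return container_of(task, struct rd_request, rd_task);
	}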
43 | struct rd_dev_sg_table { | ||
44 | u32 page_start_offset; | ||
45 | u32 page_end_offset; | ||
46 | u32 rd_sg_count; | ||
47 | struct scatterlist *sg_table; | ||
48 | } ____cacheline_aligned; | ||
49 | |||
50 | #define RDF_HAS_PAGE_COUNT 0x01 | ||
51 | |||
52 | struct rd_dev { | ||
53 | int rd_direct; | ||
54 | u32 rd_flags; | ||
55 | /* Unique Ramdisk Device ID in Ramdisk HBA */ | ||
56 | u32 rd_dev_id; | ||
57 | /* Total page count for ramdisk device */ | ||
58 | u32 rd_page_count; | ||
59 | /* Number of SG tables in sg_table_array */ | ||
60 | u32 sg_table_count; | ||
61 | u32 rd_queue_depth; | ||
62 | /* Array of struct rd_dev_sg_table containing scatterlists */ | ||
63 | struct rd_dev_sg_table *sg_table_array; | ||
64 | /* Ramdisk HBA device is connected to */ | ||
65 | struct rd_host *rd_host; | ||
66 | } ____cacheline_aligned; | ||
67 | |||
68 | struct rd_host { | ||
69 | u32 rd_host_dev_id_count; | ||
70 | u32 rd_host_id; /* Unique Ramdisk Host ID */ | ||
71 | } ____cacheline_aligned; | ||
72 | |||
73 | #endif /* TARGET_CORE_RD_H */ | ||
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c new file mode 100644 index 000000000000..dc6fed037ab3 --- /dev/null +++ b/drivers/target/target_core_scdb.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_scdb.c | ||
3 | * | ||
4 | * This file contains the generic target engine Split CDB related functions. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/net.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <scsi/scsi.h> | ||
32 | #include <asm/unaligned.h> | ||
33 | |||
34 | #include <target/target_core_base.h> | ||
35 | #include <target/target_core_transport.h> | ||
36 | |||
37 | #include "target_core_scdb.h" | ||
38 | |||
39 | /* split_cdb_XX_6(): | ||
40 | * | ||
41 | * 21-bit LBA w/ 8-bit SECTORS | ||
42 | */ | ||
43 | void split_cdb_XX_6( | ||
44 | unsigned long long lba, | ||
45 | u32 *sectors, | ||
46 | unsigned char *cdb) | ||
47 | { | ||
48 | cdb[1] = (lba >> 16) & 0x1f; | ||
49 | cdb[2] = (lba >> 8) & 0xff; | ||
50 | cdb[3] = lba & 0xff; | ||
51 | cdb[4] = *sectors & 0xff; | ||
52 | } | ||
53 | |||
54 | /* split_cdb_XX_10(): | ||
55 | * | ||
56 | * 32-bit LBA w/ 16-bit SECTORS | ||
57 | */ | ||
58 | void split_cdb_XX_10( | ||
59 | unsigned long long lba, | ||
60 | u32 *sectors, | ||
61 | unsigned char *cdb) | ||
62 | { | ||
63 | put_unaligned_be32(lba, &cdb[2]); | ||
64 | put_unaligned_be16(*sectors, &cdb[7]); | ||
65 | } | ||
66 | |||
67 | /* split_cdb_XX_12(): | ||
68 | * | ||
69 | * 32-bit LBA w/ 32-bit SECTORS | ||
70 | */ | ||
71 | void split_cdb_XX_12( | ||
72 | unsigned long long lba, | ||
73 | u32 *sectors, | ||
74 | unsigned char *cdb) | ||
75 | { | ||
76 | put_unaligned_be32(lba, &cdb[2]); | ||
77 | put_unaligned_be32(*sectors, &cdb[6]); | ||
78 | } | ||
79 | |||
80 | /* split_cdb_XX_16(): | ||
81 | * | ||
82 | * 64-bit LBA w/ 32-bit SECTORS | ||
83 | */ | ||
84 | void split_cdb_XX_16( | ||
85 | unsigned long long lba, | ||
86 | u32 *sectors, | ||
87 | unsigned char *cdb) | ||
88 | { | ||
89 | put_unaligned_be64(lba, &cdb[2]); | ||
90 | put_unaligned_be32(*sectors, &cdb[10]); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * split_cdb_XX_32(): | ||
95 | * | ||
96 | * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 | ||
97 | */ | ||
98 | void split_cdb_XX_32( | ||
99 | unsigned long long lba, | ||
100 | u32 *sectors, | ||
101 | unsigned char *cdb) | ||
102 | { | ||
103 | put_unaligned_be64(lba, &cdb[12]); | ||
104 | put_unaligned_be32(*sectors, &cdb[28]); | ||
105 | } | ||
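
These helpers patch only the LBA and transfer-length fields, leaving the opcode and control bytes to the caller. A sketch of building a READ_10 CDB with split_cdb_XX_10() as defined above, assuming the READ_10 (0x28) opcode from <scsi/scsi.h>:

	static void example_build_read_10(unsigned char cdb[10])
	{
		u32 sectors = 16;

		memset(cdb, 0, 10);
		cdb[0] = READ_10;
		split_cdb_XX_10(0x12345678ULL, &sectors, cdb);
		/* cdb[2..5] = 12 34 56 78 (big-endian LBA),
		 * cdb[7..8] = 00 10 (big-endian transfer length) */
	}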
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h new file mode 100644 index 000000000000..98cd1c01ed83 --- /dev/null +++ b/drivers/target/target_core_scdb.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef TARGET_CORE_SCDB_H | ||
2 | #define TARGET_CORE_SCDB_H | ||
3 | |||
4 | extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *); | ||
5 | extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *); | ||
6 | extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *); | ||
7 | extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *); | ||
8 | extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *); | ||
9 | |||
10 | #endif /* TARGET_CORE_SCDB_H */ | ||
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c new file mode 100644 index 000000000000..158cecbec718 --- /dev/null +++ b/drivers/target/target_core_tmr.c | |||
@@ -0,0 +1,404 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_tmr.c | ||
3 | * | ||
4 | * This file contains SPC-3 task management infrastructure | ||
5 | * | ||
6 | * Copyright (c) 2009,2010 Rising Tide Systems | ||
7 | * Copyright (c) 2009,2010 Linux-iSCSI.org | ||
8 | * | ||
9 | * Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | ******************************************************************************/ | ||
26 | |||
27 | #include <linux/version.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/list.h> | ||
31 | #include <scsi/scsi.h> | ||
32 | #include <scsi/scsi_cmnd.h> | ||
33 | |||
34 | #include <target/target_core_base.h> | ||
35 | #include <target/target_core_device.h> | ||
36 | #include <target/target_core_tmr.h> | ||
37 | #include <target/target_core_transport.h> | ||
38 | #include <target/target_core_fabric_ops.h> | ||
39 | #include <target/target_core_configfs.h> | ||
40 | |||
41 | #include "target_core_alua.h" | ||
42 | #include "target_core_pr.h" | ||
43 | |||
44 | #define DEBUG_LUN_RESET | ||
45 | #ifdef DEBUG_LUN_RESET | ||
46 | #define DEBUG_LR(x...) printk(KERN_INFO x) | ||
47 | #else | ||
48 | #define DEBUG_LR(x...) | ||
49 | #endif | ||
50 | |||
51 | struct se_tmr_req *core_tmr_alloc_req( | ||
52 | struct se_cmd *se_cmd, | ||
53 | void *fabric_tmr_ptr, | ||
54 | u8 function) | ||
55 | { | ||
56 | struct se_tmr_req *tmr; | ||
57 | |||
58 | tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL); | ||
59 | if (!(tmr)) { | ||
60 | printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); | ||
61 | return ERR_PTR(-ENOMEM); | ||
62 | } | ||
63 | tmr->task_cmd = se_cmd; | ||
64 | tmr->fabric_tmr_ptr = fabric_tmr_ptr; | ||
65 | tmr->function = function; | ||
66 | INIT_LIST_HEAD(&tmr->tmr_list); | ||
67 | |||
68 | return tmr; | ||
69 | } | ||
70 | EXPORT_SYMBOL(core_tmr_alloc_req); | ||
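
A sketch of how a fabric module might call core_tmr_alloc_req() on receiving a LUN reset; my_fabric_tmr is a made-up fabric-private cookie, and TMR_LUN_RESET is assumed to be the function code defined in target_core_tmr.h:

	struct se_tmr_req *tmr;

	tmr = core_tmr_alloc_req(se_cmd, my_fabric_tmr, TMR_LUN_RESET);
	if (IS_ERR(tmr))
		return PTR_ERR(tmr);
	se_cmd->se_tmr_req = tmr;	/* assumed linkage; see target core headers */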
71 | |||
72 | void core_tmr_release_req( | ||
73 | struct se_tmr_req *tmr) | ||
74 | { | ||
75 | struct se_device *dev = tmr->tmr_dev; | ||
76 | |||
77 | spin_lock(&dev->se_tmr_lock); | ||
78 | list_del(&tmr->tmr_list); | ||
79 | kmem_cache_free(se_tmr_req_cache, tmr); | ||
80 | spin_unlock(&dev->se_tmr_lock); | ||
81 | } | ||
82 | |||
83 | static void core_tmr_handle_tas_abort( | ||
84 | struct se_node_acl *tmr_nacl, | ||
85 | struct se_cmd *cmd, | ||
86 | int tas, | ||
87 | int fe_count) | ||
88 | { | ||
89 | if (!(fe_count)) { | ||
90 | transport_cmd_finish_abort(cmd, 1); | ||
91 | return; | ||
92 | } | ||
93 | /* | ||
94 | * TASK ABORTED status (TAS) bit support | ||
95 | */ | ||
96 | if (((tmr_nacl != NULL) && | ||
97 | (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) | ||
98 | transport_send_task_abort(cmd); | ||
99 | |||
100 | transport_cmd_finish_abort(cmd, 0); | ||
101 | } | ||
102 | |||
103 | int core_tmr_lun_reset( | ||
104 | struct se_device *dev, | ||
105 | struct se_tmr_req *tmr, | ||
106 | struct list_head *preempt_and_abort_list, | ||
107 | struct se_cmd *prout_cmd) | ||
108 | { | ||
109 | struct se_cmd *cmd; | ||
110 | struct se_queue_req *qr, *qr_tmp; | ||
111 | struct se_node_acl *tmr_nacl = NULL; | ||
112 | struct se_portal_group *tmr_tpg = NULL; | ||
113 | struct se_queue_obj *qobj = dev->dev_queue_obj; | ||
114 | struct se_tmr_req *tmr_p, *tmr_pp; | ||
115 | struct se_task *task, *task_tmp; | ||
116 | unsigned long flags; | ||
117 | int fe_count, state, tas; | ||
118 | /* | ||
119 | * TASK_ABORTED status bit, this is configurable via ConfigFS | ||
120 | * struct se_device attributes. spc4r17 section 7.4.6 Control mode page | ||
121 | * | ||
122 | * A task aborted status (TAS) bit set to zero specifies that aborted | ||
123 | * tasks shall be terminated by the device server without any response | ||
124 | * to the application client. A TAS bit set to one specifies that tasks | ||
125 | * aborted by the actions of an I_T nexus other than the I_T nexus on | ||
126 | * which the command was received shall be completed with TASK ABORTED | ||
127 | * status (see SAM-4). | ||
128 | */ | ||
129 | tas = DEV_ATTRIB(dev)->emulate_tas; | ||
130 | /* | ||
131 | * Determine if this se_tmr is coming from a $FABRIC_MOD | ||
132 | * or struct se_device passthrough.. | ||
133 | */ | ||
134 | if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { | ||
135 | tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; | ||
136 | tmr_tpg = tmr->task_cmd->se_sess->se_tpg; | ||
137 | if (tmr_nacl && tmr_tpg) { | ||
138 | DEBUG_LR("LUN_RESET: TMR caller fabric: %s" | ||
139 | " initiator port %s\n", | ||
140 | TPG_TFO(tmr_tpg)->get_fabric_name(), | ||
141 | tmr_nacl->initiatorname); | ||
142 | } | ||
143 | } | ||
144 | DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", | ||
145 | (preempt_and_abort_list) ? "Preempt" : "TMR", | ||
146 | TRANSPORT(dev)->name, tas); | ||
147 | /* | ||
148 | * Release all pending and outgoing TMRs aside from the received | ||
149 | * LUN_RESET tmr.. | ||
150 | */ | ||
151 | spin_lock(&dev->se_tmr_lock); | ||
152 | list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { | ||
153 | /* | ||
154 | * Allow the received TMR to return with FUNCTION_COMPLETE. | ||
155 | */ | ||
156 | if (tmr && (tmr_p == tmr)) | ||
157 | continue; | ||
158 | |||
159 | cmd = tmr_p->task_cmd; | ||
160 | if (!(cmd)) { | ||
161 | printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); | ||
162 | continue; | ||
163 | } | ||
164 | /* | ||
165 | * If this function was called with a valid pr_res_key | ||
166 | * parameter (e.g., for the PROUT PREEMPT_AND_ABORT service | ||
167 | * action), skip TMRs whose registration key does not match. | ||
168 | */ | ||
169 | if ((preempt_and_abort_list != NULL) && | ||
170 | (core_scsi3_check_cdb_abort_and_preempt( | ||
171 | preempt_and_abort_list, cmd) != 0)) | ||
172 | continue; | ||
173 | spin_unlock(&dev->se_tmr_lock); | ||
174 | |||
175 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
176 | if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) { | ||
177 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
178 | spin_lock(&dev->se_tmr_lock); | ||
179 | continue; | ||
180 | } | ||
181 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { | ||
182 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
183 | spin_lock(&dev->se_tmr_lock); | ||
184 | continue; | ||
185 | } | ||
186 | DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," | ||
187 | " Response: 0x%02x, t_state: %d\n", | ||
188 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, | ||
189 | tmr_p->function, tmr_p->response, cmd->t_state); | ||
190 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
191 | |||
192 | transport_cmd_finish_abort_tmr(cmd); | ||
193 | spin_lock(&dev->se_tmr_lock); | ||
194 | } | ||
195 | spin_unlock(&dev->se_tmr_lock); | ||
196 | /* | ||
197 | * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. | ||
198 | * This is following sam4r17, section 5.6 Aborting commands, Table 38 | ||
199 | * for TMR LUN_RESET: | ||
200 | * | ||
201 | * a) "Yes" indicates that each command that is aborted on an I_T nexus | ||
202 | * other than the one that caused the SCSI device condition is | ||
203 | * completed with TASK ABORTED status, if the TAS bit is set to one in | ||
204 | * the Control mode page (see SPC-4). "No" indicates that no status is | ||
205 | * returned for aborted commands. | ||
206 | * | ||
207 | * d) If the logical unit reset is caused by a particular I_T nexus | ||
208 | * (e.g., by a LOGICAL UNIT RESET task management function), then "yes" | ||
209 | * (TASK_ABORTED status) applies. | ||
210 | * | ||
211 | * Otherwise (e.g., if triggered by a hard reset), "no" | ||
212 | * (no TASK_ABORTED SAM status) applies. | ||
213 | * | ||
214 | * Note that this seems to be independent of TAS (Task Aborted Status) | ||
215 | * in the Control Mode Page. | ||
216 | */ | ||
217 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
218 | list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, | ||
219 | t_state_list) { | ||
220 | if (!(TASK_CMD(task))) { | ||
221 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | ||
222 | continue; | ||
223 | } | ||
224 | cmd = TASK_CMD(task); | ||
225 | |||
226 | if (!T_TASK(cmd)) { | ||
227 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | ||
228 | " %p ITT: 0x%08x\n", task, cmd, | ||
229 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
230 | continue; | ||
231 | } | ||
232 | /* | ||
233 | * For PREEMPT_AND_ABORT usage, only process commands | ||
234 | * with a matching reservation key. | ||
235 | */ | ||
236 | if ((preempt_and_abort_list != NULL) && | ||
237 | (core_scsi3_check_cdb_abort_and_preempt( | ||
238 | preempt_and_abort_list, cmd) != 0)) | ||
239 | continue; | ||
240 | /* | ||
241 | * Not aborting PROUT PREEMPT_AND_ABORT CDB.. | ||
242 | */ | ||
243 | if (prout_cmd == cmd) | ||
244 | continue; | ||
245 | |||
246 | list_del(&task->t_state_list); | ||
247 | atomic_set(&task->task_state_active, 0); | ||
248 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
249 | |||
250 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
251 | DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" | ||
252 | " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" | ||
253 | "def_t_state: %d/%d cdb: 0x%02x\n", | ||
254 | (preempt_and_abort_list) ? "Preempt" : "", cmd, task, | ||
255 | CMD_TFO(cmd)->get_task_tag(cmd), 0, | ||
256 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | ||
257 | cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]); | ||
258 | DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" | ||
259 | " t_task_cdbs: %d t_task_cdbs_left: %d" | ||
260 | " t_task_cdbs_sent: %d -- t_transport_active: %d" | ||
261 | " t_transport_stop: %d t_transport_sent: %d\n", | ||
262 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key, | ||
263 | T_TASK(cmd)->t_task_cdbs, | ||
264 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | ||
265 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | ||
266 | atomic_read(&T_TASK(cmd)->t_transport_active), | ||
267 | atomic_read(&T_TASK(cmd)->t_transport_stop), | ||
268 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | ||
269 | |||
270 | if (atomic_read(&task->task_active)) { | ||
271 | atomic_set(&task->task_stop, 1); | ||
272 | spin_unlock_irqrestore( | ||
273 | &T_TASK(cmd)->t_state_lock, flags); | ||
274 | |||
275 | DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" | ||
276 | " for dev: %p\n", task, dev); | ||
277 | wait_for_completion(&task->task_stop_comp); | ||
278 | DEBUG_LR("LUN_RESET Completed task: %p shutdown for" | ||
279 | " dev: %p\n", task, dev); | ||
280 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
281 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | ||
282 | |||
283 | atomic_set(&task->task_active, 0); | ||
284 | atomic_set(&task->task_stop, 0); | ||
285 | } | ||
286 | __transport_stop_task_timer(task, &flags); | ||
287 | |||
288 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | ||
289 | spin_unlock_irqrestore( | ||
290 | &T_TASK(cmd)->t_state_lock, flags); | ||
291 | DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" | ||
292 | " t_task_cdbs_ex_left: %d\n", task, dev, | ||
293 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | ||
294 | |||
295 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
296 | continue; | ||
297 | } | ||
298 | fe_count = atomic_read(&T_TASK(cmd)->t_fe_count); | ||
299 | |||
300 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | ||
301 | DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" | ||
302 | " task: %p, t_fe_count: %d dev: %p\n", task, | ||
303 | fe_count, dev); | ||
304 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
305 | flags); | ||
306 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | ||
307 | |||
308 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
309 | continue; | ||
310 | } | ||
311 | DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," | ||
312 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); | ||
313 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
314 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | ||
315 | |||
316 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
317 | } | ||
318 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
319 | /* | ||
320 | * Release all commands remaining in the struct se_device cmd queue. | ||
321 | * | ||
322 | * This follows the same logic as above for the struct se_device | ||
323 | * struct se_task state list, where commands are returned with | ||
324 | * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD | ||
325 | * reference, otherwise the struct se_cmd is released. | ||
326 | */ | ||
327 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
328 | list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { | ||
329 | cmd = (struct se_cmd *)qr->cmd; | ||
330 | if (!(cmd)) { | ||
331 | /* | ||
332 | * Skip these for non PREEMPT_AND_ABORT usage.. | ||
333 | */ | ||
334 | if (preempt_and_abort_list != NULL) | ||
335 | continue; | ||
336 | |||
337 | atomic_dec(&qobj->queue_cnt); | ||
338 | list_del(&qr->qr_list); | ||
339 | kfree(qr); | ||
340 | continue; | ||
341 | } | ||
342 | /* | ||
343 | * For PREEMPT_AND_ABORT usage, only process commands | ||
344 | * with a matching reservation key. | ||
345 | */ | ||
346 | if ((preempt_and_abort_list != NULL) && | ||
347 | (core_scsi3_check_cdb_abort_and_preempt( | ||
348 | preempt_and_abort_list, cmd) != 0)) | ||
349 | continue; | ||
350 | /* | ||
351 | * Not aborting PROUT PREEMPT_AND_ABORT CDB.. | ||
352 | */ | ||
353 | if (prout_cmd == cmd) | ||
354 | continue; | ||
355 | |||
356 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | ||
357 | atomic_dec(&qobj->queue_cnt); | ||
358 | list_del(&qr->qr_list); | ||
359 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
360 | |||
361 | state = qr->state; | ||
362 | kfree(qr); | ||
363 | |||
364 | DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" | ||
365 | " %d t_fe_count: %d\n", (preempt_and_abort_list) ? | ||
366 | "Preempt" : "", cmd, state, | ||
367 | atomic_read(&T_TASK(cmd)->t_fe_count)); | ||
368 | /* | ||
369 | * Signal that the command has failed via cmd->se_cmd_flags, | ||
370 | * and call TFO->new_cmd_failure() to wakeup any fabric | ||
371 | * dependent code used to wait for unsolicited data out | ||
372 | * allocation to complete. The fabric module is expected | ||
373 | * to dump any remaining unsolicited data out for the aborted | ||
374 | * command at this point. | ||
375 | */ | ||
376 | transport_new_cmd_failure(cmd); | ||
377 | |||
378 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, | ||
379 | atomic_read(&T_TASK(cmd)->t_fe_count)); | ||
380 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
381 | } | ||
382 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
383 | /* | ||
384 | * Clear any legacy SPC-2 reservation when called during | ||
385 | * LOGICAL UNIT RESET | ||
386 | */ | ||
387 | if (!(preempt_and_abort_list) && | ||
388 | (dev->dev_flags & DF_SPC2_RESERVATIONS)) { | ||
389 | spin_lock(&dev->dev_reservation_lock); | ||
390 | dev->dev_reserved_node_acl = NULL; | ||
391 | dev->dev_flags &= ~DF_SPC2_RESERVATIONS; | ||
392 | spin_unlock(&dev->dev_reservation_lock); | ||
393 | printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); | ||
394 | } | ||
395 | |||
396 | spin_lock(&dev->stats_lock); | ||
397 | dev->num_resets++; | ||
398 | spin_unlock(&dev->stats_lock); | ||
399 | |||
400 | DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", | ||
401 | (preempt_and_abort_list) ? "Preempt" : "TMR", | ||
402 | TRANSPORT(dev)->name); | ||
403 | return 0; | ||
404 | } | ||
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c new file mode 100644 index 000000000000..abfa81a57115 --- /dev/null +++ b/drivers/target/target_core_tpg.c | |||
@@ -0,0 +1,826 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_tpg.c | ||
3 | * | ||
4 | * This file contains generic Target Portal Group related functions. | ||
5 | * | ||
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/net.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/timer.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/smp_lock.h> | ||
35 | #include <linux/in.h> | ||
36 | #include <net/sock.h> | ||
37 | #include <net/tcp.h> | ||
38 | #include <scsi/scsi.h> | ||
39 | #include <scsi/scsi_cmnd.h> | ||
40 | |||
41 | #include <target/target_core_base.h> | ||
42 | #include <target/target_core_device.h> | ||
43 | #include <target/target_core_tpg.h> | ||
44 | #include <target/target_core_transport.h> | ||
45 | #include <target/target_core_fabric_ops.h> | ||
46 | |||
47 | #include "target_core_hba.h" | ||
48 | |||
49 | /* core_clear_initiator_node_from_tpg(): | ||
50 | * | ||
51 | * | ||
52 | */ | ||
53 | static void core_clear_initiator_node_from_tpg( | ||
54 | struct se_node_acl *nacl, | ||
55 | struct se_portal_group *tpg) | ||
56 | { | ||
57 | int i; | ||
58 | struct se_dev_entry *deve; | ||
59 | struct se_lun *lun; | ||
60 | struct se_lun_acl *acl, *acl_tmp; | ||
61 | |||
62 | spin_lock_irq(&nacl->device_list_lock); | ||
63 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
64 | deve = &nacl->device_list[i]; | ||
65 | |||
66 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | ||
67 | continue; | ||
68 | |||
69 | if (!deve->se_lun) { | ||
70 | printk(KERN_ERR "%s device entries device pointer is" | ||
71 | " NULL, but Initiator has access.\n", | ||
72 | TPG_TFO(tpg)->get_fabric_name()); | ||
73 | continue; | ||
74 | } | ||
75 | |||
76 | lun = deve->se_lun; | ||
77 | spin_unlock_irq(&nacl->device_list_lock); | ||
78 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | ||
79 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | ||
80 | |||
81 | spin_lock(&lun->lun_acl_lock); | ||
82 | list_for_each_entry_safe(acl, acl_tmp, | ||
83 | &lun->lun_acl_list, lacl_list) { | ||
84 | if (!(strcmp(acl->initiatorname, | ||
85 | nacl->initiatorname)) && | ||
86 | (acl->mapped_lun == deve->mapped_lun)) | ||
87 | break; | ||
88 | } | ||
89 | |||
90 | if (!acl) { | ||
91 | printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," | ||
92 | " mapped_lun: %u\n", nacl->initiatorname, | ||
93 | deve->mapped_lun); | ||
94 | spin_unlock(&lun->lun_acl_lock); | ||
95 | spin_lock_irq(&nacl->device_list_lock); | ||
96 | continue; | ||
97 | } | ||
98 | |||
99 | list_del(&acl->lacl_list); | ||
100 | spin_unlock(&lun->lun_acl_lock); | ||
101 | |||
102 | spin_lock_irq(&nacl->device_list_lock); | ||
103 | kfree(acl); | ||
104 | } | ||
105 | spin_unlock_irq(&nacl->device_list_lock); | ||
106 | } | ||
107 | |||
108 | /* __core_tpg_get_initiator_node_acl(): | ||
109 | * | ||
110 | * spin_lock_bh(&tpg->acl_node_lock) must be held when calling | ||
111 | */ | ||
112 | struct se_node_acl *__core_tpg_get_initiator_node_acl( | ||
113 | struct se_portal_group *tpg, | ||
114 | const char *initiatorname) | ||
115 | { | ||
116 | struct se_node_acl *acl; | ||
117 | |||
118 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | ||
119 | if (!(strcmp(acl->initiatorname, initiatorname))) | ||
120 | return acl; | ||
121 | } | ||
122 | |||
123 | return NULL; | ||
124 | } | ||
125 | |||
126 | /* core_tpg_get_initiator_node_acl(): | ||
127 | * | ||
128 | * | ||
129 | */ | ||
130 | struct se_node_acl *core_tpg_get_initiator_node_acl( | ||
131 | struct se_portal_group *tpg, | ||
132 | unsigned char *initiatorname) | ||
133 | { | ||
134 | struct se_node_acl *acl; | ||
135 | |||
136 | spin_lock_bh(&tpg->acl_node_lock); | ||
137 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | ||
138 | if (!(strcmp(acl->initiatorname, initiatorname)) && | ||
139 | (!(acl->dynamic_node_acl))) { | ||
140 | spin_unlock_bh(&tpg->acl_node_lock); | ||
141 | return acl; | ||
142 | } | ||
143 | } | ||
144 | spin_unlock_bh(&tpg->acl_node_lock); | ||
145 | |||
146 | return NULL; | ||
147 | } | ||
148 | |||
149 | /* core_tpg_add_node_to_devs(): | ||
150 | * | ||
151 | * | ||
152 | */ | ||
153 | void core_tpg_add_node_to_devs( | ||
154 | struct se_node_acl *acl, | ||
155 | struct se_portal_group *tpg) | ||
156 | { | ||
157 | int i = 0; | ||
158 | u32 lun_access = 0; | ||
159 | struct se_lun *lun; | ||
160 | struct se_device *dev; | ||
161 | |||
162 | spin_lock(&tpg->tpg_lun_lock); | ||
163 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
164 | lun = &tpg->tpg_lun_list[i]; | ||
165 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) | ||
166 | continue; | ||
167 | |||
168 | spin_unlock(&tpg->tpg_lun_lock); | ||
169 | |||
170 | dev = lun->lun_se_dev; | ||
171 | /* | ||
172 | * By default in LIO-Target $FABRIC_MOD, | ||
173 | * demo_mode_write_protect is ON, i.e. exported LUNs are READ-ONLY; | ||
174 | */ | ||
175 | if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { | ||
176 | if (dev->dev_flags & DF_READ_ONLY) | ||
177 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
178 | else | ||
179 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | ||
180 | } else { | ||
181 | /* | ||
182 | * Allow only optical drives to issue R/W in default RO | ||
183 | * demo mode. | ||
184 | */ | ||
185 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) | ||
186 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
187 | else | ||
188 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | ||
189 | } | ||
190 | |||
191 | printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" | ||
192 | " access for LUN in Demo Mode\n", | ||
193 | TPG_TFO(tpg)->get_fabric_name(), | ||
194 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | ||
195 | (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? | ||
196 | "READ-WRITE" : "READ-ONLY"); | ||
197 | |||
198 | core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, | ||
199 | lun_access, acl, tpg, 1); | ||
200 | spin_lock(&tpg->tpg_lun_lock); | ||
201 | } | ||
202 | spin_unlock(&tpg->tpg_lun_lock); | ||
203 | } | ||
204 | |||
205 | /* core_set_queue_depth_for_node(): | ||
206 | * | ||
207 | * | ||
208 | */ | ||
209 | static int core_set_queue_depth_for_node( | ||
210 | struct se_portal_group *tpg, | ||
211 | struct se_node_acl *acl) | ||
212 | { | ||
213 | if (!acl->queue_depth) { | ||
214 | printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," | ||
215 | "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), | ||
216 | acl->initiatorname); | ||
217 | acl->queue_depth = 1; | ||
218 | } | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | /* core_create_device_list_for_node(): | ||
224 | * | ||
225 | * | ||
226 | */ | ||
227 | static int core_create_device_list_for_node(struct se_node_acl *nacl) | ||
228 | { | ||
229 | struct se_dev_entry *deve; | ||
230 | int i; | ||
231 | |||
232 | nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * | ||
233 | TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); | ||
234 | if (!(nacl->device_list)) { | ||
235 | printk(KERN_ERR "Unable to allocate memory for" | ||
236 | " struct se_node_acl->device_list\n"); | ||
237 | return -1; | ||
238 | } | ||
239 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
240 | deve = &nacl->device_list[i]; | ||
241 | |||
242 | atomic_set(&deve->ua_count, 0); | ||
243 | atomic_set(&deve->pr_ref_count, 0); | ||
244 | spin_lock_init(&deve->ua_lock); | ||
245 | INIT_LIST_HEAD(&deve->alua_port_list); | ||
246 | INIT_LIST_HEAD(&deve->ua_list); | ||
247 | } | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | /* core_tpg_check_initiator_node_acl() | ||
253 | * | ||
254 | * | ||
255 | */ | ||
256 | struct se_node_acl *core_tpg_check_initiator_node_acl( | ||
257 | struct se_portal_group *tpg, | ||
258 | unsigned char *initiatorname) | ||
259 | { | ||
260 | struct se_node_acl *acl; | ||
261 | |||
262 | acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | ||
263 | if ((acl)) | ||
264 | return acl; | ||
265 | |||
266 | if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) | ||
267 | return NULL; | ||
268 | |||
269 | acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); | ||
270 | if (!(acl)) | ||
271 | return NULL; | ||
272 | |||
273 | INIT_LIST_HEAD(&acl->acl_list); | ||
274 | INIT_LIST_HEAD(&acl->acl_sess_list); | ||
275 | spin_lock_init(&acl->device_list_lock); | ||
276 | spin_lock_init(&acl->nacl_sess_lock); | ||
277 | atomic_set(&acl->acl_pr_ref_count, 0); | ||
278 | atomic_set(&acl->mib_ref_count, 0); | ||
279 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); | ||
280 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | ||
281 | acl->se_tpg = tpg; | ||
282 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | ||
283 | spin_lock_init(&acl->stats_lock); | ||
284 | acl->dynamic_node_acl = 1; | ||
285 | |||
286 | TPG_TFO(tpg)->set_default_node_attributes(acl); | ||
287 | |||
288 | if (core_create_device_list_for_node(acl) < 0) { | ||
289 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | ||
290 | return NULL; | ||
291 | } | ||
292 | |||
293 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | ||
294 | core_free_device_list_for_node(acl, tpg); | ||
295 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | ||
296 | return NULL; | ||
297 | } | ||
298 | |||
299 | core_tpg_add_node_to_devs(acl, tpg); | ||
300 | |||
301 | spin_lock_bh(&tpg->acl_node_lock); | ||
302 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | ||
303 | tpg->num_node_acls++; | ||
304 | spin_unlock_bh(&tpg->acl_node_lock); | ||
305 | |||
306 | printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" | ||
307 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
308 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | ||
309 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | ||
310 | |||
311 | return acl; | ||
312 | } | ||
313 | EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); | ||
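
A sketch of the intended call site, assuming a hypothetical fabric login path where my_tpg is a fabric-private structure embedding struct se_portal_group and initiatorname is the peer's WWN/IQN string:

	struct se_node_acl *nacl;

	nacl = core_tpg_check_initiator_node_acl(&my_tpg->se_tpg, initiatorname);
	if (!nacl)
		return -EACCES;	/* no explicit ACL and demo mode disabled */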
314 | |||
315 | void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) | ||
316 | { | ||
317 | while (atomic_read(&nacl->acl_pr_ref_count) != 0) | ||
318 | cpu_relax(); | ||
319 | } | ||
320 | |||
321 | void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) | ||
322 | { | ||
323 | while (atomic_read(&nacl->mib_ref_count) != 0) | ||
324 | cpu_relax(); | ||
325 | } | ||
326 | |||
327 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) | ||
328 | { | ||
329 | int i, ret; | ||
330 | struct se_lun *lun; | ||
331 | |||
332 | spin_lock(&tpg->tpg_lun_lock); | ||
333 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
334 | lun = &tpg->tpg_lun_list[i]; | ||
335 | |||
336 | if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || | ||
337 | (lun->lun_se_dev == NULL)) | ||
338 | continue; | ||
339 | |||
340 | spin_unlock(&tpg->tpg_lun_lock); | ||
341 | ret = core_dev_del_lun(tpg, lun->unpacked_lun); | ||
342 | spin_lock(&tpg->tpg_lun_lock); | ||
343 | } | ||
344 | spin_unlock(&tpg->tpg_lun_lock); | ||
345 | } | ||
346 | EXPORT_SYMBOL(core_tpg_clear_object_luns); | ||
347 | |||
348 | /* core_tpg_add_initiator_node_acl(): | ||
349 | * | ||
350 | * | ||
351 | */ | ||
352 | struct se_node_acl *core_tpg_add_initiator_node_acl( | ||
353 | struct se_portal_group *tpg, | ||
354 | struct se_node_acl *se_nacl, | ||
355 | const char *initiatorname, | ||
356 | u32 queue_depth) | ||
357 | { | ||
358 | struct se_node_acl *acl = NULL; | ||
359 | |||
360 | spin_lock_bh(&tpg->acl_node_lock); | ||
361 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | ||
362 | if ((acl)) { | ||
363 | if (acl->dynamic_node_acl) { | ||
364 | acl->dynamic_node_acl = 0; | ||
365 | printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" | ||
366 | " for %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
367 | TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); | ||
368 | spin_unlock_bh(&tpg->acl_node_lock); | ||
369 | /* | ||
370 | * Release the locally allocated struct se_node_acl | ||
371 | * because core_tpg_add_initiator_node_acl() returned | ||
372 | * a pointer to an existing demo mode node ACL. | ||
373 | */ | ||
374 | if (se_nacl) | ||
375 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, | ||
376 | se_nacl); | ||
377 | goto done; | ||
378 | } | ||
379 | |||
380 | printk(KERN_ERR "ACL entry for %s Initiator" | ||
381 | " Node %s already exists for TPG %u, ignoring" | ||
382 | " request.\n", TPG_TFO(tpg)->get_fabric_name(), | ||
383 | initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
384 | spin_unlock_bh(&tpg->acl_node_lock); | ||
385 | return ERR_PTR(-EEXIST); | ||
386 | } | ||
387 | spin_unlock_bh(&tpg->acl_node_lock); | ||
388 | |||
389 | if (!(se_nacl)) { | ||
390 | printk("struct se_node_acl pointer is NULL\n"); | ||
391 | return ERR_PTR(-EINVAL); | ||
392 | } | ||
393 | /* | ||
394 | * For v4.x logic the se_node_acl_s is hanging off a fabric | ||
395 | * dependent structure allocated via | ||
396 | * struct target_core_fabric_ops->fabric_make_nodeacl() | ||
397 | */ | ||
398 | acl = se_nacl; | ||
399 | |||
400 | INIT_LIST_HEAD(&acl->acl_list); | ||
401 | INIT_LIST_HEAD(&acl->acl_sess_list); | ||
402 | spin_lock_init(&acl->device_list_lock); | ||
403 | spin_lock_init(&acl->nacl_sess_lock); | ||
404 | atomic_set(&acl->acl_pr_ref_count, 0); | ||
405 | acl->queue_depth = queue_depth; | ||
406 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | ||
407 | acl->se_tpg = tpg; | ||
408 | acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); | ||
409 | spin_lock_init(&acl->stats_lock); | ||
410 | |||
411 | TPG_TFO(tpg)->set_default_node_attributes(acl); | ||
412 | |||
413 | if (core_create_device_list_for_node(acl) < 0) { | ||
414 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | ||
415 | return ERR_PTR(-ENOMEM); | ||
416 | } | ||
417 | |||
418 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | ||
419 | core_free_device_list_for_node(acl, tpg); | ||
420 | TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); | ||
421 | return ERR_PTR(-EINVAL); | ||
422 | } | ||
423 | |||
424 | spin_lock_bh(&tpg->acl_node_lock); | ||
425 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); | ||
426 | tpg->num_node_acls++; | ||
427 | spin_unlock_bh(&tpg->acl_node_lock); | ||
428 | |||
429 | done: | ||
430 | printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" | ||
431 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
432 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | ||
433 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | ||
434 | |||
435 | return acl; | ||
436 | } | ||
437 | EXPORT_SYMBOL(core_tpg_add_initiator_node_acl); | ||
438 | |||
439 | /* core_tpg_del_initiator_node_acl(): | ||
440 | * | ||
441 | * | ||
442 | */ | ||
443 | int core_tpg_del_initiator_node_acl( | ||
444 | struct se_portal_group *tpg, | ||
445 | struct se_node_acl *acl, | ||
446 | int force) | ||
447 | { | ||
448 | struct se_session *sess, *sess_tmp; | ||
449 | int dynamic_acl = 0; | ||
450 | |||
451 | spin_lock_bh(&tpg->acl_node_lock); | ||
452 | if (acl->dynamic_node_acl) { | ||
453 | acl->dynamic_node_acl = 0; | ||
454 | dynamic_acl = 1; | ||
455 | } | ||
456 | list_del(&acl->acl_list); | ||
457 | tpg->num_node_acls--; | ||
458 | spin_unlock_bh(&tpg->acl_node_lock); | ||
459 | |||
460 | spin_lock_bh(&tpg->session_lock); | ||
461 | list_for_each_entry_safe(sess, sess_tmp, | ||
462 | &tpg->tpg_sess_list, sess_list) { | ||
463 | if (sess->se_node_acl != acl) | ||
464 | continue; | ||
465 | /* | ||
466 | * Determine if the session needs to be closed by our context. | ||
467 | */ | ||
468 | if (!(TPG_TFO(tpg)->shutdown_session(sess))) | ||
469 | continue; | ||
470 | |||
471 | spin_unlock_bh(&tpg->session_lock); | ||
472 | /* | ||
473 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, | ||
474 | * forcefully shutdown the $FABRIC_MOD session/nexus. | ||
475 | */ | ||
476 | TPG_TFO(tpg)->close_session(sess); | ||
477 | |||
478 | spin_lock_bh(&tpg->session_lock); | ||
479 | } | ||
480 | spin_unlock_bh(&tpg->session_lock); | ||
481 | |||
482 | core_tpg_wait_for_nacl_pr_ref(acl); | ||
483 | core_tpg_wait_for_mib_ref(acl); | ||
484 | core_clear_initiator_node_from_tpg(acl, tpg); | ||
485 | core_free_device_list_for_node(acl, tpg); | ||
486 | |||
487 | printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" | ||
488 | " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), | ||
489 | TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, | ||
490 | TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | EXPORT_SYMBOL(core_tpg_del_initiator_node_acl); | ||
495 | |||
496 | /* core_tpg_set_initiator_node_queue_depth(): | ||
497 | * | ||
498 | * | ||
499 | */ | ||
500 | int core_tpg_set_initiator_node_queue_depth( | ||
501 | struct se_portal_group *tpg, | ||
502 | unsigned char *initiatorname, | ||
503 | u32 queue_depth, | ||
504 | int force) | ||
505 | { | ||
506 | struct se_session *sess, *init_sess = NULL; | ||
507 | struct se_node_acl *acl; | ||
508 | int dynamic_acl = 0; | ||
509 | |||
510 | spin_lock_bh(&tpg->acl_node_lock); | ||
511 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); | ||
512 | if (!(acl)) { | ||
513 | printk(KERN_ERR "Access Control List entry for %s Initiator" | ||
514 | " Node %s does not exists for TPG %hu, ignoring" | ||
515 | " request.\n", TPG_TFO(tpg)->get_fabric_name(), | ||
516 | initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
517 | spin_unlock_bh(&tpg->acl_node_lock); | ||
518 | return -ENODEV; | ||
519 | } | ||
520 | if (acl->dynamic_node_acl) { | ||
521 | acl->dynamic_node_acl = 0; | ||
522 | dynamic_acl = 1; | ||
523 | } | ||
524 | spin_unlock_bh(&tpg->acl_node_lock); | ||
525 | |||
526 | spin_lock_bh(&tpg->session_lock); | ||
527 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { | ||
528 | if (sess->se_node_acl != acl) | ||
529 | continue; | ||
530 | |||
531 | if (!force) { | ||
532 | printk(KERN_ERR "Unable to change queue depth for %s" | ||
533 | " Initiator Node: %s while session is" | ||
534 | " operational. To forcefully change the queue" | ||
535 | " depth and force session reinstatement" | ||
536 | " use the \"force=1\" parameter.\n", | ||
537 | TPG_TFO(tpg)->get_fabric_name(), initiatorname); | ||
538 | spin_unlock_bh(&tpg->session_lock); | ||
539 | |||
540 | spin_lock_bh(&tpg->acl_node_lock); | ||
541 | if (dynamic_acl) | ||
542 | acl->dynamic_node_acl = 1; | ||
543 | spin_unlock_bh(&tpg->acl_node_lock); | ||
544 | return -EEXIST; | ||
545 | } | ||
546 | /* | ||
547 | * Determine if the session needs to be closed by our context. | ||
548 | */ | ||
549 | if (!(TPG_TFO(tpg)->shutdown_session(sess))) | ||
550 | continue; | ||
551 | |||
552 | init_sess = sess; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * User has requested to change the queue depth for an Initiator Node. | ||
558 | * Change the value in the Node's struct se_node_acl, and call | ||
559 | * core_set_queue_depth_for_node() to add the requested queue depth. | ||
560 | * | ||
561 | * Finally call TPG_TFO(tpg)->close_session() to force session | ||
562 | * reinstatement to occur if there is an active session for the | ||
563 | * $FABRIC_MOD Initiator Node in question. | ||
564 | */ | ||
565 | acl->queue_depth = queue_depth; | ||
566 | |||
567 | if (core_set_queue_depth_for_node(tpg, acl) < 0) { | ||
568 | spin_unlock_bh(&tpg->session_lock); | ||
569 | /* | ||
570 | * Force session reinstatement if | ||
571 | * core_set_queue_depth_for_node() failed, because we assume | ||
572 | * the $FABRIC_MOD has already set the session reinstatement | ||
573 | * bit from TPG_TFO(tpg)->shutdown_session() called above. | ||
574 | */ | ||
575 | if (init_sess) | ||
576 | TPG_TFO(tpg)->close_session(init_sess); | ||
577 | |||
578 | spin_lock_bh(&tpg->acl_node_lock); | ||
579 | if (dynamic_acl) | ||
580 | acl->dynamic_node_acl = 1; | ||
581 | spin_unlock_bh(&tpg->acl_node_lock); | ||
582 | return -EINVAL; | ||
583 | } | ||
584 | spin_unlock_bh(&tpg->session_lock); | ||
585 | /* | ||
586 | * If the $FABRIC_MOD session for the Initiator Node ACL exists, | ||
587 | * forcefully shutdown the $FABRIC_MOD session/nexus. | ||
588 | */ | ||
589 | if (init_sess) | ||
590 | TPG_TFO(tpg)->close_session(init_sess); | ||
591 | |||
592 | printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator" | ||
593 | " Node: %s on %s Target Portal Group: %u\n", queue_depth, | ||
594 | initiatorname, TPG_TFO(tpg)->get_fabric_name(), | ||
595 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
596 | |||
597 | spin_lock_bh(&tpg->acl_node_lock); | ||
598 | if (dynamic_acl) | ||
599 | acl->dynamic_node_acl = 1; | ||
600 | spin_unlock_bh(&tpg->acl_node_lock); | ||
601 | |||
602 | return 0; | ||
603 | } | ||
604 | EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); | ||
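
A sketch of a configfs store handler invoking the call above, with force=1 to permit reinstatement of an active session (my_tpg and the depth value are illustrative):

	ret = core_tpg_set_initiator_node_queue_depth(&my_tpg->se_tpg,
						      acl->initiatorname, 64, 1);
	if (ret < 0)
		return ret;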
605 | |||
606 | static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) | ||
607 | { | ||
608 | /* Set in core_dev_setup_virtual_lun0() */ | ||
609 | struct se_device *dev = se_global->g_lun0_dev; | ||
610 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; | ||
611 | u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | ||
612 | int ret; | ||
613 | |||
614 | lun->unpacked_lun = 0; | ||
615 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | ||
616 | atomic_set(&lun->lun_acl_count, 0); | ||
617 | init_completion(&lun->lun_shutdown_comp); | ||
618 | INIT_LIST_HEAD(&lun->lun_acl_list); | ||
619 | INIT_LIST_HEAD(&lun->lun_cmd_list); | ||
620 | spin_lock_init(&lun->lun_acl_lock); | ||
621 | spin_lock_init(&lun->lun_cmd_lock); | ||
622 | spin_lock_init(&lun->lun_sep_lock); | ||
623 | |||
624 | ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); | ||
625 | if (ret < 0) | ||
626 | return -1; | ||
627 | |||
628 | return 0; | ||
629 | } | ||
630 | |||
631 | static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg) | ||
632 | { | ||
633 | struct se_lun *lun = &se_tpg->tpg_virt_lun0; | ||
634 | |||
635 | core_tpg_post_dellun(se_tpg, lun); | ||
636 | } | ||
637 | |||
638 | int core_tpg_register( | ||
639 | struct target_core_fabric_ops *tfo, | ||
640 | struct se_wwn *se_wwn, | ||
641 | struct se_portal_group *se_tpg, | ||
642 | void *tpg_fabric_ptr, | ||
643 | int se_tpg_type) | ||
644 | { | ||
645 | struct se_lun *lun; | ||
646 | u32 i; | ||
647 | |||
648 | se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * | ||
649 | TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); | ||
650 | if (!(se_tpg->tpg_lun_list)) { | ||
651 | printk(KERN_ERR "Unable to allocate struct se_portal_group->" | ||
652 | "tpg_lun_list\n"); | ||
653 | return -ENOMEM; | ||
654 | } | ||
655 | |||
656 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | ||
657 | lun = &se_tpg->tpg_lun_list[i]; | ||
658 | lun->unpacked_lun = i; | ||
659 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | ||
660 | atomic_set(&lun->lun_acl_count, 0); | ||
661 | init_completion(&lun->lun_shutdown_comp); | ||
662 | INIT_LIST_HEAD(&lun->lun_acl_list); | ||
663 | INIT_LIST_HEAD(&lun->lun_cmd_list); | ||
664 | spin_lock_init(&lun->lun_acl_lock); | ||
665 | spin_lock_init(&lun->lun_cmd_lock); | ||
666 | spin_lock_init(&lun->lun_sep_lock); | ||
667 | } | ||
668 | |||
669 | se_tpg->se_tpg_type = se_tpg_type; | ||
670 | se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr; | ||
671 | se_tpg->se_tpg_tfo = tfo; | ||
672 | se_tpg->se_tpg_wwn = se_wwn; | ||
673 | atomic_set(&se_tpg->tpg_pr_ref_count, 0); | ||
674 | INIT_LIST_HEAD(&se_tpg->acl_node_list); | ||
675 | INIT_LIST_HEAD(&se_tpg->se_tpg_list); | ||
676 | INIT_LIST_HEAD(&se_tpg->tpg_sess_list); | ||
677 | spin_lock_init(&se_tpg->acl_node_lock); | ||
678 | spin_lock_init(&se_tpg->session_lock); | ||
679 | spin_lock_init(&se_tpg->tpg_lun_lock); | ||
680 | |||
681 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { | ||
682 | if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { | ||
683 | kfree(se_tpg); | ||
684 | return -ENOMEM; | ||
685 | } | ||
686 | } | ||
687 | |||
688 | spin_lock_bh(&se_global->se_tpg_lock); | ||
689 | list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); | ||
690 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
691 | |||
692 | printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" | ||
693 | " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), | ||
694 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | ||
695 | "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? | ||
696 | "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); | ||
697 | |||
698 | return 0; | ||
699 | } | ||
700 | EXPORT_SYMBOL(core_tpg_register); | ||
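A sketch of the expected calling convention for core_tpg_register()/core_tpg_deregister(), assuming a hypothetical fabric module that embeds struct se_portal_group in its own TPG wrapper; not part of the patch itself.

struct demo_tpg {
	u16 tpgt;
	struct se_portal_group se_tpg;
};

static int demo_make_tpg(struct demo_tpg *tpg, struct se_wwn *wwn,
			 struct target_core_fabric_ops *tfo)
{
	/* Normal (I/O capable) TPG, with tpg as the fabric pointer */
	return core_tpg_register(tfo, wwn, &tpg->se_tpg,
				 (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
}

static void demo_drop_tpg(struct demo_tpg *tpg)
{
	core_tpg_deregister(&tpg->se_tpg);
}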
701 | |||
702 | int core_tpg_deregister(struct se_portal_group *se_tpg) | ||
703 | { | ||
704 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | ||
705 | " for endpoint: %s Portal Tag %u\n", | ||
706 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | ||
707 | "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), | ||
708 | TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), | ||
709 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); | ||
710 | |||
711 | spin_lock_bh(&se_global->se_tpg_lock); | ||
712 | list_del(&se_tpg->se_tpg_list); | ||
713 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
714 | |||
715 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | ||
716 | cpu_relax(); | ||
717 | |||
718 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | ||
719 | core_tpg_release_virtual_lun0(se_tpg); | ||
720 | |||
721 | se_tpg->se_tpg_fabric_ptr = NULL; | ||
722 | kfree(se_tpg->tpg_lun_list); | ||
723 | return 0; | ||
724 | } | ||
725 | EXPORT_SYMBOL(core_tpg_deregister); | ||
726 | |||
727 | struct se_lun *core_tpg_pre_addlun( | ||
728 | struct se_portal_group *tpg, | ||
729 | u32 unpacked_lun) | ||
730 | { | ||
731 | struct se_lun *lun; | ||
732 | |||
733 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
734 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | ||
735 | "-1: %u for Target Portal Group: %u\n", | ||
736 | TPG_TFO(tpg)->get_fabric_name(), | ||
737 | unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
738 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
739 | return ERR_PTR(-EOVERFLOW); | ||
740 | } | ||
741 | |||
742 | spin_lock(&tpg->tpg_lun_lock); | ||
743 | lun = &tpg->tpg_lun_list[unpacked_lun]; | ||
744 | if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { | ||
745 | printk(KERN_ERR "TPG Logical Unit Number: %u is already active" | ||
746 | " on %s Target Portal Group: %u, ignoring request.\n", | ||
747 | unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), | ||
748 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
749 | spin_unlock(&tpg->tpg_lun_lock); | ||
750 | return ERR_PTR(-EINVAL); | ||
751 | } | ||
752 | spin_unlock(&tpg->tpg_lun_lock); | ||
753 | |||
754 | return lun; | ||
755 | } | ||
756 | |||
757 | int core_tpg_post_addlun( | ||
758 | struct se_portal_group *tpg, | ||
759 | struct se_lun *lun, | ||
760 | u32 lun_access, | ||
761 | void *lun_ptr) | ||
762 | { | ||
763 | if (core_dev_export(lun_ptr, tpg, lun) < 0) | ||
764 | return -1; | ||
765 | |||
766 | spin_lock(&tpg->tpg_lun_lock); | ||
767 | lun->lun_access = lun_access; | ||
768 | lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; | ||
769 | spin_unlock(&tpg->tpg_lun_lock); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static void core_tpg_shutdown_lun( | ||
775 | struct se_portal_group *tpg, | ||
776 | struct se_lun *lun) | ||
777 | { | ||
778 | core_clear_lun_from_tpg(lun, tpg); | ||
779 | transport_clear_lun_from_sessions(lun); | ||
780 | } | ||
781 | |||
782 | struct se_lun *core_tpg_pre_dellun( | ||
783 | struct se_portal_group *tpg, | ||
784 | u32 unpacked_lun, | ||
785 | int *ret) | ||
786 | { | ||
787 | struct se_lun *lun; | ||
788 | |||
789 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | ||
790 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" | ||
791 | "-1: %u for Target Portal Group: %u\n", | ||
792 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
793 | TRANSPORT_MAX_LUNS_PER_TPG-1, | ||
794 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
795 | return ERR_PTR(-EOVERFLOW); | ||
796 | } | ||
797 | |||
798 | spin_lock(&tpg->tpg_lun_lock); | ||
799 | lun = &tpg->tpg_lun_list[unpacked_lun]; | ||
800 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | ||
801 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | ||
802 | " Target Portal Group: %u, ignoring request.\n", | ||
803 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | ||
804 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
805 | spin_unlock(&tpg->tpg_lun_lock); | ||
806 | return ERR_PTR(-ENODEV); | ||
807 | } | ||
808 | spin_unlock(&tpg->tpg_lun_lock); | ||
809 | |||
810 | return lun; | ||
811 | } | ||
812 | |||
813 | int core_tpg_post_dellun( | ||
814 | struct se_portal_group *tpg, | ||
815 | struct se_lun *lun) | ||
816 | { | ||
817 | core_tpg_shutdown_lun(tpg, lun); | ||
818 | |||
819 | core_dev_unexport(lun->lun_se_dev, tpg, lun); | ||
820 | |||
821 | spin_lock(&tpg->tpg_lun_lock); | ||
822 | lun->lun_status = TRANSPORT_LUN_STATUS_FREE; | ||
823 | spin_unlock(&tpg->tpg_lun_lock); | ||
824 | |||
825 | return 0; | ||
826 | } | ||
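The four LUN helpers above are designed as pre/post pairs. A condensed sketch of how a configfs caller would string them together (error handling abbreviated, hypothetical wrapper names); not part of the patch:

static int demo_add_lun(struct se_portal_group *tpg, u32 unpacked_lun,
			u32 lun_access, void *dev_ptr)
{
	struct se_lun *lun = core_tpg_pre_addlun(tpg, unpacked_lun);

	if (IS_ERR(lun))
		return PTR_ERR(lun);

	return core_tpg_post_addlun(tpg, lun, lun_access, dev_ptr);
}

static int demo_del_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	int ret;
	struct se_lun *lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);

	if (IS_ERR(lun))
		return PTR_ERR(lun);

	return core_tpg_post_dellun(tpg, lun);
}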
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c new file mode 100644 index 000000000000..28b6292ff298 --- /dev/null +++ b/drivers/target/target_core_transport.c | |||
@@ -0,0 +1,6134 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_transport.c | ||
3 | * | ||
4 | * This file contains the Generic Target Engine Core. | ||
5 | * | ||
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | ||
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | ||
8 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
10 | * | ||
11 | * Nicholas A. Bellinger <nab@kernel.org> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2 of the License, or | ||
16 | * (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | ******************************************************************************/ | ||
28 | |||
29 | #include <linux/version.h> | ||
30 | #include <linux/net.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/blkdev.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/smp_lock.h> | ||
38 | #include <linux/kthread.h> | ||
39 | #include <linux/in.h> | ||
40 | #include <linux/cdrom.h> | ||
41 | #include <asm/unaligned.h> | ||
42 | #include <net/sock.h> | ||
43 | #include <net/tcp.h> | ||
44 | #include <scsi/scsi.h> | ||
45 | #include <scsi/scsi_cmnd.h> | ||
46 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | ||
47 | |||
48 | #include <target/target_core_base.h> | ||
49 | #include <target/target_core_device.h> | ||
50 | #include <target/target_core_tmr.h> | ||
51 | #include <target/target_core_tpg.h> | ||
52 | #include <target/target_core_transport.h> | ||
53 | #include <target/target_core_fabric_ops.h> | ||
54 | #include <target/target_core_configfs.h> | ||
55 | |||
56 | #include "target_core_alua.h" | ||
57 | #include "target_core_hba.h" | ||
58 | #include "target_core_pr.h" | ||
59 | #include "target_core_scdb.h" | ||
60 | #include "target_core_ua.h" | ||
61 | |||
62 | /* #define DEBUG_CDB_HANDLER */ | ||
63 | #ifdef DEBUG_CDB_HANDLER | ||
64 | #define DEBUG_CDB_H(x...) printk(KERN_INFO x) | ||
65 | #else | ||
66 | #define DEBUG_CDB_H(x...) | ||
67 | #endif | ||
68 | |||
69 | /* #define DEBUG_CMD_MAP */ | ||
70 | #ifdef DEBUG_CMD_MAP | ||
71 | #define DEBUG_CMD_M(x...) printk(KERN_INFO x) | ||
72 | #else | ||
73 | #define DEBUG_CMD_M(x...) | ||
74 | #endif | ||
75 | |||
76 | /* #define DEBUG_MEM_ALLOC */ | ||
77 | #ifdef DEBUG_MEM_ALLOC | ||
78 | #define DEBUG_MEM(x...) printk(KERN_INFO x) | ||
79 | #else | ||
80 | #define DEBUG_MEM(x...) | ||
81 | #endif | ||
82 | |||
83 | /* #define DEBUG_MEM2_ALLOC */ | ||
84 | #ifdef DEBUG_MEM2_ALLOC | ||
85 | #define DEBUG_MEM2(x...) printk(KERN_INFO x) | ||
86 | #else | ||
87 | #define DEBUG_MEM2(x...) | ||
88 | #endif | ||
89 | |||
90 | /* #define DEBUG_SG_CALC */ | ||
91 | #ifdef DEBUG_SG_CALC | ||
92 | #define DEBUG_SC(x...) printk(KERN_INFO x) | ||
93 | #else | ||
94 | #define DEBUG_SC(x...) | ||
95 | #endif | ||
96 | |||
97 | /* #define DEBUG_SE_OBJ */ | ||
98 | #ifdef DEBUG_SE_OBJ | ||
99 | #define DEBUG_SO(x...) printk(KERN_INFO x) | ||
100 | #else | ||
101 | #define DEBUG_SO(x...) | ||
102 | #endif | ||
103 | |||
104 | /* #define DEBUG_CMD_VOL */ | ||
105 | #ifdef DEBUG_CMD_VOL | ||
106 | #define DEBUG_VOL(x...) printk(KERN_INFO x) | ||
107 | #else | ||
108 | #define DEBUG_VOL(x...) | ||
109 | #endif | ||
110 | |||
111 | /* #define DEBUG_CMD_STOP */ | ||
112 | #ifdef DEBUG_CMD_STOP | ||
113 | #define DEBUG_CS(x...) printk(KERN_INFO x) | ||
114 | #else | ||
115 | #define DEBUG_CS(x...) | ||
116 | #endif | ||
117 | |||
118 | /* #define DEBUG_PASSTHROUGH */ | ||
119 | #ifdef DEBUG_PASSTHROUGH | ||
120 | #define DEBUG_PT(x...) printk(KERN_INFO x) | ||
121 | #else | ||
122 | #define DEBUG_PT(x...) | ||
123 | #endif | ||
124 | |||
125 | /* #define DEBUG_TASK_STOP */ | ||
126 | #ifdef DEBUG_TASK_STOP | ||
127 | #define DEBUG_TS(x...) printk(KERN_INFO x) | ||
128 | #else | ||
129 | #define DEBUG_TS(x...) | ||
130 | #endif | ||
131 | |||
132 | /* #define DEBUG_TRANSPORT_STOP */ | ||
133 | #ifdef DEBUG_TRANSPORT_STOP | ||
134 | #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) | ||
135 | #else | ||
136 | #define DEBUG_TRANSPORT_S(x...) | ||
137 | #endif | ||
138 | |||
139 | /* #define DEBUG_TASK_FAILURE */ | ||
140 | #ifdef DEBUG_TASK_FAILURE | ||
141 | #define DEBUG_TF(x...) printk(KERN_INFO x) | ||
142 | #else | ||
143 | #define DEBUG_TF(x...) | ||
144 | #endif | ||
145 | |||
146 | /* #define DEBUG_DEV_OFFLINE */ | ||
147 | #ifdef DEBUG_DEV_OFFLINE | ||
148 | #define DEBUG_DO(x...) printk(KERN_INFO x) | ||
149 | #else | ||
150 | #define DEBUG_DO(x...) | ||
151 | #endif | ||
152 | |||
153 | /* #define DEBUG_TASK_STATE */ | ||
154 | #ifdef DEBUG_TASK_STATE | ||
155 | #define DEBUG_TSTATE(x...) printk(KERN_INFO x) | ||
156 | #else | ||
157 | #define DEBUG_TSTATE(x...) | ||
158 | #endif | ||
159 | |||
160 | /* #define DEBUG_STATUS_THR */ | ||
161 | #ifdef DEBUG_STATUS_THR | ||
162 | #define DEBUG_ST(x...) printk(KERN_INFO x) | ||
163 | #else | ||
164 | #define DEBUG_ST(x...) | ||
165 | #endif | ||
166 | |||
167 | /* #define DEBUG_TASK_TIMEOUT */ | ||
168 | #ifdef DEBUG_TASK_TIMEOUT | ||
169 | #define DEBUG_TT(x...) printk(KERN_INFO x) | ||
170 | #else | ||
171 | #define DEBUG_TT(x...) | ||
172 | #endif | ||
173 | |||
174 | /* #define DEBUG_GENERIC_REQUEST_FAILURE */ | ||
175 | #ifdef DEBUG_GENERIC_REQUEST_FAILURE | ||
176 | #define DEBUG_GRF(x...) printk(KERN_INFO x) | ||
177 | #else | ||
178 | #define DEBUG_GRF(x...) | ||
179 | #endif | ||
180 | |||
181 | /* #define DEBUG_SAM_TASK_ATTRS */ | ||
182 | #ifdef DEBUG_SAM_TASK_ATTRS | ||
183 | #define DEBUG_STA(x...) printk(KERN_INFO x) | ||
184 | #else | ||
185 | #define DEBUG_STA(x...) | ||
186 | #endif | ||
187 | |||
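Each DEBUG_* pair above is a compile-time toggle: uncommenting the corresponding '/* #define ... */' line turns the helper macro into a printk(KERN_INFO ...), while leaving it commented makes every call site compile away to nothing. An illustrative (hypothetical) call site:

/* Logs only when DEBUG_CDB_HANDLER is defined above; otherwise a no-op. */
DEBUG_CDB_H("Handling CDB: 0x%02x for ITT: 0x%08x\n",
	T_TASK(cmd)->t_task_cdb[0], CMD_TFO(cmd)->get_task_tag(cmd));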
188 | struct se_global *se_global; | ||
189 | |||
190 | static struct kmem_cache *se_cmd_cache; | ||
191 | static struct kmem_cache *se_sess_cache; | ||
192 | struct kmem_cache *se_tmr_req_cache; | ||
193 | struct kmem_cache *se_ua_cache; | ||
194 | struct kmem_cache *se_mem_cache; | ||
195 | struct kmem_cache *t10_pr_reg_cache; | ||
196 | struct kmem_cache *t10_alua_lu_gp_cache; | ||
197 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | ||
198 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | ||
199 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | ||
200 | |||
201 | /* Used for transport_dev_get_map_*() */ | ||
202 | typedef int (*map_func_t)(struct se_task *, u32); | ||
203 | |||
204 | static int transport_generic_write_pending(struct se_cmd *); | ||
205 | static int transport_processing_thread(void *); | ||
206 | static int __transport_execute_tasks(struct se_device *dev); | ||
207 | static void transport_complete_task_attr(struct se_cmd *cmd); | ||
208 | static void transport_direct_request_timeout(struct se_cmd *cmd); | ||
209 | static void transport_free_dev_tasks(struct se_cmd *cmd); | ||
210 | static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, | ||
211 | unsigned long long starting_lba, u32 sectors, | ||
212 | enum dma_data_direction data_direction, | ||
213 | struct list_head *mem_list, int set_counts); | ||
214 | static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, | ||
215 | u32 dma_size); | ||
216 | static int transport_generic_remove(struct se_cmd *cmd, | ||
217 | int release_to_pool, int session_reinstatement); | ||
218 | static int transport_get_sectors(struct se_cmd *cmd); | ||
219 | static struct list_head *transport_init_se_mem_list(void); | ||
220 | static int transport_map_sg_to_mem(struct se_cmd *cmd, | ||
221 | struct list_head *se_mem_list, void *in_mem, | ||
222 | u32 *se_mem_cnt); | ||
223 | static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, | ||
224 | unsigned char *dst, struct list_head *se_mem_list); | ||
225 | static void transport_release_fe_cmd(struct se_cmd *cmd); | ||
226 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | ||
227 | struct se_queue_obj *qobj); | ||
228 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | ||
229 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | ||
230 | |||
231 | int transport_emulate_control_cdb(struct se_task *task); | ||
232 | |||
233 | int init_se_global(void) | ||
234 | { | ||
235 | struct se_global *global; | ||
236 | |||
237 | global = kzalloc(sizeof(struct se_global), GFP_KERNEL); | ||
238 | if (!(global)) { | ||
239 | printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); | ||
240 | return -1; | ||
241 | } | ||
242 | |||
243 | INIT_LIST_HEAD(&global->g_lu_gps_list); | ||
244 | INIT_LIST_HEAD(&global->g_se_tpg_list); | ||
245 | INIT_LIST_HEAD(&global->g_hba_list); | ||
246 | INIT_LIST_HEAD(&global->g_se_dev_list); | ||
247 | spin_lock_init(&global->g_device_lock); | ||
248 | spin_lock_init(&global->hba_lock); | ||
249 | spin_lock_init(&global->se_tpg_lock); | ||
250 | spin_lock_init(&global->lu_gps_lock); | ||
251 | spin_lock_init(&global->plugin_class_lock); | ||
252 | |||
253 | se_cmd_cache = kmem_cache_create("se_cmd_cache", | ||
254 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | ||
255 | if (!(se_cmd_cache)) { | ||
256 | printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); | ||
257 | goto out; | ||
258 | } | ||
259 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | ||
260 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | ||
261 | 0, NULL); | ||
262 | if (!(se_tmr_req_cache)) { | ||
263 | printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" | ||
264 | " failed\n"); | ||
265 | goto out; | ||
266 | } | ||
267 | se_sess_cache = kmem_cache_create("se_sess_cache", | ||
268 | sizeof(struct se_session), __alignof__(struct se_session), | ||
269 | 0, NULL); | ||
270 | if (!(se_sess_cache)) { | ||
271 | printk(KERN_ERR "kmem_cache_create() for struct se_session" | ||
272 | " failed\n"); | ||
273 | goto out; | ||
274 | } | ||
275 | se_ua_cache = kmem_cache_create("se_ua_cache", | ||
276 | sizeof(struct se_ua), __alignof__(struct se_ua), | ||
277 | 0, NULL); | ||
278 | if (!(se_ua_cache)) { | ||
279 | printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); | ||
280 | goto out; | ||
281 | } | ||
282 | se_mem_cache = kmem_cache_create("se_mem_cache", | ||
283 | sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); | ||
284 | if (!(se_mem_cache)) { | ||
285 | printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); | ||
286 | goto out; | ||
287 | } | ||
288 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", | ||
289 | sizeof(struct t10_pr_registration), | ||
290 | __alignof__(struct t10_pr_registration), 0, NULL); | ||
291 | if (!(t10_pr_reg_cache)) { | ||
292 | printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" | ||
293 | " failed\n"); | ||
294 | goto out; | ||
295 | } | ||
296 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | ||
297 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | ||
298 | 0, NULL); | ||
299 | if (!(t10_alua_lu_gp_cache)) { | ||
300 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" | ||
301 | " failed\n"); | ||
302 | goto out; | ||
303 | } | ||
304 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | ||
305 | sizeof(struct t10_alua_lu_gp_member), | ||
306 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | ||
307 | if (!(t10_alua_lu_gp_mem_cache)) { | ||
308 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" | ||
309 | "cache failed\n"); | ||
310 | goto out; | ||
311 | } | ||
312 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | ||
313 | sizeof(struct t10_alua_tg_pt_gp), | ||
314 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | ||
315 | if (!(t10_alua_tg_pt_gp_cache)) { | ||
316 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | ||
317 | "cache failed\n"); | ||
318 | goto out; | ||
319 | } | ||
320 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | ||
321 | "t10_alua_tg_pt_gp_mem_cache", | ||
322 | sizeof(struct t10_alua_tg_pt_gp_member), | ||
323 | __alignof__(struct t10_alua_tg_pt_gp_member), | ||
324 | 0, NULL); | ||
325 | if (!(t10_alua_tg_pt_gp_mem_cache)) { | ||
326 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | ||
327 | "mem_t failed\n"); | ||
328 | goto out; | ||
329 | } | ||
330 | |||
331 | se_global = global; | ||
332 | |||
333 | return 0; | ||
334 | out: | ||
335 | if (se_cmd_cache) | ||
336 | kmem_cache_destroy(se_cmd_cache); | ||
337 | if (se_tmr_req_cache) | ||
338 | kmem_cache_destroy(se_tmr_req_cache); | ||
339 | if (se_sess_cache) | ||
340 | kmem_cache_destroy(se_sess_cache); | ||
341 | if (se_ua_cache) | ||
342 | kmem_cache_destroy(se_ua_cache); | ||
343 | if (se_mem_cache) | ||
344 | kmem_cache_destroy(se_mem_cache); | ||
345 | if (t10_pr_reg_cache) | ||
346 | kmem_cache_destroy(t10_pr_reg_cache); | ||
347 | if (t10_alua_lu_gp_cache) | ||
348 | kmem_cache_destroy(t10_alua_lu_gp_cache); | ||
349 | if (t10_alua_lu_gp_mem_cache) | ||
350 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | ||
351 | if (t10_alua_tg_pt_gp_cache) | ||
352 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | ||
353 | if (t10_alua_tg_pt_gp_mem_cache) | ||
354 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | ||
355 | kfree(global); | ||
356 | return -1; | ||
357 | } | ||
358 | |||
359 | void release_se_global(void) | ||
360 | { | ||
361 | struct se_global *global; | ||
362 | |||
363 | global = se_global; | ||
364 | if (!(global)) | ||
365 | return; | ||
366 | |||
367 | kmem_cache_destroy(se_cmd_cache); | ||
368 | kmem_cache_destroy(se_tmr_req_cache); | ||
369 | kmem_cache_destroy(se_sess_cache); | ||
370 | kmem_cache_destroy(se_ua_cache); | ||
371 | kmem_cache_destroy(se_mem_cache); | ||
372 | kmem_cache_destroy(t10_pr_reg_cache); | ||
373 | kmem_cache_destroy(t10_alua_lu_gp_cache); | ||
374 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | ||
375 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | ||
376 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | ||
377 | kfree(global); | ||
378 | |||
379 | se_global = NULL; | ||
380 | } | ||
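
init_se_global()/release_se_global() are expected to be paired by the module init/exit path (in this patch the real caller lives in the configfs glue). A simplified, hypothetical sketch, not part of this file:

static int __init demo_target_init(void)
{
	if (init_se_global() < 0)
		return -ENOMEM;
	/* ... register configfs subsystem, HBA plugins, etc. ... */
	return 0;
}

static void __exit demo_target_exit(void)
{
	/* Safe even if init_se_global() never succeeded: NULL-checked. */
	release_se_global();
}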
381 | |||
382 | void transport_init_queue_obj(struct se_queue_obj *qobj) | ||
383 | { | ||
384 | atomic_set(&qobj->queue_cnt, 0); | ||
385 | INIT_LIST_HEAD(&qobj->qobj_list); | ||
386 | init_waitqueue_head(&qobj->thread_wq); | ||
387 | spin_lock_init(&qobj->cmd_queue_lock); | ||
388 | } | ||
389 | EXPORT_SYMBOL(transport_init_queue_obj); | ||
390 | |||
391 | static int transport_subsystem_reqmods(void) | ||
392 | { | ||
393 | int ret; | ||
394 | |||
395 | ret = request_module("target_core_iblock"); | ||
396 | if (ret != 0) | ||
397 | printk(KERN_ERR "Unable to load target_core_iblock\n"); | ||
398 | |||
399 | ret = request_module("target_core_file"); | ||
400 | if (ret != 0) | ||
401 | printk(KERN_ERR "Unable to load target_core_file\n"); | ||
402 | |||
403 | ret = request_module("target_core_pscsi"); | ||
404 | if (ret != 0) | ||
405 | printk(KERN_ERR "Unable to load target_core_pscsi\n"); | ||
406 | |||
407 | ret = request_module("target_core_stgt"); | ||
408 | if (ret != 0) | ||
409 | printk(KERN_ERR "Unable to load target_core_stgt\n"); | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | int transport_subsystem_check_init(void) | ||
415 | { | ||
416 | if (se_global->g_sub_api_initialized) | ||
417 | return 0; | ||
418 | /* | ||
419 | * Request the loading of known TCM subsystem plugins.. | ||
420 | */ | ||
421 | if (transport_subsystem_reqmods() < 0) | ||
422 | return -1; | ||
423 | |||
424 | se_global->g_sub_api_initialized = 1; | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | struct se_session *transport_init_session(void) | ||
429 | { | ||
430 | struct se_session *se_sess; | ||
431 | |||
432 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | ||
433 | if (!(se_sess)) { | ||
434 | printk(KERN_ERR "Unable to allocate struct se_session from" | ||
435 | " se_sess_cache\n"); | ||
436 | return ERR_PTR(-ENOMEM); | ||
437 | } | ||
438 | INIT_LIST_HEAD(&se_sess->sess_list); | ||
439 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | ||
440 | atomic_set(&se_sess->mib_ref_count, 0); | ||
441 | |||
442 | return se_sess; | ||
443 | } | ||
444 | EXPORT_SYMBOL(transport_init_session); | ||
445 | |||
446 | /* | ||
447 | * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. | ||
448 | */ | ||
449 | void __transport_register_session( | ||
450 | struct se_portal_group *se_tpg, | ||
451 | struct se_node_acl *se_nacl, | ||
452 | struct se_session *se_sess, | ||
453 | void *fabric_sess_ptr) | ||
454 | { | ||
455 | unsigned char buf[PR_REG_ISID_LEN]; | ||
456 | |||
457 | se_sess->se_tpg = se_tpg; | ||
458 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | ||
459 | /* | ||
460 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | ||
461 | * | ||
462 | * Only set for struct se_session's that will actually be moving I/O. | ||
463 | * e.g. *NOT* discovery sessions. | ||
464 | */ | ||
465 | if (se_nacl) { | ||
466 | /* | ||
467 | * If the fabric module supports an ISID based TransportID, | ||
468 | * save this value in binary from the fabric I_T Nexus now. | ||
469 | */ | ||
470 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { | ||
471 | memset(&buf[0], 0, PR_REG_ISID_LEN); | ||
472 | TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, | ||
473 | &buf[0], PR_REG_ISID_LEN); | ||
474 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | ||
475 | } | ||
476 | spin_lock_irq(&se_nacl->nacl_sess_lock); | ||
477 | /* | ||
478 | * The se_nacl->nacl_sess pointer will be set to the | ||
479 | * last active I_T Nexus for each struct se_node_acl. | ||
480 | */ | ||
481 | se_nacl->nacl_sess = se_sess; | ||
482 | |||
483 | list_add_tail(&se_sess->sess_acl_list, | ||
484 | &se_nacl->acl_sess_list); | ||
485 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | ||
486 | } | ||
487 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | ||
488 | |||
489 | printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", | ||
490 | TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); | ||
491 | } | ||
492 | EXPORT_SYMBOL(__transport_register_session); | ||
493 | |||
494 | void transport_register_session( | ||
495 | struct se_portal_group *se_tpg, | ||
496 | struct se_node_acl *se_nacl, | ||
497 | struct se_session *se_sess, | ||
498 | void *fabric_sess_ptr) | ||
499 | { | ||
500 | spin_lock_bh(&se_tpg->session_lock); | ||
501 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | ||
502 | spin_unlock_bh(&se_tpg->session_lock); | ||
503 | } | ||
504 | EXPORT_SYMBOL(transport_register_session); | ||
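A sketch of the intended fabric login flow using the two exports above; the connection pointer and function name are hypothetical, not part of the patch:

static int demo_login(struct se_portal_group *se_tpg,
		      struct se_node_acl *se_nacl, void *conn)
{
	struct se_session *se_sess = transport_init_session();

	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);

	/* Takes se_tpg->session_lock internally */
	transport_register_session(se_tpg, se_nacl, se_sess, conn);
	return 0;
}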
505 | |||
506 | void transport_deregister_session_configfs(struct se_session *se_sess) | ||
507 | { | ||
508 | struct se_node_acl *se_nacl; | ||
509 | |||
510 | /* | ||
511 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | ||
512 | */ | ||
513 | se_nacl = se_sess->se_node_acl; | ||
514 | if ((se_nacl)) { | ||
515 | spin_lock_irq(&se_nacl->nacl_sess_lock); | ||
516 | list_del(&se_sess->sess_acl_list); | ||
517 | /* | ||
518 | * If the session list is empty, then clear the pointer. | ||
519 | * Otherwise, set the struct se_session pointer from the tail | ||
520 | * element of the per struct se_node_acl active session list. | ||
521 | */ | ||
522 | if (list_empty(&se_nacl->acl_sess_list)) | ||
523 | se_nacl->nacl_sess = NULL; | ||
524 | else { | ||
525 | se_nacl->nacl_sess = container_of( | ||
526 | se_nacl->acl_sess_list.prev, | ||
527 | struct se_session, sess_acl_list); | ||
528 | } | ||
529 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | ||
530 | } | ||
531 | } | ||
532 | EXPORT_SYMBOL(transport_deregister_session_configfs); | ||
533 | |||
534 | void transport_free_session(struct se_session *se_sess) | ||
535 | { | ||
536 | kmem_cache_free(se_sess_cache, se_sess); | ||
537 | } | ||
538 | EXPORT_SYMBOL(transport_free_session); | ||
539 | |||
540 | void transport_deregister_session(struct se_session *se_sess) | ||
541 | { | ||
542 | struct se_portal_group *se_tpg = se_sess->se_tpg; | ||
543 | struct se_node_acl *se_nacl; | ||
544 | |||
545 | if (!(se_tpg)) { | ||
546 | transport_free_session(se_sess); | ||
547 | return; | ||
548 | } | ||
549 | /* | ||
550 | * Wait for possible reference in drivers/target/target_core_mib.c: | ||
551 | * scsi_att_intr_port_seq_show() | ||
552 | */ | ||
553 | while (atomic_read(&se_sess->mib_ref_count) != 0) | ||
554 | cpu_relax(); | ||
555 | |||
556 | spin_lock_bh(&se_tpg->session_lock); | ||
557 | list_del(&se_sess->sess_list); | ||
558 | se_sess->se_tpg = NULL; | ||
559 | se_sess->fabric_sess_ptr = NULL; | ||
560 | spin_unlock_bh(&se_tpg->session_lock); | ||
561 | |||
562 | /* | ||
563 | * Determine if we need to do extra work for this initiator node's | ||
564 | * struct se_node_acl if it had been previously dynamically generated. | ||
565 | */ | ||
566 | se_nacl = se_sess->se_node_acl; | ||
567 | if ((se_nacl)) { | ||
568 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
569 | if (se_nacl->dynamic_node_acl) { | ||
570 | if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( | ||
571 | se_tpg))) { | ||
572 | list_del(&se_nacl->acl_list); | ||
573 | se_tpg->num_node_acls--; | ||
574 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
575 | |||
576 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | ||
577 | core_tpg_wait_for_mib_ref(se_nacl); | ||
578 | core_free_device_list_for_node(se_nacl, se_tpg); | ||
579 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | ||
580 | se_nacl); | ||
581 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
582 | } | ||
583 | } | ||
584 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
585 | } | ||
586 | |||
587 | transport_free_session(se_sess); | ||
588 | |||
589 | printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", | ||
590 | TPG_TFO(se_tpg)->get_fabric_name()); | ||
591 | } | ||
592 | EXPORT_SYMBOL(transport_deregister_session); | ||
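The matching logout sketch: drop the ConfigFS visibility first, then the session itself. Note that transport_deregister_session() ends by calling transport_free_session(), so the caller must not touch se_sess afterwards. Hypothetical caller, not part of the patch:

static void demo_logout(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
	/* se_sess has been returned to se_sess_cache here */
}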
593 | |||
594 | /* | ||
595 | * Called with T_TASK(cmd)->t_state_lock held. | ||
596 | */ | ||
597 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | ||
598 | { | ||
599 | struct se_device *dev; | ||
600 | struct se_task *task; | ||
601 | unsigned long flags; | ||
602 | |||
603 | if (!T_TASK(cmd)) | ||
604 | return; | ||
605 | |||
606 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
607 | dev = task->se_dev; | ||
608 | if (!(dev)) | ||
609 | continue; | ||
610 | |||
611 | if (atomic_read(&task->task_active)) | ||
612 | continue; | ||
613 | |||
614 | if (!(atomic_read(&task->task_state_active))) | ||
615 | continue; | ||
616 | |||
617 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
618 | list_del(&task->t_state_list); | ||
619 | DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", | ||
620 | CMD_TFO(cmd)->get_task_tag(cmd), dev, task); | ||
621 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
622 | |||
623 | atomic_set(&task->task_state_active, 0); | ||
624 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); | ||
625 | } | ||
626 | } | ||
627 | |||
628 | /* transport_cmd_check_stop(): | ||
629 | * | ||
630 | * 'transport_off = 1' determines if t_transport_active should be cleared. | ||
631 | * 'transport_off = 2' determines if task_dev_state should be removed. | ||
632 | * | ||
633 | * A non-zero u8 t_state sets cmd->t_state. | ||
634 | * Returns 1 when command is stopped, else 0. | ||
635 | */ | ||
636 | static int transport_cmd_check_stop( | ||
637 | struct se_cmd *cmd, | ||
638 | int transport_off, | ||
639 | u8 t_state) | ||
640 | { | ||
641 | unsigned long flags; | ||
642 | |||
643 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
644 | /* | ||
645 | * Determine if IOCTL context caller is requesting the stopping of this | ||
646 | * command for LUN shutdown purposes. | ||
647 | */ | ||
648 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | ||
649 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" | ||
650 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, | ||
651 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
652 | |||
653 | cmd->deferred_t_state = cmd->t_state; | ||
654 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | ||
655 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | ||
656 | if (transport_off == 2) | ||
657 | transport_all_task_dev_remove_state(cmd); | ||
658 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
659 | |||
660 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | ||
661 | return 1; | ||
662 | } | ||
663 | /* | ||
664 | * Determine if frontend context caller is requesting the stopping of | ||
665 | * this command for frontend exceptions. | ||
666 | */ | ||
667 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | ||
668 | DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" | ||
669 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, | ||
670 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
671 | |||
672 | cmd->deferred_t_state = cmd->t_state; | ||
673 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | ||
674 | if (transport_off == 2) | ||
675 | transport_all_task_dev_remove_state(cmd); | ||
676 | |||
677 | /* | ||
678 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | ||
679 | * to FE. | ||
680 | */ | ||
681 | if (transport_off == 2) | ||
682 | cmd->se_lun = NULL; | ||
683 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
684 | |||
685 | complete(&T_TASK(cmd)->t_transport_stop_comp); | ||
686 | return 1; | ||
687 | } | ||
688 | if (transport_off) { | ||
689 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | ||
690 | if (transport_off == 2) { | ||
691 | transport_all_task_dev_remove_state(cmd); | ||
692 | /* | ||
693 | * Clear struct se_cmd->se_lun before the transport_off == 2 | ||
694 | * handoff to fabric module. | ||
695 | */ | ||
696 | cmd->se_lun = NULL; | ||
697 | /* | ||
698 | * Some fabric modules like tcm_loop can release | ||
699 | * their internally allocated I/O reference and | ||
700 | * struct se_cmd now. | ||
701 | */ | ||
702 | if (CMD_TFO(cmd)->check_stop_free != NULL) { | ||
703 | spin_unlock_irqrestore( | ||
704 | &T_TASK(cmd)->t_state_lock, flags); | ||
705 | |||
706 | CMD_TFO(cmd)->check_stop_free(cmd); | ||
707 | return 1; | ||
708 | } | ||
709 | } | ||
710 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
711 | |||
712 | return 0; | ||
713 | } else if (t_state) | ||
714 | cmd->t_state = t_state; | ||
715 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
716 | |||
717 | return 0; | ||
718 | } | ||
719 | |||
720 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | ||
721 | { | ||
722 | return transport_cmd_check_stop(cmd, 2, 0); | ||
723 | } | ||
724 | |||
725 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | ||
726 | { | ||
727 | struct se_lun *lun = SE_LUN(cmd); | ||
728 | unsigned long flags; | ||
729 | |||
730 | if (!lun) | ||
731 | return; | ||
732 | |||
733 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
734 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | ||
735 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
736 | goto check_lun; | ||
737 | } | ||
738 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | ||
739 | transport_all_task_dev_remove_state(cmd); | ||
740 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
741 | |||
742 | transport_free_dev_tasks(cmd); | ||
743 | |||
744 | check_lun: | ||
745 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | ||
746 | if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { | ||
747 | list_del(&cmd->se_lun_list); | ||
748 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | ||
749 | #if 0 | ||
750 | printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" | ||
751 | CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); | ||
752 | #endif | ||
753 | } | ||
754 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | ||
755 | } | ||
756 | |||
757 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | ||
758 | { | ||
759 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | ||
760 | transport_lun_remove_cmd(cmd); | ||
761 | |||
762 | if (transport_cmd_check_stop_to_fabric(cmd)) | ||
763 | return; | ||
764 | if (remove) | ||
765 | transport_generic_remove(cmd, 0, 0); | ||
766 | } | ||
767 | |||
768 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | ||
769 | { | ||
770 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | ||
771 | |||
772 | if (transport_cmd_check_stop_to_fabric(cmd)) | ||
773 | return; | ||
774 | |||
775 | transport_generic_remove(cmd, 0, 0); | ||
776 | } | ||
777 | |||
778 | static int transport_add_cmd_to_queue( | ||
779 | struct se_cmd *cmd, | ||
780 | int t_state) | ||
781 | { | ||
782 | struct se_device *dev = cmd->se_dev; | ||
783 | struct se_queue_obj *qobj = dev->dev_queue_obj; | ||
784 | struct se_queue_req *qr; | ||
785 | unsigned long flags; | ||
786 | |||
787 | qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); | ||
788 | if (!(qr)) { | ||
789 | printk(KERN_ERR "Unable to allocate memory for" | ||
790 | " struct se_queue_req\n"); | ||
791 | return -1; | ||
792 | } | ||
793 | INIT_LIST_HEAD(&qr->qr_list); | ||
794 | |||
795 | qr->cmd = (void *)cmd; | ||
796 | qr->state = t_state; | ||
797 | |||
798 | if (t_state) { | ||
799 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
800 | cmd->t_state = t_state; | ||
801 | atomic_set(&T_TASK(cmd)->t_transport_active, 1); | ||
802 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
803 | } | ||
804 | |||
805 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
806 | list_add_tail(&qr->qr_list, &qobj->qobj_list); | ||
807 | atomic_inc(&T_TASK(cmd)->t_transport_queue_active); | ||
808 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
809 | |||
810 | atomic_inc(&qobj->queue_cnt); | ||
811 | wake_up_interruptible(&qobj->thread_wq); | ||
812 | return 0; | ||
813 | } | ||
814 | |||
815 | /* | ||
816 | * Called with struct se_queue_obj->cmd_queue_lock held. | ||
817 | */ | ||
818 | static struct se_queue_req * | ||
819 | __transport_get_qr_from_queue(struct se_queue_obj *qobj) | ||
820 | { | ||
821 | struct se_cmd *cmd; | ||
822 | struct se_queue_req *qr = NULL; | ||
823 | |||
824 | if (list_empty(&qobj->qobj_list)) | ||
825 | return NULL; | ||
826 | |||
827 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | ||
828 | break; | ||
829 | |||
830 | if (qr->cmd) { | ||
831 | cmd = (struct se_cmd *)qr->cmd; | ||
832 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | ||
833 | } | ||
834 | list_del(&qr->qr_list); | ||
835 | atomic_dec(&qobj->queue_cnt); | ||
836 | |||
837 | return qr; | ||
838 | } | ||
839 | |||
840 | static struct se_queue_req * | ||
841 | transport_get_qr_from_queue(struct se_queue_obj *qobj) | ||
842 | { | ||
843 | struct se_cmd *cmd; | ||
844 | struct se_queue_req *qr; | ||
845 | unsigned long flags; | ||
846 | |||
847 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
848 | if (list_empty(&qobj->qobj_list)) { | ||
849 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
850 | return NULL; | ||
851 | } | ||
852 | |||
853 | list_for_each_entry(qr, &qobj->qobj_list, qr_list) | ||
854 | break; | ||
855 | |||
856 | if (qr->cmd) { | ||
857 | cmd = (struct se_cmd *)qr->cmd; | ||
858 | atomic_dec(&T_TASK(cmd)->t_transport_queue_active); | ||
859 | } | ||
860 | list_del(&qr->qr_list); | ||
861 | atomic_dec(&qobj->queue_cnt); | ||
862 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
863 | |||
864 | return qr; | ||
865 | } | ||
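
A sketch of the consumer side of this queue, mirroring what the processing thread later in this file does; simplified and hypothetical, not part of the patch:

static int demo_drain_queue(struct se_queue_obj *qobj)
{
	struct se_queue_req *qr;

	if (wait_event_interruptible(qobj->thread_wq,
			atomic_read(&qobj->queue_cnt)))
		return -ERESTARTSYS;

	while ((qr = transport_get_qr_from_queue(qobj)) != NULL) {
		struct se_cmd *cmd = (struct se_cmd *)qr->cmd;
		int t_state = qr->state;

		kfree(qr);
		printk(KERN_INFO "Dequeued ITT: 0x%08x t_state: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd), t_state);
		/* ... dispatch cmd based on t_state ... */
	}
	return 0;
}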
866 | |||
867 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | ||
868 | struct se_queue_obj *qobj) | ||
869 | { | ||
870 | struct se_cmd *q_cmd; | ||
871 | struct se_queue_req *qr = NULL, *qr_p = NULL; | ||
872 | unsigned long flags; | ||
873 | |||
874 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | ||
875 | if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { | ||
876 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
877 | return; | ||
878 | } | ||
879 | |||
880 | list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { | ||
881 | q_cmd = (struct se_cmd *)qr->cmd; | ||
882 | if (q_cmd != cmd) | ||
883 | continue; | ||
884 | |||
885 | atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); | ||
886 | atomic_dec(&qobj->queue_cnt); | ||
887 | list_del(&qr->qr_list); | ||
888 | kfree(qr); | ||
889 | } | ||
890 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | ||
891 | |||
892 | if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { | ||
893 | printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", | ||
894 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
895 | atomic_read(&T_TASK(cmd)->t_transport_queue_active)); | ||
896 | } | ||
897 | } | ||
898 | |||
899 | /* | ||
900 | * Completion function used by TCM subsystem plugins (such as FILEIO) | ||
901 | * for queueing up a response from struct se_subsystem_api->do_task() | ||
902 | */ | ||
903 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | ||
904 | { | ||
905 | struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, | ||
906 | struct se_task, t_list); | ||
907 | |||
908 | if (good) { | ||
909 | cmd->scsi_status = SAM_STAT_GOOD; | ||
910 | task->task_scsi_status = GOOD; | ||
911 | } else { | ||
912 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | ||
913 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | ||
914 | TASK_CMD(task)->transport_error_status = | ||
915 | PYX_TRANSPORT_ILLEGAL_REQUEST; | ||
916 | } | ||
917 | |||
918 | transport_complete_task(task, good); | ||
919 | } | ||
920 | EXPORT_SYMBOL(transport_complete_sync_cache); | ||
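A sketch of how a subsystem plugin's do_task() handler might complete an emulated SYNCHRONIZE_CACHE through the helper above; the backend and the return-value handling are hypothetical (real plugins return one of the PYX_TRANSPORT_* status codes), not part of the patch:

static int demo_do_sync_cache_task(struct se_task *task)
{
	int good = 1;	/* assume the backing-store flush succeeded */

	/* ... issue and wait for the flush against the backing device ... */

	transport_complete_sync_cache(TASK_CMD(task), good);
	return 0;
}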
921 | |||
922 | /* transport_complete_task(): | ||
923 | * | ||
924 | * Called from interrupt and non interrupt context depending | ||
925 | * on the transport plugin. | ||
926 | */ | ||
927 | void transport_complete_task(struct se_task *task, int success) | ||
928 | { | ||
929 | struct se_cmd *cmd = TASK_CMD(task); | ||
930 | struct se_device *dev = task->se_dev; | ||
931 | int t_state; | ||
932 | unsigned long flags; | ||
933 | #if 0 | ||
934 | printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, | ||
935 | T_TASK(cmd)->t_task_cdb[0], dev); | ||
936 | #endif | ||
937 | if (dev) { | ||
938 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
939 | atomic_inc(&dev->depth_left); | ||
940 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | ||
941 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
942 | } | ||
943 | |||
944 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
945 | atomic_set(&task->task_active, 0); | ||
946 | |||
947 | /* | ||
948 | * See if any sense data exists, if so set the TASK_SENSE flag. | ||
949 | * Also check for any other post completion work that needs to be | ||
950 | * done by the plugins. | ||
951 | */ | ||
952 | if (dev && dev->transport->transport_complete) { | ||
953 | if (dev->transport->transport_complete(task) != 0) { | ||
954 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | ||
955 | task->task_sense = 1; | ||
956 | success = 1; | ||
957 | } | ||
958 | } | ||
959 | |||
960 | /* | ||
961 | * See if we are waiting for outstanding struct se_task | ||
962 | * to complete for an exception condition | ||
963 | */ | ||
964 | if (atomic_read(&task->task_stop)) { | ||
965 | /* | ||
966 | * Decrement T_TASK(cmd)->t_se_count if this task had | ||
967 | * previously thrown its timeout exception handler. | ||
968 | */ | ||
969 | if (atomic_read(&task->task_timeout)) { | ||
970 | atomic_dec(&T_TASK(cmd)->t_se_count); | ||
971 | atomic_set(&task->task_timeout, 0); | ||
972 | } | ||
973 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
974 | |||
975 | complete(&task->task_stop_comp); | ||
976 | return; | ||
977 | } | ||
978 | /* | ||
979 | * If the task's timeout handler has fired, use the t_task_cdbs_timeout | ||
980 | * left counter to determine when the struct se_cmd is ready to be queued to | ||
981 | * the processing thread. | ||
982 | */ | ||
983 | if (atomic_read(&task->task_timeout)) { | ||
984 | if (!(atomic_dec_and_test( | ||
985 | &T_TASK(cmd)->t_task_cdbs_timeout_left))) { | ||
986 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
987 | flags); | ||
988 | return; | ||
989 | } | ||
990 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | ||
991 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
992 | |||
993 | transport_add_cmd_to_queue(cmd, t_state); | ||
994 | return; | ||
995 | } | ||
996 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); | ||
997 | |||
998 | /* | ||
999 | * Decrement the outstanding t_task_cdbs_left count. The last | ||
1000 | * struct se_task from struct se_cmd will complete itself into the | ||
1001 | * device queue depending upon int success. | ||
1002 | */ | ||
1003 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | ||
1004 | if (!success) | ||
1005 | T_TASK(cmd)->t_tasks_failed = 1; | ||
1006 | |||
1007 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
1008 | return; | ||
1009 | } | ||
1010 | |||
1011 | if (!success || T_TASK(cmd)->t_tasks_failed) { | ||
1012 | t_state = TRANSPORT_COMPLETE_FAILURE; | ||
1013 | if (!task->task_error_status) { | ||
1014 | task->task_error_status = | ||
1015 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1016 | cmd->transport_error_status = | ||
1017 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | ||
1018 | } | ||
1019 | } else { | ||
1020 | atomic_set(&T_TASK(cmd)->t_transport_complete, 1); | ||
1021 | t_state = TRANSPORT_COMPLETE_OK; | ||
1022 | } | ||
1023 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
1024 | |||
1025 | transport_add_cmd_to_queue(cmd, t_state); | ||
1026 | } | ||
1027 | EXPORT_SYMBOL(transport_complete_task); | ||
1028 | |||
1029 | /* | ||
1030 | * Called by transport_add_tasks_from_cmd() once a struct se_cmd's | ||
1031 | * struct se_task list is ready to be added to the active execution | ||
1032 | * list of a struct se_device. | ||
1033 | * | ||
1034 | * Called with se_dev_t->execute_task_lock held. | ||
1035 | */ | ||
1036 | static inline int transport_add_task_check_sam_attr( | ||
1037 | struct se_task *task, | ||
1038 | struct se_task *task_prev, | ||
1039 | struct se_device *dev) | ||
1040 | { | ||
1041 | /* | ||
1042 | * No SAM Task attribute emulation enabled, add to tail of | ||
1043 | * execution queue | ||
1044 | */ | ||
1045 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { | ||
1046 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | ||
1047 | return 0; | ||
1048 | } | ||
1049 | /* | ||
1050 | * HEAD_OF_QUEUE attribute for received CDB, which means | ||
1051 | * the first task that is associated with a struct se_cmd goes to | ||
1052 | * head of the struct se_device->execute_task_list, and task_prev | ||
1053 | * after that for each subsequent task | ||
1054 | */ | ||
1055 | if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) { | ||
1056 | list_add(&task->t_execute_list, | ||
1057 | (task_prev != NULL) ? | ||
1058 | &task_prev->t_execute_list : | ||
1059 | &dev->execute_task_list); | ||
1060 | |||
1061 | DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" | ||
1062 | " in execution queue\n", | ||
1063 | T_TASK(task->task_se_cmd)->t_task_cdb[0]); | ||
1064 | return 1; | ||
1065 | } | ||
1066 | /* | ||
1067 | * ORDERED, SIMPLE or UNTAGGED attribute tasks are added to the end | ||
1068 | * of the struct se_device->execute_task_list once they have | ||
1069 | * transitioned from Dormant -> Active state. | ||
1070 | */ | ||
1071 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | ||
1072 | return 0; | ||
1073 | } | ||
1074 | |||
1075 | /* __transport_add_task_to_execute_queue(): | ||
1076 | * | ||
1077 | * Called with se_dev_t->execute_task_lock held. | ||
1078 | */ | ||
1079 | static void __transport_add_task_to_execute_queue( | ||
1080 | struct se_task *task, | ||
1081 | struct se_task *task_prev, | ||
1082 | struct se_device *dev) | ||
1083 | { | ||
1084 | int head_of_queue; | ||
1085 | |||
1086 | head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); | ||
1087 | atomic_inc(&dev->execute_tasks); | ||
1088 | |||
1089 | if (atomic_read(&task->task_state_active)) | ||
1090 | return; | ||
1091 | /* | ||
1092 | * Determine if this task needs to go to HEAD_OF_QUEUE for the | ||
1093 | * state list as well. Running without SAM Task Attribute emulation | ||
1094 | * will always return head_of_queue == 0 here. | ||
1095 | */ | ||
1096 | if (head_of_queue) | ||
1097 | list_add(&task->t_state_list, (task_prev) ? | ||
1098 | &task_prev->t_state_list : | ||
1099 | &dev->state_task_list); | ||
1100 | else | ||
1101 | list_add_tail(&task->t_state_list, &dev->state_task_list); | ||
1102 | |||
1103 | atomic_set(&task->task_state_active, 1); | ||
1104 | |||
1105 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | ||
1106 | CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), | ||
1107 | task, dev); | ||
1108 | } | ||
1109 | |||
1110 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | ||
1111 | { | ||
1112 | struct se_device *dev; | ||
1113 | struct se_task *task; | ||
1114 | unsigned long flags; | ||
1115 | |||
1116 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
1117 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
1118 | dev = task->se_dev; | ||
1119 | |||
1120 | if (atomic_read(&task->task_state_active)) | ||
1121 | continue; | ||
1122 | |||
1123 | spin_lock(&dev->execute_task_lock); | ||
1124 | list_add_tail(&task->t_state_list, &dev->state_task_list); | ||
1125 | atomic_set(&task->task_state_active, 1); | ||
1126 | |||
1127 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | ||
1128 | CMD_TFO(task->task_se_cmd)->get_task_tag( | ||
1129 | task->task_se_cmd), task, dev); | ||
1130 | |||
1131 | spin_unlock(&dev->execute_task_lock); | ||
1132 | } | ||
1133 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
1134 | } | ||
1135 | |||
1136 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | ||
1137 | { | ||
1138 | struct se_device *dev = SE_DEV(cmd); | ||
1139 | struct se_task *task, *task_prev = NULL; | ||
1140 | unsigned long flags; | ||
1141 | |||
1142 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
1143 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
1144 | if (atomic_read(&task->task_execute_queue)) | ||
1145 | continue; | ||
1146 | /* | ||
1147 | * __transport_add_task_to_execute_queue() handles the | ||
1148 | * SAM Task Attribute emulation if enabled | ||
1149 | */ | ||
1150 | __transport_add_task_to_execute_queue(task, task_prev, dev); | ||
1151 | atomic_set(&task->task_execute_queue, 1); | ||
1152 | task_prev = task; | ||
1153 | } | ||
1154 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
1155 | |||
1156 | return; | ||
1157 | } | ||
1158 | |||
1159 | /* transport_get_task_from_execute_queue(): | ||
1160 | * | ||
1161 | * Called with dev->execute_task_lock held. | ||
1162 | */ | ||
1163 | static struct se_task * | ||
1164 | transport_get_task_from_execute_queue(struct se_device *dev) | ||
1165 | { | ||
1166 | struct se_task *task; | ||
1167 | |||
1168 | if (list_empty(&dev->execute_task_list)) | ||
1169 | return NULL; | ||
1170 | |||
1171 | list_for_each_entry(task, &dev->execute_task_list, t_execute_list) | ||
1172 | break; | ||
1173 | |||
1174 | list_del(&task->t_execute_list); | ||
1175 | atomic_dec(&dev->execute_tasks); | ||
1176 | |||
1177 | return task; | ||
1178 | } | ||
1179 | |||
1180 | /* transport_remove_task_from_execute_queue(): | ||
1181 | * | ||
1182 | * Remove a struct se_task from its struct se_device execution queue. | ||
1183 | */ | ||
1184 | static void transport_remove_task_from_execute_queue( | ||
1185 | struct se_task *task, | ||
1186 | struct se_device *dev) | ||
1187 | { | ||
1188 | unsigned long flags; | ||
1189 | |||
1190 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
1191 | list_del(&task->t_execute_list); | ||
1192 | atomic_dec(&dev->execute_tasks); | ||
1193 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
1194 | } | ||
1195 | |||
1196 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) | ||
1197 | { | ||
1198 | switch (cmd->data_direction) { | ||
1199 | case DMA_NONE: | ||
1200 | return "NONE"; | ||
1201 | case DMA_FROM_DEVICE: | ||
1202 | return "READ"; | ||
1203 | case DMA_TO_DEVICE: | ||
1204 | return "WRITE"; | ||
1205 | case DMA_BIDIRECTIONAL: | ||
1206 | return "BIDI"; | ||
1207 | default: | ||
1208 | break; | ||
1209 | } | ||
1210 | |||
1211 | return "UNKNOWN"; | ||
1212 | } | ||
1213 | |||
1214 | void transport_dump_dev_state( | ||
1215 | struct se_device *dev, | ||
1216 | char *b, | ||
1217 | int *bl) | ||
1218 | { | ||
1219 | *bl += sprintf(b + *bl, "Status: "); | ||
1220 | switch (dev->dev_status) { | ||
1221 | case TRANSPORT_DEVICE_ACTIVATED: | ||
1222 | *bl += sprintf(b + *bl, "ACTIVATED"); | ||
1223 | break; | ||
1224 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
1225 | *bl += sprintf(b + *bl, "DEACTIVATED"); | ||
1226 | break; | ||
1227 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
1228 | *bl += sprintf(b + *bl, "SHUTDOWN"); | ||
1229 | break; | ||
1230 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
1231 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
1232 | *bl += sprintf(b + *bl, "OFFLINE"); | ||
1233 | break; | ||
1234 | default: | ||
1235 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | ||
1236 | break; | ||
1237 | } | ||
1238 | |||
1239 | *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", | ||
1240 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | ||
1241 | dev->queue_depth); | ||
1242 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | ||
1243 | DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); | ||
1244 | *bl += sprintf(b + *bl, " "); | ||
1245 | } | ||
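
Usage sketch for the dump helper above: callers pass a buffer plus a running length that successive transport_dump_*() calls append through. Hypothetical caller, not part of the patch:

static void demo_show_dev_state(struct se_device *dev)
{
	char buf[256];
	int bl = 0;

	buf[0] = '\0';
	transport_dump_dev_state(dev, buf, &bl);
	printk(KERN_INFO "%s\n", buf);
}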
1246 | |||
1247 | /* transport_release_all_cmds(): | ||
1248 | * | ||
1249 | * Release any commands still sitting on this device's queue object. | ||
1250 | */ | ||
1251 | static void transport_release_all_cmds(struct se_device *dev) | ||
1252 | { | ||
1253 | struct se_cmd *cmd = NULL; | ||
1254 | struct se_queue_req *qr = NULL, *qr_p = NULL; | ||
1255 | int bug_out = 0, t_state; | ||
1256 | unsigned long flags; | ||
1257 | |||
1258 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
1259 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, | ||
1260 | qr_list) { | ||
1261 | |||
1262 | cmd = (struct se_cmd *)qr->cmd; | ||
1263 | t_state = qr->state; | ||
1264 | list_del(&qr->qr_list); | ||
1265 | kfree(qr); | ||
1266 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, | ||
1267 | flags); | ||
1268 | |||
1269 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," | ||
1270 | " t_state: %u directly\n", | ||
1271 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
1272 | CMD_TFO(cmd)->get_cmd_state(cmd), t_state); | ||
1273 | |||
1274 | transport_release_fe_cmd(cmd); | ||
1275 | bug_out = 1; | ||
1276 | |||
1277 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
1278 | } | ||
1279 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
1280 | #if 0 | ||
1281 | if (bug_out) | ||
1282 | BUG(); | ||
1283 | #endif | ||
1284 | } | ||
1285 | |||
1286 | void transport_dump_vpd_proto_id( | ||
1287 | struct t10_vpd *vpd, | ||
1288 | unsigned char *p_buf, | ||
1289 | int p_buf_len) | ||
1290 | { | ||
1291 | unsigned char buf[VPD_TMP_BUF_SIZE]; | ||
1292 | int len; | ||
1293 | |||
1294 | memset(buf, 0, VPD_TMP_BUF_SIZE); | ||
1295 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | ||
1296 | |||
1297 | switch (vpd->protocol_identifier) { | ||
1298 | case 0x00: | ||
1299 | sprintf(buf+len, "Fibre Channel\n"); | ||
1300 | break; | ||
1301 | case 0x10: | ||
1302 | sprintf(buf+len, "Parallel SCSI\n"); | ||
1303 | break; | ||
1304 | case 0x20: | ||
1305 | sprintf(buf+len, "SSA\n"); | ||
1306 | break; | ||
1307 | case 0x30: | ||
1308 | sprintf(buf+len, "IEEE 1394\n"); | ||
1309 | break; | ||
1310 | case 0x40: | ||
1311 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | ||
1312 | " Protocol\n"); | ||
1313 | break; | ||
1314 | case 0x50: | ||
1315 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | ||
1316 | break; | ||
1317 | case 0x60: | ||
1318 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | ||
1319 | break; | ||
1320 | case 0x70: | ||
1321 | sprintf(buf+len, "Automation/Drive Interface Transport" | ||
1322 | " Protocol\n"); | ||
1323 | break; | ||
1324 | case 0x80: | ||
1325 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | ||
1326 | break; | ||
1327 | default: | ||
1328 | sprintf(buf+len, "Unknown 0x%02x\n", | ||
1329 | vpd->protocol_identifier); | ||
1330 | break; | ||
1331 | } | ||
1332 | |||
1333 | if (p_buf) | ||
1334 | strncpy(p_buf, buf, p_buf_len); | ||
1335 | else | ||
1336 | printk(KERN_INFO "%s", buf); | ||
1337 | } | ||
1338 | |||
1339 | void | ||
1340 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | ||
1341 | { | ||
1342 | /* | ||
1343 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | ||
1344 | * | ||
1345 | * from spc3r23.pdf section 7.5.1 | ||
1346 | */ | ||
1347 | if (page_83[1] & 0x80) { | ||
1348 | vpd->protocol_identifier = (page_83[0] & 0xf0); | ||
1349 | vpd->protocol_identifier_set = 1; | ||
1350 | transport_dump_vpd_proto_id(vpd, NULL, 0); | ||
1351 | } | ||
1352 | } | ||
1353 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | ||
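For reference, transport_set_vpd_proto_id() parses a single INQUIRY EVPD 0x83 designation descriptor: the PIV bit sits in bit 7 of the descriptor's second byte, and the protocol identifier in the high nibble of its first byte, exactly as the code above reads them. A minimal stand-alone sketch of the same parse (the descriptor bytes below are hypothetical):

#include <stdio.h>

/* Stand-alone demo of the PIV/protocol-identifier parse above. */
static void demo_parse_proto_id(const unsigned char *desc)
{
	if (desc[1] & 0x80)	/* Protocol Identifier Valid (PIV) bit */
		printf("protocol identifier: 0x%02x\n", desc[0] & 0xf0);
	else
		printf("PIV clear: protocol identifier field not valid\n");
}

int main(void)
{
	/* 0x6 in the high nibble maps to "SAS Serial SCSI Protocol" above */
	unsigned char desc[4] = { 0x63, 0x93, 0x00, 0x08 };

	demo_parse_proto_id(desc);
	return 0;
}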
1354 | |||
1355 | int transport_dump_vpd_assoc( | ||
1356 | struct t10_vpd *vpd, | ||
1357 | unsigned char *p_buf, | ||
1358 | int p_buf_len) | ||
1359 | { | ||
1360 | unsigned char buf[VPD_TMP_BUF_SIZE]; | ||
1361 | int ret = 0, len; | ||
1362 | |||
1363 | memset(buf, 0, VPD_TMP_BUF_SIZE); | ||
1364 | len = sprintf(buf, "T10 VPD Identifier Association: "); | ||
1365 | |||
1366 | switch (vpd->association) { | ||
1367 | case 0x00: | ||
1368 | sprintf(buf+len, "addressed logical unit\n"); | ||
1369 | break; | ||
1370 | case 0x10: | ||
1371 | sprintf(buf+len, "target port\n"); | ||
1372 | break; | ||
1373 | case 0x20: | ||
1374 | sprintf(buf+len, "SCSI target device\n"); | ||
1375 | break; | ||
1376 | default: | ||
1377 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | ||
1378 | ret = -1; | ||
1379 | break; | ||
1380 | } | ||
1381 | |||
1382 | if (p_buf) | ||
1383 | strncpy(p_buf, buf, p_buf_len); | ||
1384 | else | ||
1385 | printk("%s", buf); | ||
1386 | |||
1387 | return ret; | ||
1388 | } | ||
1389 | |||
1390 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | ||
1391 | { | ||
1392 | /* | ||
1393 | * The VPD identification association.. | ||
1394 | * | ||
1395 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | ||
1396 | */ | ||
1397 | vpd->association = (page_83[1] & 0x30); | ||
1398 | return transport_dump_vpd_assoc(vpd, NULL, 0); | ||
1399 | } | ||
1400 | EXPORT_SYMBOL(transport_set_vpd_assoc); | ||
1401 | |||
1402 | int transport_dump_vpd_ident_type( | ||
1403 | struct t10_vpd *vpd, | ||
1404 | unsigned char *p_buf, | ||
1405 | int p_buf_len) | ||
1406 | { | ||
1407 | unsigned char buf[VPD_TMP_BUF_SIZE]; | ||
1408 | int ret = 0, len; | ||
1409 | |||
1410 | memset(buf, 0, VPD_TMP_BUF_SIZE); | ||
1411 | len = sprintf(buf, "T10 VPD Identifier Type: "); | ||
1412 | |||
1413 | switch (vpd->device_identifier_type) { | ||
1414 | case 0x00: | ||
1415 | sprintf(buf+len, "Vendor specific\n"); | ||
1416 | break; | ||
1417 | case 0x01: | ||
1418 | sprintf(buf+len, "T10 Vendor ID based\n"); | ||
1419 | break; | ||
1420 | case 0x02: | ||
1421 | sprintf(buf+len, "EUI-64 based\n"); | ||
1422 | break; | ||
1423 | case 0x03: | ||
1424 | sprintf(buf+len, "NAA\n"); | ||
1425 | break; | ||
1426 | case 0x04: | ||
1427 | sprintf(buf+len, "Relative target port identifier\n"); | ||
1428 | break; | ||
1429 | case 0x08: | ||
1430 | sprintf(buf+len, "SCSI name string\n"); | ||
1431 | break; | ||
1432 | default: | ||
1433 | sprintf(buf+len, "Unsupported: 0x%02x\n", | ||
1434 | vpd->device_identifier_type); | ||
1435 | ret = -1; | ||
1436 | break; | ||
1437 | } | ||
1438 | |||
1439 | if (p_buf) | ||
1440 | strncpy(p_buf, buf, p_buf_len); | ||
1441 | else | ||
1442 | printk("%s", buf); | ||
1443 | |||
1444 | return ret; | ||
1445 | } | ||
1446 | |||
1447 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | ||
1448 | { | ||
1449 | /* | ||
1450 | * The VPD identifier type.. | ||
1451 | * | ||
1452 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | ||
1453 | */ | ||
1454 | vpd->device_identifier_type = (page_83[1] & 0x0f); | ||
1455 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | ||
1456 | } | ||
1457 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | ||
1458 | |||
1459 | int transport_dump_vpd_ident( | ||
1460 | struct t10_vpd *vpd, | ||
1461 | unsigned char *p_buf, | ||
1462 | int p_buf_len) | ||
1463 | { | ||
1464 | unsigned char buf[VPD_TMP_BUF_SIZE]; | ||
1465 | int ret = 0; | ||
1466 | |||
1467 | memset(buf, 0, VPD_TMP_BUF_SIZE); | ||
1468 | |||
1469 | switch (vpd->device_identifier_code_set) { | ||
1470 | case 0x01: /* Binary */ | ||
1471 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | ||
1472 | &vpd->device_identifier[0]); | ||
1473 | break; | ||
1474 | case 0x02: /* ASCII */ | ||
1475 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | ||
1476 | &vpd->device_identifier[0]); | ||
1477 | break; | ||
1478 | case 0x03: /* UTF-8 */ | ||
1479 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | ||
1480 | &vpd->device_identifier[0]); | ||
1481 | break; | ||
1482 | default: | ||
1483 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | ||
1484 | " 0x%02x", vpd->device_identifier_code_set); | ||
1485 | ret = -1; | ||
1486 | break; | ||
1487 | } | ||
1488 | |||
1489 | if (p_buf) | ||
1490 | strncpy(p_buf, buf, p_buf_len); | ||
1491 | else | ||
1492 | printk("%s", buf); | ||
1493 | |||
1494 | return ret; | ||
1495 | } | ||
1496 | |||
1497 | int | ||
1498 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | ||
1499 | { | ||
1500 | static const char hex_str[] = "0123456789abcdef"; | ||
1501 | int j = 0, i = 4; /* offset to start of the identifier */ | ||
1502 | |||
1503 | /* | ||
1504 | * The VPD Code Set (encoding) | ||
1505 | * | ||
1506 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | ||
1507 | */ | ||
1508 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | ||
1509 | switch (vpd->device_identifier_code_set) { | ||
1510 | case 0x01: /* Binary */ | ||
1511 | vpd->device_identifier[j++] = | ||
1512 | hex_str[vpd->device_identifier_type]; | ||
1513 | while (i < (4 + page_83[3])) { | ||
1514 | vpd->device_identifier[j++] = | ||
1515 | hex_str[(page_83[i] & 0xf0) >> 4]; | ||
1516 | vpd->device_identifier[j++] = | ||
1517 | hex_str[page_83[i] & 0x0f]; | ||
1518 | i++; | ||
1519 | } | ||
1520 | break; | ||
1521 | case 0x02: /* ASCII */ | ||
1522 | case 0x03: /* UTF-8 */ | ||
1523 | while (i < (4 + page_83[3])) | ||
1524 | vpd->device_identifier[j++] = page_83[i++]; | ||
1525 | break; | ||
1526 | default: | ||
1527 | break; | ||
1528 | } | ||
1529 | |||
1530 | return transport_dump_vpd_ident(vpd, NULL, 0); | ||
1531 | } | ||
1532 | EXPORT_SYMBOL(transport_set_vpd_ident); | ||
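The binary code-set branch above expands each raw identifier byte into two ASCII hex digits (after first recording the designator type as a leading nibble). A stand-alone sketch of just the expansion loop, with a hypothetical three-byte identifier:

#include <stdio.h>

/* Mirror of the binary expansion loop in transport_set_vpd_ident(). */
static void demo_binary_ident(const unsigned char *page_83, char *out)
{
	static const char hex_str[] = "0123456789abcdef";
	int i = 4, j = 0;	/* identifier starts at descriptor byte 4 */

	while (i < (4 + page_83[3])) {
		out[j++] = hex_str[(page_83[i] & 0xf0) >> 4];
		out[j++] = hex_str[page_83[i] & 0x0f];
		i++;
	}
	out[j] = '\0';
}

int main(void)
{
	/* Hypothetical descriptor: code set 0x01 (binary), length 3 */
	unsigned char page_83[7] = { 0x01, 0x03, 0x00, 0x03, 0xde, 0xad, 0xbf };
	char buf[8];

	demo_binary_ident(page_83, buf);
	printf("%s\n", buf);	/* prints "deadbf" */
	return 0;
}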
1533 | |||
1534 | static void core_setup_task_attr_emulation(struct se_device *dev) | ||
1535 | { | ||
1536 | /* | ||
1537 | * If this device is from Target_Core_Mod/pSCSI, disable the | ||
1538 | * SAM Task Attribute emulation. | ||
1539 | * | ||
1540 | * This is currently not available in upstream Linux/SCSI Target | ||
1541 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | ||
1542 | */ | ||
1543 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1544 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; | ||
1545 | return; | ||
1546 | } | ||
1547 | |||
1548 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | ||
1549 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | ||
1550 | " device\n", TRANSPORT(dev)->name, | ||
1551 | TRANSPORT(dev)->get_device_rev(dev)); | ||
1552 | } | ||
1553 | |||
1554 | static void scsi_dump_inquiry(struct se_device *dev) | ||
1555 | { | ||
1556 | struct t10_wwn *wwn = DEV_T10_WWN(dev); | ||
1557 | int i, device_type; | ||
1558 | /* | ||
1559 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | ||
1560 | */ | ||
1561 | printk(" Vendor: "); | ||
1562 | for (i = 0; i < 8; i++) | ||
1563 | if (wwn->vendor[i] >= 0x20) | ||
1564 | printk("%c", wwn->vendor[i]); | ||
1565 | else | ||
1566 | printk(" "); | ||
1567 | |||
1568 | printk(" Model: "); | ||
1569 | for (i = 0; i < 16; i++) | ||
1570 | if (wwn->model[i] >= 0x20) | ||
1571 | printk("%c", wwn->model[i]); | ||
1572 | else | ||
1573 | printk(" "); | ||
1574 | |||
1575 | printk(" Revision: "); | ||
1576 | for (i = 0; i < 4; i++) | ||
1577 | if (wwn->revision[i] >= 0x20) | ||
1578 | printk("%c", wwn->revision[i]); | ||
1579 | else | ||
1580 | printk(" "); | ||
1581 | |||
1582 | printk("\n"); | ||
1583 | |||
1584 | device_type = TRANSPORT(dev)->get_device_type(dev); | ||
1585 | printk(" Type: %s ", scsi_device_type(device_type)); | ||
1586 | printk(" ANSI SCSI revision: %02x\n", | ||
1587 | TRANSPORT(dev)->get_device_rev(dev)); | ||
1588 | } | ||
1589 | |||
1590 | struct se_device *transport_add_device_to_core_hba( | ||
1591 | struct se_hba *hba, | ||
1592 | struct se_subsystem_api *transport, | ||
1593 | struct se_subsystem_dev *se_dev, | ||
1594 | u32 device_flags, | ||
1595 | void *transport_dev, | ||
1596 | struct se_dev_limits *dev_limits, | ||
1597 | const char *inquiry_prod, | ||
1598 | const char *inquiry_rev) | ||
1599 | { | ||
1600 | int force_pt; | ||
1601 | struct se_device *dev; | ||
1602 | |||
1603 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | ||
1604 | if (!(dev)) { | ||
1605 | printk(KERN_ERR "Unable to allocate memory for struct se_device\n"); | ||
1606 | return NULL; | ||
1607 | } | ||
1608 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); | ||
1609 | if (!(dev->dev_queue_obj)) { | ||
1610 | printk(KERN_ERR "Unable to allocate memory for" | ||
1611 | " dev->dev_queue_obj\n"); | ||
1612 | kfree(dev); | ||
1613 | return NULL; | ||
1614 | } | ||
1615 | transport_init_queue_obj(dev->dev_queue_obj); | ||
1616 | |||
1617 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), | ||
1618 | GFP_KERNEL); | ||
1619 | if (!(dev->dev_status_queue_obj)) { | ||
1620 | printk(KERN_ERR "Unable to allocate memory for" | ||
1621 | " dev->dev_status_queue_obj\n"); | ||
1622 | kfree(dev->dev_queue_obj); | ||
1623 | kfree(dev); | ||
1624 | return NULL; | ||
1625 | } | ||
1626 | transport_init_queue_obj(dev->dev_status_queue_obj); | ||
1627 | |||
1628 | dev->dev_flags = device_flags; | ||
1629 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | ||
1630 | dev->dev_ptr = (void *) transport_dev; | ||
1631 | dev->se_hba = hba; | ||
1632 | dev->se_sub_dev = se_dev; | ||
1633 | dev->transport = transport; | ||
1634 | atomic_set(&dev->active_cmds, 0); | ||
1635 | INIT_LIST_HEAD(&dev->dev_list); | ||
1636 | INIT_LIST_HEAD(&dev->dev_sep_list); | ||
1637 | INIT_LIST_HEAD(&dev->dev_tmr_list); | ||
1638 | INIT_LIST_HEAD(&dev->execute_task_list); | ||
1639 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | ||
1640 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | ||
1641 | INIT_LIST_HEAD(&dev->state_task_list); | ||
1642 | spin_lock_init(&dev->execute_task_lock); | ||
1643 | spin_lock_init(&dev->delayed_cmd_lock); | ||
1644 | spin_lock_init(&dev->ordered_cmd_lock); | ||
1645 | spin_lock_init(&dev->state_task_lock); | ||
1646 | spin_lock_init(&dev->dev_alua_lock); | ||
1647 | spin_lock_init(&dev->dev_reservation_lock); | ||
1648 | spin_lock_init(&dev->dev_status_lock); | ||
1649 | spin_lock_init(&dev->dev_status_thr_lock); | ||
1650 | spin_lock_init(&dev->se_port_lock); | ||
1651 | spin_lock_init(&dev->se_tmr_lock); | ||
1652 | |||
1653 | dev->queue_depth = dev_limits->queue_depth; | ||
1654 | atomic_set(&dev->depth_left, dev->queue_depth); | ||
1655 | atomic_set(&dev->dev_ordered_id, 0); | ||
1656 | |||
1657 | se_dev_set_default_attribs(dev, dev_limits); | ||
1658 | |||
1659 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | ||
1660 | dev->creation_time = get_jiffies_64(); | ||
1661 | spin_lock_init(&dev->stats_lock); | ||
1662 | |||
1663 | spin_lock(&hba->device_lock); | ||
1664 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | ||
1665 | hba->dev_count++; | ||
1666 | spin_unlock(&hba->device_lock); | ||
1667 | /* | ||
1668 | * Setup the SAM Task Attribute emulation for struct se_device | ||
1669 | */ | ||
1670 | core_setup_task_attr_emulation(dev); | ||
1671 | /* | ||
1672 | * Force PR and ALUA passthrough emulation with internal object use. | ||
1673 | */ | ||
1674 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | ||
1675 | /* | ||
1676 | * Setup the Reservations infrastructure for struct se_device | ||
1677 | */ | ||
1678 | core_setup_reservations(dev, force_pt); | ||
1679 | /* | ||
1680 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | ||
1681 | */ | ||
1682 | if (core_setup_alua(dev, force_pt) < 0) | ||
1683 | goto out; | ||
1684 | |||
1685 | /* | ||
1686 | * Startup the struct se_device processing thread | ||
1687 | */ | ||
1688 | dev->process_thread = kthread_run(transport_processing_thread, dev, | ||
1689 | "LIO_%s", TRANSPORT(dev)->name); | ||
1690 | if (IS_ERR(dev->process_thread)) { | ||
1691 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | ||
1692 | TRANSPORT(dev)->name); | ||
1693 | goto out; | ||
1694 | } | ||
1695 | |||
1696 | /* | ||
1697 | * Preload the initial INQUIRY const values if we are doing | ||
1698 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | ||
1699 | * passthrough because this is being provided by the backend LLD. | ||
1700 | * This is required so that transport_get_inquiry() copies these | ||
1701 | * originals once back into DEV_T10_WWN(dev) for the virtual device | ||
1702 | * setup. | ||
1703 | */ | ||
1704 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | ||
1705 | if (!(inquiry_prod) || !(inquiry_rev)) { | ||
1706 | printk(KERN_ERR "All non TCM/pSCSI plugins require" | ||
1707 | " INQUIRY consts\n"); | ||
1708 | goto out; | ||
1709 | } | ||
1710 | |||
1711 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); | ||
1712 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); | ||
1713 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); | ||
1714 | } | ||
1715 | scsi_dump_inquiry(dev); | ||
1716 | |||
1717 | return dev; | ||
1718 | out: | ||
1719 | if (!IS_ERR_OR_NULL(dev->process_thread)) | ||
1720 | kthread_stop(dev->process_thread); | ||
1721 | |||
1722 | spin_lock(&hba->device_lock); | ||
1723 | list_del(&dev->dev_list); | ||
1724 | hba->dev_count--; | ||
1725 | spin_unlock(&hba->device_lock); | ||
1726 | |||
1727 | se_release_vpd_for_dev(dev); | ||
1728 | |||
1729 | kfree(dev->dev_status_queue_obj); | ||
1730 | kfree(dev->dev_queue_obj); | ||
1731 | kfree(dev); | ||
1732 | |||
1733 | return NULL; | ||
1734 | } | ||
1735 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | ||
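A backend subsystem plugin would call this export from its device-creation path. A hedged sketch of such a caller follows; my_create_virtdevice, my_subsystem_api, my_dev and the queue depth are hypothetical, only the call signature and the "INQUIRY consts required for non-pSCSI backends" rule come from the code above:

static struct se_device *my_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *my_dev)
{
	struct se_dev_limits dev_limits;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	dev_limits.queue_depth = 32;	/* hypothetical backend depth */

	/* Non-pSCSI backends must pass INQUIRY product/revision consts */
	return transport_add_device_to_core_hba(hba, &my_subsystem_api,
			se_dev, 0, my_dev, &dev_limits, "MYDISK", "0.1");
}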
1736 | |||
1737 | /* transport_generic_prepare_cdb(): | ||
1738 | * | ||
1739 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | ||
1740 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | ||
1741 | * The point of this is that, since we are mapping iSCSI LUNs to | ||
1742 | * SCSI Target IDs, a non-zero LUN in the CDB will throw the | ||
1743 | * devices and HBAs for a loop. | ||
1744 | */ | ||
1745 | static inline void transport_generic_prepare_cdb( | ||
1746 | unsigned char *cdb) | ||
1747 | { | ||
1748 | switch (cdb[0]) { | ||
1749 | case READ_10: /* SBC - RDProtect */ | ||
1750 | case READ_12: /* SBC - RDProtect */ | ||
1751 | case READ_16: /* SBC - RDProtect */ | ||
1752 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | ||
1753 | case VERIFY: /* SBC - VRProtect */ | ||
1754 | case VERIFY_16: /* SBC - VRProtect */ | ||
1755 | case WRITE_VERIFY: /* SBC - VRProtect */ | ||
1756 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | ||
1757 | break; | ||
1758 | default: | ||
1759 | cdb[1] &= 0x1f; /* clear logical unit number */ | ||
1760 | break; | ||
1761 | } | ||
1762 | } | ||
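Concretely: under the legacy SAM-2 addressing described above, a CDB whose second byte is 0x40 carries LUN 2 in bits 7-5, so for any opcode not in the pass-through list:

	unsigned char cdb1 = 0x40;	/* LUN 2 encoded in bits 7-5 (SAM-2) */
	cdb1 &= 0x1f;			/* -> 0x00: the backend always sees LUN 0 */

The listed READ/WRITE/VERIFY opcodes are skipped because byte 1 carries RDProtect/VRProtect bits in that position, and SEND_DIAGNOSTIC carries its SELF-TEST code there.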
1763 | |||
1764 | static struct se_task * | ||
1765 | transport_generic_get_task(struct se_cmd *cmd, | ||
1766 | enum dma_data_direction data_direction) | ||
1767 | { | ||
1768 | struct se_task *task; | ||
1769 | struct se_device *dev = SE_DEV(cmd); | ||
1770 | unsigned long flags; | ||
1771 | |||
1772 | task = dev->transport->alloc_task(cmd); | ||
1773 | if (!task) { | ||
1774 | printk(KERN_ERR "Unable to allocate struct se_task\n"); | ||
1775 | return NULL; | ||
1776 | } | ||
1777 | |||
1778 | INIT_LIST_HEAD(&task->t_list); | ||
1779 | INIT_LIST_HEAD(&task->t_execute_list); | ||
1780 | INIT_LIST_HEAD(&task->t_state_list); | ||
1781 | init_completion(&task->task_stop_comp); | ||
1782 | task->task_no = T_TASK(cmd)->t_tasks_no++; | ||
1783 | task->task_se_cmd = cmd; | ||
1784 | task->se_dev = dev; | ||
1785 | task->task_data_direction = data_direction; | ||
1786 | |||
1787 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
1788 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); | ||
1789 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
1790 | |||
1791 | return task; | ||
1792 | } | ||
1793 | |||
1794 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | ||
1795 | |||
1796 | void transport_device_setup_cmd(struct se_cmd *cmd) | ||
1797 | { | ||
1798 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; | ||
1799 | } | ||
1800 | EXPORT_SYMBOL(transport_device_setup_cmd); | ||
1801 | |||
1802 | /* | ||
1803 | * Used by fabric modules containing a local struct se_cmd within their | ||
1804 | * fabric dependent per I/O descriptor. | ||
1805 | */ | ||
1806 | void transport_init_se_cmd( | ||
1807 | struct se_cmd *cmd, | ||
1808 | struct target_core_fabric_ops *tfo, | ||
1809 | struct se_session *se_sess, | ||
1810 | u32 data_length, | ||
1811 | int data_direction, | ||
1812 | int task_attr, | ||
1813 | unsigned char *sense_buffer) | ||
1814 | { | ||
1815 | INIT_LIST_HEAD(&cmd->se_lun_list); | ||
1816 | INIT_LIST_HEAD(&cmd->se_delayed_list); | ||
1817 | INIT_LIST_HEAD(&cmd->se_ordered_list); | ||
1818 | /* | ||
1819 | * Setup t_task pointer to t_task_backstore | ||
1820 | */ | ||
1821 | cmd->t_task = &cmd->t_task_backstore; | ||
1822 | |||
1823 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); | ||
1824 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | ||
1825 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); | ||
1826 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); | ||
1827 | spin_lock_init(&T_TASK(cmd)->t_state_lock); | ||
1828 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); | ||
1829 | |||
1830 | cmd->se_tfo = tfo; | ||
1831 | cmd->se_sess = se_sess; | ||
1832 | cmd->data_length = data_length; | ||
1833 | cmd->data_direction = data_direction; | ||
1834 | cmd->sam_task_attr = task_attr; | ||
1835 | cmd->sense_buffer = sense_buffer; | ||
1836 | } | ||
1837 | EXPORT_SYMBOL(transport_init_se_cmd); | ||
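As the comment notes, fabric modules embed a struct se_cmd inside their per-I/O descriptor and hand it to transport_init_se_cmd(). A hedged sketch of that pattern (struct my_fabric_cmd, my_fabric_ops and the sense-buffer size are hypothetical; the init call matches the signature above):

struct my_fabric_cmd {
	struct se_cmd se_cmd;		/* embedded TCM descriptor */
	unsigned char sense[96];	/* hypothetical sense buffer size */
	/* ... fabric-private per-I/O state ... */
};

static void my_fabric_init_cmd(
	struct my_fabric_cmd *fcmd,
	struct se_session *sess,
	u32 length, int dir, int attr)
{
	transport_init_se_cmd(&fcmd->se_cmd, &my_fabric_ops, sess,
			length, dir, attr, &fcmd->sense[0]);
}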
1838 | |||
1839 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | ||
1840 | { | ||
1841 | /* | ||
1842 | * Check if SAM Task Attribute emulation is enabled for this | ||
1843 | * struct se_device storage object | ||
1844 | */ | ||
1845 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | ||
1846 | return 0; | ||
1847 | |||
1848 | if (cmd->sam_task_attr == TASK_ATTR_ACA) { | ||
1849 | DEBUG_STA("SAM Task Attribute ACA" | ||
1850 | " emulation is not supported\n"); | ||
1851 | return -1; | ||
1852 | } | ||
1853 | /* | ||
1854 | * Used to determine when ORDERED commands should go from | ||
1855 | * Dormant to Active status. | ||
1856 | */ | ||
1857 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); | ||
1858 | smp_mb__after_atomic_inc(); | ||
1859 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | ||
1860 | cmd->se_ordered_id, cmd->sam_task_attr, | ||
1861 | TRANSPORT(cmd->se_dev)->name); | ||
1862 | return 0; | ||
1863 | } | ||
1864 | |||
1865 | void transport_free_se_cmd( | ||
1866 | struct se_cmd *se_cmd) | ||
1867 | { | ||
1868 | if (se_cmd->se_tmr_req) | ||
1869 | core_tmr_release_req(se_cmd->se_tmr_req); | ||
1870 | /* | ||
1871 | * Check and free any extended CDB buffer that was allocated | ||
1872 | */ | ||
1873 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) | ||
1874 | kfree(T_TASK(se_cmd)->t_task_cdb); | ||
1875 | } | ||
1876 | EXPORT_SYMBOL(transport_free_se_cmd); | ||
1877 | |||
1878 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | ||
1879 | |||
1880 | /* transport_generic_allocate_tasks(): | ||
1881 | * | ||
1882 | * Called from fabric RX Thread. | ||
1883 | */ | ||
1884 | int transport_generic_allocate_tasks( | ||
1885 | struct se_cmd *cmd, | ||
1886 | unsigned char *cdb) | ||
1887 | { | ||
1888 | int ret; | ||
1889 | |||
1890 | transport_generic_prepare_cdb(cdb); | ||
1891 | |||
1892 | /* | ||
1893 | * This is needed for early exceptions. | ||
1894 | */ | ||
1895 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | ||
1896 | |||
1897 | transport_device_setup_cmd(cmd); | ||
1898 | /* | ||
1899 | * Ensure that the received CDB is less than the max (252 + 8) bytes | ||
1900 | * for VARIABLE_LENGTH_CMD | ||
1901 | */ | ||
1902 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | ||
1903 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | ||
1904 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | ||
1905 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | ||
1906 | return -1; | ||
1907 | } | ||
1908 | /* | ||
1909 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | ||
1910 | * allocate the additional extended CDB buffer now.. Otherwise | ||
1911 | * setup the pointer from __t_task_cdb to t_task_cdb. | ||
1912 | */ | ||
1913 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { | ||
1914 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), | ||
1915 | GFP_KERNEL); | ||
1916 | if (!(T_TASK(cmd)->t_task_cdb)) { | ||
1917 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" | ||
1918 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", | ||
1919 | scsi_command_size(cdb), | ||
1920 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); | ||
1921 | return -1; | ||
1922 | } | ||
1923 | } else | ||
1924 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; | ||
1925 | /* | ||
1926 | * Copy the original CDB into T_TASK(cmd). | ||
1927 | */ | ||
1928 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); | ||
1929 | /* | ||
1930 | * Setup the received CDB based on SCSI defined opcodes and | ||
1931 | * perform unit attention, persistent reservations and ALUA | ||
1932 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb | ||
1933 | * pointer is expected to be setup before we reach this point. | ||
1934 | */ | ||
1935 | ret = transport_generic_cmd_sequencer(cmd, cdb); | ||
1936 | if (ret < 0) | ||
1937 | return ret; | ||
1938 | /* | ||
1939 | * Check for SAM Task Attribute Emulation | ||
1940 | */ | ||
1941 | if (transport_check_alloc_task_attr(cmd) < 0) { | ||
1942 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
1943 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
1944 | return -2; | ||
1945 | } | ||
1946 | spin_lock(&cmd->se_lun->lun_sep_lock); | ||
1947 | if (cmd->se_lun->lun_sep) | ||
1948 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | ||
1949 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
1950 | return 0; | ||
1951 | } | ||
1952 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | ||
1953 | |||
1954 | /* | ||
1955 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | ||
1956 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status | ||
1957 | */ | ||
1958 | int transport_generic_handle_cdb( | ||
1959 | struct se_cmd *cmd) | ||
1960 | { | ||
1961 | if (!SE_LUN(cmd)) { | ||
1962 | dump_stack(); | ||
1963 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | ||
1964 | return -1; | ||
1965 | } | ||
1966 | |||
1967 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | ||
1968 | return 0; | ||
1969 | } | ||
1970 | EXPORT_SYMBOL(transport_generic_handle_cdb); | ||
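Putting the exports together, a fabric RX thread without a TFO->new_cmd_map() would decode its PDU, set up SE_LUN(cmd), and then run roughly the following; a hedged sketch (my_fabric_submit is hypothetical, error handling elided):

/* SE_LUN(cmd) must already be set: transport_generic_allocate_tasks()
 * dereferences it via transport_device_setup_cmd(). */
static int my_fabric_submit(struct se_cmd *se_cmd, unsigned char *cdb)
{
	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
		return -1;	/* caller handles the failure, e.g. CHECK_CONDITION */

	return transport_generic_handle_cdb(se_cmd);
}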
1971 | |||
1972 | /* | ||
1973 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | ||
1974 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | ||
1975 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | ||
1976 | */ | ||
1977 | int transport_generic_handle_cdb_map( | ||
1978 | struct se_cmd *cmd) | ||
1979 | { | ||
1980 | if (!SE_LUN(cmd)) { | ||
1981 | dump_stack(); | ||
1982 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | ||
1983 | return -1; | ||
1984 | } | ||
1985 | |||
1986 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | ||
1987 | return 0; | ||
1988 | } | ||
1989 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | ||
1990 | |||
1991 | /* transport_generic_handle_data(): | ||
1992 | * | ||
1993 | * | ||
1994 | */ | ||
1995 | int transport_generic_handle_data( | ||
1996 | struct se_cmd *cmd) | ||
1997 | { | ||
1998 | /* | ||
1999 | * For the software fabric case, we assume the nexus is being | ||
2000 | * failed/shutdown when signals are pending from the kthread context | ||
2001 | * caller, so we return a failure. For the HW target mode case running | ||
2002 | * in interrupt code, the signal_pending() check is skipped. | ||
2003 | */ | ||
2004 | if (!in_interrupt() && signal_pending(current)) | ||
2005 | return -1; | ||
2006 | /* | ||
2007 | * If the received CDB has already been ABORTED by the generic | ||
2008 | * target engine, we now call transport_check_aborted_status() | ||
2009 | * to queue any delayed TASK_ABORTED status for the received CDB to the | ||
2010 | * fabric module as we are expecting no further incoming DATA OUT | ||
2011 | * sequences at this point. | ||
2012 | */ | ||
2013 | if (transport_check_aborted_status(cmd, 1) != 0) | ||
2014 | return 0; | ||
2015 | |||
2016 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | ||
2017 | return 0; | ||
2018 | } | ||
2019 | EXPORT_SYMBOL(transport_generic_handle_data); | ||
2020 | |||
2021 | /* transport_generic_handle_tmr(): | ||
2022 | * | ||
2023 | * | ||
2024 | */ | ||
2025 | int transport_generic_handle_tmr( | ||
2026 | struct se_cmd *cmd) | ||
2027 | { | ||
2028 | /* | ||
2029 | * This is needed for early exceptions. | ||
2030 | */ | ||
2031 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | ||
2032 | transport_device_setup_cmd(cmd); | ||
2033 | |||
2034 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | ||
2035 | return 0; | ||
2036 | } | ||
2037 | EXPORT_SYMBOL(transport_generic_handle_tmr); | ||
2038 | |||
2039 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | ||
2040 | { | ||
2041 | struct se_task *task, *task_tmp; | ||
2042 | unsigned long flags; | ||
2043 | int ret = 0; | ||
2044 | |||
2045 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | ||
2046 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
2047 | |||
2048 | /* | ||
2049 | * No tasks remain in the execution queue | ||
2050 | */ | ||
2051 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2052 | list_for_each_entry_safe(task, task_tmp, | ||
2053 | &T_TASK(cmd)->t_task_list, t_list) { | ||
2054 | DEBUG_TS("task_no[%d] - Processing task %p\n", | ||
2055 | task->task_no, task); | ||
2056 | /* | ||
2057 | * If the struct se_task has not been sent and is not active, | ||
2058 | * remove the struct se_task from the execution queue. | ||
2059 | */ | ||
2060 | if (!atomic_read(&task->task_sent) && | ||
2061 | !atomic_read(&task->task_active)) { | ||
2062 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
2063 | flags); | ||
2064 | transport_remove_task_from_execute_queue(task, | ||
2065 | task->se_dev); | ||
2066 | |||
2067 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | ||
2068 | task->task_no); | ||
2069 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2070 | continue; | ||
2071 | } | ||
2072 | |||
2073 | /* | ||
2074 | * If the struct se_task is active, sleep until it is returned | ||
2075 | * from the plugin. | ||
2076 | */ | ||
2077 | if (atomic_read(&task->task_active)) { | ||
2078 | atomic_set(&task->task_stop, 1); | ||
2079 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
2080 | flags); | ||
2081 | |||
2082 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | ||
2083 | task->task_no); | ||
2084 | wait_for_completion(&task->task_stop_comp); | ||
2085 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | ||
2086 | task->task_no); | ||
2087 | |||
2088 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2089 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | ||
2090 | |||
2091 | atomic_set(&task->task_active, 0); | ||
2092 | atomic_set(&task->task_stop, 0); | ||
2093 | } else { | ||
2094 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); | ||
2095 | ret++; | ||
2096 | } | ||
2097 | |||
2098 | __transport_stop_task_timer(task, &flags); | ||
2099 | } | ||
2100 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2101 | |||
2102 | return ret; | ||
2103 | } | ||
2104 | |||
2105 | static void transport_failure_reset_queue_depth(struct se_device *dev) | ||
2106 | { | ||
2107 | unsigned long flags; | ||
2108 | |||
2109 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2110 | atomic_inc(&dev->depth_left); | ||
2111 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | ||
2112 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2113 | } | ||
2114 | |||
2115 | /* | ||
2116 | * Handle SAM-esque emulation for generic transport request failures. | ||
2117 | */ | ||
2118 | static void transport_generic_request_failure( | ||
2119 | struct se_cmd *cmd, | ||
2120 | struct se_device *dev, | ||
2121 | int complete, | ||
2122 | int sc) | ||
2123 | { | ||
2124 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | ||
2125 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | ||
2126 | T_TASK(cmd)->t_task_cdb[0]); | ||
2127 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" | ||
2128 | " %d/%d transport_error_status: %d\n", | ||
2129 | CMD_TFO(cmd)->get_cmd_state(cmd), | ||
2130 | cmd->t_state, cmd->deferred_t_state, | ||
2131 | cmd->transport_error_status); | ||
2132 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | ||
2133 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | ||
2134 | " t_transport_active: %d t_transport_stop: %d" | ||
2135 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, | ||
2136 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | ||
2137 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | ||
2138 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), | ||
2139 | atomic_read(&T_TASK(cmd)->t_transport_active), | ||
2140 | atomic_read(&T_TASK(cmd)->t_transport_stop), | ||
2141 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | ||
2142 | |||
2143 | transport_stop_all_task_timers(cmd); | ||
2144 | |||
2145 | if (dev) | ||
2146 | transport_failure_reset_queue_depth(dev); | ||
2147 | /* | ||
2148 | * For SAM Task Attribute emulation for failed struct se_cmd | ||
2149 | */ | ||
2150 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
2151 | transport_complete_task_attr(cmd); | ||
2152 | |||
2153 | if (complete) { | ||
2154 | transport_direct_request_timeout(cmd); | ||
2155 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2156 | } | ||
2157 | |||
2158 | switch (cmd->transport_error_status) { | ||
2159 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | ||
2160 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
2161 | break; | ||
2162 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | ||
2163 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | ||
2164 | break; | ||
2165 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | ||
2166 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
2167 | break; | ||
2168 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | ||
2169 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | ||
2170 | break; | ||
2171 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | ||
2172 | if (!sc) | ||
2173 | transport_new_cmd_failure(cmd); | ||
2174 | /* | ||
2175 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | ||
2176 | * we force this session to fall back to session | ||
2177 | * recovery. | ||
2178 | */ | ||
2179 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); | ||
2180 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); | ||
2181 | |||
2182 | goto check_stop; | ||
2183 | case PYX_TRANSPORT_LU_COMM_FAILURE: | ||
2184 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | ||
2185 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
2186 | break; | ||
2187 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | ||
2188 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | ||
2189 | break; | ||
2190 | case PYX_TRANSPORT_WRITE_PROTECTED: | ||
2191 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | ||
2192 | break; | ||
2193 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | ||
2194 | /* | ||
2195 | * No SENSE Data payload for this case, set SCSI Status | ||
2196 | * and queue the response to $FABRIC_MOD. | ||
2197 | * | ||
2198 | * Uses linux/include/scsi/scsi.h SAM status codes defs | ||
2199 | */ | ||
2200 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | ||
2201 | /* | ||
2202 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | ||
2203 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | ||
2204 | * CONFLICT STATUS. | ||
2205 | * | ||
2206 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | ||
2207 | */ | ||
2208 | if (SE_SESS(cmd) && | ||
2209 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | ||
2210 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | ||
2211 | cmd->orig_fe_lun, 0x2C, | ||
2212 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | ||
2213 | |||
2214 | CMD_TFO(cmd)->queue_status(cmd); | ||
2215 | goto check_stop; | ||
2216 | case PYX_TRANSPORT_USE_SENSE_REASON: | ||
2217 | /* | ||
2218 | * struct se_cmd->scsi_sense_reason already set | ||
2219 | */ | ||
2220 | break; | ||
2221 | default: | ||
2222 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | ||
2223 | T_TASK(cmd)->t_task_cdb[0], | ||
2224 | cmd->transport_error_status); | ||
2225 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
2226 | break; | ||
2227 | } | ||
2228 | |||
2229 | if (!sc) | ||
2230 | transport_new_cmd_failure(cmd); | ||
2231 | else | ||
2232 | transport_send_check_condition_and_sense(cmd, | ||
2233 | cmd->scsi_sense_reason, 0); | ||
2234 | check_stop: | ||
2235 | transport_lun_remove_cmd(cmd); | ||
2236 | transport_cmd_check_stop_to_fabric(cmd); | ||
2238 | } | ||
2239 | |||
2240 | static void transport_direct_request_timeout(struct se_cmd *cmd) | ||
2241 | { | ||
2242 | unsigned long flags; | ||
2243 | |||
2244 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2245 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { | ||
2246 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2247 | return; | ||
2248 | } | ||
2249 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { | ||
2250 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2251 | return; | ||
2252 | } | ||
2253 | |||
2254 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), | ||
2255 | &T_TASK(cmd)->t_se_count); | ||
2256 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2257 | } | ||
2258 | |||
2259 | static void transport_generic_request_timeout(struct se_cmd *cmd) | ||
2260 | { | ||
2261 | unsigned long flags; | ||
2262 | |||
2263 | /* | ||
2264 | * Reset T_TASK(cmd)->t_se_count so that the final call to | ||
2265 | * transport_generic_remove() can free memory resources. | ||
2266 | */ | ||
2267 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2268 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { | ||
2269 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); | ||
2270 | |||
2271 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); | ||
2272 | } | ||
2273 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2274 | |||
2275 | transport_generic_remove(cmd, 0, 0); | ||
2276 | } | ||
2277 | |||
2278 | static int | ||
2279 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | ||
2280 | { | ||
2281 | unsigned char *buf; | ||
2282 | |||
2283 | buf = kzalloc(data_length, GFP_KERNEL); | ||
2284 | if (!(buf)) { | ||
2285 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | ||
2286 | return -1; | ||
2287 | } | ||
2288 | |||
2289 | T_TASK(cmd)->t_tasks_se_num = 0; | ||
2290 | T_TASK(cmd)->t_task_buf = buf; | ||
2291 | |||
2292 | return 0; | ||
2293 | } | ||
2294 | |||
2295 | static inline u32 transport_lba_21(unsigned char *cdb) | ||
2296 | { | ||
2297 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | ||
2298 | } | ||
2299 | |||
2300 | static inline u32 transport_lba_32(unsigned char *cdb) | ||
2301 | { | ||
2302 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | ||
2303 | } | ||
2304 | |||
2305 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | ||
2306 | { | ||
2307 | unsigned int __v1, __v2; | ||
2308 | |||
2309 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | ||
2310 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
2311 | |||
2312 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
2313 | } | ||
2314 | |||
2315 | /* | ||
2316 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | ||
2317 | */ | ||
2318 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | ||
2319 | { | ||
2320 | unsigned int __v1, __v2; | ||
2321 | |||
2322 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | ||
2323 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | ||
2324 | |||
2325 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | ||
2326 | } | ||
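These helpers simply reassemble the big-endian LBA bytes from their fixed CDB offsets. For example, a READ_10 (opcode 0x28) carrying LBA bytes 0x00 0x12 0x34 0x56 in cdb[2..5]:

	unsigned char cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0, 0x08, 0 };
	u32 lba = transport_lba_32(cdb);	/* 0x00123456 */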
2327 | |||
2328 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | ||
2329 | { | ||
2330 | unsigned long flags; | ||
2331 | |||
2332 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | ||
2333 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | ||
2334 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | ||
2335 | } | ||
2336 | |||
2337 | /* | ||
2338 | * Called from interrupt context. | ||
2339 | */ | ||
2340 | static void transport_task_timeout_handler(unsigned long data) | ||
2341 | { | ||
2342 | struct se_task *task = (struct se_task *)data; | ||
2343 | struct se_cmd *cmd = TASK_CMD(task); | ||
2344 | unsigned long flags; | ||
2345 | |||
2346 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | ||
2347 | |||
2348 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2349 | if (task->task_flags & TF_STOP) { | ||
2350 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2351 | return; | ||
2352 | } | ||
2353 | task->task_flags &= ~TF_RUNNING; | ||
2354 | |||
2355 | /* | ||
2356 | * Determine if transport_complete_task() has already been called. | ||
2357 | */ | ||
2358 | if (!(atomic_read(&task->task_active))) { | ||
2359 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | ||
2360 | " == 0\n", task, cmd); | ||
2361 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2362 | return; | ||
2363 | } | ||
2364 | |||
2365 | atomic_inc(&T_TASK(cmd)->t_se_count); | ||
2366 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); | ||
2367 | T_TASK(cmd)->t_tasks_failed = 1; | ||
2368 | |||
2369 | atomic_set(&task->task_timeout, 1); | ||
2370 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | ||
2371 | task->task_scsi_status = 1; | ||
2372 | |||
2373 | if (atomic_read(&task->task_stop)) { | ||
2374 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | ||
2375 | " == 1\n", task, cmd); | ||
2376 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2377 | complete(&task->task_stop_comp); | ||
2378 | return; | ||
2379 | } | ||
2380 | |||
2381 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | ||
2382 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" | ||
2383 | " t_task_cdbs_left\n", task, cmd); | ||
2384 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2385 | return; | ||
2386 | } | ||
2387 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | ||
2388 | task, cmd); | ||
2389 | |||
2390 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | ||
2391 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2392 | |||
2393 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | ||
2394 | } | ||
2395 | |||
2396 | /* | ||
2397 | * Called with T_TASK(cmd)->t_state_lock held. | ||
2398 | */ | ||
2399 | static void transport_start_task_timer(struct se_task *task) | ||
2400 | { | ||
2401 | struct se_device *dev = task->se_dev; | ||
2402 | int timeout; | ||
2403 | |||
2404 | if (task->task_flags & TF_RUNNING) | ||
2405 | return; | ||
2406 | /* | ||
2407 | * If the task_timeout is disabled, exit now. | ||
2408 | */ | ||
2409 | timeout = DEV_ATTRIB(dev)->task_timeout; | ||
2410 | if (!(timeout)) | ||
2411 | return; | ||
2412 | |||
2413 | init_timer(&task->task_timer); | ||
2414 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | ||
2415 | task->task_timer.data = (unsigned long) task; | ||
2416 | task->task_timer.function = transport_task_timeout_handler; | ||
2417 | |||
2418 | task->task_flags |= TF_RUNNING; | ||
2419 | add_timer(&task->task_timer); | ||
2420 | #if 0 | ||
2421 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" | ||
2422 | " %d\n", task->task_se_cmd, task, timeout); | ||
2423 | #endif | ||
2424 | } | ||
2425 | |||
2426 | /* | ||
2427 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. | ||
2428 | */ | ||
2429 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | ||
2430 | { | ||
2431 | struct se_cmd *cmd = TASK_CMD(task); | ||
2432 | |||
2433 | if (!(task->task_flags & TF_RUNNING)) | ||
2434 | return; | ||
2435 | |||
2436 | task->task_flags |= TF_STOP; | ||
2437 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); | ||
2438 | |||
2439 | del_timer_sync(&task->task_timer); | ||
2440 | |||
2441 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); | ||
2442 | task->task_flags &= ~TF_RUNNING; | ||
2443 | task->task_flags &= ~TF_STOP; | ||
2444 | } | ||
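The lock juggling here is deliberate: TF_STOP is set while t_state_lock is held so a concurrently firing transport_task_timeout_handler() bails out early; the lock is then dropped because del_timer_sync() must wait for any running handler, and that handler itself takes t_state_lock; the TF_RUNNING/TF_STOP flags are only cleared once the lock has been re-acquired.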
2445 | |||
2446 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | ||
2447 | { | ||
2448 | struct se_task *task = NULL, *task_tmp; | ||
2449 | unsigned long flags; | ||
2450 | |||
2451 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2452 | list_for_each_entry_safe(task, task_tmp, | ||
2453 | &T_TASK(cmd)->t_task_list, t_list) | ||
2454 | __transport_stop_task_timer(task, &flags); | ||
2455 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2456 | } | ||
2457 | |||
2458 | static inline int transport_tcq_window_closed(struct se_device *dev) | ||
2459 | { | ||
2460 | if (dev->dev_tcq_window_closed++ < | ||
2461 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | ||
2462 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | ||
2463 | } else | ||
2464 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | ||
2465 | |||
2466 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | ||
2467 | return 0; | ||
2468 | } | ||
2469 | |||
2470 | /* | ||
2471 | * Called from Fabric Module context from transport_execute_tasks() | ||
2472 | * | ||
2473 | * The return of this function determines if the tasks from struct se_cmd | ||
2474 | * get added to the execution queue in transport_execute_tasks(), | ||
2475 | * or are added to the delayed or ordered lists here. | ||
2476 | */ | ||
2477 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | ||
2478 | { | ||
2479 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | ||
2480 | return 1; | ||
2481 | /* | ||
2482 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | ||
2483 | * so the passed struct se_cmd's list of tasks goes to the front of the list. | ||
2484 | */ | ||
2485 | if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | ||
2486 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); | ||
2487 | smp_mb__after_atomic_inc(); | ||
2488 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | ||
2489 | " 0x%02x, se_ordered_id: %u\n", | ||
2490 | T_TASK(cmd)->t_task_cdb[0], | ||
2491 | cmd->se_ordered_id); | ||
2492 | return 1; | ||
2493 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | ||
2494 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); | ||
2495 | list_add_tail(&cmd->se_ordered_list, | ||
2496 | &SE_DEV(cmd)->ordered_cmd_list); | ||
2497 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); | ||
2498 | |||
2499 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); | ||
2500 | smp_mb__after_atomic_inc(); | ||
2501 | |||
2502 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | ||
2503 | " list, se_ordered_id: %u\n", | ||
2504 | T_TASK(cmd)->t_task_cdb[0], | ||
2505 | cmd->se_ordered_id); | ||
2506 | /* | ||
2507 | * Add ORDERED command to tail of execution queue if | ||
2508 | * no other older commands exist that need to be | ||
2509 | * completed first. | ||
2510 | */ | ||
2511 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) | ||
2512 | return 1; | ||
2513 | } else { | ||
2514 | /* | ||
2515 | * For SIMPLE and UNTAGGED Task Attribute commands | ||
2516 | */ | ||
2517 | atomic_inc(&SE_DEV(cmd)->simple_cmds); | ||
2518 | smp_mb__after_atomic_inc(); | ||
2519 | } | ||
2520 | /* | ||
2521 | * If one or more outstanding ORDERED task attributes exist, hold the | ||
2522 | * dormant task(s) built for the passed struct se_cmd; otherwise they are | ||
2523 | * added to the execution queue and become Active for this struct se_device. | ||
2524 | */ | ||
2525 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { | ||
2526 | /* | ||
2527 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | ||
2528 | * will be drained upon completion of the HEAD_OF_QUEUE task. | ||
2529 | */ | ||
2530 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); | ||
2531 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | ||
2532 | list_add_tail(&cmd->se_delayed_list, | ||
2533 | &SE_DEV(cmd)->delayed_cmd_list); | ||
2534 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); | ||
2535 | |||
2536 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | ||
2537 | " delayed CMD list, se_ordered_id: %u\n", | ||
2538 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, | ||
2539 | cmd->se_ordered_id); | ||
2540 | /* | ||
2541 | * Return zero to let transport_execute_tasks() know | ||
2542 | * not to add the delayed tasks to the execution list. | ||
2543 | */ | ||
2544 | return 0; | ||
2545 | } | ||
2546 | /* | ||
2547 | * Otherwise, no ORDERED task attributes exist.. | ||
2548 | */ | ||
2549 | return 1; | ||
2550 | } | ||
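A worked example of the above: with two SIMPLE commands in flight (simple_cmds == 2), an arriving ORDERED command is placed on ordered_cmd_list, dev_ordered_sync becomes non-zero, and because older SIMPLE commands still exist the command lands on delayed_cmd_list. Every subsequent SIMPLE command then also lands on delayed_cmd_list (dev_ordered_sync is still non-zero) until the backlog drains and the ORDERED command completes.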
2551 | |||
2552 | /* | ||
2553 | * Called from fabric module context in transport_generic_new_cmd() and | ||
2554 | * transport_generic_process_write() | ||
2555 | */ | ||
2556 | static int transport_execute_tasks(struct se_cmd *cmd) | ||
2557 | { | ||
2558 | int add_tasks; | ||
2559 | |||
2560 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { | ||
2561 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | ||
2562 | cmd->transport_error_status = | ||
2563 | PYX_TRANSPORT_LU_COMM_FAILURE; | ||
2564 | transport_generic_request_failure(cmd, NULL, 0, 1); | ||
2565 | return 0; | ||
2566 | } | ||
2567 | } | ||
2568 | /* | ||
2569 | * Call transport_cmd_check_stop() to see if a fabric exception | ||
2570 | * has occurred that prevents execution. | ||
2571 | */ | ||
2572 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { | ||
2573 | /* | ||
2574 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | ||
2575 | * attribute for the tasks of the received struct se_cmd CDB | ||
2576 | */ | ||
2577 | add_tasks = transport_execute_task_attr(cmd); | ||
2578 | if (add_tasks == 0) | ||
2579 | goto execute_tasks; | ||
2580 | /* | ||
2581 | * This calls transport_add_tasks_from_cmd() to handle | ||
2582 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | ||
2583 | * (if enabled) in __transport_add_task_to_execute_queue() and | ||
2584 | * transport_add_task_check_sam_attr(). | ||
2585 | */ | ||
2586 | transport_add_tasks_from_cmd(cmd); | ||
2587 | } | ||
2588 | /* | ||
2589 | * Kick the execution queue for the cmd associated struct se_device | ||
2590 | * storage object. | ||
2591 | */ | ||
2592 | execute_tasks: | ||
2593 | __transport_execute_tasks(SE_DEV(cmd)); | ||
2594 | return 0; | ||
2595 | } | ||
2596 | |||
2597 | /* | ||
2598 | * Called to check the struct se_device tcq depth window; once open, pull | ||
2599 | * struct se_task from struct se_device->execute_task_list and send it to the selected transport. | ||
2600 | * | ||
2601 | * Called from transport_processing_thread() | ||
2602 | */ | ||
2603 | static int __transport_execute_tasks(struct se_device *dev) | ||
2604 | { | ||
2605 | int error; | ||
2606 | struct se_cmd *cmd = NULL; | ||
2607 | struct se_task *task; | ||
2608 | unsigned long flags; | ||
2609 | |||
2610 | /* | ||
2611 | * Check if there is enough room in the device and HBA queue to send | ||
2612 | * struct se_transport_task's to the selected transport. | ||
2613 | */ | ||
2614 | check_depth: | ||
2615 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2616 | if (!(atomic_read(&dev->depth_left)) || | ||
2617 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { | ||
2618 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2619 | return transport_tcq_window_closed(dev); | ||
2620 | } | ||
2621 | dev->dev_tcq_window_closed = 0; | ||
2622 | |||
2623 | spin_lock(&dev->execute_task_lock); | ||
2624 | task = transport_get_task_from_execute_queue(dev); | ||
2625 | spin_unlock(&dev->execute_task_lock); | ||
2626 | |||
2627 | if (!task) { | ||
2628 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2629 | return 0; | ||
2630 | } | ||
2631 | |||
2632 | atomic_dec(&dev->depth_left); | ||
2633 | atomic_dec(&SE_HBA(dev)->left_queue_depth); | ||
2634 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | ||
2635 | |||
2636 | cmd = TASK_CMD(task); | ||
2637 | |||
2638 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2639 | atomic_set(&task->task_active, 1); | ||
2640 | atomic_set(&task->task_sent, 1); | ||
2641 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); | ||
2642 | |||
2643 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == | ||
2644 | T_TASK(cmd)->t_task_cdbs) | ||
2645 | atomic_set(&cmd->transport_sent, 1); | ||
2646 | |||
2647 | transport_start_task_timer(task); | ||
2648 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2649 | /* | ||
2650 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | ||
2651 | * to grab REPORT_LUNS CDBs before they hit the | ||
2652 | * struct se_subsystem_api->do_task() caller below. | ||
2653 | */ | ||
2654 | if (cmd->transport_emulate_cdb) { | ||
2655 | error = cmd->transport_emulate_cdb(cmd); | ||
2656 | if (error != 0) { | ||
2657 | cmd->transport_error_status = error; | ||
2658 | atomic_set(&task->task_active, 0); | ||
2659 | atomic_set(&cmd->transport_sent, 0); | ||
2660 | transport_stop_tasks_for_cmd(cmd); | ||
2661 | transport_generic_request_failure(cmd, dev, 0, 1); | ||
2662 | goto check_depth; | ||
2663 | } | ||
2664 | /* | ||
2665 | * Handle the successful completion for transport_emulate_cdb() | ||
2666 | * for synchronous operation when SCF_EMULATE_CDB_ASYNC is not set. | ||
2667 | * Otherwise the caller is expected to complete the task with | ||
2668 | * proper status. | ||
2669 | */ | ||
2670 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | ||
2671 | cmd->scsi_status = SAM_STAT_GOOD; | ||
2672 | task->task_scsi_status = GOOD; | ||
2673 | transport_complete_task(task, 1); | ||
2674 | } | ||
2675 | } else { | ||
2676 | /* | ||
2677 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | ||
2678 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | ||
2679 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | ||
2680 | * LUN emulation code. | ||
2681 | * | ||
2682 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | ||
2683 | * call ->do_task() directly and let the underlying TCM subsystem plugin | ||
2684 | * code handle the CDB emulation. | ||
2685 | */ | ||
2686 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && | ||
2687 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | ||
2688 | error = transport_emulate_control_cdb(task); | ||
2689 | else | ||
2690 | error = TRANSPORT(dev)->do_task(task); | ||
2691 | |||
2692 | if (error != 0) { | ||
2693 | cmd->transport_error_status = error; | ||
2694 | atomic_set(&task->task_active, 0); | ||
2695 | atomic_set(&cmd->transport_sent, 0); | ||
2696 | transport_stop_tasks_for_cmd(cmd); | ||
2697 | transport_generic_request_failure(cmd, dev, 0, 1); | ||
2698 | } | ||
2699 | } | ||
2700 | |||
2701 | goto check_depth; | ||
2704 | } | ||
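From this call site, the contract for a backend's ->do_task() appears to be: return 0 once the task has been queued or completed, with the completion path later calling transport_complete_task(); return a PYX_TRANSPORT_* error code on immediate failure. A hedged, hypothetical backend sketch (my_do_task and my_submit_io are assumptions, not part of this commit):

static int my_do_task(struct se_task *task)
{
	/* Hypothetical asynchronous submission to backend storage */
	if (my_submit_io(task) < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	/* The I/O completion path later sets task->task_scsi_status
	 * and calls transport_complete_task(). */
	return 0;
}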
2705 | |||
2706 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | ||
2707 | { | ||
2708 | unsigned long flags; | ||
2709 | /* | ||
2710 | * Any unsolicited data will get dumped for failed command inside of | ||
2711 | * the fabric plugin | ||
2712 | */ | ||
2713 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | ||
2714 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; | ||
2715 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
2716 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | ||
2717 | |||
2718 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); | ||
2719 | } | ||
2720 | |||
2721 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | ||
2722 | |||
2723 | static inline u32 transport_get_sectors_6( | ||
2724 | unsigned char *cdb, | ||
2725 | struct se_cmd *cmd, | ||
2726 | int *ret) | ||
2727 | { | ||
2728 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | ||
2729 | |||
2730 | /* | ||
2731 | * Assume TYPE_DISK for non struct se_device objects. | ||
2732 | * Use 8-bit sector value. | ||
2733 | */ | ||
2734 | if (!dev) | ||
2735 | goto type_disk; | ||
2736 | |||
2737 | /* | ||
2738 | * Use 24-bit allocation length for TYPE_TAPE. | ||
2739 | */ | ||
2740 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | ||
2741 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | ||
2742 | |||
2743 | /* | ||
2744 | * Everything else assume TYPE_DISK Sector CDB location. | ||
2745 | * Use 8-bit sector value. | ||
2746 | */ | ||
2747 | type_disk: | ||
2748 | return (u32)cdb[4]; | ||
2749 | } | ||
2750 | |||
2751 | static inline u32 transport_get_sectors_10( | ||
2752 | unsigned char *cdb, | ||
2753 | struct se_cmd *cmd, | ||
2754 | int *ret) | ||
2755 | { | ||
2756 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | ||
2757 | |||
2758 | /* | ||
2759 | * Assume TYPE_DISK for non struct se_device objects. | ||
2760 | * Use 16-bit sector value. | ||
2761 | */ | ||
2762 | if (!dev) | ||
2763 | goto type_disk; | ||
2764 | |||
2765 | /* | ||
2766 | * XXX_10 is not defined in SSC, throw an exception | ||
2767 | */ | ||
2768 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | ||
2769 | *ret = -1; | ||
2770 | return 0; | ||
2771 | } | ||
2772 | |||
2773 | /* | ||
2774 | * Everything else assume TYPE_DISK Sector CDB location. | ||
2775 | * Use 16-bit sector value. | ||
2776 | */ | ||
2777 | type_disk: | ||
2778 | return (u32)(cdb[7] << 8) + cdb[8]; | ||
2779 | } | ||
2780 | |||
2781 | static inline u32 transport_get_sectors_12( | ||
2782 | unsigned char *cdb, | ||
2783 | struct se_cmd *cmd, | ||
2784 | int *ret) | ||
2785 | { | ||
2786 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | ||
2787 | |||
2788 | /* | ||
2789 | * Assume TYPE_DISK for non struct se_device objects. | ||
2790 | * Use 32-bit sector value. | ||
2791 | */ | ||
2792 | if (!dev) | ||
2793 | goto type_disk; | ||
2794 | |||
2795 | /* | ||
2796 | * XXX_12 is not defined in SSC, throw an exception | ||
2797 | */ | ||
2798 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | ||
2799 | *ret = -1; | ||
2800 | return 0; | ||
2801 | } | ||
2802 | |||
2803 | /* | ||
2804 | * Everything else assume TYPE_DISK Sector CDB location. | ||
2805 | * Use 32-bit sector value. | ||
2806 | */ | ||
2807 | type_disk: | ||
2808 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | ||
2809 | } | ||
2810 | |||
2811 | static inline u32 transport_get_sectors_16( | ||
2812 | unsigned char *cdb, | ||
2813 | struct se_cmd *cmd, | ||
2814 | int *ret) | ||
2815 | { | ||
2816 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | ||
2817 | |||
2818 | /* | ||
2819 | * Assume TYPE_DISK for non struct se_device objects. | ||
2820 | * Use 32-bit sector value. | ||
2821 | */ | ||
2822 | if (!dev) | ||
2823 | goto type_disk; | ||
2824 | |||
2825 | /* | ||
2826 | * Use 24-bit allocation length for TYPE_TAPE. | ||
2827 | */ | ||
2828 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | ||
2829 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | ||
2830 | |||
2831 | type_disk: | ||
2832 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | ||
2833 | (cdb[12] << 8) + cdb[13]; | ||
2834 | } | ||
2835 | |||
2836 | /* | ||
2837 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | ||
2838 | */ | ||
2839 | static inline u32 transport_get_sectors_32( | ||
2840 | unsigned char *cdb, | ||
2841 | struct se_cmd *cmd, | ||
2842 | int *ret) | ||
2843 | { | ||
2844 | /* | ||
2845 | * The transfer length is always at bytes 28..31 of these 32-byte CDBs. | ||
2846 | * Use 32-bit sector value. | ||
2847 | */ | ||
2848 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | ||
2849 | (cdb[30] << 8) + cdb[31]; | ||
2850 | |||
2851 | } | ||
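The transport_get_sectors_*() helpers above differ only in where the big-endian TRANSFER LENGTH field sits inside the CDB. A minimal user-space C sketch of the same decoding; the helper names and test CDB bytes here are illustrative, not from the patch:

#include <stdio.h>

/* Decode the 16-bit TRANSFER LENGTH of a READ_10/WRITE_10 CDB
 * (bytes 7-8), mirroring transport_get_sectors_10() above. */
static unsigned int get_sectors_10(const unsigned char *cdb)
{
	return ((unsigned int)cdb[7] << 8) + cdb[8];
}

/* Decode the 32-bit TRANSFER LENGTH of a READ_16/WRITE_16 CDB
 * (bytes 10-13), mirroring transport_get_sectors_16() above. */
static unsigned int get_sectors_16(const unsigned char *cdb)
{
	return ((unsigned int)cdb[10] << 24) + ((unsigned int)cdb[11] << 16) +
	       ((unsigned int)cdb[12] << 8) + cdb[13];
}

int main(void)
{
	unsigned char cdb[16] = { 0 };

	cdb[0] = 0x28;		/* READ_10 opcode */
	cdb[7] = 0x01;		/* TRANSFER LENGTH = 0x0100 = 256 sectors */
	printf("READ_10 sectors: %u\n", get_sectors_10(cdb));

	cdb[0] = 0x88;		/* READ_16 opcode */
	cdb[13] = 0x08;		/* TRANSFER LENGTH = 8 sectors */
	printf("READ_16 sectors: %u\n", get_sectors_16(cdb));
	return 0;
}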
2852 | |||
2853 | static inline u32 transport_get_size( | ||
2854 | u32 sectors, | ||
2855 | unsigned char *cdb, | ||
2856 | struct se_cmd *cmd) | ||
2857 | { | ||
2858 | struct se_device *dev = SE_DEV(cmd); | ||
2859 | |||
2860 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | ||
2861 | if (cdb[1] & 1) { /* sectors */ | ||
2862 | return DEV_ATTRIB(dev)->block_size * sectors; | ||
2863 | } else /* bytes */ | ||
2864 | return sectors; | ||
2865 | } | ||
2866 | #if 0 | ||
2867 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | ||
2868 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, | ||
2869 | DEV_ATTRIB(dev)->block_size * sectors, | ||
2870 | TRANSPORT(dev)->name); | ||
2871 | #endif | ||
2872 | return DEV_ATTRIB(dev)->block_size * sectors; | ||
2873 | } | ||
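For SSC tape devices the FIXED bit (cdb[1] & 1) selects whether the CDB count is in fixed-size blocks or in bytes, while disks always count blocks. A small sketch of that branch, with made-up block size and counts:

#include <stdio.h>

/* Mirror of the TYPE_TAPE branch in transport_get_size() above:
 * FIXED=1 means the count is in block_size units, FIXED=0 means
 * the count is already in bytes. */
static unsigned int tape_xfer_bytes(unsigned int count,
				    unsigned int block_size,
				    unsigned char cdb1)
{
	if (cdb1 & 1)		/* FIXED=1: count is blocks */
		return block_size * count;
	return count;		/* FIXED=0: count is bytes */
}

int main(void)
{
	printf("%u\n", tape_xfer_bytes(4, 512, 0x01));	/* 2048 */
	printf("%u\n", tape_xfer_bytes(4, 512, 0x00));	/* 4 */
	return 0;
}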
2874 | |||
2875 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | ||
2876 | { | ||
2877 | unsigned char result = 0; | ||
2878 | /* | ||
2879 | * MSB | ||
2880 | */ | ||
2881 | if ((val[0] >= 'a') && (val[0] <= 'f')) | ||
2882 | result = ((val[0] - 'a' + 10) & 0xf) << 4; | ||
2883 | else | ||
2884 | if ((val[0] >= 'A') && (val[0] <= 'F')) | ||
2885 | result = ((val[0] - 'A' + 10) & 0xf) << 4; | ||
2886 | else /* digit */ | ||
2887 | result = ((val[0] - '0') & 0xf) << 4; | ||
2888 | /* | ||
2889 | * LSB | ||
2890 | */ | ||
2891 | if ((val[1] >= 'a') && (val[1] <= 'f')) | ||
2892 | result |= ((val[1] - 'a' + 10) & 0xf); | ||
2893 | else | ||
2894 | if ((val[1] >= 'A') && (val[1] <= 'F')) | ||
2895 | result |= ((val[1] - 'A' + 10) & 0xf); | ||
2896 | else /* digit */ | ||
2897 | result |= ((val[1] - '0') & 0xf); | ||
2898 | |||
2899 | return result; | ||
2900 | } | ||
2901 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); | ||
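A standalone user-space copy of the conversion just exported, showing how two ASCII hex digits (as found in e.g. a WWN string) collapse into one binary byte; the sample input is made up:

#include <stdio.h>

static unsigned char asciihex_to_binaryhex(const unsigned char val[2])
{
	unsigned char result = 0;

	/* MSB nibble */
	if (val[0] >= 'a' && val[0] <= 'f')
		result = ((val[0] - 'a' + 10) & 0xf) << 4;
	else if (val[0] >= 'A' && val[0] <= 'F')
		result = ((val[0] - 'A' + 10) & 0xf) << 4;
	else			/* digit */
		result = ((val[0] - '0') & 0xf) << 4;
	/* LSB nibble */
	if (val[1] >= 'a' && val[1] <= 'f')
		result |= (val[1] - 'a' + 10) & 0xf;
	else if (val[1] >= 'A' && val[1] <= 'F')
		result |= (val[1] - 'A' + 10) & 0xf;
	else			/* digit */
		result |= (val[1] - '0') & 0xf;

	return result;
}

int main(void)
{
	const unsigned char wwn_digits[2] = { '3', 'f' };

	printf("0x%02x\n", asciihex_to_binaryhex(wwn_digits));	/* 0x3f */
	return 0;
}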
2902 | |||
2903 | static void transport_xor_callback(struct se_cmd *cmd) | ||
2904 | { | ||
2905 | unsigned char *buf, *addr; | ||
2906 | struct se_mem *se_mem; | ||
2907 | unsigned int offset; | ||
2908 | int i; | ||
2909 | /* | ||
2910 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | ||
2911 | * | ||
2912 | * 1) read the specified logical block(s); | ||
2913 | * 2) transfer logical blocks from the data-out buffer; | ||
2914 | * 3) XOR the logical blocks transferred from the data-out buffer with | ||
2915 | * the logical blocks read, storing the resulting XOR data in a buffer; | ||
2916 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | ||
2917 | * blocks transferred from the data-out buffer; and | ||
2918 | * 5) transfer the resulting XOR data to the data-in buffer. | ||
2919 | */ | ||
2920 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | ||
2921 | if (!(buf)) { | ||
2922 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); | ||
2923 | return; | ||
2924 | } | ||
2925 | /* | ||
2926 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list | ||
2927 | * into the locally allocated *buf | ||
2928 | */ | ||
2929 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); | ||
2930 | /* | ||
2931 | * Now perform the XOR against the BIDI read memory located at | ||
2932 | * T_TASK(cmd)->t_mem_bidi_list | ||
2933 | */ | ||
2934 | |||
2935 | offset = 0; | ||
2936 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { | ||
2937 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); | ||
2938 | if (!(addr)) | ||
2939 | goto out; | ||
2940 | |||
2941 | for (i = 0; i < se_mem->se_len; i++) | ||
2942 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); | ||
2943 | |||
2944 | offset += se_mem->se_len; | ||
2945 | kunmap_atomic(addr, KM_USER0); | ||
2946 | } | ||
2947 | out: | ||
2948 | kfree(buf); | ||
2949 | } | ||
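The sbc3r22 steps quoted above boil down to XORing the blocks read from the medium with the data-out payload and returning the result in the data-in buffer. A toy flat-buffer demonstration with made-up byte values (no scatterlists):

#include <stdio.h>

int main(void)
{
	unsigned char read_buf[8]  = { 0xff, 0x00, 0xaa, 0x55, 1, 2, 3, 4 };
	unsigned char write_buf[8] = { 0x0f, 0xf0, 0xaa, 0x55, 4, 3, 2, 1 };
	unsigned int i;

	/* data-in := blocks read XOR data-out payload */
	for (i = 0; i < sizeof(read_buf); i++)
		read_buf[i] ^= write_buf[i];

	for (i = 0; i < sizeof(read_buf); i++)
		printf("%02x ", read_buf[i]);
	printf("\n");
	return 0;
}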
2950 | |||
2951 | /* | ||
2952 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | ||
2953 | */ | ||
2954 | static int transport_get_sense_data(struct se_cmd *cmd) | ||
2955 | { | ||
2956 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | ||
2957 | struct se_device *dev; | ||
2958 | struct se_task *task = NULL, *task_tmp; | ||
2959 | unsigned long flags; | ||
2960 | u32 offset = 0; | ||
2961 | |||
2962 | if (!SE_LUN(cmd)) { | ||
2963 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | ||
2964 | return -1; | ||
2965 | } | ||
2966 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
2967 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | ||
2968 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2969 | return 0; | ||
2970 | } | ||
2971 | |||
2972 | list_for_each_entry_safe(task, task_tmp, | ||
2973 | &T_TASK(cmd)->t_task_list, t_list) { | ||
2974 | |||
2975 | if (!task->task_sense) | ||
2976 | continue; | ||
2977 | |||
2978 | dev = task->se_dev; | ||
2979 | if (!(dev)) | ||
2980 | continue; | ||
2981 | |||
2982 | if (!TRANSPORT(dev)->get_sense_buffer) { | ||
2983 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" | ||
2984 | " is NULL\n"); | ||
2985 | continue; | ||
2986 | } | ||
2987 | |||
2988 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); | ||
2989 | if (!(sense_buffer)) { | ||
2990 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | ||
2991 | " sense buffer for task with sense\n", | ||
2992 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); | ||
2993 | continue; | ||
2994 | } | ||
2995 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
2996 | |||
2997 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | ||
2998 | TRANSPORT_SENSE_BUFFER); | ||
2999 | |||
3000 | memcpy((void *)&buffer[offset], (void *)sense_buffer, | ||
3001 | TRANSPORT_SENSE_BUFFER); | ||
3002 | cmd->scsi_status = task->task_scsi_status; | ||
3003 | /* Automatically padded */ | ||
3004 | cmd->scsi_sense_length = | ||
3005 | (TRANSPORT_SENSE_BUFFER + offset); | ||
3006 | |||
3007 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | ||
3008 | " and sense\n", | ||
3009 | dev->se_hba->hba_id, TRANSPORT(dev)->name, | ||
3010 | cmd->scsi_status); | ||
3011 | return 0; | ||
3012 | } | ||
3013 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
3014 | |||
3015 | return -1; | ||
3016 | } | ||
3017 | |||
3018 | static int transport_allocate_resources(struct se_cmd *cmd) | ||
3019 | { | ||
3020 | u32 length = cmd->data_length; | ||
3021 | |||
3022 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | ||
3023 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) | ||
3024 | return transport_generic_get_mem(cmd, length, PAGE_SIZE); | ||
3025 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) | ||
3026 | return transport_generic_allocate_buf(cmd, length); | ||
3027 | else | ||
3028 | return 0; | ||
3029 | } | ||
3030 | |||
3031 | static int | ||
3032 | transport_handle_reservation_conflict(struct se_cmd *cmd) | ||
3033 | { | ||
3034 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | ||
3035 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3036 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | ||
3037 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | ||
3038 | /* | ||
3039 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | ||
3040 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | ||
3041 | * CONFLICT STATUS. | ||
3042 | * | ||
3043 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | ||
3044 | */ | ||
3045 | if (SE_SESS(cmd) && | ||
3046 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | ||
3047 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | ||
3048 | cmd->orig_fe_lun, 0x2C, | ||
3049 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | ||
3050 | return -2; | ||
3051 | } | ||
3052 | |||
3053 | /* transport_generic_cmd_sequencer(): | ||
3054 | * | ||
3055 | * Generic Command Sequencer that should work for most DAS transport | ||
3056 | * drivers. | ||
3057 | * | ||
3058 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | ||
3059 | * RX Thread. | ||
3060 | * | ||
3061 | * FIXME: Need to support other SCSI OPCODES here as well. | ||
3062 | */ | ||
3063 | static int transport_generic_cmd_sequencer( | ||
3064 | struct se_cmd *cmd, | ||
3065 | unsigned char *cdb) | ||
3066 | { | ||
3067 | struct se_device *dev = SE_DEV(cmd); | ||
3068 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | ||
3069 | int ret = 0, sector_ret = 0, passthrough; | ||
3070 | u32 sectors = 0, size = 0, pr_reg_type = 0; | ||
3071 | u16 service_action; | ||
3072 | u8 alua_ascq = 0; | ||
3073 | /* | ||
3074 | * Check for an existing UNIT ATTENTION condition | ||
3075 | */ | ||
3076 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | ||
3077 | cmd->transport_wait_for_tasks = | ||
3078 | &transport_nop_wait_for_tasks; | ||
3079 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3080 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | ||
3081 | return -2; | ||
3082 | } | ||
3083 | /* | ||
3084 | * Check status of Asymmetric Logical Unit Assignment port | ||
3085 | */ | ||
3086 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); | ||
3087 | if (ret != 0) { | ||
3088 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | ||
3089 | /* | ||
3090 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | ||
3091 | * the ALUA additional sense code qualifier (ASCQ) is determined | ||
3092 | * by the ALUA primary or secondary access state. | ||
3093 | */ | ||
3094 | if (ret > 0) { | ||
3095 | #if 0 | ||
3096 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | ||
3097 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | ||
3098 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); | ||
3099 | #endif | ||
3100 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | ||
3101 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3102 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | ||
3103 | return -2; | ||
3104 | } | ||
3105 | goto out_invalid_cdb_field; | ||
3106 | } | ||
3107 | /* | ||
3108 | * Check status for SPC-3 Persistent Reservations | ||
3109 | */ | ||
3110 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { | ||
3111 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( | ||
3112 | cmd, cdb, pr_reg_type) != 0) | ||
3113 | return transport_handle_reservation_conflict(cmd); | ||
3114 | /* | ||
3115 | * This means the CDB is allowed for the SCSI Initiator port | ||
3116 | * when said port is *NOT* holding the legacy SPC-2 or | ||
3117 | * SPC-3 Persistent Reservation. | ||
3118 | */ | ||
3119 | } | ||
3120 | |||
3121 | switch (cdb[0]) { | ||
3122 | case READ_6: | ||
3123 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | ||
3124 | if (sector_ret) | ||
3125 | goto out_unsupported_cdb; | ||
3126 | size = transport_get_size(sectors, cdb, cmd); | ||
3127 | cmd->transport_split_cdb = &split_cdb_XX_6; | ||
3128 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | ||
3129 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3130 | break; | ||
3131 | case READ_10: | ||
3132 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
3133 | if (sector_ret) | ||
3134 | goto out_unsupported_cdb; | ||
3135 | size = transport_get_size(sectors, cdb, cmd); | ||
3136 | cmd->transport_split_cdb = &split_cdb_XX_10; | ||
3137 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | ||
3138 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3139 | break; | ||
3140 | case READ_12: | ||
3141 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | ||
3142 | if (sector_ret) | ||
3143 | goto out_unsupported_cdb; | ||
3144 | size = transport_get_size(sectors, cdb, cmd); | ||
3145 | cmd->transport_split_cdb = &split_cdb_XX_12; | ||
3146 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | ||
3147 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3148 | break; | ||
3149 | case READ_16: | ||
3150 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
3151 | if (sector_ret) | ||
3152 | goto out_unsupported_cdb; | ||
3153 | size = transport_get_size(sectors, cdb, cmd); | ||
3154 | cmd->transport_split_cdb = &split_cdb_XX_16; | ||
3155 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | ||
3156 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3157 | break; | ||
3158 | case WRITE_6: | ||
3159 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | ||
3160 | if (sector_ret) | ||
3161 | goto out_unsupported_cdb; | ||
3162 | size = transport_get_size(sectors, cdb, cmd); | ||
3163 | cmd->transport_split_cdb = &split_cdb_XX_6; | ||
3164 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | ||
3165 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3166 | break; | ||
3167 | case WRITE_10: | ||
3168 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
3169 | if (sector_ret) | ||
3170 | goto out_unsupported_cdb; | ||
3171 | size = transport_get_size(sectors, cdb, cmd); | ||
3172 | cmd->transport_split_cdb = &split_cdb_XX_10; | ||
3173 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | ||
3174 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | ||
3175 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3176 | break; | ||
3177 | case WRITE_12: | ||
3178 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | ||
3179 | if (sector_ret) | ||
3180 | goto out_unsupported_cdb; | ||
3181 | size = transport_get_size(sectors, cdb, cmd); | ||
3182 | cmd->transport_split_cdb = &split_cdb_XX_12; | ||
3183 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | ||
3184 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | ||
3185 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3186 | break; | ||
3187 | case WRITE_16: | ||
3188 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
3189 | if (sector_ret) | ||
3190 | goto out_unsupported_cdb; | ||
3191 | size = transport_get_size(sectors, cdb, cmd); | ||
3192 | cmd->transport_split_cdb = &split_cdb_XX_16; | ||
3193 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | ||
3194 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | ||
3195 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3196 | break; | ||
3197 | case XDWRITEREAD_10: | ||
3198 | if ((cmd->data_direction != DMA_TO_DEVICE) || | ||
3199 | !(T_TASK(cmd)->t_tasks_bidi)) | ||
3200 | goto out_invalid_cdb_field; | ||
3201 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
3202 | if (sector_ret) | ||
3203 | goto out_unsupported_cdb; | ||
3204 | size = transport_get_size(sectors, cdb, cmd); | ||
3205 | cmd->transport_split_cdb = &split_cdb_XX_10; | ||
3206 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | ||
3207 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3208 | passthrough = (TRANSPORT(dev)->transport_type == | ||
3209 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
3210 | /* | ||
3211 | * Skip the remaining assignments for TCM/PSCSI passthrough | ||
3212 | */ | ||
3213 | if (passthrough) | ||
3214 | break; | ||
3215 | /* | ||
3216 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | ||
3217 | */ | ||
3218 | cmd->transport_complete_callback = &transport_xor_callback; | ||
3219 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | ||
3220 | break; | ||
3221 | case VARIABLE_LENGTH_CMD: | ||
3222 | service_action = get_unaligned_be16(&cdb[8]); | ||
3223 | /* | ||
3224 | * Determine if this is TCM/PSCSI device and we should disable | ||
3225 | * internal emulation for this CDB. | ||
3226 | */ | ||
3227 | passthrough = (TRANSPORT(dev)->transport_type == | ||
3228 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
3229 | |||
3230 | switch (service_action) { | ||
3231 | case XDWRITEREAD_32: | ||
3232 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | ||
3233 | if (sector_ret) | ||
3234 | goto out_unsupported_cdb; | ||
3235 | size = transport_get_size(sectors, cdb, cmd); | ||
3236 | /* | ||
3237 | * Use WRITE_32 and READ_32 opcodes for the emulated | ||
3238 | * XDWRITE_READ_32 logic. | ||
3239 | */ | ||
3240 | cmd->transport_split_cdb = &split_cdb_XX_32; | ||
3241 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); | ||
3242 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | ||
3243 | |||
3244 | /* | ||
3245 | * Skip the remaining assignments for TCM/PSCSI passthrough | ||
3246 | */ | ||
3247 | if (passthrough) | ||
3248 | break; | ||
3249 | |||
3250 | /* | ||
3251 | * Setup BIDI XOR callback to be run during | ||
3252 | * transport_generic_complete_ok() | ||
3253 | */ | ||
3254 | cmd->transport_complete_callback = &transport_xor_callback; | ||
3255 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); | ||
3256 | break; | ||
3257 | case WRITE_SAME_32: | ||
3258 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | ||
3259 | if (sector_ret) | ||
3260 | goto out_unsupported_cdb; | ||
3261 | size = transport_get_size(sectors, cdb, cmd); | ||
3262 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); | ||
3263 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
3264 | |||
3265 | /* | ||
3266 | * Skip the remaining assignments for TCM/PSCSI passthrough | ||
3267 | */ | ||
3268 | if (passthrough) | ||
3269 | break; | ||
3270 | |||
3271 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | ||
3272 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | ||
3273 | " bits not supported for Block Discard" | ||
3274 | " Emulation\n"); | ||
3275 | goto out_invalid_cdb_field; | ||
3276 | } | ||
3277 | /* | ||
3278 | * Currently for the emulated case we only accept Thin | ||
3279 | * Provisioning WRITE_SAME (TPWS) with the UNMAP=1 bit set. | ||
3280 | */ | ||
3281 | if (!(cdb[10] & 0x08)) { | ||
3282 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | ||
3283 | " supported for Block Discard Emulation\n"); | ||
3284 | goto out_invalid_cdb_field; | ||
3285 | } | ||
3286 | break; | ||
3287 | default: | ||
3288 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" | ||
3289 | " 0x%04x not supported\n", service_action); | ||
3290 | goto out_unsupported_cdb; | ||
3291 | } | ||
3292 | break; | ||
3293 | case 0xa3: | ||
3294 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | ||
3295 | /* MAINTENANCE_IN from SCC-2 */ | ||
3296 | /* | ||
3297 | * Check for emulated MI_REPORT_TARGET_PGS. | ||
3298 | */ | ||
3299 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | ||
3300 | cmd->transport_emulate_cdb = | ||
3301 | (T10_ALUA(su_dev)->alua_type == | ||
3302 | SPC3_ALUA_EMULATED) ? | ||
3303 | &core_emulate_report_target_port_groups : | ||
3304 | NULL; | ||
3305 | } | ||
3306 | size = (cdb[6] << 24) | (cdb[7] << 16) | | ||
3307 | (cdb[8] << 8) | cdb[9]; | ||
3308 | } else { | ||
3309 | /* GPCMD_SEND_KEY from multi media commands */ | ||
3310 | size = (cdb[8] << 8) + cdb[9]; | ||
3311 | } | ||
3312 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3313 | break; | ||
3314 | case MODE_SELECT: | ||
3315 | size = cdb[4]; | ||
3316 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
3317 | break; | ||
3318 | case MODE_SELECT_10: | ||
3319 | size = (cdb[7] << 8) + cdb[8]; | ||
3320 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
3321 | break; | ||
3322 | case MODE_SENSE: | ||
3323 | size = cdb[4]; | ||
3324 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3325 | break; | ||
3326 | case MODE_SENSE_10: | ||
3327 | case GPCMD_READ_BUFFER_CAPACITY: | ||
3328 | case GPCMD_SEND_OPC: | ||
3329 | case LOG_SELECT: | ||
3330 | case LOG_SENSE: | ||
3331 | size = (cdb[7] << 8) + cdb[8]; | ||
3332 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3333 | break; | ||
3334 | case READ_BLOCK_LIMITS: | ||
3335 | size = READ_BLOCK_LEN; | ||
3336 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3337 | break; | ||
3338 | case GPCMD_GET_CONFIGURATION: | ||
3339 | case GPCMD_READ_FORMAT_CAPACITIES: | ||
3340 | case GPCMD_READ_DISC_INFO: | ||
3341 | case GPCMD_READ_TRACK_RZONE_INFO: | ||
3342 | size = (cdb[7] << 8) + cdb[8]; | ||
3343 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
3344 | break; | ||
3345 | case PERSISTENT_RESERVE_IN: | ||
3346 | case PERSISTENT_RESERVE_OUT: | ||
3347 | cmd->transport_emulate_cdb = | ||
3348 | (T10_RES(su_dev)->res_type == | ||
3349 | SPC3_PERSISTENT_RESERVATIONS) ? | ||
3350 | &core_scsi3_emulate_pr : NULL; | ||
3351 | size = (cdb[7] << 8) + cdb[8]; | ||
3352 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3353 | break; | ||
3354 | case GPCMD_MECHANISM_STATUS: | ||
3355 | case GPCMD_READ_DVD_STRUCTURE: | ||
3356 | size = (cdb[8] << 8) + cdb[9]; | ||
3357 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
3358 | break; | ||
3359 | case READ_POSITION: | ||
3360 | size = READ_POSITION_LEN; | ||
3361 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3362 | break; | ||
3363 | case 0xa4: | ||
3364 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | ||
3365 | /* MAINTENANCE_OUT from SCC-2 | ||
3366 | * | ||
3367 | * Check for emulated MO_SET_TARGET_PGS. | ||
3368 | */ | ||
3369 | if (cdb[1] == MO_SET_TARGET_PGS) { | ||
3370 | cmd->transport_emulate_cdb = | ||
3371 | (T10_ALUA(su_dev)->alua_type == | ||
3372 | SPC3_ALUA_EMULATED) ? | ||
3373 | &core_emulate_set_target_port_groups : | ||
3374 | NULL; | ||
3375 | } | ||
3376 | |||
3377 | size = (cdb[6] << 24) | (cdb[7] << 16) | | ||
3378 | (cdb[8] << 8) | cdb[9]; | ||
3379 | } else { | ||
3380 | /* GPCMD_REPORT_KEY from multi media commands */ | ||
3381 | size = (cdb[8] << 8) + cdb[9]; | ||
3382 | } | ||
3383 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3384 | break; | ||
3385 | case INQUIRY: | ||
3386 | size = (cdb[3] << 8) + cdb[4]; | ||
3387 | /* | ||
3388 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | ||
3389 | * See spc4r17 section 5.3 | ||
3390 | */ | ||
3391 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
3392 | cmd->sam_task_attr = TASK_ATTR_HOQ; | ||
3393 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3394 | break; | ||
3395 | case READ_BUFFER: | ||
3396 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
3397 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3398 | break; | ||
3399 | case READ_CAPACITY: | ||
3400 | size = READ_CAP_LEN; | ||
3401 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3402 | break; | ||
3403 | case READ_MEDIA_SERIAL_NUMBER: | ||
3404 | case SECURITY_PROTOCOL_IN: | ||
3405 | case SECURITY_PROTOCOL_OUT: | ||
3406 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
3407 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3408 | break; | ||
3409 | case SERVICE_ACTION_IN: | ||
3410 | case ACCESS_CONTROL_IN: | ||
3411 | case ACCESS_CONTROL_OUT: | ||
3412 | case EXTENDED_COPY: | ||
3413 | case READ_ATTRIBUTE: | ||
3414 | case RECEIVE_COPY_RESULTS: | ||
3415 | case WRITE_ATTRIBUTE: | ||
3416 | size = (cdb[10] << 24) | (cdb[11] << 16) | | ||
3417 | (cdb[12] << 8) | cdb[13]; | ||
3418 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3419 | break; | ||
3420 | case RECEIVE_DIAGNOSTIC: | ||
3421 | case SEND_DIAGNOSTIC: | ||
3422 | size = (cdb[3] << 8) | cdb[4]; | ||
3423 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3424 | break; | ||
3425 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | ||
3426 | #if 0 | ||
3427 | case GPCMD_READ_CD: | ||
3428 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
3429 | size = (2336 * sectors); | ||
3430 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3431 | break; | ||
3432 | #endif | ||
3433 | case READ_TOC: | ||
3434 | size = cdb[8]; | ||
3435 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3436 | break; | ||
3437 | case REQUEST_SENSE: | ||
3438 | size = cdb[4]; | ||
3439 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3440 | break; | ||
3441 | case READ_ELEMENT_STATUS: | ||
3442 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | ||
3443 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3444 | break; | ||
3445 | case WRITE_BUFFER: | ||
3446 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | ||
3447 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3448 | break; | ||
3449 | case RESERVE: | ||
3450 | case RESERVE_10: | ||
3451 | /* | ||
3452 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | ||
3453 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | ||
3454 | */ | ||
3455 | if (cdb[0] == RESERVE_10) | ||
3456 | size = (cdb[7] << 8) | cdb[8]; | ||
3457 | else | ||
3458 | size = cmd->data_length; | ||
3459 | |||
3460 | /* | ||
3461 | * Setup the legacy emulated handler for SPC-2 and | ||
3462 | * >= SPC-3 compatible reservation handling (CRH=1). | ||
3463 | * Otherwise, we assume the underlying SCSI logic is | ||
3464 | * running in SPC_PASSTHROUGH, and wants reservations | ||
3465 | * emulation disabled. | ||
3466 | */ | ||
3467 | cmd->transport_emulate_cdb = | ||
3468 | (T10_RES(su_dev)->res_type != | ||
3469 | SPC_PASSTHROUGH) ? | ||
3470 | &core_scsi2_emulate_crh : NULL; | ||
3471 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
3472 | break; | ||
3473 | case RELEASE: | ||
3474 | case RELEASE_10: | ||
3475 | /* | ||
3476 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | ||
3477 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | ||
3478 | */ | ||
3479 | if (cdb[0] == RELEASE_10) | ||
3480 | size = (cdb[7] << 8) | cdb[8]; | ||
3481 | else | ||
3482 | size = cmd->data_length; | ||
3483 | |||
3484 | cmd->transport_emulate_cdb = | ||
3485 | (T10_RES(su_dev)->res_type != | ||
3486 | SPC_PASSTHROUGH) ? | ||
3487 | &core_scsi2_emulate_crh : NULL; | ||
3488 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
3489 | break; | ||
3490 | case SYNCHRONIZE_CACHE: | ||
3491 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | ||
3492 | /* | ||
3493 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | ||
3494 | */ | ||
3495 | if (cdb[0] == SYNCHRONIZE_CACHE) { | ||
3496 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | ||
3497 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | ||
3498 | } else { | ||
3499 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
3500 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | ||
3501 | } | ||
3502 | if (sector_ret) | ||
3503 | goto out_unsupported_cdb; | ||
3504 | |||
3505 | size = transport_get_size(sectors, cdb, cmd); | ||
3506 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
3507 | |||
3508 | /* | ||
3509 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | ||
3510 | */ | ||
3511 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | ||
3512 | break; | ||
3513 | /* | ||
3514 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | ||
3515 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | ||
3516 | */ | ||
3517 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | ||
3518 | /* | ||
3519 | * Check to ensure that LBA + Range does not extend past the end | ||
3520 | * of the device. | ||
3521 | */ | ||
3522 | if (transport_get_sectors(cmd) < 0) | ||
3523 | goto out_invalid_cdb_field; | ||
3524 | break; | ||
3525 | case UNMAP: | ||
3526 | size = get_unaligned_be16(&cdb[7]); | ||
3527 | passthrough = (TRANSPORT(dev)->transport_type == | ||
3528 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
3529 | /* | ||
3530 | * Determine if the received UNMAP is used for direct passthrough | ||
3531 | * into Linux/SCSI with struct request via TCM/pSCSI, or we are | ||
3532 | * signaling the use of internal transport_generic_unmap() emulation | ||
3533 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO | ||
3534 | * subsystem plugin backstores. | ||
3535 | */ | ||
3536 | if (!(passthrough)) | ||
3537 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; | ||
3538 | |||
3539 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3540 | break; | ||
3541 | case WRITE_SAME_16: | ||
3542 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | ||
3543 | if (sector_ret) | ||
3544 | goto out_unsupported_cdb; | ||
3545 | size = transport_get_size(sectors, cdb, cmd); | ||
3546 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]); | ||
3547 | passthrough = (TRANSPORT(dev)->transport_type == | ||
3548 | TRANSPORT_PLUGIN_PHBA_PDEV); | ||
3549 | /* | ||
3550 | * Determine if the received WRITE_SAME_16 is used for direct | ||
3551 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI, | ||
3552 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | ||
3553 | * emulation for Linux/BLOCK discard with TCM/IBLOCK and | ||
3554 | * TCM/FILEIO subsystem plugin backstores. | ||
3555 | */ | ||
3556 | if (!(passthrough)) { | ||
3557 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | ||
3558 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | ||
3559 | " bits not supported for Block Discard" | ||
3560 | " Emulation\n"); | ||
3561 | goto out_invalid_cdb_field; | ||
3562 | } | ||
3563 | /* | ||
3564 | * Currently for the emulated case we only accept Thin | ||
3565 | * Provisioning WRITE_SAME (TPWS) with the UNMAP=1 bit set. | ||
3566 | */ | ||
3567 | if (!(cdb[1] & 0x08)) { | ||
3568 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | ||
3569 | " supported for Block Discard Emulation\n"); | ||
3570 | goto out_invalid_cdb_field; | ||
3571 | } | ||
3572 | } | ||
3573 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | ||
3574 | break; | ||
3575 | case ALLOW_MEDIUM_REMOVAL: | ||
3576 | case GPCMD_CLOSE_TRACK: | ||
3577 | case ERASE: | ||
3578 | case INITIALIZE_ELEMENT_STATUS: | ||
3579 | case GPCMD_LOAD_UNLOAD: | ||
3580 | case REZERO_UNIT: | ||
3581 | case SEEK_10: | ||
3582 | case GPCMD_SET_SPEED: | ||
3583 | case SPACE: | ||
3584 | case START_STOP: | ||
3585 | case TEST_UNIT_READY: | ||
3586 | case VERIFY: | ||
3587 | case WRITE_FILEMARKS: | ||
3588 | case MOVE_MEDIUM: | ||
3589 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | ||
3590 | break; | ||
3591 | case REPORT_LUNS: | ||
3592 | cmd->transport_emulate_cdb = | ||
3593 | &transport_core_report_lun_response; | ||
3594 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | ||
3595 | /* | ||
3596 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS. | ||
3597 | * See spc4r17 section 5.3 | ||
3598 | */ | ||
3599 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
3600 | cmd->sam_task_attr = TASK_ATTR_HOQ; | ||
3601 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | ||
3602 | break; | ||
3603 | default: | ||
3604 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | ||
3605 | " 0x%02x, sending CHECK_CONDITION.\n", | ||
3606 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); | ||
3607 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | ||
3608 | goto out_unsupported_cdb; | ||
3609 | } | ||
3610 | |||
3611 | if (size != cmd->data_length) { | ||
3612 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | ||
3613 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | ||
3614 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), | ||
3615 | cmd->data_length, size, cdb[0]); | ||
3616 | |||
3617 | cmd->cmd_spdtl = size; | ||
3618 | |||
3619 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
3620 | printk(KERN_ERR "Rejecting underflow/overflow" | ||
3621 | " WRITE data\n"); | ||
3622 | goto out_invalid_cdb_field; | ||
3623 | } | ||
3624 | /* | ||
3625 | * Reject READ_* or WRITE_* with overflow/underflow for | ||
3626 | * type SCF_SCSI_DATA_SG_IO_CDB. | ||
3627 | */ | ||
3628 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { | ||
3629 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" | ||
3630 | " CDB on non 512-byte sector setup subsystem" | ||
3631 | " plugin: %s\n", TRANSPORT(dev)->name); | ||
3632 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | ||
3633 | goto out_invalid_cdb_field; | ||
3634 | } | ||
3635 | |||
3636 | if (size > cmd->data_length) { | ||
3637 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | ||
3638 | cmd->residual_count = (size - cmd->data_length); | ||
3639 | } else { | ||
3640 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | ||
3641 | cmd->residual_count = (cmd->data_length - size); | ||
3642 | } | ||
3643 | cmd->data_length = size; | ||
3644 | } | ||
3645 | |||
3646 | transport_set_supported_SAM_opcode(cmd); | ||
3647 | return ret; | ||
3648 | |||
3649 | out_unsupported_cdb: | ||
3650 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3651 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | ||
3652 | return -2; | ||
3653 | out_invalid_cdb_field: | ||
3654 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
3655 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | ||
3656 | return -2; | ||
3657 | } | ||
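When the CDB-derived length disagrees with the fabric's expected transfer length, the tail of the sequencer above marks the command over- or underflowed and records a residual. A compact sketch of that bookkeeping; the struct and flag values are illustrative, not the kernel's:

#include <stdio.h>

#define TOY_OVERFLOW	(1u << 0)	/* stand-ins for SCF_*FLOW_BIT */
#define TOY_UNDERFLOW	(1u << 1)

struct toy_cmd {
	unsigned int data_length;	/* fabric expected transfer length */
	unsigned int residual_count;
	unsigned int flags;
};

static void apply_residual(struct toy_cmd *cmd, unsigned int cdb_size)
{
	if (cdb_size == cmd->data_length)
		return;
	if (cdb_size > cmd->data_length) {
		cmd->flags |= TOY_OVERFLOW;
		cmd->residual_count = cdb_size - cmd->data_length;
	} else {
		cmd->flags |= TOY_UNDERFLOW;
		cmd->residual_count = cmd->data_length - cdb_size;
	}
	cmd->data_length = cdb_size;	/* CDB length wins, as above */
}

int main(void)
{
	struct toy_cmd cmd = { 4096, 0, 0 };

	apply_residual(&cmd, 2048);	/* CDB asked for less: underflow */
	printf("flags=0x%x residual=%u data_length=%u\n",
	       cmd.flags, cmd.residual_count, cmd.data_length);
	return 0;
}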
3658 | |||
3659 | static inline void transport_release_tasks(struct se_cmd *); | ||
3660 | |||
3661 | /* | ||
3662 | * This function will copy a contiguous *src buffer into a destination | ||
3663 | * struct scatterlist array. | ||
3664 | */ | ||
3665 | static void transport_memcpy_write_contig( | ||
3666 | struct se_cmd *cmd, | ||
3667 | struct scatterlist *sg_d, | ||
3668 | unsigned char *src) | ||
3669 | { | ||
3670 | u32 i = 0, length = 0, total_length = cmd->data_length; | ||
3671 | void *dst; | ||
3672 | |||
3673 | while (total_length) { | ||
3674 | length = sg_d[i].length; | ||
3675 | |||
3676 | if (length > total_length) | ||
3677 | length = total_length; | ||
3678 | |||
3679 | dst = sg_virt(&sg_d[i]); | ||
3680 | |||
3681 | memcpy(dst, src, length); | ||
3682 | |||
3683 | if (!(total_length -= length)) | ||
3684 | return; | ||
3685 | |||
3686 | src += length; | ||
3687 | i++; | ||
3688 | } | ||
3689 | } | ||
3690 | |||
3691 | /* | ||
3692 | * This function will copy a struct scatterlist array *sg_s into a destination | ||
3693 | * contiguous *dst buffer. | ||
3694 | */ | ||
3695 | static void transport_memcpy_read_contig( | ||
3696 | struct se_cmd *cmd, | ||
3697 | unsigned char *dst, | ||
3698 | struct scatterlist *sg_s) | ||
3699 | { | ||
3700 | u32 i = 0, length = 0, total_length = cmd->data_length; | ||
3701 | void *src; | ||
3702 | |||
3703 | while (total_length) { | ||
3704 | length = sg_s[i].length; | ||
3705 | |||
3706 | if (length > total_length) | ||
3707 | length = total_length; | ||
3708 | |||
3709 | src = sg_virt(&sg_s[i]); | ||
3710 | |||
3711 | memcpy(dst, src, length); | ||
3712 | |||
3713 | if (!(total_length -= length)) | ||
3714 | return; | ||
3715 | |||
3716 | dst += length; | ||
3717 | i++; | ||
3718 | } | ||
3719 | } | ||
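All three copy loops above walk scatter-gather segments, clamping the final segment to the bytes remaining in the command. A user-space sketch with a toy segment type standing in for struct scatterlist / struct se_mem:

#include <stdio.h>
#include <string.h>

struct toy_seg {
	unsigned char *buf;
	unsigned int len;
};

static void read_contig(unsigned char *dst, const struct toy_seg *sg,
			unsigned int total)
{
	unsigned int i = 0, length;

	while (total) {
		length = sg[i].len;
		if (length > total)
			length = total;	/* clamp the final segment */
		memcpy(dst, sg[i].buf, length);
		total -= length;
		dst += length;
		i++;
	}
}

int main(void)
{
	unsigned char a[4] = "abcd", b[4] = "efgh", out[9] = { 0 };
	struct toy_seg sg[2] = { { a, 4 }, { b, 4 } };

	read_contig(out, sg, 8);
	printf("%s\n", out);	/* abcdefgh */
	return 0;
}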
3720 | |||
3721 | static void transport_memcpy_se_mem_read_contig( | ||
3722 | struct se_cmd *cmd, | ||
3723 | unsigned char *dst, | ||
3724 | struct list_head *se_mem_list) | ||
3725 | { | ||
3726 | struct se_mem *se_mem; | ||
3727 | void *src; | ||
3728 | u32 length = 0, total_length = cmd->data_length; | ||
3729 | |||
3730 | list_for_each_entry(se_mem, se_mem_list, se_list) { | ||
3731 | length = se_mem->se_len; | ||
3732 | |||
3733 | if (length > total_length) | ||
3734 | length = total_length; | ||
3735 | |||
3736 | src = page_address(se_mem->se_page) + se_mem->se_off; | ||
3737 | |||
3738 | memcpy(dst, src, length); | ||
3739 | |||
3740 | if (!(total_length -= length)) | ||
3741 | return; | ||
3742 | |||
3743 | dst += length; | ||
3744 | } | ||
3745 | } | ||
3746 | |||
3747 | /* | ||
3748 | * Called from transport_generic_complete_ok() and | ||
3749 | * transport_generic_request_failure() to determine which dormant/delayed | ||
3750 | * and ordered cmds need to have their tasks added to the execution queue. | ||
3751 | */ | ||
3752 | static void transport_complete_task_attr(struct se_cmd *cmd) | ||
3753 | { | ||
3754 | struct se_device *dev = SE_DEV(cmd); | ||
3755 | struct se_cmd *cmd_p, *cmd_tmp; | ||
3756 | int new_active_tasks = 0; | ||
3757 | |||
3758 | if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { | ||
3759 | atomic_dec(&dev->simple_cmds); | ||
3760 | smp_mb__after_atomic_dec(); | ||
3761 | dev->dev_cur_ordered_id++; | ||
3762 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" | ||
3763 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | ||
3764 | cmd->se_ordered_id); | ||
3765 | } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | ||
3766 | atomic_dec(&dev->dev_hoq_count); | ||
3767 | smp_mb__after_atomic_dec(); | ||
3768 | dev->dev_cur_ordered_id++; | ||
3769 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" | ||
3770 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | ||
3771 | cmd->se_ordered_id); | ||
3772 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | ||
3773 | spin_lock(&dev->ordered_cmd_lock); | ||
3774 | list_del(&cmd->se_ordered_list); | ||
3775 | atomic_dec(&dev->dev_ordered_sync); | ||
3776 | smp_mb__after_atomic_dec(); | ||
3777 | spin_unlock(&dev->ordered_cmd_lock); | ||
3778 | |||
3779 | dev->dev_cur_ordered_id++; | ||
3780 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" | ||
3781 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | ||
3782 | } | ||
3783 | /* | ||
3784 | * Process all commands up to the last received | ||
3785 | * ORDERED task attribute which requires another blocking | ||
3786 | * boundary | ||
3787 | */ | ||
3788 | spin_lock(&dev->delayed_cmd_lock); | ||
3789 | list_for_each_entry_safe(cmd_p, cmd_tmp, | ||
3790 | &dev->delayed_cmd_list, se_delayed_list) { | ||
3791 | |||
3792 | list_del(&cmd_p->se_delayed_list); | ||
3793 | spin_unlock(&dev->delayed_cmd_lock); | ||
3794 | |||
3795 | DEBUG_STA("Calling add_tasks() for" | ||
3796 | " cmd_p: 0x%02x Task Attr: 0x%02x" | ||
3797 | " Dormant -> Active, se_ordered_id: %u\n", | ||
3798 | T_TASK(cmd_p)->t_task_cdb[0], | ||
3799 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | ||
3800 | |||
3801 | transport_add_tasks_from_cmd(cmd_p); | ||
3802 | new_active_tasks++; | ||
3803 | |||
3804 | spin_lock(&dev->delayed_cmd_lock); | ||
3805 | if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) | ||
3806 | break; | ||
3807 | } | ||
3808 | spin_unlock(&dev->delayed_cmd_lock); | ||
3809 | /* | ||
3810 | * If new tasks have become active, wake up the transport thread | ||
3811 | * to do the processing of the Active tasks. | ||
3812 | */ | ||
3813 | if (new_active_tasks != 0) | ||
3814 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | ||
3815 | } | ||
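The drain loop above re-activates delayed commands until it reaches the next ORDERED command, which forms a new blocking boundary. A toy walk over a delayed list; the attribute constants are illustrative:

#include <stdio.h>

enum { ATTR_SIMPLE, ATTR_ORDERED };

int main(void)
{
	int delayed[5] = { ATTR_SIMPLE, ATTR_SIMPLE, ATTR_ORDERED,
			   ATTR_SIMPLE, ATTR_SIMPLE };
	int i, promoted = 0;

	for (i = 0; i < 5; i++) {
		promoted++;	/* transport_add_tasks_from_cmd() stand-in */
		if (delayed[i] == ATTR_ORDERED)
			break;	/* new blocking boundary */
	}
	printf("promoted %d of 5 delayed commands\n", promoted);
	return 0;
}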
3816 | |||
3817 | static void transport_generic_complete_ok(struct se_cmd *cmd) | ||
3818 | { | ||
3819 | int reason = 0; | ||
3820 | /* | ||
3821 | * Check if we need to move delayed/dormant tasks from cmds on the | ||
3822 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | ||
3823 | * Attribute. | ||
3824 | */ | ||
3825 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | ||
3826 | transport_complete_task_attr(cmd); | ||
3827 | /* | ||
3828 | * Check if we need to retrieve a sense buffer from | ||
3829 | * the struct se_cmd in question. | ||
3830 | */ | ||
3831 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | ||
3832 | if (transport_get_sense_data(cmd) < 0) | ||
3833 | reason = TCM_NON_EXISTENT_LUN; | ||
3834 | |||
3835 | /* | ||
3836 | * Only set when a struct se_task->task_scsi_status returned | ||
3837 | * a non-GOOD status. | ||
3838 | */ | ||
3839 | if (cmd->scsi_status) { | ||
3840 | transport_send_check_condition_and_sense( | ||
3841 | cmd, reason, 1); | ||
3842 | transport_lun_remove_cmd(cmd); | ||
3843 | transport_cmd_check_stop_to_fabric(cmd); | ||
3844 | return; | ||
3845 | } | ||
3846 | } | ||
3847 | /* | ||
3848 | * Check for a callback, used by, amongst other things, | ||
3849 | * XDWRITE_READ_10 emulation. | ||
3850 | */ | ||
3851 | if (cmd->transport_complete_callback) | ||
3852 | cmd->transport_complete_callback(cmd); | ||
3853 | |||
3854 | switch (cmd->data_direction) { | ||
3855 | case DMA_FROM_DEVICE: | ||
3856 | spin_lock(&cmd->se_lun->lun_sep_lock); | ||
3857 | if (SE_LUN(cmd)->lun_sep) { | ||
3858 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | ||
3859 | cmd->data_length; | ||
3860 | } | ||
3861 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
3862 | /* | ||
3863 | * If enabled by TCM fabric module pre-registered SGL | ||
3864 | * memory, perform the memcpy() from the TCM internal | ||
3865 | * contiguous buffer back to the original SGL. | ||
3866 | */ | ||
3867 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | ||
3868 | transport_memcpy_write_contig(cmd, | ||
3869 | T_TASK(cmd)->t_task_pt_sgl, | ||
3870 | T_TASK(cmd)->t_task_buf); | ||
3871 | |||
3872 | CMD_TFO(cmd)->queue_data_in(cmd); | ||
3873 | break; | ||
3874 | case DMA_TO_DEVICE: | ||
3875 | spin_lock(&cmd->se_lun->lun_sep_lock); | ||
3876 | if (SE_LUN(cmd)->lun_sep) { | ||
3877 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += | ||
3878 | cmd->data_length; | ||
3879 | } | ||
3880 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
3881 | /* | ||
3882 | * Check if we need to send READ payload for BIDI-COMMAND | ||
3883 | */ | ||
3884 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { | ||
3885 | spin_lock(&cmd->se_lun->lun_sep_lock); | ||
3886 | if (SE_LUN(cmd)->lun_sep) { | ||
3887 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | ||
3888 | cmd->data_length; | ||
3889 | } | ||
3890 | spin_unlock(&cmd->se_lun->lun_sep_lock); | ||
3891 | CMD_TFO(cmd)->queue_data_in(cmd); | ||
3892 | break; | ||
3893 | } | ||
3894 | /* Fall through for DMA_TO_DEVICE */ | ||
3895 | case DMA_NONE: | ||
3896 | CMD_TFO(cmd)->queue_status(cmd); | ||
3897 | break; | ||
3898 | default: | ||
3899 | break; | ||
3900 | } | ||
3901 | |||
3902 | transport_lun_remove_cmd(cmd); | ||
3903 | transport_cmd_check_stop_to_fabric(cmd); | ||
3904 | } | ||
3905 | |||
3906 | static void transport_free_dev_tasks(struct se_cmd *cmd) | ||
3907 | { | ||
3908 | struct se_task *task, *task_tmp; | ||
3909 | unsigned long flags; | ||
3910 | |||
3911 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
3912 | list_for_each_entry_safe(task, task_tmp, | ||
3913 | &T_TASK(cmd)->t_task_list, t_list) { | ||
3914 | if (atomic_read(&task->task_active)) | ||
3915 | continue; | ||
3916 | |||
3917 | kfree(task->task_sg_bidi); | ||
3918 | kfree(task->task_sg); | ||
3919 | |||
3920 | list_del(&task->t_list); | ||
3921 | |||
3922 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
3923 | if (task->se_dev) | ||
3924 | TRANSPORT(task->se_dev)->free_task(task); | ||
3925 | else | ||
3926 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | ||
3927 | task->task_no); | ||
3928 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
3929 | } | ||
3930 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
3931 | } | ||
3932 | |||
3933 | static inline void transport_free_pages(struct se_cmd *cmd) | ||
3934 | { | ||
3935 | struct se_mem *se_mem, *se_mem_tmp; | ||
3936 | int free_page = 1; | ||
3937 | |||
3938 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | ||
3939 | free_page = 0; | ||
3940 | if (cmd->se_dev->transport->do_se_mem_map) | ||
3941 | free_page = 0; | ||
3942 | |||
3943 | if (T_TASK(cmd)->t_task_buf) { | ||
3944 | kfree(T_TASK(cmd)->t_task_buf); | ||
3945 | T_TASK(cmd)->t_task_buf = NULL; | ||
3946 | return; | ||
3947 | } | ||
3948 | |||
3949 | /* | ||
3950 | * Caller will handle releasing of struct se_mem. | ||
3951 | */ | ||
3952 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | ||
3953 | return; | ||
3954 | |||
3955 | if (!(T_TASK(cmd)->t_tasks_se_num)) | ||
3956 | return; | ||
3957 | |||
3958 | list_for_each_entry_safe(se_mem, se_mem_tmp, | ||
3959 | T_TASK(cmd)->t_mem_list, se_list) { | ||
3960 | /* | ||
3961 | * We only call __free_page(struct se_mem->se_page) when | ||
3962 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | ||
3963 | */ | ||
3964 | if (free_page) | ||
3965 | __free_page(se_mem->se_page); | ||
3966 | |||
3967 | list_del(&se_mem->se_list); | ||
3968 | kmem_cache_free(se_mem_cache, se_mem); | ||
3969 | } | ||
3970 | |||
3971 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { | ||
3972 | list_for_each_entry_safe(se_mem, se_mem_tmp, | ||
3973 | T_TASK(cmd)->t_mem_bidi_list, se_list) { | ||
3974 | /* | ||
3975 | * We only call __free_page(struct se_mem->se_page) when | ||
3976 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | ||
3977 | */ | ||
3978 | if (free_page) | ||
3979 | __free_page(se_mem->se_page); | ||
3980 | |||
3981 | list_del(&se_mem->se_list); | ||
3982 | kmem_cache_free(se_mem_cache, se_mem); | ||
3983 | } | ||
3984 | } | ||
3985 | |||
3986 | kfree(T_TASK(cmd)->t_mem_bidi_list); | ||
3987 | T_TASK(cmd)->t_mem_bidi_list = NULL; | ||
3988 | kfree(T_TASK(cmd)->t_mem_list); | ||
3989 | T_TASK(cmd)->t_mem_list = NULL; | ||
3990 | T_TASK(cmd)->t_tasks_se_num = 0; | ||
3991 | } | ||
3992 | |||
3993 | static inline void transport_release_tasks(struct se_cmd *cmd) | ||
3994 | { | ||
3995 | transport_free_dev_tasks(cmd); | ||
3996 | } | ||
3997 | |||
3998 | static inline int transport_dec_and_check(struct se_cmd *cmd) | ||
3999 | { | ||
4000 | unsigned long flags; | ||
4001 | |||
4002 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
4003 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | ||
4004 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { | ||
4005 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
4006 | flags); | ||
4007 | return 1; | ||
4008 | } | ||
4009 | } | ||
4010 | |||
4011 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { | ||
4012 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { | ||
4013 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
4014 | flags); | ||
4015 | return 1; | ||
4016 | } | ||
4017 | } | ||
4018 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
4019 | |||
4020 | return 0; | ||
4021 | } | ||
4022 | |||
4023 | static void transport_release_fe_cmd(struct se_cmd *cmd) | ||
4024 | { | ||
4025 | unsigned long flags; | ||
4026 | |||
4027 | if (transport_dec_and_check(cmd)) | ||
4028 | return; | ||
4029 | |||
4030 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
4031 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | ||
4032 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
4033 | goto free_pages; | ||
4034 | } | ||
4035 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | ||
4036 | transport_all_task_dev_remove_state(cmd); | ||
4037 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
4038 | |||
4039 | transport_release_tasks(cmd); | ||
4040 | free_pages: | ||
4041 | transport_free_pages(cmd); | ||
4042 | transport_free_se_cmd(cmd); | ||
4043 | CMD_TFO(cmd)->release_cmd_direct(cmd); | ||
4044 | } | ||
4045 | |||
4046 | static int transport_generic_remove( | ||
4047 | struct se_cmd *cmd, | ||
4048 | int release_to_pool, | ||
4049 | int session_reinstatement) | ||
4050 | { | ||
4051 | unsigned long flags; | ||
4052 | |||
4053 | if (!(T_TASK(cmd))) | ||
4054 | goto release_cmd; | ||
4055 | |||
4056 | if (transport_dec_and_check(cmd)) { | ||
4057 | if (session_reinstatement) { | ||
4058 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
4059 | transport_all_task_dev_remove_state(cmd); | ||
4060 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
4061 | flags); | ||
4062 | } | ||
4063 | return 1; | ||
4064 | } | ||
4065 | |||
4066 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
4067 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | ||
4068 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
4069 | goto free_pages; | ||
4070 | } | ||
4071 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | ||
4072 | transport_all_task_dev_remove_state(cmd); | ||
4073 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
4074 | |||
4075 | transport_release_tasks(cmd); | ||
4076 | free_pages: | ||
4077 | transport_free_pages(cmd); | ||
4078 | |||
4079 | release_cmd: | ||
4080 | if (release_to_pool) { | ||
4081 | transport_release_cmd_to_pool(cmd); | ||
4082 | } else { | ||
4083 | transport_free_se_cmd(cmd); | ||
4084 | CMD_TFO(cmd)->release_cmd_direct(cmd); | ||
4085 | } | ||
4086 | |||
4087 | return 0; | ||
4088 | } | ||
4089 | |||
4090 | /** | ||
4091 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map | ||
4092 | * @cmd: Associated se_cmd descriptor | ||
4093 | * @mem: SGL style memory for TCM WRITE / READ | ||
4094 | * @sg_mem_num: Number of SGL elements | ||
4095 | * @mem_bidi_in: SGL style memory for TCM BIDI READ | ||
4096 | * @sg_mem_bidi_num: Number of BIDI READ SGL elements | ||
4097 | * | ||
4098 | * Return: nonzero if cmd was rejected due to -ENOMEM or improper | ||
4099 | * usage of parameters. | ||
4100 | */ | ||
4101 | int transport_generic_map_mem_to_cmd( | ||
4102 | struct se_cmd *cmd, | ||
4103 | struct scatterlist *mem, | ||
4104 | u32 sg_mem_num, | ||
4105 | struct scatterlist *mem_bidi_in, | ||
4106 | u32 sg_mem_bidi_num) | ||
4107 | { | ||
4108 | u32 se_mem_cnt_out = 0; | ||
4109 | int ret; | ||
4110 | |||
4111 | if (!(mem) || !(sg_mem_num)) | ||
4112 | return 0; | ||
4113 | /* | ||
4114 | * Passed *mem will contain a list_head containing preformatted | ||
4115 | * struct se_mem elements... | ||
4116 | */ | ||
4117 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { | ||
4118 | if ((mem_bidi_in) || (sg_mem_bidi_num)) { | ||
4119 | printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" | ||
4120 | " with BIDI-COMMAND\n"); | ||
4121 | return -ENOSYS; | ||
4122 | } | ||
4123 | |||
4124 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; | ||
4125 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; | ||
4126 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; | ||
4127 | return 0; | ||
4128 | } | ||
4129 | /* | ||
4130 | * Otherwise, assume the caller is passing a struct scatterlist | ||
4131 | * array from include/linux/scatterlist.h | ||
4132 | */ | ||
4133 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | ||
4134 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | ||
4135 | /* | ||
4136 | * For CDBs using TCM struct se_mem linked-list scatterlist memory | ||
4137 | * processed into a TCM struct se_subsystem_dev, we do the mapping | ||
4138 | * from the passed physical memory to struct se_mem->se_page here. | ||
4139 | */ | ||
4140 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | ||
4141 | if (!(T_TASK(cmd)->t_mem_list)) | ||
4142 | return -ENOMEM; | ||
4143 | |||
4144 | ret = transport_map_sg_to_mem(cmd, | ||
4145 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); | ||
4146 | if (ret < 0) | ||
4147 | return -ENOMEM; | ||
4148 | |||
4149 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; | ||
4150 | /* | ||
4151 | * Setup BIDI READ list of struct se_mem elements | ||
4152 | */ | ||
4153 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { | ||
4154 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | ||
4155 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | ||
4156 | kfree(T_TASK(cmd)->t_mem_list); | ||
4157 | return -ENOMEM; | ||
4158 | } | ||
4159 | se_mem_cnt_out = 0; | ||
4160 | |||
4161 | ret = transport_map_sg_to_mem(cmd, | ||
4162 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, | ||
4163 | &se_mem_cnt_out); | ||
4164 | if (ret < 0) { | ||
4165 | kfree(T_TASK(cmd)->t_mem_list); | ||
4166 | return -ENOMEM; | ||
4167 | } | ||
4168 | |||
4169 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; | ||
4170 | } | ||
4171 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | ||
4172 | |||
4173 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | ||
4174 | if (mem_bidi_in || sg_mem_bidi_num) { | ||
4175 | printk(KERN_ERR "BIDI-Commands not supported using " | ||
4176 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); | ||
4177 | return -ENOSYS; | ||
4178 | } | ||
4179 | /* | ||
4180 | * For incoming CDBs using a contiguous buffer internally with TCM, | ||
4181 | * save the passed struct scatterlist memory. After TCM storage object | ||
4182 | * processing has completed for this struct se_cmd, TCM core will call | ||
4183 | * transport_memcpy_[write,read]_contig() as necessary from | ||
4184 | * transport_generic_complete_ok() and transport_write_pending() in order | ||
4185 | * to copy the TCM buffer to/from the original passed *mem in SGL -> | ||
4186 | * struct scatterlist format. | ||
4187 | */ | ||
4188 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | ||
4189 | T_TASK(cmd)->t_task_pt_sgl = mem; | ||
4190 | } | ||
4191 | |||
4192 | return 0; | ||
4193 | } | ||
4194 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | ||
4195 | |||
4196 | |||
4197 | static inline long long transport_dev_end_lba(struct se_device *dev) | ||
4198 | { | ||
4199 | return dev->transport->get_blocks(dev) + 1; | ||
4200 | } | ||
4201 | |||
4202 | static int transport_get_sectors(struct se_cmd *cmd) | ||
4203 | { | ||
4204 | struct se_device *dev = SE_DEV(cmd); | ||
4205 | |||
4206 | T_TASK(cmd)->t_tasks_sectors = | ||
4207 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); | ||
4208 | if (!(T_TASK(cmd)->t_tasks_sectors)) | ||
4209 | T_TASK(cmd)->t_tasks_sectors = 1; | ||
4210 | |||
4211 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) | ||
4212 | return 0; | ||
4213 | |||
4214 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > | ||
4215 | transport_dev_end_lba(dev)) { | ||
4216 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | ||
4217 | " transport_dev_end_lba(): %llu\n", | ||
4218 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | ||
4219 | transport_dev_end_lba(dev)); | ||
4220 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
4221 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | ||
4222 | return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; | ||
4223 | } | ||
4224 | |||
4225 | return 0; | ||
4226 | } | ||
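transport_dev_end_lba() returns one past the last addressable LBA, so the check above rejects any request whose LBA + sector count crosses that point. The arithmetic, as a small sketch with a made-up device size:

#include <stdio.h>

/* Legal when lba + sectors does not pass one past the last LBA,
 * mirroring the comparison in transport_get_sectors() above. */
static int range_ok(unsigned long long lba, unsigned int sectors,
		    unsigned long long last_lba)
{
	return (lba + sectors) <= (last_lba + 1);
}

int main(void)
{
	unsigned long long last_lba = 2047;	/* 1 MiB of 512-byte blocks */

	printf("%d\n", range_ok(2040, 8, last_lba));	/* 1: exactly fits */
	printf("%d\n", range_ok(2040, 9, last_lba));	/* 0: one past end */
	return 0;
}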
4227 | |||
4228 | static int transport_new_cmd_obj(struct se_cmd *cmd) | ||
4229 | { | ||
4230 | struct se_device *dev = SE_DEV(cmd); | ||
4231 | u32 task_cdbs = 0, rc; | ||
4232 | |||
4233 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | ||
4234 | task_cdbs++; | ||
4235 | T_TASK(cmd)->t_task_cdbs++; | ||
4236 | } else { | ||
4237 | int set_counts = 1; | ||
4238 | |||
4239 | /* | ||
4240 | * Setup any BIDI READ tasks and memory from | ||
4241 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks | ||
4242 | * are queued first for the non pSCSI passthrough case. | ||
4243 | */ | ||
4244 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | ||
4245 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | ||
4246 | rc = transport_generic_get_cdb_count(cmd, | ||
4247 | T_TASK(cmd)->t_task_lba, | ||
4248 | T_TASK(cmd)->t_tasks_sectors, | ||
4249 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, | ||
4250 | set_counts); | ||
4251 | if (!(rc)) { | ||
4252 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
4253 | cmd->scsi_sense_reason = | ||
4254 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
4255 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
4256 | } | ||
4257 | set_counts = 0; | ||
4258 | } | ||
4259 | /* | ||
4260 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list | ||
4261 | * Note for BIDI transfers this will contain the WRITE payload | ||
4262 | */ | ||
4263 | task_cdbs = transport_generic_get_cdb_count(cmd, | ||
4264 | T_TASK(cmd)->t_task_lba, | ||
4265 | T_TASK(cmd)->t_tasks_sectors, | ||
4266 | cmd->data_direction, T_TASK(cmd)->t_mem_list, | ||
4267 | set_counts); | ||
4268 | if (!(task_cdbs)) { | ||
4269 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | ||
4270 | cmd->scsi_sense_reason = | ||
4271 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
4272 | return PYX_TRANSPORT_LU_COMM_FAILURE; | ||
4273 | } | ||
4274 | T_TASK(cmd)->t_task_cdbs += task_cdbs; | ||
4275 | |||
4276 | #if 0 | ||
4277 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | ||
4278 | " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, | ||
4279 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | ||
4280 | T_TASK(cmd)->t_task_cdbs); | ||
4281 | #endif | ||
4282 | } | ||
4283 | |||
4284 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); | ||
4285 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); | ||
4286 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); | ||
4287 | return 0; | ||
4288 | } | ||
4289 | |||
4290 | static struct list_head *transport_init_se_mem_list(void) | ||
4291 | { | ||
4292 | struct list_head *se_mem_list; | ||
4293 | |||
4294 | se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); | ||
4295 | if (!(se_mem_list)) { | ||
4296 | printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); | ||
4297 | return NULL; | ||
4298 | } | ||
4299 | INIT_LIST_HEAD(se_mem_list); | ||
4300 | |||
4301 | return se_mem_list; | ||
4302 | } | ||
4303 | |||
4304 | static int | ||
4305 | transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | ||
4306 | { | ||
4307 | unsigned char *buf; | ||
4308 | struct se_mem *se_mem; | ||
4309 | |||
4310 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | ||
4311 | if (!(T_TASK(cmd)->t_mem_list)) | ||
4312 | return -ENOMEM; | ||
4313 | |||
4314 | /* | ||
4315 | * If the device uses memory mapping this is enough. | ||
4316 | */ | ||
4317 | if (cmd->se_dev->transport->do_se_mem_map) | ||
4318 | return 0; | ||
4319 | |||
4320 | /* | ||
4321 | * Setup BIDI-COMMAND READ list of struct se_mem elements | ||
4322 | */ | ||
4323 | if (T_TASK(cmd)->t_tasks_bidi) { | ||
4324 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | ||
4325 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | ||
4326 | kfree(T_TASK(cmd)->t_mem_list); | ||
4327 | return -ENOMEM; | ||
4328 | } | ||
4329 | } | ||
4330 | |||
4331 | while (length) { | ||
4332 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
4333 | if (!(se_mem)) { | ||
4334 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
4335 | goto out; | ||
4336 | } | ||
4337 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4338 | se_mem->se_len = (length > dma_size) ? dma_size : length; | ||
4339 | |||
4340 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | ||
4341 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); | ||
4342 | if (!(se_mem->se_page)) { | ||
4343 | printk(KERN_ERR "alloc_pages() failed\n"); | ||
4344 | goto out; | ||
4345 | } | ||
4346 | |||
4347 | buf = kmap_atomic(se_mem->se_page, KM_IRQ0); | ||
4348 | if (!(buf)) { | ||
4349 | printk(KERN_ERR "kmap_atomic() failed\n"); | ||
4350 | goto out; | ||
4351 | } | ||
4352 | memset(buf, 0, se_mem->se_len); | ||
4353 | kunmap_atomic(buf, KM_IRQ0); | ||
4354 | |||
4355 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); | ||
4356 | T_TASK(cmd)->t_tasks_se_num++; | ||
4357 | |||
4358 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | ||
4359 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | ||
4360 | se_mem->se_off); | ||
4361 | |||
4362 | length -= se_mem->se_len; | ||
4363 | } | ||
4364 | |||
4365 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | ||
4366 | T_TASK(cmd)->t_tasks_se_num); | ||
4367 | |||
4368 | return 0; | ||
4369 | out: | ||
4370 | return -1; | ||
4371 | } | ||
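The loop above carves cmd->data_length into struct se_mem elements of at most dma_size bytes each, backed by one freshly zeroed page apiece. A minimal sketch of the element count this produces, assuming nothing beyond the arithmetic visible above:

	/* Sketch: number of se_mem elements for a given length/dma_size. */
	static u32 se_mem_elements(u32 length, u32 dma_size)
	{
		return (length + dma_size - 1) / dma_size; /* DIV_ROUND_UP() */
	}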
4372 | |||
4373 | extern u32 transport_calc_sg_num( | ||
4374 | struct se_task *task, | ||
4375 | struct se_mem *in_se_mem, | ||
4376 | u32 task_offset) | ||
4377 | { | ||
4378 | struct se_cmd *se_cmd = task->task_se_cmd; | ||
4379 | struct se_device *se_dev = SE_DEV(se_cmd); | ||
4380 | struct se_mem *se_mem = in_se_mem; | ||
4381 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); | ||
4382 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; | ||
4383 | |||
4384 | while (task_size != 0) { | ||
4385 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" | ||
4386 | " se_mem->se_off(%u) task_offset(%u)\n", | ||
4387 | se_mem->se_page, se_mem->se_len, | ||
4388 | se_mem->se_off, task_offset); | ||
4389 | |||
4390 | if (task_offset == 0) { | ||
4391 | if (task_size >= se_mem->se_len) { | ||
4392 | sg_length = se_mem->se_len; | ||
4393 | |||
4394 | if (!(list_is_last(&se_mem->se_list, | ||
4395 | T_TASK(se_cmd)->t_mem_list))) | ||
4396 | se_mem = list_entry(se_mem->se_list.next, | ||
4397 | struct se_mem, se_list); | ||
4398 | } else { | ||
4399 | sg_length = task_size; | ||
4400 | task_size -= sg_length; | ||
4401 | goto next; | ||
4402 | } | ||
4403 | |||
4404 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | ||
4405 | sg_length, task_size); | ||
4406 | } else { | ||
4407 | if ((se_mem->se_len - task_offset) > task_size) { | ||
4408 | sg_length = task_size; | ||
4409 | task_size -= sg_length; | ||
4410 | goto next; | ||
4411 | } else { | ||
4412 | sg_length = (se_mem->se_len - task_offset); | ||
4413 | |||
4414 | if (!(list_is_last(&se_mem->se_list, | ||
4415 | T_TASK(se_cmd)->t_mem_list))) | ||
4416 | se_mem = list_entry(se_mem->se_list.next, | ||
4417 | struct se_mem, se_list); | ||
4418 | } | ||
4419 | |||
4420 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | ||
4421 | sg_length, task_size); | ||
4422 | |||
4423 | task_offset = 0; | ||
4424 | } | ||
4425 | task_size -= sg_length; | ||
4426 | next: | ||
4427 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", | ||
4428 | task->task_no, task_size); | ||
4429 | |||
4430 | task->task_sg_num++; | ||
4431 | } | ||
4432 | /* | ||
4433 | * Check if the fabric module driver is requesting that all | ||
4434 | * struct se_task->task_sg[] be chained together.. If so, | ||
4435 | * then allocate an extra padding SG entry for linking and | ||
4436 | * marking the end of the chained SGL. | ||
4437 | */ | ||
4438 | if (tfo->task_sg_chaining) { | ||
4439 | task_sg_num_padded = (task->task_sg_num + 1); | ||
4440 | task->task_padded_sg = 1; | ||
4441 | } else | ||
4442 | task_sg_num_padded = task->task_sg_num; | ||
4443 | |||
4444 | task->task_sg = kzalloc(task_sg_num_padded * | ||
4445 | sizeof(struct scatterlist), GFP_KERNEL); | ||
4446 | if (!(task->task_sg)) { | ||
4447 | printk(KERN_ERR "Unable to allocate memory for" | ||
4448 | " task->task_sg\n"); | ||
4449 | return 0; | ||
4450 | } | ||
4451 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | ||
4452 | /* | ||
4453 | * Setup task->task_sg_bidi for SCSI READ payload for | ||
4454 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | ||
4455 | */ | ||
4456 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && | ||
4457 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { | ||
4458 | task->task_sg_bidi = kzalloc(task_sg_num_padded * | ||
4459 | sizeof(struct scatterlist), GFP_KERNEL); | ||
4460 | if (!(task->task_sg_bidi)) { | ||
4461 | printk(KERN_ERR "Unable to allocate memory for" | ||
4462 | " task->task_sg_bidi\n"); | ||
4463 | return 0; | ||
4464 | } | ||
4465 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | ||
4466 | } | ||
4467 | /* | ||
4468 | * For the chaining case, setup the proper end of SGL for the | ||
4469 | * initial submission struct task into struct se_subsystem_api. | ||
4470 | * This will be cleared later by transport_do_task_sg_chain() | ||
4471 | */ | ||
4472 | if (task->task_padded_sg) { | ||
4473 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); | ||
4474 | /* | ||
4475 | * Added the 'if' check before marking end of bi-directional | ||
4476 | * scatterlist, which gets created only in case of a | ||
4477 | * bi-directional (RD + WR) request. | ||
4478 | */ | ||
4479 | if (task->task_sg_bidi) | ||
4480 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); | ||
4481 | } | ||
4482 | |||
4483 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," | ||
4484 | " task_sg_num_padded(%u)\n", task->task_sg_num, | ||
4485 | task_sg_num_padded); | ||
4486 | |||
4487 | return task->task_sg_num; | ||
4488 | } | ||
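The padding rule above, in isolation: when the fabric sets task_sg_chaining, one extra scatterlist entry is reserved so that transport_do_task_sg_chain() can later convert the final slot into a chain link (or leave it as the terminator). A one-line sketch of the allocation size:

	/* Sketch of the padded entry count used for the kzalloc() above. */
	u32 nents = task->task_sg_num + (tfo->task_sg_chaining ? 1 : 0);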
4489 | |||
4490 | static inline int transport_set_tasks_sectors_disk( | ||
4491 | struct se_task *task, | ||
4492 | struct se_device *dev, | ||
4493 | unsigned long long lba, | ||
4494 | u32 sectors, | ||
4495 | int *max_sectors_set) | ||
4496 | { | ||
4497 | if ((lba + sectors) > transport_dev_end_lba(dev)) { | ||
4498 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); | ||
4499 | |||
4500 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { | ||
4501 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | ||
4502 | *max_sectors_set = 1; | ||
4503 | } | ||
4504 | } else { | ||
4505 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | ||
4506 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | ||
4507 | *max_sectors_set = 1; | ||
4508 | } else | ||
4509 | task->task_sectors = sectors; | ||
4510 | } | ||
4511 | |||
4512 | return 0; | ||
4513 | } | ||
4514 | |||
4515 | static inline int transport_set_tasks_sectors_non_disk( | ||
4516 | struct se_task *task, | ||
4517 | struct se_device *dev, | ||
4518 | unsigned long long lba, | ||
4519 | u32 sectors, | ||
4520 | int *max_sectors_set) | ||
4521 | { | ||
4522 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | ||
4523 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | ||
4524 | *max_sectors_set = 1; | ||
4525 | } else | ||
4526 | task->task_sectors = sectors; | ||
4527 | |||
4528 | return 0; | ||
4529 | } | ||
4530 | |||
4531 | static inline int transport_set_tasks_sectors( | ||
4532 | struct se_task *task, | ||
4533 | struct se_device *dev, | ||
4534 | unsigned long long lba, | ||
4535 | u32 sectors, | ||
4536 | int *max_sectors_set) | ||
4537 | { | ||
4538 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? | ||
4539 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, | ||
4540 | max_sectors_set) : | ||
4541 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, | ||
4542 | max_sectors_set); | ||
4543 | } | ||
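Both helpers reduce to the same clamp: a task may not run past the end of the device (TYPE_DISK only) and may not exceed the backstore's max_sectors. A minimal equivalent sketch (clamp_task_sectors() is a hypothetical name):

	static u32 clamp_task_sectors(struct se_device *dev,
				      unsigned long long lba, u32 sectors)
	{
		if (lba + sectors > transport_dev_end_lba(dev))
			sectors = (u32)(transport_dev_end_lba(dev) - lba) + 1;
		if (sectors > DEV_ATTRIB(dev)->max_sectors)
			sectors = DEV_ATTRIB(dev)->max_sectors;
		return sectors;
	}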
4544 | |||
4545 | static int transport_map_sg_to_mem( | ||
4546 | struct se_cmd *cmd, | ||
4547 | struct list_head *se_mem_list, | ||
4548 | void *in_mem, | ||
4549 | u32 *se_mem_cnt) | ||
4550 | { | ||
4551 | struct se_mem *se_mem; | ||
4552 | struct scatterlist *sg; | ||
4553 | u32 sg_count = 1, cmd_size = cmd->data_length; | ||
4554 | |||
4555 | if (!in_mem) { | ||
4556 | printk(KERN_ERR "No source scatterlist\n"); | ||
4557 | return -1; | ||
4558 | } | ||
4559 | sg = (struct scatterlist *)in_mem; | ||
4560 | |||
4561 | while (cmd_size) { | ||
4562 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | ||
4563 | if (!(se_mem)) { | ||
4564 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | ||
4565 | return -1; | ||
4566 | } | ||
4567 | INIT_LIST_HEAD(&se_mem->se_list); | ||
4568 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | ||
4569 | " sg_page: %p offset: %d length: %d\n", cmd_size, | ||
4570 | sg_page(sg), sg->offset, sg->length); | ||
4571 | |||
4572 | se_mem->se_page = sg_page(sg); | ||
4573 | se_mem->se_off = sg->offset; | ||
4574 | |||
4575 | if (cmd_size > sg->length) { | ||
4576 | se_mem->se_len = sg->length; | ||
4577 | sg = sg_next(sg); | ||
4578 | sg_count++; | ||
4579 | } else | ||
4580 | se_mem->se_len = cmd_size; | ||
4581 | |||
4582 | cmd_size -= se_mem->se_len; | ||
4583 | |||
4584 | DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", | ||
4585 | *se_mem_cnt, cmd_size); | ||
4586 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", | ||
4587 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | ||
4588 | |||
4589 | list_add_tail(&se_mem->se_list, se_mem_list); | ||
4590 | (*se_mem_cnt)++; | ||
4591 | } | ||
4592 | |||
4593 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" | ||
4594 | " struct se_mem\n", sg_count, *se_mem_cnt); | ||
4595 | |||
4596 | if (sg_count != *se_mem_cnt) | ||
4597 | BUG(); | ||
4598 | |||
4599 | return 0; | ||
4600 | } | ||
4601 | |||
4602 | /* transport_map_mem_to_sg(): | ||
4603 | * | ||
4604 | * | ||
4605 | */ | ||
4606 | int transport_map_mem_to_sg( | ||
4607 | struct se_task *task, | ||
4608 | struct list_head *se_mem_list, | ||
4609 | void *in_mem, | ||
4610 | struct se_mem *in_se_mem, | ||
4611 | struct se_mem **out_se_mem, | ||
4612 | u32 *se_mem_cnt, | ||
4613 | u32 *task_offset) | ||
4614 | { | ||
4615 | struct se_cmd *se_cmd = task->task_se_cmd; | ||
4616 | struct se_mem *se_mem = in_se_mem; | ||
4617 | struct scatterlist *sg = (struct scatterlist *)in_mem; | ||
4618 | u32 task_size = task->task_size, sg_no = 0; | ||
4619 | |||
4620 | if (!sg) { | ||
4621 | printk(KERN_ERR "Unable to locate valid struct" | ||
4622 | " scatterlist pointer\n"); | ||
4623 | return -1; | ||
4624 | } | ||
4625 | |||
4626 | while (task_size != 0) { | ||
4627 | /* | ||
4628 | * Setup the contiguous array of scatterlists for | ||
4629 | * this struct se_task. | ||
4630 | */ | ||
4631 | sg_assign_page(sg, se_mem->se_page); | ||
4632 | |||
4633 | if (*task_offset == 0) { | ||
4634 | sg->offset = se_mem->se_off; | ||
4635 | |||
4636 | if (task_size >= se_mem->se_len) { | ||
4637 | sg->length = se_mem->se_len; | ||
4638 | |||
4639 | if (!(list_is_last(&se_mem->se_list, | ||
4640 | T_TASK(se_cmd)->t_mem_list))) { | ||
4641 | se_mem = list_entry(se_mem->se_list.next, | ||
4642 | struct se_mem, se_list); | ||
4643 | (*se_mem_cnt)++; | ||
4644 | } | ||
4645 | } else { | ||
4646 | sg->length = task_size; | ||
4647 | /* | ||
4648 | * Determine if we need to calculate an offset | ||
4649 | * into the struct se_mem on the next go around.. | ||
4650 | */ | ||
4651 | task_size -= sg->length; | ||
4652 | if (!(task_size)) | ||
4653 | *task_offset = sg->length; | ||
4654 | |||
4655 | goto next; | ||
4656 | } | ||
4657 | |||
4658 | } else { | ||
4659 | sg->offset = (*task_offset + se_mem->se_off); | ||
4660 | |||
4661 | if ((se_mem->se_len - *task_offset) > task_size) { | ||
4662 | sg->length = task_size; | ||
4663 | /* | ||
4664 | * Determine if we need to calculate an offset | ||
4665 | * into the struct se_mem on the next go around.. | ||
4666 | */ | ||
4667 | task_size -= sg->length; | ||
4668 | if (!(task_size)) | ||
4669 | *task_offset += sg->length; | ||
4670 | |||
4671 | goto next; | ||
4672 | } else { | ||
4673 | sg->length = (se_mem->se_len - *task_offset); | ||
4674 | |||
4675 | if (!(list_is_last(&se_mem->se_list, | ||
4676 | T_TASK(se_cmd)->t_mem_list))) { | ||
4677 | se_mem = list_entry(se_mem->se_list.next, | ||
4678 | struct se_mem, se_list); | ||
4679 | (*se_mem_cnt)++; | ||
4680 | } | ||
4681 | } | ||
4682 | |||
4683 | *task_offset = 0; | ||
4684 | } | ||
4685 | task_size -= sg->length; | ||
4686 | next: | ||
4687 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" | ||
4688 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, | ||
4689 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); | ||
4690 | |||
4691 | sg_no++; | ||
4692 | if (!(task_size)) | ||
4693 | break; | ||
4694 | |||
4695 | sg = sg_next(sg); | ||
4696 | |||
4697 | if (task_size > se_cmd->data_length) | ||
4698 | BUG(); | ||
4699 | } | ||
4700 | *out_se_mem = se_mem; | ||
4701 | |||
4702 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" | ||
4703 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); | ||
4704 | |||
4705 | return 0; | ||
4706 | } | ||
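The *task_offset out-parameter carries the resume point whenever a task boundary falls inside a struct se_mem. A worked example with illustrative numbers:

	/*
	 * One 8192-byte se_mem split across two 4096-byte tasks:
	 *   task 0: sg->offset = se_off,        sg->length = 4096,
	 *           *task_offset = 4096 on return
	 *   task 1: sg->offset = se_off + 4096, sg->length = 4096,
	 *           *task_offset reset to 0, se_mem advances
	 */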
4707 | |||
4708 | /* | ||
4709 | * This function can be used by HW target mode drivers to create a linked | ||
4710 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | ||
4711 | * This is intended to be called during the completion path by TCM Core | ||
4712 | * when struct target_core_fabric_ops->task_sg_chaining is enabled. | ||
4713 | */ | ||
4714 | void transport_do_task_sg_chain(struct se_cmd *cmd) | ||
4715 | { | ||
4716 | struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; | ||
4717 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; | ||
4718 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; | ||
4719 | struct se_task *task; | ||
4720 | struct target_core_fabric_ops *tfo = CMD_TFO(cmd); | ||
4721 | u32 task_sg_num = 0, sg_count = 0; | ||
4722 | int i; | ||
4723 | |||
4724 | if (tfo->task_sg_chaining == 0) { | ||
4725 | printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" | ||
4726 | " %s\n", tfo->get_fabric_name()); | ||
4727 | dump_stack(); | ||
4728 | return; | ||
4729 | } | ||
4730 | /* | ||
4731 | * Walk the struct se_task list and setup scatterlist chains | ||
4732 | * for each contiguously allocated struct se_task->task_sg[]. | ||
4733 | */ | ||
4734 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
4735 | if (!(task->task_sg) || !(task->task_padded_sg)) | ||
4736 | continue; | ||
4737 | |||
4738 | if (sg_head && sg_link) { | ||
4739 | sg_head_cur = &task->task_sg[0]; | ||
4740 | sg_link_cur = &task->task_sg[task->task_sg_num]; | ||
4741 | /* | ||
4742 | * Either add chain or mark end of scatterlist | ||
4743 | */ | ||
4744 | if (!(list_is_last(&task->t_list, | ||
4745 | &T_TASK(cmd)->t_task_list))) { | ||
4746 | /* | ||
4747 | * Clear existing SGL termination bit set in | ||
4748 | * transport_calc_sg_num(), see sg_mark_end() | ||
4749 | */ | ||
4750 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; | ||
4751 | sg_end_cur->page_link &= ~0x02; | ||
4752 | |||
4753 | sg_chain(sg_head, task_sg_num, sg_head_cur); | ||
4754 | sg_count += (task->task_sg_num + 1); | ||
4755 | } else | ||
4756 | sg_count += task->task_sg_num; | ||
4757 | |||
4758 | sg_head = sg_head_cur; | ||
4759 | sg_link = sg_link_cur; | ||
4760 | task_sg_num = task->task_sg_num; | ||
4761 | continue; | ||
4762 | } | ||
4763 | sg_head = sg_first = &task->task_sg[0]; | ||
4764 | sg_link = &task->task_sg[task->task_sg_num]; | ||
4765 | task_sg_num = task->task_sg_num; | ||
4766 | /* | ||
4767 | * Check for single task.. | ||
4768 | */ | ||
4769 | if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { | ||
4770 | /* | ||
4771 | * Clear existing SGL termination bit set in | ||
4772 | * transport_calc_sg_num(), see sg_mark_end() | ||
4773 | */ | ||
4774 | sg_end = &task->task_sg[task->task_sg_num - 1]; | ||
4775 | sg_end->page_link &= ~0x02; | ||
4776 | sg_count += (task->task_sg_num + 1); | ||
4777 | } else | ||
4778 | sg_count += task->task_sg_num; | ||
4779 | } | ||
4780 | /* | ||
4781 | * Setup the starting pointer and total t_tasks_sg_chained_no including | ||
4782 | * padding SGs for linking and to mark the end. | ||
4783 | */ | ||
4784 | T_TASK(cmd)->t_tasks_sg_chained = sg_first; | ||
4785 | T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; | ||
4786 | |||
4787 | DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" | ||
4788 | " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, | ||
4789 | T_TASK(cmd)->t_tasks_sg_chained_no); | ||
4790 | |||
4791 | for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, | ||
4792 | T_TASK(cmd)->t_tasks_sg_chained_no, i) { | ||
4793 | |||
4794 | DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", | ||
4795 | sg, sg_page(sg), sg->length, sg->offset); | ||
4796 | if (sg_is_chain(sg)) | ||
4797 | DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); | ||
4798 | if (sg_is_last(sg)) | ||
4799 | DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); | ||
4800 | } | ||
4801 | |||
4802 | } | ||
4803 | EXPORT_SYMBOL(transport_do_task_sg_chain); | ||
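A consumer walks the chained SGL with the standard scatterlist iterator, which transparently follows the chain links set up above. A minimal sketch of a fabric-side walk (my_map_one_sg() is a hypothetical callback):

	struct scatterlist *sg;
	int i;

	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
		    T_TASK(cmd)->t_tasks_sg_chained_no, i)
		my_map_one_sg(sg_page(sg), sg->offset, sg->length);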
4804 | |||
4805 | static int transport_do_se_mem_map( | ||
4806 | struct se_device *dev, | ||
4807 | struct se_task *task, | ||
4808 | struct list_head *se_mem_list, | ||
4809 | void *in_mem, | ||
4810 | struct se_mem *in_se_mem, | ||
4811 | struct se_mem **out_se_mem, | ||
4812 | u32 *se_mem_cnt, | ||
4813 | u32 *task_offset_in) | ||
4814 | { | ||
4815 | u32 task_offset = *task_offset_in; | ||
4816 | int ret = 0; | ||
4817 | /* | ||
4818 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation | ||
4819 | * has been done by the transport plugin. | ||
4820 | */ | ||
4821 | if (TRANSPORT(dev)->do_se_mem_map) { | ||
4822 | ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, | ||
4823 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, | ||
4824 | task_offset_in); | ||
4825 | if (ret == 0) | ||
4826 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | ||
4827 | |||
4828 | return ret; | ||
4829 | } | ||
4830 | /* | ||
4831 | * This is the normal path for all non BIDI and BIDI-COMMAND | ||
4832 | * WRITE payloads. If we need to do BIDI READ passthrough for | ||
4833 | * TCM/pSCSI, the first call to transport_do_se_mem_map() -> | ||
4834 | * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the | ||
4835 | * allocation for task->task_sg_bidi, and the subsequent call to | ||
4836 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() | ||
4837 | * will map the BIDI READ payload into it. | ||
4838 | if (!(task->task_sg_bidi)) { | ||
4839 | /* | ||
4840 | * Assume default that transport plugin speaks preallocated | ||
4841 | * scatterlists. | ||
4842 | */ | ||
4843 | if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) | ||
4844 | return -1; | ||
4845 | /* | ||
4846 | * struct se_task->task_sg now contains the struct scatterlist array. | ||
4847 | */ | ||
4848 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | ||
4849 | in_se_mem, out_se_mem, se_mem_cnt, | ||
4850 | task_offset_in); | ||
4851 | } | ||
4852 | /* | ||
4853 | * Handle the se_mem_list -> struct task->task_sg_bidi | ||
4854 | * memory map for the extra BIDI READ payload | ||
4855 | */ | ||
4856 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, | ||
4857 | in_se_mem, out_se_mem, se_mem_cnt, | ||
4858 | task_offset_in); | ||
4859 | } | ||
4860 | |||
4861 | static u32 transport_generic_get_cdb_count( | ||
4862 | struct se_cmd *cmd, | ||
4863 | unsigned long long lba, | ||
4864 | u32 sectors, | ||
4865 | enum dma_data_direction data_direction, | ||
4866 | struct list_head *mem_list, | ||
4867 | int set_counts) | ||
4868 | { | ||
4869 | unsigned char *cdb = NULL; | ||
4870 | struct se_task *task; | ||
4871 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | ||
4872 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; | ||
4873 | struct se_device *dev = SE_DEV(cmd); | ||
4874 | int max_sectors_set = 0, ret; | ||
4875 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; | ||
4876 | |||
4877 | if (!mem_list) { | ||
4878 | printk(KERN_ERR "mem_list is NULL in transport_generic_get" | ||
4879 | "_cdb_count()\n"); | ||
4880 | return 0; | ||
4881 | } | ||
4882 | /* | ||
4883 | * Using RAMDISK_DR backstores is the only case where | ||
4884 | * mem_list will ever be empty at this point. | ||
4885 | */ | ||
4886 | if (!(list_empty(mem_list))) | ||
4887 | se_mem = list_entry(mem_list->next, struct se_mem, se_list); | ||
4888 | /* | ||
4889 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to | ||
4890 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation | ||
4891 | */ | ||
4892 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | ||
4893 | !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && | ||
4894 | (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) | ||
4895 | se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, | ||
4896 | struct se_mem, se_list); | ||
4897 | |||
4898 | while (sectors) { | ||
4899 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", | ||
4900 | CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, | ||
4901 | transport_dev_end_lba(dev)); | ||
4902 | |||
4903 | task = transport_generic_get_task(cmd, data_direction); | ||
4904 | if (!(task)) | ||
4905 | goto out; | ||
4906 | |||
4907 | transport_set_tasks_sectors(task, dev, lba, sectors, | ||
4908 | &max_sectors_set); | ||
4909 | |||
4910 | task->task_lba = lba; | ||
4911 | lba += task->task_sectors; | ||
4912 | sectors -= task->task_sectors; | ||
4913 | task->task_size = (task->task_sectors * | ||
4914 | DEV_ATTRIB(dev)->block_size); | ||
4915 | |||
4916 | cdb = TRANSPORT(dev)->get_cdb(task); | ||
4917 | if ((cdb)) { | ||
4918 | memcpy(cdb, T_TASK(cmd)->t_task_cdb, | ||
4919 | scsi_command_size(T_TASK(cmd)->t_task_cdb)); | ||
4920 | cmd->transport_split_cdb(task->task_lba, | ||
4921 | &task->task_sectors, cdb); | ||
4922 | } | ||
4923 | |||
4924 | /* | ||
4925 | * Perform the SE OBJ plugin and/or Transport plugin specific | ||
4926 | * mapping for T_TASK(cmd)->t_mem_list. And setup the | ||
4927 | * task->task_sg and if necessary task->task_sg_bidi | ||
4928 | */ | ||
4929 | ret = transport_do_se_mem_map(dev, task, mem_list, | ||
4930 | NULL, se_mem, &se_mem_lout, &se_mem_cnt, | ||
4931 | &task_offset_in); | ||
4932 | if (ret < 0) | ||
4933 | goto out; | ||
4934 | |||
4935 | se_mem = se_mem_lout; | ||
4936 | /* | ||
4937 | * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi | ||
4938 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI | ||
4939 | * | ||
4940 | * Note that the first call to transport_do_se_mem_map() above will | ||
4941 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() | ||
4942 | * -> transport_calc_sg_num(), and the second call here will | ||
4943 | * perform the actual BIDI READ mapping. | ||
4944 | */ | ||
4945 | if (task->task_sg_bidi != NULL) { | ||
4946 | ret = transport_do_se_mem_map(dev, task, | ||
4947 | T_TASK(cmd)->t_mem_bidi_list, NULL, | ||
4948 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, | ||
4949 | &task_offset_in); | ||
4950 | if (ret < 0) | ||
4951 | goto out; | ||
4952 | |||
4953 | se_mem_bidi = se_mem_bidi_lout; | ||
4954 | } | ||
4955 | task_cdbs++; | ||
4956 | |||
4957 | DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", | ||
4958 | task_cdbs, task->task_sg_num); | ||
4959 | |||
4960 | if (max_sectors_set) { | ||
4961 | max_sectors_set = 0; | ||
4962 | continue; | ||
4963 | } | ||
4964 | |||
4965 | if (!sectors) | ||
4966 | break; | ||
4967 | } | ||
4968 | |||
4969 | if (set_counts) { | ||
4970 | atomic_inc(&T_TASK(cmd)->t_fe_count); | ||
4971 | atomic_inc(&T_TASK(cmd)->t_se_count); | ||
4972 | } | ||
4973 | |||
4974 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", | ||
4975 | CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) | ||
4976 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); | ||
4977 | |||
4978 | return task_cdbs; | ||
4979 | out: | ||
4980 | return 0; | ||
4981 | } | ||
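Stripped of the CDB and memory-map plumbing, the loop above is a plain LBA-range splitter: each iteration peels off at most max_sectors and builds one struct se_task for it. In miniature (the real loop also clamps against the device's last LBA):

	/* Sketch of the splitting invariant in the loop above. */
	while (sectors) {
		u32 chunk = min(sectors, DEV_ATTRIB(dev)->max_sectors);
		/* one struct se_task covers [lba, lba + chunk) */
		lba += chunk;
		sectors -= chunk;
	}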
4982 | |||
4983 | static int | ||
4984 | transport_map_control_cmd_to_task(struct se_cmd *cmd) | ||
4985 | { | ||
4986 | struct se_device *dev = SE_DEV(cmd); | ||
4987 | unsigned char *cdb; | ||
4988 | struct se_task *task; | ||
4989 | int ret; | ||
4990 | |||
4991 | task = transport_generic_get_task(cmd, cmd->data_direction); | ||
4992 | if (!task) | ||
4993 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
4994 | |||
4995 | cdb = TRANSPORT(dev)->get_cdb(task); | ||
4996 | if (cdb) | ||
4997 | memcpy(cdb, cmd->t_task->t_task_cdb, | ||
4998 | scsi_command_size(cmd->t_task->t_task_cdb)); | ||
4999 | |||
5000 | task->task_size = cmd->data_length; | ||
5001 | task->task_sg_num = | ||
5002 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; | ||
5003 | |||
5004 | atomic_inc(&cmd->t_task->t_fe_count); | ||
5005 | atomic_inc(&cmd->t_task->t_se_count); | ||
5006 | |||
5007 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | ||
5008 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | ||
5009 | u32 se_mem_cnt = 0, task_offset = 0; | ||
5010 | |||
5011 | BUG_ON(list_empty(cmd->t_task->t_mem_list)); | ||
5012 | |||
5013 | ret = transport_do_se_mem_map(dev, task, | ||
5014 | cmd->t_task->t_mem_list, NULL, se_mem, | ||
5015 | &se_mem_lout, &se_mem_cnt, &task_offset); | ||
5016 | if (ret < 0) | ||
5017 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
5018 | |||
5019 | if (dev->transport->map_task_SG) | ||
5020 | return dev->transport->map_task_SG(task); | ||
5021 | return 0; | ||
5022 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | ||
5023 | if (dev->transport->map_task_non_SG) | ||
5024 | return dev->transport->map_task_non_SG(task); | ||
5025 | return 0; | ||
5026 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | ||
5027 | if (dev->transport->cdb_none) | ||
5028 | return dev->transport->cdb_none(task); | ||
5029 | return 0; | ||
5030 | } else { | ||
5031 | BUG(); | ||
5032 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
5033 | } | ||
5034 | } | ||
5035 | |||
5036 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | ||
5037 | * | ||
5038 | * Allocate storage transport resources from a set of values predefined | ||
5039 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | ||
5040 | * Any non zero return here is treated as an "out of resources" failure. | ||
5041 | */ | ||
5042 | /* | ||
5043 | * Generate struct se_task(s) and/or their payloads for this CDB. | ||
5044 | */ | ||
5045 | static int transport_generic_new_cmd(struct se_cmd *cmd) | ||
5046 | { | ||
5047 | struct se_portal_group *se_tpg; | ||
5048 | struct se_task *task; | ||
5049 | struct se_device *dev = SE_DEV(cmd); | ||
5050 | int ret = 0; | ||
5051 | |||
5052 | /* | ||
5053 | * Determine if the TCM fabric module has already allocated physical | ||
5054 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | ||
5055 | * to setup beforehand the linked list of physical memory at | ||
5056 | * T_TASK(cmd)->t_mem_list of struct se_mem->se_page | ||
5057 | */ | ||
5058 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | ||
5059 | ret = transport_allocate_resources(cmd); | ||
5060 | if (ret < 0) | ||
5061 | return ret; | ||
5062 | } | ||
5063 | |||
5064 | ret = transport_get_sectors(cmd); | ||
5065 | if (ret < 0) | ||
5066 | return ret; | ||
5067 | |||
5068 | ret = transport_new_cmd_obj(cmd); | ||
5069 | if (ret < 0) | ||
5070 | return ret; | ||
5071 | |||
5072 | /* | ||
5073 | * Determine if the calling TCM fabric module is talking to | ||
5074 | * Linux/NET via kernel sockets and needs to allocate a | ||
5075 | * struct iovec array to complete the struct se_cmd | ||
5076 | */ | ||
5077 | se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; | ||
5078 | if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { | ||
5079 | ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); | ||
5080 | if (ret < 0) | ||
5081 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | ||
5082 | } | ||
5083 | |||
5084 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | ||
5085 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | ||
5086 | if (atomic_read(&task->task_sent)) | ||
5087 | continue; | ||
5088 | if (!dev->transport->map_task_SG) | ||
5089 | continue; | ||
5090 | |||
5091 | ret = dev->transport->map_task_SG(task); | ||
5092 | if (ret < 0) | ||
5093 | return ret; | ||
5094 | } | ||
5095 | } else { | ||
5096 | ret = transport_map_control_cmd_to_task(cmd); | ||
5097 | if (ret < 0) | ||
5098 | return ret; | ||
5099 | } | ||
5100 | |||
5101 | /* | ||
5102 | * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. | ||
5103 | * This WRITE struct se_cmd (and all of its associated struct se_task's) | ||
5104 | * will be added to the struct se_device execution queue after its WRITE | ||
5105 | * data has arrived. (ie: It gets handled by the transport processing | ||
5106 | * thread a second time) | ||
5107 | */ | ||
5108 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
5109 | transport_add_tasks_to_state_queue(cmd); | ||
5110 | return transport_generic_write_pending(cmd); | ||
5111 | } | ||
5112 | /* | ||
5113 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | ||
5114 | * to the execution queue. | ||
5115 | */ | ||
5116 | transport_execute_tasks(cmd); | ||
5117 | return 0; | ||
5118 | } | ||
5119 | |||
5120 | /* transport_generic_process_write(): | ||
5121 | * | ||
5122 | * | ||
5123 | */ | ||
5124 | void transport_generic_process_write(struct se_cmd *cmd) | ||
5125 | { | ||
5126 | #if 0 | ||
5127 | /* | ||
5128 | * Copy SCSI Presented DTL sector(s) from received buffers allocated to | ||
5129 | * original EDTL | ||
5130 | */ | ||
5131 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | ||
5132 | if (!T_TASK(cmd)->t_tasks_se_num) { | ||
5133 | unsigned char *dst, *buf = | ||
5134 | (unsigned char *)T_TASK(cmd)->t_task_buf; | ||
5135 | |||
5136 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); | ||
5137 | if (!(dst)) { | ||
5138 | printk(KERN_ERR "Unable to allocate memory for" | ||
5139 | " WRITE underflow\n"); | ||
5140 | transport_generic_request_failure(cmd, NULL, | ||
5141 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | ||
5142 | return; | ||
5143 | } | ||
5144 | memcpy(dst, buf, cmd->cmd_spdtl); | ||
5145 | |||
5146 | kfree(T_TASK(cmd)->t_task_buf); | ||
5147 | T_TASK(cmd)->t_task_buf = dst; | ||
5148 | } else { | ||
5149 | struct scatterlist *sg = | ||
5150 | (struct scatterlist *)T_TASK(cmd)->t_task_buf; | ||
5151 | struct scatterlist *orig_sg; | ||
5152 | |||
5153 | orig_sg = kzalloc(sizeof(struct scatterlist) * | ||
5154 | T_TASK(cmd)->t_tasks_se_num, | ||
5155 | GFP_KERNEL); | ||
5156 | if (!(orig_sg)) { | ||
5157 | printk(KERN_ERR "Unable to allocate memory" | ||
5158 | " for WRITE underflow\n"); | ||
5159 | transport_generic_request_failure(cmd, NULL, | ||
5160 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | ||
5161 | return; | ||
5162 | } | ||
5163 | |||
5164 | memcpy(orig_sg, T_TASK(cmd)->t_task_buf, | ||
5165 | sizeof(struct scatterlist) * | ||
5166 | T_TASK(cmd)->t_tasks_se_num); | ||
5167 | |||
5168 | cmd->data_length = cmd->cmd_spdtl; | ||
5169 | /* | ||
5170 | * FIXME, clear out original struct se_task and state | ||
5171 | * information. | ||
5172 | */ | ||
5173 | if (transport_generic_new_cmd(cmd) < 0) { | ||
5174 | transport_generic_request_failure(cmd, NULL, | ||
5175 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | ||
5176 | kfree(orig_sg); | ||
5177 | return; | ||
5178 | } | ||
5179 | |||
5180 | transport_memcpy_write_sg(cmd, orig_sg); | ||
5181 | } | ||
5182 | } | ||
5183 | #endif | ||
5184 | transport_execute_tasks(cmd); | ||
5185 | } | ||
5186 | EXPORT_SYMBOL(transport_generic_process_write); | ||
5187 | |||
5188 | /* transport_generic_write_pending(): | ||
5189 | * | ||
5190 | * | ||
5191 | */ | ||
5192 | static int transport_generic_write_pending(struct se_cmd *cmd) | ||
5193 | { | ||
5194 | unsigned long flags; | ||
5195 | int ret; | ||
5196 | |||
5197 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5198 | cmd->t_state = TRANSPORT_WRITE_PENDING; | ||
5199 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5200 | /* | ||
5201 | * For the TCM control CDBs using a contiguous buffer, do the memcpy | ||
5202 | * from the passed Linux/SCSI struct scatterlist located at | ||
5203 | * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at | ||
5204 | * T_TASK(se_cmd)->t_task_buf. | ||
5205 | */ | ||
5206 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | ||
5207 | transport_memcpy_read_contig(cmd, | ||
5208 | T_TASK(cmd)->t_task_buf, | ||
5209 | T_TASK(cmd)->t_task_pt_sgl); | ||
5210 | /* | ||
5211 | * Clear the se_cmd for WRITE_PENDING status in order to set | ||
5212 | * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data | ||
5213 | * can be called from HW target mode interrupt code. This is safe | ||
5214 | * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending | ||
5215 | * because the se_cmd->se_lun pointer is not being cleared. | ||
5216 | */ | ||
5217 | transport_cmd_check_stop(cmd, 1, 0); | ||
5218 | |||
5219 | /* | ||
5220 | * Call the fabric write_pending function here to let the | ||
5221 | * frontend know that WRITE buffers are ready. | ||
5222 | */ | ||
5223 | ret = CMD_TFO(cmd)->write_pending(cmd); | ||
5224 | if (ret < 0) | ||
5225 | return ret; | ||
5226 | |||
5227 | return PYX_TRANSPORT_WRITE_PENDING; | ||
5228 | } | ||
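The write_pending() fabric callback invoked above is where a fabric module tells its initiator that WRITE buffers are ready (for iSCSI, by sending R2T PDUs). A minimal sketch of such a callback, with hypothetical names throughout:

	static int my_fabric_write_pending(struct se_cmd *se_cmd)
	{
		/*
		 * Kick the fabric-specific machinery that solicits the
		 * WRITE data from the initiator, e.g. queue an R2T.
		 */
		return my_fabric_queue_r2t(se_cmd);	/* hypothetical */
	}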
5229 | |||
5230 | /* transport_release_cmd_to_pool(): | ||
5231 | * | ||
5232 | * | ||
5233 | */ | ||
5234 | void transport_release_cmd_to_pool(struct se_cmd *cmd) | ||
5235 | { | ||
5236 | BUG_ON(!T_TASK(cmd)); | ||
5237 | BUG_ON(!CMD_TFO(cmd)); | ||
5238 | |||
5239 | transport_free_se_cmd(cmd); | ||
5240 | CMD_TFO(cmd)->release_cmd_to_pool(cmd); | ||
5241 | } | ||
5242 | EXPORT_SYMBOL(transport_release_cmd_to_pool); | ||
5243 | |||
5244 | /* transport_generic_free_cmd(): | ||
5245 | * | ||
5246 | * Called from processing frontend to release storage engine resources | ||
5247 | */ | ||
5248 | void transport_generic_free_cmd( | ||
5249 | struct se_cmd *cmd, | ||
5250 | int wait_for_tasks, | ||
5251 | int release_to_pool, | ||
5252 | int session_reinstatement) | ||
5253 | { | ||
5254 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) | ||
5255 | transport_release_cmd_to_pool(cmd); | ||
5256 | else { | ||
5257 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | ||
5258 | |||
5259 | if (SE_LUN(cmd)) { | ||
5260 | #if 0 | ||
5261 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" | ||
5262 | " SE_LUN(cmd)\n", cmd, | ||
5263 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5264 | #endif | ||
5265 | transport_lun_remove_cmd(cmd); | ||
5266 | } | ||
5267 | |||
5268 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | ||
5269 | cmd->transport_wait_for_tasks(cmd, 0, 0); | ||
5270 | |||
5271 | transport_generic_remove(cmd, release_to_pool, | ||
5272 | session_reinstatement); | ||
5273 | } | ||
5274 | } | ||
5275 | EXPORT_SYMBOL(transport_generic_free_cmd); | ||
5276 | |||
5277 | static void transport_nop_wait_for_tasks( | ||
5278 | struct se_cmd *cmd, | ||
5279 | int remove_cmd, | ||
5280 | int session_reinstatement) | ||
5281 | { | ||
5282 | return; | ||
5283 | } | ||
5284 | |||
5285 | /* transport_lun_wait_for_tasks(): | ||
5286 | * | ||
5287 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | ||
5288 | * a struct se_lun to be successfully shutdown. | ||
5289 | */ | ||
5290 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | ||
5291 | { | ||
5292 | unsigned long flags; | ||
5293 | int ret; | ||
5294 | /* | ||
5295 | * If the frontend has already requested this struct se_cmd to | ||
5296 | * be stopped, we can safely ignore this struct se_cmd. | ||
5297 | */ | ||
5298 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5299 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | ||
5300 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | ||
5301 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" | ||
5302 | " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5303 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5304 | transport_cmd_check_stop(cmd, 1, 0); | ||
5305 | return -1; | ||
5306 | } | ||
5307 | atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); | ||
5308 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5309 | |||
5310 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | ||
5311 | |||
5312 | ret = transport_stop_tasks_for_cmd(cmd); | ||
5313 | |||
5314 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" | ||
5315 | " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); | ||
5316 | if (!ret) { | ||
5317 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | ||
5318 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5319 | wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); | ||
5320 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | ||
5321 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5322 | } | ||
5323 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | ||
5324 | |||
5325 | return 0; | ||
5326 | } | ||
5327 | |||
5328 | /* #define DEBUG_CLEAR_LUN */ | ||
5329 | #ifdef DEBUG_CLEAR_LUN | ||
5330 | #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) | ||
5331 | #else | ||
5332 | #define DEBUG_CLEAR_L(x...) | ||
5333 | #endif | ||
5334 | |||
5335 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) | ||
5336 | { | ||
5337 | struct se_cmd *cmd = NULL; | ||
5338 | unsigned long lun_flags, cmd_flags; | ||
5339 | /* | ||
5340 | * Do exception processing and return CHECK_CONDITION status to the | ||
5341 | * Initiator Port. | ||
5342 | */ | ||
5343 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | ||
5344 | while (!list_empty_careful(&lun->lun_cmd_list)) { | ||
5345 | cmd = list_entry(lun->lun_cmd_list.next, | ||
5346 | struct se_cmd, se_lun_list); | ||
5347 | list_del(&cmd->se_lun_list); | ||
5348 | |||
5349 | if (!(T_TASK(cmd))) { | ||
5350 | printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" | ||
5351 | "[i,t]_state: %u/%u\n", | ||
5352 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
5353 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | ||
5354 | BUG(); | ||
5355 | } | ||
5356 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | ||
5357 | /* | ||
5358 | * This will notify iscsi_target_transport.c: | ||
5359 | * transport_cmd_check_stop() that a LUN shutdown is in | ||
5360 | * progress for the iscsi_cmd_t. | ||
5361 | */ | ||
5362 | spin_lock(&T_TASK(cmd)->t_state_lock); | ||
5363 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" | ||
5364 | "_lun_stop for ITT: 0x%08x\n", | ||
5365 | SE_LUN(cmd)->unpacked_lun, | ||
5366 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5367 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); | ||
5368 | spin_unlock(&T_TASK(cmd)->t_state_lock); | ||
5369 | |||
5370 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | ||
5371 | |||
5372 | if (!(SE_LUN(cmd))) { | ||
5373 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", | ||
5374 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
5375 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | ||
5376 | BUG(); | ||
5377 | } | ||
5378 | /* | ||
5379 | * If the Storage engine still owns the iscsi_cmd_t, determine | ||
5380 | * and/or stop its context. | ||
5381 | */ | ||
5382 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" | ||
5383 | "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, | ||
5384 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5385 | |||
5386 | if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { | ||
5387 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | ||
5388 | continue; | ||
5389 | } | ||
5390 | |||
5391 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | ||
5392 | "_wait_for_tasks(): SUCCESS\n", | ||
5393 | SE_LUN(cmd)->unpacked_lun, | ||
5394 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5395 | |||
5396 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | ||
5397 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | ||
5398 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | ||
5399 | goto check_cond; | ||
5400 | } | ||
5401 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | ||
5402 | transport_all_task_dev_remove_state(cmd); | ||
5403 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | ||
5404 | |||
5405 | transport_free_dev_tasks(cmd); | ||
5406 | /* | ||
5407 | * The Storage engine stopped this struct se_cmd before it was | ||
5408 | * sent to the fabric frontend for delivery back to the | ||
5409 | * Initiator Node. Return this SCSI CDB back with a | ||
5410 | * CHECK_CONDITION status. | ||
5411 | */ | ||
5412 | check_cond: | ||
5413 | transport_send_check_condition_and_sense(cmd, | ||
5414 | TCM_NON_EXISTENT_LUN, 0); | ||
5415 | /* | ||
5416 | * If the fabric frontend is waiting for this iscsi_cmd_t to | ||
5417 | * be released, notify the waiting thread now that LU has | ||
5418 | * finished accessing it. | ||
5419 | */ | ||
5420 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | ||
5421 | if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { | ||
5422 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" | ||
5423 | " struct se_cmd: %p ITT: 0x%08x\n", | ||
5424 | lun->unpacked_lun, | ||
5425 | cmd, CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5426 | |||
5427 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | ||
5428 | cmd_flags); | ||
5429 | transport_cmd_check_stop(cmd, 1, 0); | ||
5430 | complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); | ||
5431 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | ||
5432 | continue; | ||
5433 | } | ||
5434 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | ||
5435 | lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5436 | |||
5437 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | ||
5438 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | ||
5439 | } | ||
5440 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | ||
5441 | } | ||
5442 | |||
5443 | static int transport_clear_lun_thread(void *p) | ||
5444 | { | ||
5445 | struct se_lun *lun = (struct se_lun *)p; | ||
5446 | |||
5447 | __transport_clear_lun_from_sessions(lun); | ||
5448 | complete(&lun->lun_shutdown_comp); | ||
5449 | |||
5450 | return 0; | ||
5451 | } | ||
5452 | |||
5453 | int transport_clear_lun_from_sessions(struct se_lun *lun) | ||
5454 | { | ||
5455 | struct task_struct *kt; | ||
5456 | |||
5457 | kt = kthread_run(transport_clear_lun_thread, (void *)lun, | ||
5458 | "tcm_cl_%u", lun->unpacked_lun); | ||
5459 | if (IS_ERR(kt)) { | ||
5460 | printk(KERN_ERR "Unable to start clear_lun thread\n"); | ||
5461 | return -1; | ||
5462 | } | ||
5463 | wait_for_completion(&lun->lun_shutdown_comp); | ||
5464 | |||
5465 | return 0; | ||
5466 | } | ||
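The kthread_run() + wait_for_completion() pair above is the standard synchronous helper-thread pattern: the caller blocks until the worker signals lun_shutdown_comp. The same handshake in isolation, as a generic sketch:

	static int worker(void *p)
	{
		struct completion *done = p;
		/* ... do the work ... */
		complete(done);
		return 0;
	}

	/* caller side */
	DECLARE_COMPLETION_ONSTACK(done);
	kthread_run(worker, &done, "worker");
	wait_for_completion(&done);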
5467 | |||
5468 | /* transport_generic_wait_for_tasks(): | ||
5469 | * | ||
5470 | * Called from frontend or passthrough context to wait for storage engine | ||
5471 | * to pause and/or release frontend generated struct se_cmd. | ||
5472 | */ | ||
5473 | static void transport_generic_wait_for_tasks( | ||
5474 | struct se_cmd *cmd, | ||
5475 | int remove_cmd, | ||
5476 | int session_reinstatement) | ||
5477 | { | ||
5478 | unsigned long flags; | ||
5479 | |||
5480 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | ||
5481 | return; | ||
5482 | |||
5483 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5484 | /* | ||
5485 | * If we are already stopped due to an external event (ie: LUN shutdown) | ||
5486 | * sleep until the connection can have the passed struct se_cmd back. | ||
5487 | * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by | ||
5488 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | ||
5489 | * has completed its operation on the struct se_cmd. | ||
5490 | */ | ||
5491 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | ||
5492 | |||
5493 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" | ||
5494 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe" | ||
5495 | "_stop_comp); for ITT: 0x%08x\n", | ||
5496 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5497 | /* | ||
5498 | * There is a special case for WRITES where a FE exception + | ||
5499 | * LUN shutdown means ConfigFS context is still sleeping on | ||
5500 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | ||
5501 | * We go ahead and up transport_lun_stop_comp just to be sure | ||
5502 | * here. | ||
5503 | */ | ||
5504 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5505 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | ||
5506 | wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | ||
5507 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5508 | |||
5509 | transport_all_task_dev_remove_state(cmd); | ||
5510 | /* | ||
5511 | * At this point, the frontend who was the originator of this | ||
5512 | * struct se_cmd, now owns the structure and can be released through | ||
5513 | * normal means below. | ||
5514 | */ | ||
5515 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" | ||
5516 | " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" | ||
5517 | "stop_comp); for ITT: 0x%08x\n", | ||
5518 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5519 | |||
5520 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | ||
5521 | } | ||
5522 | if (!atomic_read(&T_TASK(cmd)->t_transport_active)) | ||
5523 | goto remove; | ||
5524 | |||
5525 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); | ||
5526 | |||
5527 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" | ||
5528 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | ||
5529 | " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | ||
5530 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | ||
5531 | cmd->deferred_t_state); | ||
5532 | |||
5533 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5534 | |||
5535 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | ||
5536 | |||
5537 | wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); | ||
5538 | |||
5539 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5540 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | ||
5541 | atomic_set(&T_TASK(cmd)->t_transport_stop, 0); | ||
5542 | |||
5543 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" | ||
5544 | "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", | ||
5545 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5546 | remove: | ||
5547 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5548 | if (!remove_cmd) | ||
5549 | return; | ||
5550 | |||
5551 | transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); | ||
5552 | } | ||
5553 | |||
5554 | static int transport_get_sense_codes( | ||
5555 | struct se_cmd *cmd, | ||
5556 | u8 *asc, | ||
5557 | u8 *ascq) | ||
5558 | { | ||
5559 | *asc = cmd->scsi_asc; | ||
5560 | *ascq = cmd->scsi_ascq; | ||
5561 | |||
5562 | return 0; | ||
5563 | } | ||
5564 | |||
5565 | static int transport_set_sense_codes( | ||
5566 | struct se_cmd *cmd, | ||
5567 | u8 asc, | ||
5568 | u8 ascq) | ||
5569 | { | ||
5570 | cmd->scsi_asc = asc; | ||
5571 | cmd->scsi_ascq = ascq; | ||
5572 | |||
5573 | return 0; | ||
5574 | } | ||
5575 | |||
5576 | int transport_send_check_condition_and_sense( | ||
5577 | struct se_cmd *cmd, | ||
5578 | u8 reason, | ||
5579 | int from_transport) | ||
5580 | { | ||
5581 | unsigned char *buffer = cmd->sense_buffer; | ||
5582 | unsigned long flags; | ||
5583 | int offset; | ||
5584 | u8 asc = 0, ascq = 0; | ||
5585 | |||
5586 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5587 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | ||
5588 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5589 | return 0; | ||
5590 | } | ||
5591 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | ||
5592 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | ||
5593 | |||
5594 | if (!reason && from_transport) | ||
5595 | goto after_reason; | ||
5596 | |||
5597 | if (!from_transport) | ||
5598 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | ||
5599 | /* | ||
5600 | * Data Segment and SenseLength of the fabric response PDU. | ||
5601 | * | ||
5602 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | ||
5603 | * from include/scsi/scsi_cmnd.h | ||
5604 | */ | ||
5605 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | ||
5606 | TRANSPORT_SENSE_BUFFER); | ||
5607 | /* | ||
5608 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | ||
5609 | * SENSE KEY values from include/scsi/scsi.h | ||
5610 | */ | ||
5611 | switch (reason) { | ||
5612 | case TCM_NON_EXISTENT_LUN: | ||
5613 | case TCM_UNSUPPORTED_SCSI_OPCODE: | ||
5614 | case TCM_SECTOR_COUNT_TOO_MANY: | ||
5615 | /* CURRENT ERROR */ | ||
5616 | buffer[offset] = 0x70; | ||
5617 | /* ILLEGAL REQUEST */ | ||
5618 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | ||
5619 | /* INVALID COMMAND OPERATION CODE */ | ||
5620 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | ||
5621 | break; | ||
5622 | case TCM_UNKNOWN_MODE_PAGE: | ||
5623 | /* CURRENT ERROR */ | ||
5624 | buffer[offset] = 0x70; | ||
5625 | /* ILLEGAL REQUEST */ | ||
5626 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | ||
5627 | /* INVALID FIELD IN CDB */ | ||
5628 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | ||
5629 | break; | ||
5630 | case TCM_CHECK_CONDITION_ABORT_CMD: | ||
5631 | /* CURRENT ERROR */ | ||
5632 | buffer[offset] = 0x70; | ||
5633 | /* ABORTED COMMAND */ | ||
5634 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5635 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | ||
5636 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | ||
5637 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | ||
5638 | break; | ||
5639 | case TCM_INCORRECT_AMOUNT_OF_DATA: | ||
5640 | /* CURRENT ERROR */ | ||
5641 | buffer[offset] = 0x70; | ||
5642 | /* ABORTED COMMAND */ | ||
5643 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5644 | /* WRITE ERROR */ | ||
5645 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | ||
5646 | /* NOT ENOUGH UNSOLICITED DATA */ | ||
5647 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | ||
5648 | break; | ||
5649 | case TCM_INVALID_CDB_FIELD: | ||
5650 | /* CURRENT ERROR */ | ||
5651 | buffer[offset] = 0x70; | ||
5652 | /* ABORTED COMMAND */ | ||
5653 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5654 | /* INVALID FIELD IN CDB */ | ||
5655 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | ||
5656 | break; | ||
5657 | case TCM_INVALID_PARAMETER_LIST: | ||
5658 | /* CURRENT ERROR */ | ||
5659 | buffer[offset] = 0x70; | ||
5660 | /* ABORTED COMMAND */ | ||
5661 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5662 | /* INVALID FIELD IN PARAMETER LIST */ | ||
5663 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | ||
5664 | break; | ||
5665 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | ||
5666 | /* CURRENT ERROR */ | ||
5667 | buffer[offset] = 0x70; | ||
5668 | /* ABORTED COMMAND */ | ||
5669 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5670 | /* WRITE ERROR */ | ||
5671 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | ||
5672 | /* UNEXPECTED_UNSOLICITED_DATA */ | ||
5673 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | ||
5674 | break; | ||
5675 | case TCM_SERVICE_CRC_ERROR: | ||
5676 | /* CURRENT ERROR */ | ||
5677 | buffer[offset] = 0x70; | ||
5678 | /* ABORTED COMMAND */ | ||
5679 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5680 | /* PROTOCOL SERVICE CRC ERROR */ | ||
5681 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | ||
5682 | /* N/A */ | ||
5683 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | ||
5684 | break; | ||
5685 | case TCM_SNACK_REJECTED: | ||
5686 | /* CURRENT ERROR */ | ||
5687 | buffer[offset] = 0x70; | ||
5688 | /* ABORTED COMMAND */ | ||
5689 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | ||
5690 | /* READ ERROR */ | ||
5691 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | ||
5692 | /* FAILED RETRANSMISSION REQUEST */ | ||
5693 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | ||
5694 | break; | ||
5695 | case TCM_WRITE_PROTECTED: | ||
5696 | /* CURRENT ERROR */ | ||
5697 | buffer[offset] = 0x70; | ||
5698 | /* DATA PROTECT */ | ||
5699 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | ||
5700 | /* WRITE PROTECTED */ | ||
5701 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | ||
5702 | break; | ||
5703 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | ||
5704 | /* CURRENT ERROR */ | ||
5705 | buffer[offset] = 0x70; | ||
5706 | /* UNIT ATTENTION */ | ||
5707 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | ||
5708 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | ||
5709 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | ||
5710 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | ||
5711 | break; | ||
5712 | case TCM_CHECK_CONDITION_NOT_READY: | ||
5713 | /* CURRENT ERROR */ | ||
5714 | buffer[offset] = 0x70; | ||
5715 | /* Not Ready */ | ||
5716 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | ||
5717 | transport_get_sense_codes(cmd, &asc, &ascq); | ||
5718 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | ||
5719 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | ||
5720 | break; | ||
5721 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | ||
5722 | default: | ||
5723 | /* CURRENT ERROR */ | ||
5724 | buffer[offset] = 0x70; | ||
5725 | /* ILLEGAL REQUEST */ | ||
5726 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | ||
5727 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | ||
5728 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | ||
5729 | break; | ||
5730 | } | ||
5731 | /* | ||
5732 | * This code uses linux/include/scsi/scsi.h SAM status codes! | ||
5733 | */ | ||
5734 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | ||
5735 | /* | ||
5736 | * Automatically padded, this value is encoded in the fabric's | ||
5737 | * data_length response PDU containing the SCSI defined sense data. | ||
5738 | */ | ||
5739 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | ||
5740 | |||
5741 | after_reason: | ||
5742 | CMD_TFO(cmd)->queue_status(cmd); | ||
5743 | return 0; | ||
5744 | } | ||
5745 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | ||
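Every branch of the switch above fills the same fixed-format sense layout from SPC-3: response code 0x70 (CURRENT ERROR) in the first byte, the sense key at SPC_SENSE_KEY_OFFSET, and ASC/ASCQ at SPC_ASC_KEY_OFFSET/SPC_ASCQ_KEY_OFFSET. Collapsed into one helper, the pattern is (a sketch, not part of this commit's API):

	static void fill_fixed_sense(unsigned char *buf, int off,
				     u8 key, u8 asc, u8 ascq)
	{
		buf[off] = 0x70;			/* CURRENT ERROR */
		buf[off + SPC_SENSE_KEY_OFFSET] = key;
		buf[off + SPC_ASC_KEY_OFFSET] = asc;
		buf[off + SPC_ASCQ_KEY_OFFSET] = ascq;
	}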
5746 | |||
5747 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | ||
5748 | { | ||
5749 | int ret = 0; | ||
5750 | |||
5751 | if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { | ||
5752 | if (!(send_status) || | ||
5753 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | ||
5754 | return 1; | ||
5755 | #if 0 | ||
5756 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" | ||
5757 | " status for CDB: 0x%02x ITT: 0x%08x\n", | ||
5758 | T_TASK(cmd)->t_task_cdb[0], | ||
5759 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5760 | #endif | ||
5761 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | ||
5762 | CMD_TFO(cmd)->queue_status(cmd); | ||
5763 | ret = 1; | ||
5764 | } | ||
5765 | return ret; | ||
5766 | } | ||
5767 | EXPORT_SYMBOL(transport_check_aborted_status); | ||
5768 | |||
5769 | void transport_send_task_abort(struct se_cmd *cmd) | ||
5770 | { | ||
5771 | /* | ||
5772 | * If there are still expected incoming fabric WRITEs, we wait | ||
5773 | * until they have completed before sending a TASK_ABORTED | ||
5774 | * response. This response with TASK_ABORTED status will be | ||
5775 | * queued back to fabric module by transport_check_aborted_status(). | ||
5776 | */ | ||
5777 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
5778 | if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { | ||
5779 | atomic_inc(&T_TASK(cmd)->t_transport_aborted); | ||
5780 | smp_mb__after_atomic_inc(); | ||
5781 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | ||
5782 | transport_new_cmd_failure(cmd); | ||
5783 | return; | ||
5784 | } | ||
5785 | } | ||
5786 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | ||
5787 | #if 0 | ||
5788 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | ||
5789 | " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], | ||
5790 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5791 | #endif | ||
5792 | CMD_TFO(cmd)->queue_status(cmd); | ||
5793 | } | ||
5794 | |||
5795 | /* transport_generic_do_tmr(): | ||
5796 | * | ||
5797 | * | ||
5798 | */ | ||
5799 | int transport_generic_do_tmr(struct se_cmd *cmd) | ||
5800 | { | ||
5801 | struct se_cmd *ref_cmd; | ||
5802 | struct se_device *dev = SE_DEV(cmd); | ||
5803 | struct se_tmr_req *tmr = cmd->se_tmr_req; | ||
5804 | int ret; | ||
5805 | |||
5806 | switch (tmr->function) { | ||
5807 | case ABORT_TASK: | ||
5808 | ref_cmd = tmr->ref_cmd; | ||
5809 | tmr->response = TMR_FUNCTION_REJECTED; | ||
5810 | break; | ||
5811 | case ABORT_TASK_SET: | ||
5812 | case CLEAR_ACA: | ||
5813 | case CLEAR_TASK_SET: | ||
5814 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | ||
5815 | break; | ||
5816 | case LUN_RESET: | ||
5817 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | ||
5818 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | ||
5819 | TMR_FUNCTION_REJECTED; | ||
5820 | break; | ||
5821 | #if 0 | ||
5822 | case TARGET_WARM_RESET: | ||
5823 | transport_generic_host_reset(dev->se_hba); | ||
5824 | tmr->response = TMR_FUNCTION_REJECTED; | ||
5825 | break; | ||
5826 | case TARGET_COLD_RESET: | ||
5827 | transport_generic_host_reset(dev->se_hba); | ||
5828 | transport_generic_cold_reset(dev->se_hba); | ||
5829 | tmr->response = TMR_FUNCTION_REJECTED; | ||
5830 | break; | ||
5831 | #endif | ||
5832 | default: | ||
5833 | printk(KERN_ERR "Unknown TMR function: 0x%02x.\n", | ||
5834 | tmr->function); | ||
5835 | tmr->response = TMR_FUNCTION_REJECTED; | ||
5836 | break; | ||
5837 | } | ||
5838 | |||
5839 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | ||
5840 | CMD_TFO(cmd)->queue_tm_rsp(cmd); | ||
5841 | |||
5842 | transport_cmd_check_stop(cmd, 2, 0); | ||
5843 | return 0; | ||
5844 | } | ||
5845 | |||
5846 | /* | ||
5847 | * Called with spin_lock_irq(&dev->execute_task_lock) held | ||
5848 | * | ||
5849 | */ | ||
5850 | static struct se_task * | ||
5851 | transport_get_task_from_state_list(struct se_device *dev) | ||
5852 | { | ||
5853 | struct se_task *task; | ||
5854 | |||
5855 | if (list_empty(&dev->state_task_list)) | ||
5856 | return NULL; | ||
5857 | |||
5858 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | ||
5859 | break; | ||
5860 | |||
5861 | list_del(&task->t_state_list); | ||
5862 | atomic_set(&task->task_state_active, 0); | ||
5863 | |||
5864 | return task; | ||
5865 | } | ||
5866 | |||
5867 | static void transport_processing_shutdown(struct se_device *dev) | ||
5868 | { | ||
5869 | struct se_cmd *cmd; | ||
5870 | struct se_queue_req *qr; | ||
5871 | struct se_task *task; | ||
5872 | u8 state; | ||
5873 | unsigned long flags; | ||
5874 | /* | ||
5875 | * Empty the struct se_device's struct se_task state list. | ||
5876 | */ | ||
5877 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5878 | while ((task = transport_get_task_from_state_list(dev))) { | ||
5879 | if (!(TASK_CMD(task))) { | ||
5880 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | ||
5881 | continue; | ||
5882 | } | ||
5883 | cmd = TASK_CMD(task); | ||
5884 | |||
5885 | if (!T_TASK(cmd)) { | ||
5886 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | ||
5887 | " %p ITT: 0x%08x\n", task, cmd, | ||
5888 | CMD_TFO(cmd)->get_task_tag(cmd)); | ||
5889 | continue; | ||
5890 | } | ||
5891 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
5892 | |||
5893 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5894 | |||
5895 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," | ||
5896 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" | ||
5897 | " %d/%d cdb: 0x%02x\n", cmd, task, | ||
5898 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, | ||
5899 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, | ||
5900 | cmd->t_state, cmd->deferred_t_state, | ||
5901 | T_TASK(cmd)->t_task_cdb[0]); | ||
5902 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" | ||
5903 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | ||
5904 | " t_transport_stop: %d t_transport_sent: %d\n", | ||
5905 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
5906 | T_TASK(cmd)->t_task_cdbs, | ||
5907 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | ||
5908 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | ||
5909 | atomic_read(&T_TASK(cmd)->t_transport_active), | ||
5910 | atomic_read(&T_TASK(cmd)->t_transport_stop), | ||
5911 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | ||
5912 | |||
5913 | if (atomic_read(&task->task_active)) { | ||
5914 | atomic_set(&task->task_stop, 1); | ||
5915 | spin_unlock_irqrestore( | ||
5916 | &T_TASK(cmd)->t_state_lock, flags); | ||
5917 | |||
5918 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" | ||
5919 | " %p\n", task, dev); | ||
5920 | wait_for_completion(&task->task_stop_comp); | ||
5921 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", | ||
5922 | task, dev); | ||
5923 | |||
5924 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | ||
5925 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | ||
5926 | |||
5927 | atomic_set(&task->task_active, 0); | ||
5928 | atomic_set(&task->task_stop, 0); | ||
5929 | } | ||
5930 | __transport_stop_task_timer(task, &flags); | ||
5931 | |||
5932 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | ||
5933 | spin_unlock_irqrestore( | ||
5934 | &T_TASK(cmd)->t_state_lock, flags); | ||
5935 | |||
5936 | DEBUG_DO("Skipping task: %p, dev: %p for" | ||
5937 | " t_task_cdbs_ex_left: %d\n", task, dev, | ||
5938 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | ||
5939 | |||
5940 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5941 | continue; | ||
5942 | } | ||
5943 | |||
5944 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | ||
5945 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" | ||
5946 | " %p\n", task, dev); | ||
5947 | |||
5948 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | ||
5949 | spin_unlock_irqrestore( | ||
5950 | &T_TASK(cmd)->t_state_lock, flags); | ||
5951 | transport_send_check_condition_and_sense( | ||
5952 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | ||
5953 | 0); | ||
5954 | transport_remove_cmd_from_queue(cmd, | ||
5955 | SE_DEV(cmd)->dev_queue_obj); | ||
5956 | |||
5957 | transport_lun_remove_cmd(cmd); | ||
5958 | transport_cmd_check_stop(cmd, 1, 0); | ||
5959 | } else { | ||
5960 | spin_unlock_irqrestore( | ||
5961 | &T_TASK(cmd)->t_state_lock, flags); | ||
5962 | |||
5963 | transport_remove_cmd_from_queue(cmd, | ||
5964 | SE_DEV(cmd)->dev_queue_obj); | ||
5965 | |||
5966 | transport_lun_remove_cmd(cmd); | ||
5967 | |||
5968 | if (transport_cmd_check_stop(cmd, 1, 0)) | ||
5969 | transport_generic_remove(cmd, 0, 0); | ||
5970 | } | ||
5971 | |||
5972 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
5973 | continue; | ||
5974 | } | ||
5975 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", | ||
5976 | task, dev); | ||
5977 | |||
5978 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | ||
5979 | spin_unlock_irqrestore( | ||
5980 | &T_TASK(cmd)->t_state_lock, flags); | ||
5981 | transport_send_check_condition_and_sense(cmd, | ||
5982 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | ||
5983 | transport_remove_cmd_from_queue(cmd, | ||
5984 | SE_DEV(cmd)->dev_queue_obj); | ||
5985 | |||
5986 | transport_lun_remove_cmd(cmd); | ||
5987 | transport_cmd_check_stop(cmd, 1, 0); | ||
5988 | } else { | ||
5989 | spin_unlock_irqrestore( | ||
5990 | &T_TASK(cmd)->t_state_lock, flags); | ||
5991 | |||
5992 | transport_remove_cmd_from_queue(cmd, | ||
5993 | SE_DEV(cmd)->dev_queue_obj); | ||
5994 | transport_lun_remove_cmd(cmd); | ||
5995 | |||
5996 | if (transport_cmd_check_stop(cmd, 1, 0)) | ||
5997 | transport_generic_remove(cmd, 0, 0); | ||
5998 | } | ||
5999 | |||
6000 | spin_lock_irqsave(&dev->execute_task_lock, flags); | ||
6001 | } | ||
6002 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | ||
6003 | /* | ||
6004 | * Empty the struct se_device's struct se_cmd list. | ||
6005 | */ | ||
6006 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6007 | while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { | ||
6008 | spin_unlock_irqrestore( | ||
6009 | &dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6010 | cmd = (struct se_cmd *)qr->cmd; | ||
6011 | state = qr->state; | ||
6012 | kfree(qr); | ||
6013 | |||
6014 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", | ||
6015 | cmd, state); | ||
6016 | |||
6017 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | ||
6018 | transport_send_check_condition_and_sense(cmd, | ||
6019 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | ||
6020 | |||
6021 | transport_lun_remove_cmd(cmd); | ||
6022 | transport_cmd_check_stop(cmd, 1, 0); | ||
6023 | } else { | ||
6024 | transport_lun_remove_cmd(cmd); | ||
6025 | if (transport_cmd_check_stop(cmd, 1, 0)) | ||
6026 | transport_generic_remove(cmd, 0, 0); | ||
6027 | } | ||
6028 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6029 | } | ||
6030 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | ||
6031 | } | ||
6032 | |||
6033 | /* transport_processing_thread(): | ||
6034 | * | ||
6035 | * | ||
6036 | */ | ||
6037 | static int transport_processing_thread(void *param) | ||
6038 | { | ||
6039 | int ret, t_state; | ||
6040 | struct se_cmd *cmd; | ||
6041 | struct se_device *dev = (struct se_device *) param; | ||
6042 | struct se_queue_req *qr; | ||
6043 | |||
6044 | set_user_nice(current, -20); | ||
6045 | |||
6046 | while (!kthread_should_stop()) { | ||
6047 | ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, | ||
6048 | atomic_read(&dev->dev_queue_obj->queue_cnt) || | ||
6049 | kthread_should_stop()); | ||
6050 | if (ret < 0) | ||
6051 | goto out; | ||
6052 | |||
6053 | spin_lock_irq(&dev->dev_status_lock); | ||
6054 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | ||
6055 | spin_unlock_irq(&dev->dev_status_lock); | ||
6056 | transport_processing_shutdown(dev); | ||
6057 | continue; | ||
6058 | } | ||
6059 | spin_unlock_irq(&dev->dev_status_lock); | ||
6060 | |||
6061 | get_cmd: | ||
6062 | __transport_execute_tasks(dev); | ||
6063 | |||
6064 | qr = transport_get_qr_from_queue(dev->dev_queue_obj); | ||
6065 | if (!(qr)) | ||
6066 | continue; | ||
6067 | |||
6068 | cmd = (struct se_cmd *)qr->cmd; | ||
6069 | t_state = qr->state; | ||
6070 | kfree(qr); | ||
6071 | |||
6072 | switch (t_state) { | ||
6073 | case TRANSPORT_NEW_CMD_MAP: | ||
6074 | if (!(CMD_TFO(cmd)->new_cmd_map)) { | ||
6075 | printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" | ||
6076 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); | ||
6077 | BUG(); | ||
6078 | } | ||
6079 | ret = CMD_TFO(cmd)->new_cmd_map(cmd); | ||
6080 | if (ret < 0) { | ||
6081 | cmd->transport_error_status = ret; | ||
6082 | transport_generic_request_failure(cmd, NULL, | ||
6083 | 0, (cmd->data_direction != | ||
6084 | DMA_TO_DEVICE)); | ||
6085 | break; | ||
6086 | } | ||
6087 | /* Fall through */ | ||
6088 | case TRANSPORT_NEW_CMD: | ||
6089 | ret = transport_generic_new_cmd(cmd); | ||
6090 | if (ret < 0) { | ||
6091 | cmd->transport_error_status = ret; | ||
6092 | transport_generic_request_failure(cmd, NULL, | ||
6093 | 0, (cmd->data_direction != | ||
6094 | DMA_TO_DEVICE)); | ||
6095 | } | ||
6096 | break; | ||
6097 | case TRANSPORT_PROCESS_WRITE: | ||
6098 | transport_generic_process_write(cmd); | ||
6099 | break; | ||
6100 | case TRANSPORT_COMPLETE_OK: | ||
6101 | transport_stop_all_task_timers(cmd); | ||
6102 | transport_generic_complete_ok(cmd); | ||
6103 | break; | ||
6104 | case TRANSPORT_REMOVE: | ||
6105 | transport_generic_remove(cmd, 1, 0); | ||
6106 | break; | ||
6107 | case TRANSPORT_PROCESS_TMR: | ||
6108 | transport_generic_do_tmr(cmd); | ||
6109 | break; | ||
6110 | case TRANSPORT_COMPLETE_FAILURE: | ||
6111 | transport_generic_request_failure(cmd, NULL, 1, 1); | ||
6112 | break; | ||
6113 | case TRANSPORT_COMPLETE_TIMEOUT: | ||
6114 | transport_stop_all_task_timers(cmd); | ||
6115 | transport_generic_request_timeout(cmd); | ||
6116 | break; | ||
6117 | default: | ||
6118 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" | ||
6119 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | ||
6120 | " %u\n", t_state, cmd->deferred_t_state, | ||
6121 | CMD_TFO(cmd)->get_task_tag(cmd), | ||
6122 | CMD_TFO(cmd)->get_cmd_state(cmd), | ||
6123 | SE_LUN(cmd)->unpacked_lun); | ||
6124 | BUG(); | ||
6125 | } | ||
6126 | |||
6127 | goto get_cmd; | ||
6128 | } | ||
6129 | |||
6130 | out: | ||
6131 | transport_release_all_cmds(dev); | ||
6132 | dev->process_thread = NULL; | ||
6133 | return 0; | ||
6134 | } | ||
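
The processing thread above is the standard per-device kthread idiom: sleep on the queue object's waitqueue until work is queued or a stop is requested, folding kthread_should_stop() into the wait condition so kthread_stop() can always wake the thread. A minimal sketch of that pattern; the names my_dev and my_thread_fn are illustrative, not part of this patch:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct my_dev {
	wait_queue_head_t thread_wq;
	atomic_t queue_cnt;
	struct task_struct *thread;
};

static int my_thread_fn(void *param)
{
	struct my_dev *dev = param;

	while (!kthread_should_stop()) {
		/* Returns -ERESTARTSYS if interrupted by a signal. */
		if (wait_event_interruptible(dev->thread_wq,
				atomic_read(&dev->queue_cnt) ||
				kthread_should_stop()) < 0)
			break;
		/* ... dequeue and process queued work here ... */
	}
	return 0;
}

/*
 * Producers queue work and then wake_up_interruptible(&dev->thread_wq);
 * teardown calls kthread_stop(dev->thread), which also wakes the waiter.
 */
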
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c new file mode 100644 index 000000000000..a2ef346087e8 --- /dev/null +++ b/drivers/target/target_core_ua.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_ua.c | ||
3 | * | ||
4 | * This file contains logic for SPC-3 Unit Attention emulation | ||
5 | * | ||
6 | * Copyright (c) 2009,2010 Rising Tide Systems | ||
7 | * Copyright (c) 2009,2010 Linux-iSCSI.org | ||
8 | * | ||
9 | * Nicholas A. Bellinger <nab@kernel.org> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | ******************************************************************************/ | ||
26 | |||
27 | #include <linux/version.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <scsi/scsi.h> | ||
31 | #include <scsi/scsi_cmnd.h> | ||
32 | |||
33 | #include <target/target_core_base.h> | ||
34 | #include <target/target_core_device.h> | ||
35 | #include <target/target_core_transport.h> | ||
36 | #include <target/target_core_fabric_ops.h> | ||
37 | #include <target/target_core_configfs.h> | ||
38 | |||
39 | #include "target_core_alua.h" | ||
40 | #include "target_core_hba.h" | ||
41 | #include "target_core_pr.h" | ||
42 | #include "target_core_ua.h" | ||
43 | |||
44 | int core_scsi3_ua_check( | ||
45 | struct se_cmd *cmd, | ||
46 | unsigned char *cdb) | ||
47 | { | ||
48 | struct se_dev_entry *deve; | ||
49 | struct se_session *sess = cmd->se_sess; | ||
50 | struct se_node_acl *nacl; | ||
51 | |||
52 | if (!(sess)) | ||
53 | return 0; | ||
54 | |||
55 | nacl = sess->se_node_acl; | ||
56 | if (!(nacl)) | ||
57 | return 0; | ||
58 | |||
59 | deve = &nacl->device_list[cmd->orig_fe_lun]; | ||
60 | if (!(atomic_read(&deve->ua_count))) | ||
61 | return 0; | ||
62 | /* | ||
63 | * From sam4r14, section 5.14 Unit attention condition: | ||
64 | * | ||
65 | * a) if an INQUIRY command enters the enabled command state, the | ||
66 | * device server shall process the INQUIRY command and shall neither | ||
67 | * report nor clear any unit attention condition; | ||
68 | * b) if a REPORT LUNS command enters the enabled command state, the | ||
69 | * device server shall process the REPORT LUNS command and shall not | ||
70 | * report any unit attention condition; | ||
71 | * e) if a REQUEST SENSE command enters the enabled command state while | ||
72 | * a unit attention condition exists for the SCSI initiator port | ||
73 | * associated with the I_T nexus on which the REQUEST SENSE command | ||
74 | * was received, then the device server shall process the command | ||
75 | * and either: | ||
76 | */ | ||
77 | switch (cdb[0]) { | ||
78 | case INQUIRY: | ||
79 | case REPORT_LUNS: | ||
80 | case REQUEST_SENSE: | ||
81 | return 0; | ||
82 | default: | ||
83 | return -1; | ||
84 | } | ||
85 | |||
86 | return -1; | ||
87 | } | ||
88 | |||
89 | int core_scsi3_ua_allocate( | ||
90 | struct se_node_acl *nacl, | ||
91 | u32 unpacked_lun, | ||
92 | u8 asc, | ||
93 | u8 ascq) | ||
94 | { | ||
95 | struct se_dev_entry *deve; | ||
96 | struct se_ua *ua, *ua_p, *ua_tmp; | ||
97 | /* | ||
98 | * PASSTHROUGH OPS | ||
99 | */ | ||
100 | if (!(nacl)) | ||
101 | return -1; | ||
102 | |||
103 | ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); | ||
104 | if (!(ua)) { | ||
105 | printk(KERN_ERR "Unable to allocate struct se_ua\n"); | ||
106 | return -1; | ||
107 | } | ||
108 | INIT_LIST_HEAD(&ua->ua_dev_list); | ||
109 | INIT_LIST_HEAD(&ua->ua_nacl_list); | ||
110 | |||
111 | ua->ua_nacl = nacl; | ||
112 | ua->ua_asc = asc; | ||
113 | ua->ua_ascq = ascq; | ||
114 | |||
115 | spin_lock_irq(&nacl->device_list_lock); | ||
116 | deve = &nacl->device_list[unpacked_lun]; | ||
117 | |||
118 | spin_lock(&deve->ua_lock); | ||
119 | list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { | ||
120 | /* | ||
121 | * Do not report the same UNIT ATTENTION twice. | ||
122 | */ | ||
123 | if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) { | ||
124 | spin_unlock(&deve->ua_lock); | ||
125 | spin_unlock_irq(&nacl->device_list_lock); | ||
126 | kmem_cache_free(se_ua_cache, ua); | ||
127 | return 0; | ||
128 | } | ||
129 | /* | ||
130 | * Attach the highest priority Unit Attention to | ||
131 | * the head of the list following sam4r14, | ||
132 | * Section 5.14 Unit Attention Condition: | ||
133 | * | ||
134 | * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest | ||
135 | * POWER ON OCCURRED or | ||
136 | * DEVICE INTERNAL RESET | ||
137 | * SCSI BUS RESET OCCURRED or | ||
138 | * MICROCODE HAS BEEN CHANGED or | ||
139 | * protocol specific | ||
140 | * BUS DEVICE RESET FUNCTION OCCURRED | ||
141 | * I_T NEXUS LOSS OCCURRED | ||
142 | * COMMANDS CLEARED BY POWER LOSS NOTIFICATION | ||
143 | * all others Lowest | ||
144 | * | ||
145 | * Each of the ASCQ codes listed above is defined in | ||
146 | * the 29h ASC family, see spc4r17 Table D.1 | ||
147 | */ | ||
148 | if (ua_p->ua_asc == 0x29) { | ||
149 | if ((asc == 0x29) && (ascq > ua_p->ua_ascq)) | ||
150 | list_add(&ua->ua_nacl_list, | ||
151 | &deve->ua_list); | ||
152 | else | ||
153 | list_add_tail(&ua->ua_nacl_list, | ||
154 | &deve->ua_list); | ||
155 | } else if (ua_p->ua_asc == 0x2a) { | ||
156 | /* | ||
157 | * Incoming Family 29h ASCQ codes will override | ||
158 | * Family 2Ah ASCQ codes for Unit Attention condition. | ||
159 | */ | ||
160 | if ((asc == 0x29) || (ascq > ua_p->ua_ascq)) | ||
161 | list_add(&ua->ua_nacl_list, | ||
162 | &deve->ua_list); | ||
163 | else | ||
164 | list_add_tail(&ua->ua_nacl_list, | ||
165 | &deve->ua_list); | ||
166 | } else | ||
167 | list_add_tail(&ua->ua_nacl_list, | ||
168 | &deve->ua_list); | ||
169 | spin_unlock(&deve->ua_lock); | ||
170 | spin_unlock_irq(&nacl->device_list_lock); | ||
171 | |||
172 | atomic_inc(&deve->ua_count); | ||
173 | smp_mb__after_atomic_inc(); | ||
174 | return 0; | ||
175 | } | ||
176 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); | ||
177 | spin_unlock(&deve->ua_lock); | ||
178 | spin_unlock_irq(&nacl->device_list_lock); | ||
179 | |||
180 | printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" | ||
181 | " 0x%02x, ASCQ: 0x%02x\n", | ||
182 | TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun, | ||
183 | asc, ascq); | ||
184 | |||
185 | atomic_inc(&deve->ua_count); | ||
186 | smp_mb__after_atomic_inc(); | ||
187 | return 0; | ||
188 | } | ||
189 | |||
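
The insertion above orders pending Unit Attentions per sam4r14: reset-family codes (ASC 0x29) win the head of the list, parameters-changed codes (ASC 0x2a) yield to them, and everything else is appended at the tail. A user-space sketch of just the head-versus-tail decision, assuming the comparison against an existing 0x2a entry is meant to use its ASCQ:

#include <stdio.h>

/*
 * Returns nonzero if an incoming UA (asc, ascq) should be inserted ahead
 * of the existing head-of-list UA (cur_asc, cur_ascq).
 */
static int ua_insert_at_head(unsigned char asc, unsigned char ascq,
			     unsigned char cur_asc, unsigned char cur_ascq)
{
	if (cur_asc == 0x29)
		return (asc == 0x29) && (ascq > cur_ascq);
	if (cur_asc == 0x2a)
		return (asc == 0x29) || (ascq > cur_ascq);
	return 0;
}

int main(void)
{
	/*
	 * POWER ON OCCURRED (29h/01h) vs. an existing RESERVATIONS
	 * PREEMPTED (2Ah/03h): the reset-family UA takes the head slot.
	 */
	printf("%d\n", ua_insert_at_head(0x29, 0x01, 0x2a, 0x03));
	return 0;
}
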
190 | void core_scsi3_ua_release_all( | ||
191 | struct se_dev_entry *deve) | ||
192 | { | ||
193 | struct se_ua *ua, *ua_p; | ||
194 | |||
195 | spin_lock(&deve->ua_lock); | ||
196 | list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { | ||
197 | list_del(&ua->ua_nacl_list); | ||
198 | kmem_cache_free(se_ua_cache, ua); | ||
199 | |||
200 | atomic_dec(&deve->ua_count); | ||
201 | smp_mb__after_atomic_dec(); | ||
202 | } | ||
203 | spin_unlock(&deve->ua_lock); | ||
204 | } | ||
205 | |||
206 | void core_scsi3_ua_for_check_condition( | ||
207 | struct se_cmd *cmd, | ||
208 | u8 *asc, | ||
209 | u8 *ascq) | ||
210 | { | ||
211 | struct se_device *dev = SE_DEV(cmd); | ||
212 | struct se_dev_entry *deve; | ||
213 | struct se_session *sess = cmd->se_sess; | ||
214 | struct se_node_acl *nacl; | ||
215 | struct se_ua *ua = NULL, *ua_p; | ||
216 | int head = 1; | ||
217 | |||
218 | if (!(sess)) | ||
219 | return; | ||
220 | |||
221 | nacl = sess->se_node_acl; | ||
222 | if (!(nacl)) | ||
223 | return; | ||
224 | |||
225 | spin_lock_irq(&nacl->device_list_lock); | ||
226 | deve = &nacl->device_list[cmd->orig_fe_lun]; | ||
227 | if (!(atomic_read(&deve->ua_count))) { | ||
228 | spin_unlock_irq(&nacl->device_list_lock); | ||
229 | return; | ||
230 | } | ||
231 | /* | ||
232 | * The highest priority Unit Attentions are placed at the head of the | ||
233 | * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION + | ||
234 | * sense data for the received CDB. | ||
235 | */ | ||
236 | spin_lock(&deve->ua_lock); | ||
237 | list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { | ||
238 | /* | ||
239 | * For ua_intlck_ctrl code not equal to 00b, only report the | ||
240 | * highest priority UNIT_ATTENTION and ASC/ASCQ without | ||
241 | * clearing it. | ||
242 | */ | ||
243 | if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) { | ||
244 | *asc = ua->ua_asc; | ||
245 | *ascq = ua->ua_ascq; | ||
246 | break; | ||
247 | } | ||
248 | /* | ||
249 | * Otherwise for the default 00b, release the UNIT ATTENTION | ||
250 | * condition. Return the ASC/ASCQ of the highest priority UA | ||
251 | * (head of the list) in the outgoing CHECK_CONDITION + sense. | ||
252 | */ | ||
253 | if (head) { | ||
254 | *asc = ua->ua_asc; | ||
255 | *ascq = ua->ua_ascq; | ||
256 | head = 0; | ||
257 | } | ||
258 | list_del(&ua->ua_nacl_list); | ||
259 | kmem_cache_free(se_ua_cache, ua); | ||
260 | |||
261 | atomic_dec(&deve->ua_count); | ||
262 | smp_mb__after_atomic_dec(); | ||
263 | } | ||
264 | spin_unlock(&deve->ua_lock); | ||
265 | spin_unlock_irq(&nacl->device_list_lock); | ||
266 | |||
267 | printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" | ||
268 | " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" | ||
269 | " reported ASC: 0x%02x, ASCQ: 0x%02x\n", | ||
270 | TPG_TFO(nacl->se_tpg)->get_fabric_name(), | ||
271 | (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" : | ||
272 | "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl, | ||
273 | cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq); | ||
274 | } | ||
275 | |||
276 | int core_scsi3_ua_clear_for_request_sense( | ||
277 | struct se_cmd *cmd, | ||
278 | u8 *asc, | ||
279 | u8 *ascq) | ||
280 | { | ||
281 | struct se_dev_entry *deve; | ||
282 | struct se_session *sess = cmd->se_sess; | ||
283 | struct se_node_acl *nacl; | ||
284 | struct se_ua *ua = NULL, *ua_p; | ||
285 | int head = 1; | ||
286 | |||
287 | if (!(sess)) | ||
288 | return -1; | ||
289 | |||
290 | nacl = sess->se_node_acl; | ||
291 | if (!(nacl)) | ||
292 | return -1; | ||
293 | |||
294 | spin_lock_irq(&nacl->device_list_lock); | ||
295 | deve = &nacl->device_list[cmd->orig_fe_lun]; | ||
296 | if (!(atomic_read(&deve->ua_count))) { | ||
297 | spin_unlock_irq(&nacl->device_list_lock); | ||
298 | return -1; | ||
299 | } | ||
300 | /* | ||
301 | * The highest priority Unit Attentions are placed at the head of the | ||
302 | * struct se_dev_entry->ua_list. The First (and hence highest priority) | ||
303 | * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the | ||
304 | * matching struct se_lun. | ||
305 | * | ||
306 | * Once the returning ASC/ASCQ values are set, we go ahead and | ||
307 | * release all of the Unit Attention conditions for the associated | ||
308 | * struct se_lun. | ||
309 | */ | ||
310 | spin_lock(&deve->ua_lock); | ||
311 | list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { | ||
312 | if (head) { | ||
313 | *asc = ua->ua_asc; | ||
314 | *ascq = ua->ua_ascq; | ||
315 | head = 0; | ||
316 | } | ||
317 | list_del(&ua->ua_nacl_list); | ||
318 | kmem_cache_free(se_ua_cache, ua); | ||
319 | |||
320 | atomic_dec(&deve->ua_count); | ||
321 | smp_mb__after_atomic_dec(); | ||
322 | } | ||
323 | spin_unlock(&deve->ua_lock); | ||
324 | spin_unlock_irq(&nacl->device_list_lock); | ||
325 | |||
326 | printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" | ||
327 | " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," | ||
328 | " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(), | ||
329 | cmd->orig_fe_lun, *asc, *ascq); | ||
330 | |||
331 | return (head) ? -1 : 0; | ||
332 | } | ||
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h new file mode 100644 index 000000000000..6e6b03460a1a --- /dev/null +++ b/drivers/target/target_core_ua.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef TARGET_CORE_UA_H | ||
2 | #define TARGET_CORE_UA_H | ||
3 | /* | ||
4 | * From spc4r17, Table D.1: ASC and ASCQ Assignment | ||
5 | */ | ||
6 | #define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00 | ||
7 | #define ASCQ_29H_POWER_ON_OCCURRED 0x01 | ||
8 | #define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02 | ||
9 | #define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03 | ||
10 | #define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04 | ||
11 | #define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05 | ||
12 | #define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06 | ||
13 | #define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07 | ||
14 | |||
15 | #define ASCQ_2AH_PARAMETERS_CHANGED 0x00 | ||
16 | #define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01 | ||
17 | #define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02 | ||
18 | #define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03 | ||
19 | #define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 | ||
20 | #define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 | ||
21 | #define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 | ||
22 | #define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 | ||
23 | #define ASCQ_2AH_PRIORITY_CHANGED 0x08 | ||
24 | |||
25 | #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 | ||
26 | |||
27 | extern struct kmem_cache *se_ua_cache; | ||
28 | |||
29 | extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *); | ||
30 | extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8); | ||
31 | extern void core_scsi3_ua_release_all(struct se_dev_entry *); | ||
32 | extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *); | ||
33 | extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, | ||
34 | u8 *, u8 *); | ||
35 | |||
36 | #endif /* TARGET_CORE_UA_H */ | ||
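
A hedged usage sketch of the core_scsi3_ua_allocate() prototype above; the call site and the example_post_reset_ua() wrapper are illustrative, not taken from this patch:

#include <target/target_core_base.h>
#include "target_core_ua.h"

/*
 * Illustrative only: post a reset Unit Attention for one registered
 * initiator's mapped LUN, e.g. after a LUN_RESET TMR completes.
 */
static void example_post_reset_ua(struct se_node_acl *nacl, u32 mapped_lun)
{
	core_scsi3_ua_allocate(nacl, mapped_lun, 0x29,
			ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
}
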
diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h new file mode 100644 index 000000000000..7fe74608b437 --- /dev/null +++ b/include/target/configfs_macros.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * configfs_macros.h - extends macros for configfs | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public | ||
17 | * License along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
19 | * Boston, MA 021110-1307, USA. | ||
20 | * | ||
21 | * Based on sysfs: | ||
22 | * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel | ||
23 | * | ||
24 | * Based on kobject.h: | ||
25 | * Copyright (c) 2002-2003 Patrick Mochel | ||
26 | * Copyright (c) 2002-2003 Open Source Development Labs | ||
27 | * | ||
28 | * configfs Copyright (C) 2005 Oracle. All rights reserved. | ||
29 | * | ||
30 | * Added CONFIGFS_EATTR() macros from original configfs.h macros | ||
31 | * Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
32 | * | ||
33 | * Please read Documentation/filesystems/configfs.txt before using the | ||
34 | * configfs interface, ESPECIALLY the parts about reference counts and | ||
35 | * item destructors. | ||
36 | */ | ||
37 | |||
38 | #ifndef _CONFIGFS_MACROS_H_ | ||
39 | #define _CONFIGFS_MACROS_H_ | ||
40 | |||
41 | #include <linux/configfs.h> | ||
42 | |||
43 | /* | ||
44 | * Users often need to create attribute structures for their configurable | ||
45 | * attributes, containing a configfs_attribute member and function pointers | ||
46 | * for the show() and store() operations on that attribute. If they don't | ||
47 | * need anything else on the extended attribute structure, they can use | ||
48 | * this macro to define it. The argument _name ends up as | ||
49 | * 'struct _name_attribute', as well as in the names passed to CONFIGFS_EATTR_OPS() below. | ||
50 | * The argument _item is the name of the structure containing the | ||
51 | * struct config_item or struct config_group structure members | ||
52 | */ | ||
53 | #define CONFIGFS_EATTR_STRUCT(_name, _item) \ | ||
54 | struct _name##_attribute { \ | ||
55 | struct configfs_attribute attr; \ | ||
56 | ssize_t (*show)(struct _item *, char *); \ | ||
57 | ssize_t (*store)(struct _item *, const char *, size_t); \ | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * With the extended attribute structure, users can use this macro | ||
62 | * (similar to sysfs' __ATTR) to make defining attributes easier. | ||
63 | * An example: | ||
64 | * #define MYITEM_EATTR(_name, _mode, _show, _store) \ | ||
65 | * struct myitem_attribute childless_attr_##_name = \ | ||
66 | * __CONFIGFS_EATTR(_name, _mode, _show, _store) | ||
67 | */ | ||
68 | #define __CONFIGFS_EATTR(_name, _mode, _show, _store) \ | ||
69 | { \ | ||
70 | .attr = { \ | ||
71 | .ca_name = __stringify(_name), \ | ||
72 | .ca_mode = _mode, \ | ||
73 | .ca_owner = THIS_MODULE, \ | ||
74 | }, \ | ||
75 | .show = _show, \ | ||
76 | .store = _store, \ | ||
77 | } | ||
78 | /* Here is a readonly version, only requiring a show() operation */ | ||
79 | #define __CONFIGFS_EATTR_RO(_name, _show) \ | ||
80 | { \ | ||
81 | .attr = { \ | ||
82 | .ca_name = __stringify(_name), \ | ||
83 | .ca_mode = 0444, \ | ||
84 | .ca_owner = THIS_MODULE, \ | ||
85 | }, \ | ||
86 | .show = _show, \ | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * With these extended attributes, the simple show_attribute() and | ||
91 | * store_attribute() operations need to call the show() and store() of the | ||
92 | * attributes. This is a common pattern, so we provide a macro to define | ||
93 | * them. The argument _name is the name of the attribute defined by | ||
94 | * CONFIGFS_EATTR_STRUCT(). The argument _item is the name of the structure | ||
95 | * containing the struct config_item or struct config_group structure member. | ||
96 | * The argument _item_member is the actual name of the struct config_* struct | ||
97 | * in your _item structure. Meaning my_structure->some_config_group. | ||
98 | * ^^_item^^^^^ ^^_item_member^^^ | ||
99 | * This macro expects the attributes to be named "struct <name>_attribute". | ||
100 | */ | ||
101 | #define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member) \ | ||
102 | static struct _item *to_##_name(struct config_item *ci) \ | ||
103 | { \ | ||
104 | return (ci) ? container_of(to_config_group(ci), struct _item, \ | ||
105 | _item_member) : NULL; \ | ||
106 | } | ||
107 | |||
108 | #define CONFIGFS_EATTR_OPS_SHOW(_name, _item) \ | ||
109 | static ssize_t _name##_attr_show(struct config_item *item, \ | ||
110 | struct configfs_attribute *attr, \ | ||
111 | char *page) \ | ||
112 | { \ | ||
113 | struct _item *_item = to_##_name(item); \ | ||
114 | struct _name##_attribute * _name##_attr = \ | ||
115 | container_of(attr, struct _name##_attribute, attr); \ | ||
116 | ssize_t ret = 0; \ | ||
117 | \ | ||
118 | if (_name##_attr->show) \ | ||
119 | ret = _name##_attr->show(_item, page); \ | ||
120 | return ret; \ | ||
121 | } | ||
122 | |||
123 | #define CONFIGFS_EATTR_OPS_STORE(_name, _item) \ | ||
124 | static ssize_t _name##_attr_store(struct config_item *item, \ | ||
125 | struct configfs_attribute *attr, \ | ||
126 | const char *page, size_t count) \ | ||
127 | { \ | ||
128 | struct _item *_item = to_##_name(item); \ | ||
129 | struct _name##_attribute * _name##_attr = \ | ||
130 | container_of(attr, struct _name##_attribute, attr); \ | ||
131 | ssize_t ret = -EINVAL; \ | ||
132 | \ | ||
133 | if (_name##_attr->store) \ | ||
134 | ret = _name##_attr->store(_item, page, count); \ | ||
135 | return ret; \ | ||
136 | } | ||
137 | |||
138 | #define CONFIGFS_EATTR_OPS(_name, _item, _item_member) \ | ||
139 | CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member); \ | ||
140 | CONFIGFS_EATTR_OPS_SHOW(_name, _item); \ | ||
141 | CONFIGFS_EATTR_OPS_STORE(_name, _item); | ||
142 | |||
143 | #define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member) \ | ||
144 | CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member); \ | ||
145 | CONFIGFS_EATTR_OPS_SHOW(_name, _item); | ||
146 | |||
147 | #endif /* _CONFIGFS_MACROS_H_ */ | ||
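
A sketch tying the macros above together for a hypothetical struct myitem embedding a struct config_group named 'group'; every name here (myitem, myitem_show_size, the 'size' attribute) is illustrative:

#include <linux/kernel.h>
#include <target/configfs_macros.h>

struct myitem {
	struct config_group group;
	unsigned long size;
};

/* Defines struct myitem_attribute with show()/store() members. */
CONFIGFS_EATTR_STRUCT(myitem, myitem);
/* Generates to_myitem(), myitem_attr_show() and myitem_attr_store(). */
CONFIGFS_EATTR_OPS(myitem, myitem, group);

static ssize_t myitem_show_size(struct myitem *item, char *page)
{
	return sprintf(page, "%lu\n", item->size);
}

static struct myitem_attribute myitem_attr_size =
	__CONFIGFS_EATTR_RO(size, myitem_show_size);

/*
 * myitem_attr_show()/myitem_attr_store() would then be wired into the
 * item's struct configfs_item_operations, with &myitem_attr_size.attr
 * listed in the config_item_type's ct_attrs array.
 */
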
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h new file mode 100644 index 000000000000..07fdfb6b9a9a --- /dev/null +++ b/include/target/target_core_base.h | |||
@@ -0,0 +1,937 @@ | |||
1 | #ifndef TARGET_CORE_BASE_H | ||
2 | #define TARGET_CORE_BASE_H | ||
3 | |||
4 | #include <linux/in.h> | ||
5 | #include <linux/configfs.h> | ||
6 | #include <linux/dma-mapping.h> | ||
7 | #include <linux/blkdev.h> | ||
8 | #include <scsi/scsi_cmnd.h> | ||
9 | #include <net/sock.h> | ||
10 | #include <net/tcp.h> | ||
11 | #include "target_core_mib.h" | ||
12 | |||
13 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" | ||
14 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | ||
15 | |||
16 | /* Used by transport_generic_allocate_iovecs() */ | ||
17 | #define TRANSPORT_IOV_DATA_BUFFER 5 | ||
18 | /* Maximum Number of LUNs per Target Portal Group */ | ||
19 | #define TRANSPORT_MAX_LUNS_PER_TPG 256 | ||
20 | /* | ||
21 | * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. | ||
22 | * | ||
23 | * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and | ||
24 | * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use | ||
25 | * 16-byte CDBs by default and require an extra allocation for | ||
26 | * 32-byte CDBs because of legacy issues. | ||
27 | * | ||
28 | * Within TCM Core there are no such legacy limitations, so we go ahead | ||
29 | * and use 32-byte CDBs by default, and use include/scsi/scsi.h:scsi_command_size() | ||
30 | * within all TCM Core and subsystem plugin code. | ||
31 | */ | ||
32 | #define TCM_MAX_COMMAND_SIZE 32 | ||
33 | /* | ||
34 | * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently | ||
35 | * defined as 96, but the real limit is 252 (or 260 including the header) | ||
36 | */ | ||
37 | #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE | ||
38 | /* Used by transport_send_check_condition_and_sense() */ | ||
39 | #define SPC_SENSE_KEY_OFFSET 2 | ||
40 | #define SPC_ASC_KEY_OFFSET 12 | ||
41 | #define SPC_ASCQ_KEY_OFFSET 13 | ||
42 | #define TRANSPORT_IQN_LEN 224 | ||
43 | /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */ | ||
44 | #define LU_GROUP_NAME_BUF 256 | ||
45 | /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ | ||
46 | #define TG_PT_GROUP_NAME_BUF 256 | ||
47 | /* Used to parse VPD into struct t10_vpd */ | ||
48 | #define VPD_TMP_BUF_SIZE 128 | ||
49 | /* Used by transport_generic_cmd_sequencer() */ | ||
50 | #define READ_BLOCK_LEN 6 | ||
51 | #define READ_CAP_LEN 8 | ||
52 | #define READ_POSITION_LEN 20 | ||
53 | #define INQUIRY_LEN 36 | ||
54 | /* Used by transport_get_inquiry_vpd_serial() */ | ||
55 | #define INQUIRY_VPD_SERIAL_LEN 254 | ||
56 | /* Used by transport_get_inquiry_vpd_device_ident() */ | ||
57 | #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 | ||
58 | |||
59 | /* struct se_hba->hba_flags */ | ||
60 | enum hba_flags_table { | ||
61 | HBA_FLAGS_INTERNAL_USE = 0x01, | ||
62 | HBA_FLAGS_PSCSI_MODE = 0x02, | ||
63 | }; | ||
64 | |||
65 | /* struct se_lun->lun_status */ | ||
66 | enum transport_lun_status_table { | ||
67 | TRANSPORT_LUN_STATUS_FREE = 0, | ||
68 | TRANSPORT_LUN_STATUS_ACTIVE = 1, | ||
69 | }; | ||
70 | |||
71 | /* struct se_portal_group->se_tpg_type */ | ||
72 | enum transport_tpg_type_table { | ||
73 | TRANSPORT_TPG_TYPE_NORMAL = 0, | ||
74 | TRANSPORT_TPG_TYPE_DISCOVERY = 1, | ||
75 | }; | ||
76 | |||
77 | /* Used for generic timer flags */ | ||
78 | enum timer_flags_table { | ||
79 | TF_RUNNING = 0x01, | ||
80 | TF_STOP = 0x02, | ||
81 | }; | ||
82 | |||
83 | /* Special transport agnostic struct se_cmd->t_states */ | ||
84 | enum transport_state_table { | ||
85 | TRANSPORT_NO_STATE = 0, | ||
86 | TRANSPORT_NEW_CMD = 1, | ||
87 | TRANSPORT_DEFERRED_CMD = 2, | ||
88 | TRANSPORT_WRITE_PENDING = 3, | ||
89 | TRANSPORT_PROCESS_WRITE = 4, | ||
90 | TRANSPORT_PROCESSING = 5, | ||
91 | TRANSPORT_COMPLETE_OK = 6, | ||
92 | TRANSPORT_COMPLETE_FAILURE = 7, | ||
93 | TRANSPORT_COMPLETE_TIMEOUT = 8, | ||
94 | TRANSPORT_PROCESS_TMR = 9, | ||
95 | TRANSPORT_TMR_COMPLETE = 10, | ||
96 | TRANSPORT_ISTATE_PROCESSING = 11, | ||
97 | TRANSPORT_ISTATE_PROCESSED = 12, | ||
98 | TRANSPORT_KILL = 13, | ||
99 | TRANSPORT_REMOVE = 14, | ||
100 | TRANSPORT_FREE = 15, | ||
101 | TRANSPORT_NEW_CMD_MAP = 16, | ||
102 | }; | ||
103 | |||
104 | /* Used for struct se_cmd->se_cmd_flags */ | ||
105 | enum se_cmd_flags_table { | ||
106 | SCF_SUPPORTED_SAM_OPCODE = 0x00000001, | ||
107 | SCF_TRANSPORT_TASK_SENSE = 0x00000002, | ||
108 | SCF_EMULATED_TASK_SENSE = 0x00000004, | ||
109 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, | ||
110 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, | ||
111 | SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, | ||
112 | SCF_SCSI_NON_DATA_CDB = 0x00000040, | ||
113 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, | ||
114 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, | ||
115 | SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200, | ||
116 | SCF_SE_CMD_FAILED = 0x00000400, | ||
117 | SCF_SE_LUN_CMD = 0x00000800, | ||
118 | SCF_SE_ALLOW_EOO = 0x00001000, | ||
119 | SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000, | ||
120 | SCF_SENT_CHECK_CONDITION = 0x00004000, | ||
121 | SCF_OVERFLOW_BIT = 0x00008000, | ||
122 | SCF_UNDERFLOW_BIT = 0x00010000, | ||
123 | SCF_SENT_DELAYED_TAS = 0x00020000, | ||
124 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, | ||
125 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, | ||
126 | SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, | ||
127 | SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, | ||
128 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, | ||
129 | SCF_EMULATE_SYNC_CACHE = 0x00800000, | ||
130 | SCF_EMULATE_CDB_ASYNC = 0x01000000, | ||
131 | SCF_EMULATE_SYNC_UNMAP = 0x02000000 | ||
132 | }; | ||
133 | |||
134 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | ||
135 | enum transport_lunflags_table { | ||
136 | TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, | ||
137 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, | ||
138 | TRANSPORT_LUNFLAGS_READ_ONLY = 0x02, | ||
139 | TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, | ||
140 | }; | ||
141 | |||
142 | /* struct se_device->dev_status */ | ||
143 | enum transport_device_status_table { | ||
144 | TRANSPORT_DEVICE_ACTIVATED = 0x01, | ||
145 | TRANSPORT_DEVICE_DEACTIVATED = 0x02, | ||
146 | TRANSPORT_DEVICE_QUEUE_FULL = 0x04, | ||
147 | TRANSPORT_DEVICE_SHUTDOWN = 0x08, | ||
148 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10, | ||
149 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20, | ||
150 | }; | ||
151 | |||
152 | /* | ||
153 | * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason | ||
154 | * to signal which ASC/ASCQ sense payload should be built. | ||
155 | */ | ||
156 | enum tcm_sense_reason_table { | ||
157 | TCM_NON_EXISTENT_LUN = 0x01, | ||
158 | TCM_UNSUPPORTED_SCSI_OPCODE = 0x02, | ||
159 | TCM_INCORRECT_AMOUNT_OF_DATA = 0x03, | ||
160 | TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04, | ||
161 | TCM_SERVICE_CRC_ERROR = 0x05, | ||
162 | TCM_SNACK_REJECTED = 0x06, | ||
163 | TCM_SECTOR_COUNT_TOO_MANY = 0x07, | ||
164 | TCM_INVALID_CDB_FIELD = 0x08, | ||
165 | TCM_INVALID_PARAMETER_LIST = 0x09, | ||
166 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a, | ||
167 | TCM_UNKNOWN_MODE_PAGE = 0x0b, | ||
168 | TCM_WRITE_PROTECTED = 0x0c, | ||
169 | TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, | ||
170 | TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, | ||
171 | TCM_CHECK_CONDITION_NOT_READY = 0x0f, | ||
172 | }; | ||
173 | |||
174 | struct se_obj { | ||
175 | atomic_t obj_access_count; | ||
176 | } ____cacheline_aligned; | ||
177 | |||
178 | /* | ||
179 | * Used by TCM Core internally to signal if ALUA emulation is enabled or | ||
180 | * disabled, or running in TCM/pSCSI passthrough mode | ||
181 | */ | ||
182 | typedef enum { | ||
183 | SPC_ALUA_PASSTHROUGH, | ||
184 | SPC2_ALUA_DISABLED, | ||
185 | SPC3_ALUA_EMULATED | ||
186 | } t10_alua_index_t; | ||
187 | |||
188 | /* | ||
189 | * Used by TCM Core internally to signal if SAM Task Attribute emulation | ||
190 | * is enabled or disabled, or running in TCM/pSCSI passthrough mode | ||
191 | */ | ||
192 | typedef enum { | ||
193 | SAM_TASK_ATTR_PASSTHROUGH, | ||
194 | SAM_TASK_ATTR_UNTAGGED, | ||
195 | SAM_TASK_ATTR_EMULATED | ||
196 | } t10_task_attr_index_t; | ||
197 | |||
198 | struct se_cmd; | ||
199 | |||
200 | struct t10_alua { | ||
201 | t10_alua_index_t alua_type; | ||
202 | /* ALUA Target Port Group ID */ | ||
203 | u16 alua_tg_pt_gps_counter; | ||
204 | u32 alua_tg_pt_gps_count; | ||
205 | spinlock_t tg_pt_gps_lock; | ||
206 | struct se_subsystem_dev *t10_sub_dev; | ||
207 | /* Used for default ALUA Target Port Group */ | ||
208 | struct t10_alua_tg_pt_gp *default_tg_pt_gp; | ||
209 | /* Used for default ALUA Target Port Group ConfigFS group */ | ||
210 | struct config_group alua_tg_pt_gps_group; | ||
211 | int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); | ||
212 | struct list_head tg_pt_gps_list; | ||
213 | } ____cacheline_aligned; | ||
214 | |||
215 | struct t10_alua_lu_gp { | ||
216 | u16 lu_gp_id; | ||
217 | int lu_gp_valid_id; | ||
218 | u32 lu_gp_members; | ||
219 | atomic_t lu_gp_shutdown; | ||
220 | atomic_t lu_gp_ref_cnt; | ||
221 | spinlock_t lu_gp_lock; | ||
222 | struct config_group lu_gp_group; | ||
223 | struct list_head lu_gp_list; | ||
224 | struct list_head lu_gp_mem_list; | ||
225 | } ____cacheline_aligned; | ||
226 | |||
227 | struct t10_alua_lu_gp_member { | ||
228 | int lu_gp_assoc:1; | ||
229 | atomic_t lu_gp_mem_ref_cnt; | ||
230 | spinlock_t lu_gp_mem_lock; | ||
231 | struct t10_alua_lu_gp *lu_gp; | ||
232 | struct se_device *lu_gp_mem_dev; | ||
233 | struct list_head lu_gp_mem_list; | ||
234 | } ____cacheline_aligned; | ||
235 | |||
236 | struct t10_alua_tg_pt_gp { | ||
237 | u16 tg_pt_gp_id; | ||
238 | int tg_pt_gp_valid_id; | ||
239 | int tg_pt_gp_alua_access_status; | ||
240 | int tg_pt_gp_alua_access_type; | ||
241 | int tg_pt_gp_nonop_delay_msecs; | ||
242 | int tg_pt_gp_trans_delay_msecs; | ||
243 | int tg_pt_gp_pref; | ||
244 | int tg_pt_gp_write_metadata; | ||
245 | /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ | ||
246 | #define ALUA_MD_BUF_LEN 1024 | ||
247 | u32 tg_pt_gp_md_buf_len; | ||
248 | u32 tg_pt_gp_members; | ||
249 | atomic_t tg_pt_gp_alua_access_state; | ||
250 | atomic_t tg_pt_gp_ref_cnt; | ||
251 | spinlock_t tg_pt_gp_lock; | ||
252 | struct mutex tg_pt_gp_md_mutex; | ||
253 | struct se_subsystem_dev *tg_pt_gp_su_dev; | ||
254 | struct config_group tg_pt_gp_group; | ||
255 | struct list_head tg_pt_gp_list; | ||
256 | struct list_head tg_pt_gp_mem_list; | ||
257 | } ____cacheline_aligned; | ||
258 | |||
259 | struct t10_alua_tg_pt_gp_member { | ||
260 | int tg_pt_gp_assoc:1; | ||
261 | atomic_t tg_pt_gp_mem_ref_cnt; | ||
262 | spinlock_t tg_pt_gp_mem_lock; | ||
263 | struct t10_alua_tg_pt_gp *tg_pt_gp; | ||
264 | struct se_port *tg_pt; | ||
265 | struct list_head tg_pt_gp_mem_list; | ||
266 | } ____cacheline_aligned; | ||
267 | |||
268 | struct t10_vpd { | ||
269 | unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; | ||
270 | int protocol_identifier_set; | ||
271 | u32 protocol_identifier; | ||
272 | u32 device_identifier_code_set; | ||
273 | u32 association; | ||
274 | u32 device_identifier_type; | ||
275 | struct list_head vpd_list; | ||
276 | } ____cacheline_aligned; | ||
277 | |||
278 | struct t10_wwn { | ||
279 | unsigned char vendor[8]; | ||
280 | unsigned char model[16]; | ||
281 | unsigned char revision[4]; | ||
282 | unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; | ||
283 | spinlock_t t10_vpd_lock; | ||
284 | struct se_subsystem_dev *t10_sub_dev; | ||
285 | struct config_group t10_wwn_group; | ||
286 | struct list_head t10_vpd_list; | ||
287 | } ____cacheline_aligned; | ||
288 | |||
289 | |||
290 | /* | ||
291 | * Used by TCM Core internally to signal if >= SPC-3 persistent reservations | ||
292 | * emulation is enabled or disabled, or running in TCM/pSCSI passthrough | ||
293 | * mode | ||
294 | */ | ||
295 | typedef enum { | ||
296 | SPC_PASSTHROUGH, | ||
297 | SPC2_RESERVATIONS, | ||
298 | SPC3_PERSISTENT_RESERVATIONS | ||
299 | } t10_reservations_index_t; | ||
300 | |||
301 | struct t10_pr_registration { | ||
302 | /* Used for fabrics that contain WWN+ISID */ | ||
303 | #define PR_REG_ISID_LEN 16 | ||
304 | /* PR_REG_ISID_LEN + ',i,0x' */ | ||
305 | #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5) | ||
306 | char pr_reg_isid[PR_REG_ISID_LEN]; | ||
307 | /* Used during APTPL metadata reading */ | ||
308 | #define PR_APTPL_MAX_IPORT_LEN 256 | ||
309 | unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN]; | ||
310 | /* Used during APTPL metadata reading */ | ||
311 | #define PR_APTPL_MAX_TPORT_LEN 256 | ||
312 | unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN]; | ||
313 | /* For writing out live meta data */ | ||
314 | unsigned char *pr_aptpl_buf; | ||
315 | u16 pr_aptpl_rpti; | ||
316 | u16 pr_reg_tpgt; | ||
317 | /* Reservation affects all target ports */ | ||
318 | int pr_reg_all_tg_pt; | ||
319 | /* Activate Persistence across Target Power Loss */ | ||
320 | int pr_reg_aptpl; | ||
321 | int pr_res_holder; | ||
322 | int pr_res_type; | ||
323 | int pr_res_scope; | ||
324 | /* Used for fabric initiator WWPNs using an ISID */ | ||
325 | int isid_present_at_reg:1; | ||
326 | u32 pr_res_mapped_lun; | ||
327 | u32 pr_aptpl_target_lun; | ||
328 | u32 pr_res_generation; | ||
329 | u64 pr_reg_bin_isid; | ||
330 | u64 pr_res_key; | ||
331 | atomic_t pr_res_holders; | ||
332 | struct se_node_acl *pr_reg_nacl; | ||
333 | struct se_dev_entry *pr_reg_deve; | ||
334 | struct se_lun *pr_reg_tg_pt_lun; | ||
335 | struct list_head pr_reg_list; | ||
336 | struct list_head pr_reg_abort_list; | ||
337 | struct list_head pr_reg_aptpl_list; | ||
338 | struct list_head pr_reg_atp_list; | ||
339 | struct list_head pr_reg_atp_mem_list; | ||
340 | } ____cacheline_aligned; | ||
341 | |||
342 | /* | ||
343 | * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS, | ||
344 | * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c: | ||
345 | * core_setup_reservations() | ||
346 | */ | ||
347 | struct t10_reservation_ops { | ||
348 | int (*t10_reservation_check)(struct se_cmd *, u32 *); | ||
349 | int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); | ||
350 | int (*t10_pr_register)(struct se_cmd *); | ||
351 | int (*t10_pr_clear)(struct se_cmd *); | ||
352 | }; | ||
353 | |||
354 | struct t10_reservation_template { | ||
355 | /* Reservation affects all target ports */ | ||
356 | int pr_all_tg_pt; | ||
357 | /* Activate Persistence across Target Power Loss enabled | ||
358 | * for SCSI device */ | ||
359 | int pr_aptpl_active; | ||
360 | /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ | ||
361 | #define PR_APTPL_BUF_LEN 8192 | ||
362 | u32 pr_aptpl_buf_len; | ||
363 | u32 pr_generation; | ||
364 | t10_reservations_index_t res_type; | ||
365 | spinlock_t registration_lock; | ||
366 | spinlock_t aptpl_reg_lock; | ||
367 | /* | ||
368 | * This will always be set by one individual I_T Nexus. | ||
369 | * However with all_tg_pt=1, other I_T Nexus from the | ||
370 | * same initiator can access PR reg/res info on a different | ||
371 | * target port. | ||
372 | * | ||
373 | * There is also the 'All Registrants' case, where there is | ||
374 | * a single *pr_res_holder of the reservation, but all | ||
375 | * registrations are considered reservation holders. | ||
376 | */ | ||
377 | struct se_node_acl *pr_res_holder; | ||
378 | struct list_head registration_list; | ||
379 | struct list_head aptpl_reg_list; | ||
380 | struct t10_reservation_ops pr_ops; | ||
381 | } ____cacheline_aligned; | ||
382 | |||
383 | struct se_queue_req { | ||
384 | int state; | ||
385 | void *cmd; | ||
386 | struct list_head qr_list; | ||
387 | } ____cacheline_aligned; | ||
388 | |||
389 | struct se_queue_obj { | ||
390 | atomic_t queue_cnt; | ||
391 | spinlock_t cmd_queue_lock; | ||
392 | struct list_head qobj_list; | ||
393 | wait_queue_head_t thread_wq; | ||
394 | } ____cacheline_aligned; | ||
395 | |||
396 | /* | ||
397 | * Used one per struct se_cmd to hold all extra struct se_task | ||
398 | * metadata. This structure is setup and allocated in | ||
399 | * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() | ||
400 | */ | ||
401 | struct se_transport_task { | ||
402 | unsigned char *t_task_cdb; | ||
403 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; | ||
404 | unsigned long long t_task_lba; | ||
405 | int t_tasks_failed; | ||
406 | int t_tasks_fua; | ||
407 | int t_tasks_bidi:1; | ||
408 | u32 t_task_cdbs; | ||
409 | u32 t_tasks_check; | ||
410 | u32 t_tasks_no; | ||
411 | u32 t_tasks_sectors; | ||
412 | u32 t_tasks_se_num; | ||
413 | u32 t_tasks_se_bidi_num; | ||
414 | u32 t_tasks_sg_chained_no; | ||
415 | atomic_t t_fe_count; | ||
416 | atomic_t t_se_count; | ||
417 | atomic_t t_task_cdbs_left; | ||
418 | atomic_t t_task_cdbs_ex_left; | ||
419 | atomic_t t_task_cdbs_timeout_left; | ||
420 | atomic_t t_task_cdbs_sent; | ||
421 | atomic_t t_transport_aborted; | ||
422 | atomic_t t_transport_active; | ||
423 | atomic_t t_transport_complete; | ||
424 | atomic_t t_transport_queue_active; | ||
425 | atomic_t t_transport_sent; | ||
426 | atomic_t t_transport_stop; | ||
427 | atomic_t t_transport_timeout; | ||
428 | atomic_t transport_dev_active; | ||
429 | atomic_t transport_lun_active; | ||
430 | atomic_t transport_lun_fe_stop; | ||
431 | atomic_t transport_lun_stop; | ||
432 | spinlock_t t_state_lock; | ||
433 | struct completion t_transport_stop_comp; | ||
434 | struct completion transport_lun_fe_stop_comp; | ||
435 | struct completion transport_lun_stop_comp; | ||
436 | struct scatterlist *t_tasks_sg_chained; | ||
437 | struct scatterlist t_tasks_sg_bounce; | ||
438 | void *t_task_buf; | ||
439 | /* | ||
440 | * Used for pre-registered fabric SGL passthrough WRITE and READ | ||
441 | * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop | ||
442 | * and other HW target mode fabric modules. | ||
443 | */ | ||
444 | struct scatterlist *t_task_pt_sgl; | ||
445 | struct list_head *t_mem_list; | ||
446 | /* Used for BIDI READ */ | ||
447 | struct list_head *t_mem_bidi_list; | ||
448 | struct list_head t_task_list; | ||
449 | } ____cacheline_aligned; | ||
450 | |||
451 | struct se_task { | ||
452 | unsigned char task_sense; | ||
453 | struct scatterlist *task_sg; | ||
454 | struct scatterlist *task_sg_bidi; | ||
455 | u8 task_scsi_status; | ||
456 | u8 task_flags; | ||
457 | int task_error_status; | ||
458 | int task_state_flags; | ||
459 | int task_padded_sg:1; | ||
460 | unsigned long long task_lba; | ||
461 | u32 task_no; | ||
462 | u32 task_sectors; | ||
463 | u32 task_size; | ||
464 | u32 task_sg_num; | ||
465 | u32 task_sg_offset; | ||
466 | enum dma_data_direction task_data_direction; | ||
467 | struct se_cmd *task_se_cmd; | ||
468 | struct se_device *se_dev; | ||
469 | struct completion task_stop_comp; | ||
470 | atomic_t task_active; | ||
471 | atomic_t task_execute_queue; | ||
472 | atomic_t task_timeout; | ||
473 | atomic_t task_sent; | ||
474 | atomic_t task_stop; | ||
475 | atomic_t task_state_active; | ||
476 | struct timer_list task_timer; | ||
477 | struct se_device *se_obj_ptr; | ||
478 | struct list_head t_list; | ||
479 | struct list_head t_execute_list; | ||
480 | struct list_head t_state_list; | ||
481 | } ____cacheline_aligned; | ||
482 | |||
483 | #define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd) | ||
484 | #define TASK_DEV(task) ((struct se_device *)task->se_dev) | ||
485 | |||
486 | struct se_cmd { | ||
487 | /* SAM response code being sent to initiator */ | ||
488 | u8 scsi_status; | ||
489 | u8 scsi_asc; | ||
490 | u8 scsi_ascq; | ||
491 | u8 scsi_sense_reason; | ||
492 | u16 scsi_sense_length; | ||
493 | /* Delay for ALUA Active/NonOptimized state access in milliseconds */ | ||
494 | int alua_nonop_delay; | ||
495 | /* See include/linux/dma-mapping.h */ | ||
496 | enum dma_data_direction data_direction; | ||
497 | /* For SAM Task Attribute */ | ||
498 | int sam_task_attr; | ||
499 | /* Transport protocol dependent state, see transport_state_table */ | ||
500 | enum transport_state_table t_state; | ||
501 | /* Transport protocol dependent state for out of order CmdSNs */ | ||
502 | int deferred_t_state; | ||
503 | /* Transport specific error status */ | ||
504 | int transport_error_status; | ||
505 | /* See se_cmd_flags_table */ | ||
506 | u32 se_cmd_flags; | ||
507 | u32 se_ordered_id; | ||
508 | /* Total size in bytes associated with command */ | ||
509 | u32 data_length; | ||
510 | /* SCSI Presented Data Transfer Length */ | ||
511 | u32 cmd_spdtl; | ||
512 | u32 residual_count; | ||
513 | u32 orig_fe_lun; | ||
514 | /* Persistent Reservation key */ | ||
515 | u64 pr_res_key; | ||
516 | atomic_t transport_sent; | ||
517 | /* Used for sense data */ | ||
518 | void *sense_buffer; | ||
519 | struct list_head se_delayed_list; | ||
520 | struct list_head se_ordered_list; | ||
521 | struct list_head se_lun_list; | ||
522 | struct se_device *se_dev; | ||
523 | struct se_dev_entry *se_deve; | ||
524 | struct se_device *se_obj_ptr; | ||
525 | struct se_device *se_orig_obj_ptr; | ||
526 | struct se_lun *se_lun; | ||
527 | /* Only used for internal passthrough and legacy TCM fabric modules */ | ||
528 | struct se_session *se_sess; | ||
529 | struct se_tmr_req *se_tmr_req; | ||
530 | /* t_task is set up to point at t_task_backstore in transport_init_se_cmd() */ | ||
531 | struct se_transport_task *t_task; | ||
532 | struct se_transport_task t_task_backstore; | ||
533 | struct target_core_fabric_ops *se_tfo; | ||
534 | int (*transport_emulate_cdb)(struct se_cmd *); | ||
535 | void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); | ||
536 | void (*transport_wait_for_tasks)(struct se_cmd *, int, int); | ||
537 | void (*transport_complete_callback)(struct se_cmd *); | ||
538 | } ____cacheline_aligned; | ||
539 | |||
540 | #define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task)) | ||
541 | #define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo) | ||
542 | |||
543 | struct se_tmr_req { | ||
544 | /* Task Management function to be performed */ | ||
545 | u8 function; | ||
546 | /* Task Management response to send */ | ||
547 | u8 response; | ||
548 | int call_transport; | ||
549 | /* Reference to the ITT that the Task Mgmt function should be performed upon */ | ||
550 | u32 ref_task_tag; | ||
551 | /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ | ||
552 | u64 ref_task_lun; | ||
553 | void *fabric_tmr_ptr; | ||
554 | struct se_cmd *task_cmd; | ||
555 | struct se_cmd *ref_cmd; | ||
556 | struct se_device *tmr_dev; | ||
557 | struct se_lun *tmr_lun; | ||
558 | struct list_head tmr_list; | ||
559 | } ____cacheline_aligned; | ||
560 | |||
561 | struct se_ua { | ||
562 | u8 ua_asc; | ||
563 | u8 ua_ascq; | ||
564 | struct se_node_acl *ua_nacl; | ||
565 | struct list_head ua_dev_list; | ||
566 | struct list_head ua_nacl_list; | ||
567 | } ____cacheline_aligned; | ||
568 | |||
569 | struct se_node_acl { | ||
570 | char initiatorname[TRANSPORT_IQN_LEN]; | ||
571 | /* Used to signal demo mode created ACL, disabled by default */ | ||
572 | int dynamic_node_acl:1; | ||
573 | u32 queue_depth; | ||
574 | u32 acl_index; | ||
575 | u64 num_cmds; | ||
576 | u64 read_bytes; | ||
577 | u64 write_bytes; | ||
578 | spinlock_t stats_lock; | ||
579 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | ||
580 | atomic_t acl_pr_ref_count; | ||
581 | /* Used for MIB access */ | ||
582 | atomic_t mib_ref_count; | ||
583 | struct se_dev_entry *device_list; | ||
584 | struct se_session *nacl_sess; | ||
585 | struct se_portal_group *se_tpg; | ||
586 | spinlock_t device_list_lock; | ||
587 | spinlock_t nacl_sess_lock; | ||
588 | struct config_group acl_group; | ||
589 | struct config_group acl_attrib_group; | ||
590 | struct config_group acl_auth_group; | ||
591 | struct config_group acl_param_group; | ||
592 | struct config_group *acl_default_groups[4]; | ||
593 | struct list_head acl_list; | ||
594 | struct list_head acl_sess_list; | ||
595 | } ____cacheline_aligned; | ||
596 | |||
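A hedged note on how the dynamic_node_acl distinction plays out: explicit and demo-mode ACLs are both resolved through core_tpg_check_initiator_node_acl() from target_core_tpg.h; that a dynamic ACL is created on a miss only when the fabric's tpg_check_demo_mode() callback allows it is an assumption based on the field comments.

/* Sketch: resolve an initiator name to its se_node_acl. */
static struct se_node_acl *example_find_acl(struct se_portal_group *tpg,
					    unsigned char *initiatorname)
{
	return core_tpg_check_initiator_node_acl(tpg, initiatorname);
}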
597 | struct se_session { | ||
598 | /* Used for MIB access */ | ||
599 | atomic_t mib_ref_count; | ||
600 | u64 sess_bin_isid; | ||
601 | struct se_node_acl *se_node_acl; | ||
602 | struct se_portal_group *se_tpg; | ||
603 | void *fabric_sess_ptr; | ||
604 | struct list_head sess_list; | ||
605 | struct list_head sess_acl_list; | ||
606 | } ____cacheline_aligned; | ||
607 | |||
608 | #define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess) | ||
609 | #define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl) | ||
610 | |||
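Illustrative only: a fabric module pairs transport_init_session() with transport_register_session() to bind a session to its ACL and TPG, both declared in target_core_transport.h. The ERR_PTR error convention shown here is an assumption.

/* Sketch: create and register a fabric session. */
static struct se_session *example_make_session(struct se_portal_group *tpg,
					       struct se_node_acl *acl,
					       void *fabric_sess_ptr)
{
	struct se_session *sess;

	sess = transport_init_session();
	if (IS_ERR(sess))
		return sess;
	transport_register_session(tpg, acl, sess, fabric_sess_ptr);
	return sess;
}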
611 | struct se_device; | ||
612 | struct se_transform_info; | ||
613 | struct scatterlist; | ||
614 | |||
615 | struct se_lun_acl { | ||
616 | char initiatorname[TRANSPORT_IQN_LEN]; | ||
617 | u32 mapped_lun; | ||
618 | struct se_node_acl *se_lun_nacl; | ||
619 | struct se_lun *se_lun; | ||
620 | struct list_head lacl_list; | ||
621 | struct config_group se_lun_group; | ||
622 | } ____cacheline_aligned; | ||
623 | |||
624 | struct se_dev_entry { | ||
625 | int def_pr_registered:1; | ||
626 | /* See transport_lunflags_table */ | ||
627 | u32 lun_flags; | ||
628 | u32 deve_cmds; | ||
629 | u32 mapped_lun; | ||
630 | u32 average_bytes; | ||
631 | u32 last_byte_count; | ||
632 | u32 total_cmds; | ||
633 | u32 total_bytes; | ||
634 | u64 pr_res_key; | ||
635 | u64 creation_time; | ||
636 | u32 attach_count; | ||
637 | u64 read_bytes; | ||
638 | u64 write_bytes; | ||
639 | atomic_t ua_count; | ||
640 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | ||
641 | atomic_t pr_ref_count; | ||
642 | struct se_lun_acl *se_lun_acl; | ||
643 | spinlock_t ua_lock; | ||
644 | struct se_lun *se_lun; | ||
645 | struct list_head alua_port_list; | ||
646 | struct list_head ua_list; | ||
647 | } ____cacheline_aligned; | ||
648 | |||
649 | struct se_dev_limits { | ||
650 | /* Max supported HW queue depth */ | ||
651 | u32 hw_queue_depth; | ||
652 | /* Max supported virtual queue depth */ | ||
653 | u32 queue_depth; | ||
654 | /* From include/linux/blkdev.h for the other HW/SW limits. */ | ||
655 | struct queue_limits limits; | ||
656 | } ____cacheline_aligned; | ||
657 | |||
658 | struct se_dev_attrib { | ||
659 | int emulate_dpo; | ||
660 | int emulate_fua_write; | ||
661 | int emulate_fua_read; | ||
662 | int emulate_write_cache; | ||
663 | int emulate_ua_intlck_ctrl; | ||
664 | int emulate_tas; | ||
665 | int emulate_tpu; | ||
666 | int emulate_tpws; | ||
667 | int emulate_reservations; | ||
668 | int emulate_alua; | ||
669 | int enforce_pr_isids; | ||
670 | u32 hw_block_size; | ||
671 | u32 block_size; | ||
672 | u32 hw_max_sectors; | ||
673 | u32 max_sectors; | ||
674 | u32 optimal_sectors; | ||
675 | u32 hw_queue_depth; | ||
676 | u32 queue_depth; | ||
677 | u32 task_timeout; | ||
678 | u32 max_unmap_lba_count; | ||
679 | u32 max_unmap_block_desc_count; | ||
680 | u32 unmap_granularity; | ||
681 | u32 unmap_granularity_alignment; | ||
682 | struct se_subsystem_dev *da_sub_dev; | ||
683 | struct config_group da_group; | ||
684 | } ____cacheline_aligned; | ||
685 | |||
686 | struct se_subsystem_dev { | ||
687 | /* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */ | ||
688 | #define SE_DEV_ALIAS_LEN 512 | ||
689 | unsigned char se_dev_alias[SE_DEV_ALIAS_LEN]; | ||
690 | /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */ | ||
691 | #define SE_UDEV_PATH_LEN 512 | ||
692 | unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN]; | ||
693 | u32 su_dev_flags; | ||
694 | struct se_hba *se_dev_hba; | ||
695 | struct se_device *se_dev_ptr; | ||
696 | struct se_dev_attrib se_dev_attrib; | ||
697 | /* T10 Asymmetric Logical Unit Assignment for Target Ports */ | ||
698 | struct t10_alua t10_alua; | ||
699 | /* T10 Inquiry and VPD WWN Information */ | ||
700 | struct t10_wwn t10_wwn; | ||
701 | /* T10 SPC-2 + SPC-3 Reservations */ | ||
702 | struct t10_reservation_template t10_reservation; | ||
703 | spinlock_t se_dev_lock; | ||
704 | void *se_dev_su_ptr; | ||
705 | struct list_head g_se_dev_list; | ||
706 | struct config_group se_dev_group; | ||
707 | /* For T10 Reservations */ | ||
708 | struct config_group se_dev_pr_group; | ||
709 | } ____cacheline_aligned; | ||
710 | |||
711 | #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) | ||
712 | #define T10_RES(su_dev) (&(su_dev)->t10_reservation) | ||
713 | #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) | ||
714 | |||
715 | struct se_device { | ||
716 | /* Set to 1 if thread is NOT sleeping on thread_sem */ | ||
717 | u8 thread_active; | ||
718 | u8 dev_status_timer_flags; | ||
719 | /* RELATIVE TARGET PORT IDENTIFIER Counter */ | ||
720 | u16 dev_rpti_counter; | ||
721 | /* Used for SAM Task Attribute ordering */ | ||
722 | u32 dev_cur_ordered_id; | ||
723 | u32 dev_flags; | ||
724 | u32 dev_port_count; | ||
725 | /* See transport_device_status_table */ | ||
726 | u32 dev_status; | ||
727 | u32 dev_tcq_window_closed; | ||
728 | /* Physical device queue depth */ | ||
729 | u32 queue_depth; | ||
730 | /* Used for SPC-2 reservations enforcement of ISIDs */ | ||
731 | u64 dev_res_bin_isid; | ||
732 | t10_task_attr_index_t dev_task_attr_type; | ||
733 | /* Pointer to transport specific device structure */ | ||
734 | void *dev_ptr; | ||
735 | u32 dev_index; | ||
736 | u64 creation_time; | ||
737 | u32 num_resets; | ||
738 | u64 num_cmds; | ||
739 | u64 read_bytes; | ||
740 | u64 write_bytes; | ||
741 | spinlock_t stats_lock; | ||
742 | /* Active commands on this virtual SE device */ | ||
743 | atomic_t active_cmds; | ||
744 | atomic_t simple_cmds; | ||
745 | atomic_t depth_left; | ||
746 | atomic_t dev_ordered_id; | ||
747 | atomic_t dev_tur_active; | ||
748 | atomic_t execute_tasks; | ||
749 | atomic_t dev_status_thr_count; | ||
750 | atomic_t dev_hoq_count; | ||
751 | atomic_t dev_ordered_sync; | ||
752 | struct se_obj dev_obj; | ||
753 | struct se_obj dev_access_obj; | ||
754 | struct se_obj dev_export_obj; | ||
755 | struct se_queue_obj *dev_queue_obj; | ||
756 | struct se_queue_obj *dev_status_queue_obj; | ||
757 | spinlock_t delayed_cmd_lock; | ||
758 | spinlock_t ordered_cmd_lock; | ||
759 | spinlock_t execute_task_lock; | ||
760 | spinlock_t state_task_lock; | ||
761 | spinlock_t dev_alua_lock; | ||
762 | spinlock_t dev_reservation_lock; | ||
763 | spinlock_t dev_state_lock; | ||
764 | spinlock_t dev_status_lock; | ||
765 | spinlock_t dev_status_thr_lock; | ||
766 | spinlock_t se_port_lock; | ||
767 | spinlock_t se_tmr_lock; | ||
768 | /* Used for legacy SPC-2 reservations */ | ||
769 | struct se_node_acl *dev_reserved_node_acl; | ||
770 | /* Used for ALUA Logical Unit Group membership */ | ||
771 | struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; | ||
772 | /* Used for SPC-3 Persistent Reservations */ | ||
773 | struct t10_pr_registration *dev_pr_res_holder; | ||
774 | struct list_head dev_sep_list; | ||
775 | struct list_head dev_tmr_list; | ||
776 | struct timer_list dev_status_timer; | ||
777 | /* Pointer to descriptor for processing thread */ | ||
778 | struct task_struct *process_thread; | ||
779 | pid_t process_thread_pid; | ||
780 | struct task_struct *dev_mgmt_thread; | ||
781 | struct list_head delayed_cmd_list; | ||
782 | struct list_head ordered_cmd_list; | ||
783 | struct list_head execute_task_list; | ||
784 | struct list_head state_task_list; | ||
785 | /* Pointer to associated SE HBA */ | ||
786 | struct se_hba *se_hba; | ||
787 | struct se_subsystem_dev *se_sub_dev; | ||
788 | /* Pointer to template of function pointers for transport */ | ||
789 | struct se_subsystem_api *transport; | ||
790 | /* Linked list for struct se_hba struct se_device list */ | ||
791 | struct list_head dev_list; | ||
792 | /* Linked list for struct se_global->g_se_dev_list */ | ||
793 | struct list_head g_se_dev_list; | ||
794 | } ____cacheline_aligned; | ||
795 | |||
796 | #define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev) | ||
797 | #define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev) | ||
798 | #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) | ||
799 | #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) | ||
800 | |||
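The accessor macros above compose; a minimal hedged example of walking from a command to one of its device attributes:

/* Sketch: fetch the logical block size of the device backing a command. */
static u32 example_cmd_block_size(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);

	return DEV_ATTRIB(dev)->block_size;
}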
801 | struct se_hba { | ||
802 | u16 hba_tpgt; | ||
803 | u32 hba_id; | ||
804 | /* See hba_flags_table */ | ||
805 | u32 hba_flags; | ||
806 | /* Virtual SE devices attached. */ | ||
807 | u32 dev_count; | ||
808 | u32 hba_index; | ||
809 | atomic_t dev_mib_access_count; | ||
810 | atomic_t load_balance_queue; | ||
811 | atomic_t left_queue_depth; | ||
812 | /* Maximum queue depth the HBA can handle. */ | ||
813 | atomic_t max_queue_depth; | ||
814 | /* Pointer to transport specific host structure. */ | ||
815 | void *hba_ptr; | ||
816 | /* Linked list for struct se_device */ | ||
817 | struct list_head hba_dev_list; | ||
818 | struct list_head hba_list; | ||
819 | spinlock_t device_lock; | ||
820 | spinlock_t hba_queue_lock; | ||
821 | struct config_group hba_group; | ||
822 | struct mutex hba_access_mutex; | ||
823 | struct se_subsystem_api *transport; | ||
824 | } ____cacheline_aligned; | ||
825 | |||
826 | #define SE_HBA(d) ((struct se_hba *)(d)->se_hba) | ||
827 | |||
828 | struct se_lun { | ||
829 | /* See transport_lun_status_table */ | ||
830 | enum transport_lun_status_table lun_status; | ||
831 | u32 lun_access; | ||
832 | u32 lun_flags; | ||
833 | u32 unpacked_lun; | ||
834 | atomic_t lun_acl_count; | ||
835 | spinlock_t lun_acl_lock; | ||
836 | spinlock_t lun_cmd_lock; | ||
837 | spinlock_t lun_sep_lock; | ||
838 | struct completion lun_shutdown_comp; | ||
839 | struct list_head lun_cmd_list; | ||
840 | struct list_head lun_acl_list; | ||
841 | struct se_device *lun_se_dev; | ||
842 | struct config_group lun_group; | ||
843 | struct se_port *lun_sep; | ||
844 | } ____cacheline_aligned; | ||
845 | |||
846 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) | ||
847 | |||
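Hedged sketch: a struct se_lun comes into being when a device is exported on a TPG via core_dev_add_lun() from target_core_device.h; reading its u32 argument as the unpacked LUN number is an assumption.

/* Sketch: export an se_device as a LUN on a portal group. */
static struct se_lun *example_export_lun(struct se_portal_group *tpg,
					 struct se_hba *hba,
					 struct se_device *dev,
					 u32 unpacked_lun)
{
	return core_dev_add_lun(tpg, hba, dev, unpacked_lun);
}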
848 | struct se_port { | ||
849 | /* RELATIVE TARGET PORT IDENTIFIER */ | ||
850 | u16 sep_rtpi; | ||
851 | int sep_tg_pt_secondary_stat; | ||
852 | int sep_tg_pt_secondary_write_md; | ||
853 | u32 sep_index; | ||
854 | struct scsi_port_stats sep_stats; | ||
855 | /* Used for ALUA Target Port Groups membership */ | ||
856 | atomic_t sep_tg_pt_gp_active; | ||
857 | atomic_t sep_tg_pt_secondary_offline; | ||
858 | /* Used for PR ALL_TG_PT=1 */ | ||
859 | atomic_t sep_tg_pt_ref_cnt; | ||
860 | spinlock_t sep_alua_lock; | ||
861 | struct mutex sep_tg_pt_md_mutex; | ||
862 | struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; | ||
863 | struct se_lun *sep_lun; | ||
864 | struct se_portal_group *sep_tpg; | ||
865 | struct list_head sep_alua_list; | ||
866 | struct list_head sep_list; | ||
867 | } ____cacheline_aligned; | ||
868 | |||
869 | struct se_tpg_np { | ||
870 | struct config_group tpg_np_group; | ||
871 | } ____cacheline_aligned; | ||
872 | |||
873 | struct se_portal_group { | ||
874 | /* Type of target portal group, see transport_tpg_type_table */ | ||
875 | enum transport_tpg_type_table se_tpg_type; | ||
876 | /* Number of ACLed Initiator Nodes for this TPG */ | ||
877 | u32 num_node_acls; | ||
878 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | ||
879 | atomic_t tpg_pr_ref_count; | ||
880 | /* Spinlock for adding/removing ACLed Nodes */ | ||
881 | spinlock_t acl_node_lock; | ||
882 | /* Spinlock for adding/removing sessions */ | ||
883 | spinlock_t session_lock; | ||
884 | spinlock_t tpg_lun_lock; | ||
885 | /* Pointer to $FABRIC_MOD portal group */ | ||
886 | void *se_tpg_fabric_ptr; | ||
887 | struct list_head se_tpg_list; | ||
888 | /* linked list for initiator ACL list */ | ||
889 | struct list_head acl_node_list; | ||
890 | struct se_lun *tpg_lun_list; | ||
891 | struct se_lun tpg_virt_lun0; | ||
892 | /* List of TCM sessions associated with this TPG */ | ||
893 | struct list_head tpg_sess_list; | ||
894 | /* Pointer to $FABRIC_MOD dependent code */ | ||
895 | struct target_core_fabric_ops *se_tpg_tfo; | ||
896 | struct se_wwn *se_tpg_wwn; | ||
897 | struct config_group tpg_group; | ||
898 | struct config_group *tpg_default_groups[6]; | ||
899 | struct config_group tpg_lun_group; | ||
900 | struct config_group tpg_np_group; | ||
901 | struct config_group tpg_acl_group; | ||
902 | struct config_group tpg_attrib_group; | ||
903 | struct config_group tpg_param_group; | ||
904 | } ____cacheline_aligned; | ||
905 | |||
906 | #define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo) | ||
907 | |||
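A hedged sketch of TPG registration through core_tpg_register() from target_core_tpg.h; TRANSPORT_TPG_TYPE_NORMAL is assumed to be one of the transport_tpg_type_table values referenced by se_tpg_type above.

/* Sketch: register a fabric-provided portal group with the core. */
static int example_register_tpg(struct target_core_fabric_ops *tfo,
				struct se_wwn *wwn,
				struct se_portal_group *tpg,
				void *tpg_fabric_ptr)
{
	return core_tpg_register(tfo, wwn, tpg, tpg_fabric_ptr,
				 TRANSPORT_TPG_TYPE_NORMAL);
}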
908 | struct se_wwn { | ||
909 | struct target_fabric_configfs *wwn_tf; | ||
910 | struct config_group wwn_group; | ||
911 | } ____cacheline_aligned; | ||
912 | |||
913 | struct se_global { | ||
914 | u16 alua_lu_gps_counter; | ||
915 | int g_sub_api_initialized; | ||
916 | u32 in_shutdown; | ||
917 | u32 alua_lu_gps_count; | ||
918 | u32 g_hba_id_counter; | ||
919 | struct config_group target_core_hbagroup; | ||
920 | struct config_group alua_group; | ||
921 | struct config_group alua_lu_gps_group; | ||
922 | struct list_head g_lu_gps_list; | ||
923 | struct list_head g_se_tpg_list; | ||
924 | struct list_head g_hba_list; | ||
925 | struct list_head g_se_dev_list; | ||
926 | struct se_hba *g_lun0_hba; | ||
927 | struct se_subsystem_dev *g_lun0_su_dev; | ||
928 | struct se_device *g_lun0_dev; | ||
929 | struct t10_alua_lu_gp *default_lu_gp; | ||
930 | spinlock_t g_device_lock; | ||
931 | spinlock_t hba_lock; | ||
932 | spinlock_t se_tpg_lock; | ||
933 | spinlock_t lu_gps_lock; | ||
934 | spinlock_t plugin_class_lock; | ||
935 | } ____cacheline_aligned; | ||
936 | |||
937 | #endif /* TARGET_CORE_BASE_H */ | ||
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h new file mode 100644 index 000000000000..40e6e740527c --- /dev/null +++ b/include/target/target_core_configfs.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION | ||
2 | |||
3 | #define TARGET_CORE_CONFIG_ROOT "/sys/kernel/config" | ||
4 | |||
5 | #define TARGET_CORE_NAME_MAX_LEN 64 | ||
6 | #define TARGET_FABRIC_NAME_SIZE 32 | ||
7 | |||
8 | extern struct target_fabric_configfs *target_fabric_configfs_init( | ||
9 | struct module *, const char *); | ||
10 | extern void target_fabric_configfs_free(struct target_fabric_configfs *); | ||
11 | extern int target_fabric_configfs_register(struct target_fabric_configfs *); | ||
12 | extern void target_fabric_configfs_deregister(struct target_fabric_configfs *); | ||
13 | |||
14 | struct target_fabric_configfs_template { | ||
15 | struct config_item_type tfc_discovery_cit; | ||
16 | struct config_item_type tfc_wwn_cit; | ||
17 | struct config_item_type tfc_tpg_cit; | ||
18 | struct config_item_type tfc_tpg_base_cit; | ||
19 | struct config_item_type tfc_tpg_lun_cit; | ||
20 | struct config_item_type tfc_tpg_port_cit; | ||
21 | struct config_item_type tfc_tpg_np_cit; | ||
22 | struct config_item_type tfc_tpg_np_base_cit; | ||
23 | struct config_item_type tfc_tpg_attrib_cit; | ||
24 | struct config_item_type tfc_tpg_param_cit; | ||
25 | struct config_item_type tfc_tpg_nacl_cit; | ||
26 | struct config_item_type tfc_tpg_nacl_base_cit; | ||
27 | struct config_item_type tfc_tpg_nacl_attrib_cit; | ||
28 | struct config_item_type tfc_tpg_nacl_auth_cit; | ||
29 | struct config_item_type tfc_tpg_nacl_param_cit; | ||
30 | struct config_item_type tfc_tpg_mappedlun_cit; | ||
31 | }; | ||
32 | |||
33 | struct target_fabric_configfs { | ||
34 | char tf_name[TARGET_FABRIC_NAME_SIZE]; | ||
35 | atomic_t tf_access_cnt; | ||
36 | struct list_head tf_list; | ||
37 | struct config_group tf_group; | ||
38 | struct config_group tf_disc_group; | ||
39 | struct config_group *tf_default_groups[2]; | ||
40 | /* Pointer to fabric's config_item */ | ||
41 | struct config_item *tf_fabric; | ||
42 | /* Passed from fabric modules */ | ||
43 | struct config_item_type *tf_fabric_cit; | ||
44 | /* Pointer to target core subsystem */ | ||
45 | struct configfs_subsystem *tf_subsys; | ||
46 | /* Pointer to fabric's struct module */ | ||
47 | struct module *tf_module; | ||
48 | struct target_core_fabric_ops tf_ops; | ||
49 | struct target_fabric_configfs_template tf_cit_tmpl; | ||
50 | }; | ||
51 | |||
52 | #define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl) | ||
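The four functions declared at the top of this header imply a registration lifecycle; the following hedged sketch shows the assumed pairing, including that tf_ops must be populated before registering and that init returns NULL on failure.

/* Sketch: fabric module registration lifecycle. */
static int example_register_fabric(struct module *mod)
{
	struct target_fabric_configfs *tf;
	int ret;

	tf = target_fabric_configfs_init(mod, "example");
	if (!tf)
		return -ENOMEM;
	/* ... populate tf->tf_ops with the fabric's callbacks ... */
	ret = target_fabric_configfs_register(tf);
	if (ret < 0) {
		target_fabric_configfs_free(tf);
		return ret;
	}
	return 0;
}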
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h new file mode 100644 index 000000000000..52b18a5752c9 --- /dev/null +++ b/include/target/target_core_device.h | |||
@@ -0,0 +1,61 @@ | |||
1 | #ifndef TARGET_CORE_DEVICE_H | ||
2 | #define TARGET_CORE_DEVICE_H | ||
3 | |||
4 | extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32); | ||
5 | extern int transport_get_lun_for_tmr(struct se_cmd *, u32); | ||
6 | extern struct se_dev_entry *core_get_se_deve_from_rtpi( | ||
7 | struct se_node_acl *, u16); | ||
8 | extern int core_free_device_list_for_node(struct se_node_acl *, | ||
9 | struct se_portal_group *); | ||
10 | extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); | ||
11 | extern void core_update_device_list_access(u32, u32, struct se_node_acl *); | ||
12 | extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32, | ||
13 | u32, struct se_node_acl *, | ||
14 | struct se_portal_group *, int); | ||
15 | extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); | ||
16 | extern int core_dev_export(struct se_device *, struct se_portal_group *, | ||
17 | struct se_lun *); | ||
18 | extern void core_dev_unexport(struct se_device *, struct se_portal_group *, | ||
19 | struct se_lun *); | ||
20 | extern int transport_core_report_lun_response(struct se_cmd *); | ||
21 | extern void se_release_device_for_hba(struct se_device *); | ||
22 | extern void se_release_vpd_for_dev(struct se_device *); | ||
23 | extern void se_clear_dev_ports(struct se_device *); | ||
24 | extern int se_free_virtual_device(struct se_device *, struct se_hba *); | ||
25 | extern int se_dev_check_online(struct se_device *); | ||
26 | extern int se_dev_check_shutdown(struct se_device *); | ||
27 | extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *); | ||
28 | extern int se_dev_set_task_timeout(struct se_device *, u32); | ||
29 | extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | ||
30 | extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | ||
31 | extern int se_dev_set_unmap_granularity(struct se_device *, u32); | ||
32 | extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); | ||
33 | extern int se_dev_set_emulate_dpo(struct se_device *, int); | ||
34 | extern int se_dev_set_emulate_fua_write(struct se_device *, int); | ||
35 | extern int se_dev_set_emulate_fua_read(struct se_device *, int); | ||
36 | extern int se_dev_set_emulate_write_cache(struct se_device *, int); | ||
37 | extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); | ||
38 | extern int se_dev_set_emulate_tas(struct se_device *, int); | ||
39 | extern int se_dev_set_emulate_tpu(struct se_device *, int); | ||
40 | extern int se_dev_set_emulate_tpws(struct se_device *, int); | ||
41 | extern int se_dev_set_enforce_pr_isids(struct se_device *, int); | ||
42 | extern int se_dev_set_queue_depth(struct se_device *, u32); | ||
43 | extern int se_dev_set_max_sectors(struct se_device *, u32); | ||
44 | extern int se_dev_set_optimal_sectors(struct se_device *, u32); | ||
45 | extern int se_dev_set_block_size(struct se_device *, u32); | ||
46 | extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, | ||
47 | struct se_device *, u32); | ||
48 | extern int core_dev_del_lun(struct se_portal_group *, u32); | ||
49 | extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); | ||
50 | extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, | ||
51 | u32, char *, int *); | ||
52 | extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, | ||
53 | struct se_lun_acl *, u32, u32); | ||
54 | extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, | ||
55 | struct se_lun *, struct se_lun_acl *); | ||
56 | extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, | ||
57 | struct se_lun_acl *lacl); | ||
58 | extern int core_dev_setup_virtual_lun0(void); | ||
59 | extern void core_dev_release_virtual_lun0(void); | ||
60 | |||
61 | #endif /* TARGET_CORE_DEVICE_H */ | ||
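Hedged sketch combining two of the declarations above to give an initiator a mapped LUN; the parameter readings (mapped_lun, initiatorname, out-errno pointer, unpacked_lun, lun_access flags) are assumptions inferred from struct se_lun_acl.

/* Sketch: create and attach a per-initiator mapped LUN ACL. */
static int example_add_mapped_lun(struct se_portal_group *tpg,
				  char *initiatorname, u32 mapped_lun,
				  u32 unpacked_lun, u32 lun_access)
{
	struct se_lun_acl *lacl;
	int ret = 0;

	lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
						    initiatorname, &ret);
	if (!lacl)
		return ret ? ret : -EINVAL;
	return core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
						   lun_access);
}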
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h new file mode 100644 index 000000000000..a26fb7586a09 --- /dev/null +++ b/include/target/target_core_fabric_configfs.h | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Used for tfc_wwn_cit attributes | ||
3 | */ | ||
4 | |||
5 | #include <target/configfs_macros.h> | ||
6 | |||
7 | CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl); | ||
8 | #define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode) \ | ||
9 | static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \ | ||
10 | __CONFIGFS_EATTR(_name, _mode, \ | ||
11 | _fabric##_nacl_attrib_show_##_name, \ | ||
12 | _fabric##_nacl_attrib_store_##_name); | ||
13 | |||
14 | CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl); | ||
15 | #define TF_NACL_AUTH_ATTR(_fabric, _name, _mode) \ | ||
16 | static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \ | ||
17 | __CONFIGFS_EATTR(_name, _mode, \ | ||
18 | _fabric##_nacl_auth_show_##_name, \ | ||
19 | _fabric##_nacl_auth_store_##_name); | ||
20 | |||
21 | #define TF_NACL_AUTH_ATTR_RO(_fabric, _name) \ | ||
22 | static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \ | ||
23 | __CONFIGFS_EATTR_RO(_name, \ | ||
24 | _fabric##_nacl_auth_show_##_name); | ||
25 | |||
26 | CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl); | ||
27 | #define TF_NACL_PARAM_ATTR(_fabric, _name, _mode) \ | ||
28 | static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \ | ||
29 | __CONFIGFS_EATTR(_name, _mode, \ | ||
30 | _fabric##_nacl_param_show_##_name, \ | ||
31 | _fabric##_nacl_param_store_##_name); | ||
32 | |||
33 | #define TF_NACL_PARAM_ATTR_RO(_fabric, _name) \ | ||
34 | static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \ | ||
35 | __CONFIGFS_EATTR_RO(_name, \ | ||
36 | _fabric##_nacl_param_show_##_name); | ||
37 | |||
38 | |||
39 | CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl); | ||
40 | #define TF_NACL_BASE_ATTR(_fabric, _name, _mode) \ | ||
41 | static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \ | ||
42 | __CONFIGFS_EATTR(_name, _mode, \ | ||
43 | _fabric##_nacl_show_##_name, \ | ||
44 | _fabric##_nacl_store_##_name); | ||
45 | |||
46 | #define TF_NACL_BASE_ATTR_RO(_fabric, _name) \ | ||
47 | static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \ | ||
48 | __CONFIGFS_EATTR_RO(_name, \ | ||
49 | _fabric##_nacl_show_##_name); | ||
50 | |||
51 | CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np); | ||
52 | #define TF_NP_BASE_ATTR(_fabric, _name, _mode) \ | ||
53 | static struct target_fabric_np_base_attribute _fabric##_np_##_name = \ | ||
54 | __CONFIGFS_EATTR(_name, _mode, \ | ||
55 | _fabric##_np_show_##_name, \ | ||
56 | _fabric##_np_store_##_name); | ||
57 | |||
58 | CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group); | ||
59 | #define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode) \ | ||
60 | static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \ | ||
61 | __CONFIGFS_EATTR(_name, _mode, \ | ||
62 | _fabric##_tpg_attrib_show_##_name, \ | ||
63 | _fabric##_tpg_attrib_store_##_name); | ||
64 | |||
65 | |||
66 | CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group); | ||
67 | #define TF_TPG_PARAM_ATTR(_fabric, _name, _mode) \ | ||
68 | static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = \ | ||
69 | __CONFIGFS_EATTR(_name, _mode, \ | ||
70 | _fabric##_tpg_param_show_##_name, \ | ||
71 | _fabric##_tpg_param_store_##_name); | ||
72 | |||
73 | |||
74 | CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group); | ||
75 | #define TF_TPG_BASE_ATTR(_fabric, _name, _mode) \ | ||
76 | static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \ | ||
77 | __CONFIGFS_EATTR(_name, _mode, \ | ||
78 | _fabric##_tpg_show_##_name, \ | ||
79 | _fabric##_tpg_store_##_name); | ||
80 | |||
81 | |||
82 | CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs); | ||
83 | #define TF_WWN_ATTR(_fabric, _name, _mode) \ | ||
84 | static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \ | ||
85 | __CONFIGFS_EATTR(_name, _mode, \ | ||
86 | _fabric##_wwn_show_attr_##_name, \ | ||
87 | _fabric##_wwn_store_attr_##_name); | ||
88 | |||
89 | #define TF_WWN_ATTR_RO(_fabric, _name) \ | ||
90 | static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \ | ||
91 | __CONFIGFS_EATTR_RO(_name, \ | ||
92 | _fabric##_wwn_show_attr_##_name); | ||
93 | |||
94 | CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs); | ||
95 | #define TF_DISC_ATTR(_fabric, _name, _mode) \ | ||
96 | static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \ | ||
97 | __CONFIGFS_EATTR(_name, _mode, \ | ||
98 | _fabric##_disc_show_##_name, \ | ||
99 | _fabric##_disc_store_##_name); | ||
100 | |||
101 | #define TF_DISC_ATTR_RO(_fabric, _name) \ | ||
102 | static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \ | ||
103 | __CONFIGFS_EATTR_RO(_name, \ | ||
104 | _fabric##_disc_show_##_name); | ||
105 | |||
106 | extern int target_fabric_setup_cits(struct target_fabric_configfs *); | ||
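These macros token-paste the fabric name into attribute handler names. A hedged usage sketch for a hypothetical fabric called "demo" follows: the show/store functions must be defined first, with signatures assumed to be the ones generated by CONFIGFS_EATTR_STRUCT() in configfs_macros.h.

/* Sketch: declaring a read-write TPG base attribute named "nexus". */
static ssize_t demo_tpg_show_nexus(struct se_portal_group *se_tpg, char *page)
{
	return 0;	/* format the current nexus into page */
}

static ssize_t demo_tpg_store_nexus(struct se_portal_group *se_tpg,
				    const char *page, size_t count)
{
	return count;	/* parse and apply the new nexus */
}

TF_TPG_BASE_ATTR(demo, nexus, S_IRUGO | S_IWUSR);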
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h new file mode 100644 index 000000000000..c2f8d0e3a03b --- /dev/null +++ b/include/target/target_core_fabric_lib.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef TARGET_CORE_FABRIC_LIB_H | ||
2 | #define TARGET_CORE_FABRIC_LIB_H | ||
3 | |||
4 | extern u8 sas_get_fabric_proto_ident(struct se_portal_group *); | ||
5 | extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, | ||
6 | struct t10_pr_registration *, int *, unsigned char *); | ||
7 | extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, | ||
8 | struct t10_pr_registration *, int *); | ||
9 | extern char *sas_parse_pr_out_transport_id(struct se_portal_group *, | ||
10 | const char *, u32 *, char **); | ||
11 | |||
12 | extern u8 fc_get_fabric_proto_ident(struct se_portal_group *); | ||
13 | extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, | ||
14 | struct t10_pr_registration *, int *, unsigned char *); | ||
15 | extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, | ||
16 | struct t10_pr_registration *, int *); | ||
17 | extern char *fc_parse_pr_out_transport_id(struct se_portal_group *, | ||
18 | const char *, u32 *, char **); | ||
19 | |||
20 | extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *); | ||
21 | extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *, | ||
22 | struct t10_pr_registration *, int *, unsigned char *); | ||
23 | extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *, | ||
24 | struct t10_pr_registration *, int *); | ||
25 | extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, | ||
26 | const char *, u32 *, char **); | ||
27 | |||
28 | #endif /* TARGET_CORE_FABRIC_LIB_H */ | ||
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h new file mode 100644 index 000000000000..f3ac12b019c2 --- /dev/null +++ b/include/target/target_core_fabric_ops.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* Defined in target_core_configfs.h */ | ||
2 | struct target_fabric_configfs; | ||
3 | |||
4 | struct target_core_fabric_ops { | ||
5 | struct configfs_subsystem *tf_subsys; | ||
6 | /* | ||
7 | * Optional to signal struct se_task->task_sg[] padding entries | ||
8 | * for scatterlist chaining using transport_do_task_sg_chain(), | ||
9 | * disabled by default | ||
10 | */ | ||
11 | int task_sg_chaining:1; | ||
12 | char *(*get_fabric_name)(void); | ||
13 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | ||
14 | char *(*tpg_get_wwn)(struct se_portal_group *); | ||
15 | u16 (*tpg_get_tag)(struct se_portal_group *); | ||
16 | u32 (*tpg_get_default_depth)(struct se_portal_group *); | ||
17 | u32 (*tpg_get_pr_transport_id)(struct se_portal_group *, | ||
18 | struct se_node_acl *, | ||
19 | struct t10_pr_registration *, int *, | ||
20 | unsigned char *); | ||
21 | u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *, | ||
22 | struct se_node_acl *, | ||
23 | struct t10_pr_registration *, int *); | ||
24 | char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *, | ||
25 | const char *, u32 *, char **); | ||
26 | int (*tpg_check_demo_mode)(struct se_portal_group *); | ||
27 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); | ||
28 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); | ||
29 | int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); | ||
30 | struct se_node_acl *(*tpg_alloc_fabric_acl)( | ||
31 | struct se_portal_group *); | ||
32 | void (*tpg_release_fabric_acl)(struct se_portal_group *, | ||
33 | struct se_node_acl *); | ||
34 | u32 (*tpg_get_inst_index)(struct se_portal_group *); | ||
35 | /* | ||
36 | * Optional function pointer for TCM to perform command map | ||
37 | * from TCM processing thread context, for those struct se_cmd | ||
38 | * initially allocated in interrupt context. | ||
39 | */ | ||
40 | int (*new_cmd_map)(struct se_cmd *); | ||
41 | /* | ||
42 | * Optional function pointer for TCM fabric modules that use | ||
43 | * Linux/NET sockets to allocate struct iovec array to struct se_cmd | ||
44 | */ | ||
45 | int (*alloc_cmd_iovecs)(struct se_cmd *); | ||
46 | /* | ||
47 | * Optional to release struct se_cmd and fabric-dependent allocated | ||
48 | * I/O descriptor in transport_cmd_check_stop() | ||
49 | */ | ||
50 | void (*check_stop_free)(struct se_cmd *); | ||
51 | void (*release_cmd_to_pool)(struct se_cmd *); | ||
52 | void (*release_cmd_direct)(struct se_cmd *); | ||
53 | /* | ||
54 | * Called with spin_lock_bh(struct se_portal_group->session_lock) held. | ||
55 | */ | ||
56 | int (*shutdown_session)(struct se_session *); | ||
57 | void (*close_session)(struct se_session *); | ||
58 | void (*stop_session)(struct se_session *, int, int); | ||
59 | void (*fall_back_to_erl0)(struct se_session *); | ||
60 | int (*sess_logged_in)(struct se_session *); | ||
61 | u32 (*sess_get_index)(struct se_session *); | ||
62 | /* | ||
63 | * Used only for SCSI fabrics that contain multi-value TransportIDs | ||
64 | * (like iSCSI). All other SCSI fabrics should set this to NULL. | ||
65 | */ | ||
66 | u32 (*sess_get_initiator_sid)(struct se_session *, | ||
67 | unsigned char *, u32); | ||
68 | int (*write_pending)(struct se_cmd *); | ||
69 | int (*write_pending_status)(struct se_cmd *); | ||
70 | void (*set_default_node_attributes)(struct se_node_acl *); | ||
71 | u32 (*get_task_tag)(struct se_cmd *); | ||
72 | int (*get_cmd_state)(struct se_cmd *); | ||
73 | void (*new_cmd_failure)(struct se_cmd *); | ||
74 | int (*queue_data_in)(struct se_cmd *); | ||
75 | int (*queue_status)(struct se_cmd *); | ||
76 | int (*queue_tm_rsp)(struct se_cmd *); | ||
77 | u16 (*set_fabric_sense_len)(struct se_cmd *, u32); | ||
78 | u16 (*get_fabric_sense_len)(void); | ||
79 | int (*is_state_remove)(struct se_cmd *); | ||
80 | u64 (*pack_lun)(unsigned int); | ||
81 | /* | ||
82 | * fabric module calls for target_core_fabric_configfs.c | ||
83 | */ | ||
84 | struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, | ||
85 | struct config_group *, const char *); | ||
86 | void (*fabric_drop_wwn)(struct se_wwn *); | ||
87 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, | ||
88 | struct config_group *, const char *); | ||
89 | void (*fabric_drop_tpg)(struct se_portal_group *); | ||
90 | int (*fabric_post_link)(struct se_portal_group *, | ||
91 | struct se_lun *); | ||
92 | void (*fabric_pre_unlink)(struct se_portal_group *, | ||
93 | struct se_lun *); | ||
94 | struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, | ||
95 | struct config_group *, const char *); | ||
96 | void (*fabric_drop_np)(struct se_tpg_np *); | ||
97 | struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, | ||
98 | struct config_group *, const char *); | ||
99 | void (*fabric_drop_nodeacl)(struct se_node_acl *); | ||
100 | }; | ||
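Illustrative only: a fabric module instantiates this ops table statically and wires in its callbacks. Which hooks are mandatory beyond the optional ones called out in the comments above is an assumption in this sketch.

/* Sketch: two stub callbacks and a partially populated ops table for a
 * hypothetical fabric. */
static char *example_get_fabric_name(void)
{
	return "example";
}

static u16 example_tpg_get_tag(struct se_portal_group *se_tpg)
{
	return 1;	/* single-TPG fabric */
}

static struct target_core_fabric_ops example_ops = {
	.get_fabric_name	= example_get_fabric_name,
	.tpg_get_tag		= example_tpg_get_tag,
	/* ... the remaining callbacks would be wired up here ... */
};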
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h new file mode 100644 index 000000000000..6c8248bc2c66 --- /dev/null +++ b/include/target/target_core_tmr.h | |||
@@ -0,0 +1,43 @@ | |||
1 | #ifndef TARGET_CORE_TMR_H | ||
2 | #define TARGET_CORE_TMR_H | ||
3 | |||
4 | /* task management function values */ | ||
5 | #ifdef ABORT_TASK | ||
6 | #undef ABORT_TASK | ||
7 | #endif /* ABORT_TASK */ | ||
8 | #define ABORT_TASK 1 | ||
9 | #ifdef ABORT_TASK_SET | ||
10 | #undef ABORT_TASK_SET | ||
11 | #endif /* ABORT_TASK_SET */ | ||
12 | #define ABORT_TASK_SET 2 | ||
13 | #ifdef CLEAR_ACA | ||
14 | #undef CLEAR_ACA | ||
15 | #endif /* CLEAR_ACA */ | ||
16 | #define CLEAR_ACA 3 | ||
17 | #ifdef CLEAR_TASK_SET | ||
18 | #undef CLEAR_TASK_SET | ||
19 | #endif /* CLEAR_TASK_SET */ | ||
20 | #define CLEAR_TASK_SET 4 | ||
21 | #define LUN_RESET 5 | ||
22 | #define TARGET_WARM_RESET 6 | ||
23 | #define TARGET_COLD_RESET 7 | ||
24 | #define TASK_REASSIGN 8 | ||
25 | |||
26 | /* task management response values */ | ||
27 | #define TMR_FUNCTION_COMPLETE 0 | ||
28 | #define TMR_TASK_DOES_NOT_EXIST 1 | ||
29 | #define TMR_LUN_DOES_NOT_EXIST 2 | ||
30 | #define TMR_TASK_STILL_ALLEGIANT 3 | ||
31 | #define TMR_TASK_FAILOVER_NOT_SUPPORTED 4 | ||
32 | #define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED 5 | ||
33 | #define TMR_FUNCTION_AUTHORIZATION_FAILED 6 | ||
34 | #define TMR_FUNCTION_REJECTED 255 | ||
35 | |||
36 | extern struct kmem_cache *se_tmr_req_cache; | ||
37 | |||
38 | extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8); | ||
39 | extern void core_tmr_release_req(struct se_tmr_req *); | ||
40 | extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, | ||
41 | struct list_head *, struct se_cmd *); | ||
42 | |||
43 | #endif /* TARGET_CORE_TMR_H */ | ||
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h new file mode 100644 index 000000000000..77e18729c4c1 --- /dev/null +++ b/include/target/target_core_tpg.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #ifndef TARGET_CORE_TPG_H | ||
2 | #define TARGET_CORE_TPG_H | ||
3 | |||
4 | extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, | ||
5 | const char *); | ||
6 | extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, | ||
7 | unsigned char *); | ||
8 | extern void core_tpg_add_node_to_devs(struct se_node_acl *, | ||
9 | struct se_portal_group *); | ||
10 | extern struct se_node_acl *core_tpg_check_initiator_node_acl( | ||
11 | struct se_portal_group *, | ||
12 | unsigned char *); | ||
13 | extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); | ||
14 | extern void core_tpg_wait_for_mib_ref(struct se_node_acl *); | ||
15 | extern void core_tpg_clear_object_luns(struct se_portal_group *); | ||
16 | extern struct se_node_acl *core_tpg_add_initiator_node_acl( | ||
17 | struct se_portal_group *, | ||
18 | struct se_node_acl *, | ||
19 | const char *, u32); | ||
20 | extern int core_tpg_del_initiator_node_acl(struct se_portal_group *, | ||
21 | struct se_node_acl *, int); | ||
22 | extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *, | ||
23 | unsigned char *, u32, int); | ||
24 | extern int core_tpg_register(struct target_core_fabric_ops *, | ||
25 | struct se_wwn *, | ||
26 | struct se_portal_group *, void *, | ||
27 | int); | ||
28 | extern int core_tpg_deregister(struct se_portal_group *); | ||
29 | extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); | ||
30 | extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32, | ||
31 | void *); | ||
32 | extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *); | ||
33 | extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); | ||
34 | |||
35 | #endif /* TARGET_CORE_TPG_H */ | ||
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h new file mode 100644 index 000000000000..66f44e56eb80 --- /dev/null +++ b/include/target/target_core_transport.h | |||
@@ -0,0 +1,351 @@ | |||
1 | #ifndef TARGET_CORE_TRANSPORT_H | ||
2 | #define TARGET_CORE_TRANSPORT_H | ||
3 | |||
4 | #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION | ||
5 | |||
6 | /* Attempts before moving from SHORT to LONG */ | ||
7 | #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 | ||
8 | #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ | ||
9 | #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */ | ||
10 | |||
11 | #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ | ||
12 | |||
13 | #define PYX_TRANSPORT_SENT_TO_TRANSPORT 0 | ||
14 | #define PYX_TRANSPORT_WRITE_PENDING 1 | ||
15 | |||
16 | #define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1 | ||
17 | #define PYX_TRANSPORT_HBA_QUEUE_FULL -2 | ||
18 | #define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3 | ||
19 | #define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4 | ||
20 | #define PYX_TRANSPORT_INVALID_CDB_FIELD -5 | ||
21 | #define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6 | ||
22 | #define PYX_TRANSPORT_LU_COMM_FAILURE -7 | ||
23 | #define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8 | ||
24 | #define PYX_TRANSPORT_WRITE_PROTECTED -9 | ||
25 | #define PYX_TRANSPORT_TASK_TIMEOUT -10 | ||
26 | #define PYX_TRANSPORT_RESERVATION_CONFLICT -11 | ||
27 | #define PYX_TRANSPORT_ILLEGAL_REQUEST -12 | ||
28 | #define PYX_TRANSPORT_USE_SENSE_REASON -13 | ||
29 | |||
30 | #ifndef SAM_STAT_RESERVATION_CONFLICT | ||
31 | #define SAM_STAT_RESERVATION_CONFLICT 0x18 | ||
32 | #endif | ||
33 | |||
34 | #define TRANSPORT_PLUGIN_FREE 0 | ||
35 | #define TRANSPORT_PLUGIN_REGISTERED 1 | ||
36 | |||
37 | #define TRANSPORT_PLUGIN_PHBA_PDEV 1 | ||
38 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 | ||
39 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 | ||
40 | |||
41 | /* For SE OBJ Plugins, in seconds */ | ||
42 | #define TRANSPORT_TIMEOUT_TUR 10 | ||
43 | #define TRANSPORT_TIMEOUT_TYPE_DISK 60 | ||
44 | #define TRANSPORT_TIMEOUT_TYPE_ROM 120 | ||
45 | #define TRANSPORT_TIMEOUT_TYPE_TAPE 600 | ||
46 | #define TRANSPORT_TIMEOUT_TYPE_OTHER 300 | ||
47 | |||
48 | /* For se_task->task_state_flags */ | ||
49 | #define TSF_EXCEPTION_CLEARED 0x01 | ||
50 | |||
51 | /* | ||
52 | * struct se_subsystem_dev->su_dev_flags | ||
53 | */ | ||
54 | #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001 | ||
55 | #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002 | ||
56 | #define SDF_USING_UDEV_PATH 0x00000004 | ||
57 | #define SDF_USING_ALIAS 0x00000008 | ||
58 | |||
59 | /* | ||
60 | * struct se_device->dev_flags | ||
61 | */ | ||
62 | #define DF_READ_ONLY 0x00000001 | ||
63 | #define DF_SPC2_RESERVATIONS 0x00000002 | ||
64 | #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004 | ||
65 | |||
66 | /* struct se_dev_attrib sanity values */ | ||
67 | /* 10 Minutes */ | ||
68 | #define DA_TASK_TIMEOUT_MAX 600 | ||
69 | /* Default max_unmap_lba_count */ | ||
70 | #define DA_MAX_UNMAP_LBA_COUNT 0 | ||
71 | /* Default max_unmap_block_desc_count */ | ||
72 | #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0 | ||
73 | /* Default unmap_granularity */ | ||
74 | #define DA_UNMAP_GRANULARITY_DEFAULT 0 | ||
75 | /* Default unmap_granularity_alignment */ | ||
76 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 | ||
77 | /* Emulation for Direct Page Out */ | ||
78 | #define DA_EMULATE_DPO 0 | ||
79 | /* Emulation for Forced Unit Access WRITEs */ | ||
80 | #define DA_EMULATE_FUA_WRITE 1 | ||
81 | /* Emulation for Forced Unit Access READs */ | ||
82 | #define DA_EMULATE_FUA_READ 0 | ||
83 | /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ | ||
84 | #define DA_EMULATE_WRITE_CACHE 0 | ||
85 | /* Emulation for UNIT ATTENTION Interlock Control */ | ||
86 | #define DA_EMULATE_UA_INTLLCK_CTRL 0 | ||
87 | /* Emulation for TASK_ABORTED status (TAS) by default */ | ||
88 | #define DA_EMULATE_TAS 1 | ||
89 | /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */ | ||
90 | #define DA_EMULATE_TPU 0 | ||
91 | /* | ||
92 | * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using | ||
93 | * block/blk-lib.c:blkdev_issue_discard() | ||
94 | */ | ||
95 | #define DA_EMULATE_TPWS 0 | ||
96 | /* No Emulation for PSCSI by default */ | ||
97 | #define DA_EMULATE_RESERVATIONS 0 | ||
98 | /* No Emulation for PSCSI by default */ | ||
99 | #define DA_EMULATE_ALUA 0 | ||
100 | /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ | ||
101 | #define DA_ENFORCE_PR_ISIDS 1 | ||
102 | #define DA_STATUS_MAX_SECTORS_MIN 16 | ||
103 | #define DA_STATUS_MAX_SECTORS_MAX 8192 | ||
104 | |||
105 | #define SE_MODE_PAGE_BUF 512 | ||
106 | |||
107 | #define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs)) | ||
108 | |||
109 | struct se_mem; | ||
110 | struct se_subsystem_api; | ||
111 | |||
112 | extern int init_se_global(void); | ||
113 | extern void release_se_global(void); | ||
114 | extern void transport_init_queue_obj(struct se_queue_obj *); | ||
115 | extern int transport_subsystem_check_init(void); | ||
116 | extern int transport_subsystem_register(struct se_subsystem_api *); | ||
117 | extern void transport_subsystem_release(struct se_subsystem_api *); | ||
118 | extern void transport_load_plugins(void); | ||
119 | extern struct se_session *transport_init_session(void); | ||
120 | extern void __transport_register_session(struct se_portal_group *, | ||
121 | struct se_node_acl *, | ||
122 | struct se_session *, void *); | ||
123 | extern void transport_register_session(struct se_portal_group *, | ||
124 | struct se_node_acl *, | ||
125 | struct se_session *, void *); | ||
126 | extern void transport_free_session(struct se_session *); | ||
127 | extern void transport_deregister_session_configfs(struct se_session *); | ||
128 | extern void transport_deregister_session(struct se_session *); | ||
129 | extern void transport_cmd_finish_abort(struct se_cmd *, int); | ||
130 | extern void transport_cmd_finish_abort_tmr(struct se_cmd *); | ||
131 | extern void transport_complete_sync_cache(struct se_cmd *, int); | ||
132 | extern void transport_complete_task(struct se_task *, int); | ||
133 | extern void transport_add_task_to_execute_queue(struct se_task *, | ||
134 | struct se_task *, | ||
135 | struct se_device *); | ||
136 | extern unsigned char *transport_dump_cmd_direction(struct se_cmd *); | ||
137 | extern void transport_dump_dev_state(struct se_device *, char *, int *); | ||
138 | extern void transport_dump_dev_info(struct se_device *, struct se_lun *, | ||
139 | unsigned long long, char *, int *); | ||
140 | extern void transport_dump_vpd_proto_id(struct t10_vpd *, | ||
141 | unsigned char *, int); | ||
142 | extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); | ||
143 | extern int transport_dump_vpd_assoc(struct t10_vpd *, | ||
144 | unsigned char *, int); | ||
145 | extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); | ||
146 | extern int transport_dump_vpd_ident_type(struct t10_vpd *, | ||
147 | unsigned char *, int); | ||
148 | extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); | ||
149 | extern int transport_dump_vpd_ident(struct t10_vpd *, | ||
150 | unsigned char *, int); | ||
151 | extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); | ||
152 | extern struct se_device *transport_add_device_to_core_hba(struct se_hba *, | ||
153 | struct se_subsystem_api *, | ||
154 | struct se_subsystem_dev *, u32, | ||
155 | void *, struct se_dev_limits *, | ||
156 | const char *, const char *); | ||
157 | extern void transport_device_setup_cmd(struct se_cmd *); | ||
158 | extern void transport_init_se_cmd(struct se_cmd *, | ||
159 | struct target_core_fabric_ops *, | ||
160 | struct se_session *, u32, int, int, | ||
161 | unsigned char *); | ||
162 | extern void transport_free_se_cmd(struct se_cmd *); | ||
163 | extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); | ||
164 | extern int transport_generic_handle_cdb(struct se_cmd *); | ||
165 | extern int transport_generic_handle_cdb_map(struct se_cmd *); | ||
166 | extern int transport_generic_handle_data(struct se_cmd *); | ||
167 | extern void transport_new_cmd_failure(struct se_cmd *); | ||
168 | extern int transport_generic_handle_tmr(struct se_cmd *); | ||
169 | extern void __transport_stop_task_timer(struct se_task *, unsigned long *); | ||
170 | extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); | ||
171 | extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, | ||
172 | struct scatterlist *, u32); | ||
173 | extern int transport_clear_lun_from_sessions(struct se_lun *); | ||
174 | extern int transport_check_aborted_status(struct se_cmd *, int); | ||
175 | extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); | ||
176 | extern void transport_send_task_abort(struct se_cmd *); | ||
177 | extern void transport_release_cmd_to_pool(struct se_cmd *); | ||
178 | extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); | ||
179 | extern void transport_generic_wait_for_cmds(struct se_cmd *, int); | ||
180 | extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); | ||
181 | extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, | ||
182 | void *, struct se_mem *, | ||
183 | struct se_mem **, u32 *, u32 *); | ||
184 | extern void transport_do_task_sg_chain(struct se_cmd *); | ||
185 | extern void transport_generic_process_write(struct se_cmd *); | ||
186 | extern int transport_generic_do_tmr(struct se_cmd *); | ||
187 | /* From target_core_alua.c */ | ||
188 | extern int core_alua_check_nonop_delay(struct se_cmd *); | ||
189 | |||
190 | /* | ||
191 | * Each struct se_transport_task can have N possible struct se_task's | ||
192 | * for the storage transport(s) to possibly execute. | ||
193 | * Used primarily for splitting up CDBs that exceed the physical storage | ||
194 | * HBA's maximum sector count per task. | ||
195 | */ | ||
196 | struct se_mem { | ||
197 | struct page *se_page; | ||
198 | u32 se_len; | ||
199 | u32 se_off; | ||
200 | struct list_head se_list; | ||
201 | } ____cacheline_aligned; | ||
202 | |||
203 | /* | ||
204 | * Each type of disk transport supported MUST have a template defined | ||
205 | * within its .h file. | ||
206 | */ | ||
207 | struct se_subsystem_api { | ||
208 | /* | ||
209 | * The Name. :-) | ||
210 | */ | ||
211 | char name[16]; | ||
212 | /* | ||
213 | * Transport Type. | ||
214 | */ | ||
215 | u8 transport_type; | ||
216 | /* | ||
217 | * struct module for struct se_hba references | ||
218 | */ | ||
219 | struct module *owner; | ||
220 | /* | ||
221 | * Used for global se_subsystem_api list_head | ||
222 | */ | ||
223 | struct list_head sub_api_list; | ||
224 | /* | ||
225 | * For SCF_SCSI_NON_DATA_CDB | ||
226 | */ | ||
227 | int (*cdb_none)(struct se_task *); | ||
228 | /* | ||
229 | * For SCF_SCSI_CONTROL_NONSG_IO_CDB | ||
230 | */ | ||
231 | int (*map_task_non_SG)(struct se_task *); | ||
232 | /* | ||
233 | * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB | ||
234 | */ | ||
235 | int (*map_task_SG)(struct se_task *); | ||
236 | /* | ||
237 | * attach_hba(): | ||
238 | */ | ||
239 | int (*attach_hba)(struct se_hba *, u32); | ||
240 | /* | ||
241 | * detach_hba(): | ||
242 | */ | ||
243 | void (*detach_hba)(struct se_hba *); | ||
244 | /* | ||
245 | * pmode_enable_hba(): Used for TCM/pSCSI subsystem plugin HBA -> | ||
246 | * Linux/SCSI struct Scsi_Host passthrough | ||
247 | */ | ||
248 | int (*pmode_enable_hba)(struct se_hba *, unsigned long); | ||
249 | /* | ||
250 | * allocate_virtdevice(): | ||
251 | */ | ||
252 | void *(*allocate_virtdevice)(struct se_hba *, const char *); | ||
253 | /* | ||
254 | * create_virtdevice(): Only for Virtual HBAs | ||
255 | */ | ||
256 | struct se_device *(*create_virtdevice)(struct se_hba *, | ||
257 | struct se_subsystem_dev *, void *); | ||
258 | /* | ||
259 | * free_device(): | ||
260 | */ | ||
261 | void (*free_device)(void *); | ||
262 | |||
263 | /* | ||
264 | * dpo_emulated(): | ||
265 | */ | ||
266 | int (*dpo_emulated)(struct se_device *); | ||
267 | /* | ||
268 | * fua_write_emulated(): | ||
269 | */ | ||
270 | int (*fua_write_emulated)(struct se_device *); | ||
271 | /* | ||
272 | * fua_read_emulated(): | ||
273 | */ | ||
274 | int (*fua_read_emulated)(struct se_device *); | ||
275 | /* | ||
276 | * write_cache_emulated(): | ||
277 | */ | ||
278 | int (*write_cache_emulated)(struct se_device *); | ||
279 | /* | ||
280 | * transport_complete(): | ||
281 | * | ||
282 | * Use transport_generic_complete() for the majority of DAS transport | ||
283 | * drivers. Provided as a convenience. | ||
284 | */ | ||
285 | int (*transport_complete)(struct se_task *task); | ||
286 | struct se_task *(*alloc_task)(struct se_cmd *); | ||
287 | /* | ||
288 | * do_task(): | ||
289 | */ | ||
290 | int (*do_task)(struct se_task *); | ||
291 | /* | ||
292 | * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate | ||
293 | * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard | ||
294 | */ | ||
295 | int (*do_discard)(struct se_device *, sector_t, u32); | ||
296 | /* | ||
297 | * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate | ||
298 | * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush() | ||
299 | */ | ||
300 | void (*do_sync_cache)(struct se_task *); | ||
301 | /* | ||
302 | * free_task(): | ||
303 | */ | ||
304 | void (*free_task)(struct se_task *); | ||
305 | /* | ||
306 | * check_configfs_dev_params(): | ||
307 | */ | ||
308 | ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *); | ||
309 | /* | ||
310 | * set_configfs_dev_params(): | ||
311 | */ | ||
312 | ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, | ||
313 | const char *, ssize_t); | ||
314 | /* | ||
315 | * show_configfs_dev_params(): | ||
316 | */ | ||
317 | ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, | ||
318 | char *); | ||
319 | /* | ||
320 | * get_cdb(): | ||
321 | */ | ||
322 | unsigned char *(*get_cdb)(struct se_task *); | ||
323 | /* | ||
324 | * get_device_rev(): | ||
325 | */ | ||
326 | u32 (*get_device_rev)(struct se_device *); | ||
327 | /* | ||
328 | * get_device_type(): | ||
329 | */ | ||
330 | u32 (*get_device_type)(struct se_device *); | ||
331 | /* | ||
332 | * Get the sector_t from a subsystem backstore. | ||
333 | */ | ||
334 | sector_t (*get_blocks)(struct se_device *); | ||
335 | /* | ||
336 | * do_se_mem_map(): | ||
337 | */ | ||
338 | int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, | ||
339 | struct se_mem *, struct se_mem **, u32 *, u32 *); | ||
340 | /* | ||
341 | * get_sense_buffer(): | ||
342 | */ | ||
343 | unsigned char *(*get_sense_buffer)(struct se_task *); | ||
344 | } ____cacheline_aligned; | ||
345 | |||
346 | #define TRANSPORT(dev) ((dev)->transport) | ||
347 | #define HBA_TRANSPORT(hba) ((hba)->transport) | ||
348 | |||
349 | extern struct se_global *se_global; | ||
350 | |||
351 | #endif /* TARGET_CORE_TRANSPORT_H */ | ||
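To close the loop, a hedged sketch of how a backstore plugin described by struct se_subsystem_api would register itself, using transport_subsystem_register()/transport_subsystem_release() declared above; the module init/exit pairing is an assumption.

/* Sketch: skeleton of a virtual backstore plugin registering with the
 * target core.  TRANSPORT_PLUGIN_VHBA_VDEV is defined earlier in this
 * header. */
static struct se_subsystem_api example_template = {
	.name			= "example",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.owner			= THIS_MODULE,
	/* .attach_hba, .detach_hba, .create_virtdevice, .do_task, ...
	 * would point at the plugin's implementations. */
};

static int __init example_module_init(void)
{
	return transport_subsystem_register(&example_template);
}

static void __exit example_module_exit(void)
{
	transport_subsystem_release(&example_template);
}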