 Documentation/target/tcmu-design.txt         |  378
 drivers/infiniband/ulp/isert/ib_isert.c      |    6
 drivers/scsi/qla2xxx/qla_target.c            |   35
 drivers/scsi/qla2xxx/qla_target.h            |   12
 drivers/scsi/qla2xxx/tcm_qla2xxx.c           |   11
 drivers/target/Kconfig                       |    7
 drivers/target/Makefile                      |    1
 drivers/target/iscsi/iscsi_target.c          |    6
 drivers/target/iscsi/iscsi_target_configfs.c |   10
 drivers/target/iscsi/iscsi_target_erl0.c     |    6
 drivers/target/iscsi/iscsi_target_login.c    |    8
 drivers/target/iscsi/iscsi_target_util.c     |    5
 drivers/target/loopback/tcm_loop.c           |   29
 drivers/target/target_core_alua.c            |   33
 drivers/target/target_core_configfs.c        |   26
 drivers/target/target_core_device.c          |   48
 drivers/target/target_core_fabric_configfs.c |   13
 drivers/target/target_core_fabric_lib.c      |    6
 drivers/target/target_core_file.c            |   13
 drivers/target/target_core_internal.h        |    6
 drivers/target/target_core_pr.c              |  107
 drivers/target/target_core_pr.h              |    2
 drivers/target/target_core_pscsi.c           |   16
 drivers/target/target_core_sbc.c             |    2
 drivers/target/target_core_tmr.c             |   24
 drivers/target/target_core_tpg.c             |   53
 drivers/target/target_core_transport.c       |   27
 drivers/target/target_core_ua.c              |   15
 drivers/target/target_core_ua.h              |    1
 drivers/target/target_core_user.c            | 1167
 drivers/target/tcm_fc/tfc_sess.c             |    2
 drivers/uio/uio.c                            |   12
 include/linux/uio_driver.h                   |   12
 include/target/target_core_base.h            |   17
 include/uapi/linux/Kbuild                    |    1
 include/uapi/linux/target_core_user.h        |  142
 36 files changed, 1941 insertions(+), 318 deletions(-)
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
new file mode 100644
index 000000000000..5518465290bf
--- /dev/null
+++ b/Documentation/target/tcmu-design.txt
@@ -0,0 +1,378 @@
Contents:

1) TCM Userspace Design
  a) Background
  b) Benefits
  c) Design constraints
  d) Implementation overview
     i. Mailbox
     ii. Command ring
     iii. Data Area
  e) Device discovery
  f) Device events
  g) Other contingencies
2) Writing a user pass-through handler
  a) Discovering and configuring TCMU uio devices
  b) Waiting for events on the device(s)
  c) Managing the command ring
3) Command filtering and pass_level
4) A final note


TCM Userspace Design
--------------------

TCM is another name for LIO, an in-kernel iSCSI target (server).
Existing TCM targets run in the kernel. TCMU (TCM in Userspace)
allows userspace programs to be written which act as iSCSI targets.
This document describes the design.

The existing kernel provides modules for different SCSI transport
protocols. TCM also modularizes the data storage. There are existing
modules for file, block device, RAM or using another SCSI device as
storage. These are called "backstores" or "storage engines". These
built-in modules are implemented entirely as kernel code.

Background:

In addition to modularizing the transport protocol used for carrying
SCSI commands ("fabrics"), the Linux kernel target, LIO, also modularizes
the actual data storage as well. These are referred to as "backstores"
or "storage engines". The target comes with backstores that allow a
file, a block device, RAM, or another SCSI device to be used for the
local storage needed for the exported SCSI LUN. Like the rest of LIO,
these are implemented entirely as kernel code.

These backstores cover the most common use cases, but not all. One new
use case that other non-kernel target solutions, such as tgt, are able
to support is using Gluster's GLFS or Ceph's RBD as a backstore. The
target then serves as a translator, allowing initiators to store data
in these non-traditional networked storage systems, while still only
using standard protocols themselves.

If the target is a userspace process, supporting these is easy. tgt,
for example, needs only a small adapter module for each, because the
modules just use the available userspace libraries for RBD and GLFS.

Adding support for these backstores in LIO is considerably more
difficult, because LIO is entirely kernel code. Instead of undertaking
the significant work to port the GLFS or RBD APIs and protocols to the
kernel, another approach is to create a userspace pass-through
backstore for LIO, "TCMU".


Benefits:

In addition to allowing relatively easy support for RBD and GLFS, TCMU
will also allow easier development of new backstores. TCMU combines
with the LIO loopback fabric to become something similar to FUSE
(Filesystem in Userspace), but at the SCSI layer instead of the
filesystem layer. A SUSE, if you will.

The disadvantage is that there are more distinct components to
configure, and potentially to malfunction. This is unavoidable, but
hopefully not fatal if we're careful to keep things as simple as
possible.

Design constraints:

- Good performance: high throughput, low latency
- Cleanly handle if userspace:
   1) never attaches
   2) hangs
   3) dies
   4) misbehaves
- Allow future flexibility in user & kernel implementations
- Be reasonably memory-efficient
- Simple to configure & run
- Simple to write a userspace backend


Implementation overview:

The core of the TCMU interface is a memory region that is shared
between kernel and userspace. Within this region are: a control area
(mailbox); a lockless producer/consumer circular buffer for commands
to be passed up, and status returned; and an in/out data buffer area.

TCMU uses the pre-existing UIO subsystem. UIO allows device driver
development in userspace, and this is conceptually very close to the
TCMU use case, except instead of a physical device, TCMU implements a
memory-mapped layout designed for SCSI commands. Using UIO also
benefits TCMU by handling device introspection (e.g. a way for
userspace to determine how large the shared region is) and signaling
mechanisms in both directions.

There are no embedded pointers in the memory region. Everything is
expressed as an offset from the region's starting address. This allows
the ring to still work if the user process dies and is restarted with
the region mapped at a different virtual address.

See target_core_user.h for the struct definitions.

The Mailbox:

The mailbox is always at the start of the shared memory region, and
contains a version, details about the starting offset and size of the
command ring, and head and tail pointers to be used by the kernel and
userspace (respectively) to put commands on the ring, and indicate
when the commands are completed.

version - 1 (userspace should abort if otherwise)
flags - none yet defined.
cmdr_off - The offset of the start of the command ring from the start
of the memory region, to account for the mailbox size.
cmdr_size - The size of the command ring. This does *not* need to be a
power of two.
cmd_head - Modified by the kernel to indicate when a command has been
placed on the ring.
cmd_tail - Modified by userspace to indicate when it has completed
processing of a command.

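The authoritative struct definitions live in target_core_user.h; as an
orientation only, the mailbox described above can be pictured roughly
like this (field order and meanings per the list above; the real
header additionally aligns cmd_tail onto its own cache line so the two
writers do not share one):

#include <linux/types.h>

struct tcmu_mailbox_sketch {
	__u16 version;    /* must be 1, else userspace should abort */
	__u16 flags;      /* none defined yet */
	__u32 cmdr_off;   /* start of command ring, from region start */
	__u32 cmdr_size;  /* ring size; need not be a power of two */

	__u32 cmd_head;   /* written only by the kernel */

	__u32 cmd_tail;   /* written only by userspace */
};
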
The Command Ring:

Commands are placed on the ring by the kernel incrementing
mailbox.cmd_head by the size of the command, modulo cmdr_size, and
then signaling userspace via uio_event_notify(). Once the command is
completed, userspace updates mailbox.cmd_tail in the same way and
signals the kernel via a 4-byte write(). When cmd_head equals
cmd_tail, the ring is empty -- no commands are currently waiting to be
processed by userspace.

TCMU commands start with a common header containing "len_op", a 32-bit
value that stores the length, as well as the opcode in the lowest
unused bits. Currently only two opcodes are defined, TCMU_OP_PAD and
TCMU_OP_CMD. When userspace encounters a command with PAD opcode, it
should skip ahead by the bytes in "length". (The kernel inserts PAD
entries to ensure each CMD entry fits contiguously into the circular
buffer.)

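Decoding len_op needs nothing more than a mask: entry sizes are
aligned, so the low bits of the 32-bit word are free to carry the
opcode. The kernel header provides tcmu_hdr_get_op() and
tcmu_hdr_get_len() for this (used in the example code below); a
stand-alone sketch of the idea, with an illustrative mask value rather
than the header's real one, looks like:

#include <stdint.h>

#define SKETCH_OP_MASK	0x7	/* illustrative; use the header's mask */

static inline uint32_t sketch_get_op(uint32_t len_op)
{
	return len_op & SKETCH_OP_MASK;	/* TCMU_OP_PAD or TCMU_OP_CMD */
}

static inline uint32_t sketch_get_len(uint32_t len_op)
{
	return len_op & ~SKETCH_OP_MASK;	/* bytes to advance the tail */
}
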
When userspace handles a CMD, it finds the SCSI CDB (Command Data
Block) via tcmu_cmd_entry.req.cdb_off. This is an offset from the
start of the overall shared memory region, not the entry. The data
in/out buffers are accessible via the req.iov[] array. Note that
each iov.iov_base is also an offset from the start of the region.

TCMU currently does not support BIDI operations.

When completing a command, userspace sets rsp.scsi_status, and
rsp.sense_buffer if necessary. Userspace then increments
mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
kernel via the UIO method, a 4-byte write to the file descriptor.

The Data Area:

This is shared-memory space after the command ring. The organization
of this area is not defined in the TCMU interface, and userspace
should access only the parts referenced by pending iovs.


Device Discovery:

Other devices may be using UIO besides TCMU. Unrelated user processes
may also be handling different sets of TCMU devices. TCMU userspace
processes must find their devices by scanning sysfs
class/uio/uio*/name. For TCMU devices, these names will be of the
format:

tcm-user/<hba_num>/<device_name>/<subtype>/<path>

where "tcm-user" is common for all TCMU-backed UIO devices. <hba_num>
and <device_name> allow userspace to find the device's path in the
kernel target's configfs tree. Assuming the usual mount point, it is
found at:

/sys/kernel/config/target/core/user_<hba_num>/<device_name>

This location contains attributes such as "hw_block_size", that
userspace needs to know for correct operation.

<subtype> will be a userspace-process-unique string to identify the
TCMU device as expecting to be backed by a certain handler, and <path>
will be an additional handler-specific string for the user process to
configure the device, if needed. The name cannot contain ':', due to
LIO limitations.

For all devices so discovered, the user handler opens /dev/uioX and
calls mmap():

mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0)

where size must be equal to the value read from
/sys/class/uio/uioX/maps/map0/size.


Device Events:

If a new device is added or removed, a notification will be broadcast
over netlink, using a generic netlink family name of "TCM-USER" and a
multicast group named "config". This will include the UIO name as
described in the previous section, as well as the UIO minor
number. This should allow userspace to identify both the UIO device and
the LIO device, so that after determining the device is supported
(based on subtype) it can take the appropriate action.

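A minimal sketch of subscribing to these notifications with libnl-3's
generic netlink API follows (error checking omitted, as elsewhere in
this document; the callback is a stub, since the attribute layout
should be taken from target_core_user.h):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Called per multicast message; parse attrs per target_core_user.h */
static int handle_tcmu_event(struct nl_msg *msg, void *arg)
{
	return NL_OK;
}

static void listen_for_tcmu_events(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	/* Resolve the "config" multicast group of the "TCM-USER" family */
	grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
	nl_socket_add_membership(sk, grp);

	/* Multicast events are unsolicited: disable sequence checking */
	nl_socket_disable_seq_check(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
			    handle_tcmu_event, NULL);

	while (1)
		nl_recvmsgs_default(sk);	/* blocks, dispatches callback */
}
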
Other contingencies:

Userspace handler process never attaches:

- TCMU will post commands, and then abort them after a timeout period
  (30 seconds.)

Userspace handler process is killed:

- It is still possible to restart and re-connect to TCMU
  devices. Command ring is preserved. However, after the timeout period,
  the kernel will abort pending tasks.

Userspace handler process hangs:

- The kernel will abort pending tasks after a timeout period.

Userspace handler process is malicious:

- The process can trivially break the handling of devices it controls,
  but should not be able to access kernel memory outside its shared
  memory areas.


Writing a user pass-through handler (with example code)
--------------------------------------------------------

A user process handling a TCMU device must support the following:

a) Discovering and configuring TCMU uio devices
b) Waiting for events on the device(s)
c) Managing the command ring: Parsing operations and commands,
   performing work as needed, setting response fields (scsi_status and
   possibly sense_buffer), updating cmd_tail, and notifying the kernel
   that work has been finished

First, consider instead writing a plugin for tcmu-runner. tcmu-runner
implements all of this, and provides a higher-level API for plugin
authors.

TCMU is designed so that multiple unrelated processes can manage TCMU
devices separately. All handlers should make sure to only open their
devices, based upon a known subtype string.

a) Discovering and configuring TCMU UIO devices:

(error checking omitted for brevity)

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int fd, dev_fd, ret;
char buf[256];
unsigned long long map_len;
void *map;

fd = open("/sys/class/uio/uio0/name", O_RDONLY);
ret = read(fd, buf, sizeof(buf));
close(fd);
buf[ret-1] = '\0'; /* null-terminate and chop off the \n */

/* we only want uio devices whose name is a format we expect */
if (strncmp(buf, "tcm-user", 8))
	exit(-1);

/* Further checking for subtype also needed here */

fd = open("/sys/class/uio/uio0/maps/map0/size", O_RDONLY);
ret = read(fd, buf, sizeof(buf));
close(fd);
buf[ret-1] = '\0'; /* null-terminate and chop off the \n */

map_len = strtoull(buf, NULL, 0);

dev_fd = open("/dev/uio0", O_RDWR);
map = mmap(NULL, map_len, PROT_READ|PROT_WRITE, MAP_SHARED, dev_fd, 0);

b) Waiting for events on the device(s)

while (1) {
	char buf[4];

	int ret = read(dev_fd, buf, 4); /* will block */

	handle_device_events(dev_fd, map);
}

c) Managing the command ring

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <linux/target_core_user.h>

int handle_device_events(int fd, void *map)
{
	struct tcmu_mailbox *mb = map;
	struct tcmu_cmd_entry *ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
	int did_some_work = 0;

	/* Process events from cmd ring until we catch up with cmd_head */
	while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {

		if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
			uint8_t *cdb = (void *)mb + ent->req.cdb_off;
			bool success = true;

			/* Handle command here. */
			printf("SCSI opcode: 0x%x\n", cdb[0]);

			/* Set response fields; status codes are per SAM
			   (see "A final note" below) */
			if (success)
				ent->rsp.scsi_status = 0; /* GOOD */
			else {
				/* Also fill in ent->rsp.sense_buffer here */
				ent->rsp.scsi_status = 2; /* CHECK CONDITION */
			}
		}
		else {
			/* Do nothing for PAD entries */
		}

		/* update cmd_tail */
		mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(&ent->hdr)) % mb->cmdr_size;
		ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
		did_some_work = 1;
	}

	/* Notify the kernel that work has been finished */
	if (did_some_work) {
		uint32_t buf = 0;

		write(fd, &buf, 4);
	}

	return 0;
}

Command filtering and pass_level
--------------------------------

TCMU supports a "pass_level" option with valid values of 0 or 1. When
the value is 0 (the default), nearly all SCSI commands received for
the device are passed through to the handler. This allows maximum
flexibility but increases the amount of code required by the handler,
to support all mandatory SCSI commands. If pass_level is set to 1,
then only IO-related commands are presented, and the rest are handled
by LIO's in-kernel command emulation. (A configuration sketch follows
the command list below.) The commands presented at level 1 include all
versions of:

READ
WRITE
WRITE_VERIFY
XDWRITEREAD
WRITE_SAME
COMPARE_AND_WRITE
SYNCHRONIZE_CACHE
UNMAP

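Like other backstore device attributes, pass_level is set through
configfs. A sketch, assuming the usual configfs mount point and a
hypothetical device user_1/test (verify the exact attribute path on a
running system):

#include <fcntl.h>
#include <unistd.h>

static void set_pass_level(void)
{
	/* "user_1" and "test" stand in for real <hba_num>/<device_name> */
	int fd = open("/sys/kernel/config/target/core/user_1/test"
		      "/attrib/pass_level", O_WRONLY);

	write(fd, "1", 1);	/* error checking omitted */
	close(fd);
}
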

A final note
------------

Please be careful to return codes as defined by the SCSI
specifications. These are different from some values defined in the
scsi/scsi.h include file. For example, CHECK CONDITION's status code
is 2, not 1.
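
Concretely, the values a handler should place in rsp.scsi_status are
the SAM-defined status bytes (the kernel names them SAM_STAT_*); they
correspond to the old scsi/scsi.h macros shifted left by one:

enum {
	STAT_GOOD			= 0x00,
	STAT_CHECK_CONDITION		= 0x02,	/* not 0x01 */
	STAT_BUSY			= 0x08,
	STAT_RESERVATION_CONFLICT	= 0x18,
	STAT_TASK_SET_FULL		= 0x28,
};
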
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 0bea5776bcbc..3effa931fce2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2185,7 +2185,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		isert_cmd->tx_desc.num_sge = 2;
 	}
 
-	isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
+	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
 
 	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 
@@ -2871,7 +2871,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 				&isert_cmd->tx_desc.iscsi_header);
 		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
 		isert_init_send_wr(isert_conn, isert_cmd,
-				   &isert_cmd->tx_desc.send_wr, true);
+				   &isert_cmd->tx_desc.send_wr, false);
 		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
 		wr->send_wr_num += 1;
 	}
@@ -3140,7 +3140,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
 
 accept_wait:
 	ret = down_interruptible(&isert_np->np_sem);
-	if (max_accept > 5)
+	if (ret || max_accept > 5)
 		return -ENODEV;
 
 	spin_lock_bh(&np->np_thread_lock);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 829752cfd73f..a902fa1db7af 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -112,6 +112,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
 	struct qla_tgt_cmd *cmd);
 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio, uint16_t status, int qfull);
+static void qlt_disable_vha(struct scsi_qla_host *vha);
 /*
  * Global Variables
  */
@@ -210,7 +211,7 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
 }
 
-void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
 	ql_dbg(ql_dbg_tgt, vha, 0xe072,
@@ -433,7 +434,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 #if 0 /* FIXME: Re-enable Global event handling.. */
 	/* Global event */
 	atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
-	qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+	qlt_clear_tgt_db(ha->tgt.qla_tgt);
 	if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
 		sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
 		    typeof(*sess), sess_list_entry);
@@ -515,7 +516,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
 }
 
 /* ha->hardware_lock supposed to be held on entry */
-static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+static void qlt_clear_tgt_db(struct qla_tgt *tgt)
 {
 	struct qla_tgt_sess *sess;
 
@@ -867,7 +868,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 	mutex_lock(&vha->vha_tgt.tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	tgt->tgt_stop = 1;
-	qlt_clear_tgt_db(tgt, true);
+	qlt_clear_tgt_db(tgt);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 	mutex_unlock(&qla_tgt_mutex);
@@ -1462,12 +1463,13 @@ out_err:
 	return -1;
 }
 
-static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
-	struct qla_tgt_cmd *cmd)
+static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	BUG_ON(!cmd->sg_mapped);
+	if (!cmd->sg_mapped)
+		return;
+
 	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
 	cmd->sg_mapped = 0;
 
@@ -2428,8 +2430,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 	return 0;
 
 out_unmap_unlock:
-	if (cmd->sg_mapped)
-		qlt_unmap_sg(vha, cmd);
+	qlt_unmap_sg(vha, cmd);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return res;
@@ -2506,8 +2507,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 	return res;
 
 out_unlock_free_unmap:
-	if (cmd->sg_mapped)
-		qlt_unmap_sg(vha, cmd);
+	qlt_unmap_sg(vha, cmd);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return res;
@@ -2741,8 +2741,7 @@ done:
 		if (!ha_locked && !in_interrupt())
 			msleep(250); /* just in case */
 
-		if (cmd->sg_mapped)
-			qlt_unmap_sg(vha, cmd);
+		qlt_unmap_sg(vha, cmd);
 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
 	}
 	return;
@@ -3087,8 +3086,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 	tfo = se_cmd->se_tfo;
 	cmd->cmd_sent_to_fw = 0;
 
-	if (cmd->sg_mapped)
-		qlt_unmap_sg(vha, cmd);
+	qlt_unmap_sg(vha, cmd);
 
 	if (unlikely(status != CTIO_SUCCESS)) {
 		switch (status & 0xFFFF) {
@@ -5343,7 +5341,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
 EXPORT_SYMBOL(qlt_lport_deregister);
 
 /* Must be called under HW lock */
-void qlt_set_mode(struct scsi_qla_host *vha)
+static void qlt_set_mode(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
@@ -5364,7 +5362,7 @@ void qlt_set_mode(struct scsi_qla_host *vha)
 }
 
 /* Must be called under HW lock */
-void qlt_clear_mode(struct scsi_qla_host *vha)
+static void qlt_clear_mode(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
@@ -5428,8 +5426,7 @@ EXPORT_SYMBOL(qlt_enable_vha);
  *
  * Disable Target Mode and reset the adapter
  */
-void
-qlt_disable_vha(struct scsi_qla_host *vha)
+static void qlt_disable_vha(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 8ff330f7d6f5..332086776dfe 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1001,11 +1001,11 @@ struct qla_tgt_prm {
 	struct qla_tgt *tgt;
 	void *pkt;
 	struct scatterlist *sg;	/* cmd data buffer SG vector */
+	unsigned char *sense_buffer;
 	int seg_cnt;
 	int req_cnt;
 	uint16_t rq_result;
 	uint16_t scsi_status;
-	unsigned char *sense_buffer;
 	int sense_buffer_len;
 	int residual;
 	int add_status_pkt;
@@ -1033,10 +1033,6 @@ struct qla_tgt_srr_ctio {
 
 
 extern struct qla_tgt_data qla_target;
-/*
- * Internal function prototypes
- */
-void qlt_disable_vha(struct scsi_qla_host *);
 
 /*
  * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
@@ -1049,8 +1045,6 @@ extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
 extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_set_mode(struct scsi_qla_host *ha);
-extern void qlt_clear_mode(struct scsi_qla_host *ha);
 extern int __init qlt_init(void);
 extern void qlt_exit(void);
 extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1083,13 +1077,9 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
 /*
  * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
  */
-extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
-	struct atio_from_isp *);
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
-extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
-extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 031b2961c6b7..73f9feecda72 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -786,7 +786,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
 	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
 
 	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
-	WARN_ON(node && (node != se_nacl));
+	if (WARN_ON(node && (node != se_nacl))) {
+		/*
+		 * The nacl no longer matches what we think it should be.
+		 * Most likely a new dynamic acl has been added while
+		 * someone dropped the hardware lock.  It clearly is a
+		 * bug elsewhere, but this bit can't make things worse.
+		 */
+		btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
+			       node, GFP_ATOMIC);
+	}
 
 	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
 		se_nacl, nacl->nport_wwnn, nacl->nport_id);
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index dc2d84ac5a0e..81d44c477a5b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,6 +31,13 @@ config TCM_PSCSI
 	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
 	passthrough access to Linux/SCSI device
 
+config TCM_USER
+	tristate "TCM/USER Subsystem Plugin for Linux"
+	depends on UIO && NET
+	help
+	Say Y here to enable the TCM/USER subsystem plugin for a userspace
+	process to handle requests
+
 source "drivers/target/loopback/Kconfig"
 source "drivers/target/tcm_fc/Kconfig"
 source "drivers/target/iscsi/Kconfig"
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 85b012d2f89b..bbb4a7d638ef 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
 obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
 obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
 obj-$(CONFIG_TCM_PSCSI)	+= target_core_pscsi.o
+obj-$(CONFIG_TCM_USER)	+= target_core_user.o
 
 # Fabric modules
 obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 260c3e1e312c..b19e4329ba00 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3709,7 +3709,6 @@ static inline void iscsit_thread_check_cpumask(
 	struct task_struct *p,
 	int mode)
 {
-	char buf[128];
 	/*
 	 * mode == 1 signals iscsi_target_tx_thread() usage.
 	 * mode == 0 signals iscsi_target_rx_thread() usage.
@@ -3728,8 +3727,6 @@ static inline void iscsit_thread_check_cpumask(
 	 * both TX and RX kthreads are scheduled to run on the
 	 * same CPU.
 	 */
-	memset(buf, 0, 128);
-	cpumask_scnprintf(buf, 128, conn->conn_cpumask);
 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
 }
 
@@ -4326,8 +4323,7 @@ int iscsit_close_connection(
 	if (conn->conn_tx_hash.tfm)
 		crypto_free_hash(conn->conn_tx_hash.tfm);
 
-	if (conn->conn_cpumask)
-		free_cpumask_var(conn->conn_cpumask);
+	free_cpumask_var(conn->conn_cpumask);
 
 	kfree(conn->conn_ops);
 	conn->conn_ops = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ae03f3e5de1e..9059c1e0b26e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -669,12 +669,10 @@ static ssize_t lio_target_nacl_show_info(
 	} else {
 		sess = se_sess->fabric_sess_ptr;
 
-		if (sess->sess_ops->InitiatorName)
-			rb += sprintf(page+rb, "InitiatorName: %s\n",
-				sess->sess_ops->InitiatorName);
-		if (sess->sess_ops->InitiatorAlias)
-			rb += sprintf(page+rb, "InitiatorAlias: %s\n",
-				sess->sess_ops->InitiatorAlias);
+		rb += sprintf(page+rb, "InitiatorName: %s\n",
+			sess->sess_ops->InitiatorName);
+		rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+			sess->sess_ops->InitiatorAlias);
 
 		rb += sprintf(page+rb, "LIO Session ID: %u "
 			"ISID: 0x%02x %02x %02x %02x %02x %02x "
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 0d1e6ee3e992..a0ae5fc0ad75 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -345,7 +345,6 @@ static int iscsit_dataout_check_datasn(
 	struct iscsi_cmd *cmd,
 	unsigned char *buf)
 {
-	int dump = 0, recovery = 0;
 	u32 data_sn = 0;
 	struct iscsi_conn *conn = cmd->conn;
 	struct iscsi_data *hdr = (struct iscsi_data *) buf;
@@ -370,13 +369,11 @@ static int iscsit_dataout_check_datasn(
 		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
 			" higher than expected 0x%08x.\n", cmd->init_task_tag,
 			be32_to_cpu(hdr->datasn), data_sn);
-		recovery = 1;
 		goto recover;
 	} else if (be32_to_cpu(hdr->datasn) < data_sn) {
 		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
 			" lower than expected 0x%08x, discarding payload.\n",
 			cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
-		dump = 1;
 		goto dump;
 	}
 
@@ -392,8 +389,7 @@ dump:
 	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
 		return DATAOUT_CANNOT_RECOVER;
 
-	return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
-				DATAOUT_NORMAL;
+	return DATAOUT_WITHIN_COMMAND_RECOVERY;
 }
 
 static int iscsit_dataout_pre_datapduinorder_yes(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 5e71ac609418..480f2e0ecc11 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -978,8 +978,7 @@ int iscsit_setup_np(
 	return 0;
 fail:
 	np->np_socket = NULL;
-	if (sock)
-		sock_release(sock);
+	sock_release(sock);
 	return ret;
 }
 
@@ -1190,8 +1189,7 @@ old_sess_out:
 	if (!IS_ERR(conn->conn_tx_hash.tfm))
 		crypto_free_hash(conn->conn_tx_hash.tfm);
 
-	if (conn->conn_cpumask)
-		free_cpumask_var(conn->conn_cpumask);
+	free_cpumask_var(conn->conn_cpumask);
 
 	kfree(conn->conn_ops);
 
@@ -1268,8 +1266,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 		iscsit_put_transport(conn->conn_transport);
 		kfree(conn);
 		conn = NULL;
-		if (ret == -ENODEV)
-			goto out;
 		/* Get another socket */
 		return 1;
 	}
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 73355f4fca74..ce87ce9bdb9c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1481,8 +1481,9 @@ void iscsit_collect_login_stats(
 		if (conn->param_list)
 			intrname = iscsi_find_param_from_key(INITIATORNAME,
 							     conn->param_list);
-		strcpy(ls->last_intr_fail_name,
-		       (intrname ? intrname->value : "Unknown"));
+		strlcpy(ls->last_intr_fail_name,
+			(intrname ? intrname->value : "Unknown"),
+			sizeof(ls->last_intr_fail_name));
 
 		ls->last_intr_fail_ip_family = conn->login_family;
 
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 340de9d92b15..ab3ab27d49b7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
 /*
  * Locate the SAM Task Attr from struct scsi_cmnd *
  */
-static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
+static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag)
 {
-	if (sc->device->tagged_supported) {
-		switch (sc->tag) {
-		case HEAD_OF_QUEUE_TAG:
-			return MSG_HEAD_TAG;
-		case ORDERED_QUEUE_TAG:
-			return MSG_ORDERED_TAG;
-		default:
-			break;
-		}
-	}
+	if (sc->device->tagged_supported &&
+	    sc->device->ordered_tags && tag >= 0)
+		return MSG_ORDERED_TAG;
 
 	return MSG_SIMPLE_TAG;
 }
@@ -227,7 +220,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
 
 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-			transfer_length, tcm_loop_sam_attr(sc),
+			transfer_length, tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag),
 			sc->sc_data_direction, 0,
 			scsi_sglist(sc), scsi_sg_count(sc),
 			sgl_bidi, sgl_bidi_count,
@@ -266,7 +259,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 	}
 
 	tl_cmd->sc = sc;
-	tl_cmd->sc_cmd_tag = sc->tag;
+	tl_cmd->sc_cmd_tag = sc->request->tag;
 	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
 	queue_work(tcm_loop_workqueue, &tl_cmd->work);
 	return 0;
@@ -370,7 +363,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 	 */
 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
-				 sc->tag, TMR_ABORT_TASK);
+				 sc->request->tag, TMR_ABORT_TASK);
 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
 
@@ -960,8 +953,7 @@ static int tcm_loop_port_link(
 			struct tcm_loop_tpg, tl_se_tpg);
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
-	atomic_inc(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic();
+	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
 	/*
 	 * Add Linux/SCSI struct scsi_device by HCTL
 	 */
@@ -995,8 +987,7 @@ static void tcm_loop_port_unlink(
 	scsi_remove_device(sd);
 	scsi_device_put(sd);
 
-	atomic_dec(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
 
 	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fbc5ebb5f761..fb87780929d2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
 			continue;
 
-		atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		found = true;
 
 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-		atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 		break;
 	}
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		 * every I_T nexus other than the I_T nexus on which the SET
 		 * TARGET PORT GROUPS command
 		 */
-		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		spin_unlock_bh(&port->sep_alua_lock);
 
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
 	}
 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	/*
@@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1168,7 +1162,6 @@ int core_alua_do_port_transition(
 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = local_lu_gp_mem->lu_gp;
 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic();
 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
 	/*
 	 * For storage objects that are members of the 'default_lu_gp',
@@ -1184,8 +1177,7 @@ int core_alua_do_port_transition(
 		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
 		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
 						   new_state, explicit);
-		atomic_dec(&lu_gp->lu_gp_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
 		return rc;
 	}
 	/*
@@ -1198,8 +1190,7 @@ int core_alua_do_port_transition(
 			    lu_gp_mem_list) {
 
 		dev = lu_gp_mem->lu_gp_mem_dev;
-		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
 		spin_unlock(&lu_gp->lu_gp_lock);
 
 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1227,8 +1218,7 @@ int core_alua_do_port_transition(
 				tg_pt_gp->tg_pt_gp_alua_port = NULL;
 				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
 			}
-			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic();
+			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
@@ -1238,16 +1228,14 @@ int core_alua_do_port_transition(
 							new_state, explicit);
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic();
+			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			if (rc)
 				break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 		spin_lock(&lu_gp->lu_gp_lock);
-		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic();
+		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
@@ -1260,8 +1248,7 @@ int core_alua_do_port_transition(
 			 core_alua_dump_state(new_state));
 	}
 
-	atomic_dec(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
 	return rc;
 }
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 756def38c77a..79f9296a08ae 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -665,6 +665,9 @@ SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
 DEF_DEV_ATTRIB(emulate_rest_reord);
 SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
 
+DEF_DEV_ATTRIB(force_pr_aptpl);
+SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
+
 DEF_DEV_ATTRIB_RO(hw_block_size);
 SE_DEV_ATTR_RO(hw_block_size);
 
@@ -719,6 +722,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
 	&target_core_dev_attrib_hw_pi_prot_type.attr,
 	&target_core_dev_attrib_pi_prot_format.attr,
 	&target_core_dev_attrib_enforce_pr_isids.attr,
+	&target_core_dev_attrib_force_pr_aptpl.attr,
 	&target_core_dev_attrib_is_nonrot.attr,
 	&target_core_dev_attrib_emulate_rest_reord.attr,
 	&target_core_dev_attrib_hw_block_size.attr,
@@ -1263,7 +1267,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 {
 	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
 	unsigned char *t_fabric = NULL, *t_port = NULL;
-	char *orig, *ptr, *arg_p, *opts;
+	char *orig, *ptr, *opts;
 	substring_t args[MAX_OPT_ARGS];
 	unsigned long long tmp_ll;
 	u64 sa_res_key = 0;
@@ -1295,14 +1299,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 		token = match_token(ptr, tokens, args);
 		switch (token) {
 		case Opt_initiator_fabric:
-			i_fabric = match_strdup(&args[0]);
+			i_fabric = match_strdup(args);
 			if (!i_fabric) {
 				ret = -ENOMEM;
 				goto out;
 			}
 			break;
 		case Opt_initiator_node:
-			i_port = match_strdup(&args[0]);
+			i_port = match_strdup(args);
 			if (!i_port) {
 				ret = -ENOMEM;
 				goto out;
@@ -1316,7 +1320,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 			}
 			break;
 		case Opt_initiator_sid:
-			isid = match_strdup(&args[0]);
+			isid = match_strdup(args);
 			if (!isid) {
 				ret = -ENOMEM;
 				goto out;
@@ -1330,15 +1334,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 			}
 			break;
 		case Opt_sa_res_key:
-			arg_p = match_strdup(&args[0]);
-			if (!arg_p) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = kstrtoull(arg_p, 0, &tmp_ll);
+			ret = kstrtoull(args->from, 0, &tmp_ll);
 			if (ret < 0) {
-				pr_err("kstrtoull() failed for"
-					" sa_res_key=\n");
+				pr_err("kstrtoull() failed for sa_res_key=\n");
 				goto out;
 			}
 			sa_res_key = (u64)tmp_ll;
@@ -1370,14 +1368,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
 		 * PR APTPL Metadata for Target Port
 		 */
 		case Opt_target_fabric:
-			t_fabric = match_strdup(&args[0]);
+			t_fabric = match_strdup(args);
 			if (!t_fabric) {
 				ret = -ENOMEM;
 				goto out;
 			}
 			break;
 		case Opt_target_node:
-			t_port = match_strdup(&args[0]);
+			t_port = match_strdup(args);
 			if (!t_port) {
 				ret = -ENOMEM;
 				goto out;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 98da90167159..c45f9e907e44 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 		if (port->sep_rtpi != rtpi)
 			continue;
 
-		atomic_inc(&deve->pr_ref_count);
-		smp_mb__after_atomic();
+		atomic_inc_mb(&deve->pr_ref_count);
 		spin_unlock_irq(&nacl->device_list_lock);
 
 		return deve;
@@ -1019,6 +1018,23 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
 	return 0;
 }
 
+int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+	if (dev->export_count) {
+		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
+		       " export_count is %d\n", dev, dev->export_count);
+		return -EINVAL;
+	}
+
+	dev->dev_attrib.force_pr_aptpl = flag;
+	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
+	return 0;
+}
+
 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
@@ -1250,24 +1266,16 @@ struct se_lun *core_dev_add_lun(
  *
  *
  */
-int core_dev_del_lun(
+void core_dev_del_lun(
 	struct se_portal_group *tpg,
-	u32 unpacked_lun)
+	struct se_lun *lun)
 {
-	struct se_lun *lun;
-
-	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
-	if (IS_ERR(lun))
-		return PTR_ERR(lun);
-
-	core_tpg_post_dellun(tpg, lun);
-
-	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
 		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
-		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 		tpg->se_tpg_tfo->get_fabric_name());
 
-	return 0;
+	core_tpg_remove_lun(tpg, lun);
 }
 
 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
@@ -1396,8 +1404,7 @@ int core_dev_add_initiator_node_lun_acl(
 
 	spin_lock(&lun->lun_acl_lock);
 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
-	atomic_inc(&lun->lun_acl_count);
-	smp_mb__after_atomic();
+	atomic_inc_mb(&lun->lun_acl_count);
 	spin_unlock(&lun->lun_acl_lock);
 
 	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1409,7 +1416,8 @@ int core_dev_add_initiator_node_lun_acl(
 	 * Check to see if there are any existing persistent reservation APTPL
 	 * pre-registrations that need to be enabled for this LUN ACL..
 	 */
-	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
+					    lacl->mapped_lun);
 	return 0;
 }
 
@@ -1430,8 +1438,7 @@ int core_dev_del_initiator_node_lun_acl(
 
 	spin_lock(&lun->lun_acl_lock);
 	list_del(&lacl->lacl_list);
-	atomic_dec(&lun->lun_acl_count);
-	smp_mb__after_atomic();
+	atomic_dec_mb(&lun->lun_acl_count);
 	spin_unlock(&lun->lun_acl_lock);
 
 	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
@@ -1554,6 +1561,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
 	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
 	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
 	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
 	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 7de9f0475d05..0c3f90130b7d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -320,7 +320,7 @@ static struct config_group *target_fabric_make_mappedlun(
320 struct se_node_acl, acl_group); 320 struct se_node_acl, acl_group);
321 struct se_portal_group *se_tpg = se_nacl->se_tpg; 321 struct se_portal_group *se_tpg = se_nacl->se_tpg;
322 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 322 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
323 struct se_lun_acl *lacl; 323 struct se_lun_acl *lacl = NULL;
324 struct config_item *acl_ci; 324 struct config_item *acl_ci;
325 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 325 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
326 char *buf; 326 char *buf;
@@ -406,6 +406,7 @@ static struct config_group *target_fabric_make_mappedlun(
406out: 406out:
407 if (lacl_cg) 407 if (lacl_cg)
408 kfree(lacl_cg->default_groups); 408 kfree(lacl_cg->default_groups);
409 kfree(lacl);
409 kfree(buf); 410 kfree(buf);
410 return ERR_PTR(ret); 411 return ERR_PTR(ret);
411} 412}
@@ -821,7 +822,7 @@ static int target_fabric_port_unlink(
821 tf->tf_ops.fabric_pre_unlink(se_tpg, lun); 822 tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
822 } 823 }
823 824
824 core_dev_del_lun(se_tpg, lun->unpacked_lun); 825 core_dev_del_lun(se_tpg, lun);
825 return 0; 826 return 0;
826} 827}
827 828
@@ -910,16 +911,12 @@ static struct config_group *target_fabric_make_lun(
910 GFP_KERNEL); 911 GFP_KERNEL);
911 if (!port_stat_grp->default_groups) { 912 if (!port_stat_grp->default_groups) {
912 pr_err("Unable to allocate port_stat_grp->default_groups\n"); 913 pr_err("Unable to allocate port_stat_grp->default_groups\n");
913 errno = -ENOMEM; 914 kfree(lun_cg->default_groups);
914 goto out; 915 return ERR_PTR(-ENOMEM);
915 } 916 }
916 target_stat_setup_port_default_groups(lun); 917 target_stat_setup_port_default_groups(lun);
917 918
918 return &lun->lun_group; 919 return &lun->lun_group;
919out:
920 if (lun_cg)
921 kfree(lun_cg->default_groups);
922 return ERR_PTR(errno);
923} 920}
924 921
925static void target_fabric_drop_lun( 922static void target_fabric_drop_lun(
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 0d1cf8b4f49f..35bfe77160d8 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -394,9 +394,9 @@ char *iscsi_parse_pr_out_transport_id(
 394 * If the caller wants the TransportID Length, we set that value for the 394 * If the caller wants the TransportID Length, we set that value for the
 395 * entire iSCSI Transport ID now. 395 * entire iSCSI Transport ID now.
396 */ 396 */
397 if (out_tid_len != NULL) { 397 if (out_tid_len) {
398 add_len = ((buf[2] >> 8) & 0xff); 398 /* The shift works thanks to integer promotion rules */
399 add_len |= (buf[3] & 0xff); 399 add_len = (buf[2] << 8) | buf[3];
400 400
401 tid_len = strlen(&buf[4]); 401 tid_len = strlen(&buf[4]);
402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
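The old expression computed the high byte as (buf[2] >> 8) & 0xff, which is always 0 for an 8-bit value, so this replacement is a real fix rather than a cleanup. A minimal stand-alone illustration with hypothetical bytes:

	unsigned char buf[4] = { 0x00, 0x00, 0x01, 0x2c };
	/* buf[2] is promoted to int before the shift, so no bits are lost */
	unsigned short add_len = (buf[2] << 8) | buf[3];	/* 0x012c == 300 */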
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7d6cddaec525..72c83d98662b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -415,7 +415,7 @@ fd_execute_sync_cache(struct se_cmd *cmd)
415 } else { 415 } else {
416 start = cmd->t_task_lba * dev->dev_attrib.block_size; 416 start = cmd->t_task_lba * dev->dev_attrib.block_size;
417 if (cmd->data_length) 417 if (cmd->data_length)
418 end = start + cmd->data_length; 418 end = start + cmd->data_length - 1;
419 else 419 else
420 end = LLONG_MAX; 420 end = LLONG_MAX;
421 } 421 }
@@ -680,7 +680,12 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
680 struct fd_dev *fd_dev = FD_DEV(dev); 680 struct fd_dev *fd_dev = FD_DEV(dev);
681 loff_t start = cmd->t_task_lba * 681 loff_t start = cmd->t_task_lba *
682 dev->dev_attrib.block_size; 682 dev->dev_attrib.block_size;
683 loff_t end = start + cmd->data_length; 683 loff_t end;
684
685 if (cmd->data_length)
686 end = start + cmd->data_length - 1;
687 else
688 end = LLONG_MAX;
684 689
685 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 690 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
686 } 691 }
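Both hunks above follow from vfs_fsync_range() taking an inclusive byte range: the last byte to flush is start + length - 1. A sketch of the pattern, with a hypothetical file and values:

	/* Flush exactly one 512-byte block at byte offset 0: bytes [0, 511]. */
	loff_t start = 0;
	loff_t end = start + 512 - 1;	/* passing 512 would spill past the block */
	vfs_fsync_range(file, start, end, 1);	/* 1 == datasync */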
@@ -762,7 +767,9 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
762 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 767 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
763 break; 768 break;
764 case Opt_fd_buffered_io: 769 case Opt_fd_buffered_io:
765 match_int(args, &arg); 770 ret = match_int(args, &arg);
771 if (ret)
772 goto out;
766 if (arg != 1) { 773 if (arg != 1) {
767 pr_err("bogus fd_buffered_io=%d value\n", arg); 774 pr_err("bogus fd_buffered_io=%d value\n", arg);
768 ret = -EINVAL; 775 ret = -EINVAL;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index de9cab708f45..e31f42f369ff 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -38,6 +38,7 @@ int se_dev_set_emulate_3pc(struct se_device *, int);
38int se_dev_set_pi_prot_type(struct se_device *, int); 38int se_dev_set_pi_prot_type(struct se_device *, int);
39int se_dev_set_pi_prot_format(struct se_device *, int); 39int se_dev_set_pi_prot_format(struct se_device *, int);
40int se_dev_set_enforce_pr_isids(struct se_device *, int); 40int se_dev_set_enforce_pr_isids(struct se_device *, int);
41int se_dev_set_force_pr_aptpl(struct se_device *, int);
41int se_dev_set_is_nonrot(struct se_device *, int); 42int se_dev_set_is_nonrot(struct se_device *, int);
42int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 43int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
43int se_dev_set_queue_depth(struct se_device *, u32); 44int se_dev_set_queue_depth(struct se_device *, u32);
@@ -46,7 +47,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *, u32);
46int se_dev_set_optimal_sectors(struct se_device *, u32); 47int se_dev_set_optimal_sectors(struct se_device *, u32);
47int se_dev_set_block_size(struct se_device *, u32); 48int se_dev_set_block_size(struct se_device *, u32);
48struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); 49struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
49int core_dev_del_lun(struct se_portal_group *, u32); 50void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
50struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); 51struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
51struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, 52struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
52 struct se_node_acl *, u32, int *); 53 struct se_node_acl *, u32, int *);
@@ -82,8 +83,7 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
82struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); 83struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
83int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, 84int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
84 u32, struct se_device *); 85 u32, struct se_device *);
85struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 86void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
86int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
87 87
88/* target_core_transport.c */ 88/* target_core_transport.c */
89extern struct kmem_cache *se_tmr_req_cache; 89extern struct kmem_cache *se_tmr_req_cache;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index df357862286e..8c60a1a1ae8d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
674 */ 674 */
675 spin_lock(&dev->se_port_lock); 675 spin_lock(&dev->se_port_lock);
676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { 676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
677 atomic_inc(&port->sep_tg_pt_ref_cnt); 677 atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
678 smp_mb__after_atomic();
679 spin_unlock(&dev->se_port_lock); 678 spin_unlock(&dev->se_port_lock);
680 679
681 spin_lock_bh(&port->sep_alua_lock); 680 spin_lock_bh(&port->sep_alua_lock);
@@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
709 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) 708 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
710 continue; 709 continue;
711 710
712 atomic_inc(&deve_tmp->pr_ref_count); 711 atomic_inc_mb(&deve_tmp->pr_ref_count);
713 smp_mb__after_atomic();
714 spin_unlock_bh(&port->sep_alua_lock); 712 spin_unlock_bh(&port->sep_alua_lock);
715 /* 713 /*
716 * Grab a configfs group dependency that is released 714 * Grab a configfs group dependency that is released
@@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
722 if (ret < 0) { 720 if (ret < 0) {
723 pr_err("core_scsi3_lunacl_depend" 721 pr_err("core_scsi3_lunacl_depend"
724 "_item() failed\n"); 722 "_item() failed\n");
725 atomic_dec(&port->sep_tg_pt_ref_cnt); 723 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
726 smp_mb__after_atomic(); 724 atomic_dec_mb(&deve_tmp->pr_ref_count);
727 atomic_dec(&deve_tmp->pr_ref_count);
728 smp_mb__after_atomic();
729 goto out; 725 goto out;
730 } 726 }
731 /* 727 /*
@@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
739 nacl_tmp, deve_tmp, NULL, 735 nacl_tmp, deve_tmp, NULL,
740 sa_res_key, all_tg_pt, aptpl); 736 sa_res_key, all_tg_pt, aptpl);
741 if (!pr_reg_atp) { 737 if (!pr_reg_atp) {
742 atomic_dec(&port->sep_tg_pt_ref_cnt); 738 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
743 smp_mb__after_atomic(); 739 atomic_dec_mb(&deve_tmp->pr_ref_count);
744 atomic_dec(&deve_tmp->pr_ref_count);
745 smp_mb__after_atomic();
746 core_scsi3_lunacl_undepend_item(deve_tmp); 740 core_scsi3_lunacl_undepend_item(deve_tmp);
747 goto out; 741 goto out;
748 } 742 }
@@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
754 spin_unlock_bh(&port->sep_alua_lock); 748 spin_unlock_bh(&port->sep_alua_lock);
755 749
756 spin_lock(&dev->se_port_lock); 750 spin_lock(&dev->se_port_lock);
757 atomic_dec(&port->sep_tg_pt_ref_cnt); 751 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
758 smp_mb__after_atomic();
759 } 752 }
760 spin_unlock(&dev->se_port_lock); 753 spin_unlock(&dev->se_port_lock);
761 754
@@ -902,6 +895,7 @@ static int __core_scsi3_check_aptpl_registration(
902 spin_lock(&pr_tmpl->aptpl_reg_lock); 895 spin_lock(&pr_tmpl->aptpl_reg_lock);
903 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 896 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
904 pr_reg_aptpl_list) { 897 pr_reg_aptpl_list) {
898
905 if (!strcmp(pr_reg->pr_iport, i_port) && 899 if (!strcmp(pr_reg->pr_iport, i_port) &&
906 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && 900 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
907 !(strcmp(pr_reg->pr_tport, t_port)) && 901 !(strcmp(pr_reg->pr_tport, t_port)) &&
@@ -944,10 +938,10 @@ int core_scsi3_check_aptpl_registration(
944 struct se_device *dev, 938 struct se_device *dev,
945 struct se_portal_group *tpg, 939 struct se_portal_group *tpg,
946 struct se_lun *lun, 940 struct se_lun *lun,
947 struct se_lun_acl *lun_acl) 941 struct se_node_acl *nacl,
942 u32 mapped_lun)
948{ 943{
949 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 944 struct se_dev_entry *deve = nacl->device_list[mapped_lun];
950 struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
951 945
952 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 946 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
953 return 0; 947 return 0;
@@ -1109,8 +1103,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1109 if (dev->dev_attrib.enforce_pr_isids) 1103 if (dev->dev_attrib.enforce_pr_isids)
1110 continue; 1104 continue;
1111 } 1105 }
1112 atomic_inc(&pr_reg->pr_res_holders); 1106 atomic_inc_mb(&pr_reg->pr_res_holders);
1113 smp_mb__after_atomic();
1114 spin_unlock(&pr_tmpl->registration_lock); 1107 spin_unlock(&pr_tmpl->registration_lock);
1115 return pr_reg; 1108 return pr_reg;
1116 } 1109 }
@@ -1124,8 +1117,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1124 if (strcmp(isid, pr_reg->pr_reg_isid)) 1117 if (strcmp(isid, pr_reg->pr_reg_isid))
1125 continue; 1118 continue;
1126 1119
1127 atomic_inc(&pr_reg->pr_res_holders); 1120 atomic_inc_mb(&pr_reg->pr_res_holders);
1128 smp_mb__after_atomic();
1129 spin_unlock(&pr_tmpl->registration_lock); 1121 spin_unlock(&pr_tmpl->registration_lock);
1130 return pr_reg; 1122 return pr_reg;
1131 } 1123 }
@@ -1154,8 +1146,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
1154 1146
1155static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) 1147static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1156{ 1148{
1157 atomic_dec(&pr_reg->pr_res_holders); 1149 atomic_dec_mb(&pr_reg->pr_res_holders);
1158 smp_mb__after_atomic();
1159} 1150}
1160 1151
1161static int core_scsi3_check_implicit_release( 1152static int core_scsi3_check_implicit_release(
@@ -1348,8 +1339,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
1348 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1339 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1349 &tpg->tpg_group.cg_item); 1340 &tpg->tpg_group.cg_item);
1350 1341
1351 atomic_dec(&tpg->tpg_pr_ref_count); 1342 atomic_dec_mb(&tpg->tpg_pr_ref_count);
1352 smp_mb__after_atomic();
1353} 1343}
1354 1344
1355static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) 1345static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1368,16 +1358,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1368 struct se_portal_group *tpg = nacl->se_tpg; 1358 struct se_portal_group *tpg = nacl->se_tpg;
1369 1359
1370 if (nacl->dynamic_node_acl) { 1360 if (nacl->dynamic_node_acl) {
1371 atomic_dec(&nacl->acl_pr_ref_count); 1361 atomic_dec_mb(&nacl->acl_pr_ref_count);
1372 smp_mb__after_atomic();
1373 return; 1362 return;
1374 } 1363 }
1375 1364
1376 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1365 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1377 &nacl->acl_group.cg_item); 1366 &nacl->acl_group.cg_item);
1378 1367
1379 atomic_dec(&nacl->acl_pr_ref_count); 1368 atomic_dec_mb(&nacl->acl_pr_ref_count);
1380 smp_mb__after_atomic();
1381} 1369}
1382 1370
1383static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) 1371static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1407,8 +1395,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1407 * For nacl->dynamic_node_acl=1 1395 * For nacl->dynamic_node_acl=1
1408 */ 1396 */
1409 if (!lun_acl) { 1397 if (!lun_acl) {
1410 atomic_dec(&se_deve->pr_ref_count); 1398 atomic_dec_mb(&se_deve->pr_ref_count);
1411 smp_mb__after_atomic();
1412 return; 1399 return;
1413 } 1400 }
1414 nacl = lun_acl->se_lun_nacl; 1401 nacl = lun_acl->se_lun_nacl;
@@ -1417,8 +1404,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1417 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1404 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1418 &lun_acl->se_lun_group.cg_item); 1405 &lun_acl->se_lun_group.cg_item);
1419 1406
1420 atomic_dec(&se_deve->pr_ref_count); 1407 atomic_dec_mb(&se_deve->pr_ref_count);
1421 smp_mb__after_atomic();
1422} 1408}
1423 1409
1424static sense_reason_t 1410static sense_reason_t
@@ -1551,15 +1537,13 @@ core_scsi3_decode_spec_i_port(
1551 if (!i_str) 1537 if (!i_str)
1552 continue; 1538 continue;
1553 1539
1554 atomic_inc(&tmp_tpg->tpg_pr_ref_count); 1540 atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
1555 smp_mb__after_atomic();
1556 spin_unlock(&dev->se_port_lock); 1541 spin_unlock(&dev->se_port_lock);
1557 1542
1558 if (core_scsi3_tpg_depend_item(tmp_tpg)) { 1543 if (core_scsi3_tpg_depend_item(tmp_tpg)) {
1559 pr_err(" core_scsi3_tpg_depend_item()" 1544 pr_err(" core_scsi3_tpg_depend_item()"
1560 " for tmp_tpg\n"); 1545 " for tmp_tpg\n");
1561 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1546 atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
1562 smp_mb__after_atomic();
1563 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1547 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1564 goto out_unmap; 1548 goto out_unmap;
1565 } 1549 }
@@ -1571,10 +1555,8 @@ core_scsi3_decode_spec_i_port(
1571 spin_lock_irq(&tmp_tpg->acl_node_lock); 1555 spin_lock_irq(&tmp_tpg->acl_node_lock);
1572 dest_node_acl = __core_tpg_get_initiator_node_acl( 1556 dest_node_acl = __core_tpg_get_initiator_node_acl(
1573 tmp_tpg, i_str); 1557 tmp_tpg, i_str);
1574 if (dest_node_acl) { 1558 if (dest_node_acl)
1575 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1559 atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
1576 smp_mb__after_atomic();
1577 }
1578 spin_unlock_irq(&tmp_tpg->acl_node_lock); 1560 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1579 1561
1580 if (!dest_node_acl) { 1562 if (!dest_node_acl) {
@@ -1586,8 +1568,7 @@ core_scsi3_decode_spec_i_port(
1586 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { 1568 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
1587 pr_err("configfs_depend_item() failed" 1569 pr_err("configfs_depend_item() failed"
1588 " for dest_node_acl->acl_group\n"); 1570 " for dest_node_acl->acl_group\n");
1589 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1571 atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
1590 smp_mb__after_atomic();
1591 core_scsi3_tpg_undepend_item(tmp_tpg); 1572 core_scsi3_tpg_undepend_item(tmp_tpg);
1592 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1573 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1593 goto out_unmap; 1574 goto out_unmap;
@@ -1646,8 +1627,7 @@ core_scsi3_decode_spec_i_port(
1646 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 1627 if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
1647 pr_err("core_scsi3_lunacl_depend_item()" 1628 pr_err("core_scsi3_lunacl_depend_item()"
1648 " failed\n"); 1629 " failed\n");
1649 atomic_dec(&dest_se_deve->pr_ref_count); 1630 atomic_dec_mb(&dest_se_deve->pr_ref_count);
1650 smp_mb__after_atomic();
1651 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1631 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1652 core_scsi3_tpg_undepend_item(dest_tpg); 1632 core_scsi3_tpg_undepend_item(dest_tpg);
1653 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1633 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -3167,15 +3147,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3167 if (!dest_tf_ops) 3147 if (!dest_tf_ops)
3168 continue; 3148 continue;
3169 3149
3170 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); 3150 atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
3171 smp_mb__after_atomic();
3172 spin_unlock(&dev->se_port_lock); 3151 spin_unlock(&dev->se_port_lock);
3173 3152
3174 if (core_scsi3_tpg_depend_item(dest_se_tpg)) { 3153 if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
3175 pr_err("core_scsi3_tpg_depend_item() failed" 3154 pr_err("core_scsi3_tpg_depend_item() failed"
3176 " for dest_se_tpg\n"); 3155 " for dest_se_tpg\n");
3177 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3156 atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
3178 smp_mb__after_atomic();
3179 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3157 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3180 goto out_put_pr_reg; 3158 goto out_put_pr_reg;
3181 } 3159 }
@@ -3271,10 +3249,8 @@ after_iport_check:
3271 spin_lock_irq(&dest_se_tpg->acl_node_lock); 3249 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3272 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3250 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3273 initiator_str); 3251 initiator_str);
3274 if (dest_node_acl) { 3252 if (dest_node_acl)
3275 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3253 atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
3276 smp_mb__after_atomic();
3277 }
3278 spin_unlock_irq(&dest_se_tpg->acl_node_lock); 3254 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3279 3255
3280 if (!dest_node_acl) { 3256 if (!dest_node_acl) {
@@ -3288,8 +3264,7 @@ after_iport_check:
3288 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { 3264 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
3289 pr_err("core_scsi3_nodeacl_depend_item() for" 3265 pr_err("core_scsi3_nodeacl_depend_item() for"
3290 " dest_node_acl\n"); 3266 " dest_node_acl\n");
3291 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3267 atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
3292 smp_mb__after_atomic();
3293 dest_node_acl = NULL; 3268 dest_node_acl = NULL;
3294 ret = TCM_INVALID_PARAMETER_LIST; 3269 ret = TCM_INVALID_PARAMETER_LIST;
3295 goto out; 3270 goto out;
@@ -3313,8 +3288,7 @@ after_iport_check:
3313 3288
3314 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 3289 if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
3315 pr_err("core_scsi3_lunacl_depend_item() failed\n"); 3290 pr_err("core_scsi3_lunacl_depend_item() failed\n");
3316 atomic_dec(&dest_se_deve->pr_ref_count); 3291 atomic_dec_mb(&dest_se_deve->pr_ref_count);
3317 smp_mb__after_atomic();
3318 dest_se_deve = NULL; 3292 dest_se_deve = NULL;
3319 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3293 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3320 goto out; 3294 goto out;
@@ -3497,6 +3471,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3497sense_reason_t 3471sense_reason_t
3498target_scsi3_emulate_pr_out(struct se_cmd *cmd) 3472target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3499{ 3473{
3474 struct se_device *dev = cmd->se_dev;
3500 unsigned char *cdb = &cmd->t_task_cdb[0]; 3475 unsigned char *cdb = &cmd->t_task_cdb[0];
3501 unsigned char *buf; 3476 unsigned char *buf;
3502 u64 res_key, sa_res_key; 3477 u64 res_key, sa_res_key;
@@ -3561,6 +3536,13 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3561 aptpl = (buf[17] & 0x01); 3536 aptpl = (buf[17] & 0x01);
3562 unreg = (buf[17] & 0x02); 3537 unreg = (buf[17] & 0x02);
3563 } 3538 }
3539 /*
3540 * If the backend device has been configured to force APTPL metadata
 3541 * write-out, go ahead and propagate aptpl=1 down now.
3542 */
3543 if (dev->dev_attrib.force_pr_aptpl)
3544 aptpl = 1;
3545
3564 transport_kunmap_data_sg(cmd); 3546 transport_kunmap_data_sg(cmd);
3565 buf = NULL; 3547 buf = NULL;
3566 3548
@@ -3803,7 +3785,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3803 if (!buf) 3785 if (!buf)
3804 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3786 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3805 3787
3806 buf[0] = ((add_len << 8) & 0xff); 3788 buf[0] = ((add_len >> 8) & 0xff);
3807 buf[1] = (add_len & 0xff); 3789 buf[1] = (add_len & 0xff);
3808 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ 3790 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
3809 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ 3791 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
@@ -3879,8 +3861,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3879 se_tpg = pr_reg->pr_reg_nacl->se_tpg; 3861 se_tpg = pr_reg->pr_reg_nacl->se_tpg;
3880 add_desc_len = 0; 3862 add_desc_len = 0;
3881 3863
3882 atomic_inc(&pr_reg->pr_res_holders); 3864 atomic_inc_mb(&pr_reg->pr_res_holders);
3883 smp_mb__after_atomic();
3884 spin_unlock(&pr_tmpl->registration_lock); 3865 spin_unlock(&pr_tmpl->registration_lock);
3885 /* 3866 /*
3886 * Determine expected length of $FABRIC_MOD specific 3867 * Determine expected length of $FABRIC_MOD specific
@@ -3893,8 +3874,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3893 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" 3874 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
3894 " out of buffer: %d\n", cmd->data_length); 3875 " out of buffer: %d\n", cmd->data_length);
3895 spin_lock(&pr_tmpl->registration_lock); 3876 spin_lock(&pr_tmpl->registration_lock);
3896 atomic_dec(&pr_reg->pr_res_holders); 3877 atomic_dec_mb(&pr_reg->pr_res_holders);
3897 smp_mb__after_atomic();
3898 break; 3878 break;
3899 } 3879 }
3900 /* 3880 /*
@@ -3955,8 +3935,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3955 se_nacl, pr_reg, &format_code, &buf[off+4]); 3935 se_nacl, pr_reg, &format_code, &buf[off+4]);
3956 3936
3957 spin_lock(&pr_tmpl->registration_lock); 3937 spin_lock(&pr_tmpl->registration_lock);
3958 atomic_dec(&pr_reg->pr_res_holders); 3938 atomic_dec_mb(&pr_reg->pr_res_holders);
3959 smp_mb__after_atomic();
3960 /* 3939 /*
3961 * Set the ADDITIONAL DESCRIPTOR LENGTH 3940 * Set the ADDITIONAL DESCRIPTOR LENGTH
3962 */ 3941 */
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 2ee2936fa0bd..749fd7bb7510 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
60 unsigned char *, u16, u32, int, int, u8); 60 unsigned char *, u16, u32, int, int, u8);
61extern int core_scsi3_check_aptpl_registration(struct se_device *, 61extern int core_scsi3_check_aptpl_registration(struct se_device *,
62 struct se_portal_group *, struct se_lun *, 62 struct se_portal_group *, struct se_lun *,
63 struct se_lun_acl *); 63 struct se_node_acl *, u32);
64extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, 64extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
65 struct se_node_acl *); 65 struct se_node_acl *);
66extern void core_scsi3_free_all_registrations(struct se_device *); 66extern void core_scsi3_free_all_registrations(struct se_device *);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 70d9f6dabba0..7c8291f0bbbc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -749,14 +749,18 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
749 ret = -EINVAL; 749 ret = -EINVAL;
750 goto out; 750 goto out;
751 } 751 }
752 match_int(args, &arg); 752 ret = match_int(args, &arg);
753 if (ret)
754 goto out;
753 pdv->pdv_host_id = arg; 755 pdv->pdv_host_id = arg;
754 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" 756 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
755 " %d\n", phv->phv_host_id, pdv->pdv_host_id); 757 " %d\n", phv->phv_host_id, pdv->pdv_host_id);
756 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; 758 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
757 break; 759 break;
758 case Opt_scsi_channel_id: 760 case Opt_scsi_channel_id:
759 match_int(args, &arg); 761 ret = match_int(args, &arg);
762 if (ret)
763 goto out;
760 pdv->pdv_channel_id = arg; 764 pdv->pdv_channel_id = arg;
761 pr_debug("PSCSI[%d]: Referencing SCSI Channel" 765 pr_debug("PSCSI[%d]: Referencing SCSI Channel"
762 " ID: %d\n", phv->phv_host_id, 766 " ID: %d\n", phv->phv_host_id,
@@ -764,7 +768,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
764 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; 768 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
765 break; 769 break;
766 case Opt_scsi_target_id: 770 case Opt_scsi_target_id:
767 match_int(args, &arg); 771 ret = match_int(args, &arg);
772 if (ret)
773 goto out;
768 pdv->pdv_target_id = arg; 774 pdv->pdv_target_id = arg;
769 pr_debug("PSCSI[%d]: Referencing SCSI Target" 775 pr_debug("PSCSI[%d]: Referencing SCSI Target"
770 " ID: %d\n", phv->phv_host_id, 776 " ID: %d\n", phv->phv_host_id,
@@ -772,7 +778,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
772 pdv->pdv_flags |= PDF_HAS_TARGET_ID; 778 pdv->pdv_flags |= PDF_HAS_TARGET_ID;
773 break; 779 break;
774 case Opt_scsi_lun_id: 780 case Opt_scsi_lun_id:
775 match_int(args, &arg); 781 ret = match_int(args, &arg);
782 if (ret)
783 goto out;
776 pdv->pdv_lun_id = arg; 784 pdv->pdv_lun_id = arg;
777 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" 785 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
778 " %d\n", phv->phv_host_id, pdv->pdv_lun_id); 786 " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index bd78d9235ac6..ebe62afb957d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -948,7 +948,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
948 } 948 }
949 949
950 /* reject any command that we don't have a handler for */ 950 /* reject any command that we don't have a handler for */
951 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) 951 if (!cmd->execute_cmd)
952 return TCM_UNSUPPORTED_SCSI_OPCODE; 952 return TCM_UNSUPPORTED_SCSI_OPCODE;
953 953
954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f7cd95e8111a..fa5e157db47b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -64,21 +64,17 @@ int core_tmr_alloc_req(
64} 64}
65EXPORT_SYMBOL(core_tmr_alloc_req); 65EXPORT_SYMBOL(core_tmr_alloc_req);
66 66
67void core_tmr_release_req( 67void core_tmr_release_req(struct se_tmr_req *tmr)
68 struct se_tmr_req *tmr)
69{ 68{
70 struct se_device *dev = tmr->tmr_dev; 69 struct se_device *dev = tmr->tmr_dev;
71 unsigned long flags; 70 unsigned long flags;
72 71
73 if (!dev) { 72 if (dev) {
74 kfree(tmr); 73 spin_lock_irqsave(&dev->se_tmr_lock, flags);
75 return; 74 list_del(&tmr->tmr_list);
75 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
76 } 76 }
77 77
78 spin_lock_irqsave(&dev->se_tmr_lock, flags);
79 list_del(&tmr->tmr_list);
80 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
81
82 kfree(tmr); 78 kfree(tmr);
83} 79}
84 80
@@ -90,9 +86,8 @@ static void core_tmr_handle_tas_abort(
90 bool remove = true; 86 bool remove = true;
91 /* 87 /*
92 * TASK ABORTED status (TAS) bit support 88 * TASK ABORTED status (TAS) bit support
93 */ 89 */
94 if ((tmr_nacl && 90 if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
95 (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
96 remove = false; 91 remove = false;
97 transport_send_task_abort(cmd); 92 transport_send_task_abort(cmd);
98 } 93 }
@@ -120,13 +115,12 @@ void core_tmr_abort_task(
120 struct se_tmr_req *tmr, 115 struct se_tmr_req *tmr,
121 struct se_session *se_sess) 116 struct se_session *se_sess)
122{ 117{
123 struct se_cmd *se_cmd, *tmp_cmd; 118 struct se_cmd *se_cmd;
124 unsigned long flags; 119 unsigned long flags;
125 int ref_tag; 120 int ref_tag;
126 121
127 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 122 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
128 list_for_each_entry_safe(se_cmd, tmp_cmd, 123 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
129 &se_sess->sess_cmd_list, se_cmd_list) {
130 124
131 if (dev != se_cmd->se_dev) 125 if (dev != se_cmd->se_dev)
132 continue; 126 continue;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index be783f717f19..0696de9553d3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -40,6 +40,7 @@
40#include <target/target_core_fabric.h> 40#include <target/target_core_fabric.h>
41 41
42#include "target_core_internal.h" 42#include "target_core_internal.h"
43#include "target_core_pr.h"
43 44
44extern struct se_device *g_lun0_dev; 45extern struct se_device *g_lun0_dev;
45 46
@@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
166 167
167 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, 168 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
168 lun_access, acl, tpg); 169 lun_access, acl, tpg);
170 /*
171 * Check to see if there are any existing persistent reservation
172 * APTPL pre-registrations that need to be enabled for this dynamic
173 * LUN ACL now..
174 */
175 core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
176 lun->unpacked_lun);
169 spin_lock(&tpg->tpg_lun_lock); 177 spin_lock(&tpg->tpg_lun_lock);
170 } 178 }
171 spin_unlock(&tpg->tpg_lun_lock); 179 spin_unlock(&tpg->tpg_lun_lock);
@@ -335,7 +343,7 @@ void core_tpg_clear_object_luns(struct se_portal_group *tpg)
335 continue; 343 continue;
336 344
337 spin_unlock(&tpg->tpg_lun_lock); 345 spin_unlock(&tpg->tpg_lun_lock);
338 core_dev_del_lun(tpg, lun->unpacked_lun); 346 core_dev_del_lun(tpg, lun);
339 spin_lock(&tpg->tpg_lun_lock); 347 spin_lock(&tpg->tpg_lun_lock);
340 } 348 }
341 spin_unlock(&tpg->tpg_lun_lock); 349 spin_unlock(&tpg->tpg_lun_lock);
@@ -663,13 +671,6 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
663 return 0; 671 return 0;
664} 672}
665 673
666static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
667{
668 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
669
670 core_tpg_post_dellun(se_tpg, lun);
671}
672
673int core_tpg_register( 674int core_tpg_register(
674 struct target_core_fabric_ops *tfo, 675 struct target_core_fabric_ops *tfo,
675 struct se_wwn *se_wwn, 676 struct se_wwn *se_wwn,
@@ -773,7 +774,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
773 spin_unlock_irq(&se_tpg->acl_node_lock); 774 spin_unlock_irq(&se_tpg->acl_node_lock);
774 775
775 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 776 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
776 core_tpg_release_virtual_lun0(se_tpg); 777 core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
777 778
778 se_tpg->se_tpg_fabric_ptr = NULL; 779 se_tpg->se_tpg_fabric_ptr = NULL;
779 array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG); 780 array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
@@ -838,37 +839,7 @@ int core_tpg_add_lun(
838 return 0; 839 return 0;
839} 840}
840 841
841struct se_lun *core_tpg_pre_dellun( 842void core_tpg_remove_lun(
842 struct se_portal_group *tpg,
843 u32 unpacked_lun)
844{
845 struct se_lun *lun;
846
847 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
848 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
849 "-1: %u for Target Portal Group: %u\n",
850 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
851 TRANSPORT_MAX_LUNS_PER_TPG-1,
852 tpg->se_tpg_tfo->tpg_get_tag(tpg));
853 return ERR_PTR(-EOVERFLOW);
854 }
855
856 spin_lock(&tpg->tpg_lun_lock);
857 lun = tpg->tpg_lun_list[unpacked_lun];
858 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
859 pr_err("%s Logical Unit Number: %u is not active on"
860 " Target Portal Group: %u, ignoring request.\n",
861 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
862 tpg->se_tpg_tfo->tpg_get_tag(tpg));
863 spin_unlock(&tpg->tpg_lun_lock);
864 return ERR_PTR(-ENODEV);
865 }
866 spin_unlock(&tpg->tpg_lun_lock);
867
868 return lun;
869}
870
871int core_tpg_post_dellun(
872 struct se_portal_group *tpg, 843 struct se_portal_group *tpg,
873 struct se_lun *lun) 844 struct se_lun *lun)
874{ 845{
@@ -882,6 +853,4 @@ int core_tpg_post_dellun(
882 spin_unlock(&tpg->tpg_lun_lock); 853 spin_unlock(&tpg->tpg_lun_lock);
883 854
884 percpu_ref_exit(&lun->lun_ref); 855 percpu_ref_exit(&lun->lun_ref);
885
886 return 0;
887} 856}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7fa62fc93e0b..9ea0d5f03f7a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -232,6 +232,10 @@ void transport_subsystem_check_init(void)
232 if (ret != 0) 232 if (ret != 0)
233 pr_err("Unable to load target_core_pscsi\n"); 233 pr_err("Unable to load target_core_pscsi\n");
234 234
235 ret = request_module("target_core_user");
236 if (ret != 0)
237 pr_err("Unable to load target_core_user\n");
238
235 sub_api_initialized = 1; 239 sub_api_initialized = 1;
236} 240}
237 241
@@ -752,8 +756,7 @@ void target_qf_do_work(struct work_struct *work)
752 756
753 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 757 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
754 list_del(&cmd->se_qf_node); 758 list_del(&cmd->se_qf_node);
755 atomic_dec(&dev->dev_qf_count); 759 atomic_dec_mb(&dev->dev_qf_count);
756 smp_mb__after_atomic();
757 760
758 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 761 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
759 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 762 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1166,7 +1169,6 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
1166 * Dormant to Active status. 1169 * Dormant to Active status.
1167 */ 1170 */
1168 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1171 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
1169 smp_mb__after_atomic();
1170 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1172 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1171 cmd->se_ordered_id, cmd->sam_task_attr, 1173 cmd->se_ordered_id, cmd->sam_task_attr,
1172 dev->transport->name); 1174 dev->transport->name);
@@ -1722,8 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1722 cmd->t_task_cdb[0], cmd->se_ordered_id); 1724 cmd->t_task_cdb[0], cmd->se_ordered_id);
1723 return false; 1725 return false;
1724 case MSG_ORDERED_TAG: 1726 case MSG_ORDERED_TAG:
1725 atomic_inc(&dev->dev_ordered_sync); 1727 atomic_inc_mb(&dev->dev_ordered_sync);
1726 smp_mb__after_atomic();
1727 1728
1728 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1729 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
1729 " se_ordered_id: %u\n", 1730 " se_ordered_id: %u\n",
@@ -1740,8 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1740 /* 1741 /*
1741 * For SIMPLE and UNTAGGED Task Attribute commands 1742 * For SIMPLE and UNTAGGED Task Attribute commands
1742 */ 1743 */
1743 atomic_inc(&dev->simple_cmds); 1744 atomic_inc_mb(&dev->simple_cmds);
1744 smp_mb__after_atomic();
1745 break; 1745 break;
1746 } 1746 }
1747 1747
@@ -1845,8 +1845,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1845 return; 1845 return;
1846 1846
1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
1848 atomic_dec(&dev->simple_cmds); 1848 atomic_dec_mb(&dev->simple_cmds);
1849 smp_mb__after_atomic();
1850 dev->dev_cur_ordered_id++; 1849 dev->dev_cur_ordered_id++;
1851 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1850 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
1852 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1851 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1857,8 +1856,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1857 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1856 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
1858 cmd->se_ordered_id); 1857 cmd->se_ordered_id);
1859 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1858 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1860 atomic_dec(&dev->dev_ordered_sync); 1859 atomic_dec_mb(&dev->dev_ordered_sync);
1861 smp_mb__after_atomic();
1862 1860
1863 dev->dev_cur_ordered_id++; 1861 dev->dev_cur_ordered_id++;
1864 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1862 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1877,8 +1875,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
1877 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1875 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
1878 trace_target_cmd_complete(cmd); 1876 trace_target_cmd_complete(cmd);
1879 ret = cmd->se_tfo->queue_status(cmd); 1877 ret = cmd->se_tfo->queue_status(cmd);
1880 if (ret) 1878 goto out;
1881 goto out;
1882 } 1879 }
1883 1880
1884 switch (cmd->data_direction) { 1881 switch (cmd->data_direction) {
@@ -1916,8 +1913,7 @@ static void transport_handle_queue_full(
1916{ 1913{
1917 spin_lock_irq(&dev->qf_cmd_lock); 1914 spin_lock_irq(&dev->qf_cmd_lock);
1918 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1915 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
1919 atomic_inc(&dev->dev_qf_count); 1916 atomic_inc_mb(&dev->dev_qf_count);
1920 smp_mb__after_atomic();
1921 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1917 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
1922 1918
1923 schedule_work(&cmd->se_dev->qf_work_queue); 1919 schedule_work(&cmd->se_dev->qf_work_queue);
@@ -2896,7 +2892,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
2896 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2892 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2897 cmd->transport_state |= CMD_T_ABORTED; 2893 cmd->transport_state |= CMD_T_ABORTED;
2898 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2894 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2899 smp_mb__after_atomic();
2900 return; 2895 return;
2901 } 2896 }
2902 } 2897 }
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 101858e245b3..1738b1646988 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -161,8 +161,7 @@ int core_scsi3_ua_allocate(
161 spin_unlock(&deve->ua_lock); 161 spin_unlock(&deve->ua_lock);
162 spin_unlock_irq(&nacl->device_list_lock); 162 spin_unlock_irq(&nacl->device_list_lock);
163 163
164 atomic_inc(&deve->ua_count); 164 atomic_inc_mb(&deve->ua_count);
165 smp_mb__after_atomic();
166 return 0; 165 return 0;
167 } 166 }
168 list_add_tail(&ua->ua_nacl_list, &deve->ua_list); 167 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -174,8 +173,7 @@ int core_scsi3_ua_allocate(
174 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 173 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
175 asc, ascq); 174 asc, ascq);
176 175
177 atomic_inc(&deve->ua_count); 176 atomic_inc_mb(&deve->ua_count);
178 smp_mb__after_atomic();
179 return 0; 177 return 0;
180} 178}
181 179
@@ -189,8 +187,7 @@ void core_scsi3_ua_release_all(
189 list_del(&ua->ua_nacl_list); 187 list_del(&ua->ua_nacl_list);
190 kmem_cache_free(se_ua_cache, ua); 188 kmem_cache_free(se_ua_cache, ua);
191 189
192 atomic_dec(&deve->ua_count); 190 atomic_dec_mb(&deve->ua_count);
193 smp_mb__after_atomic();
194 } 191 }
195 spin_unlock(&deve->ua_lock); 192 spin_unlock(&deve->ua_lock);
196} 193}
@@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition(
250 list_del(&ua->ua_nacl_list); 247 list_del(&ua->ua_nacl_list);
251 kmem_cache_free(se_ua_cache, ua); 248 kmem_cache_free(se_ua_cache, ua);
252 249
253 atomic_dec(&deve->ua_count); 250 atomic_dec_mb(&deve->ua_count);
254 smp_mb__after_atomic();
255 } 251 }
256 spin_unlock(&deve->ua_lock); 252 spin_unlock(&deve->ua_lock);
257 spin_unlock_irq(&nacl->device_list_lock); 253 spin_unlock_irq(&nacl->device_list_lock);
@@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense(
309 list_del(&ua->ua_nacl_list); 305 list_del(&ua->ua_nacl_list);
310 kmem_cache_free(se_ua_cache, ua); 306 kmem_cache_free(se_ua_cache, ua);
311 307
312 atomic_dec(&deve->ua_count); 308 atomic_dec_mb(&deve->ua_count);
313 smp_mb__after_atomic();
314 } 309 }
315 spin_unlock(&deve->ua_lock); 310 spin_unlock(&deve->ua_lock);
316 spin_unlock_irq(&nacl->device_list_lock); 311 spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index be912b36daae..a6b56b364e7a 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,4 +1,5 @@
1#ifndef TARGET_CORE_UA_H 1#ifndef TARGET_CORE_UA_H
2#define TARGET_CORE_UA_H
2 3
3/* 4/*
 4 * From spc4r17, Table D.1: ASC and ASCQ Assignment 5 * From spc4r17, Table D.1: ASC and ASCQ Assignment
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
new file mode 100644
index 000000000000..9a1b314f6482
--- /dev/null
+++ b/drivers/target/target_core_user.c
@@ -0,0 +1,1167 @@
1/*
2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
3 * Copyright (C) 2014 Red Hat, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/spinlock.h>
20#include <linux/module.h>
21#include <linux/idr.h>
22#include <linux/timer.h>
23#include <linux/parser.h>
24#include <scsi/scsi.h>
25#include <scsi/scsi_host.h>
26#include <linux/uio_driver.h>
27#include <net/genetlink.h>
28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h>
30#include <target/target_core_backend.h>
31#include <linux/target_core_user.h>
32
33/*
34 * Define a shared-memory interface for LIO to pass SCSI commands and
35 * data to userspace for processing. This is to allow backends that
36 * are too complex for in-kernel support to be possible.
37 *
38 * It uses the UIO framework to do a lot of the device-creation and
39 * introspection work for us.
40 *
41 * See the .h file for how the ring is laid out. Note that while the
42 * command ring is defined, the particulars of the data area are
43 * not. Offset values in the command entry point to other locations
44 * internal to the mmap()ed area. There is separate space outside the
45 * command ring for data buffers. This leaves maximum flexibility for
46 * moving buffer allocations, or even page flipping or other
47 * allocation techniques, without altering the command ring layout.
48 *
49 * SECURITY:
50 * The user process must be assumed to be malicious. There's no way to
51 * prevent it breaking the command ring protocol if it wants, but in
52 * order to prevent other issues we must only ever read *data* from
53 * the shared memory area, not offsets or sizes. This applies to
54 * command ring entries as well as the mailbox. Extra code needed for
55 * this may have a 'UAM' comment.
56 */
57
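/*
 * Illustration only: a rough sketch of how a userspace handler locates
 * things in the mmap()ed region, with field names per the uapi header
 * include/uapi/linux/target_core_user.h:
 *
 *	struct tcmu_mailbox *mb = map;
 *	struct tcmu_cmd_entry *ent = map + mb->cmdr_off + mb->cmd_tail;
 *	void *data = map + (size_t) ent->req.iov[0].iov_base;
 *
 * Per the SECURITY note above, the kernel never trusts offsets or sizes
 * read back from this area; userspace, for its part, must treat iov_base
 * as an offset from the start of the region, not a pointer.
 */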
58
59#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
60
61#define CMDR_SIZE (16 * 4096)
62#define DATA_SIZE (257 * 4096)
63
64#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
65
66static struct device *tcmu_root_device;
67
68struct tcmu_hba {
69 u32 host_id;
70};
71
72/* User wants all cmds or just some */
73enum passthru_level {
74 TCMU_PASS_ALL = 0,
75 TCMU_PASS_IO,
76 TCMU_PASS_INVALID,
77};
78
79#define TCMU_CONFIG_LEN 256
80
81struct tcmu_dev {
82 struct se_device se_dev;
83
84 char *name;
85 struct se_hba *hba;
86
87#define TCMU_DEV_BIT_OPEN 0
88#define TCMU_DEV_BIT_BROKEN 1
89 unsigned long flags;
90 enum passthru_level pass_level;
91
92 struct uio_info uio_info;
93
94 struct tcmu_mailbox *mb_addr;
95 size_t dev_size;
96 u32 cmdr_size;
97 u32 cmdr_last_cleaned;
98 /* Offset of data ring from start of mb */
99 size_t data_off;
100 size_t data_size;
101 /* Ring head + tail values. */
102 /* Must add data_off and mb_addr to get the address */
103 size_t data_head;
104 size_t data_tail;
105
106 wait_queue_head_t wait_cmdr;
107 /* TODO should this be a mutex? */
108 spinlock_t cmdr_lock;
109
110 struct idr commands;
111 spinlock_t commands_lock;
112
113 struct timer_list timeout;
114
115 char dev_config[TCMU_CONFIG_LEN];
116};
117
118#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
119
120#define CMDR_OFF sizeof(struct tcmu_mailbox)
121
122struct tcmu_cmd {
123 struct se_cmd *se_cmd;
124 struct tcmu_dev *tcmu_dev;
125
126 uint16_t cmd_id;
127
128 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if
129 cmd has been completed then accessing se_cmd is off limits */
130 size_t data_length;
131
132 unsigned long deadline;
133
134#define TCMU_CMD_BIT_EXPIRED 0
135 unsigned long flags;
136};
137
138static struct kmem_cache *tcmu_cmd_cache;
139
140/* multicast group */
141enum tcmu_multicast_groups {
142 TCMU_MCGRP_CONFIG,
143};
144
145static const struct genl_multicast_group tcmu_mcgrps[] = {
146 [TCMU_MCGRP_CONFIG] = { .name = "config", },
147};
148
149/* Our generic netlink family */
150static struct genl_family tcmu_genl_family = {
151 .id = GENL_ID_GENERATE,
152 .hdrsize = 0,
153 .name = "TCM-USER",
154 .version = 1,
155 .maxattr = TCMU_ATTR_MAX,
156 .mcgrps = tcmu_mcgrps,
157 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
158};
159
160static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
161{
162 struct se_device *se_dev = se_cmd->se_dev;
163 struct tcmu_dev *udev = TCMU_DEV(se_dev);
164 struct tcmu_cmd *tcmu_cmd;
165 int cmd_id;
166
167 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
168 if (!tcmu_cmd)
169 return NULL;
170
171 tcmu_cmd->se_cmd = se_cmd;
172 tcmu_cmd->tcmu_dev = udev;
173 tcmu_cmd->data_length = se_cmd->data_length;
174
175 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
176
177 idr_preload(GFP_KERNEL);
178 spin_lock_irq(&udev->commands_lock);
179 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
180 USHRT_MAX, GFP_NOWAIT);
181 spin_unlock_irq(&udev->commands_lock);
182 idr_preload_end();
183
184 if (cmd_id < 0) {
185 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
186 return NULL;
187 }
188 tcmu_cmd->cmd_id = cmd_id;
189
190 return tcmu_cmd;
191}
192
193static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
194{
195 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;
196
197 size = round_up(size+offset, PAGE_SIZE);
198 vaddr -= offset;
199
200 while (size) {
201 flush_dcache_page(virt_to_page(vaddr));
 202 size -= PAGE_SIZE; vaddr += PAGE_SIZE; /* advance, or the same page is flushed repeatedly */
203 }
204}
205
206/*
207 * Some ring helper functions. We don't assume size is a power of 2 so
208 * we can't use circ_buf.h.
209 */
210static inline size_t spc_used(size_t head, size_t tail, size_t size)
211{
212 int diff = head - tail;
213
214 if (diff >= 0)
215 return diff;
216 else
217 return size + diff;
218}
219
220static inline size_t spc_free(size_t head, size_t tail, size_t size)
221{
222 /* Keep 1 byte unused or we can't tell full from empty */
223 return (size - spc_used(head, tail, size) - 1);
224}
225
226static inline size_t head_to_end(size_t head, size_t size)
227{
228 return size - head;
229}
230
231#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
232
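/*
 * Worked example with hypothetical numbers: size = 8, head = 2, tail = 6
 * gives spc_used() = 8 + (2 - 6) = 4 and spc_free() = 8 - 4 - 1 = 3; the
 * one reserved byte is what lets head == tail mean "empty" rather than
 * "full".  UPDATE_HEAD(head, 3, 8) then moves head from 2 to 5 with a
 * release store, publishing the ring contents before the new index value.
 */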
233/*
 234 * We can't queue a command until we have space available on the cmd ring *and*
 235 * space available on the data ring.
236 *
237 * Called with ring lock held.
238 */
239static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
240{
241 struct tcmu_mailbox *mb = udev->mb_addr;
242 size_t space;
243 u32 cmd_head;
244 size_t cmd_needed;
245
246 tcmu_flush_dcache_range(mb, sizeof(*mb));
247
248 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
249
250 /*
 251 * If cmd end-of-ring space is too small then we need space for a PAD entry plus
252 * original cmd - cmds are internally contiguous.
253 */
254 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
255 cmd_needed = cmd_size;
256 else
257 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
258
259 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
260 if (space < cmd_needed) {
261 pr_debug("no cmd space: %u %u %u\n", cmd_head,
262 udev->cmdr_last_cleaned, udev->cmdr_size);
263 return false;
264 }
265
266 space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
267 if (space < data_needed) {
268 pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
269 udev->data_tail, udev->data_size);
270 return false;
271 }
272
273 return true;
274}
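/*
 * Example with hypothetical numbers: cmdr_size = 65536 and cmd_head =
 * 64512 leave 1024 bytes before the wrap, so a 2048-byte command needs
 * cmd_needed = 2048 + 1024 = 3072 free bytes: the 1024-byte tail is
 * consumed by a PAD entry and the command itself starts at offset 0.
 */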
275
276static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
277{
278 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
279 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
280 size_t base_command_size, command_size;
281 struct tcmu_mailbox *mb;
282 struct tcmu_cmd_entry *entry;
283 int i;
284 struct scatterlist *sg;
285 struct iovec *iov;
286 int iov_cnt = 0;
287 uint32_t cmd_head;
288 uint64_t cdb_off;
289
290 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
291 return -EINVAL;
292
293 /*
294 * Must be a certain minimum size for response sense info, but
295 * also may be larger if the iov array is large.
296 *
 297 * iovs = sgl_nents + 1 for the end-of-ring case, plus another 1
 298 * because the size is taken as offsetof() of the one-past-the-last element.
299 */
300 base_command_size = max(offsetof(struct tcmu_cmd_entry,
301 req.iov[se_cmd->t_data_nents + 2]),
302 sizeof(struct tcmu_cmd_entry));
303 command_size = base_command_size
304 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
305
306 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
307
308 spin_lock_irq(&udev->cmdr_lock);
309
310 mb = udev->mb_addr;
311 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
312 if ((command_size > (udev->cmdr_size / 2))
313 || tcmu_cmd->data_length > (udev->data_size - 1))
314 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
315 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
316 udev->cmdr_size, udev->data_size);
317
318 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
319 int ret;
320 DEFINE_WAIT(__wait);
321
322 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
323
324 pr_debug("sleeping for ring space\n");
325 spin_unlock_irq(&udev->cmdr_lock);
326 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
327 finish_wait(&udev->wait_cmdr, &__wait);
328 if (!ret) {
329 pr_warn("tcmu: command timed out\n");
330 return -ETIMEDOUT;
331 }
332
333 spin_lock_irq(&udev->cmdr_lock);
334
335 /* We dropped cmdr_lock, cmd_head is stale */
336 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
337 }
338
339 /* Insert a PAD if end-of-ring space is too small */
340 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
341 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
342
343 entry = (void *) mb + CMDR_OFF + cmd_head;
344 tcmu_flush_dcache_range(entry, sizeof(*entry));
345 tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD);
346 tcmu_hdr_set_len(&entry->hdr, pad_size);
347
348 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
349
350 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
351 WARN_ON(cmd_head != 0);
352 }
353
354 entry = (void *) mb + CMDR_OFF + cmd_head;
355 tcmu_flush_dcache_range(entry, sizeof(*entry));
356 tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD);
357 tcmu_hdr_set_len(&entry->hdr, command_size);
358 entry->cmd_id = tcmu_cmd->cmd_id;
359
360 /*
361 * Fix up iovecs, and handle if allocation in data ring wrapped.
362 */
363 iov = &entry->req.iov[0];
364 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
365 size_t copy_bytes = min((size_t)sg->length,
366 head_to_end(udev->data_head, udev->data_size));
367 void *from = kmap_atomic(sg_page(sg)) + sg->offset;
368 void *to = (void *) mb + udev->data_off + udev->data_head;
369
370 if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
371 memcpy(to, from, copy_bytes);
372 tcmu_flush_dcache_range(to, copy_bytes);
373 }
374
375 /* Even iov_base is relative to mb_addr */
376 iov->iov_len = copy_bytes;
377 iov->iov_base = (void *) udev->data_off + udev->data_head;
378 iov_cnt++;
379 iov++;
380
381 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
382
383 /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
384 if (sg->length != copy_bytes) {
385 from += copy_bytes;
386 copy_bytes = sg->length - copy_bytes;
387
388 iov->iov_len = copy_bytes;
389 iov->iov_base = (void *) udev->data_off + udev->data_head;
390
391 if (se_cmd->data_direction == DMA_TO_DEVICE) {
392 to = (void *) mb + udev->data_off + udev->data_head;
393 memcpy(to, from, copy_bytes);
394 tcmu_flush_dcache_range(to, copy_bytes);
395 }
396
397 iov_cnt++;
398 iov++;
399
400 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
401 }
402
403 kunmap_atomic(from);
404 }
405 entry->req.iov_cnt = iov_cnt;
406
407 /* All offsets relative to mb_addr, not start of entry! */
408 cdb_off = CMDR_OFF + cmd_head + base_command_size;
409 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
410 entry->req.cdb_off = cdb_off;
411 tcmu_flush_dcache_range(entry, sizeof(*entry));
412
413 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
414 tcmu_flush_dcache_range(mb, sizeof(*mb));
415
416 spin_unlock_irq(&udev->cmdr_lock);
417
418 /* TODO: only if FLUSH and FUA? */
419 uio_event_notify(&udev->uio_info);
420
421 mod_timer(&udev->timeout,
422 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
423
424 return 0;
425}
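
When the data area wraps, a single scatterlist segment is split across two
iovecs, as above. A minimal standalone sketch of the same arithmetic, with
made-up sizes (head_to_end() mirrors the helper defined earlier in this
file, and the modular advance stands in for UPDATE_HEAD()):

	#include <stdio.h>
	#include <string.h>

	#define DATA_SIZE 64	/* illustrative only; the real area is larger */

	/* Bytes from 'head' to the physical end of the area */
	static size_t head_to_end(size_t head, size_t size)
	{
		return size - head;
	}

	int main(void)
	{
		char data[DATA_SIZE];
		char buf[20];
		size_t head = 56;	/* only 8 bytes left before the wrap */
		size_t copy;

		memset(buf, 'x', sizeof(buf));	/* 20 bytes of payload */

		/* First iovec: copy up to the end of the data area */
		copy = sizeof(buf) < head_to_end(head, DATA_SIZE) ?
			sizeof(buf) : head_to_end(head, DATA_SIZE);
		memcpy(data + head, buf, copy);
		printf("iov[0]: off=%zu len=%zu\n", head, copy);
		head = (head + copy) % DATA_SIZE;	/* cf. UPDATE_HEAD() */

		/* Second iovec: the remainder lands at offset 0 post-wrap */
		if (copy < sizeof(buf)) {
			memcpy(data + head, buf + copy, sizeof(buf) - copy);
			printf("iov[1]: off=%zu len=%zu\n",
			       head, sizeof(buf) - copy);
		}
		return 0;	/* prints off=56 len=8, then off=0 len=12 */
	}
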
426
427static int tcmu_queue_cmd(struct se_cmd *se_cmd)
428{
429 struct se_device *se_dev = se_cmd->se_dev;
430 struct tcmu_dev *udev = TCMU_DEV(se_dev);
431 struct tcmu_cmd *tcmu_cmd;
432 int ret;
433
434 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
435 if (!tcmu_cmd)
436 return -ENOMEM;
437
438 ret = tcmu_queue_cmd_ring(tcmu_cmd);
439 if (ret < 0) {
440 pr_err("TCMU: Could not queue command\n");
441 spin_lock_irq(&udev->commands_lock);
442 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
443 spin_unlock_irq(&udev->commands_lock);
444
445 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
446 }
447
448 return ret;
449}
450
451static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
452{
453 struct se_cmd *se_cmd = cmd->se_cmd;
454 struct tcmu_dev *udev = cmd->tcmu_dev;
455
456 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
457 /* cmd has been completed already from timeout, just reclaim data
458 ring space */
459 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
460 return;
461 }
462
463 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
464 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
465 se_cmd->scsi_sense_length);
466
467 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
468	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
470 struct scatterlist *sg;
471 int i;
472
473		/* It'd be easier to look at entry's iovec again, but it's in userspace-accessible memory (UAM), so don't trust it */
474 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
475 size_t copy_bytes;
476 void *to;
477 void *from;
478
479 copy_bytes = min((size_t)sg->length,
480 head_to_end(udev->data_tail, udev->data_size));
481
482 to = kmap_atomic(sg_page(sg)) + sg->offset;
483 WARN_ON(sg->length + sg->offset > PAGE_SIZE);
484 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
485 tcmu_flush_dcache_range(from, copy_bytes);
486 memcpy(to, from, copy_bytes);
487
488 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
489
490 /* Uh oh, wrapped the data buffer for this sg's data */
491 if (sg->length != copy_bytes) {
492 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
493 WARN_ON(udev->data_tail);
494 to += copy_bytes;
495 copy_bytes = sg->length - copy_bytes;
496 tcmu_flush_dcache_range(from, copy_bytes);
497 memcpy(to, from, copy_bytes);
498
499 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
500 }
501
502 kunmap_atomic(to);
503 }
504
505 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
506 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
507 } else {
508 pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
509 }
510
511 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
512 cmd->se_cmd = NULL;
513
514 kmem_cache_free(tcmu_cmd_cache, cmd);
515}
516
517static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
518{
519 struct tcmu_mailbox *mb;
520 LIST_HEAD(cpl_cmds);
521 unsigned long flags;
522 int handled = 0;
523
524 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
525 pr_err("ring broken, not handling completions\n");
526 return 0;
527 }
528
529 spin_lock_irqsave(&udev->cmdr_lock, flags);
530
531 mb = udev->mb_addr;
532 tcmu_flush_dcache_range(mb, sizeof(*mb));
533
534 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
535
536 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
537 struct tcmu_cmd *cmd;
538
539 tcmu_flush_dcache_range(entry, sizeof(*entry));
540
541 if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) {
542 UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
543 continue;
544 }
545 WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD);
546
547 spin_lock(&udev->commands_lock);
548 cmd = idr_find(&udev->commands, entry->cmd_id);
549 if (cmd)
550 idr_remove(&udev->commands, cmd->cmd_id);
551 spin_unlock(&udev->commands_lock);
552
553 if (!cmd) {
554 pr_err("cmd_id not found, ring is broken\n");
555 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
556 break;
557 }
558
559 tcmu_handle_completion(cmd, entry);
560
561 UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
562
563 handled++;
564 }
565
566 if (mb->cmd_tail == mb->cmd_head)
567 del_timer(&udev->timeout); /* no more pending cmds */
568
569 spin_unlock_irqrestore(&udev->cmdr_lock, flags);
570
571 wake_up(&udev->wait_cmdr);
572
573 return handled;
574}
575
576static int tcmu_check_expired_cmd(int id, void *p, void *data)
577{
578 struct tcmu_cmd *cmd = p;
579
580 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
581 return 0;
582
583 if (!time_after(cmd->deadline, jiffies))
584 return 0;
585
586 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
587 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
588 cmd->se_cmd = NULL;
589
590 kmem_cache_free(tcmu_cmd_cache, cmd);
591
592 return 0;
593}
594
595static void tcmu_device_timedout(unsigned long data)
596{
597 struct tcmu_dev *udev = (struct tcmu_dev *)data;
598 unsigned long flags;
599 int handled;
600
601 handled = tcmu_handle_completions(udev);
602
603 pr_warn("%d completions handled from timeout\n", handled);
604
605 spin_lock_irqsave(&udev->commands_lock, flags);
606 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
607 spin_unlock_irqrestore(&udev->commands_lock, flags);
608
609 /*
610 * We don't need to wakeup threads on wait_cmdr since they have their
611 * own timeout.
612 */
613}
614
615static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
616{
617 struct tcmu_hba *tcmu_hba;
618
619 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
620 if (!tcmu_hba)
621 return -ENOMEM;
622
623 tcmu_hba->host_id = host_id;
624 hba->hba_ptr = tcmu_hba;
625
626 return 0;
627}
628
629static void tcmu_detach_hba(struct se_hba *hba)
630{
631 kfree(hba->hba_ptr);
632 hba->hba_ptr = NULL;
633}
634
635static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
636{
637 struct tcmu_dev *udev;
638
639 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
640 if (!udev)
641 return NULL;
642
643 udev->name = kstrdup(name, GFP_KERNEL);
644 if (!udev->name) {
645 kfree(udev);
646 return NULL;
647 }
648
649 udev->hba = hba;
650
651 init_waitqueue_head(&udev->wait_cmdr);
652 spin_lock_init(&udev->cmdr_lock);
653
654 idr_init(&udev->commands);
655 spin_lock_init(&udev->commands_lock);
656
657 setup_timer(&udev->timeout, tcmu_device_timedout,
658 (unsigned long)udev);
659
660 udev->pass_level = TCMU_PASS_ALL;
661
662 return &udev->se_dev;
663}
664
665static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
666{
667 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
668
669 tcmu_handle_completions(tcmu_dev);
670
671 return 0;
672}
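
tcmu_irqcontrol() is the kernel half of the uio doorbell: uio invokes it
whenever userspace write()s a 4-byte value to the device node, and read()
on the same node blocks until uio_event_notify() fires. A hedged userspace
sketch (the /dev/uio0 path is illustrative; the real minor comes from the
netlink event or sysfs):

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int wait_and_kick(void)
	{
		int fd = open("/dev/uio0", O_RDWR);
		uint32_t buf;

		if (fd < 0)
			return -1;

		/* Blocks until the kernel queues new commands */
		read(fd, &buf, sizeof(buf));

		/* ... process the command ring here ... */

		/* Any 4-byte write lands in tcmu_irqcontrol(), which
		 * reaps completions on the kernel side */
		buf = 1;
		write(fd, &buf, sizeof(buf));
		close(fd);
		return 0;
	}
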
673
674/*
675 * mmap code from uio.c. Copied here because we want to hook mmap()
676 * and this stuff must come along.
677 */
678static int tcmu_find_mem_index(struct vm_area_struct *vma)
679{
680 struct tcmu_dev *udev = vma->vm_private_data;
681 struct uio_info *info = &udev->uio_info;
682
683 if (vma->vm_pgoff < MAX_UIO_MAPS) {
684 if (info->mem[vma->vm_pgoff].size == 0)
685 return -1;
686 return (int)vma->vm_pgoff;
687 }
688 return -1;
689}
690
691static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
692{
693 struct tcmu_dev *udev = vma->vm_private_data;
694 struct uio_info *info = &udev->uio_info;
695 struct page *page;
696 unsigned long offset;
697 void *addr;
698
699 int mi = tcmu_find_mem_index(vma);
700 if (mi < 0)
701 return VM_FAULT_SIGBUS;
702
703 /*
704 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
705 * to use mem[N].
706 */
707 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
708
709 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
710 if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
711 page = virt_to_page(addr);
712 else
713 page = vmalloc_to_page(addr);
714 get_page(page);
715 vmf->page = page;
716 return 0;
717}
718
719static const struct vm_operations_struct tcmu_vm_ops = {
720 .fault = tcmu_vma_fault,
721};
722
723static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
724{
725 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
726
727 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
728 vma->vm_ops = &tcmu_vm_ops;
729
730 vma->vm_private_data = udev;
731
732 /* Ensure the mmap is exactly the right size */
733 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
734 return -EINVAL;
735
736 return 0;
737}
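
The size check above means userspace must map the whole ring in one go. A
sketch of the matching call, taking the region size from the standard uio
sysfs attribute (/sys/class/uio/uioN/maps/map0/size):

	#include <stdio.h>
	#include <sys/mman.h>

	void *map_ring(int uio_fd, const char *size_path)
	{
		FILE *f = fopen(size_path, "r");
		unsigned long long size;
		void *ring;

		if (!f)
			return NULL;
		if (fscanf(f, "%llx", &size) != 1) {
			fclose(f);
			return NULL;
		}
		fclose(f);

		/* Anything but the exact ring size fails with -EINVAL */
		ring = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    uio_fd, 0);
		return ring == MAP_FAILED ? NULL : ring;
	}
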
738
739static int tcmu_open(struct uio_info *info, struct inode *inode)
740{
741 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
742
743 /* O_EXCL not supported for char devs, so fake it? */
744 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
745 return -EBUSY;
746
747 pr_debug("open\n");
748
749 return 0;
750}
751
752static int tcmu_release(struct uio_info *info, struct inode *inode)
753{
754 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
755
756 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
757
758 pr_debug("close\n");
759
760 return 0;
761}
762
763static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
764{
765 struct sk_buff *skb;
766 void *msg_header;
767 int ret = -ENOMEM;
768
769 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
770 if (!skb)
771 return ret;
772
773 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
774 if (!msg_header)
775 goto free_skb;
776
777 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
778 if (ret < 0)
779 goto free_skb;
780
781 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
782 if (ret < 0)
783 goto free_skb;
784
785 ret = genlmsg_end(skb, msg_header);
786 if (ret < 0)
787 goto free_skb;
788
789 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
790 TCMU_MCGRP_CONFIG, GFP_KERNEL);
791
792 /* We don't care if no one is listening */
793 if (ret == -ESRCH)
794 ret = 0;
795
796 return ret;
797free_skb:
798 nlmsg_free(skb);
799 return ret;
800}
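
Userspace can watch for these add/remove events over generic netlink. A
sketch using libnl-genl; the family and multicast group names ("TCM-USER"
and "config") are defined with tcmu_genl_family earlier in this file and
should be treated as assumptions as far as this example is concerned:

	#include <stdio.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/target_core_user.h>

	static int handle_event(struct nl_msg *msg, void *arg)
	{
		struct nlattr *attrs[TCMU_ATTR_MAX + 1];

		if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs,
				  TCMU_ATTR_MAX, NULL))
			return NL_SKIP;
		if (attrs[TCMU_ATTR_DEVICE] && attrs[TCMU_ATTR_MINOR])
			printf("device %s on uio minor %u\n",
			       nla_get_string(attrs[TCMU_ATTR_DEVICE]),
			       nla_get_u32(attrs[TCMU_ATTR_MINOR]));
		return NL_OK;
	}

	int listen_for_devices(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		genl_connect(sk);
		grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
		if (grp < 0)
			return grp;
		nl_socket_add_membership(sk, grp);
		nl_socket_disable_seq_check(sk);
		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
				    handle_event, NULL);
		for (;;)
			nl_recvmsgs_default(sk);
	}
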
801
802static int tcmu_configure_device(struct se_device *dev)
803{
804 struct tcmu_dev *udev = TCMU_DEV(dev);
805 struct tcmu_hba *hba = udev->hba->hba_ptr;
806 struct uio_info *info;
807 struct tcmu_mailbox *mb;
808 size_t size;
809 size_t used;
810 int ret = 0;
811 char *str;
812
813 info = &udev->uio_info;
814
815 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
816 udev->dev_config);
817 size += 1; /* for \0 */
818 str = kmalloc(size, GFP_KERNEL);
819 if (!str)
820 return -ENOMEM;
821
822 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
823
824 if (udev->dev_config[0])
825 snprintf(str + used, size - used, "/%s", udev->dev_config);
826
827 info->name = str;
828
829 udev->mb_addr = vzalloc(TCMU_RING_SIZE);
830 if (!udev->mb_addr) {
831 ret = -ENOMEM;
832 goto err_vzalloc;
833 }
834
835 /* mailbox fits in first part of CMDR space */
836 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
837 udev->data_off = CMDR_SIZE;
838 udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;
839
840 mb = udev->mb_addr;
841	mb->version = TCMU_MAILBOX_VERSION;
842 mb->cmdr_off = CMDR_OFF;
843 mb->cmdr_size = udev->cmdr_size;
844
845 WARN_ON(!PAGE_ALIGNED(udev->data_off));
846 WARN_ON(udev->data_size % PAGE_SIZE);
847
848 info->version = "1";
849
850 info->mem[0].name = "tcm-user command & data buffer";
851 info->mem[0].addr = (phys_addr_t) udev->mb_addr;
852 info->mem[0].size = TCMU_RING_SIZE;
853 info->mem[0].memtype = UIO_MEM_VIRTUAL;
854
855 info->irqcontrol = tcmu_irqcontrol;
856 info->irq = UIO_IRQ_CUSTOM;
857
858 info->mmap = tcmu_mmap;
859 info->open = tcmu_open;
860 info->release = tcmu_release;
861
862 ret = uio_register_device(tcmu_root_device, info);
863 if (ret)
864 goto err_register;
865
866 /* Other attributes can be configured in userspace */
867 dev->dev_attrib.hw_block_size = 512;
868 dev->dev_attrib.hw_max_sectors = 128;
869 dev->dev_attrib.hw_queue_depth = 128;
870
871 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
872 udev->uio_info.uio_dev->minor);
873 if (ret)
874 goto err_netlink;
875
876 return 0;
877
878err_netlink:
879 uio_unregister_device(&udev->uio_info);
880err_register:
881 vfree(udev->mb_addr);
882err_vzalloc:
883 kfree(info->name);
884
885 return ret;
886}
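
The name registered here, "tcm-user/<hba>/<name>[/<config>]", is what
userspace matches against when enumerating devices. A minimal discovery
sketch over the standard uio sysfs layout:

	#include <dirent.h>
	#include <stdio.h>
	#include <string.h>

	void find_tcmu_devices(void)
	{
		DIR *d = opendir("/sys/class/uio");
		struct dirent *ent;
		char path[256], name[256];

		if (!d)
			return;
		while ((ent = readdir(d))) {
			FILE *f;

			snprintf(path, sizeof(path),
				 "/sys/class/uio/%s/name", ent->d_name);
			f = fopen(path, "r");
			if (!f)
				continue;
			/* tcm-user devices announce themselves by prefix */
			if (fgets(name, sizeof(name), f) &&
			    !strncmp(name, "tcm-user/", 9))
				printf("%s: %s", ent->d_name, name);
			fclose(f);
		}
		closedir(d);
	}
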
887
888static int tcmu_check_pending_cmd(int id, void *p, void *data)
889{
890 struct tcmu_cmd *cmd = p;
891
892 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
893 return 0;
894 return -EINVAL;
895}
896
897static void tcmu_free_device(struct se_device *dev)
898{
899 struct tcmu_dev *udev = TCMU_DEV(dev);
900 int i;
901
902 del_timer_sync(&udev->timeout);
903
904 vfree(udev->mb_addr);
905
906 /* Upper layer should drain all requests before calling this */
907 spin_lock_irq(&udev->commands_lock);
908 i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
909 idr_destroy(&udev->commands);
910 spin_unlock_irq(&udev->commands_lock);
911 WARN_ON(i);
912
913 /* Device was configured */
914 if (udev->uio_info.uio_dev) {
915 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
916 udev->uio_info.uio_dev->minor);
917
918 uio_unregister_device(&udev->uio_info);
919 kfree(udev->uio_info.name);
920 kfree(udev->name);
921 }
922
923 kfree(udev);
924}
925
926enum {
927 Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
928};
929
930static match_table_t tokens = {
931 {Opt_dev_config, "dev_config=%s"},
932 {Opt_dev_size, "dev_size=%u"},
933 {Opt_pass_level, "pass_level=%u"},
934 {Opt_err, NULL}
935};
936
937static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
938 const char *page, ssize_t count)
939{
940 struct tcmu_dev *udev = TCMU_DEV(dev);
941 char *orig, *ptr, *opts, *arg_p;
942 substring_t args[MAX_OPT_ARGS];
943 int ret = 0, token;
944 int arg;
945
946 opts = kstrdup(page, GFP_KERNEL);
947 if (!opts)
948 return -ENOMEM;
949
950 orig = opts;
951
952 while ((ptr = strsep(&opts, ",\n")) != NULL) {
953 if (!*ptr)
954 continue;
955
956 token = match_token(ptr, tokens, args);
957 switch (token) {
958 case Opt_dev_config:
959 if (match_strlcpy(udev->dev_config, &args[0],
960 TCMU_CONFIG_LEN) == 0) {
961 ret = -EINVAL;
962 break;
963 }
964 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
965 break;
966 case Opt_dev_size:
967 arg_p = match_strdup(&args[0]);
968 if (!arg_p) {
969 ret = -ENOMEM;
970 break;
971 }
972 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
973 kfree(arg_p);
974 if (ret < 0)
975 pr_err("kstrtoul() failed for dev_size=\n");
976 break;
977 case Opt_pass_level:
978 match_int(args, &arg);
979 if (arg >= TCMU_PASS_INVALID) {
980 pr_warn("TCMU: Invalid pass_level: %d\n", arg);
981 break;
982 }
983
984 pr_debug("TCMU: Setting pass_level to %d\n", arg);
985 udev->pass_level = arg;
986 break;
987 default:
988 break;
989 }
990 }
991
992 kfree(orig);
993 return (!ret) ? count : ret;
994}
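
These options arrive as one comma- or newline-separated string written to
the backstore's configfs control attribute. An illustrative sketch (the
configfs path depends on how the HBA and device were named, so treat it as
hypothetical, as are the option values):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int configure_backstore(void)
	{
		/* e.g. a 1 GiB device with full CDB pass-through */
		const char opts[] =
			"dev_config=foo/bar,dev_size=1073741824,pass_level=0";
		int fd = open(
			"/sys/kernel/config/target/core/user_0/test/control",
			O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, opts, strlen(opts)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}
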
995
996static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
997{
998 struct tcmu_dev *udev = TCMU_DEV(dev);
999 ssize_t bl = 0;
1000
1001 bl = sprintf(b + bl, "Config: %s ",
1002 udev->dev_config[0] ? udev->dev_config : "NULL");
1003 bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
1004 udev->dev_size, udev->pass_level);
1005
1006 return bl;
1007}
1008
1009static sector_t tcmu_get_blocks(struct se_device *dev)
1010{
1011 struct tcmu_dev *udev = TCMU_DEV(dev);
1012
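	/* Report the last addressable LBA, i.e. dev_size / block_size - 1 */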
1013 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
1014 dev->dev_attrib.block_size);
1015}
1016
1017static sense_reason_t
1018tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
1019 enum dma_data_direction data_direction)
1020{
1021 int ret;
1022
1023 ret = tcmu_queue_cmd(se_cmd);
1024
1025 if (ret != 0)
1026 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1027 else
1028 return TCM_NO_SENSE;
1029}
1030
1031static sense_reason_t
1032tcmu_pass_op(struct se_cmd *se_cmd)
1033{
1034 int ret = tcmu_queue_cmd(se_cmd);
1035
1036 if (ret != 0)
1037 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1038 else
1039 return TCM_NO_SENSE;
1040}
1041
1042static struct sbc_ops tcmu_sbc_ops = {
1043 .execute_rw = tcmu_execute_rw,
1044 .execute_sync_cache = tcmu_pass_op,
1045 .execute_write_same = tcmu_pass_op,
1046 .execute_write_same_unmap = tcmu_pass_op,
1047 .execute_unmap = tcmu_pass_op,
1048};
1049
1050static sense_reason_t
1051tcmu_parse_cdb(struct se_cmd *cmd)
1052{
1053 unsigned char *cdb = cmd->t_task_cdb;
1054 struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
1055 sense_reason_t ret;
1056
1057 switch (udev->pass_level) {
1058 case TCMU_PASS_ALL:
1059 /* We're just like pscsi, then */
1060 /*
1061 * For REPORT LUNS we always need to emulate the response, for everything
1062 * else, pass it up.
1063 */
1064 switch (cdb[0]) {
1065 case REPORT_LUNS:
1066 cmd->execute_cmd = spc_emulate_report_luns;
1067 break;
1068 case READ_6:
1069 case READ_10:
1070 case READ_12:
1071 case READ_16:
1072 case WRITE_6:
1073 case WRITE_10:
1074 case WRITE_12:
1075 case WRITE_16:
1076 case WRITE_VERIFY:
1077 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1078 /* FALLTHROUGH */
1079 default:
1080 cmd->execute_cmd = tcmu_pass_op;
1081 }
1082 ret = TCM_NO_SENSE;
1083 break;
1084 case TCMU_PASS_IO:
1085 ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
1086 break;
1087 default:
1088 pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
1089 ret = TCM_CHECK_CONDITION_ABORT_CMD;
1090 }
1091
1092 return ret;
1093}
1094
1095static struct se_subsystem_api tcmu_template = {
1096 .name = "user",
1097 .inquiry_prod = "USER",
1098 .inquiry_rev = TCMU_VERSION,
1099 .owner = THIS_MODULE,
1100 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
1101 .attach_hba = tcmu_attach_hba,
1102 .detach_hba = tcmu_detach_hba,
1103 .alloc_device = tcmu_alloc_device,
1104 .configure_device = tcmu_configure_device,
1105 .free_device = tcmu_free_device,
1106 .parse_cdb = tcmu_parse_cdb,
1107 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
1108 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
1109 .get_device_type = sbc_get_device_type,
1110 .get_blocks = tcmu_get_blocks,
1111};
1112
1113static int __init tcmu_module_init(void)
1114{
1115 int ret;
1116
1117 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1118
1119 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
1120 sizeof(struct tcmu_cmd),
1121 __alignof__(struct tcmu_cmd),
1122 0, NULL);
1123 if (!tcmu_cmd_cache)
1124 return -ENOMEM;
1125
1126 tcmu_root_device = root_device_register("tcm_user");
1127 if (IS_ERR(tcmu_root_device)) {
1128 ret = PTR_ERR(tcmu_root_device);
1129 goto out_free_cache;
1130 }
1131
1132 ret = genl_register_family(&tcmu_genl_family);
1133	if (ret < 0)
1134		goto out_unreg_device;
1136
1137 ret = transport_subsystem_register(&tcmu_template);
1138 if (ret)
1139 goto out_unreg_genl;
1140
1141 return 0;
1142
1143out_unreg_genl:
1144 genl_unregister_family(&tcmu_genl_family);
1145out_unreg_device:
1146 root_device_unregister(tcmu_root_device);
1147out_free_cache:
1148 kmem_cache_destroy(tcmu_cmd_cache);
1149
1150 return ret;
1151}
1152
1153static void __exit tcmu_module_exit(void)
1154{
1155 transport_subsystem_release(&tcmu_template);
1156 genl_unregister_family(&tcmu_genl_family);
1157 root_device_unregister(tcmu_root_device);
1158 kmem_cache_destroy(tcmu_cmd_cache);
1159}
1160
1161MODULE_DESCRIPTION("TCM USER subsystem plugin");
1162MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
1163MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
1164MODULE_LICENSE("GPL");
1165
1166module_init(tcmu_module_init);
1167module_exit(tcmu_module_exit);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 21ce50880c79..ccee7e332a4d 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -98,7 +98,7 @@ static void ft_tport_delete(struct ft_tport *tport)
 	ft_sess_delete_all(tport);
 	lport = tport->lport;
 	BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
-	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
+	RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
 
 	tpg = tport->tpg;
 	if (tpg) {
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a673e5b6a2e0..60fa6278fbce 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -28,18 +28,6 @@
 
 #define UIO_MAX_DEVICES		(1U << MINORBITS)
 
-struct uio_device {
-	struct module *owner;
-	struct device *dev;
-	int minor;
-	atomic_t event;
-	struct fasync_struct *async_queue;
-	wait_queue_head_t wait;
-	struct uio_info *info;
-	struct kobject *map_dir;
-	struct kobject *portio_dir;
-};
-
 static int uio_major;
 static struct cdev *uio_cdev;
 static DEFINE_IDR(uio_idr);
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 1ad4724458de..baa81718d985 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -63,7 +63,17 @@ struct uio_port {
 
 #define MAX_UIO_PORT_REGIONS	5
 
-struct uio_device;
+struct uio_device {
+	struct module *owner;
+	struct device *dev;
+	int minor;
+	atomic_t event;
+	struct fasync_struct *async_queue;
+	wait_queue_head_t wait;
+	struct uio_info *info;
+	struct kobject *map_dir;
+	struct kobject *portio_dir;
+};
 
 /**
  * struct uio_info - UIO device capabilities
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9ec9864ecf38..23c518a0340c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -108,6 +108,8 @@
 #define DA_EMULATE_ALUA				0
 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
 #define DA_ENFORCE_PR_ISIDS			1
+/* Force SPC-3 PR Activate Persistence across Target Power Loss */
+#define DA_FORCE_PR_APTPL			0
 #define DA_STATUS_MAX_SECTORS_MIN		16
 #define DA_STATUS_MAX_SECTORS_MAX		8192
 /* By default don't report non-rotating (solid state) medium */
@@ -680,6 +682,7 @@ struct se_dev_attrib {
 	enum target_prot_type pi_prot_type;
 	enum target_prot_type hw_pi_prot_type;
 	int		enforce_pr_isids;
+	int		force_pr_aptpl;
 	int		is_nonrot;
 	int		emulate_rest_reord;
 	u32		hw_block_size;
@@ -903,4 +906,18 @@ struct se_wwn {
 	struct config_group	fabric_stat_group;
 };
 
+static inline void atomic_inc_mb(atomic_t *v)
+{
+	smp_mb__before_atomic();
+	atomic_inc(v);
+	smp_mb__after_atomic();
+}
+
+static inline void atomic_dec_mb(atomic_t *v)
+{
+	smp_mb__before_atomic();
+	atomic_dec(v);
+	smp_mb__after_atomic();
+}
+
 #endif /* TARGET_CORE_BASE_H */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6cad97485bad..b70237e8bc37 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -374,6 +374,7 @@ header-y += swab.h
 header-y += synclink.h
 header-y += sysctl.h
 header-y += sysinfo.h
+header-y += target_core_user.h
 header-y += taskstats.h
 header-y += tcp.h
 header-y += tcp_metrics.h
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
new file mode 100644
index 000000000000..7dcfbe6771b1
--- /dev/null
+++ b/include/uapi/linux/target_core_user.h
@@ -0,0 +1,142 @@
1#ifndef __TARGET_CORE_USER_H
2#define __TARGET_CORE_USER_H
3
4/* This header is used both by the kernel and by userspace applications */
5
6#include <linux/types.h>
7#include <linux/uio.h>
8
9#ifndef __packed
10#define __packed __attribute__((packed))
11#endif
12
13#define TCMU_VERSION "1.0"
14
15/*
16 * Ring Design
17 * -----------
18 *
19 * The mmaped area is divided into three parts:
20 * 1) The mailbox (struct tcmu_mailbox, below)
21 * 2) The command ring
22 * 3) Everything beyond the command ring (data)
23 *
24 * The mailbox tells userspace the offset of the command ring from the
25 * start of the shared memory region, and how big the command ring is.
26 *
27 * The kernel passes SCSI commands to userspace by putting a struct
28 * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
29 * userspace via uio's interrupt mechanism.
30 *
31 * tcmu_cmd_entry contains a header. If the header type is PAD,
32 * userspace should skip hdr->length bytes (mod cmdr_size) to find the
33 * next cmd_entry.
34 *
35 * Otherwise, the entry will contain offsets into the mmaped area that
36 * contain the cdb and data buffers -- the latter accessible via the
37 * iov array. iov addresses are also offsets into the shared area.
38 *
39 * When userspace has finished handling the command, it sets
40 * entry->rsp.scsi_status, fills in rsp.sense_buffer if appropriate,
41 * and sets mailbox->cmd_tail equal to the old cmd_tail plus
42 * hdr->length, mod cmdr_size. If cmd_tail doesn't equal cmd_head, it
43 * should process the next entry the same way, and so on.
44 */
45
46#define TCMU_MAILBOX_VERSION 1
47#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
48
49struct tcmu_mailbox {
50 __u16 version;
51 __u16 flags;
52 __u32 cmdr_off;
53 __u32 cmdr_size;
54
55 __u32 cmd_head;
56
57 /* Updated by user. On its own cacheline */
58 __u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));
59
60} __packed;
61
62enum tcmu_opcode {
63 TCMU_OP_PAD = 0,
64 TCMU_OP_CMD,
65};
66
67/*
68 * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
69 */
70struct tcmu_cmd_entry_hdr {
71 __u32 len_op;
72} __packed;
73
74#define TCMU_OP_MASK 0x7
75
76static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr)
77{
78 return hdr->len_op & TCMU_OP_MASK;
79}
80
81static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op)
82{
83 hdr->len_op &= ~TCMU_OP_MASK;
84 hdr->len_op |= (op & TCMU_OP_MASK);
85}
86
87static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr)
88{
89 return hdr->len_op & ~TCMU_OP_MASK;
90}
91
92static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
93{
94 hdr->len_op &= TCMU_OP_MASK;
95 hdr->len_op |= len;
96}
97
98/* Currently the same as SCSI_SENSE_BUFFERSIZE */
99#define TCMU_SENSE_BUFFERSIZE 96
100
101struct tcmu_cmd_entry {
102 struct tcmu_cmd_entry_hdr hdr;
103
104 uint16_t cmd_id;
105 uint16_t __pad1;
106
107 union {
108 struct {
109 uint64_t cdb_off;
110 uint64_t iov_cnt;
111 struct iovec iov[0];
112 } req;
113 struct {
114 uint8_t scsi_status;
115 uint8_t __pad1;
116 uint16_t __pad2;
117 uint32_t __pad3;
118 char sense_buffer[TCMU_SENSE_BUFFERSIZE];
119 } rsp;
120 };
121
122} __packed;
123
124#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t)
125
126enum tcmu_genl_cmd {
127 TCMU_CMD_UNSPEC,
128 TCMU_CMD_ADDED_DEVICE,
129 TCMU_CMD_REMOVED_DEVICE,
130 __TCMU_CMD_MAX,
131};
132#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
133
134enum tcmu_genl_attr {
135 TCMU_ATTR_UNSPEC,
136 TCMU_ATTR_DEVICE,
137 TCMU_ATTR_MINOR,
138 __TCMU_ATTR_MAX,
139};
140#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
141
142#endif
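
Putting the header together: a minimal sketch of the userspace loop that
the Ring Design comment above describes. Error handling, memory barriers
and the actual SCSI emulation are omitted; base is the mmap'd region and
do_cmd() is a hypothetical handler supplied by the backstore:

	#include <stdint.h>
	#include <linux/target_core_user.h>

	/* Hypothetical per-command handler: executes the CDB against the
	 * backing storage and returns a SAM status byte. */
	extern uint8_t do_cmd(uint8_t *cdb, struct iovec *iov,
			      uint64_t iov_cnt, char *base);

	void process_ring(char *base)
	{
		struct tcmu_mailbox *mb = (struct tcmu_mailbox *)base;
		char *cmdr = base + mb->cmdr_off;
		uint32_t tail = mb->cmd_tail;

		while (tail != mb->cmd_head) {
			struct tcmu_cmd_entry *ent =
				(struct tcmu_cmd_entry *)(cmdr + tail);

			if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
				/* cdb_off and iov_base are offsets from
				 * 'base', not absolute pointers */
				ent->rsp.scsi_status = do_cmd(
					(uint8_t *)base + ent->req.cdb_off,
					ent->req.iov, ent->req.iov_cnt,
					base);
			}
			/* PAD entries carry no payload; just skip them */
			tail = (tail + tcmu_hdr_get_len(&ent->hdr))
				% mb->cmdr_size;
			mb->cmd_tail = tail;
		}
	}

After draining the ring, userspace writes 4 bytes to the uio fd so that
tcmu_irqcontrol() reaps the completed entries on the kernel side.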