Diffstat (limited to 'drivers/vhost'):
 drivers/vhost/net.c  |   25
 drivers/vhost/scsi.c | 1068
 2 files changed, 555 insertions(+), 538 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 633012cc9a57..18f05bff8826 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
 			 * TODO: support TSO.
 			 */
 			iov_iter_advance(&msg.msg_iter, vhost_hlen);
-		} else {
-			/* It'll come from socket; we'll need to patch
-			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-			 */
-			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		err = sock->ops->recvmsg(sock, &msg,
 					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
 			continue;
 		}
 		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-		if (unlikely(vhost_hlen) &&
-		    copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-			       vq->iov->iov_base);
-			break;
+		if (unlikely(vhost_hlen)) {
+			if (copy_to_iter(&hdr, sizeof(hdr),
+					 &fixup) != sizeof(hdr)) {
+				vq_err(vq, "Unable to write vnet_hdr "
+				       "at addr %p\n", vq->iov->iov_base);
+				break;
+			}
+		} else {
+			/* Header came from socket; we'll need to patch
+			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+			 */
+			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		/* TODO: Should check and handle checksum. */
 
 		num_buffers = cpu_to_vhost16(vq, headcount);
 		if (likely(mergeable) &&
-		    copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+		    copy_to_iter(&num_buffers, sizeof num_buffers,
+				 &fixup) != sizeof num_buffers) {
 			vq_err(vq, "Failed num_buffers write");
 			vhost_discard_vq_desc(vq, headcount);
 			break;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dc78d87e0fc2..8d4f3f1ff799 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -38,7 +38,6 @@
38 | #include <linux/miscdevice.h> | 38 | #include <linux/miscdevice.h> |
39 | #include <asm/unaligned.h> | 39 | #include <asm/unaligned.h> |
40 | #include <scsi/scsi.h> | 40 | #include <scsi/scsi.h> |
41 | #include <scsi/scsi_tcq.h> | ||
42 | #include <target/target_core_base.h> | 41 | #include <target/target_core_base.h> |
43 | #include <target/target_core_fabric.h> | 42 | #include <target/target_core_fabric.h> |
44 | #include <target/target_core_fabric_configfs.h> | 43 | #include <target/target_core_fabric_configfs.h> |
@@ -52,13 +51,13 @@
52 | 51 | ||
53 | #include "vhost.h" | 52 | #include "vhost.h" |
54 | 53 | ||
55 | #define TCM_VHOST_VERSION "v0.1" | 54 | #define VHOST_SCSI_VERSION "v0.1" |
56 | #define TCM_VHOST_NAMELEN 256 | 55 | #define VHOST_SCSI_NAMELEN 256 |
57 | #define TCM_VHOST_MAX_CDB_SIZE 32 | 56 | #define VHOST_SCSI_MAX_CDB_SIZE 32 |
58 | #define TCM_VHOST_DEFAULT_TAGS 256 | 57 | #define VHOST_SCSI_DEFAULT_TAGS 256 |
59 | #define TCM_VHOST_PREALLOC_SGLS 2048 | 58 | #define VHOST_SCSI_PREALLOC_SGLS 2048 |
60 | #define TCM_VHOST_PREALLOC_UPAGES 2048 | 59 | #define VHOST_SCSI_PREALLOC_UPAGES 2048 |
61 | #define TCM_VHOST_PREALLOC_PROT_SGLS 512 | 60 | #define VHOST_SCSI_PREALLOC_PROT_SGLS 512 |
62 | 61 | ||
63 | struct vhost_scsi_inflight { | 62 | struct vhost_scsi_inflight { |
64 | /* Wait for the flush operation to finish */ | 63 | /* Wait for the flush operation to finish */ |
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
67 | struct kref kref; | 66 | struct kref kref; |
68 | }; | 67 | }; |
69 | 68 | ||
70 | struct tcm_vhost_cmd { | 69 | struct vhost_scsi_cmd { |
71 | /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ | 70 | /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ |
72 | int tvc_vq_desc; | 71 | int tvc_vq_desc; |
73 | /* virtio-scsi initiator task attribute */ | 72 | /* virtio-scsi initiator task attribute */ |
74 | int tvc_task_attr; | 73 | int tvc_task_attr; |
74 | /* virtio-scsi response incoming iovecs */ | ||
75 | int tvc_in_iovs; | ||
75 | /* virtio-scsi initiator data direction */ | 76 | /* virtio-scsi initiator data direction */ |
76 | enum dma_data_direction tvc_data_direction; | 77 | enum dma_data_direction tvc_data_direction; |
77 | /* Expected data transfer length from virtio-scsi header */ | 78 | /* Expected data transfer length from virtio-scsi header */ |
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
81 | /* The number of scatterlists associated with this cmd */ | 82 | /* The number of scatterlists associated with this cmd */ |
82 | u32 tvc_sgl_count; | 83 | u32 tvc_sgl_count; |
83 | u32 tvc_prot_sgl_count; | 84 | u32 tvc_prot_sgl_count; |
84 | /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ | 85 | /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */ |
85 | u32 tvc_lun; | 86 | u32 tvc_lun; |
86 | /* Pointer to the SGL formatted memory from virtio-scsi */ | 87 | /* Pointer to the SGL formatted memory from virtio-scsi */ |
87 | struct scatterlist *tvc_sgl; | 88 | struct scatterlist *tvc_sgl; |
88 | struct scatterlist *tvc_prot_sgl; | 89 | struct scatterlist *tvc_prot_sgl; |
89 | struct page **tvc_upages; | 90 | struct page **tvc_upages; |
90 | /* Pointer to response */ | 91 | /* Pointer to response header iovec */ |
91 | struct virtio_scsi_cmd_resp __user *tvc_resp; | 92 | struct iovec *tvc_resp_iov; |
92 | /* Pointer to vhost_scsi for our device */ | 93 | /* Pointer to vhost_scsi for our device */ |
93 | struct vhost_scsi *tvc_vhost; | 94 | struct vhost_scsi *tvc_vhost; |
94 | /* Pointer to vhost_virtqueue for the cmd */ | 95 | /* Pointer to vhost_virtqueue for the cmd */ |
95 | struct vhost_virtqueue *tvc_vq; | 96 | struct vhost_virtqueue *tvc_vq; |
96 | /* Pointer to vhost nexus memory */ | 97 | /* Pointer to vhost nexus memory */ |
97 | struct tcm_vhost_nexus *tvc_nexus; | 98 | struct vhost_scsi_nexus *tvc_nexus; |
98 | /* The TCM I/O descriptor that is accessed via container_of() */ | 99 | /* The TCM I/O descriptor that is accessed via container_of() */ |
99 | struct se_cmd tvc_se_cmd; | 100 | struct se_cmd tvc_se_cmd; |
100 | /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ | 101 | /* work item used for cmwq dispatch to vhost_scsi_submission_work() */ |
101 | struct work_struct work; | 102 | struct work_struct work; |
102 | /* Copy of the incoming SCSI command descriptor block (CDB) */ | 103 | /* Copy of the incoming SCSI command descriptor block (CDB) */ |
103 | unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; | 104 | unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE]; |
104 | /* Sense buffer that will be mapped into outgoing status */ | 105 | /* Sense buffer that will be mapped into outgoing status */ |
105 | unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; | 106 | unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; |
106 | /* Completed commands list, serviced from vhost worker thread */ | 107 | /* Completed commands list, serviced from vhost worker thread */ |
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
109 | struct vhost_scsi_inflight *inflight; | 110 | struct vhost_scsi_inflight *inflight; |
110 | }; | 111 | }; |
111 | 112 | ||
112 | struct tcm_vhost_nexus { | 113 | struct vhost_scsi_nexus { |
113 | /* Pointer to TCM session for I_T Nexus */ | 114 | /* Pointer to TCM session for I_T Nexus */ |
114 | struct se_session *tvn_se_sess; | 115 | struct se_session *tvn_se_sess; |
115 | }; | 116 | }; |
116 | 117 | ||
117 | struct tcm_vhost_nacl { | 118 | struct vhost_scsi_nacl { |
118 | /* Binary World Wide unique Port Name for Vhost Initiator port */ | 119 | /* Binary World Wide unique Port Name for Vhost Initiator port */ |
119 | u64 iport_wwpn; | 120 | u64 iport_wwpn; |
120 | /* ASCII formatted WWPN for Sas Initiator port */ | 121 | /* ASCII formatted WWPN for Sas Initiator port */ |
121 | char iport_name[TCM_VHOST_NAMELEN]; | 122 | char iport_name[VHOST_SCSI_NAMELEN]; |
122 | /* Returned by tcm_vhost_make_nodeacl() */ | 123 | /* Returned by vhost_scsi_make_nodeacl() */ |
123 | struct se_node_acl se_node_acl; | 124 | struct se_node_acl se_node_acl; |
124 | }; | 125 | }; |
125 | 126 | ||
126 | struct tcm_vhost_tpg { | 127 | struct vhost_scsi_tpg { |
127 | /* Vhost port target portal group tag for TCM */ | 128 | /* Vhost port target portal group tag for TCM */ |
128 | u16 tport_tpgt; | 129 | u16 tport_tpgt; |
129 | /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ | 130 | /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ |
130 | int tv_tpg_port_count; | 131 | int tv_tpg_port_count; |
131 | /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ | 132 | /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ |
132 | int tv_tpg_vhost_count; | 133 | int tv_tpg_vhost_count; |
133 | /* list for tcm_vhost_list */ | 134 | /* list for vhost_scsi_list */ |
134 | struct list_head tv_tpg_list; | 135 | struct list_head tv_tpg_list; |
135 | /* Used to protect access for tpg_nexus */ | 136 | /* Used to protect access for tpg_nexus */ |
136 | struct mutex tv_tpg_mutex; | 137 | struct mutex tv_tpg_mutex; |
137 | /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ | 138 | /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ |
138 | struct tcm_vhost_nexus *tpg_nexus; | 139 | struct vhost_scsi_nexus *tpg_nexus; |
139 | /* Pointer back to tcm_vhost_tport */ | 140 | /* Pointer back to vhost_scsi_tport */ |
140 | struct tcm_vhost_tport *tport; | 141 | struct vhost_scsi_tport *tport; |
141 | /* Returned by tcm_vhost_make_tpg() */ | 142 | /* Returned by vhost_scsi_make_tpg() */ |
142 | struct se_portal_group se_tpg; | 143 | struct se_portal_group se_tpg; |
143 | /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ | 144 | /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ |
144 | struct vhost_scsi *vhost_scsi; | 145 | struct vhost_scsi *vhost_scsi; |
145 | }; | 146 | }; |
146 | 147 | ||
147 | struct tcm_vhost_tport { | 148 | struct vhost_scsi_tport { |
148 | /* SCSI protocol the tport is providing */ | 149 | /* SCSI protocol the tport is providing */ |
149 | u8 tport_proto_id; | 150 | u8 tport_proto_id; |
150 | /* Binary World Wide unique Port Name for Vhost Target port */ | 151 | /* Binary World Wide unique Port Name for Vhost Target port */ |
151 | u64 tport_wwpn; | 152 | u64 tport_wwpn; |
152 | /* ASCII formatted WWPN for Vhost Target port */ | 153 | /* ASCII formatted WWPN for Vhost Target port */ |
153 | char tport_name[TCM_VHOST_NAMELEN]; | 154 | char tport_name[VHOST_SCSI_NAMELEN]; |
154 | /* Returned by tcm_vhost_make_tport() */ | 155 | /* Returned by vhost_scsi_make_tport() */ |
155 | struct se_wwn tport_wwn; | 156 | struct se_wwn tport_wwn; |
156 | }; | 157 | }; |
157 | 158 | ||
158 | struct tcm_vhost_evt { | 159 | struct vhost_scsi_evt { |
159 | /* event to be sent to guest */ | 160 | /* event to be sent to guest */ |
160 | struct virtio_scsi_event event; | 161 | struct virtio_scsi_event event; |
161 | /* event list, serviced from vhost worker thread */ | 162 | /* event list, serviced from vhost worker thread */ |
@@ -171,7 +172,9 @@ enum {
171 | /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ | 172 | /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ |
172 | enum { | 173 | enum { |
173 | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | | 174 | VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | |
174 | (1ULL << VIRTIO_SCSI_F_T10_PI) | 175 | (1ULL << VIRTIO_SCSI_F_T10_PI) | |
176 | (1ULL << VIRTIO_F_ANY_LAYOUT) | | ||
177 | (1ULL << VIRTIO_F_VERSION_1) | ||
175 | }; | 178 | }; |
176 | 179 | ||
177 | #define VHOST_SCSI_MAX_TARGET 256 | 180 | #define VHOST_SCSI_MAX_TARGET 256 |
@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
195 | 198 | ||
196 | struct vhost_scsi { | 199 | struct vhost_scsi { |
197 | /* Protected by vhost_scsi->dev.mutex */ | 200 | /* Protected by vhost_scsi->dev.mutex */ |
198 | struct tcm_vhost_tpg **vs_tpg; | 201 | struct vhost_scsi_tpg **vs_tpg; |
199 | char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; | 202 | char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; |
200 | 203 | ||
201 | struct vhost_dev dev; | 204 | struct vhost_dev dev; |
@@ -212,21 +215,21 @@ struct vhost_scsi {
212 | }; | 215 | }; |
213 | 216 | ||
214 | /* Local pointer to allocated TCM configfs fabric module */ | 217 | /* Local pointer to allocated TCM configfs fabric module */ |
215 | static struct target_fabric_configfs *tcm_vhost_fabric_configfs; | 218 | static struct target_fabric_configfs *vhost_scsi_fabric_configfs; |
216 | 219 | ||
217 | static struct workqueue_struct *tcm_vhost_workqueue; | 220 | static struct workqueue_struct *vhost_scsi_workqueue; |
218 | 221 | ||
219 | /* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ | 222 | /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ |
220 | static DEFINE_MUTEX(tcm_vhost_mutex); | 223 | static DEFINE_MUTEX(vhost_scsi_mutex); |
221 | static LIST_HEAD(tcm_vhost_list); | 224 | static LIST_HEAD(vhost_scsi_list); |
222 | 225 | ||
223 | static int iov_num_pages(struct iovec *iov) | 226 | static int iov_num_pages(void __user *iov_base, size_t iov_len) |
224 | { | 227 | { |
225 | return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) - | 228 | return (PAGE_ALIGN((unsigned long)iov_base + iov_len) - |
226 | ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; | 229 | ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT; |
227 | } | 230 | } |
228 | 231 | ||
229 | static void tcm_vhost_done_inflight(struct kref *kref) | 232 | static void vhost_scsi_done_inflight(struct kref *kref) |
230 | { | 233 | { |
231 | struct vhost_scsi_inflight *inflight; | 234 | struct vhost_scsi_inflight *inflight; |
232 | 235 | ||
@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
234 | complete(&inflight->comp); | 237 | complete(&inflight->comp); |
235 | } | 238 | } |
236 | 239 | ||
237 | static void tcm_vhost_init_inflight(struct vhost_scsi *vs, | 240 | static void vhost_scsi_init_inflight(struct vhost_scsi *vs, |
238 | struct vhost_scsi_inflight *old_inflight[]) | 241 | struct vhost_scsi_inflight *old_inflight[]) |
239 | { | 242 | { |
240 | struct vhost_scsi_inflight *new_inflight; | 243 | struct vhost_scsi_inflight *new_inflight; |
@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
262 | } | 265 | } |
263 | 266 | ||
264 | static struct vhost_scsi_inflight * | 267 | static struct vhost_scsi_inflight * |
265 | tcm_vhost_get_inflight(struct vhost_virtqueue *vq) | 268 | vhost_scsi_get_inflight(struct vhost_virtqueue *vq) |
266 | { | 269 | { |
267 | struct vhost_scsi_inflight *inflight; | 270 | struct vhost_scsi_inflight *inflight; |
268 | struct vhost_scsi_virtqueue *svq; | 271 | struct vhost_scsi_virtqueue *svq; |
@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
274 | return inflight; | 277 | return inflight; |
275 | } | 278 | } |
276 | 279 | ||
277 | static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight) | 280 | static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight) |
278 | { | 281 | { |
279 | kref_put(&inflight->kref, tcm_vhost_done_inflight); | 282 | kref_put(&inflight->kref, vhost_scsi_done_inflight); |
280 | } | 283 | } |
281 | 284 | ||
282 | static int tcm_vhost_check_true(struct se_portal_group *se_tpg) | 285 | static int vhost_scsi_check_true(struct se_portal_group *se_tpg) |
283 | { | 286 | { |
284 | return 1; | 287 | return 1; |
285 | } | 288 | } |
286 | 289 | ||
287 | static int tcm_vhost_check_false(struct se_portal_group *se_tpg) | 290 | static int vhost_scsi_check_false(struct se_portal_group *se_tpg) |
288 | { | 291 | { |
289 | return 0; | 292 | return 0; |
290 | } | 293 | } |
291 | 294 | ||
292 | static char *tcm_vhost_get_fabric_name(void) | 295 | static char *vhost_scsi_get_fabric_name(void) |
293 | { | 296 | { |
294 | return "vhost"; | 297 | return "vhost"; |
295 | } | 298 | } |
296 | 299 | ||
297 | static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) | 300 | static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) |
298 | { | 301 | { |
299 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 302 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
300 | struct tcm_vhost_tpg, se_tpg); | 303 | struct vhost_scsi_tpg, se_tpg); |
301 | struct tcm_vhost_tport *tport = tpg->tport; | 304 | struct vhost_scsi_tport *tport = tpg->tport; |
302 | 305 | ||
303 | switch (tport->tport_proto_id) { | 306 | switch (tport->tport_proto_id) { |
304 | case SCSI_PROTOCOL_SAS: | 307 | case SCSI_PROTOCOL_SAS: |
@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
316 | return sas_get_fabric_proto_ident(se_tpg); | 319 | return sas_get_fabric_proto_ident(se_tpg); |
317 | } | 320 | } |
318 | 321 | ||
319 | static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) | 322 | static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) |
320 | { | 323 | { |
321 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 324 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
322 | struct tcm_vhost_tpg, se_tpg); | 325 | struct vhost_scsi_tpg, se_tpg); |
323 | struct tcm_vhost_tport *tport = tpg->tport; | 326 | struct vhost_scsi_tport *tport = tpg->tport; |
324 | 327 | ||
325 | return &tport->tport_name[0]; | 328 | return &tport->tport_name[0]; |
326 | } | 329 | } |
327 | 330 | ||
328 | static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) | 331 | static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg) |
329 | { | 332 | { |
330 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 333 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
331 | struct tcm_vhost_tpg, se_tpg); | 334 | struct vhost_scsi_tpg, se_tpg); |
332 | return tpg->tport_tpgt; | 335 | return tpg->tport_tpgt; |
333 | } | 336 | } |
334 | 337 | ||
335 | static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) | 338 | static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg) |
336 | { | 339 | { |
337 | return 1; | 340 | return 1; |
338 | } | 341 | } |
339 | 342 | ||
340 | static u32 | 343 | static u32 |
341 | tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, | 344 | vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg, |
342 | struct se_node_acl *se_nacl, | 345 | struct se_node_acl *se_nacl, |
343 | struct t10_pr_registration *pr_reg, | 346 | struct t10_pr_registration *pr_reg, |
344 | int *format_code, | 347 | int *format_code, |
345 | unsigned char *buf) | 348 | unsigned char *buf) |
346 | { | 349 | { |
347 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 350 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
348 | struct tcm_vhost_tpg, se_tpg); | 351 | struct vhost_scsi_tpg, se_tpg); |
349 | struct tcm_vhost_tport *tport = tpg->tport; | 352 | struct vhost_scsi_tport *tport = tpg->tport; |
350 | 353 | ||
351 | switch (tport->tport_proto_id) { | 354 | switch (tport->tport_proto_id) { |
352 | case SCSI_PROTOCOL_SAS: | 355 | case SCSI_PROTOCOL_SAS: |
@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
369 | } | 372 | } |
370 | 373 | ||
371 | static u32 | 374 | static u32 |
372 | tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, | 375 | vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg, |
373 | struct se_node_acl *se_nacl, | 376 | struct se_node_acl *se_nacl, |
374 | struct t10_pr_registration *pr_reg, | 377 | struct t10_pr_registration *pr_reg, |
375 | int *format_code) | 378 | int *format_code) |
376 | { | 379 | { |
377 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 380 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
378 | struct tcm_vhost_tpg, se_tpg); | 381 | struct vhost_scsi_tpg, se_tpg); |
379 | struct tcm_vhost_tport *tport = tpg->tport; | 382 | struct vhost_scsi_tport *tport = tpg->tport; |
380 | 383 | ||
381 | switch (tport->tport_proto_id) { | 384 | switch (tport->tport_proto_id) { |
382 | case SCSI_PROTOCOL_SAS: | 385 | case SCSI_PROTOCOL_SAS: |
@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
399 | } | 402 | } |
400 | 403 | ||
401 | static char * | 404 | static char * |
402 | tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, | 405 | vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg, |
403 | const char *buf, | 406 | const char *buf, |
404 | u32 *out_tid_len, | 407 | u32 *out_tid_len, |
405 | char **port_nexus_ptr) | 408 | char **port_nexus_ptr) |
406 | { | 409 | { |
407 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 410 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
408 | struct tcm_vhost_tpg, se_tpg); | 411 | struct vhost_scsi_tpg, se_tpg); |
409 | struct tcm_vhost_tport *tport = tpg->tport; | 412 | struct vhost_scsi_tport *tport = tpg->tport; |
410 | 413 | ||
411 | switch (tport->tport_proto_id) { | 414 | switch (tport->tport_proto_id) { |
412 | case SCSI_PROTOCOL_SAS: | 415 | case SCSI_PROTOCOL_SAS: |
@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
429 | } | 432 | } |
430 | 433 | ||
431 | static struct se_node_acl * | 434 | static struct se_node_acl * |
432 | tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) | 435 | vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg) |
433 | { | 436 | { |
434 | struct tcm_vhost_nacl *nacl; | 437 | struct vhost_scsi_nacl *nacl; |
435 | 438 | ||
436 | nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); | 439 | nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL); |
437 | if (!nacl) { | 440 | if (!nacl) { |
438 | pr_err("Unable to allocate struct tcm_vhost_nacl\n"); | 441 | pr_err("Unable to allocate struct vhost_scsi_nacl\n"); |
439 | return NULL; | 442 | return NULL; |
440 | } | 443 | } |
441 | 444 | ||
@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
443 | } | 446 | } |
444 | 447 | ||
445 | static void | 448 | static void |
446 | tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, | 449 | vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg, |
447 | struct se_node_acl *se_nacl) | 450 | struct se_node_acl *se_nacl) |
448 | { | 451 | { |
449 | struct tcm_vhost_nacl *nacl = container_of(se_nacl, | 452 | struct vhost_scsi_nacl *nacl = container_of(se_nacl, |
450 | struct tcm_vhost_nacl, se_node_acl); | 453 | struct vhost_scsi_nacl, se_node_acl); |
451 | kfree(nacl); | 454 | kfree(nacl); |
452 | } | 455 | } |
453 | 456 | ||
454 | static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) | 457 | static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) |
455 | { | 458 | { |
456 | return 1; | 459 | return 1; |
457 | } | 460 | } |
458 | 461 | ||
459 | static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) | 462 | static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) |
460 | { | 463 | { |
461 | struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, | 464 | struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd, |
462 | struct tcm_vhost_cmd, tvc_se_cmd); | 465 | struct vhost_scsi_cmd, tvc_se_cmd); |
463 | struct se_session *se_sess = se_cmd->se_sess; | 466 | struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess; |
464 | int i; | 467 | int i; |
465 | 468 | ||
466 | if (tv_cmd->tvc_sgl_count) { | 469 | if (tv_cmd->tvc_sgl_count) { |
@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
472 | put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); | 475 | put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); |
473 | } | 476 | } |
474 | 477 | ||
475 | tcm_vhost_put_inflight(tv_cmd->inflight); | 478 | vhost_scsi_put_inflight(tv_cmd->inflight); |
476 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 479 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); |
477 | } | 480 | } |
478 | 481 | ||
479 | static int tcm_vhost_shutdown_session(struct se_session *se_sess) | 482 | static int vhost_scsi_shutdown_session(struct se_session *se_sess) |
480 | { | 483 | { |
481 | return 0; | 484 | return 0; |
482 | } | 485 | } |
483 | 486 | ||
484 | static void tcm_vhost_close_session(struct se_session *se_sess) | 487 | static void vhost_scsi_close_session(struct se_session *se_sess) |
485 | { | 488 | { |
486 | return; | 489 | return; |
487 | } | 490 | } |
488 | 491 | ||
489 | static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) | 492 | static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) |
490 | { | 493 | { |
491 | return 0; | 494 | return 0; |
492 | } | 495 | } |
493 | 496 | ||
494 | static int tcm_vhost_write_pending(struct se_cmd *se_cmd) | 497 | static int vhost_scsi_write_pending(struct se_cmd *se_cmd) |
495 | { | 498 | { |
496 | /* Go ahead and process the write immediately */ | 499 | /* Go ahead and process the write immediately */ |
497 | target_execute_cmd(se_cmd); | 500 | target_execute_cmd(se_cmd); |
498 | return 0; | 501 | return 0; |
499 | } | 502 | } |
500 | 503 | ||
501 | static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) | 504 | static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd) |
502 | { | 505 | { |
503 | return 0; | 506 | return 0; |
504 | } | 507 | } |
505 | 508 | ||
506 | static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) | 509 | static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl) |
507 | { | 510 | { |
508 | return; | 511 | return; |
509 | } | 512 | } |
510 | 513 | ||
511 | static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) | 514 | static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd) |
512 | { | 515 | { |
513 | return 0; | 516 | return 0; |
514 | } | 517 | } |
515 | 518 | ||
516 | static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) | 519 | static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) |
517 | { | 520 | { |
518 | return 0; | 521 | return 0; |
519 | } | 522 | } |
520 | 523 | ||
521 | static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) | 524 | static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd) |
522 | { | 525 | { |
523 | struct vhost_scsi *vs = cmd->tvc_vhost; | 526 | struct vhost_scsi *vs = cmd->tvc_vhost; |
524 | 527 | ||
@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
527 | vhost_work_queue(&vs->dev, &vs->vs_completion_work); | 530 | vhost_work_queue(&vs->dev, &vs->vs_completion_work); |
528 | } | 531 | } |
529 | 532 | ||
530 | static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) | 533 | static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd) |
531 | { | 534 | { |
532 | struct tcm_vhost_cmd *cmd = container_of(se_cmd, | 535 | struct vhost_scsi_cmd *cmd = container_of(se_cmd, |
533 | struct tcm_vhost_cmd, tvc_se_cmd); | 536 | struct vhost_scsi_cmd, tvc_se_cmd); |
534 | vhost_scsi_complete_cmd(cmd); | 537 | vhost_scsi_complete_cmd(cmd); |
535 | return 0; | 538 | return 0; |
536 | } | 539 | } |
537 | 540 | ||
538 | static int tcm_vhost_queue_status(struct se_cmd *se_cmd) | 541 | static int vhost_scsi_queue_status(struct se_cmd *se_cmd) |
539 | { | 542 | { |
540 | struct tcm_vhost_cmd *cmd = container_of(se_cmd, | 543 | struct vhost_scsi_cmd *cmd = container_of(se_cmd, |
541 | struct tcm_vhost_cmd, tvc_se_cmd); | 544 | struct vhost_scsi_cmd, tvc_se_cmd); |
542 | vhost_scsi_complete_cmd(cmd); | 545 | vhost_scsi_complete_cmd(cmd); |
543 | return 0; | 546 | return 0; |
544 | } | 547 | } |
545 | 548 | ||
546 | static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) | 549 | static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd) |
547 | { | 550 | { |
548 | return; | 551 | return; |
549 | } | 552 | } |
550 | 553 | ||
551 | static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) | 554 | static void vhost_scsi_aborted_task(struct se_cmd *se_cmd) |
552 | { | 555 | { |
553 | return; | 556 | return; |
554 | } | 557 | } |
555 | 558 | ||
556 | static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) | 559 | static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) |
557 | { | 560 | { |
558 | vs->vs_events_nr--; | 561 | vs->vs_events_nr--; |
559 | kfree(evt); | 562 | kfree(evt); |
560 | } | 563 | } |
561 | 564 | ||
562 | static struct tcm_vhost_evt * | 565 | static struct vhost_scsi_evt * |
563 | tcm_vhost_allocate_evt(struct vhost_scsi *vs, | 566 | vhost_scsi_allocate_evt(struct vhost_scsi *vs, |
564 | u32 event, u32 reason) | 567 | u32 event, u32 reason) |
565 | { | 568 | { |
566 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; | 569 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
567 | struct tcm_vhost_evt *evt; | 570 | struct vhost_scsi_evt *evt; |
568 | 571 | ||
569 | if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { | 572 | if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { |
570 | vs->vs_events_missed = true; | 573 | vs->vs_events_missed = true; |
@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
573 | 576 | ||
574 | evt = kzalloc(sizeof(*evt), GFP_KERNEL); | 577 | evt = kzalloc(sizeof(*evt), GFP_KERNEL); |
575 | if (!evt) { | 578 | if (!evt) { |
576 | vq_err(vq, "Failed to allocate tcm_vhost_evt\n"); | 579 | vq_err(vq, "Failed to allocate vhost_scsi_evt\n"); |
577 | vs->vs_events_missed = true; | 580 | vs->vs_events_missed = true; |
578 | return NULL; | 581 | return NULL; |
579 | } | 582 | } |
@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
585 | return evt; | 588 | return evt; |
586 | } | 589 | } |
587 | 590 | ||
588 | static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd) | 591 | static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd) |
589 | { | 592 | { |
590 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; | 593 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; |
591 | 594 | ||
@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
600 | } | 603 | } |
601 | 604 | ||
602 | static void | 605 | static void |
603 | tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) | 606 | vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) |
604 | { | 607 | { |
605 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; | 608 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
606 | struct virtio_scsi_event *event = &evt->event; | 609 | struct virtio_scsi_event *event = &evt->event; |
@@ -646,24 +649,24 @@ again:
646 | if (!ret) | 649 | if (!ret) |
647 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); | 650 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); |
648 | else | 651 | else |
649 | vq_err(vq, "Faulted on tcm_vhost_send_event\n"); | 652 | vq_err(vq, "Faulted on vhost_scsi_send_event\n"); |
650 | } | 653 | } |
651 | 654 | ||
652 | static void tcm_vhost_evt_work(struct vhost_work *work) | 655 | static void vhost_scsi_evt_work(struct vhost_work *work) |
653 | { | 656 | { |
654 | struct vhost_scsi *vs = container_of(work, struct vhost_scsi, | 657 | struct vhost_scsi *vs = container_of(work, struct vhost_scsi, |
655 | vs_event_work); | 658 | vs_event_work); |
656 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; | 659 | struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
657 | struct tcm_vhost_evt *evt; | 660 | struct vhost_scsi_evt *evt; |
658 | struct llist_node *llnode; | 661 | struct llist_node *llnode; |
659 | 662 | ||
660 | mutex_lock(&vq->mutex); | 663 | mutex_lock(&vq->mutex); |
661 | llnode = llist_del_all(&vs->vs_event_list); | 664 | llnode = llist_del_all(&vs->vs_event_list); |
662 | while (llnode) { | 665 | while (llnode) { |
663 | evt = llist_entry(llnode, struct tcm_vhost_evt, list); | 666 | evt = llist_entry(llnode, struct vhost_scsi_evt, list); |
664 | llnode = llist_next(llnode); | 667 | llnode = llist_next(llnode); |
665 | tcm_vhost_do_evt_work(vs, evt); | 668 | vhost_scsi_do_evt_work(vs, evt); |
666 | tcm_vhost_free_evt(vs, evt); | 669 | vhost_scsi_free_evt(vs, evt); |
667 | } | 670 | } |
668 | mutex_unlock(&vq->mutex); | 671 | mutex_unlock(&vq->mutex); |
669 | } | 672 | } |
@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
679 | vs_completion_work); | 682 | vs_completion_work); |
680 | DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); | 683 | DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); |
681 | struct virtio_scsi_cmd_resp v_rsp; | 684 | struct virtio_scsi_cmd_resp v_rsp; |
682 | struct tcm_vhost_cmd *cmd; | 685 | struct vhost_scsi_cmd *cmd; |
683 | struct llist_node *llnode; | 686 | struct llist_node *llnode; |
684 | struct se_cmd *se_cmd; | 687 | struct se_cmd *se_cmd; |
688 | struct iov_iter iov_iter; | ||
685 | int ret, vq; | 689 | int ret, vq; |
686 | 690 | ||
687 | bitmap_zero(signal, VHOST_SCSI_MAX_VQ); | 691 | bitmap_zero(signal, VHOST_SCSI_MAX_VQ); |
688 | llnode = llist_del_all(&vs->vs_completion_list); | 692 | llnode = llist_del_all(&vs->vs_completion_list); |
689 | while (llnode) { | 693 | while (llnode) { |
690 | cmd = llist_entry(llnode, struct tcm_vhost_cmd, | 694 | cmd = llist_entry(llnode, struct vhost_scsi_cmd, |
691 | tvc_completion_list); | 695 | tvc_completion_list); |
692 | llnode = llist_next(llnode); | 696 | llnode = llist_next(llnode); |
693 | se_cmd = &cmd->tvc_se_cmd; | 697 | se_cmd = &cmd->tvc_se_cmd; |
@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
703 | se_cmd->scsi_sense_length); | 707 | se_cmd->scsi_sense_length); |
704 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, | 708 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, |
705 | se_cmd->scsi_sense_length); | 709 | se_cmd->scsi_sense_length); |
706 | ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); | 710 | |
707 | if (likely(ret == 0)) { | 711 | iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, |
712 | cmd->tvc_in_iovs, sizeof(v_rsp)); | ||
713 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); | ||
714 | if (likely(ret == sizeof(v_rsp))) { | ||
708 | struct vhost_scsi_virtqueue *q; | 715 | struct vhost_scsi_virtqueue *q; |
709 | vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); | 716 | vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); |
710 | q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); | 717 | q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); |
@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
722 | vhost_signal(&vs->dev, &vs->vqs[vq].vq); | 729 | vhost_signal(&vs->dev, &vs->vqs[vq].vq); |
723 | } | 730 | } |
724 | 731 | ||
725 | static struct tcm_vhost_cmd * | 732 | static struct vhost_scsi_cmd * |
726 | vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, | 733 | vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, |
727 | unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, | 734 | unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, |
728 | u32 exp_data_len, int data_direction) | 735 | u32 exp_data_len, int data_direction) |
729 | { | 736 | { |
730 | struct tcm_vhost_cmd *cmd; | 737 | struct vhost_scsi_cmd *cmd; |
731 | struct tcm_vhost_nexus *tv_nexus; | 738 | struct vhost_scsi_nexus *tv_nexus; |
732 | struct se_session *se_sess; | 739 | struct se_session *se_sess; |
733 | struct scatterlist *sg, *prot_sg; | 740 | struct scatterlist *sg, *prot_sg; |
734 | struct page **pages; | 741 | struct page **pages; |
@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
736 | 743 | ||
737 | tv_nexus = tpg->tpg_nexus; | 744 | tv_nexus = tpg->tpg_nexus; |
738 | if (!tv_nexus) { | 745 | if (!tv_nexus) { |
739 | pr_err("Unable to locate active struct tcm_vhost_nexus\n"); | 746 | pr_err("Unable to locate active struct vhost_scsi_nexus\n"); |
740 | return ERR_PTR(-EIO); | 747 | return ERR_PTR(-EIO); |
741 | } | 748 | } |
742 | se_sess = tv_nexus->tvn_se_sess; | 749 | se_sess = tv_nexus->tvn_se_sess; |
743 | 750 | ||
744 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 751 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); |
745 | if (tag < 0) { | 752 | if (tag < 0) { |
746 | pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); | 753 | pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); |
747 | return ERR_PTR(-ENOMEM); | 754 | return ERR_PTR(-ENOMEM); |
748 | } | 755 | } |
749 | 756 | ||
750 | cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; | 757 | cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag]; |
751 | sg = cmd->tvc_sgl; | 758 | sg = cmd->tvc_sgl; |
752 | prot_sg = cmd->tvc_prot_sgl; | 759 | prot_sg = cmd->tvc_prot_sgl; |
753 | pages = cmd->tvc_upages; | 760 | pages = cmd->tvc_upages; |
754 | memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); | 761 | memset(cmd, 0, sizeof(struct vhost_scsi_cmd)); |
755 | 762 | ||
756 | cmd->tvc_sgl = sg; | 763 | cmd->tvc_sgl = sg; |
757 | cmd->tvc_prot_sgl = prot_sg; | 764 | cmd->tvc_prot_sgl = prot_sg; |
@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
763 | cmd->tvc_exp_data_len = exp_data_len; | 770 | cmd->tvc_exp_data_len = exp_data_len; |
764 | cmd->tvc_data_direction = data_direction; | 771 | cmd->tvc_data_direction = data_direction; |
765 | cmd->tvc_nexus = tv_nexus; | 772 | cmd->tvc_nexus = tv_nexus; |
766 | cmd->inflight = tcm_vhost_get_inflight(vq); | 773 | cmd->inflight = vhost_scsi_get_inflight(vq); |
767 | 774 | ||
768 | memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); | 775 | memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE); |
769 | 776 | ||
770 | return cmd; | 777 | return cmd; |
771 | } | 778 | } |
@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
776 | * Returns the number of scatterlist entries used or -errno on error. | 783 | * Returns the number of scatterlist entries used or -errno on error. |
777 | */ | 784 | */ |
778 | static int | 785 | static int |
779 | vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, | 786 | vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, |
787 | void __user *ptr, | ||
788 | size_t len, | ||
780 | struct scatterlist *sgl, | 789 | struct scatterlist *sgl, |
781 | unsigned int sgl_count, | ||
782 | struct iovec *iov, | ||
783 | struct page **pages, | ||
784 | bool write) | 790 | bool write) |
785 | { | 791 | { |
786 | unsigned int npages = 0, pages_nr, offset, nbytes; | 792 | unsigned int npages = 0, offset, nbytes; |
793 | unsigned int pages_nr = iov_num_pages(ptr, len); | ||
787 | struct scatterlist *sg = sgl; | 794 | struct scatterlist *sg = sgl; |
788 | void __user *ptr = iov->iov_base; | 795 | struct page **pages = cmd->tvc_upages; |
789 | size_t len = iov->iov_len; | ||
790 | int ret, i; | 796 | int ret, i; |
791 | 797 | ||
792 | pages_nr = iov_num_pages(iov); | 798 | if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) { |
793 | if (pages_nr > sgl_count) { | ||
794 | pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" | ||
795 | " sgl_count: %u\n", pages_nr, sgl_count); | ||
796 | return -ENOBUFS; | ||
797 | } | ||
798 | if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) { | ||
799 | pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" | 799 | pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" |
800 | " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", | 800 | " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n", |
801 | pages_nr, TCM_VHOST_PREALLOC_UPAGES); | 801 | pages_nr, VHOST_SCSI_PREALLOC_UPAGES); |
802 | return -ENOBUFS; | 802 | return -ENOBUFS; |
803 | } | 803 | } |
804 | 804 | ||
@@ -829,84 +829,94 @@ out:
829 | } | 829 | } |
830 | 830 | ||
831 | static int | 831 | static int |
832 | vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, | 832 | vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) |
833 | struct iovec *iov, | ||
834 | int niov, | ||
835 | bool write) | ||
836 | { | 833 | { |
837 | struct scatterlist *sg = cmd->tvc_sgl; | 834 | int sgl_count = 0; |
838 | unsigned int sgl_count = 0; | ||
839 | int ret, i; | ||
840 | 835 | ||
841 | for (i = 0; i < niov; i++) | 836 | if (!iter || !iter->iov) { |
842 | sgl_count += iov_num_pages(&iov[i]); | 837 | pr_err("%s: iter->iov is NULL, but expected bytes: %zu" |
838 | " present\n", __func__, bytes); | ||
839 | return -EINVAL; | ||
840 | } | ||
843 | 841 | ||
844 | if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { | 842 | sgl_count = iov_iter_npages(iter, 0xffff); |
845 | pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" | 843 | if (sgl_count > max_sgls) { |
846 | " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", | 844 | pr_err("%s: requested sgl_count: %d exceeds pre-allocated" |
847 | sgl_count, TCM_VHOST_PREALLOC_SGLS); | 845 | " max_sgls: %d\n", __func__, sgl_count, max_sgls); |
848 | return -ENOBUFS; | 846 | return -EINVAL; |
849 | } | 847 | } |
848 | return sgl_count; | ||
849 | } | ||
850 | 850 | ||
851 | pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); | 851 | static int |
852 | sg_init_table(sg, sgl_count); | 852 | vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, |
853 | cmd->tvc_sgl_count = sgl_count; | 853 | struct iov_iter *iter, |
854 | struct scatterlist *sg, int sg_count) | ||
855 | { | ||
856 | size_t off = iter->iov_offset; | ||
857 | int i, ret; | ||
854 | 858 | ||
855 | pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); | 859 | for (i = 0; i < iter->nr_segs; i++) { |
860 | void __user *base = iter->iov[i].iov_base + off; | ||
861 | size_t len = iter->iov[i].iov_len - off; | ||
856 | 862 | ||
857 | for (i = 0; i < niov; i++) { | 863 | ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); |
858 | ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], | ||
859 | cmd->tvc_upages, write); | ||
860 | if (ret < 0) { | 864 | if (ret < 0) { |
861 | for (i = 0; i < cmd->tvc_sgl_count; i++) | 865 | for (i = 0; i < sg_count; i++) { |
862 | put_page(sg_page(&cmd->tvc_sgl[i])); | 866 | struct page *page = sg_page(&sg[i]); |
863 | 867 | if (page) | |
864 | cmd->tvc_sgl_count = 0; | 868 | put_page(page); |
869 | } | ||
865 | return ret; | 870 | return ret; |
866 | } | 871 | } |
867 | sg += ret; | 872 | sg += ret; |
868 | sgl_count -= ret; | 873 | off = 0; |
869 | } | 874 | } |
870 | return 0; | 875 | return 0; |
871 | } | 876 | } |
872 | 877 | ||
873 | static int | 878 | static int |
874 | vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, | 879 | vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, |
875 | struct iovec *iov, | 880 | size_t prot_bytes, struct iov_iter *prot_iter, |
876 | int niov, | 881 | size_t data_bytes, struct iov_iter *data_iter) |
877 | bool write) | 882 | { |
878 | { | 883 | int sgl_count, ret; |
879 | struct scatterlist *prot_sg = cmd->tvc_prot_sgl; | 884 | bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE); |
880 | unsigned int prot_sgl_count = 0; | 885 | |
881 | int ret, i; | 886 | if (prot_bytes) { |
882 | 887 | sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, | |
883 | for (i = 0; i < niov; i++) | 888 | VHOST_SCSI_PREALLOC_PROT_SGLS); |
884 | prot_sgl_count += iov_num_pages(&iov[i]); | 889 | if (sgl_count < 0) |
885 | 890 | return sgl_count; | |
886 | if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { | 891 | |
887 | pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" | 892 | sg_init_table(cmd->tvc_prot_sgl, sgl_count); |
888 | " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n", | 893 | cmd->tvc_prot_sgl_count = sgl_count; |
889 | prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS); | 894 | pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, |
890 | return -ENOBUFS; | 895 | cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count); |
891 | } | 896 | |
892 | 897 | ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter, | |
893 | pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, | 898 | cmd->tvc_prot_sgl, |
894 | prot_sg, prot_sgl_count); | 899 | cmd->tvc_prot_sgl_count); |
895 | sg_init_table(prot_sg, prot_sgl_count); | ||
896 | cmd->tvc_prot_sgl_count = prot_sgl_count; | ||
897 | |||
898 | for (i = 0; i < niov; i++) { | ||
899 | ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i], | ||
900 | cmd->tvc_upages, write); | ||
901 | if (ret < 0) { | 900 | if (ret < 0) { |
902 | for (i = 0; i < cmd->tvc_prot_sgl_count; i++) | ||
903 | put_page(sg_page(&cmd->tvc_prot_sgl[i])); | ||
904 | |||
905 | cmd->tvc_prot_sgl_count = 0; | 901 | cmd->tvc_prot_sgl_count = 0; |
906 | return ret; | 902 | return ret; |
907 | } | 903 | } |
908 | prot_sg += ret; | 904 | } |
909 | prot_sgl_count -= ret; | 905 | sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes, |
906 | VHOST_SCSI_PREALLOC_SGLS); | ||
907 | if (sgl_count < 0) | ||
908 | return sgl_count; | ||
909 | |||
910 | sg_init_table(cmd->tvc_sgl, sgl_count); | ||
911 | cmd->tvc_sgl_count = sgl_count; | ||
912 | pr_debug("%s data_sg %p data_sgl_count %u\n", __func__, | ||
913 | cmd->tvc_sgl, cmd->tvc_sgl_count); | ||
914 | |||
915 | ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter, | ||
916 | cmd->tvc_sgl, cmd->tvc_sgl_count); | ||
917 | if (ret < 0) { | ||
918 | cmd->tvc_sgl_count = 0; | ||
919 | return ret; | ||
910 | } | 920 | } |
911 | return 0; | 921 | return 0; |
912 | } | 922 | } |
@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
928 | return TCM_SIMPLE_TAG; | 938 | return TCM_SIMPLE_TAG; |
929 | } | 939 | } |
930 | 940 | ||
931 | static void tcm_vhost_submission_work(struct work_struct *work) | 941 | static void vhost_scsi_submission_work(struct work_struct *work) |
932 | { | 942 | { |
933 | struct tcm_vhost_cmd *cmd = | 943 | struct vhost_scsi_cmd *cmd = |
934 | container_of(work, struct tcm_vhost_cmd, work); | 944 | container_of(work, struct vhost_scsi_cmd, work); |
935 | struct tcm_vhost_nexus *tv_nexus; | 945 | struct vhost_scsi_nexus *tv_nexus; |
936 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; | 946 | struct se_cmd *se_cmd = &cmd->tvc_se_cmd; |
937 | struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; | 947 | struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; |
938 | int rc; | 948 | int rc; |
@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
986 | static void | 996 | static void |
987 | vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | 997 | vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) |
988 | { | 998 | { |
989 | struct tcm_vhost_tpg **vs_tpg; | 999 | struct vhost_scsi_tpg **vs_tpg, *tpg; |
990 | struct virtio_scsi_cmd_req v_req; | 1000 | struct virtio_scsi_cmd_req v_req; |
991 | struct virtio_scsi_cmd_req_pi v_req_pi; | 1001 | struct virtio_scsi_cmd_req_pi v_req_pi; |
992 | struct tcm_vhost_tpg *tpg; | 1002 | struct vhost_scsi_cmd *cmd; |
993 | struct tcm_vhost_cmd *cmd; | 1003 | struct iov_iter out_iter, in_iter, prot_iter, data_iter; |
994 | u64 tag; | 1004 | u64 tag; |
995 | u32 exp_data_len, data_first, data_num, data_direction, prot_first; | 1005 | u32 exp_data_len, data_direction; |
996 | unsigned out, in, i; | 1006 | unsigned out, in; |
997 | int head, ret, data_niov, prot_niov, prot_bytes; | 1007 | int head, ret, prot_bytes; |
998 | size_t req_size; | 1008 | size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); |
1009 | size_t out_size, in_size; | ||
999 | u16 lun; | 1010 | u16 lun; |
1000 | u8 *target, *lunp, task_attr; | 1011 | u8 *target, *lunp, task_attr; |
1001 | bool hdr_pi; | 1012 | bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); |
1002 | void *req, *cdb; | 1013 | void *req, *cdb; |
1003 | 1014 | ||
1004 | mutex_lock(&vq->mutex); | 1015 | mutex_lock(&vq->mutex); |
@@ -1014,10 +1025,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1014 | 1025 | ||
1015 | for (;;) { | 1026 | for (;;) { |
1016 | head = vhost_get_vq_desc(vq, vq->iov, | 1027 | head = vhost_get_vq_desc(vq, vq->iov, |
1017 | ARRAY_SIZE(vq->iov), &out, &in, | 1028 | ARRAY_SIZE(vq->iov), &out, &in, |
1018 | NULL, NULL); | 1029 | NULL, NULL); |
1019 | pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", | 1030 | pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", |
1020 | head, out, in); | 1031 | head, out, in); |
1021 | /* On error, stop handling until the next kick. */ | 1032 | /* On error, stop handling until the next kick. */ |
1022 | if (unlikely(head < 0)) | 1033 | if (unlikely(head < 0)) |
1023 | break; | 1034 | break; |
@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1029 | } | 1040 | } |
1030 | break; | 1041 | break; |
1031 | } | 1042 | } |
1032 | |||
1033 | /* FIXME: BIDI operation */ | ||
1034 | if (out == 1 && in == 1) { | ||
1035 | data_direction = DMA_NONE; | ||
1036 | data_first = 0; | ||
1037 | data_num = 0; | ||
1038 | } else if (out == 1 && in > 1) { | ||
1039 | data_direction = DMA_FROM_DEVICE; | ||
1040 | data_first = out + 1; | ||
1041 | data_num = in - 1; | ||
1042 | } else if (out > 1 && in == 1) { | ||
1043 | data_direction = DMA_TO_DEVICE; | ||
1044 | data_first = 1; | ||
1045 | data_num = out - 1; | ||
1046 | } else { | ||
1047 | vq_err(vq, "Invalid buffer layout out: %u in: %u\n", | ||
1048 | out, in); | ||
1049 | break; | ||
1050 | } | ||
1051 | |||
1052 | /* | 1043 | /* |
1053 | * Check for a sane resp buffer so we can report errors to | 1044 | * Check for a sane response buffer so we can report early |
1054 | * the guest. | 1045 | * errors back to the guest. |
1055 | */ | 1046 | */ |
1056 | if (unlikely(vq->iov[out].iov_len != | 1047 | if (unlikely(vq->iov[out].iov_len < rsp_size)) { |
1057 | sizeof(struct virtio_scsi_cmd_resp))) { | 1048 | vq_err(vq, "Expecting at least virtio_scsi_cmd_resp" |
1058 | vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" | 1049 | " size, got %zu bytes\n", vq->iov[out].iov_len); |
1059 | " bytes\n", vq->iov[out].iov_len); | ||
1060 | break; | 1050 | break; |
1061 | } | 1051 | } |
1062 | 1052 | /* | |
1063 | if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) { | 1053 | * Setup pointers and values based upon different virtio-scsi |
1054 | * request header if T10_PI is enabled in KVM guest. | ||
1055 | */ | ||
1056 | if (t10_pi) { | ||
1064 | req = &v_req_pi; | 1057 | req = &v_req_pi; |
1058 | req_size = sizeof(v_req_pi); | ||
1065 | lunp = &v_req_pi.lun[0]; | 1059 | lunp = &v_req_pi.lun[0]; |
1066 | target = &v_req_pi.lun[1]; | 1060 | target = &v_req_pi.lun[1]; |
1067 | req_size = sizeof(v_req_pi); | ||
1068 | hdr_pi = true; | ||
1069 | } else { | 1061 | } else { |
1070 | req = &v_req; | 1062 | req = &v_req; |
1063 | req_size = sizeof(v_req); | ||
1071 | lunp = &v_req.lun[0]; | 1064 | lunp = &v_req.lun[0]; |
1072 | target = &v_req.lun[1]; | 1065 | target = &v_req.lun[1]; |
1073 | req_size = sizeof(v_req); | ||
1074 | hdr_pi = false; | ||
1075 | } | 1066 | } |
1067 | /* | ||
1068 | * FIXME: Not correct for BIDI operation | ||
1069 | */ | ||
1070 | out_size = iov_length(vq->iov, out); | ||
1071 | in_size = iov_length(&vq->iov[out], in); | ||
1076 | 1072 | ||
1077 | if (unlikely(vq->iov[0].iov_len < req_size)) { | 1073 | /* |
1078 | pr_err("Expecting virtio-scsi header: %zu, got %zu\n", | 1074 | * Copy over the virtio-scsi request header, which for a |
1079 | req_size, vq->iov[0].iov_len); | 1075 | * ANY_LAYOUT enabled guest may span multiple iovecs, or a |
1080 | break; | 1076 | * single iovec may contain both the header + outgoing |
1081 | } | 1077 | * WRITE payloads. |
1082 | ret = copy_from_user(req, vq->iov[0].iov_base, req_size); | 1078 | * |
1083 | if (unlikely(ret)) { | 1079 | * copy_from_iter() will advance out_iter, so that it will |
1084 | vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); | 1080 | * point at the start of the outgoing WRITE payload, if |
1085 | break; | 1081 | * DMA_TO_DEVICE is set. |
1086 | } | 1082 | */ |
1083 | iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); | ||
1087 | 1084 | ||
1085 | ret = copy_from_iter(req, req_size, &out_iter); | ||
1086 | if (unlikely(ret != req_size)) { | ||
1087 | vq_err(vq, "Faulted on copy_from_iter\n"); | ||
1088 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
1089 | continue; | ||
1090 | } | ||
1088 | /* virtio-scsi spec requires byte 0 of the lun to be 1 */ | 1091 | /* virtio-scsi spec requires byte 0 of the lun to be 1 */ |
1089 | if (unlikely(*lunp != 1)) { | 1092 | if (unlikely(*lunp != 1)) { |
1093 | vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); | ||
1090 | vhost_scsi_send_bad_target(vs, vq, head, out); | 1094 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1091 | continue; | 1095 | continue; |
1092 | } | 1096 | } |
1093 | 1097 | ||
1094 | tpg = ACCESS_ONCE(vs_tpg[*target]); | 1098 | tpg = ACCESS_ONCE(vs_tpg[*target]); |
1095 | |||
1096 | /* Target does not exist, fail the request */ | ||
1097 | if (unlikely(!tpg)) { | 1099 | if (unlikely(!tpg)) { |
1100 | /* Target does not exist, fail the request */ | ||
1098 | vhost_scsi_send_bad_target(vs, vq, head, out); | 1101 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1099 | continue; | 1102 | continue; |
1100 | } | 1103 | } |
1101 | |||
1102 | data_niov = data_num; | ||
1103 | prot_niov = prot_first = prot_bytes = 0; | ||
1104 | /* | 1104 | /* |
1105 | * Determine if any protection information iovecs are preceeding | 1105 | * Determine data_direction by calculating the total outgoing |
1106 | * the actual data payload, and adjust data_first + data_niov | 1106 | * iovec sizes + incoming iovec sizes vs. virtio-scsi request + |
1107 | * values accordingly for vhost_scsi_map_iov_to_sgl() below. | 1107 | * response headers respectively. |
1108 | * | 1108 | * |
1109 | * Also extract virtio_scsi header bits for vhost_scsi_get_tag() | 1109 | * For DMA_TO_DEVICE this is out_iter, which is already pointing |
1110 | * to the right place. | ||
1111 | * | ||
1112 | * For DMA_FROM_DEVICE, the iovec will be just past the end | ||
1113 | * of the virtio-scsi response header in either the same | ||
1114 | * or immediately following iovec. | ||
1115 | * | ||
1116 | * Any associated T10_PI bytes for the outgoing / incoming | ||
1117 | * payloads are included in calculation of exp_data_len here. | ||
1110 | */ | 1118 | */ |
1111 | if (hdr_pi) { | 1119 | prot_bytes = 0; |
1120 | |||
1121 | if (out_size > req_size) { | ||
1122 | data_direction = DMA_TO_DEVICE; | ||
1123 | exp_data_len = out_size - req_size; | ||
1124 | data_iter = out_iter; | ||
1125 | } else if (in_size > rsp_size) { | ||
1126 | data_direction = DMA_FROM_DEVICE; | ||
1127 | exp_data_len = in_size - rsp_size; | ||
1128 | |||
1129 | iov_iter_init(&in_iter, READ, &vq->iov[out], in, | ||
1130 | rsp_size + exp_data_len); | ||
1131 | iov_iter_advance(&in_iter, rsp_size); | ||
1132 | data_iter = in_iter; | ||
1133 | } else { | ||
1134 | data_direction = DMA_NONE; | ||
1135 | exp_data_len = 0; | ||
1136 | } | ||
1137 | /* | ||
1138 | * If T10_PI header + payload is present, setup prot_iter values | ||
1139 | * and recalculate data_iter for vhost_scsi_mapal() mapping to | ||
1140 | * host scatterlists via get_user_pages_fast(). | ||
1141 | */ | ||
1142 | if (t10_pi) { | ||
1112 | if (v_req_pi.pi_bytesout) { | 1143 | if (v_req_pi.pi_bytesout) { |
1113 | if (data_direction != DMA_TO_DEVICE) { | 1144 | if (data_direction != DMA_TO_DEVICE) { |
1114 | vq_err(vq, "Received non zero do_pi_niov" | 1145 | vq_err(vq, "Received non zero pi_bytesout," |
1115 | ", but wrong data_direction\n"); | 1146 | " but wrong data_direction\n"); |
1116 | goto err_cmd; | 1147 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1148 | continue; | ||
1117 | } | 1149 | } |
1118 | prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); | 1150 | prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); |
1119 | } else if (v_req_pi.pi_bytesin) { | 1151 | } else if (v_req_pi.pi_bytesin) { |
1120 | if (data_direction != DMA_FROM_DEVICE) { | 1152 | if (data_direction != DMA_FROM_DEVICE) { |
1121 | vq_err(vq, "Received non zero di_pi_niov" | 1153 | vq_err(vq, "Received non zero pi_bytesin," |
1122 | ", but wrong data_direction\n"); | 1154 | " but wrong data_direction\n"); |
1123 | goto err_cmd; | 1155 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1156 | continue; | ||
1124 | } | 1157 | } |
1125 | prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); | 1158 | prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); |
1126 | } | 1159 | } |
1160 | /* | ||
1161 | * Set prot_iter to data_iter, and advance past any | ||
1162 | * preceding prot_bytes that may be present. | ||
1163 | * | ||
1164 | * Also fix up the exp_data_len to reflect only the | ||
1165 | * actual data payload length. | ||
1166 | */ | ||
1127 | if (prot_bytes) { | 1167 | if (prot_bytes) { |
1128 | int tmp = 0; | 1168 | exp_data_len -= prot_bytes; |
1129 | 1169 | prot_iter = data_iter; | |
1130 | for (i = 0; i < data_num; i++) { | 1170 | iov_iter_advance(&data_iter, prot_bytes); |
1131 | tmp += vq->iov[data_first + i].iov_len; | ||
1132 | prot_niov++; | ||
1133 | if (tmp >= prot_bytes) | ||
1134 | break; | ||
1135 | } | ||
1136 | prot_first = data_first; | ||
1137 | data_first += prot_niov; | ||
1138 | data_niov = data_num - prot_niov; | ||
1139 | } | 1171 | } |
1140 | tag = vhost64_to_cpu(vq, v_req_pi.tag); | 1172 | tag = vhost64_to_cpu(vq, v_req_pi.tag); |
1141 | task_attr = v_req_pi.task_attr; | 1173 | task_attr = v_req_pi.task_attr; |
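The comment block above is the core of this rework: data direction and payload length are now derived purely from byte counts instead of walking iovecs. Below is a small standalone userspace sketch of that arithmetic, not driver code; the names, the placeholder header sizes and the folding of the later exp_data_len -= prot_bytes fixup into a single step are all illustrative assumptions.

#include <stddef.h>
#include <stdio.h>

enum data_dir { DIR_TO_DEVICE, DIR_FROM_DEVICE, DIR_NONE };

/*
 * out_size/in_size: total driver->device and device->driver iovec bytes.
 * req_size/rsp_size: virtio-scsi request/response header sizes.
 * prot_bytes: leading T10 PI bytes, carved out of the data payload.
 */
static enum data_dir classify(size_t out_size, size_t req_size,
			      size_t in_size, size_t rsp_size,
			      size_t prot_bytes, size_t *exp_data_len)
{
	if (out_size > req_size) {		/* extra outgoing bytes: WRITE */
		*exp_data_len = out_size - req_size - prot_bytes;
		return DIR_TO_DEVICE;
	}
	if (in_size > rsp_size) {		/* extra incoming bytes: READ  */
		*exp_data_len = in_size - rsp_size - prot_bytes;
		return DIR_FROM_DEVICE;
	}
	*exp_data_len = 0;			/* headers only: no data phase */
	return DIR_NONE;
}

int main(void)
{
	size_t len;
	/* 64/128 are placeholder header sizes, not the real struct sizes */
	enum data_dir d = classify(64 + 4096, 64, 128, 128, 512, &len);

	printf("dir=%d exp_data_len=%zu\n", d, len);	/* dir=0, len=3584 */
	return 0;
}

Carving the PI bytes off the front of the data iterator up front is what lets the later mapping call consume plain (prot_iter, data_iter) pairs.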
@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1147 | cdb = &v_req.cdb[0]; | 1179 | cdb = &v_req.cdb[0]; |
1148 | lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; | 1180 | lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; |
1149 | } | 1181 | } |
1150 | exp_data_len = 0; | ||
1151 | for (i = 0; i < data_niov; i++) | ||
1152 | exp_data_len += vq->iov[data_first + i].iov_len; | ||
1153 | /* | 1182 | /* |
1154 | * Check that the recieved CDB size does not exceeded our | 1183 | * Check that the received CDB size does not exceed our |
1155 | * hardcoded max for vhost-scsi | 1184 | * hardcoded max for vhost-scsi, then get a pre-allocated |
1185 | * cmd descriptor for the new virtio-scsi tag. | ||
1156 | * | 1186 | * |
1157 | * TODO what if cdb was too small for varlen cdb header? | 1187 | * TODO what if cdb was too small for varlen cdb header? |
1158 | */ | 1188 | */ |
1159 | if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { | 1189 | if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) { |
1160 | vq_err(vq, "Received SCSI CDB with command_size: %d that" | 1190 | vq_err(vq, "Received SCSI CDB with command_size: %d that" |
1161 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 1191 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1162 | scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); | 1192 | scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); |
1163 | goto err_cmd; | 1193 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1194 | continue; | ||
1164 | } | 1195 | } |
1165 | |||
1166 | cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, | 1196 | cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, |
1167 | exp_data_len + prot_bytes, | 1197 | exp_data_len + prot_bytes, |
1168 | data_direction); | 1198 | data_direction); |
1169 | if (IS_ERR(cmd)) { | 1199 | if (IS_ERR(cmd)) { |
1170 | vq_err(vq, "vhost_scsi_get_tag failed %ld\n", | 1200 | vq_err(vq, "vhost_scsi_get_tag failed %ld\n", |
1171 | PTR_ERR(cmd)); | 1201 | PTR_ERR(cmd)); |
1172 | goto err_cmd; | 1202 | vhost_scsi_send_bad_target(vs, vq, head, out); |
1203 | continue; | ||
1173 | } | 1204 | } |
1174 | |||
1175 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" | ||
1176 | ": %d\n", cmd, exp_data_len, data_direction); | ||
1177 | |||
1178 | cmd->tvc_vhost = vs; | 1205 | cmd->tvc_vhost = vs; |
1179 | cmd->tvc_vq = vq; | 1206 | cmd->tvc_vq = vq; |
1180 | cmd->tvc_resp = vq->iov[out].iov_base; | 1207 | cmd->tvc_resp_iov = &vq->iov[out]; |
1208 | cmd->tvc_in_iovs = in; | ||
1181 | 1209 | ||
1182 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", | 1210 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", |
1183 | cmd->tvc_cdb[0], cmd->tvc_lun); | 1211 | cmd->tvc_cdb[0], cmd->tvc_lun); |
1212 | pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:" | ||
1213 | " %d\n", cmd, exp_data_len, prot_bytes, data_direction); | ||
1184 | 1214 | ||
1185 | if (prot_niov) { | ||
1186 | ret = vhost_scsi_map_iov_to_prot(cmd, | ||
1187 | &vq->iov[prot_first], prot_niov, | ||
1188 | data_direction == DMA_FROM_DEVICE); | ||
1189 | if (unlikely(ret)) { | ||
1190 | vq_err(vq, "Failed to map iov to" | ||
1191 | " prot_sgl\n"); | ||
1192 | goto err_free; | ||
1193 | } | ||
1194 | } | ||
1195 | if (data_direction != DMA_NONE) { | 1215 | if (data_direction != DMA_NONE) { |
1196 | ret = vhost_scsi_map_iov_to_sgl(cmd, | 1216 | ret = vhost_scsi_mapal(cmd, |
1197 | &vq->iov[data_first], data_niov, | 1217 | prot_bytes, &prot_iter, |
1198 | data_direction == DMA_FROM_DEVICE); | 1218 | exp_data_len, &data_iter); |
1199 | if (unlikely(ret)) { | 1219 | if (unlikely(ret)) { |
1200 | vq_err(vq, "Failed to map iov to sgl\n"); | 1220 | vq_err(vq, "Failed to map iov to sgl\n"); |
1201 | goto err_free; | 1221 | vhost_scsi_release_cmd(&cmd->tvc_se_cmd); |
1222 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
1223 | continue; | ||
1202 | } | 1224 | } |
1203 | } | 1225 | } |
1204 | /* | 1226 | /* |
1205 | * Save the descriptor from vhost_get_vq_desc() to be used to | 1227 | * Save the descriptor from vhost_get_vq_desc() to be used to |
1206 | * complete the virtio-scsi request in TCM callback context via | 1228 | * complete the virtio-scsi request in TCM callback context via |
1207 | * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() | 1229 | * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() |
1208 | */ | 1230 | */ |
1209 | cmd->tvc_vq_desc = head; | 1231 | cmd->tvc_vq_desc = head; |
1210 | /* | 1232 | /* |
1211 | * Dispatch tv_cmd descriptor for cmwq execution in process | 1233 | * Dispatch cmd descriptor for cmwq execution in process |
1212 | * context provided by tcm_vhost_workqueue. This also ensures | 1234 | * context provided by vhost_scsi_workqueue. This also ensures |
1213 | * tv_cmd is executed on the same kworker CPU as this vhost | 1235 | * cmd is executed on the same kworker CPU as this vhost |
1214 | * thread to gain positive L2 cache locality effects.. | 1236 | * thread to gain positive L2 cache locality effects. |
1215 | */ | 1237 | */ |
1216 | INIT_WORK(&cmd->work, tcm_vhost_submission_work); | 1238 | INIT_WORK(&cmd->work, vhost_scsi_submission_work); |
1217 | queue_work(tcm_vhost_workqueue, &cmd->work); | 1239 | queue_work(vhost_scsi_workqueue, &cmd->work); |
1218 | } | 1240 | } |
1219 | |||
1220 | mutex_unlock(&vq->mutex); | ||
1221 | return; | ||
1222 | |||
1223 | err_free: | ||
1224 | vhost_scsi_free_cmd(cmd); | ||
1225 | err_cmd: | ||
1226 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
1227 | out: | 1241 | out: |
1228 | mutex_unlock(&vq->mutex); | 1242 | mutex_unlock(&vq->mutex); |
1229 | } | 1243 | } |
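For reference, the LUN handling kept in the hunk above, lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF, recovers a flat 14-bit LUN from the 8-byte virtio-scsi address. A minimal userspace sketch of just that step; the function name and sample bytes are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Recover the flat LUN from bytes 2-3 of the 8-byte virtio-scsi address,
 * masking to 14 bits, as the request handling above does for v_req.lun[]. */
static uint16_t decode_flat_lun(const uint8_t lun[8])
{
	return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

int main(void)
{
	uint8_t lun[8] = { 0x01, 0x00, 0x41, 0x2C, 0, 0, 0, 0 };

	printf("unpacked_lun=%u\n", decode_flat_lun(lun));	/* 300 */
	return 0;
}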
@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) | |||
1234 | } | 1248 | } |
1235 | 1249 | ||
1236 | static void | 1250 | static void |
1237 | tcm_vhost_send_evt(struct vhost_scsi *vs, | 1251 | vhost_scsi_send_evt(struct vhost_scsi *vs, |
1238 | struct tcm_vhost_tpg *tpg, | 1252 | struct vhost_scsi_tpg *tpg, |
1239 | struct se_lun *lun, | 1253 | struct se_lun *lun, |
1240 | u32 event, | 1254 | u32 event, |
1241 | u32 reason) | 1255 | u32 reason) |
1242 | { | 1256 | { |
1243 | struct tcm_vhost_evt *evt; | 1257 | struct vhost_scsi_evt *evt; |
1244 | 1258 | ||
1245 | evt = tcm_vhost_allocate_evt(vs, event, reason); | 1259 | evt = vhost_scsi_allocate_evt(vs, event, reason); |
1246 | if (!evt) | 1260 | if (!evt) |
1247 | return; | 1261 | return; |
1248 | 1262 | ||
@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs, | |||
1253 | * lun[4-7] need to be zero according to virtio-scsi spec. | 1267 | * lun[4-7] need to be zero according to virtio-scsi spec. |
1254 | */ | 1268 | */ |
1255 | evt->event.lun[0] = 0x01; | 1269 | evt->event.lun[0] = 0x01; |
1256 | evt->event.lun[1] = tpg->tport_tpgt & 0xFF; | 1270 | evt->event.lun[1] = tpg->tport_tpgt; |
1257 | if (lun->unpacked_lun >= 256) | 1271 | if (lun->unpacked_lun >= 256) |
1258 | evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; | 1272 | evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; |
1259 | evt->event.lun[3] = lun->unpacked_lun & 0xFF; | 1273 | evt->event.lun[3] = lun->unpacked_lun & 0xFF; |
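This is the inverse of the request-side LUN decode. A toy userspace model of the encoding described by the comment above; the explicit zero-fill of bytes 4 to 7 is an assumption of the sketch, since the driver relies on the event struct already being zeroed.

#include <stdint.h>
#include <stdio.h>

/*
 * Byte 0 is fixed 0x01, byte 1 carries the target (tpgt), bytes 2-3 carry
 * the LUN (with the 0x40 flag once unpacked_lun >= 256) and bytes 4-7 stay
 * zero as required by the virtio-scsi spec.
 */
static void encode_event_lun(uint8_t lun[8], uint8_t tpgt, uint32_t unpacked_lun)
{
	lun[0] = 0x01;
	lun[1] = tpgt;
	lun[2] = (unpacked_lun >= 256) ? ((unpacked_lun >> 8) | 0x40) : 0;
	lun[3] = unpacked_lun & 0xFF;
	lun[4] = lun[5] = lun[6] = lun[7] = 0;
}

int main(void)
{
	uint8_t lun[8];
	int i;

	encode_event_lun(lun, 3, 300);
	for (i = 0; i < 8; i++)
		printf("%02x ", lun[i]);
	printf("\n");		/* 01 03 41 2c 00 00 00 00 */
	return 0;
}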
@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work) | |||
1274 | goto out; | 1288 | goto out; |
1275 | 1289 | ||
1276 | if (vs->vs_events_missed) | 1290 | if (vs->vs_events_missed) |
1277 | tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); | 1291 | vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); |
1278 | out: | 1292 | out: |
1279 | mutex_unlock(&vq->mutex); | 1293 | mutex_unlock(&vq->mutex); |
1280 | } | 1294 | } |
@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) | |||
1300 | int i; | 1314 | int i; |
1301 | 1315 | ||
1302 | /* Init new inflight and remember the old inflight */ | 1316 | /* Init new inflight and remember the old inflight */ |
1303 | tcm_vhost_init_inflight(vs, old_inflight); | 1317 | vhost_scsi_init_inflight(vs, old_inflight); |
1304 | 1318 | ||
1305 | /* | 1319 | /* |
1306 | * The inflight->kref was initialized to 1. We decrement it here to | 1320 | * The inflight->kref was initialized to 1. We decrement it here to |
@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) | |||
1308 | * when all the reqs are finished. | 1322 | * when all the reqs are finished. |
1309 | */ | 1323 | */ |
1310 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | 1324 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) |
1311 | kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight); | 1325 | kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight); |
1312 | 1326 | ||
1313 | /* Flush both the vhost poll and vhost work */ | 1327 | /* Flush both the vhost poll and vhost work */ |
1314 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | 1328 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) |
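A toy model of the flush scheme the comment above describes, using a plain integer instead of the driver's struct kref and completion; all names here are illustrative.

#include <stdio.h>

/*
 * Each inflight counter starts at 1 (the reference held on behalf of the
 * vq), every outstanding request takes another reference, and the flush
 * installs a fresh counter and drops that initial reference so the old
 * counter "completes" once the last request puts it.
 */
struct inflight { int refs; int done; };

static void inflight_get(struct inflight *i) { i->refs++; }

static void inflight_put(struct inflight *i)
{
	if (--i->refs == 0)
		i->done = 1;	/* in the driver: wake the flush waiter */
}

int main(void)
{
	struct inflight old_inflight = { .refs = 1 };	/* kref_init() */
	struct inflight new_inflight = { .refs = 1 };	/* installed by flush */

	inflight_get(&old_inflight);	/* one request still in flight */
	inflight_put(&old_inflight);	/* flush drops the initial ref  */
	printf("after flush: done=%d\n", old_inflight.done);	/* 0 */

	inflight_put(&old_inflight);	/* request finishes              */
	printf("after last put: done=%d (new refs=%d)\n",
	       old_inflight.done, new_inflight.refs);		/* 1, 1 */
	return 0;
}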
@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) | |||
1323 | 1337 | ||
1324 | /* | 1338 | /* |
1325 | * Called from vhost_scsi_ioctl() context to walk the list of available | 1339 | * Called from vhost_scsi_ioctl() context to walk the list of available |
1326 | * tcm_vhost_tpg with an active struct tcm_vhost_nexus | 1340 | * vhost_scsi_tpg with an active struct vhost_scsi_nexus |
1327 | * | 1341 | * |
1328 | * The lock nesting rule is: | 1342 | * The lock nesting rule is: |
1329 | * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex | 1343 | * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex |
1330 | */ | 1344 | */ |
1331 | static int | 1345 | static int |
1332 | vhost_scsi_set_endpoint(struct vhost_scsi *vs, | 1346 | vhost_scsi_set_endpoint(struct vhost_scsi *vs, |
1333 | struct vhost_scsi_target *t) | 1347 | struct vhost_scsi_target *t) |
1334 | { | 1348 | { |
1335 | struct se_portal_group *se_tpg; | 1349 | struct se_portal_group *se_tpg; |
1336 | struct tcm_vhost_tport *tv_tport; | 1350 | struct vhost_scsi_tport *tv_tport; |
1337 | struct tcm_vhost_tpg *tpg; | 1351 | struct vhost_scsi_tpg *tpg; |
1338 | struct tcm_vhost_tpg **vs_tpg; | 1352 | struct vhost_scsi_tpg **vs_tpg; |
1339 | struct vhost_virtqueue *vq; | 1353 | struct vhost_virtqueue *vq; |
1340 | int index, ret, i, len; | 1354 | int index, ret, i, len; |
1341 | bool match = false; | 1355 | bool match = false; |
1342 | 1356 | ||
1343 | mutex_lock(&tcm_vhost_mutex); | 1357 | mutex_lock(&vhost_scsi_mutex); |
1344 | mutex_lock(&vs->dev.mutex); | 1358 | mutex_lock(&vs->dev.mutex); |
1345 | 1359 | ||
1346 | /* Verify that ring has been setup correctly. */ | 1360 | /* Verify that ring has been setup correctly. */ |
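The nesting rule spelled out above reads as a fixed acquisition order. A compileable pthread illustration only; the mutexes stand in for the driver's locks and none of these names are the driver's.

#include <pthread.h>
#include <stdio.h>

/*
 * Documented order: vhost_scsi_mutex -> vs->dev.mutex ->
 * tpg->tv_tpg_mutex -> vq->mutex.  Taking them in any other order risks
 * deadlock against the other paths (endpoint setup, port link/unlink,
 * request handling) that share them.
 */
static pthread_mutex_t scsi_mutex = PTHREAD_MUTEX_INITIALIZER;	/* vhost_scsi_mutex  */
static pthread_mutex_t dev_mutex  = PTHREAD_MUTEX_INITIALIZER;	/* vs->dev.mutex     */
static pthread_mutex_t tpg_mutex  = PTHREAD_MUTEX_INITIALIZER;	/* tpg->tv_tpg_mutex */
static pthread_mutex_t vq_mutex   = PTHREAD_MUTEX_INITIALIZER;	/* vq->mutex         */

int main(void)
{
	pthread_mutex_lock(&scsi_mutex);
	pthread_mutex_lock(&dev_mutex);
	pthread_mutex_lock(&tpg_mutex);
	pthread_mutex_lock(&vq_mutex);
	puts("locked in the documented order");
	pthread_mutex_unlock(&vq_mutex);
	pthread_mutex_unlock(&tpg_mutex);
	pthread_mutex_unlock(&dev_mutex);
	pthread_mutex_unlock(&scsi_mutex);
	return 0;
}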
@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, | |||
1361 | if (vs->vs_tpg) | 1375 | if (vs->vs_tpg) |
1362 | memcpy(vs_tpg, vs->vs_tpg, len); | 1376 | memcpy(vs_tpg, vs->vs_tpg, len); |
1363 | 1377 | ||
1364 | list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) { | 1378 | list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { |
1365 | mutex_lock(&tpg->tv_tpg_mutex); | 1379 | mutex_lock(&tpg->tv_tpg_mutex); |
1366 | if (!tpg->tpg_nexus) { | 1380 | if (!tpg->tpg_nexus) { |
1367 | mutex_unlock(&tpg->tv_tpg_mutex); | 1381 | mutex_unlock(&tpg->tv_tpg_mutex); |
@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, | |||
1429 | 1443 | ||
1430 | out: | 1444 | out: |
1431 | mutex_unlock(&vs->dev.mutex); | 1445 | mutex_unlock(&vs->dev.mutex); |
1432 | mutex_unlock(&tcm_vhost_mutex); | 1446 | mutex_unlock(&vhost_scsi_mutex); |
1433 | return ret; | 1447 | return ret; |
1434 | } | 1448 | } |
1435 | 1449 | ||
@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, | |||
1438 | struct vhost_scsi_target *t) | 1452 | struct vhost_scsi_target *t) |
1439 | { | 1453 | { |
1440 | struct se_portal_group *se_tpg; | 1454 | struct se_portal_group *se_tpg; |
1441 | struct tcm_vhost_tport *tv_tport; | 1455 | struct vhost_scsi_tport *tv_tport; |
1442 | struct tcm_vhost_tpg *tpg; | 1456 | struct vhost_scsi_tpg *tpg; |
1443 | struct vhost_virtqueue *vq; | 1457 | struct vhost_virtqueue *vq; |
1444 | bool match = false; | 1458 | bool match = false; |
1445 | int index, ret, i; | 1459 | int index, ret, i; |
1446 | u8 target; | 1460 | u8 target; |
1447 | 1461 | ||
1448 | mutex_lock(&tcm_vhost_mutex); | 1462 | mutex_lock(&vhost_scsi_mutex); |
1449 | mutex_lock(&vs->dev.mutex); | 1463 | mutex_lock(&vs->dev.mutex); |
1450 | /* Verify that ring has been setup correctly. */ | 1464 | /* Verify that ring has been setup correctly. */ |
1451 | for (index = 0; index < vs->dev.nvqs; ++index) { | 1465 | for (index = 0; index < vs->dev.nvqs; ++index) { |
@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, | |||
1511 | vs->vs_tpg = NULL; | 1525 | vs->vs_tpg = NULL; |
1512 | WARN_ON(vs->vs_events_nr); | 1526 | WARN_ON(vs->vs_events_nr); |
1513 | mutex_unlock(&vs->dev.mutex); | 1527 | mutex_unlock(&vs->dev.mutex); |
1514 | mutex_unlock(&tcm_vhost_mutex); | 1528 | mutex_unlock(&vhost_scsi_mutex); |
1515 | return 0; | 1529 | return 0; |
1516 | 1530 | ||
1517 | err_tpg: | 1531 | err_tpg: |
1518 | mutex_unlock(&tpg->tv_tpg_mutex); | 1532 | mutex_unlock(&tpg->tv_tpg_mutex); |
1519 | err_dev: | 1533 | err_dev: |
1520 | mutex_unlock(&vs->dev.mutex); | 1534 | mutex_unlock(&vs->dev.mutex); |
1521 | mutex_unlock(&tcm_vhost_mutex); | 1535 | mutex_unlock(&vhost_scsi_mutex); |
1522 | return ret; | 1536 | return ret; |
1523 | } | 1537 | } |
1524 | 1538 | ||
@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) | |||
1565 | goto err_vqs; | 1579 | goto err_vqs; |
1566 | 1580 | ||
1567 | vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); | 1581 | vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); |
1568 | vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); | 1582 | vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); |
1569 | 1583 | ||
1570 | vs->vs_events_nr = 0; | 1584 | vs->vs_events_nr = 0; |
1571 | vs->vs_events_missed = false; | 1585 | vs->vs_events_missed = false; |
@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) | |||
1580 | } | 1594 | } |
1581 | vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); | 1595 | vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); |
1582 | 1596 | ||
1583 | tcm_vhost_init_inflight(vs, NULL); | 1597 | vhost_scsi_init_inflight(vs, NULL); |
1584 | 1598 | ||
1585 | f->private_data = vs; | 1599 | f->private_data = vs; |
1586 | return 0; | 1600 | return 0; |
@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void) | |||
1712 | return misc_deregister(&vhost_scsi_misc); | 1726 | return misc_deregister(&vhost_scsi_misc); |
1713 | } | 1727 | } |
1714 | 1728 | ||
1715 | static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) | 1729 | static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport) |
1716 | { | 1730 | { |
1717 | switch (tport->tport_proto_id) { | 1731 | switch (tport->tport_proto_id) { |
1718 | case SCSI_PROTOCOL_SAS: | 1732 | case SCSI_PROTOCOL_SAS: |
@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) | |||
1729 | } | 1743 | } |
1730 | 1744 | ||
1731 | static void | 1745 | static void |
1732 | tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, | 1746 | vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, |
1733 | struct se_lun *lun, bool plug) | 1747 | struct se_lun *lun, bool plug) |
1734 | { | 1748 | { |
1735 | 1749 | ||
@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, | |||
1750 | vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; | 1764 | vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; |
1751 | mutex_lock(&vq->mutex); | 1765 | mutex_lock(&vq->mutex); |
1752 | if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) | 1766 | if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) |
1753 | tcm_vhost_send_evt(vs, tpg, lun, | 1767 | vhost_scsi_send_evt(vs, tpg, lun, |
1754 | VIRTIO_SCSI_T_TRANSPORT_RESET, reason); | 1768 | VIRTIO_SCSI_T_TRANSPORT_RESET, reason); |
1755 | mutex_unlock(&vq->mutex); | 1769 | mutex_unlock(&vq->mutex); |
1756 | mutex_unlock(&vs->dev.mutex); | 1770 | mutex_unlock(&vs->dev.mutex); |
1757 | } | 1771 | } |
1758 | 1772 | ||
1759 | static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) | 1773 | static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) |
1760 | { | 1774 | { |
1761 | tcm_vhost_do_plug(tpg, lun, true); | 1775 | vhost_scsi_do_plug(tpg, lun, true); |
1762 | } | 1776 | } |
1763 | 1777 | ||
1764 | static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) | 1778 | static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) |
1765 | { | 1779 | { |
1766 | tcm_vhost_do_plug(tpg, lun, false); | 1780 | vhost_scsi_do_plug(tpg, lun, false); |
1767 | } | 1781 | } |
1768 | 1782 | ||
1769 | static int tcm_vhost_port_link(struct se_portal_group *se_tpg, | 1783 | static int vhost_scsi_port_link(struct se_portal_group *se_tpg, |
1770 | struct se_lun *lun) | 1784 | struct se_lun *lun) |
1771 | { | 1785 | { |
1772 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 1786 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
1773 | struct tcm_vhost_tpg, se_tpg); | 1787 | struct vhost_scsi_tpg, se_tpg); |
1774 | 1788 | ||
1775 | mutex_lock(&tcm_vhost_mutex); | 1789 | mutex_lock(&vhost_scsi_mutex); |
1776 | 1790 | ||
1777 | mutex_lock(&tpg->tv_tpg_mutex); | 1791 | mutex_lock(&tpg->tv_tpg_mutex); |
1778 | tpg->tv_tpg_port_count++; | 1792 | tpg->tv_tpg_port_count++; |
1779 | mutex_unlock(&tpg->tv_tpg_mutex); | 1793 | mutex_unlock(&tpg->tv_tpg_mutex); |
1780 | 1794 | ||
1781 | tcm_vhost_hotplug(tpg, lun); | 1795 | vhost_scsi_hotplug(tpg, lun); |
1782 | 1796 | ||
1783 | mutex_unlock(&tcm_vhost_mutex); | 1797 | mutex_unlock(&vhost_scsi_mutex); |
1784 | 1798 | ||
1785 | return 0; | 1799 | return 0; |
1786 | } | 1800 | } |
1787 | 1801 | ||
1788 | static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, | 1802 | static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, |
1789 | struct se_lun *lun) | 1803 | struct se_lun *lun) |
1790 | { | 1804 | { |
1791 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 1805 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
1792 | struct tcm_vhost_tpg, se_tpg); | 1806 | struct vhost_scsi_tpg, se_tpg); |
1793 | 1807 | ||
1794 | mutex_lock(&tcm_vhost_mutex); | 1808 | mutex_lock(&vhost_scsi_mutex); |
1795 | 1809 | ||
1796 | mutex_lock(&tpg->tv_tpg_mutex); | 1810 | mutex_lock(&tpg->tv_tpg_mutex); |
1797 | tpg->tv_tpg_port_count--; | 1811 | tpg->tv_tpg_port_count--; |
1798 | mutex_unlock(&tpg->tv_tpg_mutex); | 1812 | mutex_unlock(&tpg->tv_tpg_mutex); |
1799 | 1813 | ||
1800 | tcm_vhost_hotunplug(tpg, lun); | 1814 | vhost_scsi_hotunplug(tpg, lun); |
1801 | 1815 | ||
1802 | mutex_unlock(&tcm_vhost_mutex); | 1816 | mutex_unlock(&vhost_scsi_mutex); |
1803 | } | 1817 | } |
1804 | 1818 | ||
1805 | static struct se_node_acl * | 1819 | static struct se_node_acl * |
1806 | tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, | 1820 | vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg, |
1807 | struct config_group *group, | 1821 | struct config_group *group, |
1808 | const char *name) | 1822 | const char *name) |
1809 | { | 1823 | { |
1810 | struct se_node_acl *se_nacl, *se_nacl_new; | 1824 | struct se_node_acl *se_nacl, *se_nacl_new; |
1811 | struct tcm_vhost_nacl *nacl; | 1825 | struct vhost_scsi_nacl *nacl; |
1812 | u64 wwpn = 0; | 1826 | u64 wwpn = 0; |
1813 | u32 nexus_depth; | 1827 | u32 nexus_depth; |
1814 | 1828 | ||
1815 | /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) | 1829 | /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) |
1816 | return ERR_PTR(-EINVAL); */ | 1830 | return ERR_PTR(-EINVAL); */ |
1817 | se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); | 1831 | se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg); |
1818 | if (!se_nacl_new) | 1832 | if (!se_nacl_new) |
1819 | return ERR_PTR(-ENOMEM); | 1833 | return ERR_PTR(-ENOMEM); |
1820 | 1834 | ||
@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, | |||
1826 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, | 1840 | se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, |
1827 | name, nexus_depth); | 1841 | name, nexus_depth); |
1828 | if (IS_ERR(se_nacl)) { | 1842 | if (IS_ERR(se_nacl)) { |
1829 | tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); | 1843 | vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new); |
1830 | return se_nacl; | 1844 | return se_nacl; |
1831 | } | 1845 | } |
1832 | /* | 1846 | /* |
1833 | * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN | 1847 | * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN |
1834 | */ | 1848 | */ |
1835 | nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); | 1849 | nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl); |
1836 | nacl->iport_wwpn = wwpn; | 1850 | nacl->iport_wwpn = wwpn; |
1837 | 1851 | ||
1838 | return se_nacl; | 1852 | return se_nacl; |
1839 | } | 1853 | } |
1840 | 1854 | ||
1841 | static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) | 1855 | static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl) |
1842 | { | 1856 | { |
1843 | struct tcm_vhost_nacl *nacl = container_of(se_acl, | 1857 | struct vhost_scsi_nacl *nacl = container_of(se_acl, |
1844 | struct tcm_vhost_nacl, se_node_acl); | 1858 | struct vhost_scsi_nacl, se_node_acl); |
1845 | core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); | 1859 | core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); |
1846 | kfree(nacl); | 1860 | kfree(nacl); |
1847 | } | 1861 | } |
1848 | 1862 | ||
1849 | static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, | 1863 | static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, |
1850 | struct se_session *se_sess) | 1864 | struct se_session *se_sess) |
1851 | { | 1865 | { |
1852 | struct tcm_vhost_cmd *tv_cmd; | 1866 | struct vhost_scsi_cmd *tv_cmd; |
1853 | unsigned int i; | 1867 | unsigned int i; |
1854 | 1868 | ||
1855 | if (!se_sess->sess_cmd_map) | 1869 | if (!se_sess->sess_cmd_map) |
1856 | return; | 1870 | return; |
1857 | 1871 | ||
1858 | for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { | 1872 | for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { |
1859 | tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; | 1873 | tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; |
1860 | 1874 | ||
1861 | kfree(tv_cmd->tvc_sgl); | 1875 | kfree(tv_cmd->tvc_sgl); |
1862 | kfree(tv_cmd->tvc_prot_sgl); | 1876 | kfree(tv_cmd->tvc_prot_sgl); |
@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, | |||
1864 | } | 1878 | } |
1865 | } | 1879 | } |
1866 | 1880 | ||
1867 | static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | 1881 | static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, |
1868 | const char *name) | 1882 | const char *name) |
1869 | { | 1883 | { |
1870 | struct se_portal_group *se_tpg; | 1884 | struct se_portal_group *se_tpg; |
1871 | struct se_session *se_sess; | 1885 | struct se_session *se_sess; |
1872 | struct tcm_vhost_nexus *tv_nexus; | 1886 | struct vhost_scsi_nexus *tv_nexus; |
1873 | struct tcm_vhost_cmd *tv_cmd; | 1887 | struct vhost_scsi_cmd *tv_cmd; |
1874 | unsigned int i; | 1888 | unsigned int i; |
1875 | 1889 | ||
1876 | mutex_lock(&tpg->tv_tpg_mutex); | 1890 | mutex_lock(&tpg->tv_tpg_mutex); |
@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1881 | } | 1895 | } |
1882 | se_tpg = &tpg->se_tpg; | 1896 | se_tpg = &tpg->se_tpg; |
1883 | 1897 | ||
1884 | tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); | 1898 | tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL); |
1885 | if (!tv_nexus) { | 1899 | if (!tv_nexus) { |
1886 | mutex_unlock(&tpg->tv_tpg_mutex); | 1900 | mutex_unlock(&tpg->tv_tpg_mutex); |
1887 | pr_err("Unable to allocate struct tcm_vhost_nexus\n"); | 1901 | pr_err("Unable to allocate struct vhost_scsi_nexus\n"); |
1888 | return -ENOMEM; | 1902 | return -ENOMEM; |
1889 | } | 1903 | } |
1890 | /* | 1904 | /* |
1891 | * Initialize the struct se_session pointer and setup tagpool | 1905 | * Initialize the struct se_session pointer and setup tagpool |
1892 | * for struct tcm_vhost_cmd descriptors | 1906 | * for struct vhost_scsi_cmd descriptors |
1893 | */ | 1907 | */ |
1894 | tv_nexus->tvn_se_sess = transport_init_session_tags( | 1908 | tv_nexus->tvn_se_sess = transport_init_session_tags( |
1895 | TCM_VHOST_DEFAULT_TAGS, | 1909 | VHOST_SCSI_DEFAULT_TAGS, |
1896 | sizeof(struct tcm_vhost_cmd), | 1910 | sizeof(struct vhost_scsi_cmd), |
1897 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); | 1911 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS); |
1898 | if (IS_ERR(tv_nexus->tvn_se_sess)) { | 1912 | if (IS_ERR(tv_nexus->tvn_se_sess)) { |
1899 | mutex_unlock(&tpg->tv_tpg_mutex); | 1913 | mutex_unlock(&tpg->tv_tpg_mutex); |
@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1901 | return -ENOMEM; | 1915 | return -ENOMEM; |
1902 | } | 1916 | } |
1903 | se_sess = tv_nexus->tvn_se_sess; | 1917 | se_sess = tv_nexus->tvn_se_sess; |
1904 | for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { | 1918 | for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { |
1905 | tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; | 1919 | tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; |
1906 | 1920 | ||
1907 | tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * | 1921 | tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * |
1908 | TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL); | 1922 | VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL); |
1909 | if (!tv_cmd->tvc_sgl) { | 1923 | if (!tv_cmd->tvc_sgl) { |
1910 | mutex_unlock(&tpg->tv_tpg_mutex); | 1924 | mutex_unlock(&tpg->tv_tpg_mutex); |
1911 | pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); | 1925 | pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); |
@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1913 | } | 1927 | } |
1914 | 1928 | ||
1915 | tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * | 1929 | tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * |
1916 | TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL); | 1930 | VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL); |
1917 | if (!tv_cmd->tvc_upages) { | 1931 | if (!tv_cmd->tvc_upages) { |
1918 | mutex_unlock(&tpg->tv_tpg_mutex); | 1932 | mutex_unlock(&tpg->tv_tpg_mutex); |
1919 | pr_err("Unable to allocate tv_cmd->tvc_upages\n"); | 1933 | pr_err("Unable to allocate tv_cmd->tvc_upages\n"); |
@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1921 | } | 1935 | } |
1922 | 1936 | ||
1923 | tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * | 1937 | tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * |
1924 | TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL); | 1938 | VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL); |
1925 | if (!tv_cmd->tvc_prot_sgl) { | 1939 | if (!tv_cmd->tvc_prot_sgl) { |
1926 | mutex_unlock(&tpg->tv_tpg_mutex); | 1940 | mutex_unlock(&tpg->tv_tpg_mutex); |
1927 | pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); | 1941 | pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); |
@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1930 | } | 1944 | } |
1931 | /* | 1945 | /* |
1932 | * Since we are running in 'demo mode' this call will generate a | 1946 | * Since we are running in 'demo mode' this call will generate a |
1933 | * struct se_node_acl for the tcm_vhost struct se_portal_group with | 1947 | * struct se_node_acl for the vhost_scsi struct se_portal_group with |
1934 | * the SCSI Initiator port name of the passed configfs group 'name'. | 1948 | * the SCSI Initiator port name of the passed configfs group 'name'. |
1935 | */ | 1949 | */ |
1936 | tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( | 1950 | tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( |
@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, | |||
1953 | return 0; | 1967 | return 0; |
1954 | 1968 | ||
1955 | out: | 1969 | out: |
1956 | tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); | 1970 | vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); |
1957 | transport_free_session(se_sess); | 1971 | transport_free_session(se_sess); |
1958 | kfree(tv_nexus); | 1972 | kfree(tv_nexus); |
1959 | return -ENOMEM; | 1973 | return -ENOMEM; |
1960 | } | 1974 | } |
1961 | 1975 | ||
1962 | static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) | 1976 | static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg) |
1963 | { | 1977 | { |
1964 | struct se_session *se_sess; | 1978 | struct se_session *se_sess; |
1965 | struct tcm_vhost_nexus *tv_nexus; | 1979 | struct vhost_scsi_nexus *tv_nexus; |
1966 | 1980 | ||
1967 | mutex_lock(&tpg->tv_tpg_mutex); | 1981 | mutex_lock(&tpg->tv_tpg_mutex); |
1968 | tv_nexus = tpg->tpg_nexus; | 1982 | tv_nexus = tpg->tpg_nexus; |
@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) | |||
1994 | } | 2008 | } |
1995 | 2009 | ||
1996 | pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" | 2010 | pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" |
1997 | " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), | 2011 | " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport), |
1998 | tv_nexus->tvn_se_sess->se_node_acl->initiatorname); | 2012 | tv_nexus->tvn_se_sess->se_node_acl->initiatorname); |
1999 | 2013 | ||
2000 | tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); | 2014 | vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); |
2001 | /* | 2015 | /* |
2002 | * Release the SCSI I_T Nexus to the emulated vhost Target Port | 2016 | * Release the SCSI I_T Nexus to the emulated vhost Target Port |
2003 | */ | 2017 | */ |
@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) | |||
2009 | return 0; | 2023 | return 0; |
2010 | } | 2024 | } |
2011 | 2025 | ||
2012 | static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, | 2026 | static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg, |
2013 | char *page) | 2027 | char *page) |
2014 | { | 2028 | { |
2015 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 2029 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
2016 | struct tcm_vhost_tpg, se_tpg); | 2030 | struct vhost_scsi_tpg, se_tpg); |
2017 | struct tcm_vhost_nexus *tv_nexus; | 2031 | struct vhost_scsi_nexus *tv_nexus; |
2018 | ssize_t ret; | 2032 | ssize_t ret; |
2019 | 2033 | ||
2020 | mutex_lock(&tpg->tv_tpg_mutex); | 2034 | mutex_lock(&tpg->tv_tpg_mutex); |
@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, | |||
2030 | return ret; | 2044 | return ret; |
2031 | } | 2045 | } |
2032 | 2046 | ||
2033 | static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, | 2047 | static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg, |
2034 | const char *page, | 2048 | const char *page, |
2035 | size_t count) | 2049 | size_t count) |
2036 | { | 2050 | { |
2037 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 2051 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
2038 | struct tcm_vhost_tpg, se_tpg); | 2052 | struct vhost_scsi_tpg, se_tpg); |
2039 | struct tcm_vhost_tport *tport_wwn = tpg->tport; | 2053 | struct vhost_scsi_tport *tport_wwn = tpg->tport; |
2040 | unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; | 2054 | unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr; |
2041 | int ret; | 2055 | int ret; |
2042 | /* | 2056 | /* |
2043 | * Shutdown the active I_T nexus if 'NULL' is passed.. | 2057 | * Shutdown the active I_T nexus if 'NULL' is passed.. |
2044 | */ | 2058 | */ |
2045 | if (!strncmp(page, "NULL", 4)) { | 2059 | if (!strncmp(page, "NULL", 4)) { |
2046 | ret = tcm_vhost_drop_nexus(tpg); | 2060 | ret = vhost_scsi_drop_nexus(tpg); |
2047 | return (!ret) ? count : ret; | 2061 | return (!ret) ? count : ret; |
2048 | } | 2062 | } |
2049 | /* | 2063 | /* |
2050 | * Otherwise make sure the passed virtual Initiator port WWN matches | 2064 | * Otherwise make sure the passed virtual Initiator port WWN matches |
2051 | * the fabric protocol_id set in tcm_vhost_make_tport(), and call | 2065 | * the fabric protocol_id set in vhost_scsi_make_tport(), and call |
2052 | * tcm_vhost_make_nexus(). | 2066 | * vhost_scsi_make_nexus(). |
2053 | */ | 2067 | */ |
2054 | if (strlen(page) >= TCM_VHOST_NAMELEN) { | 2068 | if (strlen(page) >= VHOST_SCSI_NAMELEN) { |
2055 | pr_err("Emulated NAA Sas Address: %s, exceeds" | 2069 | pr_err("Emulated NAA Sas Address: %s, exceeds" |
2056 | " max: %d\n", page, TCM_VHOST_NAMELEN); | 2070 | " max: %d\n", page, VHOST_SCSI_NAMELEN); |
2057 | return -EINVAL; | 2071 | return -EINVAL; |
2058 | } | 2072 | } |
2059 | snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); | 2073 | snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page); |
2060 | 2074 | ||
2061 | ptr = strstr(i_port, "naa."); | 2075 | ptr = strstr(i_port, "naa."); |
2062 | if (ptr) { | 2076 | if (ptr) { |
2063 | if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { | 2077 | if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { |
2064 | pr_err("Passed SAS Initiator Port %s does not" | 2078 | pr_err("Passed SAS Initiator Port %s does not" |
2065 | " match target port protoid: %s\n", i_port, | 2079 | " match target port protoid: %s\n", i_port, |
2066 | tcm_vhost_dump_proto_id(tport_wwn)); | 2080 | vhost_scsi_dump_proto_id(tport_wwn)); |
2067 | return -EINVAL; | 2081 | return -EINVAL; |
2068 | } | 2082 | } |
2069 | port_ptr = &i_port[0]; | 2083 | port_ptr = &i_port[0]; |
@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, | |||
2074 | if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { | 2088 | if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { |
2075 | pr_err("Passed FCP Initiator Port %s does not" | 2089 | pr_err("Passed FCP Initiator Port %s does not" |
2076 | " match target port protoid: %s\n", i_port, | 2090 | " match target port protoid: %s\n", i_port, |
2077 | tcm_vhost_dump_proto_id(tport_wwn)); | 2091 | vhost_scsi_dump_proto_id(tport_wwn)); |
2078 | return -EINVAL; | 2092 | return -EINVAL; |
2079 | } | 2093 | } |
2080 | port_ptr = &i_port[3]; /* Skip over "fc." */ | 2094 | port_ptr = &i_port[3]; /* Skip over "fc." */ |
@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, | |||
2085 | if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { | 2099 | if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { |
2086 | pr_err("Passed iSCSI Initiator Port %s does not" | 2100 | pr_err("Passed iSCSI Initiator Port %s does not" |
2087 | " match target port protoid: %s\n", i_port, | 2101 | " match target port protoid: %s\n", i_port, |
2088 | tcm_vhost_dump_proto_id(tport_wwn)); | 2102 | vhost_scsi_dump_proto_id(tport_wwn)); |
2089 | return -EINVAL; | 2103 | return -EINVAL; |
2090 | } | 2104 | } |
2091 | port_ptr = &i_port[0]; | 2105 | port_ptr = &i_port[0]; |
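The branches above all enforce one rule: the initiator-port string prefix must agree with the protocol id the target port was created with. A simplified standalone sketch of that check, using strncmp on the prefix; the driver itself uses strstr, skips the "fc." prefix for the stored name, and also strips a trailing newline.

#include <stdio.h>
#include <string.h>

enum proto { PROTO_SAS, PROTO_FCP, PROTO_ISCSI };

/* Accept the initiator-port string only when its prefix agrees with the
 * protocol id of the target port it is being bound to. */
static int prefix_matches_proto(const char *i_port, enum proto tport_proto)
{
	if (!strncmp(i_port, "naa.", 4))
		return tport_proto == PROTO_SAS;
	if (!strncmp(i_port, "fc.", 3))
		return tport_proto == PROTO_FCP;
	if (!strncmp(i_port, "iqn.", 4))
		return tport_proto == PROTO_ISCSI;
	return 0;
}

int main(void)
{
	printf("%d\n", prefix_matches_proto("naa.600140512345678", PROTO_SAS));		/* 1 */
	printf("%d\n", prefix_matches_proto("iqn.2015-01.org.example:t1", PROTO_SAS));	/* 0 */
	return 0;
}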
@@ -2101,40 +2115,40 @@ check_newline: | |||
2101 | if (i_port[strlen(i_port)-1] == '\n') | 2115 | if (i_port[strlen(i_port)-1] == '\n') |
2102 | i_port[strlen(i_port)-1] = '\0'; | 2116 | i_port[strlen(i_port)-1] = '\0'; |
2103 | 2117 | ||
2104 | ret = tcm_vhost_make_nexus(tpg, port_ptr); | 2118 | ret = vhost_scsi_make_nexus(tpg, port_ptr); |
2105 | if (ret < 0) | 2119 | if (ret < 0) |
2106 | return ret; | 2120 | return ret; |
2107 | 2121 | ||
2108 | return count; | 2122 | return count; |
2109 | } | 2123 | } |
2110 | 2124 | ||
2111 | TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); | 2125 | TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR); |
2112 | 2126 | ||
2113 | static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { | 2127 | static struct configfs_attribute *vhost_scsi_tpg_attrs[] = { |
2114 | &tcm_vhost_tpg_nexus.attr, | 2128 | &vhost_scsi_tpg_nexus.attr, |
2115 | NULL, | 2129 | NULL, |
2116 | }; | 2130 | }; |
2117 | 2131 | ||
2118 | static struct se_portal_group * | 2132 | static struct se_portal_group * |
2119 | tcm_vhost_make_tpg(struct se_wwn *wwn, | 2133 | vhost_scsi_make_tpg(struct se_wwn *wwn, |
2120 | struct config_group *group, | 2134 | struct config_group *group, |
2121 | const char *name) | 2135 | const char *name) |
2122 | { | 2136 | { |
2123 | struct tcm_vhost_tport *tport = container_of(wwn, | 2137 | struct vhost_scsi_tport *tport = container_of(wwn, |
2124 | struct tcm_vhost_tport, tport_wwn); | 2138 | struct vhost_scsi_tport, tport_wwn); |
2125 | 2139 | ||
2126 | struct tcm_vhost_tpg *tpg; | 2140 | struct vhost_scsi_tpg *tpg; |
2127 | unsigned long tpgt; | 2141 | u16 tpgt; |
2128 | int ret; | 2142 | int ret; |
2129 | 2143 | ||
2130 | if (strstr(name, "tpgt_") != name) | 2144 | if (strstr(name, "tpgt_") != name) |
2131 | return ERR_PTR(-EINVAL); | 2145 | return ERR_PTR(-EINVAL); |
2132 | if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) | 2146 | if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET) |
2133 | return ERR_PTR(-EINVAL); | 2147 | return ERR_PTR(-EINVAL); |
2134 | 2148 | ||
2135 | tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); | 2149 | tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL); |
2136 | if (!tpg) { | 2150 | if (!tpg) { |
2137 | pr_err("Unable to allocate struct tcm_vhost_tpg"); | 2151 | pr_err("Unable to allocate struct vhost_scsi_tpg"); |
2138 | return ERR_PTR(-ENOMEM); | 2152 | return ERR_PTR(-ENOMEM); |
2139 | } | 2153 | } |
2140 | mutex_init(&tpg->tv_tpg_mutex); | 2154 | mutex_init(&tpg->tv_tpg_mutex); |
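The make_tpg hunk above tightens the tpgt parse: the value is now read as a u16 and must fall below the per-vhost target limit, where the old code accepted anything up to UINT_MAX. A userspace model of the new check; MAX_TARGET is an assumed stand-in for VHOST_SCSI_MAX_TARGET.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGET	256	/* assumed stand-in for VHOST_SCSI_MAX_TARGET */

/* Parse "tpgt_<n>", rejecting anything that is not a valid target index. */
static int parse_tpgt(const char *name, uint16_t *tpgt)
{
	unsigned long val;
	char *end;

	if (strncmp(name, "tpgt_", 5))
		return -EINVAL;
	errno = 0;
	val = strtoul(name + 5, &end, 10);
	if (errno || end == name + 5 || *end != '\0' || val >= MAX_TARGET)
		return -EINVAL;
	*tpgt = (uint16_t)val;
	return 0;
}

int main(void)
{
	uint16_t tpgt;

	printf("tpgt_1   -> %d\n", parse_tpgt("tpgt_1", &tpgt));	/* 0           */
	printf("tpgt_999 -> %d\n", parse_tpgt("tpgt_999", &tpgt));	/* -22, EINVAL */
	return 0;
}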
@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn, | |||
2142 | tpg->tport = tport; | 2156 | tpg->tport = tport; |
2143 | tpg->tport_tpgt = tpgt; | 2157 | tpg->tport_tpgt = tpgt; |
2144 | 2158 | ||
2145 | ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, | 2159 | ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn, |
2146 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); | 2160 | &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); |
2147 | if (ret < 0) { | 2161 | if (ret < 0) { |
2148 | kfree(tpg); | 2162 | kfree(tpg); |
2149 | return NULL; | 2163 | return NULL; |
2150 | } | 2164 | } |
2151 | mutex_lock(&tcm_vhost_mutex); | 2165 | mutex_lock(&vhost_scsi_mutex); |
2152 | list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); | 2166 | list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list); |
2153 | mutex_unlock(&tcm_vhost_mutex); | 2167 | mutex_unlock(&vhost_scsi_mutex); |
2154 | 2168 | ||
2155 | return &tpg->se_tpg; | 2169 | return &tpg->se_tpg; |
2156 | } | 2170 | } |
2157 | 2171 | ||
2158 | static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) | 2172 | static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg) |
2159 | { | 2173 | { |
2160 | struct tcm_vhost_tpg *tpg = container_of(se_tpg, | 2174 | struct vhost_scsi_tpg *tpg = container_of(se_tpg, |
2161 | struct tcm_vhost_tpg, se_tpg); | 2175 | struct vhost_scsi_tpg, se_tpg); |
2162 | 2176 | ||
2163 | mutex_lock(&tcm_vhost_mutex); | 2177 | mutex_lock(&vhost_scsi_mutex); |
2164 | list_del(&tpg->tv_tpg_list); | 2178 | list_del(&tpg->tv_tpg_list); |
2165 | mutex_unlock(&tcm_vhost_mutex); | 2179 | mutex_unlock(&vhost_scsi_mutex); |
2166 | /* | 2180 | /* |
2167 | * Release the virtual I_T Nexus for this vhost TPG | 2181 | * Release the virtual I_T Nexus for this vhost TPG |
2168 | */ | 2182 | */ |
2169 | tcm_vhost_drop_nexus(tpg); | 2183 | vhost_scsi_drop_nexus(tpg); |
2170 | /* | 2184 | /* |
2171 | * Deregister the se_tpg from TCM.. | 2185 | * Deregister the se_tpg from TCM.. |
2172 | */ | 2186 | */ |
@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) | |||
2175 | } | 2189 | } |
2176 | 2190 | ||
2177 | static struct se_wwn * | 2191 | static struct se_wwn * |
2178 | tcm_vhost_make_tport(struct target_fabric_configfs *tf, | 2192 | vhost_scsi_make_tport(struct target_fabric_configfs *tf, |
2179 | struct config_group *group, | 2193 | struct config_group *group, |
2180 | const char *name) | 2194 | const char *name) |
2181 | { | 2195 | { |
2182 | struct tcm_vhost_tport *tport; | 2196 | struct vhost_scsi_tport *tport; |
2183 | char *ptr; | 2197 | char *ptr; |
2184 | u64 wwpn = 0; | 2198 | u64 wwpn = 0; |
2185 | int off = 0; | 2199 | int off = 0; |
2186 | 2200 | ||
2187 | /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) | 2201 | /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) |
2188 | return ERR_PTR(-EINVAL); */ | 2202 | return ERR_PTR(-EINVAL); */ |
2189 | 2203 | ||
2190 | tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); | 2204 | tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL); |
2191 | if (!tport) { | 2205 | if (!tport) { |
2192 | pr_err("Unable to allocate struct tcm_vhost_tport"); | 2206 | pr_err("Unable to allocate struct vhost_scsi_tport"); |
2193 | return ERR_PTR(-ENOMEM); | 2207 | return ERR_PTR(-ENOMEM); |
2194 | } | 2208 | } |
2195 | tport->tport_wwpn = wwpn; | 2209 | tport->tport_wwpn = wwpn; |
@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf, | |||
2220 | return ERR_PTR(-EINVAL); | 2234 | return ERR_PTR(-EINVAL); |
2221 | 2235 | ||
2222 | check_len: | 2236 | check_len: |
2223 | if (strlen(name) >= TCM_VHOST_NAMELEN) { | 2237 | if (strlen(name) >= VHOST_SCSI_NAMELEN) { |
2224 | pr_err("Emulated %s Address: %s, exceeds" | 2238 | pr_err("Emulated %s Address: %s, exceeds" |
2225 | " max: %d\n", name, tcm_vhost_dump_proto_id(tport), | 2239 | " max: %d\n", name, vhost_scsi_dump_proto_id(tport), |
2226 | TCM_VHOST_NAMELEN); | 2240 | VHOST_SCSI_NAMELEN); |
2227 | kfree(tport); | 2241 | kfree(tport); |
2228 | return ERR_PTR(-EINVAL); | 2242 | return ERR_PTR(-EINVAL); |
2229 | } | 2243 | } |
2230 | snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); | 2244 | snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]); |
2231 | 2245 | ||
2232 | pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" | 2246 | pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" |
2233 | " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); | 2247 | " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name); |
2234 | 2248 | ||
2235 | return &tport->tport_wwn; | 2249 | return &tport->tport_wwn; |
2236 | } | 2250 | } |
2237 | 2251 | ||
2238 | static void tcm_vhost_drop_tport(struct se_wwn *wwn) | 2252 | static void vhost_scsi_drop_tport(struct se_wwn *wwn) |
2239 | { | 2253 | { |
2240 | struct tcm_vhost_tport *tport = container_of(wwn, | 2254 | struct vhost_scsi_tport *tport = container_of(wwn, |
2241 | struct tcm_vhost_tport, tport_wwn); | 2255 | struct vhost_scsi_tport, tport_wwn); |
2242 | 2256 | ||
2243 | pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" | 2257 | pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" |
2244 | " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), | 2258 | " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), |
2245 | tport->tport_name); | 2259 | tport->tport_name); |
2246 | 2260 | ||
2247 | kfree(tport); | 2261 | kfree(tport); |
2248 | } | 2262 | } |
2249 | 2263 | ||
2250 | static ssize_t | 2264 | static ssize_t |
2251 | tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf, | 2265 | vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf, |
2252 | char *page) | 2266 | char *page) |
2253 | { | 2267 | { |
2254 | return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" | 2268 | return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" |
2255 | "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, | 2269 | " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, |
2256 | utsname()->machine); | 2270 | utsname()->machine); |
2257 | } | 2271 | } |
2258 | 2272 | ||
2259 | TF_WWN_ATTR_RO(tcm_vhost, version); | 2273 | TF_WWN_ATTR_RO(vhost_scsi, version); |
2260 | 2274 | ||
2261 | static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { | 2275 | static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { |
2262 | &tcm_vhost_wwn_version.attr, | 2276 | &vhost_scsi_wwn_version.attr, |
2263 | NULL, | 2277 | NULL, |
2264 | }; | 2278 | }; |
2265 | 2279 | ||
2266 | static struct target_core_fabric_ops tcm_vhost_ops = { | 2280 | static struct target_core_fabric_ops vhost_scsi_ops = { |
2267 | .get_fabric_name = tcm_vhost_get_fabric_name, | 2281 | .get_fabric_name = vhost_scsi_get_fabric_name, |
2268 | .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, | 2282 | .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, |
2269 | .tpg_get_wwn = tcm_vhost_get_fabric_wwn, | 2283 | .tpg_get_wwn = vhost_scsi_get_fabric_wwn, |
2270 | .tpg_get_tag = tcm_vhost_get_tag, | 2284 | .tpg_get_tag = vhost_scsi_get_tpgt, |
2271 | .tpg_get_default_depth = tcm_vhost_get_default_depth, | 2285 | .tpg_get_default_depth = vhost_scsi_get_default_depth, |
2272 | .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, | 2286 | .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id, |
2273 | .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, | 2287 | .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len, |
2274 | .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, | 2288 | .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id, |
2275 | .tpg_check_demo_mode = tcm_vhost_check_true, | 2289 | .tpg_check_demo_mode = vhost_scsi_check_true, |
2276 | .tpg_check_demo_mode_cache = tcm_vhost_check_true, | 2290 | .tpg_check_demo_mode_cache = vhost_scsi_check_true, |
2277 | .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, | 2291 | .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, |
2278 | .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, | 2292 | .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, |
2279 | .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, | 2293 | .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, |
2280 | .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, | 2294 | .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, |
2281 | .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, | 2295 | .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, |
2282 | .release_cmd = tcm_vhost_release_cmd, | 2296 | .release_cmd = vhost_scsi_release_cmd, |
2283 | .check_stop_free = vhost_scsi_check_stop_free, | 2297 | .check_stop_free = vhost_scsi_check_stop_free, |
2284 | .shutdown_session = tcm_vhost_shutdown_session, | 2298 | .shutdown_session = vhost_scsi_shutdown_session, |
2285 | .close_session = tcm_vhost_close_session, | 2299 | .close_session = vhost_scsi_close_session, |
2286 | .sess_get_index = tcm_vhost_sess_get_index, | 2300 | .sess_get_index = vhost_scsi_sess_get_index, |
2287 | .sess_get_initiator_sid = NULL, | 2301 | .sess_get_initiator_sid = NULL, |
2288 | .write_pending = tcm_vhost_write_pending, | 2302 | .write_pending = vhost_scsi_write_pending, |
2289 | .write_pending_status = tcm_vhost_write_pending_status, | 2303 | .write_pending_status = vhost_scsi_write_pending_status, |
2290 | .set_default_node_attributes = tcm_vhost_set_default_node_attrs, | 2304 | .set_default_node_attributes = vhost_scsi_set_default_node_attrs, |
2291 | .get_task_tag = tcm_vhost_get_task_tag, | 2305 | .get_task_tag = vhost_scsi_get_task_tag, |
2292 | .get_cmd_state = tcm_vhost_get_cmd_state, | 2306 | .get_cmd_state = vhost_scsi_get_cmd_state, |
2293 | .queue_data_in = tcm_vhost_queue_data_in, | 2307 | .queue_data_in = vhost_scsi_queue_data_in, |
2294 | .queue_status = tcm_vhost_queue_status, | 2308 | .queue_status = vhost_scsi_queue_status, |
2295 | .queue_tm_rsp = tcm_vhost_queue_tm_rsp, | 2309 | .queue_tm_rsp = vhost_scsi_queue_tm_rsp, |
2296 | .aborted_task = tcm_vhost_aborted_task, | 2310 | .aborted_task = vhost_scsi_aborted_task, |
2297 | /* | 2311 | /* |
2298 | * Setup callers for generic logic in target_core_fabric_configfs.c | 2312 | * Setup callers for generic logic in target_core_fabric_configfs.c |
2299 | */ | 2313 | */ |
2300 | .fabric_make_wwn = tcm_vhost_make_tport, | 2314 | .fabric_make_wwn = vhost_scsi_make_tport, |
2301 | .fabric_drop_wwn = tcm_vhost_drop_tport, | 2315 | .fabric_drop_wwn = vhost_scsi_drop_tport, |
2302 | .fabric_make_tpg = tcm_vhost_make_tpg, | 2316 | .fabric_make_tpg = vhost_scsi_make_tpg, |
2303 | .fabric_drop_tpg = tcm_vhost_drop_tpg, | 2317 | .fabric_drop_tpg = vhost_scsi_drop_tpg, |
2304 | .fabric_post_link = tcm_vhost_port_link, | 2318 | .fabric_post_link = vhost_scsi_port_link, |
2305 | .fabric_pre_unlink = tcm_vhost_port_unlink, | 2319 | .fabric_pre_unlink = vhost_scsi_port_unlink, |
2306 | .fabric_make_np = NULL, | 2320 | .fabric_make_np = NULL, |
2307 | .fabric_drop_np = NULL, | 2321 | .fabric_drop_np = NULL, |
2308 | .fabric_make_nodeacl = tcm_vhost_make_nodeacl, | 2322 | .fabric_make_nodeacl = vhost_scsi_make_nodeacl, |
2309 | .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, | 2323 | .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, |
2310 | }; | 2324 | }; |
2311 | 2325 | ||
2312 | static int tcm_vhost_register_configfs(void) | 2326 | static int vhost_scsi_register_configfs(void) |
2313 | { | 2327 | { |
2314 | struct target_fabric_configfs *fabric; | 2328 | struct target_fabric_configfs *fabric; |
2315 | int ret; | 2329 | int ret; |
2316 | 2330 | ||
2317 | pr_debug("TCM_VHOST fabric module %s on %s/%s" | 2331 | pr_debug("vhost-scsi fabric module %s on %s/%s" |
2318 | " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, | 2332 | " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, |
2319 | utsname()->machine); | 2333 | utsname()->machine); |
2320 | /* | 2334 | /* |
2321 | * Register the top level struct config_item_type with TCM core | 2335 | * Register the top level struct config_item_type with TCM core |
@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void) | |||
2326 | return PTR_ERR(fabric); | 2340 | return PTR_ERR(fabric); |
2327 | } | 2341 | } |
2328 | /* | 2342 | /* |
2329 | * Setup fabric->tf_ops from our local tcm_vhost_ops | 2343 | * Setup fabric->tf_ops from our local vhost_scsi_ops |
2330 | */ | 2344 | */ |
2331 | fabric->tf_ops = tcm_vhost_ops; | 2345 | fabric->tf_ops = vhost_scsi_ops; |
2332 | /* | 2346 | /* |
2333 | * Setup default attribute lists for various fabric->tf_cit_tmpl | 2347 | * Setup default attribute lists for various fabric->tf_cit_tmpl |
2334 | */ | 2348 | */ |
2335 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; | 2349 | fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs; |
2336 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; | 2350 | fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs; |
2337 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; | 2351 | fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; |
2338 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; | 2352 | fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; |
2339 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; | 2353 | fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; |
@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void) | |||
2353 | /* | 2367 | /* |
2354 | * Setup our local pointer to *fabric | 2368 | * Setup our local pointer to *fabric |
2355 | */ | 2369 | */ |
2356 | tcm_vhost_fabric_configfs = fabric; | 2370 | vhost_scsi_fabric_configfs = fabric; |
2357 | pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); | 2371 | pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n"); |
2358 | return 0; | 2372 | return 0; |
2359 | }; | 2373 | }; |
2360 | 2374 | ||
2361 | static void tcm_vhost_deregister_configfs(void) | 2375 | static void vhost_scsi_deregister_configfs(void) |
2362 | { | 2376 | { |
2363 | if (!tcm_vhost_fabric_configfs) | 2377 | if (!vhost_scsi_fabric_configfs) |
2364 | return; | 2378 | return; |
2365 | 2379 | ||
2366 | target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); | 2380 | target_fabric_configfs_deregister(vhost_scsi_fabric_configfs); |
2367 | tcm_vhost_fabric_configfs = NULL; | 2381 | vhost_scsi_fabric_configfs = NULL; |
2368 | pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); | 2382 | pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n"); |
2369 | }; | 2383 | }; |
2370 | 2384 | ||
2371 | static int __init tcm_vhost_init(void) | 2385 | static int __init vhost_scsi_init(void) |
2372 | { | 2386 | { |
2373 | int ret = -ENOMEM; | 2387 | int ret = -ENOMEM; |
2374 | /* | 2388 | /* |
2375 | * Use our own dedicated workqueue for submitting I/O into | 2389 | * Use our own dedicated workqueue for submitting I/O into |
2376 | * target core to avoid contention within system_wq. | 2390 | * target core to avoid contention within system_wq. |
2377 | */ | 2391 | */ |
2378 | tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); | 2392 | vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0); |
2379 | if (!tcm_vhost_workqueue) | 2393 | if (!vhost_scsi_workqueue) |
2380 | goto out; | 2394 | goto out; |
2381 | 2395 | ||
2382 | ret = vhost_scsi_register(); | 2396 | ret = vhost_scsi_register(); |
2383 | if (ret < 0) | 2397 | if (ret < 0) |
2384 | goto out_destroy_workqueue; | 2398 | goto out_destroy_workqueue; |
2385 | 2399 | ||
2386 | ret = tcm_vhost_register_configfs(); | 2400 | ret = vhost_scsi_register_configfs(); |
2387 | if (ret < 0) | 2401 | if (ret < 0) |
2388 | goto out_vhost_scsi_deregister; | 2402 | goto out_vhost_scsi_deregister; |
2389 | 2403 | ||
@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void) | |||
2392 | out_vhost_scsi_deregister: | 2406 | out_vhost_scsi_deregister: |
2393 | vhost_scsi_deregister(); | 2407 | vhost_scsi_deregister(); |
2394 | out_destroy_workqueue: | 2408 | out_destroy_workqueue: |
2395 | destroy_workqueue(tcm_vhost_workqueue); | 2409 | destroy_workqueue(vhost_scsi_workqueue); |
2396 | out: | 2410 | out: |
2397 | return ret; | 2411 | return ret; |
2398 | }; | 2412 | }; |
2399 | 2413 | ||
2400 | static void tcm_vhost_exit(void) | 2414 | static void vhost_scsi_exit(void) |
2401 | { | 2415 | { |
2402 | tcm_vhost_deregister_configfs(); | 2416 | vhost_scsi_deregister_configfs(); |
2403 | vhost_scsi_deregister(); | 2417 | vhost_scsi_deregister(); |
2404 | destroy_workqueue(tcm_vhost_workqueue); | 2418 | destroy_workqueue(vhost_scsi_workqueue); |
2405 | }; | 2419 | }; |
2406 | 2420 | ||
2407 | MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); | 2421 | MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); |
2408 | MODULE_ALIAS("tcm_vhost"); | 2422 | MODULE_ALIAS("tcm_vhost"); |
2409 | MODULE_LICENSE("GPL"); | 2423 | MODULE_LICENSE("GPL"); |
2410 | module_init(tcm_vhost_init); | 2424 | module_init(vhost_scsi_init); |
2411 | module_exit(tcm_vhost_exit); | 2425 | module_exit(vhost_scsi_exit); |