author    Paolo Bonzini <pbonzini@redhat.com>	2012-02-05 06:16:00 -0500
committer James Bottomley <JBottomley@Parallels.com>	2012-02-19 10:50:20 -0500
commit    4fe74b1cb051dc9d47a80e263c388cf1651783d4 (patch)
tree      a3f216ea879306283fc2d3586517b5ad266c8096 /drivers/scsi/virtio_scsi.c
parent    5a4f934e65620130d033725e85b7fdff4ac2ffbd (diff)
[SCSI] virtio-scsi: SCSI driver for QEMU based virtual machines
The virtio-scsi HBA is the basis of an alternative storage stack for
QEMU-based virtual machines (including KVM). Compared to virtio-blk it is
more scalable (it supports many LUNs on a single PCI slot), more powerful
(it more easily supports passthrough of host devices to the guest) and
more easily extensible (new SCSI features implemented by QEMU should not
require updating the driver in the guest).

Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
-rw-r--r--	drivers/scsi/virtio_scsi.c	594
1 file changed, 594 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
new file mode 100644
index 00000000000..efccd72c4a3
--- /dev/null
+++ b/drivers/scsi/virtio_scsi.c
@@ -0,0 +1,594 @@
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

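/*
 * Minimum number of commands kept preallocated in the mempool;
 * mempool_alloc() falls back to this reserve when regular slab
 * allocation fails, so command submission can make progress even
 * under memory pressure.
 */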
#define VIRTIO_SCSI_MEMPOOL_SZ 64

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

/* Driver instance state */
struct virtio_scsi {
	/* Protects ctrl_vq, req_vq and sg[] */
	spinlock_t vq_lock;

	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct virtqueue *event_vq;
	struct virtqueue *req_vq;

	/* For sglist construction when adding commands to the virtqueue.  */
	struct scatterlist sg[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (!resid)
		return;

	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);
		return;
	}

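	/*
	 * The device reports a single residual for bidirectional commands:
	 * attribute it to the data-in buffer first and charge whatever is
	 * left over to the data-out buffer.
	 */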
	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

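	/*
	 * The SCSI status byte comes directly from the device; the host
	 * byte is derived below from the virtio-scsi response code.
	 */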
	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	mempool_free(cmd, virtscsi_cmd_pool);
	sc->scsi_done(sc);
}

static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	void *buf;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vscsi->vq_lock, flags);

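	/*
	 * Drain the virtqueue with callbacks disabled; virtqueue_enable_cb()
	 * returns false if more buffers were added in the meantime, in which
	 * case we loop again instead of waiting for another interrupt.
	 */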
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(buf);
	} while (!virtqueue_enable_cb(vq));

	spin_unlock_irqrestore(&vscsi->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	virtscsi_vq_done(vq, virtscsi_complete_cmd);
}

static void virtscsi_complete_free(void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete_all(cmd->comp);
	mempool_free(cmd, virtscsi_cmd_pool);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	virtscsi_vq_done(vq, virtscsi_complete_free);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	virtscsi_vq_done(vq, virtscsi_complete_free);
}

static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
			     struct scsi_data_buffer *sdb)
{
	struct sg_table *table = &sdb->table;
	struct scatterlist *sg_elem;
	unsigned int idx = *p_idx;
	int i;

	for_each_sg(table->sgl, sg_elem, table->nents, i)
		sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);

	*p_idx = idx;
}

/**
 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
 * @vscsi	: virtio_scsi state
 * @cmd		: command structure
 * @out_num	: number of read-only elements
 * @in_num	: number of write-only elements
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 *
 * Called with vq_lock held.
 */
static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
			     struct virtio_scsi_cmd *cmd,
			     unsigned *out_num, unsigned *in_num,
			     size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sg = vscsi->sg;
	unsigned int idx = 0;

	if (sc) {
		struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
		BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

		/* TODO: check feature bit and fail if unsupported?  */
		BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
	}

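	/*
	 * Virtqueue buffers must list all device-readable (out) elements
	 * before all device-writable (in) elements: request header and
	 * data-out first, then response header and data-in.
	 */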
	/* Request header.  */
	sg_set_buf(&sg[idx++], &cmd->req, req_size);

	/* Data-out buffer.  */
	if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
		virtscsi_map_sgl(sg, &idx, scsi_out(sc));

	*out_num = idx;

	/* Response header.  */
	sg_set_buf(&sg[idx++], &cmd->resp, resp_size);

	/* Data-in buffer */
	if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
		virtscsi_map_sgl(sg, &idx, scsi_in(sc));

	*in_num = idx - *out_num;
}

static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size, gfp_t gfp)
{
	unsigned int out_num, in_num;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vscsi->vq_lock, flags);

	virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);

	ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
	if (ret >= 0)
		virtqueue_kick(vq);

	spin_unlock_irqrestore(&vscsi->vq_lock, flags);
	return ret;
}

static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_cmd *cmd;
	int ret;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	ret = SCSI_MLQUEUE_HOST_BUSY;
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
	if (!cmd)
		goto out;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
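	/*
	 * LUN addressing used by virtio-scsi: byte 0 is 1, byte 1 is the
	 * target id, bytes 2-3 carry the single-level LUN with 0x40 OR'd
	 * into the top bits (SAM flat addressing).
	 */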
	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
		.task_attr = VIRTIO_SCSI_S_SIMPLE,
		.prio = 0,
		.crn = 0,
	};

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
			      GFP_ATOMIC) >= 0)
		ret = 0;

out:
	return ret;
}

static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret;

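	/*
	 * The TMF response is delivered through the control virtqueue;
	 * virtscsi_complete_free() signals this completion when it arrives.
	 */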
	cmd->comp = &comp;
	ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
			       sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			       GFP_NOIO);
	if (ret < 0)
		return FAILED;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK &&
	    cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		return FAILED;

	return SUCCESS;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.queuecommand = virtscsi_queuecommand,
	.this_id = -1,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
};

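/*
 * Read or write one field of the device configuration space, using the
 * field's offset and size within struct virtio_scsi_config.
 */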
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		vdev->config->get(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	(void)({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		vdev->config->set(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
	})

static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = {
		virtscsi_ctrl_done,
		virtscsi_event_done,
		virtscsi_req_done
	};
	const char *names[] = {
		"control",
		"event",
		"request"
	};

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
	if (err)
		return err;

	vscsi->ctrl_vq = vqs[0];
	vscsi->event_vq = vqs[1];
	vscsi->req_vq = vqs[2];

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
	return 0;
}

static int __devinit virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems;
	u32 cmd_per_lun;

	/* We need to know how many segments before we allocate.
	 * We need two extra sg elements, at head and tail, for the
	 * request and response headers.
	 */
	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;

	/* Allocate memory and link the structs together.  */
	shost = scsi_host_alloc(&virtscsi_host_template,
		sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));

	if (!shost)
		return -ENOMEM;

	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vdev->priv = shost;

	/* Random initializations.  */
	spin_lock_init(&vscsi->vq_lock);
	sg_init_table(vscsi->sg, sg_elems + 2);

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
	shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	scsi_scan_host(shost);

	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues.  */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
}

static void __devexit virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);

	scsi_remove_host(shost);

	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

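/*
 * Freeze tears the virtqueues down, so restore must recreate them and
 * rewrite the cdb_size/sense_size configuration; virtscsi_init() does both.
 */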
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi);
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_scsi_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = __devexit_p(virtscsi_remove),
};

static int __init init(void)
{
	int ret = -ENOMEM;

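	/*
	 * Create the slab cache that backs command allocation first, then
	 * the mempool that draws from it and keeps a reserve of commands
	 * for low-memory situations.
	 */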
	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		printk(KERN_ERR "kmem_cache_create() for "
				"virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		printk(KERN_ERR "mempool_create() for "
				"virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
	return ret;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");