diff options
author | Wanlong Gao <gaowanlong@cn.fujitsu.com> | 2013-03-20 01:14:28 -0400 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2013-03-20 01:14:59 -0400 |
commit | 682993b4e445bdfe9935d5e6e298565b7e11d7ee (patch) | |
tree | bae70a13bcefc1ae501aa96d5b0a9623028b0315 /drivers/scsi/virtio_scsi.c | |
parent | 0a11cc36f7b33fa2de0ad95199d2f2ab896fbd93 (diff) |
virtio-scsi: use virtqueue_add_sgs for command buffers
Using the new virtqueue_add_sgs function lets us simplify the queueing
path. In particular, all data protected by the tgt_lock is just gone
(multiqueue will find a new use for the lock).
Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Asias He <asias@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
-rw-r--r-- | drivers/scsi/virtio_scsi.c | 100 |
1 file changed, 37 insertions, 63 deletions
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 0f5dd2804ae5..77206d0eb6a9 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
@@ -61,11 +61,8 @@ struct virtio_scsi_vq { | |||
61 | 61 | ||
62 | /* Per-target queue state */ | 62 | /* Per-target queue state */ |
63 | struct virtio_scsi_target_state { | 63 | struct virtio_scsi_target_state { |
64 | /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */ | 64 | /* Never held at the same time as vq_lock. */ |
65 | spinlock_t tgt_lock; | 65 | spinlock_t tgt_lock; |
66 | |||
67 | /* For sglist construction when adding commands to the virtqueue. */ | ||
68 | struct scatterlist sg[]; | ||
69 | }; | 66 | }; |
70 | 67 | ||
71 | /* Driver instance state */ | 68 | /* Driver instance state */ |
@@ -353,75 +350,61 @@ static void virtscsi_event_done(struct virtqueue *vq) | |||
353 | spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); | 350 | spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); |
354 | }; | 351 | }; |
355 | 352 | ||
356 | static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, | ||
357 | struct scsi_data_buffer *sdb) | ||
358 | { | ||
359 | struct sg_table *table = &sdb->table; | ||
360 | struct scatterlist *sg_elem; | ||
361 | unsigned int idx = *p_idx; | ||
362 | int i; | ||
363 | |||
364 | for_each_sg(table->sgl, sg_elem, table->nents, i) | ||
365 | sg[idx++] = *sg_elem; | ||
366 | |||
367 | *p_idx = idx; | ||
368 | } | ||
369 | |||
370 | /** | 353 | /** |
371 | * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist | 354 | * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue |
372 | * @vscsi : virtio_scsi state | 355 | * @vq : the struct virtqueue we're talking about |
373 | * @cmd : command structure | 356 | * @cmd : command structure |
374 | * @out_num : number of read-only elements | ||
375 | * @in_num : number of write-only elements | ||
376 | * @req_size : size of the request buffer | 357 | * @req_size : size of the request buffer |
377 | * @resp_size : size of the response buffer | 358 | * @resp_size : size of the response buffer |
378 | * | 359 | * @gfp : flags to use for memory allocations |
379 | * Called with tgt_lock held. | ||
380 | */ | 360 | */ |
381 | static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt, | 361 | static int virtscsi_add_cmd(struct virtqueue *vq, |
382 | struct virtio_scsi_cmd *cmd, | 362 | struct virtio_scsi_cmd *cmd, |
383 | unsigned *out_num, unsigned *in_num, | 363 | size_t req_size, size_t resp_size, gfp_t gfp) |
384 | size_t req_size, size_t resp_size) | ||
385 | { | 364 | { |
386 | struct scsi_cmnd *sc = cmd->sc; | 365 | struct scsi_cmnd *sc = cmd->sc; |
387 | struct scatterlist *sg = tgt->sg; | 366 | struct scatterlist *sgs[4], req, resp; |
388 | unsigned int idx = 0; | 367 | struct sg_table *out, *in; |
368 | unsigned out_num = 0, in_num = 0; | ||
369 | |||
370 | out = in = NULL; | ||
371 | |||
372 | if (sc && sc->sc_data_direction != DMA_NONE) { | ||
373 | if (sc->sc_data_direction != DMA_FROM_DEVICE) | ||
374 | out = &scsi_out(sc)->table; | ||
375 | if (sc->sc_data_direction != DMA_TO_DEVICE) | ||
376 | in = &scsi_in(sc)->table; | ||
377 | } | ||
389 | 378 | ||
390 | /* Request header. */ | 379 | /* Request header. */ |
391 | sg_set_buf(&sg[idx++], &cmd->req, req_size); | 380 | sg_init_one(&req, &cmd->req, req_size); |
381 | sgs[out_num++] = &req; | ||
392 | 382 | ||
393 | /* Data-out buffer. */ | 383 | /* Data-out buffer. */ |
394 | if (sc && sc->sc_data_direction != DMA_FROM_DEVICE) | 384 | if (out) |
395 | virtscsi_map_sgl(sg, &idx, scsi_out(sc)); | 385 | sgs[out_num++] = out->sgl; |
396 | |||
397 | *out_num = idx; | ||
398 | 386 | ||
399 | /* Response header. */ | 387 | /* Response header. */ |
400 | sg_set_buf(&sg[idx++], &cmd->resp, resp_size); | 388 | sg_init_one(&resp, &cmd->resp, resp_size); |
389 | sgs[out_num + in_num++] = &resp; | ||
401 | 390 | ||
402 | /* Data-in buffer */ | 391 | /* Data-in buffer */ |
403 | if (sc && sc->sc_data_direction != DMA_TO_DEVICE) | 392 | if (in) |
404 | virtscsi_map_sgl(sg, &idx, scsi_in(sc)); | 393 | sgs[out_num + in_num++] = in->sgl; |
405 | 394 | ||
406 | *in_num = idx - *out_num; | 395 | return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp); |
407 | } | 396 | } |
408 | 397 | ||
409 | static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt, | 398 | static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, |
410 | struct virtio_scsi_vq *vq, | ||
411 | struct virtio_scsi_cmd *cmd, | 399 | struct virtio_scsi_cmd *cmd, |
412 | size_t req_size, size_t resp_size, gfp_t gfp) | 400 | size_t req_size, size_t resp_size, gfp_t gfp) |
413 | { | 401 | { |
414 | unsigned int out_num, in_num; | ||
415 | unsigned long flags; | 402 | unsigned long flags; |
416 | int err; | 403 | int err; |
417 | bool needs_kick = false; | 404 | bool needs_kick = false; |
418 | 405 | ||
419 | spin_lock_irqsave(&tgt->tgt_lock, flags); | 406 | spin_lock_irqsave(&vq->vq_lock, flags); |
420 | virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size); | 407 | err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp); |
421 | |||
422 | spin_lock(&vq->vq_lock); | ||
423 | err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp); | ||
424 | spin_unlock(&tgt->tgt_lock); | ||
425 | if (!err) | 408 | if (!err) |
426 | needs_kick = virtqueue_kick_prepare(vq->vq); | 409 | needs_kick = virtqueue_kick_prepare(vq->vq); |
427 | 410 | ||
@@ -435,7 +418,6 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt, | |||
435 | static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) | 418 | static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) |
436 | { | 419 | { |
437 | struct virtio_scsi *vscsi = shost_priv(sh); | 420 | struct virtio_scsi *vscsi = shost_priv(sh); |
438 | struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id]; | ||
439 | struct virtio_scsi_cmd *cmd; | 421 | struct virtio_scsi_cmd *cmd; |
440 | int ret; | 422 | int ret; |
441 | 423 | ||
@@ -469,7 +451,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) | |||
469 | BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); | 451 | BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); |
470 | memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); | 452 | memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); |
471 | 453 | ||
472 | if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd, | 454 | if (virtscsi_kick_cmd(&vscsi->req_vq, cmd, |
473 | sizeof cmd->req.cmd, sizeof cmd->resp.cmd, | 455 | sizeof cmd->req.cmd, sizeof cmd->resp.cmd, |
474 | GFP_ATOMIC) == 0) | 456 | GFP_ATOMIC) == 0) |
475 | ret = 0; | 457 | ret = 0; |
@@ -483,11 +465,10 @@ out: | |||
483 | static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) | 465 | static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) |
484 | { | 466 | { |
485 | DECLARE_COMPLETION_ONSTACK(comp); | 467 | DECLARE_COMPLETION_ONSTACK(comp); |
486 | struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id]; | ||
487 | int ret = FAILED; | 468 | int ret = FAILED; |
488 | 469 | ||
489 | cmd->comp = &comp; | 470 | cmd->comp = &comp; |
490 | if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd, | 471 | if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, |
491 | sizeof cmd->req.tmf, sizeof cmd->resp.tmf, | 472 | sizeof cmd->req.tmf, sizeof cmd->resp.tmf, |
492 | GFP_NOIO) < 0) | 473 | GFP_NOIO) < 0) |
493 | goto out; | 474 | goto out; |
@@ -588,20 +569,16 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, | |||
588 | } | 569 | } |
589 | 570 | ||
590 | static struct virtio_scsi_target_state *virtscsi_alloc_tgt( | 571 | static struct virtio_scsi_target_state *virtscsi_alloc_tgt( |
591 | struct virtio_device *vdev, int sg_elems) | 572 | struct virtio_device *vdev) |
592 | { | 573 | { |
593 | struct virtio_scsi_target_state *tgt; | 574 | struct virtio_scsi_target_state *tgt; |
594 | gfp_t gfp_mask = GFP_KERNEL; | 575 | gfp_t gfp_mask = GFP_KERNEL; |
595 | 576 | ||
596 | /* We need extra sg elements at head and tail. */ | 577 | tgt = kmalloc(sizeof(*tgt), gfp_mask); |
597 | tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2), | ||
598 | gfp_mask); | ||
599 | |||
600 | if (!tgt) | 578 | if (!tgt) |
601 | return NULL; | 579 | return NULL; |
602 | 580 | ||
603 | spin_lock_init(&tgt->tgt_lock); | 581 | spin_lock_init(&tgt->tgt_lock); |
604 | sg_init_table(tgt->sg, sg_elems + 2); | ||
605 | return tgt; | 582 | return tgt; |
606 | } | 583 | } |
607 | 584 | ||
@@ -635,7 +612,7 @@ static int virtscsi_init(struct virtio_device *vdev, | |||
635 | { | 612 | { |
636 | int err; | 613 | int err; |
637 | struct virtqueue *vqs[3]; | 614 | struct virtqueue *vqs[3]; |
638 | u32 i, sg_elems; | 615 | u32 i; |
639 | 616 | ||
640 | vq_callback_t *callbacks[] = { | 617 | vq_callback_t *callbacks[] = { |
641 | virtscsi_ctrl_done, | 618 | virtscsi_ctrl_done, |
@@ -663,11 +640,8 @@ static int virtscsi_init(struct virtio_device *vdev, | |||
663 | if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) | 640 | if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) |
664 | virtscsi_kick_event_all(vscsi); | 641 | virtscsi_kick_event_all(vscsi); |
665 | 642 | ||
666 | /* We need to know how many segments before we allocate. */ | ||
667 | sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; | ||
668 | |||
669 | for (i = 0; i < num_targets; i++) { | 643 | for (i = 0; i < num_targets; i++) { |
670 | vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems); | 644 | vscsi->tgt[i] = virtscsi_alloc_tgt(vdev); |
671 | if (!vscsi->tgt[i]) { | 645 | if (!vscsi->tgt[i]) { |
672 | err = -ENOMEM; | 646 | err = -ENOMEM; |
673 | goto out; | 647 | goto out; |