path: root/drivers/scsi/virtio_scsi.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2013-05-02 17:14:04 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-05-02 17:14:04 -0400
commit    736a2dd2571ac56b11ed95a7814d838d5311be04 (patch)
tree      de10d107025970c6e51d5b6faeba799ed4b9caae /drivers/scsi/virtio_scsi.c
parent    0b2e3b6bb4a415379f16e38fc92db42379be47a1 (diff)
parent    01d779a14ef800b74684d9692add4944df052461 (diff)
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio & lguest updates from Rusty Russell:
 "Lots of virtio work which wasn't quite ready for last merge window.

  Plus I dived into lguest again, reworking the pagetable code so we can
  move the switcher page: our fixmaps sometimes take more than 2MB now..."

Ugh.  Annoying conflicts with the tcm_vhost -> vhost_scsi rename.
Hopefully correctly resolved.

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (57 commits)
  caif_virtio: Remove bouncing email addresses
  lguest: improve code readability in lg_cpu_start.
  virtio-net: fill only rx queues which are being used
  lguest: map Switcher below fixmap.
  lguest: cache last cpu we ran on.
  lguest: map Switcher text whenever we allocate a new pagetable.
  lguest: don't share Switcher PTE pages between guests.
  lguest: expost switcher_pages array (as lg_switcher_pages).
  lguest: extract shadow PTE walking / allocating.
  lguest: make check_gpte et. al return bool.
  lguest: assume Switcher text is a single page.
  lguest: rename switcher_page to switcher_pages.
  lguest: remove RESERVE_MEM constant.
  lguest: check vaddr not pgd for Switcher protection.
  lguest: prepare to make SWITCHER_ADDR a variable.
  virtio: console: replace EMFILE with EBUSY for already-open port
  virtio-scsi: reset virtqueue affinity when doing cpu hotplug
  virtio-scsi: introduce multiqueue support
  virtio-scsi: push vq lock/unlock into virtscsi_vq_done
  virtio-scsi: pass struct virtio_scsi to virtqueue completion function
  ...
Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
-rw-r--r--  drivers/scsi/virtio_scsi.c | 487
1 file changed, 346 insertions(+), 141 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3449a1f8c656..2168258fb2c3 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -13,6 +13,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/mempool.h>
@@ -20,12 +22,14 @@
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_scsi.h>
+#include <linux/cpu.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 
 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 #define VIRTIO_SCSI_EVENT_LEN 8
+#define VIRTIO_SCSI_VQ_BASE 2
 
 /* Command queue element */
 struct virtio_scsi_cmd {
@@ -57,27 +61,61 @@ struct virtio_scsi_vq {
         struct virtqueue *vq;
 };
 
-/* Per-target queue state */
+/*
+ * Per-target queue state.
+ *
+ * This struct holds the data needed by the queue steering policy. When a
+ * target is sent multiple requests, we need to drive them to the same queue so
+ * that FIFO processing order is kept. However, if a target was idle, we can
+ * choose a queue arbitrarily. In this case the queue is chosen according to
+ * the current VCPU, so the driver expects the number of request queues to be
+ * equal to the number of VCPUs. This makes it easy and fast to select the
+ * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
+ * (each virtqueue's affinity is set to the CPU that "owns" the queue).
+ *
+ * An interesting effect of this policy is that only writes to req_vq need to
+ * take the tgt_lock. Read can be done outside the lock because:
+ *
+ * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
+ *   In that case, no other CPU is reading req_vq: even if they were in
+ *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
+ *
+ * - reads of req_vq only occur when the target is not idle (reqs != 0).
+ *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
+ *
+ * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Thus they can happen outside the tgt_lock, provided of course we make reqs
+ * an atomic_t.
+ */
 struct virtio_scsi_target_state {
-        /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
+        /* This spinlock never held at the same time as vq_lock. */
         spinlock_t tgt_lock;
 
-        /* For sglist construction when adding commands to the virtqueue. */
-        struct scatterlist sg[];
+        /* Count of outstanding requests. */
+        atomic_t reqs;
+
+        /* Currently active virtqueue for requests sent to this target. */
+        struct virtio_scsi_vq *req_vq;
 };
 
 /* Driver instance state */
 struct virtio_scsi {
         struct virtio_device *vdev;
 
-        struct virtio_scsi_vq ctrl_vq;
-        struct virtio_scsi_vq event_vq;
-        struct virtio_scsi_vq req_vq;
-
         /* Get some buffers ready for event vq */
         struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
 
-        struct virtio_scsi_target_state *tgt[];
+        u32 num_queues;
+
+        /* If the affinity hint is set for virtqueues */
+        bool affinity_hint_set;
+
+        /* CPU hotplug notifier */
+        struct notifier_block nb;
+
+        struct virtio_scsi_vq ctrl_vq;
+        struct virtio_scsi_vq event_vq;
+        struct virtio_scsi_vq req_vqs[];
 };
 
 static struct kmem_cache *virtscsi_cmd_cache;
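The comment block above compresses the whole steering argument. A minimal user-space model of the same invariant is sketched below (C11 atomics plus pthreads; illustrative only, not the kernel implementation, and all names are invented): req_vq is only written on the 0 -> 1 transition of reqs, taken under tgt_lock, so any path that sees reqs > 0 also sees the queue chosen for the current burst. Mirroring the kernel's virtscsi_pick_vq(), the model takes the lock in both branches even though the comment argues only the write strictly needs it.

    /* steering_model.c - user-space model of the queue steering policy.
     * Build: cc -std=c11 -pthread steering_model.c
     * Illustrative only; not the kernel code.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_QUEUES 4

    struct target {
        pthread_mutex_t tgt_lock;   /* only needed for writes of req_vq */
        atomic_int reqs;            /* outstanding requests */
        int req_vq;                 /* active queue for this target */
    };

    /* Mimics virtscsi_pick_vq(): reuse the queue while busy, re-steer when idle. */
    static int pick_vq(struct target *tgt, int cpu)
    {
        int vq;

        pthread_mutex_lock(&tgt->tgt_lock);
        if (atomic_fetch_add(&tgt->reqs, 1) > 0)    /* like atomic_inc_return() > 1 */
            vq = tgt->req_vq;                       /* busy: keep FIFO order */
        else
            vq = tgt->req_vq = cpu % NUM_QUEUES;    /* idle: follow the CPU */
        pthread_mutex_unlock(&tgt->tgt_lock);
        return vq;
    }

    /* Completion side: the decrement needs no lock, matching the comment above. */
    static void complete_cmd(struct target *tgt)
    {
        atomic_fetch_sub(&tgt->reqs, 1);
    }

    int main(void)
    {
        struct target tgt = { .tgt_lock = PTHREAD_MUTEX_INITIALIZER };

        printf("cpu 2, idle target -> vq %d\n", pick_vq(&tgt, 2));
        printf("cpu 5, busy target -> vq %d\n", pick_vq(&tgt, 5)); /* sticks to vq 2 */
        complete_cmd(&tgt);
        complete_cmd(&tgt);
        printf("cpu 5, idle again  -> vq %d\n", pick_vq(&tgt, 5));
        return 0;
    }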
@@ -107,11 +145,13 @@ static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
  *
  * Called with vq_lock held.
  */
-static void virtscsi_complete_cmd(void *buf)
+static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 {
         struct virtio_scsi_cmd *cmd = buf;
         struct scsi_cmnd *sc = cmd->sc;
         struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
+        struct virtio_scsi_target_state *tgt =
+                                scsi_target(sc->device)->hostdata;
 
         dev_dbg(&sc->device->sdev_gendev,
                 "cmd %p response %u status %#02x sense_len %u\n",
@@ -166,32 +206,71 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 
         mempool_free(cmd, virtscsi_cmd_pool);
         sc->scsi_done(sc);
+
+        atomic_dec(&tgt->reqs);
 }
 
-static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
+static void virtscsi_vq_done(struct virtio_scsi *vscsi,
+                             struct virtio_scsi_vq *virtscsi_vq,
+                             void (*fn)(struct virtio_scsi *vscsi, void *buf))
 {
         void *buf;
         unsigned int len;
+        unsigned long flags;
+        struct virtqueue *vq = virtscsi_vq->vq;
 
+        spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
         do {
                 virtqueue_disable_cb(vq);
                 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
-                        fn(buf);
+                        fn(vscsi, buf);
         } while (!virtqueue_enable_cb(vq));
+        spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
 }
 
 static void virtscsi_req_done(struct virtqueue *vq)
 {
         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
-        unsigned long flags;
+        int index = vq->index - VIRTIO_SCSI_VQ_BASE;
+        struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
 
-        spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
-        virtscsi_vq_done(vq, virtscsi_complete_cmd);
-        spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
+        /*
+         * Read req_vq before decrementing the reqs field in
+         * virtscsi_complete_cmd.
+         *
+         * With barriers:
+         *
+         *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
+         *      ------------------------------------------------------------
+         *      lock vq_lock
+         *      read req_vq
+         *      read reqs (reqs = 1)
+         *      write reqs (reqs = 0)
+         *                              increment reqs (reqs = 1)
+         *                              write req_vq
+         *
+         * Possible reordering without barriers:
+         *
+         *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
+         *      ------------------------------------------------------------
+         *      lock vq_lock
+         *      read reqs (reqs = 1)
+         *      write reqs (reqs = 0)
+         *                              increment reqs (reqs = 1)
+         *                              write req_vq
+         *      read (wrong) req_vq
+         *
+         * We do not need a full smp_rmb, because req_vq is required to get
+         * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
+         * in the virtqueue as the user token.
+         */
+        smp_read_barrier_depends();
+
+        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
-static void virtscsi_complete_free(void *buf)
+static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
 {
         struct virtio_scsi_cmd *cmd = buf;
 
@@ -205,11 +284,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
 {
         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
-        unsigned long flags;
 
-        spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
-        virtscsi_vq_done(vq, virtscsi_complete_free);
-        spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
+        virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 };
 
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
@@ -223,8 +299,8 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 
         spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
 
-        err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
-                                GFP_ATOMIC);
+        err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
+                                  GFP_ATOMIC);
         if (!err)
                 virtqueue_kick(vscsi->event_vq.vq);
 
@@ -254,7 +330,7 @@ static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
 }
 
 static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
-                                                struct virtio_scsi_event *event)
+                                            struct virtio_scsi_event *event)
 {
         struct scsi_device *sdev;
         struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
@@ -332,7 +408,7 @@ static void virtscsi_handle_event(struct work_struct *work)
         virtscsi_kick_event(vscsi, event_node);
 }
 
-static void virtscsi_complete_event(void *buf)
+static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
 {
         struct virtio_scsi_event_node *event_node = buf;
 
@@ -344,82 +420,65 @@ static void virtscsi_event_done(struct virtqueue *vq)
 {
         struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
-        unsigned long flags;
 
-        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
-        virtscsi_vq_done(vq, virtscsi_complete_event);
-        spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
+        virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
 };
 
-static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
-                             struct scsi_data_buffer *sdb)
-{
-        struct sg_table *table = &sdb->table;
-        struct scatterlist *sg_elem;
-        unsigned int idx = *p_idx;
-        int i;
-
-        for_each_sg(table->sgl, sg_elem, table->nents, i)
-                sg[idx++] = *sg_elem;
-
-        *p_idx = idx;
-}
-
 /**
- * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
- * @vscsi     : virtio_scsi state
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
+ * @vq        : the struct virtqueue we're talking about
  * @cmd       : command structure
- * @out_num   : number of read-only elements
- * @in_num    : number of write-only elements
  * @req_size  : size of the request buffer
  * @resp_size : size of the response buffer
- *
- * Called with tgt_lock held.
+ * @gfp       : flags to use for memory allocations
  */
-static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
-                             struct virtio_scsi_cmd *cmd,
-                             unsigned *out_num, unsigned *in_num,
-                             size_t req_size, size_t resp_size)
+static int virtscsi_add_cmd(struct virtqueue *vq,
+                            struct virtio_scsi_cmd *cmd,
+                            size_t req_size, size_t resp_size, gfp_t gfp)
 {
         struct scsi_cmnd *sc = cmd->sc;
-        struct scatterlist *sg = tgt->sg;
-        unsigned int idx = 0;
+        struct scatterlist *sgs[4], req, resp;
+        struct sg_table *out, *in;
+        unsigned out_num = 0, in_num = 0;
+
+        out = in = NULL;
+
+        if (sc && sc->sc_data_direction != DMA_NONE) {
+                if (sc->sc_data_direction != DMA_FROM_DEVICE)
+                        out = &scsi_out(sc)->table;
+                if (sc->sc_data_direction != DMA_TO_DEVICE)
+                        in = &scsi_in(sc)->table;
+        }
 
         /* Request header.  */
-        sg_set_buf(&sg[idx++], &cmd->req, req_size);
+        sg_init_one(&req, &cmd->req, req_size);
+        sgs[out_num++] = &req;
 
         /* Data-out buffer.  */
-        if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
-                virtscsi_map_sgl(sg, &idx, scsi_out(sc));
-
-        *out_num = idx;
+        if (out)
+                sgs[out_num++] = out->sgl;
 
         /* Response header.  */
-        sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
+        sg_init_one(&resp, &cmd->resp, resp_size);
+        sgs[out_num + in_num++] = &resp;
 
         /* Data-in buffer */
-        if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
-                virtscsi_map_sgl(sg, &idx, scsi_in(sc));
+        if (in)
+                sgs[out_num + in_num++] = in->sgl;
 
-        *in_num = idx - *out_num;
+        return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
 }
 
-static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
-                             struct virtio_scsi_vq *vq,
+static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
                              struct virtio_scsi_cmd *cmd,
                              size_t req_size, size_t resp_size, gfp_t gfp)
 {
-        unsigned int out_num, in_num;
         unsigned long flags;
         int err;
         bool needs_kick = false;
 
-        spin_lock_irqsave(&tgt->tgt_lock, flags);
-        virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
-
-        spin_lock(&vq->vq_lock);
-        err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
-        spin_unlock(&tgt->tgt_lock);
+        spin_lock_irqsave(&vq->vq_lock, flags);
+        err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
         if (!err)
                 needs_kick = virtqueue_kick_prepare(vq->vq);
 
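Replacing virtqueue_add_buf() with virtqueue_add_sgs() is what lets the per-target scatterlist (and virtscsi_map_sgl() above) go away: instead of flattening every data sg entry into one array under tgt_lock, the driver hands the virtqueue up to four independent scatterlists, device-readable ones first. Below is a hedged sketch of the call shape for a made-up driver with one request header and one status byte; struct my_req and queue_one() are invented, only the virtqueue_add_sgs() signature is the real 3.10-era API, and the fragment builds only inside a kernel driver.

    /* Sketch only: the sgs[] layout virtqueue_add_sgs() expects.
     * Out (device-readable) lists precede in (device-writable) lists.
     */
    static int queue_one(struct virtqueue *vq, struct my_req *req, u8 *status)
    {
            struct scatterlist hdr, stat;
            struct scatterlist *sgs[2];
            unsigned int out_num = 0, in_num = 0;

            sg_init_one(&hdr, req, sizeof(*req));   /* device reads this */
            sgs[out_num++] = &hdr;

            sg_init_one(&stat, status, 1);          /* device writes this */
            sgs[out_num + in_num++] = &stat;

            /* req doubles as the token later returned by virtqueue_get_buf(). */
            return virtqueue_add_sgs(vq, sgs, out_num, in_num, req, GFP_ATOMIC);
    }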
@@ -430,10 +489,10 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
         return err;
 }
 
-static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
+                                 struct virtio_scsi_vq *req_vq,
+                                 struct scsi_cmnd *sc)
 {
-        struct virtio_scsi *vscsi = shost_priv(sh);
-        struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
         struct virtio_scsi_cmd *cmd;
         int ret;
 
@@ -467,7 +526,7 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
         BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
         memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
 
-        if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
+        if (virtscsi_kick_cmd(req_vq, cmd,
                               sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
                               GFP_ATOMIC) == 0)
                 ret = 0;
@@ -478,14 +537,62 @@ out:
         return ret;
 }
 
+static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
+                                        struct scsi_cmnd *sc)
+{
+        struct virtio_scsi *vscsi = shost_priv(sh);
+        struct virtio_scsi_target_state *tgt =
+                                scsi_target(sc->device)->hostdata;
+
+        atomic_inc(&tgt->reqs);
+        return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
+}
+
+static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
+                                               struct virtio_scsi_target_state *tgt)
+{
+        struct virtio_scsi_vq *vq;
+        unsigned long flags;
+        u32 queue_num;
+
+        spin_lock_irqsave(&tgt->tgt_lock, flags);
+
+        /*
+         * The memory barrier after atomic_inc_return matches
+         * the smp_read_barrier_depends() in virtscsi_req_done.
+         */
+        if (atomic_inc_return(&tgt->reqs) > 1)
+                vq = ACCESS_ONCE(tgt->req_vq);
+        else {
+                queue_num = smp_processor_id();
+                while (unlikely(queue_num >= vscsi->num_queues))
+                        queue_num -= vscsi->num_queues;
+
+                tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
+        }
+
+        spin_unlock_irqrestore(&tgt->tgt_lock, flags);
+        return vq;
+}
+
+static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
+                                       struct scsi_cmnd *sc)
+{
+        struct virtio_scsi *vscsi = shost_priv(sh);
+        struct virtio_scsi_target_state *tgt =
+                                scsi_target(sc->device)->hostdata;
+        struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);
+
+        return virtscsi_queuecommand(vscsi, req_vq, sc);
+}
+
 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 {
         DECLARE_COMPLETION_ONSTACK(comp);
-        struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
         int ret = FAILED;
 
         cmd->comp = &comp;
-        if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
+        if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
                               sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
                               GFP_NOIO) < 0)
                 goto out;
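The queue_num wrap-around in virtscsi_pick_vq() is a modulo written as repeated subtraction, cheap because the CPU number rarely exceeds num_queues by more than one multiple. A quick user-space check of the equivalence (illustrative only, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    /* Mirrors the wrap-around loop in virtscsi_pick_vq(). */
    static unsigned int pick(unsigned int cpu, unsigned int num_queues)
    {
        unsigned int queue_num = cpu;

        while (queue_num >= num_queues)
            queue_num -= num_queues;
        return queue_num;
    }

    int main(void)
    {
        unsigned int cpu;

        for (cpu = 0; cpu < 64; cpu++)
            assert(pick(cpu, 4) == cpu % 4);
        printf("cpu 9, 4 request queues -> queue %u\n", pick(9, 4));
        return 0;
    }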
@@ -547,18 +654,57 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
         return virtscsi_tmf(vscsi, cmd);
 }
 
-static struct scsi_host_template virtscsi_host_template = {
+static int virtscsi_target_alloc(struct scsi_target *starget)
+{
+        struct virtio_scsi_target_state *tgt =
+                                kmalloc(sizeof(*tgt), GFP_KERNEL);
+        if (!tgt)
+                return -ENOMEM;
+
+        spin_lock_init(&tgt->tgt_lock);
+        atomic_set(&tgt->reqs, 0);
+        tgt->req_vq = NULL;
+
+        starget->hostdata = tgt;
+        return 0;
+}
+
+static void virtscsi_target_destroy(struct scsi_target *starget)
+{
+        struct virtio_scsi_target_state *tgt = starget->hostdata;
+        kfree(tgt);
+}
+
+static struct scsi_host_template virtscsi_host_template_single = {
+        .module = THIS_MODULE,
+        .name = "Virtio SCSI HBA",
+        .proc_name = "virtio_scsi",
+        .this_id = -1,
+        .queuecommand = virtscsi_queuecommand_single,
+        .eh_abort_handler = virtscsi_abort,
+        .eh_device_reset_handler = virtscsi_device_reset,
+
+        .can_queue = 1024,
+        .dma_boundary = UINT_MAX,
+        .use_clustering = ENABLE_CLUSTERING,
+        .target_alloc = virtscsi_target_alloc,
+        .target_destroy = virtscsi_target_destroy,
+};
+
+static struct scsi_host_template virtscsi_host_template_multi = {
         .module = THIS_MODULE,
         .name = "Virtio SCSI HBA",
         .proc_name = "virtio_scsi",
-        .queuecommand = virtscsi_queuecommand,
         .this_id = -1,
+        .queuecommand = virtscsi_queuecommand_multi,
         .eh_abort_handler = virtscsi_abort,
         .eh_device_reset_handler = virtscsi_device_reset,
 
         .can_queue = 1024,
         .dma_boundary = UINT_MAX,
         .use_clustering = ENABLE_CLUSTERING,
+        .target_alloc = virtscsi_target_alloc,
+        .target_destroy = virtscsi_target_destroy,
 };
 
 #define virtscsi_config_get(vdev, fld) \
@@ -578,29 +724,69 @@ static struct scsi_host_template virtscsi_host_template = {
                   &__val, sizeof(__val)); \
         })
 
-static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
-                             struct virtqueue *vq)
+static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 {
-        spin_lock_init(&virtscsi_vq->vq_lock);
-        virtscsi_vq->vq = vq;
+        int i;
+        int cpu;
+
+        /* In multiqueue mode, when the number of cpu is equal
+         * to the number of request queues, we let the qeueues
+         * to be private to one cpu by setting the affinity hint
+         * to eliminate the contention.
+         */
+        if ((vscsi->num_queues == 1 ||
+             vscsi->num_queues != num_online_cpus()) && affinity) {
+                if (vscsi->affinity_hint_set)
+                        affinity = false;
+                else
+                        return;
+        }
+
+        if (affinity) {
+                i = 0;
+                for_each_online_cpu(cpu) {
+                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
+                        i++;
+                }
+
+                vscsi->affinity_hint_set = true;
+        } else {
+                for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+
+                vscsi->affinity_hint_set = false;
+        }
 }
 
-static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
-        struct virtio_device *vdev, int sg_elems)
+static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 {
-        struct virtio_scsi_target_state *tgt;
-        gfp_t gfp_mask = GFP_KERNEL;
-
-        /* We need extra sg elements at head and tail. */
-        tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
-                      gfp_mask);
-
-        if (!tgt)
-                return NULL;
+        get_online_cpus();
+        __virtscsi_set_affinity(vscsi, affinity);
+        put_online_cpus();
+}
 
+static int virtscsi_cpu_callback(struct notifier_block *nfb,
+                                 unsigned long action, void *hcpu)
+{
+        struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
+        switch(action) {
+        case CPU_ONLINE:
+        case CPU_ONLINE_FROZEN:
+        case CPU_DEAD:
+        case CPU_DEAD_FROZEN:
+                __virtscsi_set_affinity(vscsi, true);
+                break;
+        default:
+                break;
+        }
+        return NOTIFY_OK;
+}
 
-        spin_lock_init(&tgt->tgt_lock);
-        sg_init_table(tgt->sg, sg_elems + 2);
-        return tgt;
+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
+                             struct virtqueue *vq)
+{
+        spin_lock_init(&virtscsi_vq->vq_lock);
+        virtscsi_vq->vq = vq;
 }
 
 static void virtscsi_scan(struct virtio_device *vdev)
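__virtscsi_set_affinity() pins queues only in the one configuration where it pays off: exactly one request queue per online CPU. Every other combination either leaves things untouched or clears hints left over from before the CPU count changed. The entry check reduces to a small predicate, modeled here in user-space (illustrative only; the strings summarize what the kernel function would do):

    #include <stdbool.h>
    #include <stdio.h>

    /* Models the entry check of __virtscsi_set_affinity(): pinning is only
     * worthwhile with one request queue per online CPU.
     */
    static const char *decide(unsigned int num_queues, unsigned int online_cpus,
                              bool want, bool already_set)
    {
        if ((num_queues == 1 || num_queues != online_cpus) && want) {
            if (already_set)
                return "clear stale hints";
            return "do nothing";
        }
        return want ? "pin one vq per cpu" : "clear hints";
    }

    int main(void)
    {
        printf("4 queues, 4 cpus: %s\n", decide(4, 4, true, false));
        printf("4 queues, 8 cpus: %s\n", decide(4, 8, true, false));
        printf("1 queue,  4 cpus: %s\n", decide(1, 4, true, true));
        return 0;
    }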
@@ -614,46 +800,56 @@ static void virtscsi_remove_vqs(struct virtio_device *vdev)
 {
         struct Scsi_Host *sh = virtio_scsi_host(vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
-        u32 i, num_targets;
+
+        virtscsi_set_affinity(vscsi, false);
 
         /* Stop all the virtqueues. */
         vdev->config->reset(vdev);
 
-        num_targets = sh->max_id;
-        for (i = 0; i < num_targets; i++) {
-                kfree(vscsi->tgt[i]);
-                vscsi->tgt[i] = NULL;
-        }
-
         vdev->config->del_vqs(vdev);
 }
 
 static int virtscsi_init(struct virtio_device *vdev,
-                         struct virtio_scsi *vscsi, int num_targets)
+                         struct virtio_scsi *vscsi)
 {
         int err;
-        struct virtqueue *vqs[3];
-        u32 i, sg_elems;
+        u32 i;
+        u32 num_vqs;
+        vq_callback_t **callbacks;
+        const char **names;
+        struct virtqueue **vqs;
+
+        num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
+        vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
+        callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
+        names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+
+        if (!callbacks || !vqs || !names) {
+                err = -ENOMEM;
+                goto out;
+        }
 
-        vq_callback_t *callbacks[] = {
-                virtscsi_ctrl_done,
-                virtscsi_event_done,
-                virtscsi_req_done
-        };
-        const char *names[] = {
-                "control",
-                "event",
-                "request"
-        };
+        callbacks[0] = virtscsi_ctrl_done;
+        callbacks[1] = virtscsi_event_done;
+        names[0] = "control";
+        names[1] = "event";
+        for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
+                callbacks[i] = virtscsi_req_done;
+                names[i] = "request";
+        }
 
         /* Discover virtqueues and write information to configuration.  */
-        err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
+        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
         if (err)
-                return err;
+                goto out;
 
         virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
         virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
-        virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
+        for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
+                virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
+                                 vqs[i]);
+
+        virtscsi_set_affinity(vscsi, true);
 
         virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
         virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
@@ -661,19 +857,12 @@ static int virtscsi_init(struct virtio_device *vdev,
         if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
                 virtscsi_kick_event_all(vscsi);
 
-        /* We need to know how many segments before we allocate. */
-        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-
-        for (i = 0; i < num_targets; i++) {
-                vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
-                if (!vscsi->tgt[i]) {
-                        err = -ENOMEM;
-                        goto out;
-                }
-        }
         err = 0;
 
 out:
+        kfree(names);
+        kfree(callbacks);
+        kfree(vqs);
         if (err)
                 virtscsi_remove_vqs(vdev);
         return err;
@@ -686,13 +875,21 @@ static int virtscsi_probe(struct virtio_device *vdev)
         int err;
         u32 sg_elems, num_targets;
         u32 cmd_per_lun;
+        u32 num_queues;
+        struct scsi_host_template *hostt;
+
+        /* We need to know how many queues before we allocate. */
+        num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
 
-        /* Allocate memory and link the structs together. */
         num_targets = virtscsi_config_get(vdev, max_target) + 1;
-        shost = scsi_host_alloc(&virtscsi_host_template,
-                sizeof(*vscsi)
-                + num_targets * sizeof(struct virtio_scsi_target_state));
 
+        if (num_queues == 1)
+                hostt = &virtscsi_host_template_single;
+        else
+                hostt = &virtscsi_host_template_multi;
+
+        shost = scsi_host_alloc(hostt,
+                sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
         if (!shost)
                 return -ENOMEM;
 
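scsi_host_alloc() is now sized as sizeof(*vscsi) plus num_queues trailing req_vqs entries, the standard C flexible-array-member pattern that replaces the old per-target tail array. A self-contained user-space illustration of the sizing arithmetic (types are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct vq { void *q; };

    /* Same shape as struct virtio_scsi: fixed header, then a flexible
     * array of per-queue state sized at allocation time.
     */
    struct host_priv {
        unsigned int num_queues;
        struct vq req_vqs[];    /* flexible array member */
    };

    int main(void)
    {
        unsigned int num_queues = 4;
        struct host_priv *p;

        /* Mirrors: scsi_host_alloc(hostt,
         *     sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues) */
        p = malloc(sizeof(*p) + sizeof(p->req_vqs[0]) * num_queues);
        if (!p)
            return 1;
        p->num_queues = num_queues;
        printf("allocated %zu bytes for %u queues\n",
               sizeof(*p) + sizeof(p->req_vqs[0]) * num_queues, num_queues);
        free(p);
        return 0;
    }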
@@ -700,12 +897,20 @@ static int virtscsi_probe(struct virtio_device *vdev)
         shost->sg_tablesize = sg_elems;
         vscsi = shost_priv(shost);
         vscsi->vdev = vdev;
+        vscsi->num_queues = num_queues;
         vdev->priv = shost;
 
-        err = virtscsi_init(vdev, vscsi, num_targets);
+        err = virtscsi_init(vdev, vscsi);
         if (err)
                 goto virtscsi_init_failed;
 
+        vscsi->nb.notifier_call = &virtscsi_cpu_callback;
+        err = register_hotcpu_notifier(&vscsi->nb);
+        if (err) {
+                pr_err("registering cpu notifier failed\n");
+                goto scsi_add_host_failed;
+        }
+
         cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
         shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
         shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
@@ -743,6 +948,8 @@ static void virtscsi_remove(struct virtio_device *vdev)
 
         scsi_remove_host(shost);
 
+        unregister_hotcpu_notifier(&vscsi->nb);
+
         virtscsi_remove_vqs(vdev);
         scsi_host_put(shost);
 }
@@ -759,7 +966,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
         struct Scsi_Host *sh = virtio_scsi_host(vdev);
         struct virtio_scsi *vscsi = shost_priv(sh);
 
-        return virtscsi_init(vdev, vscsi, sh->max_id);
+        return virtscsi_init(vdev, vscsi);
 }
 #endif
 
@@ -794,8 +1001,7 @@ static int __init init(void)
 
         virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
         if (!virtscsi_cmd_cache) {
-                printk(KERN_ERR "kmem_cache_create() for "
-                       "virtscsi_cmd_cache failed\n");
+                pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
                 goto error;
         }
 
@@ -804,8 +1010,7 @@ static int __init init(void)
                 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
                                          virtscsi_cmd_cache);
         if (!virtscsi_cmd_pool) {
-                printk(KERN_ERR "mempool_create() for"
-                       "virtscsi_cmd_pool failed\n");
+                pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
                 goto error;
         }
         ret = register_virtio_driver(&virtio_scsi_driver);