aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJuergen Gross <jgross@suse.com>2016-12-02 01:15:45 -0500
committerJuergen Gross <jgross@suse.com>2016-12-09 04:59:13 -0500
commit3da96be58f2c8aaa86cfe78b16f837e610dfcfe2 (patch)
treeadbccc218d18d3473e39f121c97442f2f3a5ff67
parent738662c35c491fc360bb6adcb8a0db88d87b5d88 (diff)
xen/scsifront: don't request a slot on the ring until request is ready
Instead of requesting a new slot on the ring to the backend early, do so only after all has been setup for the request to be sent. This makes error handling easier as we don't need to undo the request id allocation and ring slot allocation. Suggested-by: Jan Beulich <jbeulich@suse.com> Signed-off-by: Juergen Gross <jgross@suse.com> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Signed-off-by: Juergen Gross <jgross@suse.com>
-rw-r--r--drivers/scsi/xen-scsifront.c188
1 file changed, 83 insertions, 105 deletions
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index c01316c6c7b1..9aa1fe1fc939 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
79struct vscsifrnt_shadow { 79struct vscsifrnt_shadow {
80 /* command between backend and frontend */ 80 /* command between backend and frontend */
81 unsigned char act; 81 unsigned char act;
82 uint8_t nr_segments;
82 uint16_t rqid; 83 uint16_t rqid;
84 uint16_t ref_rqid;
83 85
84 unsigned int nr_grants; /* number of grants in gref[] */ 86 unsigned int nr_grants; /* number of grants in gref[] */
85 struct scsiif_request_segment *sg; /* scatter/gather elements */ 87 struct scsiif_request_segment *sg; /* scatter/gather elements */
88 struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
86 89
87 /* Do reset or abort function. */ 90 /* Do reset or abort function. */
88 wait_queue_head_t wq_reset; /* reset work queue */ 91 wait_queue_head_t wq_reset; /* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
172 scsifront_wake_up(info); 175 scsifront_wake_up(info);
173} 176}
174 177
175static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info) 178static int scsifront_do_request(struct vscsifrnt_info *info,
179 struct vscsifrnt_shadow *shadow)
176{ 180{
177 struct vscsiif_front_ring *ring = &(info->ring); 181 struct vscsiif_front_ring *ring = &(info->ring);
178 struct vscsiif_request *ring_req; 182 struct vscsiif_request *ring_req;
183 struct scsi_cmnd *sc = shadow->sc;
179 uint32_t id; 184 uint32_t id;
185 int i, notify;
186
187 if (RING_FULL(&info->ring))
188 return -EBUSY;
180 189
181 id = scsifront_get_rqid(info); /* use id in response */ 190 id = scsifront_get_rqid(info); /* use id in response */
182 if (id >= VSCSIIF_MAX_REQS) 191 if (id >= VSCSIIF_MAX_REQS)
183 return NULL; 192 return -EBUSY;
184 193
185 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); 194 info->shadow[id] = shadow;
195 shadow->rqid = id;
186 196
197 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
187 ring->req_prod_pvt++; 198 ring->req_prod_pvt++;
188 199
189 ring_req->rqid = (uint16_t)id; 200 ring_req->rqid = id;
201 ring_req->act = shadow->act;
202 ring_req->ref_rqid = shadow->ref_rqid;
203 ring_req->nr_segments = shadow->nr_segments;
190 204
191 return ring_req; 205 ring_req->id = sc->device->id;
192} 206 ring_req->lun = sc->device->lun;
207 ring_req->channel = sc->device->channel;
208 ring_req->cmd_len = sc->cmd_len;
193 209
194static void scsifront_do_request(struct vscsifrnt_info *info) 210 BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
195{ 211
196 struct vscsiif_front_ring *ring = &(info->ring); 212 memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
197 int notify; 213
214 ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
215 ring_req->timeout_per_command = sc->request->timeout / HZ;
216
217 for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
218 ring_req->seg[i] = shadow->seg[i];
198 219
199 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); 220 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
200 if (notify) 221 if (notify)
201 notify_remote_via_irq(info->irq); 222 notify_remote_via_irq(info->irq);
223
224 return 0;
202} 225}
203 226
204static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) 227static void scsifront_gnttab_done(struct vscsifrnt_info *info,
228 struct vscsifrnt_shadow *shadow)
205{ 229{
206 struct vscsifrnt_shadow *s = info->shadow[id];
207 int i; 230 int i;
208 231
209 if (s->sc->sc_data_direction == DMA_NONE) 232 if (shadow->sc->sc_data_direction == DMA_NONE)
210 return; 233 return;
211 234
212 for (i = 0; i < s->nr_grants; i++) { 235 for (i = 0; i < shadow->nr_grants; i++) {
213 if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) { 236 if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
214 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME 237 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
215 "grant still in use by backend\n"); 238 "grant still in use by backend\n");
216 BUG(); 239 BUG();
217 } 240 }
218 gnttab_end_foreign_access(s->gref[i], 0, 0UL); 241 gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
219 } 242 }
220 243
221 kfree(s->sg); 244 kfree(shadow->sg);
222} 245}
223 246
224static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, 247static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
225 struct vscsiif_response *ring_rsp) 248 struct vscsiif_response *ring_rsp)
226{ 249{
250 struct vscsifrnt_shadow *shadow;
227 struct scsi_cmnd *sc; 251 struct scsi_cmnd *sc;
228 uint32_t id; 252 uint32_t id;
229 uint8_t sense_len; 253 uint8_t sense_len;
230 254
231 id = ring_rsp->rqid; 255 id = ring_rsp->rqid;
232 sc = info->shadow[id]->sc; 256 shadow = info->shadow[id];
257 sc = shadow->sc;
233 258
234 BUG_ON(sc == NULL); 259 BUG_ON(sc == NULL);
235 260
236 scsifront_gnttab_done(info, id); 261 scsifront_gnttab_done(info, shadow);
237 scsifront_put_rqid(info, id); 262 scsifront_put_rqid(info, id);
238 263
239 sc->result = ring_rsp->rslt; 264 sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
366 391
367static int map_data_for_request(struct vscsifrnt_info *info, 392static int map_data_for_request(struct vscsifrnt_info *info,
368 struct scsi_cmnd *sc, 393 struct scsi_cmnd *sc,
369 struct vscsiif_request *ring_req,
370 struct vscsifrnt_shadow *shadow) 394 struct vscsifrnt_shadow *shadow)
371{ 395{
372 grant_ref_t gref_head; 396 grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
379 struct scatterlist *sg; 403 struct scatterlist *sg;
380 struct scsiif_request_segment *seg; 404 struct scsiif_request_segment *seg;
381 405
382 ring_req->nr_segments = 0;
383 if (sc->sc_data_direction == DMA_NONE || !data_len) 406 if (sc->sc_data_direction == DMA_NONE || !data_len)
384 return 0; 407 return 0;
385 408
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
398 if (!shadow->sg) 421 if (!shadow->sg)
399 return -ENOMEM; 422 return -ENOMEM;
400 } 423 }
401 seg = shadow->sg ? : ring_req->seg; 424 seg = shadow->sg ? : shadow->seg;
402 425
403 err = gnttab_alloc_grant_references(seg_grants + data_grants, 426 err = gnttab_alloc_grant_references(seg_grants + data_grants,
404 &gref_head); 427 &gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
423 info->dev->otherend_id, 446 info->dev->otherend_id,
424 xen_page_to_gfn(page), 1); 447 xen_page_to_gfn(page), 1);
425 shadow->gref[ref_cnt] = ref; 448 shadow->gref[ref_cnt] = ref;
426 ring_req->seg[ref_cnt].gref = ref; 449 shadow->seg[ref_cnt].gref = ref;
427 ring_req->seg[ref_cnt].offset = (uint16_t)off; 450 shadow->seg[ref_cnt].offset = (uint16_t)off;
428 ring_req->seg[ref_cnt].length = (uint16_t)bytes; 451 shadow->seg[ref_cnt].length = (uint16_t)bytes;
429 452
430 page++; 453 page++;
431 len -= bytes; 454 len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
473 } 496 }
474 497
475 if (seg_grants) 498 if (seg_grants)
476 ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants; 499 shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
477 else 500 else
478 ring_req->nr_segments = (uint8_t)ref_cnt; 501 shadow->nr_segments = (uint8_t)ref_cnt;
479 shadow->nr_grants = ref_cnt; 502 shadow->nr_grants = ref_cnt;
480 503
481 return 0; 504 return 0;
482} 505}
483 506
484static struct vscsiif_request *scsifront_command2ring(
485 struct vscsifrnt_info *info, struct scsi_cmnd *sc,
486 struct vscsifrnt_shadow *shadow)
487{
488 struct vscsiif_request *ring_req;
489
490 memset(shadow, 0, sizeof(*shadow));
491
492 ring_req = scsifront_pre_req(info);
493 if (!ring_req)
494 return NULL;
495
496 info->shadow[ring_req->rqid] = shadow;
497 shadow->rqid = ring_req->rqid;
498
499 ring_req->id = sc->device->id;
500 ring_req->lun = sc->device->lun;
501 ring_req->channel = sc->device->channel;
502 ring_req->cmd_len = sc->cmd_len;
503
504 BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
505
506 memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
507
508 ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
509 ring_req->timeout_per_command = sc->request->timeout / HZ;
510
511 return ring_req;
512}
513
514static int scsifront_enter(struct vscsifrnt_info *info) 507static int scsifront_enter(struct vscsifrnt_info *info)
515{ 508{
516 if (info->pause) 509 if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
536 struct scsi_cmnd *sc) 529 struct scsi_cmnd *sc)
537{ 530{
538 struct vscsifrnt_info *info = shost_priv(shost); 531 struct vscsifrnt_info *info = shost_priv(shost);
539 struct vscsiif_request *ring_req;
540 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc); 532 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
541 unsigned long flags; 533 unsigned long flags;
542 int err; 534 int err;
543 uint16_t rqid; 535
536 sc->result = 0;
537 memset(shadow, 0, sizeof(*shadow));
538
539 shadow->sc = sc;
540 shadow->act = VSCSIIF_ACT_SCSI_CDB;
544 541
545 spin_lock_irqsave(shost->host_lock, flags); 542 spin_lock_irqsave(shost->host_lock, flags);
546 if (scsifront_enter(info)) { 543 if (scsifront_enter(info)) {
547 spin_unlock_irqrestore(shost->host_lock, flags); 544 spin_unlock_irqrestore(shost->host_lock, flags);
548 return SCSI_MLQUEUE_HOST_BUSY; 545 return SCSI_MLQUEUE_HOST_BUSY;
549 } 546 }
550 if (RING_FULL(&info->ring))
551 goto busy;
552 547
553 ring_req = scsifront_command2ring(info, sc, shadow); 548 err = map_data_for_request(info, sc, shadow);
554 if (!ring_req)
555 goto busy;
556
557 sc->result = 0;
558
559 rqid = ring_req->rqid;
560 ring_req->act = VSCSIIF_ACT_SCSI_CDB;
561
562 shadow->sc = sc;
563 shadow->act = VSCSIIF_ACT_SCSI_CDB;
564
565 err = map_data_for_request(info, sc, ring_req, shadow);
566 if (err < 0) { 549 if (err < 0) {
567 pr_debug("%s: err %d\n", __func__, err); 550 pr_debug("%s: err %d\n", __func__, err);
568 scsifront_put_rqid(info, rqid);
569 scsifront_return(info); 551 scsifront_return(info);
570 spin_unlock_irqrestore(shost->host_lock, flags); 552 spin_unlock_irqrestore(shost->host_lock, flags);
571 if (err == -ENOMEM) 553 if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
575 return 0; 557 return 0;
576 } 558 }
577 559
578 scsifront_do_request(info); 560 if (scsifront_do_request(info, shadow)) {
561 scsifront_gnttab_done(info, shadow);
562 goto busy;
563 }
564
579 scsifront_return(info); 565 scsifront_return(info);
580 spin_unlock_irqrestore(shost->host_lock, flags); 566 spin_unlock_irqrestore(shost->host_lock, flags);
581 567
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
598 struct Scsi_Host *host = sc->device->host; 584 struct Scsi_Host *host = sc->device->host;
599 struct vscsifrnt_info *info = shost_priv(host); 585 struct vscsifrnt_info *info = shost_priv(host);
600 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); 586 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
601 struct vscsiif_request *ring_req;
602 int err = 0; 587 int err = 0;
603 588
604 shadow = kmalloc(sizeof(*shadow), GFP_NOIO); 589 shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
605 if (!shadow) 590 if (!shadow)
606 return FAILED; 591 return FAILED;
607 592
593 shadow->act = act;
594 shadow->rslt_reset = RSLT_RESET_WAITING;
595 shadow->sc = sc;
596 shadow->ref_rqid = s->rqid;
597 init_waitqueue_head(&shadow->wq_reset);
598
608 spin_lock_irq(host->host_lock); 599 spin_lock_irq(host->host_lock);
609 600
610 for (;;) { 601 for (;;) {
611 if (!RING_FULL(&info->ring)) { 602 if (scsifront_enter(info))
612 ring_req = scsifront_command2ring(info, sc, shadow); 603 goto fail;
613 if (ring_req) 604
614 break; 605 if (!scsifront_do_request(info, shadow))
615 } 606 break;
616 if (err || info->pause) { 607
617 spin_unlock_irq(host->host_lock); 608 scsifront_return(info);
618 kfree(shadow); 609 if (err)
619 return FAILED; 610 goto fail;
620 }
621 info->wait_ring_available = 1; 611 info->wait_ring_available = 1;
622 spin_unlock_irq(host->host_lock); 612 spin_unlock_irq(host->host_lock);
623 err = wait_event_interruptible(info->wq_sync, 613 err = wait_event_interruptible(info->wq_sync,
@@ -625,23 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
625 spin_lock_irq(host->host_lock); 615 spin_lock_irq(host->host_lock);
626 } 616 }
627 617
628 if (scsifront_enter(info)) {
629 spin_unlock_irq(host->host_lock);
630 kfree(shadow);
631 return FAILED;
632 }
633
634 ring_req->act = act;
635 ring_req->ref_rqid = s->rqid;
636
637 shadow->act = act;
638 shadow->rslt_reset = RSLT_RESET_WAITING;
639 init_waitqueue_head(&shadow->wq_reset);
640
641 ring_req->nr_segments = 0;
642
643 scsifront_do_request(info);
644
645 spin_unlock_irq(host->host_lock); 618 spin_unlock_irq(host->host_lock);
646 err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset); 619 err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
647 spin_lock_irq(host->host_lock); 620 spin_lock_irq(host->host_lock);
@@ -660,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
660 scsifront_return(info); 633 scsifront_return(info);
661 spin_unlock_irq(host->host_lock); 634 spin_unlock_irq(host->host_lock);
662 return err; 635 return err;
636
637fail:
638 spin_unlock_irq(host->host_lock);
639 kfree(shadow);
640 return FAILED;
663} 641}
664 642
665static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) 643static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)