author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-05-05 13:37:23 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-05-05 13:43:25 -0400
commit    fc53bf757ede292312eee10d64f4e691c8c8cebf (patch)
tree      9129e9a47a399842351a800215e30b1163a6d960 /drivers/block/xen-blkback
parent    24f567f952aa308c3352f3340b9d296fc72bd066 (diff)
xen/blkback: Squash the checking for operation into dispatch_rw_block_io
We check the operation right before calling dispatch_rw_block_io, and then perform the same check again inside dispatch_rw_block_io itself. This patch squashes those checks into the 'dispatch_rw_block_io' function.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
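The shape of the change, in miniature: the caller's switch on the operation goes away, and the dispatch routine becomes the single place that validates the operation, reporting failure through an int return value instead of returning void. The standalone C sketch below illustrates just that pattern; every name in it (blk_request, dispatch_request, the OP_* constants) is a hypothetical stand-in for illustration, not the kernel's own definition.

/* A minimal, standalone sketch of the pattern this patch applies.
 * All names are hypothetical stand-ins, not the kernel's definitions. */
#include <stddef.h>
#include <stdio.h>

enum blk_op { OP_READ, OP_WRITE, OP_FLUSH, OP_BOGUS };

struct blk_request {
	enum blk_op operation;
	unsigned long id;
};

/* After the squash: one switch, inside the dispatch routine itself.
 * Returns 0 on success and -1 on an unknown operation, so the caller
 * can stop pulling requests from a misbehaving source. */
static int dispatch_request(struct blk_request *req)
{
	switch (req->operation) {
	case OP_READ:
	case OP_WRITE:
	case OP_FLUSH:
		printf("dispatching request %lu (op %d)\n",
		       req->id, (int)req->operation);
		return 0;
	default:
		fprintf(stderr, "error: unknown operation [%d]\n",
			(int)req->operation);
		return -1;
	}
}

int main(void)
{
	struct blk_request reqs[] = {
		{ OP_READ, 1 }, { OP_FLUSH, 2 }, { OP_BOGUS, 3 },
	};
	size_t i;

	/* The caller no longer duplicates the operation check; it only
	 * looks at the return value, like the new do_block_io_op() loop. */
	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		if (dispatch_request(&reqs[i]))
			break; /* bail out on the first bad request */
	return 0;
}

Compiled and run, the sketch dispatches requests 1 and 2, prints an error for the bogus third request, and the loop stops; that is the same control flow do_block_io_op gains from the patch below.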
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 45 +++++++++++++--------------------------------
1 file changed, 13 insertions(+), 32 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 72ede0bf2697..5f4284729a3a 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -123,9 +123,9 @@ static inline unsigned long vaddr(struct pending_req *req, int seg)
 
 
 static int do_block_io_op(struct blkif_st *blkif);
-static void dispatch_rw_block_io(struct blkif_st *blkif,
-				 struct blkif_request *req,
-				 struct pending_req *pending_req);
+static int dispatch_rw_block_io(struct blkif_st *blkif,
+				struct blkif_request *req,
+				struct pending_req *pending_req);
 static void make_response(struct blkif_st *blkif, u64 id,
 			  unsigned short op, int st);
 
@@ -499,30 +499,8 @@ static int do_block_io_op(struct blkif_st *blkif)
 		/* Apply all sanity checks to /private copy/ of request. */
 		barrier();
 
-		switch (req.operation) {
-		case BLKIF_OP_READ:
-			blkif->st_rd_req++;
-			dispatch_rw_block_io(blkif, &req, pending_req);
-			break;
-		case BLKIF_OP_FLUSH_DISKCACHE:
-			blkif->st_f_req++;
-			/* fall through */
-		case BLKIF_OP_WRITE:
-			blkif->st_wr_req++;
-			dispatch_rw_block_io(blkif, &req, pending_req);
+		if (dispatch_rw_block_io(blkif, &req, pending_req))
 			break;
-		case BLKIF_OP_WRITE_BARRIER:
-		default:
-			/* A good sign something is wrong: sleep for a while to
-			 * avoid excessive CPU consumption by a bad guest. */
-			msleep(1);
-			DPRINTK("error: unknown block io operation [%d]\n",
-				req.operation);
-			make_response(blkif, req.id, req.operation,
-				      BLKIF_RSP_ERROR);
-			free_req(pending_req);
-			break;
-		}
 
 		/* Yield point for this unbounded loop. */
 		cond_resched();
@@ -535,7 +513,7 @@ static int do_block_io_op(struct blkif_st *blkif)
  * Transumation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlaying storage.
  */
-static void dispatch_rw_block_io(struct blkif_st *blkif,
-				 struct blkif_request *req,
-				 struct pending_req *pending_req)
+static int dispatch_rw_block_io(struct blkif_st *blkif,
+				struct blkif_request *req,
+				struct pending_req *pending_req)
 {
@@ -550,22 +528,25 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
 	switch (req->operation) {
 	case BLKIF_OP_READ:
+		blkif->st_rd_req++;
 		operation = READ;
 		break;
 	case BLKIF_OP_WRITE:
+		blkif->st_wr_req++;
 		operation = WRITE_ODIRECT;
 		break;
 	case BLKIF_OP_FLUSH_DISKCACHE:
+		blkif->st_f_req++;
 		operation = WRITE_FLUSH;
 		/* The frontend likes to set this to -1, which vbd_translate
 		 * is alergic too. */
 		req->u.rw.sector_number = 0;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
-	/* Should never get here. */
 	default:
 		operation = 0; /* make gcc happy */
-		BUG();
+		goto fail_response;
+		break;
 	}
 
 	/* Check that the number of segments is sane. */
@@ -677,7 +658,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	else if (operation == WRITE || operation == WRITE_FLUSH)
 		blkif->st_wr_sect += preq.nr_sects;
 
-	return;
+	return 0;
 
  fail_flush:
 	xen_blkbk_unmap(pending_req);
@@ -686,14 +667,14 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
 	free_req(pending_req);
 	msleep(1); /* back off a bit */
-	return;
+	return -EIO;
 
  fail_put_bio:
 	for (i = 0; i < (nbio-1); i++)
 		bio_put(biolist[i]);
 	__end_block_io_op(pending_req, -EINVAL);
 	msleep(1); /* back off a bit */
-	return;
+	return -EIO;
 }
 
 
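Two behavioral consequences of the squash are worth noting, both visible in the hunks above. First, the per-operation statistics (st_rd_req, st_wr_req, st_f_req) are now bumped inside dispatch_rw_block_io rather than by the caller. Second, an unknown operation no longer triggers BUG(): it takes the fail_response path, which answers the frontend with BLKIF_RSP_ERROR, frees the pending request, and sleeps briefly to back off, while the -EIO return value makes do_block_io_op stop pulling further requests off the ring.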