author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2011-05-11 15:57:09 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2011-05-11 15:57:09 -0400
commit     01f37f2d53e14a05b7fc3601d182f31ac3b35847 (patch)
tree       b12e0eb77888710865ad577f3735a550f57a7242 /drivers/block
parent     3d68b39926b3b247d76cc4da0256e979b2b730e3 (diff)
xen/blkback: Fixed up comments and converted spaces to tabs.
Suggested-by: Ian Campbell <Ian.Campbell@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
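The conversion this patch applies throughout is the multi-line comment style from the kernel's CodingStyle, plus tab indentation. A minimal before/after sketch of the style change (illustrative text only, not a hunk from the patch):

	/* Old style: text begins on the opening line and continuation
	   lines carry no leading asterisk. */

	/*
	 * New style: the opener sits alone on its line, every middle
	 * line leads with a space-asterisk pair, and the closer lines
	 * up directly beneath them.
	 */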
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/xen-blkback/blkback.c  70
-rw-r--r--  drivers/block/xen-blkback/common.h   77
-rw-r--r--  drivers/block/xen-blkback/xenbus.c   39
3 files changed, 105 insertions, 81 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b9bdd9e43ab9..6808cc7d9c73 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -73,13 +73,13 @@ module_param(debug_lvl, int, 0644);
  * response queued for it, with the saved 'id' passed back.
  */
 struct pending_req {
-	struct blkif_st       *blkif;
-	u64                   id;
-	int                   nr_pages;
-	atomic_t              pendcnt;
-	unsigned short        operation;
-	int                   status;
-	struct list_head      free_list;
+	struct blkif_st		*blkif;
+	u64			id;
+	int			nr_pages;
+	atomic_t		pendcnt;
+	unsigned short		operation;
+	int			status;
+	struct list_head	free_list;
 };
 
 #define BLKBACK_INVALID_HANDLE (~0)
@@ -103,7 +103,8 @@ static struct xen_blkbk *blkbk;
  * Little helpful macro to figure out the index and virtual address of the
  * pending_pages[..]. For each 'pending_req' we have have up to
  * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
- * 10 and would index in the pending_pages[..]. */
+ * 10 and would index in the pending_pages[..].
+ */
 static inline int vaddr_pagenr(struct pending_req *req, int seg)
 {
 	return (req - blkbk->pending_reqs) *
@@ -167,8 +168,6 @@ static void free_req(struct pending_req *req)
 /*
  * Routines for managing virtual block devices (vbds).
  */
-
-
 static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
 			 int operation)
 {
@@ -315,7 +314,7 @@ struct seg_buf {
 /*
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
-*/
+ */
 static void xen_blkbk_unmap(struct pending_req *req)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -336,27 +335,32 @@ static void xen_blkbk_unmap(struct pending_req *req)
 	ret = HYPERVISOR_grant_table_op(
 		GNTTABOP_unmap_grant_ref, unmap, invcount);
 	BUG_ON(ret);
-	/* Note, we use invcount, so nr->pages, so we can't index
+	/*
+	 * Note, we use invcount, so nr->pages, so we can't index
 	 * using vaddr(req, i).
 	 */
 	for (i = 0; i < invcount; i++) {
 		ret = m2p_remove_override(
 			virt_to_page(unmap[i].host_addr), false);
 		if (ret) {
-			printk(KERN_ALERT "Failed to remove M2P override for " \
-				"%lx\n", (unsigned long)unmap[i].host_addr);
+			printk(KERN_ALERT "Failed to remove M2P override for %lx\n",
+			       (unsigned long)unmap[i].host_addr);
 			continue;
 		}
 	}
 }
-static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_req,
+
+static int xen_blkbk_map(struct blkif_request *req,
+			 struct pending_req *pending_req,
 			 struct seg_buf seg[])
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i;
 	int nseg = req->nr_segments;
 	int ret = 0;
-	/* Fill out preq.nr_sects with proper amount of sectors, and setup
+
+	/*
+	 * Fill out preq.nr_sects with proper amount of sectors, and setup
 	 * assign map[..] with the PFN of the page in our domain with the
 	 * corresponding grant reference for each page.
 	 */
@@ -367,13 +371,15 @@ static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_
 		if (pending_req->operation != BLKIF_OP_READ)
 			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-				  req->u.rw.seg[i].gref, pending_req->blkif->domid);
+				  req->u.rw.seg[i].gref,
+				  pending_req->blkif->domid);
 	}
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
 	BUG_ON(ret);
 
-	/* Now swizzel the MFN in our domain with the MFN from the other domain
+	/*
+	 * Now swizzle the MFN in our domain with the MFN from the other domain
 	 * so that when we access vaddr(pending_req,i) it has the contents of
 	 * the page from the other domain.
 	 */
@@ -423,7 +429,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 		pending_req->status = BLKIF_RSP_ERROR;
 	}
 
-	/* If all of the bio's have completed it is time to unmap
+	/*
+	 * If all of the bio's have completed it is time to unmap
 	 * the grant references associated with 'request' and provide
 	 * the proper response on the ring.
 	 */
@@ -510,8 +517,8 @@ static int do_block_io_op(struct blkif_st *blkif)
 }
 
 /*
- * Transumation of the 'struct blkif_request' to a proper 'struct bio'
- * and call the 'submit_bio' to pass it to the underlaying storage.
+ * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
+ * and call the 'submit_bio' to pass it to the underlying storage.
  */
 static int dispatch_rw_block_io(struct blkif_st *blkif,
 				struct blkif_request *req,
@@ -538,8 +545,10 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		blkif->st_f_req++;
 		operation = WRITE_FLUSH;
-		/* The frontend likes to set this to -1, which vbd_translate
-		 * is alergic too. */
+		/*
+		 * The frontend likes to set this to -1, which vbd_translate
+		 * is alergic too.
+		 */
 		req->u.rw.sector_number = 0;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
@@ -585,8 +594,11 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 			 preq.sector_number + preq.nr_sects, preq.dev);
 		goto fail_response;
 	}
-	/* This check _MUST_ be done after vbd_translate as the preq.bdev
-	 * is set there. */
+
+	/*
+	 * This check _MUST_ be done after vbd_translate as the preq.bdev
+	 * is set there.
+	 */
 	for (i = 0; i < nseg; i++) {
 		if (((int)preq.sector_number|(int)seg[i].nsec) &
 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
@@ -595,7 +607,9 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 			goto fail_response;
 		}
 	}
-	/* If we have failed at this point, we need to undo the M2P override,
+
+	/*
+	 * If we have failed at this point, we need to undo the M2P override,
 	 * set gnttab_set_unmap_op on all of the grant references and perform
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
@@ -638,8 +652,8 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 		bio->bi_end_io = end_block_io_op;
 	}
 
-
-	/* We set it one so that the last submit_bio does not have to call
+	/*
+	 * We set it one so that the last submit_bio does not have to call
 	 * atomic_inc.
 	 */
 	atomic_set(&pending_req->pendcnt, nbio);
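The final hunk's comment refers to a completion-counting idiom worth spelling out: pendcnt is preset to the total number of bios before any submit_bio() call, so the submission path needs no per-bio atomic_inc and only the last completion sees the counter reach zero. A condensed sketch of that flow, paraphrased from dispatch_rw_block_io() and __end_block_io_op() rather than quoted verbatim (biolist is an illustrative name for the array of submitted bios):

	/* Preset the counter once; no per-bio atomic_inc is needed. */
	atomic_set(&pending_req->pendcnt, nbio);
	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* In the bio completion handler: only the final bio drops the
	 * count to zero and triggers the unmap plus the ring response. */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		__end_block_io_op(pending_req, error);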
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index af93837e1295..e37dcf7f6b8e 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -47,53 +47,58 @@
 			 __FILE__ , __LINE__ , ## _a)
 
 struct vbd {
-	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
-	unsigned char  readonly;    /* Non-zero -> read-only */
-	unsigned char  type;        /* VDISK_xxx */
-	u32            pdevice;     /* phys device that this vbd maps to */
-	struct block_device *bdev;
-	sector_t       size;        /* Cached size parameter */
-	bool           flush_support;
+	/* What the domain refers to this vbd as. */
+	blkif_vdev_t		handle;
+	/* Non-zero -> read-only */
+	unsigned char		readonly;
+	/* VDISK_xxx */
+	unsigned char		type;
+	/* phys device that this vbd maps to. */
+	u32			pdevice;
+	struct block_device	*bdev;
+	/* Cached size parameter. */
+	sector_t		size;
+	bool			flush_support;
 };
 
 struct backend_info;
 
 struct blkif_st {
 	/* Unique identifier for this interface. */
 	domid_t			domid;
 	unsigned int		handle;
 	/* Physical parameters of the comms window. */
 	unsigned int		irq;
 	/* Comms information. */
 	enum blkif_protocol	blk_protocol;
 	union blkif_back_rings	blk_rings;
 	struct vm_struct	*blk_ring_area;
 	/* The VBD attached to this interface. */
 	struct vbd		vbd;
 	/* Back pointer to the backend_info. */
 	struct backend_info	*be;
 	/* Private fields. */
 	spinlock_t		blk_ring_lock;
 	atomic_t		refcnt;
 
 	wait_queue_head_t	wq;
 	/* One thread per one blkif. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
 
 	/* statistics */
 	unsigned long		st_print;
 	int			st_rd_req;
 	int			st_wr_req;
 	int			st_oo_req;
 	int			st_f_req;
 	int			st_rd_sect;
 	int			st_wr_sect;
 
 	wait_queue_head_t	waiting_to_free;
 
 	grant_handle_t		shmem_handle;
 	grant_ref_t		shmem_ref;
 };
 
 
@@ -109,10 +114,10 @@ struct blkif_st {
 	} while (0)
 
 struct phys_req {
-	unsigned short       dev;
-	unsigned short       nr_sects;
-	struct block_device *bdev;
-	blkif_sector_t       sector_number;
+	unsigned short		dev;
+	unsigned short		nr_sects;
+	struct block_device	*bdev;
+	blkif_sector_t		sector_number;
 };
 int xen_blkif_interface_init(void);
 
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 9adcf806f83f..0cda406b4edb 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -27,12 +27,12 @@
 			 __func__, __LINE__, ##args)
 
 struct backend_info {
-	struct xenbus_device *dev;
-	struct blkif_st *blkif;
-	struct xenbus_watch backend_watch;
-	unsigned major;
-	unsigned minor;
-	char *mode;
+	struct xenbus_device	*dev;
+	struct blkif_st		*blkif;
+	struct xenbus_watch	backend_watch;
+	unsigned		major;
+	unsigned		minor;
+	char			*mode;
 };
 
 static struct kmem_cache *xen_blkif_cachep;
@@ -425,7 +425,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 	return err;
 }
 
-/**
+/*
  * Entry point to this code when a new device is created. Allocate the basic
  * structures, and watch the store waiting for the hotplug scripts to tell us
  * the device's physical major and minor numbers. Switch to InitWait.
@@ -473,7 +473,7 @@ fail:
 }
 
 
-/**
+/*
  * Callback received when the hotplug scripts have placed the physical-device
  * node. Read it and the mode node, and create a vbd. If the frontend is
  * ready, connect.
@@ -495,9 +495,11 @@ static void backend_changed(struct xenbus_watch *watch,
 	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
 			   &major, &minor);
 	if (XENBUS_EXIST_ERR(err)) {
-		/* Since this watch will fire once immediately after it is
-		   registered, we expect this. Ignore it, and wait for the
-		   hotplug scripts. */
+		/*
+		 * Since this watch will fire once immediately after it is
+		 * registered, we expect this. Ignore it, and wait for the
+		 * hotplug scripts.
+		 */
 		return;
 	}
 	if (err != 2) {
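A note on the err != 2 test at the end of this hunk: xenbus_scanf() follows sscanf() conventions and returns the number of fields matched, so anything other than 2 here means the physical-device node was malformed rather than merely absent. A hedged sketch of the surrounding error handling (paraphrased, not quoted from the file):

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device",
			   "%x:%x", &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/* Node not written yet; the watch fires again once the
		 * hotplug scripts create it. */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}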
@@ -562,7 +564,7 @@ static void backend_changed(struct xenbus_watch *watch,
 }
 
 
-/**
+/*
  * Callback received when the frontend's state changes.
  */
 static void frontend_changed(struct xenbus_device *dev,
@@ -584,13 +586,16 @@ static void frontend_changed(struct xenbus_device *dev,
 
 	case XenbusStateInitialised:
 	case XenbusStateConnected:
-		/* Ensure we connect even when two watches fire in
-		   close successsion and we miss the intermediate value
-		   of frontend_state. */
+		/*
+		 * Ensure we connect even when two watches fire in
+		 * close successsion and we miss the intermediate value
+		 * of frontend_state.
+		 */
 		if (dev->state == XenbusStateConnected)
 			break;
 
-		/* Enforce precondition before potential leak point.
+		/*
+		 * Enforce precondition before potential leak point.
 		 * blkif_disconnect() is idempotent.
 		 */
 		xen_blkif_disconnect(be->blkif);
@@ -627,7 +632,7 @@ static void frontend_changed(struct xenbus_device *dev,
 /* ** Connection ** */
 
 
-/**
+/*
  * Write the physical details regarding the block device to the store, and
  * switch to Connected state.
  */