author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-02-25 10:51:29 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-04-14 18:26:20 -0400
commit		e8e28871edf0d0adb0bd7e597c044cbaf7a7f137
tree		6843e6ed2df4998294b45036cee7ca5cfa194c58 /drivers/xen/blkback/blkback.c
parent		efe08a3eecf15ab022afba48c691d02c7de2fbbb
xen/blkback: Move global/static variables into struct xen_blkbk.
Bundle the lot of discrete variables into a single structure.
This is based on what was done in the xen-netback driver:
xen: netback: Move global/static variables into struct xen_netbk.
(094944631cc5a9d6e623302c987f78117c0bf7ac)
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
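
The shape of the change, as a minimal kernel-style sketch (hypothetical names; the real struct xen_blkbk carries six members, see the first hunk below): compile-time initializers such as DEFINE_SPINLOCK() and DECLARE_WAIT_QUEUE_HEAD() cannot be applied to members of a dynamically allocated struct, so bundling the state forces explicit runtime initialization, which is what the new spin_lock_init()/init_waitqueue_head() calls in blkif_init() provide.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

/* Before: discrete file-scope state, set up by compile-time initializers.
 *
 *	static LIST_HEAD(pending_free);
 *	static DEFINE_SPINLOCK(pending_free_lock);
 *	static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
 *
 * After: the same state bundled into one heap-allocated context struct.
 * demo_blkbk/demo_init are hypothetical reduced stand-ins for xen_blkbk
 * and blkif_init(). */
struct demo_blkbk {
	struct list_head	pending_free;
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
};
static struct demo_blkbk *demo;

static int __init demo_init(void)
{
	demo = vmalloc(sizeof(*demo));
	if (!demo)
		return -ENOMEM;
	/* The macro initializers above become runtime calls. */
	INIT_LIST_HEAD(&demo->pending_free);
	spin_lock_init(&demo->pending_free_lock);
	init_waitqueue_head(&demo->pending_free_wq);
	return 0;
}

The patch likewise allocates the context with vmalloc() rather than making it a static struct, so all of the back-end state can later be torn down or instantiated per-instance.
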
Diffstat (limited to 'drivers/xen/blkback/blkback.c')
-rw-r--r--	drivers/xen/blkback/blkback.c | 82
1 file changed, 48 insertions(+), 34 deletions(-)
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
index 7c9421cc5991..c08875b0ad64 100644
--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -84,31 +84,34 @@ typedef struct {
 	struct list_head free_list;
 } pending_req_t;
 
-static pending_req_t *pending_reqs;
-static struct list_head pending_free;
-static DEFINE_SPINLOCK(pending_free_lock);
-static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
-
 #define BLKBACK_INVALID_HANDLE (~0)
 
-static struct page **pending_pages;
-static grant_handle_t *pending_grant_handles;
+struct xen_blkbk {
+	pending_req_t		*pending_reqs;
+	struct list_head	pending_free;
+	spinlock_t		pending_free_lock;
+	wait_queue_head_t	pending_free_wq;
+	struct page		**pending_pages;
+	grant_handle_t		*pending_grant_handles;
+};
+
+static struct xen_blkbk *blkbk;
 
 static inline int vaddr_pagenr(pending_req_t *req, int seg)
 {
-	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
+	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
 }
 
 #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
 
 static inline unsigned long vaddr(pending_req_t *req, int seg)
 {
-	unsigned long pfn = page_to_pfn(pending_page(req, seg));
+	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
 	return (unsigned long)pfn_to_kaddr(pfn);
 }
 
 #define pending_handle(_req, _seg) \
-	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
+	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
 
 
 static int do_block_io_op(blkif_t *blkif);
@@ -126,12 +129,12 @@ static pending_req_t* alloc_req(void)
 	pending_req_t *req = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pending_free_lock, flags);
-	if (!list_empty(&pending_free)) {
-		req = list_entry(pending_free.next, pending_req_t, free_list);
+	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
+	if (!list_empty(&blkbk->pending_free)) {
+		req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
 		list_del(&req->free_list);
 	}
-	spin_unlock_irqrestore(&pending_free_lock, flags);
+	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
 	return req;
 }
 
@@ -140,12 +143,12 @@ static void free_req(pending_req_t *req)
 	unsigned long flags;
 	int was_empty;
 
-	spin_lock_irqsave(&pending_free_lock, flags);
-	was_empty = list_empty(&pending_free);
-	list_add(&req->free_list, &pending_free);
-	spin_unlock_irqrestore(&pending_free_lock, flags);
+	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
+	was_empty = list_empty(&blkbk->pending_free);
+	list_add(&req->free_list, &blkbk->pending_free);
+	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
 	if (was_empty)
-		wake_up(&pending_free_wq);
+		wake_up(&blkbk->pending_free_wq);
 }
 
 static void unplug_queue(blkif_t *blkif)
@@ -226,8 +229,8 @@ int blkif_schedule(void *arg)
 			blkif->wq,
 			blkif->waiting_reqs || kthread_should_stop());
 		wait_event_interruptible(
-			pending_free_wq,
-			!list_empty(&pending_free) || kthread_should_stop());
+			blkbk->pending_free_wq,
+			!list_empty(&blkbk->pending_free) || kthread_should_stop());
 
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
@@ -466,7 +469,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 			continue;
 
 		set_phys_to_machine(
-			page_to_pfn(pending_page(pending_req, i)),
+			page_to_pfn(blkbk->pending_page(pending_req, i)),
 			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
 		seg[i].buf  = map[i].dev_bus_addr |
 			(req->seg[i].first_sect << 9);
@@ -497,7 +500,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 
 		while ((bio == NULL) ||
 		       (bio_add_page(bio,
-				     pending_page(pending_req, i),
+				     blkbk->pending_page(pending_req, i),
 				     seg[i].nsec << 9,
 				     seg[i].buf & ~PAGE_MASK) == 0)) {
 			if (bio) {
@@ -624,31 +627,40 @@ static int __init blkif_init(void)
 	if (!xen_pv_domain())
 		return -ENODEV;
 
+	blkbk = (struct xen_blkbk *)vmalloc(sizeof(struct xen_blkbk));
+	if (!blkbk) {
+		printk(KERN_ALERT "%s: out of memory!\n", __func__);
+		return -ENOMEM;
+	}
+
 	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
-					blkif_reqs, GFP_KERNEL);
-	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
-					mmap_pages, GFP_KERNEL);
-	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
+	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
+					blkif_reqs, GFP_KERNEL);
+	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
+					mmap_pages, GFP_KERNEL);
+	blkbk->pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
 
-	if (!pending_reqs || !pending_grant_handles || !pending_pages) {
+	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) {
 		rc = -ENOMEM;
 		goto out_of_memory;
 	}
 
 	for (i = 0; i < mmap_pages; i++)
-		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
+		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
 
 	rc = blkif_interface_init();
 	if (rc)
 		goto failed_init;
 
-	memset(pending_reqs, 0, sizeof(pending_reqs));
-	INIT_LIST_HEAD(&pending_free);
+	memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs));
+
+	INIT_LIST_HEAD(&blkbk->pending_free);
+	spin_lock_init(&blkbk->pending_free_lock);
+	init_waitqueue_head(&blkbk->pending_free_wq);
 
 	for (i = 0; i < blkif_reqs; i++)
-		list_add_tail(&pending_reqs[i].free_list, &pending_free);
+		list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free);
 
 	rc = blkif_xenbus_init();
 	if (rc)
@@ -659,9 +671,11 @@ static int __init blkif_init(void)
  out_of_memory:
 	printk(KERN_ERR "%s: out of memory\n", __func__);
  failed_init:
-	kfree(pending_reqs);
-	kfree(pending_grant_handles);
-	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
+	kfree(blkbk->pending_reqs);
+	kfree(blkbk->pending_grant_handles);
+	free_empty_pages_and_pagevec(blkbk->pending_pages, mmap_pages);
+	vfree(blkbk);
+	blkbk = NULL;
 	return rc;
 }
 
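
One preprocessor subtlety in the hunks above: pending_page() is still defined as pending_pages[vaddr_pagenr(req, seg)], with no blkbk-> inside the macro body, yet the call sites are rewritten to blkbk->pending_page(pending_req, i). This compiles because the macro expands after the -> token, yielding blkbk->pending_pages[vaddr_pagenr(pending_req, i)], which names the struct member. A minimal user-space sketch of the same trick (hypothetical names, not the driver's code):

#include <stdio.h>

struct ctx {
	int pages[4];
};

/* The macro body names a struct member, not a variable in scope here. */
#define page_at(i) pages[(i)]

int main(void)
{
	struct ctx c = { .pages = { 10, 20, 30, 40 } };
	struct ctx *ctxp = &c;

	/* Expands to: ctxp->pages[(2)] */
	printf("%d\n", ctxp->page_at(2));	/* prints 30 */
	return 0;
}

It works, but it only does so as long as every caller goes through the same blkbk pointer, which is the reason the struct and the pointer are introduced together in the first hunk.
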