author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2011-04-14 17:42:07 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2011-04-14 18:26:34 -0400
commit     2e9977c21f7679d5f616132ae1f7857e932ccd19 (patch)
tree       b6dd46bbfa19a32be7aa907f4a06e9c97cf7bf72 /drivers/xen/blkback/blkback.c
parent     d6091b217dd4fdabc4a8cd6fa61775f1e3eb6efe (diff)
xen/blkback: Fix checkpatch warnings in blkback.c
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/xen/blkback/blkback.c')
-rw-r--r-- | drivers/xen/blkback/blkback.c | 81 |
1 file changed, 47 insertions, 34 deletions
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
index d07ad5318a85..2d413930f235 100644
--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -63,8 +63,8 @@ module_param_named(reqs, blkif_reqs, int, 0);
 MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
 
 /* Run-time switchable: /sys/module/blkback/parameters/ */
-static unsigned int log_stats = 0;
-static unsigned int debug_lvl = 0;
+static unsigned int log_stats;
+static unsigned int debug_lvl;
 module_param(log_stats, int, 0644);
 module_param(debug_lvl, int, 0644);
 
@@ -74,7 +74,7 @@ module_param(debug_lvl, int, 0644);
  * the pendcnt towards zero. When it hits zero, the specified domain has a
  * response queued for it, with the saved 'id' passed back.
  */
-typedef struct {
+struct pending_req {
 	struct blkif_st *blkif;
 	u64 id;
 	int nr_pages;
@@ -82,12 +82,12 @@ typedef struct {
 	unsigned short operation;
 	int status;
 	struct list_head free_list;
-} pending_req_t;
+};
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
 struct xen_blkbk {
-	pending_req_t *pending_reqs;
+	struct pending_req *pending_reqs;
 	/* List of all 'pending_req' available */
 	struct list_head pending_free;
 	/* And its spinlock. */
@@ -106,14 +106,15 @@ static struct xen_blkbk *blkbk;
  * pending_pages[..]. For each 'pending_req' we have have up to
  * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
  * 10 and would index in the pending_pages[..]. */
-static inline int vaddr_pagenr(pending_req_t *req, int seg)
+static inline int vaddr_pagenr(struct pending_req *req, int seg)
 {
-	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
+	return (req - blkbk->pending_reqs) *
+		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
 }
 
 #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
 
-static inline unsigned long vaddr(pending_req_t *req, int seg)
+static inline unsigned long vaddr(struct pending_req *req, int seg)
 {
 	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
 	return (unsigned long)pfn_to_kaddr(pfn);
@@ -126,21 +127,22 @@ static inline unsigned long vaddr(pending_req_t *req, int seg)
 static int do_block_io_op(struct blkif_st *blkif);
 static void dispatch_rw_block_io(struct blkif_st *blkif,
 				 struct blkif_request *req,
-				 pending_req_t *pending_req);
+				 struct pending_req *pending_req);
 static void make_response(struct blkif_st *blkif, u64 id,
 			  unsigned short op, int st);
 
 /*
  * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
  */
-static pending_req_t* alloc_req(void)
+static struct pending_req *alloc_req(void)
 {
-	pending_req_t *req = NULL;
+	struct pending_req *req = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
 	if (!list_empty(&blkbk->pending_free)) {
-		req = list_entry(blkbk->pending_free.next, pending_req_t, free_list);
+		req = list_entry(blkbk->pending_free.next, struct pending_req,
+				 free_list);
 		list_del(&req->free_list);
 	}
 	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
@@ -151,7 +153,7 @@ static pending_req_t* alloc_req(void)
  * Return the 'pending_req' structure back to the freepool. We also
  * wake up the thread if it was waiting for a free page.
  */
-static void free_req(pending_req_t *req)
+static void free_req(struct pending_req *req)
 {
 	unsigned long flags;
 	int was_empty;
@@ -200,7 +202,7 @@ static void plug_queue(struct blkif_st *blkif, struct block_device *bdev)
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
  */
-static void fast_flush_area(pending_req_t *req)
+static void fast_flush_area(struct pending_req *req)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int i, invcount = 0;
@@ -221,7 +223,8 @@ static void fast_flush_area(pending_req_t *req)
 		GNTTABOP_unmap_grant_ref, unmap, invcount);
 	BUG_ON(ret);
 	/* Note, we use invcount, so nr->pages, so we can't index
-	 * using vaddr(req, i). */
+	 * using vaddr(req, i).
+	 */
 	for (i = 0; i < invcount; i++) {
 		ret = m2p_remove_override(
 			virt_to_page(unmap[i].host_addr), false);
@@ -233,7 +236,7 @@ static void fast_flush_area(pending_req_t *req)
 	}
 }
 
-/******************************************************************
+/*
  * SCHEDULER FUNCTIONS
  */
 
@@ -269,7 +272,8 @@ int blkif_schedule(void *arg)
 			blkif->waiting_reqs || kthread_should_stop());
 		wait_event_interruptible(
 			blkbk->pending_free_wq,
-			!list_empty(&blkbk->pending_free) || kthread_should_stop());
+			!list_empty(&blkbk->pending_free) ||
+			kthread_should_stop());
 
 		blkif->waiting_reqs = 0;
 		smp_mb(); /* clear flag *before* checking for work */
@@ -297,7 +301,7 @@ int blkif_schedule(void *arg)
  * Completion callback on the bio's. Called as bh->b_end_io()
  */
 
-static void __end_block_io_op(pending_req_t *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req, int error)
 {
 	/* An error fails the entire request. */
 	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
@@ -313,7 +317,8 @@ static void __end_block_io_op(pending_req_t *pending_req, int error)
 
 	/* If all of the bio's have completed it is time to unmap
 	 * the grant references associated with 'request' and provide
-	 * the proper response on the ring. */
+	 * the proper response on the ring.
+	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
 		fast_flush_area(pending_req);
 		make_response(pending_req->blkif, pending_req->id,
@@ -360,7 +365,7 @@ static int do_block_io_op(struct blkif_st *blkif)
 {
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
 	struct blkif_request req;
-	pending_req_t *pending_req;
+	struct pending_req *pending_req;
 	RING_IDX rc, rp;
 	int more_to_do = 0;
 
@@ -440,7 +445,7 @@ static int do_block_io_op(struct blkif_st *blkif)
  */
 static void dispatch_rw_block_io(struct blkif_st *blkif,
 				 struct blkif_request *req,
-				 pending_req_t *pending_req)
+				 struct pending_req *pending_req)
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct phys_req preq;
@@ -487,7 +492,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
 	/* Fill out preq.nr_sects with proper amount of sectors, and setup
 	 * assign map[..] with the PFN of the page in our domain with the
-	 * corresponding grant reference for each page.*/
+	 * corresponding grant reference for each page.
+	 */
 	for (i = 0; i < nseg; i++) {
 		uint32_t flags;
 
@@ -509,8 +515,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	BUG_ON(ret);
 
 	/* Now swizzel the MFN in our domain with the MFN from the other domain
-	 * so that when we access vaddr(pending_req,i) it has the contents of the
-	 * page from the other domain. */
+	 * so that when we access vaddr(pending_req,i) it has the contents of
+	 * the page from the other domain.
+	 */
 	for (i = 0; i < nseg; i++) {
 		if (unlikely(map[i].status != 0)) {
 			DPRINTK("invalid buffer -- could not remap it\n");
@@ -522,12 +529,13 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
 		if (ret)
 			continue;
 
 		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
 			blkbk->pending_page(pending_req, i), false);
 		if (ret) {
 			printk(KERN_ALERT "Failed to install M2P override for"\
-				" %lx (ret: %d)\n", (unsigned long)map[i].dev_bus_addr, ret);
+				" %lx (ret: %d)\n", (unsigned long)
+				map[i].dev_bus_addr, ret);
 			/* We could switch over to GNTTABOP_copy */
 			continue;
 		}
@@ -536,9 +544,11 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 			(req->u.rw.seg[i].first_sect << 9);
 	}
 
-	/* If we have failed at this point, we need to undo the M2P override, set
-	 * gnttab_set_unmap_op on all of the grant references and perform the
-	 * hypercall to unmap the grants - that is all done in fast_flush_area. */
+	/* If we have failed at this point, we need to undo the M2P override,
+	 * set gnttab_set_unmap_op on all of the grant references and perform
+	 * the hypercall to unmap the grants - that is all done in
+	 * fast_flush_area.
+	 */
 	if (ret)
 		goto fail_flush;
 
@@ -554,7 +564,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	plug_queue(blkif, preq.bdev);
 
 	/* We set it one so that the last submit_bio does not have to call
-	 * atomic_inc. */
+	 * atomic_inc.
+	 */
 	atomic_set(&pending_req->pendcnt, 1);
 	blkif_get(blkif);
 
@@ -575,7 +586,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 			atomic_inc(&pending_req->pendcnt);
 			submit_bio(operation, bio);
 		}
-	
+
 		bio = bio_alloc(GFP_KERNEL, nseg-i);
 		if (unlikely(bio == NULL))
 			goto fail_put_bio;
@@ -694,7 +705,7 @@ static int __init blkif_init(void)
 	if (!xen_pv_domain())
 		return -ENODEV;
 
-	blkbk = (struct xen_blkbk *)kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
+	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
 	if (!blkbk) {
 		printk(KERN_ALERT "%s: out of memory!\n", __func__);
 		return -ENOMEM;
@@ -709,7 +720,8 @@ static int __init blkif_init(void)
 	blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
 					mmap_pages, GFP_KERNEL);
 
-	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) {
+	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
+	    !blkbk->pending_pages) {
 		rc = -ENOMEM;
 		goto out_of_memory;
 	}
@@ -733,7 +745,8 @@ static int __init blkif_init(void)
 	init_waitqueue_head(&blkbk->pending_free_wq);
 
 	for (i = 0; i < blkif_reqs; i++)
-		list_add_tail(&blkbk->pending_reqs[i].free_list,
+		list_add_tail(&blkbk->pending_reqs[i].free_list,
+			      &blkbk->pending_free);
 
 	rc = blkif_xenbus_init();
 	if (rc)
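
For reference, the warnings cleaned up above are common checkpatch.pl complaints: a typedef'd struct (pending_req_t), statics explicitly initialized to 0, a cast on the result of kzalloc(), block comments whose closing marker shares a line with text, and lines over 80 columns. The sketch below is a minimal user-space illustration of the struct, static, comment, and allocation patterns only; the foo_* identifiers are hypothetical and nothing here is taken from blkback.c.

```c
#include <stdlib.h>

/* checkpatch: statics are zeroed by the loader, so drop the "= 0". */
static unsigned int log_level;

/* checkpatch: use a plain struct tag instead of a typedef'd struct. */
struct foo_req {
	int nr_pages;
	int status;
};

struct foo_backend {
	struct foo_req *pending_reqs;
};

static struct foo_backend *alloc_backend(void)
{
	/* calloc() stands in for the kernel's kzalloc() here; either way
	 * the void * result needs no cast, and a multi-line comment ends
	 * with its closing marker on a line of its own.
	 */
	struct foo_backend *be = calloc(1, sizeof(*be));

	if (!be)
		return NULL;
	be->pending_reqs = calloc(16, sizeof(be->pending_reqs[0]));
	if (!be->pending_reqs) {
		free(be);
		return NULL;
	}
	return be;
}

int main(void)
{
	struct foo_backend *be = alloc_backend();

	if (!be)
		return 1;
	free(be->pending_reqs);
	free(be);
	return (int)log_level;
}
```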