author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-04-20 11:50:43 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-04-20 11:58:03 -0400
commit     8b6bf747d70e5bac1a34c8fd773230e1cfdd7546
tree       187680a66902c915b92a5cc42892b71aed97895f /drivers/block/xen-blkback/blkback.c
parent     42c7841d171a2fe32005738dfebd724a90921496
xen/blkback: Prefix exposed functions with xen_

Also shorten names containing "blkback" to "blkbk". This makes the symbol
table (if compiled into the kernel) much shorter, prettier, and easier to
search through.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
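
As a quick illustration of the convention, the backend's externally visible entry points end up carrying the xen_ prefix, while helpers whose names contained "blkback" become "blkbk" (e.g. xen_blkbk_barrier(), xen_blkbk_xenbus()). The prototypes below are only a sketch inferred from the call sites in the diff; the real declarations live in the driver's shared header, which this page does not show.

    /* Sketch only: prototypes inferred from how the renamed functions are
     * called in this diff, not copied from the driver's header. */
    #include <linux/interrupt.h>                      /* irqreturn_t */

    irqreturn_t xen_blkif_be_int(int irq, void *dev_id); /* was blkif_be_int()         */
    int xen_blkif_schedule(void *arg);                    /* was blkif_schedule()       */
    int xen_blkif_interface_init(void);                   /* was blkif_interface_init() */
    int xen_blkif_xenbus_init(void);                      /* was blkif_xenbus_init()    */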
Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 806c2c947c63..c4bc85e69d33 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -53,13 +53,13 @@
  * pulled from a communication ring are quite likely to end up being part of
  * the same scatter/gather request at the disc.
  *
- * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
+ * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
  *
  * This will increase the chances of being able to write whole tracks.
  * 64 should be enough to keep us competitive with Linux.
  */
-static int blkif_reqs = 64;
-module_param_named(reqs, blkif_reqs, int, 0);
+static int xen_blkif_reqs = 64;
+module_param_named(reqs, xen_blkif_reqs, int, 0);
 MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
 
 /* Run-time switchable: /sys/module/blkback/parameters/ */
@@ -196,7 +196,7 @@ static void vbd_resize(struct blkif_st *blkif)
         struct vbd *vbd = &blkif->vbd;
         struct xenbus_transaction xbt;
         int err;
-        struct xenbus_device *dev = blkback_xenbus(blkif->be);
+        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
         unsigned long long new_size = vbd_sz(vbd);
 
         printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
@@ -244,7 +244,7 @@ static void blkif_notify_work(struct blkif_st *blkif)
         wake_up(&blkif->wq);
 }
 
-irqreturn_t blkif_be_int(int irq, void *dev_id)
+irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 {
         blkif_notify_work(dev_id);
         return IRQ_HANDLED;
@@ -265,12 +265,12 @@ static void print_stats(struct blkif_st *blkif)
         blkif->st_oo_req = 0;
 }
 
-int blkif_schedule(void *arg)
+int xen_blkif_schedule(void *arg)
 {
         struct blkif_st *blkif = arg;
         struct vbd *vbd = &blkif->vbd;
 
-        blkif_get(blkif);
+        xen_blkif_get(blkif);
 
         if (debug_lvl)
                 printk(KERN_DEBUG "%s: started\n", current->comm);
@@ -305,7 +305,7 @@ int blkif_schedule(void *arg)
                 printk(KERN_DEBUG "%s: exiting\n", current->comm);
 
         blkif->xenblkd = NULL;
-        blkif_put(blkif);
+        xen_blkif_put(blkif);
 
         return 0;
 }
@@ -417,7 +417,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
         if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
             (error == -EOPNOTSUPP)) {
                 DPRINTK("blkback: write barrier op failed, not supported\n");
-                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
+                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
         } else if (error) {
                 DPRINTK("Buffer not up-to-date at end of operation, "
@@ -433,7 +433,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
                 xen_blkbk_unmap(pending_req);
                 make_response(pending_req->blkif, pending_req->id,
                               pending_req->operation, pending_req->status);
-                blkif_put(pending_req->blkif);
+                xen_blkif_put(pending_req->blkif);
                 free_req(pending_req);
         }
 }
@@ -619,7 +619,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
                 goto fail_flush;
 
         /* This corresponding blkif_put is done in __end_block_io_op */
-        blkif_get(blkif);
+        xen_blkif_get(blkif);
 
         for (i = 0; i < nseg; i++) {
                 while ((bio == NULL) ||
@@ -751,7 +751,7 @@ static void make_response(struct blkif_st *blkif, u64 id,
         notify_remote_via_irq(blkif->irq);
 }
 
-static int __init blkif_init(void)
+static int __init xen_blkif_init(void)
 {
         int i, mmap_pages;
         int rc = 0;
@@ -765,10 +765,10 @@ static int __init blkif_init(void)
                 return -ENOMEM;
         }
 
-        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
+        mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
         blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) *
-                                        blkif_reqs, GFP_KERNEL);
+                                        xen_blkif_reqs, GFP_KERNEL);
         blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
                                         mmap_pages, GFP_KERNEL);
         blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
@@ -788,7 +788,7 @@ static int __init blkif_init(void)
                         goto out_of_memory;
                 }
         }
-        rc = blkif_interface_init();
+        rc = xen_blkif_interface_init();
         if (rc)
                 goto failed_init;
 
@@ -798,11 +798,11 @@ static int __init blkif_init(void)
         spin_lock_init(&blkbk->pending_free_lock);
         init_waitqueue_head(&blkbk->pending_free_wq);
 
-        for (i = 0; i < blkif_reqs; i++)
+        for (i = 0; i < xen_blkif_reqs; i++)
                 list_add_tail(&blkbk->pending_reqs[i].free_list,
                               &blkbk->pending_free);
 
-        rc = blkif_xenbus_init();
+        rc = xen_blkif_xenbus_init();
         if (rc)
                 goto failed_init;
 
@@ -823,6 +823,6 @@ static int __init blkif_init(void)
         return rc;
 }
 
-module_init(blkif_init);
+module_init(xen_blkif_init);
 
 MODULE_LICENSE("Dual BSD/GPL");
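
The hunks in dispatch_rw_block_io() and __end_block_io_op() rely on a plain get/put reference count on the backend: a reference is taken before the bios are submitted and dropped once the request completes. A minimal sketch of how such a pair is commonly built follows; the struct layout and the xen_blkif_free() release hook are assumptions for illustration, not code from this commit.

    /*
     * Minimal sketch (assumptions, not from this commit): the usual kernel
     * pattern behind a get/put pair such as xen_blkif_get()/xen_blkif_put().
     */
    #include <linux/atomic.h>

    struct blkif_st {
            atomic_t refcnt;
            /* remaining backend state elided in this sketch */
    };

    /* Hypothetical release hook, invoked once the last reference is dropped. */
    static void xen_blkif_free(struct blkif_st *blkif)
    {
            /* tear down rings, free the structure, etc. */
    }

    static inline void xen_blkif_get(struct blkif_st *blkif)
    {
            atomic_inc(&blkif->refcnt);
    }

    static inline void xen_blkif_put(struct blkif_st *blkif)
    {
            if (atomic_dec_and_test(&blkif->refcnt))
                    xen_blkif_free(blkif);
    }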