Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--  drivers/block/xen-blkback/blkback.c  62
1 file changed, 31 insertions(+), 31 deletions(-)
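The patch below leans on the kernel's `pr_fmt` convention: every `pr_info()`, `pr_warn()`, `pr_debug()` and friends wraps its format string in `pr_fmt()` before passing it to `printk()`, so defining `pr_fmt(fmt)` as `"xen-blkback: " fmt` at the top of the file (before the includes) prefixes every message automatically and the hand-written `DRV_PFX` concatenations become redundant. The following is a minimal userspace sketch of that mechanism, not kernel code; the `printf`-based `pr_info` stand-in is an assumption for illustration only.

```c
#include <stdio.h>

/*
 * Per-file prefix, defined before the "logging header" below -- the
 * same ordering blkback.c uses (pr_fmt before any includes that need it).
 */
#define pr_fmt(fmt) "xen-blkback: " fmt

/*
 * Userspace stand-in for the kernel's pr_info(): the format string is
 * passed through pr_fmt() at compile time, so the prefix is concatenated
 * into the string literal and every call site gets it for free.
 */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints: "xen-blkback: VBD Resize: new size 2048" */
	pr_info("VBD Resize: new size %llu\n", 2048ULL);
	return 0;
}
```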
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 2a04d341e598..bd2b3bbbb22c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -34,6 +34,8 @@
  * IN THE SOFTWARE.
  */
 
+#define pr_fmt(fmt) "xen-blkback: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
@@ -211,7 +213,7 @@ static int add_persistent_gnt(struct xen_blkif *blkif,
 		else if (persistent_gnt->gnt > this->gnt)
 			new = &((*new)->rb_right);
 		else {
-			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
 			return -EINVAL;
 		}
 	}
@@ -242,7 +244,7 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
 			node = node->rb_right;
 		else {
 			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
-				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+				pr_alert_ratelimited("requesting a grant already in use\n");
 				return NULL;
 			}
 			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
@@ -257,7 +259,7 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
 			       struct persistent_gnt *persistent_gnt)
 {
 	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
-		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+		pr_alert_ratelimited("freeing a grant already unused\n");
 	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
 	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
 	atomic_dec(&blkif->persistent_gnt_in_use);
@@ -374,7 +376,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 	}
 
 	if (work_pending(&blkif->persistent_purge_work)) {
-		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
 		return;
 	}
 
@@ -396,7 +398,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 
 	total = num_clean;
 
-	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+	pr_debug("Going to purge %u persistent grants\n", num_clean);
 
 	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
@@ -428,13 +430,13 @@ purge_list:
 	 * with the requested num
 	 */
 	if (!scan_used && !clean_used) {
-		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+		pr_debug("Still missing %u purged frames\n", num_clean);
 		scan_used = true;
 		goto purge_list;
 	}
 finished:
 	if (!clean_used) {
-		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+		pr_debug("Finished scanning for grants to clean, removing used flag\n");
 		clean_used = true;
 		goto purge_list;
 	}
@@ -444,7 +446,7 @@ finished:
 
 	/* We can defer this work */
 	schedule_work(&blkif->persistent_purge_work);
-	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+	pr_debug("Purged %u/%u\n", (total - num_clean), total);
 	return;
 }
 
@@ -520,20 +522,20 @@ static void xen_vbd_resize(struct xen_blkif *blkif)
 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
 	unsigned long long new_size = vbd_sz(vbd);
 
-	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
+	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
-	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
+	pr_info("VBD Resize: new size %llu\n", new_size);
 	vbd->size = new_size;
 again:
 	err = xenbus_transaction_start(&xbt);
 	if (err) {
-		pr_warn(DRV_PFX "Error starting transaction");
+		pr_warn("Error starting transaction\n");
 		return;
 	}
 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
 			    (unsigned long long)vbd_sz(vbd));
 	if (err) {
-		pr_warn(DRV_PFX "Error writing new size");
+		pr_warn("Error writing new size\n");
 		goto abort;
 	}
 	/*
@@ -543,7 +545,7 @@ again:
 	 */
 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
 	if (err) {
-		pr_warn(DRV_PFX "Error writing the state");
+		pr_warn("Error writing the state\n");
 		goto abort;
 	}
 
@@ -551,7 +553,7 @@ again:
 	if (err == -EAGAIN)
 		goto again;
 	if (err)
-		pr_warn(DRV_PFX "Error ending transaction");
+		pr_warn("Error ending transaction\n");
 	return;
 abort:
 	xenbus_transaction_end(xbt, 1);
@@ -578,7 +580,7 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 
 static void print_stats(struct xen_blkif *blkif)
 {
-	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
+	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
 		" | ds %4llu | pg: %4u/%4d\n",
 		current->comm, blkif->st_oo_req,
 		blkif->st_rd_req, blkif->st_wr_req,
@@ -855,7 +857,7 @@ again:
 			/* This is a newly mapped grant */
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
-				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+				pr_debug("invalid buffer -- could not remap it\n");
 				put_free_pages(blkif, &pages[seg_idx]->page, 1);
 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
@@ -891,14 +893,14 @@ again:
 				goto next;
 			}
 			pages[seg_idx]->persistent_gnt = persistent_gnt;
-			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
 				 xen_blkif_max_pgrants);
 			goto next;
 		}
 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
 			blkif->vbd.overflow_max_grants = 1;
-			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
 				 blkif->domid, blkif->vbd.handle);
 		}
 		/*
@@ -916,7 +918,7 @@ next:
 	return ret;
 
 out_of_memory:
-	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
+	pr_alert("%s: out of memory\n", __func__);
 	put_free_pages(blkif, pages_to_gnt, segs_to_map);
 	return -ENOMEM;
 }
@@ -996,7 +998,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
 
 	err = xen_vbd_translate(&preq, blkif, WRITE);
 	if (err) {
-		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
 			preq.sector_number,
 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
 		goto fail_response;
@@ -1012,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
 				   GFP_KERNEL, secure);
 fail_response:
 	if (err == -EOPNOTSUPP) {
-		pr_debug(DRV_PFX "discard op failed, not supported\n");
+		pr_debug("discard op failed, not supported\n");
 		status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (err)
 		status = BLKIF_RSP_ERROR;
@@ -1056,16 +1058,16 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	/* An error fails the entire request. */
 	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
 	    (error == -EOPNOTSUPP)) {
-		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
+		pr_debug("flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
 		   (error == -EOPNOTSUPP)) {
-		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
+		pr_debug("write barrier op failed, not supported\n");
 		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
-		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
+		pr_debug("Buffer not up-to-date at end of operation,"
 			 " error=%d\n", error);
 		pending_req->status = BLKIF_RSP_ERROR;
 	}
@@ -1110,7 +1112,7 @@ __do_block_io_op(struct xen_blkif *blkif)
 
 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
 		rc = blk_rings->common.rsp_prod_pvt;
-		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
 			rp, rc, rp - rc, blkif->vbd.pdevice);
 		return -EACCES;
 	}
@@ -1217,8 +1219,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	if ((req->operation == BLKIF_OP_INDIRECT) &&
 	    (req_operation != BLKIF_OP_READ) &&
 	    (req_operation != BLKIF_OP_WRITE)) {
-		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
-			 req_operation);
+		pr_debug("Invalid indirect operation (%u)\n", req_operation);
 		goto fail_response;
 	}
 
@@ -1252,8 +1253,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
 		     (nseg > MAX_INDIRECT_SEGMENTS))) {
-		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
-			 nseg);
+		pr_debug("Bad number of segments in request (%d)\n", nseg);
 		/* Haven't submitted any bio's yet. */
 		goto fail_response;
 	}
@@ -1288,7 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	}
 
 	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
-		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
+		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
 			 operation == READ ? "read" : "write",
 			 preq.sector_number,
 			 preq.sector_number + preq.nr_sects,
@@ -1303,7 +1303,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	for (i = 0; i < nseg; i++) {
 		if (((int)preq.sector_number|(int)seg[i].nsec) &
 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
-			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
+			pr_debug("Misaligned I/O request from domain %d\n",
 				 blkif->domid);
 			goto fail_response;
 		}
