Diffstat (limited to 'drivers/block/xen-blkback/blkback.c')
-rw-r--r--	drivers/block/xen-blkback/blkback.c	81
1 file changed, 52 insertions(+), 29 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index da18046d0e07..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 			!rb_next(&persistent_gnt->node)) {
-			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, NULL, pages,
+						segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
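
This hunk (and several below) follows blkback's usual batch-and-flush pattern: unmap operations accumulate in a fixed-size array and are flushed through a single gnttab_unmap_refs() call whenever the array fills or the walk ends. A minimal userspace sketch of that pattern; BATCH_MAX and flush_batch() are hypothetical stand-ins, not driver code:

#include <stdio.h>

#define BATCH_MAX 32		/* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

/* Hypothetical stand-in for gnttab_unmap_refs(): flush one batch. */
static void flush_batch(const int *refs, int count)
{
	printf("unmapping %d refs starting at %d\n", count, refs[0]);
}

int main(void)
{
	int batch[BATCH_MAX];
	int pending = 0;

	/* Accumulate work items; flush whenever the batch fills. */
	for (int ref = 0; ref < 100; ref++) {
		batch[pending++] = ref;
		if (pending == BATCH_MAX) {
			flush_batch(batch, pending);
			pending = 0;
		}
	}
	/* Flush the partial batch left at the end, as blkback does. */
	if (pending > 0)
		flush_batch(batch, pending);
	return 0;
}
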
@@ -298,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 	BUG_ON(num != 0);
 }
 
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, NULL, pages,
+						segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
 		BUG_ON(ret);
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
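
All of these call sites gain a NULL second argument: as of this series, gnttab_map_refs()/gnttab_unmap_refs() take an optional kmap_ops array between the grant ops and the page list, and NULL means no kernel-mapping override is needed for these pages. The declarations should read roughly as below (quoted from include/xen/grant_table.h from memory; verify against your tree):

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count);
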
@@ -373,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 
 	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
 
-	INIT_LIST_HEAD(&blkif->persistent_purge_list);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
 purge_list:
 	foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -418,7 +420,6 @@ finished:
 	blkif->vbd.overflow_max_grants = 0;
 
 	/* We can defer this work */
-	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
 	schedule_work(&blkif->persistent_purge_work);
 	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
 	return;
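
Dropping INIT_WORK() here pairs with the rename to xen_blkbk_unmap_purged_grants() above: the work item is presumably now initialized once at blkif allocation rather than re-initialized on every purge, which could race with a still-queued work item. A hedged sketch of the one-time setup; the placement in xen_blkif_alloc() is inferred from the rename and is not shown in this diff:

/* Assumed one-time setup at blkif allocation (e.g. xen_blkif_alloc()
 * in xenbus.c): initialize the work item and purge list exactly once,
 * so schedule_work() above never races with a re-INIT_WORK(). */
INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->persistent_purge_list);
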
@@ -623,9 +624,23 @@ purge_gnt_list:
 		print_stats(blkif);
 	}
 
-	/* Since we are shutting down remove all pages from the buffer */
-	shrink_free_pagepool(blkif, 0 /* All */);
+	/* Drain pending purge work */
+	flush_work(&blkif->persistent_purge_work);
 
+	if (log_stats)
+		print_stats(blkif);
+
+	blkif->xenblkd = NULL;
+	xen_blkif_put(blkif);
+
+	return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
 	/* Free all persistent grant pages */
 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
 		free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -634,13 +649,8 @@ purge_gnt_list:
 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 	blkif->persistent_gnt_c = 0;
 
-	if (log_stats)
-		print_stats(blkif);
-
-	blkif->xenblkd = NULL;
-	xen_blkif_put(blkif);
-
-	return 0;
+	/* Since we are shutting down remove all pages from the buffer */
+	shrink_free_pagepool(blkif, 0 /* All */);
 }
 
 /*
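
The two hunks above split the teardown in xen_blkif_schedule() in two: the kthread now only flushes pending purge work and drops its reference, while cache teardown moves into the new xen_blkbk_free_caches(), callable from outside the I/O thread. A hypothetical sketch of the presumed disconnect-path usage, not part of this diff:

/* Hypothetical caller in the shutdown path (e.g. xen_blkif_disconnect()
 * in xenbus.c): stop the I/O kthread first, then tear down the caches. */
if (blkif->xenblkd) {
	kthread_stop(blkif->xenblkd);
	blkif->xenblkd = NULL;
}
xen_blkbk_free_caches(blkif);
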
@@ -668,14 +678,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 			    GNTMAP_host_map, pages[i]->handle);
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+						invcount);
 			BUG_ON(ret);
 			put_free_pages(blkif, unmap_pages, invcount);
 			invcount = 0;
 		}
 	}
 	if (invcount) {
-		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
 		BUG_ON(ret);
 		put_free_pages(blkif, unmap_pages, invcount);
 	}
@@ -737,7 +748,7 @@ again:
 	}
 
 	if (segs_to_map) {
-		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
+		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
 		BUG_ON(ret);
 	}
 
@@ -835,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	nseg = pending_req->nr_pages;
 	indirect_grefs = INDIRECT_PAGES(nseg);
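
The _aligned segment variant disappears here; indirect descriptors now use the plain ring ABI layout. For reference, the segment descriptor looks roughly like this (from include/xen/interface/io/blkif.h, quoted from memory; verify against your tree):

struct blkif_request_segment {
	grant_ref_t gref;	/* reference to I/O buffer frame */
	uint8_t     first_sect;	/* first sector in frame to transfer */
	uint8_t     last_sect;	/* last sector in frame to transfer */
};
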
@@ -931,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
 	atomic_set(&blkif->drain, 1);
 	do {
-		/* The initial value is one, and one refcnt taken at the
-		 * start of the xen_blkif_schedule thread. */
-		if (atomic_read(&blkif->refcnt) <= 2)
+		if (atomic_read(&blkif->inflight) == 0)
 			break;
 		wait_for_completion_interruptible_timeout(
 			&blkif->drain_complete, HZ);
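
Draining now waits on a dedicated in-flight request counter instead of inferring idleness from magic refcount values. A toy single-threaded C model of the new handshake; submit() and complete_one() are stand-ins, and the kernel sleeps on drain_complete rather than printing:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int inflight;	/* requests issued but not yet completed */
static atomic_int drain;	/* nonzero while a barrier drain is pending */

static void submit(void)
{
	atomic_fetch_add(&inflight, 1);	/* dispatch_rw_block_io() side */
}

static void complete_one(void)
{
	/* Mirrors atomic_dec_and_test(&blkif->inflight) plus the drain
	 * check in __end_block_io_op(): the last completion during a
	 * drain signals the waiter. */
	if (atomic_fetch_sub(&inflight, 1) == 1 && atomic_load(&drain))
		printf("drain complete\n");
}

int main(void)
{
	submit();
	submit();
	atomic_store(&drain, 1);	/* xen_blk_drain_io() sets this */
	complete_one();
	complete_one();			/* last one reports the drain */
	return 0;
}
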
@@ -973,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req->blkif,
+		struct xen_blkif *blkif = pending_req->blkif;
+
+		xen_blkbk_unmap(blkif,
 				pending_req->segments,
 				pending_req->nr_pages);
-		make_response(pending_req->blkif, pending_req->id,
+		make_response(blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
-		xen_blkif_put(pending_req->blkif);
-		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-			if (atomic_read(&pending_req->blkif->drain))
-				complete(&pending_req->blkif->drain_complete);
+		free_req(blkif, pending_req);
+		/*
+		 * Make sure the request is freed before releasing blkif,
+		 * or there could be a race between free_req and the
+		 * cleanup done in xen_blkif_free during shutdown.
+		 *
+		 * NB: The fact that we might try to wake up pending_free_wq
+		 * before drain_complete (in case there's a drain going on)
+		 * it's not a problem with our current implementation
+		 * because we can assure there's no thread waiting on
+		 * pending_free_wq if there's a drain going on, but it has
+		 * to be taken into account if the current model is changed.
+		 */
+		if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+			complete(&blkif->drain_complete);
 		}
-		free_req(pending_req->blkif, pending_req);
+		xen_blkif_put(blkif);
 	}
 }
 
@@ -1237,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
 	 */
 	xen_blkif_get(blkif);
+	atomic_inc(&blkif->inflight);
 
 	for (i = 0; i < nseg; i++) {
 		while ((bio == NULL) ||
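
This increment is the other half of the accounting shown in __end_block_io_op() above. As a rule of thumb after this series (an editorial summary, not text from the patch):

/*
 * Lifetime accounting after this series (summary):
 *   dispatch_rw_block_io():  xen_blkif_get(blkif);
 *                            atomic_inc(&blkif->inflight);
 *   __end_block_io_op():     atomic_dec_and_test(&blkif->inflight);
 *                            ...
 *                            xen_blkif_put(blkif);
 * The refcount tracks the blkif object's lifetime; inflight tracks
 * outstanding I/O, so xen_blk_drain_io() no longer has to guess from
 * refcount values like "<= 2".
 */
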