author     Jens Axboe <axboe@fb.com>    2014-02-10 14:52:34 -0500
committer  Jens Axboe <axboe@fb.com>    2014-02-10 14:52:34 -0500
commit     9d4cb8e3a5b448d802155688bf2d50ac1b9e1a51 (patch)
tree       95f3ee193b9f6a248001cee13e1f4ea23704ca45 /drivers/block
parent     11c94444074f40b479a05f6657d935204e992f2e (diff)
parent     3661371701e714f0cea4120f6a365340858fb4e4 (diff)
Merge branch 'stable/for-jens-3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip into for-linus
Konrad writes:

Please git pull the following branch:

  git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git stable/for-jens-3.14

which is based off v3.13-rc6. If you would like me to rebase it on a different branch/tag I would be more than happy to do so.

The patches are all bug fixes and hopefully can go in 3.14. They deal with xen-blkback shutdown bugs that cause memory leaks as well as shutdown races. They should go to the stable tree, and if you are OK with that I will ask for those fixes to be backported.

There is also a fix to xen-blkfront to deal with an unexpected state transition, and lastly a fix to the header where it was using __aligned__ unnecessarily.
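The shutdown-race fixes below hinge on replacing refcount guesswork (refcnt <= 2) with an explicit in-flight request counter that the drain path can wait on. The following is a minimal userspace sketch of that pattern only, not kernel code: the names complete_request and drain_io are invented for the illustration, and the wait uses a pthread condition variable where the patches use a completion.

/*
 * Illustrative userspace analogue (not kernel code): an atomic in-flight
 * counter is bumped when a request is dispatched and dropped on completion;
 * a drain loop waits for it to reach zero, mirroring the blkif->inflight /
 * drain_complete idea in the patches below.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int inflight;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

/* Completion path: the last finished request wakes up any drainer. */
static void *complete_request(void *arg)
{
        usleep(1000);                           /* pretend the I/O takes a while */
        if (atomic_fetch_sub(&inflight, 1) == 1) {
                pthread_mutex_lock(&lock);
                pthread_cond_signal(&drained);
                pthread_mutex_unlock(&lock);
        }
        return NULL;
}

/* Drain: block until every dispatched request has completed. */
static void drain_io(void)
{
        pthread_mutex_lock(&lock);
        while (atomic_load(&inflight) != 0)
                pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t workers[8];

        for (int i = 0; i < 8; i++) {
                atomic_fetch_add(&inflight, 1); /* account before dispatch */
                pthread_create(&workers[i], NULL, complete_request, NULL);
        }

        drain_io();                             /* safe to tear down shared state now */

        for (int i = 0; i < 8; i++)
                pthread_join(workers[i], NULL);
        printf("drained, inflight = %d\n", atomic_load(&inflight));
        return 0;
}

Compiled with -pthread, this drains correctly regardless of scheduling; the actual patches achieve the same with atomic_inc() at dispatch, atomic_dec_and_test() on blkif->inflight at completion, and complete() on blkif->drain_complete.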
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/xen-blkback/blkback.c  63
-rw-r--r--  drivers/block/xen-blkback/common.h    4
-rw-r--r--  drivers/block/xen-blkback/xenbus.c   13
-rw-r--r--  drivers/block/xen-blkfront.c         11
4 files changed, 65 insertions, 26 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..765fc7348b66 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 
         pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
 
-        INIT_LIST_HEAD(&blkif->persistent_purge_list);
+        BUG_ON(!list_empty(&blkif->persistent_purge_list));
         root = &blkif->persistent_gnts;
 purge_list:
         foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -625,9 +625,23 @@ purge_gnt_list:
                         print_stats(blkif);
         }
 
-        /* Since we are shutting down remove all pages from the buffer */
-        shrink_free_pagepool(blkif, 0 /* All */);
+        /* Drain pending purge work */
+        flush_work(&blkif->persistent_purge_work);
 
+        if (log_stats)
+                print_stats(blkif);
+
+        blkif->xenblkd = NULL;
+        xen_blkif_put(blkif);
+
+        return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
         /* Free all persistent grant pages */
         if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                 free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +650,8 @@ purge_gnt_list:
         BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
         blkif->persistent_gnt_c = 0;
 
-        if (log_stats)
-                print_stats(blkif);
-
-        blkif->xenblkd = NULL;
-        xen_blkif_put(blkif);
-
-        return 0;
+        /* Since we are shutting down remove all pages from the buffer */
+        shrink_free_pagepool(blkif, 0 /* All */);
 }
 
 /*
@@ -838,7 +847,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
         struct grant_page **pages = pending_req->indirect_pages;
         struct xen_blkif *blkif = pending_req->blkif;
         int indirect_grefs, rc, n, nseg, i;
-        struct blkif_request_segment_aligned *segments = NULL;
+        struct blkif_request_segment *segments = NULL;
 
         nseg = pending_req->nr_pages;
         indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +943,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
         atomic_set(&blkif->drain, 1);
         do {
-                /* The initial value is one, and one refcnt taken at the
-                 * start of the xen_blkif_schedule thread. */
-                if (atomic_read(&blkif->refcnt) <= 2)
+                if (atomic_read(&blkif->inflight) == 0)
                         break;
                 wait_for_completion_interruptible_timeout(
                         &blkif->drain_complete, HZ);
@@ -976,17 +983,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
          * the proper response on the ring.
          */
         if (atomic_dec_and_test(&pending_req->pendcnt)) {
-                xen_blkbk_unmap(pending_req->blkif,
+                struct xen_blkif *blkif = pending_req->blkif;
+
+                xen_blkbk_unmap(blkif,
                                 pending_req->segments,
                                 pending_req->nr_pages);
-                make_response(pending_req->blkif, pending_req->id,
+                make_response(blkif, pending_req->id,
                               pending_req->operation, pending_req->status);
-                xen_blkif_put(pending_req->blkif);
-                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-                        if (atomic_read(&pending_req->blkif->drain))
-                                complete(&pending_req->blkif->drain_complete);
+                free_req(blkif, pending_req);
+                /*
+                 * Make sure the request is freed before releasing blkif,
+                 * or there could be a race between free_req and the
+                 * cleanup done in xen_blkif_free during shutdown.
+                 *
+                 * NB: The fact that we might try to wake up pending_free_wq
+                 * before drain_complete (in case there's a drain going on)
+                 * it's not a problem with our current implementation
+                 * because we can assure there's no thread waiting on
+                 * pending_free_wq if there's a drain going on, but it has
+                 * to be taken into account if the current model is changed.
+                 */
+                if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+                        complete(&blkif->drain_complete);
                 }
-                free_req(pending_req->blkif, pending_req);
+                xen_blkif_put(blkif);
         }
 }
 
@@ -1240,6 +1260,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
          * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
          */
         xen_blkif_get(blkif);
+        atomic_inc(&blkif->inflight);
 
         for (i = 0; i < nseg; i++) {
                 while ((bio == NULL) ||
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..9eb34e24b4fe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
 #define MAX_INDIRECT_SEGMENTS 256
 
 #define SEGS_PER_INDIRECT_FRAME \
-        (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+        (PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define MAX_INDIRECT_PAGES \
         ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 #define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
         /* for barrier (drain) requests */
         struct completion       drain_complete;
         atomic_t                drain;
+        atomic_t                inflight;
         /* One thread per one blkif. */
         struct task_struct      *xenblkd;
         unsigned int            waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                               struct backend_info *be, int state);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..84973c6a856a 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,10 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
         blkif->persistent_gnts.rb_node = NULL;
         spin_lock_init(&blkif->free_pages_lock);
         INIT_LIST_HEAD(&blkif->free_pages);
+        INIT_LIST_HEAD(&blkif->persistent_purge_list);
         blkif->free_pages_num = 0;
         atomic_set(&blkif->persistent_gnt_in_use, 0);
+        atomic_set(&blkif->inflight, 0);
 
         INIT_LIST_HEAD(&blkif->pending_free);
 
@@ -259,6 +261,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
         if (!atomic_dec_and_test(&blkif->refcnt))
                 BUG();
 
+        /* Remove all persistent grants and the cache of ballooned pages. */
+        xen_blkbk_free_caches(blkif);
+
+        /* Make sure everything is drained before shutting down */
+        BUG_ON(blkif->persistent_gnt_c != 0);
+        BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+        BUG_ON(blkif->free_pages_num != 0);
+        BUG_ON(!list_empty(&blkif->persistent_purge_list));
+        BUG_ON(!list_empty(&blkif->free_pages));
+        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
         /* Check that there is no request in use */
         list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
                 list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
 #define DEV_NAME        "xvd"   /* name in /dev */
 
 #define SEGS_PER_INDIRECT_FRAME \
-        (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+        (PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define INDIRECT_GREFS(_segs) \
         ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
         unsigned long id;
         unsigned int fsect, lsect;
         int i, ref, n;
-        struct blkif_request_segment_aligned *segments = NULL;
+        struct blkif_request_segment *segments = NULL;
 
         /*
          * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
                         } else {
                                 n = i % SEGS_PER_INDIRECT_FRAME;
                                 segments[n] =
-                                        (struct blkif_request_segment_aligned) {
+                                        (struct blkif_request_segment) {
                                                 .gref       = ref,
                                                 .first_sect = fsect,
                                                 .last_sect  = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
         case XenbusStateReconfiguring:
         case XenbusStateReconfigured:
         case XenbusStateUnknown:
-        case XenbusStateClosed:
                 break;
 
         case XenbusStateConnected:
                 blkfront_connect(info);
                 break;
 
+        case XenbusStateClosed:
+                if (dev->state == XenbusStateClosed)
+                        break;
+                /* Missed the backend's Closing state -- fallthrough */
         case XenbusStateClosing:
                 blkfront_closing(info);
                 break;