 drivers/block/xen-blkback/blkback.c | 63
 drivers/block/xen-blkback/common.h  |  4
 drivers/block/xen-blkback/xenbus.c  | 13
 drivers/block/xen-blkfront.c        | 11
 include/xen/interface/io/blkif.h    | 34
 5 files changed, 79 insertions(+), 46 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..765fc7348b66 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)

 	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

-	INIT_LIST_HEAD(&blkif->persistent_purge_list);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
 purge_list:
 	foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -625,9 +625,23 @@ purge_gnt_list:
 			print_stats(blkif);
 	}

-	/* Since we are shutting down remove all pages from the buffer */
-	shrink_free_pagepool(blkif, 0 /* All */);
+	/* Drain pending purge work */
+	flush_work(&blkif->persistent_purge_work);

+	if (log_stats)
+		print_stats(blkif);
+
+	blkif->xenblkd = NULL;
+	xen_blkif_put(blkif);
+
+	return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
 	/* Free all persistent grant pages */
 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
 		free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +650,8 @@ purge_gnt_list:
 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 	blkif->persistent_gnt_c = 0;

-	if (log_stats)
-		print_stats(blkif);
-
-	blkif->xenblkd = NULL;
-	xen_blkif_put(blkif);
-
-	return 0;
+	/* Since we are shutting down remove all pages from the buffer */
+	shrink_free_pagepool(blkif, 0 /* All */);
 }

 /*
@@ -838,7 +847,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;

 	nseg = pending_req->nr_pages;
 	indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +943,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
 	atomic_set(&blkif->drain, 1);
 	do {
-		/* The initial value is one, and one refcnt taken at the
-		 * start of the xen_blkif_schedule thread. */
-		if (atomic_read(&blkif->refcnt) <= 2)
+		if (atomic_read(&blkif->inflight) == 0)
 			break;
 		wait_for_completion_interruptible_timeout(
 				&blkif->drain_complete, HZ);
@@ -976,17 +983,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req->blkif,
+		struct xen_blkif *blkif = pending_req->blkif;
+
+		xen_blkbk_unmap(blkif,
 				pending_req->segments,
 				pending_req->nr_pages);
-		make_response(pending_req->blkif, pending_req->id,
+		make_response(blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
-		xen_blkif_put(pending_req->blkif);
-		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-			if (atomic_read(&pending_req->blkif->drain))
-				complete(&pending_req->blkif->drain_complete);
+		free_req(blkif, pending_req);
+		/*
+		 * Make sure the request is freed before releasing blkif,
+		 * or there could be a race between free_req and the
+		 * cleanup done in xen_blkif_free during shutdown.
+		 *
+		 * NB: The fact that we might try to wake up pending_free_wq
+		 * before drain_complete (in case there's a drain going on)
+		 * it's not a problem with our current implementation
+		 * because we can assure there's no thread waiting on
+		 * pending_free_wq if there's a drain going on, but it has
+		 * to be taken into account if the current model is changed.
+		 */
+		if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+			complete(&blkif->drain_complete);
 		}
-		free_req(pending_req->blkif, pending_req);
+		xen_blkif_put(blkif);
 	}
 }

@@ -1240,6 +1260,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
 	 */
 	xen_blkif_get(blkif);
+	atomic_inc(&blkif->inflight);

 	for (i = 0; i < nseg; i++) {
 		while ((bio == NULL) ||
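
The blkback.c changes above replace the old habit of inferring quiescence from `refcnt` with a dedicated counter: dispatch_rw_block_io() bumps `inflight` when it issues a request, __end_block_io_op() drops it only after free_req(), and xen_blk_drain_io() simply waits for it to reach zero. A minimal sketch of this pattern, with illustrative names rather than the driver's own, assuming the stock kernel atomic_t and completion APIs:

/*
 * Sketch of the in-flight/drain pattern; names are illustrative,
 * not the driver's. Relies on <linux/atomic.h> and <linux/completion.h>.
 */
struct dev_ctx {
	atomic_t          inflight;       /* issued but not yet completed */
	atomic_t          drain;          /* nonzero while a drain is pending */
	struct completion drain_complete; /* signalled when inflight hits 0 */
};

static void submit_one(struct dev_ctx *ctx)
{
	atomic_inc(&ctx->inflight);       /* pairs with the dec on completion */
	/* ... issue the I/O ... */
}

static void complete_one(struct dev_ctx *ctx)
{
	/* Free per-request state first, then drop the count, so a waiter
	 * that wakes up never observes a half-torn-down request. */
	if (atomic_dec_and_test(&ctx->inflight) && atomic_read(&ctx->drain))
		complete(&ctx->drain_complete);
}

static void drain_io(struct dev_ctx *ctx)
{
	atomic_set(&ctx->drain, 1);
	while (atomic_read(&ctx->inflight) != 0)
		wait_for_completion_interruptible_timeout(&ctx->drain_complete, HZ);
	atomic_set(&ctx->drain, 0);
}

The old `refcnt <= 2` test was fragile because the refcount also covers references unrelated to outstanding I/O; a counter that tracks exactly one thing makes the drain condition exact.
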
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..9eb34e24b4fe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
 #define MAX_INDIRECT_SEGMENTS 256

 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define MAX_INDIRECT_PAGES \
 	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 #define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
 	/* for barrier (drain) requests */
 	struct completion       drain_complete;
 	atomic_t                drain;
+	atomic_t                inflight;
 	/* One thread per one blkif. */
 	struct task_struct      *xenblkd;
 	unsigned int            waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);

 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 			      struct backend_info *be, int state);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..84973c6a856a 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,10 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->persistent_gnts.rb_node = NULL;
 	spin_lock_init(&blkif->free_pages_lock);
 	INIT_LIST_HEAD(&blkif->free_pages);
+	INIT_LIST_HEAD(&blkif->persistent_purge_list);
 	blkif->free_pages_num = 0;
 	atomic_set(&blkif->persistent_gnt_in_use, 0);
+	atomic_set(&blkif->inflight, 0);

 	INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +261,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 	if (!atomic_dec_and_test(&blkif->refcnt))
 		BUG();

+	/* Remove all persistent grants and the cache of ballooned pages. */
+	xen_blkbk_free_caches(blkif);
+
+	/* Make sure everything is drained before shutting down */
+	BUG_ON(blkif->persistent_gnt_c != 0);
+	BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+	BUG_ON(blkif->free_pages_num != 0);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
+	BUG_ON(!list_empty(&blkif->free_pages));
+	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
 	/* Check that there is no request in use */
 	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
 		list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
 #define DEV_NAME        "xvd"   /* name in /dev */

 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define INDIRECT_GREFS(_segs) \
 	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)

@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
 	unsigned long id;
 	unsigned int fsect, lsect;
 	int i, ref, n;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;

 	/*
 	 * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
 			} else {
 				n = i % SEGS_PER_INDIRECT_FRAME;
 				segments[n] =
-					(struct blkif_request_segment_aligned) {
+					(struct blkif_request_segment) {
 						.gref       = ref,
 						.first_sect = fsect,
 						.last_sect  = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
 	case XenbusStateUnknown:
-	case XenbusStateClosed:
 		break;

 	case XenbusStateConnected:
 		blkfront_connect(info);
 		break;

+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's Closing state -- fallthrough */
 	case XenbusStateClosing:
 		blkfront_closing(info);
 		break;
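
The reordering matters because XenbusStateClosing is transient: a slow frontend can wake up to find the backend already in Closed without ever having observed Closing. Previously Closed was lumped in with the "ignore" cases, leaving the frontend attached to a dead backend. Roughly, the logic the new switch implements (a simplified restatement with hypothetical names, not the driver's code):

/* Simplified restatement of the new Closed/Closing handling;
 * enum values and helpers are illustrative. */
switch (backend_state) {
case BACKEND_CLOSED:
	if (frontend_state == FRONTEND_CLOSED)
		break;          /* both ends already closed: nothing to do */
	/* We missed Closing entirely; treat Closed the same way. */
	/* fallthrough */
case BACKEND_CLOSING:
	begin_closing();        /* blkfront_closing() in the real driver */
	break;
}
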
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
  * it's less than the number provided by the backend. The indirect_grefs field
  * in blkif_request_indirect should be filled by the frontend with the
  * grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
  *
  * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
  * create the "feature-max-indirect-segments" node!
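
To make the corrected comment concrete: with the 8-byte struct blkif_request_segment defined in the next hunk, a 4 KiB page holds 512 segment descriptors, so MAX_INDIRECT_SEGMENTS (256) fits in a single indirect page. A standalone compile-time restatement of the arithmetic, assuming PAGE_SIZE == 4096 and a uint32_t stand-in for grant_ref_t (not part of the patch):

/* Worked example of the indirect-page arithmetic. */
#include <stdint.h>

typedef uint32_t grant_ref_t;            /* stand-in for the Xen typedef */

struct blkif_request_segment {
	grant_ref_t gref;                /* 4 bytes */
	uint8_t     first_sect, last_sect;
};                                       /* padded to 8 bytes by alignment */

#define PAGE_SIZE               4096
#define MAX_INDIRECT_SEGMENTS   256
#define SEGS_PER_INDIRECT_FRAME (PAGE_SIZE / sizeof(struct blkif_request_segment))
#define INDIRECT_PAGES(_segs) \
	(((_segs) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)

_Static_assert(sizeof(struct blkif_request_segment) == 8, "layout unchanged");
_Static_assert(SEGS_PER_INDIRECT_FRAME == 512, "512 segments per 4 KiB page");
_Static_assert(INDIRECT_PAGES(MAX_INDIRECT_SEGMENTS) == 1, "ceil(256/512) == 1");

This also shows why the next hunk can drop the explicit _pad field and the packed attribute: on ABIs where a 4-byte integer has 4-byte alignment, natural padding already rounds the struct up to 8 bytes, so the wire layout is unchanged.
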
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;

 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

-struct blkif_request_segment_aligned {
+struct blkif_request_segment {
 	grant_ref_t gref;        /* reference to I/O buffer frame        */
 	/* @first_sect: first sector in frame to transfer (inclusive).   */
 	/* @last_sect: last sector in frame to transfer (inclusive).     */
 	uint8_t     first_sect, last_sect;
-	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+};

 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
 #endif
 	uint64_t       id;           /* private guest value, echoed in resp  */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-	struct blkif_request_segment {
-		grant_ref_t gref;        /* reference to I/O buffer frame        */
-		/* @first_sect: first sector in frame to transfer (inclusive).   */
-		/* @last_sect: last sector in frame to transfer (inclusive).     */
-		uint8_t     first_sect, last_sect;
-	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 } __attribute__((__packed__));

 struct blkif_request_discard {