author    Linus Torvalds <torvalds@linux-foundation.org>	2014-02-14 13:45:18 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2014-02-14 13:45:18 -0500
commit    5e57dc81106b942786f5db8e7ab8788bb9319933 (patch)
tree      4533e01e745bba3614c77200b3fd96dd7af7e04e /drivers/block
parent    0d25e3691186f5ae6feb0229717a60a5169dc5b2 (diff)
parent    c8123f8c9cb517403b51aa41c3c46ff5e10b2c17 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block IO fixes from Jens Axboe:
 "Second round of updates and fixes for 3.14-rc2.  Most of this stuff
  has been queued up for a while.  The notable exception is the blk-mq
  changes, which are naturally a bit more in flux still.

  The pull request contains:

   - Two bug fixes for the new immutable vecs, causing crashes with
     raid or swap.  From Kent.

   - Various blk-mq tweaks and fixes from Christoph.  A fix for
     integrity bio's from Nic.

   - A few bcache fixes from Kent and Darrick Wong.

   - xen-blk{front,back} fixes from David Vrabel, Matt Rushton, Nicolas
     Swenson, and Roger Pau Monne.

   - Fix for a vec miscount with integrity vectors from Martin.

   - Minor annotations or fixes from Masanari Iida and Rashika Kheria.

   - Tweak to null_blk to do more normal FIFO processing of requests
     from Shlomo Pongratz.

   - Elevator switching bypass fix from Tejun.

   - Softlockup in blkdev_issue_discard() fix when !CONFIG_PREEMPT
     from me"

* 'for-linus' of git://git.kernel.dk/linux-block: (31 commits)
  block: add cond_resched() to potentially long running ioctl discard loop
  xen-blkback: init persistent_purge_work work_struct
  blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
  blk-mq: dont assume rq->errors is set when returning an error from ->queue_rq
  block: Fix cloning of discard/write same bios
  block: Fix type mismatch in ssize_t_blk_mq_tag_sysfs_show
  blk-mq: rework flush sequencing logic
  null_blk: use blk_complete_request and blk_mq_complete_request
  virtio_blk: use blk_mq_complete_request
  blk-mq: rework I/O completions
  fs: Add prototype declaration to appropriate header file include/linux/bio.h
  fs: Mark function as static in fs/bio-integrity.c
  block/null_blk: Fix completion processing from LIFO to FIFO
  block: Explicitly handle discard/write same segments
  block: Fix nr_vecs for inline integrity vectors
  blk-mq: Add bio_integrity setup to blk_mq_make_request
  blk-mq: initialize sg_reserved_size
  blk-mq: handle dma_drain_size
  blk-mq: divert __blk_put_request for MQ ops
  blk-mq: support at_head inserations for blk_execute_rq
  ...
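Note: the blkdev_issue_discard() softlockup fix lands in block/blk-lib.c, so it does not show up in the drivers/block diffstat below. As a rough sketch of the pattern rather than the verbatim hunk: a full-device discard (e.g. from mkfs) can keep the bio-building loop busy for a long time, and with !CONFIG_PREEMPT nothing else runs on that CPU unless the loop yields explicitly:

	while (nr_sects) {
		/* carve off one max-sized discard bio and submit it ... */

		/*
		 * We can loop for a long time in here. Be nice and
		 * allow us to schedule out to avoid softlocking if
		 * preempt is disabled.
		 */
		cond_resched();
	}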
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/null_blk.c	97
-rw-r--r--	drivers/block/virtio_blk.c	7
-rw-r--r--	drivers/block/xen-blkback/blkback.c	66
-rw-r--r--	drivers/block/xen-blkback/common.h	5
-rw-r--r--	drivers/block/xen-blkback/xenbus.c	14
-rw-r--r--	drivers/block/xen-blkfront.c	11
6 files changed, 105 insertions(+), 95 deletions(-)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
 	NULL_IRQ_NONE		= 0,
 	NULL_IRQ_SOFTIRQ	= 1,
 	NULL_IRQ_TIMER		= 2,
+};
 
+enum {
 	NULL_Q_BIO		= 0,
 	NULL_Q_RQ		= 1,
 	NULL_Q_MQ		= 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
-	if (cmd->rq) {
-		if (queue_mode == NULL_Q_MQ)
-			blk_mq_end_io(cmd->rq, 0);
-		else {
-			INIT_LIST_HEAD(&cmd->rq->queuelist);
-			blk_end_request_all(cmd->rq, 0);
-		}
-	} else if (cmd->bio)
+	switch (queue_mode) {
+	case NULL_Q_MQ:
+		blk_mq_end_io(cmd->rq, 0);
+		return;
+	case NULL_Q_RQ:
+		INIT_LIST_HEAD(&cmd->rq->queuelist);
+		blk_end_request_all(cmd->rq, 0);
+		break;
+	case NULL_Q_BIO:
 		bio_endio(cmd->bio, 0);
+		break;
+	}
 
-	if (queue_mode != NULL_Q_MQ)
-		free_cmd(cmd);
+	free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 	cq = &per_cpu(completion_queues, smp_processor_id());
 
 	while ((entry = llist_del_all(&cq->list)) != NULL) {
+		entry = llist_reverse_order(entry);
 		do {
 			cmd = container_of(entry, struct nullb_cmd, ll_list);
 			end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
 
 static void null_softirq_done_fn(struct request *rq)
 {
-	blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
-	struct completion_queue *cq;
-	struct llist_node *entry, *next;
-	struct nullb_cmd *cmd;
-
-	cq = &per_cpu(completion_queues, smp_processor_id());
-
-	entry = llist_del_all(&cq->list);
-
-	while (entry) {
-		next = entry->next;
-		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
-		end_cmd(cmd);
-		entry = next;
-	}
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
-	struct call_single_data *data = &cmd->csd;
-	int cpu = get_cpu();
-	struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
-	cmd->ll_list.next = NULL;
-
-	if (llist_add(&cmd->ll_list, &cq->list)) {
-		data->func = null_ipi_cmd_end_io;
-		data->flags = 0;
-		__smp_call_function_single(cpu, data, 0);
-	}
-
-	put_cpu();
+	end_cmd(rq->special);
 }
 
-#endif /* CONFIG_SMP */
-
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
 {
 	/* Complete IO by inline, softirq or timer */
 	switch (irqmode) {
-	case NULL_IRQ_NONE:
-		end_cmd(cmd);
-		break;
 	case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
-		null_cmd_end_ipi(cmd);
-#else
+		switch (queue_mode) {
+		case NULL_Q_MQ:
+			blk_mq_complete_request(cmd->rq);
+			break;
+		case NULL_Q_RQ:
+			blk_complete_request(cmd->rq);
+			break;
+		case NULL_Q_BIO:
+			/*
+			 * XXX: no proper submitting cpu information available.
+			 */
+			end_cmd(cmd);
+			break;
+		}
+		break;
+	case NULL_IRQ_NONE:
 		end_cmd(cmd);
-#endif
 		break;
 	case NULL_IRQ_TIMER:
 		null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
 	.queue_rq	= null_queue_rq,
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
+	.complete	= null_softirq_done_fn,
 };
 
 static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
 {
 	unsigned int i;
 
-#if !defined(CONFIG_SMP)
-	if (irqmode == NULL_IRQ_SOFTIRQ) {
-		pr_warn("null_blk: softirq completions not available.\n");
-		pr_warn("null_blk: using direct completions.\n");
-		irqmode = NULL_IRQ_NONE;
-	}
-#endif
 	if (bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
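A note on the LIFO-to-FIFO fix above: llist_add() pushes new entries at the head of the lock-less list, so llist_del_all() hands the completion chain back newest-first, and reversing the chain before walking it restores submission order. A minimal sketch of the drain loop, reusing the driver's cq/end_cmd names:

	struct llist_node *entry = llist_del_all(&cq->list);

	entry = llist_reverse_order(entry);	/* LIFO chain -> FIFO order */
	while (entry) {
		struct nullb_cmd *cmd = llist_entry(entry, struct nullb_cmd,
						    ll_list);
		/* fetch ->next before end_cmd(), which may free the command */
		entry = entry->next;
		end_cmd(cmd);
	}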
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
 	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
 {
-	struct request *req = vbr->req;
+	struct virtblk_req *vbr = req->special;
 	int error = virtblk_result(vbr);
 
 	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-			virtblk_request_done(vbr);
+			blk_mq_complete_request(vbr->req);
 			req_done = true;
 		}
 		if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
 	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
 	.free_hctx	= blk_mq_free_single_hw_queue,
+	.complete	= virtblk_request_done,
 };
 
 static struct blk_mq_reg virtio_mq_reg = {
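null_blk and virtio_blk now share the same completion shape: the interrupt path only calls blk_mq_complete_request(), and blk-mq invokes the driver's ->complete hook later (on the submitting CPU where possible) to do the real end-of-request work. A sketch of the wiring against the 3.14-era API -- my_queue_rq and my_request_done are placeholder names, not code from either driver:

	static void my_request_done(struct request *req)
	{
		/* runs from the completion path, not the hardware IRQ */
		blk_mq_end_io(req, 0);
	}

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,
		.map_queue	= blk_mq_map_queue,
		.complete	= my_request_done,
	};

	/* in the IRQ handler: */
	blk_mq_complete_request(req);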
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 	BUG_ON(num != 0);
 }
 
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 
 	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
 
-	INIT_LIST_HEAD(&blkif->persistent_purge_list);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
 	root = &blkif->persistent_gnts;
 purge_list:
 	foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@ finished:
 	blkif->vbd.overflow_max_grants = 0;
 
 	/* We can defer this work */
-	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
 	schedule_work(&blkif->persistent_purge_work);
 	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
 	return;
@@ -625,9 +624,23 @@ purge_gnt_list:
 			print_stats(blkif);
 	}
 
-	/* Since we are shutting down remove all pages from the buffer */
-	shrink_free_pagepool(blkif, 0 /* All */);
+	/* Drain pending purge work */
+	flush_work(&blkif->persistent_purge_work);
 
+	if (log_stats)
+		print_stats(blkif);
+
+	blkif->xenblkd = NULL;
+	xen_blkif_put(blkif);
+
+	return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
 	/* Free all persistent grant pages */
 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
 		free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +649,8 @@ purge_gnt_list:
 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 	blkif->persistent_gnt_c = 0;
 
-	if (log_stats)
-		print_stats(blkif);
-
-	blkif->xenblkd = NULL;
-	xen_blkif_put(blkif);
-
-	return 0;
+	/* Since we are shutting down remove all pages from the buffer */
+	shrink_free_pagepool(blkif, 0 /* All */);
 }
 
 /*
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
 	struct grant_page **pages = pending_req->indirect_pages;
 	struct xen_blkif *blkif = pending_req->blkif;
 	int indirect_grefs, rc, n, nseg, i;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	nseg = pending_req->nr_pages;
 	indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
 	atomic_set(&blkif->drain, 1);
 	do {
-		/* The initial value is one, and one refcnt taken at the
-		 * start of the xen_blkif_schedule thread. */
-		if (atomic_read(&blkif->refcnt) <= 2)
+		if (atomic_read(&blkif->inflight) == 0)
 			break;
 		wait_for_completion_interruptible_timeout(
 			&blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 	 * the proper response on the ring.
 	 */
 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-		xen_blkbk_unmap(pending_req->blkif,
+		struct xen_blkif *blkif = pending_req->blkif;
+
+		xen_blkbk_unmap(blkif,
 		                pending_req->segments,
 		                pending_req->nr_pages);
-		make_response(pending_req->blkif, pending_req->id,
+		make_response(blkif, pending_req->id,
 			      pending_req->operation, pending_req->status);
-		xen_blkif_put(pending_req->blkif);
-		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-			if (atomic_read(&pending_req->blkif->drain))
-				complete(&pending_req->blkif->drain_complete);
+		free_req(blkif, pending_req);
+		/*
+		 * Make sure the request is freed before releasing blkif,
+		 * or there could be a race between free_req and the
+		 * cleanup done in xen_blkif_free during shutdown.
+		 *
+		 * NB: The fact that we might try to wake up pending_free_wq
+		 * before drain_complete (in case there's a drain going on)
+		 * it's not a problem with our current implementation
+		 * because we can assure there's no thread waiting on
+		 * pending_free_wq if there's a drain going on, but it has
+		 * to be taken into account if the current model is changed.
+		 */
+		if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+			complete(&blkif->drain_complete);
 		}
-		free_req(pending_req->blkif, pending_req);
+		xen_blkif_put(blkif);
 	}
 }
 
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
 	 */
 	xen_blkif_get(blkif);
+	atomic_inc(&blkif->inflight);
 
 	for (i = 0; i < nseg; i++) {
 		while ((bio == NULL) ||
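The drain rework above stops inferring in-flight I/O from the reference count ("refcnt <= 2") and counts requests explicitly instead. Reduced to its core, the scheme is -- a sketch condensed from the hunks above, not a separate implementation:

	/* submission */
	atomic_inc(&blkif->inflight);

	/* completion */
	if (atomic_dec_and_test(&blkif->inflight) &&
	    atomic_read(&blkif->drain))
		complete(&blkif->drain_complete);

	/* drain */
	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&blkif->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
			&blkif->drain_complete, HZ);
	} while (atomic_read(&blkif->drain));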
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
 #define MAX_INDIRECT_SEGMENTS 256
 
 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define MAX_INDIRECT_PAGES \
 	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 #define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
 	/* for barrier (drain) requests */
 	struct completion	drain_complete;
 	atomic_t		drain;
+	atomic_t		inflight;
 	/* One thread per one blkif. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 			      struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
 		      struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
 
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 					struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 	blkif->persistent_gnts.rb_node = NULL;
 	spin_lock_init(&blkif->free_pages_lock);
 	INIT_LIST_HEAD(&blkif->free_pages);
+	INIT_LIST_HEAD(&blkif->persistent_purge_list);
 	blkif->free_pages_num = 0;
 	atomic_set(&blkif->persistent_gnt_in_use, 0);
+	atomic_set(&blkif->inflight, 0);
+	INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
 
 	INIT_LIST_HEAD(&blkif->pending_free);
 
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 	if (!atomic_dec_and_test(&blkif->refcnt))
 		BUG();
 
+	/* Remove all persistent grants and the cache of ballooned pages. */
+	xen_blkbk_free_caches(blkif);
+
+	/* Make sure everything is drained before shutting down */
+	BUG_ON(blkif->persistent_gnt_c != 0);
+	BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+	BUG_ON(blkif->free_pages_num != 0);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
+	BUG_ON(!list_empty(&blkif->free_pages));
+	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
 	/* Check that there is no request in use */
 	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
 		list_del(&req->free_list);
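The INIT_WORK move visible above (and in the blkback.c hunk that drops it from purge_persistent_gnt()) follows the usual init-once rule for deferred work: re-initializing a work_struct that may still be pending or running races with the workqueue core, so the handler is bound once at allocation and later call sites only schedule and flush. The shape, sketched:

	/* once, at object creation */
	INIT_WORK(&blkif->persistent_purge_work,
		  xen_blkbk_unmap_purged_grants);

	/* later, possibly many times */
	schedule_work(&blkif->persistent_purge_work);

	/* before teardown */
	flush_work(&blkif->persistent_purge_work);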
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
 #define DEV_NAME	"xvd"	/* name in /dev */
 
 #define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+	(PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define INDIRECT_GREFS(_segs) \
 	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
 	unsigned long id;
 	unsigned int fsect, lsect;
 	int i, ref, n;
-	struct blkif_request_segment_aligned *segments = NULL;
+	struct blkif_request_segment *segments = NULL;
 
 	/*
 	 * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
 		} else {
 			n = i % SEGS_PER_INDIRECT_FRAME;
 			segments[n] =
-				(struct blkif_request_segment_aligned) {
+				(struct blkif_request_segment) {
 					.gref       = ref,
 					.first_sect = fsect,
 					.last_sect  = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
 	case XenbusStateUnknown:
-	case XenbusStateClosed:
 		break;
 
 	case XenbusStateConnected:
 		blkfront_connect(info);
 		break;
 
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's Closing state -- fallthrough */
 	case XenbusStateClosing:
 		blkfront_closing(info);
 		break;