Commit metadata
author:    Roger Pau Monne <roger.pau@citrix.com>         2014-02-04 05:26:14 -0500
committer: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 2014-02-07 12:59:30 -0500
commit:    c05f3e3c85df1d89673e00cee7ece5ae4eb4c6ec (patch)
tree:      bf77875d2640a0f4388aa9c20d7867e51878c45a /drivers/block
parent:    ef753411339eae46b9a3151906901f8bfd12b0f1 (diff)
xen-blkback: fix shutdown race
Introduce a new variable to keep track of the number of in-flight
requests. We need to make sure that when xen_blkif_put is called the
request has already been freed and we can safely free xen_blkif, which
was not the case before.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Matt Rushton <mrushton@amazon.com>
Reviewed-by: Matt Rushton <mrushton@amazon.com>
Cc: Matt Wilson <msw@amazon.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/block'):
 drivers/block/xen-blkback/blkback.c | 32 ++++++++++++++++++++------------
 drivers/block/xen-blkback/common.h  |  1 +
 drivers/block/xen-blkback/xenbus.c  |  1 +
 3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dcfe49fd3cb4..394fa2eabf87 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -943,9 +943,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
943 | { | 943 | { |
944 | atomic_set(&blkif->drain, 1); | 944 | atomic_set(&blkif->drain, 1); |
945 | do { | 945 | do { |
946 | /* The initial value is one, and one refcnt taken at the | 946 | if (atomic_read(&blkif->inflight) == 0) |
947 | * start of the xen_blkif_schedule thread. */ | ||
948 | if (atomic_read(&blkif->refcnt) <= 2) | ||
949 | break; | 947 | break; |
950 | wait_for_completion_interruptible_timeout( | 948 | wait_for_completion_interruptible_timeout( |
951 | &blkif->drain_complete, HZ); | 949 | &blkif->drain_complete, HZ); |
@@ -985,17 +983,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
985 | * the proper response on the ring. | 983 | * the proper response on the ring. |
986 | */ | 984 | */ |
987 | if (atomic_dec_and_test(&pending_req->pendcnt)) { | 985 | if (atomic_dec_and_test(&pending_req->pendcnt)) { |
988 | xen_blkbk_unmap(pending_req->blkif, | 986 | struct xen_blkif *blkif = pending_req->blkif; |
987 | |||
988 | xen_blkbk_unmap(blkif, | ||
989 | pending_req->segments, | 989 | pending_req->segments, |
990 | pending_req->nr_pages); | 990 | pending_req->nr_pages); |
991 | make_response(pending_req->blkif, pending_req->id, | 991 | make_response(blkif, pending_req->id, |
992 | pending_req->operation, pending_req->status); | 992 | pending_req->operation, pending_req->status); |
993 | xen_blkif_put(pending_req->blkif); | 993 | free_req(blkif, pending_req); |
994 | if (atomic_read(&pending_req->blkif->refcnt) <= 2) { | 994 | /* |
995 | if (atomic_read(&pending_req->blkif->drain)) | 995 | * Make sure the request is freed before releasing blkif, |
996 | complete(&pending_req->blkif->drain_complete); | 996 | * or there could be a race between free_req and the |
997 | * cleanup done in xen_blkif_free during shutdown. | ||
998 | * | ||
999 | * NB: The fact that we might try to wake up pending_free_wq | ||
1000 | * before drain_complete (in case there's a drain going on) | ||
1001 | * it's not a problem with our current implementation | ||
1002 | * because we can assure there's no thread waiting on | ||
1003 | * pending_free_wq if there's a drain going on, but it has | ||
1004 | * to be taken into account if the current model is changed. | ||
1005 | */ | ||
1006 | if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { | ||
1007 | complete(&blkif->drain_complete); | ||
997 | } | 1008 | } |
998 | free_req(pending_req->blkif, pending_req); | 1009 | xen_blkif_put(blkif); |
999 | } | 1010 | } |
1000 | } | 1011 | } |
1001 | 1012 | ||
@@ -1249,6 +1260,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1249 | * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. | 1260 | * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. |
1250 | */ | 1261 | */ |
1251 | xen_blkif_get(blkif); | 1262 | xen_blkif_get(blkif); |
1263 | atomic_inc(&blkif->inflight); | ||
1252 | 1264 | ||
1253 | for (i = 0; i < nseg; i++) { | 1265 | for (i = 0; i < nseg; i++) { |
1254 | while ((bio == NULL) || | 1266 | while ((bio == NULL) || |
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index f733d7627120..e40326a7f707 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -278,6 +278,7 @@ struct xen_blkif {
278 | /* for barrier (drain) requests */ | 278 | /* for barrier (drain) requests */ |
279 | struct completion drain_complete; | 279 | struct completion drain_complete; |
280 | atomic_t drain; | 280 | atomic_t drain; |
281 | atomic_t inflight; | ||
281 | /* One thread per one blkif. */ | 282 | /* One thread per one blkif. */ |
282 | struct task_struct *xenblkd; | 283 | struct task_struct *xenblkd; |
283 | unsigned int waiting_reqs; | 284 | unsigned int waiting_reqs; |
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8afef67fecdd..84973c6a856a 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -128,6 +128,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
128 | INIT_LIST_HEAD(&blkif->persistent_purge_list); | 128 | INIT_LIST_HEAD(&blkif->persistent_purge_list); |
129 | blkif->free_pages_num = 0; | 129 | blkif->free_pages_num = 0; |
130 | atomic_set(&blkif->persistent_gnt_in_use, 0); | 130 | atomic_set(&blkif->persistent_gnt_in_use, 0); |
131 | atomic_set(&blkif->inflight, 0); | ||
131 | 132 | ||
132 | INIT_LIST_HEAD(&blkif->pending_free); | 133 | INIT_LIST_HEAD(&blkif->pending_free); |
133 | 134 | ||