diff options
author | Bob Liu <bob.liu@oracle.com> | 2015-04-03 02:42:59 -0400 |
---|---|---|
committer | David Vrabel <david.vrabel@citrix.com> | 2015-04-27 06:41:12 -0400 |
commit | b44166cd46e28dd608d5baa5873047a40f32919c (patch) | |
tree | 799b236c472067869782b2cf937d9d0d87852fc4 /drivers/block/xen-blkback | |
parent | 325d73bf8fea8af2227240b7305253fb052d3a68 (diff) |
xen/grant: introduce func gnttab_unmap_refs_sync()
There are several places using gnttab async unmap and waiting for
completion, so move the common code to a function
gnttab_unmap_refs_sync().
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r-- | drivers/block/xen-blkback/blkback.c | 31 |
1 file changed, 3 insertions, 28 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 48e98f2712b5..713fc9ff1149 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif, | |||
265 | atomic_dec(&blkif->persistent_gnt_in_use); | 265 | atomic_dec(&blkif->persistent_gnt_in_use); |
266 | } | 266 | } |
267 | 267 | ||
268 | static void free_persistent_gnts_unmap_callback(int result, | ||
269 | struct gntab_unmap_queue_data *data) | ||
270 | { | ||
271 | struct completion *c = data->data; | ||
272 | |||
273 | /* BUG_ON used to reproduce existing behaviour, | ||
274 | but is this the best way to deal with this? */ | ||
275 | BUG_ON(result); | ||
276 | complete(c); | ||
277 | } | ||
278 | |||
279 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | 268 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, |
280 | unsigned int num) | 269 | unsigned int num) |
281 | { | 270 | { |
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
285 | struct rb_node *n; | 274 | struct rb_node *n; |
286 | int segs_to_unmap = 0; | 275 | int segs_to_unmap = 0; |
287 | struct gntab_unmap_queue_data unmap_data; | 276 | struct gntab_unmap_queue_data unmap_data; |
288 | struct completion unmap_completion; | ||
289 | |||
290 | init_completion(&unmap_completion); | ||
291 | 277 | ||
292 | unmap_data.data = &unmap_completion; | ||
293 | unmap_data.done = &free_persistent_gnts_unmap_callback; | ||
294 | unmap_data.pages = pages; | 278 | unmap_data.pages = pages; |
295 | unmap_data.unmap_ops = unmap; | 279 | unmap_data.unmap_ops = unmap; |
296 | unmap_data.kunmap_ops = NULL; | 280 | unmap_data.kunmap_ops = NULL; |
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
310 | !rb_next(&persistent_gnt->node)) { | 294 | !rb_next(&persistent_gnt->node)) { |
311 | 295 | ||
312 | unmap_data.count = segs_to_unmap; | 296 | unmap_data.count = segs_to_unmap; |
313 | gnttab_unmap_refs_async(&unmap_data); | 297 | BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); |
314 | wait_for_completion(&unmap_completion); | ||
315 | 298 | ||
316 | put_free_pages(blkif, pages, segs_to_unmap); | 299 | put_free_pages(blkif, pages, segs_to_unmap); |
317 | segs_to_unmap = 0; | 300 | segs_to_unmap = 0; |
@@ -332,12 +315,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) | |||
332 | int segs_to_unmap = 0; | 315 | int segs_to_unmap = 0; |
333 | struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); | 316 | struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); |
334 | struct gntab_unmap_queue_data unmap_data; | 317 | struct gntab_unmap_queue_data unmap_data; |
335 | struct completion unmap_completion; | ||
336 | 318 | ||
337 | init_completion(&unmap_completion); | ||
338 | |||
339 | unmap_data.data = &unmap_completion; | ||
340 | unmap_data.done = &free_persistent_gnts_unmap_callback; | ||
341 | unmap_data.pages = pages; | 319 | unmap_data.pages = pages; |
342 | unmap_data.unmap_ops = unmap; | 320 | unmap_data.unmap_ops = unmap; |
343 | unmap_data.kunmap_ops = NULL; | 321 | unmap_data.kunmap_ops = NULL; |
@@ -357,9 +335,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) | |||
357 | 335 | ||
358 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { | 336 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { |
359 | unmap_data.count = segs_to_unmap; | 337 | unmap_data.count = segs_to_unmap; |
360 | gnttab_unmap_refs_async(&unmap_data); | 338 | BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); |
361 | wait_for_completion(&unmap_completion); | ||
362 | |||
363 | put_free_pages(blkif, pages, segs_to_unmap); | 339 | put_free_pages(blkif, pages, segs_to_unmap); |
364 | segs_to_unmap = 0; | 340 | segs_to_unmap = 0; |
365 | } | 341 | } |
@@ -367,8 +343,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) | |||
367 | } | 343 | } |
368 | if (segs_to_unmap > 0) { | 344 | if (segs_to_unmap > 0) { |
369 | unmap_data.count = segs_to_unmap; | 345 | unmap_data.count = segs_to_unmap; |
370 | gnttab_unmap_refs_async(&unmap_data); | 346 | BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); |
371 | wait_for_completion(&unmap_completion); | ||
372 | put_free_pages(blkif, pages, segs_to_unmap); | 347 | put_free_pages(blkif, pages, segs_to_unmap); |
373 | } | 348 | } |
374 | } | 349 | } |