about summary refs log tree commit diff stats
path: root/drivers/block
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-05-06 18:58:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-05-06 18:58:06 -0400
commit0e1dc4274828f64fcb56fc7b950acdc5ff7a395f (patch)
tree0855a6e189dede21e9e2dd0094774089b1c7d8d2 /drivers/block
parent3d54ac9e35a69d19381420bb2fa1702d5bf73846 (diff)
parent8746515d7f04c9ea94cf43e2db1fd2cfca93276d (diff)
Merge tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen bug fixes from David Vrabel:

 - fix blkback regression if using persistent grants

 - fix various event channel related suspend/resume bugs

 - fix AMD x86 regression with X86_BUG_SYSRET_SS_ATTRS

 - SWIOTLB on ARM now uses frames <4 GiB (if available) so devices only
   capable of 32-bit DMA work.

* tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: Add __GFP_DMA flag when xen_swiotlb_init gets free pages on ARM
  hypervisor/x86/xen: Unset X86_BUG_SYSRET_SS_ATTRS on Xen PV guests
  xen/events: Set irq_info->evtchn before binding the channel to CPU in __startup_pirq()
  xen/console: Update console event channel on resume
  xen/xenbus: Update xenbus event channel on resume
  xen/events: Clear cpu_evtchn_mask before resuming
  xen-pciback: Add name prefix to global 'permissive' variable
  xen: Suspend ticks on all CPUs during suspend
  xen/grant: introduce func gnttab_unmap_refs_sync()
  xen/blkback: safely unmap purge persistent grants
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/xen-blkback/blkback.c35
1 file changed, 11 insertions, 24 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bd2b3bbbb22c..713fc9ff1149 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
265 atomic_dec(&blkif->persistent_gnt_in_use); 265 atomic_dec(&blkif->persistent_gnt_in_use);
266} 266}
267 267
268static void free_persistent_gnts_unmap_callback(int result,
269 struct gntab_unmap_queue_data *data)
270{
271 struct completion *c = data->data;
272
273 /* BUG_ON used to reproduce existing behaviour,
274 but is this the best way to deal with this? */
275 BUG_ON(result);
276 complete(c);
277}
278
279static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, 268static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
280 unsigned int num) 269 unsigned int num)
281{ 270{
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
285 struct rb_node *n; 274 struct rb_node *n;
286 int segs_to_unmap = 0; 275 int segs_to_unmap = 0;
287 struct gntab_unmap_queue_data unmap_data; 276 struct gntab_unmap_queue_data unmap_data;
288 struct completion unmap_completion;
289 277
290 init_completion(&unmap_completion);
291
292 unmap_data.data = &unmap_completion;
293 unmap_data.done = &free_persistent_gnts_unmap_callback;
294 unmap_data.pages = pages; 278 unmap_data.pages = pages;
295 unmap_data.unmap_ops = unmap; 279 unmap_data.unmap_ops = unmap;
296 unmap_data.kunmap_ops = NULL; 280 unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
310 !rb_next(&persistent_gnt->node)) { 294 !rb_next(&persistent_gnt->node)) {
311 295
312 unmap_data.count = segs_to_unmap; 296 unmap_data.count = segs_to_unmap;
313 gnttab_unmap_refs_async(&unmap_data); 297 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
314 wait_for_completion(&unmap_completion);
315 298
316 put_free_pages(blkif, pages, segs_to_unmap); 299 put_free_pages(blkif, pages, segs_to_unmap);
317 segs_to_unmap = 0; 300 segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
329 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 312 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
330 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 313 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
331 struct persistent_gnt *persistent_gnt; 314 struct persistent_gnt *persistent_gnt;
332 int ret, segs_to_unmap = 0; 315 int segs_to_unmap = 0;
333 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); 316 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
317 struct gntab_unmap_queue_data unmap_data;
318
319 unmap_data.pages = pages;
320 unmap_data.unmap_ops = unmap;
321 unmap_data.kunmap_ops = NULL;
334 322
335 while(!list_empty(&blkif->persistent_purge_list)) { 323 while(!list_empty(&blkif->persistent_purge_list)) {
336 persistent_gnt = list_first_entry(&blkif->persistent_purge_list, 324 persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
346 pages[segs_to_unmap] = persistent_gnt->page; 334 pages[segs_to_unmap] = persistent_gnt->page;
347 335
348 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 336 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
349 ret = gnttab_unmap_refs(unmap, NULL, pages, 337 unmap_data.count = segs_to_unmap;
350 segs_to_unmap); 338 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
351 BUG_ON(ret);
352 put_free_pages(blkif, pages, segs_to_unmap); 339 put_free_pages(blkif, pages, segs_to_unmap);
353 segs_to_unmap = 0; 340 segs_to_unmap = 0;
354 } 341 }
355 kfree(persistent_gnt); 342 kfree(persistent_gnt);
356 } 343 }
357 if (segs_to_unmap > 0) { 344 if (segs_to_unmap > 0) {
358 ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); 345 unmap_data.count = segs_to_unmap;
359 BUG_ON(ret); 346 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
360 put_free_pages(blkif, pages, segs_to_unmap); 347 put_free_pages(blkif, pages, segs_to_unmap);
361 } 348 }
362} 349}