author    Mikulas Patocka <mpatocka@redhat.com>    2013-05-10 09:37:15 -0400
committer Alasdair G Kergon <agk@redhat.com>       2013-05-10 09:37:15 -0400
commit    502624bdad3dba45dfaacaf36b7d83e39e74b2d2
tree      56c7d8236425193eb21730f2591868c735db9a5a /drivers/md
parent    09e8b813897a0f85bb401435d009228644c81214
dm bufio: avoid a possible __vmalloc deadlock
This patch uses memalloc_noio_save to avoid a possible deadlock in dm-bufio. (The deadlock can happen only with a large block size, at most PAGE_SIZE << MAX_ORDER, typically 8MiB.)

__vmalloc doesn't fully respect gfp flags. The specified gfp flags are used for allocation of the requested pages, the structures vmap_area, vmap_block and vm_struct, and the radix tree nodes. However, the kernel pagetables are always allocated with GFP_KERNEL. Thus the allocation of pagetables can recurse back into the I/O layer and cause a deadlock.

This patch uses the function memalloc_noio_save to set the per-process PF_MEMALLOC_NOIO flag and the function memalloc_noio_restore to restore it. When this flag is set, all allocations in the process are done with an implied GFP_NOIO flag, so the deadlock cannot happen.

This should be backported to stable kernels, but they don't have the PF_MEMALLOC_NOIO flag and the memalloc_noio_save/memalloc_noio_restore functions. There, PF_MEMALLOC should be set and restored instead.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
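For stable kernels without memalloc_noio_save/memalloc_noio_restore, the backport suggested above would bracket the __vmalloc call with PF_MEMALLOC instead. A minimal sketch of that variant for the body of alloc_buffer_data follows; the exact save/restore of current->flags is an assumption for illustration, not part of this commit or of any particular stable patch:

        /*
         * Sketch only (assumption): on kernels lacking PF_MEMALLOC_NOIO,
         * set PF_MEMALLOC around __vmalloc so that pagetable allocations
         * cannot recurse back into the I/O layer, then restore the
         * previous state of the bit afterwards.
         */
        unsigned int old_pf_memalloc = 0;

        if (gfp_mask & __GFP_NORETRY) {
                old_pf_memalloc = current->flags & PF_MEMALLOC;
                current->flags |= PF_MEMALLOC;
        }

        ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

        if (gfp_mask & __GFP_NORETRY)
                current->flags = (current->flags & ~PF_MEMALLOC) | old_pf_memalloc;

        return ptr;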
Diffstat (limited to 'drivers/md')
-rw-r--r--    drivers/md/dm-bufio.c    24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c6083132c4b8..0387e05cdb98 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                                enum data_mode *data_mode)
 {
+        unsigned noio_flag;
+        void *ptr;
+
         if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                 *data_mode = DATA_MODE_SLAB;
                 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
         }
 
         *data_mode = DATA_MODE_VMALLOC;
-        return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+        /*
+         * __vmalloc allocates the data pages and auxiliary structures with
+         * gfp_flags that were specified, but pagetables are always allocated
+         * with GFP_KERNEL, no matter what was specified as gfp_mask.
+         *
+         * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+         * all allocations done by this process (including pagetables) are done
+         * as if GFP_NOIO was specified.
+         */
+
+        if (gfp_mask & __GFP_NORETRY)
+                noio_flag = memalloc_noio_save();
+
+        ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+        if (gfp_mask & __GFP_NORETRY)
+                memalloc_noio_restore(noio_flag);
+
+        return ptr;
 }
 
 /*