-rw-r--r--  block/blk-ioc.c            |  2
-rw-r--r--  fs/fscache/page.c          |  2
-rw-r--r--  include/linux/radix-tree.h |  1
-rw-r--r--  lib/radix-tree.c           | 41
-rw-r--r--  mm/filemap.c               |  2
-rw-r--r--  mm/shmem.c                 |  2
-rw-r--r--  mm/swap_state.c            |  4
7 files changed, 46 insertions, 8 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 4464c823cff2..46cd7bd18b34 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -367,7 +367,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	if (!icq)
 		return NULL;
 
-	if (radix_tree_preload(gfp_mask) < 0) {
+	if (radix_tree_maybe_preload(gfp_mask) < 0) {
 		kmem_cache_free(et->icq_cache, icq);
 		return NULL;
 	}
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 8702b732109a..73899c1c3449 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -913,7 +913,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 			     (1 << FSCACHE_OP_WAITING) |
 			     (1 << FSCACHE_OP_UNUSE_COOKIE);
 
-	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
 	if (ret < 0)
 		goto nomem_free;
 
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c38b0a..403940787be1 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -231,6 +231,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e7964296fd50..7811ed3b4e70 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -32,6 +32,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/rcupdate.h>
+#include <linux/hardirq.h>		/* in_interrupt() */
 
 
 #ifdef __KERNEL__
@@ -207,7 +208,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
 
-	if (!(gfp_mask & __GFP_WAIT)) {
+	/*
+	 * Preload code isn't irq safe and it doesn't make sense to use
+	 * preloading in the interrupt anyway as all the allocations have to
+	 * be atomic. So just do normal allocation when in interrupt.
+	 */
+	if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) {
 		struct radix_tree_preload *rtp;
 
 		/*
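
The case the new comment describes can be pictured with a purely hypothetical caller (not part of this patch): an insertion performed from an interrupt handler cannot sleep and cannot rely on the per-CPU preload buffer, so with the added !in_interrupt() test radix_tree_node_alloc() falls back to a plain atomic slab allocation instead. A minimal sketch, assuming the tree was initialised with an atomic gfp mask and is protected by the caller's own spinlock:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/printk.h>

/*
 * Hypothetical example only: a tree set up with
 * INIT_RADIX_TREE(&tree, GFP_ATOMIC) being populated from IRQ context.
 * No radix_tree_preload() is possible here; after this patch the node
 * allocation comes straight from the slab rather than dipping into the
 * (not IRQ safe) per-CPU preload pool.
 */
static void example_insert_from_irq(struct radix_tree_root *tree,
				    spinlock_t *lock,
				    unsigned long index, void *item)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (radix_tree_insert(tree, index, item))
		pr_warn("radix tree insertion failed in interrupt\n");
	spin_unlock_irqrestore(lock, flags);
}
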
@@ -264,7 +270,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * To make use of this facility, the radix tree must be initialised without
  * __GFP_WAIT being passed to INIT_RADIX_TREE().
  */
-int radix_tree_preload(gfp_t gfp_mask)
+static int __radix_tree_preload(gfp_t gfp_mask)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -288,9 +294,40 @@ int radix_tree_preload(gfp_t gfp_mask)
 out:
 	return ret;
 }
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+ * success, return zero, with preemption disabled. On error, return -ENOMEM
+ * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
+ */
+int radix_tree_preload(gfp_t gfp_mask)
+{
+	/* Warn on non-sensical use... */
+	WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT));
+	return __radix_tree_preload(gfp_mask);
+}
 EXPORT_SYMBOL(radix_tree_preload);
 
 /*
+ * The same as above function, except we don't guarantee preloading happens.
+ * We do it, if we decide it helps. On success, return zero with preemption
+ * disabled. On error, return -ENOMEM with preemption not disabled.
+ */
+int radix_tree_maybe_preload(gfp_t gfp_mask)
+{
+	if (gfp_mask & __GFP_WAIT)
+		return __radix_tree_preload(gfp_mask);
+	/* Preloading doesn't help anything with this gfp mask, skip it */
+	preempt_disable();
+	return 0;
+}
+EXPORT_SYMBOL(radix_tree_maybe_preload);
+
+/*
  * Return the maximum key which can be store into a
  * radix tree with height HEIGHT.
  */
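
The calling convention for the new helper is the same as for radix_tree_preload(): preload outside the tree lock, insert under it, then call radix_tree_preload_end() to re-enable preemption. A hedged sketch of a typical caller (hypothetical names; the shape mirrors add_to_swap_cache() in mm/swap_state.c below):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/*
 * Hypothetical caller, not part of this patch.  With a sleeping gfp mask
 * the nodes are preloaded, so the insertion under the lock cannot fail for
 * lack of memory; with an atomic mask radix_tree_maybe_preload() merely
 * disables preemption and the insertion itself may return -ENOMEM.
 */
static int example_insert(struct radix_tree_root *tree, spinlock_t *lock,
			  unsigned long index, void *item, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (error)
		return error;

	spin_lock(lock);
	error = radix_tree_insert(tree, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();	/* drops the preemption disable */
	return error;
}

Because radix_tree_preload() now warns on masks without __GFP_WAIT, callers that may legitimately be handed an atomic mask, like the ones converted below, are switched to the maybe variant instead.
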
diff --git a/mm/filemap.c b/mm/filemap.c
index 731a2c24532d..e607728db4a8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -469,7 +469,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	if (error)
 		goto out;
 
-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (error == 0) {
 		page_cache_get(page);
 		page->mapping = mapping;
diff --git a/mm/shmem.c b/mm/shmem.c
index 526149846d0a..a1b8bf4391c2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1205,7 +1205,7 @@ repeat:
 						gfp & GFP_RECLAIM_MASK);
 		if (error)
 			goto decused;
-		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 							gfp, NULL);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f24ab0dff554..e6f15f8ca2af 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -122,7 +122,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 {
 	int error;
 
-	error = radix_tree_preload(gfp_mask);
+	error = radix_tree_maybe_preload(gfp_mask);
 	if (!error) {
 		error = __add_to_swap_cache(page, entry);
 		radix_tree_preload_end();
@@ -328,7 +328,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		/*
 		 * call radix_tree_preload() while we can wait.
 		 */
-		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
 		if (err)
 			break;
 