-rw-r--r--	lib/radix-tree.c	15
-rw-r--r--	mm/filemap.c	1
-rw-r--r--	mm/rmap.c	1
3 files changed, 11 insertions, 6 deletions
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 48c250fe2233..65f0e758ec38 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -95,14 +95,17 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 static struct radix_tree_node *
 radix_tree_node_alloc(struct radix_tree_root *root)
 {
-	struct radix_tree_node *ret;
+	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
 
-	ret = kmem_cache_alloc(radix_tree_node_cachep,
-			set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
-	if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
+	if (!(gfp_mask & __GFP_WAIT)) {
 		struct radix_tree_preload *rtp;
 
+		/*
+		 * Provided the caller has preloaded here, we will always
+		 * succeed in getting a node here (and never reach
+		 * kmem_cache_alloc)
+		 */
 		rtp = &__get_cpu_var(radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
@@ -110,6 +113,10 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 			rtp->nr--;
 		}
 	}
+	if (ret == NULL)
+		ret = kmem_cache_alloc(radix_tree_node_cachep,
+				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
 }
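
The comment added in the hunk above leans on the radix tree preload contract: a caller that must insert from atomic context first calls radix_tree_preload() in a sleepable context to stock the per-CPU radix_tree_preloads pool, so the subsequent !__GFP_WAIT allocation can be served from rtp->nodes[] instead of kmem_cache_alloc(). A minimal caller-side sketch of that pattern follows; radix_tree_preload(), radix_tree_insert() and radix_tree_preload_end() are the existing API, while my_lock, my_tree, index and item are illustrative placeholders, not names from this patch.

	/* Illustrative sketch only, not part of this patch. */
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* sleepable: fill the per-CPU pool */
	if (err)
		return err;			/* preload failed, nothing held */

	spin_lock(&my_lock);			/* atomic context from here on */
	err = radix_tree_insert(&my_tree, index, item);	/* node taken from the pool */
	spin_unlock(&my_lock);

	radix_tree_preload_end();		/* re-enables preemption */
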
diff --git a/mm/filemap.c b/mm/filemap.c
index 76bea88cbebc..96920f840562 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -65,7 +65,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock		(exclusive_swap_page, others)
  *        ->mapping->tree_lock
- *          ->zone.lock
  *
  *  ->i_mutex
  *    ->i_mmap_lock		(truncate->unmap_mapping_range)
diff --git a/mm/rmap.c b/mm/rmap.c
index dbc2ca2057a5..0334c8f6b741 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,7 +36,6 @@
  *             mapping->tree_lock	(widely used, in set_page_dirty,
  *                       in arch-dependent flush_dcache_mmap_lock,
  *                       within inode_lock in __sync_single_inode)
- *               zone->lock		(within radix tree node alloc)
  */
 
 #include <linux/mm.h>