| author | Tejun Heo <tj@kernel.org> | 2013-03-13 17:59:49 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-03-13 18:21:49 -0400 |
| commit | 59bfbcf01967d4d3370a2b8294673dd709e732cc (patch) | |
| tree | e868c63aadb6cb943e457ed2e16fcefc151243aa | |
| parent | 415586c9e6d35ca116af714d7d0bea9c9f998ce5 (diff) | |
idr: idr_alloc() shouldn't trigger lowmem warning when preloaded
GFP_NOIO is often used for idr_alloc() inside a preloaded section, as the
allocation mask doesn't really matter there. If the idr tree needs to be
expanded, idr_alloc() first tries to allocate using the specified
allocation mask and falls back to the preloaded buffer only if that fails.
This order prevents non-preloading idr_alloc() users from taking advantage
of preloading ones: without it, they could drain a preload buffer they
never filled, shifting the burden of allocation onto the preload users.
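For context, a typical caller of this API looks something like the
following minimal sketch (my_idr, my_lock, my_ptr, and install_object()
are hypothetical names for illustration, not from this patch):

```c
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);		/* hypothetical */
static DEFINE_SPINLOCK(my_lock);	/* hypothetical */

static int install_object(void *my_ptr)
{
	int id;

	/* Fill the per-cpu buffer; may sleep, disables preemption on return. */
	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);

	/*
	 * The mask barely matters here: if the direct kmem_cache attempt
	 * fails under the spinlock, idr_alloc() falls back to the layers
	 * stashed by idr_preload() above.
	 */
	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_NOIO);

	spin_unlock(&my_lock);
	idr_preload_end();	/* re-enables preemption */

	return id;		/* >= 0 on success, -ENOMEM/-ENOSPC on failure */
}
```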
Unfortunately, this allowed-and-expected-to-fail kmem_cache allocation
ends up generating a spurious slab lowmem warning before the request is
satisfied from the preload buffer.
This patch makes idr_layer_alloc() add __GFP_NOWARN to the first
kmem_cache attempt, then retry kmem_cache without __GFP_NOWARN after
allocation from the preload buffer fails, so that the lowmem warning is
still generated unless suppressed by the original @gfp_mask.
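Condensed, the post-patch allocation order looks like this (a simplified
sketch only: the layer_idr free-list path is elided, take_from_preload()
is a hypothetical stand-in for the inlined per-cpu pop, and the real code
is in the diff below):

```c
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* 1. Direct attempt, allowed to fail quietly when preloaded. */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
	if (new)
		return new;

	/* 2. Per-cpu preload buffer, process context only. */
	if (!in_interrupt()) {
		new = take_from_preload();	/* hypothetical helper */
		if (new)
			return new;
	}

	/*
	 * 3. Retry without adding __GFP_NOWARN: a genuine failure now
	 *    warns, unless the caller's own gfp_mask suppresses it.
	 */
	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
}
```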
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: David Teigland <teigland@redhat.com>
Tested-by: David Teigland <teigland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- lib/idr.c | 38
1 file changed, 25 insertions(+), 13 deletions(-)
```diff
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache. We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones. As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context. See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
```