author     Tejun Heo <tj@kernel.org>   2014-09-02 14:46:02 -0400
committer  Tejun Heo <tj@kernel.org>   2014-09-02 14:46:02 -0400
commit     a93ace487a339dccf7040be7fee08c3415188e14 (patch)
tree       e16b34a84906894031975080bcc55040d02cfdf7 /mm/percpu-vm.c
parent     dca496451bddea9aa87b7510dc2eb413d1a19dfd (diff)
percpu: move region iterations out of pcpu_[de]populate_chunk()
Previously, pcpu_[de]populate_chunk() were called with a range which may
contain multiple target regions, and pcpu_[de]populate_chunk() iterated
over those regions itself. This has the benefit of batching up cache
flushes for all the regions; however, we're planning to add more
bookkeeping logic around [de]population to support atomic allocations,
and this delegation of the iteration gets in the way.
This patch moves the region iterations out of
pcpu_[de]populate_chunk() into their callers - pcpu_alloc() and
pcpu_reclaim() - so that we can later add logic to track more states
around them. This change may make cache and tlb flushes more frequent,
but multi-region [de]populations are rare anyway, and if this actually
becomes a problem it's not difficult to factor the cache flushes out
into separate callbacks invoked directly from percpu.c.
Signed-off-by: Tejun Heo <tj@kernel.org>
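To make the resulting shape concrete, here is a small, self-contained sketch of the pattern this commit applies: the callee handles exactly one contiguous page region, and the caller walks the unpopulated regions itself. It is an illustration only, not the kernel code; next_unpop_run() and region_populate() are hypothetical stand-ins for pcpu_for_each_unpop_region() and the new single-region pcpu_populate_chunk().

```c
/*
 * Stand-alone illustration (not kernel code) of moving the region
 * iteration from the callee into the caller.  Compile with any C99
 * compiler; all names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 16

static bool populated[NR_PAGES];	/* which pages are already backed */

/* Find the next run of unpopulated pages in [*rs, end); false when none left. */
static bool next_unpop_run(int *rs, int *re, int end)
{
	int s = *rs;

	while (s < end && populated[s])
		s++;
	if (s >= end)
		return false;

	int e = s;
	while (e < end && !populated[e])
		e++;

	*rs = s;
	*re = e;
	return true;
}

/* Analogous to the new pcpu_populate_chunk(): one contiguous region only. */
static int region_populate(int page_start, int page_end)
{
	for (int i = page_start; i < page_end; i++)
		populated[i] = true;
	printf("populated [%d, %d)\n", page_start, page_end);
	return 0;
}

int main(void)
{
	/* Pretend pages 3-5 and 9 are already populated. */
	populated[3] = populated[4] = populated[5] = populated[9] = true;

	/*
	 * Caller-side iteration, analogous to what the callers in percpu.c
	 * do after this patch: walk each unpopulated run and populate it
	 * as a separate, single-region call.
	 */
	int rs = 0, re;
	while (next_unpop_run(&rs, &re, NR_PAGES)) {
		if (region_populate(rs, re))
			return 1;
		rs = re;
	}
	return 0;
}
```

In the kernel the callers use pcpu_for_each_unpop_region() for this walk, and, as the removed lines in the diff below show, the PFN_DOWN()/PFN_UP() conversion from byte offsets to page numbers likewise has to happen at the call sites now that pcpu_[de]populate_chunk() take page numbers directly.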
Diffstat (limited to 'mm/percpu-vm.c')
-rw-r--r--  mm/percpu-vm.c  57
1 file changed, 16 insertions(+), 41 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index edf709793318..538998a137d2 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -261,8 +261,8 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
 /**
  * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
  * @chunk: chunk of interest
- * @off: offset to the area to populate
- * @size: size of the area to populate in bytes
+ * @page_start: the start page
+ * @page_end: the end page
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
  * @chunk.
@@ -270,66 +270,43 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * CONTEXT:
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
  */
-static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
+                               int page_start, int page_end)
 {
-        int page_start = PFN_DOWN(off);
-        int page_end = PFN_UP(off + size);
-        int free_end = page_start, unmap_end = page_start;
         struct page **pages;
-        int rs, re, rc;
 
         pages = pcpu_get_pages(chunk);
         if (!pages)
                 return -ENOMEM;
 
-        /* alloc and map */
-        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-                rc = pcpu_alloc_pages(chunk, pages, rs, re);
-                if (rc)
-                        goto err_free;
-                free_end = re;
-        }
+        if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
+                return -ENOMEM;
 
-        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-                rc = pcpu_map_pages(chunk, pages, rs, re);
-                if (rc)
-                        goto err_unmap;
-                unmap_end = re;
+        if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
+                pcpu_free_pages(chunk, pages, page_start, page_end);
+                return -ENOMEM;
         }
         pcpu_post_map_flush(chunk, page_start, page_end);
 
         return 0;
-
-err_unmap:
-        pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
-        pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
-                pcpu_unmap_pages(chunk, pages, rs, re);
-        pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
-err_free:
-        pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
-                pcpu_free_pages(chunk, pages, rs, re);
-        return rc;
 }
 
 /**
  * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
  * @chunk: chunk to depopulate
- * @off: offset to the area to depopulate
- * @size: size of the area to depopulate in bytes
+ * @page_start: the start page
+ * @page_end: the end page
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
- * from @chunk. If @flush is true, vcache is flushed before unmapping
- * and tlb after.
+ * from @chunk.
  *
  * CONTEXT:
  * pcpu_alloc_mutex.
  */
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
+                                  int page_start, int page_end)
 {
-        int page_start = PFN_DOWN(off);
-        int page_end = PFN_UP(off + size);
         struct page **pages;
-        int rs, re;
 
         /*
          * If control reaches here, there must have been at least one
@@ -342,13 +319,11 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
         /* unmap and free */
         pcpu_pre_unmap_flush(chunk, page_start, page_end);
 
-        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
-                pcpu_unmap_pages(chunk, pages, rs, re);
+        pcpu_unmap_pages(chunk, pages, page_start, page_end);
 
         /* no need to flush tlb, vmalloc will handle it lazily */
 
-        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
-                pcpu_free_pages(chunk, pages, rs, re);
+        pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
 static struct pcpu_chunk *pcpu_create_chunk(void)