author     Tejun Heo <tj@kernel.org>  2009-06-21 22:56:23 -0400
committer  Tejun Heo <tj@kernel.org>  2009-06-21 22:56:23 -0400
commit     85ae87c1ad8e18a421e7448a99a42ecda183f29f
tree       8fcf3fe5bc9a96a1c16244950d944d4a4e7130c5 /mm
parent     f234012f52a37e48f2330e1ca2df69800e797c3b
percpu: fix too lazy vunmap cache flushing
In pcpu_unmap(), flushing the virtual cache on vunmap can't be delayed,
as the page is going to be returned to the page allocator.  Only TLB
flushing can be put off such that the vmalloc code can handle it lazily.
Fix it.

[ Impact: fix subtle virtual cache flush bug ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
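The ordering rule the patch enforces can be shown in a few lines of C.
Below is a minimal sketch, not the kernel function itself: unmap_range()
is a hypothetical helper, while flush_cache_vunmap(),
unmap_kernel_range_noflush() and flush_tlb_kernel_range() are the actual
kernel interfaces whose ordering the patch corrects.

#include <linux/types.h>	/* bool */
#include <linux/vmalloc.h>	/* unmap_kernel_range_noflush() */
#include <asm/cacheflush.h>	/* flush_cache_vunmap() */
#include <asm/tlbflush.h>	/* flush_tlb_kernel_range() */

/* Minimal sketch; assumes the kernel headers above. */
static void unmap_range(unsigned long start, unsigned long end,
			bool flush_tlb)
{
	/*
	 * The virtual cache must be written back before the unmap: the
	 * backing pages go straight back to the page allocator, and on a
	 * virtually indexed cache any dirty lines left behind could later
	 * be written back on top of the page's next owner.
	 */
	flush_cache_vunmap(start, end);

	unmap_kernel_range_noflush(start, end - start);

	/*
	 * A stale TLB entry only matters once the virtual range is handed
	 * out again, so this flush may be skipped here and performed
	 * lazily by the vmalloc code.
	 */
	if (flush_tlb)
		flush_tlb_kernel_range(start, end);
}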
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e81..d06f4748271e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
  * @chunk: chunk of interest
  * @page_start: page index of the first page to unmap
  * @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @flush_tlb: whether to flush tlb or not
  *
  * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
  * If @flush is true, vcache is flushed before unmapping and tlb
  * after.
  */
 static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
-		       bool flush)
+		       bool flush_tlb)
 {
 	unsigned int last = num_possible_cpus() - 1;
 	unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 	 * the whole region at once rather than doing it for each cpu.
 	 * This could be an overkill but is more scalable.
 	 */
-	if (flush)
-		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
-				   pcpu_chunk_addr(chunk, last, page_end));
+	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+			   pcpu_chunk_addr(chunk, last, page_end));
 
 	for_each_possible_cpu(cpu)
 		unmap_kernel_range_noflush(
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 			(page_end - page_start) << PAGE_SHIFT);
 
 	/* ditto as flush_cache_vunmap() */
-	if (flush)
+	if (flush_tlb)
 		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
 				       pcpu_chunk_addr(chunk, last, page_end));
 }
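For completeness, a hedged caller-side sketch. depopulate_sketch() below
is hypothetical and not taken from the patched file (in mm/percpu.c the
caller of pcpu_unmap() is the chunk depopulation path), but it shows the
contract after the fix: @flush_tlb governs only TLB flushing, and the
virtual cache flush no longer depends on what any caller passes.

/*
 * Hypothetical caller, for illustration only.  Passing false defers just
 * the TLB flush to the lazy vmalloc machinery; the virtual cache is
 * flushed unconditionally inside pcpu_unmap() before the pages are
 * returned to the page allocator.
 */
static void depopulate_sketch(struct pcpu_chunk *chunk,
			      int page_start, int page_end)
{
	pcpu_unmap(chunk, page_start, page_end, false);
}

The design point is that correctness of the cache flush no longer rests
on every caller choosing the right flag; only the genuinely deferrable
operation, the TLB flush, remains optional.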