about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-07-03 19:10:59 -0400
committerTejun Heo <tj@kernel.org>2009-07-03 19:10:59 -0400
commitc8a51be4cabb7009db5f865169389242d49c4c60 (patch)
treec049dc4811a36fefa4108444a950dbf70623a420 /mm
parent38a6be525460f52ac6f2de1c3f73c5615a8853cd (diff)
percpu: reorder a few functions in mm/percpu.c
(de)populate functions are about to be reimplemented to drop pcpu_chunk->page array. Move a few functions so that the rewrite patch doesn't have code movement making it more difficult to read. [ Impact: code movement ] Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r-- mm/percpu.c | 90
1 file changed, 45 insertions, 45 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 21d938a10662..639fce4d2caf 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -181,12 +181,6 @@ static int pcpu_page_idx(unsigned int cpu, int page_idx)
181 return cpu * pcpu_unit_pages + page_idx;
182}
183
184static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
185 unsigned int cpu, int page_idx)
186{
187 return &chunk->page[pcpu_page_idx(cpu, page_idx)];
188}
189
190static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
191 unsigned int cpu, int page_idx)
192{
@@ -194,6 +188,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
194 (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
195}
196
191static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
192 unsigned int cpu, int page_idx)
193{
194 return &chunk->page[pcpu_page_idx(cpu, page_idx)];
195}
196
197static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
198 int page_idx)
199{
@@ -583,6 +583,45 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
583 pcpu_chunk_addr(chunk, last, page_end));
584}
585
586static int __pcpu_map_pages(unsigned long addr, struct page **pages,
587 int nr_pages)
588{
589 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
590 PAGE_KERNEL, pages);
591}
592
593/**
594 * pcpu_map - map pages into a pcpu_chunk
595 * @chunk: chunk of interest
596 * @page_start: page index of the first page to map
597 * @page_end: page index of the last page to map + 1
598 *
599 * For each cpu, map pages [@page_start,@page_end) into @chunk.
600 * vcache is flushed afterwards.
601 */
602static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
603{
604 unsigned int last = num_possible_cpus() - 1;
605 unsigned int cpu;
606 int err;
607
608 /* map must not be done on immutable chunk */
609 WARN_ON(chunk->immutable);
610
611 for_each_possible_cpu(cpu) {
612 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
613 pcpu_chunk_pagep(chunk, cpu, page_start),
614 page_end - page_start);
615 if (err < 0)
616 return err;
617 }
618
619 /* flush at once, please read comments in pcpu_unmap() */
620 flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
621 pcpu_chunk_addr(chunk, last, page_end));
622 return 0;
623}
624
586/**
587 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
588 * @chunk: chunk to depopulate
@@ -632,45 +671,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
632 pcpu_unmap(chunk, unmap_start, unmap_end, flush);
633}
634
635static int __pcpu_map_pages(unsigned long addr, struct page **pages,
636 int nr_pages)
637{
638 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
639 PAGE_KERNEL, pages);
640}
641
642/**
643 * pcpu_map - map pages into a pcpu_chunk
644 * @chunk: chunk of interest
645 * @page_start: page index of the first page to map
646 * @page_end: page index of the last page to map + 1
647 *
648 * For each cpu, map pages [@page_start,@page_end) into @chunk.
649 * vcache is flushed afterwards.
650 */
651static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
652{
653 unsigned int last = num_possible_cpus() - 1;
654 unsigned int cpu;
655 int err;
656
657 /* map must not be done on immutable chunk */
658 WARN_ON(chunk->immutable);
659
660 for_each_possible_cpu(cpu) {
661 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
662 pcpu_chunk_pagep(chunk, cpu, page_start),
663 page_end - page_start);
664 if (err < 0)
665 return err;
666 }
667
668 /* flush at once, please read comments in pcpu_unmap() */
669 flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
670 pcpu_chunk_addr(chunk, last, page_end));
671 return 0;
672}
673
674/**
675 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
676 * @chunk: chunk of interest