author     Tejun Heo <tj@kernel.org>  2009-02-21 02:56:23 -0500
committer  Tejun Heo <tj@kernel.org>  2009-02-21 02:56:23 -0500
commit     cae3aeb83fef5a7c9c8ac40e653e59dd9a35469c (patch)
tree       373e23d2e7339efed747a77ebd8aa1323c05f6e6 /mm
parent     11124411aa95827404d6bfdfc14c908e1b54513c (diff)
percpu: clean up size usage
Andrew was concerned about the unit of the variables that are named "size" or carry a "size" suffix. Every such usage in the percpu allocator is in bytes, but make it super clear by adding comments. While at it, make pcpu_depopulate_chunk() take int @off and @size like everyone else.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
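For context only (not part of the patch): the @off and @size that pcpu_depopulate_chunk() now takes as int are plain byte quantities, which the function turns into a page range with PFN_DOWN()/PFN_UP(), as the corresponding hunk below shows. A minimal user-space sketch of that byte-to-page conversion, assuming 4 KiB pages and re-creating the two macros from their definitions in include/linux/pfn.h:

/*
 * Sketch of the byte -> page-range conversion done by
 * pcpu_populate_chunk()/pcpu_depopulate_chunk().  PAGE_SHIFT is an
 * assumption here (4 KiB pages); PFN_DOWN()/PFN_UP() mirror
 * include/linux/pfn.h.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	/* @off and @size are byte quantities, as the patch spells out */
	unsigned long off = 6000, size = 3000;
	unsigned long page_start = PFN_DOWN(off);		/* first page fully or partially covered */
	unsigned long page_end = PFN_UP(off + size);		/* one past the last covered page */

	printf("bytes [%lu,%lu) -> pages [%lu,%lu)\n",
	       off, off + size, page_start, page_end);		/* prints pages [1,3) */
	return 0;
}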
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu.c  23
1 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 4617d97e877c..997724c2ea24 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -119,7 +119,7 @@ static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */
 
 static int pcpu_size_to_slot(int size)
 {
-	int highbit = fls(size);
+	int highbit = fls(size);	/* size is in bytes */
 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 }
 
@@ -158,8 +158,8 @@ static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 /**
  * pcpu_realloc - versatile realloc
  * @p: the current pointer (can be NULL for new allocations)
- * @size: the current size (can be 0 for new allocations)
- * @new_size: the wanted new size (can be 0 for free)
+ * @size: the current size in bytes (can be 0 for new allocations)
+ * @new_size: the wanted new size in bytes (can be 0 for free)
  *
  * More robust realloc which can be used to allocate, resize or free a
  * memory area of arbitrary size. If the needed size goes over
@@ -290,8 +290,8 @@ static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
  * pcpu_split_block - split a map block
  * @chunk: chunk of interest
  * @i: index of map block to split
- * @head: head size (can be 0)
- * @tail: tail size (can be 0)
+ * @head: head size in bytes (can be 0)
+ * @tail: tail size in bytes (can be 0)
  *
  * Split the @i'th map block into two or three blocks. If @head is
  * non-zero, @head bytes block is inserted before block @i moving it
@@ -346,7 +346,7 @@ static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
 /**
  * pcpu_alloc_area - allocate area from a pcpu_chunk
  * @chunk: chunk of interest
- * @size: wanted size
+ * @size: wanted size in bytes
  * @align: wanted align
  *
  * Try to allocate @size bytes area aligned at @align from @chunk.
@@ -540,15 +540,15 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
  * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
  * @chunk: chunk to depopulate
  * @off: offset to the area to depopulate
- * @size: size of the area to depopulate
+ * @size: size of the area to depopulate in bytes
  * @flush: whether to flush cache and tlb or not
  *
  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
  * from @chunk. If @flush is true, vcache is flushed before unmapping
  * and tlb after.
  */
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, size_t off,
-				  size_t size, bool flush)
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
+				  bool flush)
 {
 	int page_start = PFN_DOWN(off);
 	int page_end = PFN_UP(off + size);
@@ -617,7 +617,7 @@ static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
  * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
  * @chunk: chunk of interest
  * @off: offset to the area to populate
- * @size: size of the area to populate
+ * @size: size of the area to populate in bytes
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
  * @chunk. The area is cleared on return.
@@ -707,7 +707,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
 
 /**
  * __alloc_percpu - allocate percpu area
- * @size: size of area to allocate
+ * @size: size of area to allocate in bytes
  * @align: alignment of area (max PAGE_SIZE)
  *
  * Allocate percpu area of @size bytes aligned at @align. Might
@@ -819,6 +819,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * pcpu_setup_static - initialize kernel static percpu area
  * @populate_pte_fn: callback to allocate pagetable
  * @pages: num_possible_cpus() * PFN_UP(cpu_size) pages
+ * @cpu_size: the size of static percpu area in bytes
  *
  * Initialize kernel static percpu area. The caller should allocate
  * all the necessary pages and pass them in @pages.
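As documented in the __alloc_percpu() hunk above, @size is a byte count and @align is capped at PAGE_SIZE. A hedged usage sketch, not part of this patch: the struct, variable names, and module boilerplate below are illustrative only, written against the interface of this era where __alloc_percpu() returns a plain void pointer.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* illustrative payload type; any byte size within allocator limits works */
struct my_stats {
	unsigned long packets;
	unsigned long bytes;
};

static struct my_stats *my_stats;	/* base pointer of the percpu area */

/* per-CPU access via per_cpu_ptr(); get_cpu()/put_cpu() pin the CPU */
static void my_stats_account(unsigned int len)
{
	struct my_stats *s = per_cpu_ptr(my_stats, get_cpu());

	s->packets++;
	s->bytes += len;
	put_cpu();
}

static int __init my_stats_init(void)
{
	/* @size is in bytes, @align must not exceed PAGE_SIZE */
	my_stats = __alloc_percpu(sizeof(struct my_stats),
				  __alignof__(struct my_stats));
	if (!my_stats)
		return -ENOMEM;

	my_stats_account(0);	/* touch the area once to exercise the access path */
	return 0;
}

static void __exit my_stats_exit(void)
{
	free_percpu(my_stats);
}

module_init(my_stats_init);
module_exit(my_stats_exit);
MODULE_LICENSE("GPL");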