author    Tejun Heo <tj@kernel.org>    2010-04-09 05:57:01 -0400
committer Tejun Heo <tj@kernel.org>    2010-05-01 02:30:50 -0400
commit    88999a898b565960690f18e4a13a1e8a9fa4dfef (patch)
tree      41184a2e2e5f1b29e07c4577ee9aa74242b563bc /mm
parent    6081089fd6f216b0eb8849205ad0c350cd5ed9bc (diff)
percpu: misc preparations for nommu support
Make the following misc preparations for percpu nommu support:

* Remove references to vmalloc in common comments as nommu percpu
  won't use it.

* Rename chunk->vms to chunk->data and make it void *.  Its use is
  determined by the chunk management implementation.

* Relocate utility functions and add __maybe_unused to functions
  which might not be used by different chunk management
  implementations.

This patch doesn't cause any functional change.  It is to allow an
alternate chunk management implementation for percpu nommu support.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Cc: Graff Yang <graff.yang@gmail.com>
Cc: Sonic Zhang <sonic.adi@gmail.com>
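For context on the __maybe_unused annotation this patch adds: it expands to __attribute__((__unused__)) (see the kernel's compiler headers) and tells the compiler that a symbol may legitimately go unreferenced, so -Wunused-function stays quiet when a given chunk-management backend does not use a helper. A minimal standalone sketch, with an illustrative helper name not taken from the patch:

#include <stdio.h>

/* mirrors the kernel's definition of __maybe_unused */
#define __maybe_unused __attribute__((__unused__))

/* may be unreferenced in some configurations; the attribute keeps
 * -Wall -Wunused-function from warning about it */
static int __maybe_unused double_it(int x)
{
	return x * 2;
}

int main(void)
{
#ifdef USE_DOUBLE_IT
	printf("%d\n", double_it(21));
#endif
	return 0;
}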
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c | 111
1 file changed, 56 insertions(+), 55 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 105f171aad29..b403d7c02c67 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1,5 +1,5 @@
 /*
- * linux/mm/percpu.c - percpu memory allocator
+ * mm/percpu.c - percpu memory allocator
  *
  * Copyright (C) 2009 SUSE Linux Products GmbH
  * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
@@ -7,14 +7,13 @@
  * This file is released under the GPLv2.
  *
  * This is percpu allocator which can handle both static and dynamic
- * areas.  Percpu areas are allocated in chunks in vmalloc area.  Each
- * chunk is consisted of boot-time determined number of units and the
- * first chunk is used for static percpu variables in the kernel image
+ * areas.  Percpu areas are allocated in chunks.  Each chunk is
+ * consisted of boot-time determined number of units and the first
+ * chunk is used for static percpu variables in the kernel image
  * (special boot time alloc/init handling necessary as these areas
  * need to be brought up before allocation services are running).
  * Unit grows as necessary and all units grow or shrink in unison.
- * When a chunk is filled up, another chunk is allocated.  ie. in
- * vmalloc area
+ * When a chunk is filled up, another chunk is allocated.
  *
  * c0                           c1                         c2
  * -------------------          -------------------        ------------
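To make the unit layout above concrete: each chunk covers the same boot-time-determined number of units, and a given CPU's copy of a percpu object sits at a fixed offset inside that CPU's unit. A toy calculation under assumed numbers (4 CPUs mapped one-per-unit, 64KB units; none of these values come from the patch):

#include <stdio.h>

#define UNIT_SIZE	(64 * 1024)	/* assumed per-unit size */
#define NR_UNITS	4		/* assumed: one unit per CPU */

static unsigned long unit_addr(unsigned long chunk_base,
			       unsigned int cpu, unsigned long off)
{
	/* each CPU's copy lives at its unit's offset within the chunk */
	return chunk_base + (unsigned long)cpu * UNIT_SIZE + off;
}

int main(void)
{
	unsigned long base = 0x100000;	/* pretend chunk base address */

	for (unsigned int cpu = 0; cpu < NR_UNITS; cpu++)
		printf("cpu%u copy of offset 0x40: %#lx\n",
		       cpu, unit_addr(base, cpu, 0x40));
	return 0;
}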
@@ -99,7 +98,7 @@ struct pcpu_chunk {
 	int			map_used;	/* # of map entries used */
 	int			map_alloc;	/* # of map entries allocated */
 	int			*map;		/* allocation map */
-	struct vm_struct	**vms;		/* mapped vmalloc regions */
+	void			*data;		/* chunk data */
 	bool			immutable;	/* no [de]population allowed */
 	unsigned long		populated[];	/* populated bitmap */
 };
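The void *data field is the hook that lets each chunk-management backend keep its own bookkeeping without the core allocator knowing the type. A minimal sketch of that opaque-handle pattern; everything here except the vms/data names is hypothetical:

struct chunk {
	void *data;		/* opaque, owned by the active backend */
};

struct vm_region { int dummy; };	/* stand-in for struct vm_struct */

/* a vmalloc-style backend stashes its region array in ->data ... */
static void backend_attach(struct chunk *c, struct vm_region **vms)
{
	c->data = vms;		/* any object pointer converts to void * */
}

/* ... and recovers it at teardown; void * converts back without a cast */
static struct vm_region **backend_regions(struct chunk *c)
{
	return c->data;
}

int main(void)
{
	static struct vm_region r;
	static struct vm_region *vms[1] = { &r };
	struct chunk c;

	backend_attach(&c, vms);
	return backend_regions(&c) == vms ? 0 : 1;
}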
@@ -213,13 +212,25 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 	return pcpu_size_to_slot(chunk->free_size);
 }
 
-static int pcpu_page_idx(unsigned int cpu, int page_idx)
+/* set the pointer to a chunk in a page struct */
+static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
+{
+	page->index = (unsigned long)pcpu;
+}
+
+/* obtain pointer to a chunk from a page struct */
+static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
+{
+	return (struct pcpu_chunk *)page->index;
+}
+
+static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 {
 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 }
 
-static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
-				     unsigned int cpu, int page_idx)
+static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
+						    unsigned int cpu, int page_idx)
 {
 	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 		(page_idx << PAGE_SHIFT);
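The two relocated helpers reuse the page->index field of the chunk's pages as a back-pointer from a page to its owning chunk, which is what pcpu_chunk_addr_search() relies on further down. A standalone sketch of the same trick with toy types, since the real struct page lives in the kernel:

#include <assert.h>

/* toy stand-ins for the kernel types */
struct page {
	unsigned long index;	/* idle for kernel-internal pages */
};

struct pcpu_chunk_stub {
	int id;
};

/* stash a chunk pointer in the page, as the patch does via page->index */
static void set_page_chunk(struct page *page, struct pcpu_chunk_stub *chunk)
{
	page->index = (unsigned long)chunk;
}

static struct pcpu_chunk_stub *get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk_stub *)page->index;
}

int main(void)
{
	struct pcpu_chunk_stub chunk = { .id = 7 };
	struct page page;

	set_page_chunk(&page, &chunk);
	assert(get_page_chunk(&page)->id == 7);
	return 0;
}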
@@ -234,25 +245,15 @@ static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
 	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
 }
 
-/* set the pointer to a chunk in a page struct */
-static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
-{
-	page->index = (unsigned long)pcpu;
-}
-
-/* obtain pointer to a chunk from a page struct */
-static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
-{
-	return (struct pcpu_chunk *)page->index;
-}
-
-static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
+					   int *rs, int *re, int end)
 {
 	*rs = find_next_zero_bit(chunk->populated, end, *rs);
 	*re = find_next_bit(chunk->populated, end, *rs + 1);
 }
 
-static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
+static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
+					 int *rs, int *re, int end)
 {
 	*rs = find_next_bit(chunk->populated, end, *rs);
 	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
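pcpu_next_unpop() and pcpu_next_pop() advance the pair [*rs, *re) to the next run of unpopulated (or populated) pages in chunk->populated, so callers walk the bitmap run by run rather than bit by bit. A self-contained sketch of that run-walking pattern, using a simplified stand-in for the kernel's find_next_bit()/find_next_zero_bit():

#include <stdio.h>

#define NBITS 16

/* simplified stand-in for the kernel's find_next_bit() family */
static int next_bit(unsigned map, int size, int from, int want)
{
	for (int i = from; i < size; i++)
		if (((map >> i) & 1) == want)
			return i;
	return size;
}

/* advance [*rs, *re) to the next populated run, as pcpu_next_pop() does */
static void next_pop(unsigned map, int *rs, int *re, int end)
{
	*rs = next_bit(map, end, *rs, 1);
	*re = next_bit(map, end, *rs + 1, 0);
}

int main(void)
{
	unsigned populated = 0x0f3c;	/* pages 2-5 and 8-11 populated */
	int rs, re;

	/* walk populated runs; restart the search just past each run */
	for (rs = 0, next_pop(populated, &rs, &re, NBITS);
	     rs < re;
	     rs = re + 1, next_pop(populated, &rs, &re, NBITS))
		printf("populated run: pages [%d, %d)\n", rs, re);
	return 0;
}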
@@ -341,34 +342,6 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 }
 
 /**
- * pcpu_chunk_addr_search - determine chunk containing specified address
- * @addr: address for which the chunk needs to be determined.
- *
- * RETURNS:
- * The address of the found chunk.
- */
-static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
-{
-	/* is it in the first chunk? */
-	if (pcpu_addr_in_first_chunk(addr)) {
-		/* is it in the reserved area? */
-		if (pcpu_addr_in_reserved_chunk(addr))
-			return pcpu_reserved_chunk;
-		return pcpu_first_chunk;
-	}
-
-	/*
-	 * The address is relative to unit0 which might be unused and
-	 * thus unmapped.  Offset the address to the unit space of the
-	 * current processor before looking it up in the vmalloc
-	 * space.  Note that any possible cpu id can be used here, so
-	 * there's no need to worry about preemption or cpu hotplug.
-	 */
-	addr += pcpu_unit_offsets[raw_smp_processor_id()];
-	return pcpu_get_page_chunk(vmalloc_to_page(addr));
-}
-
-/**
  * pcpu_need_to_extend - determine whether chunk area map needs to be extended
  * @chunk: chunk of interest
  *
@@ -1062,8 +1035,8 @@ err_free:
 
 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 {
-	if (chunk && chunk->vms)
-		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
+	if (chunk && chunk->data)
+		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
 	pcpu_free_chunk(chunk);
 }
 
@@ -1083,12 +1056,40 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
 		return NULL;
 	}
 
-	chunk->vms = vms;
+	chunk->data = vms;
 	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
 	return chunk;
 }
 
 /**
+ * pcpu_chunk_addr_search - determine chunk containing specified address
+ * @addr: address for which the chunk needs to be determined.
+ *
+ * RETURNS:
+ * The address of the found chunk.
+ */
+static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
+{
+	/* is it in the first chunk? */
+	if (pcpu_addr_in_first_chunk(addr)) {
+		/* is it in the reserved area? */
+		if (pcpu_addr_in_reserved_chunk(addr))
+			return pcpu_reserved_chunk;
+		return pcpu_first_chunk;
+	}
+
+	/*
+	 * The address is relative to unit0 which might be unused and
+	 * thus unmapped.  Offset the address to the unit space of the
+	 * current processor before looking it up in the vmalloc
+	 * space.  Note that any possible cpu id can be used here, so
+	 * there's no need to worry about preemption or cpu hotplug.
+	 */
+	addr += pcpu_unit_offsets[raw_smp_processor_id()];
+	return pcpu_get_page_chunk(vmalloc_to_page(addr));
+}
+
+/**
  * pcpu_alloc - the percpu allocator
  * @size: size of area to allocate in bytes
  * @align: alignment of area (max PAGE_SIZE)
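The relocated pcpu_chunk_addr_search() resolves an address in two tiers: static and reserved addresses are matched against the first chunk directly, and everything else is offset into the current CPU's unit so the page back-pointer installed by pcpu_set_page_chunk() can answer. A toy re-creation of that decision flow, with every kernel global replaced by a hypothetical stand-in and made-up address ranges:

#include <stdio.h>

struct chunk { const char *name; };

static struct chunk first_chunk    = { "first"    };
static struct chunk reserved_chunk = { "reserved" };
static struct chunk dyn_chunk      = { "dynamic"  };

/* illustrative ranges only */
static int in_first_chunk(unsigned long addr)
{
	return addr >= 0x1000 && addr < 0x3000;
}

static int in_reserved_chunk(unsigned long addr)
{
	return addr >= 0x1000 && addr < 0x2000;
}

/* stands in for pcpu_get_page_chunk(vmalloc_to_page(addr)) */
static struct chunk *page_chunk(unsigned long addr)
{
	(void)addr;
	return &dyn_chunk;
}

static struct chunk *chunk_addr_search(unsigned long addr,
				       unsigned long unit_off)
{
	if (in_first_chunk(addr))
		return in_reserved_chunk(addr) ? &reserved_chunk
					       : &first_chunk;
	/* offset the unit0-relative addr into a mapped unit, then use
	 * the back-pointer stored in the page */
	return page_chunk(addr + unit_off);
}

int main(void)
{
	printf("%s\n", chunk_addr_search(0x1800, 0)->name);	/* reserved */
	printf("%s\n", chunk_addr_search(0x2800, 0)->name);	/* first */
	printf("%s\n", chunk_addr_search(0x9000, 0)->name);	/* dynamic */
	return 0;
}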