| author | Tejun Heo <tj@kernel.org> | 2009-02-23 21:57:21 -0500 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2009-02-23 21:57:21 -0500 |
| commit | 8d408b4be37bc49c9086531f2ebe411cf5731746 (patch) | |
| tree | 559a532a04b24dd164ec2c72ab545b30a5a604ef /include | |
| parent | d9b55eeb1d55ef2dc5a4fdbff9604c2c68cb5649 (diff) | |
percpu: give more latitude to arch specific first chunk initialization
Impact: more latitude for first percpu chunk allocation
The first percpu chunk serves the kernel static percpu area and may or
may not contain extra room for further dynamic allocation.
Initialization of the first chunk needs to be done before normal
memory allocation service is up, so it has its own init path -
pcpu_setup_static().
It seems archs need more latitude while initializing the first chunk,
for example to take advantage of large page mapping. This patch makes
the following changes to allow this.
* Define PERCPU_DYNAMIC_RESERVE to give the arch a hint about how much
space to reserve in the first chunk for further dynamic allocation.
* Rename pcpu_setup_static() to pcpu_setup_first_chunk().
* Make pcpu_setup_first_chunk() much more flexible by fetching each page
pointer through a callback and adding optional @unit_size, @free_size
and @base_addr arguments, which allow archs to selectively take over
parts of the chunk initialization to their liking (a call sketch follows
below).
Signed-off-by: Tejun Heo <tj@kernel.org>
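For illustration, here is a minimal sketch of what an arch-side caller of the new interface could look like, e.g. from a setup_per_cpu_areas() override. Everything prefixed pcpu4k_ is a hypothetical stand-in and is not part of this patch; an arch that sticks with plain 4k pages would pass 0/NULL for the optional @unit_size, @free_size and @base_addr arguments and keep the generic defaults:

```c
/* Illustration only -- all pcpu4k_* names are made up for this sketch. */
#include <linux/percpu.h>
#include <asm/sections.h>		/* __per_cpu_start, __per_cpu_end */

/* pages previously allocated for each cpu, indexed [cpu][pageno] */
static struct page **pcpu4k_pages[NR_CPUS];

static struct page *pcpu4k_get_page(unsigned int cpu, int pageno)
{
	return pcpu4k_pages[cpu][pageno];
}

static void pcpu4k_populate_pte(unsigned long addr)
{
	/* arch hook: make sure a PTE backing @addr is instantiated */
}

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;

	/*
	 * 0 for @unit_size and @free_size and NULL for @base_addr keep
	 * the generic defaults; the arch only supplies the page lookup
	 * and the PTE population hook.
	 */
	pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
			       0, 0, NULL, pcpu4k_populate_pte);
}
```

An arch that maps the first chunk itself (for example with large pages) would presumably pass its own @unit_size, the leftover @free_size and the premapped @base_addr instead, taking over those parts of the setup.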
Diffstat (limited to 'include')
-rw-r--r--  include/linux/percpu.h | 39
1 file changed, 37 insertions(+), 2 deletions(-)
```diff
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 18080995ff3e..910beb0abea2 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -78,12 +78,47 @@
 
 #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk if arch is manually allocating and mapping
+ * it for faster access (as a part of large page mapping for example).
+ * Note that dynamic percpu allocator covers both static and dynamic
+ * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
+ *
+ * On typical configuration with modules, the following values leave
+ * about 8k of free space on the first chunk after boot on both x86_32
+ * and 64 when module support is enabled.  When module support is
+ * disabled, it's much tighter.
+ */
+#ifndef PERCPU_DYNAMIC_RESERVE
+#  if BITS_PER_LONG > 32
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    endif
+#  else
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
+#    endif
+#  endif
+#endif	/* PERCPU_DYNAMIC_RESERVE */
+
 extern void *pcpu_base_addr;
 
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
 typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
 
-extern size_t __init pcpu_setup_static(pcpu_populate_pte_fn_t populate_pte_fn,
-				       struct page **pages, size_t cpu_size);
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+				size_t static_size, size_t unit_size,
+				size_t free_size, void *base_addr,
+				pcpu_populate_pte_fn_t populate_pte_fn);
+
 /*
  * Use this to get to a cpu's version of the per-cpu object
  * dynamically allocated.  Non-atomic access to the current CPU's
```
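Since the PERCPU_DYNAMIC_RESERVE block above is guarded by #ifndef, an arch that manually allocates and maps the first chunk can reserve more dynamic room simply by defining the macro itself before this header is processed, typically from its own asm/percpu.h. With 4k pages the generic defaults work out to 24k/16k on 64-bit and 16k/8k on 32-bit (with/without modules). A sketch of such an override follows; the config symbol and the value are made up for illustration:

```c
/* hypothetical arch/foo/include/asm/percpu.h */
#ifdef CONFIG_FOO_LARGE_PAGE_PERCPU		/* made-up config symbol */
/*
 * The first chunk is large-page mapped anyway, so piggy-back a more
 * generous dynamic area on it; linux/percpu.h falls back to its
 * generic defaults only when the arch did not define this.
 */
#define PERCPU_DYNAMIC_RESERVE	(20 << PAGE_SHIFT)
#endif
```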