author    Linus Torvalds <torvalds@linux-foundation.org>    2010-08-04 18:17:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-08-04 18:17:52 -0400
commit    ffd386a9a8273dcfa61705d0b349eebc7525ef87 (patch)
tree      3a555f07c0df5b21eb2ee49fe57f71a76d0342f7 /include
parent    03da30986793385af57eeca3296253c887b742e6 (diff)
parent    099a19d91ca429944743d51bef8fee240e94d8e3 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: allow limited allocation before slab is online
  percpu: make @dyn_size always mean min dyn_size in first chunk init functions
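The first change lets dynamic percpu allocations be served before slab is
initialized, from a small pool preallocated in the first chunk. A minimal
sketch of what an early-boot caller could now do; the early_stats_init()
hook and the counter name are hypothetical, not part of this merge:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/percpu.h>

    static u64 __percpu *early_hits;	/* hypothetical counter */

    void __init early_stats_init(void)
    {
    	/*
    	 * alloc_percpu() may now be called before slab is online; the
    	 * request is served from the preallocated early pool bounded by
    	 * PERCPU_DYNAMIC_EARLY_SLOTS/_SIZE (see the diff below).
    	 */
    	early_hits = alloc_percpu(u64);
    	if (!early_hits)
    		panic("early percpu allocation failed");
    	this_cpu_inc(*early_hits);	/* usable like any percpu pointer */
    }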
Diffstat (limited to 'include')
-rw-r--r--  include/linux/percpu.h  20
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index d3a38d687104..b8b9084527b1 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -45,6 +45,16 @@
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
 
 /*
+ * Percpu allocator can serve percpu allocations before slab is
+ * initialized which allows slab to depend on the percpu allocator.
+ * The following two parameters decide how much resource to
+ * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
+ * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ */
+#define PERCPU_DYNAMIC_EARLY_SLOTS	128
+#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
+
+/*
  * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
  * back on the first chunk for dynamic percpu allocation if arch is
  * manually allocating and mapping it for faster access (as a part of
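The new comment states an invariant (PERCPU_DYNAMIC_RESERVE must stay at
least as large as PERCPU_DYNAMIC_EARLY_SIZE) without enforcing it. A hedged
sketch of how the invariant could be checked at build time; BUILD_BUG_ON()
is real, but wiring in such a check is not part of this merge:

    #include <linux/kernel.h>
    #include <linux/percpu.h>

    static inline void pcpu_early_size_check(void)	/* hypothetical helper */
    {
    	/* Fails the build if the early pool outgrows the dynamic reserve. */
    	BUILD_BUG_ON(PERCPU_DYNAMIC_RESERVE < PERCPU_DYNAMIC_EARLY_SIZE);
    }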
@@ -104,16 +114,11 @@ extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
 							     int nr_units);
 extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
 
-extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
-				size_t reserved_size, ssize_t dyn_size,
-				size_t atom_size,
-				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);
-
 extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 					 void *base_addr);
 
 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
-extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 				size_t atom_size,
 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
 				pcpu_fc_alloc_fn_t alloc_fn,
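With @dyn_size now a plain size_t, callers pass an explicit minimum
(typically PERCPU_DYNAMIC_RESERVE) instead of an ssize_t where -1 meant
"auto". A sketch of an arch-side call, modeled on the bootmem-backed
defaults in mm/percpu.c; the my_* names are illustrative only:

    #include <linux/bootmem.h>
    #include <linux/percpu.h>

    static void * __init my_fc_alloc(unsigned int cpu, size_t size, size_t align)
    {
    	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
    }

    static void __init my_fc_free(void *ptr, size_t size)
    {
    	free_bootmem(__pa(ptr), size);
    }

    static int __init my_setup_per_cpu_areas(void)
    {
    	return pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
    				      PERCPU_DYNAMIC_RESERVE,	/* min dyn_size, not -1 */
    				      PAGE_SIZE,		/* atom_size */
    				      NULL,			/* cpu_distance_fn */
    				      my_fc_alloc, my_fc_free);
    }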
@@ -140,6 +145,7 @@ extern bool is_kernel_percpu_address(unsigned long addr);
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
 #endif
+extern void __init percpu_init_late(void);
 
 #else /* CONFIG_SMP */
 
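percpu_init_late() gives the allocator a hook to move its chunk bookkeeping
from the statically preallocated early arrays to slab-backed memory once
slab is up. A sketch of the intended boot ordering; the surrounding
mm_init() contents are assumed from context, not shown by this diff:

    /* init/main.c, sketch: percpu_init_late() must run after slab is online. */
    static void __init mm_init(void)
    {
    	mem_init();
    	kmem_cache_init();	/* slab becomes usable here */
    	percpu_init_late();	/* percpu swaps early maps for kmalloc'd ones */
    	vmalloc_init();
    }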
@@ -153,6 +159,8 @@ static inline bool is_kernel_percpu_address(unsigned long addr)
 
 static inline void __init setup_per_cpu_areas(void) { }
 
+static inline void __init percpu_init_late(void) { }
+
 static inline void *pcpu_lpage_remapped(void *kaddr)
 {
 	return NULL;