author		Ingo Molnar <mingo@elte.hu>	2009-02-24 15:52:45 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-24 15:52:45 -0500
commit		0edcf8d6926f4038443dbc24e319530177ca0353 (patch)
tree		6010af62f73d01ab673d5106f310eaf4f4228e32 /include
parent		87b203079ed949de52f0d92aeae20e5e0116c12f (diff)
parent		40150d37be7f7949b2ec07d511244da856647d84 (diff)
Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu
Conflicts:
	arch/x86/include/asm/pgtable.h
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bootmem.h	36
-rw-r--r--	include/linux/percpu.h	100
-rw-r--r--	include/linux/vmalloc.h	4
3 files changed, 96 insertions(+), 44 deletions(-)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 				unsigned long physaddr,
 				unsigned long size,
 				int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-				     unsigned long align,
-				     unsigned long goal);
 extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-				 unsigned long align,
-				 unsigned long goal);
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+				     unsigned long align,
+				     unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
 				  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
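Usage note: with the CONFIG_HAVE_ARCH_BOOTMEM_NODE guard gone, reserve_bootmem() and the alloc_bootmem*() wrappers are declared unconditionally for every architecture. A minimal sketch of early-boot use of the panicking and non-panicking variants; the function and variable names here are illustrative, not part of this commit:

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical early-boot setup.  The plain alloc_bootmem*() calls
 * panic on failure, so their result needs no NULL check; the
 * *_nopanic() variants return NULL instead. */
static unsigned long *required_table __initdata;
static void *optional_buf __initdata;

static void __init example_bootmem_setup(void)
{
	required_table = alloc_bootmem(1024 * sizeof(unsigned long));

	optional_buf = alloc_bootmem_pages_nopanic(PAGE_SIZE);
	if (!optional_buf)
		printk(KERN_INFO "example: running without optional buffer\n");
}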
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3577ffd90d45..910beb0abea2 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -76,52 +76,98 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk if arch is manually allocating and mapping
+ * it for faster access (as a part of large page mapping for example).
+ * Note that dynamic percpu allocator covers both static and dynamic
+ * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
+ *
+ * On typical configuration with modules, the following values leave
+ * about 8k of free space on the first chunk after boot on both x86_32
+ * and 64 when module support is enabled.  When module support is
+ * disabled, it's much tighter.
+ */
+#ifndef PERCPU_DYNAMIC_RESERVE
+#  if BITS_PER_LONG > 32
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    endif
+#  else
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
+#    endif
+#  endif
+#endif	/* PERCPU_DYNAMIC_RESERVE */
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+					size_t static_size, size_t unit_size,
+					size_t free_size, void *base_addr,
+					pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated.  Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu)						\
-({									\
-	struct percpu_data *__p = __percpu_disguise(ptr);		\
-	(__typeof__(ptr))__p->ptrs[(cpu)];				\
+
+#define per_cpu_ptr(ptr, cpu)						\
+({									\
+	struct percpu_data *__p = __percpu_disguise(ptr);		\
+	(__typeof__(ptr))__p->ptrs[(cpu)];				\
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif	/* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > __alignof__(unsigned long long));
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp)	percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr)	percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
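Usage note: for callers, the upshot is that percpu_alloc()/percpu_free() and the cpumask variants are gone; alloc_percpu(), free_percpu() and per_cpu_ptr() are the whole interface, identical whether or not the architecture selects CONFIG_HAVE_DYNAMIC_PER_CPU_AREA. A hedged sketch of a typical user; the stats structure and function names are made up for illustration:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/smp.h>

/* Hypothetical per-cpu statistics object. */
struct example_stats {
	unsigned long events;
};

static struct example_stats *example_stats;

static int __init example_stats_init(void)
{
	/* Typed allocation; alignment now comes from __alignof__(type). */
	example_stats = alloc_percpu(struct example_stats);
	return example_stats ? 0 : -ENOMEM;
}

/* Non-atomic access to the local CPU's copy, bracketed by
 * get_cpu()/put_cpu() as the header comment advises. */
static void example_stats_hit(void)
{
	per_cpu_ptr(example_stats, get_cpu())->events++;
	put_cpu();
}

static unsigned long example_stats_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(example_stats, cpu)->events;
	return sum;
}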
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06a..a43ebec3a7b9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+				    pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */
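Usage note: these vmalloc additions serve the new percpu first-chunk code: vm_area_register_early() claims address space before vmalloc_init() runs, and the _noflush mapping helpers let the caller batch cache/TLB maintenance instead of flushing on every call. A sketch of the intended pattern, under the assumption that the caller supplies the pages; the static vm_struct and function name are illustrative, not code from this commit:

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/cacheflush.h>

/* Must have static storage: it is registered before the vmalloc
 * allocator itself is initialized. */
static struct vm_struct example_vm;

static void __init example_early_map(struct page **pages, int nr_pages)
{
	unsigned long addr;

	example_vm.flags = VM_ALLOC;
	example_vm.size = nr_pages << PAGE_SHIFT;
	vm_area_register_early(&example_vm, PAGE_SIZE);
	addr = (unsigned long)example_vm.addr;

	/* Map all pages in one go; no implicit flush per call. */
	map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
				 PAGE_KERNEL, pages);

	/* The caller owns the (single) cache flush afterwards. */
	flush_cache_vmap(addr, addr + (nr_pages << PAGE_SHIFT));
}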