aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/mm/numa_32.c56
1 file changed, 42 insertions, 14 deletions
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index f325e6fab75b..c757c0a3b529 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -176,17 +176,31 @@ static void __init allocate_pgdat(int nid)
176} 176}
177 177
178/* 178/*
179 * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel 179 * Remap memory allocator
180 * virtual address space (KVA) is reserved and portions of nodes are mapped
181 * using it. This is to allow node-local memory to be allocated for
182 * structures that would normally require ZONE_NORMAL. The memory is
183 * allocated with alloc_remap() and callers should be prepared to allocate
184 * from the bootmem allocator instead.
185 */ 180 */
186static unsigned long node_remap_start_pfn[MAX_NUMNODES]; 181static unsigned long node_remap_start_pfn[MAX_NUMNODES];
187static void *node_remap_end_vaddr[MAX_NUMNODES]; 182static void *node_remap_end_vaddr[MAX_NUMNODES];
188static void *node_remap_alloc_vaddr[MAX_NUMNODES]; 183static void *node_remap_alloc_vaddr[MAX_NUMNODES];
189 184
185/**
186 * alloc_remap - Allocate remapped memory
187 * @nid: NUMA node to allocate memory from
188 * @size: The size of allocation
189 *
190 * Allocate @size bytes from the remap area of NUMA node @nid. The
191 * size of the remap area is predetermined by init_alloc_remap() and
192 * only the callers considered there should call this function. For
193 * more info, please read the comment on top of init_alloc_remap().
194 *
195 * The caller must be ready to handle allocation failure from this
196 * function and fall back to regular memory allocator in such cases.
197 *
198 * CONTEXT:
199 * Single CPU early boot context.
200 *
201 * RETURNS:
202 * Pointer to the allocated memory on success, %NULL on failure.
203 */
190void *alloc_remap(int nid, unsigned long size) 204void *alloc_remap(int nid, unsigned long size)
191{ 205{
192 void *allocation = node_remap_alloc_vaddr[nid]; 206 void *allocation = node_remap_alloc_vaddr[nid];
@@ -238,6 +252,28 @@ void resume_map_numa_kva(pgd_t *pgd_base)
238} 252}
239#endif 253#endif
240 254
255/**
256 * init_alloc_remap - Initialize remap allocator for a NUMA node
257 * @nid: NUMA node to initialize remap allocator for
258 *
259 * NUMA nodes may end up without any lowmem. As allocating pgdat and
260 * memmap on a different node with lowmem is inefficient, a special
261 * remap allocator is implemented which can be used by alloc_remap().
262 *
263 * For each node, the amount of memory which will be necessary for
264 * pgdat and memmap is calculated and two memory areas of the size are
265 * allocated - one in the node and the other in lowmem; then, the area
266 * in the node is remapped to the lowmem area.
267 *
268 * As pgdat and memmap must be allocated in lowmem anyway, this
269 * doesn't waste lowmem address space; however, the actual lowmem
270 * which gets remapped over is wasted. The amount shouldn't be
271 * problematic on machines on which this feature will be used.
272 *
273 * Initialization failure isn't fatal. alloc_remap() is used
274 * opportunistically and the callers will fall back to other memory
275 * allocation mechanisms on failure.
276 */
241static __init void init_alloc_remap(int nid) 277static __init void init_alloc_remap(int nid)
242{ 278{
243 unsigned long size, pfn; 279 unsigned long size, pfn;
@@ -306,14 +342,6 @@ void __init initmem_init(void)
306{ 342{
307 int nid; 343 int nid;
308 344
309 /*
310 * When mapping a NUMA machine we allocate the node_mem_map arrays
311 * from node local memory. They are then mapped directly into KVA
312 * between zone normal and vmalloc space. Calculate the size of
313 * this space and use it to adjust the boundary between ZONE_NORMAL
314 * and ZONE_HIGHMEM.
315 */
316
317 get_memcfg_numa(); 345 get_memcfg_numa();
318 numa_init_array(); 346 numa_init_array();
319 347