Diffstat (limited to 'mm/percpu.c')
 -rw-r--r--   mm/percpu.c   85
 1 file changed, 42 insertions, 43 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index ef8e169b7731..f1d0e905850c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -164,39 +164,41 @@ static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
 }
 
 /**
- * pcpu_realloc - versatile realloc
- * @p: the current pointer (can be NULL for new allocations)
- * @size: the current size in bytes (can be 0 for new allocations)
- * @new_size: the wanted new size in bytes (can be 0 for free)
+ * pcpu_mem_alloc - allocate memory
+ * @size: bytes to allocate
  *
- * More robust realloc which can be used to allocate, resize or free a
- * memory area of arbitrary size. If the needed size goes over
- * PAGE_SIZE, kernel VM is used.
+ * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
+ * kzalloc() is used; otherwise, vmalloc() is used. The returned
+ * memory is always zeroed.
  *
  * RETURNS:
- * The new pointer on success, NULL on failure.
+ * Pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_realloc(void *p, size_t size, size_t new_size)
+static void *pcpu_mem_alloc(size_t size)
 {
-	void *new;
-
-	if (new_size <= PAGE_SIZE)
-		new = kmalloc(new_size, GFP_KERNEL);
-	else
-		new = vmalloc(new_size);
-	if (new_size && !new)
-		return NULL;
-
-	memcpy(new, p, min(size, new_size));
-	if (new_size > size)
-		memset(new + size, 0, new_size - size);
+	if (size <= PAGE_SIZE)
+		return kzalloc(size, GFP_KERNEL);
+	else {
+		void *ptr = vmalloc(size);
+		if (ptr)
+			memset(ptr, 0, size);
+		return ptr;
+	}
+}
 
+/**
+ * pcpu_mem_free - free memory
+ * @ptr: memory to free
+ * @size: size of the area
+ *
+ * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
+ */
+static void pcpu_mem_free(void *ptr, size_t size)
+{
 	if (size <= PAGE_SIZE)
-		kfree(p);
+		kfree(ptr);
 	else
-		vfree(p);
-
-	return new;
+		vfree(ptr);
 }
 
 /**
@@ -331,29 +333,27 @@ static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
 	if (chunk->map_alloc < target) {
 		int new_alloc;
 		int *new;
+		size_t size;
 
 		new_alloc = PCPU_DFL_MAP_ALLOC;
 		while (new_alloc < target)
 			new_alloc *= 2;
 
-		if (chunk->map_alloc < PCPU_DFL_MAP_ALLOC) {
-			/*
-			 * map_alloc smaller than the default size
-			 * indicates that the chunk is one of the
-			 * first chunks and still using static map.
-			 * Allocate a dynamic one and copy.
-			 */
-			new = pcpu_realloc(NULL, 0, new_alloc * sizeof(new[0]));
-			if (new)
-				memcpy(new, chunk->map,
-				       chunk->map_alloc * sizeof(new[0]));
-		} else
-			new = pcpu_realloc(chunk->map,
-					   chunk->map_alloc * sizeof(new[0]),
-					   new_alloc * sizeof(new[0]));
+		new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
 		if (!new)
 			return -ENOMEM;
 
+		size = chunk->map_alloc * sizeof(chunk->map[0]);
+		memcpy(new, chunk->map, size);
+
+		/*
+		 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the
+		 * chunk is one of the first chunks and still using
+		 * static map.
+		 */
+		if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
+			pcpu_mem_free(chunk->map, size);
+
 		chunk->map_alloc = new_alloc;
 		chunk->map = new;
 	}
@@ -696,7 +696,7 @@ static void free_pcpu_chunk(struct pcpu_chunk *chunk)
 		return;
 	if (chunk->vm)
 		free_vm_area(chunk->vm);
-	pcpu_realloc(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]), 0);
+	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 	kfree(chunk);
 }
 
@@ -708,8 +708,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
 	if (!chunk)
 		return NULL;
 
-	chunk->map = pcpu_realloc(NULL, 0,
-				  PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 	chunk->map[chunk->map_used++] = pcpu_unit_size;
 	chunk->page = chunk->page_ar;
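
The patch boils down to one pattern: allocate zeroed memory from the slab allocator when the request fits in a page, fall back to the page-based allocator otherwise, and let the free routine pick the matching release path from the size the caller hands back (which is why pcpu_mem_free() takes a size argument). The userspace sketch below is illustration only, not kernel code: mem_alloc() and mem_free() are made-up names, and calloc()/free() plus mmap()/munmap() merely stand in for kzalloc()/kfree() and vmalloc()/vfree().

/*
 * Minimal userspace analogue of the pcpu_mem_alloc()/pcpu_mem_free()
 * pattern introduced above: small requests use the general-purpose
 * allocator, large ones use a page-based allocator, and the free side
 * chooses the matching release path from the size passed back in.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

static void *mem_alloc(size_t size)
{
	if (size <= (size_t)sysconf(_SC_PAGESIZE))
		return calloc(1, size);		/* zeroed, like kzalloc() */

	/* anonymous mappings are zero-filled, like the vmalloc()+memset() path */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return ptr == MAP_FAILED ? NULL : ptr;
}

static void mem_free(void *ptr, size_t size)
{
	if (!ptr)
		return;
	if (size <= (size_t)sysconf(_SC_PAGESIZE))
		free(ptr);			/* pairs with calloc() */
	else
		munmap(ptr, size);		/* pairs with mmap() */
}

int main(void)
{
	/* grow an int map the way pcpu_split_block() now does: alloc, copy, free old */
	size_t old_n = 4, new_n = 16;
	int *map = mem_alloc(old_n * sizeof(*map));
	int *bigger = mem_alloc(new_n * sizeof(*bigger));

	if (!map || !bigger)
		return 1;
	memcpy(bigger, map, old_n * sizeof(*map));
	mem_free(map, old_n * sizeof(*map));

	printf("resized map from %zu to %zu slots\n", old_n, new_n);
	mem_free(bigger, new_n * sizeof(*bigger));
	return 0;
}

The main() body imitates the new pcpu_split_block() flow: allocate the larger map, copy the old contents across, then release the old buffer through the size-aware free helper.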