author     Tejun Heo <tj@kernel.org>   2009-03-06 00:33:59 -0500
committer  Tejun Heo <tj@kernel.org>   2009-03-06 00:33:59 -0500
commit     61ace7fa2fff9c4b6641c506b6b3f1a9394a1b11 (patch)
tree       214d0e04227239c31f93ba6948cf58d67cfedcee /mm/percpu.c
parent     2441d15c97d498b18f03ae9fba262ffeae42a08b (diff)
percpu: improve first chunk initial area map handling
Impact: no functional change
When the first chunk is created, its initial area map is not allocated
because kmalloc isn't online yet. The map is allocated and
initialized on the first allocation request on the chunk. This works
fine, but the scattering of initialization logic between the init
function and the allocation path is a bit confusing.
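For orientation, chunk->map here is the chunk's area map: an array of
ints in which each entry records an area's size in bytes, negative if
the area is allocated and positive if it is free; offsets are recovered
by summing absolute values (hence the abs() in the allocation loop in
the diff below). A minimal userspace sketch of that convention, with
illustrative sizes, not kernel code:

#include <stdio.h>
#include <stdlib.h>

/* Walk an area map: each entry is an area size; the sign marks it
 * allocated (-) or free (+). */
static void walk_map(const int *map, int map_used)
{
        int i, off;

        for (i = 0, off = 0; i < map_used; off += abs(map[i++]))
                printf("off %5d: %5d bytes, %s\n",
                       off, abs(map[i]), map[i] < 0 ? "allocated" : "free");
}

int main(void)
{
        /* e.g. 4096 bytes of static percpu data followed by 8192 free */
        int map[] = { -4096, 8192 };

        walk_map(map, 2);
        return 0;
}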
This patch makes the first chunk initialize and use a minimal,
statically allocated map from pcpu_setup_first_chunk(). The map
resizing path still needs to handle this case specially, but it's more
straightforward and gives more latitude to the init path. This will
ease future changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
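A minimal userspace model of the resulting scheme, not kernel code: the
structures are simplified, the PCPU_DFL_MAP_ALLOC value is assumed for
illustration, and malloc()/realloc() stand in for pcpu_realloc(). The
first chunk starts on a deliberately undersized static map; when the
map must grow, the resize path recognizes map_alloc <
PCPU_DFL_MAP_ALLOC, allocates a dynamic map, and copies the static
entries over, as in the pcpu_split_block() hunk below:

#include <stdlib.h>
#include <string.h>

#define PCPU_DFL_MAP_ALLOC 16           /* illustrative value only */

struct chunk {
        int *map;                       /* area map (see sketch above) */
        int map_alloc;                  /* number of entries allocated */
        int map_used;                   /* number of entries in use */
};

static int smap[2];                     /* static map for the first chunk */

/* Seed the first chunk: static area allocated, remainder (if any) free. */
static void first_chunk_init(struct chunk *c, int static_size, int free_size)
{
        c->map = smap;
        c->map_alloc = 2;               /* ARRAY_SIZE(smap) */
        c->map_used = 0;
        c->map[c->map_used++] = -static_size;
        if (free_size)
                c->map[c->map_used++] = free_size;
}

/* Grow the map to hold at least 'target' entries. */
static int extend_map(struct chunk *c, int target)
{
        int new_alloc = PCPU_DFL_MAP_ALLOC;
        int *new;

        while (new_alloc < target)
                new_alloc *= 2;

        if (c->map_alloc < PCPU_DFL_MAP_ALLOC) {
                /* still on the static map: allocate a dynamic one and copy */
                new = malloc(new_alloc * sizeof(new[0]));
                if (new)
                        memcpy(new, c->map, c->map_alloc * sizeof(new[0]));
        } else {
                new = realloc(c->map, new_alloc * sizeof(new[0]));
        }
        if (!new)
                return -1;              /* -ENOMEM in the kernel */

        c->map = new;
        c->map_alloc = new_alloc;
        return 0;
}

The BUILD_BUG_ON() added in the diff below enforces the invariant this
relies on: the static map must stay strictly smaller than
PCPU_DFL_MAP_ALLOC, or the resize path could not tell it apart from an
already-dynamic map.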
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c  53
1 file changed, 27 insertions(+), 26 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 9531590e6b6..503ccad091a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -93,9 +93,6 @@ static size_t pcpu_chunk_struct_size __read_mostly;
 void *pcpu_base_addr __read_mostly;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
-/* the size of kernel static area */
-static int pcpu_static_size __read_mostly;
-
 /*
  * One mutex to rule them all.
  *
@@ -316,15 +313,28 @@ static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
 
 	/* reallocation required? */
 	if (chunk->map_alloc < target) {
-		int new_alloc = chunk->map_alloc;
+		int new_alloc;
 		int *new;
 
+		new_alloc = PCPU_DFL_MAP_ALLOC;
 		while (new_alloc < target)
 			new_alloc *= 2;
 
-		new = pcpu_realloc(chunk->map,
-				   chunk->map_alloc * sizeof(new[0]),
-				   new_alloc * sizeof(new[0]));
+		if (chunk->map_alloc < PCPU_DFL_MAP_ALLOC) {
+			/*
+			 * map_alloc smaller than the default size
+			 * indicates that the chunk is one of the
+			 * first chunks and still using static map.
+			 * Allocate a dynamic one and copy.
+			 */
+			new = pcpu_realloc(NULL, 0, new_alloc * sizeof(new[0]));
+			if (new)
+				memcpy(new, chunk->map,
+				       chunk->map_alloc * sizeof(new[0]));
+		} else
+			new = pcpu_realloc(chunk->map,
+					   chunk->map_alloc * sizeof(new[0]),
+					   new_alloc * sizeof(new[0]));
 		if (!new)
 			return -ENOMEM;
 
@@ -367,22 +377,6 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 	int max_contig = 0;
 	int i, off;
 
-	/*
-	 * The static chunk initially doesn't have map attached
-	 * because kmalloc wasn't available during init. Give it one.
-	 */
-	if (unlikely(!chunk->map)) {
-		chunk->map = pcpu_realloc(NULL, 0,
-				PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
-		if (!chunk->map)
-			return -ENOMEM;
-
-		chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
-		chunk->map[chunk->map_used++] = -pcpu_static_size;
-		if (chunk->free_size)
-			chunk->map[chunk->map_used++] = chunk->free_size;
-	}
-
 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
 		bool is_last = i + 1 == chunk->map_used;
 		int head, tail;
@@ -874,12 +868,14 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 			      pcpu_populate_pte_fn_t populate_pte_fn)
 {
 	static struct vm_struct first_vm;
+	static int smap[2];
 	struct pcpu_chunk *schunk;
 	unsigned int cpu;
 	int nr_pages;
 	int err, i;
 
 	/* santiy checks */
+	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC);
 	BUG_ON(!static_size);
 	BUG_ON(!unit_size && dyn_size);
 	BUG_ON(unit_size && unit_size < static_size + dyn_size);
@@ -893,7 +889,6 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
 				PFN_UP(static_size));
 
-	pcpu_static_size = static_size;
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
@@ -912,14 +907,20 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 	schunk = alloc_bootmem(pcpu_chunk_struct_size);
 	INIT_LIST_HEAD(&schunk->list);
 	schunk->vm = &first_vm;
+	schunk->map = smap;
+	schunk->map_alloc = ARRAY_SIZE(smap);
 
 	if (dyn_size)
 		schunk->free_size = dyn_size;
 	else
-		schunk->free_size = pcpu_unit_size - pcpu_static_size;
+		schunk->free_size = pcpu_unit_size - static_size;
 
 	schunk->contig_hint = schunk->free_size;
 
+	schunk->map[schunk->map_used++] = -static_size;
+	if (schunk->free_size)
+		schunk->map[schunk->map_used++] = schunk->free_size;
+
 	/* allocate vm address */
 	first_vm.flags = VM_ALLOC;
 	first_vm.size = pcpu_chunk_size;
@@ -948,7 +949,7 @@ size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
 		*pcpu_chunk_pagep(schunk, cpu, i) = page;
 	}
 
-	BUG_ON(i < PFN_UP(pcpu_static_size));
+	BUG_ON(i < PFN_UP(static_size));
 
 	if (nr_pages < 0)
 		nr_pages = i;