author    Tejun Heo <tj@kernel.org>  2010-04-09 05:57:01 -0400
committer Tejun Heo <tj@kernel.org>  2010-05-01 02:30:50 -0400
commit    6081089fd6f216b0eb8849205ad0c350cd5ed9bc (patch)
tree      da16d237ee2a48e5a739384893ed46b440fddde4 /mm/percpu.c
parent    020ec6537aa65c18e9084c568d7b94727f2026fd (diff)
percpu: reorganize chunk creation and destruction
Reorganize alloc/free_pcpu_chunk() such that chunk struct alloc/free
live in pcpu_alloc/free_chunk() and the rest in
pcpu_create/destroy_chunk().  While at it, add missing error handling
for chunk->map allocation failure.

This is to allow alternate chunk management implementation for percpu
nommu support.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Cc: Graff Yang <graff.yang@gmail.com>
Cc: Sonic Zhang <sonic.adi@gmail.com>
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	70
1 file changed, 46 insertions(+), 24 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 1aeb081f30ec..105f171aad29 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -636,6 +636,38 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 	pcpu_chunk_relocate(chunk, oslot);
 }
 
+static struct pcpu_chunk *pcpu_alloc_chunk(void)
+{
+	struct pcpu_chunk *chunk;
+
+	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+	if (!chunk->map) {
+		kfree(chunk);
+		return NULL;
+	}
+
+	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
+	chunk->map[chunk->map_used++] = pcpu_unit_size;
+
+	INIT_LIST_HEAD(&chunk->list);
+	chunk->free_size = pcpu_unit_size;
+	chunk->contig_hint = pcpu_unit_size;
+
+	return chunk;
+}
+
+static void pcpu_free_chunk(struct pcpu_chunk *chunk)
+{
+	if (!chunk)
+		return;
+	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
+	kfree(chunk);
+}
+
 /**
  * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
  * @chunk: chunk of interest
@@ -1028,41 +1060,31 @@ err_free:
 	return rc;
 }
 
-static void free_pcpu_chunk(struct pcpu_chunk *chunk)
+static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
 {
-	if (!chunk)
-		return;
-	if (chunk->vms)
+	if (chunk && chunk->vms)
 		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
-	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
-	kfree(chunk);
+	pcpu_free_chunk(chunk);
 }
 
-static struct pcpu_chunk *alloc_pcpu_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(void)
 {
 	struct pcpu_chunk *chunk;
+	struct vm_struct **vms;
 
-	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	chunk = pcpu_alloc_chunk();
 	if (!chunk)
 		return NULL;
 
-	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
-	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
-	chunk->map[chunk->map_used++] = pcpu_unit_size;
-
-	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
-				       pcpu_nr_groups, pcpu_atom_size,
-				       GFP_KERNEL);
-	if (!chunk->vms) {
-		free_pcpu_chunk(chunk);
+	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
+				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
+	if (!vms) {
+		pcpu_free_chunk(chunk);
 		return NULL;
 	}
 
-	INIT_LIST_HEAD(&chunk->list);
-	chunk->free_size = pcpu_unit_size;
-	chunk->contig_hint = pcpu_unit_size;
-	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
-
+	chunk->vms = vms;
+	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
 	return chunk;
 }
 
@@ -1155,7 +1177,7 @@ restart:
 	/* hmmm... no space left, create a new chunk */
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 
-	chunk = alloc_pcpu_chunk();
+	chunk = pcpu_create_chunk();
 	if (!chunk) {
 		err = "failed to allocate new chunk";
 		goto fail_unlock_mutex;
@@ -1267,7 +1289,7 @@ static void pcpu_reclaim(struct work_struct *work)
 
 	list_for_each_entry_safe(chunk, next, &todo, list) {
 		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
-		free_pcpu_chunk(chunk);
+		pcpu_destroy_chunk(chunk);
 	}
 
 	mutex_unlock(&pcpu_alloc_mutex);
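
The point of the split is that pcpu_alloc_chunk()/pcpu_free_chunk() carry the
generic bookkeeping (chunk struct, allocation map, list head, size hints)
while pcpu_create_chunk()/pcpu_destroy_chunk() own the address-space-specific
work, so a nommu port only needs to supply the latter pair.  Below is a rough
sketch of what such an alternate implementation might look like.  It is not
part of this patch: the chunk->data cookie field, the single-group assumption,
and the page-order math are illustrative assumptions only.

/*
 * Hypothetical nommu-style chunk management built on the helpers this
 * patch factors out.  Each chunk is backed by one physically contiguous
 * page allocation instead of vmalloc areas.  chunk->data is an assumed
 * per-chunk cookie field, not something this commit adds.
 */
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;

	chunk = pcpu_alloc_chunk();	/* generic half from this patch */
	if (!chunk)
		return NULL;

	/* one contiguous allocation covering every unit in the chunk */
	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = pages;		/* assumed cookie field */
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);		/* generic half from this patch */
}

Because the map and list setup stay in pcpu_alloc_chunk(), a vmalloc-backed
and a contiguous variant would share identical bookkeeping and error paths.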