author		Tejun Heo <tj@kernel.org>	2009-08-14 02:00:51 -0400
committer	Tejun Heo <tj@kernel.org>	2009-08-14 02:00:51 -0400
commit		bba174f5e03a40a4ab1c63a2272ea5530b98a067 (patch)
tree		ea3607d0730cf4a0b938b4bc7574ae02dc138792 /mm
parent		fb435d5233f8b6f9b93c11d6304d8e98fed03234 (diff)
percpu: add chunk->base_addr
The only thing the percpu allocator wants to know about a vmalloc area
is its base address.  Instead of requiring chunk->vm, add
chunk->base_addr, which contains the necessary value.  This simplifies
the code a bit and makes the dummy first_vm unnecessary.  This change
will make it easier to allow a chunk to be mapped by multiple vms.
Signed-off-by: Tejun Heo <tj@kernel.org>
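
The mechanical shape of the change is small: every place that previously
dereferenced chunk->vm->addr now reads a cached pointer instead, so a chunk no
longer has to be backed by exactly one vm_struct.  As a rough illustration only
(a hypothetical userspace-style sketch, not kernel code; chunk_addr() and
cpu_unit_offset are made-up names mirroring pcpu_chunk_addr() in the diff
below):

#include <stddef.h>

struct chunk {
	void	*base_addr;	/* what this patch adds: cached base address */
	/* ... allocation map, size hints, etc. ... */
};

/* compute the address of offset @off within one CPU's unit */
static void *chunk_addr(const struct chunk *chunk,
			size_t cpu_unit_offset, size_t off)
{
	/* only the base address is needed; no chunk->vm->addr dereference */
	return (char *)chunk->base_addr + cpu_unit_offset + off;
}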
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 653b02c40200..548624309f83 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -94,10 +94,11 @@ struct pcpu_chunk {
 	struct list_head	list;		/* linked to pcpu_slot lists */
 	int			free_size;	/* free bytes in the chunk */
 	int			contig_hint;	/* max contiguous size hint */
-	struct vm_struct	*vm;		/* mapped vmalloc region */
+	void			*base_addr;	/* base address of this chunk */
 	int			map_used;	/* # of map entries used */
 	int			map_alloc;	/* # of map entries allocated */
 	int			*map;		/* allocation map */
+	struct vm_struct	*vm;		/* mapped vmalloc region */
 	bool			immutable;	/* no [de]population allowed */
 	unsigned long		populated[];	/* populated bitmap */
 };
@@ -196,7 +197,7 @@ static int pcpu_page_idx(unsigned int cpu, int page_idx)
 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 				     unsigned int cpu, int page_idx)
 {
-	return (unsigned long)chunk->vm->addr + pcpu_unit_offsets[cpu] +
+	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 		(page_idx << PAGE_SHIFT);
 }
 
@@ -324,7 +325,7 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  */
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
-	void *first_start = pcpu_first_chunk->vm->addr;
+	void *first_start = pcpu_first_chunk->base_addr;
 
 	/* is it in the first chunk? */
 	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
@@ -1014,6 +1015,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
 	INIT_LIST_HEAD(&chunk->list);
 	chunk->free_size = pcpu_unit_size;
 	chunk->contig_hint = pcpu_unit_size;
+	chunk->base_addr = chunk->vm->addr;
 
 	return chunk;
 }
@@ -1103,8 +1105,8 @@ area_found:
 
 	mutex_unlock(&pcpu_alloc_mutex);
 
-	/* return address relative to unit0 */
-	return __addr_to_pcpu_ptr(chunk->vm->addr + off);
+	/* return address relative to base address */
+	return __addr_to_pcpu_ptr(chunk->base_addr + off);
 
 fail_unlock:
 	spin_unlock_irq(&pcpu_lock);
@@ -1213,7 +1215,7 @@ void free_percpu(void *ptr)
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	chunk = pcpu_chunk_addr_search(addr);
-	off = addr - chunk->vm->addr;
+	off = addr - chunk->base_addr;
 
 	pcpu_free_area(chunk, off);
 
@@ -1565,7 +1567,6 @@ static void pcpu_dump_alloc_info(const char *lvl,
 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 				  void *base_addr)
 {
-	static struct vm_struct first_vm;
 	static int smap[2], dmap[2];
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
@@ -1629,10 +1630,6 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
 
-	first_vm.flags = VM_ALLOC;
-	first_vm.size = pcpu_chunk_size;
-	first_vm.addr = base_addr;
-
 	/*
 	 * Allocate chunk slots.  The additional last slot is for
 	 * empty chunks.
@@ -1651,7 +1648,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 */
 	schunk = alloc_bootmem(pcpu_chunk_struct_size);
 	INIT_LIST_HEAD(&schunk->list);
-	schunk->vm = &first_vm;
+	schunk->base_addr = base_addr;
 	schunk->map = smap;
 	schunk->map_alloc = ARRAY_SIZE(smap);
 	schunk->immutable = true;
@@ -1675,7 +1672,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	if (dyn_size) {
 		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
 		INIT_LIST_HEAD(&dchunk->list);
-		dchunk->vm = &first_vm;
+		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
 		dchunk->map_alloc = ARRAY_SIZE(dmap);
 		dchunk->immutable = true;
@@ -1691,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
 	/* we're done */
-	pcpu_base_addr = schunk->vm->addr;
+	pcpu_base_addr = base_addr;
 	return 0;
 }
 