 mm/percpu.c | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index f921fdfb5430..bb4be7435ce3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+	}
+
+	/*
+	 * Copy data and free unused parts. This should happen after all
+	 * allocations are complete; otherwise, we may end up with
+	 * overlapping groups.
+	 */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		void *ptr = areas[group];
 
 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
 			if (gi->cpu_map[i] == NR_CPUS) {
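
The hunk above splits what was a single loop into two passes: every group's area is allocated first, and only then are unused units copied out and freed, per the new comment about overlapping groups. A rough userspace sketch of the ordering concern (the bump allocator, names, and sizes below are invented for illustration; this is not percpu code): freeing a group's unused tail before the next group allocates lets that next allocation start inside the span still attributed to the earlier group.

#include <stdio.h>

#define UNIT_SIZE 64			/* nominal span reserved per group */

static char arena[4 * UNIT_SIZE];	/* fixed pool, standing in for early boot memory */
static unsigned long watermark;		/* toy bump-allocator cursor */

static void *toy_alloc(unsigned long size)
{
	void *p = &arena[watermark];

	watermark += size;
	return p;
}

/* Return the most recently allocated 'unused' bytes to the allocator. */
static void toy_free_tail(unsigned long unused)
{
	watermark -= unused;
}

int main(void)
{
	void *g0, *g1;

	/* Old ordering: free group 0's unused tail before group 1 allocates. */
	watermark = 0;
	g0 = toy_alloc(UNIT_SIZE);
	toy_free_tail(UNIT_SIZE / 2);		/* group 0 only uses half its span */
	g1 = toy_alloc(UNIT_SIZE);
	printf("interleaved free: overlap = %d\n",
	       (char *)g1 < (char *)g0 + UNIT_SIZE);	/* prints 1 */

	/* New ordering: allocate every group first, free tails afterwards. */
	watermark = 0;
	g0 = toy_alloc(UNIT_SIZE);
	g1 = toy_alloc(UNIT_SIZE);
	toy_free_tail(UNIT_SIZE / 2);		/* last group's unused tail */
	printf("deferred free:    overlap = %d\n",
	       (char *)g1 < (char *)g0 + UNIT_SIZE);	/* prints 0 */

	return 0;
}

The sketch only demonstrates the allocator-ordering effect; the actual layout bookkeeping in pcpu_embed_first_chunk() is more involved.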
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
 	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
+	/* kmemleak tracks the percpu allocations separately */
+	kmemleak_free(fc);
 
 	ai->dyn_size = unit_size;
 	ai->unit_size = unit_size;
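
The second hunk drops kmemleak's record of the bootmem area fc allocated just above: as the added comment notes, percpu allocations are tracked by kmemleak separately, so keeping the original record would leave the same memory tracked twice. A context-only sketch of that hand-off convention (kernel code, not runnable standalone; the helper name is invented for illustration):

#include <linux/kmemleak.h>
#include <linux/slab.h>

/*
 * Invented helper: the buffer is handed to a subsystem that creates its
 * own kmemleak objects covering this region, so the original record is
 * dropped to avoid double tracking.
 */
static void *alloc_handed_off_region(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (p)
		kmemleak_free(p);	/* receiving subsystem tracks it from here on */
	return p;
}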
