author     Tejun Heo <tj@kernel.org>   2009-09-14 20:57:19 -0400
committer  Tejun Heo <tj@kernel.org>   2009-09-14 20:57:19 -0400
commit     5579fd7e6aed8860ea0c8e3f11897493153b10ad
tree       8f797ccd0f1a2c88f1605ae9e90b3ac17485de27   /arch/x86/kernel
parent     04a13c7c632e1fe04a5f6e6c83565d2559e37598
parent     c2a7e818019f20a5cf7fb26a6eb59e212e6c0cd8
Merge branch 'for-next' into for-linus
* pcpu_chunk_page_occupied() doesn't exist in for-next.
* pcpu_chunk_addr_search() updated to use raw_smp_processor_id().
Conflicts:
mm/percpu.c
Diffstat (limited to 'arch/x86/kernel')

 arch/x86/kernel/cpu/cpu_debug.c      |   4
 arch/x86/kernel/cpu/mcheck/mce.c     |   8
 arch/x86/kernel/cpu/mcheck/mce_amd.c |   2
 arch/x86/kernel/cpu/perf_counter.c   |  14
 arch/x86/kernel/setup_percpu.c       | 364
 arch/x86/kernel/vmlinux.lds.S        |  11
 6 files changed, 63 insertions(+), 340 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 6b2a52dd040..dca325c0399 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,8 +30,8 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
-static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
 static DEFINE_PER_CPU(int, cpu_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
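
Note: this hunk, like the matching ones in mce_amd.c and perf_counter.c further down, moves per-cpu array definitions to the convention where the array length is carried by the type argument and the second argument is a bare identifier. A minimal kernel-context sketch of the two spellings follows; MY_NR_SLOTS, my_slots and read_slot are invented names for illustration and are not part of this patch.

#include <linux/percpu.h>
#include <linux/types.h>

#define MY_NR_SLOTS 8				/* hypothetical array size */

/* older spelling: the array length is attached to the variable name */
/* static DEFINE_PER_CPU(u64, my_slots[MY_NR_SLOTS]); */

/* unified spelling used throughout this merge */
static DEFINE_PER_CPU(u64 [MY_NR_SLOTS], my_slots);

static u64 read_slot(int idx)
{
	/* element access is unchanged; only the definition style moves */
	return __get_cpu_var(my_slots)[idx];
}
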
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 1cfb623ce11..14ce5d49b2a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
  */
 static int check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(next_interval);
+	n = &__get_cpu_var(mce_next_interval);
 	if (mce_notify_irq())
 		*n = max(*n/2, HZ/100);
 	else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(next_interval);
+	int *n = &__get_cpu_var(mce_next_interval);
 
 	if (mce_ignore_ce)
 		return;
@@ -1912,7 +1912,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		t->expires = round_jiffies(jiffies +
-					   __get_cpu_var(next_interval));
+					__get_cpu_var(mce_next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ddae21620bd..bd2a2fa8462 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -69,7 +69,7 @@ struct threshold_bank {
 	struct threshold_block *blocks;
 	cpumask_var_t cpus;
 };
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
 
 #ifdef CONFIG_SMP
 static unsigned char shared_bank[NR_BANKS] = {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 900332b800f..3d4ebbd2e12 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -976,7 +976,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 	x86_pmu_disable_counter(hwc, idx);
 }
 
-static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -1015,7 +1015,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	per_cpu(prev_left[idx], smp_processor_id()) = left;
+	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
 	 * The hw counter starts counting from this counter offset,
@@ -1211,7 +1211,7 @@ void perf_counter_print_debug(void)
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
-		prev_left = per_cpu(prev_left[idx], cpu);
+		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
 		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
 			cpu, idx, pmc_ctrl);
@@ -1798,8 +1798,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 		entry->ip[entry->nr++] = ip;
 }
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 static DEFINE_PER_CPU(int, in_nmi_frame);
 
 
@@ -1952,9 +1952,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	struct perf_callchain_entry *entry;
 
 	if (in_nmi())
-		entry = &__get_cpu_var(nmi_entry);
+		entry = &__get_cpu_var(pmc_nmi_entry);
 	else
-		entry = &__get_cpu_var(irq_entry);
+		entry = &__get_cpu_var(pmc_irq_entry);
 
 	entry->nr = 0;
 
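
Note: the renames in this file and in mce.c above (next_interval to mce_next_interval, prev_left to pmc_prev_left, irq_entry/nmi_entry to pmc_irq_entry/pmc_nmi_entry) only prefix the identifiers; every accessor keeps its shape. The prefixes appear intended to keep these file-local static per-cpu symbols unique kernel-wide, which the reworked percpu code in this merge relies on. A minimal usage sketch, assuming a valid counter index and CPU number; the helper name pmc_prev_left_snapshot is invented for illustration.

static u64 pmc_prev_left_snapshot(int cpu, int idx)
{
	/* identical to the pre-rename code apart from the identifier */
	return per_cpu(pmc_prev_left[idx], cpu);
}
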
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 07d81916f21..d559af913e1 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -55,6 +55,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
 #define PERCPU_FIRST_CHUNK_RESERVE 0
 #endif
 
+#ifdef CONFIG_X86_32
 /**
  * pcpu_need_numa - determine percpu allocation needs to consider NUMA
  *
@@ -83,6 +84,7 @@ static bool __init pcpu_need_numa(void)
 #endif
 	return false;
 }
+#endif
 
 /**
  * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
@@ -124,308 +126,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 }
 
 /*
- * Large page remap allocator
- *
- * This allocator uses PMD page as unit. A PMD page is allocated for
- * each cpu and each is remapped into vmalloc area using PMD mapping.
- * As PMD page is quite large, only part of it is used for the first
- * chunk. Unused part is returned to the bootmem allocator.
- *
- * So, the PMD pages are mapped twice - once to the physical mapping
- * and to the vmalloc area for the first percpu chunk. The double
- * mapping does add one more PMD TLB entry pressure but still is much
- * better than only using 4k mappings while still being NUMA friendly.
+ * Helpers for first chunk memory allocation
  */
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-struct pcpul_ent {
-	unsigned int cpu;
-	void *ptr;
-};
-
-static size_t pcpul_size;
-static struct pcpul_ent *pcpul_map;
-static struct vm_struct pcpul_vm;
-
-static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	size_t off = (size_t)pageno << PAGE_SHIFT;
-
-	if (off >= pcpul_size)
-		return NULL;
-
-	return virt_to_page(pcpul_map[cpu].ptr + off);
+	return pcpu_alloc_bootmem(cpu, size, align);
 }
 
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
+static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-	size_t map_size, dyn_size;
-	unsigned int cpu;
-	int i, j;
-	ssize_t ret;
-
-	if (!chosen) {
-		size_t vm_size = VMALLOC_END - VMALLOC_START;
-		size_t tot_size = nr_cpu_ids * PMD_SIZE;
-
-		/* on non-NUMA, embedding is better */
-		if (!pcpu_need_numa())
-			return -EINVAL;
-
-		/* don't consume more than 20% of vmalloc area */
-		if (tot_size > vm_size / 5) {
-			pr_info("PERCPU: too large chunk size %zuMB for "
-				"large page remap\n", tot_size >> 20);
-			return -EINVAL;
-		}
-	}
-
-	/* need PSE */
-	if (!cpu_has_pse) {
-		pr_warning("PERCPU: lpage allocator requires PSE\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Currently supports only single page. Supporting multiple
-	 * pages won't be too difficult if it ever becomes necessary.
-	 */
-	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-			       PERCPU_DYNAMIC_RESERVE);
-	if (pcpul_size > PMD_SIZE) {
-		pr_warning("PERCPU: static data is larger than large page, "
-			   "can't use large page\n");
-		return -EINVAL;
-	}
-	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
-
-	/* allocate pointer array and alloc large pages */
-	map_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpul_map[0]));
-	pcpul_map = alloc_bootmem(map_size);
-
-	for_each_possible_cpu(cpu) {
-		pcpul_map[cpu].cpu = cpu;
-		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
-							PMD_SIZE);
-		if (!pcpul_map[cpu].ptr) {
-			pr_warning("PERCPU: failed to allocate large page "
-				   "for cpu%u\n", cpu);
-			goto enomem;
-		}
-
-		/*
-		 * Only use pcpul_size bytes and give back the rest.
-		 *
-		 * Ingo: The 2MB up-rounding bootmem is needed to make
-		 * sure the partial 2MB page is still fully RAM - it's
-		 * not well-specified to have a PAT-incompatible area
-		 * (unmapped RAM, device memory, etc.) in that hole.
-		 */
-		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
-			     PMD_SIZE - pcpul_size);
-
-		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
-	}
-
-	/* allocate address and map */
-	pcpul_vm.flags = VM_ALLOC;
-	pcpul_vm.size = nr_cpu_ids * PMD_SIZE;
-	vm_area_register_early(&pcpul_vm, PMD_SIZE);
-
-	for_each_possible_cpu(cpu) {
-		pmd_t *pmd, pmd_v;
-
-		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
-					 cpu * PMD_SIZE);
-		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
-				PAGE_KERNEL_LARGE);
-		set_pmd(pmd, pmd_v);
-	}
-
-	/* we're ready, commit */
-	pr_info("PERCPU: Remapped at %p with large pages, static data "
-		"%zu bytes\n", pcpul_vm.addr, static_size);
-
-	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
-				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
-				     PMD_SIZE, pcpul_vm.addr, NULL);
-
-	/* sort pcpul_map array for pcpu_lpage_remapped() */
-	for (i = 0; i < nr_cpu_ids - 1; i++)
-		for (j = i + 1; j < nr_cpu_ids; j++)
-			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
-				struct pcpul_ent tmp = pcpul_map[i];
-				pcpul_map[i] = pcpul_map[j];
-				pcpul_map[j] = tmp;
-			}
-
-	return ret;
-
-enomem:
-	for_each_possible_cpu(cpu)
-		if (pcpul_map[cpu].ptr)
-			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
-	free_bootmem(__pa(pcpul_map), map_size);
-	return -ENOMEM;
+	free_bootmem(__pa(ptr), size);
 }
 
-/**
- * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
- * @kaddr: the kernel address in question
- *
- * Determine whether @kaddr falls in the pcpul recycled area. This is
- * used by pageattr to detect VM aliases and break up the pcpu PMD
- * mapping such that the same physical page is not mapped under
- * different attributes.
- *
- * The recycled area is always at the tail of a partially used PMD
- * page.
- *
- * RETURNS:
- * Address of corresponding remapped pcpu address if match is found;
- * otherwise, NULL.
- */
-void *pcpu_lpage_remapped(void *kaddr)
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
-	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
-	int left = 0, right = nr_cpu_ids - 1;
-	int pos;
-
-	/* pcpul in use at all? */
-	if (!pcpul_map)
-		return NULL;
-
-	/* okay, perform binary search */
-	while (left <= right) {
-		pos = (left + right) / 2;
-
-		if (pcpul_map[pos].ptr < pmd_addr)
-			left = pos + 1;
-		else if (pcpul_map[pos].ptr > pmd_addr)
-			right = pos - 1;
-		else {
-			/* it shouldn't be in the area for the first chunk */
-			WARN_ON(offset < pcpul_size);
-
-			return pcpul_vm.addr +
-			       pcpul_map[pos].cpu * PMD_SIZE + offset;
-		}
-	}
-
-	return NULL;
-}
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
 #else
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
-{
-	return -EINVAL;
-}
+	return LOCAL_DISTANCE;
 #endif
-
-/*
- * Embedding allocator
- *
- * The first chunk is sized to just contain the static area plus
- * module and dynamic reserves and embedded into linear physical
- * mapping so that it can use PMD mapping without additional TLB
- * pressure.
- */
-static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
-{
-	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
-
-	/*
-	 * If large page isn't supported, there's no benefit in doing
-	 * this. Also, embedding allocation doesn't play well with
-	 * NUMA.
-	 */
-	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
-		return -EINVAL;
-
-	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
-				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
 }
 
-/*
- * 4k page allocator
- *
- * This is the basic allocator. Static percpu area is allocated
- * page-by-page and most of initialization is done by the generic
- * setup function.
- */
-static struct page **pcpu4k_pages __initdata;
-static int pcpu4k_nr_static_pages __initdata;
-
-static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
-{
-	if (pageno < pcpu4k_nr_static_pages)
-		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
-	return NULL;
-}
-
-static void __init pcpu4k_populate_pte(unsigned long addr)
+static void __init pcpup_populate_pte(unsigned long addr)
 {
 	populate_extra_pte(addr);
 }
 
-static ssize_t __init setup_pcpu_4k(size_t static_size)
-{
-	size_t pages_size;
-	unsigned int cpu;
-	int i, j;
-	ssize_t ret;
-
-	pcpu4k_nr_static_pages = PFN_UP(static_size);
-
-	/* unaligned allocations can't be freed, round up to page size */
-	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * nr_cpu_ids
-			       * sizeof(pcpu4k_pages[0]));
-	pcpu4k_pages = alloc_bootmem(pages_size);
-
-	/* allocate and copy */
-	j = 0;
-	for_each_possible_cpu(cpu)
-		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
-			void *ptr;
-
-			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-			if (!ptr) {
-				pr_warning("PERCPU: failed to allocate "
-					   "4k page for cpu%u\n", cpu);
-				goto enomem;
-			}
-
-			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
-			pcpu4k_pages[j++] = virt_to_page(ptr);
-		}
-
-	/* we're ready, commit */
-	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
-		pcpu4k_nr_static_pages, static_size);
-
-	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
-				     PERCPU_FIRST_CHUNK_RESERVE, -1,
-				     -1, NULL, pcpu4k_populate_pte);
-	goto out_free_ar;
-
-enomem:
-	while (--j >= 0)
-		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
-	ret = -ENOMEM;
-out_free_ar:
-	free_bootmem(__pa(pcpu4k_pages), pages_size);
-	return ret;
-}
-
-/* for explicit first chunk allocator selection */
-static char pcpu_chosen_alloc[16] __initdata;
-
-static int __init percpu_alloc_setup(char *str)
-{
-	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
-	return 0;
-}
-early_param("percpu_alloc", percpu_alloc_setup);
-
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -441,52 +170,49 @@ static inline void setup_percpu_segment(int cpu)
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t static_size = __per_cpu_end - __per_cpu_start;
 	unsigned int cpu;
 	unsigned long delta;
-	size_t pcpu_unit_size;
-	ssize_t ret;
+	int rc;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
 	/*
-	 * Allocate percpu area. If PSE is supported, try to make use
-	 * of large page mappings. Please read comments on top of
-	 * each allocator for details.
+	 * Allocate percpu area. Embedding allocator is our favorite;
+	 * however, on NUMA configurations, it can result in very
+	 * sparse unit mapping and vmalloc area isn't spacious enough
+	 * on 32bit. Use page in that case.
 	 */
-	ret = -EINVAL;
-	if (strlen(pcpu_chosen_alloc)) {
-		if (strcmp(pcpu_chosen_alloc, "4k")) {
-			if (!strcmp(pcpu_chosen_alloc, "lpage"))
-				ret = setup_pcpu_lpage(static_size, true);
-			else if (!strcmp(pcpu_chosen_alloc, "embed"))
-				ret = setup_pcpu_embed(static_size, true);
-			else
-				pr_warning("PERCPU: unknown allocator %s "
-					   "specified\n", pcpu_chosen_alloc);
-			if (ret < 0)
-				pr_warning("PERCPU: %s allocator failed (%zd), "
-					   "falling back to 4k\n",
-					   pcpu_chosen_alloc, ret);
-		}
-	} else {
-		ret = setup_pcpu_lpage(static_size, false);
-		if (ret < 0)
-			ret = setup_pcpu_embed(static_size, false);
+#ifdef CONFIG_X86_32
+	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
+		pcpu_chosen_fc = PCPU_FC_PAGE;
+#endif
+	rc = -EINVAL;
+	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
+		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+
+		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+					    dyn_size, atom_size,
+					    pcpu_cpu_distance,
+					    pcpu_fc_alloc, pcpu_fc_free);
+		if (rc < 0)
+			pr_warning("PERCPU: %s allocator failed (%d), "
+				   "falling back to page size\n",
+				   pcpu_fc_names[pcpu_chosen_fc], rc);
 	}
-	if (ret < 0)
-		ret = setup_pcpu_4k(static_size);
-	if (ret < 0)
-		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
-		      static_size, ret);
-
-	pcpu_unit_size = ret;
+	if (rc < 0)
+		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
+					   pcpup_populate_pte);
+	if (rc < 0)
+		panic("cannot initialize percpu area (err=%d)", rc);
 
 	/* alrighty, percpu areas up and running */
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
-		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
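
Note: the rewritten setup_per_cpu_areas() above hands first-chunk construction to the generic percpu allocator. The sketch below restates the two calls with their argument roles spelled out as comments; the wrapper name first_chunk_setup_sketch is invented, and the comments reflect a reading of the call sites above rather than authoritative API documentation.

static int __init first_chunk_setup_sketch(void)
{
	const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
	const size_t dyn_size = PERCPU_MODULE_RESERVE +
		PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
	int rc;

	rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,	/* space set aside for modules' static per-cpu data */
				    dyn_size,		/* dynamic per-cpu space folded into the first chunk */
				    atom_size,		/* allocation unit: a large page when PSE is available */
				    pcpu_cpu_distance,	/* lets the allocator group CPUs by NUMA node */
				    pcpu_fc_alloc,	/* NUMA-aware bootmem allocation callback */
				    pcpu_fc_free);	/* matching bootmem free callback */
	if (rc < 0)
		/* fall back to mapping the first chunk page by page */
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);	/* installs PTEs for each mapped page */
	return rc;
}
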
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 78d185d797d..bbf4fd044d0 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -380,15 +380,12 @@ SECTIONS
 		_end = .;
 	}
 
-	/* Sections to be discarded */
-	/DISCARD/ : {
-		*(.exitcall.exit)
-		*(.eh_frame)
-		*(.discard)
-	}
-
 	STABS_DEBUG
 	DWARF_DEBUG
+
+	/* Sections to be discarded */
+	DISCARDS
+	/DISCARD/ : { *(.eh_frame) }
 }
 
 
