author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-28 14:05:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-28 14:05:28 -0400
commit    8326e284f8deb75eee3d32b973464dd96e120843 (patch)
tree      a2c4e18f4e7984680946cab0303e5369f175d4f5 /mm
parent    187dd317f0169142e4adf6263852f93c3b6f6a3c (diff)
parent    e888d7facd1f1460a638151036d15b6cfb3ccc74 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, delay: tsc based udelay should have rdtsc_barrier
  x86, setup: correct include file in <asm/boot.h>
  x86, setup: Fix typo "CONFIG_x86_64" in <asm/boot.h>
  x86, mce: percpu mcheck_timer should be pinned
  x86: Add sysctl to allow panic on IOCK NMI error
  x86: Fix uv bau sending buffer initialization
  x86, mce: Fix mce resume on 32bit
  x86: Move init_gbpages() to setup_arch()
  x86: ensure percpu lpage doesn't consume too much vmalloc space
  x86: implement percpu_alloc kernel parameter
  x86: fix pageattr handling for lpage percpu allocator and re-enable it
  x86: reorganize cpa_process_alias()
  x86: prepare setup_pcpu_lpage() for pageattr fix
  x86: rename remap percpu first chunk allocator to lpage
  x86: fix duplicate free in setup_pcpu_remap() failure path
  percpu: fix too lazy vunmap cache flushing
  x86: Set cpu_llc_id on AMD CPUs
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu.c | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e8..b70f2acd885 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
  * @chunk: chunk of interest
  * @page_start: page index of the first page to unmap
  * @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @flush_tlb: whether to flush tlb or not
  *
  * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
  * If @flush is true, vcache is flushed before unmapping and tlb
  * after.
  */
 static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
-		       bool flush)
+		       bool flush_tlb)
 {
 	unsigned int last = num_possible_cpus() - 1;
 	unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 	 * the whole region at once rather than doing it for each cpu.
 	 * This could be an overkill but is more scalable.
 	 */
-	if (flush)
-		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
-				   pcpu_chunk_addr(chunk, last, page_end));
+	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+			   pcpu_chunk_addr(chunk, last, page_end));
 
 	for_each_possible_cpu(cpu)
 		unmap_kernel_range_noflush(
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 			(page_end - page_start) << PAGE_SHIFT);
 
 	/* ditto as flush_cache_vunmap() */
-	if (flush)
+	if (flush_tlb)
 		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
 				       pcpu_chunk_addr(chunk, last, page_end));
 }
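
Taken together, the three pcpu_unmap() hunks change the function's contract: the virtual-cache flush is now unconditional, fixing the "too lazy vunmap cache flushing" problem named in the shortlog (on architectures with virtually indexed caches, deferring the vcache flush risks acting on pages that have since been unmapped and reused), while only the TLB flush stays optional behind the renamed flush_tlb parameter. As a reading aid, a condensed restatement of the patched function, using the same calls as the diff (not a drop-in replacement):

static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start,
		       int page_end, bool flush_tlb)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* flush the vcache eagerly, before the mapping disappears */
	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
			   pcpu_chunk_addr(chunk, last, page_end));

	/* unmap every cpu's pages; TLB flushing is deferred */
	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* callers batching several unmaps may still skip this */
	if (flush_tlb)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}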
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 				      ssize_t dyn_size, ssize_t unit_size)
 {
+	size_t chunk_size;
 	unsigned int cpu;
 
 	/* determine parameters and allocate */
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	} else
 		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
 
-	pcpue_ptr = __alloc_bootmem_nopanic(
-					num_possible_cpus() * pcpue_unit_size,
-					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!pcpue_ptr)
+	chunk_size = pcpue_unit_size * num_possible_cpus();
+
+	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+					    __pa(MAX_DMA_ADDRESS));
+	if (!pcpue_ptr) {
+		pr_warning("PERCPU: failed to allocate %zu bytes for "
+			   "embedding\n", chunk_size);
 		return -ENOMEM;
+	}
 
 	/* return the leftover and copy */
 	for_each_possible_cpu(cpu) {
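
The pcpu_embed_first_chunk() hunks are a small cleanup plus a diagnostic: the total first-chunk size is computed once into chunk_size, and a failed bootmem allocation now reports that size instead of returning -ENOMEM silently. Condensed from the diff (a restatement, not new code; pr_warning() is the printk helper current in this era):

	size_t chunk_size = pcpue_unit_size * num_possible_cpus();

	/* one contiguous bootmem block backs every cpu's unit */
	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
					    __pa(MAX_DMA_ADDRESS));
	if (!pcpue_ptr) {
		/* %zu matches chunk_size's size_t type */
		pr_warning("PERCPU: failed to allocate %zu bytes for "
			   "embedding\n", chunk_size);
		return -ENOMEM;
	}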