path: root/arch/x86/include/asm/pgtable.h
author    Yinghai Lu <yinghai@kernel.org>    2012-11-16 22:38:38 -0500
committer H. Peter Anvin <hpa@linux.intel.com>    2012-11-17 14:59:00 -0500
commit    fa62aafea9e415cd1efd8c4054106112fe809f19 (patch)
tree      e85d6b6e3bd59bd4ffa8d055d065bbc6f38b694f /arch/x86/include/asm/pgtable.h
parent    f4a75d2eb7b1e2206094b901be09adb31ba63681 (diff)
x86, mm: Add global page_size_mask and probe one time only
We currently pass use_gbpages and use_pse around to calculate the page table size. Later we will need to call init_memory_mapping() for every RAM range one by one, which means those calculations would be repeated several times. That information is the same for all RAM ranges, so it can be stored in page_size_mask and probed only once. Move the probing code out of init_memory_mapping() into a separate function, probe_page_size_mask(), and call it before any init_memory_mapping() call.

Suggested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-2-git-send-email-yinghai@kernel.org
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
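For context, a minimal sketch of what the new helper on the arch/x86/mm/init.c side could look like, based on the description above; the specific feature checks (cpu_has_pse, cpu_has_pge), the PG_LEVEL_* bit layout, and the placement of page_size_mask are assumptions for illustration, not the verbatim patch body:

/* Assumed file-scope state, set once before mapping any RAM range. */
static int page_size_mask;

static void __init probe_page_size_mask(void)
{
	/* Record once whether 1G (gbpages) and 2M (PSE) mappings may be used. */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	/* Enable PSE if available. */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available. */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}

Callers would then consult page_size_mask instead of recomputing use_gbpages/use_pse for each init_memory_mapping() invocation.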
Diffstat (limited to 'arch/x86/include/asm/pgtable.h')
-rw-r--r--  arch/x86/include/asm/pgtable.h  |  1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a1f780d45f76..98ac76dc4eae 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -602,6 +602,7 @@ static inline int pgd_none(pgd_t pgd)
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
+void probe_page_size_mask(void);
 
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)