Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--  arch/tile/mm/init.c | 34 +++++++++++++++-------------------
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 0b9ce69b0ee5..d6e87fda2fb2 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -53,22 +53,11 @@
 
 #include "migrate.h"
 
-/*
- * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)"
- * in the Tile Kconfig, but this generates configure warnings.
- * Do it here and force people to get it right to compile this file.
- * The problem is that with 4KB small pages and 16MB huge pages,
- * the default value doesn't allow us to group enough small pages
- * together to make up a huge page.
- */
-#if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1
-# error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size"
-#endif
-
 #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
 
 #ifndef __tilegx__
 unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
+EXPORT_SYMBOL(VMALLOC_RESERVE);
 #endif
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -445,7 +434,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
 
 /* Temporary page table we use for staging. */
 static pgd_t pgtables[PTRS_PER_PGD]
-	__attribute__((section(".init.page")));
+	__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
 
 /*
  * This maps the physical memory to kernel virtual address space, a total
@@ -653,6 +642,17 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	memcpy(pgd_base, pgtables, sizeof(pgtables));
 	__install_page_table(pgd_base, __get_cpu_var(current_asid),
 			      swapper_pgprot);
+
+	/*
+	 * We just read swapper_pgprot and thus brought it into the cache,
+	 * with its new home & caching mode.  When we start the other CPUs,
+	 * they're going to reference swapper_pgprot via their initial fake
+	 * VA-is-PA mappings, which cache everything locally.  At that
+	 * time, if it's in our cache with a conflicting home, the
+	 * simulator's coherence checker will complain.  So, flush it out
+	 * of our cache; we're not going to ever use it again anyway.
+	 */
+	__insn_finv(&swapper_pgprot);
 }
 
 /*
@@ -950,11 +950,7 @@ struct kmem_cache *pgd_cache;
 
 void __init pgtable_cache_init(void)
 {
-	pgd_cache = kmem_cache_create("pgd",
-				      PTRS_PER_PGD*sizeof(pgd_t),
-				      PTRS_PER_PGD*sizeof(pgd_t),
-				      0,
-				      NULL);
+	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
 	if (!pgd_cache)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
@@ -989,7 +985,7 @@ static long __write_once initfree = 1;
 static int __init set_initfree(char *str)
 {
 	long val;
-	if (strict_strtol(str, 0, &val)) {
+	if (strict_strtol(str, 0, &val) == 0) {
 		initfree = val;
 		pr_info("initfree: %s free init pages\n",
 			initfree ? "will" : "won't");