Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--   arch/tile/mm/init.c | 44
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..4e10c4023028 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -53,26 +53,13 @@
 
 #include "migrate.h"
 
-/*
- * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)"
- * in the Tile Kconfig, but this generates configure warnings.
- * Do it here and force people to get it right to compile this file.
- * The problem is that with 4KB small pages and 16MB huge pages,
- * the default value doesn't allow us to group enough small pages
- * together to make up a huge page.
- */
-#if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1
-# error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size"
-#endif
-
 #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
 
 #ifndef __tilegx__
 unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
+EXPORT_SYMBOL(VMALLOC_RESERVE);
 #endif
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /* Create an L2 page table */
 static pte_t * __init alloc_pte(void)
 {
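The guard removed above encodes a simple size relationship: with 4 KB small pages (PAGE_SHIFT == 12) and 16 MB huge pages (HPAGE_SHIFT == 24), one huge page spans 2^(24-12) = 4096 small pages, and the buddy allocator can hand out at most 2^(MAX_ORDER - 1) contiguous pages, so FORCE_MAX_ZONEORDER must be at least 24 - 12 + 1 = 13. A minimal sketch of an equivalent compile-time check, illustrative only (the real constants come from the tile headers and Kconfig):

/* Illustrative guard: fail the build if the configured maximum
 * buddy-allocator order cannot cover one huge page's worth of
 * small pages. */
#if CONFIG_FORCE_MAX_ZONEORDER < (HPAGE_SHIFT - PAGE_SHIFT + 1)
# error "FORCE_MAX_ZONEORDER too small to group small pages into a huge page"
#endif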
@@ -445,7 +432,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
 
 /* Temporary page table we use for staging. */
 static pgd_t pgtables[PTRS_PER_PGD]
- __attribute__((section(".init.page")));
+ __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
 
 /*
  * This maps the physical memory to kernel virtual address space, a total
@@ -653,6 +640,17 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	memcpy(pgd_base, pgtables, sizeof(pgtables));
 	__install_page_table(pgd_base, __get_cpu_var(current_asid),
 			     swapper_pgprot);
+
+	/*
+	 * We just read swapper_pgprot and thus brought it into the cache,
+	 * with its new home & caching mode. When we start the other CPUs,
+	 * they're going to reference swapper_pgprot via their initial fake
+	 * VA-is-PA mappings, which cache everything locally. At that
+	 * time, if it's in our cache with a conflicting home, the
+	 * simulator's coherence checker will complain. So, flush it out
+	 * of our cache; we're not going to ever use it again anyway.
+	 */
+	__insn_finv(&swapper_pgprot);
 }
 
 /*
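The new comment carries the reasoning: swapper_pgprot was just read, and therefore cached locally, under its new cache home, while the secondary CPUs will shortly touch it through their temporary VA-is-PA mappings that cache everything locally; a stale locally homed copy would trip the simulator's coherence checker, so the boot CPU flushes the line out of its own cache. As a hedged sketch of that pattern (tile-specific; treating CHIP_L2_LINE_SIZE() as the relevant stride is an assumption of this sketch, not something this patch states):

/* Sketch: flush-and-invalidate every local cache line backing an
 * object whose cache home has just changed, so that later references
 * from other CPUs never hit a stale locally cached copy.
 * Assumes CHIP_L2_LINE_SIZE() (from <arch/chip.h>) is the right
 * granularity for __insn_finv(). */
static void evict_object_from_local_cache(void *addr, unsigned long size)
{
	char *p = (char *)addr;
	char *end = p + size;

	for (; p < end; p += CHIP_L2_LINE_SIZE())
		__insn_finv(p);	/* finv the line containing p */
}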
@@ -950,11 +948,7 @@ struct kmem_cache *pgd_cache;
 
 void __init pgtable_cache_init(void)
 {
-	pgd_cache = kmem_cache_create("pgd",
-				PTRS_PER_PGD*sizeof(pgd_t),
-				PTRS_PER_PGD*sizeof(pgd_t),
-				0,
-				NULL);
+	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
 	if (!pgd_cache)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
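The consolidated call still passes the same value for both the slab object size and its alignment (kmem_cache_create(name, size, align, flags, ctor)), so every pgd allocation stays naturally aligned to its own size. The diff implies, though it does not show, that SIZEOF_PGD is a helper macro for the expression the old code spelled out inline; a presumed definition would be:

/* Presumed helper (not part of this diff): the byte size of one
 * top-level page table, used both as the slab object size and as
 * its required alignment. */
#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))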
@@ -988,8 +982,12 @@ static long __write_once initfree = 1;
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-	strict_strtol(str, 0, &initfree);
-	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	long val;
+	if (strict_strtol(str, 0, &val) == 0) {
+		initfree = val;
+		pr_info("initfree: %s free init pages\n",
+			initfree ? "will" : "won't");
+	}
 	return 1;
 }
 __setup("initfree=", set_initfree);
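The reworked parser only commits the value (and prints the message) when strict_strtol() returns 0, i.e. success, so a malformed argument now leaves the default of 1 untouched instead of silently clobbering initfree. Since the handler is registered with __setup(), the switch is consumed from the kernel command line; for example:

initfree=1	/* free the __init pages after boot (the default) */
initfree=0	/* do not free them; mark them unusable instead */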
@@ -1060,7 +1058,7 @@ void free_initmem(void)
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
-	 * pages from 0xfd000000 that we won't use again after init.
+	 * pages from MEM_SV_INTRPT that we won't use again after init.
 	 */
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,