path: root/arch/tile/mm/init.c
author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/mm/init.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--	arch/tile/mm/init.c	44
1 file changed, 21 insertions, 23 deletions
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..4e10c4023028 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -53,26 +53,13 @@
 
 #include "migrate.h"
 
-/*
- * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)"
- * in the Tile Kconfig, but this generates configure warnings.
- * Do it here and force people to get it right to compile this file.
- * The problem is that with 4KB small pages and 16MB huge pages,
- * the default value doesn't allow us to group enough small pages
- * together to make up a huge page.
- */
-#if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1
-# error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size"
-#endif
-
 #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
 
 #ifndef __tilegx__
 unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
+EXPORT_SYMBOL(VMALLOC_RESERVE);
 #endif
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /* Create an L2 page table */
 static pte_t * __init alloc_pte(void)
 {
@@ -445,7 +432,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
 
 /* Temporary page table we use for staging. */
 static pgd_t pgtables[PTRS_PER_PGD]
- __attribute__((section(".init.page")));
+ __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
 
 /*
  * This maps the physical memory to kernel virtual address space, a total
@@ -653,6 +640,17 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	memcpy(pgd_base, pgtables, sizeof(pgtables));
 	__install_page_table(pgd_base, __get_cpu_var(current_asid),
 			     swapper_pgprot);
+
+	/*
+	 * We just read swapper_pgprot and thus brought it into the cache,
+	 * with its new home & caching mode.  When we start the other CPUs,
+	 * they're going to reference swapper_pgprot via their initial fake
+	 * VA-is-PA mappings, which cache everything locally.  At that
+	 * time, if it's in our cache with a conflicting home, the
+	 * simulator's coherence checker will complain.  So, flush it out
+	 * of our cache; we're not going to ever use it again anyway.
+	 */
+	__insn_finv(&swapper_pgprot);
 }
 
 /*
@@ -950,11 +948,7 @@ struct kmem_cache *pgd_cache;
 
 void __init pgtable_cache_init(void)
 {
-	pgd_cache = kmem_cache_create("pgd",
-				      PTRS_PER_PGD*sizeof(pgd_t),
-				      PTRS_PER_PGD*sizeof(pgd_t),
-				      0,
-				      NULL);
+	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
 	if (!pgd_cache)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
@@ -988,8 +982,12 @@ static long __write_once initfree = 1;
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-	strict_strtol(str, 0, &initfree);
-	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	long val;
+	if (strict_strtol(str, 0, &val) == 0) {
+		initfree = val;
+		pr_info("initfree: %s free init pages\n",
+			initfree ? "will" : "won't");
+	}
 	return 1;
 }
 __setup("initfree=", set_initfree);
@@ -1060,7 +1058,7 @@ void free_initmem(void)
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
-	 * pages from 0xfd000000 that we won't use again after init.
+	 * pages from MEM_SV_INTRPT that we won't use again after init.
 	 */
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,
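
Note on the set_initfree() hunk: the patch switches from writing the parsed value straight into initfree to a parse-then-commit pattern, so a malformed "initfree=" boot argument leaves the default untouched and prints nothing. A minimal userspace sketch of the same pattern, using standard strtol() in place of the kernel's strict_strtol(); the code below is illustrative only and is not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long initfree = 1;	/* default: free __init pages */

/* Parse an "initfree=<n>" style argument; commit only on a clean parse. */
static void set_initfree(const char *str)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(str, &end, 0);
	if (errno == 0 && end != str && *end == '\0') {
		initfree = val;	/* store only when the whole string parsed */
		printf("initfree: %s free init pages\n",
		       initfree ? "will" : "won't");
	}
}

int main(void)
{
	set_initfree("0");	/* valid: freeing disabled */
	set_initfree("bogus");	/* invalid: initfree keeps its prior value */
	printf("final initfree = %ld\n", initfree);
	return 0;
}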