author     Chris Metcalf <cmetcalf@tilera.com>    2012-03-29 13:58:43 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2012-05-25 12:48:24 -0400
commit     d5d14ed6f2db7287a5088e1350cf422bf72140b3 (patch)
tree       19f0bc20bb6f1995a1e4f75dc58e388c047f7d23 /arch/tile/mm/init.c
parent     47d632f9f8f3ed62b21f725e98b726d65769b6d7 (diff)
arch/tile: Allow tilegx to build with either 16K or 64K page size
This change introduces new flags for the hv_install_context() API that passes a page table pointer to the hypervisor. Clients can explicitly request 4K, 16K, or 64K small pages when they install a new context. In practice, the page size is fixed at kernel compile time and the same size is always requested every time a new page table is installed.

The <hv/hypervisor.h> header changes so that it provides more abstract macros for managing "page" things like PFNs and page tables. For example, there is now a HV_DEFAULT_PAGE_SIZE_SMALL instead of the old HV_PAGE_SIZE_SMALL. The various PFN routines have been eliminated and only PA- or PTFN-based ones remain (since PTFNs are always expressed in the fixed 2KB "page" size). The page-table management macros are renamed with a leading underscore and take page-size arguments, with the presumption that clients will use those macros in some single place to provide the "real" macros they will use themselves.

I happened to notice the old hv_set_caching() API was totally broken (it assumed 4KB pages), so I changed it to nominally work correctly with other page sizes.

Tag modules with the page size so you can't load a module built with a conflicting page size. (And add a test for SMP while we're at it.)

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
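The underscore-macro wrapping scheme described above can be pictured with a short sketch. This is a hypothetical reconstruction, not the literal <hv/hypervisor.h> contents: the underscored helper takes page-size shifts as arguments, and the kernel instantiates it once with its compile-time choice, which is how a generic name like PTRS_PER_PTE (seen in the diff below) can replace the old fixed-size HV_L2_ENTRIES.

/* Hypothetical sketch of the wrapping pattern; the exact macro names
 * are assumptions, only the shape is the point.
 */

/* <hv/hypervisor.h> side: a page-size-parameterized helper, counting
 * how many small-page PTEs fill one huge page.
 */
#define _HV_L2_ENTRIES(huge_shift, small_shift) \
	(1 << ((huge_shift) - (small_shift)))

/* Kernel side: instantiated once, in a single place, with the page
 * size chosen at compile time; all other code uses the "real" macro.
 */
#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)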
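The module tagging can be sketched the same way, using the kernel's standard MODULE_ARCH_VERMAGIC hook, which the module loader compares against the running kernel at load time. The tag strings and config symbols below are assumptions for illustration, not necessarily what the patch uses:

/* Plausible sketch only; exact strings are assumptions.  Appending a
 * page-size tag (and an SMP tag) to the arch vermagic makes the module
 * loader refuse a module built with a conflicting configuration.
 */
#if defined(CONFIG_PAGE_SIZE_16KB)
# define MODULE_PGSZ " 16KB"
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define MODULE_PGSZ " 64KB"
#else
# define MODULE_PGSZ ""
#endif

#ifdef CONFIG_SMP
# define MODULE_NOSMP ""
#else
# define MODULE_NOSMP " nosmp"
#endif

#define MODULE_ARCH_VERMAGIC	CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP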
Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--  arch/tile/mm/init.c | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 1e4633520b35..c04fbfd93fc5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
 
 static void init_prealloc_ptes(int node, int pages)
 {
-	BUG_ON(pages & (HV_L2_ENTRIES-1));
+	BUG_ON(pages & (PTRS_PER_PTE - 1));
 	if (pages) {
 		num_l2_ptes[node] = pages;
 		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
 
 #ifdef __tilegx__
 
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
 static inline pmd_t *alloc_pmd(void)
 {
-	return (pmd_t *)alloc_pte();
+	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
 }
 
 static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -811,7 +806,7 @@ void __init paging_init(void)
 	 * changing init_mm once we get up and running, and there's no
 	 * need for e.g. vmalloc_sync_all().
 	 */
-	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
 	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
 	assign_pmd(pud, alloc_pmd());
 #endif
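One detail worth spelling out in the last hunk: VMALLOC_END is an exclusive bound, so the pgd slot the region actually ends in belongs to its last byte, VMALLOC_END - 1. A worked example with hypothetical values:

/* Hypothetical numbers: PGDIR_SHIFT = 30, so one pgd entry maps 1 GB,
 * VMALLOC_START = 0x8000000000, VMALLOC_END = 0x8040000000 (exclusive),
 * i.e. the region is exactly one 1 GB pgd entry.
 *
 *   pgd_index(VMALLOC_START)   == 0x8000000000 >> 30 == 0x200
 *   pgd_index(VMALLOC_END)     == 0x8040000000 >> 30 == 0x201  (one past!)
 *   pgd_index(VMALLOC_END - 1) == 0x803fffffff >> 30 == 0x200
 *
 * The old assertion compared against the slot one past the end, so it
 * could fire even though [VMALLOC_START, VMALLOC_END) fits in a single
 * pgd entry; the "- 1" form checks the slots the range really covers.
 */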