Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/Kconfig                      |  25
-rw-r--r--  arch/tile/include/asm/Kbuild           |   1
-rw-r--r--  arch/tile/include/asm/mmu.h            |   2
-rw-r--r--  arch/tile/include/asm/mmu_context.h    |   8
-rw-r--r--  arch/tile/include/asm/module.h         |  40
-rw-r--r--  arch/tile/include/asm/page.h           |  13
-rw-r--r--  arch/tile/include/asm/pgalloc.h        |  92
-rw-r--r--  arch/tile/include/asm/pgtable.h        |  10
-rw-r--r--  arch/tile/include/asm/pgtable_32.h     |  14
-rw-r--r--  arch/tile/include/asm/pgtable_64.h     |  28
-rw-r--r--  arch/tile/include/hv/drv_xgbe_intf.h   |   2
-rw-r--r--  arch/tile/include/hv/hypervisor.h      | 214
-rw-r--r--  arch/tile/kernel/head_32.S             |   8
-rw-r--r--  arch/tile/kernel/head_64.S             |  22
-rw-r--r--  arch/tile/kernel/machine_kexec.c       |   7
-rw-r--r--  arch/tile/kernel/setup.c               |   8
-rw-r--r--  arch/tile/kernel/smp.c                 |   2
-rw-r--r--  arch/tile/lib/memcpy_tile64.c          |   8
-rw-r--r--  arch/tile/mm/init.c                    |  11
-rw-r--r--  arch/tile/mm/pgtable.c                 |  27
20 files changed, 345 insertions(+), 197 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 74239dd77e06..38c3957e0b40 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -139,6 +139,31 @@ config NR_CPUS
 	  smaller kernel memory footprint results from using a smaller
 	  value on chips with fewer tiles.
 
+if TILEGX
+
+choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_64KB
+	help
+	  This lets you select the page size of the kernel.  For best
+	  performance on memory-intensive applications, a page size of 64KB
+	  is recommended.  For workloads involving many small files, many
+	  connections, etc., it may be better to select 16KB, which uses
+	  memory more efficiently at some cost in TLB performance.
+
+	  Note that this option is TILE-Gx specific; currently
+	  TILEPro page size is set by rebuilding the hypervisor.
+
+config PAGE_SIZE_16KB
+	bool "16KB"
+
+config PAGE_SIZE_64KB
+	bool "64KB"
+
+endchoice
+
+endif
+
 source "kernel/time/Kconfig"
 
 source "kernel/Kconfig.hz"
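[Annotation: for illustration, a TILE-Gx build after this change records the chosen page size in the generated .config with the option names added above; with the default selection the fragment looks like this.]

# CONFIG_PAGE_SIZE_16KB is not set
CONFIG_PAGE_SIZE_64KB=y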
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb42642343a..6b2e681695ec 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
-generic-y += module.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c77b6e4..e2c789096795 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
 	 * Written under the mmap_sem semaphore; read without the
 	 * semaphore but atomically, but it is conservatively set.
 	 */
-	unsigned int priority_cached;
+	unsigned long priority_cached;
 };
 
 typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb24641120..37f0b741dee7 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	return 0;
 }
 
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
 static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 {
 	/* FIXME: DIRECTIO should not always be set. FIXME. */
-	int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+	int rc = hv_install_context(__pa(pgdir), prot, asid,
+				    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
 	if (rc < 0)
 		panic("hv_install_context failed: %d", rc);
 }
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 000000000000..44ed07ccd3d2
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support no-SMP so tag if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
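[Annotation: a sketch of the effect, assuming CHIP_ARCH_NAME expands to "tilegx" in <arch/chip.h>, which this patch does not show.]

/*
 * Hypothetical expansion with CONFIG_PAGE_SIZE_64KB and CONFIG_SMP set:
 *   MODULE_ARCH_VERMAGIC == "tilegx" " 64KB" "" == "tilegx 64KB"
 * A module built against a 16KB-page kernel carries "tilegx 16KB"
 * instead, so the module loader's vermagic comparison refuses it.
 */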
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518fac03..c750943f961e 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
 #include <arch/chip.h>
 
 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT	HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT	14
+#define CTX_PAGE_FLAG	HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT	16
+#define CTX_PAGE_FLAG	HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT	HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG	0
+#endif
+#define HPAGE_SHIFT	HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
 
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0bdc22d..1b902508b664 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>
 
 /* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
-  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
 
 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
 #else
 #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
 #endif
 
 /* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *ptep)
 {
-	set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
 			      __pgprot(_PAGE_PRESENT)));
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t page)
 {
-	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+	set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
 			      __pgprot(_PAGE_PRESENT)));
 }
 
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+				   int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+				      unsigned long address)
+{
+	return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 	pte_free(mm, virt_to_page(pte));
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-			   unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+			       unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+				  unsigned long address)
+{
+	__pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
 
 #define check_pgt_cache()	do { } while (0)
 
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
 void shatter_huge_page(unsigned long addr);
 
 #ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
 #define pud_populate(mm, pud, pmd) \
   pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
-  ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
-  pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
-  __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
 #endif
 
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+	return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+	pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+				  unsigned long address)
+{
+	__pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+			   L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
 #endif /* _ASM_TILE_PGALLOC_H */
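[Annotation: a worked example of the size arithmetic, assuming 8-byte PTEs (HV_LOG2_PTE_SIZE = 3) and 16MB huge pages (HPAGE_SHIFT = 24); both values are assumptions here, not shown by this hunk.]

/*
 * L2_KERNEL_PGTABLE_SHIFT = _HV_LOG2_L2_SIZE(24, PAGE_SHIFT)
 *                         = 3 + (24 - PAGE_SHIFT)
 * For PAGE_SHIFT = 16 (64KB pages) that is 11, i.e. a 2KB kernel L2
 * table; since 11 < 16, user L2 tables round up to a full page and
 * L2_USER_PGTABLE_ORDER = 16 - 16 = 0, a single-page allocation.
 */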
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index ec907d4dbd7a..319f4826d972 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/pfn.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -162,7 +164,7 @@ extern void set_page_homes(void);
 	(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
 
 /* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
 
 /*
  * For PTEs and PDEs, we must clear the Present bit first when
@@ -262,7 +264,7 @@ static inline int pte_none(pte_t pte)
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return hv_pte_get_pfn(pte);
+	return PFN_DOWN(hv_pte_get_pa(pte));
 }
 
 /* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -271,7 +273,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
 
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
-	return hv_pte_set_pfn(prot, pfn);
+	return hv_pte_set_pa(prot, PFN_PHYS(pfn));
 }
 
 /* Support for priority mappings. */
@@ -471,7 +473,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * OK for pte_lockptr(), since we just end up with potentially one
  * lock being used for several pte_t arrays.
  */
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 27e20f6844a8..4ce4a7a99c24 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
  * The level-1 index is defined by the huge page size.  A PGD is composed
  * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
  */
-#define PGDIR_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE	HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT	HPAGE_SHIFT
+#define PGDIR_SIZE	HPAGE_SIZE
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD	_HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va)	_HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD	_HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE  (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)	_HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE	_HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 #ifndef __ASSEMBLY__
 
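[Annotation: sanity check of the rewritten 32-bit definitions, assuming HV_LOG2_L1_SPAN is 32 on 32-bit parts and the default HPAGE_SHIFT of 24; neither value appears in this hunk.]

/*
 * PTRS_PER_PGD = _HV_L1_ENTRIES(24) = 1 << (32 - 24) = 256,
 * identical to the old (1 << (32 - PGDIR_SHIFT)); at the default page
 * sizes the change is behavior-preserving and merely threads the
 * page-size parameters through the new _HV_* macros.
 */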
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index e105f3ada655..2492fa5478e7 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -21,17 +21,19 @@
 #define PGDIR_SIZE	HV_L1_SPAN
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD	HV_L0_ENTRIES
-#define SIZEOF_PGD	(PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va)	HV_L0_INDEX(va)
+#define SIZEOF_PGD	HV_L0_SIZE
 
 /*
  * The level-1 index is defined by the huge page size.  A PMD is composed
  * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
  */
-#define PMD_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE	HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT	HPAGE_SHIFT
+#define PMD_SIZE	HPAGE_SIZE
 #define PMD_MASK	(~(PMD_SIZE-1))
-#define PTRS_PER_PMD	(1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD	(PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD	_HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va)	_HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD	_HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -40,17 +42,19 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE  (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)	_HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE	_HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 /*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end.  The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table.  Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code).  The vmalloc code puts in an internal
  * guard page between each allocation.
  */
 #define _VMALLOC_END	HUGE_VMAP_BASE
-#define VMALLOC_END	(_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START	(_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END	_VMALLOC_END
+#define VMALLOC_START	_VMALLOC_START
 
 #define HUGE_VMAP_END	(HUGE_VMAP_BASE + PGDIR_SIZE)
 
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud)
  * A pud_t points to a pmd_t array.  Since we can have multiple per
  * page, we don't have a one-to-one mapping of pud_t's to pages.
  */
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
 
 static inline unsigned long pud_index(unsigned long address)
 {
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
index f13188ac281a..2a20b266d944 100644
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -460,7 +460,7 @@ typedef void* lepp_comp_t;
  * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
  * our page size of exactly 65536.  We add one for a "body" fragment.
  */
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
 
 /** Total number of bytes needed for an lepp_tso_cmd_t. */
 #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
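[Annotation: the renamed constant keeps the arithmetic unchanged, since the default small page stays 64KB per hypervisor.h below.]

/* 65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1 = 65536/65536 + 3 = 4;
 * only the name records that 64KB is now a default rather than a
 * fixed property of the hypervisor. */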
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index df74223944b5..f27871775b7a 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -17,8 +17,8 @@
  * The hypervisor's public API.
  */
 
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
 
 #include <arch/chip.h>
 
@@ -42,25 +42,29 @@
  */
 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
 
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
  */
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
 
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
 
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
  */
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
 
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
 
 /** The log2 of the granularity at which page tables must be aligned;
  * in other words, the CPA for a page table must have this many zero
@@ -401,7 +405,13 @@ typedef enum {
  * that the temperature has hit an upper limit and is no longer being
  * accurately tracked.
  */
-  HV_SYSCONF_BOARD_TEMP        = 6
+  HV_SYSCONF_BOARD_TEMP        = 6,
+
+  /** Legal page size bitmask for hv_install_context().
+   * For example, if 16KB and 64KB small pages are supported,
+   * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+   */
+  HV_SYSCONF_VALID_PAGE_SIZES = 7,
 
 } HV_SysconfQuery;
 
@@ -654,6 +664,12 @@ void hv_set_rtc(HV_RTCTime time);
  * new page table does not need to contain any mapping for the
  * hv_install_context address itself.
  *
+ * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ * if multiple flags are specified, HV_EINVAL is returned.
+ * Specifying none of the flags results in using the default page size.
+ * All cores participating in a given client must request the same
+ * page size, or the results are undefined.
+ *
  * @param page_table Root of the page table.
  * @param access PTE providing info on how to read the page table.  This
  *   value must be consistent between multiple tiles sharing a page table,
@@ -672,6 +688,11 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
 #define HV_CTX_DIRECTIO     0x1   /**< Direct I/O requests are accepted from
                                        PL0. */
 
+#define HV_CTX_PG_SM_4K     0x10  /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K    0x20  /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K    0x40  /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK   0xf0  /**< Mask of all possible small pages. */
+
 #ifndef __ASSEMBLER__
 
 /** Value returned from hv_inquire_context(). */
@@ -1248,11 +1269,14 @@ HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
  * with the existing priority pages) or "red/black" (if they don't).
  * The bitmask provides information on which parts of the cache
  * have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
  * @param bitmask A bitmap of priority page set values
  */
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
 
 
 /** Zero out a specified number of pages.
@@ -1884,15 +1908,6 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
                          of word */
 #define HV_PTE_PTFN_BITS         29  /**< Number of bits in a PTFN */
 
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
-                                               HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
-                               (HV_LOG2_PAGE_SIZE_SMALL - \
-                                HV_LOG2_PAGE_TABLE_ALIGN))
-
 /*
  * Legal values for the PTE's mode field
  */
@@ -2245,40 +2260,11 @@ hv_pte_set_mode(HV_PTE pte, unsigned int val)
  *
  * This field contains the upper bits of the CPA (client physical
  * address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
  *
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
-  return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE.  See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
-  /*
-   * Note that the use of "PTFN" in the next line is intentional; we
-   * don't want any garbage lower bits left in that field.
-   */
-  pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
-  pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
-  return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
- *
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
  */
 static __inline unsigned long
 hv_pte_get_ptfn(const HV_PTE pte)
@@ -2286,7 +2272,6 @@ hv_pte_get_ptfn(const HV_PTE pte)
   return pte.val >> HV_PTE_INDEX_PTFN;
 }
 
-
 /** Set the page table frame number into a PTE.  See hv_pte_get_ptfn. */
 static __inline HV_PTE
 hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2296,6 +2281,20 @@ hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
   return pte;
 }
 
+/** Get the client physical address from the PTE.  See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+  return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE.  See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+  return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
 
 /** Get the remote tile caching this page.
  *
@@ -2331,28 +2330,20 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
 
 #endif /* !__ASSEMBLER__ */
 
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
 /** Converts a client physical address to a ptfn. */
 #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
 
 /** Converts a ptfn to a client physical address. */
 #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
 
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
-  ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
-  ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
 #if CHIP_VA_WIDTH() > 32
 
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
 /** Log number of HV_PTE entries in L0 page table */
 #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
 
@@ -2382,69 +2373,104 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+  (HV_LOG2_L1_SPAN - log2_page_size_large)
 
 /** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+  (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
 
 /** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (log2_page_size_large - log2_page_size_small)
 
 /** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (HV_LOG2_PTE_SIZE + \
+   _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
 
 #ifdef __ASSEMBLER__
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #else /* __ASSEMBLER __ */
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+   (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #endif /* __ASSEMBLER __ */
 
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+  (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+  (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+  (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+  ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+  ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */
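[Annotation: a caller-side sketch of how the new sysconf query and context flags are meant to compose. choose_page_size_flag() is hypothetical; hv_sysconf() and hv_install_context() are the entry points documented above.]

/* Pick the largest small-page size the hypervisor will accept. */
static int choose_page_size_flag(void)
{
	long valid = hv_sysconf(HV_SYSCONF_VALID_PAGE_SIZES);

	if (valid & HV_CTX_PG_SM_64K)
		return HV_CTX_PG_SM_64K;
	if (valid & HV_CTX_PG_SM_16K)
		return HV_CTX_PG_SM_16K;
	return 0;	/* no flag: fall back to the default page size */
}

Passing more than one HV_CTX_PG_SM_* bit to hv_install_context() returns HV_EINVAL, so exactly one flag (or none) is chosen.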
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c1c87e..f71bfeeaf1a9 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
 	}
 	{
 	  moveli lr, lo16(1f)
-	  move r5, zero
+	  moveli r5, CTX_PAGE_FLAG
 	}
 	{
 	  auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
 
 	.macro PTE va, cpa, bits1, no_org=0
 	.ifeq \no_org
-	.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
 	.endif
 	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
 	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+	.word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
 	.endm
 
 __PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
 	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
 	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
 			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
-	.org swapper_pg_dir + HV_L1_SIZE
+	.org swapper_pg_dir + PGDIR_SIZE
 	END(swapper_pg_dir)
 
 	/*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a932fe45..f9a2734f7b82 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
 	  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
 	}
 	{
-	  move r3, zero
+	  moveli r3, CTX_PAGE_FLAG
 	  j hv_install_context
 	}
 1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
 	.macro PTE cpa, bits1
 	.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
 	      HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
-	      (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+	      (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
 	.endm
 
 __PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
-	.org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
 .Lsv_data_pmd:
 	.quad 0  /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
 .Lsv_code_pmd:
 	.quad 0  /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_SIZE
+	.org swapper_pg_dir + SIZEOF_PGD
 	END(swapper_pg_dir)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_data_pmd + HV_L1_SIZE
+	.org temp_data_pmd + SIZEOF_PMD
 	END(temp_data_pmd)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_code_pmd + HV_L1_SIZE
+	.org temp_code_pmd + SIZEOF_PMD
 	END(temp_code_pmd)
 
 	/*
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2eab112..b0fa37c1a521 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -251,6 +251,7 @@ static void setup_quasi_va_is_pa(void)
 void machine_kexec(struct kimage *image)
 {
 	void *reboot_code_buffer;
+	pte_t *ptep;
 	void (*rnk)(unsigned long, void *, unsigned long)
 		__noreturn;
 
@@ -266,8 +267,10 @@ void machine_kexec(struct kimage *image)
 	 */
 	homecache_change_page_home(image->control_code_page, 0,
 				   smp_processor_id());
-	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
-				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+	reboot_code_buffer = page_address(image->control_code_page);
+	BUG_ON(reboot_code_buffer == NULL);
+	ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+	__set_pte(ptep, pte_mkexec(*ptep));
 	memcpy(reboot_code_buffer, relocate_new_kernel,
 	       relocate_new_kernel_size);
 	__flush_icache_range(
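[Annotation: a recap of the replacement sequence above; the rationale is an inference, since the commit message is not part of this dump.]

/*
 * 1. page_address() returns the page's existing kernel mapping
 *    (the page was just re-homed to this cpu above);
 * 2. virt_to_pte() locates the kernel PTE for that address;
 * 3. pte_mkexec() + __set_pte() flip that mapping executable in place,
 *    so no separate vmap() of the control page is needed.
 */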
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index bff23f476110..32948e21113a 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1396,13 +1396,13 @@ void __init setup_per_cpu_areas(void)
 	for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
 
 		/* Update the vmalloc mapping and page home. */
-		pte_t *ptep =
-			virt_to_pte(NULL, (unsigned long)ptr + i);
+		unsigned long addr = (unsigned long)ptr + i;
+		pte_t *ptep = virt_to_pte(NULL, addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
 		pte = set_remote_cache_cpu(pte, cpu);
-		set_pte(ptep, pte);
+		set_pte_at(&init_mm, addr, ptep, pte);
 
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1415,7 +1415,7 @@ void __init setup_per_cpu_areas(void)
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
-		set_pte(ptep, pte);
+		set_pte_at(&init_mm, lowmem_va, ptep, pte);
 	}
 }
 
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f721958..cbc73a8b8fe1 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
 		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
-		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		offset = PFN_PHYS(pte_pfn(pte));
 		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
 	}
 #endif
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index b2fe15e01075..3bc4b4e40d93 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -160,7 +160,7 @@ retry_source:
 			break;
 		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
 			break;
-		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
+		src_page = pfn_to_page(pte_pfn(src_pte));
 		get_page(src_page);
 		if (pte_val(src_pte) != pte_val(*src_ptep)) {
 			put_page(src_page);
@@ -168,7 +168,7 @@ retry_source:
 		}
 		if (pte_huge(src_pte)) {
 			/* Adjust the PTE to correspond to a small page */
-			int pfn = hv_pte_get_pfn(src_pte);
+			int pfn = pte_pfn(src_pte);
 			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
 				>> PAGE_SHIFT);
 			src_pte = pfn_pte(pfn, src_pte);
@@ -188,7 +188,7 @@ retry_dest:
 			put_page(src_page);
 			break;
 		}
-		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
+		dst_page = pfn_to_page(pte_pfn(dst_pte));
 		if (dst_page == src_page) {
 			/*
 			 * Source and dest are on the same page; this
@@ -206,7 +206,7 @@ retry_dest:
 		}
 		if (pte_huge(dst_pte)) {
 			/* Adjust the PTE to correspond to a small page */
-			int pfn = hv_pte_get_pfn(dst_pte);
+			int pfn = pte_pfn(dst_pte);
 			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
 				>> PAGE_SHIFT);
 			dst_pte = pfn_pte(pfn, dst_pte);
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 1e4633520b35..c04fbfd93fc5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
 
 static void init_prealloc_ptes(int node, int pages)
 {
-	BUG_ON(pages & (HV_L2_ENTRIES-1));
+	BUG_ON(pages & (PTRS_PER_PTE - 1));
 	if (pages) {
 		num_l2_ptes[node] = pages;
 		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
 
 #ifdef __tilegx__
 
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
 static inline pmd_t *alloc_pmd(void)
 {
-	return (pmd_t *)alloc_pte();
+	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
 }
 
 static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -811,7 +806,7 @@ void __init paging_init(void)
 	 * changing init_mm once we get up and running, and there's no
 	 * need for e.g. vmalloc_sync_all().
 	 */
-	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
 	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
 	assign_pmd(pud, alloc_pmd());
 #endif
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa899b3e..3d7074347e6d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -289,13 +289,12 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+			       int order)
 {
 	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
 	struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
 	int i;
-#endif
 
 #ifdef CONFIG_HIGHPTE
 	flags |= __GFP_HIGHMEM;
@@ -305,17 +304,15 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	if (p == NULL)
 		return NULL;
 
-#if L2_USER_PGTABLE_ORDER > 0
 	/*
 	 * Make every page have a page_count() of one, not just the first.
 	 * We don't use __GFP_COMP since it doesn't look like it works
 	 * correctly with tlb_remove_page().
 	 */
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		init_page_count(p+i);
 		inc_zone_page_state(p+i, NR_PAGETABLE);
 	}
-#endif
 
 	pgtable_page_ctor(p);
 	return p;
@@ -326,28 +323,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * process).  We have to correct whatever pte_alloc_one() did before
  * returning the pages to the allocator.
  */
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
 {
 	int i;
 
 	pgtable_page_dtor(p);
 	__free_page(p);
 
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		__free_page(p+i);
 		dec_zone_page_state(p+i, NR_PAGETABLE);
 	}
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-		    unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+			unsigned long address, int order)
 {
 	int i;
 
 	pgtable_page_dtor(pte);
 	tlb_remove_page(tlb, pte);
 
-	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+	for (i = 1; i < order; ++i) {
 		tlb_remove_page(tlb, pte + i);
 		dec_zone_page_state(pte + i, NR_PAGETABLE);
 	}
@@ -490,7 +487,7 @@ void set_pte(pte_t *ptep, pte_t pte)
 /* Can this mm load a PTE with cached_priority set? */
 static inline int mm_is_priority_cached(struct mm_struct *mm)
 {
-	return mm->context.priority_cached;
+	return mm->context.priority_cached != 0;
 }
 
 /*
@@ -500,8 +497,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
 void start_mm_caching(struct mm_struct *mm)
 {
 	if (!mm_is_priority_cached(mm)) {
-		mm->context.priority_cached = -1U;
-		hv_set_caching(-1U);
+		mm->context.priority_cached = -1UL;
+		hv_set_caching(-1UL);
 	}
 }
 
@@ -516,7 +513,7 @@ void start_mm_caching(struct mm_struct *mm)
  * Presumably we'll come back later and have more luck and clear
  * the value then; for now we'll just keep the cache marked for priority.
  */
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
 {
 	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
 		struct vm_area_struct *vm;
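[Annotation: why the int-to-long widening in this file; an inference from the hv_set_caching() comment rewritten in hypervisor.h above, plus an assumed 256KB L2 cache.]

/*
 * The priority bitmask now carries one bit per 4KB of L2 cache rather
 * than one bit per PFN mod 8.  With CHIP_L2_CACHE_SIZE() == 256KB that
 * is 256KB / 4KB = 64 bits, which overflows "unsigned int"; hence
 * priority_cached, update_priority_cached() and hv_set_caching() all
 * take an unsigned long, and -1UL marks the entire cache as priority.
 */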