Diffstat (limited to 'arch/parisc/mm/hugetlbpage.c')
-rw-r--r--  arch/parisc/mm/hugetlbpage.c  161
1 file changed, 161 insertions(+), 0 deletions(-)
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f6fdc77a72bd
--- /dev/null
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,161 @@
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range. So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries. Must be called holding
 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		mtsp(mm->context, 1);
		pdtlb(addr);
		if (unlikely(split_tlb))
			pitlb(addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		/* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
		 * instead, but then we get double locking on pa_tlb_lock. */
		*ptep = entry;
		ptep++;

		/* Drop the PAGE_SIZE/non-huge tlb entry */
		purge_tlb_entries(mm, addr);

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;

	entry = *ptep;
	set_huge_pte_at(mm, addr, ptep, __pte(0));

	return entry;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}