path: root/arch/parisc/include/asm
author	Paolo Bonzini <pbonzini@redhat.com>	2015-11-24 13:34:40 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-11-24 13:34:40 -0500
commit	8bd142c01648cdb33e9bcafa0448ba2c20ed814c (patch)
tree	9197c60d3f9d4036f38f281a183e94750ceea1d7 /arch/parisc/include/asm
parent	d792abacaf1a1a8dfea353fab699b97fa6251c2a (diff)
parent	fbb4574ce9a37e15a9872860bf202f2be5bdf6c4 (diff)
Merge tag 'kvm-arm-for-v4.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
KVM/ARM Fixes for v4.4-rc3. Includes some timer fixes, properly unmapping PTEs, an errata fix, and two tweaks to the EL2 panic code.
Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--	arch/parisc/include/asm/hugetlb.h	85
-rw-r--r--	arch/parisc/include/asm/page.h	13
-rw-r--r--	arch/parisc/include/asm/pgalloc.h	2
-rw-r--r--	arch/parisc/include/asm/pgtable.h	26
-rw-r--r--	arch/parisc/include/asm/processor.h	27
5 files changed, 120 insertions, 33 deletions
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 000000000000..7d56a9ccb752
--- /dev/null
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_PARISC64_HUGETLB_H
+#define _ASM_PARISC64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len) {
+	return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	pte_t old_pte = *ptep;
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	int changed = !pte_same(*ptep, pte);
+	if (changed) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+	return changed;
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_PARISC64_HUGETLB_H */
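
The new prepare_hugepage_range() only enforces alignment: both the start
address and the length must be multiples of the huge page size. A minimal
stand-alone sketch of the same mask arithmetic, assuming a fixed 4 MB huge
page (in the kernel HPAGE_SHIFT is derived from PMD_SHIFT, see page.h below):

#include <stdio.h>

/* Assumed 4 MB huge page; in the kernel HPAGE_SHIFT comes from PMD_SHIFT. */
#define HPAGE_SHIFT 22
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

/* Mirrors the alignment test in prepare_hugepage_range(). */
static int range_ok(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)		/* length not a multiple of HPAGE_SIZE */
		return 0;
	if (addr & ~HPAGE_MASK)		/* start not aligned to HPAGE_SIZE */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", range_ok(0x40000000UL, 2 * HPAGE_SIZE));	/* 1: aligned */
	printf("%d\n", range_ok(0x40001000UL, HPAGE_SIZE));		/* 0: unaligned start */
	printf("%d\n", range_ok(0x40000000UL, HPAGE_SIZE + 1));	/* 0: odd length */
	return 0;
}
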
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 60d5d174dfe4..80e742a1c162 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,11 +145,22 @@ extern int npmem_ranges;
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SHIFT		22	/* 4MB (is this fixed?) */
+#define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */
 #define HPAGE_SIZE      ((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK      (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	20 /* 20 = 1MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
+#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	22 /* 22 = 4MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
+#else
+# define REAL_HPAGE_SHIFT	24 /* 24 = 16MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
 #endif
+#endif /* CONFIG_HUGETLB_PAGE */
 
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
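
HPAGE_SIZE, HPAGE_MASK and HUGETLB_PAGE_ORDER are all derived from
HPAGE_SHIFT, while the #if block selects a REAL_HPAGE_SHIFT and a matching
_PAGE_SIZE_ENCODING_* default per configuration. A small sketch, assuming
4 KB base pages (PAGE_SHIFT = 12), that prints the size and order implied
by each of the three shifts above:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: CONFIG_PARISC_PAGE_SIZE_4KB */

int main(void)
{
	/* The three REAL_HPAGE_SHIFT cases from the #if block above. */
	int shift[] = { 20, 22, 24 };

	for (int i = 0; i < 3; i++) {
		unsigned long size = 1UL << shift[i];
		printf("shift %d -> %lu MB huge page, order %d\n",
		       shift[i], size >> 20, shift[i] - PAGE_SHIFT);
	}
	return 0;
}
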
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3edbb9fc91b4..f2fd327dce2e 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 			PxD_FLAG_VALID |
 			PxD_FLAG_ATTACHED)
 			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-	/* The first pmd entry also is marked with _PAGE_GATEWAY as
+	/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
 	 * a signal that this pmd may not be freed */
 	__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
 #endif
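
The pgd entry built here packs flag bits into the low nibble (PxD_FLAG_MASK
is 0xf) and the pmd's physical address, shifted right by PxD_VALUE_SHIFT,
above them. A rough stand-alone sketch of that packing, assuming
PxD_VALUE_SHIFT = 8 (the old hard-coded value, i.e. PAGE_SHIFT minus
PxD_FLAG_SHIFT for 4 KB pages) and illustrative flag bit positions:

#include <stdio.h>

#define PxD_FLAG_PRESENT  (1u << 0)	/* illustrative; real bits come from xlate_pabit() */
#define PxD_FLAG_VALID    (1u << 1)
#define PxD_FLAG_MASK     0xfu
#define PxD_VALUE_SHIFT   8		/* assumed: PAGE_SHIFT(12) - PxD_FLAG_SHIFT(4) */

/* Pack flags plus a page-aligned physical address, as pgd_alloc() does. */
static unsigned int pack_pxd(unsigned long phys, unsigned int flags)
{
	return flags + (unsigned int)(phys >> PxD_VALUE_SHIFT);
}

/* Recover the physical address by masking the flag nibble back out. */
static unsigned long unpack_pxd(unsigned int entry)
{
	return (unsigned long)(entry & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT;
}

int main(void)
{
	unsigned long pmd_phys = 0x12345000UL;	/* page-aligned physical address */
	unsigned int entry = pack_pxd(pmd_phys, PxD_FLAG_PRESENT | PxD_FLAG_VALID);

	printf("entry   = %#x\n", entry);
	printf("address = %#lx\n", unpack_pxd(entry));
	return 0;
}
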
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f93c4a4e6580..d8534f95915a 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
 
 /* This is the size of the initially mapped kernel memory */
-#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
+#else
+#define KERNEL_INITIAL_ORDER	24	/* 1<<24 = 16MB */
+#endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
 #define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
 #define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
-/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
 #define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
 
 /* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
 #define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
 
 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8)	/* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
+#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -363,6 +368,18 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
 static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 
 /*
+ * Huge pte definitions.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
+#else
+#define pte_huge(pte)           (0)
+#define pte_mkhuge(pte)         (pte)
+#endif
+
+
+/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 /* Find an entry in the second-level page table.. */
 
 #if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 #define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
 #else
 #define pmd_offset(dir,addr) ((pmd_t *) dir)
 #endif
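
The new pmd_index() is plain shift-and-mask arithmetic: drop the offset bits
below PMD_SHIFT and keep just enough bits to index PTRS_PER_PMD entries. A
sketch with assumed values (PMD_SHIFT = 22 and PTRS_PER_PMD = 512 here; the
real numbers depend on the configured page size and CONFIG_PGTABLE_LEVELS):

#include <stdio.h>

#define PMD_SHIFT	22	/* assumed; depends on page-size config */
#define PTRS_PER_PMD	512	/* assumed number of pmd entries */

#define pmd_index(addr)	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

int main(void)
{
	unsigned long addr = 0x12345678UL;

	/* Which pmd slot covers this address, and how much each slot maps. */
	printf("pmd_index(%#lx) = %lu\n", addr, pmd_index(addr));
	printf("each pmd entry maps %lu MB\n", (1UL << PMD_SHIFT) >> 20);
	return 0;
}
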
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 54adb60c0a42..7e759ecb1343 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
  */
 typedef unsigned int elf_caddr_t;
 
-#define start_thread_som(regs, new_pc, new_sp) do {	\
-	unsigned long *sp = (unsigned long *)new_sp;	\
-	__u32 spaceid = (__u32)current->mm->context;	\
-	unsigned long pc = (unsigned long)new_pc;	\
-	/* offset pc for priv. level */			\
-	pc |= 3;					\
-							\
-	regs->iasq[0] = spaceid;			\
-	regs->iasq[1] = spaceid;			\
-	regs->iaoq[0] = pc;				\
-	regs->iaoq[1] = pc + 4;				\
-	regs->sr[2] = LINUX_GATEWAY_SPACE;		\
-	regs->sr[3] = 0xffff;				\
-	regs->sr[4] = spaceid;				\
-	regs->sr[5] = spaceid;				\
-	regs->sr[6] = spaceid;				\
-	regs->sr[7] = spaceid;				\
-	regs->gr[ 0] = USER_PSW;			\
-	regs->gr[30] = ((new_sp)+63)&~63;		\
-	regs->gr[31] = pc;				\
-							\
-	get_user(regs->gr[26],&sp[0]);			\
-	get_user(regs->gr[25],&sp[-1]);			\
-	get_user(regs->gr[24],&sp[-2]);			\
-	get_user(regs->gr[23],&sp[-3]);			\
-} while(0)
-
 /* The ELF abi wants things done a "wee bit" differently than
  * som does.  Supporting this behavior here avoids
  * having our own version of create_elf_tables.