Diffstat (limited to 'include/asm-s390')
 -rw-r--r--  include/asm-s390/cpu.h          |  25
 -rw-r--r--  include/asm-s390/mmu_context.h  |  50
 -rw-r--r--  include/asm-s390/page.h         |   4
 -rw-r--r--  include/asm-s390/pgalloc.h      | 250
 -rw-r--r--  include/asm-s390/pgtable.h      | 429
 -rw-r--r--  include/asm-s390/processor.h    |  20
 -rw-r--r--  include/asm-s390/scatterlist.h  |   5
 -rw-r--r--  include/asm-s390/tlb.h          | 129
 -rw-r--r--  include/asm-s390/tlbflush.h     | 152

9 files changed, 545 insertions(+), 519 deletions(-)
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h
new file mode 100644
index 000000000000..352dde194f3c
--- /dev/null
+++ b/include/asm-s390/cpu.h
@@ -0,0 +1,25 @@
+/*
+ *  include/asm-s390/cpu.h
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_CPU_H_
+#define _ASM_S390_CPU_H_
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+struct s390_idle_data {
+        spinlock_t lock;
+        unsigned int in_idle;
+        unsigned long long idle_count;
+        unsigned long long idle_enter;
+        unsigned long long idle_time;
+};
+
+DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
+
+#endif /* _ASM_S390_CPU_H_ */
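The new s390_idle_data structure carries per-cpu idle accounting guarded by its own spinlock. For illustration, a consumer of this data might read it as in the following sketch; the function name read_idle_time() is made up here, and the real sysfs plumbing lives in arch code, not in this header:

        /* Hedged sketch: reading one CPU's accumulated idle time
         * under the per-cpu lock. read_idle_time() is hypothetical. */
        static unsigned long long read_idle_time(int cpu)
        {
                struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
                unsigned long long time;

                spin_lock_irq(&idle->lock);
                time = idle->idle_time;
                spin_unlock_irq(&idle->lock);
                return time;
        }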
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 501cb9b06314..05b842126b99 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@
 
 #ifndef __s390x__
 #define LCTL_OPCODE "lctl"
-#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
 #else
 #define LCTL_OPCODE "lctlg"
-#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
 #endif
 
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-                                  struct task_struct *tsk)
+static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
+        pgd_t *pgd = mm->pgd;
+        unsigned long asce_bits;
+
+        /* Calculate asce bits from the first pgd table entry. */
+        asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+        asce_bits |= _ASCE_TYPE_REGION3;
+#endif
+        S390_lowcore.user_asce = asce_bits | __pa(pgd);
+        if (switch_amode) {
+                /* Load primary space page table origin. */
+                pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+                S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+                asm volatile(LCTL_OPCODE" 1,1,%0\n"
+                             : : "m" (S390_lowcore.user_exec_asce) );
+        } else
+                /* Load home space page table origin. */
+                asm volatile(LCTL_OPCODE" 13,13,%0"
+                             : : "m" (S390_lowcore.user_asce) );
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
-        pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
-
-        if (prev != next) {
-                S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-                                         PGTABLE_BITS;
-                if (shadow_pgd) {
-                        /* Load primary/secondary space page table origin. */
-                        S390_lowcore.user_exec_asce =
-                                (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
-                        asm volatile(LCTL_OPCODE" 1,1,%0\n"
-                                     LCTL_OPCODE" 7,7,%1"
-                                     : : "m" (S390_lowcore.user_exec_asce),
-                                         "m" (S390_lowcore.user_asce) );
-                } else if (switch_amode) {
-                        /* Load primary space page table origin. */
-                        asm volatile(LCTL_OPCODE" 1,1,%0"
-                                     : : "m" (S390_lowcore.user_asce) );
-                } else
-                        /* Load home space page table origin. */
-                        asm volatile(LCTL_OPCODE" 13,13,%0"
-                                     : : "m" (S390_lowcore.user_asce) );
-        }
+        if (unlikely(prev == next))
+                return;
         cpu_set(smp_processor_id(), next->cpu_vm_mask);
+        update_mm(next, tsk);
 }
 
+#define enter_lazy_tlb(mm,tsk)  do { } while (0)
 #define deactivate_mm(tsk,mm)   do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev,
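For reference, on 64 bit the user ASCE that update_mm() stores in the lowcore is composed entirely from constants this series introduces in pgtable.h below. Spelled out by hand (a sketch only; update_mm() above is the authoritative version):

        /* Sketch: the 64-bit user ASCE built by update_mm(). */
        unsigned long asce = __pa(mm->pgd)      /* region-third table origin */
                | _ASCE_TYPE_REGION3            /* table type */
                | _ASCE_TABLE_LENGTH            /* 4 pages = 2048 entries */
                | _ASCE_USER_BITS;              /* space-switch, private
                                                   space, alteration event */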
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index ceec3826a67c..584d0ee3c7f6 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -82,6 +82,7 @@ typedef struct { unsigned long pte; } pte_t;
 #ifndef __s390x__
 
 typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
 typedef struct {
         unsigned long pgd0;
         unsigned long pgd1;
@@ -90,6 +91,7 @@ typedef struct {
 } pgd_t;
 
 #define pmd_val(x)      ((x).pmd)
+#define pud_val(x)      ((x).pud)
 #define pgd_val(x)      ((x).pgd0)
 
 #else /* __s390x__ */
@@ -98,10 +100,12 @@ typedef struct {
         unsigned long pmd0;
         unsigned long pmd1;
 } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
 typedef struct { unsigned long pgd; } pgd_t;
 
 #define pmd_val(x)      ((x).pmd0)
 #define pmd_val1(x)     ((x).pmd1)
+#define pud_val(x)      ((x).pud)
 #define pgd_val(x)      ((x).pgd)
 
 #endif /* __s390x__ */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index e45d3c9a4b7e..709dd1740956 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,140 +19,115 @@
 
 #define check_pgt_cache()       do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER        0
-# define PMD_ALLOC_ORDER        0
-# define PGD_ALLOC_ORDER        1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER        0
-# define PMD_ALLOC_ORDER        2
-# define PGD_ALLOC_ORDER        2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-        int i;
-
-        if (!pgd)
-                return NULL;
-        if (s390_noexec) {
-                pgd_t *shadow_pgd = (pgd_t *)
-                        __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-                struct page *page = virt_to_page(pgd);
-
-                if (!shadow_pgd) {
-                        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-                        return NULL;
-                }
-                page->lru.next = (void *) shadow_pgd;
-        }
-        for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-                pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+        *s = val;
+        n = (n / 256) - 1;
+        asm volatile(
+#ifdef CONFIG_64BIT
+                "       mvc     8(248,%0),0(%0)\n"
 #else
-                pgd_clear(pgd + i);
+                "       mvc     4(252,%0),0(%0)\n"
 #endif
-        return pgd;
+                "0:     mvc     256(256,%0),0(%0)\n"
+                "       la      %0,256(%0)\n"
+                "       brct    %1,0b\n"
+                : "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-        if (shadow_pgd)
-                free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+        clear_table(crst, entry, sizeof(unsigned long)*2048);
+        crst = get_shadow_table(crst);
+        if (crst)
+                clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)                     do { } while (0)
-#define __pmd_free_tlb(tlb,x)           do { } while (0)
-#define pgd_populate(mm, pmd, pte)      BUG()
-#define pgd_populate_kernel(mm, pmd, pte)       BUG()
-#else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-        int i;
-
-        if (!pmd)
-                return NULL;
-        if (s390_noexec) {
-                pmd_t *shadow_pmd = (pmd_t *)
-                        __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-                struct page *page = virt_to_page(pmd);
-
-                if (!shadow_pmd) {
-                        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-                        return NULL;
-                }
-                page->lru.next = (void *) shadow_pmd;
-        }
-        for (i=0; i < PTRS_PER_PMD; i++)
-                pmd_clear(pmd + i);
-        return pmd;
+        return _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
+#define pud_alloc_one(mm,address)               ({ BUG(); ((pud_t *)2); })
+#define pud_free(x)                             do { } while (0)
+
+#define pmd_alloc_one(mm,address)               ({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)                             do { } while (0)
+
+#define pgd_populate(mm, pgd, pud)              BUG()
+#define pgd_populate_kernel(mm, pgd, pud)       BUG()
+
+#define pud_populate(mm, pud, pmd)              BUG()
+#define pud_populate_kernel(mm, pud, pmd)       BUG()
+
+#else /* __s390x__ */
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-        if (shadow_pmd)
-                free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+        return _REGION3_ENTRY_EMPTY;
 }
 
-#define __pmd_free_tlb(tlb,pmd)                 \
-        do {                                    \
-                tlb_flush_mmu(tlb, 0, 0);       \
-                pmd_free(pmd);                  \
-        } while (0)
+#define pud_alloc_one(mm,address)               ({ BUG(); ((pud_t *)2); })
+#define pud_free(x)                             do { } while (0)
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+        unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+        if (crst)
+                crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+        return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+#define pgd_populate(mm, pgd, pud)              BUG()
+#define pgd_populate_kernel(mm, pgd, pud)       BUG()
+
+static inline void pud_populate_kernel(struct mm_struct *mm,
+                                       pud_t *pud, pmd_t *pmd)
 {
-        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+        pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
+}
 
-        if (shadow_pgd && shadow_pmd)
-                pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
-        pgd_populate_kernel(mm, pgd, pmd);
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+        pud_t *shadow_pud = get_shadow_table(pud);
+        pmd_t *shadow_pmd = get_shadow_table(pmd);
+
+        if (shadow_pud && shadow_pmd)
+                pud_populate_kernel(mm, shadow_pud, shadow_pmd);
+        pud_populate_kernel(mm, pud, pmd);
 }
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+        if (crst)
+                crst_table_init(crst, pgd_entry_type(mm));
+        return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+        pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+        pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+        pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+        pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+        pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+        pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
 
@@ -160,7 +135,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
         pte_t *pte = (pte_t *)page_to_phys(page);
-        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+        pmd_t *shadow_pmd = get_shadow_table(pmd);
         pte_t *shadow_pte = get_shadow_pte(pte);
 
         pmd_populate_kernel(mm, pmd, pte);
@@ -171,67 +146,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-        int i;
-
-        if (!pte)
-                return NULL;
-        if (s390_noexec) {
-                pte_t *shadow_pte = (pte_t *)
-                        __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-                struct page *page = virt_to_page(pte);
-
-                if (!shadow_pte) {
-                        free_page((unsigned long) pte);
-                        return NULL;
-                }
-                page->lru.next = (void *) shadow_pte;
-        }
-        for (i=0; i < PTRS_PER_PTE; i++) {
-                pte_clear(mm, vmaddr, pte + i);
-                vmaddr += PAGE_SIZE;
-        }
-        return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-        if (pte)
-                return virt_to_page(pte);
-        return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-        pte_t *shadow_pte = get_shadow_pte(pte);
-
-        if (shadow_pte)
-                free_page((unsigned long) shadow_pte);
-        free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-        struct page *shadow_page = get_shadow_page(pte);
-
-        if (shadow_page)
-                __free_page(shadow_page);
-        __free_page(pte);
-}
-
-#define __pte_free_tlb(tlb, pte)                                        \
-({                                                                      \
-        struct mmu_gather *__tlb = (tlb);                               \
-        struct page *__pte = (pte);                                     \
-        struct page *shadow_page = get_shadow_page(__pte);              \
-        if (shadow_page)                                                \
-                tlb_remove_page(__tlb, shadow_page);                    \
-        tlb_remove_page(__tlb, __pte);                                  \
-})
+#define pte_alloc_one_kernel(mm, vmaddr) \
+        ((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+        virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+        page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+        page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */
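The clear_table() helper above fills a table with a repeated entry value without issuing a store per entry: the first mvc is an overlapping, destructive copy that propagates the initial entry across the first 256 bytes, and the brct loop then replicates each filled 256-byte block into the next. A portable-C sketch of the same semantics, for illustration only:

        /* Hedged sketch: what clear_table() achieves, minus the mvc
         * trick. Fills n bytes starting at s with the value val. */
        static inline void clear_table_portable(unsigned long *s,
                                                unsigned long val, size_t n)
        {
                size_t i;

                for (i = 0; i < n / sizeof(unsigned long); i++)
                        s[i] = val;
        }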
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 39bb5192dc31..f2cc25b74adf 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -13,8 +13,6 @@
 #ifndef _ASM_S390_PGTABLE_H
 #define _ASM_S390_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
-
 /*
  * The Linux memory management assumes a three-level page table setup. For
  * s390 31 bit we "fold" the mid level into the top-level page table, so
@@ -35,9 +33,6 @@
 #include <asm/bug.h>
 #include <asm/processor.h>
 
-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
-struct mm_struct;
-
 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
 extern void paging_init(void);
 extern void vmem_map_init(void);
@@ -63,14 +58,18 @@ extern char empty_zero_page[PAGE_SIZE];
  */
 #ifndef __s390x__
 # define PMD_SHIFT      22
+# define PUD_SHIFT      22
 # define PGDIR_SHIFT    22
 #else /* __s390x__ */
 # define PMD_SHIFT      21
+# define PUD_SHIFT      31
 # define PGDIR_SHIFT    31
 #endif /* __s390x__ */
 
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
+#define PUD_SIZE        (1UL << PUD_SHIFT)
+#define PUD_MASK        (~(PUD_SIZE-1))
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
 
@@ -83,10 +82,12 @@ extern char empty_zero_page[PAGE_SIZE];
 #ifndef __s390x__
 # define PTRS_PER_PTE    1024
 # define PTRS_PER_PMD    1
+# define PTRS_PER_PUD    1
 # define PTRS_PER_PGD    512
 #else /* __s390x__ */
 # define PTRS_PER_PTE    512
 # define PTRS_PER_PMD    1024
+# define PTRS_PER_PUD    1
 # define PTRS_PER_PGD    2048
 #endif /* __s390x__ */
 
@@ -96,6 +97,8 @@ extern char empty_zero_page[PAGE_SIZE];
         printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
 #define pmd_ERROR(e) \
         printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
+#define pud_ERROR(e) \
+        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
 #define pgd_ERROR(e) \
         printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
 
@@ -195,7 +198,7 @@ extern unsigned long vmalloc_end;
  * I Segment-Invalid Bit:    Segment is not available for address-translation
  * TT Type 01
  * TF
- * TL Table lenght
+ * TL Table length
  *
  * The 64 bit regiontable origin of S390 has following format:
  * |      region table origon |       DTTL
@@ -221,6 +224,8 @@ extern unsigned long vmalloc_end;
 /* Hardware bits in the page table entry */
 #define _PAGE_RO        0x200           /* HW read-only bit  */
 #define _PAGE_INVALID   0x400           /* HW invalid bit    */
+
+/* Software bits in the page table entry */
 #define _PAGE_SWT       0x001           /* SW pte type bit t */
 #define _PAGE_SWX       0x002           /* SW pte type bit x */
 
@@ -264,60 +269,75 @@ extern unsigned long vmalloc_end;
 
 #ifndef __s390x__
 
-/* Bits in the segment table entry */
-#define _PAGE_TABLE_LEN 0xf            /* only full page-tables            */
-#define _PAGE_TABLE_COM 0x10           /* common page-table                */
-#define _PAGE_TABLE_INV 0x20           /* invalid page-table               */
-#define _SEG_PRESENT    0x001          /* Software (overlap with PTL)      */
-
-/* Bits int the storage key */
-#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
-#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */
-
-#define _USER_SEG_TABLE_LEN     0x7f   /* user-segment-table up to 2 GB */
-#define _KERNEL_SEG_TABLE_LEN   0x7f   /* kernel-segment-table up to 2 GB */
-
-/*
- * User and Kernel pagetables are identical
- */
-#define _PAGE_TABLE     _PAGE_TABLE_LEN
-#define _KERNPG_TABLE   _PAGE_TABLE_LEN
-
-/*
- * The Kernel segment-tables includes the User segment-table
- */
+/* Bits in the segment table address-space-control-element */
+#define _ASCE_SPACE_SWITCH      0x80000000UL    /* space switch event */
+#define _ASCE_ORIGIN_MASK       0x7ffff000UL    /* segment table origin */
+#define _ASCE_PRIVATE_SPACE     0x100   /* private space control */
+#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
+#define _ASCE_TABLE_LENGTH      0x7f    /* 128 x 64 entries = 8k */
 
-#define _SEGMENT_TABLE  (_USER_SEG_TABLE_LEN|0x80000000|0x100)
-#define _KERNSEG_TABLE  _KERNEL_SEG_TABLE_LEN
+/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin */
+#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry */
+#define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit */
+#define _SEGMENT_ENTRY_PTL      0x0f    /* page table length */
 
-#define USER_STD_MASK   0x00000080UL
+#define _SEGMENT_ENTRY          (_SEGMENT_ENTRY_PTL)
+#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)
 
 #else /* __s390x__ */
 
+/* Bits in the segment/region table address-space-control-element */
+#define _ASCE_ORIGIN            ~0xfffUL/* segment table origin */
+#define _ASCE_PRIVATE_SPACE     0x100   /* private space control */
+#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
+#define _ASCE_SPACE_SWITCH      0x40    /* space switch event */
+#define _ASCE_REAL_SPACE        0x20    /* real space control */
+#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask */
+#define _ASCE_TYPE_REGION1      0x0c    /* region first table type */
+#define _ASCE_TYPE_REGION2      0x08    /* region second table type */
+#define _ASCE_TYPE_REGION3      0x04    /* region third table type */
+#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type */
+#define _ASCE_TABLE_LENGTH      0x03    /* region table length */
+
+/* Bits in the region table entry */
+#define _REGION_ENTRY_ORIGIN    ~0xfffUL/* region/segment table origin */
+#define _REGION_ENTRY_INV       0x20    /* invalid region table entry */
+#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type */
+#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type */
+#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type */
+#define _REGION_ENTRY_LENGTH    0x03    /* region third length */
+
+#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
+#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
+#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
+#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+
 /* Bits in the segment table entry */
-#define _PMD_ENTRY_INV   0x20          /* invalid segment table entry      */
-#define _PMD_ENTRY       0x00
+#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL/* segment table origin */
+#define _SEGMENT_ENTRY_RO       0x200   /* page protection bit */
+#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry */
+
+#define _SEGMENT_ENTRY          (0)
+#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)
 
-/* Bits in the region third table entry */
-#define _PGD_ENTRY_INV   0x20          /* invalid region table entry       */
-#define _PGD_ENTRY       0x07
+#endif /* __s390x__ */
 
 /*
- * User and kernel page directory
+ * A user page table pointer has the space-switch-event bit, the
+ * private-space-control bit and the storage-alteration-event-control
+ * bit set. A kernel page table pointer doesn't need them.
  */
-#define _REGION_THIRD   0x4
-#define _REGION_THIRD_LEN       0x3
-#define _REGION_TABLE   (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
-#define _KERN_REGION_TABLE      (_REGION_THIRD|_REGION_THIRD_LEN)
-
-#define USER_STD_MASK   0x0000000000000080UL
+#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
+                                 _ASCE_ALT_EVENT)
 
-/* Bits in the storage key */
+/* Bits int the storage key */
 #define _PAGE_CHANGED    0x02          /* HW changed bit                   */
 #define _PAGE_REFERENCED 0x04          /* HW referenced bit                */
 
-#endif /* __s390x__ */
-
 /*
  * Page protection definitions.
  */
@@ -358,65 +378,38 @@ extern unsigned long vmalloc_end;
 #define __S111  PAGE_EX_RW
 
 #ifndef __s390x__
-# define PMD_SHADOW_SHIFT       1
-# define PGD_SHADOW_SHIFT       1
+# define PxD_SHADOW_SHIFT       1
 #else /* __s390x__ */
-# define PMD_SHADOW_SHIFT       2
-# define PGD_SHADOW_SHIFT       2
+# define PxD_SHADOW_SHIFT       2
 #endif /* __s390x__ */
 
 static inline struct page *get_shadow_page(struct page *page)
 {
-        if (s390_noexec && !list_empty(&page->lru))
-                return virt_to_page(page->lru.next);
-        return NULL;
-}
-
-static inline pte_t *get_shadow_pte(pte_t *ptep)
-{
-        unsigned long pteptr = (unsigned long) (ptep);
-
-        if (s390_noexec) {
-                unsigned long offset = pteptr & (PAGE_SIZE - 1);
-                void *addr = (void *) (pteptr ^ offset);
-                struct page *page = virt_to_page(addr);
-                if (!list_empty(&page->lru))
-                        return (pte_t *) ((unsigned long) page->lru.next |
-                                                                offset);
-        }
+        if (s390_noexec && page->index)
+                return virt_to_page((void *)(addr_t) page->index);
         return NULL;
 }
 
-static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
+static inline void *get_shadow_pte(void *table)
 {
-        unsigned long pmdptr = (unsigned long) (pmdp);
+        unsigned long addr, offset;
+        struct page *page;
 
-        if (s390_noexec) {
-                unsigned long offset = pmdptr &
-                                ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
-                void *addr = (void *) (pmdptr ^ offset);
-                struct page *page = virt_to_page(addr);
-                if (!list_empty(&page->lru))
-                        return (pmd_t *) ((unsigned long) page->lru.next |
-                                                                offset);
-        }
-        return NULL;
+        addr = (unsigned long) table;
+        offset = addr & (PAGE_SIZE - 1);
+        page = virt_to_page((void *)(addr ^ offset));
+        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
 }
 
-static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
+static inline void *get_shadow_table(void *table)
 {
-        unsigned long pgdptr = (unsigned long) (pgdp);
+        unsigned long addr, offset;
+        struct page *page;
 
-        if (s390_noexec) {
-                unsigned long offset = pgdptr &
-                                ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
-                void *addr = (void *) (pgdptr ^ offset);
-                struct page *page = virt_to_page(addr);
-                if (!list_empty(&page->lru))
-                        return (pgd_t *) ((unsigned long) page->lru.next |
-                                                                offset);
-        }
-        return NULL;
+        addr = (unsigned long) table;
+        offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
+        page = virt_to_page((void *)(addr ^ offset));
+        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
 }
 
 /*
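The shadow-table lookups now key off page->index instead of the page's lru list: the XOR with the alignment mask recovers the start of the order-aligned table, and page->index of that page is expected to hold the address of its shadow, with the original offset OR'ed back in. The registration side is not part of this header; schematically it would look like the sketch below (the exact assignment is an assumption, since crst_table_alloc() lives in arch code outside this diff):

        /* Hedged sketch: how an allocator would register a shadow table
         * so that get_shadow_table() can find it later. */
        struct page *page = virt_to_page(table);
        page->index = (unsigned long) shadow_table;     /* 0 if none */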
@@ -424,7 +417,8 @@ static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
  * within a page table are directly modified. Thus, the following
  * hook is made available.
  */
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+                              pte_t *pteptr, pte_t pteval)
 {
         pte_t *shadow_pte = get_shadow_pte(pteptr);
 
@@ -437,7 +431,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
                 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
         }
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
  * pgd/pmd/pte query functions
@@ -448,47 +441,50 @@ static inline int pgd_present(pgd_t pgd) { return 1; }
 static inline int pgd_none(pgd_t pgd)    { return 0; }
 static inline int pgd_bad(pgd_t pgd)     { return 0; }
 
-static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
-static inline int pmd_none(pmd_t pmd)    { return pmd_val(pmd) & _PAGE_TABLE_INV; }
-static inline int pmd_bad(pmd_t pmd)
-{
-        return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
-}
+static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_none(pud_t pud)    { return 0; }
+static inline int pud_bad(pud_t pud)     { return 0; }
 
 #else /* __s390x__ */
 
-static inline int pgd_present(pgd_t pgd)
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline int pgd_none(pgd_t pgd)    { return 0; }
+static inline int pgd_bad(pgd_t pgd)     { return 0; }
+
+static inline int pud_present(pud_t pud)
 {
-        return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
+        return pud_val(pud) & _REGION_ENTRY_ORIGIN;
 }
 
-static inline int pgd_none(pgd_t pgd)
+static inline int pud_none(pud_t pud)
 {
-        return pgd_val(pgd) & _PGD_ENTRY_INV;
+        return pud_val(pud) & _REGION_ENTRY_INV;
 }
 
-static inline int pgd_bad(pgd_t pgd)
+static inline int pud_bad(pud_t pud)
 {
-        return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
+        unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
+        return (pud_val(pud) & mask) != _REGION3_ENTRY;
 }
 
+#endif /* __s390x__ */
+
 static inline int pmd_present(pmd_t pmd)
 {
-        return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
+        return pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-        return pmd_val(pmd) & _PMD_ENTRY_INV;
+        return pmd_val(pmd) & _SEGMENT_ENTRY_INV;
 }
 
 static inline int pmd_bad(pmd_t pmd)
 {
-        return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
+        unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+        return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
 
-#endif /* __s390x__ */
-
 static inline int pte_none(pte_t pte)
 {
         return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
@@ -508,7 +504,8 @@ static inline int pte_file(pte_t pte)
         return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
 }
 
-#define pte_same(a,b)  (pte_val(a) == pte_val(b))
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(a,b)  (pte_val(a) == pte_val(b))
 
 /*
  * query functions pte_write/pte_dirty/pte_young only work if
@@ -543,58 +540,52 @@ static inline int pte_young(pte_t pte)
 
 #ifndef __s390x__
 
-static inline void pgd_clear(pgd_t * pgdp)      { }
+#define pgd_clear(pgd)          do { } while (0)
+#define pud_clear(pud)          do { } while (0)
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
-        pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
-        pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
-        pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
-        pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
-}
-
-static inline void pmd_clear(pmd_t * pmdp)
-{
-        pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
-
-        pmd_clear_kernel(pmdp);
-        if (shadow_pmd)
-                pmd_clear_kernel(shadow_pmd);
+        pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
+        pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
+        pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
+        pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
 }
 
 #else /* __s390x__ */
 
-static inline void pgd_clear_kernel(pgd_t * pgdp)
+#define pgd_clear(pgd)          do { } while (0)
+
+static inline void pud_clear_kernel(pud_t *pud)
 {
-        pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
+        pud_val(*pud) = _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pgd_clear(pgd_t * pgdp)
+static inline void pud_clear(pud_t * pud)
 {
-        pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
+        pud_t *shadow = get_shadow_table(pud);
 
-        pgd_clear_kernel(pgdp);
-        if (shadow_pgd)
-                pgd_clear_kernel(shadow_pgd);
+        pud_clear_kernel(pud);
+        if (shadow)
+                pud_clear_kernel(shadow);
 }
 
 static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
-        pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
-        pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
+        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+        pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
 }
 
+#endif /* __s390x__ */
+
 static inline void pmd_clear(pmd_t * pmdp)
 {
-        pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+        pmd_t *shadow_pmd = get_shadow_table(pmdp);
 
         pmd_clear_kernel(pmdp);
         if (shadow_pmd)
                 pmd_clear_kernel(shadow_pmd);
 }
 
-#endif /* __s390x__ */
-
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_t *shadow_pte = get_shadow_pte(ptep);
@@ -663,24 +654,19 @@ static inline pte_t pte_mkyoung(pte_t pte)
         return pte;
 }
 
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep)
 {
         return 0;
 }
 
-static inline int
-ptep_clear_flush_young(struct vm_area_struct *vma,
-                        unsigned long address, pte_t *ptep)
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+                                         unsigned long address, pte_t *ptep)
 {
         /* No need to flush TLB; bits are in storage key */
-        return ptep_test_and_clear_young(vma, address, ptep);
-}
-
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-        pte_t pte = *ptep;
-        pte_clear(mm, addr, ptep);
-        return pte;
+        return 0;
 }
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -709,6 +695,32 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
         __ptep_ipte(address, ptep);
 }
 
+/*
+ * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
+ * both clear the TLB for the unmapped pte. The reason is that
+ * ptep_get_and_clear is used in common code (e.g. change_pte_range)
+ * to modify an active pte. The sequence is
+ *   1) ptep_get_and_clear
+ *   2) set_pte_at
+ *   3) flush_tlb_range
+ * On s390 the tlb needs to get flushed with the modification of the pte
+ * if the pte is active. The only way how this can be implemented is to
+ * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
+ * is a nop.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep)                     \
+({                                                                      \
+        pte_t __pte = *(__ptep);                                        \
+        if (atomic_read(&(__mm)->mm_users) > 1 ||                       \
+            (__mm) != current->active_mm)                               \
+                ptep_invalidate(__address, __ptep);                     \
+        else                                                            \
+                pte_clear((__mm), (__address), (__ptep));               \
+        __pte;                                                          \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                      unsigned long address, pte_t *ptep)
 {
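The comment in the hunk above describes the generic mprotect-style sequence that forces this design. Schematically (a hedged sketch of what common code such as change_pte_range() does, not a copy of it):

        pte_t pte = ptep_get_and_clear(mm, addr, ptep); /* flushes TLB here */
        pte = pte_modify(pte, newprot);
        set_pte_at(mm, addr, ptep, pte);
        /* the later flush_tlb_range() is a nop on s390 */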
@@ -717,12 +729,40 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
         return pte;
 }
 
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+/*
+ * The batched pte unmap code uses ptep_get_and_clear_full to clear the
+ * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
+ * tlbs of an mm if it can guarantee that the ptes of the mm_struct
+ * cannot be accessed while the batched unmap is running. In this case
+ * full==1 and a simple pte_clear is enough. See tlb.h.
+ */
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+                                            unsigned long addr,
+                                            pte_t *ptep, int full)
 {
-        pte_t old_pte = *ptep;
-        set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+        pte_t pte = *ptep;
+
+        if (full)
+                pte_clear(mm, addr, ptep);
+        else
+                ptep_invalidate(addr, ptep);
+        return pte;
 }
 
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define ptep_set_wrprotect(__mm, __addr, __ptep)                        \
+({                                                                      \
+        pte_t __pte = *(__ptep);                                        \
+        if (pte_write(__pte)) {                                         \
+                if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+                    (__mm) != current->active_mm)                       \
+                        ptep_invalidate(__addr, __ptep);                \
+                set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
+        }                                                               \
+})
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
 ({                                                                      \
         int __changed = !pte_same(*(__ptep), __entry);                  \
@@ -740,11 +780,13 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
 static inline int page_test_dirty(struct page *page)
 {
         return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
 }
 
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
 static inline void page_clear_dirty(struct page *page)
 {
         page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
@@ -753,6 +795,7 @@ static inline void page_clear_dirty(struct page *page)
 /*
  * Test and clear referenced bit in storage key.
  */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 static inline int page_test_and_clear_young(struct page *page)
 {
         unsigned long physpage = page_to_phys(page);
@@ -784,63 +827,48 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
         return mk_pte_phys(physpage, pgprot);
 }
 
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
-        unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
-
-        return mk_pte_phys(physpage, pgprot);
-}
-
-#ifdef __s390x__
-
-static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
-{
-        unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
-
-        return __pmd(physpage + pgprot_val(pgprot));
-}
-
-#endif /* __s390x__ */
-
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
-#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
 
-#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#ifndef __s390x__
 
-#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
+#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+#define pud_deref(pmd) ({ BUG(); 0UL; })
+#define pgd_deref(pmd) ({ BUG(); 0UL; })
 
-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pud_offset(pgd, address) ((pud_t *) pgd)
+#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
 
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#else /* __s390x__ */
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
+#define pgd_deref(pgd) ({ BUG(); 0UL; })
 
-#ifndef __s390x__
+#define pud_offset(pgd, address) ((pud_t *) pgd)
 
-/* Find an entry in the second-level page table.. */
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 {
-        return (pmd_t *) dir;
+        pmd_t *pmd = (pmd_t *) pud_deref(*pud);
+        return pmd + pmd_index(address);
 }
 
-#else /* __s390x__ */
+#endif /* __s390x__ */
 
-/* Find an entry in the second-level page table.. */
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir,addr) \
-        ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))
+#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
+#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+#define pte_page(x) pfn_to_page(pte_pfn(x))
 
-#endif /* __s390x__ */
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) \
-        ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
+/* Find an entry in the lowest level page table.. */
+#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
+#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
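With the *_deref and *_offset helpers from the hunk above in place, a full software walk of the new layout reads as in this sketch (illustrative only; generic mm code performs the same walk with the same accessors):

        /* Hedged sketch: walking to the pte for an address in mm. */
        static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
        {
                pgd_t *pgd = pgd_offset(mm, addr);
                pud_t *pud = pud_offset(pgd, addr); /* folded: pud == pgd */
                pmd_t *pmd;

                if (pud_none(*pud) || pud_bad(*pud))
                        return NULL;
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd) || pmd_bad(*pmd))
                        return NULL;
                return pte_offset_kernel(pmd, addr);
        }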
@@ -930,17 +958,6 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
 #define __HAVE_ARCH_MEMMAP_INIT
 extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_DIRTY
-#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
-
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 3b972d4c6b29..21d40a19355e 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -93,7 +93,6 @@ struct thread_struct {
         s390_fp_regs fp_regs;
         unsigned int  acrs[NUM_ACRS];
         unsigned long ksp;              /* kernel stack pointer */
-        unsigned long user_seg;         /* HSTD */
         mm_segment_t mm_segment;
         unsigned long prot_addr;        /* address of protection-excep. */
         unsigned int error_code;        /* error-code of last prog-excep. */
@@ -128,22 +127,9 @@ struct stack_frame {
 
 #define ARCH_MIN_TASKALIGN      8
 
-#ifndef __s390x__
-# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE
-#else /* __s390x__ */
-# define __SWAPPER_PG_DIR __pa(&swapper_pg_dir[0]) + _REGION_TABLE
-#endif /* __s390x__ */
-
-#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
-                            {0},{0},{0},{0},{0},{0}}}, \
-                     {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, \
-                     sizeof(init_stack) + (unsigned long) &init_stack, \
-                     __SWAPPER_PG_DIR, \
-                     {0}, \
-                     0,0,0, \
-                     (per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
-                     0, 0 \
-}
+#define INIT_THREAD {                                                   \
+        .ksp = sizeof(init_stack) + (unsigned long) &init_stack,        \
+}
 
 /*
  * Do necessary setup to start up a new thread.
diff --git a/include/asm-s390/scatterlist.h b/include/asm-s390/scatterlist.h index a43b3afc5e2d..29ec8e28c8df 100644 --- a/include/asm-s390/scatterlist.h +++ b/include/asm-s390/scatterlist.h | |||
| @@ -2,7 +2,10 @@ | |||
| 2 | #define _ASMS390_SCATTERLIST_H | 2 | #define _ASMS390_SCATTERLIST_H |
| 3 | 3 | ||
| 4 | struct scatterlist { | 4 | struct scatterlist { |
| 5 | struct page *page; | 5 | #ifdef CONFIG_DEBUG_SG |
| 6 | unsigned long sg_magic; | ||
| 7 | #endif | ||
| 8 | unsigned long page_link; | ||
| 6 | unsigned int offset; | 9 | unsigned int offset; |
| 7 | unsigned int length; | 10 | unsigned int length; |
| 8 | }; | 11 | }; |
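The struct page pointer is replaced by page_link as part of the generic scatterlist chaining rework: page pointers are at least word-aligned, so the two low bits are free to carry the chain and list-end markers. A hedged sketch of the decoding the generic helpers perform (the flag names here are illustrative; the generic code uses the raw values 0x01 and 0x02):

	#define SG_BIT_CHAIN	0x01UL	/* entry chains to another sg list */
	#define SG_BIT_LAST	0x02UL	/* last entry of the list */

	/* Sketch: recover the page pointer by masking off the flag bits. */
	static inline struct page *sg_page_sketch(struct scatterlist *sg)
	{
		return (struct page *)(sg->page_link &
				       ~(SG_BIT_CHAIN | SG_BIT_LAST));
	}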
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h index 51bd957b85bd..618693cfc10f 100644 --- a/include/asm-s390/tlb.h +++ b/include/asm-s390/tlb.h | |||
| @@ -2,19 +2,130 @@ | |||
| 2 | #define _S390_TLB_H | 2 | #define _S390_TLB_H |
| 3 | 3 | ||
| 4 | /* | 4 | /* |
| 5 | * s390 doesn't need any special per-pte or | 5 | * TLB flushing on s390 is complicated. The following requirement |
| 6 | * per-vma handling.. | 6 | * from the principles of operation is the most arduous: |
| 7 | * | ||
| 8 | * "A valid table entry must not be changed while it is attached | ||
| 9 | * to any CPU and may be used for translation by that CPU except to | ||
| 10 | * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY, | ||
| 11 | * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page | ||
| 12 | * table entry, or (3) make a change by means of a COMPARE AND SWAP | ||
| 13 | * AND PURGE instruction that purges the TLB." | ||
| 14 | * | ||
| 15 | * The modification of a pte of an active mm struct therefore is | ||
| 16 | * a two step process: i) invalidate the pte, ii) store the new pte. | ||
| 17 | * This is true for the page protection bit as well. | ||
| 18 | * The only possible optimization is to flush at the beginning of | ||
| 19 | * a tlb_gather_mmu cycle if the mm_struct is currently not in use. | ||
| 20 | * | ||
| 21 | * Pages used for the page tables are a different story. FIXME: more | ||
| 7 | */ | 22 | */ |
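To make the quoted requirement concrete, here is a sketch of the two-step update of an attached pte, following the pattern of the kernel's IPTE helpers. The 31-bit masking of the page table origin is elided and the function name is hypothetical, so read this as an illustration of the ordering rather than a drop-in helper:

	/* Sketch: change an attached pte without violating the rule above. */
	static inline void ptep_invalidate_and_set(unsigned long address,
						   pte_t *ptep, pte_t entry)
	{
		unsigned long pto = (unsigned long) ptep;

		/* Step i: INVALIDATE PAGE TABLE ENTRY marks the entry
		 * invalid and purges it from the TLBs of all CPUs. */
		asm volatile("	ipte	%2,%3"
			     : "=m" (*ptep)
			     : "m" (*ptep), "a" (pto), "a" (address));
		/* Step ii: only now may the new value be stored. */
		*ptep = entry;
	}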
| 8 | #define tlb_start_vma(tlb, vma) do { } while (0) | 23 | |
| 9 | #define tlb_end_vma(tlb, vma) do { } while (0) | 24 | #include <linux/mm.h> |
| 10 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | 25 | #include <linux/swap.h> |
| 26 | #include <asm/processor.h> | ||
| 27 | #include <asm/pgalloc.h> | ||
| 28 | #include <asm/smp.h> | ||
| 29 | #include <asm/tlbflush.h> | ||
| 30 | |||
| 31 | #ifndef CONFIG_SMP | ||
| 32 | #define TLB_NR_PTRS 1 | ||
| 33 | #else | ||
| 34 | #define TLB_NR_PTRS 508 | ||
| 35 | #endif | ||
| 36 | |||
| 37 | struct mmu_gather { | ||
| 38 | struct mm_struct *mm; | ||
| 39 | unsigned int fullmm; | ||
| 40 | unsigned int nr_ptes; | ||
| 41 | unsigned int nr_pmds; | ||
| 42 | void *array[TLB_NR_PTRS]; | ||
| 43 | }; | ||
| 44 | |||
| 45 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
| 46 | |||
| 47 | static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, | ||
| 48 | unsigned int full_mm_flush) | ||
| 49 | { | ||
| 50 | struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); | ||
| 51 | |||
| 52 | tlb->mm = mm; | ||
| 53 | tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) || | ||
| 54 | (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm); | ||
| 55 | tlb->nr_ptes = 0; | ||
| 56 | tlb->nr_pmds = TLB_NR_PTRS; | ||
| 57 | if (tlb->fullmm) | ||
| 58 | __tlb_flush_mm(mm); | ||
| 59 | return tlb; | ||
| 60 | } | ||
| 61 | |||
| 62 | static inline void tlb_flush_mmu(struct mmu_gather *tlb, | ||
| 63 | unsigned long start, unsigned long end) | ||
| 64 | { | ||
| 65 | if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS)) | ||
| 66 | __tlb_flush_mm(tlb->mm); | ||
| 67 | while (tlb->nr_ptes > 0) | ||
| 68 | pte_free(tlb->array[--tlb->nr_ptes]); | ||
| 69 | while (tlb->nr_pmds < TLB_NR_PTRS) | ||
| 70 | pmd_free((pmd_t *) tlb->array[tlb->nr_pmds++]); | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, | ||
| 74 | unsigned long start, unsigned long end) | ||
| 75 | { | ||
| 76 | tlb_flush_mmu(tlb, start, end); | ||
| 77 | |||
| 78 | /* keep the page table cache within bounds */ | ||
| 79 | check_pgt_cache(); | ||
| 80 | |||
| 81 | put_cpu_var(mmu_gathers); | ||
| 82 | } | ||
| 11 | 83 | ||
| 12 | /* | 84 | /* |
| 13 | * .. because we flush the whole mm when it | 85 | * Release the page cache reference for a pte removed by |
| 14 | * fills up. | 86 | * tlb_ptep_clear_flush. In both flush modes the tlb fo a page cache page |
| 87 | * has already been freed, so just do free_page_and_swap_cache. | ||
| 15 | */ | 88 | */ |
| 16 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | 89 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) |
| 90 | { | ||
| 91 | free_page_and_swap_cache(page); | ||
| 92 | } | ||
| 17 | 93 | ||
| 18 | #include <asm-generic/tlb.h> | 94 | /* |
| 95 | * pte_free_tlb frees a pte table and clears the CRSTE for the | ||
| 96 | * page table from the tlb. | ||
| 97 | */ | ||
| 98 | static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page) | ||
| 99 | { | ||
| 100 | if (!tlb->fullmm) { | ||
| 101 | tlb->array[tlb->nr_ptes++] = page; | ||
| 102 | if (tlb->nr_ptes >= tlb->nr_pmds) | ||
| 103 | tlb_flush_mmu(tlb, 0, 0); | ||
| 104 | } else | ||
| 105 | pte_free(page); | ||
| 106 | } | ||
| 19 | 107 | ||
| 108 | /* | ||
| 109 | * pmd_free_tlb frees a pmd table and clears the CRSTE for the | ||
| 110 | * segment table entry from the tlb. | ||
| 111 | */ | ||
| 112 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) | ||
| 113 | { | ||
| 114 | #ifdef __s390x__ | ||
| 115 | if (!tlb->fullmm) { | ||
| 116 | tlb->array[--tlb->nr_pmds] = (struct page *) pmd; | ||
| 117 | if (tlb->nr_ptes >= tlb->nr_pmds) | ||
| 118 | tlb_flush_mmu(tlb, 0, 0); | ||
| 119 | } else | ||
| 120 | pmd_free(pmd); | ||
| 20 | #endif | 121 | #endif |
| 122 | } | ||
| 123 | |||
| 124 | #define pud_free_tlb(tlb, pud) do { } while (0) | ||
| 125 | |||
| 126 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
| 127 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
| 128 | #define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0) | ||
| 129 | #define tlb_migrate_finish(mm) do { } while (0) | ||
| 130 | |||
| 131 | #endif /* _S390_TLB_H */ | ||
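The array in struct mmu_gather is used from both ends: freed pte tables are collected upwards from index 0 (nr_ptes), freed pmd tables downwards from TLB_NR_PTRS (nr_pmds), and tlb_flush_mmu is forced when the two meet. A hedged sketch of the cycle a caller goes through (the function name and the zap step are illustrative stand-ins for the generic unmap path):

	void unmap_region_sketch(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
	{
		struct mmu_gather *tlb;

		tlb = tlb_gather_mmu(mm, 0);	/* may pre-flush an unused mm */
		/*
		 * ... zap the ptes in [start, end), then hand the emptied
		 * tables to the gather:
		 *	pte_free_tlb(tlb, page);  collected at array[0..nr_ptes)
		 *	pmd_free_tlb(tlb, pmd);   collected at array[nr_pmds..508)
		 */
		tlb_finish_mmu(tlb, start, end); /* flush, then free the tables */
	}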
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h index 6de2632a3e4f..a69bd2490d52 100644 --- a/include/asm-s390/tlbflush.h +++ b/include/asm-s390/tlbflush.h | |||
| @@ -6,68 +6,19 @@ | |||
| 6 | #include <asm/pgalloc.h> | 6 | #include <asm/pgalloc.h> |
| 7 | 7 | ||
| 8 | /* | 8 | /* |
| 9 | * TLB flushing: | 9 | * Flush all tlb entries on the local cpu. |
| 10 | * | ||
| 11 | * - flush_tlb() flushes the current mm struct TLBs | ||
| 12 | * - flush_tlb_all() flushes all processes TLBs | ||
| 13 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
| 14 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
| 15 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
| 16 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
| 17 | */ | ||
| 18 | |||
| 19 | /* | ||
| 20 | * S/390 has three ways of flushing TLBs | ||
| 21 | * 'ptlb' does a flush of the local processor | ||
| 22 | * 'csp' flushes the TLBs on all PUs of a SMP | ||
| 23 | * 'ipte' invalidates a pte in a page table and flushes that out of | ||
| 24 | * the TLBs of all PUs of a SMP | ||
| 25 | */ | ||
| 26 | |||
| 27 | #define local_flush_tlb() \ | ||
| 28 | do { asm volatile("ptlb": : :"memory"); } while (0) | ||
| 29 | |||
| 30 | #ifndef CONFIG_SMP | ||
| 31 | |||
| 32 | /* | ||
| 33 | * We always need to flush, since s390 does not flush tlb | ||
| 34 | * on each context switch | ||
| 35 | */ | 10 | */ |
| 36 | 11 | static inline void __tlb_flush_local(void) | |
| 37 | static inline void flush_tlb(void) | ||
| 38 | { | 12 | { |
| 39 | local_flush_tlb(); | 13 | asm volatile("ptlb" : : : "memory"); |
| 40 | } | 14 | } |
| 41 | static inline void flush_tlb_all(void) | ||
| 42 | { | ||
| 43 | local_flush_tlb(); | ||
| 44 | } | ||
| 45 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
| 46 | { | ||
| 47 | local_flush_tlb(); | ||
| 48 | } | ||
| 49 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
| 50 | unsigned long addr) | ||
| 51 | { | ||
| 52 | local_flush_tlb(); | ||
| 53 | } | ||
| 54 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
| 55 | unsigned long start, unsigned long end) | ||
| 56 | { | ||
| 57 | local_flush_tlb(); | ||
| 58 | } | ||
| 59 | |||
| 60 | #define flush_tlb_kernel_range(start, end) \ | ||
| 61 | local_flush_tlb(); | ||
| 62 | |||
| 63 | #else | ||
| 64 | 15 | ||
| 65 | #include <asm/smp.h> | 16 | /* |
| 66 | 17 | * Flush all tlb entries on all cpus. | |
| 67 | extern void smp_ptlb_all(void); | 18 | */ |
| 68 | 19 | static inline void __tlb_flush_global(void) | |
| 69 | static inline void global_flush_tlb(void) | ||
| 70 | { | 20 | { |
| 21 | extern void smp_ptlb_all(void); | ||
| 71 | register unsigned long reg2 asm("2"); | 22 | register unsigned long reg2 asm("2"); |
| 72 | register unsigned long reg3 asm("3"); | 23 | register unsigned long reg3 asm("3"); |
| 73 | register unsigned long reg4 asm("4"); | 24 | register unsigned long reg4 asm("4"); |
| @@ -89,66 +40,75 @@ static inline void global_flush_tlb(void) | |||
| 89 | } | 40 | } |
| 90 | 41 | ||
| 91 | /* | 42 | /* |
| 92 | * We only have to do global flush of tlb if process run since last | 43 | * Flush all tlb entries of a page table on all cpus. |
| 93 | * flush on any other pu than current. | ||
| 94 | * If we have threads (mm->count > 1) we always do a global flush, | ||
| 95 | * since the process runs on more than one processor at the same time. | ||
| 96 | */ | 44 | */ |
| 45 | static inline void __tlb_flush_idte(pgd_t *pgd) | ||
| 46 | { | ||
| 47 | asm volatile( | ||
| 48 | " .insn rrf,0xb98e0000,0,%0,%1,0" | ||
| 49 | : : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" ); | ||
| 50 | } | ||
| 97 | 51 | ||
| 98 | static inline void __flush_tlb_mm(struct mm_struct * mm) | 52 | static inline void __tlb_flush_mm(struct mm_struct * mm) |
| 99 | { | 53 | { |
| 100 | cpumask_t local_cpumask; | 54 | cpumask_t local_cpumask; |
| 101 | 55 | ||
| 102 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) | 56 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) |
| 103 | return; | 57 | return; |
| 58 | /* | ||
| 59 | * If the machine has IDTE we prefer to do a per mm flush | ||
| 60 | * on all cpus instead of doing a local flush if the mm | ||
| 61 | * only ran on the local cpu. | ||
| 62 | */ | ||
| 104 | if (MACHINE_HAS_IDTE) { | 63 | if (MACHINE_HAS_IDTE) { |
| 105 | pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd); | 64 | pgd_t *shadow_pgd = get_shadow_table(mm->pgd); |
| 106 | 65 | ||
| 107 | if (shadow_pgd) { | 66 | if (shadow_pgd) |
| 108 | asm volatile( | 67 | __tlb_flush_idte(shadow_pgd); |
| 109 | " .insn rrf,0xb98e0000,0,%0,%1,0" | 68 | __tlb_flush_idte(mm->pgd); |
| 110 | : : "a" (2048), | ||
| 111 | "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" ); | ||
| 112 | } | ||
| 113 | asm volatile( | ||
| 114 | " .insn rrf,0xb98e0000,0,%0,%1,0" | ||
| 115 | : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); | ||
| 116 | return; | 69 | return; |
| 117 | } | 70 | } |
| 118 | preempt_disable(); | 71 | preempt_disable(); |
| 72 | /* | ||
| 73 | * If the process only ran on the local cpu, do a local flush. | ||
| 74 | */ | ||
| 119 | local_cpumask = cpumask_of_cpu(smp_processor_id()); | 75 | local_cpumask = cpumask_of_cpu(smp_processor_id()); |
| 120 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) | 76 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) |
| 121 | local_flush_tlb(); | 77 | __tlb_flush_local(); |
| 122 | else | 78 | else |
| 123 | global_flush_tlb(); | 79 | __tlb_flush_global(); |
| 124 | preempt_enable(); | 80 | preempt_enable(); |
| 125 | } | 81 | } |
| 126 | 82 | ||
| 127 | static inline void flush_tlb(void) | 83 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) |
| 128 | { | ||
| 129 | __flush_tlb_mm(current->mm); | ||
| 130 | } | ||
| 131 | static inline void flush_tlb_all(void) | ||
| 132 | { | ||
| 133 | global_flush_tlb(); | ||
| 134 | } | ||
| 135 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
| 136 | { | ||
| 137 | __flush_tlb_mm(mm); | ||
| 138 | } | ||
| 139 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
| 140 | unsigned long addr) | ||
| 141 | { | ||
| 142 | __flush_tlb_mm(vma->vm_mm); | ||
| 143 | } | ||
| 144 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
| 145 | unsigned long start, unsigned long end) | ||
| 146 | { | 84 | { |
| 147 | __flush_tlb_mm(vma->vm_mm); | 85 | if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm) |
| 86 | __tlb_flush_mm(mm); | ||
| 148 | } | 87 | } |
| 149 | 88 | ||
| 150 | #define flush_tlb_kernel_range(start, end) global_flush_tlb() | 89 | /* |
| 90 | * TLB flushing: | ||
| 91 | * flush_tlb() - flushes the current mm struct TLBs | ||
| 92 | * flush_tlb_all() - flushes all processes TLBs | ||
| 93 | * flush_tlb_mm(mm) - flushes the specified mm context TLB's | ||
| 94 | * flush_tlb_page(vma, vmaddr) - flushes one page | ||
| 95 | * flush_tlb_range(vma, start, end) - flushes a range of pages | ||
| 96 | * flush_tlb_kernel_range(start, end) - flushes a range of kernel pages | ||
| 97 | */ | ||
| 151 | 98 | ||
| 152 | #endif | 99 | /* |
| 100 | * flush_tlb_mm goes together with ptep_set_wrprotect for the | ||
| 101 | * copy_page_range operation and flush_tlb_range is related to | ||
| 102 | * ptep_get_and_clear for change_protection. ptep_set_wrprotect and | ||
| 103 | * ptep_get_and_clear do not flush the TLBs directly if the mm has | ||
| 104 | * only one user. At the end of the update the flush_tlb_mm and | ||
| 105 | * flush_tlb_range functions need to do the flush. | ||
| 106 | */ | ||
| 107 | #define flush_tlb() do { } while (0) | ||
| 108 | #define flush_tlb_all() do { } while (0) | ||
| 109 | #define flush_tlb_mm(mm) __tlb_flush_mm_cond(mm) | ||
| 110 | #define flush_tlb_page(vma, addr) do { } while (0) | ||
| 111 | #define flush_tlb_range(vma, start, end) __tlb_flush_mm_cond((vma)->vm_mm) | ||
| 112 | #define flush_tlb_kernel_range(start, end) __tlb_flush_mm(&init_mm) | ||
| 153 | 113 | ||
| 154 | #endif /* _S390_TLBFLUSH_H */ | 114 | #endif /* _S390_TLBFLUSH_H */ |
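A hedged sketch of the deferred-flush contract described above, with an illustrative write-protect loop standing in for the real copy_page_range (the pte walk is elided and the function name is hypothetical):

	/* Sketch: batch pte write-protection, flush once at the end. */
	static void wrprotect_range_sketch(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
	{
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE) {
			pte_t *ptep = NULL;	/* pgd/pmd/pte walk elided */

			if (!ptep)
				continue;
			/* Does not flush while the mm has a single user. */
			ptep_set_wrprotect(mm, addr, ptep);
		}
		flush_tlb_mm(mm);	/* the single deferred flush */
	}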
