 arch/arm/include/asm/mmu.h         |  2 +-
 arch/arm/include/asm/mmu_context.h |  6 +++---
 arch/arm/mm/context.c              |  4 ++--
 arch/arm/mm/ioremap.c              | 16 ++++++++--------
 4 files changed, 14 insertions(+), 14 deletions(-)
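This commit renames the per-mm kvm_seq counter to vmalloc_seq. The counter tracks teardowns of the kernel's vmalloc-area page tables and has nothing to do with KVM virtualization, so the new name describes what it actually guards.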
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 5b53b53ab5cf..9f77e7804f3b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -7,7 +7,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a64f61cb23d1..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,7 +20,7 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
@@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
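On the context-switch path, the unlikely() comparison keeps the common case down to a single load and compare against init_mm's counter; only an mm whose copy has fallen behind pays for the page-directory resync in __check_vmalloc_seq().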
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a27d7363be2..bc4a5e9ebb78 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -186,8 +186,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	/*
 	 * Required during context switch to avoid speculative page table
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5dcc2fd46c46..88fd86cf3d9a 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
 
 	do {
-		seq = init_mm.context.kvm_seq;
+		seq = init_mm.context.vmalloc_seq;
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
-		mm->context.kvm_seq = seq;
-	} while (seq != init_mm.context.kvm_seq);
+		mm->context.vmalloc_seq = seq;
+	} while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
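__check_vmalloc_seq() above uses a seqcount-style retry loop: sample init_mm's counter, copy the kernel's vmalloc PGD entries into this mm, then redo the copy if the counter moved in the meantime, since the snapshot may then be torn. A minimal user-space sketch of the same pattern, using hypothetical names that are not part of the kernel API:

#include <stdatomic.h>
#include <string.h>

/* Hypothetical stand-ins: global_seq plays the role of
 * init_mm.context.vmalloc_seq, global_entries the kernel's
 * vmalloc page-directory range. */
static atomic_uint global_seq;		/* bumped on every teardown */
static char global_entries[256];

struct ctx {
	unsigned int seq;		/* last value we synced against */
	char entries[256];		/* private copy of the entries  */
};

/* Copy until the sequence number is stable: a concurrent bump of
 * global_seq mid-copy means the snapshot may mix old and new
 * entries, so loop and copy again. */
static void sync_entries(struct ctx *c)
{
	unsigned int seq;

	do {
		seq = atomic_load(&global_seq);
		memcpy(c->entries, global_entries, sizeof(c->entries));
		c->seq = seq;
	} while (seq != atomic_load(&global_seq));
}

The in-kernel loop reads a plain unsigned int rather than an atomic, which is likely part of why the comment in unmap_area_sections() below calls the scheme "still racy on SMP machines".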
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	if (!pmd_none(pmd)) {
 		/*
 		 * Clear the PMD from the page table, and
-		 * increment the kvm sequence so others
+		 * increment the vmalloc sequence so others
 		 * notice this change.
 		 *
 		 * Note: this is still racy on SMP machines.
 		 */
 		pmd_clear(pmdp);
-		init_mm.context.kvm_seq++;
+		init_mm.context.vmalloc_seq++;
 
 		/*
 		 * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-		__check_kvm_seq(current->active_mm);
+	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+		__check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
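This final hunk makes unmap_area_sections() resync current->active_mm right after bumping the counter, so a use-after-iounmap access through the just-cleared mapping faults immediately; every other mm picks up the change lazily via the check in check_and_switch_context().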