author    Nicolas Pitre <nicolas.pitre@linaro.org>    2012-11-24 21:24:32 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2012-11-26 07:23:53 -0500
commit    3e99675af1b25a191c467700499b1cbe5585a778 (patch)
tree      516c40a67498aaee1246eeb9e7d2efec335176aa /arch/arm/mm
parent    ce7b175656a1903605f0184bf33acebff70bfe7f (diff)
ARM: 7582/2: rename kvm_seq to vmalloc_seq to avoid confusion with KVM
The kvm_seq value has nothing whatsoever to do with that other KVM. Given that KVM support on ARM is imminent, it's best to rename kvm_seq into something else that clearly identifies what it is about, i.e. a sequence number for vmalloc section mappings.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
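For readers unfamiliar with the idiom, here is a minimal standalone sketch of the sequence-counter pattern that vmalloc_seq implements. All names here (master, master_update, sync_context, switch_to, TABLE_ENTRIES) are hypothetical, for illustration only, and are not kernel APIs: a writer bumps a master counter after changing the master table, and each reader copies the table and retries if the counter moved during the copy.

    /*
     * Sketch of the vmalloc_seq idiom (hypothetical names, not kernel code).
     * A real SMP implementation would need barriers/atomics; the kernel's
     * own comment notes this path is still racy on SMP.
     */
    #include <string.h>

    #define TABLE_ENTRIES 16

    struct context {
            unsigned int seq;                   /* last master seq this copy saw */
            unsigned long table[TABLE_ENTRIES]; /* private copy of the mappings  */
    };

    static struct context master;               /* plays the role of init_mm     */

    /* Writer side: mirrors pmd_clear() + init_mm.context.vmalloc_seq++ */
    static void master_update(unsigned int idx, unsigned long val)
    {
            master.table[idx] = val;
            master.seq++;                       /* mark existing copies stale    */
    }

    /* Reader side: mirrors __check_vmalloc_seq() */
    static void sync_context(struct context *ctx)
    {
            unsigned int seq;

            do {
                    seq = master.seq;           /* snapshot before copying       */
                    memcpy(ctx->table, master.table, sizeof(master.table));
                    ctx->seq = seq;
            } while (seq != master.seq);        /* retry if a writer raced us    */
    }

    /* Fast-path check: mirrors the test in check_and_switch_context() */
    static void switch_to(struct context *ctx)
    {
            if (ctx->seq != master.seq)
                    sync_context(ctx);
    }

    int main(void)
    {
            struct context ctx = { 0 };

            master_update(3, 0xdeadbeef);       /* writer invalidates copies     */
            switch_to(&ctx);                    /* reader notices and re-syncs   */
            return ctx.table[3] == 0xdeadbeef ? 0 : 1;
    }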
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/context.c   4
-rw-r--r--  arch/arm/mm/ioremap.c  16
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a27d7363be2..bc4a5e9ebb78 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -186,8 +186,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
         unsigned long flags;
         unsigned int cpu = smp_processor_id();
 
-        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-                __check_kvm_seq(mm);
+        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+                __check_vmalloc_seq(mm);
 
         /*
          * Required during context switch to avoid speculative page table
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5dcc2fd46c46..88fd86cf3d9a 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
         unsigned int seq;
 
         do {
-                seq = init_mm.context.kvm_seq;
+                seq = init_mm.context.vmalloc_seq;
                 memcpy(pgd_offset(mm, VMALLOC_START),
                        pgd_offset_k(VMALLOC_START),
                        sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                         pgd_index(VMALLOC_START)));
-                mm->context.kvm_seq = seq;
-        } while (seq != init_mm.context.kvm_seq);
+                mm->context.vmalloc_seq = seq;
+        } while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
                 if (!pmd_none(pmd)) {
                         /*
                          * Clear the PMD from the page table, and
-                         * increment the kvm sequence so others
+                         * increment the vmalloc sequence so others
                          * notice this change.
                          *
                          * Note: this is still racy on SMP machines.
                          */
                         pmd_clear(pmdp);
-                        init_mm.context.kvm_seq++;
+                        init_mm.context.vmalloc_seq++;
 
                         /*
                          * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
          * Ensure that the active_mm is up to date - we want to
          * catch any use-after-iounmap cases.
          */
-        if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-                __check_kvm_seq(current->active_mm);
+        if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+                __check_vmalloc_seq(current->active_mm);
 
         flush_tlb_kernel_range(virt, end);
 }