Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/copypage-v4mc.c |  2 +-
 arch/arm/mm/fault-armv.c    |  5 ++++-
 arch/arm/mm/ioremap.c       | 11 ++---------
 3 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index bdb5fd983b15..1601698b9800 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -68,7 +68,7 @@ mc_copy_user_page(void *from, void *to)
68 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); 68 : "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
69} 69}
70 70
71void v4_mc_copy_user_highpage(struct page *from, struct page *to, 71void v4_mc_copy_user_highpage(struct page *to, struct page *from,
72 unsigned long vaddr) 72 unsigned long vaddr)
73{ 73{
74 void *kto = kmap_atomic(to, KM_USER1); 74 void *kto = kmap_atomic(to, KM_USER1);
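
The hunk above swaps the parameter order so the definition matches what callers of the copy_user_highpage hook pass, destination page first. A standalone sketch, with hypothetical names rather than the kernel's, of why such a mismatch compiles silently: parameter names are not part of a function's type, so a definition written as (from, to) still satisfies a hook declared as (to, from) and simply copies in the wrong direction.

#include <stdio.h>
#include <string.h>

struct page { char data[8]; };

/* Hook type: callers always pass (destination, source). */
typedef void (*copy_user_highpage_fn)(struct page *to, struct page *from);

/* Buggy definition: parameter names swapped, type identical, copy reversed. */
static void copy_backwards(struct page *from, struct page *to)
{
	memcpy(to->data, from->data, sizeof(to->data));
}

int main(void)
{
	copy_user_highpage_fn copy = copy_backwards;	/* compiles without warning */
	struct page dst = { "" }, src = { "payload" };

	copy(&dst, &src);			/* caller intends: dst <- src */
	printf("dst = \"%s\"\n", dst.data);	/* prints "" : src was clobbered instead */
	return 0;
}
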
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 81d0b8772de3..bc0099d5ae85 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,7 +66,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		flush_cache_page(vma, address, pte_pfn(entry));
+		unsigned long pfn = pte_pfn(entry);
+		flush_cache_page(vma, address, pfn);
+		outer_flush_range((pfn << PAGE_SHIFT),
+				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
 		pte_val(entry) &= ~L_PTE_MT_MASK;
 		pte_val(entry) |= shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
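
The added lines also flush the outer (L2) cache for the page whose mapping is being downgraded to the shared (uncacheable) memory type, in addition to the inner flush done by flush_cache_page(). A standalone sketch of the physical range handed to outer_flush_range(), assuming 4K pages and with PAGE_SHIFT/PAGE_SIZE defined locally rather than taken from kernel headers:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long pfn = 0x8123;			/* example page frame number */
	unsigned long start = pfn << PAGE_SHIFT;	/* 0x8123000 */
	unsigned long end   = start + PAGE_SIZE;	/* 0x8124000: exactly one page */

	printf("outer flush range: [%#lx, %#lx)\n", start, end);
	return 0;
}
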
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 18373f73f2fc..9f88dd3be601 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -138,7 +138,7 @@ void __check_kvm_seq(struct mm_struct *mm)
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
-	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 	pgd_t *pgd;
 
 	flush_cache_vunmap(addr, end);
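
The masking change matters because ~SZ_1M only clears bit 20, whereas ~(SZ_1M - 1) rounds the size down to a whole number of 1MB sections. A standalone sketch with SZ_1M defined locally:

#include <stdio.h>

#define SZ_1M	0x00100000UL

int main(void)
{
	unsigned long size = 0x00180000UL;	/* a 1.5MB mapping */

	/* old mask: only bit 20 is cleared -> 0x80000, less than one section */
	printf("size & ~SZ_1M       = %#lx\n", size & ~SZ_1M);
	/* new mask: rounds down to a 1MB boundary -> 0x100000, one full section */
	printf("size & ~(SZ_1M - 1) = %#lx\n", size & ~(SZ_1M - 1));
	return 0;
}
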
@@ -337,10 +337,7 @@ void __iounmap(volatile void __iomem *io_addr)
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
-#endif
-	unsigned int section_mapping = 0;
 
-#ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
@@ -352,11 +349,8 @@ void __iounmap(volatile void __iomem *io_addr)
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				*p = tmp->next;
 				unmap_area_sections((unsigned long)tmp->addr,
 						    tmp->size);
-				kfree(tmp);
-				section_mapping = 1;
 			}
 			break;
 		}
@@ -364,7 +358,6 @@ void __iounmap(volatile void __iomem *io_addr)
 	write_unlock(&vmlist_lock);
 #endif
 
-	if (!section_mapping)
-		vunmap(addr);
+	vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
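
Taken together, the __iounmap() hunks drop the open-coded unlink and kfree() along with the section_mapping flag: the loop now only tears down the section mappings, and vunmap() runs unconditionally as the single owner of the vm_struct's removal and free. A standalone userspace sketch of that ownership split, with hypothetical names standing in for the kernel's vmlist and vunmap():

#include <stdlib.h>

struct area {
	struct area *next;
	void *addr;
	int is_section;
};

static struct area *arealist;

static void unmap_sections(struct area *a)
{
	/* architecture-specific teardown only; no unlink, no free */
	(void)a;
}

static void generic_unmap(void *addr)		/* plays the role of vunmap() */
{
	struct area **p, *tmp;

	for (p = &arealist; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;		/* sole owner of unlink + free */
			free(tmp);
			return;
		}
	}
}

static void iounmap_like(void *addr)
{
	struct area *tmp;

	for (tmp = arealist; tmp; tmp = tmp->next) {
		if (tmp->addr == addr) {
			if (tmp->is_section)
				unmap_sections(tmp);
			break;
		}
	}
	generic_unmap(addr);			/* unconditional, as in the patch */
}

int main(void)
{
	struct area *a = calloc(1, sizeof(*a));

	a->addr = (void *)0x1000;
	a->is_section = 1;
	a->next = arealist;
	arealist = a;

	iounmap_like((void *)0x1000);		/* no double free, no leak */
	return arealist == NULL ? 0 : 1;
}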