 arch/arm/mm/ioremap.c | 71 ++++++++++++++-----------------
 arch/arm/mm/mmu.c     | 34 +++++++++++---------
 2 files changed, 46 insertions(+), 59 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 904c15e86063..04d9006eab1f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -261,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
 
@@ -283,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
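The static_vm type and the lookup helper used above are not defined in this diff; they come from elsewhere in the same series. From the call sites (`svm->vm`, `svm->vm.phys_addr`, `find_static_vm_paddr(paddr, size, mtype)`) and from the loop body being removed, a plausible shape is the sketch below. Treat it as an inference, not the patch's actual implementation:

/* Sketch only -- inferred from the call sites and the removed loop. */
struct static_vm {
	struct vm_struct vm;
	struct list_head list;
};

static LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		/* the requested range must fall entirely inside the mapping */
		if (vm->phys_addr > paddr ||
		    paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

Note the design gain: because static_vmlist only ever holds boot-time static mappings, the walk needs no vmlist_lock, which is why the read_lock()/read_unlock() pair disappears from the hunk above.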
@@ -312,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	   !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	   !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-	err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+	err = ioremap_page_range(addr, addr + size, paddr,
 				 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -410,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
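find_static_vm_vaddr() is likewise defined outside this diff. A sketch consistent with the containment test the old loop performed, and with a list kept sorted by virtual address (which is what makes an early break legal, mirroring the old `vm->addr > addr` bail-out), might look like this; the sorted-order property is an assumption here:

/* Sketch only -- the real helper lives outside this diff. */
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is assumed to be in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

With the static case rejected up front, the remaining section-mapping cleanup can use the generic find_vm_area() instead of open-coding a vmlist walk, which is exactly what the new #if block above does.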
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6..a35b314d270d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
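add_static_vm_early() replaces vm_area_add_early() as the registration point. Judging by the call sites it must do two things: register the embedded vm_struct with the generic early-vmalloc bookkeeping, and link the entry onto static_vmlist. Keeping the list sorted by virtual address is an assumption, but it is what would validate the early break in the find_static_vm_vaddr() sketch earlier:

/* Sketch only -- one plausible implementation of the registration helper. */
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);	/* keep the generic vmalloc view in sync */
	vaddr = vm->addr;

	/* insert in ascending virtual-address order */
	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;
		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

If no larger entry is found, list_for_each_entry() terminates with curr_svm's list member aliasing the list head, so list_add_tail() appends at the end; otherwise the new entry is inserted just before the first larger one.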
@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -859,17 +864,12 @@ static void __init pci_reserve_io(void)
 {
 	struct vm_struct *vm;
 	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
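For context, here is how a platform's static mapping would flow through the new infrastructure end to end. Everything below is illustrative: the descriptor values and the example_-prefixed names are made up, while map_desc, MT_DEVICE and iotable_init() are the normal ARM mach-level API shown in the hunks above.

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,			/* VA picked by the platform */
		.pfn		= __phys_to_pfn(0x10000000),	/* example device base */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	/* creates the mapping and registers it on static_vmlist */
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}

A later ioremap() of any range inside that 1MB window with a matching memory type reaches __arm_ioremap_pfn_caller(), hits find_static_vm_paddr(), and returns an address inside the existing 0xf8000000 mapping instead of consuming vmalloc space; __iounmap() on that cookie is then a no-op thanks to the find_static_vm_vaddr() check.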