author    Rob Herring <rob.herring@calxeda.com>        2012-08-21 06:26:24 -0400
committer Marek Szyprowski <m.szyprowski@samsung.com>  2012-10-02 02:58:07 -0400
commit    48aa820f1e3824e46dde6251db98e5961abf605d (patch)
tree      20cac9fd267b7e8bc43e47165c631bb6cadef15b
parent    0fa478df444f5837336d7e2fd0b41643c8d704c2 (diff)
ARM: kill off arch_is_coherent
With ixp2xxx removed, there are no platforms left that define
arch_is_coherent, so the last occurrences of arch_is_coherent can be
removed. Any new platform with coherent I/O should use the coherent
DMA mapping functions instead.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
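For readers wondering what "coherent DMA mapping functions" means in
practice, here is a minimal, hypothetical driver sketch. The function
and variable names are made up for illustration; dma_alloc_coherent()
and dma_free_coherent() are the standard kernel interfaces the commit
message points new platforms toward.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment. Rather than consulting a global
 * arch_is_coherent() switch, the driver asks the DMA API for a
 * buffer that is coherent between the CPU and this specific device;
 * the DMA core chooses cached or uncached mappings per device.
 */
static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;	/* bus address programmed into the device */
	void *ring;		/* CPU virtual address of the same buffer */

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... hand ring_dma to the hardware, access ring from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}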
-rw-r--r--  arch/arm/include/asm/barrier.h  |  7
-rw-r--r--  arch/arm/include/asm/memory.h   |  8
-rw-r--r--  arch/arm/mm/mmu.c               | 17
3 files changed, 6 insertions(+), 26 deletions(-)
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 05112380dc53..8dcd9c702d90 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -44,10 +44,9 @@
 #define rmb() dsb()
 #define wmb() mb()
 #else
-#include <asm/memory.h>
-#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
 #endif
 
 #ifndef CONFIG_SMP
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5f6ddcc56452..73cf03aa981e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -275,14 +275,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
-/*
- * Optional coherency support. Currently used only by selected
- * Intel XSC3-based systems.
- */
-#ifndef arch_is_coherent
-#define arch_is_coherent() 0
-#endif
-
 #endif
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c2fa21d0103e..8fd039929ae8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -216,7 +216,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -422,17 +422,6 @@ static void __init build_mem_type_table(void)
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
 	/*
-	 * Enable CPU-specific coherency if supported.
-	 * (Only available on XSC3 at the moment.)
-	 */
-	if (arch_is_coherent() && cpu_is_xsc3()) {
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-	}
-	/*
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
@@ -777,8 +766,8 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);