From f00a75c094c340c4e7435665816c3273c870e849 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Oct 2009 15:17:45 +0100 Subject: ARM: Pass VMA to copy_user_highpage() implementations Our copy_user_highpage() implementations may require cache maintenance. Ensure that implementations have all necessary details to perform this maintenance. Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 3a32af4cce30..a485ac3c8696 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -117,11 +117,12 @@ #endif struct page; +struct vm_area_struct; struct cpu_user_fns { void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); }; #ifdef MULTI_USER @@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user; extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); #endif #define clear_user_highpage(page,vaddr) \ @@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ - __cpu_copy_user_highpage(to, from, vaddr) + __cpu_copy_user_highpage(to, from, vaddr, vma) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); -- cgit v1.2.2 From 2ef7f3dbd7a70a48c3f09b498df528cb00ea03a4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 5 Nov 2009 13:29:36 +0000 Subject: ARM: Fix ptrace accesses Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 24 ++---------------------- arch/arm/include/asm/smp_plat.h | 5 +++++ 2 files changed, 7 insertions(+), 22 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 730aefcfbee3..3d2ef54c7cb9 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end) * processes address space. Really, we want to allow our "user * space" model to handle this. 
*/ -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - memcpy(dst, src, len); \ - flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ - } while (0) - +extern void copy_to_user_page(struct vm_area_struct *, struct page *, + unsigned long, void *, const void *, unsigned long); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ @@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig } } -static inline void -vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) -{ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = (unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); - } -} - #ifndef CONFIG_CPU_CACHE_VIPT #define flush_cache_mm(mm) \ vivt_flush_cache_mm(mm) @@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, vivt_flush_cache_range(vma,start,end) #define flush_cache_page(vma,addr,pfn) \ vivt_flush_cache_page(vma,addr,pfn) -#define flush_ptrace_access(vma,page,ua,ka,len,write) \ - vivt_flush_ptrace_access(vma,page,ua,ka,len,write) #else extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); -extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write); #endif #define flush_cache_dup_mm(mm) flush_cache_mm(mm) diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 59303e200845..e6215305544a 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +static inline int cache_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; +} + #endif -- cgit v1.2.2 From 8784895ede9a39cfecf76fe26b090a602877f70f Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 7 Jan 2010 17:52:26 +0000 Subject: ARM: add missing recvmmsg syscall number Signed-off-by: Russell King --- arch/arm/include/asm/unistd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 4e506d09e5f9..cf9cdaa2d4d4 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -391,6 +391,7 @@ #define __NR_pwritev (__NR_SYSCALL_BASE+362) #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) +#define __NR_recvmmsg (__NR_SYSCALL_BASE+365) /* * The following SWIs are ARM private. -- cgit v1.2.2 From 092a4e957a835cbf6b2ec82a6a4d6ff06c0a362e Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Wed, 6 Jan 2010 10:50:08 +0100 Subject: ARM: 5866/1: arm ptrace: use unsigned types for kernel pt_regs Make registers unsigned for kernel space. This is important, for example, in perf events, where the PC is stored into a u64. We don't want it sign-extended, so make the regs unsigned to prevent casts throughout the kernel. 
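To see why the signedness matters, here is a minimal user-space sketch of the failure mode (an illustration only, not part of the patch; the PC value 0xc0008000 is an assumed example of an ARM kernel address):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* On 32-bit ARM a kernel PC such as 0xc0008000 has the top bit set. */
	int32_t  signed_pc   = (int32_t)0xc0008000;	/* old pt_regs: long */
	uint32_t unsigned_pc = 0xc0008000u;		/* new pt_regs: unsigned long */

	/* Storing into a u64, as perf does, sign-extends the signed value... */
	uint64_t bad  = (uint64_t)signed_pc;		/* 0xffffffffc0008000 */
	/* ...but zero-extends the unsigned one. */
	uint64_t good = (uint64_t)unsigned_pc;		/* 0x00000000c0008000 */

	printf("signed:   0x%016llx\n", (unsigned long long)bad);
	printf("unsigned: 0x%016llx\n", (unsigned long long)good);
	return 0;
}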
Signed-off-by: Jamie Iles Signed-off-by: Russell King --- arch/arm/include/asm/ptrace.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index bbecccda76d0..eec6e897ceb2 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -97,9 +97,15 @@ * stack during a system call. Note that sizeof(struct pt_regs) * has to be a multiple of 8. */ +#ifndef __KERNEL__ struct pt_regs { long uregs[18]; }; +#else /* __KERNEL__ */ +struct pt_regs { + unsigned long uregs[18]; +}; +#endif /* __KERNEL__ */ #define ARM_cpsr uregs[16] #define ARM_pc uregs[15] -- cgit v1.2.2 From f892027c02f8d985455ba239ce280ac13b68a7fa Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Sat, 9 Jan 2010 13:46:08 +0100 Subject: ARM: 5870/1: arch/arm: Fix build failure for defconfigs without CONFIG_ISA_DMA_API set A lot of ARM defconfigs (those without CONFIG_ISA_DMA_API set) fail to build [1][2][3] due to the changes of the patch "[PATCH] PCI: Clean up build for CONFIG_PCI_QUIRKS unset" by Rafael J. Wysocki (Sat, 2 Jan 2010 22:57:24 +0100) [4]: the referenced variable 'isa_dma_bridge_buggy' in asm/dma.h is enclosed by the CONFIG_ISA_DMA_API conditional, so any config without this setting breaks. I'm not sure whether moving the condition is the right way to solve the issue, but at least it fixes the build. :) References: [1] http://kisskb.ellerman.id.au/kisskb/buildresult/1983354/ [2] http://kisskb.ellerman.id.au/kisskb/buildresult/1983333/ [3] http://kisskb.ellerman.id.au/kisskb/buildresult/1983337/ [4] http://lkml.org/lkml/2010/1/2/102 Signed-off-by: Peter Huewe Signed-off-by: Russell King --- arch/arm/include/asm/dma.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 7edf3536df24..ca51143f97f1 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -138,12 +138,12 @@ extern int get_dma_residue(unsigned int chan); #define NO_DMA 255 #endif +#endif /* CONFIG_ISA_DMA_API */ + #ifdef CONFIG_PCI extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy (0) #endif -#endif /* CONFIG_ISA_DMA_API */ - #endif /* __ASM_ARM_DMA_H */ -- cgit v1.2.2 From 62a8c5bcb547c0aca1c3af810695dfb9b25e5351 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 10 Jan 2010 09:21:53 +0100 Subject: ARM: 5872/1: ARM: include needed linux/cpu.h in asm/cpu.h The file arch/arm/include/asm/cpu.h needs to include 'linux/cpu.h' to meet its dependency. Otherwise, using "struct cpuinfo_arm" while including just 'asm/cpu.h' throws the error below: arch/arm/include/asm/cpu.h:16: error: field 'cpu' has incomplete type Another way to fix this would be to include both linux/cpu.h and asm/cpu.h at every use site, but it shouldn't be that way. So this patch fixes it by including linux/cpu.h from asm/cpu.h, so that including asm/cpu.h alone is enough. 
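As a hedged illustration of the result (not from the patch itself; the variable name is made up), a file like the following now compiles with the single include:

#include <asm/cpu.h>	/* after this change, pulls in linux/cpu.h itself */

/* 'struct cpu' is now a complete type here, so this compiles. */
static struct cpuinfo_arm example_cpu_info;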
Signed-off-by: Santosh Shilimkar Signed-off-by: Russell King --- arch/arm/include/asm/cpu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index 634b2d7c612a..793968173bef 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -11,6 +11,7 @@ #define __ASM_ARM_CPU_H #include <linux/percpu.h> +#include <linux/cpu.h> struct cpuinfo_arm { struct cpu cpu; -- cgit v1.2.2 From 18eabe2347ae7a11b3db768695913724166dfb0e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 31 Oct 2009 16:52:16 +0000 Subject: ARM: dma-mapping: introduce the idea of buffer ownership The DMA API has the notion of buffer ownership; make it explicit in the ARM implementation of this API. This gives us a set of hooks to allow us to deal with CPU cache issues arising from non-cache coherent DMA. Signed-off-by: Russell King Tested-By: Santosh Shilimkar Tested-By: Jamie Iles --- arch/arm/include/asm/dma-mapping.h | 64 ++++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 17 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a96300bf83fd..e850f5c1607b 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -57,19 +57,48 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) #endif /* - * DMA-consistent mapping functions. These allocate/free a region of - * uncached, unwrite-buffered mapped memory space for use with DMA - * devices. This is the "generic" version. The PCI specific version - * is in pci.h - * - * Note: Drivers should NOT use this function directly, as it will break - * platforms with CONFIG_DMABOUNCE. - * Use the driver DMA support - see dma-mapping.h (dma_sync_*) + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. */ extern void dma_cache_maint(const void *kaddr, size_t size, int rw); extern void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, int rw); +/* + * The DMA API is built upon the notion of "buffer ownership". A buffer + * is either exclusively owned by the CPU (and therefore may be accessed + * by it) or exclusively owned by the DMA device. These helper functions + * represent the transitions between these two ownership states. + * + * As above, these are private support functions and not part of the API. + * Drivers must not use these. + */ +static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + if (!arch_is_coherent()) + dma_cache_maint(kaddr, size, dir); +} + +static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + /* nothing to do */ +} + +static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + if (!arch_is_coherent()) + dma_cache_maint_page(page, off, size, dir); +} + +static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + /* nothing to do */ +} + /* * Return whether the given device DMA address mask can be supported * properly. 
For example, if your device can only drive the low 24-bits @@ -304,8 +333,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint(cpu_addr, size, dir); + __dma_single_cpu_to_dev(cpu_addr, size, dir); return virt_to_dma(dev, cpu_addr); } @@ -329,8 +357,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint_page(page, offset, size, dir); + __dma_page_cpu_to_dev(page, offset, size, dir); return page_to_dma(dev, page) + offset; } @@ -352,7 +379,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); } /** @@ -372,7 +399,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, + size, dir); } #endif /* CONFIG_DMABOUNCE */ @@ -400,7 +428,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, { BUG_ON(!valid_dma_direction(dir)); - dmabounce_sync_for_cpu(dev, handle, offset, size, dir); + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) + return; + + __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -412,8 +443,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; - if (!arch_is_coherent()) - dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); + __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_for_cpu(struct device *dev, -- cgit v1.2.2 From 4ea0d7371e808628d11154b0d44140b70f05b998 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 16:27:17 +0000 Subject: ARM: dma-mapping: push buffer ownership down into dma-mapping.c Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/dma-mapping.h | 39 ++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index e850f5c1607b..256ee1c9f51a 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -56,47 +56,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) } #endif -/* - * Private support functions: these are not part of the API and are - * liable to change. Drivers must not use these. - */ -extern void dma_cache_maint(const void *kaddr, size_t size, int rw); -extern void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int rw); - /* * The DMA API is built upon the notion of "buffer ownership". A buffer * is either exclusively owned by the CPU (and therefore may be accessed * by it) or exclusively owned by the DMA device. These helper functions * represent the transitions between these two ownership states. * - * As above, these are private support functions and not part of the API. - * Drivers must not use these. 
+ * Note, however, that on later ARMs, this notion does not work due to + * speculative prefetches. We model our approach on the assumption that + * the CPU does do speculative prefetches, which means we clean caches + * before transfers and delay cache invalidation until transfer completion. + * + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. */ static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + if (!arch_is_coherent()) - dma_cache_maint(kaddr, size, dir); + ___dma_single_cpu_to_dev(kaddr, size, dir); } static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + extern void ___dma_single_dev_to_cpu(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_dev_to_cpu(kaddr, size, dir); } static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { + extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, + size_t, enum dma_data_direction); + if (!arch_is_coherent()) - dma_cache_maint_page(page, off, size, dir); + ___dma_page_cpu_to_dev(page, off, size, dir); } static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_dev_to_cpu(page, off, size, dir); } /* -- cgit v1.2.2 From a9c9147eb9b1dba0ce567a41897c7773b4d1b0bc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:19:58 +0000 Subject: ARM: dma-mapping: provide per-cpu type map/unmap functions Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/cacheflush.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 730aefcfbee3..4c733236e342 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -213,6 +213,9 @@ struct cpu_cache_fns { void (*coherent_user_range)(unsigned long, unsigned long); void (*flush_kern_dcache_area)(void *, size_t); + void (*dma_map_area)(const void *, size_t, int); + void (*dma_unmap_area)(const void *, size_t, int); + void (*dma_inv_range)(const void *, const void *); void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); @@ -244,6 +247,8 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ +#define dmac_map_area cpu_cache.dma_map_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_inv_range cpu_cache.dma_inv_range #define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range @@ -270,10 +275,14 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. 
*/ +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_inv_range __glue(_CACHE,_dma_inv_range) #define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) +extern void dmac_map_area(const void *, size_t, int); +extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_inv_range(const void *, const void *); extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); -- cgit v1.2.2 From 702b94bff3c50542a6e4ab9a4f4cef093262fe65 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:24:19 +0000 Subject: ARM: dma-mapping: remove dmac_clean_range and dmac_inv_range These are now unused, and so can be removed. Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/cacheflush.h | 23 ----------------------- 1 file changed, 23 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 4c733236e342..e29088587412 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -182,21 +182,6 @@ * DMA Cache Coherency * =================== * - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - start - virtual start address - * - end - virtual end address - * - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - start - virtual start address - * - end - virtual end address - * * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. @@ -216,8 +201,6 @@ struct cpu_cache_fns { void (*dma_map_area)(const void *, size_t, int); void (*dma_unmap_area)(const void *, size_t, int); - void (*dma_inv_range)(const void *, const void *); - void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); }; @@ -249,8 +232,6 @@ extern struct cpu_cache_fns cpu_cache; */ #define dmac_map_area cpu_cache.dma_map_area #define dmac_unmap_area cpu_cache.dma_unmap_area -#define dmac_inv_range cpu_cache.dma_inv_range -#define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -277,14 +258,10 @@ extern void __cpuc_flush_dcache_area(void *, size_t); */ #define dmac_map_area __glue(_CACHE,_dma_map_area) #define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) -#define dmac_inv_range __glue(_CACHE,_dma_inv_range) -#define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); -extern void dmac_inv_range(const void *, const void *); -extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); #endif -- cgit v1.2.2 From 4b3073e1c53a256275f1079c0fbfbe85883d9275 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:40:18 +0000 Subject: MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself On VIVT ARM, when we have multiple shared mappings of the same file in the same MM, we need to ensure that we have coherency across all copies. We do this via make_coherent() by making the pages uncacheable. 
This used to work fine, until we allowed highmem with highpte - we now have a page table which is mapped as required, and is not available for modification via update_mmu_cache(). Ralf Baechle suggested getting rid of the PTE value passed to update_mmu_cache(): On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables to construct a pointer to the pte again. Passing a pte_t * is much more elegant. Maybe we might even replace the pte argument with the pte_t? Ben Herrenschmidt would also like the pte pointer for PowerPC: Passing the ptep in there is exactly what I want. I want that -instead- of the PTE value, because I have issue on some ppc cases, for I$/D$ coherency, where set_pte_at() may decide to mask out the _PAGE_EXEC. So, pass the mapped page table pointer into update_mmu_cache(), and remove the PTE value, updating all implementations and call sites to suit. Includes a fix from Stephen Rothwell: sparc: fix fallout from update_mmu_cache API change Signed-off-by: Stephen Rothwell Acked-by: Benjamin Herrenschmidt Signed-off-by: Russell King --- arch/arm/include/asm/tlbflush.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index c2f1605de359..e085e2c545eb 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); * cache entries for the kernels virtual memory range are written * back to the page. */ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); #endif -- cgit v1.2.2
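To make the gain concrete, here is a minimal sketch of an implementation under the new signature (the body is assumed for illustration and is not the actual code from this series):

/* Sketch only: the body below is assumed, not taken from the patch. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);	/* the PTE value is still reachable */

	if (!pfn_valid(pfn))
		return;

	/*
	 * The pointer is the new capability: MIPS no longer has to
	 * re-walk the page tables to locate the entry, and PowerPC
	 * can rewrite it in place for its I$/D$ coherency handling.
	 */
}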