author     Linus Torvalds <torvalds@g5.osdl.org>    2005-09-08 18:26:48 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2005-09-08 18:26:48 -0400
commit     35b7ac4c48fa851600c028e088d2239a2cf3dfca (patch)
tree       13ab1abd15403eb1bc2099552b5f8c226596b838 /include/asm-arm
parent     4e1491847ef5ca1c5a661601d5f96dcb7d90d2f0 (diff)
parent     61c8c158c828073cfebf11ca8e340727feafa038 (diff)
Merge master.kernel.org:/home/rmk/linux-2.6-arm
Diffstat (limited to 'include/asm-arm')
-rw-r--r--  include/asm-arm/arch-pxa/hardware.h    | 18
-rw-r--r--  include/asm-arm/arch-sa1100/hardware.h | 18
-rw-r--r--  include/asm-arm/cacheflush.h           |  7
3 files changed, 11 insertions(+), 32 deletions(-)
diff --git a/include/asm-arm/arch-pxa/hardware.h b/include/asm-arm/arch-pxa/hardware.h
index 72b04d846a23..cf35721cfa45 100644
--- a/include/asm-arm/arch-pxa/hardware.h
+++ b/include/asm-arm/arch-pxa/hardware.h
@@ -44,24 +44,12 @@
 
 #ifndef __ASSEMBLY__
 
-#if 0
-# define __REG(x)   (*((volatile u32 *)io_p2v(x)))
-#else
-/*
- * This __REG() version gives the same results as the one above, except
- * that we are fooling gcc somehow so it generates far better and smaller
- * assembly code for access to contigous registers. It's a shame that gcc
- * doesn't guess this by itself.
- */
-#include <asm/types.h>
-typedef struct { volatile u32 offset[4096]; } __regbase;
-# define __REGP(x)  ((__regbase *)((x)&~4095))->offset[((x)&4095)>>2]
-# define __REG(x)   __REGP(io_p2v(x))
-#endif
+# define __REG(x)   (*((volatile unsigned long *)io_p2v(x)))
 
 /* With indexed regs we don't want to feed the index through io_p2v()
    especially if it is a variable, otherwise horrible code will result. */
-# define __REG2(x,y)    (*(volatile u32 *)((u32)&__REG(x) + (y)))
+# define __REG2(x,y)    \
+   (*(volatile unsigned long *)((unsigned long)&__REG(x) + (y)))
 
 # define __PREG(x)  (io_v2p((u32)&(x)))
 
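A note on the block removed above: the __regbase trick declared the I/O page around a register as one big struct of volatile words, so gcc could load the page base into a register once and reach neighbouring registers with small immediate offsets instead of rebuilding a full 32-bit address for every access. The standalone sketch below contrasts the two forms. It is illustrative only (REG_BASE and the register offsets are invented, and io_p2v() is modelled as an identity mapping); compile it with -O2 and compare the generated assembly rather than running it against real hardware.

#include <stdint.h>

/* Hypothetical peripheral base; not taken from the patch. */
#define REG_BASE   ((uintptr_t)0x40100000u)
/* Assume a 1:1 physical-to-virtual I/O mapping for this sketch. */
#define io_p2v(x)  (x)

/* Form kept by the patch: every access rebuilds the full address. */
#define __REG(x)   (*((volatile unsigned long *)io_p2v(x)))

/* Form removed by the patch: index into a struct placed at the page
 * base; the masks confine the index to the enclosing 4 KiB page, so
 * gcc can keep the page base in one register across accesses. */
typedef struct { volatile uint32_t offset[4096]; } __regbase;
#define __REGP(x)  (((__regbase *)((x) & ~(uintptr_t)4095))->offset[((x) & 4095) >> 2])

void poke_registers(void)
{
    __REG(REG_BASE + 0x0) = 1;   /* full address built each time */
    __REG(REG_BASE + 0x4) = 2;

    __REGP(REG_BASE + 0x0) = 1;  /* page base + immediate offset */
    __REGP(REG_BASE + 0x4) = 2;
}

On ARM at -O2 the __REGP() pair typically compiles to one load of the page base followed by stores with immediate offsets, which is the saving the removed comment describes.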
diff --git a/include/asm-arm/arch-sa1100/hardware.h b/include/asm-arm/arch-sa1100/hardware.h
index 10c62db34362..19c3b1e186bb 100644
--- a/include/asm-arm/arch-sa1100/hardware.h
+++ b/include/asm-arm/arch-sa1100/hardware.h
@@ -49,23 +49,9 @@
 	( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START )
 
 #ifndef __ASSEMBLY__
-#include <asm/types.h>
 
-#if 0
-# define __REG(x)   (*((volatile u32 *)io_p2v(x)))
-#else
-/*
- * This __REG() version gives the same results as the one above, except
- * that we are fooling gcc somehow so it generates far better and smaller
- * assembly code for access to contigous registers. It's a shame that gcc
- * doesn't guess this by itself.
- */
-typedef struct { volatile u32 offset[4096]; } __regbase;
-# define __REGP(x)  ((__regbase *)((x)&~4095))->offset[((x)&4095)>>2]
-# define __REG(x)   __REGP(io_p2v(x))
-#endif
-
-# define __PREG(x)  (io_v2p((u32)&(x)))
+# define __REG(x)   (*((volatile unsigned long *)io_p2v(x)))
+# define __PREG(x)  (io_v2p((unsigned long)&(x)))
 
 #else
 
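For reference, the two macros kept here are inverses: __REG() turns a physical register address into an lvalue in the kernel's virtual I/O mapping, and __PREG() recovers the physical address from such an lvalue (in this SA-1100 header its cast also changes from u32 to unsigned long). A minimal sketch of the round trip, assuming an invented linear offset in place of the real platform-specific io_p2v()/io_v2p() mappings:

#include <assert.h>
#include <stdint.h>

/* Invented linear phys<->virt translation, for illustration only. */
#define IO_OFFSET  ((uintptr_t)0x20000000u)
#define io_p2v(x)  ((x) + IO_OFFSET)
#define io_v2p(x)  ((x) - IO_OFFSET)

#define __REG(x)   (*((volatile unsigned long *)io_p2v(x)))
#define __PREG(x)  (io_v2p((unsigned long)&(x)))

int main(void)
{
    uintptr_t phys = (uintptr_t)0x80050000u;  /* hypothetical register */

    /* &(*p) only takes the address, so no device access happens and
     * the round trip is safe to execute; it recovers phys exactly. */
    assert(__PREG(__REG(phys)) == phys);
    return 0;
}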
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 035cdcff43d2..e81baff4f54b 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -256,7 +256,7 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-
+#ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
@@ -279,6 +279,11 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}
 }
+#else
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+#endif
 
 /*
  * flush_cache_user_range is used when we want to ensure that the
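What the new CONFIG_CPU_CACHE_VIPT branch is about: a VIPT (virtually indexed, physically tagged) cache selects the cache set from the virtual address, so whenever the set index uses address bits above the page offset, two virtual mappings of the same physical page can land in different sets and alias each other. The right flush strategy then depends on the actual cache geometry, which is presumably why this configuration only declares flush_cache_mm()/flush_cache_range()/flush_cache_page() here and provides the bodies out of line elsewhere (the diff shown contains only the declarations). A small sketch of the aliasing condition itself, with an invented 32 KiB 4-way geometry that is not from the patch:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE   4096u
#define CACHE_SIZE  (32u * 1024u)            /* assumed total size */
#define CACHE_WAYS  4u                       /* assumed associativity */
#define WAY_SIZE    (CACHE_SIZE / CACHE_WAYS)  /* index span: 8 KiB */

/* Two virtual mappings of one physical page alias in a VIPT cache
 * iff they differ in the index bits between the page offset and the
 * way size. */
static bool vipt_aliases(unsigned long va1, unsigned long va2)
{
    return ((va1 ^ va2) & (WAY_SIZE - 1) & ~(PAGE_SIZE - 1)) != 0;
}

int main(void)
{
    /* Differ in bit 12, which is inside the 8 KiB index: alias. */
    printf("%d\n", vipt_aliases(0x40000000, 0x40001000));  /* 1 */
    /* Differ only above the index bits: same cache sets, no alias. */
    printf("%d\n", vipt_aliases(0x40000000, 0x40002000));  /* 0 */
    return 0;
}

With an 8 KiB way and 4 KiB pages, exactly one virtual bit (bit 12) takes part in set selection, so only mappings that disagree in that bit alias; a geometry whose way size does not exceed the page size cannot alias at all.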