author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-14 09:54:10 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-14 09:54:10 -0500
commit	bf32eb85492af197ea5ff20e0be56f667a80584d (patch)
tree	b43d056362e49831040a871040f39778f31abace /arch/arm/mm
parent	f74f7e57ae9fa12b2951ae62ce3557799b318399 (diff)
parent	3d1074349b22c9653e746282564136c87668c2b8 (diff)
Merge branch 'pending-l2x0' into cache
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/cache-l2x0.c	93
1 file changed, 72 insertions(+), 21 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 747f9a9021bb..cb8fc6573b1b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,120 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 
-static inline void sync_writel(unsigned long val, unsigned long reg,
-			       unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
-	while (readl(l2x0_base + reg) & complete_mask)
+	while (readl(reg) & mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
 {
-	sync_writel(0, L2X0_CACHE_SYNC, 1);
+	void __iomem *base = l2x0_base;
+	writel(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
-	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0xff, l2x0_base + L2X0_INV_WAY);
+	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
 	}
 
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_INV_LINE_PA, 1);
+			writel(start, base + L2X0_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
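
Note: two related patterns run through this merge. First, each per-line
maintenance operation now waits for the *previous* operation to drain
before issuing the next (wait-then-write, instead of write-then-poll
under one long-held lock), so the CPU's loop overhead overlaps with the
controller's work. Second, long ranges are walked in 4096-byte blocks
with the spinlock dropped and immediately retaken between blocks, so a
large flush cannot keep interrupts disabled for its full length. The
following is a minimal, illustrative sketch of that pattern, not the
patch itself; all demo_* names are hypothetical:

	#include <linux/kernel.h>	/* min() */
	#include <linux/spinlock.h>
	#include <linux/io.h>

	#define DEMO_BLOCK_BYTES	4096UL	/* lock-hold granularity */

	static DEFINE_SPINLOCK(demo_lock);

	static inline void demo_wait(void __iomem *reg, unsigned long mask)
	{
		/* poll until the controller reports the operation complete */
		while (readl(reg) & mask)
			;
	}

	static void demo_op_range(void __iomem *reg, unsigned long start,
				  unsigned long end, unsigned long line_size)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		while (start < end) {
			unsigned long blk_end = start + min(end - start,
							    DEMO_BLOCK_BYTES);

			while (start < blk_end) {
				/* wait for the previous op, then queue the next */
				demo_wait(reg, 1);
				writel(start, reg);
				start += line_size;
			}

			if (blk_end < end) {
				/* brief window for pending IRQs / other CPUs */
				spin_unlock_irqrestore(&demo_lock, flags);
				spin_lock_irqsave(&demo_lock, flags);
			}
		}
		demo_wait(reg, 1);	/* drain the last queued operation */
		spin_unlock_irqrestore(&demo_lock, flags);
	}

Releasing and retaking the lock between blocks bounds the worst-case
IRQ-off window to roughly one 4 KiB block of line operations, which is
why all three range functions in the diff above share the same blk_end
structure.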