author	Linus Torvalds <torvalds@linux-foundation.org>	2010-07-30 22:02:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-07-30 22:02:51 -0400
commit	a63ecd835f075b21d7d5cef9580447f5fbb36263 (patch)
tree	952d222271e5aed0c500d3d31b39336f13db2943 /arch/arm/mm
parent	fc71ff8a6c187ecc1ba79ee5688668af97a970fc (diff)
parent	e76df4d33973bd9b963d0cce05749b090cc14936 (diff)
Merge master.kernel.org:/home/rmk/linux-2.6-arm
* master.kernel.org:/home/rmk/linux-2.6-arm:
  cyber2000fb: fix console in truecolor modes
  cyber2000fb: fix machine hang on module load
  SA1111: Eliminate use after free
  ARM: Fix Versatile/Realview/VExpress MMC card detection sense
  ARM: 6279/1: highmem: fix SMP preemption bug in kmap_high_l1_vipt
  ARM: Add barriers to io{read,write}{8,16,32} accessors as well
  ARM: 6273/1: Add barriers to the I/O accessors if ARM_DMA_MEM_BUFFERABLE
  ARM: 6272/1: Convert L2x0 to use the IO relaxed operations
  ARM: 6271/1: Introduce *_relaxed() I/O accessors
  ARM: 6275/1: ux500: don't use writeb() in uncompress.h
  ARM: 6270/1: clean files in arch/arm/boot/compressed/
  ARM: Fix csum_partial_copy_from_user()
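Two of the merged patches (6271/1 and 6273/1) hinge on a single layering idea: the ordered readl()/writel() accessors become a barrier wrapped around a new relaxed variant, and code that can prove its own ordering may use the relaxed form directly. A minimal sketch of that layering, assuming the macro shapes in arch/arm/include/asm/io.h of this period (illustrative, not copied from the merge):

	/*
	 * Sketch of the ARM I/O accessor layering -- an assumption about the
	 * macro shapes, not code taken from this merge.
	 */

	/* Relaxed: a bare device access, ordered only against other accesses
	 * to the same device, never against normal (DMA-able) memory. */
	#define readl_relaxed(c)	__raw_readl(c)
	#define writel_relaxed(v, c)	__raw_writel(v, c)

	/* Ordered: the default accessors add an I/O barrier so device accesses
	 * are also ordered against memory, which matters once
	 * ARM_DMA_MEM_BUFFERABLE makes DMA memory bufferable. */
	#define readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
	#define writel(v, c)	({ __iowmb(); writel_relaxed(v, c); })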
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/cache-l2x0.c	26
-rw-r--r--	arch/arm/mm/highmem.c	13
2 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index df4955885b21..9982eb385c0f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
 	/* wait for the operation to complete */
-	while (readl(reg) & mask)
+	while (readl_relaxed(reg) & mask)
 		;
 }
 
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
-	writel(0, base + L2X0_CACHE_SYNC);
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
 
 	/* Clean by PA followed by Invalidate by PA */
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 #else
 
@@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 	l2x0_base = base;
 
-	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
@@ -248,15 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
 		/* l2x0 controller is disabled */
-		writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
 	outer_cache.inv_range = l2x0_inv_range;
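The conversion is safe because every register access here targets the same device (so the relaxed accesses stay ordered with respect to each other), all callers run under l2x0_lock, and each operation finishes with cache_sync(). A condensed sketch of the calling pattern, modelled on the shape of l2x0_inv_range() in this file (unaligned-edge, errata, and lock-break handling elided; treat the details as illustrative):

	/* Condensed sketch of a range operation in cache-l2x0.c --
	 * illustrative only, not the full function from this merge. */
	static void l2x0_inv_range_sketch(unsigned long start, unsigned long end)
	{
		void __iomem *base = l2x0_base;
		unsigned long flags;

		spin_lock_irqsave(&l2x0_lock, flags);	/* serialises all L2x0 access */
		while (start < end) {
			l2x0_inv_line(start);		/* relaxed write, same device */
			start += CACHE_LINE_SIZE;
		}
		cache_wait(base + L2X0_INV_LINE_PA, 1);	/* wait for the last line op */
		cache_sync();				/* drain, then publish the result */
		spin_unlock_irqrestore(&l2x0_lock, flags);
	}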
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 086816b205b8..6ab244062b4a 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
 
 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned int idx, cpu;
+	int *depth;
 	unsigned long vaddr, flags;
 	pte_t pte, *ptep;
 
+	if (!in_interrupt())
+		preempt_disable();
+
+	cpu = smp_processor_id();
+	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+
 	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	ptep = TOP_PTE(vaddr);
 	pte = mk_pte(page, kmap_prot);
 
-	if (!in_interrupt())
-		preempt_disable();
-
 	raw_local_irq_save(flags);
 	(*depth)++;
 	if (pte_val(*ptep) == pte_val(pte)) {
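The bug fixed by 6279/1 is visible in the hunk above: the old code read smp_processor_id() and resolved the per-CPU depth pointer while preemption was still enabled, so the task could migrate to another CPU before preempt_disable() ran, leaving cpu, depth, idx, and the fixmap slot all referring to the wrong CPU. The fix pins the task first. A minimal self-contained sketch of the general pattern (my_counter and the function names are hypothetical, not code from this merge):

	#include <linux/percpu.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(int, my_counter);	/* hypothetical per-CPU variable */

	/* Racy on SMP: the task can migrate right after smp_processor_id()
	 * returns, so "counter" may point at another CPU's slot when used. */
	static void racy_touch(void)
	{
		int cpu = smp_processor_id();
		int *counter = &per_cpu(my_counter, cpu);

		(*counter)++;
	}

	/* Safe: get_cpu() disables preemption before reading the CPU id, and
	 * put_cpu() re-enables it once the per-CPU data is no longer used. */
	static void safe_touch(void)
	{
		int cpu = get_cpu();
		int *counter = &per_cpu(my_counter, cpu);

		(*counter)++;
		put_cpu();
	}

With CONFIG_DEBUG_PREEMPT, the racy form also triggers a "using smp_processor_id() in preemptible" warning, which is how bugs of this shape are usually caught.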