path: root/arch/arm/include/asm
author     Russell King <rmk+kernel@arm.linux.org.uk>  2010-03-08 15:21:04 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2010-03-08 15:21:04 -0500
commit     988addf82e4c03739375279de73929580a2d4a6a (patch)
tree       989ae1cd4e264bbad80c65f04480486246e7b9f3 /arch/arm/include/asm
parent     004c1c7096659d352b83047a7593e91d8a30e3c5 (diff)
parent     25cf84cf377c0aae5dbcf937ea89bc7893db5176 (diff)
Merge branch 'origin' into devel-stable
Conflicts:
        arch/arm/mach-mx2/devices.c
        arch/arm/mach-mx2/devices.h
        sound/soc/pxa/pxa-ssp.c
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/atomic.h                228
-rw-r--r--  arch/arm/include/asm/cacheflush.h             66
-rw-r--r--  arch/arm/include/asm/clkdev.h                  3
-rw-r--r--  arch/arm/include/asm/dma-mapping.h            79
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx-adma.h   12
-rw-r--r--  arch/arm/include/asm/io.h                     11
-rw-r--r--  arch/arm/include/asm/mach/time.h               8
-rw-r--r--  arch/arm/include/asm/memory.h                 23
-rw-r--r--  arch/arm/include/asm/mmu.h                     1
-rw-r--r--  arch/arm/include/asm/mmu_context.h            15
-rw-r--r--  arch/arm/include/asm/page.h                    7
-rw-r--r--  arch/arm/include/asm/perf_event.h             31
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h           4
-rw-r--r--  arch/arm/include/asm/pmu.h                    75
-rw-r--r--  arch/arm/include/asm/setup.h                  12
-rw-r--r--  arch/arm/include/asm/smp_plat.h                5
-rw-r--r--  arch/arm/include/asm/spinlock.h               36
-rw-r--r--  arch/arm/include/asm/system.h                  3
-rw-r--r--  arch/arm/include/asm/thread_info.h             3
-rw-r--r--  arch/arm/include/asm/tlbflush.h                3
20 files changed, 499 insertions(+), 126 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+        u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+        u64 result;
+
+        __asm__ __volatile__("@ atomic64_read\n"
+"       ldrexd  %0, %H0, [%1]"
+        : "=&r" (result)
+        : "r" (&v->counter)
+        );
+
+        return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+        u64 tmp;
+
+        __asm__ __volatile__("@ atomic64_set\n"
+"1:     ldrexd  %0, %H0, [%1]\n"
+"       strexd  %0, %2, %H2, [%1]\n"
+"       teq     %0, #0\n"
+"       bne     1b"
+        : "=&r" (tmp)
+        : "r" (&v->counter), "r" (i)
+        : "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+        u64 result;
+        unsigned long tmp;
+
+        __asm__ __volatile__("@ atomic64_add\n"
+"1:     ldrexd  %0, %H0, [%2]\n"
+"       adds    %0, %0, %3\n"
+"       adc     %H0, %H0, %H3\n"
+"       strexd  %1, %0, %H0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp)
+        : "r" (&v->counter), "r" (i)
+        : "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+        u64 result;
+        unsigned long tmp;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic64_add_return\n"
+"1:     ldrexd  %0, %H0, [%2]\n"
+"       adds    %0, %0, %3\n"
+"       adc     %H0, %H0, %H3\n"
+"       strexd  %1, %0, %H0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp)
+        : "r" (&v->counter), "r" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+        u64 result;
+        unsigned long tmp;
+
+        __asm__ __volatile__("@ atomic64_sub\n"
+"1:     ldrexd  %0, %H0, [%2]\n"
+"       subs    %0, %0, %3\n"
+"       sbc     %H0, %H0, %H3\n"
+"       strexd  %1, %0, %H0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp)
+        : "r" (&v->counter), "r" (i)
+        : "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+        u64 result;
+        unsigned long tmp;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic64_sub_return\n"
+"1:     ldrexd  %0, %H0, [%2]\n"
+"       subs    %0, %0, %3\n"
+"       sbc     %H0, %H0, %H3\n"
+"       strexd  %1, %0, %H0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp)
+        : "r" (&v->counter), "r" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+        u64 oldval;
+        unsigned long res;
+
+        smp_mb();
+
+        do {
+                __asm__ __volatile__("@ atomic64_cmpxchg\n"
+                "ldrexd         %1, %H1, [%2]\n"
+                "mov            %0, #0\n"
+                "teq            %1, %3\n"
+                "teqeq          %H1, %H3\n"
+                "strexdeq       %0, %4, %H4, [%2]"
+                : "=&r" (res), "=&r" (oldval)
+                : "r" (&ptr->counter), "r" (old), "r" (new)
+                : "cc");
+        } while (res);
+
+        smp_mb();
+
+        return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+        u64 result;
+        unsigned long tmp;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic64_xchg\n"
+"1:     ldrexd  %0, %H0, [%2]\n"
+"       strexd  %1, %3, %H3, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp)
+        : "r" (&ptr->counter), "r" (new)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+        u64 result;
+        unsigned long tmp;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:     ldrexd  %0, %H0, [%2]\n"
+"       subs    %0, %0, #1\n"
+"       sbc     %H0, %H0, #0\n"
+"       teq     %H0, #0\n"
+"       bmi     2f\n"
+"       strexd  %1, %0, %H0, [%2]\n"
+"       teq     %1, #0\n"
+"       bne     1b\n"
+"2:"
+        : "=&r" (result), "=&r" (tmp)
+        : "r" (&v->counter)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+        u64 val;
+        unsigned long tmp;
+        int ret = 1;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic64_add_unless\n"
+"1:     ldrexd  %0, %H0, [%3]\n"
+"       teq     %0, %4\n"
+"       teqeq   %H0, %H4\n"
+"       moveq   %1, #0\n"
+"       beq     2f\n"
+"       adds    %0, %0, %5\n"
+"       adc     %H0, %H0, %H5\n"
+"       strexd  %2, %0, %H0, [%3]\n"
+"       teq     %2, #0\n"
+"       bne     1b\n"
+"2:"
+        : "=&r" (val), "=&r" (ret), "=&r" (tmp)
+        : "r" (&v->counter), "r" (u), "r" (a)
+        : "cc");
+
+        if (ret)
+                smp_mb();
+
+        return ret;
+}
+
+#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)                 atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif
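
As background (not part of this patch): the new atomic64_t operations are used like their 32-bit counterparts, with the ldrexd/strexd loops above making each call a single tear-free 64-bit read-modify-write. A minimal sketch, using a hypothetical byte counter:

#include <linux/types.h>
#include <asm/atomic.h>

static atomic64_t bytes_rx = ATOMIC64_INIT(0);

static void account_rx(unsigned int len)
{
        atomic64_add(len, &bytes_rx);           /* ldrexd/strexd loop, no lock needed */
}

static u64 snapshot_rx(void)
{
        return atomic64_read(&bytes_rx);        /* single ldrexd: tear-free 64-bit read */
}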
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8113bb5fb66e..72da7e045c6b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -197,21 +197,6 @@
  * DMA Cache Coherency
  * ===================
  *
- * dma_inv_range(start, end)
- *
- *      Invalidate (discard) the specified virtual address range.
- *      May not write back any entries.  If 'start' or 'end'
- *      are not cache line aligned, those lines must be written
- *      back.
- *      - start  - virtual start address
- *      - end    - virtual end address
- *
- * dma_clean_range(start, end)
- *
- *      Clean (write back) the specified virtual address range.
- *      - start  - virtual start address
- *      - end    - virtual end address
- *
  * dma_flush_range(start, end)
  *
  *      Clean and invalidate the specified virtual address range.
@@ -228,8 +213,9 @@ struct cpu_cache_fns {
         void (*coherent_user_range)(unsigned long, unsigned long);
         void (*flush_kern_dcache_area)(void *, size_t);
 
-        void (*dma_inv_range)(const void *, const void *);
-        void (*dma_clean_range)(const void *, const void *);
+        void (*dma_map_area)(const void *, size_t, int);
+        void (*dma_unmap_area)(const void *, size_t, int);
+
         void (*dma_flush_range)(const void *, const void *);
 };
 
@@ -259,8 +245,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range                  cpu_cache.dma_inv_range
-#define dmac_clean_range                cpu_cache.dma_clean_range
+#define dmac_map_area                   cpu_cache.dma_map_area
+#define dmac_unmap_area                 cpu_cache.dma_unmap_area
 #define dmac_flush_range                cpu_cache.dma_flush_range
 
 #else
@@ -285,12 +271,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range                  __glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range                __glue(_CACHE,_dma_clean_range)
+#define dmac_map_area                   __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area                 __glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range                __glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -331,12 +317,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-        do {                                                    \
-                memcpy(dst, src, len);                          \
-                flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-        } while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+        unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
         do {                                                    \
                 memcpy(dst, src, len);                          \
@@ -370,17 +352,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
         }
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                         unsigned long uaddr, void *kaddr,
-                         unsigned long len, int write)
-{
-        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-                unsigned long addr = (unsigned long)kaddr;
-                __cpuc_coherent_kern_range(addr, addr + len);
-        }
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
                 vivt_flush_cache_mm(mm)
@@ -388,15 +359,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                 vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
                 vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-                vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                                unsigned long uaddr, void *kaddr,
-                                unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
@@ -447,6 +413,16 @@ static inline void __flush_icache_all(void)
             : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+                __cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+                __cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,
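
For context (an illustrative sketch, not taken from this patch): the new flush_kernel_vmap_range()/invalidate_kernel_vmap_range() hooks are intended for drivers doing I/O through a vmalloc/vmap alias on aliasing caches — write back dirty lines before the transfer, discard stale lines afterwards. Assuming a device read into a vmalloc'd buffer:

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

static void vmap_buffer_read(void *buf, size_t len)
{
        flush_kernel_vmap_range(buf, len);      /* push dirty lines out of the alias */
        /* ... submit the underlying pages to the device and wait ... */
        invalidate_kernel_vmap_range(buf, len); /* drop lines made stale by the DMA */
}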
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b6ec7c627b39..7a0690da5e63 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -27,4 +27,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
 void clkdev_add(struct clk_lookup *cl);
 void clkdev_drop(struct clk_lookup *cl);
 
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
 #endif
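
As a usage note (hypothetical platform code, not from this patch): clkdev_add_table() lets a machine register a whole array of lookups in one call instead of looping over clkdev_add():

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/clkdev.h>

extern struct clk uart_clk, pclk;       /* hypothetical platform clocks */

static struct clk_lookup board_lookups[] = {
        { .dev_id = "dev:uart0", .clk = &uart_clk },    /* names are illustrative */
        { .con_id = "pclk",      .clk = &pclk },
};

static void __init board_init_clocks(void)
{
        clkdev_add_table(board_lookups, ARRAY_SIZE(board_lookups));
}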
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83fd..256ee1c9f51a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
  *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-                size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+        enum dma_data_direction dir)
+{
+        extern void ___dma_single_cpu_to_dev(const void *, size_t,
+                enum dma_data_direction);
+
+        if (!arch_is_coherent())
+                ___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+        enum dma_data_direction dir)
+{
+        extern void ___dma_single_dev_to_cpu(const void *, size_t,
+                enum dma_data_direction);
+
+        if (!arch_is_coherent())
+                ___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+        size_t size, enum dma_data_direction dir)
+{
+        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+                size_t, enum dma_data_direction);
+
+        if (!arch_is_coherent())
+                ___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+        size_t size, enum dma_data_direction dir)
+{
+        extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+                size_t, enum dma_data_direction);
+
+        if (!arch_is_coherent())
+                ___dma_page_dev_to_cpu(page, off, size, dir);
+}
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -304,8 +344,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
         BUG_ON(!valid_dma_direction(dir));
 
-        if (!arch_is_coherent())
-                dma_cache_maint(cpu_addr, size, dir);
+        __dma_single_cpu_to_dev(cpu_addr, size, dir);
 
         return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +368,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
         BUG_ON(!valid_dma_direction(dir));
 
-        if (!arch_is_coherent())
-                dma_cache_maint_page(page, offset, size, dir);
+        __dma_page_cpu_to_dev(page, offset, size, dir);
 
         return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +390,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir)
 {
-        /* nothing to do */
+        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +410,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir)
 {
-        /* nothing to do */
+        __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+                size, dir);
 }
 #endif /* CONFIG_DMABOUNCE */
 
@@ -400,7 +439,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
         BUG_ON(!valid_dma_direction(dir));
 
-        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+        if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+                return;
+
+        __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +454,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
         if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                 return;
 
-        if (!arch_is_coherent())
-                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+        __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,
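
For context (a driver-side sketch, not part of the patch): the ownership model described in the comment above maps onto the usual streaming-DMA calls — the map step cleans the cache before the device owns the buffer, and the unmap step invalidates it once ownership returns to the CPU:

#include <linux/dma-mapping.h>

static void receive_one_buffer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* CPU -> device: dirty lines are cleaned before the transfer starts */
        handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, handle))
                return;

        /* ... program the device with 'handle', wait for completion ... */

        /* device -> CPU: stale lines are invalidated here, not at map time */
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}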
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c7279a28b..9b28f1243bdc 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
                 slot_cnt += *slots_per_op;
         }
 
-        if (len)
-                slot_cnt += *slots_per_op;
+        slot_cnt += *slots_per_op;
 
         return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
                 slot_cnt += *slots_per_op;
         }
 
-        if (len)
-                slot_cnt += *slots_per_op;
+        slot_cnt += *slots_per_op;
 
         return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
                         i += slots_per_op;
                 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
 
-                if (len) {
-                        iter = iop_hw_desc_slot_idx(hw_desc, i);
-                        iter->byte_count = len;
-                }
+                iter = iop_hw_desc_slot_idx(hw_desc, i);
+                iter->byte_count = len;
         }
 }
 
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..c980156f3263 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+        size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+        void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd0400..8bffc3ff3acf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
 extern struct sys_timer *system_timer;
 extern void timer_tick(void);
 
-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
 #endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82a2572..4312ee5e3d0b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@
  */
 #define IOREMAP_MAX_ORDER       24
 
+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE     SZ_2M
+#endif
+
+#define CONSISTENT_END          (0xffe00000UL)
+#define CONSISTENT_BASE         (CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #else /* CONFIG_MMU */
 
 /*
@@ -93,11 +104,11 @@
 #endif
 
 #ifndef PHYS_OFFSET
-#define PHYS_OFFSET     (CONFIG_DRAM_BASE)
+#define PHYS_OFFSET     UL(CONFIG_DRAM_BASE)
 #endif
 
 #ifndef END_MEM
-#define END_MEM         (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#define END_MEM         (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
@@ -113,14 +124,6 @@
 #endif /* !CONFIG_MMU */
 
 /*
- * Size of DMA-consistent memory region.  Must be multiple of 2M,
- * between 2MB and 14MB inclusive.
- */
-#ifndef CONSISTENT_DMA_SIZE
-#define CONSISTENT_DMA_SIZE SZ_2M
-#endif
-
-/*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
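
A usage note (the platform name is made up, not from this patch): since CONSISTENT_DMA_SIZE now lives in the MMU-only part of this header, a platform that needs a larger DMA-coherent pool still simply defines it in its own mach/memory.h before these defaults apply:

/* arch/arm/mach-foo/include/mach/memory.h (hypothetical) */
#define CONSISTENT_DMA_SIZE     SZ_8M   /* must stay a 2MB multiple, at most 14MB */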
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a1..68870c776671 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
         unsigned int id;
+        spinlock_t id_lock;
 #endif
         unsigned int kvm_seq;
 } mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION      (1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+        /*
+         * This code is executed with interrupts enabled. Therefore,
+         * mm->context.id cannot be updated to the latest ASID version
+         * on a different CPU (and condition below not triggered)
+         * without first getting an IPI to reset the context. The
+         * alternative is to take a read_lock on mm->context.id_lock
+         * (after changing its type to rwlock_t).
+         */
         if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
                 __new_context(mm);
 
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                 __flush_icache_all();
 #endif
         if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+                struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+                *crt_mm = next;
+#endif
                 check_context(next);
                 cpu_switch_mm(next->pgd, next);
                 if (cache_is_vivt())
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 3a32af4cce30..a485ac3c8696 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -117,11 +117,12 @@
 #endif
 
 struct page;
+struct vm_area_struct;
 
 struct cpu_user_fns {
         void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
         void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
-                        unsigned long vaddr);
+                        unsigned long vaddr, struct vm_area_struct *vma);
 };
 
 #ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
 
 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
-                        unsigned long vaddr);
+                        unsigned long vaddr, struct vm_area_struct *vma);
 #endif
 
 #define clear_user_highpage(page,vaddr) \
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma) \
-        __cpu_copy_user_highpage(to, from, vaddr)
+        __cpu_copy_user_highpage(to, from, vaddr, vma)
 
 #define clear_page(page)        memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
new file mode 100644
index 000000000000..49e3049aba32
--- /dev/null
+++ b/arch/arm/include/asm/perf_event.h
@@ -0,0 +1,31 @@
+/*
+ *  linux/arch/arm/include/asm/perf_event.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PERF_EVENT_H__
+#define __ARM_PERF_EVENT_H__
+
+/*
+ * NOP: on *most* (read: all supported) ARM platforms, the performance
+ * counter interrupts are regular interrupts and not an NMI. This
+ * means that when we receive the interrupt we can call
+ * perf_event_do_pending() that handles all of the work with
+ * interrupts enabled.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+/* ARM performance counters start from 1 (in the cp15 accesses) so use the
+ * same indexes here for consistency. */
+#define PERF_EVENT_INDEX_OFFSET 1
+
+#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index b011f2e939aa..013cfcdc4839 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp);
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
-#define VMALLOC_START   0
-#define VMALLOC_END     0xffffffff
+#define VMALLOC_START   0UL
+#define VMALLOC_END     0xffffffffUL
 
 #define FIRST_USER_ADDRESS      (0)
 
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
new file mode 100644
index 000000000000..2829b9f981a1
--- /dev/null
+++ b/arch/arm/include/asm/pmu.h
@@ -0,0 +1,75 @@
+/*
+ *  linux/arch/arm/include/asm/pmu.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#ifdef CONFIG_CPU_HAS_PMU
+
+struct pmu_irqs {
+        const int   *irqs;
+        int         num_irqs;
+};
+
+/**
+ * reserve_pmu() - reserve the hardware performance counters
+ *
+ * Reserve the hardware performance counters in the system for exclusive use.
+ * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
+ * encoded error on failure.
+ */
+extern const struct pmu_irqs *
+reserve_pmu(void);
+
+/**
+ * release_pmu() - Relinquish control of the performance counters
+ *
+ * Release the performance counters and allow someone else to use them.
+ * Callers must have disabled the counters and released IRQs before calling
+ * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
+ * a cookie.
+ */
+extern int
+release_pmu(const struct pmu_irqs *irqs);
+
+/**
+ * init_pmu() - Initialise the PMU.
+ *
+ * Initialise the system ready for PMU enabling. This should typically set the
+ * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
+ * the actual hardware initialisation.
+ */
+extern int
+init_pmu(void);
+
+#else /* CONFIG_CPU_HAS_PMU */
+
+static inline const struct pmu_irqs *
+reserve_pmu(void)
+{
+        return ERR_PTR(-ENODEV);
+}
+
+static inline int
+release_pmu(const struct pmu_irqs *irqs)
+{
+        return -ENODEV;
+}
+
+static inline int
+init_pmu(void)
+{
+        return -ENODEV;
+}
+
+#endif /* CONFIG_CPU_HAS_PMU */
+
+#endif /* __ARM_PMU_H__ */
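
For context (a sketch of a client, not from this patch): a subsystem such as perf events or oprofile is expected to claim the counters through this interface before touching them — reserve_pmu() for exclusivity, init_pmu() for IRQ affinity, then request the listed IRQs itself:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <asm/pmu.h>

static const struct pmu_irqs *pmu_irqs;

/* 'handler' is the caller's counter-overflow handler; unwinding of
 * partially requested IRQs is omitted to keep the sketch short. */
static int claim_pmu(irq_handler_t handler)
{
        int i, err;

        pmu_irqs = reserve_pmu();
        if (IS_ERR(pmu_irqs))
                return PTR_ERR(pmu_irqs);

        err = init_pmu();
        for (i = 0; !err && i < pmu_irqs->num_irqs; i++)
                err = request_irq(pmu_irqs->irqs[i], handler, 0, "pmu", NULL);

        if (err)
                release_pmu(pmu_irqs);
        return err;
}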
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03c..f392fb4437af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)     ((bank)->start + (bank)->size)
 #define bank_phys_size(bank)    (bank)->size
 
-/*
- * Early command line parameters.
- */
-struct early_params {
-        const char *arg;
-        void (*fn)(char **p);
-};
-
-#define __early_param(name,fn)                                  \
-static struct early_params __early_##fn __used                  \
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
 #endif  /* __KERNEL__ */
 
 #endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e200845..e6215305544a 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
         return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+        return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif
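
As background (illustrative only — the wrapper below is not from this patch): cache_ops_need_broadcast() reads the same MMFR3 maintenance-broadcast field as tlb_ops_need_broadcast() and tells SMP code whether cache maintenance must be run on every CPU in software, for example:

#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

struct range_args {
        unsigned long start, end;
};

static void do_coherent_range(void *info)
{
        struct range_args *a = info;

        __cpuc_coherent_kern_range(a->start, a->end);
}

static void coherent_kern_range_all_cpus(unsigned long start, unsigned long end)
{
        struct range_args a = { start, end };

        if (cache_ops_need_broadcast())
                on_each_cpu(do_coherent_range, &a, 1);  /* hardware won't broadcast it */
        else
                __cpuc_coherent_kern_range(start, end);
}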
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c91c64cab922..17eb355707dd 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,22 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+static inline void dsb_sev(void)
+{
+#if __LINUX_ARM_ARCH__ >= 7
+        __asm__ __volatile__ (
+                "dsb\n"
+                "sev"
+        );
+#elif defined(CONFIG_CPU_32v6K)
+        __asm__ __volatile__ (
+                "mcr p15, 0, %0, c7, c10, 4\n"
+                "sev"
+                : : "r" (0)
+        );
+#endif
+}
+
 /*
  * ARMv6 Spin-locking.
  *
@@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
         __asm__ __volatile__(
 "       str     %1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"       mcr     p15, 0, %1, c7, c10, 4\n" /* DSB */
-"       sev"
-#endif
         :
         : "r" (&lock->lock), "r" (0)
         : "cc");
+
+        dsb_sev();
 }
 
 /*
@@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
         __asm__ __volatile__(
         "str    %1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"       mcr     p15, 0, %1, c7, c10, 4\n" /* DSB */
-"       sev\n"
-#endif
         :
         : "r" (&rw->lock), "r" (0)
         : "cc");
+
+        dsb_sev();
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
188" strex %1, %0, [%2]\n" 200" strex %1, %0, [%2]\n"
189" teq %1, #0\n" 201" teq %1, #0\n"
190" bne 1b" 202" bne 1b"
191#ifdef CONFIG_CPU_32v6K
192"\n cmp %0, #0\n"
193" mcreq p15, 0, %0, c7, c10, 4\n"
194" seveq"
195#endif
196 : "=&r" (tmp), "=&r" (tmp2) 203 : "=&r" (tmp), "=&r" (tmp2)
197 : "r" (&rw->lock) 204 : "r" (&rw->lock)
198 : "cc"); 205 : "cc");
206
207 if (tmp == 0)
208 dsb_sev();
199} 209}
200 210
201static inline int arch_read_trylock(arch_rwlock_t *rw) 211static inline int arch_read_trylock(arch_rwlock_t *rw)
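
For reference (reproduced from memory and possibly differing in detail — it is unchanged by this patch): the DSB+SEV pair that dsb_sev() now emits in the unlock paths is what wakes the lock side, which spins with WFE while the lock is held:

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
#ifdef CONFIG_CPU_32v6K
"       wfene\n"
#endif
"       strexeq %0, %2, [%1]\n"
"       teqeq   %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        smp_mb();
}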
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 058e7e90881d..ca88e6a84707 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285;
 
 struct pt_regs;
 
-void die(const char *msg, struct pt_regs *regs, int err)
-                __attribute__((noreturn));
+void die(const char *msg, struct pt_regs *regs, int err);
 
 struct siginfo;
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2dfb7d7a66e9..b74970ec02c4 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
 extern void iwmmxt_task_release(struct thread_info *);
 extern void iwmmxt_task_switch(struct thread_info *);
 
-extern void vfp_sync_state(struct thread_info *thread);
+extern void vfp_sync_hwstate(struct thread_info *);
+extern void vfp_flush_hwstate(struct thread_info *);
 
 #endif
 
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de359..e085e2c545eb 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+        pte_t *ptep);
 
 #endif
 