author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-10 10:24:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-10 10:24:14 -0400
commit     6019958d146a4f127dae727a930f902c92531e6e (patch)
tree       126cfbdafb7008ce9701dd2ccc27c54cab2d36f4 /arch/arc
parent     977b58e1dd6fc2a751fb427798bfb8256476c93e (diff)
parent     e7d5bab5efb94a82d80cab3ad68c357ce73ea596 (diff)
Merge tag 'arc-v3.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull second set of arc arch updates from Vineet Gupta:
 "Aliasing VIPT dcache support for ARC.  I'm satisfied with testing,
  especially with fuse, which has historically given grief to VIPT
  arches (ARM/PARISC...)"

* tag 'arc-v3.10-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: [TB10x] Remove GENERIC_GPIO
  ARC: [mm] Aliasing VIPT dcache support 4/4
  ARC: [mm] Aliasing VIPT dcache support 3/4
  ARC: [mm] Aliasing VIPT dcache support 2/4
  ARC: [mm] Aliasing VIPT dcache support 1/4
  ARC: [mm] refactor the core (i|d)cache line ops loops
  ARC: [mm] serious bug in vaddr based icache flush
Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/Kconfig                  |   4
-rw-r--r--  arch/arc/include/asm/Kbuild       |   1
-rw-r--r--  arch/arc/include/asm/cache.h      |   3
-rw-r--r--  arch/arc/include/asm/cacheflush.h |  58
-rw-r--r--  arch/arc/include/asm/page.h       |  16
-rw-r--r--  arch/arc/include/asm/pgtable.h    |   3
-rw-r--r--  arch/arc/include/asm/shmparam.h   |  18
-rw-r--r--  arch/arc/include/asm/tlb.h        |  11
-rw-r--r--  arch/arc/mm/Makefile              |   2
-rw-r--r--  arch/arc/mm/cache_arc700.c        | 221
-rw-r--r--  arch/arc/mm/mmap.c                |  78
-rw-r--r--  arch/arc/mm/tlb.c                 |  29
13 files changed, 377 insertions(+), 74 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 491ae7923b10..5917099470ea 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
          Note that Global I/D ENABLE + Per Page DISABLE works but corollary
          Global DISABLE + Per Page ENABLE won't work
 
+config ARC_CACHE_VIPT_ALIASING
+        bool "Support VIPT Aliasing D$"
+        default n
+
 endif   #ARC_CACHE
 
 config ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742f8b5a..d8dd660898b9 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -32,7 +32,6 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273861fd..d5555fe4742a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -55,9 +55,6 @@
                       : "r"(data), "r"(ptr)); \
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
 /*
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index ee1f6eae82d2..9f841af41092 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
 
 /*
  * Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v) \
+        ___flush_dcache_page((unsigned long)p, (unsigned long)v)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
@@ -50,18 +53,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
 #define flush_cache_vmap(start, end)            flush_cache_all()
 #define flush_cache_vunmap(start, end)          flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm)                  /* called on fork */
+#define flush_cache_dup_mm(mm)                  /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define flush_cache_mm(mm)                      /* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)     /* PF handling/COW-break */
 
+#else   /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+        unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+        unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+        struct page *page, unsigned long u_vaddr);
+
+#endif  /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+        return 1;
+#else
+        return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)       (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)  \
+        cache_is_vipt_aliasing() ?              \
+                (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0  \
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
         memcpy(dst, src, len); \
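
To see what the new cacheflush.h helpers decide, here is a minimal stand-alone sketch (not part of the patch; 4 KB pages and two page colours are assumed) of the same page-colour test that addr_not_cache_congruent() performs:

    #include <stdio.h>

    #define PAGE_SHIFT   12    /* assumed: 4 KB pages */
    #define N_COLOURS    2     /* assumed: matches SHMLBA = 2 * PAGE_SIZE below */
    #define COLOUR(a)    (((unsigned long)(a) >> PAGE_SHIFT) & (N_COLOURS - 1))

    /* same idea as addr_not_cache_congruent(): differing colours need a flush */
    static int needs_flush(unsigned long kvaddr, unsigned long uvaddr)
    {
            return COLOUR(kvaddr) != COLOUR(uvaddr);
    }

    int main(void)
    {
            /* one physical page, mapped at a kernel and a user virtual address */
            unsigned long kvaddr = 0x90001000;      /* colour 1 */
            unsigned long uvaddr = 0x20002000;      /* colour 0 */

            printf("flush needed: %s\n", needs_flush(kvaddr, uvaddr) ? "yes" : "no");
            return 0;
    }

With two colours the two mappings above index different cache sets, so a kernel write has to be written back and invalidated before userspace reads through its own mapping.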
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index bdf546104551..374a35514116 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -16,13 +16,27 @@
 #define get_user_page(vaddr)            __get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)      free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)               memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)             memcpy((to), (from), PAGE_SIZE)
 
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define clear_user_page(addr, vaddr, pg)        clear_page(addr)
 #define copy_user_page(vto, vfrom, vaddr, pg)   copy_page(vto, vfrom)
 
+#else   /* VIPT aliasing dcache */
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+        unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#endif  /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e36684c091..1cc4720faccb 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 000000000000..fffeecc04270
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle upto 2 cache bins */
+#define SHMLBA  (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
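
With __ARCH_FORCE_SHMLBA defined, the generic shmat()/mmap paths round attach addresses to an SHMLBA boundary so shared mappings of the same object never land on different page colours. A hedged, user-space sketch of that rounding (PAGE_SIZE assumed to be 4 KB; not kernel code):

    #include <stdio.h>

    #define PAGE_SIZE   4096UL              /* assumed */
    #define SHMLBA      (2 * PAGE_SIZE)     /* two cache bins, as in the patch */

    /* round an attach address up to the next SHMLBA boundary */
    static unsigned long shmlba_align(unsigned long addr)
    {
            return (addr + SHMLBA - 1) & ~(SHMLBA - 1);
    }

    int main(void)
    {
            printf("0x%lx -> 0x%lx\n", 0x10003000UL, shmlba_align(0x10003000UL));
            return 0;
    }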
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index fe91719866a5..85b6df839bd7 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -30,13 +30,20 @@ do { \
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
  * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)                                         \
+do {                                                                    \
+        if (!tlb->fullmm)                                               \
+                flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
+} while(0)
+#endif
 
 #define tlb_end_vma(tlb, vma)                                           \
 do {                                                                    \
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 168dc146a8f6..ac95cc239c1e 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -7,4 +7,4 @@
 #
 
 obj-y   := extable.o ioremap.o dma.o fault.o init.o
-obj-y   += tlb.o tlbex.o cache_arc700.o
+obj-y   += tlb.o tlbex.o cache_arc700.o mmap.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index c854cf95f706..2f12bca8aef3 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
         struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
         struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
         int way_pg_ratio = way_pg_ratio;
+        int dcache_does_alias;
         char str[256];
 
         printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
                 panic("Cache H/W doesn't match kernel Config");
         }
 
+        dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
         /* check for D-Cache aliasing */
-        if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-                panic("D$ aliasing not handled right now\n");
+        if (dcache_does_alias && !cache_is_vipt_aliasing())
+                panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+        else if (!dcache_does_alias && cache_is_vipt_aliasing())
+                panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
         /* Set the default Invalidate Mode to "simpy discard dirty lines"
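
The new check in arc_cache_init() fires whenever one D$ way spans more than a page, because only then can the same physical line be indexed under more than one page colour. A small stand-alone sketch of the arithmetic, with an assumed (but typical ARC700) geometry:

    #include <stdio.h>

    int main(void)
    {
            unsigned long dcache_sz = 32 * 1024;    /* assumed: 32 KB D$ */
            unsigned long ways      = 4;            /* assumed: 4-way set associative */
            unsigned long page_sz   = 4 * 1024;     /* 4 KB pages */

            /* same test as arc_cache_init(): bytes indexed per way vs. page size */
            int aliases = (dcache_sz / ways) > page_sz;

            printf("per-way %lu bytes, page %lu bytes -> %s\n",
                   dcache_sz / ways, page_sz,
                   aliases ? "aliasing: CONFIG_ARC_CACHE_VIPT_ALIASING needed"
                           : "non-aliasing");
            return 0;
    }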
@@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop)
  * Per Line Operation on D-Cache
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * It's sole purpose is to help gcc generate ZOL
+ * (aliasing VIPT dcache flushing needs both vaddr and paddr)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-                                  int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
+                                  unsigned long sz, const int aux_reg)
 {
-        int num_lines, slack;
+        int num_lines;
 
         /* Ensure we properly floor/ceil the non-line aligned/sized requests
-         * and have @start - aligned to cache line and integral @num_lines.
+         * and have @paddr - aligned to cache line and integral @num_lines.
          * This however can be avoided for page sized since:
-         *  -@start will be cache-line aligned already (being page aligned)
+         *  -@paddr will be cache-line aligned already (being page aligned)
          *  -@sz will be integral multiple of line size (being page sized).
          */
         if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                slack = start & ~DCACHE_LINE_MASK;
-                sz += slack;
-                start -= slack;
+                sz += paddr & ~DCACHE_LINE_MASK;
+                paddr &= DCACHE_LINE_MASK;
+                vaddr &= DCACHE_LINE_MASK;
         }
 
         num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
 
+#if (CONFIG_ARC_MMU_VER <= 2)
+        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
         while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                 /*
                  * Just as for I$, in MMU v3, D$ ops also require
                  * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-                 * But we pass phy addr for both. This works since Linux
-                 * doesn't support aliasing configs for D$, yet.
-                 * Thus paddr is enough to provide both tag and index.
                  */
-                write_aux_reg(ARC_REG_DC_PTAG, start);
+                write_aux_reg(ARC_REG_DC_PTAG, paddr);
+
+                write_aux_reg(aux_reg, vaddr);
+                vaddr += ARC_DCACHE_LINE_LEN;
+#else
+                /* paddr contains stuffed vaddrs bits */
+                write_aux_reg(aux_reg, paddr);
 #endif
-                write_aux_reg(aux_reg, start);
-                start += ARC_DCACHE_LINE_LEN;
+                paddr += ARC_DCACHE_LINE_LEN;
         }
 }
 
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)       __dc_line_op(p, p, sz, op)
+
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
-                                const int cacheop)
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+                                unsigned long sz, const int cacheop)
 {
         unsigned long flags, tmp = tmp;
         int aux;
@@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
         else
                 aux = ARC_REG_DC_FLDL;
 
-        __dc_line_loop(start, sz, aux);
+        __dc_line_loop(paddr, vaddr, sz, aux);
 
         if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                 wait_for_flush();
@@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, vaddr, sz, cacheop)
+#define __dc_line_op_k(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
-                                unsigned long sz)
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+                                unsigned long sz)
 {
         unsigned long flags;
-        int num_lines, slack;
-        unsigned int addr;
+        int num_lines;
 
         /*
          * Ensure we properly floor/ceil the non-line aligned/sized requests:
          * However page sized flushes can be compile time optimised.
-         *  -@phy_start will be cache-line aligned already (being page aligned)
+         *  -@paddr will be cache-line aligned already (being page aligned)
          *  -@sz will be integral multiple of line size (being page sized).
          */
         if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-                slack = phy_start & ~ICACHE_LINE_MASK;
-                sz += slack;
-                phy_start -= slack;
+                sz += paddr & ~ICACHE_LINE_MASK;
+                paddr &= ICACHE_LINE_MASK;
+                vaddr &= ICACHE_LINE_MASK;
         }
 
         num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-        vaddr &= ~ICACHE_LINE_MASK;
-        addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
         /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-        addr = phy_start | ((vaddr >> 13) & 0x1F);
+        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
         local_irq_save(flags);
         while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                 /* tag comes from phy addr */
-                write_aux_reg(ARC_REG_IC_PTAG, addr);
+                write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
                 /* index bits come from vaddr */
                 write_aux_reg(ARC_REG_IC_IVIL, vaddr);
                 vaddr += ARC_ICACHE_LINE_LEN;
 #else
                 /* paddr contains stuffed vaddrs bits */
-                write_aux_reg(ARC_REG_IC_IVIL, addr);
+                write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-                addr += ARC_ICACHE_LINE_LEN;
+                paddr += ARC_ICACHE_LINE_LEN;
         }
         local_irq_restore(flags);
 }
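
On MMU v2 the line ops take a single address register, so the refactored loops fold the index-relevant virtual-address bits into the physical address before entering the loop. A small sketch of that bit stuffing (addresses and PAGE_SHIFT are illustrative only, not taken from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed: 4 KB pages */

    int main(void)
    {
            unsigned long paddr = 0x80042000;       /* page-aligned physical address */
            unsigned long vaddr = 0x2001f000;       /* user virtual address of the same page */

            /* MMU v2 path: OR the low virtual page-number bits into the op address */
            unsigned long op_addr = paddr | ((vaddr >> PAGE_SHIFT) & 0x1F);

            printf("write_aux_reg(..., 0x%lx)\n", op_addr);    /* 0x8004201f */
            return 0;
    }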
@@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to to K-mapping, former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-        /* Make a note that dcache is not yet flushed for this page */
-        set_bit(PG_arch_1, &page->flags);
+        struct address_space *mapping;
+
+        if (!cache_is_vipt_aliasing()) {
+                set_bit(PG_arch_1, &page->flags);
+                return;
+        }
+
+        /* don't handle anon pages here */
+        mapping = page_mapping(page);
+        if (!mapping)
+                return;
+
+        /*
+         * pagecache page, file not yet mapped to userspace
+         * Make a note that K-mapping is dirty
+         */
+        if (!mapping_mapped(mapping)) {
+                set_bit(PG_arch_1, &page->flags);
+        } else if (page_mapped(page)) {
+
+                /* kernel reading from page with U-mapping */
+                void *paddr = page_address(page);
+                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+                if (addr_not_cache_congruent(paddr, vaddr))
+                        __flush_dcache_page(paddr, vaddr);
+        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
 
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
-        __dc_line_op(start, sz, OP_FLUSH_N_INV);
+        __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-        __dc_line_op(start, sz, OP_INV);
+        __dc_line_op_k(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-        __dc_line_op(start, sz, OP_FLUSH);
+        __dc_line_op_k(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
@@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 
         local_irq_save(flags);
         __ic_line_inv_vaddr(paddr, vaddr, len);
-        __dc_line_op(paddr, len, OP_FLUSH);
+        __dc_line_op(paddr, vaddr, len, OP_FLUSH);
         local_irq_restore(flags);
 }
 
@@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
         __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
 
-void __flush_dcache_page(unsigned long paddr)
+/*
+ * wrapper to clearout kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
 {
-        __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
+        __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 void flush_icache_all(void)
@@ -601,6 +655,87 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+        flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+                      unsigned long pfn)
+{
+        unsigned int paddr = pfn << PAGE_SHIFT;
+
+        __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end)
+{
+        flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+                        unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+        void *kfrom = page_address(from);
+        void *kto = page_address(to);
+        int clean_src_k_mappings = 0;
+
+        /*
+         * If SRC page was already mapped in userspace AND it's U-mapping is
+         * not congruent with K-mapping, sync former to physical page so that
+         * K-mapping in memcpy below, sees the right data
+         *
+         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+         * equally valid for SRC page as well
+         */
+        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+                __flush_dcache_page(kfrom, u_vaddr);
+                clean_src_k_mappings = 1;
+        }
+
+        copy_page(kto, kfrom);
+
+        /*
+         * Mark DST page K-mapping as dirty for a later finalization by
+         * update_mmu_cache(). Although the finalization could have been done
+         * here as well (given that both vaddr/paddr are available).
+         * But update_mmu_cache() already has code to do that for other
+         * non copied user pages (e.g. read faults which wire in pagecache page
+         * directly).
+         */
+        set_bit(PG_arch_1, &to->flags);
+
+        /*
+         * if SRC was already usermapped and non-congruent to kernel mapping
+         * sync the kernel mapping back to physical page
+         */
+        if (clean_src_k_mappings) {
+                __flush_dcache_page(kfrom, kfrom);
+        } else {
+                set_bit(PG_arch_1, &from->flags);
+        }
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+        clear_page(to);
+        set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                     unsigned long u_vaddr)
+{
+        /* TBD: do we really need to clear the kernel mapping */
+        __flush_dcache_page(page_address(page), u_vaddr);
+        __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
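
The flush_dcache_page() rework keeps ARC's lazy-flush protocol: when the flush can be deferred, the page is only marked dirty via PG_arch_1, and update_mmu_cache() (see the tlb.c hunk below) finalizes the write-back plus invalidate once a user mapping is actually wired in. A compressed user-space sketch of that two-step protocol (the flag and helpers are stand-ins, not the kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    struct fake_page { bool k_mapping_dirty; };     /* stands in for PG_arch_1 */

    /* kernel wrote through its own mapping: defer, just remember it is dirty */
    static void mark_kmap_dirty(struct fake_page *pg)
    {
            pg->k_mapping_dirty = true;
    }

    /* a user mapping is being wired in: finalize the deferred wback+inv */
    static void finalize_on_fault(struct fake_page *pg)
    {
            if (pg->k_mapping_dirty) {
                    printf("wback+inv the kernel mapping now\n");
                    pg->k_mapping_dirty = false;
            }
    }

    int main(void)
    {
            struct fake_page pg = { false };

            mark_kmap_dirty(&pg);   /* e.g. flush_dcache_page() on a pagecache page */
            finalize_on_fault(&pg); /* e.g. update_mmu_cache() after the page fault */
            return 0;
    }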
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 000000000000..2e06d56e987b
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,78 @@
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff)                       \
+        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +      \
+         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.
+ * We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+        struct mm_struct *mm = current->mm;
+        struct vm_area_struct *vma;
+        int do_align = 0;
+        int aliasing = cache_is_vipt_aliasing();
+        struct vm_unmapped_area_info info;
+
+        /*
+         * We only need to do colour alignment if D cache aliases.
+         */
+        if (aliasing)
+                do_align = filp || (flags & MAP_SHARED);
+
+        /*
+         * We enforce the MAP_FIXED case.
+         */
+        if (flags & MAP_FIXED) {
+                if (aliasing && flags & MAP_SHARED &&
+                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+                        return -EINVAL;
+                return addr;
+        }
+
+        if (len > TASK_SIZE)
+                return -ENOMEM;
+
+        if (addr) {
+                if (do_align)
+                        addr = COLOUR_ALIGN(addr, pgoff);
+                else
+                        addr = PAGE_ALIGN(addr);
+
+                vma = find_vma(mm, addr);
+                if (TASK_SIZE - len >= addr &&
+                    (!vma || addr + len <= vma->vm_start))
+                        return addr;
+        }
+
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = mm->mmap_base;
+        info.high_limit = TASK_SIZE;
+        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        return vm_unmapped_area(&info);
+}
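
COLOUR_ALIGN() rounds the mmap hint up to an SHMLBA boundary and then adds the file offset's colour, so a given page of an object always lands on the same colour regardless of where it is mapped. A stand-alone sketch of the macro with assumed 4 KB pages (SHMLBA = 8 KB):

    #include <stdio.h>

    #define PAGE_SHIFT  12                  /* assumed */
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define SHMLBA      (2 * PAGE_SIZE)

    #define COLOUR_ALIGN(addr, pgoff) \
            ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
             (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

    int main(void)
    {
            unsigned long hint  = 0x10001000;       /* unaligned mmap hint */
            unsigned long pgoff = 3;                /* odd file page -> colour 1 */

            /* rounds up to 0x10002000, then adds 0x1000 for the colour */
            printf("aligned to 0x%lx\n", COLOUR_ALIGN(hint, pgoff));
            return 0;
    }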
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 003d69ac6ffa..066145b5f348 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *      flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                       pte_t *ptep)
 {
         unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+        unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
         create_tlb(vma, vaddr, ptep);
 
-        /* icache doesn't snoop dcache, thus needs to be made coherent here */
-        if (vma->vm_flags & VM_EXEC) {
+        /*
+         * Exec page : Independent of aliasing/page-color considerations,
+         *             since icache doesn't snoop dcache on ARC, any dirty
+         *             K-mapping of a code page needs to be wback+inv so that
+         *             icache fetch by userspace sees code correctly.
+         * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+         *             so userspace sees the right data.
+         *  (Avoids the flush for Non-exec + congruent mapping case)
+         */
+        if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
                 struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-                /* if page was dcache dirty, flush now */
                 int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
                 if (dirty) {
-                        unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
-                        __flush_dcache_page(paddr);
-                        __inv_icache_page(paddr, vaddr);
+                        /* wback + inv dcache lines */
+                        __flush_dcache_page(paddr, paddr);
+
+                        /* invalidate any existing icache lines */
+                        if (vma->vm_flags & VM_EXEC)
+                                __inv_icache_page(paddr, vaddr);
                 }
         }
 }
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index 4e121272c4e5..1d3452100f1f 100644
--- a/arch/arc/plat-tb10x/Kconfig
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -27,10 +27,3 @@ menuconfig ARC_PLAT_TB10X
           Abilis Systems. TB10x is based on the ARC700 CPU architecture.
           Say Y if you are building a kernel for one of the SOCs in this
           series (e.g. TB100 or TB101). If in doubt say N.
-
-if ARC_PLAT_TB10X
-
-config GENERIC_GPIO
-        def_bool y
-
-endif