Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/Kbuild         |  1
-rw-r--r--  arch/arc/include/asm/cache.h        |  3
-rw-r--r--  arch/arc/include/asm/cacheflush.h   | 58
-rw-r--r--  arch/arc/include/asm/page.h         | 16
-rw-r--r--  arch/arc/include/asm/pgtable.h      |  3
-rw-r--r--  arch/arc/include/asm/shmparam.h     | 18
-rw-r--r--  arch/arc/include/asm/tlb.h          | 11
7 files changed, 94 insertions, 16 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742f8b5a..d8dd660898b9 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -32,7 +32,6 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273861fd..d5555fe4742a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -55,9 +55,6 @@
 		  : "r"(data), "r"(ptr));	\
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 /*
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index ee1f6eae82d2..9f841af41092 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
 
 /*
  * Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v)	\
+		___flush_dcache_page((unsigned long)p, (unsigned long)v)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
@@ -50,18 +53,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm)			/* called on fork */
+#define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define flush_cache_mm(mm)			/* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
 
+#else	/* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+	unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+	unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long u_vaddr);
+
+#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)			\
+	cache_is_vipt_aliasing() ?				\
+		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0	\
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
 	memcpy(dst, src, len); \
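
The CACHE_COLOR()/addr_not_cache_congruent() pair added above is the heart of the aliasing logic: two mappings of the same physical page only index the same cache sets when their page-number bits agree modulo the number of colours. A minimal stand-alone sketch of that arithmetic follows; PAGE_SHIFT = 13 (8 KB pages) and the two addresses are assumptions for illustration, this is not the kernel code itself.

/* Stand-alone illustration of CACHE_COLOR()/addr_not_cache_congruent().
 * PAGE_SHIFT and the sample addresses are assumptions for this example.
 */
#include <stdio.h>

#define PAGE_SHIFT	13	/* assume 8 KB pages */
#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> PAGE_SHIFT) & 3)

static int addr_not_cache_congruent(unsigned long a1, unsigned long a2)
{
	return CACHE_COLOR(a1) != CACHE_COLOR(a2);
}

int main(void)
{
	unsigned long kvaddr = 0x70004000;	/* hypothetical kernel mapping */
	unsigned long uvaddr = 0x2000a000;	/* hypothetical user mapping   */

	/* Different colours mean the two views live in different cache sets,
	 * so data written via one must be flushed before reading via the other.
	 */
	printf("colours %lu vs %lu -> %s\n",
	       CACHE_COLOR(kvaddr), CACHE_COLOR(uvaddr),
	       addr_not_cache_congruent(kvaddr, uvaddr) ?
			"not congruent, flush needed" : "congruent");
	return 0;
}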
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index bdf546104551..374a35514116 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -16,13 +16,27 @@
 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)	free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)
 
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define clear_user_page(addr, vaddr, pg)	clear_page(addr)
 #define copy_user_page(vto, vfrom, vaddr, pg)	copy_page(vto, vfrom)
 
+#else	/* VIPT aliasing dcache */
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
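
With an aliasing dcache, clear_user_page()/copy_user_highpage() can no longer be plain memset()/memcpy() through the kernel mapping: the data lands in cache lines indexed by the kernel address, which a differently coloured user mapping will not see. The following is only a rough sketch of the decision such helpers have to make; writeback_dcache_page() is a hypothetical stand-in, not the actual arch/arc/mm routine.

/* Sketch only: why a colour-aware clear_user_page() must write back.
 * PAGE_SHIFT/PAGE_SIZE assume 8 KB pages; writeback_dcache_page() is a
 * made-up placeholder for the real cache writeback op.
 */
#include <string.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define CACHE_COLOR(a)	(((unsigned long)(a) >> PAGE_SHIFT) & 3)

static void writeback_dcache_page(void *kaddr) { (void)kaddr; }

static void clear_user_page_sketch(void *kto, unsigned long u_vaddr)
{
	memset(kto, 0, PAGE_SIZE);	/* clear via the kernel mapping */

	/* The zeroes now sit at the kernel address's colour; if the user
	 * mapping has a different colour, push them out to memory first.
	 */
	if (CACHE_COLOR(kto) != CACHE_COLOR(u_vaddr))
		writeback_dcache_page(kto);
}

int main(void)
{
	static char page[PAGE_SIZE];	/* pretend kernel mapping of a page */

	clear_user_page_sketch(page, 0x2000a000);
	return 0;
}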
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e36684c091..1cc4720faccb 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
 */
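
Defining HAVE_ARCH_UNMAPPED_AREA lets the port supply its own arch_get_unmapped_area(), so mmap() of shared mappings can hand out addresses whose colour matches the file offset, the same trick SHMLBA plays for shmat(). The rounding step such a search typically performs looks roughly like the COLOUR_ALIGN() helper used by other aliasing-VIPT ports; the constants below (8 KB pages, SHMLBA of two pages) are assumptions and this is not necessarily the ARC implementation.

/* Illustrative colour-aligned rounding, in the style of COLOUR_ALIGN()
 * on other aliasing-VIPT architectures; constants are assumptions.
 */
#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(2 * PAGE_SIZE)

/* First address >= addr that is SHMLBA aligned and carries the colour
 * implied by the mapping's page offset into the file.
 */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + off;
}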
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 000000000000..fffeecc04270
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle upto 2 cache bins */
+#define	SHMLBA	(2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
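
With SHMLBA set to two pages and __ARCH_FORCE_SHMLBA defined, shmat() rounds attach addresses to SHMLBA, so every attach of a segment agrees in the colour bit SHMLBA controls and the aliasing dcache sees consistent data. A small stand-alone check of that property follows; the 8 KB page size and the sample addresses are assumptions for illustration.

/* Stand-alone check: SHMLBA-aligned addresses agree in the colour bit
 * that SHMLBA controls.  Page size and sample addresses are assumed.
 */
#include <assert.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(2 * PAGE_SIZE)
#define CACHE_COLOR(a)	(((unsigned long)(a) >> PAGE_SHIFT) & 3)

int main(void)
{
	unsigned long a = 0x20000000, b = 0x5c014000;	/* both SHMLBA aligned */

	assert((a % SHMLBA) == 0 && (b % SHMLBA) == 0);
	/* alignment zeroes the low colour bit, so the two attaches agree */
	assert((CACHE_COLOR(a) & 1) == (CACHE_COLOR(b) & 1));
	return 0;
}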
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index fe91719866a5..85b6df839bd7 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -30,13 +30,20 @@ do {		\
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
  * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)						\
+do {									\
+	if (!tlb->fullmm)						\
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
+} while(0)
+#endif
 
 #define tlb_end_vma(tlb, vma)		\
 do {		\