Diffstat (limited to 'arch/sh')
59 files changed, 1091 insertions, 1597 deletions
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 4924ff6f5439..46260fcbdf4b 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -21,25 +21,25 @@ static void __init check_bugs(void)
 
 	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
 
-	switch (current_cpu_data.type) {
-	case CPU_SH7619:
+	switch (current_cpu_data.family) {
+	case CPU_FAMILY_SH2:
 		*p++ = '2';
 		break;
-	case CPU_SH7201 ... CPU_MXG:
+	case CPU_FAMILY_SH2A:
 		*p++ = '2';
 		*p++ = 'a';
 		break;
-	case CPU_SH7705 ... CPU_SH7729:
+	case CPU_FAMILY_SH3:
 		*p++ = '3';
 		break;
-	case CPU_SH7750 ... CPU_SH4_501:
+	case CPU_FAMILY_SH4:
 		*p++ = '4';
 		break;
-	case CPU_SH7763 ... CPU_SHX3:
+	case CPU_FAMILY_SH4A:
 		*p++ = '4';
 		*p++ = 'a';
 		break;
-	case CPU_SH7343 ... CPU_SH7366:
+	case CPU_FAMILY_SH4AL_DSP:
 		*p++ = '4';
 		*p++ = 'a';
 		*p++ = 'l';
@@ -48,15 +48,15 @@ static void __init check_bugs(void)
 		*p++ = 's';
 		*p++ = 'p';
 		break;
-	case CPU_SH5_101 ... CPU_SH5_103:
+	case CPU_FAMILY_SH5:
 		*p++ = '6';
 		*p++ = '4';
 		break;
-	case CPU_SH_NONE:
+	case CPU_FAMILY_UNKNOWN:
 		/*
-		 * Specifically use CPU_SH_NONE rather than default:,
-		 * so we're able to have the compiler whine about
-		 * unhandled enumerations.
+		 * Specifically use CPU_FAMILY_UNKNOWN rather than
+		 * default:, so we're able to have the compiler whine
+		 * about unhandled enumerations.
 		 */
 		break;
 	}
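check_bugs() now keys on the coarse family tracked in cpuinfo instead of enumerating per-part ranges like CPU_SH7763 ... CPU_SHX3. A stand-alone sketch of the string each family yields; the enum mirrors the processor.h hunk further down, and the "-dsp" tail of the SH4AL-DSP case lies outside the visible hunk, so that return value is partly assumed:

#include <stdio.h>

/* Mirrors the enum cpu_family added in the processor.h hunk below. */
enum cpu_family {
	CPU_FAMILY_SH2, CPU_FAMILY_SH2A, CPU_FAMILY_SH3, CPU_FAMILY_SH4,
	CPU_FAMILY_SH4A, CPU_FAMILY_SH4AL_DSP, CPU_FAMILY_SH5,
	CPU_FAMILY_UNKNOWN,
};

static const char *family_string(enum cpu_family family)
{
	/* No default:, same trick as the kernel switch, so the compiler
	 * can warn about unhandled enumerations. */
	switch (family) {
	case CPU_FAMILY_SH2:		return "2";
	case CPU_FAMILY_SH2A:		return "2a";
	case CPU_FAMILY_SH3:		return "3";
	case CPU_FAMILY_SH4:		return "4";
	case CPU_FAMILY_SH4A:		return "4a";
	case CPU_FAMILY_SH4AL_DSP:	return "4al-dsp";	/* tail assumed */
	case CPU_FAMILY_SH5:		return "64";
	case CPU_FAMILY_UNKNOWN:	return "";
	}
	return "";
}

int main(void)
{
	printf("sh%s\n", family_string(CPU_FAMILY_SH4A));	/* sh4a */
	printf("sh%s\n", family_string(CPU_FAMILY_SH5));	/* sh64 */
	return 0;
}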
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 4c5462daa74c..11e416630585 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -3,45 +3,65 @@
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_CACHE_OFF
+#include <linux/mm.h>
+
 /*
- * Nothing to do when the cache is disabled, initial flush and explicit
- * disabling is handled at CPU init time.
+ * Cache flushing:
+ *
+ *  - flush_cache_all() flushes entire cache
+ *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup mm(mm) handles cache flushing when forking
+ *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
+ *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
- * See arch/sh/kernel/cpu/init.c:cache_init().
+ *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
+ *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
+ *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
+ *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  */
-#define p3_cache_init()				do { } while (0)
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
-#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
-#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
-#else
-#include <cpu/cacheflush.h>
+extern void (*local_flush_cache_all)(void *args);
+extern void (*local_flush_cache_mm)(void *args);
+extern void (*local_flush_cache_dup_mm)(void *args);
+extern void (*local_flush_cache_page)(void *args);
+extern void (*local_flush_cache_range)(void *args);
+extern void (*local_flush_dcache_page)(void *args);
+extern void (*local_flush_icache_range)(void *args);
+extern void (*local_flush_icache_page)(void *args);
+extern void (*local_flush_cache_sigtramp)(void *args);
 
-/*
- * Consistent DMA requires that the __flush_xxx() primitives must be set
- * for any of the enabled non-coherent caches (most of the UP CPUs),
- * regardless of PIPT or VIPT cache configurations.
- */
+static inline void cache_noop(void *args) { }
+
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
+
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_dup_mm(struct mm_struct *mm);
+extern void flush_cache_page(struct vm_area_struct *vma,
+			     unsigned long addr, unsigned long pfn);
+extern void flush_cache_range(struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end);
+extern void flush_dcache_page(struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma,
+			      struct page *page);
+extern void flush_cache_sigtramp(unsigned long address);
+
+struct flusher_data {
+	struct vm_area_struct *vma;
+	unsigned long addr1, addr2;
+};
 
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-#endif
+#define ARCH_HAS_FLUSH_ANON_PAGE
+extern void __flush_anon_page(struct page *page, unsigned long);
+
+static inline void flush_anon_page(struct vm_area_struct *vma,
+				   struct page *page, unsigned long vmaddr)
+{
+	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
+		__flush_anon_page(page, vmaddr);
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
@@ -49,7 +69,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
 	flush_dcache_page(page);
 }
 
-#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_CACHE_OFF)
 extern void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len);
@@ -57,23 +76,20 @@ extern void copy_to_user_page(struct vm_area_struct *vma,
 extern void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len);
-#else
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
-	do {							\
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len);				\
-		flush_icache_user_range(vma, page, vaddr, len);	\
-	} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
-	do {							\
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len);				\
-	} while (0)
-#endif
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+void kmap_coherent_init(void);
+void *kmap_coherent(struct page *page, unsigned long addr);
+void kunmap_coherent(void);
+
+#define PG_dcache_dirty	PG_arch_1
+
+void cpu_cache_init(void);
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
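All of the flushing entry points above now funnel through per-family function pointers; the code that points them at real implementations (arch/sh/mm/cache.c) is not part of this view, so the following is only a user-space model of the dispatch pattern, with flusher_data cut down to the two addresses:

#include <stdio.h>

struct flusher_data {
	unsigned long addr1, addr2;	/* the real struct also carries a vma */
};

static void cache_noop(void *args) { (void)args; }

static void sh4_flush_icache_range(void *args)	/* illustrative flusher */
{
	struct flusher_data *data = args;
	printf("wback+invalidate %#lx-%#lx\n", data->addr1, data->addr2);
}

/* One pointer per operation, defaulted to a no-op. */
static void (*local_flush_icache_range)(void *args) = cache_noop;

static void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data = { .addr1 = start, .addr2 = end };

	local_flush_icache_range(&data);
}

int main(void)
{
	flush_icache_range(0x1000, 0x1020);	/* no-op, SH-2 style */
	local_flush_icache_range = sh4_flush_icache_range; /* probe override */
	flush_icache_range(0x1000, 0x1020);	/* now does real work */
	return 0;
}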
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 67d8946db193..41080b173a7a 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -69,7 +69,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 		 * We exhaust ASID of this version.
 		 * Flush all TLB and start new cycle.
 		 */
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 #ifdef CONFIG_SUPERH64
 		/*
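On SMP this one-liner matters: ASID versions are per CPU, so when one CPU exhausts its 256 ASIDs only its own TLB has to start a new cycle; broadcasting the flush to every core is wasted work. A rough user-space model of the wrap test (8-bit ASIDs are real SH; the constants and layout are otherwise illustrative):

#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((1UL << ASID_BITS) - 1)	/* low bits: ASID */

static unsigned long asid_cache = 1UL << ASID_BITS;	/* version 1, ASID 0 */

static unsigned long new_context(void)
{
	if (!(++asid_cache & ASID_MASK)) {
		/* ASIDs of this version are exhausted; the increment has
		 * already carried into the version bits, so flush this
		 * CPU's TLB and start handing out the new version.  This
		 * is where the hunk above calls local_flush_tlb_all(). */
		printf("local_flush_tlb_all() at %#lx\n", asid_cache);
	}
	return asid_cache;
}

int main(void)
{
	for (int i = 0; i < 600; i++)
		new_context();		/* prints on every 256th context */
	return 0;
}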
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 49592c780a6e..81bffc0d6860 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -50,26 +50,24 @@ extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
-extern void clear_page(void *to);
+static inline unsigned long
+pages_do_alias(unsigned long addr1, unsigned long addr2)
+{
+	return (addr1 ^ addr2) & shm_align_mask;
+}
+
+
+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
-	(defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
-	 defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 struct vm_area_struct;
-extern void clear_user_page(void *to, unsigned long address, struct page *page);
-extern void copy_user_page(void *to, void *from, unsigned long address,
-			   struct page *page);
-#if defined(CONFIG_CPU_SH4)
+
 extern void copy_user_highpage(struct page *to, struct page *from,
 			       unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
-#endif
-#else
-#define clear_user_page(page, vaddr, pg)	clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-#endif
+extern void clear_user_highpage(struct page *page, unsigned long vaddr);
+#define clear_user_highpage	clear_user_highpage
 
 /*
  * These are used to make use of C type-checking..
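pages_do_alias() becomes the single test for virtual-alias conflicts: two mappings collide in a virtually indexed cache when their addresses differ inside shm_align_mask, which the init.c hunk below derives from the dcache way size. A quick check with an assumed 16KiB way:

#include <assert.h>

static unsigned long shm_align_mask = 0x3fff;	/* assumed: 16KiB way - 1 */

static unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

int main(void)
{
	/* Same index bits within the way: both mappings hit the same
	 * cache lines, so no aliasing problem. */
	assert(pages_do_alias(0x10001000UL, 0x2000d000UL) == 0);

	/* Index bits differ: the same physical page would live in two
	 * different cache lines, i.e. the mappings alias. */
	assert(pages_do_alias(0x10001000UL, 0x10002000UL) != 0);
	return 0;
}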
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 2a011b18090b..4f3efa7d5a64 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -36,6 +36,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define NEFF_SIGN	(1LL << (NEFF - 1))
 #define NEFF_MASK	(-1LL << NEFF)
 
+static inline unsigned long long neff_sign_extend(unsigned long val)
+{
+	unsigned long long extended = val;
+	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
+}
+
 #ifdef CONFIG_29BIT
 #define NPHYS		29
 #else
@@ -133,27 +139,25 @@ typedef pte_t *pte_addr_t;
  */
 #define pgtable_cache_init()	do { } while (0)
 
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-	defined(CONFIG_SH7705_CACHE_32KB))
-struct mm_struct;
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct * vma,
-			     unsigned long address, pte_t pte);
+
+extern void __update_cache(struct vm_area_struct *vma,
+			   unsigned long address, pte_t pte);
+extern void __update_tlb(struct vm_area_struct *vma,
+			 unsigned long address, pte_t pte);
+
+static inline void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	__update_cache(vma, address, pte);
+	__update_tlb(vma, address, pte);
+}
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 extern void page_table_range_init(unsigned long start, unsigned long end,
 				  pgd_t *pgd);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_CPU_SH4) && defined(CONFIG_MMU)
-extern void kmap_coherent_init(void);
-#else
-#define kmap_coherent_init()	do { } while (0)
-#endif
-
 /* arch/sh/mm/mmap.c */
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
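neff_sign_extend() centralizes the NEFF ternary that signal_64.c and process_64.c below previously open-coded: bit NEFF-1 of an SH-5 effective address must be replicated through the upper register bits. A worked check, assuming NEFF = 32:

#include <assert.h>

#define NEFF		32		/* assumed value for this example */
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

static unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

int main(void)
{
	/* Bit 31 clear: the value passes through unchanged. */
	assert(neff_sign_extend(0x7fffff00UL) == 0x000000007fffff00ULL);
	/* Bit 31 set: the upper 32 bits become all ones. */
	assert(neff_sign_extend(0xa0001000UL) == 0xffffffffa0001000ULL);
	return 0;
}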
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 9d87868bc53d..017e0c1807b2 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -44,6 +44,17 @@ enum cpu_type {
 	CPU_SH_NONE
 };
 
+enum cpu_family {
+	CPU_FAMILY_SH2,
+	CPU_FAMILY_SH2A,
+	CPU_FAMILY_SH3,
+	CPU_FAMILY_SH4,
+	CPU_FAMILY_SH4A,
+	CPU_FAMILY_SH4AL_DSP,
+	CPU_FAMILY_SH5,
+	CPU_FAMILY_UNKNOWN,
+};
+
 /*
  * TLB information structure
  *
@@ -61,7 +72,7 @@ struct tlb_info {
 };
 
 struct sh_cpuinfo {
-	unsigned int type;
+	unsigned int type, family;
 	int cut_major, cut_minor;
 	unsigned long loops_per_jiffy;
 	unsigned long asid_cache;
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index f9e2ceb94d9b..6b272238a46e 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -14,18 +14,6 @@
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
-#define __icbi()			\
-{					\
-	unsigned long __addr;		\
-	__addr = 0xa8000000;		\
-	__asm__ __volatile__(		\
-		"icbi %0\n\t"		\
-		: /* no output */	\
-		: "m" (__m(__addr)));	\
-}
-#endif
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *
@@ -44,7 +32,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
+#define ctrl_barrier()	__icbi(0xa8000000)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index d3ab269386bb..607d413f6168 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -63,6 +63,16 @@ do {							\
 #define __restore_dsp(tsk)	do { } while (0)
 #endif
 
+#if defined(CONFIG_CPU_SH4A)
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
+#else
+#define __icbi(addr)	mb()
+#endif
+
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
+
 struct task_struct *__switch_to(struct task_struct *prev,
 				struct task_struct *next);
 
@@ -198,6 +208,11 @@ do {							\
 })
 #endif
 
+static inline reg_size_t register_align(void *val)
+{
+	return (unsigned long)(signed long)val;
+}
+
 int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			    struct mem_access *ma, int);
 
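These macros map one cache line onto one instruction: ocbwb writes a dirty line back, ocbi invalidates without write-back, and ocbp purges (write-back plus invalidate). The __flush_*_region() implementations behind the cacheflush.h function pointers walk regions a line at a time in exactly this shape; a sketch only, assuming 32-byte L1 lines and not any specific kernel routine:

/* Sketch: write back every line covering an arbitrary region, built
 * from __ocbwb() above, assuming L1_CACHE_BYTES == 32 on SH-4 parts. */
static inline void wback_region_sketch(void *start, int size)
{
	unsigned long v = (unsigned long)start & ~31UL;	/* line-align down */
	unsigned long end = (unsigned long)start + size;

	for (; v < end; v += 32)
		__ocbwb(v);	/* write this line back, keep it valid */
}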
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 943acf5ea07c..8e4a03e7966c 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -37,4 +37,14 @@ do {							\
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
+
+static inline reg_size_t register_align(void *val)
+{
+	return (unsigned long long)(signed long long)(signed long)val;
+}
+
 #endif /* __ASM_SH_SYSTEM_64_H */
diff --git a/arch/sh/include/asm/types.h b/arch/sh/include/asm/types.h
index c7f3c94837dd..f8421f7ad63a 100644
--- a/arch/sh/include/asm/types.h
+++ b/arch/sh/include/asm/types.h
@@ -11,8 +11,10 @@
 
 #ifdef CONFIG_SUPERH32
 typedef u16 insn_size_t;
+typedef u32 reg_size_t;
 #else
 typedef u32 insn_size_t;
+typedef u64 reg_size_t;
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/sh/include/cpu-common/cpu/cacheflush.h b/arch/sh/include/cpu-common/cpu/cacheflush.h
deleted file mode 100644
index c3db00b73605..000000000000
--- a/arch/sh/include/cpu-common/cpu/cacheflush.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * include/asm-sh/cpu-sh2/cacheflush.h
- *
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH2_CACHEFLUSH_H
-#define __ASM_CPU_SH2_CACHEFLUSH_H
-
-/*
- * Cache flushing:
- *
- *  - flush_cache_all() flushes entire cache
- *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
- *  - flush_cache_dup mm(mm) handles cache flushing when forking
- *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
- *  - flush_cache_range(vma, start, end) flushes a range of pages
- *
- *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
- *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
- *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
- *
- * Caches are indexed (effectively) by physical address on SH-2, so
- * we don't need them.
- */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
-#endif /* __ASM_CPU_SH2_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh2a/cpu/cacheflush.h b/arch/sh/include/cpu-sh2a/cpu/cacheflush.h
deleted file mode 100644
index 3d3b9205d2ac..000000000000
--- a/arch/sh/include/cpu-sh2a/cpu/cacheflush.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_CPU_SH2A_CACHEFLUSH_H
-#define __ASM_CPU_SH2A_CACHEFLUSH_H
-
-/*
- * Cache flushing:
- *
- *  - flush_cache_all() flushes entire cache
- *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
- *  - flush_cache_dup mm(mm) handles cache flushing when forking
- *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
- *  - flush_cache_range(vma, start, end) flushes a range of pages
- *
- *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
- *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
- *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
- *
- * Caches are indexed (effectively) by physical address on SH-2, so
- * we don't need them.
- */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-void flush_icache_range(unsigned long start, unsigned long end);
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-#endif /* __ASM_CPU_SH2A_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
deleted file mode 100644
index 1ac27aae6700..000000000000
--- a/arch/sh/include/cpu-sh3/cpu/cacheflush.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * include/asm-sh/cpu-sh3/cacheflush.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
-#define __ASM_CPU_SH3_CACHEFLUSH_H
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
-/* SH7705 is an SH3 processor with 32KB cache. This has alias issues like the
- * SH4. Unlike the SH4 this is a unified cache so we need to do some work
- * in mmap when 'exec'ing a new binary
- */
-/* 32KB cache, 4kb PAGE sizes need to check bit 12 */
-#define CACHE_ALIAS 0x00001000
-
-#define PG_mapped	PG_arch_1
-
-void flush_cache_all(void);
-void flush_cache_mm(struct mm_struct *mm);
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-void flush_dcache_page(struct page *pg);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-
-#define p3_cache_init()				do { } while (0)
-
-#else
-#include <cpu-common/cpu/cacheflush.h>
-#endif
-
-#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
deleted file mode 100644
index 065306d376eb..000000000000
--- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * include/asm-sh/cpu-sh4/cacheflush.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
-#define __ASM_CPU_SH4_CACHEFLUSH_H
-
-/*
- *  Caches are broken on SH-4 (unless we use write-through
- *  caching; in which case they're only semi-broken),
- *  so we need them.
- */
-void flush_cache_all(void);
-void flush_dcache_all(void);
-void flush_cache_mm(struct mm_struct *mm);
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
-		      unsigned long pfn);
-void flush_dcache_page(struct page *pg);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long addr, int len);
-
-#define flush_icache_page(vma,pg)	do { } while (0)
-
-/* Initialization of P3 area for copy_user_page */
-void p3_cache_init(void);
-
-#define PG_mapped	PG_arch_1
-
-#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/cacheflush.h b/arch/sh/include/cpu-sh5/cpu/cacheflush.h
deleted file mode 100644
index 5a11f0b7e66a..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/cacheflush.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __ASM_SH_CPU_SH5_CACHEFLUSH_H
-#define __ASM_SH_CPU_SH5_CACHEFLUSH_H
-
-#ifndef __ASSEMBLY__
-
-struct vm_area_struct;
-struct page;
-struct mm_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_sigtramp(unsigned long vaddr);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
-
-#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-#define flush_icache_page(vma, page)	do { } while (0)
-void p3_cache_init(void);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */
-
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index d40b9db5be03..e932ebef4738 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -299,11 +299,9 @@ asmlinkage void __init sh_cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
-#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
-#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 5916d9096b99..1db6d8883888 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -29,6 +29,7 @@ int __init detect_cpu_and_cache_system(void)
 	 */
 	boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
 	boot_cpu_data.icache = boot_cpu_data.dcache;
+	boot_cpu_data.family = CPU_FAMILY_SH2;
 
 	return 0;
 }
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index e098e2f6aa08..6825d6507164 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -15,6 +15,8 @@
 
 int __init detect_cpu_and_cache_system(void)
 {
+	boot_cpu_data.family = CPU_FAMILY_SH2A;
+
 	/* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
 	boot_cpu_data.flags |= CPU_HAS_OP32;
 
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 9421ec715fd2..aebd33d18ff7 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -113,35 +113,34 @@ OFF_TRA = (16*4+6*4)
 #if defined(CONFIG_MMU)
 	.align	2
 ENTRY(tlb_miss_load)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#0, r5
 
 	.align	2
 ENTRY(tlb_miss_store)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#1, r5
 
 	.align	2
 ENTRY(initial_page_write)
-	bra	call_dpf
-	 mov	#1, r5
+	bra	call_handle_tlbmiss
+	 mov	#2, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_load)
-	bra	call_dpf
+	bra	call_do_page_fault
 	 mov	#0, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_store)
-	bra	call_dpf
+	bra	call_do_page_fault
 	 mov	#1, r5
 
-call_dpf:
+call_handle_tlbmiss:
 	setup_frame_reg
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
-	mov	r6, r9
 	mov.l	2f, r0
 	sts	pr, r10
 	jsr	@r0
@@ -152,16 +151,25 @@ call_dpf:
 	 lds	r10, pr
 	rts
 	 nop
-0:	mov.l	3f, r0
-	mov	r9, r6
+0:
 	mov	r8, r5
+call_do_page_fault:
+	mov.l	1f, r0
+	mov.l	@r0, r6
+
+	sti
+
+	mov.l	3f, r0
+	mov.l	4f, r1
+	mov	r15, r4
 	jmp	@r0
-	 mov	r15, r4
+	 lds	r1, pr
 
 	.align 2
 1:	.long	MMU_TEA
-2:	.long	__do_page_fault
+2:	.long	handle_tlbmiss
 3:	.long	do_page_fault
+4:	.long	ret_from_exception
 
 	.align	2
 ENTRY(address_error_load)
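The old call_dpf stub served both TLB misses and protection violations; it is now split so that misses take a minimal path into handle_tlbmiss() (with initial page writes distinguished by error code 2 instead of 1), while protection faults enable interrupts (the new sti) and head straight to do_page_fault(), returning through ret_from_exception via the pr load. A C paraphrase of the dispatch; the signatures are inferred from the register usage above (r4 = regs, r5 = error code, r6 = address from MMU_TEA) and should be treated as assumptions, not the exact kernel API:

/* Paraphrase only: the real dispatch is the SH-3 assembly above. */
struct pt_regs;
extern int handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
			  unsigned long address);		/* assumed */
extern void do_page_fault(struct pt_regs *regs, unsigned long error_code,
			  unsigned long address);		/* assumed */
extern void local_irq_enable(void);				/* assumed */

enum sh3_fault {
	TLB_MISS_LOAD, TLB_MISS_STORE, INITIAL_PAGE_WRITE,
	PROT_VIOLATION_LOAD, PROT_VIOLATION_STORE,
};

static void fault_dispatch(enum sh3_fault fault, struct pt_regs *regs,
			   unsigned long address)
{
	unsigned long error_code;

	switch (fault) {
	case TLB_MISS_LOAD:	 error_code = 0; break;
	case TLB_MISS_STORE:	 error_code = 1; break;
	case INITIAL_PAGE_WRITE: error_code = 2; break;	/* was 1 before */
	default:
		error_code = (fault == PROT_VIOLATION_STORE);
		goto slow;
	}

	if (handle_tlbmiss(regs, error_code, address) == 0)
		return;			/* fast path refilled the TLB */
slow:
	local_irq_enable();		/* the new sti */
	do_page_fault(regs, error_code, address);
	/* the assembly then returns through ret_from_exception */
}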
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 10f2a760c5ee..f9c7df64eb01 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -107,5 +107,7 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
 	boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
 	boot_cpu_data.icache = boot_cpu_data.dcache;
 
+	boot_cpu_data.family = CPU_FAMILY_SH3;
+
 	return 0;
 }
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index afd3e73d5ad4..d36f0c45f55f 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -57,8 +57,12 @@ int __init detect_cpu_and_cache_system(void)
 	 * Setup some generic flags we can probe on SH-4A parts
 	 */
 	if (((pvr >> 16) & 0xff) == 0x10) {
-		if ((cvr & 0x10000000) == 0)
+		boot_cpu_data.family = CPU_FAMILY_SH4A;
+
+		if ((cvr & 0x10000000) == 0) {
 			boot_cpu_data.flags |= CPU_HAS_DSP;
+			boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP;
+		}
 
 		boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER;
 		boot_cpu_data.cut_major = pvr & 0x7f;
@@ -68,6 +72,7 @@ int __init detect_cpu_and_cache_system(void)
 	} else {
 		/* And some SH-4 defaults.. */
 		boot_cpu_data.flags |= CPU_HAS_PTEA;
+		boot_cpu_data.family = CPU_FAMILY_SH4;
 	}
 
 	/* FPU detection works for everyone */
@@ -180,9 +185,6 @@ int __init detect_cpu_and_cache_system(void)
 		boot_cpu_data.dcache.ways = 2;
 
 		break;
-	default:
-		boot_cpu_data.type = CPU_SH_NONE;
-		break;
 	}
 
 	/*
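Family classification piggybacks on the existing version-register decode: a PVR version field of 0x10 marks an SH-4A core, and a clear CVR bit 28 means the FPU slot holds a DSP instead, making it SH4AL-DSP; everything else takes the SH-4 default branch. As a stand-alone decoder (the pvr/cvr sample values are made up, not dumps from real silicon):

#include <stdio.h>

enum family { SH4, SH4A, SH4AL_DSP };

static enum family classify(unsigned int pvr, unsigned int cvr)
{
	if (((pvr >> 16) & 0xff) == 0x10)	/* SH-4A version field */
		return (cvr & 0x10000000) ? SH4A : SH4AL_DSP;
	return SH4;	/* mirrors the else branch in the hunk */
}

int main(void)
{
	printf("%d\n", classify(0x00100000u, 0x10000000u));	/* SH4A */
	printf("%d\n", classify(0x00100000u, 0x00000000u));	/* SH4AL_DSP */
	printf("%d\n", classify(0x00040000u, 0x00000000u));	/* SH4 */
	return 0;
}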
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 2b6b0d50c576..185ec3976a25 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -57,6 +57,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 {
 	int i;
 
+	local_timer_setup(0);
+
 	BUILD_BUG_ON(SMP_MSG_NR >= 8);
 
 	for (i = 0; i < SMP_MSG_NR; i++)
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 92ad844b5c12..521d05b3f7ba 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -34,6 +34,8 @@ int __init detect_cpu_and_cache_system(void)
 	/* CPU.VCR aliased at CIR address on SH5-101 */
 	boot_cpu_data.type = CPU_SH5_101;
 
+	boot_cpu_data.family = CPU_FAMILY_SH5;
+
 	/*
 	 * First, setup some sane values for the I-cache.
 	 */
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 96e8eaea1e62..0b04e7d4a9b9 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -22,6 +22,7 @@
 #include <linux/jiffies.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
+#include <linux/hardirq.h>
 #include <linux/irq.h>
 
 static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
@@ -33,7 +34,9 @@ void local_timer_interrupt(void)
 {
 	struct clock_event_device *clk = &__get_cpu_var(local_clockevent);
 
+	irq_enter();
 	clk->event_handler(clk);
+	irq_exit();
 }
 
 static void dummy_timer_set_mode(enum clock_event_mode mode,
@@ -46,8 +49,10 @@ void __cpuinit local_timer_setup(unsigned int cpu)
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
 
 	clk->name		= "dummy_timer";
-	clk->features		= CLOCK_EVT_FEAT_DUMMY;
-	clk->rating		= 200;
+	clk->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_DUMMY;
+	clk->rating		= 400;
 	clk->mult		= 1;
 	clk->set_mode		= dummy_timer_set_mode;
 	clk->broadcast		= smp_timer_broadcast;
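Two fixes share this file: the broadcast tick arrives over an IPI path that bypasses the regular interrupt entry code, so the handler must do its own hardirq bookkeeping or softirqs raised by the tick would never run; and the dummy device now claims oneshot and periodic modes with a rating of 400 so the clockevents core adopts it per CPU and relays real events through the broadcast mechanism. The bracketing pattern, annotated (comments are mine, the code is the hunk's):

void local_timer_interrupt(void)
{
	struct clock_event_device *clk = &__get_cpu_var(local_clockevent);

	irq_enter();			/* enter hardirq context: accounting,
					   preempt_count() adjustment */
	clk->event_handler(clk);	/* may raise timer softirqs, RCU work */
	irq_exit();			/* if outermost, run pending softirqs */
}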
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 24de74214940..1192398ef582 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -425,7 +425,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 		struct task_struct *p, struct pt_regs *regs)
 {
 	struct pt_regs *childregs;
-	unsigned long long se;			/* Sign extension */
 
 #ifdef CONFIG_SH_FPU
 	if(last_task_used_math == current) {
@@ -441,11 +440,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	*childregs = *regs;
 
+	/*
+	 * Sign extend the edited stack.
+	 * Note that thread.pc and thread.pc will stay
+	 * 32-bit wide and context switch must take care
+	 * of NEFF sign extension.
+	 */
 	if (user_mode(regs)) {
-		childregs->regs[15] = usp;
+		childregs->regs[15] = neff_sign_extend(usp);
 		p->thread.uregs = childregs;
 	} else {
-		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+		childregs->regs[15] =
+			neff_sign_extend((unsigned long)task_stack_page(p) +
+				THREAD_SIZE);
 	}
 
 	childregs->regs[9] = 0; /* Set return value for child */
@@ -454,17 +461,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;
 
-	/*
-	 * Sign extend the edited stack.
-	 * Note that thread.pc and thread.pc will stay
-	 * 32-bit wide and context switch must take care
-	 * of NEFF sign extension.
-	 */
-
-	se = childregs->regs[15];
-	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
-	childregs->regs[15] = se;
-
 	return 0;
 }
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index d13bbafb4e1b..f9d44f8e0df6 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -49,6 +49,7 @@
 struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
 	[0] = {
 		.type			= CPU_SH_NONE,
+		.family			= CPU_FAMILY_UNKNOWN,
 		.loops_per_jiffy	= 10000000,
 	},
 };
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index cec610888e28..8dbe26b17c44 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -101,11 +101,6 @@ EXPORT_SYMBOL(flush_cache_range);
 EXPORT_SYMBOL(flush_dcache_page);
 #endif
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
-	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
 #ifdef CONFIG_MCOUNT
 DECLARE_EXPORT(mcount);
 #endif
@@ -114,7 +109,6 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
 #ifdef CONFIG_IPV6
 EXPORT_SYMBOL(csum_ipv6_magic);
 #endif
-EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(_ebss);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index f5bd156ea504..d008e17eb257 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(kernel_thread);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_VT
 EXPORT_SYMBOL(screen_info);
 #endif
@@ -52,7 +44,6 @@ EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_q);
 EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 0663a0ee6021..026fd1cfe17d 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -561,13 +561,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
-
 		/*
 		 * On SH5 all edited pointers are subject to NEFF
 		 */
-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+		DEREF_REG_PR = neff_sign_extend((unsigned long)
+			ka->sa.sa_restorer | 0x1);
 	} else {
 		/*
 		 * Different approach on SH5.
@@ -580,9 +578,8 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 		 * . being code, linker turns ShMedia bit on, always
 		 *   dereference index -1.
 		 */
-		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+		DEREF_REG_PR = neff_sign_extend((unsigned long)
+			frame->retcode | 0x01);
 
 		if (__copy_to_user(frame->retcode,
 			(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
@@ -596,9 +593,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	 * Set up registers for signal handler.
 	 * All edited pointers are subject to NEFF.
 	 */
-	regs->regs[REG_SP] = (unsigned long) frame;
-	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
-		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+	regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
 	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
 
 	/* FIXME:
@@ -613,8 +608,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
 	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
 
-	regs->pc = (unsigned long) ka->sa.sa_handler;
-	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+	regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
 
 	set_fs(USER_DS);
 
@@ -676,13 +670,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
-
 		/*
 		 * On SH5 all edited pointers are subject to NEFF
 		 */
-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+		DEREF_REG_PR = neff_sign_extend((unsigned long)
+			ka->sa.sa_restorer | 0x1);
 	} else {
 		/*
 		 * Different approach on SH5.
@@ -695,15 +687,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		 * . being code, linker turns ShMedia bit on, always
 		 *   dereference index -1.
 		 */
-
-		DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
-		DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
-			(DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+		DEREF_REG_PR = neff_sign_extend((unsigned long)
+			frame->retcode | 0x01);
 
 		if (__copy_to_user(frame->retcode,
 			(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
 			goto give_sigsegv;
 
+		/* Cohere the trampoline with the I-cache. */
 		flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
 	}
 
@@ -711,14 +702,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 * Set up registers for signal handler.
 	 * All edited pointers are subject to NEFF.
 	 */
-	regs->regs[REG_SP] = (unsigned long) frame;
-	regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
-		 (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+	regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
 	regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
 	regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
 	regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
-	regs->pc = (unsigned long) ka->sa.sa_handler;
-	regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+	regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
 
 	set_fs(USER_DS);
 
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 7f95f479060f..632aff533285 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -119,9 +119,5 @@ void __init time_init(void)
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-	local_timer_setup(smp_processor_id());
-#endif
-
 	late_time_init = sh_late_time_init;
 }
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index c2b28d8b2dd1..a969b47c5463 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -23,7 +23,7 @@ obj-y += io.o
 memcpy-y			:= memcpy.o
 memcpy-$(CONFIG_CPU_SH4)	:= memcpy-sh4.o
 
-lib-$(CONFIG_MMU)		+= copy_page.o clear_page.o
+lib-$(CONFIG_MMU)		+= copy_page.o __clear_user.o
 lib-$(CONFIG_MCOUNT)		+= mcount.o
 lib-y				+= $(memcpy-y) $(udivsi3-y)
 
diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/__clear_user.S index c92244d4ff9d..db1dca7aad14 100644 --- a/arch/sh/lib/clear_page.S +++ b/arch/sh/lib/__clear_user.S | |||
@@ -8,52 +8,6 @@ | |||
8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | 10 | ||
11 | /* | ||
12 | * clear_page | ||
13 | * @to: P1 address | ||
14 | * | ||
15 | * void clear_page(void *to) | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * r0 --- scratch | ||
20 | * r4 --- to | ||
21 | * r5 --- to + PAGE_SIZE | ||
22 | */ | ||
23 | ENTRY(clear_page) | ||
24 | mov r4,r5 | ||
25 | mov.l .Llimit,r0 | ||
26 | add r0,r5 | ||
27 | mov #0,r0 | ||
28 | ! | ||
29 | 1: | ||
30 | #if defined(CONFIG_CPU_SH4) | ||
31 | movca.l r0,@r4 | ||
32 | mov r4,r1 | ||
33 | #else | ||
34 | mov.l r0,@r4 | ||
35 | #endif | ||
36 | add #32,r4 | ||
37 | mov.l r0,@-r4 | ||
38 | mov.l r0,@-r4 | ||
39 | mov.l r0,@-r4 | ||
40 | mov.l r0,@-r4 | ||
41 | mov.l r0,@-r4 | ||
42 | mov.l r0,@-r4 | ||
43 | mov.l r0,@-r4 | ||
44 | #if defined(CONFIG_CPU_SH4) | ||
45 | ocbwb @r1 | ||
46 | #endif | ||
47 | cmp/eq r5,r4 | ||
48 | bf/s 1b | ||
49 | add #28,r4 | ||
50 | ! | ||
51 | rts | ||
52 | nop | ||
53 | |||
54 | .balign 4 | ||
55 | .Llimit: .long (PAGE_SIZE-28) | ||
56 | |||
57 | ENTRY(__clear_user) | 11 | ENTRY(__clear_user) |
58 | ! | 12 | ! |
59 | mov #0, r0 | 13 | mov #0, r0 |
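With clear_page() dropped from this file, only __clear_user remains. For reference, the contract it implements is the standard kernel one (not spelled out in this hunk):

	/* Standard kernel contract for __clear_user: zero n bytes at a
	 * user-space address, returning the number of bytes that could
	 * NOT be cleared (0 on full success). */
	unsigned long __clear_user(void __user *to, unsigned long n);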
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile index 334bb2da36ea..1fee75aa1f98 100644 --- a/arch/sh/lib64/Makefile +++ b/arch/sh/lib64/Makefile | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | # Panic should really be compiled as PIC | 12 | # Panic should really be compiled as PIC |
13 | lib-y := udelay.o dbg.o panic.o memcpy.o memset.o \ | 13 | lib-y := udelay.o dbg.o panic.o memcpy.o memset.o \ |
14 | copy_user_memcpy.o copy_page.o clear_page.o strcpy.o strlen.o | 14 | copy_user_memcpy.o copy_page.o strcpy.o strlen.o |
15 | 15 | ||
16 | # Extracted from libgcc | 16 | # Extracted from libgcc |
17 | lib-y += udivsi3.o udivdi3.o sdivsi3.o | 17 | lib-y += udivsi3.o udivdi3.o sdivsi3.o |
diff --git a/arch/sh/lib64/clear_page.S b/arch/sh/lib64/clear_page.S deleted file mode 100644 index 007ab48ecc1c..000000000000 --- a/arch/sh/lib64/clear_page.S +++ /dev/null | |||
@@ -1,54 +0,0 @@ | |||
1 | /* | ||
2 | Copyright 2003 Richard Curnow, SuperH (UK) Ltd. | ||
3 | |||
4 | This file is subject to the terms and conditions of the GNU General Public | ||
5 | License. See the file "COPYING" in the main directory of this archive | ||
6 | for more details. | ||
7 | |||
8 | Tight version of memset for the case of just clearing a page. It turns out | ||
9 | that having the alloco's spaced out slightly due to the increment/branch | ||
10 | pair causes them to contend less for access to the cache. Similarly, | ||
11 | keeping the stores apart from the allocos causes less contention. => Do two | ||
12 | separate loops. Do multiple stores per loop to amortise the | ||
13 | increment/branch cost a little. | ||
14 | |||
15 | Parameters: | ||
16 | r2 : source effective address (start of page) | ||
17 | |||
18 | Always clears 4096 bytes. | ||
19 | |||
20 | Note: alloco guarded by synco to avoid TAKum03020 erratum | ||
21 | |||
22 | */ | ||
23 | |||
24 | .section .text..SHmedia32,"ax" | ||
25 | .little | ||
26 | |||
27 | .balign 8 | ||
28 | .global clear_page | ||
29 | clear_page: | ||
30 | pta/l 1f, tr1 | ||
31 | pta/l 2f, tr2 | ||
32 | ptabs/l r18, tr0 | ||
33 | |||
34 | movi 4096, r7 | ||
35 | add r2, r7, r7 | ||
36 | add r2, r63, r6 | ||
37 | 1: | ||
38 | alloco r6, 0 | ||
39 | synco ! TAKum03020 | ||
40 | addi r6, 32, r6 | ||
41 | bgt/l r7, r6, tr1 | ||
42 | |||
43 | add r2, r63, r6 | ||
44 | 2: | ||
45 | st.q r6, 0, r63 | ||
46 | st.q r6, 8, r63 | ||
47 | st.q r6, 16, r63 | ||
48 | st.q r6, 24, r63 | ||
49 | addi r6, 32, r6 | ||
50 | bgt/l r7, r6, tr2 | ||
51 | |||
52 | blink tr0, r63 | ||
53 | |||
54 | |||
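The header comment of the deleted file describes the two-loop strategy: allocate the cache lines first, then store to them, so the allocos and the stores do not contend. A rough C rendering, for illustration only (alloco/synco have no C equivalent, so a prefetch-for-write stands in; the names here are hypothetical):

	#include <string.h>

	#define SH5_PAGE_SIZE	4096
	#define SH5_LINE_BYTES	32

	static void clear_page_sketch(void *to)
	{
		char *p;
		char *end = (char *)to + SH5_PAGE_SIZE;

		/* Loop 1: allocate each cache line up front
		 * (alloco + synco in the SHmedia original). */
		for (p = to; p < end; p += SH5_LINE_BYTES)
			__builtin_prefetch(p, 1);

		/* Loop 2: zero the lines; the original does four
		 * quadword stores (st.q) per 32-byte line. */
		for (p = to; p < end; p += SH5_LINE_BYTES)
			memset(p, 0, SH5_LINE_BYTES);
	}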
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 9f4bc3d90b1e..3759bf853293 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile | |||
@@ -1,5 +1,65 @@ | |||
1 | ifeq ($(CONFIG_SUPERH32),y) | 1 | # |
2 | include ${srctree}/arch/sh/mm/Makefile_32 | 2 | # Makefile for the Linux SuperH-specific parts of the memory manager. |
3 | else | 3 | # |
4 | include ${srctree}/arch/sh/mm/Makefile_64 | 4 | |
5 | obj-y := cache.o init.o consistent.o mmap.o | ||
6 | |||
7 | cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o | ||
8 | cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o | ||
9 | cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o | ||
10 | cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o | ||
11 | cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o | ||
12 | cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o | ||
13 | |||
14 | obj-y += $(cacheops-y) | ||
15 | |||
16 | mmu-y := nommu.o extable_32.o | ||
17 | mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ | ||
18 | ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o | ||
19 | |||
20 | obj-y += $(mmu-y) | ||
21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | ||
22 | |||
23 | ifdef CONFIG_DEBUG_FS | ||
24 | obj-$(CONFIG_CPU_SH4) += cache-debugfs.o | ||
5 | endif | 25 | endif |
26 | |||
27 | ifdef CONFIG_MMU | ||
28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o | ||
29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o | ||
30 | tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o | ||
31 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o | ||
32 | obj-y += $(tlb-y) | ||
33 | endif | ||
34 | |||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
36 | obj-$(CONFIG_PMB) += pmb.o | ||
37 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
38 | obj-$(CONFIG_NUMA) += numa.o | ||
39 | |||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | ||
41 | # caller-save registers that the compiler can target when building this file. | ||
42 | # This is required because the code is called from a context in entry.S where | ||
43 | # very few registers have been saved in the exception handler (for speed | ||
44 | # reasons). | ||
45 | # The caller save registers that have been saved and which can be used are | ||
46 | # r2,r3,r4,r5 : argument passing | ||
47 | # r15, r18 : SP and LINK | ||
48 | # tr0-4 : allow all caller-save TR's. The compiler seems to be able to make | ||
49 | # use of them, so it's probably beneficial to performance to save them | ||
50 | # and have them available for it. | ||
51 | # | ||
52 | # The resources not listed below are callee save, i.e. the compiler is free to | ||
53 | # use any of them and will spill them to the stack itself. | ||
54 | |||
55 | CFLAGS_fault_64.o += -ffixed-r7 \ | ||
56 | -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \ | ||
57 | -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \ | ||
58 | -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \ | ||
59 | -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \ | ||
60 | -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \ | ||
61 | -ffixed-r41 -ffixed-r42 -ffixed-r43 \ | ||
62 | -ffixed-r60 -ffixed-r61 -ffixed-r62 \ | ||
63 | -fomit-frame-pointer | ||
64 | |||
65 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 deleted file mode 100644 index 986a1e055834..000000000000 --- a/arch/sh/mm/Makefile_32 +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o extable_32.o consistent.o mmap.o | ||
6 | |||
7 | ifndef CONFIG_CACHE_OFF | ||
8 | cache-$(CONFIG_CPU_SH2) := cache-sh2.o | ||
9 | cache-$(CONFIG_CPU_SH2A) := cache-sh2a.o | ||
10 | cache-$(CONFIG_CPU_SH3) := cache-sh3.o | ||
11 | cache-$(CONFIG_CPU_SH4) := cache-sh4.o | ||
12 | cache-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o | ||
13 | endif | ||
14 | |||
15 | obj-y += $(cache-y) | ||
16 | |||
17 | mmu-y := tlb-nommu.o pg-nommu.o | ||
18 | mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o | ||
19 | |||
20 | obj-y += $(mmu-y) | ||
21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | ||
22 | |||
23 | ifdef CONFIG_DEBUG_FS | ||
24 | obj-$(CONFIG_CPU_SH4) += cache-debugfs.o | ||
25 | endif | ||
26 | |||
27 | ifdef CONFIG_MMU | ||
28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o | ||
29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o | ||
30 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o | ||
31 | obj-y += $(tlb-y) | ||
32 | ifndef CONFIG_CACHE_OFF | ||
33 | obj-$(CONFIG_CPU_SH4) += pg-sh4.o | ||
34 | obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o | ||
35 | endif | ||
36 | endif | ||
37 | |||
38 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
39 | obj-$(CONFIG_PMB) += pmb.o | ||
40 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
41 | obj-$(CONFIG_NUMA) += numa.o | ||
42 | |||
43 | EXTRA_CFLAGS += -Werror | ||
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64 deleted file mode 100644 index 2863ffb7006d..000000000000 --- a/arch/sh/mm/Makefile_64 +++ /dev/null | |||
@@ -1,46 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o consistent.o mmap.o | ||
6 | |||
7 | mmu-y := tlb-nommu.o pg-nommu.o extable_32.o | ||
8 | mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \ | ||
9 | extable_64.o | ||
10 | |||
11 | ifndef CONFIG_CACHE_OFF | ||
12 | obj-y += cache-sh5.o | ||
13 | endif | ||
14 | |||
15 | obj-y += $(mmu-y) | ||
16 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | ||
17 | |||
18 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
19 | obj-$(CONFIG_NUMA) += numa.o | ||
20 | |||
21 | EXTRA_CFLAGS += -Werror | ||
22 | |||
23 | # Special flags for fault_64.o. This puts restrictions on the number of | ||
24 | # caller-save registers that the compiler can target when building this file. | ||
25 | # This is required because the code is called from a context in entry.S where | ||
26 | # very few registers have been saved in the exception handler (for speed | ||
27 | # reasons). | ||
28 | # The caller save registers that have been saved and which can be used are | ||
29 | # r2,r3,r4,r5 : argument passing | ||
30 | # r15, r18 : SP and LINK | ||
31 | # tr0-4 : allow all caller-save TR's. The compiler seems to be able to make | ||
32 | # use of them, so it's probably beneficial to performance to save them | ||
33 | # and have them available for it. | ||
34 | # | ||
35 | # The resources not listed below are callee save, i.e. the compiler is free to | ||
36 | # use any of them and will spill them to the stack itself. | ||
37 | |||
38 | CFLAGS_fault_64.o += -ffixed-r7 \ | ||
39 | -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \ | ||
40 | -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \ | ||
41 | -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \ | ||
42 | -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \ | ||
43 | -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \ | ||
44 | -ffixed-r41 -ffixed-r42 -ffixed-r43 \ | ||
45 | -ffixed-r60 -ffixed-r61 -ffixed-r62 \ | ||
46 | -fomit-frame-pointer | ||
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c index c4e80d2b764b..699a71f46327 100644 --- a/arch/sh/mm/cache-sh2.c +++ b/arch/sh/mm/cache-sh2.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/cacheflush.h> | 16 | #include <asm/cacheflush.h> |
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | 18 | ||
19 | void __flush_wback_region(void *start, int size) | 19 | static void sh2__flush_wback_region(void *start, int size) |
20 | { | 20 | { |
21 | unsigned long v; | 21 | unsigned long v; |
22 | unsigned long begin, end; | 22 | unsigned long begin, end; |
@@ -37,7 +37,7 @@ void __flush_wback_region(void *start, int size) | |||
37 | } | 37 | } |
38 | } | 38 | } |
39 | 39 | ||
40 | void __flush_purge_region(void *start, int size) | 40 | static void sh2__flush_purge_region(void *start, int size) |
41 | { | 41 | { |
42 | unsigned long v; | 42 | unsigned long v; |
43 | unsigned long begin, end; | 43 | unsigned long begin, end; |
@@ -51,7 +51,7 @@ void __flush_purge_region(void *start, int size) | |||
51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
52 | } | 52 | } |
53 | 53 | ||
54 | void __flush_invalidate_region(void *start, int size) | 54 | static void sh2__flush_invalidate_region(void *start, int size) |
55 | { | 55 | { |
56 | #ifdef CONFIG_CACHE_WRITEBACK | 56 | #ifdef CONFIG_CACHE_WRITEBACK |
57 | /* | 57 | /* |
@@ -82,3 +82,10 @@ void __flush_invalidate_region(void *start, int size) | |||
82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
83 | #endif | 83 | #endif |
84 | } | 84 | } |
85 | |||
86 | void __init sh2_cache_init(void) | ||
87 | { | ||
88 | __flush_wback_region = sh2__flush_wback_region; | ||
89 | __flush_purge_region = sh2__flush_purge_region; | ||
90 | __flush_invalidate_region = sh2__flush_invalidate_region; | ||
91 | } | ||
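The new sh2_cache_init() installs the SH-2 routines into per-family hooks instead of exporting them as global symbols. The hook declarations and the flusher_data argument block are introduced elsewhere in this series; the following sketch is inferred from how they are used in this diff:

	/* Argument block handed to the local_flush_* callbacks
	 * (layout inferred from the accesses in this diff). */
	struct flusher_data {
		struct vm_area_struct *vma;
		unsigned long addr1, addr2;
	};

	/* Per-family hooks, filled in by the shN_cache_init()
	 * routines at boot: */
	extern void (*local_flush_icache_range)(void *args);
	extern void (*local_flush_dcache_page)(void *arg);
	extern void (*local_flush_cache_all)(void *unused);
	extern void (*__flush_wback_region)(void *start, int size);
	extern void (*__flush_purge_region)(void *start, int size);
	extern void (*__flush_invalidate_region)(void *start, int size);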
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c index 24d86a794065..d783361e3f0a 100644 --- a/arch/sh/mm/cache-sh2a.c +++ b/arch/sh/mm/cache-sh2a.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/io.h> | 16 | #include <asm/io.h> |
17 | 17 | ||
18 | void __flush_wback_region(void *start, int size) | 18 | static void sh2a__flush_wback_region(void *start, int size) |
19 | { | 19 | { |
20 | unsigned long v; | 20 | unsigned long v; |
21 | unsigned long begin, end; | 21 | unsigned long begin, end; |
@@ -44,7 +44,7 @@ void __flush_wback_region(void *start, int size) | |||
44 | local_irq_restore(flags); | 44 | local_irq_restore(flags); |
45 | } | 45 | } |
46 | 46 | ||
47 | void __flush_purge_region(void *start, int size) | 47 | static void sh2a__flush_purge_region(void *start, int size) |
48 | { | 48 | { |
49 | unsigned long v; | 49 | unsigned long v; |
50 | unsigned long begin, end; | 50 | unsigned long begin, end; |
@@ -65,7 +65,7 @@ void __flush_purge_region(void *start, int size) | |||
65 | local_irq_restore(flags); | 65 | local_irq_restore(flags); |
66 | } | 66 | } |
67 | 67 | ||
68 | void __flush_invalidate_region(void *start, int size) | 68 | static void sh2a__flush_invalidate_region(void *start, int size) |
69 | { | 69 | { |
70 | unsigned long v; | 70 | unsigned long v; |
71 | unsigned long begin, end; | 71 | unsigned long begin, end; |
@@ -97,15 +97,15 @@ void __flush_invalidate_region(void *start, int size) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | /* WBack O-Cache and flush I-Cache */ | 99 | /* WBack O-Cache and flush I-Cache */ |
100 | void flush_icache_range(unsigned long start, unsigned long end) | 100 | static void sh2a_flush_icache_range(void *args) |
101 | { | 101 | { |
102 | struct flusher_data *data = args; | ||
103 | unsigned long start, end; | ||
102 | unsigned long v; | 104 | unsigned long v; |
103 | unsigned long flags; | ||
104 | 105 | ||
105 | start = start & ~(L1_CACHE_BYTES-1); | 106 | start = data->addr1 & ~(L1_CACHE_BYTES-1); |
106 | end = (end + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); | 107 | end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1); |
107 | 108 | ||
108 | local_irq_save(flags); | ||
109 | jump_to_uncached(); | 109 | jump_to_uncached(); |
110 | 110 | ||
111 | for (v = start; v < end; v+=L1_CACHE_BYTES) { | 111 | for (v = start; v < end; v+=L1_CACHE_BYTES) { |
@@ -120,10 +120,17 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
120 | } | 120 | } |
121 | } | 121 | } |
122 | /* I-Cache invalidate */ | 122 | /* I-Cache invalidate */ |
123 | ctrl_outl(addr, | 123 | ctrl_outl(addr, CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); |
124 | CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); | ||
125 | } | 124 | } |
126 | 125 | ||
127 | back_to_cached(); | 126 | back_to_cached(); |
128 | local_irq_restore(flags); | 127 | } |
128 | |||
129 | void __init sh2a_cache_init(void) | ||
130 | { | ||
131 | local_flush_icache_range = sh2a_flush_icache_range; | ||
132 | |||
133 | __flush_wback_region = sh2a__flush_wback_region; | ||
134 | __flush_purge_region = sh2a__flush_purge_region; | ||
135 | __flush_invalidate_region = sh2a__flush_invalidate_region; | ||
129 | } | 136 | } |
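With sh2a_flush_icache_range() now static and taking a void * argument block, the old flush_icache_range() entry point is presumably a generic wrapper (added elsewhere in the series) that packs the arguments and dispatches through the hook, e.g.:

	/* Hypothetical generic wrapper; cacheop_on_each_cpu() is assumed
	 * here -- on UP it would reduce to a direct call. */
	void flush_icache_range(unsigned long start, unsigned long end)
	{
		struct flusher_data data;

		data.vma = NULL;
		data.addr1 = start;
		data.addr2 = end;

		cacheop_on_each_cpu(local_flush_icache_range, &data, 1);
	}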
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c index 6d1dbec08ad4..faef80c98134 100644 --- a/arch/sh/mm/cache-sh3.c +++ b/arch/sh/mm/cache-sh3.c | |||
@@ -32,7 +32,7 @@ | |||
32 | * SIZE: Size of the region. | 32 | * SIZE: Size of the region. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | void __flush_wback_region(void *start, int size) | 35 | static void sh3__flush_wback_region(void *start, int size) |
36 | { | 36 | { |
37 | unsigned long v, j; | 37 | unsigned long v, j; |
38 | unsigned long begin, end; | 38 | unsigned long begin, end; |
@@ -71,7 +71,7 @@ void __flush_wback_region(void *start, int size) | |||
71 | * START: Virtual Address (U0, P1, or P3) | 71 | * START: Virtual Address (U0, P1, or P3) |
72 | * SIZE: Size of the region. | 72 | * SIZE: Size of the region. |
73 | */ | 73 | */ |
74 | void __flush_purge_region(void *start, int size) | 74 | static void sh3__flush_purge_region(void *start, int size) |
75 | { | 75 | { |
76 | unsigned long v; | 76 | unsigned long v; |
77 | unsigned long begin, end; | 77 | unsigned long begin, end; |
@@ -90,11 +90,16 @@ void __flush_purge_region(void *start, int size) | |||
90 | } | 90 | } |
91 | } | 91 | } |
92 | 92 | ||
93 | /* | 93 | void __init sh3_cache_init(void) |
94 | * No write back please | 94 | { |
95 | * | 95 | __flush_wback_region = sh3__flush_wback_region; |
96 | * Except I don't think there's any way to avoid the writeback. So we | 96 | __flush_purge_region = sh3__flush_purge_region; |
97 | * just alias it to __flush_purge_region(). dwmw2. | 97 | |
98 | */ | 98 | /* |
99 | void __flush_invalidate_region(void *start, int size) | 99 | * No write back please |
100 | __attribute__((alias("__flush_purge_region"))); | 100 | * |
101 | * Except I don't think there's any way to avoid the writeback. | ||
102 | * So we just alias it to sh3__flush_purge_region(). dwmw2. | ||
103 | */ | ||
104 | __flush_invalidate_region = sh3__flush_purge_region; | ||
105 | } | ||
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index b36a9c986a58..70fb906419dd 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/fs.h> | ||
17 | #include <asm/mmu_context.h> | 18 | #include <asm/mmu_context.h> |
18 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
19 | 20 | ||
@@ -25,14 +26,6 @@ | |||
25 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ | 26 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ |
26 | #define MAX_ICACHE_PAGES 32 | 27 | #define MAX_ICACHE_PAGES 32 |
27 | 28 | ||
28 | static void __flush_dcache_segment_writethrough(unsigned long start, | ||
29 | unsigned long extent); | ||
30 | static void __flush_dcache_segment_1way(unsigned long start, | ||
31 | unsigned long extent); | ||
32 | static void __flush_dcache_segment_2way(unsigned long start, | ||
33 | unsigned long extent); | ||
34 | static void __flush_dcache_segment_4way(unsigned long start, | ||
35 | unsigned long extent); | ||
36 | static void __flush_cache_4096(unsigned long addr, unsigned long phys, | 29 | static void __flush_cache_4096(unsigned long addr, unsigned long phys, |
37 | unsigned long exec_offset); | 30 | unsigned long exec_offset); |
38 | 31 | ||
@@ -44,196 +37,55 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys, | |||
44 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = | 37 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = |
45 | (void (*)(unsigned long, unsigned long))0xdeadbeef; | 38 | (void (*)(unsigned long, unsigned long))0xdeadbeef; |
46 | 39 | ||
47 | static void compute_alias(struct cache_info *c) | ||
48 | { | ||
49 | c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); | ||
50 | c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; | ||
51 | } | ||
52 | |||
53 | static void __init emit_cache_params(void) | ||
54 | { | ||
55 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | ||
56 | ctrl_inl(CCN_PVR), | ||
57 | ctrl_inl(CCN_CVR), | ||
58 | ctrl_inl(CCN_PRR)); | ||
59 | printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
60 | boot_cpu_data.icache.ways, | ||
61 | boot_cpu_data.icache.sets, | ||
62 | boot_cpu_data.icache.way_incr); | ||
63 | printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
64 | boot_cpu_data.icache.entry_mask, | ||
65 | boot_cpu_data.icache.alias_mask, | ||
66 | boot_cpu_data.icache.n_aliases); | ||
67 | printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
68 | boot_cpu_data.dcache.ways, | ||
69 | boot_cpu_data.dcache.sets, | ||
70 | boot_cpu_data.dcache.way_incr); | ||
71 | printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
72 | boot_cpu_data.dcache.entry_mask, | ||
73 | boot_cpu_data.dcache.alias_mask, | ||
74 | boot_cpu_data.dcache.n_aliases); | ||
75 | |||
76 | /* | ||
77 | * Emit Secondary Cache parameters if the CPU has a probed L2. | ||
78 | */ | ||
79 | if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { | ||
80 | printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
81 | boot_cpu_data.scache.ways, | ||
82 | boot_cpu_data.scache.sets, | ||
83 | boot_cpu_data.scache.way_incr); | ||
84 | printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
85 | boot_cpu_data.scache.entry_mask, | ||
86 | boot_cpu_data.scache.alias_mask, | ||
87 | boot_cpu_data.scache.n_aliases); | ||
88 | } | ||
89 | |||
90 | if (!__flush_dcache_segment_fn) | ||
91 | panic("unknown number of cache ways\n"); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * SH-4 has virtually indexed and physically tagged cache. | ||
96 | */ | ||
97 | void __init p3_cache_init(void) | ||
98 | { | ||
99 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
100 | |||
101 | compute_alias(&boot_cpu_data.icache); | ||
102 | compute_alias(&boot_cpu_data.dcache); | ||
103 | compute_alias(&boot_cpu_data.scache); | ||
104 | |||
105 | if (wt_enabled) { | ||
106 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
107 | goto out; | ||
108 | } | ||
109 | |||
110 | switch (boot_cpu_data.dcache.ways) { | ||
111 | case 1: | ||
112 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
113 | break; | ||
114 | case 2: | ||
115 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
116 | break; | ||
117 | case 4: | ||
118 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
119 | break; | ||
120 | default: | ||
121 | __flush_dcache_segment_fn = NULL; | ||
122 | break; | ||
123 | } | ||
124 | |||
125 | out: | ||
126 | emit_cache_params(); | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Write back the dirty D-caches, but not invalidate them. | ||
131 | * | ||
132 | * START: Virtual Address (U0, P1, or P3) | ||
133 | * SIZE: Size of the region. | ||
134 | */ | ||
135 | void __flush_wback_region(void *start, int size) | ||
136 | { | ||
137 | unsigned long v; | ||
138 | unsigned long begin, end; | ||
139 | |||
140 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
141 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
142 | & ~(L1_CACHE_BYTES-1); | ||
143 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
144 | asm volatile("ocbwb %0" | ||
145 | : /* no output */ | ||
146 | : "m" (__m(v))); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Write back the dirty D-caches and invalidate them. | ||
152 | * | ||
153 | * START: Virtual Address (U0, P1, or P3) | ||
154 | * SIZE: Size of the region. | ||
155 | */ | ||
156 | void __flush_purge_region(void *start, int size) | ||
157 | { | ||
158 | unsigned long v; | ||
159 | unsigned long begin, end; | ||
160 | |||
161 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
162 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
163 | & ~(L1_CACHE_BYTES-1); | ||
164 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
165 | asm volatile("ocbp %0" | ||
166 | : /* no output */ | ||
167 | : "m" (__m(v))); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * No write back please | ||
173 | */ | ||
174 | void __flush_invalidate_region(void *start, int size) | ||
175 | { | ||
176 | unsigned long v; | ||
177 | unsigned long begin, end; | ||
178 | |||
179 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
180 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
181 | & ~(L1_CACHE_BYTES-1); | ||
182 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
183 | asm volatile("ocbi %0" | ||
184 | : /* no output */ | ||
185 | : "m" (__m(v))); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /* | 40 | /* |
190 | * Write back the range of D-cache, and purge the I-cache. | 41 | * Write back the range of D-cache, and purge the I-cache. |
191 | * | 42 | * |
192 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | 43 | * Called from kernel/module.c:sys_init_module and routine for a.out format, |
193 | * signal handler code and kprobes code | 44 | * signal handler code and kprobes code |
194 | */ | 45 | */ |
195 | void flush_icache_range(unsigned long start, unsigned long end) | 46 | static void sh4_flush_icache_range(void *args) |
196 | { | 47 | { |
48 | struct flusher_data *data = args; | ||
197 | int icacheaddr; | 49 | int icacheaddr; |
198 | unsigned long flags, v; | 50 | unsigned long start, end; |
51 | unsigned long v; | ||
199 | int i; | 52 | int i; |
200 | 53 | ||
201 | /* If there are too many pages then just blow the caches */ | 54 | start = data->addr1; |
202 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { | 55 | end = data->addr2; |
203 | flush_cache_all(); | 56 | |
204 | } else { | 57 | /* If there are too many pages then just blow the caches */ |
205 | /* selectively flush d-cache then invalidate the i-cache */ | 58 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { |
206 | /* this is inefficient, so only use for small ranges */ | 59 | local_flush_cache_all(args); |
207 | start &= ~(L1_CACHE_BYTES-1); | 60 | } else { |
208 | end += L1_CACHE_BYTES-1; | 61 | /* selectively flush d-cache then invalidate the i-cache */ |
209 | end &= ~(L1_CACHE_BYTES-1); | 62 | /* this is inefficient, so only use for small ranges */ |
210 | 63 | start &= ~(L1_CACHE_BYTES-1); | |
211 | local_irq_save(flags); | 64 | end += L1_CACHE_BYTES-1; |
212 | jump_to_uncached(); | 65 | end &= ~(L1_CACHE_BYTES-1); |
213 | 66 | ||
214 | for (v = start; v < end; v+=L1_CACHE_BYTES) { | 67 | jump_to_uncached(); |
215 | asm volatile("ocbwb %0" | 68 | |
216 | : /* no output */ | 69 | for (v = start; v < end; v+=L1_CACHE_BYTES) { |
217 | : "m" (__m(v))); | 70 | __ocbwb(v); |
218 | 71 | ||
219 | icacheaddr = CACHE_IC_ADDRESS_ARRAY | ( | 72 | icacheaddr = CACHE_IC_ADDRESS_ARRAY | |
220 | v & cpu_data->icache.entry_mask); | 73 | (v & cpu_data->icache.entry_mask); |
221 | 74 | ||
222 | for (i = 0; i < cpu_data->icache.ways; | 75 | for (i = 0; i < cpu_data->icache.ways; |
223 | i++, icacheaddr += cpu_data->icache.way_incr) | 76 | i++, icacheaddr += cpu_data->icache.way_incr) |
224 | /* Clear i-cache line valid-bit */ | 77 | /* Clear i-cache line valid-bit */ |
225 | ctrl_outl(0, icacheaddr); | 78 | ctrl_outl(0, icacheaddr); |
226 | } | 79 | } |
227 | 80 | ||
228 | back_to_cached(); | 81 | back_to_cached(); |
229 | local_irq_restore(flags); | ||
230 | } | 82 | } |
231 | } | 83 | } |
232 | 84 | ||
233 | static inline void flush_cache_4096(unsigned long start, | 85 | static inline void flush_cache_4096(unsigned long start, |
234 | unsigned long phys) | 86 | unsigned long phys) |
235 | { | 87 | { |
236 | unsigned long flags, exec_offset = 0; | 88 | unsigned long exec_offset = 0; |
237 | 89 | ||
238 | /* | 90 | /* |
239 | * All types of SH-4 require PC to be in P2 to operate on the I-cache. | 91 | * All types of SH-4 require PC to be in P2 to operate on the I-cache. |
@@ -243,19 +95,25 @@ static inline void flush_cache_4096(unsigned long start, | |||
243 | (start < CACHE_OC_ADDRESS_ARRAY)) | 95 | (start < CACHE_OC_ADDRESS_ARRAY)) |
244 | exec_offset = 0x20000000; | 96 | exec_offset = 0x20000000; |
245 | 97 | ||
246 | local_irq_save(flags); | ||
247 | __flush_cache_4096(start | SH_CACHE_ASSOC, | 98 | __flush_cache_4096(start | SH_CACHE_ASSOC, |
248 | P1SEGADDR(phys), exec_offset); | 99 | P1SEGADDR(phys), exec_offset); |
249 | local_irq_restore(flags); | ||
250 | } | 100 | } |
251 | 101 | ||
252 | /* | 102 | /* |
253 | * Write back & invalidate the D-cache of the page. | 103 | * Write back & invalidate the D-cache of the page. |
254 | * (To avoid "alias" issues) | 104 | * (To avoid "alias" issues) |
255 | */ | 105 | */ |
256 | void flush_dcache_page(struct page *page) | 106 | static void sh4_flush_dcache_page(void *arg) |
257 | { | 107 | { |
258 | if (test_bit(PG_mapped, &page->flags)) { | 108 | struct page *page = arg; |
109 | #ifndef CONFIG_SMP | ||
110 | struct address_space *mapping = page_mapping(page); | ||
111 | |||
112 | if (mapping && !mapping_mapped(mapping)) | ||
113 | set_bit(PG_dcache_dirty, &page->flags); | ||
114 | else | ||
115 | #endif | ||
116 | { | ||
259 | unsigned long phys = PHYSADDR(page_address(page)); | 117 | unsigned long phys = PHYSADDR(page_address(page)); |
260 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | 118 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; |
261 | int i, n; | 119 | int i, n; |
@@ -272,9 +130,8 @@ void flush_dcache_page(struct page *page) | |||
272 | /* TODO: Selective icache invalidation through IC address array.. */ | 130 | /* TODO: Selective icache invalidation through IC address array.. */ |
273 | static void __uses_jump_to_uncached flush_icache_all(void) | 131 | static void __uses_jump_to_uncached flush_icache_all(void) |
274 | { | 132 | { |
275 | unsigned long flags, ccr; | 133 | unsigned long ccr; |
276 | 134 | ||
277 | local_irq_save(flags); | ||
278 | jump_to_uncached(); | 135 | jump_to_uncached(); |
279 | 136 | ||
280 | /* Flush I-cache */ | 137 | /* Flush I-cache */ |
@@ -286,18 +143,16 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
286 | * back_to_cached() will take care of the barrier for us, don't add | 143 | * back_to_cached() will take care of the barrier for us, don't add |
287 | * another one! | 144 | * another one! |
288 | */ | 145 | */ |
289 | |||
290 | back_to_cached(); | 146 | back_to_cached(); |
291 | local_irq_restore(flags); | ||
292 | } | 147 | } |
293 | 148 | ||
294 | void flush_dcache_all(void) | 149 | static inline void flush_dcache_all(void) |
295 | { | 150 | { |
296 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); | 151 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); |
297 | wmb(); | 152 | wmb(); |
298 | } | 153 | } |
299 | 154 | ||
300 | void flush_cache_all(void) | 155 | static void sh4_flush_cache_all(void *unused) |
301 | { | 156 | { |
302 | flush_dcache_all(); | 157 | flush_dcache_all(); |
303 | flush_icache_all(); | 158 | flush_icache_all(); |
@@ -389,8 +244,13 @@ loop_exit: | |||
389 | * | 244 | * |
390 | * Caller takes mm->mmap_sem. | 245 | * Caller takes mm->mmap_sem. |
391 | */ | 246 | */ |
392 | void flush_cache_mm(struct mm_struct *mm) | 247 | static void sh4_flush_cache_mm(void *arg) |
393 | { | 248 | { |
249 | struct mm_struct *mm = arg; | ||
250 | |||
251 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) | ||
252 | return; | ||
253 | |||
394 | /* | 254 | /* |
395 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | 255 | * If cache is only 4k-per-way, there are never any 'aliases'. Since |
396 | * the cache is physically tagged, the data can just be left in there. | 256 | * the cache is physically tagged, the data can just be left in there. |
@@ -426,12 +286,21 @@ void flush_cache_mm(struct mm_struct *mm) | |||
426 | * ADDR: Virtual Address (U0 address) | 286 | * ADDR: Virtual Address (U0 address) |
427 | * PFN: Physical page number | 287 | * PFN: Physical page number |
428 | */ | 288 | */ |
429 | void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | 289 | static void sh4_flush_cache_page(void *args) |
430 | unsigned long pfn) | ||
431 | { | 290 | { |
432 | unsigned long phys = pfn << PAGE_SHIFT; | 291 | struct flusher_data *data = args; |
292 | struct vm_area_struct *vma; | ||
293 | unsigned long address, pfn, phys; | ||
433 | unsigned int alias_mask; | 294 | unsigned int alias_mask; |
434 | 295 | ||
296 | vma = data->vma; | ||
297 | address = data->addr1; | ||
298 | pfn = data->addr2; | ||
299 | phys = pfn << PAGE_SHIFT; | ||
300 | |||
301 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | ||
302 | return; | ||
303 | |||
435 | alias_mask = boot_cpu_data.dcache.alias_mask; | 304 | alias_mask = boot_cpu_data.dcache.alias_mask; |
436 | 305 | ||
437 | /* We only need to flush D-cache when we have alias */ | 306 | /* We only need to flush D-cache when we have alias */ |
@@ -471,9 +340,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | |||
471 | * Flushing the cache lines for U0 only isn't enough. | 340 | * Flushing the cache lines for U0 only isn't enough. |
472 | * We need to flush for P1 too, which may contain aliases. | 341 | * We need to flush for P1 too, which may contain aliases. |
473 | */ | 342 | */ |
474 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 343 | static void sh4_flush_cache_range(void *args) |
475 | unsigned long end) | ||
476 | { | 344 | { |
345 | struct flusher_data *data = args; | ||
346 | struct vm_area_struct *vma; | ||
347 | unsigned long start, end; | ||
348 | |||
349 | vma = data->vma; | ||
350 | start = data->addr1; | ||
351 | end = data->addr2; | ||
352 | |||
353 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | ||
354 | return; | ||
355 | |||
477 | /* | 356 | /* |
478 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | 357 | * If cache is only 4k-per-way, there are never any 'aliases'. Since |
479 | * the cache is physically tagged, the data can just be left in there. | 358 | * the cache is physically tagged, the data can just be left in there. |
@@ -501,20 +380,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
501 | } | 380 | } |
502 | } | 381 | } |
503 | 382 | ||
504 | /* | ||
505 | * flush_icache_user_range | ||
506 | * @vma: VMA of the process | ||
507 | * @page: page | ||
508 | * @addr: U0 address | ||
509 | * @len: length of the range (< page size) | ||
510 | */ | ||
511 | void flush_icache_user_range(struct vm_area_struct *vma, | ||
512 | struct page *page, unsigned long addr, int len) | ||
513 | { | ||
514 | flush_cache_page(vma, addr, page_to_pfn(page)); | ||
515 | mb(); | ||
516 | } | ||
517 | |||
518 | /** | 383 | /** |
519 | * __flush_cache_4096 | 384 | * __flush_cache_4096 |
520 | * | 385 | * |
@@ -824,3 +689,47 @@ static void __flush_dcache_segment_4way(unsigned long start, | |||
824 | a3 += linesz; | 689 | a3 += linesz; |
825 | } while (a0 < a0e); | 690 | } while (a0 < a0e); |
826 | } | 691 | } |
692 | |||
693 | extern void __weak sh4__flush_region_init(void); | ||
694 | |||
695 | /* | ||
696 | * SH-4 has virtually indexed and physically tagged cache. | ||
697 | */ | ||
698 | void __init sh4_cache_init(void) | ||
699 | { | ||
700 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
701 | |||
702 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | ||
703 | ctrl_inl(CCN_PVR), | ||
704 | ctrl_inl(CCN_CVR), | ||
705 | ctrl_inl(CCN_PRR)); | ||
706 | |||
707 | if (wt_enabled) | ||
708 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
709 | else { | ||
710 | switch (boot_cpu_data.dcache.ways) { | ||
711 | case 1: | ||
712 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
713 | break; | ||
714 | case 2: | ||
715 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
716 | break; | ||
717 | case 4: | ||
718 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
719 | break; | ||
720 | default: | ||
721 | panic("unknown number of cache ways\n"); | ||
722 | break; | ||
723 | } | ||
724 | } | ||
725 | |||
726 | local_flush_icache_range = sh4_flush_icache_range; | ||
727 | local_flush_dcache_page = sh4_flush_dcache_page; | ||
728 | local_flush_cache_all = sh4_flush_cache_all; | ||
729 | local_flush_cache_mm = sh4_flush_cache_mm; | ||
730 | local_flush_cache_dup_mm = sh4_flush_cache_mm; | ||
731 | local_flush_cache_page = sh4_flush_cache_page; | ||
732 | local_flush_cache_range = sh4_flush_cache_range; | ||
733 | |||
734 | sh4__flush_region_init(); | ||
735 | } | ||
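sh4_flush_dcache_page() now defers the flush for page-cache pages with no user mappings: it merely sets PG_dcache_dirty and lets the writeback happen once the page is actually mapped. The counterpart check is not in this diff; it would typically live in update_mmu_cache(), roughly as follows (a sketch under that assumption):

	/* Sketch: resolve a deferred dcache flush when a PTE for the
	 * page is installed. */
	void update_mmu_cache(struct vm_area_struct *vma,
			      unsigned long address, pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
				__flush_wback_region(page_address(page),
						     PAGE_SIZE);
		}
	}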
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index 86762092508c..2f9dd6df00a6 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c | |||
@@ -20,23 +20,11 @@ | |||
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | #include <asm/mmu_context.h> | 21 | #include <asm/mmu_context.h> |
22 | 22 | ||
23 | extern void __weak sh4__flush_region_init(void); | ||
24 | |||
23 | /* Wired TLB entry for the D-cache */ | 25 | /* Wired TLB entry for the D-cache */ |
24 | static unsigned long long dtlb_cache_slot; | 26 | static unsigned long long dtlb_cache_slot; |
25 | 27 | ||
26 | void __init p3_cache_init(void) | ||
27 | { | ||
28 | /* Reserve a slot for dcache colouring in the DTLB */ | ||
29 | dtlb_cache_slot = sh64_get_wired_dtlb_entry(); | ||
30 | } | ||
31 | |||
32 | #ifdef CONFIG_DCACHE_DISABLED | ||
33 | #define sh64_dcache_purge_all() do { } while (0) | ||
34 | #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr) do { } while (0) | ||
35 | #define sh64_dcache_purge_user_range(mm, start, end) do { } while (0) | ||
36 | #define sh64_dcache_purge_phy_page(paddr) do { } while (0) | ||
37 | #define sh64_dcache_purge_virt_page(mm, eaddr) do { } while (0) | ||
38 | #endif | ||
39 | |||
40 | /* | 28 | /* |
41 | * The following group of functions deal with mapping and unmapping a | 29 | * The following group of functions deal with mapping and unmapping a |
42 | * temporary page into a DTLB slot that has been set aside for exclusive | 30 | * temporary page into a DTLB slot that has been set aside for exclusive |
@@ -46,29 +34,22 @@ static inline void | |||
46 | sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, | 34 | sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, |
47 | unsigned long paddr) | 35 | unsigned long paddr) |
48 | { | 36 | { |
49 | local_irq_disable(); | ||
50 | sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr); | 37 | sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr); |
51 | } | 38 | } |
52 | 39 | ||
53 | static inline void sh64_teardown_dtlb_cache_slot(void) | 40 | static inline void sh64_teardown_dtlb_cache_slot(void) |
54 | { | 41 | { |
55 | sh64_teardown_tlb_slot(dtlb_cache_slot); | 42 | sh64_teardown_tlb_slot(dtlb_cache_slot); |
56 | local_irq_enable(); | ||
57 | } | 43 | } |
58 | 44 | ||
59 | #ifndef CONFIG_ICACHE_DISABLED | ||
60 | static inline void sh64_icache_inv_all(void) | 45 | static inline void sh64_icache_inv_all(void) |
61 | { | 46 | { |
62 | unsigned long long addr, flag, data; | 47 | unsigned long long addr, flag, data; |
63 | unsigned long flags; | ||
64 | 48 | ||
65 | addr = ICCR0; | 49 | addr = ICCR0; |
66 | flag = ICCR0_ICI; | 50 | flag = ICCR0_ICI; |
67 | data = 0; | 51 | data = 0; |
68 | 52 | ||
69 | /* Make this a critical section for safety (probably not strictly necessary.) */ | ||
70 | local_irq_save(flags); | ||
71 | |||
72 | /* Without %1 it gets inexplicably wrong */ | 53 | /* Without %1 it gets inexplicably wrong */ |
73 | __asm__ __volatile__ ( | 54 | __asm__ __volatile__ ( |
74 | "getcfg %3, 0, %0\n\t" | 55 | "getcfg %3, 0, %0\n\t" |
@@ -77,8 +58,6 @@ static inline void sh64_icache_inv_all(void) | |||
77 | "synci" | 58 | "synci" |
78 | : "=&r" (data) | 59 | : "=&r" (data) |
79 | : "0" (data), "r" (flag), "r" (addr)); | 60 | : "0" (data), "r" (flag), "r" (addr)); |
80 | |||
81 | local_irq_restore(flags); | ||
82 | } | 61 | } |
83 | 62 | ||
84 | static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) | 63 | static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) |
@@ -103,7 +82,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long | |||
103 | Also, eaddr is page-aligned. */ | 82 | Also, eaddr is page-aligned. */ |
104 | unsigned int cpu = smp_processor_id(); | 83 | unsigned int cpu = smp_processor_id(); |
105 | unsigned long long addr, end_addr; | 84 | unsigned long long addr, end_addr; |
106 | unsigned long flags = 0; | ||
107 | unsigned long running_asid, vma_asid; | 85 | unsigned long running_asid, vma_asid; |
108 | addr = eaddr; | 86 | addr = eaddr; |
109 | end_addr = addr + PAGE_SIZE; | 87 | end_addr = addr + PAGE_SIZE; |
@@ -124,10 +102,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long | |||
124 | 102 | ||
125 | running_asid = get_asid(); | 103 | running_asid = get_asid(); |
126 | vma_asid = cpu_asid(cpu, vma->vm_mm); | 104 | vma_asid = cpu_asid(cpu, vma->vm_mm); |
127 | if (running_asid != vma_asid) { | 105 | if (running_asid != vma_asid) |
128 | local_irq_save(flags); | ||
129 | switch_and_save_asid(vma_asid); | 106 | switch_and_save_asid(vma_asid); |
130 | } | 107 | |
131 | while (addr < end_addr) { | 108 | while (addr < end_addr) { |
132 | /* Worth unrolling a little */ | 109 | /* Worth unrolling a little */ |
133 | __asm__ __volatile__("icbi %0, 0" : : "r" (addr)); | 110 | __asm__ __volatile__("icbi %0, 0" : : "r" (addr)); |
@@ -136,10 +113,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long | |||
136 | __asm__ __volatile__("icbi %0, 96" : : "r" (addr)); | 113 | __asm__ __volatile__("icbi %0, 96" : : "r" (addr)); |
137 | addr += 128; | 114 | addr += 128; |
138 | } | 115 | } |
139 | if (running_asid != vma_asid) { | 116 | |
117 | if (running_asid != vma_asid) | ||
140 | switch_and_save_asid(running_asid); | 118 | switch_and_save_asid(running_asid); |
141 | local_irq_restore(flags); | ||
142 | } | ||
143 | } | 119 | } |
144 | 120 | ||
145 | static void sh64_icache_inv_user_page_range(struct mm_struct *mm, | 121 | static void sh64_icache_inv_user_page_range(struct mm_struct *mm, |
@@ -172,16 +148,12 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, | |||
172 | unsigned long eaddr; | 148 | unsigned long eaddr; |
173 | unsigned long after_last_page_start; | 149 | unsigned long after_last_page_start; |
174 | unsigned long mm_asid, current_asid; | 150 | unsigned long mm_asid, current_asid; |
175 | unsigned long flags = 0; | ||
176 | 151 | ||
177 | mm_asid = cpu_asid(smp_processor_id(), mm); | 152 | mm_asid = cpu_asid(smp_processor_id(), mm); |
178 | current_asid = get_asid(); | 153 | current_asid = get_asid(); |
179 | 154 | ||
180 | if (mm_asid != current_asid) { | 155 | if (mm_asid != current_asid) |
181 | /* Switch ASID and run the invalidate loop under cli */ | ||
182 | local_irq_save(flags); | ||
183 | switch_and_save_asid(mm_asid); | 156 | switch_and_save_asid(mm_asid); |
184 | } | ||
185 | 157 | ||
186 | aligned_start = start & PAGE_MASK; | 158 | aligned_start = start & PAGE_MASK; |
187 | after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK); | 159 | after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK); |
@@ -207,59 +179,11 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, | |||
207 | aligned_start = vma->vm_end; /* Skip to start of next region */ | 179 | aligned_start = vma->vm_end; /* Skip to start of next region */ |
208 | } | 180 | } |
209 | 181 | ||
210 | if (mm_asid != current_asid) { | 182 | if (mm_asid != current_asid) |
211 | switch_and_save_asid(current_asid); | 183 | switch_and_save_asid(current_asid); |
212 | local_irq_restore(flags); | ||
213 | } | ||
214 | } | 184 | } |
215 | } | 185 | } |
216 | 186 | ||
217 | /* | ||
218 | * Invalidate a small range of user context I-cache, not necessarily page | ||
219 | * (or even cache-line) aligned. | ||
220 | * | ||
221 | * Since this is used inside ptrace, the ASID in the mm context typically | ||
222 | * won't match current_asid. We'll have to switch ASID to do this. For | ||
223 | * safety, and given that the range will be small, do all this under cli. | ||
224 | * | ||
225 | * Note, there is a hazard that the ASID in mm->context is no longer | ||
226 | * actually associated with mm, i.e. if the mm->context has started a new | ||
227 | * cycle since mm was last active. However, this is just a performance | ||
228 | * issue: all that happens is that we invalidate lines belonging to | ||
229 | * another mm, so the owning process has to refill them when that mm goes | ||
230 | * live again. mm itself can't have any cache entries because there will | ||
231 | * have been a flush_cache_all when the new mm->context cycle started. | ||
232 | */ | ||
233 | static void sh64_icache_inv_user_small_range(struct mm_struct *mm, | ||
234 | unsigned long start, int len) | ||
235 | { | ||
236 | unsigned long long eaddr = start; | ||
237 | unsigned long long eaddr_end = start + len; | ||
238 | unsigned long current_asid, mm_asid; | ||
239 | unsigned long flags; | ||
240 | unsigned long long epage_start; | ||
241 | |||
242 | /* | ||
243 | * Align to start of cache line. Otherwise, suppose len==8 and | ||
244 | * start was at 32N+28: the last 4 bytes wouldn't get invalidated. | ||
245 | */ | ||
246 | eaddr = L1_CACHE_ALIGN(start); | ||
247 | eaddr_end = start + len; | ||
248 | |||
249 | mm_asid = cpu_asid(smp_processor_id(), mm); | ||
250 | local_irq_save(flags); | ||
251 | current_asid = switch_and_save_asid(mm_asid); | ||
252 | |||
253 | epage_start = eaddr & PAGE_MASK; | ||
254 | |||
255 | while (eaddr < eaddr_end) { | ||
256 | __asm__ __volatile__("icbi %0, 0" : : "r" (eaddr)); | ||
257 | eaddr += L1_CACHE_BYTES; | ||
258 | } | ||
259 | switch_and_save_asid(current_asid); | ||
260 | local_irq_restore(flags); | ||
261 | } | ||
262 | |||
263 | static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) | 187 | static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) |
264 | { | 188 | { |
265 | /* The icbi instruction never raises ITLBMISS. i.e. if there's not a | 189 | /* The icbi instruction never raises ITLBMISS. i.e. if there's not a |
@@ -287,9 +211,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon | |||
287 | addr += L1_CACHE_BYTES; | 211 | addr += L1_CACHE_BYTES; |
288 | } | 212 | } |
289 | } | 213 | } |
290 | #endif /* !CONFIG_ICACHE_DISABLED */ | ||
291 | 214 | ||
292 | #ifndef CONFIG_DCACHE_DISABLED | ||
293 | /* Buffer used as the target of alloco instructions to purge data from cache | 215 | /* Buffer used as the target of alloco instructions to purge data from cache |
294 | sets by natural eviction. -- RPC */ | 216 | sets by natural eviction. -- RPC */ |
295 | #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) | 217 | #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) |
@@ -541,59 +463,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm, | |||
541 | } | 463 | } |
542 | 464 | ||
543 | /* | 465 | /* |
544 | * Purge the range of addresses from the D-cache. | ||
545 | * | ||
546 | * The addresses lie in the superpage mapping. There's no harm if we | ||
547 | * overpurge at either end - just a small performance loss. | ||
548 | */ | ||
549 | void __flush_purge_region(void *start, int size) | ||
550 | { | ||
551 | unsigned long long ullend, addr, aligned_start; | ||
552 | |||
553 | aligned_start = (unsigned long long)(signed long long)(signed long) start; | ||
554 | addr = L1_CACHE_ALIGN(aligned_start); | ||
555 | ullend = (unsigned long long) (signed long long) (signed long) start + size; | ||
556 | |||
557 | while (addr <= ullend) { | ||
558 | __asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr)); | ||
559 | addr += L1_CACHE_BYTES; | ||
560 | } | ||
561 | } | ||
562 | |||
563 | void __flush_wback_region(void *start, int size) | ||
564 | { | ||
565 | unsigned long long ullend, addr, aligned_start; | ||
566 | |||
567 | aligned_start = (unsigned long long)(signed long long)(signed long) start; | ||
568 | addr = L1_CACHE_ALIGN(aligned_start); | ||
569 | ullend = (unsigned long long) (signed long long) (signed long) start + size; | ||
570 | |||
571 | while (addr < ullend) { | ||
572 | __asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr)); | ||
573 | addr += L1_CACHE_BYTES; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | void __flush_invalidate_region(void *start, int size) | ||
578 | { | ||
579 | unsigned long long ullend, addr, aligned_start; | ||
580 | |||
581 | aligned_start = (unsigned long long)(signed long long)(signed long) start; | ||
582 | addr = L1_CACHE_ALIGN(aligned_start); | ||
583 | ullend = (unsigned long long) (signed long long) (signed long) start + size; | ||
584 | |||
585 | while (addr < ullend) { | ||
586 | __asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr)); | ||
587 | addr += L1_CACHE_BYTES; | ||
588 | } | ||
589 | } | ||
590 | #endif /* !CONFIG_DCACHE_DISABLED */ | ||
591 | |||
592 | /* | ||
593 | * Invalidate the entire contents of both caches, after writing back to | 466 | * Invalidate the entire contents of both caches, after writing back to |
594 | * memory any dirty data from the D-cache. | 467 | * memory any dirty data from the D-cache. |
595 | */ | 468 | */ |
596 | void flush_cache_all(void) | 469 | static void sh5_flush_cache_all(void *unused) |
597 | { | 470 | { |
598 | sh64_dcache_purge_all(); | 471 | sh64_dcache_purge_all(); |
599 | sh64_icache_inv_all(); | 472 | sh64_icache_inv_all(); |
@@ -620,7 +493,7 @@ void flush_cache_all(void) | |||
620 | * I-cache. This is similar to the lack of action needed in | 493 | * I-cache. This is similar to the lack of action needed in |
621 | * flush_tlb_mm - see fault.c. | 494 | * flush_tlb_mm - see fault.c. |
622 | */ | 495 | */ |
623 | void flush_cache_mm(struct mm_struct *mm) | 496 | static void sh5_flush_cache_mm(void *unused) |
624 | { | 497 | { |
625 | sh64_dcache_purge_all(); | 498 | sh64_dcache_purge_all(); |
626 | } | 499 | } |
@@ -632,13 +505,18 @@ void flush_cache_mm(struct mm_struct *mm) | |||
632 | * | 505 | * |
633 | * Note, 'end' is 1 byte beyond the end of the range to flush. | 506 | * Note, 'end' is 1 byte beyond the end of the range to flush. |
634 | */ | 507 | */ |
635 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | 508 | static void sh5_flush_cache_range(void *args) |
636 | unsigned long end) | ||
637 | { | 509 | { |
638 | struct mm_struct *mm = vma->vm_mm; | 510 | struct flusher_data *data = args; |
511 | struct vm_area_struct *vma; | ||
512 | unsigned long start, end; | ||
513 | |||
514 | vma = data->vma; | ||
515 | start = data->addr1; | ||
516 | end = data->addr2; | ||
639 | 517 | ||
640 | sh64_dcache_purge_user_range(mm, start, end); | 518 | sh64_dcache_purge_user_range(vma->vm_mm, start, end); |
641 | sh64_icache_inv_user_page_range(mm, start, end); | 519 | sh64_icache_inv_user_page_range(vma->vm_mm, start, end); |
642 | } | 520 | } |
643 | 521 | ||
644 | /* | 522 | /* |
@@ -650,16 +528,23 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
650 | * | 528 | * |
651 | * Note, this is called with pte lock held. | 529 | * Note, this is called with pte lock held. |
652 | */ | 530 | */ |
653 | void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, | 531 | static void sh5_flush_cache_page(void *args) |
654 | unsigned long pfn) | ||
655 | { | 532 | { |
533 | struct flusher_data *data = args; | ||
534 | struct vm_area_struct *vma; | ||
535 | unsigned long eaddr, pfn; | ||
536 | |||
537 | vma = data->vma; | ||
538 | eaddr = data->addr1; | ||
539 | pfn = data->addr2; | ||
540 | |||
656 | sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); | 541 | sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); |
657 | 542 | ||
658 | if (vma->vm_flags & VM_EXEC) | 543 | if (vma->vm_flags & VM_EXEC) |
659 | sh64_icache_inv_user_page(vma, eaddr); | 544 | sh64_icache_inv_user_page(vma, eaddr); |
660 | } | 545 | } |
661 | 546 | ||
662 | void flush_dcache_page(struct page *page) | 547 | static void sh5_flush_dcache_page(void *page) |
663 | { | 548 | { |
664 | sh64_dcache_purge_phy_page(page_to_phys(page)); | 549 | sh64_dcache_purge_phy_page(page_to_phys(page)); |
665 | wmb(); | 550 | wmb(); |
@@ -673,162 +558,47 @@ void flush_dcache_page(struct page *page) | |||
673 | * mapping, therefore it's guaranteed that there are no cache entries for | 558 | * mapping, therefore it's guaranteed that there are no cache entries for |
674 | * the range in cache sets of the wrong colour. | 559 | * the range in cache sets of the wrong colour. |
675 | */ | 560 | */ |
676 | void flush_icache_range(unsigned long start, unsigned long end) | 561 | static void sh5_flush_icache_range(void *args) |
677 | { | 562 | { |
563 | struct flusher_data *data = args; | ||
564 | unsigned long start, end; | ||
565 | |||
566 | start = data->addr1; | ||
567 | end = data->addr2; | ||
568 | |||
678 | __flush_purge_region((void *)start, end); | 569 | __flush_purge_region((void *)start, end); |
679 | wmb(); | 570 | wmb(); |
680 | sh64_icache_inv_kernel_range(start, end); | 571 | sh64_icache_inv_kernel_range(start, end); |
681 | } | 572 | } |
682 | 573 | ||
683 | /* | 574 | /* |
684 | * Flush the range of user (defined by vma->vm_mm) address space starting | ||
685 | * at 'addr' for 'len' bytes from the cache. The range does not straddle | ||
686 | * a page boundary, the unique physical page containing the range is | ||
687 | * 'page'. This seems to be used mainly for invalidating an address | ||
688 | * range following a poke into the program text through the ptrace() call | ||
689 | * from another process (e.g. for BRK instruction insertion). | ||
690 | */ | ||
691 | void flush_icache_user_range(struct vm_area_struct *vma, | ||
692 | struct page *page, unsigned long addr, int len) | ||
693 | { | ||
694 | |||
695 | sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr); | ||
696 | mb(); | ||
697 | |||
698 | if (vma->vm_flags & VM_EXEC) | ||
699 | sh64_icache_inv_user_small_range(vma->vm_mm, addr, len); | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * For the address range [start,end), write back the data from the | 575 | * For the address range [start,end), write back the data from the |
704 | * D-cache and invalidate the corresponding region of the I-cache for the | 576 | * D-cache and invalidate the corresponding region of the I-cache for the |
705 | * current process. Used to flush signal trampolines on the stack to | 577 | * current process. Used to flush signal trampolines on the stack to |
706 | * make them executable. | 578 | * make them executable. |
707 | */ | 579 | */ |
708 | void flush_cache_sigtramp(unsigned long vaddr) | 580 | static void sh5_flush_cache_sigtramp(void *vaddr) |
709 | { | 581 | { |
710 | unsigned long end = vaddr + L1_CACHE_BYTES; | 582 | unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES; |
711 | 583 | ||
712 | __flush_wback_region((void *)vaddr, L1_CACHE_BYTES); | 584 | __flush_wback_region(vaddr, L1_CACHE_BYTES); |
713 | wmb(); | 585 | wmb(); |
714 | sh64_icache_inv_current_user_range(vaddr, end); | 586 | sh64_icache_inv_current_user_range((unsigned long)vaddr, end); |
715 | } | ||
716 | |||
717 | #ifdef CONFIG_MMU | ||
718 | /* | ||
719 | * These *MUST* lie in an area of virtual address space that's otherwise | ||
720 | * unused. | ||
721 | */ | ||
722 | #define UNIQUE_EADDR_START 0xe0000000UL | ||
723 | #define UNIQUE_EADDR_END 0xe8000000UL | ||
724 | |||
725 | /* | ||
726 | * Given a physical address paddr, and a user virtual address user_eaddr | ||
727 | * which will eventually be mapped to it, create a one-off kernel-private | ||
728 | * eaddr mapped to the same paddr. This is used for creating special | ||
729 | * destination pages for copy_user_page and clear_user_page. | ||
730 | */ | ||
731 | static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, | ||
732 | unsigned long paddr) | ||
733 | { | ||
734 | static unsigned long current_pointer = UNIQUE_EADDR_START; | ||
735 | unsigned long coloured_pointer; | ||
736 | |||
737 | if (current_pointer == UNIQUE_EADDR_END) { | ||
738 | sh64_dcache_purge_all(); | ||
739 | current_pointer = UNIQUE_EADDR_START; | ||
740 | } | ||
741 | |||
742 | coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | | ||
743 | (user_eaddr & CACHE_OC_SYN_MASK); | ||
744 | sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr); | ||
745 | |||
746 | current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS); | ||
747 | |||
748 | return coloured_pointer; | ||
749 | } | 587 | } |
750 | 588 | ||
751 | static void sh64_copy_user_page_coloured(void *to, void *from, | 589 | void __init sh5_cache_init(void) |
752 | unsigned long address) | ||
753 | { | 590 | { |
754 | void *coloured_to; | 591 | local_flush_cache_all = sh5_flush_cache_all; |
592 | local_flush_cache_mm = sh5_flush_cache_mm; | ||
593 | local_flush_cache_dup_mm = sh5_flush_cache_mm; | ||
594 | local_flush_cache_page = sh5_flush_cache_page; | ||
595 | local_flush_cache_range = sh5_flush_cache_range; | ||
596 | local_flush_dcache_page = sh5_flush_dcache_page; | ||
597 | local_flush_icache_range = sh5_flush_icache_range; | ||
598 | local_flush_cache_sigtramp = sh5_flush_cache_sigtramp; | ||
755 | 599 | ||
756 | /* | 600 | /* Reserve a slot for dcache colouring in the DTLB */ |
757 | * Discard any existing cache entries of the wrong colour. These are | 601 | dtlb_cache_slot = sh64_get_wired_dtlb_entry(); |
758 | * present quite often if the kernel has recently used the page | ||
759 | * internally, then given it up, and it has since been allocated to the user. | ||
760 | */ | ||
761 | sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); | ||
762 | |||
763 | coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); | ||
764 | copy_page(from, coloured_to); | ||
765 | |||
766 | sh64_teardown_dtlb_cache_slot(); | ||
767 | } | ||
768 | |||
769 | static void sh64_clear_user_page_coloured(void *to, unsigned long address) | ||
770 | { | ||
771 | void *coloured_to; | ||
772 | |||
773 | /* | ||
774 | * Discard any existing kernel-originated lines of the wrong | ||
775 | * colour (as above) | ||
776 | */ | ||
777 | sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); | ||
778 | |||
779 | coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); | ||
780 | clear_page(coloured_to); | ||
781 | |||
782 | sh64_teardown_dtlb_cache_slot(); | ||
783 | } | ||
784 | |||
785 | /* | ||
786 | * 'from' and 'to' are kernel virtual addresses (within the superpage | ||
787 | * mapping of the physical RAM). 'address' is the user virtual address | ||
788 | * where the copy 'to' will be mapped after. This allows a custom | ||
789 | * mapping to be used to ensure that the new copy is placed in the | ||
790 | * right cache sets for the user to see it without having to bounce it | ||
790 | * out via memory. Note, however: the call to flush_page_to_ram in | ||
792 | * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one | ||
793 | * very important case! | ||
794 | * | ||
795 | * TBD: can we guarantee that on every call, any cache entries for | ||
796 | * 'from' are in the same colour sets as 'address' also? i.e. is this | ||
797 | * always used just to deal with COW? (I suspect not). | ||
798 | * | ||
799 | * There are two possibilities here for when the page 'from' was last accessed: | ||
800 | * - by the kernel : this is OK, no purge required. | ||
801 | * - by the/a user (e.g. for break_COW) : need to purge. | ||
802 | * | ||
803 | * If the potential user mapping at 'address' is the same colour as | ||
804 | * 'from' there is no need to purge any cache lines from the 'from' | ||
805 | * page mapped into cache sets of colour 'address'. (The copy will be | ||
806 | * accessing the page through 'from'). | ||
807 | */ | ||
808 | void copy_user_page(void *to, void *from, unsigned long address, | ||
809 | struct page *page) | ||
810 | { | ||
811 | if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) | ||
812 | sh64_dcache_purge_coloured_phy_page(__pa(from), address); | ||
813 | |||
814 | if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) | ||
815 | copy_page(to, from); | ||
816 | else | ||
817 | sh64_copy_user_page_coloured(to, from, address); | ||
818 | } | ||
819 | 602 | ||
820 | /* | 603 | sh4__flush_region_init(); |
821 | * 'to' is a kernel virtual address (within the superpage mapping of the | ||
822 | * physical RAM). 'address' is the user virtual address where the 'to' | ||
823 | * page will eventually be mapped. This allows a custom mapping to be used to | ||
824 | * ensure that the new copy is placed in the right cache sets for the | ||
825 | * user to see it without having to bounce it out via memory. | ||
826 | */ | ||
827 | void clear_user_page(void *to, unsigned long address, struct page *page) | ||
828 | { | ||
829 | if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) | ||
830 | clear_page(to); | ||
831 | else | ||
832 | sh64_clear_user_page_coloured(to, address); | ||
833 | } | 604 | } |
834 | #endif | ||
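The colouring trick above is worth unpacking: sh64_make_unique_eaddr() keeps a cursor inside a private address window and borrows only the synonym (colour) bits from the user address, so the one-off kernel mapping lands in the same cache sets the eventual user mapping will use. A minimal sketch of that address derivation, with stand-in window and mask values rather than the real UNIQUE_EADDR_START / CACHE_OC_SYN_MASK definitions:

/* Illustrative only: the window base and colour mask are made-up values,
 * not the real UNIQUE_EADDR_START / CACHE_OC_SYN_MASK constants. */
#define EXAMPLE_WINDOW_BASE	0xe0000000UL
#define EXAMPLE_COLOUR_MASK	0x00003000UL	/* synonym-selecting bits */

static unsigned long example_colour_match(unsigned long window_ptr,
					  unsigned long user_eaddr)
{
	/* Keep the private window position, take the user's colour bits */
	return (window_ptr & ~EXAMPLE_COLOUR_MASK) |
	       (user_eaddr & EXAMPLE_COLOUR_MASK);
}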
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 22dacc778823..9dc38660e3de 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/mman.h> | 13 | #include <linux/mman.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/fs.h> | ||
15 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
16 | #include <asm/addrspace.h> | 17 | #include <asm/addrspace.h> |
17 | #include <asm/page.h> | 18 | #include <asm/page.h> |
@@ -63,18 +64,23 @@ static inline void cache_wback_all(void) | |||
63 | * | 64 | * |
64 | * Called from kernel/module.c:sys_init_module and routine for a.out format. | 65 | * Called from kernel/module.c:sys_init_module and routine for a.out format. |
65 | */ | 66 | */ |
66 | void flush_icache_range(unsigned long start, unsigned long end) | 67 | static void sh7705_flush_icache_range(void *args) |
67 | { | 68 | { |
69 | struct flusher_data *data = args; | ||
70 | unsigned long start, end; | ||
71 | |||
72 | start = data->addr1; | ||
73 | end = data->addr2; | ||
74 | |||
68 | __flush_wback_region((void *)start, end - start); | 75 | __flush_wback_region((void *)start, end - start); |
69 | } | 76 | } |
70 | 77 | ||
71 | /* | 78 | /* |
72 | * Write back & invalidate the D-cache of the page | 79 | * Write back & invalidate the D-cache of the page
73 | */ | 80 | */ |
74 | static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | 81 | static void __flush_dcache_page(unsigned long phys) |
75 | { | 82 | { |
76 | unsigned long ways, waysize, addrstart; | 83 | unsigned long ways, waysize, addrstart; |
77 | unsigned long flags; | ||
78 | 84 | ||
79 | phys |= SH_CACHE_VALID; | 85 | phys |= SH_CACHE_VALID; |
80 | 86 | ||
@@ -91,7 +97,6 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | |||
91 | * potential cache aliasing, therefore the optimisation is probably not | 97 | * potential cache aliasing, therefore the optimisation is probably not |
92 | * possible. | 98 | * possible. |
93 | */ | 99 | */ |
94 | local_irq_save(flags); | ||
95 | jump_to_uncached(); | 100 | jump_to_uncached(); |
96 | 101 | ||
97 | ways = current_cpu_data.dcache.ways; | 102 | ways = current_cpu_data.dcache.ways; |
@@ -119,59 +124,27 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | |||
119 | } while (--ways); | 124 | } while (--ways); |
120 | 125 | ||
121 | back_to_cached(); | 126 | back_to_cached(); |
122 | local_irq_restore(flags); | ||
123 | } | 127 | } |
124 | 128 | ||
125 | /* | 129 | /* |
126 | * Write back & invalidate the D-cache of the page. | 130 | * Write back & invalidate the D-cache of the page. |
127 | * (To avoid "alias" issues) | 131 | * (To avoid "alias" issues) |
128 | */ | 132 | */ |
129 | void flush_dcache_page(struct page *page) | 133 | static void sh7705_flush_dcache_page(void *page) |
130 | { | 134 | { |
131 | if (test_bit(PG_mapped, &page->flags)) | 135 | struct address_space *mapping = page_mapping(page); |
136 | |||
137 | if (mapping && !mapping_mapped(mapping)) | ||
138 | set_bit(PG_dcache_dirty, &page->flags); | ||
139 | else | ||
132 | __flush_dcache_page(PHYSADDR(page_address(page))); | 140 | __flush_dcache_page(PHYSADDR(page_address(page))); |
133 | } | 141 | } |
134 | 142 | ||
135 | void __uses_jump_to_uncached flush_cache_all(void) | 143 | static void sh7705_flush_cache_all(void *args) |
136 | { | 144 | { |
137 | unsigned long flags; | ||
138 | |||
139 | local_irq_save(flags); | ||
140 | jump_to_uncached(); | 145 | jump_to_uncached(); |
141 | |||
142 | cache_wback_all(); | 146 | cache_wback_all(); |
143 | back_to_cached(); | 147 | back_to_cached(); |
144 | local_irq_restore(flags); | ||
145 | } | ||
146 | |||
147 | void flush_cache_mm(struct mm_struct *mm) | ||
148 | { | ||
149 | /* Is there any good way? */ | ||
150 | /* XXX: possibly call flush_cache_range for each vm area */ | ||
151 | flush_cache_all(); | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Write back and invalidate D-caches. | ||
156 | * | ||
157 | * START, END: Virtual Address (U0 address) | ||
158 | * | ||
159 | * NOTE: We need to flush the _physical_ page entry. | ||
160 | * Flushing the cache lines for U0 only isn't enough. | ||
161 | * We need to flush for P1 too, which may contain aliases. | ||
162 | */ | ||
163 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
164 | unsigned long end) | ||
165 | { | ||
166 | |||
167 | /* | ||
168 | * We could call flush_cache_page for the pages in this range, | ||
169 | * but it's not efficient (scan the caches all the time...). | ||
170 | * | ||
171 | * We can't use A-bit magic, as there are cases where we don't | ||
172 | * have a valid entry in the TLB. | ||
173 | */ | ||
174 | flush_cache_all(); | ||
175 | } | 148 | } |
176 | 149 | ||
177 | /* | 150 | /* |
@@ -179,9 +152,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
179 | * | 152 | * |
180 | * ADDRESS: Virtual Address (U0 address) | 153 | * ADDRESS: Virtual Address (U0 address) |
181 | */ | 154 | */ |
182 | void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | 155 | static void sh7705_flush_cache_page(void *args) |
183 | unsigned long pfn) | ||
184 | { | 156 | { |
157 | struct flusher_data *data = args; | ||
158 | unsigned long pfn = data->addr2; | ||
159 | |||
185 | __flush_dcache_page(pfn << PAGE_SHIFT); | 160 | __flush_dcache_page(pfn << PAGE_SHIFT); |
186 | } | 161 | } |
187 | 162 | ||
@@ -193,7 +168,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address, | |||
193 | * Not entirely sure why this is necessary on SH3 with 32K cache but | 168 | * Not entirely sure why this is necessary on SH3 with 32K cache but |
194 | * without it we get occasional "Memory fault" when loading a program. | 169 | * without it we get occasional "Memory fault" when loading a program. |
195 | */ | 170 | */ |
196 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | 171 | static void sh7705_flush_icache_page(void *page) |
197 | { | 172 | { |
198 | __flush_purge_region(page_address(page), PAGE_SIZE); | 173 | __flush_purge_region(page_address(page), PAGE_SIZE); |
199 | } | 174 | } |
175 | |||
176 | void __init sh7705_cache_init(void) | ||
177 | { | ||
178 | local_flush_icache_range = sh7705_flush_icache_range; | ||
179 | local_flush_dcache_page = sh7705_flush_dcache_page; | ||
180 | local_flush_cache_all = sh7705_flush_cache_all; | ||
181 | local_flush_cache_mm = sh7705_flush_cache_all; | ||
182 | local_flush_cache_dup_mm = sh7705_flush_cache_all; | ||
183 | local_flush_cache_range = sh7705_flush_cache_all; | ||
184 | local_flush_cache_page = sh7705_flush_cache_page; | ||
185 | local_flush_icache_page = sh7705_flush_icache_page; | ||
186 | } | ||
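The sh7705_flush_dcache_page() change above is the lazy-flush pattern used throughout this series: a page that belongs to a mapping but is not currently mapped into user space is merely marked PG_dcache_dirty, and the write-back is deferred until __update_cache() (in the new arch/sh/mm/cache.c below) sees the bit when the page is faulted into a user mapping. A condensed, hedged sketch of the two halves of that handshake, assuming the helpers shown elsewhere in this diff:

/* Producer side: defer the flush when no user mapping can see the page */
static void example_lazy_flush(struct page *page, unsigned long phys)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);	/* flush later */
	else
		__flush_dcache_page(phys);		/* flush now */
}

/* Consumer side: the deferred flush is completed when the PTE is
 * installed (simplified from __update_cache() in cache.c below) */
static void example_complete_deferred(struct page *page)
{
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_wback_region(page_address(page), PAGE_SIZE);
}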
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c new file mode 100644 index 000000000000..411fe6058429 --- /dev/null +++ b/arch/sh/mm/cache.c | |||
@@ -0,0 +1,306 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/cache.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 - 2009 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/highmem.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | |||
19 | void (*local_flush_cache_all)(void *args) = cache_noop; | ||
20 | void (*local_flush_cache_mm)(void *args) = cache_noop; | ||
21 | void (*local_flush_cache_dup_mm)(void *args) = cache_noop; | ||
22 | void (*local_flush_cache_page)(void *args) = cache_noop; | ||
23 | void (*local_flush_cache_range)(void *args) = cache_noop; | ||
24 | void (*local_flush_dcache_page)(void *args) = cache_noop; | ||
25 | void (*local_flush_icache_range)(void *args) = cache_noop; | ||
26 | void (*local_flush_icache_page)(void *args) = cache_noop; | ||
27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; | ||
28 | |||
29 | void (*__flush_wback_region)(void *start, int size); | ||
30 | void (*__flush_purge_region)(void *start, int size); | ||
31 | void (*__flush_invalidate_region)(void *start, int size); | ||
32 | |||
33 | static inline void noop__flush_region(void *start, int size) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
38 | unsigned long vaddr, void *dst, const void *src, | ||
39 | unsigned long len) | ||
40 | { | ||
41 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | ||
42 | !test_bit(PG_dcache_dirty, &page->flags)) { | ||
43 | void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
44 | memcpy(vto, src, len); | ||
45 | kunmap_coherent(); | ||
46 | } else { | ||
47 | memcpy(dst, src, len); | ||
48 | if (boot_cpu_data.dcache.n_aliases) | ||
49 | set_bit(PG_dcache_dirty, &page->flags); | ||
50 | } | ||
51 | |||
52 | if (vma->vm_flags & VM_EXEC) | ||
53 | flush_cache_page(vma, vaddr, page_to_pfn(page)); | ||
54 | } | ||
55 | |||
56 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | ||
57 | unsigned long vaddr, void *dst, const void *src, | ||
58 | unsigned long len) | ||
59 | { | ||
60 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | ||
61 | !test_bit(PG_dcache_dirty, &page->flags)) { | ||
62 | void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
63 | memcpy(dst, vfrom, len); | ||
64 | kunmap_coherent(); | ||
65 | } else { | ||
66 | memcpy(dst, src, len); | ||
67 | if (boot_cpu_data.dcache.n_aliases) | ||
68 | set_bit(PG_dcache_dirty, &page->flags); | ||
69 | } | ||
70 | } | ||
71 | |||
72 | void copy_user_highpage(struct page *to, struct page *from, | ||
73 | unsigned long vaddr, struct vm_area_struct *vma) | ||
74 | { | ||
75 | void *vfrom, *vto; | ||
76 | |||
77 | vto = kmap_atomic(to, KM_USER1); | ||
78 | |||
79 | if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && | ||
80 | !test_bit(PG_dcache_dirty, &from->flags)) { | ||
81 | vfrom = kmap_coherent(from, vaddr); | ||
82 | copy_page(vto, vfrom); | ||
83 | kunmap_coherent(); | ||
84 | } else { | ||
85 | vfrom = kmap_atomic(from, KM_USER0); | ||
86 | copy_page(vto, vfrom); | ||
87 | kunmap_atomic(vfrom, KM_USER0); | ||
88 | } | ||
89 | |||
90 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | ||
91 | __flush_wback_region(vto, PAGE_SIZE); | ||
92 | |||
93 | kunmap_atomic(vto, KM_USER1); | ||
94 | /* Make sure this page is visible on other CPUs too before using it */ | ||
95 | smp_wmb(); | ||
96 | } | ||
97 | EXPORT_SYMBOL(copy_user_highpage); | ||
98 | |||
99 | void clear_user_highpage(struct page *page, unsigned long vaddr) | ||
100 | { | ||
101 | void *kaddr = kmap_atomic(page, KM_USER0); | ||
102 | |||
103 | clear_page(kaddr); | ||
104 | |||
105 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) | ||
106 | __flush_wback_region(kaddr, PAGE_SIZE); | ||
107 | |||
108 | kunmap_atomic(kaddr, KM_USER0); | ||
109 | } | ||
110 | EXPORT_SYMBOL(clear_user_highpage); | ||
111 | |||
112 | void __update_cache(struct vm_area_struct *vma, | ||
113 | unsigned long address, pte_t pte) | ||
114 | { | ||
115 | struct page *page; | ||
116 | unsigned long pfn = pte_pfn(pte); | ||
117 | |||
118 | if (!boot_cpu_data.dcache.n_aliases) | ||
119 | return; | ||
120 | |||
121 | page = pfn_to_page(pfn); | ||
122 | if (pfn_valid(pfn) && page_mapping(page)) { | ||
123 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); | ||
124 | if (dirty) { | ||
125 | unsigned long addr = (unsigned long)page_address(page); | ||
126 | |||
127 | if (pages_do_alias(addr, address & PAGE_MASK)) | ||
128 | __flush_wback_region((void *)addr, PAGE_SIZE); | ||
129 | } | ||
130 | } | ||
131 | } | ||
132 | |||
133 | void __flush_anon_page(struct page *page, unsigned long vmaddr) | ||
134 | { | ||
135 | unsigned long addr = (unsigned long) page_address(page); | ||
136 | |||
137 | if (pages_do_alias(addr, vmaddr)) { | ||
138 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | ||
139 | !test_bit(PG_dcache_dirty, &page->flags)) { | ||
140 | void *kaddr; | ||
141 | |||
142 | kaddr = kmap_coherent(page, vmaddr); | ||
143 | __flush_wback_region((void *)kaddr, PAGE_SIZE); | ||
144 | kunmap_coherent(); | ||
145 | } else | ||
146 | __flush_wback_region((void *)addr, PAGE_SIZE); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | void flush_cache_all(void) | ||
151 | { | ||
152 | on_each_cpu(local_flush_cache_all, NULL, 1); | ||
153 | } | ||
154 | |||
155 | void flush_cache_mm(struct mm_struct *mm) | ||
156 | { | ||
157 | on_each_cpu(local_flush_cache_mm, mm, 1); | ||
158 | } | ||
159 | |||
160 | void flush_cache_dup_mm(struct mm_struct *mm) | ||
161 | { | ||
162 | on_each_cpu(local_flush_cache_dup_mm, mm, 1); | ||
163 | } | ||
164 | |||
165 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, | ||
166 | unsigned long pfn) | ||
167 | { | ||
168 | struct flusher_data data; | ||
169 | |||
170 | data.vma = vma; | ||
171 | data.addr1 = addr; | ||
172 | data.addr2 = pfn; | ||
173 | |||
174 | on_each_cpu(local_flush_cache_page, (void *)&data, 1); | ||
175 | } | ||
176 | |||
177 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | ||
178 | unsigned long end) | ||
179 | { | ||
180 | struct flusher_data data; | ||
181 | |||
182 | data.vma = vma; | ||
183 | data.addr1 = start; | ||
184 | data.addr2 = end; | ||
185 | |||
186 | on_each_cpu(local_flush_cache_range, (void *)&data, 1); | ||
187 | } | ||
188 | |||
189 | void flush_dcache_page(struct page *page) | ||
190 | { | ||
191 | on_each_cpu(local_flush_dcache_page, page, 1); | ||
192 | } | ||
193 | |||
194 | void flush_icache_range(unsigned long start, unsigned long end) | ||
195 | { | ||
196 | struct flusher_data data; | ||
197 | |||
198 | data.vma = NULL; | ||
199 | data.addr1 = start; | ||
200 | data.addr2 = end; | ||
201 | |||
202 | on_each_cpu(local_flush_icache_range, (void *)&data, 1); | ||
203 | } | ||
204 | |||
205 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | ||
206 | { | ||
207 | /* Nothing uses the VMA, so just pass the struct page along */ | ||
208 | on_each_cpu(local_flush_icache_page, page, 1); | ||
209 | } | ||
210 | |||
211 | void flush_cache_sigtramp(unsigned long address) | ||
212 | { | ||
213 | on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1); | ||
214 | } | ||
215 | |||
216 | static void compute_alias(struct cache_info *c) | ||
217 | { | ||
218 | c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); | ||
219 | c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; | ||
220 | } | ||
221 | |||
222 | static void __init emit_cache_params(void) | ||
223 | { | ||
224 | printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
225 | boot_cpu_data.icache.ways, | ||
226 | boot_cpu_data.icache.sets, | ||
227 | boot_cpu_data.icache.way_incr); | ||
228 | printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
229 | boot_cpu_data.icache.entry_mask, | ||
230 | boot_cpu_data.icache.alias_mask, | ||
231 | boot_cpu_data.icache.n_aliases); | ||
232 | printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
233 | boot_cpu_data.dcache.ways, | ||
234 | boot_cpu_data.dcache.sets, | ||
235 | boot_cpu_data.dcache.way_incr); | ||
236 | printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
237 | boot_cpu_data.dcache.entry_mask, | ||
238 | boot_cpu_data.dcache.alias_mask, | ||
239 | boot_cpu_data.dcache.n_aliases); | ||
240 | |||
241 | /* | ||
242 | * Emit Secondary Cache parameters if the CPU has a probed L2. | ||
243 | */ | ||
244 | if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { | ||
245 | printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n", | ||
246 | boot_cpu_data.scache.ways, | ||
247 | boot_cpu_data.scache.sets, | ||
248 | boot_cpu_data.scache.way_incr); | ||
249 | printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | ||
250 | boot_cpu_data.scache.entry_mask, | ||
251 | boot_cpu_data.scache.alias_mask, | ||
252 | boot_cpu_data.scache.n_aliases); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | void __init cpu_cache_init(void) | ||
257 | { | ||
258 | compute_alias(&boot_cpu_data.icache); | ||
259 | compute_alias(&boot_cpu_data.dcache); | ||
260 | compute_alias(&boot_cpu_data.scache); | ||
261 | |||
262 | __flush_wback_region = noop__flush_region; | ||
263 | __flush_purge_region = noop__flush_region; | ||
264 | __flush_invalidate_region = noop__flush_region; | ||
265 | |||
266 | if (boot_cpu_data.family == CPU_FAMILY_SH2) { | ||
267 | extern void __weak sh2_cache_init(void); | ||
268 | |||
269 | sh2_cache_init(); | ||
270 | } | ||
271 | |||
272 | if (boot_cpu_data.family == CPU_FAMILY_SH2A) { | ||
273 | extern void __weak sh2a_cache_init(void); | ||
274 | |||
275 | sh2a_cache_init(); | ||
276 | } | ||
277 | |||
278 | if (boot_cpu_data.family == CPU_FAMILY_SH3) { | ||
279 | extern void __weak sh3_cache_init(void); | ||
280 | |||
281 | sh3_cache_init(); | ||
282 | |||
283 | if ((boot_cpu_data.type == CPU_SH7705) && | ||
284 | (boot_cpu_data.dcache.sets == 512)) { | ||
285 | extern void __weak sh7705_cache_init(void); | ||
286 | |||
287 | sh7705_cache_init(); | ||
288 | } | ||
289 | } | ||
290 | |||
291 | if ((boot_cpu_data.family == CPU_FAMILY_SH4) || | ||
292 | (boot_cpu_data.family == CPU_FAMILY_SH4A) || | ||
293 | (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) { | ||
294 | extern void __weak sh4_cache_init(void); | ||
295 | |||
296 | sh4_cache_init(); | ||
297 | } | ||
298 | |||
299 | if (boot_cpu_data.family == CPU_FAMILY_SH5) { | ||
300 | extern void __weak sh5_cache_init(void); | ||
301 | |||
302 | sh5_cache_init(); | ||
303 | } | ||
304 | |||
305 | emit_cache_params(); | ||
306 | } | ||
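The new cache.c centralizes dispatch: each variant publishes its implementation through the local_flush_* function pointers, and the generic entry points broadcast to all CPUs via on_each_cpu(). Since on_each_cpu() passes only a single void *, multi-argument flushes are packed into a flusher_data on the caller's stack and unpacked in the callback. A hedged sketch of adding a new operation in this scheme (the names below are invented for illustration):

/* Hypothetical new flush op following the pattern above */
void (*local_flush_example_range)(void *args) = cache_noop;

static void myvariant_flush_example_range(void *args)
{
	struct flusher_data *data = args;

	/* addr1/addr2 carry start and end, as in flush_icache_range() */
	__flush_purge_region((void *)data->addr1,
			     data->addr2 - data->addr1);
}

void flush_example_range(unsigned long start, unsigned long end)
{
	struct flusher_data data = {
		.vma	= NULL,
		.addr1	= start,
		.addr2	= end,
	};

	on_each_cpu(local_flush_example_range, &data, 1);
}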
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index dbbdeba2cee5..f1c93c880ed4 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
@@ -318,16 +318,15 @@ do_sigbus: | |||
318 | /* | 318 | /* |
319 | * Called with interrupts disabled. | 319 | * Called with interrupts disabled. |
320 | */ | 320 | */ |
321 | asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, | 321 | asmlinkage int __kprobes |
322 | unsigned long writeaccess, | 322 | handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, |
323 | unsigned long address) | 323 | unsigned long address) |
324 | { | 324 | { |
325 | pgd_t *pgd; | 325 | pgd_t *pgd; |
326 | pud_t *pud; | 326 | pud_t *pud; |
327 | pmd_t *pmd; | 327 | pmd_t *pmd; |
328 | pte_t *pte; | 328 | pte_t *pte; |
329 | pte_t entry; | 329 | pte_t entry; |
330 | int ret = 1; | ||
331 | 330 | ||
332 | /* | 331 | /* |
333 | * We don't take page faults for P1, P2, and parts of P4, these | 332 | * We don't take page faults for P1, P2, and parts of P4, these |
@@ -338,40 +337,41 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, | |||
338 | pgd = pgd_offset_k(address); | 337 | pgd = pgd_offset_k(address); |
339 | } else { | 338 | } else { |
340 | if (unlikely(address >= TASK_SIZE || !current->mm)) | 339 | if (unlikely(address >= TASK_SIZE || !current->mm)) |
341 | goto out; | 340 | return 1; |
342 | 341 | ||
343 | pgd = pgd_offset(current->mm, address); | 342 | pgd = pgd_offset(current->mm, address); |
344 | } | 343 | } |
345 | 344 | ||
346 | pud = pud_offset(pgd, address); | 345 | pud = pud_offset(pgd, address); |
347 | if (pud_none_or_clear_bad(pud)) | 346 | if (pud_none_or_clear_bad(pud)) |
348 | goto out; | 347 | return 1; |
349 | pmd = pmd_offset(pud, address); | 348 | pmd = pmd_offset(pud, address); |
350 | if (pmd_none_or_clear_bad(pmd)) | 349 | if (pmd_none_or_clear_bad(pmd)) |
351 | goto out; | 350 | return 1; |
352 | pte = pte_offset_kernel(pmd, address); | 351 | pte = pte_offset_kernel(pmd, address); |
353 | entry = *pte; | 352 | entry = *pte; |
354 | if (unlikely(pte_none(entry) || pte_not_present(entry))) | 353 | if (unlikely(pte_none(entry) || pte_not_present(entry))) |
355 | goto out; | 354 | return 1; |
356 | if (unlikely(writeaccess && !pte_write(entry))) | 355 | if (unlikely(writeaccess && !pte_write(entry))) |
357 | goto out; | 356 | return 1; |
358 | 357 | ||
359 | if (writeaccess) | 358 | if (writeaccess) |
360 | entry = pte_mkdirty(entry); | 359 | entry = pte_mkdirty(entry); |
361 | entry = pte_mkyoung(entry); | 360 | entry = pte_mkyoung(entry); |
362 | 361 | ||
362 | set_pte(pte, entry); | ||
363 | |||
363 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) | 364 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) |
364 | /* | 365 | /* |
365 | * ITLB is not affected by "ldtlb" instruction. | 366 | * SH-4 does not set MMUCR.RC to the corresponding TLB entry in |
366 | * So, we need to flush the entry by ourselves. | 367 | * the case of an initial page write exception, so we need to |
368 | * flush it in order to avoid potential TLB entry duplication. | ||
367 | */ | 369 | */ |
368 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | 370 | if (writeaccess == 2) |
371 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | ||
369 | #endif | 372 | #endif |
370 | 373 | ||
371 | set_pte(pte, entry); | ||
372 | update_mmu_cache(NULL, address, entry); | 374 | update_mmu_cache(NULL, address, entry); |
373 | 375 | ||
374 | ret = 0; | 376 | return 0; |
375 | out: | ||
376 | return ret; | ||
377 | } | 377 | } |
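Renaming __do_page_fault() to handle_tlbmiss() makes the contract explicit: return 0 when the TLB was refilled from an already-valid PTE, nonzero when the full fault path must run. The low-level entry glue is not part of this hunk; its assumed shape is roughly:

/* Sketch only; the real dispatch lives in the fault entry assembly/glue.
 * writeaccess encodes the fault type (2 = initial page write on SH-4). */
asmlinkage void example_tlb_entry(struct pt_regs *regs,
				  unsigned long writeaccess,
				  unsigned long address)
{
	if (handle_tlbmiss(regs, writeaccess, address) == 0)
		return;		/* software refill succeeded */

	do_page_fault(regs, writeaccess, address);	/* slow path */
}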
diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c index bd63b961b2a9..2b356cec2489 100644 --- a/arch/sh/mm/fault_64.c +++ b/arch/sh/mm/fault_64.c | |||
@@ -56,16 +56,7 @@ inline void __do_tlb_refill(unsigned long address, | |||
56 | /* | 56 | /* |
57 | * Set PTEH register | 57 | * Set PTEH register |
58 | */ | 58 | */ |
59 | pteh = address & MMU_VPN_MASK; | 59 | pteh = neff_sign_extend(address & MMU_VPN_MASK); |
60 | |||
61 | /* Sign extend based on neff. */ | ||
62 | #if (NEFF == 32) | ||
63 | /* Faster sign extension */ | ||
64 | pteh = (unsigned long long)(signed long long)(signed long)pteh; | ||
65 | #else | ||
66 | /* General case */ | ||
67 | pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh; | ||
68 | #endif | ||
69 | 60 | ||
70 | /* Set the ASID. */ | 61 | /* Set the ASID. */ |
71 | pteh |= get_asid() << PTEH_ASID_SHIFT; | 62 | pteh |= get_asid() << PTEH_ASID_SHIFT; |
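Both fault_64.c here and tlb-sh5.c (further down) now funnel their sign extension through a single neff_sign_extend() helper. Its definition lands outside this diff; reconstructed from the open-coded versions it replaces, it plausibly looks like:

/* Assumed shape, pieced together from the removed #if (NEFF == 32) blocks */
static inline unsigned long long example_neff_sign_extend(unsigned long val)
{
#if (NEFF == 32)
	/* Faster: let the compiler sign-extend through the cast chain */
	return (unsigned long long)(signed long long)(signed long)val;
#else
	/* General case: replicate the top effective-address bit */
	return (val & NEFF_SIGN) ? (val | NEFF_MASK) : val;
#endif
}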
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c new file mode 100644 index 000000000000..cef402678f42 --- /dev/null +++ b/arch/sh/mm/flush-sh4.c | |||
@@ -0,0 +1,108 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <asm/mmu_context.h> | ||
3 | #include <asm/cacheflush.h> | ||
4 | |||
5 | /* | ||
6 | * Write back the dirty D-caches, but do not invalidate them. | ||
7 | * | ||
8 | * START: Virtual Address (U0, P1, or P3) | ||
9 | * SIZE: Size of the region. | ||
10 | */ | ||
11 | static void sh4__flush_wback_region(void *start, int size) | ||
12 | { | ||
13 | reg_size_t aligned_start, v, cnt, end; | ||
14 | |||
15 | aligned_start = register_align(start); | ||
16 | v = aligned_start & ~(L1_CACHE_BYTES-1); | ||
17 | end = (aligned_start + size + L1_CACHE_BYTES-1) | ||
18 | & ~(L1_CACHE_BYTES-1); | ||
19 | cnt = (end - v) / L1_CACHE_BYTES; | ||
20 | |||
21 | while (cnt >= 8) { | ||
22 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
23 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
24 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
25 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
26 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
27 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
28 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
29 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
30 | cnt -= 8; | ||
31 | } | ||
32 | |||
33 | while (cnt) { | ||
34 | __ocbwb(v); v += L1_CACHE_BYTES; | ||
35 | cnt--; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Write back the dirty D-caches and invalidate them. | ||
41 | * | ||
42 | * START: Virtual Address (U0, P1, or P3) | ||
43 | * SIZE: Size of the region. | ||
44 | */ | ||
45 | static void sh4__flush_purge_region(void *start, int size) | ||
46 | { | ||
47 | reg_size_t aligned_start, v, cnt, end; | ||
48 | |||
49 | aligned_start = register_align(start); | ||
50 | v = aligned_start & ~(L1_CACHE_BYTES-1); | ||
51 | end = (aligned_start + size + L1_CACHE_BYTES-1) | ||
52 | & ~(L1_CACHE_BYTES-1); | ||
53 | cnt = (end - v) / L1_CACHE_BYTES; | ||
54 | |||
55 | while (cnt >= 8) { | ||
56 | __ocbp(v); v += L1_CACHE_BYTES; | ||
57 | __ocbp(v); v += L1_CACHE_BYTES; | ||
58 | __ocbp(v); v += L1_CACHE_BYTES; | ||
59 | __ocbp(v); v += L1_CACHE_BYTES; | ||
60 | __ocbp(v); v += L1_CACHE_BYTES; | ||
61 | __ocbp(v); v += L1_CACHE_BYTES; | ||
62 | __ocbp(v); v += L1_CACHE_BYTES; | ||
63 | __ocbp(v); v += L1_CACHE_BYTES; | ||
64 | cnt -= 8; | ||
65 | } | ||
66 | while (cnt) { | ||
67 | __ocbp(v); v += L1_CACHE_BYTES; | ||
68 | cnt--; | ||
69 | } | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * No write back, just invalidate. | ||
74 | */ | ||
75 | static void sh4__flush_invalidate_region(void *start, int size) | ||
76 | { | ||
77 | reg_size_t aligned_start, v, cnt, end; | ||
78 | |||
79 | aligned_start = register_align(start); | ||
80 | v = aligned_start & ~(L1_CACHE_BYTES-1); | ||
81 | end = (aligned_start + size + L1_CACHE_BYTES-1) | ||
82 | & ~(L1_CACHE_BYTES-1); | ||
83 | cnt = (end - v) / L1_CACHE_BYTES; | ||
84 | |||
85 | while (cnt >= 8) { | ||
86 | __ocbi(v); v += L1_CACHE_BYTES; | ||
87 | __ocbi(v); v += L1_CACHE_BYTES; | ||
88 | __ocbi(v); v += L1_CACHE_BYTES; | ||
89 | __ocbi(v); v += L1_CACHE_BYTES; | ||
90 | __ocbi(v); v += L1_CACHE_BYTES; | ||
91 | __ocbi(v); v += L1_CACHE_BYTES; | ||
92 | __ocbi(v); v += L1_CACHE_BYTES; | ||
93 | __ocbi(v); v += L1_CACHE_BYTES; | ||
94 | cnt -= 8; | ||
95 | } | ||
96 | |||
97 | while (cnt) { | ||
98 | __ocbi(v); v += L1_CACHE_BYTES; | ||
99 | cnt--; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | void __init sh4__flush_region_init(void) | ||
104 | { | ||
105 | __flush_wback_region = sh4__flush_wback_region; | ||
106 | __flush_invalidate_region = sh4__flush_invalidate_region; | ||
107 | __flush_purge_region = sh4__flush_purge_region; | ||
108 | } | ||
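All three region flushers above share the same walk: align start and end to L1 cache lines, then issue one cache operation per line, manually unrolled by eight to amortize loop overhead. Only the instruction differs (__ocbwb, __ocbp, __ocbi). Since those are presumably inline-asm macros rather than addressable functions, the factored form below is a sketch of the structure, not a drop-in refactor:

/* Structure sketch; OP stands in for __ocbwb/__ocbp/__ocbi */
#define example_cache_op_region(OP, start, size)			\
do {									\
	reg_size_t __s = register_align(start);				\
	reg_size_t __v = __s & ~(L1_CACHE_BYTES - 1);			\
	reg_size_t __e = (__s + (size) + L1_CACHE_BYTES - 1)		\
			 & ~(L1_CACHE_BYTES - 1);			\
	while (__v < __e) {						\
		OP(__v);		/* one op per cache line */	\
		__v += L1_CACHE_BYTES;					\
	}								\
} while (0)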
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index fe532aeaa16d..0a9b4d855bc9 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -210,6 +210,9 @@ void __init mem_init(void) | |||
210 | high_memory = node_high_memory; | 210 | high_memory = node_high_memory; |
211 | } | 211 | } |
212 | 212 | ||
213 | /* Set this up early, so we can take care of the zero page */ | ||
214 | cpu_cache_init(); | ||
215 | |||
213 | /* clear the zero-page */ | 216 | /* clear the zero-page */ |
214 | memset(empty_zero_page, 0, PAGE_SIZE); | 217 | memset(empty_zero_page, 0, PAGE_SIZE); |
215 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | 218 | __flush_wback_region(empty_zero_page, PAGE_SIZE); |
@@ -230,8 +233,6 @@ void __init mem_init(void) | |||
230 | datasize >> 10, | 233 | datasize >> 10, |
231 | initsize >> 10); | 234 | initsize >> 10); |
232 | 235 | ||
233 | p3_cache_init(); | ||
234 | |||
235 | /* Initialize the vDSO */ | 236 | /* Initialize the vDSO */ |
236 | vsyscall_init(); | 237 | vsyscall_init(); |
237 | } | 238 | } |
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c new file mode 100644 index 000000000000..3eecf0d42f1a --- /dev/null +++ b/arch/sh/mm/kmap.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/kmap.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 - 2009 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/highmem.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | |||
18 | #define kmap_get_fixmap_pte(vaddr) \ | ||
19 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) | ||
20 | |||
21 | static pte_t *kmap_coherent_pte; | ||
22 | |||
23 | void __init kmap_coherent_init(void) | ||
24 | { | ||
25 | unsigned long vaddr; | ||
26 | |||
27 | if (!boot_cpu_data.dcache.n_aliases) | ||
28 | return; | ||
29 | |||
30 | /* cache the first coherent kmap pte */ | ||
31 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||
32 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||
33 | } | ||
34 | |||
35 | void *kmap_coherent(struct page *page, unsigned long addr) | ||
36 | { | ||
37 | enum fixed_addresses idx; | ||
38 | unsigned long vaddr, flags; | ||
39 | pte_t pte; | ||
40 | |||
41 | BUG_ON(test_bit(PG_dcache_dirty, &page->flags)); | ||
42 | |||
43 | inc_preempt_count(); | ||
44 | |||
45 | idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT; | ||
46 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); | ||
47 | pte = mk_pte(page, PAGE_KERNEL); | ||
48 | |||
49 | local_irq_save(flags); | ||
50 | flush_tlb_one(get_asid(), vaddr); | ||
51 | local_irq_restore(flags); | ||
52 | |||
53 | update_mmu_cache(NULL, vaddr, pte); | ||
54 | |||
55 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | ||
56 | |||
57 | return (void *)vaddr; | ||
58 | } | ||
59 | |||
60 | void kunmap_coherent(void) | ||
61 | { | ||
62 | dec_preempt_count(); | ||
63 | preempt_check_resched(); | ||
64 | } | ||
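kmap_coherent() picks the fixmap slot whose virtual colour (the alias_mask bits) matches the user address, so kernel-side accesses hit the same cache sets the user mapping will. Typical use mirrors copy_to_user_page() in cache.c above; a minimal self-contained sketch (the wrapper name is invented):

/* Write into a page through a colour-matched kernel mapping */
static void example_write_coherent(struct page *page, unsigned long vaddr,
				   const void *src, unsigned long len)
{
	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

	memcpy(vto, src, len);
	kunmap_coherent();
}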
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 1b5fdfb4e0c2..d2984fa42d3d 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c | |||
@@ -14,10 +14,10 @@ | |||
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | 16 | ||
17 | #ifdef CONFIG_MMU | ||
18 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | 17 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ |
19 | EXPORT_SYMBOL(shm_align_mask); | 18 | EXPORT_SYMBOL(shm_align_mask); |
20 | 19 | ||
20 | #ifdef CONFIG_MMU | ||
21 | /* | 21 | /* |
22 | * To avoid cache aliases, we map the shared page with same color. | 22 | * To avoid cache aliases, we map the shared page with same color. |
23 | */ | 23 | */ |
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/nommu.c index 71c742b5aee3..51b54037216f 100644 --- a/arch/sh/mm/tlb-nommu.c +++ b/arch/sh/mm/nommu.c | |||
@@ -1,20 +1,41 @@ | |||
1 | /* | 1 | /* |
2 | * arch/sh/mm/tlb-nommu.c | 2 | * arch/sh/mm/nommu.c |
3 | * | 3 | * |
4 | * TLB Operations for MMUless SH. | 4 | * Various helper routines and stubs for MMUless SH. |
5 | * | 5 | * |
6 | * Copyright (C) 2002 Paul Mundt | 6 | * Copyright (C) 2002 - 2009 Paul Mundt |
7 | * | 7 | * |
8 | * Released under the terms of the GNU GPL v2.0. | 8 | * Released under the terms of the GNU GPL v2.0. |
9 | */ | 9 | */ |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | ||
12 | #include <linux/string.h> | ||
11 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
12 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
13 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/page.h> | ||
17 | #include <asm/uaccess.h> | ||
14 | 18 | ||
15 | /* | 19 | /* |
16 | * Nothing too terribly exciting here... | 20 | * Nothing too terribly exciting here...
17 | */ | 21 | */ |
22 | void copy_page(void *to, void *from) | ||
23 | { | ||
24 | memcpy(to, from, PAGE_SIZE); | ||
25 | } | ||
26 | |||
27 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) | ||
28 | { | ||
29 | memcpy(to, from, n); | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | __kernel_size_t __clear_user(void *to, __kernel_size_t n) | ||
34 | { | ||
35 | memset(to, 0, n); | ||
36 | return 0; | ||
37 | } | ||
38 | |||
18 | void local_flush_tlb_all(void) | 39 | void local_flush_tlb_all(void) |
19 | { | 40 | { |
20 | BUG(); | 41 | BUG(); |
@@ -46,8 +67,21 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
46 | BUG(); | 67 | BUG(); |
47 | } | 68 | } |
48 | 69 | ||
49 | void update_mmu_cache(struct vm_area_struct * vma, | 70 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
50 | unsigned long address, pte_t pte) | 71 | { |
72 | } | ||
73 | |||
74 | void __init kmap_coherent_init(void) | ||
75 | { | ||
76 | } | ||
77 | |||
78 | void *kmap_coherent(struct page *page, unsigned long addr) | ||
79 | { | ||
80 | BUG(); | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | void kunmap_coherent(void) | ||
51 | { | 85 | { |
52 | BUG(); | 86 | BUG(); |
53 | } | 87 | } |
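The uaccess stubs keep the standard return convention: the result is the number of bytes left uncopied, so 0 means success even though nommu can never fault. Callers therefore stay identical to the MMU build; a hedged caller sketch (example_fetch_word() is not part of this diff):

/* Illustrative caller relying on the bytes-left-uncopied convention */
static int example_fetch_word(void *dst, const void *src)
{
	if (__copy_user(dst, src, sizeof(long)) != 0)
		return -EFAULT;	/* some bytes were left uncopied */

	return 0;
}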
diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c deleted file mode 100644 index 91ed4e695ff7..000000000000 --- a/arch/sh/mm/pg-nommu.c +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-nommu.c | ||
3 | * | ||
4 | * clear_page()/copy_page() implementation for MMUless SH. | ||
5 | * | ||
6 | * Copyright (C) 2003 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | |||
18 | void copy_page(void *to, void *from) | ||
19 | { | ||
20 | memcpy(to, from, PAGE_SIZE); | ||
21 | } | ||
22 | |||
23 | void clear_page(void *to) | ||
24 | { | ||
25 | memset(to, 0, PAGE_SIZE); | ||
26 | } | ||
27 | |||
28 | __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) | ||
29 | { | ||
30 | memcpy(to, from, n); | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | __kernel_size_t __clear_user(void *to, __kernel_size_t n) | ||
35 | { | ||
36 | memset(to, 0, n); | ||
37 | return 0; | ||
38 | } | ||
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c deleted file mode 100644 index 2fe14da1f839..000000000000 --- a/arch/sh/mm/pg-sh4.c +++ /dev/null | |||
@@ -1,146 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-sh4.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
5 | * Copyright (C) 2002 - 2007 Paul Mundt | ||
6 | * | ||
7 | * Released under the terms of the GNU GPL v2.0. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mutex.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/highmem.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | |||
18 | #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) | ||
19 | |||
20 | #define kmap_get_fixmap_pte(vaddr) \ | ||
21 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) | ||
22 | |||
23 | static pte_t *kmap_coherent_pte; | ||
24 | |||
25 | void __init kmap_coherent_init(void) | ||
26 | { | ||
27 | unsigned long vaddr; | ||
28 | |||
29 | /* cache the first coherent kmap pte */ | ||
30 | vaddr = __fix_to_virt(FIX_CMAP_BEGIN); | ||
31 | kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); | ||
32 | } | ||
33 | |||
34 | static inline void *kmap_coherent(struct page *page, unsigned long addr) | ||
35 | { | ||
36 | enum fixed_addresses idx; | ||
37 | unsigned long vaddr, flags; | ||
38 | pte_t pte; | ||
39 | |||
40 | inc_preempt_count(); | ||
41 | |||
42 | idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT; | ||
43 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); | ||
44 | pte = mk_pte(page, PAGE_KERNEL); | ||
45 | |||
46 | local_irq_save(flags); | ||
47 | flush_tlb_one(get_asid(), vaddr); | ||
48 | local_irq_restore(flags); | ||
49 | |||
50 | update_mmu_cache(NULL, vaddr, pte); | ||
51 | |||
52 | set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); | ||
53 | |||
54 | return (void *)vaddr; | ||
55 | } | ||
56 | |||
57 | static inline void kunmap_coherent(struct page *page) | ||
58 | { | ||
59 | dec_preempt_count(); | ||
60 | preempt_check_resched(); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * clear_user_page | ||
65 | * @to: P1 address | ||
66 | * @address: U0 address to be mapped | ||
67 | * @page: page (virt_to_page(to)) | ||
68 | */ | ||
69 | void clear_user_page(void *to, unsigned long address, struct page *page) | ||
70 | { | ||
71 | __set_bit(PG_mapped, &page->flags); | ||
72 | |||
73 | clear_page(to); | ||
74 | if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS)) | ||
75 | __flush_wback_region(to, PAGE_SIZE); | ||
76 | } | ||
77 | |||
78 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
79 | unsigned long vaddr, void *dst, const void *src, | ||
80 | unsigned long len) | ||
81 | { | ||
82 | void *vto; | ||
83 | |||
84 | __set_bit(PG_mapped, &page->flags); | ||
85 | |||
86 | vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
87 | memcpy(vto, src, len); | ||
88 | kunmap_coherent(vto); | ||
89 | |||
90 | if (vma->vm_flags & VM_EXEC) | ||
91 | flush_cache_page(vma, vaddr, page_to_pfn(page)); | ||
92 | } | ||
93 | |||
94 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | ||
95 | unsigned long vaddr, void *dst, const void *src, | ||
96 | unsigned long len) | ||
97 | { | ||
98 | void *vfrom; | ||
99 | |||
100 | __set_bit(PG_mapped, &page->flags); | ||
101 | |||
102 | vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); | ||
103 | memcpy(dst, vfrom, len); | ||
104 | kunmap_coherent(vfrom); | ||
105 | } | ||
106 | |||
107 | void copy_user_highpage(struct page *to, struct page *from, | ||
108 | unsigned long vaddr, struct vm_area_struct *vma) | ||
109 | { | ||
110 | void *vfrom, *vto; | ||
111 | |||
112 | __set_bit(PG_mapped, &to->flags); | ||
113 | |||
114 | vto = kmap_atomic(to, KM_USER1); | ||
115 | vfrom = kmap_coherent(from, vaddr); | ||
116 | copy_page(vto, vfrom); | ||
117 | kunmap_coherent(vfrom); | ||
118 | |||
119 | if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS)) | ||
120 | __flush_wback_region(vto, PAGE_SIZE); | ||
121 | |||
122 | kunmap_atomic(vto, KM_USER1); | ||
123 | /* Make sure this page is visible on other CPUs too before using it */ | ||
124 | smp_wmb(); | ||
125 | } | ||
126 | EXPORT_SYMBOL(copy_user_highpage); | ||
127 | |||
128 | /* | ||
129 | * For SH-4, we have our own implementation for ptep_get_and_clear | ||
130 | */ | ||
131 | pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
132 | { | ||
133 | pte_t pte = *ptep; | ||
134 | |||
135 | pte_clear(mm, addr, ptep); | ||
136 | if (!pte_not_present(pte)) { | ||
137 | unsigned long pfn = pte_pfn(pte); | ||
138 | if (pfn_valid(pfn)) { | ||
139 | struct page *page = pfn_to_page(pfn); | ||
140 | struct address_space *mapping = page_mapping(page); | ||
141 | if (!mapping || !mapping_writably_mapped(mapping)) | ||
142 | __clear_bit(PG_mapped, &page->flags); | ||
143 | } | ||
144 | } | ||
145 | return pte; | ||
146 | } | ||
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c deleted file mode 100644 index eaf25147194c..000000000000 --- a/arch/sh/mm/pg-sh7705.c +++ /dev/null | |||
@@ -1,138 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/pg-sh7705.c | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | ||
5 | * Copyright (C) 2004 Alex Song | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/mman.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/threads.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <asm/addrspace.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/processor.h> | ||
22 | #include <asm/cache.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/mmu_context.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | |||
29 | static inline void __flush_purge_virtual_region(void *p1, void *virt, int size) | ||
30 | { | ||
31 | unsigned long v; | ||
32 | unsigned long begin, end; | ||
33 | unsigned long p1_begin; | ||
34 | |||
35 | |||
36 | begin = L1_CACHE_ALIGN((unsigned long)virt); | ||
37 | end = L1_CACHE_ALIGN((unsigned long)virt + size); | ||
38 | |||
39 | p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1); | ||
40 | |||
41 | /* do this the slow way as we may not have TLB entries | ||
42 | * for virt yet. */ | ||
43 | for (v = begin; v < end; v += L1_CACHE_BYTES) { | ||
44 | unsigned long p; | ||
45 | unsigned long ways, addr; | ||
46 | |||
47 | p = __pa(p1_begin); | ||
48 | |||
49 | ways = current_cpu_data.dcache.ways; | ||
50 | addr = CACHE_OC_ADDRESS_ARRAY; | ||
51 | |||
52 | do { | ||
53 | unsigned long data; | ||
54 | |||
55 | addr |= (v & current_cpu_data.dcache.entry_mask); | ||
56 | |||
57 | data = ctrl_inl(addr); | ||
58 | if ((data & CACHE_PHYSADDR_MASK) == | ||
59 | (p & CACHE_PHYSADDR_MASK)) { | ||
60 | data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID); | ||
61 | ctrl_outl(data, addr); | ||
62 | } | ||
63 | |||
64 | addr += current_cpu_data.dcache.way_incr; | ||
65 | } while (--ways); | ||
66 | |||
67 | p1_begin += L1_CACHE_BYTES; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * clear_user_page | ||
73 | * @to: P1 address | ||
74 | * @address: U0 address to be mapped | ||
75 | */ | ||
76 | void clear_user_page(void *to, unsigned long address, struct page *pg) | ||
77 | { | ||
78 | struct page *page = virt_to_page(to); | ||
79 | |||
80 | __set_bit(PG_mapped, &page->flags); | ||
81 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { | ||
82 | clear_page(to); | ||
83 | __flush_wback_region(to, PAGE_SIZE); | ||
84 | } else { | ||
85 | __flush_purge_virtual_region(to, | ||
86 | (void *)(address & 0xfffff000), | ||
87 | PAGE_SIZE); | ||
88 | clear_page(to); | ||
89 | __flush_wback_region(to, PAGE_SIZE); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * copy_user_page | ||
95 | * @to: P1 address | ||
96 | * @from: P1 address | ||
97 | * @address: U0 address to be mapped | ||
98 | */ | ||
99 | void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) | ||
100 | { | ||
101 | struct page *page = virt_to_page(to); | ||
102 | |||
103 | |||
104 | __set_bit(PG_mapped, &page->flags); | ||
105 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) { | ||
106 | copy_page(to, from); | ||
107 | __flush_wback_region(to, PAGE_SIZE); | ||
108 | } else { | ||
109 | __flush_purge_virtual_region(to, | ||
110 | (void *)(address & 0xfffff000), | ||
111 | PAGE_SIZE); | ||
112 | copy_page(to, from); | ||
113 | __flush_wback_region(to, PAGE_SIZE); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * For SH7705, we have our own implementation for ptep_get_and_clear | ||
119 | * Copied from pg-sh4.c | ||
120 | */ | ||
121 | pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
122 | { | ||
123 | pte_t pte = *ptep; | ||
124 | |||
125 | pte_clear(mm, addr, ptep); | ||
126 | if (!pte_not_present(pte)) { | ||
127 | unsigned long pfn = pte_pfn(pte); | ||
128 | if (pfn_valid(pfn)) { | ||
129 | struct page *page = pfn_to_page(pfn); | ||
130 | struct address_space *mapping = page_mapping(page); | ||
131 | if (!mapping || !mapping_writably_mapped(mapping)) | ||
132 | __clear_bit(PG_mapped, &page->flags); | ||
133 | } | ||
134 | } | ||
135 | |||
136 | return pte; | ||
137 | } | ||
138 | |||
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 2aab3ea934d7..409b7c2b4b9d 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c | |||
@@ -16,34 +16,16 @@ | |||
16 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | 18 | ||
19 | void update_mmu_cache(struct vm_area_struct * vma, | 19 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
20 | unsigned long address, pte_t pte) | ||
21 | { | 20 | { |
22 | unsigned long flags; | 21 | unsigned long flags, pteval, vpn; |
23 | unsigned long pteval; | ||
24 | unsigned long vpn; | ||
25 | 22 | ||
26 | /* Ptrace may call this routine. */ | 23 | /* |
24 | * Handle the debugger faulting pages in for the debuggee.
25 | */ | ||
27 | if (vma && current->active_mm != vma->vm_mm) | 26 | if (vma && current->active_mm != vma->vm_mm) |
28 | return; | 27 | return; |
29 | 28 | ||
30 | #ifndef CONFIG_CACHE_OFF | ||
31 | { | ||
32 | unsigned long pfn = pte_pfn(pte); | ||
33 | |||
34 | if (pfn_valid(pfn)) { | ||
35 | struct page *page = pfn_to_page(pfn); | ||
36 | |||
37 | if (!test_bit(PG_mapped, &page->flags)) { | ||
38 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
39 | __flush_wback_region((void *)P1SEGADDR(phys), | ||
40 | PAGE_SIZE); | ||
41 | __set_bit(PG_mapped, &page->flags); | ||
42 | } | ||
43 | } | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | local_irq_save(flags); | 29 | local_irq_save(flags); |
48 | 30 | ||
49 | /* Set PTEH register */ | 31 | /* Set PTEH register */ |
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 17cb7c3adf22..ace8e6d2f59d 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c | |||
@@ -27,32 +27,16 @@ | |||
27 | #include <asm/mmu_context.h> | 27 | #include <asm/mmu_context.h> |
28 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
29 | 29 | ||
30 | void update_mmu_cache(struct vm_area_struct * vma, | 30 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
31 | unsigned long address, pte_t pte) | ||
32 | { | 31 | { |
33 | unsigned long flags; | 32 | unsigned long flags, pteval, vpn; |
34 | unsigned long pteval; | ||
35 | unsigned long vpn; | ||
36 | 33 | ||
37 | /* Ptrace may call this routine. */ | 34 | /* |
35 | * Handle the debugger faulting pages in for the debuggee.
36 | */ | ||
38 | if (vma && current->active_mm != vma->vm_mm) | 37 | if (vma && current->active_mm != vma->vm_mm) |
39 | return; | 38 | return; |
40 | 39 | ||
41 | #if defined(CONFIG_SH7705_CACHE_32KB) | ||
42 | { | ||
43 | struct page *page = pte_page(pte); | ||
44 | unsigned long pfn = pte_pfn(pte); | ||
45 | |||
46 | if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) { | ||
47 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
48 | |||
49 | __flush_wback_region((void *)P1SEGADDR(phys), | ||
50 | PAGE_SIZE); | ||
51 | __set_bit(PG_mapped, &page->flags); | ||
52 | } | ||
53 | } | ||
54 | #endif | ||
55 | |||
56 | local_irq_save(flags); | 40 | local_irq_save(flags); |
57 | 41 | ||
58 | /* Set PTEH register */ | 42 | /* Set PTEH register */ |
@@ -93,4 +77,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
93 | for (i = 0; i < ways; i++) | 77 | for (i = 0; i < ways; i++) |
94 | ctrl_outl(data, addr + (i << 8)); | 78 | ctrl_outl(data, addr + (i << 8)); |
95 | } | 79 | } |
96 | |||
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index fd0d11f1a81c..8cf550e2570f 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c | |||
@@ -15,34 +15,16 @@ | |||
15 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
16 | #include <asm/cacheflush.h> | 16 | #include <asm/cacheflush.h> |
17 | 17 | ||
18 | void update_mmu_cache(struct vm_area_struct * vma, | 18 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
19 | unsigned long address, pte_t pte) | ||
20 | { | 19 | { |
21 | unsigned long flags; | 20 | unsigned long flags, pteval, vpn; |
22 | unsigned long pteval; | ||
23 | unsigned long vpn; | ||
24 | 21 | ||
25 | /* Ptrace may call this routine. */ | 22 | /* |
23 | * Handle the debugger faulting pages in for the debuggee.
24 | */ | ||
26 | if (vma && current->active_mm != vma->vm_mm) | 25 | if (vma && current->active_mm != vma->vm_mm) |
27 | return; | 26 | return; |
28 | 27 | ||
29 | #ifndef CONFIG_CACHE_OFF | ||
30 | { | ||
31 | unsigned long pfn = pte_pfn(pte); | ||
32 | |||
33 | if (pfn_valid(pfn)) { | ||
34 | struct page *page = pfn_to_page(pfn); | ||
35 | |||
36 | if (!test_bit(PG_mapped, &page->flags)) { | ||
37 | unsigned long phys = pte_val(pte) & PTE_PHYS_MASK; | ||
38 | __flush_wback_region((void *)P1SEGADDR(phys), | ||
39 | PAGE_SIZE); | ||
40 | __set_bit(PG_mapped, &page->flags); | ||
41 | } | ||
42 | } | ||
43 | } | ||
44 | #endif | ||
45 | |||
46 | local_irq_save(flags); | 28 | local_irq_save(flags); |
47 | 29 | ||
48 | /* Set PTEH register */ | 30 | /* Set PTEH register */ |
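With tlb-pteaex.c, tlb-sh3.c and tlb-sh4.c all renamed to __update_tlb(), the per-variant update_mmu_cache() definitions disappear. The generic wrapper is not shown in this diff, but the split suggests it simply chains the two halves: the cache side from cache.c and the TLB side from the variant code. An assumed sketch:

/* Assumed generic wrapper (defined outside this diff) */
static inline void example_update_mmu_cache(struct vm_area_struct *vma,
					    unsigned long address, pte_t pte)
{
	__update_cache(vma, address, pte);	/* arch/sh/mm/cache.c */
	__update_tlb(vma, address, pte);	/* per-variant tlb-*.c */
}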
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c index dae131243bcc..fdb64e41ec50 100644 --- a/arch/sh/mm/tlb-sh5.c +++ b/arch/sh/mm/tlb-sh5.c | |||
@@ -117,26 +117,15 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry) | |||
117 | * Load up a virtual<->physical translation for @eaddr<->@paddr in the | 117 | * Load up a virtual<->physical translation for @eaddr<->@paddr in the |
118 | * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). | 118 | * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). |
119 | */ | 119 | */ |
120 | inline void sh64_setup_tlb_slot(unsigned long long config_addr, | 120 | void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, |
121 | unsigned long eaddr, | 121 | unsigned long asid, unsigned long paddr) |
122 | unsigned long asid, | ||
123 | unsigned long paddr) | ||
124 | { | 122 | { |
125 | unsigned long long pteh, ptel; | 123 | unsigned long long pteh, ptel; |
126 | 124 | ||
127 | /* Sign extension */ | 125 | pteh = neff_sign_extend(eaddr); |
128 | #if (NEFF == 32) | ||
129 | pteh = (unsigned long long)(signed long long)(signed long) eaddr; | ||
130 | #else | ||
131 | #error "Can't sign extend more than 32 bits yet" | ||
132 | #endif | ||
133 | pteh &= PAGE_MASK; | 126 | pteh &= PAGE_MASK; |
134 | pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; | 127 | pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; |
135 | #if (NEFF == 32) | 128 | ptel = neff_sign_extend(paddr); |
136 | ptel = (unsigned long long)(signed long long)(signed long) paddr; | ||
137 | #else | ||
138 | #error "Can't sign extend more than 32 bits yet" | ||
139 | #endif | ||
140 | ptel &= PAGE_MASK; | 129 | ptel &= PAGE_MASK; |
141 | ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); | 130 | ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); |
142 | 131 | ||
@@ -152,5 +141,5 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr, | |||
152 | * | 141 | * |
153 | * Teardown any existing mapping in the TLB slot @config_addr. | 142 | * Teardown any existing mapping in the TLB slot @config_addr. |
154 | */ | 143 | */ |
155 | inline void sh64_teardown_tlb_slot(unsigned long long config_addr) | 144 | void sh64_teardown_tlb_slot(unsigned long long config_addr) |
156 | __attribute__ ((alias("__flush_tlb_slot"))); | 145 | __attribute__ ((alias("__flush_tlb_slot"))); |
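sh64_teardown_tlb_slot() is not a wrapper that calls __flush_tlb_slot(); the alias attribute makes the two symbols share one definition, so there is no extra call frame. A standalone illustration of the mechanism (GCC-specific; both names here are invented):

/* Two linker symbols, one body; calling either runs example_impl() */
void example_impl(void)
{
	/* ... the single shared body ... */
}

void example_public(void) __attribute__((alias("example_impl")));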
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 3ce40ea34824..2dcc48528f7a 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c | |||
@@ -329,22 +329,6 @@ do_sigbus: | |||
329 | goto no_context; | 329 | goto no_context; |
330 | } | 330 | } |
331 | 331 | ||
332 | void update_mmu_cache(struct vm_area_struct * vma, | ||
333 | unsigned long address, pte_t pte) | ||
334 | { | ||
335 | /* | ||
336 | * This appears to get called once for every pte entry that gets | ||
337 | * established => I don't think it's efficient to try refilling the | ||
338 | * TLBs with the pages - some may not get accessed even. Also, for | ||
339 | * executable pages, it is impossible to determine reliably here which | ||
340 | * TLB they should be mapped into (or both even). | ||
341 | * | ||
342 | * So, just do nothing here and handle faults on demand. In the | ||
343 | * TLBMISS handling case, the refill is now done anyway after the pte | ||
344 | * has been fixed up, so that deals with most useful cases. | ||
345 | */ | ||
346 | } | ||
347 | |||
348 | void local_flush_tlb_one(unsigned long asid, unsigned long page) | 332 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
349 | { | 333 | { |
350 | unsigned long long match, pteh=0, lpage; | 334 | unsigned long long match, pteh=0, lpage; |
@@ -353,7 +337,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
353 | /* | 337 | /* |
354 | * Sign-extend based on neff. | 338 | * Sign-extend based on neff. |
355 | */ | 339 | */ |
356 | lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page; | 340 | lpage = neff_sign_extend(page); |
357 | match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; | 341 | match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; |
358 | match |= lpage; | 342 | match |= lpage; |
359 | 343 | ||
@@ -482,3 +466,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
482 | /* FIXME: Optimize this later.. */ | 466 | /* FIXME: Optimize this later.. */ |
483 | flush_tlb_all(); | 467 | flush_tlb_all(); |
484 | } | 468 | } |
469 | |||
470 | void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | ||
471 | { | ||
472 | } | ||