author		Rob Herring <robh@kernel.org>	2014-05-13 19:34:35 -0400
committer	Rob Herring <robh@kernel.org>	2014-05-13 19:34:35 -0400
commit		eafd370dfe487facfdef499057f4eac9aa0b4bf5
tree		0925a67cd658cdf4811f49b4cd2073f663166bd0 /arch/arm/include/asm
parent		c3fc952d2fbe3ec78defd70cf73d5d76d27092ec
parent		fb2caa50fbacd21719a90dd66b617ce3cb4fd6d7
Merge branch 'dt-bus-name' into for-next
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--	arch/arm/include/asm/cputype.h	14
-rw-r--r--	arch/arm/include/asm/div64.h	 2
-rw-r--r--	arch/arm/include/asm/mcpm.h	 7
-rw-r--r--	arch/arm/include/asm/tlb.h	12
4 files changed, 26 insertions, 9 deletions
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index c651e3b26ec7..4764344367d4 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -222,22 +222,22 @@ static inline int cpu_is_xsc3(void)
 #endif
 
 /*
- * Marvell's PJ4 core is based on V7 version. It has some modification
- * for coprocessor setting. For this reason, we need a way to distinguish
- * it.
+ * Marvell's PJ4 and PJ4B cores are based on V7 version,
+ * but require a special sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
  */
-#ifndef CONFIG_CPU_PJ4
-#define cpu_is_pj4()	0
-#else
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 static inline int cpu_is_pj4(void)
 {
 	unsigned int id;
 
 	id = read_cpuid_id();
-	if ((id & 0xfffffff0) == 0x562f5840)
+	if ((id & 0xff0fff00) == 0x560f5800)
 		return 1;
 
 	return 0;
 }
+#else
+#define cpu_is_pj4()	0
 #endif
 #endif
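
The new check widens the old exact-ID compare (mask 0xfffffff0) to a mask that discards the MIDR variant field and the low ID bits, so a single compare now covers both PJ4 and PJ4B parts. A minimal standalone sketch of the effect, using invented sample ID values (not readings from real silicon):

#include <stdio.h>

/* Same test as the new kernel check, lifted out for illustration. */
static int is_pj4_family(unsigned int id)
{
	return (id & 0xff0fff00) == 0x560f5800;
}

int main(void)
{
	/* hypothetical MIDR values: variant/revision bits differ */
	unsigned int ids[] = { 0x562f5842, 0x561f5811, 0x410fc090 };

	for (int i = 0; i < 3; i++)
		printf("%#010x -> %s\n", ids[i],
		       is_pj4_family(ids[i]) ? "PJ4/PJ4B" : "other");
	return 0;
}

The first two values differ only in bits the new mask discards, so both match; the old mask accepted only IDs of the form 0x562f584x.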
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index 191ada6e4d2d..662c7bd06108 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -156,7 +156,7 @@
 			/* Select the best insn combination to perform the   */	\
 			/* actual __m * __n / (__p << 64) operation.         */	\
 			if (!__c) {					\
-				asm (	"umull	%Q0, %R0, %1, %Q2\n\t"	\
+				asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"	\
 					"mov	%Q0, #0"		\
 					: "=&r" (__res)			\
 					: "r" (__m), "r" (__n)		\
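
The one-character fix matters because __m is a 64-bit value: in GCC's ARM inline assembly, %Qn names the register holding the low 32 bits of a 64-bit operand and %Rn the high 32 bits, while a bare %n only happens to pick the low half on little-endian targets. A self-contained sketch of the corrected pattern (ARM32 only, built with an arm GCC toolchain):

#include <stdint.h>
#include <stdio.h>

/* Full 64-bit product of the low words of two 64-bit operands,
 * mirroring the fixed kernel line: %Q1/%Q2 explicitly select the
 * low-word registers of the 64-bit inputs. */
static uint64_t umull_low(uint64_t m, uint64_t n)
{
	uint64_t res;

	asm ("umull	%Q0, %R0, %Q1, %Q2"
	     : "=&r" (res)
	     : "r" (m), "r" (n));
	return res;
}

int main(void)
{
	/* low words are 3 and 5, so the product is 15 */
	printf("%llu\n", (unsigned long long)umull_low(0x100000003ULL, 5ULL));
	return 0;
}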
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 608516ebabfe..a5ff410dcdb6 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -54,6 +54,13 @@ void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
  */
 
 /**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
+/**
  * mcpm_cpu_power_up - make given CPU in given cluster runable
  *
  * @cpu: CPU number within given cluster
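
mcpm_is_available() lets platform code probe whether an MCPM backend ever registered before relying on the other entry points. A hypothetical caller sketch (the hotplug hook name is invented for illustration; mcpm_cpu_power_down() is the existing MCPM API):

#include <asm/mcpm.h>

/* Invented platform CPU-offline hook, for illustration only. */
static void example_cpu_die(unsigned int cpu)
{
	if (!mcpm_is_available())
		return;			/* no MCPM backend registered */

	/* powers this CPU down; does not return on success */
	mcpm_cpu_power_down();
}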
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 0baf7f0d9394..f1a0dace3efe 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 	}
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
 	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 	tlb->nr = 0;
 	if (tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu_tlbonly(tlb);
+	tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {