aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/mm/srmmu.c
diff options
context:
space:
mode:
authorSam Ravnborg <sam@ravnborg.org>2016-04-22 13:41:06 -0400
committerDavid S. Miller <davem@davemloft.net>2016-05-20 20:55:42 -0400
commit6e6e41879e07daccb967bc75a31f29689354d11b (patch)
treed73fa35cee06dedad8540a29e922559c5f2abb01 /arch/sparc/mm/srmmu.c
parent3c46e2d61cfc389dc6e000489fcfcaea805710e0 (diff)
sparc32: fix build with STRICT_MM_TYPECHECKS
Based on a recent thread on linux-arch (some weeks ago) I decided to check how much work was required to build sparc32 with STRICT_MM_TYPECHECKS enabled. The resulting binary (checked srmmu.o) was to my surprise smaller with STRICT_MM_TYPECHECKS defined than without. As I have no working gear to test sparc32 bits for the moment, I did not enable STRICT_MM_TYPECHECKS - but was tempted to do so. Signed-off-by: Sam Ravnborg <sam@ravnborg.org> Cc: Arnd Bergmann <arnd@arndb.de> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/mm/srmmu.c')
-rw-r--r--arch/sparc/mm/srmmu.c15
1 file changed, 10 insertions, 5 deletions
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5cbc96d801ff..3b1c047e7cf0 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -107,7 +107,12 @@ static inline int srmmu_pmd_none(pmd_t pmd)
107 107
108/* XXX should we hyper_flush_whole_icache here - Anton */ 108/* XXX should we hyper_flush_whole_icache here - Anton */
109static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) 109static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
110{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); } 110{
111 pte_t pte;
112
113 pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
114 set_pte((pte_t *)ctxp, pte);
115}
111 116
112void pmd_set(pmd_t *pmdp, pte_t *ptep) 117void pmd_set(pmd_t *pmdp, pte_t *ptep)
113{ 118{
@@ -116,8 +121,8 @@ void pmd_set(pmd_t *pmdp, pte_t *ptep)
116 121
117 ptp = __nocache_pa((unsigned long) ptep) >> 4; 122 ptp = __nocache_pa((unsigned long) ptep) >> 4;
118 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { 123 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
119 set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); 124 set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
120 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); 125 ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
121 } 126 }
122} 127}
123 128
@@ -128,8 +133,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
128 133
129 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ 134 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
130 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) { 135 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
131 set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp); 136 set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
132 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4); 137 ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
133 } 138 }
134} 139}
135 140