Diffstat (limited to 'arch/sparc/mm/srmmu.c')
-rw-r--r--  arch/sparc/mm/srmmu.c | 19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5cbc96d801ff..c7f2a5295b3a 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -107,17 +107,22 @@ static inline int srmmu_pmd_none(pmd_t pmd)
 
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{
+	pte_t pte;
+
+	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
+	set_pte((pte_t *)ctxp, pte);
+}
 
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
-	ptp = __nocache_pa((unsigned long) ptep) >> 4;
+	ptp = __nocache_pa(ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
 
@@ -128,8 +133,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
 
@@ -911,7 +916,7 @@ void __init srmmu_paging_init(void)
 
 	/* ctx table has to be physically aligned to its size */
 	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
-	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
+	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
 
 	for (i = 0; i < num_contexts; i++)
 		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
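
Note: the common thread in the hunks above is that the raw descriptor word (SRMMU_ET_PTD OR'ed with a physical address shifted right by 4) is now wrapped in __pte() before being handed to set_pte(), and __nocache_pa() is called on the pointer directly instead of on a cast to unsigned long. The wrapper type means a bare integer can no longer be passed where a page-table entry is expected, which is what the removed lines were doing. Below is a minimal stand-alone sketch of the __pte()/set_pte() pattern; the pte_t, __pte(), pte_val() and set_pte() definitions here are simplified stand-ins for illustration, not the actual sparc header definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel types used in the diff above; the
 * real definitions live in the sparc headers and differ in detail. */
typedef struct { unsigned long pte; } pte_t;

#define __pte(x)   ((pte_t) { (x) })    /* wrap a raw word into a typed entry */
#define pte_val(x) ((x).pte)            /* unwrap it again */

#define SRMMU_ET_PTD 0x1                /* "page table descriptor" entry type (assumed value) */

/* Stand-in for set_pte(): store a typed entry into a table slot. */
static void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

int main(void)
{
	pte_t slot;
	unsigned long paddr = 0x12345000UL;     /* example physical address */

	/* Same construction as the patched srmmu_ctxd_set(): physical
	 * address >> 4, OR'ed with the descriptor type, wrapped by __pte()
	 * so only properly typed values reach set_pte(). */
	set_pte(&slot, __pte(SRMMU_ET_PTD | (paddr >> 4)));

	printf("descriptor word: 0x%lx\n", pte_val(slot));
	return 0;
}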