author    David S. Miller <davem@davemloft.net>  2012-05-12 16:39:23 -0400
committer David S. Miller <davem@davemloft.net>  2012-05-12 16:39:23 -0400
commit    62875cff73fbb7c22bdb96e42d2e78cfe8464e06
tree      d79aafc5acd64ce2a3020ce8dbc6533655783405
parent    a3c5c6637bd7ddae34aa0cd8b6baf7cc33a8b163
sparc32: Un-btfixup set_pte, pte_present, mk_pte{_phys,_io}().
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h  50
-rw-r--r--  arch/sparc/mm/srmmu.c                35
2 files changed, 33 insertions(+), 52 deletions(-)
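
For context, BTFIXUP is sparc32's boot-time fixup machinery: calls are declared with BTFIXUPDEF_CALL*() and routed through BTFIXUP_CALL() stubs that get patched at boot to the MMU-specific implementation. This commit drops that indirection for set_pte(), pte_present() and the mk_pte*() helpers, turning them into plain static inline functions. A minimal before/after sketch of the set_pte() conversion, quoted from the hunks below rather than a complete header:

	/* Before: boot-time-patched declaration plus call-site macros. */
	BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
	#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
	#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

	/* After: the SRMMU implementation is called directly. */
	static inline void set_pte(pte_t *ptep, pte_t pteval)
	{
		srmmu_swap((unsigned long *)ptep, pte_val(pteval));
	}
	#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
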
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ac85f9b87a75..62e34d909c36 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -113,11 +113,18 @@ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
 	return value;
 }
 
-static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
 	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
 }
 
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
 static inline int srmmu_device_memory(unsigned long x)
 {
 	return ((x & 0xF0000000) != 0);
@@ -134,18 +141,19 @@ BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
 
 #define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
 
-BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
+static inline int pte_present(pte_t pte)
+{
+	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
+}
 
 static inline int pte_none(pte_t pte)
 {
 	return !pte_val(pte);
 }
 
-#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
-
 static inline void __pte_clear(pte_t *ptep)
 {
-	srmmu_set_pte(ptep, __pte(0));
+	set_pte(ptep, __pte(0));
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -172,7 +180,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 {
 	int i;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
-		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
 }
 
 static inline int pgd_none(pgd_t pgd)
@@ -192,7 +200,7 @@ static inline int pgd_present(pgd_t pgd)
 
 static inline void pgd_clear(pgd_t *pgdp)
 {
-	srmmu_set_pte((pte_t *)pgdp, __pte(0));
+	set_pte((pte_t *)pgdp, __pte(0));
 }
 
 /*
@@ -292,14 +300,20 @@ static inline unsigned long pte_pfn(pte_t pte)
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
+}
 
-BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
-BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
+static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
+{
+	return __pte(((page) >> 4) | pgprot_val(pgprot));
+}
 
-#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
-#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
-#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
+static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{
+	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
+}
 
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
@@ -339,16 +353,6 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
 #define pte_offset_map(d, a)	pte_offset_kernel(d,a)
 #define pte_unmap(pte)		do{}while(0)
 
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-
-BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
-
-#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 struct seq_file;
 BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
 
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 71abcc5d82b6..224db00447c7 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -116,9 +116,6 @@ static inline unsigned long srmmu_pgd_page(pgd_t pgd)
 static inline int srmmu_pte_none(pte_t pte)
 { return !(pte_val(pte) & 0xFFFFFFF); }
 
-static inline int srmmu_pte_present(pte_t pte)
-{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
-
 static inline int srmmu_pmd_none(pmd_t pmd)
 { return !(pmd_val(pmd) & 0xFFFFFFF); }
 
@@ -140,25 +137,12 @@ static inline pte_t srmmu_pte_mkdirty(pte_t pte)
 static inline pte_t srmmu_pte_mkyoung(pte_t pte)
 { return __pte(pte_val(pte) | SRMMU_REF);}
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
-{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
-
-static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
-{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
-
-static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
-{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
-
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
 
 static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
+{ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
 
 static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
@@ -167,7 +151,7 @@ static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
 
 	ptp = __nocache_pa((unsigned long) ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
 	}
 }
@@ -179,7 +163,7 @@ static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
 
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
 	}
 }
@@ -369,7 +353,7 @@ static void __init srmmu_nocache_init(void)
 		if (srmmu_cache_pagetables)
 			pteval |= SRMMU_CACHE;
 
-		srmmu_set_pte(__nocache_fix(pte), __pte(pteval));
+		set_pte(__nocache_fix(pte), __pte(pteval));
 
 		vaddr += PAGE_SIZE;
 		paddr += PAGE_SIZE;
@@ -534,7 +518,7 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
 	tmp |= (bus_type << 28);
 	tmp |= SRMMU_PRIV;
 	__flush_page_to_ram(virt_addr);
-	srmmu_set_pte(ptep, __pte(tmp));
+	set_pte(ptep, __pte(tmp));
 }
 
 static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
@@ -2070,15 +2054,8 @@ void __init load_mmu(void)
 	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
 #endif
 
-	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
-
 	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);